Thread overview | ||||||||||||||||||||||||||
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
|
August 13, 2017 D outperformed by C++, what am I doing wrong? | ||||
---|---|---|---|---|
| ||||
Hi all, I'm solving below task: given container T and value R return sum of R-ranges over T. An example: input : T=[1,1,1] R=2 output : [2, 1] input : T=[1,2,3] R=1 output : [1,2,3] (see dlang unittests for more examples) Below c++ code compiled with g++-5.4.0 -O2 -std=c++14 runs on my machine in 656 836 us. Below D code compiled with dmd v2.067.1 -O runs on my machine in ~ 14.5 sec. Each language has it's own "way of programming", and as I'm a beginner in D - probably I'm running through bushes instead of highway. Therefore I'd like to ask you, experienced dlang devs, to shed some light on "how to do it dlang-way". C++ code: #include <algorithm> #include <chrono> #include <iostream> #include <iterator> #include <list> #include <map> #include <string> #include <utility> #include <numeric> #include <vector> template<typename T, typename K> std::vector<K> sum_elements(const T& beg, const T& end, std::size_t k, K def) { if (k == 0) { return std::vector<K>{}; } return sum_elements(beg, end, k, def, [](auto &l, auto &r){ return r+l;}); } template<typename T, typename K, class BinaryOp> std::vector<K> sum_elements( const T& beg, const T& end, std::size_t k, K def, BinaryOp op) { std::vector<K> out; out.reserve((std::distance(beg, end) - 1)/k + 1); for (auto it = beg; it!=end; std::advance(it, std::min(static_cast<std::size_t>(std::distance(it, end)), k))) { out.push_back(std::accumulate(it, std::next(it, std::min(static_cast<std::size_t>(std::distance(it, end)), k)), def, op)); } return out; } int main() { std::vector<int> vec; auto size = 1000000; vec.reserve(size); for (int i=0; i < size; ++i) vec.push_back(i); auto beg = std::chrono::system_clock::now(); auto sum = 0; for (int i=0; i < 100; i++) sum += sum_elements(vec.begin(), vec.end(), 2, 0).size(); auto end = std::chrono::system_clock::now(); std::cout << std::chrono::duration_cast<std::chrono::microseconds>(end-beg).count() << std::endl; std::cout << sum << std::endl; return sum; } D code: import std.stdio : 
writeln; import std.algorithm.comparison: min; import std.algorithm.iteration: sum; import core.time: MonoTime, Duration; T[] sum_subranges(T)(T[] input, uint range) { T[] result; if (range == 0) { return result; } for (uint i; i < input.length; i=min(i+range, input.length)) { result ~= sum(input[i..min(i+range, input.length)]); } return result; } unittest { assert(sum_subranges([1,1,1], 2) == [2, 1]); assert(sum_subranges([1,1,1,2,3,3], 2) == [2, 3, 6]); assert(sum_subranges([], 2) == []); assert(sum_subranges([1], 2) == [1]); assert(sum_subranges([1], 0) == []); } int main() { int[1000000] v; for (int i=0; i < 1000000; ++i) v[i] = i; int sum; MonoTime beg = MonoTime.currTime; for (int i=0; i < 100; i++) sum += cast(int)sum_subranges(v,2).length; MonoTime end = MonoTime.currTime; writeln(end-beg); writeln(sum); return sum; } |
August 13, 2017 Re: D outperformed by C++, what am I doing wrong? | ||||
---|---|---|---|---|
| ||||
Posted in reply to amfvcg | On 13/08/2017 7:09 AM, amfvcg wrote:
> Hi all,
> I'm solving below task:
>
> given container T and value R return sum of R-ranges over T. An example:
> input : T=[1,1,1] R=2
> output : [2, 1]
>
> input : T=[1,2,3] R=1
> output : [1,2,3]
> (see dlang unittests for more examples)
>
>
> Below c++ code compiled with g++-5.4.0 -O2 -std=c++14 runs on my machine in 656 836 us.
> Below D code compiled with dmd v2.067.1 -O runs on my machine in ~ 14.5 sec.
>
> Each language has it's own "way of programming", and as I'm a beginner in D - probably I'm running through bushes instead of highway. Therefore I'd like to ask you, experienced dlang devs, to shed some light on "how to do it dlang-way".
>
>
> C++ code:
>
> #include <algorithm>
> #include <chrono>
> #include <iostream>
> #include <iterator>
> #include <list>
> #include <map>
> #include <string>
> #include <utility>
> #include <numeric>
> #include <vector>
>
>
> template<typename T, typename K>
> std::vector<K> sum_elements(const T& beg, const T& end, std::size_t k, K def)
> {
> if (k == 0) {
> return std::vector<K>{};
> }
> return sum_elements(beg, end, k, def, [](auto &l, auto &r){ return r+l;});
> }
>
> template<typename T, typename K, class BinaryOp>
> std::vector<K>
> sum_elements(
> const T& beg,
> const T& end,
> std::size_t k,
> K def,
> BinaryOp op)
> {
> std::vector<K> out;
> out.reserve((std::distance(beg, end) - 1)/k + 1);
> for (auto it = beg; it!=end; std::advance(it, std::min(static_cast<std::size_t>(std::distance(it, end)), k)))
> {
> out.push_back(std::accumulate(it, std::next(it, std::min(static_cast<std::size_t>(std::distance(it, end)), k)), def, op));
> }
> return out;
> }
>
> int main()
> {
> std::vector<int> vec;
> auto size = 1000000;
> vec.reserve(size);
> for (int i=0; i < size; ++i)
> vec.push_back(i);
> auto beg = std::chrono::system_clock::now();
> auto sum = 0;
> for (int i=0; i < 100; i++)
> sum += sum_elements(vec.begin(), vec.end(), 2, 0).size();
> auto end = std::chrono::system_clock::now();
> std::cout << std::chrono::duration_cast<std::chrono::microseconds>(end-beg).count() << std::endl;
> std::cout << sum << std::endl;
>
> return sum;
> }
>
>
> D code:
>
> import std.stdio : writeln;
> import std.algorithm.comparison: min;
> import std.algorithm.iteration: sum;
> import core.time: MonoTime, Duration;
>
>
> T[] sum_subranges(T)(T[] input, uint range)
> {
> T[] result;
> if (range == 0)
> {
> return result;
> }
> for (uint i; i < input.length; i=min(i+range, input.length))
> {
> result ~= sum(input[i..min(i+range, input.length)]);
> }
> return result;
> }
>
> unittest
> {
> assert(sum_subranges([1,1,1], 2) == [2, 1]);
> assert(sum_subranges([1,1,1,2,3,3], 2) == [2, 3, 6]);
> assert(sum_subranges([], 2) == []);
> assert(sum_subranges([1], 2) == [1]);
> assert(sum_subranges([1], 0) == []);
> }
>
>
> int main()
> {
> int[1000000] v;
> for (int i=0; i < 1000000; ++i)
> v[i] = i;
> int sum;
> MonoTime beg = MonoTime.currTime;
> for (int i=0; i < 100; i++)
> sum += cast(int)sum_subranges(v,2).length;
> MonoTime end = MonoTime.currTime;
> writeln(end-beg);
> writeln(sum);
> return sum;
> }
Dmd compiles quickly, but doesn't generate particularly optimized code.
Try ldc or gdc and get back to us about it ;)
|
August 13, 2017 Re: D outperformed by C++, what am I doing wrong? | ||||
---|---|---|---|---|
| ||||
Posted in reply to amfvcg | On Sunday, 13 August 2017 at 06:09:39 UTC, amfvcg wrote:
> Hi all,
> I'm solving below task:
>
> given container T and value R return sum of R-ranges over T. An example:
> input : T=[1,1,1] R=2
> output : [2, 1]
>
> input : T=[1,2,3] R=1
> output : [1,2,3]
> (see dlang unittests for more examples)
>
>
> Below c++ code compiled with g++-5.4.0 -O2 -std=c++14 runs on my machine in 656 836 us.
> Below D code compiled with dmd v2.067.1 -O runs on my machine in ~ 14.5 sec.
If I had to guess, this could be due to the backend and optimizer.
I don't want to go into detail on my thoughts because I am not an expert
on codegen optimization, but I might suggest running your test compiled with
GDC (using identical optimization settings as G++) and ldc2 with similar settings.
|
August 13, 2017 Re: D outperformed by C++, what am I doing wrong? | ||||
---|---|---|---|---|
| ||||
Posted in reply to amfvcg | On Sunday, 13 August 2017 at 06:09:39 UTC, amfvcg wrote:
> Hi all,
> I'm solving below task:
Well, for one thing, you are preallocating in C++ code but not in D.
On my machine, your version of the code completes in 3.175 seconds. Changing it a little reduces it to 0.420s:
T[] result = new T[input.length];
size_t o = 0;
for (uint i; i < input.length; i=min(i+range, input.length))
{
result[o] = sum(input[i..min(i+range, input.length)]);
o++;
}
return result[0..o];
You can also use Appender from std.array.
|
August 13, 2017 Re: D outperformed by C++, what am I doing wrong? | ||||
---|---|---|---|---|
| ||||
Posted in reply to Neia Neutuladh Attachments:
| this works ok for me with ldc compiler, gdc does not work on my arch machine so I can not do comparsion to your c++ versin (clang does not work with your c++ code) import std.stdio : writeln; import std.algorithm.comparison: min; import std.algorithm.iteration: sum; import core.time: MonoTime, Duration; T[] sum_subranges(T)(T[] input, uint range) { import std.array : appender; auto app = appender!(T[])(); if (range == 0) { return app.data; } for (uint i; i < input.length; i=min(i+range, input.length)) { app.put(sum(input[i..min(i+range, input.length)])); } return app.data; } unittest { assert(sum_subranges([1,1,1], 2) == [2, 1]); assert(sum_subranges([1,1,1,2,3,3], 2) == [2, 3, 6]); assert(sum_subranges([], 2) == []); assert(sum_subranges([1], 2) == [1]); assert(sum_subranges([1], 0) == []); } int main() { import std.range : iota, array; auto v = iota(0,1000000).array; int sum; MonoTime beg = MonoTime.currTime; for (int i=0; i < 100; i++) sum += cast(int)sum_subranges(v,2).length; MonoTime end = MonoTime.currTime; writeln(end-beg); writeln(sum); return sum; } On Sun, Aug 13, 2017 at 9:03 AM, Neia Neutuladh via Digitalmars-d-learn < digitalmars-d-learn@puremagic.com> wrote: > On Sunday, 13 August 2017 at 06:09:39 UTC, amfvcg wrote: > >> Hi all, >> I'm solving below task: >> > > Well, for one thing, you are preallocating in C++ code but not in D. > > On my machine, your version of the code completes in 3.175 seconds. Changing it a little reduces it to 0.420s: > > T[] result = new T[input.length]; > size_t o = 0; > for (uint i; i < input.length; i=min(i+range, input.length)) > { > result[o] = sum(input[i..min(i+range, input.length)]); > o++; > } > return result[0..o]; > > You can also use Appender from std.array. > |
August 13, 2017 Re: D outperformed by C++, what am I doing wrong? | ||||
---|---|---|---|---|
| ||||
Attachments:
| Here is more D idiomatic way: import std.stdio : writeln; import std.algorithm.comparison: min; import std.algorithm.iteration: sum; import core.time: MonoTime, Duration; auto sum_subranges(T)(T input, uint range) { import std.array : array; import std.range : chunks, ElementType; import std.algorithm : map; if (range == 0) { return ElementType!(T)[].init; } return input.chunks(range).map!(sum).array; } unittest { assert(sum_subranges([1,1,1], 2) == [2, 1]); assert(sum_subranges([1,1,1,2,3,3], 2) == [2, 3, 6]); assert(sum_subranges([], 2) == []); assert(sum_subranges([1], 2) == [1]); assert(sum_subranges([1], 0) == []); } int main() { import std.range : iota, array; auto v = iota(0,1000000); int sum; MonoTime beg = MonoTime.currTime; for (int i=0; i < 100; i++) sum += cast(int)sum_subranges(v,2).length; MonoTime end = MonoTime.currTime; writeln(end-beg); writeln(sum); return sum; } On Sun, Aug 13, 2017 at 9:13 AM, Daniel Kozak <kozzi11@gmail.com> wrote: > this works ok for me with ldc compiler, gdc does not work on my arch machine so I can not do comparsion to your c++ versin (clang does not work with your c++ code) > > import std.stdio : writeln; > import std.algorithm.comparison: min; > import std.algorithm.iteration: sum; > import core.time: MonoTime, Duration; > > > T[] sum_subranges(T)(T[] input, uint range) > { > import std.array : appender; > auto app = appender!(T[])(); > if (range == 0) > { > return app.data; > } > for (uint i; i < input.length; i=min(i+range, input.length)) > { > app.put(sum(input[i..min(i+range, input.length)])); > } > return app.data; > } > > unittest > { > assert(sum_subranges([1,1,1], 2) == [2, 1]); > assert(sum_subranges([1,1,1,2,3,3], 2) == [2, 3, 6]); > assert(sum_subranges([], 2) == []); > assert(sum_subranges([1], 2) == [1]); > assert(sum_subranges([1], 0) == []); > } > > > int main() > { > import std.range : iota, array; > auto v = iota(0,1000000).array; > int sum; > MonoTime beg = MonoTime.currTime; > for (int i=0; i < 100; 
i++) > sum += cast(int)sum_subranges(v,2).length; > MonoTime end = MonoTime.currTime; > writeln(end-beg); > writeln(sum); > return sum; > } > > On Sun, Aug 13, 2017 at 9:03 AM, Neia Neutuladh via Digitalmars-d-learn < digitalmars-d-learn@puremagic.com> wrote: > >> On Sunday, 13 August 2017 at 06:09:39 UTC, amfvcg wrote: >> >>> Hi all, >>> I'm solving below task: >>> >> >> Well, for one thing, you are preallocating in C++ code but not in D. >> >> On my machine, your version of the code completes in 3.175 seconds. Changing it a little reduces it to 0.420s: >> >> T[] result = new T[input.length]; >> size_t o = 0; >> for (uint i; i < input.length; i=min(i+range, input.length)) >> { >> result[o] = sum(input[i..min(i+range, input.length)]); >> o++; >> } >> return result[0..o]; >> >> You can also use Appender from std.array. >> > > |
August 13, 2017 Re: D outperformed by C++, what am I doing wrong? | ||||
---|---|---|---|---|
| ||||
Posted in reply to Daniel Kozak | On Sunday, 13 August 2017 at 07:30:32 UTC, Daniel Kozak wrote: > Here is more D idiomatic way: > > import std.stdio : writeln; > import std.algorithm.comparison: min; > import std.algorithm.iteration: sum; > import core.time: MonoTime, Duration; > > > auto sum_subranges(T)(T input, uint range) > { > import std.array : array; > import std.range : chunks, ElementType; > import std.algorithm : map; > > if (range == 0) > { > return ElementType!(T)[].init; > } > return input.chunks(range).map!(sum).array; > } > > unittest > { > assert(sum_subranges([1,1,1], 2) == [2, 1]); > assert(sum_subranges([1,1,1,2,3,3], 2) == [2, 3, 6]); > assert(sum_subranges([], 2) == []); > assert(sum_subranges([1], 2) == [1]); > assert(sum_subranges([1], 0) == []); > } > > > int main() > { > import std.range : iota, array; > auto v = iota(0,1000000); > int sum; > MonoTime beg = MonoTime.currTime; > for (int i=0; i < 100; i++) > sum += cast(int)sum_subranges(v,2).length; > MonoTime end = MonoTime.currTime; > writeln(end-beg); > writeln(sum); > return sum; > } > > On Sun, Aug 13, 2017 at 9:13 AM, Daniel Kozak <kozzi11@gmail.com> wrote: > >> this works ok for me with ldc compiler, gdc does not work on my arch machine so I can not do comparsion to your c++ versin (clang does not work with your c++ code) >> >> import std.stdio : writeln; >> import std.algorithm.comparison: min; >> import std.algorithm.iteration: sum; >> import core.time: MonoTime, Duration; >> >> >> T[] sum_subranges(T)(T[] input, uint range) >> { >> import std.array : appender; >> auto app = appender!(T[])(); >> if (range == 0) >> { >> return app.data; >> } >> for (uint i; i < input.length; i=min(i+range, input.length)) >> { >> app.put(sum(input[i..min(i+range, input.length)])); >> } >> return app.data; >> } >> >> unittest >> { >> assert(sum_subranges([1,1,1], 2) == [2, 1]); >> assert(sum_subranges([1,1,1,2,3,3], 2) == [2, 3, 6]); >> assert(sum_subranges([], 2) == []); >> assert(sum_subranges([1], 
2) == [1]); >> assert(sum_subranges([1], 0) == []); >> } >> >> >> int main() >> { >> import std.range : iota, array; >> auto v = iota(0,1000000).array; >> int sum; >> MonoTime beg = MonoTime.currTime; >> for (int i=0; i < 100; i++) >> sum += cast(int)sum_subranges(v,2).length; >> MonoTime end = MonoTime.currTime; >> writeln(end-beg); >> writeln(sum); >> return sum; >> } >> >> On Sun, Aug 13, 2017 at 9:03 AM, Neia Neutuladh via Digitalmars-d-learn < digitalmars-d-learn@puremagic.com> wrote: >> >>> [...] Thank you all for the replies. Good to know the community is alive in d :) Let's settle the playground: D : http://ideone.com/h4fnsD C++: http://ideone.com/X1pyXG Both using GCC under the hood. C++ in 112 ms; D in : - 2.5 sec with original source; - 2.5 sec with Daniel's 1st version; - 5 sec timeout exceeded with Daniel's 2nd version; - 1.8 sec with Neia-like preallocation; So still it's not that neaty. (What's interesting C++ code generates 2KLOC of assembly, and Dlang @ ldc 12KLOC - checked at godbolt). P.S. For C++ version to work under clang, the function which takes (BinaryOp) must go before the other one (my bad). |
August 13, 2017 Re: D outperformed by C++, what am I doing wrong? | ||||
---|---|---|---|---|
| ||||
Posted in reply to amfvcg Attachments:
| my second version on ldc takes 380ms and c++ version on same compiler (clang), takes 350ms, so it seems to be almost same On Sun, Aug 13, 2017 at 9:51 AM, amfvcg via Digitalmars-d-learn < digitalmars-d-learn@puremagic.com> wrote: > On Sunday, 13 August 2017 at 07:30:32 UTC, Daniel Kozak wrote: > >> Here is more D idiomatic way: >> >> import std.stdio : writeln; >> import std.algorithm.comparison: min; >> import std.algorithm.iteration: sum; >> import core.time: MonoTime, Duration; >> >> >> auto sum_subranges(T)(T input, uint range) >> { >> import std.array : array; >> import std.range : chunks, ElementType; >> import std.algorithm : map; >> >> if (range == 0) >> { >> return ElementType!(T)[].init; >> } >> return input.chunks(range).map!(sum).array; >> } >> >> unittest >> { >> assert(sum_subranges([1,1,1], 2) == [2, 1]); >> assert(sum_subranges([1,1,1,2,3,3], 2) == [2, 3, 6]); >> assert(sum_subranges([], 2) == []); >> assert(sum_subranges([1], 2) == [1]); >> assert(sum_subranges([1], 0) == []); >> } >> >> >> int main() >> { >> import std.range : iota, array; >> auto v = iota(0,1000000); >> int sum; >> MonoTime beg = MonoTime.currTime; >> for (int i=0; i < 100; i++) >> sum += cast(int)sum_subranges(v,2).length; >> MonoTime end = MonoTime.currTime; >> writeln(end-beg); >> writeln(sum); >> return sum; >> } >> >> On Sun, Aug 13, 2017 at 9:13 AM, Daniel Kozak <kozzi11@gmail.com> wrote: >> >> this works ok for me with ldc compiler, gdc does not work on my arch >>> machine so I can not do comparsion to your c++ versin (clang does not work >>> with your c++ code) >>> >>> import std.stdio : writeln; >>> import std.algorithm.comparison: min; >>> import std.algorithm.iteration: sum; >>> import core.time: MonoTime, Duration; >>> >>> >>> T[] sum_subranges(T)(T[] input, uint range) >>> { >>> import std.array : appender; >>> auto app = appender!(T[])(); >>> if (range == 0) >>> { >>> return app.data; >>> } >>> for (uint i; i < input.length; i=min(i+range, input.length)) >>> { >>> 
app.put(sum(input[i..min(i+range, input.length)])); >>> } >>> return app.data; >>> } >>> >>> unittest >>> { >>> assert(sum_subranges([1,1,1], 2) == [2, 1]); >>> assert(sum_subranges([1,1,1,2,3,3], 2) == [2, 3, 6]); >>> assert(sum_subranges([], 2) == []); >>> assert(sum_subranges([1], 2) == [1]); >>> assert(sum_subranges([1], 0) == []); >>> } >>> >>> >>> int main() >>> { >>> import std.range : iota, array; >>> auto v = iota(0,1000000).array; >>> int sum; >>> MonoTime beg = MonoTime.currTime; >>> for (int i=0; i < 100; i++) >>> sum += cast(int)sum_subranges(v,2).length; >>> MonoTime end = MonoTime.currTime; >>> writeln(end-beg); >>> writeln(sum); >>> return sum; >>> } >>> >>> On Sun, Aug 13, 2017 at 9:03 AM, Neia Neutuladh via Digitalmars-d-learn < digitalmars-d-learn@puremagic.com> wrote: >>> >>> [...] >>>> >>> > Thank you all for the replies. Good to know the community is alive in d :) > > Let's settle the playground: > D : http://ideone.com/h4fnsD > C++: http://ideone.com/X1pyXG > > Both using GCC under the hood. > C++ in 112 ms; > D in : > - 2.5 sec with original source; > - 2.5 sec with Daniel's 1st version; > - 5 sec timeout exceeded with Daniel's 2nd version; > - 1.8 sec with Neia-like preallocation; > > So still it's not that neaty. > > (What's interesting C++ code generates 2KLOC of assembly, and Dlang @ ldc 12KLOC - checked at godbolt). > > P.S. For C++ version to work under clang, the function which takes > (BinaryOp) must go before the other one (my bad). > |
August 13, 2017 Re: D outperformed by C++, what am I doing wrong? | ||||
---|---|---|---|---|
| ||||
Posted in reply to Daniel Kozak | On Sunday, 13 August 2017 at 08:00:53 UTC, Daniel Kozak wrote:
> my second version on ldc takes 380ms and the c++ version on the same compiler (clang) takes 350ms, so it seems to be almost the same
>
Ok, on ideone (ldc 1.1.0) it times out; on dpaste (ldc 0.12.0) it gets killed.
What version are you using?
Either way, if that'd be the case - that's slick. (and ldc would be the compiler of choice for real use cases).
|
August 13, 2017 Re: D outperformed by C++, what am I doing wrong? | ||||
---|---|---|---|---|
| ||||
Posted in reply to amfvcg | On Sunday, 13 August 2017 at 08:13:56 UTC, amfvcg wrote: > On Sunday, 13 August 2017 at 08:00:53 UTC, Daniel Kozak wrote: >> my second version on ldc takes 380ms and c++ version on same compiler (clang), takes 350ms, so it seems to be almost same >> > > Ok, on ideone (ldc 1.1.0) it timeouts, on dpaste (ldc 0.12.0) it gets killed. > What version are you using? > > Either way, if that'd be the case - that's slick. (and ldc would be the compiler of choice for real use cases). import std.stdio : writeln; import std.algorithm.comparison: min; import std.algorithm.iteration: sum; import core.time: MonoTime, Duration; import std.range; import std.algorithm; auto s1(T)(T input, uint r) { return input.chunks(r).map!sum; } T[] sum_subranges(T)(T[] input, uint range) { T[] result; if (range == 0) { return result; } for (uint i; i < input.length; i=min(i+range, input.length)) { result ~= sum(input[i..min(i+range, input.length)]); } return result; } unittest { assert(sum_subranges([1,1,1], 2) == [2, 1]); assert(sum_subranges([1,1,1,2,3,3], 2) == [2, 3, 6]); assert(sum_subranges([], 2) == []); assert(sum_subranges([1], 2) == [1]); assert(sum_subranges([1], 0) == []); assert(s1([1,1,1], 2).array == [2, 1]); assert(s1([1,1,1,2,3,3], 2).array == [2, 3, 6]); } int main() { int sum; MonoTime beg0 = MonoTime.currTime; for (int i=0; i < 100; i++) sum += s1(iota(1000000),2).length; MonoTime end0 = MonoTime.currTime; writeln(end0-beg0); writeln(sum); sum = 0; int[1000000] v; for (int i=0; i < 1000000; ++i) v[i] = i; MonoTime beg = MonoTime.currTime; for (int i=0; i < 100; i++) sum += cast(int)sum_subranges(v,2).length; MonoTime end = MonoTime.currTime; writeln(end-beg); writeln(sum); return sum; } Gives me 5 μs and 2 hnsecs 50000000 3 secs, 228 ms, 837 μs, and 4 hnsecs 50000000 |
Copyright © 1999-2021 by the D Language Foundation