Diffstat (limited to 'cli')
28 files changed, 482 insertions, 495 deletions
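Reading aid (not part of the commit): this changeset swaps the hand-rolled `deno bench` reporter for mitata-based output and, in `lib.deno.unstable.d.ts`, replaces the `n`/`warmup` bench options with `group` and `baseline`. A minimal TypeScript sketch of the new options, modeled on the `group_baseline.ts` test added below — the bench names here are illustrative:

    // An ungrouped benchmark may be marked as the baseline for the summary.
    Deno.bench("noop", { baseline: true }, () => {});

    // Benchmarks that share a group are compared against the group's
    // baseline in a trailing "summary" block of the report.
    Deno.bench("parse url", { group: "url", baseline: true }, () => {
      new URL("https://deno.land/std/http/server.ts");
    });
    Deno.bench("parse url 3x", { group: "url" }, () => {
      new URL("https://deno.land/std/http/server.ts");
      new URL("https://deno.land/std/http/server.ts");
      new URL("https://deno.land/std/http/server.ts");
    });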
diff --git a/cli/Cargo.toml b/cli/Cargo.toml
index 6005a634f..6ed812b72 100644
--- a/cli/Cargo.toml
+++ b/cli/Cargo.toml
@@ -74,9 +74,9 @@ import_map = "=0.9.0"
 jsonc-parser = { version = "=0.19.0", features = ["serde"] }
 libc = "=0.2.121"
 log = { version = "=0.4.14", features = ["serde"] }
+mitata = '=0.0.6'
 node_resolver = "=0.1.1"
 notify = "=5.0.0-pre.14"
-num-format = "=0.4.0"
 once_cell = "=1.10.0"
 os_pipe = "=1.0.1"
 percent-encoding = "=2.1.0"
diff --git a/cli/dts/lib.deno.unstable.d.ts b/cli/dts/lib.deno.unstable.d.ts
index 564a7a073..0ad070469 100644
--- a/cli/dts/lib.deno.unstable.d.ts
+++ b/cli/dts/lib.deno.unstable.d.ts
@@ -8,14 +8,12 @@ declare namespace Deno {
     fn: () => void | Promise<void>;
     name: string;
     ignore?: boolean;
-    /** Specify number of iterations benchmark should perform. Defaults to 1000. */
-    n?: number;
-    /** Specify number of warmup iterations benchmark should perform. Defaults
-     * to 1000.
-     *
-     * These iterations are not measured. It allows the code to be optimized
-     * by JIT compiler before measuring its performance. */
-    warmup?: number;
+    /** Group name for the benchmark.
+     * Grouped benchmarks produce a time summary */
+    group?: string;
+    /** Benchmark should be used as the baseline for other benchmarks
+     * If there are multiple baselines in a group, the first one is used as the baseline */
+    baseline?: boolean;
     /** If at least one bench has `only` set to true, only run benches that have
      * `only` set to true and fail the bench suite. */
     only?: boolean;
diff --git a/cli/tests/integration/bench_tests.rs b/cli/tests/integration/bench_tests.rs
index e889a8224..928f08bea 100644
--- a/cli/tests/integration/bench_tests.rs
+++ b/cli/tests/integration/bench_tests.rs
@@ -111,6 +111,12 @@ itest!(finally_timeout {
   args: "bench --unstable bench/finally_timeout.ts",
   output: "bench/finally_timeout.out",
 });
 
+itest!(group_baseline {
+  args: "bench --unstable bench/group_baseline.ts",
+  exit_code: 0,
+  output: "bench/group_baseline.out",
+});
+
 itest!(unresolved_promise {
   args: "bench --unstable bench/unresolved_promise.ts",
   exit_code: 1,
diff --git a/cli/tests/testdata/bench/allow_all.out b/cli/tests/testdata/bench/allow_all.out
index eb7d2005c..c4a60fcf5 100644
--- a/cli/tests/testdata/bench/allow_all.out
+++ b/cli/tests/testdata/bench/allow_all.out
@@ -1,18 +1,21 @@
-[WILDCARD]
-running 14 benches from [WILDCARD]
-bench read false ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok [WILDCARD]
-bench read true ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok [WILDCARD]
-bench write false ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok [WILDCARD]
-bench write true ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok [WILDCARD]
-bench net false ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok [WILDCARD]
-bench net true ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok [WILDCARD]
-bench env false ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok [WILDCARD]
-bench env true ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok [WILDCARD]
-bench run false ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok [WILDCARD]
-bench run true ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok [WILDCARD]
-bench ffi false ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok [WILDCARD]
-bench ffi true ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok [WILDCARD]
-bench hrtime false ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok [WILDCARD]
-bench hrtime true ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok [WILDCARD]
+Check [WILDCARD]/bench/allow_all.ts
+cpu: [WILDCARD]
+runtime: deno [WILDCARD] ([WILDCARD])
 
-bench result: ok. 14 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out [WILDCARD]
+[WILDCARD]/bench/allow_all.ts
+benchmark time (avg) (min … max) p75 p99 p995
+---------------------------------------------------- -----------------------------
+read false [WILDCARD] [WILDCARD]/iter[WILDCARD]([WILDCARD] … [WILDCARD]) [WILDCARD]
+read true [WILDCARD] [WILDCARD]/iter[WILDCARD]([WILDCARD] … [WILDCARD]) [WILDCARD]
+write false [WILDCARD] [WILDCARD]/iter[WILDCARD]([WILDCARD] … [WILDCARD]) [WILDCARD]
+write true [WILDCARD] [WILDCARD]/iter[WILDCARD]([WILDCARD] … [WILDCARD]) [WILDCARD]
+net false [WILDCARD] [WILDCARD]/iter[WILDCARD]([WILDCARD] … [WILDCARD]) [WILDCARD]
+net true [WILDCARD] [WILDCARD]/iter[WILDCARD]([WILDCARD] … [WILDCARD]) [WILDCARD]
+env false [WILDCARD] [WILDCARD]/iter[WILDCARD]([WILDCARD] … [WILDCARD]) [WILDCARD]
+env true [WILDCARD] [WILDCARD]/iter[WILDCARD]([WILDCARD] … [WILDCARD]) [WILDCARD]
+run false [WILDCARD] [WILDCARD]/iter[WILDCARD]([WILDCARD] … [WILDCARD]) [WILDCARD]
+run true [WILDCARD] [WILDCARD]/iter[WILDCARD]([WILDCARD] … [WILDCARD]) [WILDCARD]
+ffi false [WILDCARD] [WILDCARD]/iter[WILDCARD]([WILDCARD] … [WILDCARD]) [WILDCARD]
+ffi true [WILDCARD] [WILDCARD]/iter[WILDCARD]([WILDCARD] … [WILDCARD]) [WILDCARD]
+hrtime false [WILDCARD] [WILDCARD]/iter[WILDCARD]([WILDCARD] … [WILDCARD]) [WILDCARD]
+hrtime true [WILDCARD] [WILDCARD]/iter[WILDCARD]([WILDCARD] … [WILDCARD]) [WILDCARD]
diff --git a/cli/tests/testdata/bench/allow_none.out b/cli/tests/testdata/bench/allow_none.out
index 0eb2ba5a3..3fd649ebe 100644
--- a/cli/tests/testdata/bench/allow_none.out
+++ b/cli/tests/testdata/bench/allow_none.out
@@ -1,51 +1,22 @@
-[WILDCARD]
-running 7 benches from [WILDCARD]
-bench read ... 1000 iterations FAILED [WILDCARD]
-bench write ... 1000 iterations FAILED [WILDCARD]
-bench net ... 1000 iterations FAILED [WILDCARD]
-bench env ... 1000 iterations FAILED [WILDCARD]
-bench run ... 1000 iterations FAILED [WILDCARD]
-bench ffi ... 1000 iterations FAILED [WILDCARD]
-bench hrtime ... 1000 iterations FAILED [WILDCARD]
-
-failures:
+Check [WILDCARD]/bench/allow_none.ts
+cpu: [WILDCARD]
+runtime: deno [WILDCARD] ([WILDCARD])
 
-read
-PermissionDenied: Can't escalate parent thread permissions
+[WILDCARD]/bench/allow_none.ts
+benchmark time (avg) (min … max) p75 p99 p995
+------------------------------------------------- -----------------------------
+read error: PermissionDenied: Can't escalate parent thread permissions
 [WILDCARD]
-
-write
-PermissionDenied: Can't escalate parent thread permissions
+write error: PermissionDenied: Can't escalate parent thread permissions
 [WILDCARD]
-
-net
-PermissionDenied: Can't escalate parent thread permissions
+net error: PermissionDenied: Can't escalate parent thread permissions
 [WILDCARD]
-
-env
-PermissionDenied: Can't escalate parent thread permissions
+env error: PermissionDenied: Can't escalate parent thread permissions
 [WILDCARD]
-
-run
-PermissionDenied: Can't escalate parent thread permissions
+run error: PermissionDenied: Can't escalate parent thread permissions
 [WILDCARD]
-
-ffi
-PermissionDenied: Can't escalate parent thread permissions
+ffi error: PermissionDenied: Can't escalate parent thread permissions
 [WILDCARD]
-
-hrtime
-PermissionDenied: Can't escalate parent thread permissions
+hrtime error: PermissionDenied: Can't escalate parent thread permissions
 [WILDCARD]
-
-failures:
-
-	read
-	write
-	net
-	env
-	run
-	ffi
-	hrtime
-
-bench result: FAILED. 0 passed; 7 failed; 0 ignored; 0 measured; 0 filtered out [WILDCARD]
+error: Bench failed
diff --git a/cli/tests/testdata/bench/clear_timeout.out b/cli/tests/testdata/bench/clear_timeout.out
index 10aa47d75..b66c49718 100644
--- a/cli/tests/testdata/bench/clear_timeout.out
+++ b/cli/tests/testdata/bench/clear_timeout.out
@@ -1,8 +1,10 @@
 Check [WILDCARD]/bench/clear_timeout.ts
-running 3 benches from [WILDCARD]/bench/clear_timeout.ts
-bench bench1 ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok ([WILDCARD])
-bench bench2 ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok ([WILDCARD])
-bench bench3 ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok ([WILDCARD])
-
-bench result: ok. 3 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out ([WILDCARD])
+cpu: [WILDCARD]
+runtime: deno [WILDCARD] ([WILDCARD])
+
+[WILDCARD]/bench/clear_timeout.ts
+benchmark time (avg) (min … max) p75 p99 p995
+------------------------------------------------- -----------------------------
+bench1 [WILDCARD] [WILDCARD]/iter[WILDCARD]([WILDCARD] … [WILDCARD]) [WILDCARD]
+bench2 [WILDCARD] [WILDCARD]/iter[WILDCARD]([WILDCARD] … [WILDCARD]) [WILDCARD]
+bench3 [WILDCARD] [WILDCARD]/iter[WILDCARD]([WILDCARD] … [WILDCARD]) [WILDCARD]
diff --git a/cli/tests/testdata/bench/collect.out b/cli/tests/testdata/bench/collect.out
index 570b2e4f2..18b27f8b9 100644
--- a/cli/tests/testdata/bench/collect.out
+++ b/cli/tests/testdata/bench/collect.out
@@ -1,5 +1,7 @@
 Check [WILDCARD]/bench/collect/bench.ts
-running 0 benches from [WILDCARD]/bench/collect/bench.ts
-
-bench result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out ([WILDCARD])
+cpu: [WILDCARD]
+runtime: deno [WILDCARD] ([WILDCARD])
+
+[WILDCARD]/bench/collect/bench.ts
+benchmark time (avg) (min … max) p75 p99 p995
+------------------------------------------------- -----------------------------
diff --git a/cli/tests/testdata/bench/exit_sanitizer.out b/cli/tests/testdata/bench/exit_sanitizer.out
index 23ce871fc..ba063642d 100644
--- a/cli/tests/testdata/bench/exit_sanitizer.out
+++ b/cli/tests/testdata/bench/exit_sanitizer.out
@@ -1,35 +1,14 @@
 Check [WILDCARD]/bench/exit_sanitizer.ts
-running 3 benches from [WILDCARD]/bench/exit_sanitizer.ts
-bench exit(0) ... 1000 iterations FAILED ([WILDCARD])
-bench exit(1) ... 1000 iterations FAILED ([WILDCARD])
-bench exit(2) ... 1000 iterations FAILED ([WILDCARD])
-
-failures:
-
-exit(0)
-AssertionError: Bench attempted to exit with exit code: 0
-  at [WILDCARD]
-  at [WILDCARD]/bench/exit_sanitizer.ts:2:8
-  at [WILDCARD]
-
-exit(1)
-AssertionError: Bench attempted to exit with exit code: 1
-  at [WILDCARD]
-  at [WILDCARD]/bench/exit_sanitizer.ts:6:8
-  at [WILDCARD]
-
-exit(2)
-AssertionError: Bench attempted to exit with exit code: 2
-  at [WILDCARD]
-  at [WILDCARD]/bench/exit_sanitizer.ts:10:8
-  at [WILDCARD]
-
-failures:
-
-	exit(0)
-	exit(1)
-	exit(2)
-
-bench result: FAILED. 0 passed; 3 failed; 0 ignored; 0 measured; 0 filtered out ([WILDCARD])
-
+cpu: [WILDCARD]
+runtime: deno [WILDCARD] ([WILDCARD])
+
+[WILDCARD]/bench/exit_sanitizer.ts
+benchmark time (avg) (min … max) p75 p99 p995
+------------------------------------------------- -----------------------------
+exit(0) error: AssertionError: Bench attempted to exit with exit code: 0
+[WILDCARD]
+exit(1) error: AssertionError: Bench attempted to exit with exit code: 1
+[WILDCARD]
+exit(2) error: AssertionError: Bench attempted to exit with exit code: 2
+[WILDCARD]
 error: Bench failed
diff --git a/cli/tests/testdata/bench/fail.out b/cli/tests/testdata/bench/fail.out
index 9779a27fe..cc92a08b4 100644
--- a/cli/tests/testdata/bench/fail.out
+++ b/cli/tests/testdata/bench/fail.out
@@ -1,81 +1,28 @@
 Check [WILDCARD]/bench/fail.ts
-running 10 benches from [WILDCARD]/bench/fail.ts
-bench bench0 ... 1000 iterations FAILED ([WILDCARD])
-bench bench1 ... 1000 iterations FAILED ([WILDCARD])
-bench bench2 ... 1000 iterations FAILED ([WILDCARD])
-bench bench3 ... 1000 iterations FAILED ([WILDCARD])
-bench bench4 ... 1000 iterations FAILED ([WILDCARD])
-bench bench5 ... 1000 iterations FAILED ([WILDCARD])
-bench bench6 ... 1000 iterations FAILED ([WILDCARD])
-bench bench7 ... 1000 iterations FAILED ([WILDCARD])
-bench bench8 ... 1000 iterations FAILED ([WILDCARD])
-bench bench9 ... 1000 iterations FAILED ([WILDCARD])
-
-failures:
-
-bench0
-Error
-  at [WILDCARD]/bench/fail.ts:2:9
-  at [WILDCARD]
-
-bench1
-Error
-  at [WILDCARD]/bench/fail.ts:5:9
-  at [WILDCARD]
-
-bench2
-Error
-  at [WILDCARD]/bench/fail.ts:8:9
-  at [WILDCARD]
-
-bench3
-Error
-  at [WILDCARD]/bench/fail.ts:11:9
-  at [WILDCARD]
-
-bench4
-Error
-  at [WILDCARD]/bench/fail.ts:14:9
-  at [WILDCARD]
-
-bench5
-Error
-  at [WILDCARD]/bench/fail.ts:17:9
-  at [WILDCARD]
-
-bench6
-Error
-  at [WILDCARD]/bench/fail.ts:20:9
-  at [WILDCARD]
-
-bench7
-Error
-  at [WILDCARD]/bench/fail.ts:23:9
-  at [WILDCARD]
-
-bench8
-Error
-  at [WILDCARD]/bench/fail.ts:26:9
-  at [WILDCARD]
-
-bench9
-Error
-  at [WILDCARD]/bench/fail.ts:29:9
-  at [WILDCARD]
-
-failures:
-
-	bench0
-	bench1
-	bench2
-	bench3
-	bench4
-	bench5
-	bench6
-	bench7
-	bench8
-	bench9
-
-bench result: FAILED. 0 passed; 10 failed; 0 ignored; 0 measured; 0 filtered out ([WILDCARD])
-
+cpu: [WILDCARD]
+runtime: deno [WILDCARD] ([WILDCARD])
+
+[WILDCARD]/bench/fail.ts
+benchmark time (avg) (min … max) p75 p99 p995
+------------------------------------------------- -----------------------------
+bench0 error: Error
+[WILDCARD]
+bench1 error: Error
+[WILDCARD]
+bench2 error: Error
+[WILDCARD]
+bench3 error: Error
+[WILDCARD]
+bench4 error: Error
+[WILDCARD]
+bench5 error: Error
+[WILDCARD]
+bench6 error: Error
+[WILDCARD]
+bench7 error: Error
+[WILDCARD]
+bench8 error: Error
+[WILDCARD]
+bench9 error: Error
+[WILDCARD]
 error: Bench failed
diff --git a/cli/tests/testdata/bench/filter.out b/cli/tests/testdata/bench/filter.out
index 8657e56cc..3356ded99 100644
--- a/cli/tests/testdata/bench/filter.out
+++ b/cli/tests/testdata/bench/filter.out
@@ -1,12 +1,20 @@
 Check [WILDCARD]/bench/filter/a_bench.ts
 Check [WILDCARD]/bench/filter/b_bench.ts
 Check [WILDCARD]/bench/filter/c_bench.ts
-running 1 bench from [WILDCARD]/bench/filter/a_bench.ts
-bench foo ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok ([WILDCARD])
-running 1 bench from [WILDCARD]/bench/filter/b_bench.ts
-bench foo ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok ([WILDCARD])
-running 1 bench from [WILDCARD]/bench/filter/c_bench.ts
-bench foo ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok ([WILDCARD])
+cpu: [WILDCARD]
+runtime: deno [WILDCARD] ([WILDCARD])
 
-bench result: ok. 3 passed; 0 failed; 0 ignored; 0 measured; 6 filtered out ([WILDCARD])
+[WILDCARD]/bench/filter/a_bench.ts
+benchmark time (avg) (min … max) p75 p99 p995
+------------------------------------------------- -----------------------------
+foo [WILDCARD] [WILDCARD]/iter[WILDCARD]([WILDCARD] … [WILDCARD]) [WILDCARD]
+[WILDCARD]/bench/filter/b_bench.ts
+benchmark time (avg) (min … max) p75 p99 p995
+------------------------------------------------- -----------------------------
+foo [WILDCARD] [WILDCARD]/iter[WILDCARD]([WILDCARD] … [WILDCARD]) [WILDCARD]
+
+[WILDCARD]/bench/filter/c_bench.ts
+benchmark time (avg) (min … max) p75 p99 p995
+------------------------------------------------- -----------------------------
+foo [WILDCARD] [WILDCARD]/iter[WILDCARD]([WILDCARD] … [WILDCARD]) [WILDCARD]
diff --git a/cli/tests/testdata/bench/finally_timeout.out b/cli/tests/testdata/bench/finally_timeout.out
index dfae4607d..ec91900aa 100644
--- a/cli/tests/testdata/bench/finally_timeout.out
+++ b/cli/tests/testdata/bench/finally_timeout.out
@@ -1,19 +1,11 @@
 Check [WILDCARD]/bench/finally_timeout.ts
-running 2 benches from [WILDCARD]/bench/finally_timeout.ts
-bench error ... 1000 iterations FAILED ([WILDCARD])
-bench success ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok ([WILDCARD])
-
-failures:
-
-error
-Error: fail
-  at [WILDCARD]/bench/finally_timeout.ts:4:11
-  at [WILDCARD]
-
-failures:
-
-	error
-
-bench result: FAILED. 1 passed; 1 failed; 0 ignored; 0 measured; 0 filtered out ([WILDCARD])
-
+cpu: [WILDCARD]
+runtime: deno [WILDCARD] ([WILDCARD])
+
+[WILDCARD]/bench/finally_timeout.ts
+benchmark time (avg) (min … max) p75 p99 p995
+------------------------------------------------- -----------------------------
+error error: Error: fail
+[WILDCARD]
+success [WILDCARD] [WILDCARD]/iter[WILDCARD]([WILDCARD] … [WILDCARD]) [WILDCARD]
 error: Bench failed
diff --git a/cli/tests/testdata/bench/group_baseline.out b/cli/tests/testdata/bench/group_baseline.out
new file mode 100644
index 000000000..da7157b5b
--- /dev/null
+++ b/cli/tests/testdata/bench/group_baseline.out
@@ -0,0 +1,18 @@
+[WILDCARD]/bench/group_baseline.ts
+benchmark time (avg) (min … max) p75 p99 p995
+---------------------------------------------------- -----------------------------
+noop [WILDCARD] [WILDCARD]/iter[WILDCARD]([WILDCARD] … [WILDCARD]) [WILDCARD]
+noop2 [WILDCARD] [WILDCARD]/iter[WILDCARD]([WILDCARD] … [WILDCARD]) [WILDCARD]
+
+summary
+  noo[WILDCARD]
+   [WILDCARD]x times [WILDCARD] than noo[WILDCARD]
+
+noop3 [WILDCARD] [WILDCARD]/iter[WILDCARD]([WILDCARD] … [WILDCARD]) [WILDCARD]
+parse url 2x [WILDCARD] [WILDCARD]/iter[WILDCARD]([WILDCARD] … [WILDCARD]) [WILDCARD]
+parse url 6x [WILDCARD] [WILDCARD]/iter[WILDCARD]([WILDCARD] … [WILDCARD]) [WILDCARD]
+
+summary
+  parse url 2x
+   [WILDCARD]x times slower than noop3
+   [WILDCARD]x times faster than parse url 6x
diff --git a/cli/tests/testdata/bench/group_baseline.ts b/cli/tests/testdata/bench/group_baseline.ts
new file mode 100644
index 000000000..86c545116
--- /dev/null
+++ b/cli/tests/testdata/bench/group_baseline.ts
@@ -0,0 +1,18 @@
+Deno.bench("noop", () => {});
+Deno.bench("noop2", { baseline: true }, () => {});
+
+Deno.bench("noop3", { group: "url" }, () => {});
+
+Deno.bench("parse url 2x", { group: "url", baseline: true }, () => {
+  new URL("https://deno.land/std/http/server.ts");
+  new URL("https://deno.land/std/http/server.ts");
+});
+
+Deno.bench("parse url 6x", { group: "url" }, () => {
+  new URL("https://deno.land/std/http/server.ts");
+  new URL("https://deno.land/std/http/server.ts");
+  new URL("https://deno.land/std/http/server.ts");
+  new URL("https://deno.land/std/http/server.ts");
+  new URL("https://deno.land/std/http/server.ts");
+  new URL("https://deno.land/std/http/server.ts");
+});
diff --git a/cli/tests/testdata/bench/ignore.out b/cli/tests/testdata/bench/ignore.out
index cda77ea52..e2a35621d 100644
--- a/cli/tests/testdata/bench/ignore.out
+++ b/cli/tests/testdata/bench/ignore.out
@@ -1,15 +1,7 @@
 Check [WILDCARD]/bench/ignore.ts
-running 10 benches from [WILDCARD]/bench/ignore.ts
-bench bench0 ... 1000 iterations ignored ([WILDCARD])
-bench bench1 ... 1000 iterations ignored ([WILDCARD])
-bench bench2 ... 1000 iterations ignored ([WILDCARD])
-bench bench3 ... 1000 iterations ignored ([WILDCARD])
-bench bench4 ... 1000 iterations ignored ([WILDCARD])
-bench bench5 ... 1000 iterations ignored ([WILDCARD])
-bench bench6 ... 1000 iterations ignored ([WILDCARD])
-bench bench7 ... 1000 iterations ignored ([WILDCARD])
-bench bench8 ... 1000 iterations ignored ([WILDCARD])
-bench bench9 ... 1000 iterations ignored ([WILDCARD])
-
-bench result: ok. 0 passed; 0 failed; 10 ignored; 0 measured; 0 filtered out ([WILDCARD])
+cpu: [WILDCARD]
+runtime: deno [WILDCARD] ([WILDCARD])
+
+[WILDCARD]/bench/ignore.ts
+benchmark time (avg) (min … max) p75 p99 p995
+------------------------------------------------- -----------------------------
diff --git a/cli/tests/testdata/bench/ignore_permissions.out b/cli/tests/testdata/bench/ignore_permissions.out
index c55ccaa21..a518c572c 100644
--- a/cli/tests/testdata/bench/ignore_permissions.out
+++ b/cli/tests/testdata/bench/ignore_permissions.out
@@ -1,6 +1,7 @@
 Check [WILDCARD]/bench/ignore_permissions.ts
-running 1 bench from [WILDCARD]/bench/ignore_permissions.ts
-bench ignore ... 1000 iterations ignored ([WILDCARD])
-
-bench result: ok. 0 passed; 0 failed; 1 ignored; 0 measured; 0 filtered out ([WILDCARD])
+cpu: [WILDCARD]
+runtime: deno [WILDCARD] ([WILDCARD])
+
+[WILDCARD]/bench/ignore_permissions.ts
+benchmark time (avg) (min … max) p75 p99 p995
+------------------------------------------------- -----------------------------
diff --git a/cli/tests/testdata/bench/interval.out b/cli/tests/testdata/bench/interval.out
index dec5549ef..882821837 100644
--- a/cli/tests/testdata/bench/interval.out
+++ b/cli/tests/testdata/bench/interval.out
@@ -1,5 +1,7 @@
 Check [WILDCARD]/bench/interval.ts
-running 0 benches from [WILDCARD]/bench/interval.ts
-
-bench result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out ([WILDCARD])
+cpu: [WILDCARD]
+runtime: deno [WILDCARD] ([WILDCARD])
+
+[WILDCARD]/bench/interval.ts
+benchmark time (avg) (min … max) p75 p99 p995
+------------------------------------------------- -----------------------------
diff --git a/cli/tests/testdata/bench/load_unload.out b/cli/tests/testdata/bench/load_unload.out
index 9b73341d4..6537f47fa 100644
--- a/cli/tests/testdata/bench/load_unload.out
+++ b/cli/tests/testdata/bench/load_unload.out
@@ -1,6 +1,8 @@
 Check [WILDCARD]/bench/load_unload.ts
-running 1 bench from [WILDCARD]/bench/load_unload.ts
-bench bench ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok ([WILDCARD])
-
-bench result: ok. 1 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out ([WILDCARD])
+cpu: [WILDCARD]
+runtime: deno [WILDCARD] ([WILDCARD])
+
+[WILDCARD]/bench/load_unload.ts
+benchmark time (avg) (min … max) p75 p99 p995
+------------------------------------------------- -----------------------------
+bench [WILDCARD] [WILDCARD]/iter[WILDCARD]([WILDCARD] … [WILDCARD]) [WILDCARD]
diff --git a/cli/tests/testdata/bench/meta.out b/cli/tests/testdata/bench/meta.out
index e62172eb3..85b898e7e 100644
--- a/cli/tests/testdata/bench/meta.out
+++ b/cli/tests/testdata/bench/meta.out
@@ -1,7 +1,9 @@
 Check [WILDCARD]/bench/meta.ts
 import.meta.main: false
 import.meta.url: [WILDCARD]/bench/meta.ts
-running 0 benches from [WILDCARD]/bench/meta.ts
-
-bench result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out ([WILDCARD])
+cpu: [WILDCARD]
+runtime: deno [WILDCARD] ([WILDCARD])
+
+[WILDCARD]/bench/meta.ts
+benchmark time (avg) (min … max) p75 p99 p995
+------------------------------------------------- -----------------------------
diff --git a/cli/tests/testdata/bench/no_check.out b/cli/tests/testdata/bench/no_check.out
index ceb8b22fc..1f90836eb 100644
--- a/cli/tests/testdata/bench/no_check.out
+++ b/cli/tests/testdata/bench/no_check.out
@@ -1,6 +1,3 @@
-
-bench result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out ([WILDCARD])
-
 error: Uncaught TypeError: Cannot read properties of undefined (reading 'fn')
 Deno.bench();
 ^
diff --git a/cli/tests/testdata/bench/no_prompt_by_default.out b/cli/tests/testdata/bench/no_prompt_by_default.out
index d47198d85..a73971a9b 100644
--- a/cli/tests/testdata/bench/no_prompt_by_default.out
+++ b/cli/tests/testdata/bench/no_prompt_by_default.out
@@ -1,16 +1,9 @@
-running 1 bench from [WILDCARD]no_prompt_by_default.ts
-bench no prompt ... 1000 iterations FAILED ([WILDCARD]ms)
+[WILDCARD]cpu: [WILDCARD]
+runtime: deno [WILDCARD] ([WILDCARD])
 
-failures:
-
-no prompt
-PermissionDenied: Requires read access to "./some_file.txt", run again with the --allow-read flag
+[WILDCARD]/bench/no_prompt_by_default.ts
+benchmark time (avg) (min … max) p75 p99 p995
+------------------------------------------------- -----------------------------
+no prompt error: PermissionDenied: Requires read access to "./some_file.txt", run again with the --allow-read flag
 [WILDCARD]
-
-failures:
-
-	no prompt
-
-bench result: FAILED. 0 passed; 1 failed; 0 ignored; 0 measured; 0 filtered out ([WILDCARD]ms)
-
 error: Bench failed
diff --git a/cli/tests/testdata/bench/no_prompt_with_denied_perms.out b/cli/tests/testdata/bench/no_prompt_with_denied_perms.out
index efe9fa6dc..fe8f0c29c 100644
--- a/cli/tests/testdata/bench/no_prompt_with_denied_perms.out
+++ b/cli/tests/testdata/bench/no_prompt_with_denied_perms.out
@@ -1,16 +1,9 @@
-running 1 bench from [WILDCARD]/no_prompt_with_denied_perms.ts
-bench no prompt ... 1000 iterations FAILED ([WILDCARD]ms)
+[WILDCARD]cpu: [WILDCARD]
+runtime: deno [WILDCARD] ([WILDCARD])
 
-failures:
-
-no prompt
-PermissionDenied: Requires read access to "./some_file.txt", run again with the --allow-read flag
+[WILDCARD]/bench/no_prompt_with_denied_perms.ts
+benchmark time (avg) (min … max) p75 p99 p995
+------------------------------------------------- -----------------------------
+no prompt error: PermissionDenied: Requires read access to "./some_file.txt", run again with the --allow-read flag
 [WILDCARD]
-
-failures:
-
-	no prompt
-
-bench result: FAILED. 0 passed; 1 failed; 0 ignored; 0 measured; 0 filtered out ([WILDCARD]ms)
-
 error: Bench failed
diff --git a/cli/tests/testdata/bench/only.out b/cli/tests/testdata/bench/only.out
index 3c9855560..b874c9a96 100644
--- a/cli/tests/testdata/bench/only.out
+++ b/cli/tests/testdata/bench/only.out
@@ -1,7 +1,9 @@
 Check [WILDCARD]/bench/only.ts
-running 1 bench from [WILDCARD]/bench/only.ts
-bench only ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok ([WILDCARD])
-
-bench result: ok. 1 passed; 0 failed; 0 ignored; 0 measured; 2 filtered out ([WILDCARD])
+cpu: [WILDCARD]
+runtime: deno [WILDCARD] ([WILDCARD])
+
+[WILDCARD]/bench/only.ts
+benchmark time (avg) (min … max) p75 p99 p995
+------------------------------------------------- -----------------------------
+only [WILDCARD] [WILDCARD]/iter[WILDCARD]([WILDCARD] … [WILDCARD]) [WILDCARD]
 error: Bench failed because the "only" option was used
diff --git a/cli/tests/testdata/bench/overloads.out b/cli/tests/testdata/bench/overloads.out
index a736b2e98..347a8392d 100644
--- a/cli/tests/testdata/bench/overloads.out
+++ b/cli/tests/testdata/bench/overloads.out
@@ -1,11 +1,12 @@
 Check [WILDCARD]/bench/overloads.ts
-running 6 benches from [WILDCARD]/bench/overloads.ts
-bench bench0 ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok ([WILDCARD])
-bench bench1 ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok ([WILDCARD])
-bench bench2 ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok ([WILDCARD])
-bench bench3 ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok ([WILDCARD])
-bench bench4 ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok ([WILDCARD])
-bench bench5 ... 1000 iterations ignored ([WILDCARD])
-
-bench result: ok. 5 passed; 0 failed; 1 ignored; 0 measured; 0 filtered out ([WILDCARD])
+cpu: [WILDCARD]
+runtime: deno [WILDCARD] ([WILDCARD])
+
+[WILDCARD]/bench/overloads.ts
+benchmark time (avg) (min … max) p75 p99 p995
+------------------------------------------------- -----------------------------
+bench0 [WILDCARD] [WILDCARD]/iter[WILDCARD]([WILDCARD] … [WILDCARD]) [WILDCARD]
+bench1 [WILDCARD] [WILDCARD]/iter[WILDCARD]([WILDCARD] … [WILDCARD]) [WILDCARD]
+bench2 [WILDCARD] [WILDCARD]/iter[WILDCARD]([WILDCARD] … [WILDCARD]) [WILDCARD]
+bench3 [WILDCARD] [WILDCARD]/iter[WILDCARD]([WILDCARD] … [WILDCARD]) [WILDCARD]
+bench4 [WILDCARD] [WILDCARD]/iter[WILDCARD]([WILDCARD] … [WILDCARD]) [WILDCARD]
diff --git a/cli/tests/testdata/bench/pass.out b/cli/tests/testdata/bench/pass.out
index 99320e666..9090b5535 100644
--- a/cli/tests/testdata/bench/pass.out
+++ b/cli/tests/testdata/bench/pass.out
@@ -1,15 +1,17 @@
 Check [WILDCARD]/bench/pass.ts
-running 10 benches from [WILDCARD]/bench/pass.ts
-bench bench0 ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok ([WILDCARD])
-bench bench1 ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok ([WILDCARD])
-bench bench2 ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok ([WILDCARD])
-bench bench3 ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok ([WILDCARD])
-bench bench4 ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok ([WILDCARD])
-bench bench5 ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok ([WILDCARD])
-bench bench6 ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok ([WILDCARD])
-bench bench7 ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok ([WILDCARD])
-bench bench8 ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok ([WILDCARD])
-bench bench9 ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok ([WILDCARD])
-
-bench result: ok. 10 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out ([WILDCARD])
+cpu: [WILDCARD]
+runtime: deno [WILDCARD] ([WILDCARD])
+
+[WILDCARD]/bench/pass.ts
+benchmark time (avg) (min … max) p75 p99 p995
+------------------------------------------------- -----------------------------
+bench0 [WILDCARD] [WILDCARD]/iter[WILDCARD]([WILDCARD] … [WILDCARD]) [WILDCARD]
+bench1 [WILDCARD] [WILDCARD]/iter[WILDCARD]([WILDCARD] … [WILDCARD]) [WILDCARD]
+bench2 [WILDCARD] [WILDCARD]/iter[WILDCARD]([WILDCARD] … [WILDCARD]) [WILDCARD]
+bench3 [WILDCARD] [WILDCARD]/iter[WILDCARD]([WILDCARD] … [WILDCARD]) [WILDCARD]
+bench4 [WILDCARD] [WILDCARD]/iter[WILDCARD]([WILDCARD] … [WILDCARD]) [WILDCARD]
+bench5 [WILDCARD] [WILDCARD]/iter[WILDCARD]([WILDCARD] … [WILDCARD]) [WILDCARD]
+bench6 [WILDCARD] [WILDCARD]/iter[WILDCARD]([WILDCARD] … [WILDCARD]) [WILDCARD]
+bench7 [WILDCARD] [WILDCARD]/iter[WILDCARD]([WILDCARD] … [WILDCARD]) [WILDCARD]
+bench8 [WILDCARD] [WILDCARD]/iter[WILDCARD]([WILDCARD] … [WILDCARD]) [WILDCARD]
+bench9 [WILDCARD] [WILDCARD]/iter[WILDCARD]([WILDCARD] … [WILDCARD]) [WILDCARD]
diff --git a/cli/tests/testdata/bench/quiet.out b/cli/tests/testdata/bench/quiet.out
index e214980e6..aad853189 100644
--- a/cli/tests/testdata/bench/quiet.out
+++ b/cli/tests/testdata/bench/quiet.out
@@ -1,8 +1,10 @@
-running 4 benches from [WILDCARD]/bench/quiet.ts
-bench console.log ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok [WILDCARD]
-bench console.error ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok [WILDCARD]
-bench console.info ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok [WILDCARD]
-bench console.warn ... 1000 iterations [WILDCARD] ns/iter ([WILDCARD]..[WILDCARD] ns/iter) ok [WILDCARD]
-
-bench result: ok. 4 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out ([WILDCARD])
+cpu: [WILDCARD]
+runtime: deno [WILDCARD] ([WILDCARD])
+
+[WILDCARD]/bench/quiet.ts
+benchmark time (avg) (min … max) p75 p99 p995
+----------------------------------------------------- -----------------------------
+console.log [WILDCARD] [WILDCARD]/iter[WILDCARD]([WILDCARD] … [WILDCARD]) [WILDCARD]
+console.error [WILDCARD] [WILDCARD]/iter[WILDCARD]([WILDCARD] … [WILDCARD]) [WILDCARD]
+console.info [WILDCARD] [WILDCARD]/iter[WILDCARD]([WILDCARD] … [WILDCARD]) [WILDCARD]
+console.warn [WILDCARD] [WILDCARD]/iter[WILDCARD]([WILDCARD] … [WILDCARD]) [WILDCARD]
diff --git a/cli/tests/testdata/bench/unhandled_rejection.out b/cli/tests/testdata/bench/unhandled_rejection.out
index 0e2b03c3e..98c9e68b5 100644
--- a/cli/tests/testdata/bench/unhandled_rejection.out
+++ b/cli/tests/testdata/bench/unhandled_rejection.out
@@ -1,7 +1,4 @@
 Check [WILDCARD]/bench/unhandled_rejection.ts
-
-bench result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out ([WILDCARD])
-
 error: Uncaught (in promise) Error: rejection
 reject(new Error("rejection"));
 ^
diff --git a/cli/tests/testdata/bench/unresolved_promise.out b/cli/tests/testdata/bench/unresolved_promise.out
index b3c3d65f9..d544d77e7 100644
--- a/cli/tests/testdata/bench/unresolved_promise.out
+++ b/cli/tests/testdata/bench/unresolved_promise.out
@@ -1,5 +1,2 @@
 Check [WILDCARD]/bench/unresolved_promise.ts
-
-bench result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out ([WILDCARD])
-
 error: Module evaluation is still pending but there are no pending ops or dynamic imports. This situation is often caused by unresolved promises.
diff --git a/cli/tools/bench.rs b/cli/tools/bench.rs
index 2d35c27c5..0947c2647 100644
--- a/cli/tools/bench.rs
+++ b/cli/tools/bench.rs
@@ -5,7 +5,6 @@ use crate::cache::CacherLoader;
 use crate::colors;
 use crate::compat;
 use crate::create_main_worker;
-use crate::display;
 use crate::emit;
 use crate::file_watcher;
 use crate::file_watcher::ResolutionResult;
@@ -35,15 +34,11 @@ use deno_graph::ModuleKind;
 use deno_runtime::permissions::Permissions;
 use deno_runtime::tokio_util::run_basic;
 use log::Level;
-use num_format::Locale;
-use num_format::ToFormattedString;
 use serde::Deserialize;
+use serde::Serialize;
 use std::collections::HashSet;
-use std::io::Write;
 use std::path::PathBuf;
 use std::sync::Arc;
-use std::time::Duration;
-use std::time::Instant;
 use tokio::sync::mpsc::unbounded_channel;
 use tokio::sync::mpsc::UnboundedSender;
@@ -53,14 +48,6 @@ struct BenchSpecifierOptions {
   filter: Option<String>,
 }
 
-#[derive(Debug, Clone, PartialEq, Deserialize, Eq, Hash)]
-#[serde(rename_all = "camelCase")]
-pub struct BenchDescription {
-  pub origin: String,
-  pub name: String,
-  pub iterations: u64,
-}
-
 #[derive(Debug, Clone, PartialEq, Deserialize)]
 #[serde(rename_all = "camelCase")]
 pub enum BenchOutput {
@@ -69,198 +56,294 @@ pub enum BenchOutput {
 #[derive(Debug, Clone, PartialEq, Deserialize)]
 #[serde(rename_all = "camelCase")]
-pub enum BenchResult {
-  Ok,
-  Ignored,
-  Failed(String),
-}
-
-#[derive(Debug, Clone, PartialEq, Deserialize)]
-#[serde(rename_all = "camelCase")]
 pub struct BenchPlan {
-  pub origin: String,
   pub total: usize,
-  pub filtered_out: usize,
+  pub origin: String,
   pub used_only: bool,
+  pub names: Vec<String>,
 }
 
 #[derive(Debug, Clone, Deserialize)]
 #[serde(rename_all = "camelCase")]
 pub enum BenchEvent {
   Plan(BenchPlan),
-  Wait(BenchDescription),
   Output(BenchOutput),
-  IterationTime(u64),
-  Result(BenchDescription, BenchResult, u64),
+  Wait(BenchMetadata),
+  Result(String, BenchResult),
 }
 
-#[derive(Debug, Clone)]
-pub struct BenchMeasures {
-  pub iterations: u64,
-  pub current_start: Instant,
-  pub measures: Vec<u128>,
+#[derive(Debug, Clone, PartialEq, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub enum BenchResult {
+  Ok(BenchMeasurement),
+  Failed(BenchFailure),
 }
 
-#[derive(Debug, Clone)]
-pub struct BenchSummary {
+#[derive(Debug, Clone, Serialize)]
+pub struct BenchReport {
   pub total: usize,
-  pub passed: usize,
   pub failed: usize,
-  pub ignored: usize,
-  pub filtered_out: usize,
-  pub measured: usize,
-  pub measures: Vec<BenchMeasures>,
-  pub current_bench: BenchMeasures,
-  pub failures: Vec<(BenchDescription, String)>,
+  pub failures: Vec<BenchFailure>,
+  pub measurements: Vec<BenchMeasurement>,
+}
+
+#[derive(Debug, Clone, PartialEq, Deserialize, Eq, Hash)]
+pub struct BenchMetadata {
+  pub name: String,
+  pub origin: String,
+  pub baseline: bool,
+  pub group: Option<String>,
+}
+
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct BenchMeasurement {
+  pub name: String,
+  pub baseline: bool,
+  pub stats: BenchStats,
+  pub group: Option<String>,
+}
+
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct BenchFailure {
+  pub name: String,
+  pub error: String,
+  pub baseline: bool,
+  pub group: Option<String>,
 }
 
-impl BenchSummary {
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct BenchStats {
+  pub n: u64,
+  pub min: f64,
+  pub max: f64,
+  pub avg: f64,
+  pub p75: f64,
+  pub p99: f64,
+  pub p995: f64,
+  pub p999: f64,
+}
+
+impl BenchReport {
   pub fn new() -> Self {
     Self {
       total: 0,
-      passed: 0,
       failed: 0,
-      ignored: 0,
-      filtered_out: 0,
-      measured: 0,
-      measures: Vec::new(),
-      current_bench: BenchMeasures {
-        iterations: 0,
-        current_start: Instant::now(),
-        measures: vec![],
-      },
       failures: Vec::new(),
+      measurements: Vec::new(),
     }
   }
+}
 
-  fn has_failed(&self) -> bool {
-    self.failed > 0 || !self.failures.is_empty()
-  }
-
-  fn has_pending(&self) -> bool {
-    self.total - self.passed - self.failed - self.ignored > 0
-  }
+fn create_reporter(show_output: bool) -> Box<dyn BenchReporter + Send> {
+  Box::new(ConsoleReporter::new(show_output))
 }
 
 pub trait BenchReporter {
+  fn report_group_summary(&mut self);
   fn report_plan(&mut self, plan: &BenchPlan);
-  fn report_wait(&mut self, description: &BenchDescription);
+  fn report_end(&mut self, report: &BenchReport);
+  fn report_wait(&mut self, wait: &BenchMetadata);
   fn report_output(&mut self, output: &BenchOutput);
-  fn report_result(
-    &mut self,
-    description: &BenchDescription,
-    result: &BenchResult,
-    elapsed: u64,
-    current_bench: &BenchMeasures,
-  );
-  fn report_summary(&mut self, summary: &BenchSummary, elapsed: &Duration);
+  fn report_result(&mut self, result: &BenchResult);
 }
 
-struct PrettyBenchReporter {
-  echo_output: bool,
+struct ConsoleReporter {
+  name: String,
+  show_output: bool,
+  has_ungrouped: bool,
+  group: Option<String>,
+  baseline: Option<BenchMeasurement>,
+  group_measurements: Vec<BenchMeasurement>,
+  options: Option<mitata::reporter::Options>,
 }
 
-impl PrettyBenchReporter {
-  fn new(echo_output: bool) -> Self {
-    Self { echo_output }
-  }
-
-  fn force_report_wait(&mut self, description: &BenchDescription) {
-    print!(
-      "bench {} ... {} iterations ",
-      description.name, description.iterations
-    );
-    // flush for faster feedback when line buffered
-    std::io::stdout().flush().unwrap();
+impl ConsoleReporter {
+  fn new(show_output: bool) -> Self {
+    Self {
+      show_output,
+      group: None,
+      options: None,
+      baseline: None,
+      name: String::new(),
+      has_ungrouped: false,
+      group_measurements: Vec::new(),
+    }
   }
 }
 
-impl BenchReporter for PrettyBenchReporter {
+impl BenchReporter for ConsoleReporter {
+  #[cold]
   fn report_plan(&mut self, plan: &BenchPlan) {
-    let inflection = if plan.total == 1 { "bench" } else { "benches" };
-    println!("running {} {} from {}", plan.total, inflection, plan.origin);
+    use std::sync::atomic::AtomicBool;
+    use std::sync::atomic::Ordering;
+    static FIRST_PLAN: AtomicBool = AtomicBool::new(true);
+
+    self.options = Some(mitata::reporter::Options::new(
+      &plan.names.iter().map(|x| x.as_str()).collect::<Vec<&str>>(),
+    ));
+
+    let options = self.options.as_mut().unwrap();
+
+    options.percentiles = true;
+    options.colors = colors::use_color();
+
+    if FIRST_PLAN
+      .compare_exchange(true, false, Ordering::SeqCst, Ordering::SeqCst)
+      .is_ok()
+    {
+      println!("{}", colors::gray(format!("cpu: {}", mitata::cpu::name())));
+      println!(
+        "{}\n",
+        colors::gray(format!(
+          "runtime: deno {} ({})",
+          crate::version::deno(),
+          env!("TARGET")
+        ))
+      );
+    } else {
+      println!();
+    }
+
+    println!(
+      "{}\n{}\n{}",
+      colors::gray(&plan.origin),
+      mitata::reporter::header(options),
+      mitata::reporter::br(options)
+    );
   }
 
-  fn report_wait(&mut self, description: &BenchDescription) {
-    self.force_report_wait(description);
+  fn report_wait(&mut self, wait: &BenchMetadata) {
+    self.name = wait.name.clone();
+
+    match &wait.group {
+      None => {
+        self.has_ungrouped = true;
+      }
+
+      Some(group) => {
+        if self.group.is_none()
+          && self.has_ungrouped
+          && self.group_measurements.is_empty()
+        {
+          println!();
+        }
+
+        if None == self.group || group != self.group.as_ref().unwrap() {
+          self.report_group_summary();
+        }
+
+        if (self.group.is_none() && self.has_ungrouped)
+          || (self.group.is_some() && self.group_measurements.is_empty())
+        {
+          println!();
+        }
+
+        self.group = Some(group.clone());
+      }
+    }
   }
 
   fn report_output(&mut self, output: &BenchOutput) {
-    if self.echo_output {
+    if self.show_output {
       match output {
-        BenchOutput::Console(line) => print!("{}", line),
+        BenchOutput::Console(line) => {
+          print!("{} {}", colors::gray(format!("{}:", self.name)), line)
+        }
       }
     }
   }
 
-  fn report_result(
-    &mut self,
-    _description: &BenchDescription,
-    result: &BenchResult,
-    elapsed: u64,
-    current_bench: &BenchMeasures,
-  ) {
-    let status = match result {
-      BenchResult::Ok => {
-        let ns_op = current_bench.measures.iter().sum::<u128>()
-          / current_bench.iterations as u128;
-        let min_op = current_bench.measures.iter().min().unwrap_or(&0);
-        let max_op = current_bench.measures.iter().max().unwrap_or(&0);
-        format!(
-          "{} ns/iter ({}..{} ns/iter) {}",
-          ns_op.to_formatted_string(&Locale::en),
-          min_op.to_formatted_string(&Locale::en),
-          max_op.to_formatted_string(&Locale::en),
-          colors::green("ok")
-        )
-      }
-      BenchResult::Ignored => colors::yellow("ignored").to_string(),
-      BenchResult::Failed(_) => colors::red("FAILED").to_string(),
-    };
+  fn report_result(&mut self, result: &BenchResult) {
+    let options = self.options.as_ref().unwrap();
 
-    println!(
-      "{} {}",
-      status,
-      colors::gray(format!("({})", display::human_elapsed(elapsed.into())))
-    );
-  }
+    match result {
+      BenchResult::Ok(bench) => {
+        let mut bench = bench.to_owned();
+
+        if bench.baseline && self.baseline.is_none() {
+          self.baseline = Some(bench.clone());
+        } else {
+          bench.baseline = false;
+        }
 
-  fn report_summary(&mut self, summary: &BenchSummary, elapsed: &Duration) {
-    if !summary.failures.is_empty() {
-      println!("\nfailures:\n");
-      for (description, error) in &summary.failures {
-        println!("{}", description.name);
-        println!("{}", error);
-        println!();
+        self.group_measurements.push(bench.clone());
+
+        println!(
+          "{}",
+          mitata::reporter::benchmark(
+            &bench.name,
+            &mitata::reporter::BenchmarkStats {
+              avg: bench.stats.avg,
+              min: bench.stats.min,
+              max: bench.stats.max,
+              p75: bench.stats.p75,
+              p99: bench.stats.p99,
+              p995: bench.stats.p995,
+            },
+            options
+          )
+        );
      }
 
-      println!("failures:\n");
-      for (description, _) in &summary.failures {
-        println!("\t{}", description.name);
+      BenchResult::Failed(failure) => {
+        println!(
+          "{}",
+          mitata::reporter::benchmark_error(
+            &failure.name,
+            &mitata::reporter::Error {
+              stack: None,
+              message: failure.error.clone(),
+            },
+            options
+          )
+        )
      }
-    }
+    };
+  }
 
-    let status = if summary.has_failed() || summary.has_pending() {
-      colors::red("FAILED").to_string()
-    } else {
-      colors::green("ok").to_string()
+  fn report_group_summary(&mut self) {
+    let options = match self.options.as_ref() {
+      None => return,
+      Some(options) => options,
    };
 
-    println!(
-      "\nbench result: {}. {} passed; {} failed; {} ignored; {} measured; {} filtered out {}\n",
-      status,
-      summary.passed,
-      summary.failed,
-      summary.ignored,
-      summary.measured,
-      summary.filtered_out,
-      colors::gray(format!("({})", display::human_elapsed(elapsed.as_millis()))),
-    );
+    if 2 <= self.group_measurements.len()
+      && (self.group.is_some()
+        || (self.group.is_none() && self.baseline.is_some()))
+    {
+      println!(
+        "\n{}",
+        mitata::reporter::summary(
+          &self
+            .group_measurements
+            .iter()
+            .map(|b| mitata::reporter::GroupBenchmark {
+              name: b.name.clone(),
+              baseline: b.baseline,
+              group: b.group.as_deref().unwrap_or("").to_owned(),
+
+              stats: mitata::reporter::BenchmarkStats {
+                avg: b.stats.avg,
+                min: b.stats.min,
+                max: b.stats.max,
+                p75: b.stats.p75,
+                p99: b.stats.p99,
+                p995: b.stats.p995,
+              },
+            })
+            .collect::<Vec<mitata::reporter::GroupBenchmark>>(),
+          options
+        )
+      );
+    }
+
+    self.baseline = None;
+    self.group_measurements.clear();
   }
-}
 
-fn create_reporter(echo_output: bool) -> Box<dyn BenchReporter + Send> {
-  Box::new(PrettyBenchReporter::new(echo_output))
+  fn report_end(&mut self, _: &BenchReport) {
+    self.report_group_summary();
+  }
 }
 
 /// Type check a collection of module and document specifiers.
@@ -367,20 +450,16 @@ async fn bench_specifiers(
     .buffer_unordered(1)
     .collect::<Vec<Result<Result<(), AnyError>, tokio::task::JoinError>>>();
 
-  let mut reporter = create_reporter(log_level != Some(Level::Error));
-
   let handler = {
     tokio::task::spawn(async move {
-      let earlier = Instant::now();
-      let mut summary = BenchSummary::new();
       let mut used_only = false;
+      let mut report = BenchReport::new();
+      let mut reporter = create_reporter(log_level != Some(Level::Error));
 
       while let Some(event) = receiver.recv().await {
        match event {
          BenchEvent::Plan(plan) => {
-            summary.total += plan.total;
-            summary.filtered_out += plan.filtered_out;
-
+            report.total += plan.total;
            if plan.used_only {
              used_only = true;
            }
@@ -388,51 +467,32 @@ async fn bench_specifiers(
            reporter.report_plan(&plan);
          }
 
-          BenchEvent::Wait(description) => {
-            reporter.report_wait(&description);
-            summary.current_bench = BenchMeasures {
-              iterations: description.iterations,
-              current_start: Instant::now(),
-              measures: Vec::with_capacity(
-                description.iterations.try_into().unwrap(),
-              ),
-            };
+          BenchEvent::Wait(metadata) => {
+            reporter.report_wait(&metadata);
          }
 
          BenchEvent::Output(output) => {
            reporter.report_output(&output);
          }
 
-          BenchEvent::IterationTime(iter_time) => {
-            summary.current_bench.measures.push(iter_time.into())
-          }
-
-          BenchEvent::Result(description, result, elapsed) => {
+          BenchEvent::Result(_origin, result) => {
            match &result {
-              BenchResult::Ok => {
-                summary.passed += 1;
-              }
-              BenchResult::Ignored => {
-                summary.ignored += 1;
+              BenchResult::Ok(bench) => {
+                report.measurements.push(bench.clone());
              }
-              BenchResult::Failed(error) => {
-                summary.failed += 1;
-                summary.failures.push((description.clone(), error.clone()));
+
+              BenchResult::Failed(failure) => {
+                report.failed += 1;
+                report.failures.push(failure.clone());
              }
-            }
+            };
 
-            reporter.report_result(
-              &description,
-              &result,
-              elapsed,
-              &summary.current_bench,
-            );
+            reporter.report_result(&result);
          }
        }
      }
 
-      let elapsed = Instant::now().duration_since(earlier);
-      reporter.report_summary(&summary, &elapsed);
+      reporter.report_end(&report);
 
      if used_only {
        return Err(generic_error(
@@ -440,7 +500,7 @@
        ));
      }
 
-      if summary.failed > 0 {
+      if report.failed > 0 {
        return Err(generic_error("Bench failed"));
      }
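Reading aid (not part of the commit): as the updated `only.out` expectation above shows, the retained `only` option still runs just the marked benches and then fails the suite under the new reporter. A short sketch of that behavior — the bench names are illustrative, the error line is taken from this changeset's expectations:

    Deno.bench("run me", { only: true }, () => {});
    Deno.bench("filtered out", () => {}); // not run
    // `deno bench --unstable` reports a mitata-style row for "run me",
    // then exits with:
    // error: Bench failed because the "only" option was used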