author     Nayeem Rahman <nayeemrmn99@gmail.com>    2022-05-05 00:15:54 +0100
committer  GitHub <noreply@github.com>              2022-05-05 01:15:54 +0200
commit     ca134d25e1c281384a1a131c19d0574e7c8d30e8
tree       90ffc4b39fc131f50eebab74c53c5b2b64eba32f
parent     6a21fe745acf44fb32c294a34314abf58cc43c30
feat(test): Show Deno.test() call locations for failures (#14484)
-rw-r--r--  cli/tests/testdata/compat/test_runner/cjs.out                               7
-rw-r--r--  cli/tests/testdata/compat/test_runner/esm.out                               7
-rw-r--r--  cli/tests/testdata/test/aggregate_error.out                                 7
-rw-r--r--  cli/tests/testdata/test/allow_none.out                                     43
-rw-r--r--  cli/tests/testdata/test/exit_sanitizer.out                                 19
-rw-r--r--  cli/tests/testdata/test/fail.out                                           61
-rw-r--r--  cli/tests/testdata/test/fail_fast.out                                       7
-rw-r--r--  cli/tests/testdata/test/fail_fast_with_val.out                             12
-rw-r--r--  cli/tests/testdata/test/finally_timeout.out                                 7
-rw-r--r--  cli/tests/testdata/test/no_prompt_by_default.out                            7
-rw-r--r--  cli/tests/testdata/test/no_prompt_with_denied_perms.out                     7
-rw-r--r--  cli/tests/testdata/test/ops_sanitizer_missing_details.out                   7
-rw-r--r--  cli/tests/testdata/test/ops_sanitizer_multiple_timeout_tests.out           13
-rw-r--r--  cli/tests/testdata/test/ops_sanitizer_multiple_timeout_tests_no_trace.out  13
-rw-r--r--  cli/tests/testdata/test/ops_sanitizer_unstable.out                          7
-rw-r--r--  cli/tests/testdata/test/resource_sanitizer.out                              7
-rw-r--r--  cli/tests/testdata/test/steps/failing_steps.out                            27
-rw-r--r--  cli/tests/testdata/test/steps/invalid_usage.out                            51
-rw-r--r--  cli/tools/test.rs                                                          54
-rw-r--r--  runtime/js/40_testing.js                                                   11
20 files changed, 186 insertions, 188 deletions
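
With this change, the failure summary identifies each failing test by its Deno.test() registration site, printed as "name => file:line:column", and the error body is prefixed with "error:", instead of grouping test names under their file path. A minimal illustration of the new output (the file name below is hypothetical, not part of this patch):

    // example_fail.ts
    Deno.test("test 0", () => {
      throw new Error();
    });

Running `deno test example_fail.ts` now reports the failure roughly as:

    test 0 => ./example_fail.ts:1:6
    error: Error
        throw new Error();
              ^
        at [WILDCARD]/example_fail.ts:2:9

and the final "failures:" list repeats "test 0 => ./example_fail.ts:1:6" in place of the old indented per-file grouping, as the updated test fixtures below show.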
diff --git a/cli/tests/testdata/compat/test_runner/cjs.out b/cli/tests/testdata/compat/test_runner/cjs.out
index 7daf096db..918a06b1a 100644
--- a/cli/tests/testdata/compat/test_runner/cjs.out
+++ b/cli/tests/testdata/compat/test_runner/cjs.out
@@ -4,8 +4,8 @@ Failed assertion ... FAILED ([WILDCARD])
failures:
-./compat/test_runner/cjs.js > Failed assertion
-AssertionError: Values are not strictly equal:
+Failed assertion => ./compat/test_runner/cjs.js:[WILDCARD]
+error: AssertionError: Values are not strictly equal:
[Diff] Actual / Expected
@@ -20,8 +20,7 @@ AssertionError: Values are not strictly equal:
failures:
- ./compat/test_runner/cjs.js
- Failed assertion
+Failed assertion => ./compat/test_runner/cjs.js:[WILDCARD]
test result: FAILED. 1 passed; 1 failed; 0 ignored; 0 measured; 0 filtered out ([WILDCARD])
diff --git a/cli/tests/testdata/compat/test_runner/esm.out b/cli/tests/testdata/compat/test_runner/esm.out
index 8b51609f2..7660a3d2a 100644
--- a/cli/tests/testdata/compat/test_runner/esm.out
+++ b/cli/tests/testdata/compat/test_runner/esm.out
@@ -4,8 +4,8 @@ Failed assertion ... FAILED ([WILDCARD])
failures:
-./compat/test_runner/esm.mjs > Failed assertion
-AssertionError: Values are not strictly equal:
+Failed assertion => ./compat/test_runner/esm.mjs:[WILDCARD]
+error: AssertionError: Values are not strictly equal:
[Diff] Actual / Expected
@@ -20,8 +20,7 @@ AssertionError: Values are not strictly equal:
failures:
- ./compat/test_runner/esm.mjs
- Failed assertion
+Failed assertion => ./compat/test_runner/esm.mjs:[WILDCARD]
test result: FAILED. 1 passed; 1 failed; 0 ignored; 0 measured; 0 filtered out ([WILDCARD])
diff --git a/cli/tests/testdata/test/aggregate_error.out b/cli/tests/testdata/test/aggregate_error.out
index 3d0b807f0..dacce8979 100644
--- a/cli/tests/testdata/test/aggregate_error.out
+++ b/cli/tests/testdata/test/aggregate_error.out
@@ -3,8 +3,8 @@ aggregate ... FAILED ([WILDCARD])
failures:
-./test/aggregate_error.ts > aggregate
-AggregateError
+aggregate => ./test/aggregate_error.ts:[WILDCARD]
+error: AggregateError
Error: Error 1
at [WILDCARD]/testdata/test/aggregate_error.ts:2:18
Error: Error 2
@@ -15,8 +15,7 @@ AggregateError
failures:
- ./test/aggregate_error.ts
- aggregate
+aggregate => ./test/aggregate_error.ts:[WILDCARD]
test result: FAILED. 0 passed; 1 failed; 0 ignored; 0 measured; 0 filtered out ([WILDCARD])
diff --git a/cli/tests/testdata/test/allow_none.out b/cli/tests/testdata/test/allow_none.out
index ea5157cf2..241a5dd1a 100644
--- a/cli/tests/testdata/test/allow_none.out
+++ b/cli/tests/testdata/test/allow_none.out
@@ -10,43 +10,42 @@ hrtime ... FAILED [WILDCARD]
failures:
-./test/allow_none.ts > read
-PermissionDenied: Can't escalate parent thread permissions
+read => ./test/allow_none.ts:[WILDCARD]
+error: PermissionDenied: Can't escalate parent thread permissions
[WILDCARD]
-./test/allow_none.ts > write
-PermissionDenied: Can't escalate parent thread permissions
+write => ./test/allow_none.ts:[WILDCARD]
+error: PermissionDenied: Can't escalate parent thread permissions
[WILDCARD]
-./test/allow_none.ts > net
-PermissionDenied: Can't escalate parent thread permissions
+net => ./test/allow_none.ts:[WILDCARD]
+error: PermissionDenied: Can't escalate parent thread permissions
[WILDCARD]
-./test/allow_none.ts > env
-PermissionDenied: Can't escalate parent thread permissions
+env => ./test/allow_none.ts:[WILDCARD]
+error: PermissionDenied: Can't escalate parent thread permissions
[WILDCARD]
-./test/allow_none.ts > run
-PermissionDenied: Can't escalate parent thread permissions
+run => ./test/allow_none.ts:[WILDCARD]
+error: PermissionDenied: Can't escalate parent thread permissions
[WILDCARD]
-./test/allow_none.ts > ffi
-PermissionDenied: Can't escalate parent thread permissions
+ffi => ./test/allow_none.ts:[WILDCARD]
+error: PermissionDenied: Can't escalate parent thread permissions
[WILDCARD]
-./test/allow_none.ts > hrtime
-PermissionDenied: Can't escalate parent thread permissions
+hrtime => ./test/allow_none.ts:[WILDCARD]
+error: PermissionDenied: Can't escalate parent thread permissions
[WILDCARD]
failures:
- ./test/allow_none.ts
- read
- write
- net
- env
- run
- ffi
- hrtime
+read => ./test/allow_none.ts:[WILDCARD]
+write => ./test/allow_none.ts:[WILDCARD]
+net => ./test/allow_none.ts:[WILDCARD]
+env => ./test/allow_none.ts:[WILDCARD]
+run => ./test/allow_none.ts:[WILDCARD]
+ffi => ./test/allow_none.ts:[WILDCARD]
+hrtime => ./test/allow_none.ts:[WILDCARD]
test result: FAILED. 0 passed; 7 failed; 0 ignored; 0 measured; 0 filtered out [WILDCARD]
diff --git a/cli/tests/testdata/test/exit_sanitizer.out b/cli/tests/testdata/test/exit_sanitizer.out
index 5468af1df..4e117b7b6 100644
--- a/cli/tests/testdata/test/exit_sanitizer.out
+++ b/cli/tests/testdata/test/exit_sanitizer.out
@@ -6,22 +6,22 @@ exit(2) ... FAILED ([WILDCARD])
failures:
-./test/exit_sanitizer.ts > exit(0)
-AssertionError: Test case attempted to exit with exit code: 0
+exit(0) => ./test/exit_sanitizer.ts:[WILDCARD]
+error: AssertionError: Test case attempted to exit with exit code: 0
Deno.exit(0);
^
at [WILDCARD]
at [WILDCARD]/test/exit_sanitizer.ts:2:8
-./test/exit_sanitizer.ts > exit(1)
-AssertionError: Test case attempted to exit with exit code: 1
+exit(1) => ./test/exit_sanitizer.ts:[WILDCARD]
+error: AssertionError: Test case attempted to exit with exit code: 1
Deno.exit(1);
^
at [WILDCARD]
at [WILDCARD]/test/exit_sanitizer.ts:6:8
-./test/exit_sanitizer.ts > exit(2)
-AssertionError: Test case attempted to exit with exit code: 2
+exit(2) => ./test/exit_sanitizer.ts:[WILDCARD]
+error: AssertionError: Test case attempted to exit with exit code: 2
Deno.exit(2);
^
at [WILDCARD]
@@ -29,10 +29,9 @@ AssertionError: Test case attempted to exit with exit code: 2
failures:
- ./test/exit_sanitizer.ts
- exit(0)
- exit(1)
- exit(2)
+exit(0) => ./test/exit_sanitizer.ts:[WILDCARD]
+exit(1) => ./test/exit_sanitizer.ts:[WILDCARD]
+exit(2) => ./test/exit_sanitizer.ts:[WILDCARD]
test result: FAILED. 0 passed; 3 failed; 0 ignored; 0 measured; 0 filtered out ([WILDCARD])
diff --git a/cli/tests/testdata/test/fail.out b/cli/tests/testdata/test/fail.out
index 696dee52b..f996487e1 100644
--- a/cli/tests/testdata/test/fail.out
+++ b/cli/tests/testdata/test/fail.out
@@ -13,79 +13,78 @@ test 9 ... FAILED ([WILDCARD])
failures:
-./test/fail.ts > test 0
-Error
+test 0 => ./test/fail.ts:1:6
+error: Error
throw new Error();
^
at [WILDCARD]/test/fail.ts:2:9
-./test/fail.ts > test 1
-Error
+test 1 => ./test/fail.ts:4:6
+error: Error
throw new Error();
^
at [WILDCARD]/test/fail.ts:5:9
-./test/fail.ts > test 2
-Error
+test 2 => ./test/fail.ts:7:6
+error: Error
throw new Error();
^
at [WILDCARD]/test/fail.ts:8:9
-./test/fail.ts > test 3
-Error
+test 3 => ./test/fail.ts:10:6
+error: Error
throw new Error();
^
at [WILDCARD]/test/fail.ts:11:9
-./test/fail.ts > test 4
-Error
+test 4 => ./test/fail.ts:13:6
+error: Error
throw new Error();
^
at [WILDCARD]/test/fail.ts:14:9
-./test/fail.ts > test 5
-Error
+test 5 => ./test/fail.ts:16:6
+error: Error
throw new Error();
^
at [WILDCARD]/test/fail.ts:17:9
-./test/fail.ts > test 6
-Error
+test 6 => ./test/fail.ts:19:6
+error: Error
throw new Error();
^
at [WILDCARD]/test/fail.ts:20:9
-./test/fail.ts > test 7
-Error
+test 7 => ./test/fail.ts:22:6
+error: Error
throw new Error();
^
at [WILDCARD]/test/fail.ts:23:9
-./test/fail.ts > test 8
-Error
+test 8 => ./test/fail.ts:25:6
+error: Error
throw new Error();
^
at [WILDCARD]/test/fail.ts:26:9
-./test/fail.ts > test 9
-Error
+test 9 => ./test/fail.ts:28:6
+error: Error
throw new Error();
^
at [WILDCARD]/test/fail.ts:29:9
failures:
- ./test/fail.ts
- test 0
- test 1
- test 2
- test 3
- test 4
- test 5
- test 6
- test 7
- test 8
- test 9
+test 0 => ./test/fail.ts:1:6
+test 1 => ./test/fail.ts:4:6
+test 2 => ./test/fail.ts:7:6
+test 3 => ./test/fail.ts:10:6
+test 4 => ./test/fail.ts:13:6
+test 5 => ./test/fail.ts:16:6
+test 6 => ./test/fail.ts:19:6
+test 7 => ./test/fail.ts:22:6
+test 8 => ./test/fail.ts:25:6
+test 9 => ./test/fail.ts:28:6
test result: FAILED. 0 passed; 10 failed; 0 ignored; 0 measured; 0 filtered out ([WILDCARD])
diff --git a/cli/tests/testdata/test/fail_fast.out b/cli/tests/testdata/test/fail_fast.out
index 47c380bcf..75630072b 100644
--- a/cli/tests/testdata/test/fail_fast.out
+++ b/cli/tests/testdata/test/fail_fast.out
@@ -4,16 +4,15 @@ test 1 ... FAILED ([WILDCARD])
failures:
-./test/fail_fast.ts > test 1
-Error
+test 1 => ./test/fail_fast.ts:[WILDCARD]
+error: Error
throw new Error();
^
at [WILDCARD]/test/fail_fast.ts:2:9
failures:
- ./test/fail_fast.ts
- test 1
+test 1 => ./test/fail_fast.ts:[WILDCARD]
test result: FAILED. 0 passed; 1 failed; 0 ignored; 0 measured; 0 filtered out ([WILDCARD])
diff --git a/cli/tests/testdata/test/fail_fast_with_val.out b/cli/tests/testdata/test/fail_fast_with_val.out
index a902e4c7b..aeb51a20a 100644
--- a/cli/tests/testdata/test/fail_fast_with_val.out
+++ b/cli/tests/testdata/test/fail_fast_with_val.out
@@ -5,19 +5,19 @@ test test 2 ... FAILED ([WILDCARD])
failures:
-test 1
-Error
+test 1 => ./test/fail_fast_with_val.ts:[WILDCARD]
+error: Error
at [WILDCARD]/test/fail_fast_with_val.ts:2:9
at [WILDCARD]
-test 2
-Error
+test 2 => ./test/fail_fast_with_val.ts:[WILDCARD]
+error: Error
at [WILDCARD]/test/fail_fast_with_val.ts:5:9
at [WILDCARD]
failures:
- test 1
- test 2
+test 1 => ./test/fail_fast_with_val.ts:[WILDCARD]
+test 2 => ./test/fail_fast_with_val.ts:[WILDCARD]
test result: FAILED. 0 passed; 2 failed; 0 ignored; 0 measured; 0 filtered out ([WILDCARD])
diff --git a/cli/tests/testdata/test/finally_timeout.out b/cli/tests/testdata/test/finally_timeout.out
index 7fd882670..f72dbce95 100644
--- a/cli/tests/testdata/test/finally_timeout.out
+++ b/cli/tests/testdata/test/finally_timeout.out
@@ -5,16 +5,15 @@ success ... ok ([WILDCARD])
failures:
-./test/finally_timeout.ts > error
-Error: fail
+error => ./test/finally_timeout.ts:[WILDCARD]
+error: Error: fail
throw new Error("fail");
^
at [WILDCARD]/test/finally_timeout.ts:4:11
failures:
- ./test/finally_timeout.ts
- error
+error => ./test/finally_timeout.ts:[WILDCARD]
test result: FAILED. 1 passed; 1 failed; 0 ignored; 0 measured; 0 filtered out ([WILDCARD])
diff --git a/cli/tests/testdata/test/no_prompt_by_default.out b/cli/tests/testdata/test/no_prompt_by_default.out
index 61fcea8c8..f577337d8 100644
--- a/cli/tests/testdata/test/no_prompt_by_default.out
+++ b/cli/tests/testdata/test/no_prompt_by_default.out
@@ -3,14 +3,13 @@ no prompt ... FAILED ([WILDCARD]ms)
failures:
-./test/no_prompt_by_default.ts > no prompt
-PermissionDenied: Requires read access to "./some_file.txt", run again with the --allow-read flag
+no prompt => ./test/no_prompt_by_default.ts:[WILDCARD]
+error: PermissionDenied: Requires read access to "./some_file.txt", run again with the --allow-read flag
[WILDCARD]
failures:
- ./test/no_prompt_by_default.ts
- no prompt
+no prompt => ./test/no_prompt_by_default.ts:[WILDCARD]
test result: FAILED. 0 passed; 1 failed; 0 ignored; 0 measured; 0 filtered out ([WILDCARD]ms)
diff --git a/cli/tests/testdata/test/no_prompt_with_denied_perms.out b/cli/tests/testdata/test/no_prompt_with_denied_perms.out
index 16a2092c5..695323043 100644
--- a/cli/tests/testdata/test/no_prompt_with_denied_perms.out
+++ b/cli/tests/testdata/test/no_prompt_with_denied_perms.out
@@ -3,14 +3,13 @@ no prompt ... FAILED ([WILDCARD]ms)
failures:
-./test/no_prompt_with_denied_perms.ts > no prompt
-PermissionDenied: Requires read access to "./some_file.txt", run again with the --allow-read flag
+no prompt => ./test/no_prompt_with_denied_perms.ts:[WILDCARD]
+error: PermissionDenied: Requires read access to "./some_file.txt", run again with the --allow-read flag
[WILDCARD]
failures:
- ./test/no_prompt_with_denied_perms.ts
- no prompt
+no prompt => ./test/no_prompt_with_denied_perms.ts:[WILDCARD]
test result: FAILED. 0 passed; 1 failed; 0 ignored; 0 measured; 0 filtered out ([WILDCARD]ms)
diff --git a/cli/tests/testdata/test/ops_sanitizer_missing_details.out b/cli/tests/testdata/test/ops_sanitizer_missing_details.out
index 88f6f04b2..f5fd086b7 100644
--- a/cli/tests/testdata/test/ops_sanitizer_missing_details.out
+++ b/cli/tests/testdata/test/ops_sanitizer_missing_details.out
@@ -4,8 +4,8 @@ test 1 ... FAILED [WILDCARD]
failures:
-./test/ops_sanitizer_missing_details.ts > test 1
-Test case is leaking async ops.
+test 1 => ./test/ops_sanitizer_missing_details.ts:[WILDCARD]
+error: Test case is leaking async ops.
- 1 async operation to op_write was started in this test, but never completed.
@@ -13,8 +13,7 @@ To get more details where ops were leaked, run again with --trace-ops flag.
failures:
- ./test/ops_sanitizer_missing_details.ts
- test 1
+test 1 => ./test/ops_sanitizer_missing_details.ts:[WILDCARD]
test result: FAILED. 0 passed; 1 failed; 0 ignored; 0 measured; 0 filtered out [WILDCARD]
diff --git a/cli/tests/testdata/test/ops_sanitizer_multiple_timeout_tests.out b/cli/tests/testdata/test/ops_sanitizer_multiple_timeout_tests.out
index 222d22162..954e41788 100644
--- a/cli/tests/testdata/test/ops_sanitizer_multiple_timeout_tests.out
+++ b/cli/tests/testdata/test/ops_sanitizer_multiple_timeout_tests.out
@@ -5,8 +5,8 @@ test 2 ... FAILED ([WILDCARD])
failures:
-./test/ops_sanitizer_multiple_timeout_tests.ts > test 1
-Test case is leaking async ops.
+test 1 => ./test/ops_sanitizer_multiple_timeout_tests.ts:[WILDCARD]
+error: Test case is leaking async ops.
- 2 async operations to sleep for a duration were started in this test, but never completed. This is often caused by not cancelling a `setTimeout` or `setInterval` call. The operations were started here:
at [WILDCARD]
@@ -21,8 +21,8 @@ Test case is leaking async ops.
at [WILDCARD]/testdata/test/ops_sanitizer_multiple_timeout_tests.ts:8:27
at [WILDCARD]
-./test/ops_sanitizer_multiple_timeout_tests.ts > test 2
-Test case is leaking async ops.
+test 2 => ./test/ops_sanitizer_multiple_timeout_tests.ts:[WILDCARD]
+error: Test case is leaking async ops.
- 2 async operations to sleep for a duration were started in this test, but never completed. This is often caused by not cancelling a `setTimeout` or `setInterval` call. The operations were started here:
at [WILDCARD]
@@ -39,9 +39,8 @@ Test case is leaking async ops.
failures:
- ./test/ops_sanitizer_multiple_timeout_tests.ts
- test 1
- test 2
+test 1 => ./test/ops_sanitizer_multiple_timeout_tests.ts:[WILDCARD]
+test 2 => ./test/ops_sanitizer_multiple_timeout_tests.ts:[WILDCARD]
test result: FAILED. 0 passed; 2 failed; 0 ignored; 0 measured; 0 filtered out ([WILDCARD])
diff --git a/cli/tests/testdata/test/ops_sanitizer_multiple_timeout_tests_no_trace.out b/cli/tests/testdata/test/ops_sanitizer_multiple_timeout_tests_no_trace.out
index 10758fba6..e4ca9b906 100644
--- a/cli/tests/testdata/test/ops_sanitizer_multiple_timeout_tests_no_trace.out
+++ b/cli/tests/testdata/test/ops_sanitizer_multiple_timeout_tests_no_trace.out
@@ -5,15 +5,15 @@ test 2 ... FAILED ([WILDCARD])
failures:
-./test/ops_sanitizer_multiple_timeout_tests.ts > test 1
-Test case is leaking async ops.
+test 1 => ./test/ops_sanitizer_multiple_timeout_tests.ts:[WILDCARD]
+error: Test case is leaking async ops.
- 2 async operations to sleep for a duration were started in this test, but never completed. This is often caused by not cancelling a `setTimeout` or `setInterval` call.
To get more details where ops were leaked, run again with --trace-ops flag.
-./test/ops_sanitizer_multiple_timeout_tests.ts > test 2
-Test case is leaking async ops.
+test 2 => ./test/ops_sanitizer_multiple_timeout_tests.ts:[WILDCARD]
+error: Test case is leaking async ops.
- 2 async operations to sleep for a duration were started in this test, but never completed. This is often caused by not cancelling a `setTimeout` or `setInterval` call.
@@ -21,9 +21,8 @@ To get more details where ops were leaked, run again with --trace-ops flag.
failures:
- ./test/ops_sanitizer_multiple_timeout_tests.ts
- test 1
- test 2
+test 1 => ./test/ops_sanitizer_multiple_timeout_tests.ts:[WILDCARD]
+test 2 => ./test/ops_sanitizer_multiple_timeout_tests.ts:[WILDCARD]
test result: FAILED. 0 passed; 2 failed; 0 ignored; 0 measured; 0 filtered out ([WILDCARD])
diff --git a/cli/tests/testdata/test/ops_sanitizer_unstable.out b/cli/tests/testdata/test/ops_sanitizer_unstable.out
index 71fc12191..b8cbf33cb 100644
--- a/cli/tests/testdata/test/ops_sanitizer_unstable.out
+++ b/cli/tests/testdata/test/ops_sanitizer_unstable.out
@@ -5,8 +5,8 @@ leak interval ... FAILED ([WILDCARD])
failures:
-./test/ops_sanitizer_unstable.ts > leak interval
-Test case is leaking async ops.
+leak interval => ./test/ops_sanitizer_unstable.ts:[WILDCARD]
+error: Test case is leaking async ops.
- 1 async operation to sleep for a duration was started in this test, but never completed. This is often caused by not cancelling a `setTimeout` or `setInterval` call. The operation was started here:
at [WILDCARD]
@@ -16,8 +16,7 @@ Test case is leaking async ops.
failures:
- ./test/ops_sanitizer_unstable.ts
- leak interval
+leak interval => ./test/ops_sanitizer_unstable.ts:[WILDCARD]
test result: FAILED. 1 passed; 1 failed; 0 ignored; 0 measured; 0 filtered out ([WILDCARD])
diff --git a/cli/tests/testdata/test/resource_sanitizer.out b/cli/tests/testdata/test/resource_sanitizer.out
index 847fe9606..949ec64a7 100644
--- a/cli/tests/testdata/test/resource_sanitizer.out
+++ b/cli/tests/testdata/test/resource_sanitizer.out
@@ -4,8 +4,8 @@ leak ... FAILED ([WILDCARD])
failures:
-./test/resource_sanitizer.ts > leak
-AssertionError: Test case is leaking 2 resources:
+leak => ./test/resource_sanitizer.ts:[WILDCARD]
+error: AssertionError: Test case is leaking 2 resources:
- The stdin pipe (rid 0) was opened before the test started, but was closed during the test. Do not close resources in a test that were not created during that test.
- A file (rid 3) was opened during the test, but not closed during the test. Close the file handle by calling `file.close()`.
@@ -14,8 +14,7 @@ AssertionError: Test case is leaking 2 resources:
failures:
- ./test/resource_sanitizer.ts
- leak
+leak => ./test/resource_sanitizer.ts:[WILDCARD]
test result: FAILED. 0 passed; 1 failed; 0 ignored; 0 measured; 0 filtered out ([WILDCARD])
diff --git a/cli/tests/testdata/test/steps/failing_steps.out b/cli/tests/testdata/test/steps/failing_steps.out
index 2095eda51..5baa09ce7 100644
--- a/cli/tests/testdata/test/steps/failing_steps.out
+++ b/cli/tests/testdata/test/steps/failing_steps.out
@@ -3,7 +3,7 @@ running 3 tests from ./test/steps/failing_steps.ts
nested failure ...
step 1 ...
inner 1 ... FAILED ([WILDCARD])
- Error: Failed.
+ error: Error: Failed.
throw new Error("Failed.");
^
at [WILDCARD]/failing_steps.ts:[WILDCARD]
@@ -13,12 +13,12 @@ nested failure ...
FAILED ([WILDCARD])
multiple test step failures ...
step 1 ... FAILED ([WILDCARD])
- Error: Fail.
+ error: Error: Fail.
throw new Error("Fail.");
^
[WILDCARD]
step 2 ... FAILED ([WILDCARD])
- Error: Fail.
+ error: Error: Fail.
await t.step("step 2", () => Promise.reject(new Error("Fail.")));
^
at [WILDCARD]/failing_steps.ts:[WILDCARD]
@@ -26,7 +26,7 @@ multiple test step failures ...
FAILED ([WILDCARD])
failing step in failing test ...
step 1 ... FAILED ([WILDCARD])
- Error: Fail.
+ error: Error: Fail.
throw new Error("Fail.");
^
at [WILDCARD]/failing_steps.ts:[WILDCARD]
@@ -35,28 +35,27 @@ FAILED ([WILDCARD])
failures:
-./test/steps/failing_steps.ts > nested failure
-Error: 1 test step failed.
+nested failure => ./test/steps/failing_steps.ts:[WILDCARD]
+error: Error: 1 test step failed.
at runTest (deno:runtime/js/40_testing.js:[WILDCARD])
at async Object.runTests (deno:runtime/js/40_testing.js:[WILDCARD])
-./test/steps/failing_steps.ts > multiple test step failures
-Error: 2 test steps failed.
+multiple test step failures => ./test/steps/failing_steps.ts:[WILDCARD]
+error: Error: 2 test steps failed.
at runTest (deno:runtime/js/40_testing.js:[WILDCARD])
at async Object.runTests (deno:runtime/js/40_testing.js:[WILDCARD])
-./test/steps/failing_steps.ts > failing step in failing test
-Error: Fail test.
+failing step in failing test => ./test/steps/failing_steps.ts:[WILDCARD]
+error: Error: Fail test.
throw new Error("Fail test.");
^
at [WILDCARD]/failing_steps.ts:[WILDCARD]
failures:
- ./test/steps/failing_steps.ts
- nested failure
- multiple test step failures
- failing step in failing test
+nested failure => ./test/steps/failing_steps.ts:[WILDCARD]
+multiple test step failures => ./test/steps/failing_steps.ts:[WILDCARD]
+failing step in failing test => ./test/steps/failing_steps.ts:[WILDCARD]
test result: FAILED. 0 passed (1 step); 3 failed (5 steps); 0 ignored; 0 measured; 0 filtered out ([WILDCARD])
diff --git a/cli/tests/testdata/test/steps/invalid_usage.out b/cli/tests/testdata/test/steps/invalid_usage.out
index 3b9921f14..afa19c53d 100644
--- a/cli/tests/testdata/test/steps/invalid_usage.out
+++ b/cli/tests/testdata/test/steps/invalid_usage.out
@@ -9,11 +9,11 @@ FAILED ([WILDCARD])
inner missing await ...
step ...
inner ... pending ([WILDCARD])
- Error: Parent scope completed before test step finished execution. Ensure all steps are awaited (ex. `await t.step(...)`).
+ error: Error: Parent scope completed before test step finished execution. Ensure all steps are awaited (ex. `await t.step(...)`).
at [WILDCARD]
at async TestContext.step [WILDCARD]
FAILED ([WILDCARD])
- Error: There were still test steps running after the current scope finished execution. Ensure all steps are awaited (ex. `await t.step(...)`).
+ error: Error: There were still test steps running after the current scope finished execution. Ensure all steps are awaited (ex. `await t.step(...)`).
await t.step("step", (t) => {
^
at [WILDCARD]
@@ -22,7 +22,7 @@ FAILED ([WILDCARD])
parallel steps with sanitizers ...
step 1 ... pending ([WILDCARD])
step 2 ... FAILED ([WILDCARD])
- Error: Cannot start test step while another test step with sanitizers is running.
+ error: Error: Cannot start test step while another test step with sanitizers is running.
* parallel steps with sanitizers > step 1
await t.step("step 2", () => {});
^
@@ -32,7 +32,7 @@ FAILED ([WILDCARD])
parallel steps when first has sanitizer ...
step 1 ... pending ([WILDCARD])
step 2 ... FAILED ([WILDCARD])
- Error: Cannot start test step while another test step with sanitizers is running.
+ error: Error: Cannot start test step while another test step with sanitizers is running.
* parallel steps when first has sanitizer > step 1
await t.step({
^
@@ -42,7 +42,7 @@ FAILED ([WILDCARD])
parallel steps when second has sanitizer ...
step 1 ... ok ([WILDCARD])
step 2 ... FAILED ([WILDCARD])
- Error: Cannot start test step with sanitizers while another test step is running.
+ error: Error: Cannot start test step with sanitizers while another test step is running.
* parallel steps when second has sanitizer > step 1
await t.step({
^
@@ -55,7 +55,7 @@ parallel steps where only inner tests have sanitizers ...
ok ([WILDCARD])
step 2 ...
step inner ... FAILED ([WILDCARD])
- Error: Cannot start test step with sanitizers while another test step is running.
+ error: Error: Cannot start test step with sanitizers while another test step is running.
* parallel steps where only inner tests have sanitizers > step 1
await t.step({
^
@@ -66,49 +66,48 @@ FAILED ([WILDCARD])
failures:
-./test/steps/invalid_usage.ts > capturing
-Error: Cannot run test step after parent scope has finished execution. Ensure any `.step(...)` calls are executed before their parent scope completes execution.
+capturing => ./test/steps/invalid_usage.ts:[WILDCARD]
+error: Error: Cannot run test step after parent scope has finished execution. Ensure any `.step(...)` calls are executed before their parent scope completes execution.
await capturedContext.step("next step", () => {});
^
at TestContext.step ([WILDCARD])
at [WILDCARD]/invalid_usage.ts:[WILDCARD]
-./test/steps/invalid_usage.ts > top level missing await
-Error: There were still test steps running after the current scope finished execution. Ensure all steps are awaited (ex. `await t.step(...)`).
+top level missing await => ./test/steps/invalid_usage.ts:[WILDCARD]
+error: Error: There were still test steps running after the current scope finished execution. Ensure all steps are awaited (ex. `await t.step(...)`).
at postValidation [WILDCARD]
at testStepSanitizer ([WILDCARD])
[WILDCARD]
-./test/steps/invalid_usage.ts > inner missing await
-Error: 1 test step failed.
+inner missing await => ./test/steps/invalid_usage.ts:[WILDCARD]
+error: Error: 1 test step failed.
at [WILDCARD]
-./test/steps/invalid_usage.ts > parallel steps with sanitizers
-Error: There were still test steps running after the current scope finished execution. Ensure all steps are awaited (ex. `await t.step(...)`).
+parallel steps with sanitizers => ./test/steps/invalid_usage.ts:[WILDCARD]
+error: Error: There were still test steps running after the current scope finished execution. Ensure all steps are awaited (ex. `await t.step(...)`).
at postValidation [WILDCARD]
at testStepSanitizer ([WILDCARD])
[WILDCARD]
-./test/steps/invalid_usage.ts > parallel steps when first has sanitizer
-Error: 1 test step failed.
+parallel steps when first has sanitizer => ./test/steps/invalid_usage.ts:[WILDCARD]
+error: Error: 1 test step failed.
at runTest ([WILDCARD])
at [WILDCARD]
-./test/steps/invalid_usage.ts > parallel steps when second has sanitizer
-Error: 1 test step failed.
+parallel steps when second has sanitizer => ./test/steps/invalid_usage.ts:[WILDCARD]
+error: Error: 1 test step failed.
at runTest ([WILDCARD])
at [WILDCARD]
failures:
- ./test/steps/invalid_usage.ts
- capturing
- top level missing await
- inner missing await
- parallel steps with sanitizers
- parallel steps when first has sanitizer
- parallel steps when second has sanitizer
- parallel steps where only inner tests have sanitizers
+capturing => ./test/steps/invalid_usage.ts:[WILDCARD]
+top level missing await => ./test/steps/invalid_usage.ts:[WILDCARD]
+inner missing await => ./test/steps/invalid_usage.ts:[WILDCARD]
+parallel steps with sanitizers => ./test/steps/invalid_usage.ts:[WILDCARD]
+parallel steps when first has sanitizer => ./test/steps/invalid_usage.ts:[WILDCARD]
+parallel steps when second has sanitizer => ./test/steps/invalid_usage.ts:[WILDCARD]
+parallel steps where only inner tests have sanitizers => ./test/steps/invalid_usage.ts:[WILDCARD]
test result: FAILED. 0 passed (4 steps); 7 failed (10 steps); 0 ignored; 0 measured; 0 filtered out ([WILDCARD])
diff --git a/cli/tools/test.rs b/cli/tools/test.rs
index 096be383d..dc3eeba3b 100644
--- a/cli/tools/test.rs
+++ b/cli/tools/test.rs
@@ -51,7 +51,6 @@ use rand::seq::SliceRandom;
use rand::SeedableRng;
use regex::Regex;
use serde::Deserialize;
-use std::collections::BTreeMap;
use std::collections::HashMap;
use std::collections::HashSet;
use std::io::Read;
@@ -78,9 +77,18 @@ pub enum TestMode {
#[derive(Debug, Clone, PartialEq, Deserialize, Eq, Hash)]
#[serde(rename_all = "camelCase")]
+pub struct TestLocation {
+ pub file_name: String,
+ pub line_number: u32,
+ pub column_number: u32,
+}
+
+#[derive(Debug, Clone, PartialEq, Deserialize, Eq, Hash)]
+#[serde(rename_all = "camelCase")]
pub struct TestDescription {
pub origin: String,
pub name: String,
+ pub location: TestLocation,
}
#[derive(Debug, Clone, PartialEq, Deserialize)]
@@ -303,6 +311,7 @@ impl PrettyTestReporter {
if let Some(js_error) = result.error() {
let err_string = format_test_error(js_error);
+ let err_string = format!("{}: {}", colors::red_bold("error"), err_string);
for line in err_string.lines() {
println!("{}{}", " ".repeat(description.level + 1), line);
}
@@ -442,38 +451,33 @@ impl TestReporter for PrettyTestReporter {
fn report_summary(&mut self, summary: &TestSummary, elapsed: &Duration) {
if !summary.failures.is_empty() {
+ let mut failure_titles = vec![];
println!("\nfailures:\n");
for (description, js_error) in &summary.failures {
+ let failure_title = format!(
+ "{} {}",
+ &description.name,
+ colors::gray(format!(
+ "=> {}:{}:{}",
+ self
+ .to_relative_path_or_remote_url(&description.location.file_name),
+ description.location.line_number,
+ description.location.column_number
+ ))
+ );
+ println!("{}", &failure_title);
println!(
- "{} {} {}",
- colors::gray(
- self.to_relative_path_or_remote_url(&description.origin)
- ),
- colors::gray(">"),
- description.name
+ "{}: {}",
+ colors::red_bold("error"),
+ format_test_error(js_error)
);
- println!("{}", format_test_error(js_error));
println!();
- }
-
- let mut grouped_by_origin: BTreeMap<String, Vec<String>> =
- BTreeMap::default();
- for (description, _) in &summary.failures {
- let test_names = grouped_by_origin
- .entry(description.origin.clone())
- .or_default();
- test_names.push(description.name.clone());
+ failure_titles.push(failure_title);
}
println!("failures:\n");
- for (origin, test_names) in &grouped_by_origin {
- println!(
- "\t{}",
- colors::gray(self.to_relative_path_or_remote_url(origin))
- );
- for test_name in test_names {
- println!("\t{}", test_name);
- }
+ for failure_title in failure_titles {
+ println!("{}", failure_title);
}
}
diff --git a/runtime/js/40_testing.js b/runtime/js/40_testing.js
index b176e7b2d..47af45110 100644
--- a/runtime/js/40_testing.js
+++ b/runtime/js/40_testing.js
@@ -635,6 +635,16 @@
);
}
+ const jsError = Deno.core.destructureError(new Error());
+ // Note: There might pop up a case where one of the filename, line number or
+ // column number from the caller isn't defined. We assume never for now.
+ // Make `TestDescription::location` optional if such a case is found.
+ testDef.location = {
+ fileName: jsError.frames[1].fileName,
+ lineNumber: jsError.frames[1].lineNumber,
+ columnNumber: jsError.frames[1].columnNumber,
+ };
+
ArrayPrototypePush(tests, testDef);
}
@@ -1097,6 +1107,7 @@
const description = {
origin,
name: test.name,
+ location: test.location,
};
const earlier = DateNow();
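
The runtime/js/40_testing.js hunk above records the Deno.test() call site by constructing an Error inside the test registration wrapper and reading frame 1 (the wrapper's caller) from Deno.core.destructureError(). A rough user-land sketch of the same technique, parsing the V8 stack string instead of using that internal helper (the wrapper name myTest, the regex, and the frame indices are illustrative assumptions, not part of this patch):

    // Sketch: approximate how the patched test() wrapper captures its call site.
    function parseFrame(stackLine: string) {
      // Frames look like "    at myTest (file:///mod.ts:12:3)" or "    at file:///mod.ts:12:3".
      const m = stackLine.match(/\(?([^()\s]+):(\d+):(\d+)\)?$/);
      return m
        ? { fileName: m[1], lineNumber: Number(m[2]), columnNumber: Number(m[3]) }
        : undefined;
    }

    function myTest(name: string, fn: () => void) {
      // The Error is created inside the wrapper, so stack line 0 is the "Error"
      // header, line 1 is myTest itself, and line 2 is the caller -- the same
      // frame the patch reads as jsError.frames[1].
      const stackLines = (new Error().stack ?? "").split("\n");
      const location = parseFrame(stackLines[2] ?? "");
      try {
        fn();
      } catch (err) {
        // On failure, report the registration site the way the new summary does.
        console.log(
          `${name} => ${location?.fileName}:${location?.lineNumber}:${location?.columnNumber}`,
        );
        console.log("error:", err);
      }
    }

    myTest("test 0", () => {
      throw new Error("boom");
    });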