Diffstat (limited to 'cli')
-rw-r--r-- | cli/lsp/testing/execution.rs | 11
-rw-r--r-- | cli/tests/testdata/test/aggregate_error.out | 5
-rw-r--r-- | cli/tests/testdata/test/exit_sanitizer.out | 3
-rw-r--r-- | cli/tests/testdata/test/fail.out | 10
-rw-r--r-- | cli/tests/testdata/test/fail_fast.out | 1
-rw-r--r-- | cli/tests/testdata/test/finally_timeout.out | 1
-rw-r--r-- | cli/tests/testdata/test/steps/failing_steps.out | 1
-rw-r--r-- | cli/tests/testdata/test/steps/invalid_usage.out | 24
-rw-r--r-- | cli/tools/test.rs | 70
9 files changed, 72 insertions, 54 deletions
diff --git a/cli/lsp/testing/execution.rs b/cli/lsp/testing/execution.rs
index b305b1650..93f3d9ba3 100644
--- a/cli/lsp/testing/execution.rs
+++ b/cli/lsp/testing/execution.rs
@@ -8,7 +8,6 @@ use crate::checksum;
 use crate::create_main_worker;
 use crate::emit;
 use crate::flags;
-use crate::fmt_errors::PrettyJsError;
 use crate::located_script_name;
 use crate::lsp::client::Client;
 use crate::lsp::client::TestingNotification;
@@ -799,10 +798,7 @@ impl test::TestReporter for LspTestReporter {
         })
       }
       test::TestResult::Failed(js_error) => {
-        let err_string = PrettyJsError::create(*js_error.clone())
-          .to_string()
-          .trim_start_matches("Uncaught ")
-          .to_string();
+        let err_string = test::format_test_error(js_error);
         self.progress(lsp_custom::TestRunProgressMessage::Failed {
           test: desc.into(),
           messages: as_test_messages(err_string, false),
@@ -846,10 +842,7 @@ impl test::TestReporter for LspTestReporter {
       }
       test::TestStepResult::Failed(js_error) => {
         let messages = if let Some(js_error) = js_error {
-          let err_string = PrettyJsError::create(*js_error.clone())
-            .to_string()
-            .trim_start_matches("Uncaught ")
-            .to_string();
+          let err_string = test::format_test_error(js_error);
           as_test_messages(err_string, false)
         } else {
           vec![]
diff --git a/cli/tests/testdata/test/aggregate_error.out b/cli/tests/testdata/test/aggregate_error.out
index 5c4c9b392..3d0b807f0 100644
--- a/cli/tests/testdata/test/aggregate_error.out
+++ b/cli/tests/testdata/test/aggregate_error.out
@@ -7,12 +7,11 @@ failures:
 AggregateError
     Error: Error 1
         at [WILDCARD]/testdata/test/aggregate_error.ts:2:18
-        [WILDCARD]
     Error: Error 2
         at [WILDCARD]/testdata/test/aggregate_error.ts:3:18
-        [WILDCARD]
+throw new AggregateError([error1, error2]);
+      ^
     at [WILDCARD]/testdata/test/aggregate_error.ts:5:9
-    at [WILDCARD]

 failures:

diff --git a/cli/tests/testdata/test/exit_sanitizer.out b/cli/tests/testdata/test/exit_sanitizer.out
index a0a659cb6..5468af1df 100644
--- a/cli/tests/testdata/test/exit_sanitizer.out
+++ b/cli/tests/testdata/test/exit_sanitizer.out
@@ -12,7 +12,6 @@ AssertionError: Test case attempted to exit with exit code: 0
      ^
     at [WILDCARD]
     at [WILDCARD]/test/exit_sanitizer.ts:2:8
-    at [WILDCARD]

 ./test/exit_sanitizer.ts > exit(1)
 AssertionError: Test case attempted to exit with exit code: 1
@@ -20,7 +19,6 @@ AssertionError: Test case attempted to exit with exit code: 1
      ^
     at [WILDCARD]
     at [WILDCARD]/test/exit_sanitizer.ts:6:8
-    at [WILDCARD]

 ./test/exit_sanitizer.ts > exit(2)
 AssertionError: Test case attempted to exit with exit code: 2
@@ -28,7 +26,6 @@ AssertionError: Test case attempted to exit with exit code: 2
      ^
     at [WILDCARD]
     at [WILDCARD]/test/exit_sanitizer.ts:10:8
-    at [WILDCARD]

 failures:

diff --git a/cli/tests/testdata/test/fail.out b/cli/tests/testdata/test/fail.out
index ba96ef7e2..696dee52b 100644
--- a/cli/tests/testdata/test/fail.out
+++ b/cli/tests/testdata/test/fail.out
@@ -18,70 +18,60 @@ Error
 throw new Error();
       ^
     at [WILDCARD]/test/fail.ts:2:9
-    at [WILDCARD]

 ./test/fail.ts > test 1
 Error
 throw new Error();
       ^
     at [WILDCARD]/test/fail.ts:5:9
-    at [WILDCARD]

 ./test/fail.ts > test 2
 Error
 throw new Error();
       ^
     at [WILDCARD]/test/fail.ts:8:9
-    at [WILDCARD]

 ./test/fail.ts > test 3
 Error
 throw new Error();
       ^
     at [WILDCARD]/test/fail.ts:11:9
-    at [WILDCARD]

 ./test/fail.ts > test 4
 Error
 throw new Error();
       ^
     at [WILDCARD]/test/fail.ts:14:9
-    at [WILDCARD]

 ./test/fail.ts > test 5
 Error
 throw new Error();
       ^
     at [WILDCARD]/test/fail.ts:17:9
-    at [WILDCARD]

 ./test/fail.ts > test 6
 Error
 throw new Error();
       ^
     at [WILDCARD]/test/fail.ts:20:9
-    at [WILDCARD]

 ./test/fail.ts > test 7
 Error
 throw new Error();
       ^
     at [WILDCARD]/test/fail.ts:23:9
-    at [WILDCARD]

 ./test/fail.ts > test 8
 Error
 throw new Error();
       ^
     at [WILDCARD]/test/fail.ts:26:9
-    at [WILDCARD]

 ./test/fail.ts > test 9
 Error
 throw new Error();
       ^
     at [WILDCARD]/test/fail.ts:29:9
-    at [WILDCARD]

 failures:

diff --git a/cli/tests/testdata/test/fail_fast.out b/cli/tests/testdata/test/fail_fast.out
index b13f5f677..47c380bcf 100644
--- a/cli/tests/testdata/test/fail_fast.out
+++ b/cli/tests/testdata/test/fail_fast.out
@@ -9,7 +9,6 @@ Error
 throw new Error();
       ^
     at [WILDCARD]/test/fail_fast.ts:2:9
-    at [WILDCARD]

 failures:

diff --git a/cli/tests/testdata/test/finally_timeout.out b/cli/tests/testdata/test/finally_timeout.out
index 04a1c2775..7fd882670 100644
--- a/cli/tests/testdata/test/finally_timeout.out
+++ b/cli/tests/testdata/test/finally_timeout.out
@@ -10,7 +10,6 @@ Error: fail
 throw new Error("fail");
       ^
     at [WILDCARD]/test/finally_timeout.ts:4:11
-    at [WILDCARD]

 failures:

diff --git a/cli/tests/testdata/test/steps/failing_steps.out b/cli/tests/testdata/test/steps/failing_steps.out
index a5fd9d74d..2095eda51 100644
--- a/cli/tests/testdata/test/steps/failing_steps.out
+++ b/cli/tests/testdata/test/steps/failing_steps.out
@@ -50,7 +50,6 @@ Error: Fail test.
 throw new Error("Fail test.");
       ^
     at [WILDCARD]/failing_steps.ts:[WILDCARD]
-    at [WILDCARD]

 failures:

diff --git a/cli/tests/testdata/test/steps/invalid_usage.out b/cli/tests/testdata/test/steps/invalid_usage.out
index 9107cb4e0..3b9921f14 100644
--- a/cli/tests/testdata/test/steps/invalid_usage.out
+++ b/cli/tests/testdata/test/steps/invalid_usage.out
@@ -10,16 +10,14 @@ inner missing await ...
   step ...
     inner ... pending ([WILDCARD])
 Error: Parent scope completed before test step finished execution. Ensure all steps are awaited (ex. `await t.step(...)`).
-    at postValidation [WILDCARD]
-    at testStepSanitizer [WILDCARD]
+    at [WILDCARD]
+    at async TestContext.step [WILDCARD]
   FAILED ([WILDCARD])
 Error: There were still test steps running after the current scope finished execution. Ensure all steps are awaited (ex. `await t.step(...)`).
 await t.step("step", (t) => {
       ^
-    at postValidation [WILDCARD]
-    at testStepSanitizer [WILDCARD]
+    at [WILDCARD]
     at async fn ([WILDCARD]/invalid_usage.ts:[WILDCARD])
-    at async Object.testStepSanitizer [WILDCARD]
 FAILED ([WILDCARD])
 parallel steps with sanitizers ...
   step 1 ... pending ([WILDCARD])
@@ -28,10 +26,8 @@ parallel steps with sanitizers ...
 * parallel steps with sanitizers > step 1
 await t.step("step 2", () => {});
       ^
-    at preValidation ([WILDCARD])
-    at testStepSanitizer ([WILDCARD])
-    at [WILDCARD]/invalid_usage.ts:[WILDCARD]
     at [WILDCARD]
+    at [WILDCARD]/invalid_usage.ts:[WILDCARD]
   FAILED ([WILDCARD])
 parallel steps when first has sanitizer ...
   step 1 ... pending ([WILDCARD])
@@ -40,10 +36,8 @@ parallel steps when first has sanitizer ...
 * parallel steps when first has sanitizer > step 1
 await t.step({
       ^
-    at preValidation ([WILDCARD])
-    at testStepSanitizer ([WILDCARD])
-    at [WILDCARD]/invalid_usage.ts:[WILDCARD]
     at [WILDCARD]
+    at [WILDCARD]/invalid_usage.ts:[WILDCARD]
   FAILED ([WILDCARD])
 parallel steps when second has sanitizer ...
   step 1 ... ok ([WILDCARD])
@@ -52,10 +46,8 @@ parallel steps when second has sanitizer ...
 * parallel steps when second has sanitizer > step 1
 await t.step({
       ^
-    at preValidation ([WILDCARD])
-    at testStepSanitizer ([WILDCARD])
-    at [WILDCARD]/invalid_usage.ts:[WILDCARD]
     at [WILDCARD]
+    at [WILDCARD]/invalid_usage.ts:[WILDCARD]
   FAILED ([WILDCARD])
 parallel steps where only inner tests have sanitizers ...
   step 1 ...
@@ -67,8 +59,7 @@ parallel steps where only inner tests have sanitizers ...
 * parallel steps where only inner tests have sanitizers > step 1
 await t.step({
       ^
-    at preValidation ([WILDCARD])
-    at testStepSanitizer ([WILDCARD])
+    at [WILDCARD]
     at [WILDCARD]/invalid_usage.ts:[WILDCARD]
   FAILED ([WILDCARD])
 FAILED ([WILDCARD])
@@ -81,7 +72,6 @@ Error: Cannot run test step after parent scope has finished execution. Ensure an
       ^
     at TestContext.step ([WILDCARD])
     at [WILDCARD]/invalid_usage.ts:[WILDCARD]
-    at [WILDCARD]

 ./test/steps/invalid_usage.ts > top level missing await
 Error: There were still test steps running after the current scope finished execution. Ensure all steps are awaited (ex. `await t.step(...)`).
diff --git a/cli/tools/test.rs b/cli/tools/test.rs
index 96e0dd8c3..aab062a6c 100644
--- a/cli/tools/test.rs
+++ b/cli/tools/test.rs
@@ -301,10 +301,7 @@ impl PrettyTestReporter {
     );

     if let Some(js_error) = result.error() {
-      let err_string = PrettyJsError::create(js_error.clone())
-        .to_string()
-        .trim_start_matches("Uncaught ")
-        .to_string();
+      let err_string = format_test_error(js_error);
       for line in err_string.lines() {
         println!("{}{}", " ".repeat(description.level + 1), line);
       }
@@ -464,11 +461,7 @@ impl TestReporter for PrettyTestReporter {
         colors::gray(">"),
         description.name
       );
-      let err_string = PrettyJsError::create(*js_error.clone())
-        .to_string()
-        .trim_start_matches("Uncaught ")
-        .to_string();
-      println!("{}", err_string);
+      println!("{}", format_test_error(js_error));
       println!();
     }

@@ -525,6 +518,65 @@ impl TestReporter for PrettyTestReporter {
   }
 }

+fn abbreviate_test_error(js_error: &JsError) -> JsError {
+  let mut js_error = js_error.clone();
+  let frames = std::mem::take(&mut js_error.frames);
+
+  // check if there are any stack frames coming from user code
+  let should_filter = frames.iter().any(|f| {
+    if let Some(file_name) = &f.file_name {
+      !(file_name.starts_with("[deno:") || file_name.starts_with("deno:"))
+    } else {
+      true
+    }
+  });
+
+  if should_filter {
+    let mut frames = frames
+      .into_iter()
+      .rev()
+      .skip_while(|f| {
+        if let Some(file_name) = &f.file_name {
+          file_name.starts_with("[deno:") || file_name.starts_with("deno:")
+        } else {
+          false
+        }
+      })
+      .into_iter()
+      .collect::<Vec<_>>();
+    frames.reverse();
+    js_error.frames = frames;
+  } else {
+    js_error.frames = frames;
+  }
+
+  js_error.cause = js_error
+    .cause
+    .as_ref()
+    .map(|e| Box::new(abbreviate_test_error(e)));
+  js_error.aggregated = js_error
+    .aggregated
+    .as_ref()
+    .map(|es| es.iter().map(abbreviate_test_error).collect());
+  js_error
+}
+
+// This function maps JsError to PrettyJsError and applies some changes
+// specifically for test runner purposes:
+//
+// - filter out stack frames:
+//   - if stack trace consists of mixed user and internal code, the frames
+//     below the first user code frame are filtered out
+//   - if stack trace consists only of internal code it is preserved as is
+pub fn format_test_error(js_error: &JsError) -> String {
+  let mut js_error = abbreviate_test_error(js_error);
+  js_error.exception_message = js_error
+    .exception_message
+    .trim_start_matches("Uncaught ")
+    .to_string();
+  PrettyJsError::create(js_error).to_string()
+}
+
 fn create_reporter(
   concurrent: bool,
   echo_output: bool,
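
Note on the change above: the core of the new abbreviate_test_error logic is the trailing-frame filter. When a stack trace mixes user code with internal "deno:" / "[deno:" frames, the internal frames at the bottom of the trace are trimmed off; a trace made up only of internal frames is left untouched. The sketch below is a minimal, standalone re-implementation of just that filtering step, so the rev() / skip_while() / reverse() pattern is easier to follow in isolation. The Frame struct, the is_internal and trim_trailing_internal_frames names, and the example file names are illustrative assumptions, not the real deno_core::error::JsError types.

// Illustrative sketch only; `Frame` stands in for the real JsError frame type.
#[derive(Debug)]
struct Frame {
  file_name: Option<String>,
}

// Mirrors the "[deno:" / "deno:" file-name check used in the diff.
fn is_internal(frame: &Frame) -> bool {
  match &frame.file_name {
    Some(name) => name.starts_with("[deno:") || name.starts_with("deno:"),
    None => false,
  }
}

// Drop internal frames from the bottom of the stack, but only when the trace
// contains at least one user-code frame; an all-internal trace is returned
// unchanged, matching the behavior described in the diff.
fn trim_trailing_internal_frames(frames: Vec<Frame>) -> Vec<Frame> {
  let has_user_frame = frames.iter().any(|f| !is_internal(f));
  if !has_user_frame {
    return frames;
  }
  // Walk from the outermost (bottom) frame, skip internal frames until the
  // first user frame, then restore the original top-to-bottom order.
  let mut trimmed: Vec<Frame> = frames
    .into_iter()
    .rev()
    .skip_while(is_internal)
    .collect();
  trimmed.reverse();
  trimmed
}

fn main() {
  let frame = |name: &str| Frame {
    file_name: Some(name.to_string()),
  };
  // Hypothetical trace: one user frame on top, internal frames below it.
  let frames = vec![
    frame("file:///a/user_test.ts"),
    frame("deno:runtime/js/40_testing.js"),
    frame("deno:core/01_core.js"),
  ];
  let trimmed = trim_trailing_internal_frames(frames);
  // Only the user frame survives; the trailing internal frames are gone.
  assert_eq!(trimmed.len(), 1);
  println!("{:?}", trimmed);
}

Walking the frames in reverse lets skip_while stop at the first user-code frame it meets from the bottom, which is why internal frames sandwiched between user frames survive the trim while the runner's own trailing frames do not.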