author Bartek Iwańczuk <biwanczuk@gmail.com> 2022-04-16 19:51:12 +0200
committer GitHub <noreply@github.com> 2022-04-16 19:51:12 +0200
commit 5f2d9a4a220307b1111c91dfac74951ef3925457 (patch)
tree 0873546419f8f8dd9ccbaf53dd0c6aa04c78baf0 /cli
parent 32aaefd9eeea4a08eec0159f6374bfadf2bec62f (diff)
feat(test): use structured data for JavaScript errors in tests (#14287)
This commit rewrites the test runner to send structured error data from JavaScript to Rust instead of passing strings. This will allow customizing how errors are displayed in the test report (which will be addressed in follow-up commits).
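The heart of the change is in cli/tools/test.rs below: TestResult::Failed now carries a Box<JsError> instead of a preformatted String, and each reporter renders the error itself at display time. A minimal sketch of that flow, assuming the deno_core::error::JsError and fmt_errors::PrettyJsError types used in this diff (format_test_error is a hypothetical helper name for illustration):

    use deno_core::error::JsError;        // structured error data: message plus stack frames
    use crate::fmt_errors::PrettyJsError; // CLI formatter that renders a JsError for display

    pub enum TestResult {
      Ok,
      Ignored,
      Failed(Box<JsError>), // structured error instead of a preformatted String
    }

    // Rendering now happens at report time, so follow-up commits can customize
    // how errors appear in the test report without touching the JS side.
    fn format_test_error(js_error: &JsError) -> String {
      PrettyJsError::create(js_error.clone())
        .to_string()
        // PrettyJsError prefixes uncaught errors with "Uncaught ", which is
        // noise in a test report, so the reporters strip it.
        .trim_start_matches("Uncaught ")
        .to_string()
    }

This also appears to be why the .out fixtures below gain the extra source-line and caret (^) lines: the structured error is now run through the pretty formatter, which includes the offending source line in its output.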
Diffstat (limited to 'cli')
-rw-r--r-- cli/lsp/testing/execution.rs | 19
-rw-r--r-- cli/tests/testdata/compat/test_runner/cjs.out | 4
-rw-r--r-- cli/tests/testdata/compat/test_runner/esm.out | 4
-rw-r--r-- cli/tests/testdata/test/exit_sanitizer.out | 6
-rw-r--r-- cli/tests/testdata/test/fail.out | 20
-rw-r--r-- cli/tests/testdata/test/fail_fast.out | 2
-rw-r--r-- cli/tests/testdata/test/finally_timeout.out | 2
-rw-r--r-- cli/tests/testdata/test/steps/failing_steps.out | 10
-rw-r--r-- cli/tests/testdata/test/steps/invalid_usage.out | 12
-rw-r--r-- cli/tools/test.rs | 32
10 files changed, 93 insertions, 18 deletions
diff --git a/cli/lsp/testing/execution.rs b/cli/lsp/testing/execution.rs
index bc780a475..b305b1650 100644
--- a/cli/lsp/testing/execution.rs
+++ b/cli/lsp/testing/execution.rs
@@ -8,6 +8,7 @@ use crate::checksum;
use crate::create_main_worker;
use crate::emit;
use crate::flags;
+use crate::fmt_errors::PrettyJsError;
use crate::located_script_name;
use crate::lsp::client::Client;
use crate::lsp::client::TestingNotification;
@@ -797,10 +798,14 @@ impl test::TestReporter for LspTestReporter {
test: desc.into(),
})
}
- test::TestResult::Failed(message) => {
+ test::TestResult::Failed(js_error) => {
+ let err_string = PrettyJsError::create(*js_error.clone())
+ .to_string()
+ .trim_start_matches("Uncaught ")
+ .to_string();
self.progress(lsp_custom::TestRunProgressMessage::Failed {
test: desc.into(),
- messages: as_test_messages(message, false),
+ messages: as_test_messages(err_string, false),
duration: Some(elapsed as u32),
})
}
@@ -839,9 +844,13 @@ impl test::TestReporter for LspTestReporter {
test: desc.into(),
})
}
- test::TestStepResult::Failed(message) => {
- let messages = if let Some(message) = message {
- as_test_messages(message, false)
+ test::TestStepResult::Failed(js_error) => {
+ let messages = if let Some(js_error) = js_error {
+ let err_string = PrettyJsError::create(*js_error.clone())
+ .to_string()
+ .trim_start_matches("Uncaught ")
+ .to_string();
+ as_test_messages(err_string, false)
} else {
vec![]
};
diff --git a/cli/tests/testdata/compat/test_runner/cjs.out b/cli/tests/testdata/compat/test_runner/cjs.out
index 02d86891b..722721166 100644
--- a/cli/tests/testdata/compat/test_runner/cjs.out
+++ b/cli/tests/testdata/compat/test_runner/cjs.out
@@ -5,7 +5,7 @@ Failed assertion ... FAILED ([WILDCARD])
failures:
compat/test_runner/cjs.js > Failed assertion
-AssertionError [ERR_ASSERTION]: Values are not strictly equal:
+AssertionError: Values are not strictly equal:
[Diff] Actual / Expected
@@ -14,6 +14,8 @@ AssertionError [ERR_ASSERTION]: Values are not strictly equal:
- 10
+ 20
+ Error.captureStackTrace(this, stackStartFn || stackStartFunction);
+ ^
[WILDCARD]
failures:
diff --git a/cli/tests/testdata/compat/test_runner/esm.out b/cli/tests/testdata/compat/test_runner/esm.out
index 73cfe7826..f135c993d 100644
--- a/cli/tests/testdata/compat/test_runner/esm.out
+++ b/cli/tests/testdata/compat/test_runner/esm.out
@@ -5,7 +5,7 @@ Failed assertion ... FAILED ([WILDCARD])
failures:
compat/test_runner/esm.mjs > Failed assertion
-AssertionError [ERR_ASSERTION]: Values are not strictly equal:
+AssertionError: Values are not strictly equal:
[Diff] Actual / Expected
@@ -14,6 +14,8 @@ AssertionError [ERR_ASSERTION]: Values are not strictly equal:
- 10
+ 20
+ Error.captureStackTrace(this, stackStartFn || stackStartFunction);
+ ^
[WILDCARD]
failures:
diff --git a/cli/tests/testdata/test/exit_sanitizer.out b/cli/tests/testdata/test/exit_sanitizer.out
index 51e44ab57..c714e8b4d 100644
--- a/cli/tests/testdata/test/exit_sanitizer.out
+++ b/cli/tests/testdata/test/exit_sanitizer.out
@@ -8,18 +8,24 @@ failures:
test/exit_sanitizer.ts > exit(0)
AssertionError: Test case attempted to exit with exit code: 0
+ Deno.exit(0);
+ ^
at [WILDCARD]
at [WILDCARD]/test/exit_sanitizer.ts:2:8
at [WILDCARD]
test/exit_sanitizer.ts > exit(1)
AssertionError: Test case attempted to exit with exit code: 1
+ Deno.exit(1);
+ ^
at [WILDCARD]
at [WILDCARD]/test/exit_sanitizer.ts:6:8
at [WILDCARD]
test/exit_sanitizer.ts > exit(2)
AssertionError: Test case attempted to exit with exit code: 2
+ Deno.exit(2);
+ ^
at [WILDCARD]
at [WILDCARD]/test/exit_sanitizer.ts:10:8
at [WILDCARD]
diff --git a/cli/tests/testdata/test/fail.out b/cli/tests/testdata/test/fail.out
index 7c64ec929..bac66c9bd 100644
--- a/cli/tests/testdata/test/fail.out
+++ b/cli/tests/testdata/test/fail.out
@@ -15,51 +15,71 @@ failures:
test/fail.ts > test 0
Error
+ throw new Error();
+ ^
at [WILDCARD]/test/fail.ts:2:9
at [WILDCARD]
test/fail.ts > test 1
Error
+ throw new Error();
+ ^
at [WILDCARD]/test/fail.ts:5:9
at [WILDCARD]
test/fail.ts > test 2
Error
+ throw new Error();
+ ^
at [WILDCARD]/test/fail.ts:8:9
at [WILDCARD]
test/fail.ts > test 3
Error
+ throw new Error();
+ ^
at [WILDCARD]/test/fail.ts:11:9
at [WILDCARD]
test/fail.ts > test 4
Error
+ throw new Error();
+ ^
at [WILDCARD]/test/fail.ts:14:9
at [WILDCARD]
test/fail.ts > test 5
Error
+ throw new Error();
+ ^
at [WILDCARD]/test/fail.ts:17:9
at [WILDCARD]
test/fail.ts > test 6
Error
+ throw new Error();
+ ^
at [WILDCARD]/test/fail.ts:20:9
at [WILDCARD]
test/fail.ts > test 7
Error
+ throw new Error();
+ ^
at [WILDCARD]/test/fail.ts:23:9
at [WILDCARD]
test/fail.ts > test 8
Error
+ throw new Error();
+ ^
at [WILDCARD]/test/fail.ts:26:9
at [WILDCARD]
test/fail.ts > test 9
Error
+ throw new Error();
+ ^
at [WILDCARD]/test/fail.ts:29:9
at [WILDCARD]
diff --git a/cli/tests/testdata/test/fail_fast.out b/cli/tests/testdata/test/fail_fast.out
index 58e1e4e5a..e9adef329 100644
--- a/cli/tests/testdata/test/fail_fast.out
+++ b/cli/tests/testdata/test/fail_fast.out
@@ -6,6 +6,8 @@ failures:
test/fail_fast.ts > test 1
Error
+ throw new Error();
+ ^
at [WILDCARD]/test/fail_fast.ts:2:9
at [WILDCARD]
diff --git a/cli/tests/testdata/test/finally_timeout.out b/cli/tests/testdata/test/finally_timeout.out
index aafb8125f..8de59acfd 100644
--- a/cli/tests/testdata/test/finally_timeout.out
+++ b/cli/tests/testdata/test/finally_timeout.out
@@ -7,6 +7,8 @@ failures:
test/finally_timeout.ts > error
Error: fail
+ throw new Error("fail");
+ ^
at [WILDCARD]/test/finally_timeout.ts:4:11
at [WILDCARD]
diff --git a/cli/tests/testdata/test/steps/failing_steps.out b/cli/tests/testdata/test/steps/failing_steps.out
index 0724da178..24e19a064 100644
--- a/cli/tests/testdata/test/steps/failing_steps.out
+++ b/cli/tests/testdata/test/steps/failing_steps.out
@@ -4,6 +4,8 @@ nested failure ...
step 1 ...
inner 1 ... FAILED ([WILDCARD])
Error: Failed.
+ throw new Error("Failed.");
+ ^
at [WILDCARD]/failing_steps.ts:[WILDCARD]
[WILDCARD]
inner 2 ... ok ([WILDCARD])
@@ -12,15 +14,21 @@ FAILED ([WILDCARD])
multiple test step failures ...
step 1 ... FAILED ([WILDCARD])
Error: Fail.
+ throw new Error("Fail.");
+ ^
[WILDCARD]
step 2 ... FAILED ([WILDCARD])
Error: Fail.
+ await t.step("step 2", () => Promise.reject(new Error("Fail.")));
+ ^
at [WILDCARD]/failing_steps.ts:[WILDCARD]
[WILDCARD]
FAILED ([WILDCARD])
failing step in failing test ...
step 1 ... FAILED ([WILDCARD])
Error: Fail.
+ throw new Error("Fail.");
+ ^
at [WILDCARD]/failing_steps.ts:[WILDCARD]
at [WILDCARD]
FAILED ([WILDCARD])
@@ -39,6 +47,8 @@ Error: 2 test steps failed.
test/steps/failing_steps.ts > failing step in failing test
Error: Fail test.
+ throw new Error("Fail test.");
+ ^
at [WILDCARD]/failing_steps.ts:[WILDCARD]
at [WILDCARD]
diff --git a/cli/tests/testdata/test/steps/invalid_usage.out b/cli/tests/testdata/test/steps/invalid_usage.out
index f64f148b7..833c3a74f 100644
--- a/cli/tests/testdata/test/steps/invalid_usage.out
+++ b/cli/tests/testdata/test/steps/invalid_usage.out
@@ -14,6 +14,8 @@ inner missing await ...
at testStepSanitizer [WILDCARD]
FAILED ([WILDCARD])
Error: There were still test steps running after the current scope finished execution. Ensure all steps are awaited (ex. `await t.step(...)`).
+ await t.step("step", (t) => {
+ ^
at postValidation [WILDCARD]
at testStepSanitizer [WILDCARD]
at async fn ([WILDCARD]/invalid_usage.ts:[WILDCARD])
@@ -24,6 +26,8 @@ parallel steps with sanitizers ...
step 2 ... FAILED ([WILDCARD])
Error: Cannot start test step while another test step with sanitizers is running.
* parallel steps with sanitizers > step 1
+ await t.step("step 2", () => {});
+ ^
at preValidation ([WILDCARD])
at testStepSanitizer ([WILDCARD])
at [WILDCARD]/invalid_usage.ts:[WILDCARD]
@@ -34,6 +38,8 @@ parallel steps when first has sanitizer ...
step 2 ... FAILED ([WILDCARD])
Error: Cannot start test step while another test step with sanitizers is running.
* parallel steps when first has sanitizer > step 1
+ await t.step({
+ ^
at preValidation ([WILDCARD])
at testStepSanitizer ([WILDCARD])
at [WILDCARD]/invalid_usage.ts:[WILDCARD]
@@ -44,6 +50,8 @@ parallel steps when second has sanitizer ...
step 2 ... FAILED ([WILDCARD])
Error: Cannot start test step with sanitizers while another test step is running.
* parallel steps when second has sanitizer > step 1
+ await t.step({
+ ^
at preValidation ([WILDCARD])
at testStepSanitizer ([WILDCARD])
at [WILDCARD]/invalid_usage.ts:[WILDCARD]
@@ -57,6 +65,8 @@ parallel steps where only inner tests have sanitizers ...
step inner ... FAILED ([WILDCARD])
Error: Cannot start test step with sanitizers while another test step is running.
* parallel steps where only inner tests have sanitizers > step 1
+ await t.step({
+ ^
at preValidation ([WILDCARD])
at testStepSanitizer ([WILDCARD])
at [WILDCARD]/invalid_usage.ts:[WILDCARD]
@@ -67,6 +77,8 @@ failures:
test/steps/invalid_usage.ts > capturing
Error: Cannot run test step after parent scope has finished execution. Ensure any `.step(...)` calls are executed before their parent scope completes execution.
+ await capturedContext.step("next step", () => {});
+ ^
at TestContext.step ([WILDCARD])
at [WILDCARD]/invalid_usage.ts:[WILDCARD]
at [WILDCARD]
diff --git a/cli/tools/test.rs b/cli/tools/test.rs
index 8cfad29ee..e3c6507fe 100644
--- a/cli/tools/test.rs
+++ b/cli/tools/test.rs
@@ -13,6 +13,7 @@ use crate::file_watcher::ResolutionResult;
use crate::flags::Flags;
use crate::flags::TestFlags;
use crate::flags::TypeCheckMode;
+use crate::fmt_errors::PrettyJsError;
use crate::fs_util::collect_specifiers;
use crate::fs_util::is_supported_test_ext;
use crate::fs_util::is_supported_test_path;
@@ -31,6 +32,7 @@ use deno_ast::swc::common::comments::CommentKind;
use deno_ast::MediaType;
use deno_core::error::generic_error;
use deno_core::error::AnyError;
+use deno_core::error::JsError;
use deno_core::futures::future;
use deno_core::futures::stream;
use deno_core::futures::FutureExt;
@@ -92,7 +94,7 @@ pub enum TestOutput {
pub enum TestResult {
Ok,
Ignored,
- Failed(String),
+ Failed(Box<JsError>),
}
#[derive(Debug, Clone, PartialEq, Deserialize)]
@@ -108,15 +110,15 @@ pub struct TestStepDescription {
pub enum TestStepResult {
Ok,
Ignored,
- Failed(Option<String>),
- Pending(Option<String>),
+ Failed(Option<Box<JsError>>),
+ Pending(Option<Box<JsError>>),
}
impl TestStepResult {
- fn error(&self) -> Option<&str> {
+ fn error(&self) -> Option<&JsError> {
match self {
- TestStepResult::Failed(Some(text)) => Some(text.as_str()),
- TestStepResult::Pending(Some(text)) => Some(text.as_str()),
+ TestStepResult::Failed(Some(error)) => Some(error),
+ TestStepResult::Pending(Some(error)) => Some(error),
_ => None,
}
}
@@ -154,7 +156,7 @@ pub struct TestSummary {
pub ignored_steps: usize,
pub filtered_out: usize,
pub measured: usize,
- pub failures: Vec<(TestDescription, String)>,
+ pub failures: Vec<(TestDescription, Box<JsError>)>,
}
#[derive(Debug, Clone, Deserialize)]
@@ -294,8 +296,12 @@ impl PrettyTestReporter {
colors::gray(format!("({})", display::human_elapsed(elapsed.into())))
);
- if let Some(error_text) = result.error() {
- for line in error_text.lines() {
+ if let Some(js_error) = result.error() {
+ let err_string = PrettyJsError::create(js_error.clone())
+ .to_string()
+ .trim_start_matches("Uncaught ")
+ .to_string();
+ for line in err_string.lines() {
println!("{}{}", " ".repeat(description.level + 1), line);
}
}
@@ -445,7 +451,7 @@ impl TestReporter for PrettyTestReporter {
fn report_summary(&mut self, summary: &TestSummary, elapsed: &Duration) {
if !summary.failures.is_empty() {
println!("\nfailures:\n");
- for (description, error) in &summary.failures {
+ for (description, js_error) in &summary.failures {
println!(
"{} {} {}",
colors::gray(
@@ -454,7 +460,11 @@ impl TestReporter for PrettyTestReporter {
colors::gray(">"),
description.name
);
- println!("{}", error);
+ let err_string = PrettyJsError::create(*js_error.clone())
+ .to_string()
+ .trim_start_matches("Uncaught ")
+ .to_string();
+ println!("{}", err_string);
println!();
}