| author | Nayeem Rahman <nayeemrmn99@gmail.com> | 2022-05-09 10:44:50 +0100 |
|---|---|---|
| committer | GitHub <noreply@github.com> | 2022-05-09 11:44:50 +0200 |
| commit | 23efc4fcab2ca3b8b47539a7fb1d904efc57eb7c (patch) | |
| tree | 54dd76ef9d2071245c9ad28610f78c72937546eb /cli/tools | |
| parent | ab728e9ccfff2d1ac6362b22b579b00120a39f67 (diff) | |
feat(test): Represent uncaught errors (#14513)
This commit adds better reporting of uncaught errors thrown
in the top-level scope of test files. The change affects
both the console runner and the LSP runner.
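In other words, a test module whose top-level code throws, or that leaves a dangling promise rejection behind, no longer takes down the whole run without attribution: the worker converts the failure into a `TestEvent::UncaughtError` tagged with the module's origin, and the event loop counts it as a failure. Below is a minimal, self-contained sketch of that flow, with `std::sync::mpsc` standing in for `TestEventSender` and plain `String`s standing in for `JsError` (these stand-ins are illustrative, not Deno's actual types):

```rust
use std::sync::mpsc;

// Simplified stand-ins: the real runner sends TestEvent values carrying a
// Box<JsError> through a TestEventSender; plain Strings are used here.
enum TestEvent {
    Result { name: String, ok: bool },
    UncaughtError { origin: String, message: String },
}

#[derive(Default)]
struct TestSummary {
    passed: usize,
    failed: usize,
    uncaught_errors: Vec<(String, String)>,
}

fn main() {
    let (sender, receiver) = mpsc::channel();

    // A passing test result, then a module whose top-level code threw.
    sender
        .send(TestEvent::Result { name: "adds".into(), ok: true })
        .unwrap();
    sender
        .send(TestEvent::UncaughtError {
            origin: "file:///main_test.ts".into(),
            message: "TypeError: x is not a function".into(),
        })
        .unwrap();
    drop(sender); // close the channel so the receiver loop terminates

    let mut summary = TestSummary::default();
    for event in receiver {
        match event {
            TestEvent::Result { ok, .. } => {
                if ok { summary.passed += 1 } else { summary.failed += 1 }
            }
            // Mirrors the new match arm in test_specifiers(): an uncaught
            // error is reported and counted as a failure for its origin.
            TestEvent::UncaughtError { origin, message } => {
                summary.failed += 1;
                summary.uncaught_errors.push((origin, message));
            }
        }
    }
    println!(
        "{} passed; {} failed; {} uncaught",
        summary.passed, summary.failed, summary.uncaught_errors.len()
    );
}
```

The real runner sends these events from `tokio::task::spawn_blocking` workers; the single receiver loop shown in the diff below is what keeps reporting and summary bookkeeping in one place.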
Diffstat (limited to 'cli/tools')
-rw-r--r-- | cli/tools/test.rs | 151
1 file changed, 117 insertions(+), 34 deletions(-)
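The pivotal piece of the patch is in `test_specifiers`: when running a test file returns an error, the runner now distinguishes a JavaScript exception, which becomes an `UncaughtError` event for that file's origin, from an infrastructure error, which still aborts the run. Deno's `AnyError` is a re-export of `anyhow::Error`, so the `error.is::<JsError>()` / `error.downcast::<JsError>()` calls in the diff are anyhow's standard API. Here is a compilable sketch of the same pattern with a hypothetical local `JsError` in place of `deno_core`'s (assumes only the `anyhow` crate as a dependency):

```rust
use anyhow::Error as AnyError;
use std::fmt;

// Hypothetical stand-in for deno_core's JsError: any Debug + Display +
// Send + Sync + 'static error type works with anyhow's is()/downcast().
#[derive(Debug)]
struct JsError(String);

impl fmt::Display for JsError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.0)
    }
}

impl std::error::Error for JsError {}

enum TestEvent {
    UncaughtError(String, Box<JsError>),
}

// Same shape as the new logic in test_specifiers(): a JS exception becomes
// an event tagged with the module's origin; anything else stays fatal.
fn handle_file_result(
    origin: String,
    file_result: Result<(), AnyError>,
) -> Result<Option<TestEvent>, AnyError> {
    match file_result {
        Ok(()) => Ok(None),
        Err(error) if error.is::<JsError>() => {
            Ok(Some(TestEvent::UncaughtError(
                origin,
                Box::new(error.downcast::<JsError>().unwrap()),
            )))
        }
        // e.g. an I/O or permission error: still aborts the runner.
        Err(error) => Err(error),
    }
}

fn main() -> Result<(), AnyError> {
    let error = AnyError::from(JsError(
        "Uncaught TypeError: x is not a function".into(),
    ));
    if let Some(TestEvent::UncaughtError(origin, js_error)) =
        handle_file_result("file:///main_test.ts".into(), Err(error))?
    {
        println!("Uncaught error from {origin}: {js_error}");
    }
    Ok(())
}
```

Routing the error through the event channel instead of returning it is what lets the remaining test files keep running.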
```diff
diff --git a/cli/tools/test.rs b/cli/tools/test.rs
index 38c6919cc..1eb36671e 100644
--- a/cli/tools/test.rs
+++ b/cli/tools/test.rs
@@ -51,6 +51,7 @@ use rand::seq::SliceRandom;
 use rand::SeedableRng;
 use regex::Regex;
 use serde::Deserialize;
+use std::collections::BTreeMap;
 use std::collections::HashMap;
 use std::collections::HashSet;
 use std::io::Read;
@@ -149,6 +150,7 @@ pub enum TestEvent {
   Wait(TestDescription),
   Output(Vec<u8>),
   Result(TestDescription, TestResult, u64),
+  UncaughtError(String, Box<JsError>),
   StepWait(TestStepDescription),
   StepResult(TestStepDescription, TestStepResult, u64),
 }
@@ -166,6 +168,7 @@ pub struct TestSummary {
   pub filtered_out: usize,
   pub measured: usize,
   pub failures: Vec<(TestDescription, Box<JsError>)>,
+  pub uncaught_errors: Vec<(String, Box<JsError>)>,
 }
 
 #[derive(Debug, Clone, Deserialize)]
@@ -192,6 +195,7 @@ impl TestSummary {
       filtered_out: 0,
       measured: 0,
       failures: Vec::new(),
+      uncaught_errors: Vec::new(),
     }
   }
 
@@ -214,6 +218,7 @@ pub trait TestReporter {
     result: &TestResult,
     elapsed: u64,
   );
+  fn report_uncaught_error(&mut self, origin: &str, error: &JsError);
   fn report_step_wait(&mut self, description: &TestStepDescription);
   fn report_step_result(
     &mut self,
@@ -233,10 +238,11 @@ struct PrettyTestReporter {
   concurrent: bool,
   echo_output: bool,
   deferred_step_output: HashMap<TestDescription, Vec<DeferredStepOutput>>,
-  in_test_count: usize,
+  in_new_line: bool,
   last_wait_output_level: usize,
   cwd: Url,
   did_have_user_output: bool,
+  started_tests: bool,
 }
 
 impl PrettyTestReporter {
@@ -244,16 +250,18 @@ impl PrettyTestReporter {
     PrettyTestReporter {
       concurrent,
       echo_output,
-      in_test_count: 0,
+      in_new_line: true,
      deferred_step_output: HashMap::new(),
       last_wait_output_level: 0,
       cwd: Url::from_directory_path(std::env::current_dir().unwrap()).unwrap(),
       did_have_user_output: false,
+      started_tests: false,
     }
   }
 
   fn force_report_wait(&mut self, description: &TestDescription) {
     print!("{} ...", description.name);
+    self.in_new_line = false;
     // flush for faster feedback when line buffered
     std::io::stdout().flush().unwrap();
     self.last_wait_output_level = 0;
@@ -278,6 +286,7 @@
       println!();
     }
     print!("{}{} ...", " ".repeat(description.level), description.name);
+    self.in_new_line = false;
     // flush for faster feedback when line buffered
     std::io::stdout().flush().unwrap();
     self.last_wait_output_level = description.level;
@@ -316,11 +325,13 @@
         println!("{}{}", " ".repeat(description.level + 1), line);
       }
     }
+    self.in_new_line = true;
   }
 
   fn write_output_end(&mut self) -> bool {
     if self.did_have_user_output {
       println!("{}", colors::gray("----- output end -----"));
+      self.in_new_line = true;
       self.did_have_user_output = false;
       true
     } else {
@@ -341,13 +352,14 @@ impl TestReporter for PrettyTestReporter {
         self.to_relative_path_or_remote_url(&plan.origin)
       ))
     );
+    self.in_new_line = true;
   }
 
   fn report_wait(&mut self, description: &TestDescription) {
     if !self.concurrent {
       self.force_report_wait(description);
     }
-    self.in_test_count += 1;
+    self.started_tests = true;
   }
 
   fn report_output(&mut self, output: &[u8]) {
@@ -355,10 +367,11 @@
       return;
     }
 
-    if !self.did_have_user_output && self.in_test_count > 0 {
+    if !self.did_have_user_output && self.started_tests {
       self.did_have_user_output = true;
       println!();
       println!("{}", colors::gray("------- output -------"));
+      self.in_new_line = true;
     }
 
     // output everything to stdout in order to prevent
@@ -372,8 +385,6 @@ impl TestReporter for PrettyTestReporter {
     result: &TestResult,
     elapsed: u64,
   ) {
-    self.in_test_count -= 1;
-
     if self.concurrent {
       self.force_report_wait(description);
     }
@@ -414,6 +425,21 @@ impl TestReporter for PrettyTestReporter {
       status,
       colors::gray(format!("({})", display::human_elapsed(elapsed.into())))
     );
+    self.in_new_line = true;
+  }
+
+  fn report_uncaught_error(&mut self, origin: &str, _error: &JsError) {
+    if !self.in_new_line {
+      println!();
+    }
+    println!(
+      "Uncaught error from {} {}",
+      self.to_relative_path_or_remote_url(origin),
+      colors::red("FAILED")
+    );
+    self.in_new_line = true;
+    self.last_wait_output_level = 0;
+    self.did_have_user_output = false;
   }
 
   fn report_step_wait(&mut self, description: &TestStepDescription) {
@@ -450,31 +476,65 @@ impl TestReporter for PrettyTestReporter {
   }
 
   fn report_summary(&mut self, summary: &TestSummary, elapsed: &Duration) {
-    if !summary.failures.is_empty() {
+    if !summary.failures.is_empty() || !summary.uncaught_errors.is_empty() {
+      #[allow(clippy::type_complexity)] // Type alias doesn't look better here
+      let mut failures_by_origin: BTreeMap<
+        String,
+        (Vec<(&TestDescription, &JsError)>, Option<&JsError>),
+      > = BTreeMap::default();
       let mut failure_titles = vec![];
-      println!("\n{}\n", colors::white_bold_on_red(" ERRORS "));
       for (description, js_error) in &summary.failures {
-        let failure_title = format!(
-          "{} {}",
-          &description.name,
-          colors::gray(format!(
-            "=> {}:{}:{}",
-            self
-              .to_relative_path_or_remote_url(&description.location.file_name),
-            description.location.line_number,
-            description.location.column_number
-          ))
-        );
-        println!("{}", &failure_title);
-        println!(
-          "{}: {}",
-          colors::red_bold("error"),
-          format_test_error(js_error)
-        );
-        println!();
-        failure_titles.push(failure_title);
+        let (failures, _) = failures_by_origin
+          .entry(description.origin.clone())
+          .or_default();
+        failures.push((description, js_error.as_ref()));
+      }
+      for (origin, js_error) in &summary.uncaught_errors {
+        let (_, uncaught_error) =
+          failures_by_origin.entry(origin.clone()).or_default();
+        let _ = uncaught_error.insert(js_error.as_ref());
+      }
+      println!("\n{}\n", colors::white_bold_on_red(" ERRORS "));
+      for (origin, (failures, uncaught_error)) in failures_by_origin {
+        for (description, js_error) in failures {
+          let failure_title = format!(
+            "{} {}",
+            &description.name,
+            colors::gray(format!(
+              "=> {}:{}:{}",
+              self.to_relative_path_or_remote_url(
+                &description.location.file_name
+              ),
+              description.location.line_number,
+              description.location.column_number
+            ))
+          );
+          println!("{}", &failure_title);
+          println!(
+            "{}: {}",
+            colors::red_bold("error"),
+            format_test_error(js_error)
+          );
+          println!();
+          failure_titles.push(failure_title);
+        }
+        if let Some(js_error) = uncaught_error {
+          let failure_title = format!(
+            "{} (uncaught error)",
+            self.to_relative_path_or_remote_url(&origin)
+          );
+          println!("{}", &failure_title);
+          println!(
+            "{}: {}",
+            colors::red_bold("error"),
+            format_test_error(js_error)
+          );
+          println!("This error was not caught from a test and caused the test runner to fail on the referenced module.");
+          println!("It most likely originated from a dangling promise, event/timeout handler or top-level code.");
+          println!();
+          failure_titles.push(failure_title);
+        }
       }
-
       println!("{}\n", colors::white_bold_on_red(" FAILURES "));
       for failure_title in failure_titles {
         println!("{}", failure_title);
@@ -510,6 +570,7 @@ impl TestReporter for PrettyTestReporter {
       colors::gray(
         format!("({})", display::human_elapsed(elapsed.as_millis()))),
     );
+    self.in_new_line = true;
   }
 }
@@ -586,7 +647,7 @@ async fn test_specifier(
   permissions: Permissions,
   specifier: ModuleSpecifier,
   mode: TestMode,
-  sender: TestEventSender,
+  sender: &TestEventSender,
   options: TestSpecifierOptions,
 ) -> Result<(), AnyError> {
   let mut worker = create_main_worker(
@@ -959,14 +1020,30 @@ async fn test_specifiers(
       let permissions = permissions.clone();
       let specifier = specifier.clone();
       let mode = mode.clone();
-      let sender = sender.clone();
+      let mut sender = sender.clone();
       let options = options.clone();
       tokio::task::spawn_blocking(move || {
-        let future =
-          test_specifier(ps, permissions, specifier, mode, sender, options);
-
-        run_basic(future)
+        let origin = specifier.to_string();
+        let file_result = run_basic(test_specifier(
+          ps,
+          permissions,
+          specifier,
+          mode,
+          &sender,
+          options,
+        ));
+        if let Err(error) = file_result {
+          if error.is::<JsError>() {
+            sender.send(TestEvent::UncaughtError(
+              origin,
+              Box::new(error.downcast::<JsError>().unwrap()),
+            ))?;
+          } else {
+            return Err(error);
+          }
+        }
+        Ok(())
       })
     });
@@ -1021,6 +1098,12 @@ async fn test_specifiers(
           reporter.report_result(&description, &result, elapsed);
         }
 
+        TestEvent::UncaughtError(origin, error) => {
+          reporter.report_uncaught_error(&origin, &error);
+          summary.failed += 1;
+          summary.uncaught_errors.push((origin, error));
+        }
+
        TestEvent::StepWait(description) => {
           reporter.report_step_wait(&description);
         }
```
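One detail worth noting in the new `report_summary`: failures and uncaught errors are first bucketed per origin in a `BTreeMap`, so the ERRORS section walks modules in sorted order, printing each module's test failures followed by its uncaught error, if any; the `Option` encodes that evaluating one module can produce at most one uncaught error. A standalone sketch of the grouping, with strings standing in for `TestDescription` and `JsError` (illustrative names only):

```rust
use std::collections::BTreeMap;

fn main() {
    // (origin, test name, error message) tuples in completion order.
    let failures = [
        ("file:///b_test.ts", "b fails", "assertion failed"),
        ("file:///a_test.ts", "a fails", "boom"),
    ];
    // At most one uncaught error per origin.
    let uncaught_errors = [("file:///b_test.ts", "dangling promise rejected")];

    // Same shape as in the patch: per origin, the test failures plus an
    // optional uncaught error. BTreeMap keeps origins sorted.
    let mut failures_by_origin: BTreeMap<&str, (Vec<(&str, &str)>, Option<&str>)> =
        BTreeMap::default();
    for (origin, name, error) in failures {
        failures_by_origin.entry(origin).or_default().0.push((name, error));
    }
    for (origin, error) in uncaught_errors {
        failures_by_origin.entry(origin).or_default().1 = Some(error);
    }

    for (origin, (failures, uncaught_error)) in failures_by_origin {
        for (name, error) in failures {
            println!("{name} => {origin}: {error}");
        }
        if let Some(error) = uncaught_error {
            println!("{origin} (uncaught error): {error}");
        }
    }
}
```

Reaching for `BTreeMap` rather than `HashMap` is what makes the summary's ordering deterministic across runs: this sketch prints `a_test.ts` before `b_test.ts` regardless of the order in which results arrived.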