summary | refs | log | tree | commit | diff
diff options
context:
space:
mode:
-rw-r--r--cli/args/flags.rs38
-rw-r--r--cli/args/mod.rs2
-rw-r--r--cli/lsp/testing/execution.rs13
-rw-r--r--cli/tools/bench/reporters.rs8
-rw-r--r--cli/tools/jupyter/mod.rs12
-rw-r--r--cli/tools/repl/session.rs2
-rw-r--r--cli/tools/test/fmt.rs10
-rw-r--r--cli/tools/test/mod.rs82
-rw-r--r--cli/tools/test/reporters/common.rs12
-rw-r--r--cli/tools/test/reporters/dot.rs8
-rw-r--r--cli/tools/test/reporters/junit.rs25
-rw-r--r--cli/tools/test/reporters/pretty.rs11
-rw-r--r--cli/tools/test/reporters/tap.rs15
-rw-r--r--tests/registry/jsr/@std/assert/1.0.0_meta.json2
-rw-r--r--tests/specs/remove/basic/add_lock.out4
-rw-r--r--tests/specs/test/hide_stacktraces/__test__.jsonc24
-rw-r--r--tests/specs/test/hide_stacktraces/dot.out23
-rw-r--r--tests/specs/test/hide_stacktraces/junit.out27
-rw-r--r--tests/specs/test/hide_stacktraces/main.js8
-rw-r--r--tests/specs/test/hide_stacktraces/pretty.out24
-rw-r--r--tests/specs/test/hide_stacktraces/tap.out8
21 files changed, 300 insertions, 58 deletions
diff --git a/cli/args/flags.rs b/cli/args/flags.rs
index c3edd8d3f..0ceac4563 100644
--- a/cli/args/flags.rs
+++ b/cli/args/flags.rs
@@ -403,6 +403,7 @@ pub struct TestFlags {
pub watch: Option<WatchFlagsWithPaths>,
pub reporter: TestReporterConfig,
pub junit_path: Option<String>,
+ pub hide_stacktraces: bool,
}
#[derive(Clone, Debug, Eq, PartialEq)]
@@ -2999,6 +3000,12 @@ Directory arguments are expanded to all contained files matching the glob
.value_parser(["pretty", "dot", "junit", "tap"])
.help_heading(TEST_HEADING)
)
+ .arg(
+ Arg::new("hide-stacktraces")
+ .long("hide-stacktraces")
+ .help("Hide stack traces for errors in failure test results.")
+ .action(ArgAction::SetTrue)
+ )
.arg(env_file_arg())
)
}
@@ -4920,6 +4927,8 @@ fn test_parse(flags: &mut Flags, matches: &mut ArgMatches) {
flags.log_level = Some(Level::Error);
}
+ let hide_stacktraces = matches.get_flag("hide-stacktraces");
+
flags.subcommand = DenoSubcommand::Test(TestFlags {
no_run,
doc,
@@ -4935,6 +4944,7 @@ fn test_parse(flags: &mut Flags, matches: &mut ArgMatches) {
watch: watch_arg_parse_with_paths(matches),
reporter,
junit_path,
+ hide_stacktraces,
});
}
@@ -9015,6 +9025,7 @@ mod tests {
watch: Default::default(),
reporter: Default::default(),
junit_path: None,
+ hide_stacktraces: false,
}),
unstable_config: UnstableConfig {
legacy_flag_enabled: true,
@@ -9102,6 +9113,7 @@ mod tests {
clean: false,
watch: Default::default(),
junit_path: None,
+ hide_stacktraces: false,
}),
type_check_mode: TypeCheckMode::Local,
permissions: PermissionFlags {
@@ -9140,6 +9152,7 @@ mod tests {
watch: Default::default(),
reporter: Default::default(),
junit_path: None,
+ hide_stacktraces: false,
}),
type_check_mode: TypeCheckMode::Local,
permissions: PermissionFlags {
@@ -9182,6 +9195,7 @@ mod tests {
watch: Default::default(),
reporter: Default::default(),
junit_path: None,
+ hide_stacktraces: false,
}),
permissions: PermissionFlags {
no_prompt: true,
@@ -9318,6 +9332,7 @@ mod tests {
watch: Default::default(),
reporter: Default::default(),
junit_path: None,
+ hide_stacktraces: false,
}),
permissions: PermissionFlags {
no_prompt: true,
@@ -9353,6 +9368,7 @@ mod tests {
watch: Some(Default::default()),
reporter: Default::default(),
junit_path: None,
+ hide_stacktraces: false,
}),
permissions: PermissionFlags {
no_prompt: true,
@@ -9387,6 +9403,7 @@ mod tests {
watch: Some(Default::default()),
reporter: Default::default(),
junit_path: None,
+ hide_stacktraces: false,
}),
permissions: PermissionFlags {
no_prompt: true,
@@ -9428,6 +9445,7 @@ mod tests {
}),
reporter: Default::default(),
junit_path: None,
+ hide_stacktraces: false,
}),
type_check_mode: TypeCheckMode::Local,
permissions: PermissionFlags {
@@ -9625,6 +9643,26 @@ mod tests {
}
#[test]
+ fn test_hide_stacktraces() {
+ let r = flags_from_vec(svec!["deno", "test", "--hide-stacktraces"]);
+ assert_eq!(
+ r.unwrap(),
+ Flags {
+ subcommand: DenoSubcommand::Test(TestFlags {
+ hide_stacktraces: true,
+ ..TestFlags::default()
+ }),
+ type_check_mode: TypeCheckMode::Local,
+ permissions: PermissionFlags {
+ no_prompt: true,
+ ..Default::default()
+ },
+ ..Flags::default()
+ }
+ );
+ }
+
+ #[test]
fn bundle_with_cafile() {
let r = flags_from_vec(svec![
"deno",
diff --git a/cli/args/mod.rs b/cli/args/mod.rs
index 403e4ffdf..68cf916b2 100644
--- a/cli/args/mod.rs
+++ b/cli/args/mod.rs
@@ -377,6 +377,7 @@ pub struct WorkspaceTestOptions {
pub trace_leaks: bool,
pub reporter: TestReporterConfig,
pub junit_path: Option<String>,
+ pub hide_stacktraces: bool,
}
impl WorkspaceTestOptions {
@@ -394,6 +395,7 @@ impl WorkspaceTestOptions {
trace_leaks: test_flags.trace_leaks,
reporter: test_flags.reporter,
junit_path: test_flags.junit_path.clone(),
+ hide_stacktraces: test_flags.hide_stacktraces,
}
}
}
diff --git a/cli/lsp/testing/execution.rs b/cli/lsp/testing/execution.rs
index 96f22a9b0..14196baa3 100644
--- a/cli/lsp/testing/execution.rs
+++ b/cli/lsp/testing/execution.rs
@@ -15,6 +15,7 @@ use crate::lsp::logging::lsp_log;
use crate::tools::test;
use crate::tools::test::create_test_event_channel;
use crate::tools::test::FailFastTracker;
+use crate::tools::test::TestFailureFormatOptions;
use deno_core::anyhow::anyhow;
use deno_core::error::AnyError;
@@ -655,7 +656,10 @@ impl LspTestReporter {
let desc = self.tests.get(&desc.id).unwrap();
self.progress(lsp_custom::TestRunProgressMessage::Failed {
test: desc.as_test_identifier(&self.tests),
- messages: as_test_messages(failure.to_string(), false),
+ messages: as_test_messages(
+ failure.format(&TestFailureFormatOptions::default()),
+ false,
+ ),
duration: Some(elapsed as u32),
})
}
@@ -675,7 +679,7 @@ impl LspTestReporter {
let err_string = format!(
"Uncaught error from {}: {}\nThis error was not caught from a test and caused the test runner to fail on the referenced module.\nIt most likely originated from a dangling promise, event/timeout handler or top-level code.",
origin,
- test::fmt::format_test_error(js_error)
+ test::fmt::format_test_error(js_error, &TestFailureFormatOptions::default())
);
let messages = as_test_messages(err_string, false);
for desc in self.tests.values().filter(|d| d.origin() == origin) {
@@ -751,7 +755,10 @@ impl LspTestReporter {
test::TestStepResult::Failed(failure) => {
self.progress(lsp_custom::TestRunProgressMessage::Failed {
test: desc.as_test_identifier(&self.tests),
- messages: as_test_messages(failure.to_string(), false),
+ messages: as_test_messages(
+ failure.format(&TestFailureFormatOptions::default()),
+ false,
+ ),
duration: Some(elapsed as u32),
})
}
diff --git a/cli/tools/bench/reporters.rs b/cli/tools/bench/reporters.rs
index 9a1da04ec..3f244ed62 100644
--- a/cli/tools/bench/reporters.rs
+++ b/cli/tools/bench/reporters.rs
@@ -2,6 +2,7 @@
use serde::Serialize;
+use crate::tools::test::TestFailureFormatOptions;
use crate::version;
use super::*;
@@ -243,7 +244,10 @@ impl BenchReporter for ConsoleReporter {
&desc.name,
&mitata::reporter::Error {
stack: None,
- message: format_test_error(js_error),
+ message: format_test_error(
+ js_error,
+ &TestFailureFormatOptions::default()
+ ),
},
options
)
@@ -298,7 +302,7 @@ impl BenchReporter for ConsoleReporter {
println!(
"{}: {}",
colors::red_bold("error"),
- format_test_error(&error)
+ format_test_error(&error, &TestFailureFormatOptions::default())
);
println!("This error was not caught from a benchmark and caused the bench runner to fail on the referenced module.");
println!("It most likely originated from a dangling promise, event/timeout handler or top-level code.");
diff --git a/cli/tools/jupyter/mod.rs b/cli/tools/jupyter/mod.rs
index 7e88f92c2..14fcbd72c 100644
--- a/cli/tools/jupyter/mod.rs
+++ b/cli/tools/jupyter/mod.rs
@@ -11,6 +11,7 @@ use crate::tools::repl;
use crate::tools::test::create_single_test_event_channel;
use crate::tools::test::reporters::PrettyTestReporter;
use crate::tools::test::TestEventWorkerSender;
+use crate::tools::test::TestFailureFormatOptions;
use crate::CliFactory;
use deno_core::anyhow::bail;
use deno_core::anyhow::Context;
@@ -142,8 +143,15 @@ pub async fn kernel(
})?;
repl_session.set_test_reporter_factory(Box::new(move || {
Box::new(
- PrettyTestReporter::new(false, true, false, true, cwd_url.clone())
- .with_writer(Box::new(TestWriter(stdio_tx.clone()))),
+ PrettyTestReporter::new(
+ false,
+ true,
+ false,
+ true,
+ cwd_url.clone(),
+ TestFailureFormatOptions::default(),
+ )
+ .with_writer(Box::new(TestWriter(stdio_tx.clone()))),
)
}));
diff --git a/cli/tools/repl/session.rs b/cli/tools/repl/session.rs
index b379c3646..484664dae 100644
--- a/cli/tools/repl/session.rs
+++ b/cli/tools/repl/session.rs
@@ -16,6 +16,7 @@ use crate::tools::test::send_test_event;
use crate::tools::test::worker_has_tests;
use crate::tools::test::TestEvent;
use crate::tools::test::TestEventReceiver;
+use crate::tools::test::TestFailureFormatOptions;
use deno_ast::diagnostics::Diagnostic;
use deno_ast::swc::ast as swc_ast;
@@ -276,6 +277,7 @@ impl ReplSession {
false,
true,
cwd_url.clone(),
+ TestFailureFormatOptions::default(),
))
}),
main_module,
diff --git a/cli/tools/test/fmt.rs b/cli/tools/test/fmt.rs
index d66c72239..174155072 100644
--- a/cli/tools/test/fmt.rs
+++ b/cli/tools/test/fmt.rs
@@ -72,16 +72,24 @@ fn abbreviate_test_error(js_error: &JsError) -> JsError {
// This function prettifies `JsError` and applies some changes specifically for
// test runner purposes:
//
+// - hide stack traces if `options.hide_stacktraces` is set to `true`
+//
// - filter out stack frames:
// - if stack trace consists of mixed user and internal code, the frames
// below the first user code frame are filtered out
// - if stack trace consists only of internal code it is preserved as is
-pub fn format_test_error(js_error: &JsError) -> String {
+pub fn format_test_error(
+ js_error: &JsError,
+ options: &TestFailureFormatOptions,
+) -> String {
let mut js_error = abbreviate_test_error(js_error);
js_error.exception_message = js_error
.exception_message
.trim_start_matches("Uncaught ")
.to_string();
+ if options.hide_stacktraces {
+ return js_error.exception_message;
+ }
format_js_error(&js_error)
}
diff --git a/cli/tools/test/mod.rs b/cli/tools/test/mod.rs
index 4fbd0423e..0dc213350 100644
--- a/cli/tools/test/mod.rs
+++ b/cli/tools/test/mod.rs
@@ -288,6 +288,11 @@ impl From<&TestDescription> for TestFailureDescription {
}
}
+#[derive(Debug, Default, Clone, PartialEq)]
+pub struct TestFailureFormatOptions {
+ pub hide_stacktraces: bool,
+}
+
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Debug, Clone, PartialEq, Deserialize)]
#[serde(rename_all = "camelCase")]
@@ -302,52 +307,55 @@ pub enum TestFailure {
HasSanitizersAndOverlaps(IndexSet<String>), // Long names of overlapped tests
}
-impl std::fmt::Display for TestFailure {
- fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+impl TestFailure {
+ pub fn format(
+ &self,
+ options: &TestFailureFormatOptions,
+ ) -> Cow<'static, str> {
match self {
TestFailure::JsError(js_error) => {
- write!(f, "{}", format_test_error(js_error))
+ Cow::Owned(format_test_error(js_error, options))
+ }
+ TestFailure::FailedSteps(1) => Cow::Borrowed("1 test step failed."),
+ TestFailure::FailedSteps(n) => {
+ Cow::Owned(format!("{} test steps failed.", n))
}
- TestFailure::FailedSteps(1) => write!(f, "1 test step failed."),
- TestFailure::FailedSteps(n) => write!(f, "{n} test steps failed."),
TestFailure::IncompleteSteps => {
- write!(f, "Completed while steps were still running. Ensure all steps are awaited with `await t.step(...)`.")
+ Cow::Borrowed("Completed while steps were still running. Ensure all steps are awaited with `await t.step(...)`.")
}
TestFailure::Incomplete => {
- write!(
- f,
- "Didn't complete before parent. Await step with `await t.step(...)`."
- )
+ Cow::Borrowed("Didn't complete before parent. Await step with `await t.step(...)`.")
}
TestFailure::Leaked(details, trailer_notes) => {
- write!(f, "Leaks detected:")?;
+ let mut f = String::new();
+ write!(f, "Leaks detected:").unwrap();
for detail in details {
- write!(f, "\n - {}", detail)?;
+ write!(f, "\n - {}", detail).unwrap();
}
for trailer in trailer_notes {
- write!(f, "\n{}", trailer)?;
+ write!(f, "\n{}", trailer).unwrap();
}
- Ok(())
+ Cow::Owned(f)
}
TestFailure::OverlapsWithSanitizers(long_names) => {
- write!(f, "Started test step while another test step with sanitizers was running:")?;
+ let mut f = String::new();
+ write!(f, "Started test step while another test step with sanitizers was running:").unwrap();
for long_name in long_names {
- write!(f, "\n * {}", long_name)?;
+ write!(f, "\n * {}", long_name).unwrap();
}
- Ok(())
+ Cow::Owned(f)
}
TestFailure::HasSanitizersAndOverlaps(long_names) => {
- write!(f, "Started test step with sanitizers while another test step was running:")?;
+ let mut f = String::new();
+ write!(f, "Started test step with sanitizers while another test step was running:").unwrap();
for long_name in long_names {
- write!(f, "\n * {}", long_name)?;
+ write!(f, "\n * {}", long_name).unwrap();
}
- Ok(())
+ Cow::Owned(f)
}
}
}
-}
-impl TestFailure {
pub fn overview(&self) -> String {
match self {
TestFailure::JsError(js_error) => js_error.exception_message.clone(),
@@ -369,10 +377,6 @@ impl TestFailure {
}
}
- pub fn detail(&self) -> String {
- self.to_string()
- }
-
fn format_label(&self) -> String {
match self {
TestFailure::Incomplete => colors::gray("INCOMPLETE").to_string(),
@@ -512,6 +516,7 @@ struct TestSpecifiersOptions {
specifier: TestSpecifierOptions,
reporter: TestReporterConfig,
junit_path: Option<String>,
+ hide_stacktraces: bool,
}
#[derive(Debug, Default, Clone)]
@@ -545,23 +550,31 @@ impl TestSummary {
fn get_test_reporter(options: &TestSpecifiersOptions) -> Box<dyn TestReporter> {
let parallel = options.concurrent_jobs.get() > 1;
+ let failure_format_options = TestFailureFormatOptions {
+ hide_stacktraces: options.hide_stacktraces,
+ };
let reporter: Box<dyn TestReporter> = match &options.reporter {
- TestReporterConfig::Dot => {
- Box::new(DotTestReporter::new(options.cwd.clone()))
- }
+ TestReporterConfig::Dot => Box::new(DotTestReporter::new(
+ options.cwd.clone(),
+ failure_format_options,
+ )),
TestReporterConfig::Pretty => Box::new(PrettyTestReporter::new(
parallel,
options.log_level != Some(Level::Error),
options.filter,
false,
options.cwd.clone(),
+ failure_format_options,
+ )),
+ TestReporterConfig::Junit => Box::new(JunitTestReporter::new(
+ options.cwd.clone(),
+ "-".to_string(),
+ failure_format_options,
)),
- TestReporterConfig::Junit => {
- Box::new(JunitTestReporter::new(options.cwd.clone(), "-".to_string()))
- }
TestReporterConfig::Tap => Box::new(TapTestReporter::new(
options.cwd.clone(),
options.concurrent_jobs > NonZeroUsize::new(1).unwrap(),
+ failure_format_options,
)),
};
@@ -569,6 +582,9 @@ fn get_test_reporter(options: &TestSpecifiersOptions) -> Box<dyn TestReporter> {
let junit = Box::new(JunitTestReporter::new(
options.cwd.clone(),
junit_path.to_string(),
+ TestFailureFormatOptions {
+ hide_stacktraces: options.hide_stacktraces,
+ },
));
return Box::new(CompoundTestReporter::new(vec![reporter, junit]));
}
@@ -1807,6 +1823,7 @@ pub async fn run_tests(
filter: workspace_test_options.filter.is_some(),
reporter: workspace_test_options.reporter,
junit_path: workspace_test_options.junit_path,
+ hide_stacktraces: workspace_test_options.hide_stacktraces,
specifier: TestSpecifierOptions {
filter: TestFilter::from_flag(&workspace_test_options.filter),
shuffle: workspace_test_options.shuffle,
@@ -1973,6 +1990,7 @@ pub async fn run_tests_with_watch(
filter: workspace_test_options.filter.is_some(),
reporter: workspace_test_options.reporter,
junit_path: workspace_test_options.junit_path,
+ hide_stacktraces: workspace_test_options.hide_stacktraces,
specifier: TestSpecifierOptions {
filter: TestFilter::from_flag(&workspace_test_options.filter),
shuffle: workspace_test_options.shuffle,
diff --git a/cli/tools/test/reporters/common.rs b/cli/tools/test/reporters/common.rs
index e4d8d4247..7ca83db80 100644
--- a/cli/tools/test/reporters/common.rs
+++ b/cli/tools/test/reporters/common.rs
@@ -105,6 +105,7 @@ pub(super) fn report_summary(
cwd: &Url,
summary: &TestSummary,
elapsed: &Duration,
+ options: &TestFailureFormatOptions,
) {
if !summary.failures.is_empty() || !summary.uncaught_errors.is_empty() {
#[allow(clippy::type_complexity)] // Type alias doesn't look better here
@@ -136,8 +137,13 @@ pub(super) fn report_summary(
if !failure.hide_in_summary() {
let failure_title = format_test_for_summary(cwd, description);
writeln!(writer, "{}", &failure_title).unwrap();
- writeln!(writer, "{}: {}", colors::red_bold("error"), failure)
- .unwrap();
+ writeln!(
+ writer,
+ "{}: {}",
+ colors::red_bold("error"),
+ failure.format(options)
+ )
+ .unwrap();
writeln!(writer).unwrap();
failure_titles.push(failure_title);
}
@@ -152,7 +158,7 @@ pub(super) fn report_summary(
writer,
"{}: {}",
colors::red_bold("error"),
- format_test_error(js_error)
+ format_test_error(js_error, options)
)
.unwrap();
writeln!(writer, "This error was not caught from a test and caused the test runner to fail on the referenced module.").unwrap();
diff --git a/cli/tools/test/reporters/dot.rs b/cli/tools/test/reporters/dot.rs
index 8fbd59232..169c4b0e7 100644
--- a/cli/tools/test/reporters/dot.rs
+++ b/cli/tools/test/reporters/dot.rs
@@ -9,11 +9,15 @@ pub struct DotTestReporter {
width: usize,
cwd: Url,
summary: TestSummary,
+ failure_format_options: TestFailureFormatOptions,
}
#[allow(clippy::print_stdout)]
impl DotTestReporter {
- pub fn new(cwd: Url) -> DotTestReporter {
+ pub fn new(
+ cwd: Url,
+ failure_format_options: TestFailureFormatOptions,
+ ) -> DotTestReporter {
let console_width = if let Some(size) = crate::util::console::console_size()
{
size.cols as usize
@@ -26,6 +30,7 @@ impl DotTestReporter {
width: console_width,
cwd,
summary: TestSummary::new(),
+ failure_format_options,
}
}
@@ -190,6 +195,7 @@ impl TestReporter for DotTestReporter {
&self.cwd,
&self.summary,
elapsed,
+ &self.failure_format_options,
);
println!();
}
diff --git a/cli/tools/test/reporters/junit.rs b/cli/tools/test/reporters/junit.rs
index 4b69218df..3998bee40 100644
--- a/cli/tools/test/reporters/junit.rs
+++ b/cli/tools/test/reporters/junit.rs
@@ -15,19 +15,28 @@ pub struct JunitTestReporter {
// from child to parent to build the full test name that reflects the test
// hierarchy.
test_name_tree: TestNameTree,
+ failure_format_options: TestFailureFormatOptions,
}
impl JunitTestReporter {
- pub fn new(cwd: Url, output_path: String) -> Self {
+ pub fn new(
+ cwd: Url,
+ output_path: String,
+ failure_format_options: TestFailureFormatOptions,
+ ) -> Self {
Self {
cwd,
output_path,
cases: IndexMap::new(),
test_name_tree: TestNameTree::new(),
+ failure_format_options,
}
}
- fn convert_status(status: &TestResult) -> quick_junit::TestCaseStatus {
+ fn convert_status(
+ status: &TestResult,
+ failure_format_options: &TestFailureFormatOptions,
+ ) -> quick_junit::TestCaseStatus {
match status {
TestResult::Ok => quick_junit::TestCaseStatus::success(),
TestResult::Ignored => quick_junit::TestCaseStatus::skipped(),
@@ -35,7 +44,7 @@ impl JunitTestReporter {
kind: quick_junit::NonSuccessKind::Failure,
message: Some(failure.overview()),
ty: None,
- description: Some(failure.detail()),
+ description: Some(failure.format(failure_format_options).into_owned()),
reruns: vec![],
},
TestResult::Cancelled => quick_junit::TestCaseStatus::NonSuccess {
@@ -50,6 +59,7 @@ impl JunitTestReporter {
fn convert_step_status(
status: &TestStepResult,
+ failure_format_options: &TestFailureFormatOptions,
) -> quick_junit::TestCaseStatus {
match status {
TestStepResult::Ok => quick_junit::TestCaseStatus::success(),
@@ -59,7 +69,9 @@ impl JunitTestReporter {
kind: quick_junit::NonSuccessKind::Failure,
message: Some(failure.overview()),
ty: None,
- description: Some(failure.detail()),
+ description: Some(
+ failure.format(failure_format_options).into_owned(),
+ ),
reruns: vec![],
}
}
@@ -111,7 +123,7 @@ impl TestReporter for JunitTestReporter {
elapsed: u64,
) {
if let Some(case) = self.cases.get_mut(&description.id) {
- case.status = Self::convert_status(result);
+ case.status = Self::convert_status(result, &self.failure_format_options);
case.set_time(Duration::from_millis(elapsed));
}
}
@@ -153,7 +165,8 @@ impl TestReporter for JunitTestReporter {
_test_steps: &IndexMap<usize, TestStepDescription>,
) {
if let Some(case) = self.cases.get_mut(&description.id) {
- case.status = Self::convert_step_status(result);
+ case.status =
+ Self::convert_step_status(result, &self.failure_format_options);
case.set_time(Duration::from_millis(elapsed));
}
}
diff --git a/cli/tools/test/reporters/pretty.rs b/cli/tools/test/reporters/pretty.rs
index cb9f2c435..4120bbfb5 100644
--- a/cli/tools/test/reporters/pretty.rs
+++ b/cli/tools/test/reporters/pretty.rs
@@ -20,6 +20,7 @@ pub struct PrettyTestReporter {
HashMap<usize, IndexMap<usize, (TestStepDescription, TestStepResult, u64)>>,
summary: TestSummary,
writer: Box<dyn std::io::Write>,
+ failure_format_options: TestFailureFormatOptions,
}
impl PrettyTestReporter {
@@ -29,6 +30,7 @@ impl PrettyTestReporter {
filter: bool,
repl: bool,
cwd: Url,
+ failure_format_options: TestFailureFormatOptions,
) -> PrettyTestReporter {
PrettyTestReporter {
parallel,
@@ -45,6 +47,7 @@ impl PrettyTestReporter {
child_results_buffer: Default::default(),
summary: TestSummary::new(),
writer: Box::new(std::io::stdout()),
+ failure_format_options,
}
}
@@ -395,7 +398,13 @@ impl TestReporter for PrettyTestReporter {
_test_steps: &IndexMap<usize, TestStepDescription>,
) {
self.write_output_end();
- common::report_summary(&mut self.writer, &self.cwd, &self.summary, elapsed);
+ common::report_summary(
+ &mut self.writer,
+ &self.cwd,
+ &self.summary,
+ elapsed,
+ &self.failure_format_options,
+ );
if !self.repl {
writeln!(&mut self.writer).unwrap();
}
diff --git a/cli/tools/test/reporters/tap.rs b/cli/tools/test/reporters/tap.rs
index 62cb58a83..ea68ddd43 100644
--- a/cli/tools/test/reporters/tap.rs
+++ b/cli/tools/test/reporters/tap.rs
@@ -20,11 +20,16 @@ pub struct TapTestReporter {
n: usize,
step_n: usize,
step_results: HashMap<usize, Vec<(TestStepDescription, TestStepResult)>>,
+ failure_format_options: TestFailureFormatOptions,
}
#[allow(clippy::print_stdout)]
impl TapTestReporter {
- pub fn new(cwd: Url, is_concurrent: bool) -> TapTestReporter {
+ pub fn new(
+ cwd: Url,
+ is_concurrent: bool,
+ failure_format_options: TestFailureFormatOptions,
+ ) -> TapTestReporter {
TapTestReporter {
cwd,
is_concurrent,
@@ -33,6 +38,7 @@ impl TapTestReporter {
n: 0,
step_n: 0,
step_results: HashMap::new(),
+ failure_format_options,
}
}
@@ -45,6 +51,7 @@ impl TapTestReporter {
}
fn print_diagnostic(
+ &self,
indent: usize,
failure: &TestFailure,
location: DiagnosticLocation,
@@ -56,7 +63,7 @@ impl TapTestReporter {
// YAML is a superset of JSON, so we can avoid a YAML dependency here.
// This makes the output less readable though.
let diagnostic = serde_json::to_string(&json!({
- "message": failure.to_string(),
+ "message": failure.format(&self.failure_format_options),
"severity": "fail".to_string(),
"at": location,
}))
@@ -102,7 +109,7 @@ impl TapTestReporter {
Self::print_line(4, status, self.step_n, &desc.name, directive);
if let TestStepResult::Failed(failure) = result {
- Self::print_diagnostic(
+ self.print_diagnostic(
4,
failure,
DiagnosticLocation {
@@ -171,7 +178,7 @@ impl TestReporter for TapTestReporter {
Self::print_line(0, status, self.n, &description.name, directive);
if let TestResult::Failed(failure) = result {
- Self::print_diagnostic(
+ self.print_diagnostic(
0,
failure,
DiagnosticLocation {
diff --git a/tests/registry/jsr/@std/assert/1.0.0_meta.json b/tests/registry/jsr/@std/assert/1.0.0_meta.json
index 3ca2db93a..5bac06209 100644
--- a/tests/registry/jsr/@std/assert/1.0.0_meta.json
+++ b/tests/registry/jsr/@std/assert/1.0.0_meta.json
@@ -2,7 +2,7 @@
"exports": {
".": "./mod.ts",
"./assert": "./assert.ts",
- "./assert-equals": "./assert-equals.ts",
+ "./assert-equals": "./assert_equals.ts",
"./fail": "./fail.ts"
}
}
diff --git a/tests/specs/remove/basic/add_lock.out b/tests/specs/remove/basic/add_lock.out
index a5a45e854..cda0fc34a 100644
--- a/tests/specs/remove/basic/add_lock.out
+++ b/tests/specs/remove/basic/add_lock.out
@@ -7,10 +7,10 @@
},
"jsr": {
"@std/assert@1.0.0": {
- "integrity": "7ae268c58de9693b4997fd93d9b303a47df336664e2008378ccb93c3458d092a"
+ "integrity": "[WILDLINE]"
},
"@std/http@1.0.0": {
- "integrity": "d75bd303c21123a9b58f7249e38b4c0aa3a09f7d76b13f9d7e7842d89052091a"
+ "integrity": "[WILDLINE]"
}
}
},
diff --git a/tests/specs/test/hide_stacktraces/__test__.jsonc b/tests/specs/test/hide_stacktraces/__test__.jsonc
new file mode 100644
index 000000000..963873df3
--- /dev/null
+++ b/tests/specs/test/hide_stacktraces/__test__.jsonc
@@ -0,0 +1,24 @@
+{
+ "tests": {
+ "reporter_dot": {
+ "args": "test --hide-stacktraces --reporter=dot main.js",
+ "output": "dot.out",
+ "exitCode": 1
+ },
+ "reporter_junit": {
+ "args": "test --hide-stacktraces --reporter=junit main.js",
+ "output": "junit.out",
+ "exitCode": 1
+ },
+ "reporter_pretty": {
+ "args": "test --hide-stacktraces main.js",
+ "output": "pretty.out",
+ "exitCode": 1
+ },
+ "reporter_tap": {
+ "args": "test --hide-stacktraces --reporter=tap main.js",
+ "output": "tap.out",
+ "exitCode": 1
+ }
+ }
+}
diff --git a/tests/specs/test/hide_stacktraces/dot.out b/tests/specs/test/hide_stacktraces/dot.out
new file mode 100644
index 000000000..abda749f1
--- /dev/null
+++ b/tests/specs/test/hide_stacktraces/dot.out
@@ -0,0 +1,23 @@
+!
+
+ ERRORS
+
+assert a b => ./main.js:1:6
+error: AssertionError: Values are not equal.
+
+
+ [Diff] Actual / Expected
+
+
+- foo
++ bar
+
+
+
+ FAILURES
+
+assert a b => ./main.js:1:6
+
+FAILED | 0 passed | 1 failed ([WILDCARD])
+
+error: Test failed
diff --git a/tests/specs/test/hide_stacktraces/junit.out b/tests/specs/test/hide_stacktraces/junit.out
new file mode 100644
index 000000000..483c4764b
--- /dev/null
+++ b/tests/specs/test/hide_stacktraces/junit.out
@@ -0,0 +1,27 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<testsuites name="deno test" tests="1" failures="1" errors="0" time="[WILDCARD]">
+ <testsuite name="./main.js" tests="1" disabled="0" errors="0" failures="1">
+ <testcase name="assert a b" classname="./main.js" time="[WILDCARD]" line="1" col="6">
+ <failure message="Uncaught AssertionError: Values are not equal.
+
+
+ [Diff] Actual / Expected
+
+
+- foo
++ bar
+
+">AssertionError: Values are not equal.
+
+
+ [Diff] Actual / Expected
+
+
+- foo
++ bar
+
+</failure>
+ </testcase>
+ </testsuite>
+</testsuites>
+error: Test failed
diff --git a/tests/specs/test/hide_stacktraces/main.js b/tests/specs/test/hide_stacktraces/main.js
new file mode 100644
index 000000000..5e9d186fb
--- /dev/null
+++ b/tests/specs/test/hide_stacktraces/main.js
@@ -0,0 +1,8 @@
+Deno.test("assert a b", () => {
+ class AssertionError extends Error {
+ name = "AssertionError";
+ }
+ throw new AssertionError(
+ "Values are not equal.\n\n\n [Diff] Actual / Expected\n\n\n- foo\n+ bar\n\n",
+ );
+});
diff --git a/tests/specs/test/hide_stacktraces/pretty.out b/tests/specs/test/hide_stacktraces/pretty.out
new file mode 100644
index 000000000..7e107e8e7
--- /dev/null
+++ b/tests/specs/test/hide_stacktraces/pretty.out
@@ -0,0 +1,24 @@
+running 1 test from ./main.js
+assert a b ... FAILED ([WILDCARD])
+
+ ERRORS
+
+assert a b => ./main.js:1:6
+error: AssertionError: Values are not equal.
+
+
+ [Diff] Actual / Expected
+
+
+- foo
++ bar
+
+
+
+ FAILURES
+
+assert a b => ./main.js:1:6
+
+FAILED | 0 passed | 1 failed ([WILDCARD])
+
+error: Test failed
diff --git a/tests/specs/test/hide_stacktraces/tap.out b/tests/specs/test/hide_stacktraces/tap.out
new file mode 100644
index 000000000..b91c86108
--- /dev/null
+++ b/tests/specs/test/hide_stacktraces/tap.out
@@ -0,0 +1,8 @@
+TAP version 14
+# ./main.js
+not ok 1 - assert a b
+ ---
+ {"message":"AssertionError: Values are not equal.\n\n\n [Diff] Actual / Expected\n\n\n- foo\n+ bar\n\n","severity":"fail","at":{"file":"./main.js","line":1}}
+ ...
+1..1
+error: Test failed