 cli/main.rs                        |  6
 cli/tests/test/exit_sanitizer.out  |  1
 cli/tests/test/fail.out            |  1
 cli/tests/test/fail_fast.out       |  1
 cli/tests/test/finally_timeout.out |  1
 cli/tests/test/only.out            |  3
 cli/tools/test_runner.rs           | 52
 7 files changed, 38 insertions(+), 27 deletions(-)
diff --git a/cli/main.rs b/cli/main.rs
index 16b2df0e7..f69772437 100644
--- a/cli/main.rs
+++ b/cli/main.rs
@@ -1234,7 +1234,7 @@ async fn test_command(
tools::test_runner::is_supported,
)?;
- let failed = test_runner::run_tests(
+ test_runner::run_tests(
program_state.clone(),
permissions,
lib,
@@ -1249,10 +1249,6 @@ async fn test_command(
concurrent_jobs,
)
.await?;
-
- if failed {
- std::process::exit(1);
- }
}
Ok(())
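The caller-side effect is that `test_command` now lets `?` surface the failure instead of checking a returned boolean and calling `std::process::exit(1)`. A minimal sketch of that pattern follows; the stand-in function and plain `std` error type are illustrative only (the real functions are async and use `deno_core::error::AnyError`):

```rust
use std::error::Error;

// Stand-in for test_runner::run_tests after the patch: failure is an Err,
// not a `failed: bool` return value. (Async elided for brevity.)
fn run_tests(failed: bool) -> Result<(), Box<dyn Error>> {
    if failed {
        return Err("Test failed".into());
    }
    Ok(())
}

fn test_command() -> Result<(), Box<dyn Error>> {
    // `?` propagates the error; the old `if failed { exit(1) }` block is gone.
    run_tests(true)?;
    Ok(())
}

fn main() {
    if let Err(err) = test_command() {
        eprintln!("error: {}", err);
    }
}
```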
diff --git a/cli/tests/test/exit_sanitizer.out b/cli/tests/test/exit_sanitizer.out
index 0f9d0d45e..f331d7a70 100644
--- a/cli/tests/test/exit_sanitizer.out
+++ b/cli/tests/test/exit_sanitizer.out
@@ -32,3 +32,4 @@ failures:
test result: FAILED. 0 passed; 3 failed; 0 ignored; 0 measured; 0 filtered out ([WILDCARD])
+error: Test failed
diff --git a/cli/tests/test/fail.out b/cli/tests/test/fail.out
index cf5b84268..66d471cdf 100644
--- a/cli/tests/test/fail.out
+++ b/cli/tests/test/fail.out
@@ -78,3 +78,4 @@ failures:
test result: FAILED. 0 passed; 10 failed; 0 ignored; 0 measured; 0 filtered out ([WILDCARD])
+error: Test failed
diff --git a/cli/tests/test/fail_fast.out b/cli/tests/test/fail_fast.out
index bbeddaeba..c97f854fa 100644
--- a/cli/tests/test/fail_fast.out
+++ b/cli/tests/test/fail_fast.out
@@ -15,3 +15,4 @@ failures:
test result: FAILED. 0 passed; 1 failed; 0 ignored; 0 measured; 0 filtered out ([WILDCARD])
+error: Test failed
diff --git a/cli/tests/test/finally_timeout.out b/cli/tests/test/finally_timeout.out
index 257a27db3..570e9108f 100644
--- a/cli/tests/test/finally_timeout.out
+++ b/cli/tests/test/finally_timeout.out
@@ -16,3 +16,4 @@ failures:
test result: FAILED. 1 passed; 1 failed; 0 ignored; 0 measured; 0 filtered out ([WILDCARD])
+error: Test failed
diff --git a/cli/tests/test/only.out b/cli/tests/test/only.out
index 9195ab5aa..dc78cae80 100644
--- a/cli/tests/test/only.out
+++ b/cli/tests/test/only.out
@@ -4,5 +4,4 @@ test only ... ok ([WILDCARD])
test result: ok. 1 passed; 0 failed; 0 ignored; 0 measured; 2 filtered out ([WILDCARD])
-FAILED because the "only" option was used
-
+error: Test failed because the "only" option was used
diff --git a/cli/tools/test_runner.rs b/cli/tools/test_runner.rs
index 39d6bd62b..b185e3361 100644
--- a/cli/tools/test_runner.rs
+++ b/cli/tools/test_runner.rs
@@ -11,6 +11,7 @@ use crate::module_graph;
use crate::program_state::ProgramState;
use crate::tokio_util;
use crate::tools::coverage::CoverageCollector;
+use deno_core::error::generic_error;
use deno_core::error::AnyError;
use deno_core::futures::future;
use deno_core::futures::stream;
@@ -320,7 +321,6 @@ pub async fn run_test_file(
/// Runs tests.
///
-/// Returns a boolean indicating whether the tests failed.
#[allow(clippy::too_many_arguments)]
pub async fn run_tests(
program_state: Arc<ProgramState>,
@@ -335,7 +335,11 @@ pub async fn run_tests(
filter: Option<String>,
shuffle: Option<u64>,
concurrent_jobs: usize,
-) -> Result<bool, AnyError> {
+) -> Result<(), AnyError> {
+ if !allow_none && doc_modules.is_empty() && test_modules.is_empty() {
+ return Err(generic_error("No test modules found"));
+ }
+
let test_modules = if let Some(seed) = shuffle {
let mut rng = SmallRng::seed_from_u64(seed);
let mut test_modules = test_modules.clone();
@@ -423,13 +427,6 @@ pub async fn run_tests(
program_state.maybe_import_map.clone(),
)
.await?;
- } else if test_modules.is_empty() {
- println!("No matching test modules found");
- if !allow_none {
- std::process::exit(1);
- }
-
- return Ok(false);
}
program_state
@@ -443,7 +440,7 @@ pub async fn run_tests(
.await?;
if no_run {
- return Ok(false);
+ return Ok(());
}
// Because scripts, and therefore worker.execute cannot detect unresolved promises at the moment
@@ -493,7 +490,7 @@ pub async fn run_tests(
})
});
- let join_futures = stream::iter(join_handles)
+ let join_stream = stream::iter(join_handles)
.buffer_unordered(concurrent_jobs)
.collect::<Vec<Result<Result<(), AnyError>, tokio::task::JoinError>>>();
@@ -552,17 +549,20 @@ pub async fn run_tests(
reporter.report_summary(&summary, &elapsed);
if used_only {
- println!(
- "{} because the \"only\" option was used\n",
- colors::red("FAILED")
- );
+ return Err(generic_error(
+ "Test failed because the \"only\" option was used",
+ ));
+ }
+
+ if summary.failed > 0 {
+ return Err(generic_error("Test failed"));
}
- used_only || summary.failed > 0
+ Ok(())
})
};
- let (result, join_results) = future::join(handler, join_futures).await;
+ let (join_results, result) = future::join(join_stream, handler).await;
let mut join_errors = join_results.into_iter().filter_map(|join_result| {
join_result
@@ -572,10 +572,22 @@ pub async fn run_tests(
});
if let Some(e) = join_errors.next() {
- Err(e)
- } else {
- Ok(result.unwrap_or(false))
+ return Err(e);
}
+
+ match result {
+ Ok(result) => {
+ if let Some(err) = result.err() {
+ return Err(err);
+ }
+ }
+
+ Err(err) => {
+ return Err(err.into());
+ }
+ }
+
+ Ok(())
}
#[cfg(test)]
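Taken together, the `run_tests` changes replace every boolean/exit-code path with an error return. Below is a condensed, self-contained sketch of that control flow under simplified assumptions: `generic_error` is approximated with a plain string error, the parameters are reduced to the ones relevant here, and the async stream/worker plumbing is omitted.

```rust
use std::error::Error;

type AnyError = Box<dyn Error>;

// Approximation of deno_core::error::generic_error for this sketch.
fn generic_error(msg: &str) -> AnyError {
    msg.to_string().into()
}

struct Summary {
    failed: usize,
}

// Condensed sketch of the post-patch run_tests control flow.
fn run_tests(
    allow_none: bool,
    doc_modules: &[String],
    test_modules: &[String],
    no_run: bool,
    used_only: bool,
    summary: Summary,
) -> Result<(), AnyError> {
    // New early guard: returning an error replaces the old
    // println! + std::process::exit(1) path.
    if !allow_none && doc_modules.is_empty() && test_modules.is_empty() {
        return Err(generic_error("No test modules found"));
    }

    if no_run {
        return Ok(());
    }

    // ...type-check and execute the test modules (omitted)...

    if used_only {
        return Err(generic_error(
            "Test failed because the \"only\" option was used",
        ));
    }

    if summary.failed > 0 {
        return Err(generic_error("Test failed"));
    }

    Ok(())
}

fn main() {
    let summary = Summary { failed: 1 };
    // Prints `error: Test failed`, mirroring the updated .out fixtures.
    if let Err(err) = run_tests(
        false,
        &["doc.ts".into()],
        &["test.ts".into()],
        false,
        false,
        summary,
    ) {
        eprintln!("error: {}", err);
        std::process::exit(1);
    }
}
```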