author     Casper Beyer <caspervonb@pm.me>    2021-04-29 02:17:04 +0800
committer  GitHub <noreply@github.com>        2021-04-28 20:17:04 +0200
commit     c455c28b834683f6516422dbf1b020fbb2c1bbb6 (patch)
tree       96e1484f4853969ae46539c26ffd8d716f409eb7
parent     0260b488fbba9a43c64641428d3603b8761067a4 (diff)
feat(test): run test modules in parallel (#9815)
This commit adds support for running test modules in parallel. The entire test runner has been rewritten from JavaScript to Rust, and a set of ops was added to support reporting from Rust. A new "--jobs" flag was added to "deno test" to configure how many threads are used; when given no value it defaults to 2.
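
For reference, the parsing added in cli/flags.rs below gives the flag three behaviors. A sketch of the expected invocations, inferred from the test_parse changes in this diff:

    deno test              # flag absent: test modules run serially (concurrent_jobs = 1)
    deno test --jobs       # bare flag: falls back to the hard-coded default of 2
    deno test --jobs 4     # explicit value: up to 4 test modules run in parallel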
-rw-r--r--  cli/flags.rs                                               27
-rw-r--r--  cli/main.rs                                               107
-rw-r--r--  cli/ops/mod.rs                                              2
-rw-r--r--  cli/ops/testing.rs (renamed from cli/ops/test_runner.rs)   31
-rw-r--r--  cli/program_state.rs                                       66
-rw-r--r--  cli/tests/integration_tests.rs                             15
-rw-r--r--  cli/tests/test/deno_test_only.ts.out                        5
-rw-r--r--  cli/tests/test/deno_test_unresolved_promise.out             5
-rw-r--r--  cli/tests/test/exit_sanitizer_test.out                      2
-rw-r--r--  cli/tests/test/quiet_test.out                               4
-rw-r--r--  cli/tests/test/quiet_test.ts                                3
-rw-r--r--  cli/tests/test/test_finally_cleartimeout.out                2
-rw-r--r--  cli/tests/test/test_unresolved_promise.js                   8
-rw-r--r--  cli/tests/test/unhandled_rejection.out                      6
-rw-r--r--  cli/tests/test/unhandled_rejection.ts                       3
-rw-r--r--  cli/tests/unit/filter_function_test.ts                     52
-rw-r--r--  cli/tools/test_runner.rs                                  353
-rw-r--r--  runtime/js/40_testing.js                                  285
18 files changed, 591 insertions, 385 deletions
diff --git a/cli/flags.rs b/cli/flags.rs
index 9c3316497..3e63f8014 100644
--- a/cli/flags.rs
+++ b/cli/flags.rs
@@ -102,6 +102,7 @@ pub enum DenoSubcommand {
allow_none: bool,
include: Option<Vec<String>>,
filter: Option<String>,
+ concurrent_jobs: usize,
},
Types,
Upgrade {
@@ -1013,6 +1014,18 @@ fn test_subcommand<'a, 'b>() -> App<'a, 'b> {
.help("UNSTABLE: Collect coverage profile data"),
)
.arg(
+ Arg::with_name("jobs")
+ .short("j")
+ .long("jobs")
+ .min_values(0)
+ .max_values(1)
+ .takes_value(true)
+ .validator(|val: String| match val.parse::<usize>() {
+ Ok(_) => Ok(()),
+ Err(_) => Err("jobs should be a number".to_string()),
+ }),
+ )
+ .arg(
Arg::with_name("files")
.help("List of file names to run")
.takes_value(true)
@@ -1666,6 +1679,18 @@ fn test_parse(flags: &mut Flags, matches: &clap::ArgMatches) {
}
}
+ let concurrent_jobs = if matches.is_present("jobs") {
+ if let Some(value) = matches.value_of("jobs") {
+ value.parse().unwrap()
+ } else {
+ // TODO(caspervonb) when no value is given use
+ // https://doc.rust-lang.org/std/thread/fn.available_concurrency.html
+ 2
+ }
+ } else {
+ 1
+ };
+
let include = if matches.is_present("files") {
let files: Vec<String> = matches
.values_of("files")
@@ -1685,6 +1710,7 @@ fn test_parse(flags: &mut Flags, matches: &clap::ArgMatches) {
include,
filter,
allow_none,
+ concurrent_jobs,
};
}
@@ -3330,6 +3356,7 @@ mod tests {
allow_none: true,
quiet: false,
include: Some(svec!["dir1/", "dir2/"]),
+ concurrent_jobs: 1,
},
unstable: true,
coverage_dir: Some("cov".to_string()),
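
To make the flag semantics above easier to follow in isolation, here is a minimal standalone sketch of the same resolution logic against a clap 2.x ArgMatches; the function name and shape are illustrative, not the committed code verbatim:

fn resolve_concurrent_jobs(matches: &clap::ArgMatches) -> usize {
  if matches.is_present("jobs") {
    match matches.value_of("jobs") {
      // The validator registered on the arg guarantees this parses.
      Some(value) => value.parse().unwrap(),
      // Bare `--jobs`: hard-coded 2 until available_concurrency() is usable.
      None => 2,
    }
  } else {
    // Flag absent: preserve the old serial behavior.
    1
  }
}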
diff --git a/cli/main.rs b/cli/main.rs
index d66bdcee9..6b5c5da27 100644
--- a/cli/main.rs
+++ b/cli/main.rs
@@ -219,7 +219,7 @@ pub fn create_main_worker(
ops::runtime_compiler::init(js_runtime);
if enable_testing {
- ops::test_runner::init(js_runtime);
+ ops::testing::init(js_runtime);
}
js_runtime.sync_ops_cache();
@@ -902,6 +902,7 @@ async fn coverage_command(
.await
}
+#[allow(clippy::too_many_arguments)]
async fn test_command(
flags: Flags,
include: Option<Vec<String>>,
@@ -910,87 +911,23 @@ async fn test_command(
quiet: bool,
allow_none: bool,
filter: Option<String>,
+ concurrent_jobs: usize,
) -> Result<(), AnyError> {
- let program_state = ProgramState::build(flags.clone()).await?;
- let permissions = Permissions::from_options(&flags.clone().into());
- let cwd = std::env::current_dir().expect("No current directory");
- let include = include.unwrap_or_else(|| vec![".".to_string()]);
- let test_modules =
- tools::test_runner::prepare_test_modules_urls(include, &cwd)?;
-
- if test_modules.is_empty() {
- println!("No matching test modules found");
- if !allow_none {
- std::process::exit(1);
- }
- return Ok(());
- }
- let main_module = deno_core::resolve_path("$deno$test.ts")?;
- // Create a dummy source file.
- let source_file = File {
- local: main_module.to_file_path().unwrap(),
- maybe_types: None,
- media_type: MediaType::TypeScript,
- source: tools::test_runner::render_test_file(
- test_modules.clone(),
- fail_fast,
- quiet,
- filter,
- ),
- specifier: main_module.clone(),
- };
- // Save our fake file into file fetcher cache
- // to allow module access by TS compiler
- program_state.file_fetcher.insert_cached(source_file);
-
- if no_run {
- let lib = if flags.unstable {
- module_graph::TypeLib::UnstableDenoWindow
- } else {
- module_graph::TypeLib::DenoWindow
- };
- program_state
- .prepare_module_load(
- main_module.clone(),
- lib,
- Permissions::allow_all(),
- false,
- program_state.maybe_import_map.clone(),
- )
- .await?;
- return Ok(());
- }
-
- let mut worker =
- create_main_worker(&program_state, main_module.clone(), permissions, true);
-
if let Some(ref coverage_dir) = flags.coverage_dir {
env::set_var("DENO_UNSTABLE_COVERAGE_DIR", coverage_dir);
}
- let mut maybe_coverage_collector =
- if let Some(ref coverage_dir) = program_state.coverage_dir {
- let session = worker.create_inspector_session();
- let coverage_dir = PathBuf::from(coverage_dir);
- let mut coverage_collector =
- tools::coverage::CoverageCollector::new(coverage_dir, session);
- coverage_collector.start_collecting().await?;
-
- Some(coverage_collector)
- } else {
- None
- };
-
- let execute_result = worker.execute_module(&main_module).await;
- execute_result?;
- worker.execute("window.dispatchEvent(new Event('load'))")?;
- worker.run_event_loop().await?;
- worker.execute("window.dispatchEvent(new Event('unload'))")?;
- worker.run_event_loop().await?;
-
- if let Some(coverage_collector) = maybe_coverage_collector.as_mut() {
- coverage_collector.stop_collecting().await?;
- }
+ tools::test_runner::run_tests(
+ flags,
+ include,
+ no_run,
+ fail_fast,
+ quiet,
+ allow_none,
+ filter,
+ concurrent_jobs,
+ )
+ .await?;
Ok(())
}
@@ -1125,10 +1062,18 @@ fn get_subcommand(
include,
allow_none,
filter,
- } => {
- test_command(flags, include, no_run, fail_fast, quiet, allow_none, filter)
- .boxed_local()
- }
+ concurrent_jobs,
+ } => test_command(
+ flags,
+ include,
+ no_run,
+ fail_fast,
+ quiet,
+ allow_none,
+ filter,
+ concurrent_jobs,
+ )
+ .boxed_local(),
DenoSubcommand::Completions { buf } => {
if let Err(e) = write_to_stdout_ignore_sigpipe(&buf) {
eprintln!("{}", e);
diff --git a/cli/ops/mod.rs b/cli/ops/mod.rs
index 386ad16fa..a3df77fac 100644
--- a/cli/ops/mod.rs
+++ b/cli/ops/mod.rs
@@ -2,6 +2,6 @@
pub mod errors;
pub mod runtime_compiler;
-pub mod test_runner;
+pub mod testing;
pub use deno_runtime::ops::{reg_async, reg_sync};
diff --git a/cli/ops/test_runner.rs b/cli/ops/testing.rs
index 380ec7fb0..450f55a41 100644
--- a/cli/ops/test_runner.rs
+++ b/cli/ops/testing.rs
@@ -1,23 +1,27 @@
-// Copyright 2018-2021 the Deno authors. All rights reserved. MIT license.
-
+use crate::tools::test_runner::TestMessage;
use deno_core::error::generic_error;
use deno_core::error::AnyError;
use deno_core::serde_json;
+use deno_core::serde_json::json;
use deno_core::serde_json::Value;
+use deno_core::JsRuntime;
use deno_core::OpState;
use deno_core::ZeroCopyBuf;
use deno_runtime::ops::worker_host::create_worker_permissions;
use deno_runtime::ops::worker_host::PermissionsArg;
use deno_runtime::permissions::Permissions;
+use serde::Deserialize;
+use std::sync::mpsc::Sender;
use uuid::Uuid;
-pub fn init(rt: &mut deno_core::JsRuntime) {
+pub fn init(rt: &mut JsRuntime) {
super::reg_sync(rt, "op_pledge_test_permissions", op_pledge_test_permissions);
super::reg_sync(
rt,
"op_restore_test_permissions",
op_restore_test_permissions,
);
+ super::reg_sync(rt, "op_post_test_message", op_post_test_message);
}
#[derive(Clone)]
@@ -64,3 +68,24 @@ pub fn op_restore_test_permissions(
Err(generic_error("no permissions to restore"))
}
}
+
+#[derive(Debug, Deserialize)]
+#[serde(rename_all = "camelCase")]
+struct PostTestMessageArgs {
+ message: TestMessage,
+}
+
+fn op_post_test_message(
+ state: &mut OpState,
+ args: Value,
+ _zero_copy: Option<ZeroCopyBuf>,
+) -> Result<Value, AnyError> {
+ let args: PostTestMessageArgs = serde_json::from_value(args)?;
+ let sender = state.borrow::<Sender<TestMessage>>().clone();
+
+ if sender.send(args.message).is_err() {
+ Ok(json!(false))
+ } else {
+ Ok(json!(true))
+ }
+}
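
The adjacently tagged serde attributes on TestMessage (declared in cli/tools/test_runner.rs further down) are what let the plain { kind, data } objects posted from JavaScript deserialize straight into the enum. A minimal round-trip sketch, assuming the types exactly as defined in this diff:

use crate::tools::test_runner::{TestMessage, TestResult};
use deno_core::error::AnyError;
use deno_core::serde_json::{self, json};

fn wire_format_example() -> Result<(), AnyError> {
  // Shape produced by postTestMessage("result", ...) in runtime/js/40_testing.js.
  let raw = json!({
    "kind": "result",
    "data": { "name": "example", "duration": 12, "result": "ok" }
  });
  let message: TestMessage = serde_json::from_value(raw)?;
  assert!(matches!(
    message,
    TestMessage::Result { result: TestResult::Ok, .. }
  ));
  Ok(())
}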
diff --git a/cli/program_state.rs b/cli/program_state.rs
index c52f4efef..de6331b89 100644
--- a/cli/program_state.rs
+++ b/cli/program_state.rs
@@ -133,6 +133,72 @@ impl ProgramState {
Ok(Arc::new(program_state))
}
+ /// Prepares a set of module specifiers for loading in one shot.
+ ///
+ pub async fn prepare_module_graph(
+ self: &Arc<Self>,
+ specifiers: Vec<ModuleSpecifier>,
+ lib: TypeLib,
+ runtime_permissions: Permissions,
+ maybe_import_map: Option<ImportMap>,
+ ) -> Result<(), AnyError> {
+ let handler = Arc::new(Mutex::new(FetchHandler::new(
+ self,
+ runtime_permissions.clone(),
+ )?));
+
+ let mut builder =
+ GraphBuilder::new(handler, maybe_import_map, self.lockfile.clone());
+
+ for specifier in specifiers {
+ builder.add(&specifier, false).await?;
+ }
+
+ let mut graph = builder.get_graph();
+ let debug = self.flags.log_level == Some(log::Level::Debug);
+ let maybe_config_path = self.flags.config_path.clone();
+
+ let result_modules = if self.flags.no_check {
+ let result_info = graph.transpile(TranspileOptions {
+ debug,
+ maybe_config_path,
+ reload: self.flags.reload,
+ })?;
+ debug!("{}", result_info.stats);
+ if let Some(ignored_options) = result_info.maybe_ignored_options {
+ warn!("{}", ignored_options);
+ }
+ result_info.loadable_modules
+ } else {
+ let result_info = graph.check(CheckOptions {
+ debug,
+ emit: true,
+ lib,
+ maybe_config_path,
+ reload: self.flags.reload,
+ })?;
+
+ debug!("{}", result_info.stats);
+ if let Some(ignored_options) = result_info.maybe_ignored_options {
+ eprintln!("{}", ignored_options);
+ }
+ if !result_info.diagnostics.is_empty() {
+ return Err(anyhow!(result_info.diagnostics));
+ }
+ result_info.loadable_modules
+ };
+
+ let mut loadable_modules = self.modules.lock().unwrap();
+ loadable_modules.extend(result_modules);
+
+ if let Some(ref lockfile) = self.lockfile {
+ let g = lockfile.lock().unwrap();
+ g.write()?;
+ }
+
+ Ok(())
+ }
+
/// This function is called when a new module load is
/// initialized by the JsRuntime. Its responsibility is to collect
/// all dependencies and if it is required then also perform TS typecheck
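
The point of this new method is that the whole set of test modules is fetched and type-checked once, up front, before any worker threads start; each worker then loads its module from the shared cache without re-checking. A hedged sketch of the call shape, matching the run_tests call site later in this diff:

program_state
  .prepare_module_graph(
    test_modules.clone(),                // all collected specifiers at once
    module_graph::TypeLib::DenoWindow,   // or UnstableDenoWindow with --unstable
    permissions.clone(),
    program_state.maybe_import_map.clone(),
  )
  .await?;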
diff --git a/cli/tests/integration_tests.rs b/cli/tests/integration_tests.rs
index b5e5d1fa6..9c56396b4 100644
--- a/cli/tests/integration_tests.rs
+++ b/cli/tests/integration_tests.rs
@@ -34,6 +34,9 @@ fn js_unit_tests_lint() {
fn js_unit_tests() {
let _g = util::http_server();
+ // Note that the unit tests are not safe to run concurrently and must be run with a
+ // concurrency limit of one because some of them change the current directory (chdir).
+ // TODO(caspervonb) split these tests into two groups: parallel and serial.
let mut deno = util::deno_cmd()
.current_dir(util::root_path())
.arg("test")
@@ -2438,11 +2441,23 @@ mod integration {
output: "test/deno_test_unresolved_promise.out",
});
+ itest!(unhandled_rejection {
+ args: "test test/unhandled_rejection.ts",
+ exit_code: 1,
+ output: "test/unhandled_rejection.out",
+ });
+
itest!(exit_sanitizer {
args: "test test/exit_sanitizer_test.ts",
output: "test/exit_sanitizer_test.out",
exit_code: 1,
});
+
+ itest!(quiet {
+ args: "test --quiet test/quiet_test.ts",
+ exit_code: 0,
+ output: "test/quiet_test.out",
+ });
}
#[test]
diff --git a/cli/tests/test/deno_test_only.ts.out b/cli/tests/test/deno_test_only.ts.out
index a23f2505c..25a0b1a0c 100644
--- a/cli/tests/test/deno_test_only.ts.out
+++ b/cli/tests/test/deno_test_only.ts.out
@@ -1,7 +1,8 @@
-[WILDCARD]running 1 tests
+[WILDCARD]
+running 1 tests
test def ... ok ([WILDCARD])
-test result: ok. 1 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out ([WILDCARD])
+test result: ok. 1 passed; 0 failed; 0 ignored; 0 measured; 2 filtered out ([WILDCARD])
FAILED because the "only" option was used
diff --git a/cli/tests/test/deno_test_unresolved_promise.out b/cli/tests/test/deno_test_unresolved_promise.out
index cc4f2985e..e1ec640ab 100644
--- a/cli/tests/test/deno_test_unresolved_promise.out
+++ b/cli/tests/test/deno_test_unresolved_promise.out
@@ -1,4 +1,5 @@
-Check [WILDCARD]
running 2 tests
-test unresolved promise ... in promise
+test unresolved promise ...
+test result: FAILED. 0 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out [WILDCARD]
+
error: Module evaluation is still pending but there are no pending ops or dynamic imports. This situation is often caused by unresolved promise.
diff --git a/cli/tests/test/exit_sanitizer_test.out b/cli/tests/test/exit_sanitizer_test.out
index 351453928..25df34b34 100644
--- a/cli/tests/test/exit_sanitizer_test.out
+++ b/cli/tests/test/exit_sanitizer_test.out
@@ -1,4 +1,4 @@
-Check [WILDCARD]/$deno$test.ts
+Check [WILDCARD]/exit_sanitizer_test.ts
running 3 tests
test exit(0) ... FAILED ([WILDCARD])
test exit(1) ... FAILED ([WILDCARD])
diff --git a/cli/tests/test/quiet_test.out b/cli/tests/test/quiet_test.out
new file mode 100644
index 000000000..4b57c50fe
--- /dev/null
+++ b/cli/tests/test/quiet_test.out
@@ -0,0 +1,4 @@
+running 1 tests
+test log ... ok [WILDCARD]
+
+test result: ok. 1 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out [WILDCARD]
diff --git a/cli/tests/test/quiet_test.ts b/cli/tests/test/quiet_test.ts
new file mode 100644
index 000000000..e98e6797a
--- /dev/null
+++ b/cli/tests/test/quiet_test.ts
@@ -0,0 +1,3 @@
+Deno.test("log", function () {
+ console.log("log");
+});
diff --git a/cli/tests/test/test_finally_cleartimeout.out b/cli/tests/test/test_finally_cleartimeout.out
index c88b8242b..ddad7cce9 100644
--- a/cli/tests/test/test_finally_cleartimeout.out
+++ b/cli/tests/test/test_finally_cleartimeout.out
@@ -1,4 +1,4 @@
-Check [WILDCARD]/$deno$test.ts
+Check [WILDCARD]/test_finally_cleartimeout.ts
running 2 tests
test error ... FAILED ([WILDCARD])
test success ... ok ([WILDCARD])
diff --git a/cli/tests/test/test_unresolved_promise.js b/cli/tests/test/test_unresolved_promise.js
index 466b18864..8f50e907a 100644
--- a/cli/tests/test/test_unresolved_promise.js
+++ b/cli/tests/test/test_unresolved_promise.js
@@ -1,15 +1,11 @@
Deno.test({
name: "unresolved promise",
fn() {
- return new Promise((_resolve, _reject) => {
- console.log("in promise");
- });
+ return new Promise((_resolve, _reject) => {});
},
});
Deno.test({
name: "ok",
- fn() {
- console.log("ok test");
- },
+ fn() {},
});
diff --git a/cli/tests/test/unhandled_rejection.out b/cli/tests/test/unhandled_rejection.out
new file mode 100644
index 000000000..27b3865a8
--- /dev/null
+++ b/cli/tests/test/unhandled_rejection.out
@@ -0,0 +1,6 @@
+Check [WILDCARD]
+
+test result: ok. 0 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out [WILDCARD]
+
+error: Uncaught (in promise) Error: rejection
+[WILDCARD]
diff --git a/cli/tests/test/unhandled_rejection.ts b/cli/tests/test/unhandled_rejection.ts
new file mode 100644
index 000000000..396e1c09d
--- /dev/null
+++ b/cli/tests/test/unhandled_rejection.ts
@@ -0,0 +1,3 @@
+new Promise((resolve, reject) => {
+ reject(new Error("rejection"));
+});
diff --git a/cli/tests/unit/filter_function_test.ts b/cli/tests/unit/filter_function_test.ts
deleted file mode 100644
index 2c1d9a7c8..000000000
--- a/cli/tests/unit/filter_function_test.ts
+++ /dev/null
@@ -1,52 +0,0 @@
-import { assertEquals, unitTest } from "./test_util.ts";
-
-// @ts-expect-error TypeScript (as of 3.7) does not support indexing namespaces by symbol
-const { createFilterFn } = Deno[Deno.internal];
-
-unitTest(function filterAsString(): void {
- const filterFn = createFilterFn("my-test");
- const tests = [
- {
- fn(): void {},
- name: "my-test",
- },
- {
- fn(): void {},
- name: "other-test",
- },
- ];
- const filteredTests = tests.filter(filterFn);
- assertEquals(filteredTests.length, 1);
-});
-
-unitTest(function filterAsREGEX(): void {
- const filterFn = createFilterFn("/.+-test/");
- const tests = [
- {
- fn(): void {},
- name: "my-test",
- },
- {
- fn(): void {},
- name: "other-test",
- },
- ];
- const filteredTests = tests.filter(filterFn);
- assertEquals(filteredTests.length, 2);
-});
-
-unitTest(function filterAsEscapedREGEX(): void {
- const filterFn = createFilterFn("/\\w+-test/");
- const tests = [
- {
- fn(): void {},
- name: "my-test",
- },
- {
- fn(): void {},
- name: "other-test",
- },
- ];
- const filteredTests = tests.filter(filterFn);
- assertEquals(filteredTests.length, 2);
-});
diff --git a/cli/tools/test_runner.rs b/cli/tools/test_runner.rs
index df792bd53..eb5b9831c 100644
--- a/cli/tools/test_runner.rs
+++ b/cli/tools/test_runner.rs
@@ -1,11 +1,56 @@
// Copyright 2018-2021 the Deno authors. All rights reserved. MIT license.
+use crate::colors;
+use crate::create_main_worker;
+use crate::file_fetcher::File;
+use crate::flags::Flags;
use crate::fs_util;
+use crate::media_type::MediaType;
+use crate::module_graph;
+use crate::program_state::ProgramState;
+use crate::tokio_util;
+use crate::tools::coverage::CoverageCollector;
use crate::tools::installer::is_remote_url;
use deno_core::error::AnyError;
+use deno_core::futures::future;
+use deno_core::futures::stream;
+use deno_core::futures::StreamExt;
use deno_core::serde_json::json;
use deno_core::url::Url;
+use deno_core::ModuleSpecifier;
+use deno_runtime::permissions::Permissions;
+use serde::Deserialize;
use std::path::Path;
+use std::path::PathBuf;
+use std::sync::mpsc::channel;
+use std::sync::mpsc::Sender;
+use std::sync::Arc;
+
+#[derive(Debug, Clone, PartialEq, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub enum TestResult {
+ Ok,
+ Ignored,
+ Failed(String),
+}
+
+#[derive(Debug, Clone, Deserialize)]
+#[serde(tag = "kind", content = "data", rename_all = "camelCase")]
+pub enum TestMessage {
+ Plan {
+ pending: usize,
+ filtered: usize,
+ only: bool,
+ },
+ Wait {
+ name: String,
+ },
+ Result {
+ name: String,
+ duration: usize,
+ result: TestResult,
+ },
+}
fn is_supported(p: &Path) -> bool {
use std::path::Component;
@@ -31,7 +76,7 @@ fn is_supported(p: &Path) -> bool {
}
}
-pub fn prepare_test_modules_urls(
+pub fn collect_test_module_specifiers(
include: Vec<String>,
root_path: &Path,
) -> Result<Vec<Url>, AnyError> {
@@ -63,32 +108,302 @@ pub fn prepare_test_modules_urls(
Ok(prepared)
}
-pub fn render_test_file(
- modules: Vec<Url>,
+pub async fn run_test_file(
+ program_state: Arc<ProgramState>,
+ main_module: ModuleSpecifier,
+ test_module: ModuleSpecifier,
+ permissions: Permissions,
+ channel: Sender<TestMessage>,
+) -> Result<(), AnyError> {
+ let mut worker =
+ create_main_worker(&program_state, main_module.clone(), permissions, true);
+
+ {
+ let js_runtime = &mut worker.js_runtime;
+ js_runtime
+ .op_state()
+ .borrow_mut()
+ .put::<Sender<TestMessage>>(channel.clone());
+ }
+
+ let mut maybe_coverage_collector = if let Some(ref coverage_dir) =
+ program_state.coverage_dir
+ {
+ let session = worker.create_inspector_session();
+ let coverage_dir = PathBuf::from(coverage_dir);
+ let mut coverage_collector = CoverageCollector::new(coverage_dir, session);
+ coverage_collector.start_collecting().await?;
+
+ Some(coverage_collector)
+ } else {
+ None
+ };
+
+ let execute_result = worker.execute_module(&main_module).await;
+ execute_result?;
+ worker.execute("window.dispatchEvent(new Event('load'))")?;
+
+ let execute_result = worker.execute_module(&test_module).await;
+ execute_result?;
+
+ worker.run_event_loop().await?;
+ worker.execute("window.dispatchEvent(new Event('unload'))")?;
+
+ if let Some(coverage_collector) = maybe_coverage_collector.as_mut() {
+ coverage_collector.stop_collecting().await?;
+ }
+
+ Ok(())
+}
+
+#[allow(clippy::too_many_arguments)]
+pub async fn run_tests(
+ flags: Flags,
+ include: Option<Vec<String>>,
+ no_run: bool,
fail_fast: bool,
quiet: bool,
+ allow_none: bool,
filter: Option<String>,
-) -> String {
- let mut test_file = "".to_string();
+ concurrent_jobs: usize,
+) -> Result<(), AnyError> {
+ let program_state = ProgramState::build(flags.clone()).await?;
+ let permissions = Permissions::from_options(&flags.clone().into());
+ let cwd = std::env::current_dir().expect("No current directory");
+ let include = include.unwrap_or_else(|| vec![".".to_string()]);
+ let test_modules = collect_test_module_specifiers(include, &cwd)?;
- for module in modules {
- test_file.push_str(&format!("import \"{}\";\n", module.to_string()));
+ if test_modules.is_empty() {
+ println!("No matching test modules found");
+ if !allow_none {
+ std::process::exit(1);
+ }
+ return Ok(());
}
- let options = if let Some(filter) = filter {
- json!({ "failFast": fail_fast, "reportToConsole": !quiet, "disableLog": quiet, "filter": filter })
+ let lib = if flags.unstable {
+ module_graph::TypeLib::UnstableDenoWindow
} else {
- json!({ "failFast": fail_fast, "reportToConsole": !quiet, "disableLog": quiet })
+ module_graph::TypeLib::DenoWindow
+ };
+
+ program_state
+ .prepare_module_graph(
+ test_modules.clone(),
+ lib.clone(),
+ permissions.clone(),
+ program_state.maybe_import_map.clone(),
+ )
+ .await?;
+
+ if no_run {
+ return Ok(());
+ }
+
+ // Because scripts (and therefore worker.execute) cannot currently detect
+ // unresolved promises, we generate a module for the actual test execution.
+ let test_options = json!({
+ "disableLog": quiet,
+ "filter": filter,
+ });
+
+ let test_module = deno_core::resolve_path("$deno$test.js")?;
+ let test_source =
+ format!("await Deno[Deno.internal].runTests({});", test_options);
+ let test_file = File {
+ local: test_module.to_file_path().unwrap(),
+ maybe_types: None,
+ media_type: MediaType::JavaScript,
+ source: test_source.clone(),
+ specifier: test_module.clone(),
+ };
+
+ program_state.file_fetcher.insert_cached(test_file);
+
+ let (sender, receiver) = channel::<TestMessage>();
+
+ let join_handles = test_modules.iter().map(move |main_module| {
+ let program_state = program_state.clone();
+ let main_module = main_module.clone();
+ let test_module = test_module.clone();
+ let permissions = permissions.clone();
+ let sender = sender.clone();
+
+ tokio::task::spawn_blocking(move || {
+ let join_handle = std::thread::spawn(move || {
+ let future = run_test_file(
+ program_state,
+ main_module,
+ test_module,
+ permissions,
+ sender,
+ );
+
+ tokio_util::run_basic(future)
+ });
+
+ join_handle.join().unwrap()
+ })
+ });
+
+ let join_futures = stream::iter(join_handles)
+ .buffer_unordered(concurrent_jobs)
+ .collect::<Vec<Result<Result<(), AnyError>, tokio::task::JoinError>>>();
+
+ let handler = {
+ tokio::task::spawn_blocking(move || {
+ let time = std::time::Instant::now();
+ let mut failed = 0;
+ let mut filtered_out = 0;
+ let mut ignored = 0;
+ let mut passed = 0;
+ let measured = 0;
+
+ let mut planned = 0;
+ let mut used_only = false;
+ let mut has_error = false;
+ let mut failures: Vec<(String, String)> = Vec::new();
+
+ for message in receiver.iter() {
+ match message {
+ TestMessage::Plan {
+ pending,
+ filtered,
+ only,
+ } => {
+ println!("running {} tests", pending);
+
+ if only {
+ used_only = true;
+ }
+
+ planned += pending;
+ filtered_out += filtered;
+ }
+
+ TestMessage::Wait { name } => {
+ if concurrent_jobs == 1 {
+ print!("test {} ...", name);
+ }
+ }
+
+ TestMessage::Result {
+ name,
+ duration,
+ result,
+ } => {
+ if concurrent_jobs != 1 {
+ print!("test {} ...", name);
+ }
+
+ match result {
+ TestResult::Ok => {
+ println!(
+ " {} {}",
+ colors::green("ok"),
+ colors::gray(format!("({}ms)", duration))
+ );
+
+ passed += 1;
+ }
+ TestResult::Ignored => {
+ println!(
+ " {} {}",
+ colors::yellow("ignored"),
+ colors::gray(format!("({}ms)", duration))
+ );
+
+ ignored += 1;
+ }
+ TestResult::Failed(error) => {
+ println!(
+ " {} {}",
+ colors::red("FAILED"),
+ colors::gray(format!("({}ms)", duration))
+ );
+
+ failed += 1;
+ failures.push((name, error));
+ has_error = true;
+ }
+ }
+ }
+ }
+
+ if has_error && fail_fast {
+ break;
+ }
+ }
+
+ // If one of the workers panics we can end up with fewer test results
+ // than were planned.
+ // In that case we mark it as an error so the run is reported as failed.
+ if planned > passed + ignored + failed {
+ has_error = true;
+ }
+
+ if !failures.is_empty() {
+ println!("\nfailures:\n");
+ for (name, error) in &failures {
+ println!("{}", name);
+ println!("{}", error);
+ println!();
+ }
+
+ println!("failures:\n");
+ for (name, _) in &failures {
+ println!("\t{}", name);
+ }
+ }
+
+ let status = if has_error {
+ colors::red("FAILED").to_string()
+ } else {
+ colors::green("ok").to_string()
+ };
+
+ println!(
+ "\ntest result: {}. {} passed; {} failed; {} ignored; {} measured; {} filtered out {}\n",
+ status,
+ passed,
+ failed,
+ ignored,
+ measured,
+ filtered_out,
+ colors::gray(format!("({}ms)", time.elapsed().as_millis())),
+ );
+
+ if used_only {
+ println!(
+ "{} because the \"only\" option was used\n",
+ colors::red("FAILED")
+ );
+
+ has_error = true;
+ }
+
+ has_error
+ })
};
- test_file.push_str("// @ts-ignore\n");
+ let (result, join_results) = future::join(handler, join_futures).await;
- test_file.push_str(&format!(
- "await Deno[Deno.internal].runTests({});\n",
- options
- ));
+ let mut join_errors = join_results.into_iter().filter_map(|join_result| {
+ join_result
+ .ok()
+ .map(|handle_result| handle_result.err())
+ .flatten()
+ });
- test_file
+ if let Some(e) = join_errors.next() {
+ Err(e)
+ } else {
+ if result.unwrap_or(false) {
+ std::process::exit(1);
+ }
+
+ Ok(())
+ }
}
#[cfg(test)]
@@ -96,9 +411,9 @@ mod tests {
use super::*;
#[test]
- fn test_prepare_test_modules_urls() {
+ fn test_collect_test_module_specifiers() {
let test_data_path = test_util::root_path().join("cli/tests/subdir");
- let mut matched_urls = prepare_test_modules_urls(
+ let mut matched_urls = collect_test_module_specifiers(
vec![
"https://example.com/colors_test.ts".to_string(),
"./mod1.ts".to_string(),
@@ -156,7 +471,7 @@ mod tests {
.join("http");
println!("root {:?}", root);
let mut matched_urls =
- prepare_test_modules_urls(vec![".".to_string()], &root).unwrap();
+ collect_test_module_specifiers(vec![".".to_string()], &root).unwrap();
matched_urls.sort();
let root_url = Url::from_file_path(root).unwrap().to_string();
println!("root_url {}", root_url);
diff --git a/runtime/js/40_testing.js b/runtime/js/40_testing.js
index 4a97f6437..f835a0cf7 100644
--- a/runtime/js/40_testing.js
+++ b/runtime/js/40_testing.js
@@ -3,29 +3,12 @@
((window) => {
const core = window.Deno.core;
- const colors = window.__bootstrap.colors;
const { parsePermissions } = window.__bootstrap.worker;
const { setExitHandler, exit } = window.__bootstrap.os;
const { Console, inspectArgs } = window.__bootstrap.console;
- const { stdout } = window.__bootstrap.files;
const { metrics } = window.__bootstrap.metrics;
const { assert } = window.__bootstrap.util;
- const disabledConsole = new Console(() => {});
-
- function delay(ms) {
- return new Promise((resolve) => {
- setTimeout(resolve, ms);
- });
- }
-
- function formatDuration(time = 0) {
- const gray = colors.maybeColor(colors.gray);
- const italic = colors.maybeColor(colors.italic);
- const timeStr = `(${time}ms)`;
- return gray(italic(timeStr));
- }
-
// Wrap test function in additional assertion that makes sure
// the test case does not leak async "ops" - ie. number of async
// completed ops after the test is the same as number of dispatched
@@ -40,8 +23,9 @@
// Defer until next event loop turn - that way timeouts and intervals
// cleared can actually be removed from resource table, otherwise
// false positives may occur (https://github.com/denoland/deno/issues/4591)
- await delay(0);
+ await new Promise((resolve) => setTimeout(resolve, 0));
}
+
const post = metrics();
// We're checking diff because one might spawn HTTP server in the background
// that will be a pending async op before test starts.
@@ -107,7 +91,7 @@ finishing test case.`;
};
}
- const TEST_REGISTRY = [];
+ const tests = [];
// Main test function provided by Deno, as you can see it merely
// creates a new object with "name" and "fn" fields.
@@ -155,77 +139,26 @@ finishing test case.`;
testDef.fn = assertExit(testDef.fn);
}
- TEST_REGISTRY.push(testDef);
+ tests.push(testDef);
}
- const encoder = new TextEncoder();
-
- function log(msg, noNewLine = false) {
- if (!noNewLine) {
- msg += "\n";
- }
-
- // Using `stdout` here because it doesn't force new lines
- // compared to `console.log`; `core.print` on the other hand
- // is line-buffered and doesn't output message without newline
- stdout.writeSync(encoder.encode(msg));
+ function postTestMessage(kind, data) {
+ return core.opSync("op_post_test_message", { message: { kind, data } });
}
- function reportToConsole(message) {
- const green = colors.maybeColor(colors.green);
- const red = colors.maybeColor(colors.red);
- const yellow = colors.maybeColor(colors.yellow);
- const redFailed = red("FAILED");
- const greenOk = green("ok");
- const yellowIgnored = yellow("ignored");
- if (message.start != null) {
- log(`running ${message.start.tests.length} tests`);
- } else if (message.testStart != null) {
- const { name } = message.testStart;
-
- log(`test ${name} ... `, true);
- return;
- } else if (message.testEnd != null) {
- switch (message.testEnd.status) {
- case "passed":
- log(`${greenOk} ${formatDuration(message.testEnd.duration)}`);
- break;
- case "failed":
- log(`${redFailed} ${formatDuration(message.testEnd.duration)}`);
- break;
- case "ignored":
- log(`${yellowIgnored} ${formatDuration(message.testEnd.duration)}`);
- break;
- }
- } else if (message.end != null) {
- const failures = message.end.results.filter((m) => m.error != null);
- if (failures.length > 0) {
- log(`\nfailures:\n`);
-
- for (const { name, error } of failures) {
- log(name);
- log(inspectArgs([error]));
- log("");
+ function createTestFilter(filter) {
+ return (def) => {
+ if (filter) {
+ if (filter.startsWith("/") && filter.endsWith("/")) {
+ const regex = new RegExp(filter.slice(1, filter.length - 1));
+ return regex.test(def.name);
}
- log(`failures:\n`);
-
- for (const { name } of failures) {
- log(`\t${name}`);
- }
+ return def.name.includes(filter);
}
- log(
- `\ntest result: ${message.end.failed ? redFailed : greenOk}. ` +
- `${message.end.passed} passed; ${message.end.failed} failed; ` +
- `${message.end.ignored} ignored; ${message.end.measured} measured; ` +
- `${message.end.filtered} filtered out ` +
- `${formatDuration(message.end.duration)}\n`,
- );
- if (message.end.usedOnly && message.end.failed == 0) {
- log(`${redFailed} because the "only" option was used\n`);
- }
- }
+ return true;
+ };
}
function pledgeTestPermissions(permissions) {
@@ -239,167 +172,85 @@ finishing test case.`;
core.opSync("op_restore_test_permissions", token);
}
- // TODO(bartlomieju): already implements AsyncGenerator<RunTestsMessage>, but add as "implements to class"
- // TODO(bartlomieju): implements PromiseLike<RunTestsEndResult>
- class TestRunner {
- #usedOnly = false;
-
- constructor(
- tests,
- filterFn,
- failFast,
- ) {
- this.stats = {
- filtered: 0,
- ignored: 0,
- measured: 0,
- passed: 0,
- failed: 0,
- };
- this.filterFn = filterFn;
- this.failFast = failFast;
- const onlyTests = tests.filter(({ only }) => only);
- this.#usedOnly = onlyTests.length > 0;
- const unfilteredTests = this.#usedOnly ? onlyTests : tests;
- this.testsToRun = unfilteredTests.filter(filterFn);
- this.stats.filtered = unfilteredTests.length - this.testsToRun.length;
- }
-
- async *[Symbol.asyncIterator]() {
- yield { start: { tests: this.testsToRun } };
-
- const results = [];
- const suiteStart = +new Date();
-
- for (const test of this.testsToRun) {
- const endMessage = {
- name: test.name,
- duration: 0,
- };
- yield { testStart: { ...test } };
- if (test.ignore) {
- endMessage.status = "ignored";
- this.stats.ignored++;
- } else {
- const start = +new Date();
-
- let token;
- try {
- if (test.permissions) {
- token = pledgeTestPermissions(test.permissions);
- }
-
- await test.fn();
-
- endMessage.status = "passed";
- this.stats.passed++;
- } catch (err) {
- endMessage.status = "failed";
- endMessage.error = err;
- this.stats.failed++;
- } finally {
- // Permissions must always be restored for a clean environment,
- // otherwise the process can end up dropping permissions
- // until there are none left.
- if (token) {
- restoreTestPermissions(token);
- }
- }
-
- endMessage.duration = +new Date() - start;
- }
- results.push(endMessage);
- yield { testEnd: endMessage };
- if (this.failFast && endMessage.error != null) {
- break;
- }
- }
+ async function runTest({ name, ignore, fn, permissions }) {
+ let token = null;
+ const time = Date.now();
- const duration = +new Date() - suiteStart;
+ try {
+ postTestMessage("wait", {
+ name,
+ });
- yield {
- end: { ...this.stats, usedOnly: this.#usedOnly, duration, results },
- };
- }
- }
+ if (permissions) {
+ token = pledgeTestPermissions(permissions);
+ }
- function createFilterFn(
- filter,
- skip,
- ) {
- return (def) => {
- let passes = true;
+ if (ignore) {
+ const duration = Date.now() - time;
+ postTestMessage("result", {
+ name,
+ duration,
+ result: "ignored",
+ });
- if (filter) {
- if (filter instanceof RegExp) {
- passes = passes && filter.test(def.name);
- } else if (filter.startsWith("/") && filter.endsWith("/")) {
- const filterAsRegex = new RegExp(filter.slice(1, filter.length - 1));
- passes = passes && filterAsRegex.test(def.name);
- } else {
- passes = passes && def.name.includes(filter);
- }
+ return;
}
- if (skip) {
- if (skip instanceof RegExp) {
- passes = passes && !skip.test(def.name);
- } else {
- passes = passes && !def.name.includes(skip);
- }
- }
+ await fn();
- return passes;
- };
+ const duration = Date.now() - time;
+ postTestMessage("result", {
+ name,
+ duration,
+ result: "ok",
+ });
+ } catch (error) {
+ const duration = Date.now() - time;
+
+ postTestMessage("result", {
+ name,
+ duration,
+ result: {
+ "failed": inspectArgs([error]),
+ },
+ });
+ } finally {
+ if (token) {
+ restoreTestPermissions(token);
+ }
+ }
}
async function runTests({
- exitOnFail = true,
- failFast = false,
- filter = undefined,
- skip = undefined,
disableLog = false,
- reportToConsole: reportToConsole_ = true,
- onMessage = undefined,
+ filter = null,
} = {}) {
- const filterFn = createFilterFn(filter, skip);
- const testRunner = new TestRunner(TEST_REGISTRY, filterFn, failFast);
-
const originalConsole = globalThis.console;
-
if (disableLog) {
- globalThis.console = disabledConsole;
+ globalThis.console = new Console(() => {});
}
- let endMsg;
+ const only = tests.filter((test) => test.only);
+ const pending = (only.length > 0 ? only : tests).filter(
+ createTestFilter(filter),
+ );
+ postTestMessage("plan", {
+ filtered: tests.length - pending.length,
+ pending: pending.length,
+ only: only.length > 0,
+ });
- for await (const message of testRunner) {
- if (onMessage != null) {
- await onMessage(message);
- }
- if (reportToConsole_) {
- reportToConsole(message);
- }
- if (message.end != null) {
- endMsg = message.end;
- }
+ for (const test of pending) {
+ await runTest(test);
}
if (disableLog) {
globalThis.console = originalConsole;
}
-
- if ((endMsg.failed > 0 || endMsg?.usedOnly) && exitOnFail) {
- exit(1);
- }
-
- return endMsg;
}
window.__bootstrap.internals = {
...window.__bootstrap.internals ?? {},
- reportToConsole,
- createFilterFn,
runTests,
};