author    Asher Gomez <ashersaupingomez@gmail.com>  2024-02-20 00:34:24 +1100
committer GitHub <noreply@github.com>               2024-02-19 06:34:24 -0700
commit    2b279ad630651e973d5a31586f58809f005bc925 (patch)
tree      3e3cbeb4126643c75381dd5422e8603a7488bb8a /tests
parent    eb542bc185c6c4ce1847417a2dfdf04862cd86db (diff)
chore: move `test_util` to `tests/util/server` (#22444)
As discussed with @mmastrac.

---------

Signed-off-by: Asher Gomez <ashersaupingomez@gmail.com>
Co-authored-by: Matt Mastracci <matthew@mastracci.com>
Diffstat (limited to 'tests')
-rw-r--r-- tests/testdata/npm/README.md                       |    2
-rw-r--r-- tests/util/server/Cargo.toml                       |   62
-rw-r--r-- tests/util/server/src/assertions.rs                |   94
-rw-r--r-- tests/util/server/src/builders.rs                  |  997
-rw-r--r-- tests/util/server/src/factory.rs                   |   98
-rw-r--r-- tests/util/server/src/fs.rs                        |  450
-rw-r--r-- tests/util/server/src/https.rs                     |  133
-rw-r--r-- tests/util/server/src/lib.rs                       | 1277
-rw-r--r-- tests/util/server/src/lsp.rs                       | 1104
-rw-r--r-- tests/util/server/src/macros.rs                    |   86
-rw-r--r-- tests/util/server/src/npm.rs                       |  179
-rw-r--r-- tests/util/server/src/pty.rs                       |  770
-rw-r--r-- tests/util/server/src/servers/grpc.rs              |  103
-rw-r--r-- tests/util/server/src/servers/hyper_utils.rs       |  154
-rw-r--r-- tests/util/server/src/servers/mod.rs               | 1536
-rw-r--r-- tests/util/server/src/servers/registry.rs          |  182
-rw-r--r-- tests/util/server/src/servers/ws.rs                |  268
-rw-r--r-- tests/util/server/src/spawn.rs                     |   71
-rw-r--r-- tests/util/server/src/test_server.rs               |    5
-rw-r--r-- tests/util/server/src/testdata/strace_summary.out  |   39
-rw-r--r-- tests/util/server/src/testdata/strace_summary2.out |   37
-rw-r--r-- tests/util/server/src/testdata/strace_summary3.out |   48
-rw-r--r-- tests/util/server/src/testdata/time.out            |   18
-rw-r--r-- tests/util/server/src/testdata/wrk1.txt            |   14
-rw-r--r-- tests/util/server/src/testdata/wrk2.txt            |   13
-rw-r--r-- tests/util/server/src/testdata/wrk3.txt            |   13
26 files changed, 7752 insertions, 1 deletion
diff --git a/tests/testdata/npm/README.md b/tests/testdata/npm/README.md
index ba3f5f771..bb4a3bad3 100644
--- a/tests/testdata/npm/README.md
+++ b/tests/testdata/npm/README.md
@@ -4,7 +4,7 @@ This folder contains test data for npm specifiers.
## Registry
-The registry is served by the test server (server in test_util) at
+The registry is served by the test server (server in `tests/util/server`) at
http://localhost:4545/npm/registry/ via the `./registry` folder.
### Updating with real npm packages
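For context, this registry is what the crate below wires tests to: `env_vars_for_jsr_tests()`/`env_vars_for_npm_tests()` in `tests/util/server/src/lib.rs` set `NPM_CONFIG_REGISTRY` to `http://localhost:4545/npm/registry/`. A minimal sketch of a test that relies on this, assuming the `test_server` crate is available as a dev-dependency; the `main.ts` file, its contents, and the `-A` flag are illustrative only:

// Hypothetical test; `main.ts` is a placeholder written into the temp cwd.
use test_server::TestContextBuilder;

#[test]
fn npm_registry_smoke() {
  // `for_npm()` starts the shared test server and points NPM_CONFIG_REGISTRY
  // at http://localhost:4545/npm/registry/ via `env_vars_for_npm_tests()`.
  let context = TestContextBuilder::for_npm().use_temp_cwd().build();
  context.temp_dir().write("main.ts", "console.log('registry test');");
  context
    .new_command()
    .args("run -A main.ts")
    .run()
    .assert_exit_code(0)
    .skip_output_check();
}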
diff --git a/tests/util/server/Cargo.toml b/tests/util/server/Cargo.toml
new file mode 100644
index 000000000..fbec0e1aa
--- /dev/null
+++ b/tests/util/server/Cargo.toml
@@ -0,0 +1,62 @@
+# Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
+
+[package]
+name = "test_server"
+version = "0.1.0"
+authors.workspace = true
+edition.workspace = true
+license.workspace = true
+publish = false
+repository.workspace = true
+
+[[bin]]
+name = "test_server"
+path = "src/test_server.rs"
+
+[dependencies]
+anyhow.workspace = true
+async-stream = "0.3.3"
+base64.workspace = true
+bytes.workspace = true
+console_static_text.workspace = true
+deno_unsync = "0.3.0"
+denokv_proto.workspace = true
+fastwebsockets.workspace = true
+flate2 = { workspace = true, features = ["default"] }
+futures.workspace = true
+glob.workspace = true
+h2.workspace = true
+http.workspace = true
+http-body-util.workspace = true
+hyper.workspace = true
+hyper-util.workspace = true
+lazy-regex.workspace = true
+libc.workspace = true
+lsp-types.workspace = true
+monch.workspace = true
+nix.workspace = true
+once_cell.workspace = true
+os_pipe.workspace = true
+parking_lot.workspace = true
+pretty_assertions.workspace = true
+prost.workspace = true
+regex.workspace = true
+reqwest.workspace = true
+rustls-pemfile.workspace = true
+rustls-tokio-stream.workspace = true
+semver = "=1.0.14"
+serde.workspace = true
+serde_json.workspace = true
+sha2.workspace = true
+tar.workspace = true
+tempfile.workspace = true
+termcolor.workspace = true
+tokio.workspace = true
+url.workspace = true
+win32job = "2"
+
+[target.'cfg(windows)'.dependencies]
+winapi = { workspace = true, features = ["consoleapi", "synchapi", "handleapi", "namedpipeapi", "winbase", "winerror"] }
+
+[build-dependencies]
+prost-build.workspace = true
diff --git a/tests/util/server/src/assertions.rs b/tests/util/server/src/assertions.rs
new file mode 100644
index 000000000..b9aba9354
--- /dev/null
+++ b/tests/util/server/src/assertions.rs
@@ -0,0 +1,94 @@
+// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
+
+use crate::colors;
+
+#[macro_export]
+macro_rules! assert_starts_with {
+ ($string:expr, $($test:expr),+) => {
+ let string = $string; // This might be a function call or something
+ if !($(string.starts_with($test))||+) {
+ panic!("{:?} does not start with {:?}", string, [$($test),+]);
+ }
+ }
+}
+
+#[macro_export]
+macro_rules! assert_ends_with {
+ ($left:expr, $right:expr $(,)?) => {
+ match (&$left, &$right) {
+ (actual, expected) => {
+ let actual = if expected.len() > actual.len() {
+ actual
+ } else {
+ &actual[actual.len() - expected.len()..]
+ };
+ pretty_assertions::assert_eq!(
+ actual,
+ *expected,
+ "should end with expected."
+ );
+ }
+ }
+ };
+}
+
+#[macro_export]
+macro_rules! assert_contains {
+ ($string:expr, $($test:expr),+ $(,)?) => {
+ let string = &$string; // This might be a function call or something
+ if !($(string.contains($test))||+) {
+ panic!("{:?} does not contain any of {:?}", string, [$($test),+]);
+ }
+ }
+}
+
+#[macro_export]
+macro_rules! assert_not_contains {
+ ($string:expr, $($test:expr),+ $(,)?) => {
+ let string = &$string; // This might be a function call or something
+ if !($(!string.contains($test))||+) {
+ panic!("{:?} contained {:?}", string, [$($test),+]);
+ }
+ }
+}
+
+#[track_caller]
+pub fn assert_wildcard_match(actual: &str, expected: &str) {
+ if !expected.contains("[WILDCARD]") && !expected.contains("[UNORDERED_START]")
+ {
+ pretty_assertions::assert_eq!(actual, expected);
+ } else {
+ match crate::wildcard_match_detailed(expected, actual) {
+ crate::WildcardMatchResult::Success => {
+ // ignore
+ }
+ crate::WildcardMatchResult::Fail(debug_output) => {
+ println!(
+ "{}{}{}",
+ colors::bold("-- "),
+ colors::bold_red("OUTPUT"),
+ colors::bold(" START --"),
+ );
+ println!("{}", actual);
+ println!("{}", colors::bold("-- OUTPUT END --"));
+ println!(
+ "{}{}{}",
+ colors::bold("-- "),
+ colors::bold_green("EXPECTED"),
+ colors::bold(" START --"),
+ );
+ println!("{}", expected);
+ println!("{}", colors::bold("-- EXPECTED END --"));
+ println!(
+ "{}{}{}",
+ colors::bold("-- "),
+ colors::bold_blue("DEBUG"),
+ colors::bold(" START --"),
+ );
+ println!("{debug_output}");
+ println!("{}", colors::bold("-- DEBUG END --"));
+ panic!("pattern match failed");
+ }
+ }
+ }
+}
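A short illustration of how the exported assertion macros above read at a call site; the output string here is made up:

// Illustrative only.
use test_server::{assert_contains, assert_ends_with, assert_not_contains, assert_starts_with};

#[test]
fn assertion_helpers_example() {
  let output = "error: module not found\n";
  assert_starts_with!(output, "error:");
  // Passes when at least one of the listed needles is present.
  assert_contains!(output, "not found", "missing");
  assert_not_contains!(output, "success");
  assert_ends_with!(output, "found\n");
  // `[WILDCARD]` patterns are handled by assert_wildcard_match.
  test_server::assertions::assert_wildcard_match(output, "error: [WILDCARD]\n");
}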
diff --git a/tests/util/server/src/builders.rs b/tests/util/server/src/builders.rs
new file mode 100644
index 000000000..d8c209dd7
--- /dev/null
+++ b/tests/util/server/src/builders.rs
@@ -0,0 +1,997 @@
+// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
+
+use std::cell::RefCell;
+use std::collections::HashMap;
+use std::collections::HashSet;
+use std::ffi::OsStr;
+use std::ffi::OsString;
+use std::io::Read;
+use std::io::Write;
+use std::ops::Deref;
+use std::ops::DerefMut;
+use std::path::Path;
+use std::path::PathBuf;
+use std::process::Child;
+use std::process::Command;
+use std::process::Stdio;
+use std::rc::Rc;
+
+use os_pipe::pipe;
+
+use crate::assertions::assert_wildcard_match;
+use crate::deno_exe_path;
+use crate::denort_exe_path;
+use crate::env_vars_for_jsr_tests;
+use crate::env_vars_for_npm_tests;
+use crate::fs::PathRef;
+use crate::http_server;
+use crate::jsr_registry_unset_url;
+use crate::lsp::LspClientBuilder;
+use crate::npm_registry_unset_url;
+use crate::pty::Pty;
+use crate::strip_ansi_codes;
+use crate::testdata_path;
+use crate::HttpServerGuard;
+use crate::TempDir;
+
+// Gives the developer a nice error message if they have a deno configuration
+// file that will be auto-discovered by the tests and cause a lot of failures.
+static HAS_DENO_JSON_IN_WORKING_DIR_ERR: once_cell::sync::Lazy<Option<String>> =
+ once_cell::sync::Lazy::new(|| {
+ let testdata_path = testdata_path();
+ let mut current_dir = testdata_path.as_path();
+ let deno_json_names = ["deno.json", "deno.jsonc"];
+ loop {
+ for name in deno_json_names {
+ let deno_json_path = current_dir.join(name);
+ if deno_json_path.exists() {
+ return Some(format!(
+ concat!(
+ "Found deno configuration file at {}. The test suite relies on ",
+ "a deno.json not existing in any ancestor directory. Please ",
+ "delete this file so the tests won't auto-discover it.",
+ ),
+ deno_json_path.display(),
+ ));
+ }
+ }
+ if let Some(parent) = current_dir.parent() {
+ current_dir = parent;
+ } else {
+ break;
+ }
+ }
+
+ None
+ });
+
+#[derive(Default)]
+pub struct TestContextBuilder {
+ use_http_server: bool,
+ use_temp_cwd: bool,
+ use_symlinked_temp_dir: bool,
+ /// Copies the files at the specified directory in the "testdata" directory
+ /// to the temp folder and runs the test from there. This is useful when
+ /// the test creates files in the testdata directory (ex. a node_modules folder)
+ copy_temp_dir: Option<String>,
+ temp_dir_path: Option<PathBuf>,
+ cwd: Option<String>,
+ envs: HashMap<String, String>,
+}
+
+impl TestContextBuilder {
+ pub fn new() -> Self {
+ Self::default().add_compile_env_vars()
+ }
+
+ pub fn for_npm() -> Self {
+ Self::new().use_http_server().add_npm_env_vars()
+ }
+
+ pub fn for_jsr() -> Self {
+ Self::new().use_http_server().add_jsr_env_vars()
+ }
+
+ pub fn temp_dir_path(mut self, path: impl AsRef<Path>) -> Self {
+ self.temp_dir_path = Some(path.as_ref().to_path_buf());
+ self
+ }
+
+ pub fn use_http_server(mut self) -> Self {
+ self.use_http_server = true;
+ self
+ }
+
+ pub fn use_temp_cwd(mut self) -> Self {
+ self.use_temp_cwd = true;
+ self
+ }
+
+ /// Causes the temp directory to be symlinked to a target directory
+ /// which is useful for debugging issues that only show up on the CI.
+ ///
+ /// Note: This method is not actually deprecated; the CI does this by
+ /// default, so there is no need to check any code that uses it into
+ /// the repo. This is just for debugging purposes.
+ #[deprecated]
+ pub fn use_symlinked_temp_dir(mut self) -> Self {
+ self.use_symlinked_temp_dir = true;
+ self
+ }
+
+ /// Copies the files at the specified directory in the "testdata" directory
+ /// to the temp folder and runs the test from there. This is useful when
+ /// the test creates files in the testdata directory (ex. a node_modules folder)
+ pub fn use_copy_temp_dir(mut self, dir: impl AsRef<str>) -> Self {
+ self.copy_temp_dir = Some(dir.as_ref().to_string());
+ self
+ }
+
+ pub fn cwd(mut self, cwd: impl AsRef<str>) -> Self {
+ self.cwd = Some(cwd.as_ref().to_string());
+ self
+ }
+
+ pub fn envs<I, K, V>(self, vars: I) -> Self
+ where
+ I: IntoIterator<Item = (K, V)>,
+ K: AsRef<str>,
+ V: AsRef<str>,
+ {
+ let mut this = self;
+ for (key, value) in vars {
+ this = this.env(key, value);
+ }
+ this
+ }
+
+ pub fn env(mut self, key: impl AsRef<str>, value: impl AsRef<str>) -> Self {
+ self
+ .envs
+ .insert(key.as_ref().to_string(), value.as_ref().to_string());
+ self
+ }
+
+ pub fn add_npm_env_vars(mut self) -> Self {
+ for (key, value) in env_vars_for_npm_tests() {
+ self = self.env(key, value);
+ }
+ self
+ }
+
+ pub fn add_compile_env_vars(mut self) -> Self {
+ // The `denort` binary is in the same artifact directory as the `deno` binary.
+ let denort_bin = denort_exe_path();
+ self = self.env("DENORT_BIN", denort_bin.to_string());
+ self
+ }
+
+ pub fn add_future_env_vars(mut self) -> Self {
+ self = self.env("DENO_FUTURE", "1");
+ self
+ }
+
+ pub fn add_jsr_env_vars(mut self) -> Self {
+ for (key, value) in env_vars_for_jsr_tests() {
+ self = self.env(key, value);
+ }
+ self
+ }
+
+ pub fn build(&self) -> TestContext {
+ if let Some(err) = &*HAS_DENO_JSON_IN_WORKING_DIR_ERR {
+ panic!("{}", err);
+ }
+
+ let temp_dir_path = self
+ .temp_dir_path
+ .clone()
+ .unwrap_or_else(std::env::temp_dir);
+ let deno_dir = TempDir::new_in(&temp_dir_path);
+ let temp_dir = TempDir::new_in(&temp_dir_path);
+ let temp_dir = if self.use_symlinked_temp_dir {
+ TempDir::new_symlinked(temp_dir)
+ } else {
+ temp_dir
+ };
+ if let Some(temp_copy_dir) = &self.copy_temp_dir {
+ let test_data_path = testdata_path().join(temp_copy_dir);
+ let temp_copy_dir = temp_dir.path().join(temp_copy_dir);
+ temp_copy_dir.create_dir_all();
+ test_data_path.copy_to_recursive(&temp_copy_dir);
+ }
+
+ let deno_exe = deno_exe_path();
+ println!("deno_exe path {}", deno_exe);
+
+ let http_server_guard = if self.use_http_server {
+ Some(Rc::new(http_server()))
+ } else {
+ None
+ };
+
+ let cwd = if self.use_temp_cwd || self.copy_temp_dir.is_some() {
+ temp_dir.path().to_owned()
+ } else {
+ testdata_path().clone()
+ };
+ let cwd = match &self.cwd {
+ Some(specified_cwd) => cwd.join(specified_cwd),
+ None => cwd,
+ };
+
+ TestContext {
+ cwd,
+ deno_exe,
+ envs: self.envs.clone(),
+ _http_server_guard: http_server_guard,
+ deno_dir,
+ temp_dir,
+ }
+ }
+}
+
+#[derive(Clone)]
+pub struct TestContext {
+ deno_exe: PathRef,
+ envs: HashMap<String, String>,
+ cwd: PathRef,
+ _http_server_guard: Option<Rc<HttpServerGuard>>,
+ deno_dir: TempDir,
+ temp_dir: TempDir,
+}
+
+impl Default for TestContext {
+ fn default() -> Self {
+ TestContextBuilder::default().build()
+ }
+}
+
+impl TestContext {
+ pub fn with_http_server() -> Self {
+ TestContextBuilder::new().use_http_server().build()
+ }
+
+ pub fn deno_dir(&self) -> &TempDir {
+ &self.deno_dir
+ }
+
+ pub fn temp_dir(&self) -> &TempDir {
+ &self.temp_dir
+ }
+
+ pub fn new_command(&self) -> TestCommandBuilder {
+ TestCommandBuilder::new(self.deno_dir.clone())
+ .envs(self.envs.clone())
+ .current_dir(&self.cwd)
+ }
+
+ pub fn new_lsp_command(&self) -> LspClientBuilder {
+ LspClientBuilder::new_with_dir(self.deno_dir.clone())
+ .deno_exe(&self.deno_exe)
+ .set_root_dir(self.temp_dir.path().clone())
+ }
+
+ pub fn run_npm(&self, args: impl AsRef<str>) {
+ self
+ .new_command()
+ .name("npm")
+ .args(args)
+ .run()
+ .skip_output_check();
+ }
+
+ pub fn get_jsr_package_integrity(&self, sub_path: &str) -> String {
+ fn get_checksum(bytes: &[u8]) -> String {
+ use sha2::Digest;
+ let mut hasher = sha2::Sha256::new();
+ hasher.update(bytes);
+ format!("{:x}", hasher.finalize())
+ }
+
+ let url = url::Url::parse(self.envs.get("JSR_URL").unwrap()).unwrap();
+ let url = url.join(&format!("{}_meta.json", sub_path)).unwrap();
+ let bytes = sync_fetch(url);
+ get_checksum(&bytes)
+ }
+}
+
+fn sync_fetch(url: url::Url) -> bytes::Bytes {
+ let runtime = tokio::runtime::Builder::new_current_thread()
+ .enable_io()
+ .enable_time()
+ .build()
+ .unwrap();
+ runtime.block_on(async move {
+ let client = reqwest::Client::new();
+ let response = client.get(url).send().await.unwrap();
+ assert!(response.status().is_success());
+ response.bytes().await.unwrap()
+ })
+}
+
+/// We can't clone an stdio, so if someone clones a DenoCmd,
+/// we want to set this to `Cloned` and show the user a helpful
+/// panic message.
+enum StdioContainer {
+ Cloned,
+ Inner(RefCell<Option<Stdio>>),
+}
+
+impl Clone for StdioContainer {
+ fn clone(&self) -> Self {
+ Self::Cloned
+ }
+}
+
+impl StdioContainer {
+ pub fn new(stdio: Stdio) -> Self {
+ Self::Inner(RefCell::new(Some(stdio)))
+ }
+
+ pub fn take(&self) -> Stdio {
+ match self {
+ StdioContainer::Cloned => panic!("Cannot run a command after it was cloned. You need to reset the stdio value."),
+ StdioContainer::Inner(inner) => {
+ match inner.borrow_mut().take() {
+ Some(value) => value,
+ None => panic!("Cannot run a command that was previously run. You need to reset the stdio value between runs."),
+ }
+ },
+ }
+ }
+}
+
+#[derive(Clone)]
+pub struct TestCommandBuilder {
+ deno_dir: TempDir,
+ stdin: Option<StdioContainer>,
+ stdout: Option<StdioContainer>,
+ stderr: Option<StdioContainer>,
+ stdin_text: Option<String>,
+ command_name: String,
+ cwd: Option<PathRef>,
+ envs: HashMap<String, String>,
+ envs_remove: HashSet<String>,
+ env_clear: bool,
+ args_text: String,
+ args_vec: Vec<String>,
+ split_output: bool,
+ skip_strip_ansi: bool,
+}
+
+impl TestCommandBuilder {
+ pub fn new(deno_dir: TempDir) -> Self {
+ Self {
+ deno_dir,
+ stdin: None,
+ stdout: None,
+ stderr: None,
+ stdin_text: None,
+ split_output: false,
+ skip_strip_ansi: false,
+ cwd: None,
+ envs: Default::default(),
+ envs_remove: Default::default(),
+ env_clear: false,
+ command_name: "deno".to_string(),
+ args_text: "".to_string(),
+ args_vec: Default::default(),
+ }
+ }
+
+ pub fn name(mut self, name: impl AsRef<OsStr>) -> Self {
+ self.command_name = name.as_ref().to_string_lossy().to_string();
+ self
+ }
+
+ pub fn args(mut self, args: impl AsRef<str>) -> Self {
+ self.args_text = args.as_ref().to_string();
+ self
+ }
+
+ pub fn args_vec<I, S>(mut self, args: I) -> Self
+ where
+ I: IntoIterator<Item = S>,
+ S: AsRef<std::ffi::OsStr>,
+ {
+ self.args_vec.extend(
+ args
+ .into_iter()
+ .map(|s| s.as_ref().to_string_lossy().to_string()),
+ );
+ self
+ }
+
+ pub fn arg<S>(mut self, arg: S) -> Self
+ where
+ S: AsRef<std::ffi::OsStr>,
+ {
+ self
+ .args_vec
+ .push(arg.as_ref().to_string_lossy().to_string());
+ self
+ }
+
+ pub fn env_clear(mut self) -> Self {
+ self.env_clear = true;
+ self
+ }
+
+ pub fn envs<I, K, V>(self, vars: I) -> Self
+ where
+ I: IntoIterator<Item = (K, V)>,
+ K: AsRef<std::ffi::OsStr>,
+ V: AsRef<std::ffi::OsStr>,
+ {
+ let mut this = self;
+ for (key, value) in vars {
+ this = this.env(key, value);
+ }
+ this
+ }
+
+ pub fn env<K, V>(mut self, key: K, val: V) -> Self
+ where
+ K: AsRef<std::ffi::OsStr>,
+ V: AsRef<std::ffi::OsStr>,
+ {
+ self.envs.insert(
+ key.as_ref().to_string_lossy().to_string(),
+ val.as_ref().to_string_lossy().to_string(),
+ );
+ self
+ }
+
+ pub fn env_remove<K>(mut self, key: K) -> Self
+ where
+ K: AsRef<std::ffi::OsStr>,
+ {
+ self
+ .envs_remove
+ .insert(key.as_ref().to_string_lossy().to_string());
+ self
+ }
+
+ pub fn skip_strip_ansi(mut self) -> Self {
+ self.skip_strip_ansi = true;
+ self
+ }
+
+ pub fn stdin<T: Into<Stdio>>(mut self, cfg: T) -> Self {
+ self.stdin = Some(StdioContainer::new(cfg.into()));
+ self
+ }
+
+ pub fn stdout<T: Into<Stdio>>(mut self, cfg: T) -> Self {
+ self.stdout = Some(StdioContainer::new(cfg.into()));
+ self
+ }
+
+ pub fn stderr<T: Into<Stdio>>(mut self, cfg: T) -> Self {
+ self.stderr = Some(StdioContainer::new(cfg.into()));
+ self
+ }
+
+ pub fn current_dir<P: AsRef<OsStr>>(mut self, dir: P) -> Self {
+ let dir = dir.as_ref().to_string_lossy().to_string();
+ self.cwd = Some(match self.cwd {
+ Some(current) => current.join(dir),
+ None => PathRef::new(dir),
+ });
+ self
+ }
+
+ pub fn stdin_piped(self) -> Self {
+ self.stdin(std::process::Stdio::piped())
+ }
+
+ pub fn stdout_piped(self) -> Self {
+ self.stdout(std::process::Stdio::piped())
+ }
+
+ pub fn stderr_piped(self) -> Self {
+ self.stderr(std::process::Stdio::piped())
+ }
+
+ pub fn piped_output(self) -> Self {
+ self.stdout_piped().stderr_piped()
+ }
+
+ pub fn stdin_text(mut self, text: impl AsRef<str>) -> Self {
+ self.stdin_text = Some(text.as_ref().to_string());
+ self.stdin_piped()
+ }
+
+ /// Splits the output into stdout and stderr rather than having them combined.
+ pub fn split_output(mut self) -> Self {
+ // Note: it was previously attempted to capture stdout & stderr separately
+ // then forward the output to a combined pipe, but this was found to be
+ // too racy compared to providing the same combined pipe to both.
+ self.split_output = true;
+ self
+ }
+
+ pub fn with_pty(&self, mut action: impl FnMut(Pty)) {
+ if !Pty::is_supported() {
+ return;
+ }
+
+ let args = self.build_args();
+ let args = args.iter().map(|s| s.as_str()).collect::<Vec<_>>();
+ let mut envs = self.build_envs();
+ if !envs.contains_key("NO_COLOR") {
+ // set this by default for pty tests
+ envs.insert("NO_COLOR".to_string(), "1".to_string());
+ }
+
+ // note(dsherret): for some reason I need to inject the current
+ // environment here for the pty tests or else I get dns errors
+ if !self.env_clear {
+ for (key, value) in std::env::vars() {
+ envs.entry(key).or_insert(value);
+ }
+ }
+
+ let cwd = self
+ .cwd
+ .as_ref()
+ .map(PathBuf::from)
+ .unwrap_or_else(|| std::env::current_dir().unwrap());
+ let command_path = self.build_command_path();
+
+ println!("command {} {}", command_path, args.join(" "));
+ println!("command cwd {}", cwd.display());
+ action(Pty::new(command_path.as_path(), &args, &cwd, Some(envs)))
+ }
+
+ pub fn output(&self) -> Result<std::process::Output, std::io::Error> {
+ assert!(self.stdin_text.is_none(), "use spawn instead");
+ self.build_command().output()
+ }
+
+ pub fn status(&self) -> Result<std::process::ExitStatus, std::io::Error> {
+ assert!(self.stdin_text.is_none(), "use spawn instead");
+ self.build_command().status()
+ }
+
+ pub fn spawn(&self) -> Result<DenoChild, std::io::Error> {
+ let child = self.build_command().spawn()?;
+ let mut child = DenoChild {
+ _deno_dir: self.deno_dir.clone(),
+ child,
+ };
+
+ if let Some(input) = &self.stdin_text {
+ let mut p_stdin = child.stdin.take().unwrap();
+ write!(p_stdin, "{input}").unwrap();
+ }
+
+ Ok(child)
+ }
+
+ pub fn spawn_with_piped_output(&self) -> DenoChild {
+ self.clone().piped_output().spawn().unwrap()
+ }
+
+ pub fn run(&self) -> TestCommandOutput {
+ fn read_pipe_to_string(mut pipe: os_pipe::PipeReader) -> String {
+ let mut output = String::new();
+ pipe.read_to_string(&mut output).unwrap();
+ output
+ }
+
+ fn sanitize_output(
+ mut text: String,
+ args: &[OsString],
+ skip_strip_ansi: bool,
+ ) -> String {
+ if !skip_strip_ansi {
+ text = strip_ansi_codes(&text).to_string();
+ }
+ // deno test's output capturing flushes with a zero-width space in order to
+ // synchronize the output pipes. Occasionally this zero width space
+ // might end up in the output so strip it from the output comparison here.
+ if args.first().and_then(|s| s.to_str()) == Some("test") {
+ text = text.replace('\u{200B}', "");
+ }
+ text
+ }
+
+ let mut command = self.build_command();
+ let args = command
+ .get_args()
+ .map(ToOwned::to_owned)
+ .collect::<Vec<_>>();
+ let (combined_reader, std_out_err_handle) = if self.split_output {
+ let (stdout_reader, stdout_writer) = pipe().unwrap();
+ let (stderr_reader, stderr_writer) = pipe().unwrap();
+ command.stdout(stdout_writer);
+ command.stderr(stderr_writer);
+ (
+ None,
+ Some((
+ std::thread::spawn(move || read_pipe_to_string(stdout_reader)),
+ std::thread::spawn(move || read_pipe_to_string(stderr_reader)),
+ )),
+ )
+ } else {
+ let (combined_reader, combined_writer) = pipe().unwrap();
+ command.stdout(combined_writer.try_clone().unwrap());
+ command.stderr(combined_writer);
+ (Some(combined_reader), None)
+ };
+
+ let mut process = command.spawn().expect("Failed spawning command");
+
+ if let Some(input) = &self.stdin_text {
+ let mut p_stdin = process.stdin.take().unwrap();
+ write!(p_stdin, "{input}").unwrap();
+ }
+
+ // This parent process is still holding its copies of the write ends,
+ // and we have to close them before we read, otherwise the read end
+ // will never report EOF. The Command object owns the writers now,
+ // and dropping it closes them.
+ drop(command);
+
+ let combined = combined_reader.map(|pipe| {
+ sanitize_output(read_pipe_to_string(pipe), &args, self.skip_strip_ansi)
+ });
+
+ let status = process.wait().unwrap();
+ let std_out_err = std_out_err_handle.map(|(stdout, stderr)| {
+ (
+ sanitize_output(stdout.join().unwrap(), &args, self.skip_strip_ansi),
+ sanitize_output(stderr.join().unwrap(), &args, self.skip_strip_ansi),
+ )
+ });
+ let exit_code = status.code();
+ #[cfg(unix)]
+ let signal = {
+ use std::os::unix::process::ExitStatusExt;
+ status.signal()
+ };
+ #[cfg(not(unix))]
+ let signal = None;
+
+ TestCommandOutput {
+ exit_code,
+ signal,
+ combined,
+ std_out_err,
+ asserted_exit_code: RefCell::new(false),
+ asserted_stdout: RefCell::new(false),
+ asserted_stderr: RefCell::new(false),
+ asserted_combined: RefCell::new(false),
+ _deno_dir: self.deno_dir.clone(),
+ }
+ }
+
+ fn build_command(&self) -> Command {
+ let command_path = self.build_command_path();
+ let args = self.build_args();
+ println!("command {} {}", command_path, args.join(" "));
+ let mut command = Command::new(command_path);
+ if let Some(cwd) = &self.cwd {
+ println!("command cwd {}", cwd);
+ command.current_dir(cwd);
+ }
+ if let Some(stdin) = &self.stdin {
+ command.stdin(stdin.take());
+ }
+ if let Some(stdout) = &self.stdout {
+ command.stdout(stdout.take());
+ }
+ if let Some(stderr) = &self.stderr {
+ command.stderr(stderr.take());
+ }
+
+ command.args(args.iter());
+ if self.env_clear {
+ command.env_clear();
+ }
+ let envs = self.build_envs();
+ command.envs(envs);
+ command.stdin(Stdio::piped());
+ command
+ }
+
+ fn build_command_path(&self) -> PathRef {
+ let command_name = if cfg!(windows) && self.command_name == "npm" {
+ "npm.cmd"
+ } else {
+ &self.command_name
+ };
+ if command_name == "deno" {
+ deno_exe_path()
+ } else {
+ PathRef::new(PathBuf::from(command_name))
+ }
+ }
+
+ fn build_args(&self) -> Vec<String> {
+ if self.args_vec.is_empty() {
+ std::borrow::Cow::Owned(
+ self
+ .args_text
+ .split_whitespace()
+ .map(|s| s.to_string())
+ .collect::<Vec<_>>(),
+ )
+ } else {
+ assert!(
+ self.args_text.is_empty(),
+ "Do not provide args when providing args_vec."
+ );
+ std::borrow::Cow::Borrowed(&self.args_vec)
+ }
+ .iter()
+ .map(|arg| arg.replace("$TESTDATA", &testdata_path().to_string_lossy()))
+ .collect::<Vec<_>>()
+ }
+
+ fn build_envs(&self) -> HashMap<String, String> {
+ let mut envs = self.envs.clone();
+ if !envs.contains_key("DENO_DIR") {
+ envs.insert("DENO_DIR".to_string(), self.deno_dir.path().to_string());
+ }
+ if !envs.contains_key("NPM_CONFIG_REGISTRY") {
+ envs.insert("NPM_CONFIG_REGISTRY".to_string(), npm_registry_unset_url());
+ }
+ if !envs.contains_key("DENO_NO_UPDATE_CHECK") {
+ envs.insert("DENO_NO_UPDATE_CHECK".to_string(), "1".to_string());
+ }
+ if !envs.contains_key("JSR_URL") {
+ envs.insert("JSR_URL".to_string(), jsr_registry_unset_url());
+ }
+ for key in &self.envs_remove {
+ envs.remove(key);
+ }
+ envs
+ }
+}
+
+pub struct DenoChild {
+ // keep alive for the duration of the use of this struct
+ _deno_dir: TempDir,
+ child: Child,
+}
+
+impl Deref for DenoChild {
+ type Target = Child;
+ fn deref(&self) -> &Child {
+ &self.child
+ }
+}
+
+impl DerefMut for DenoChild {
+ fn deref_mut(&mut self) -> &mut Child {
+ &mut self.child
+ }
+}
+
+impl DenoChild {
+ pub fn wait_with_output(
+ self,
+ ) -> Result<std::process::Output, std::io::Error> {
+ self.child.wait_with_output()
+ }
+}
+
+pub struct TestCommandOutput {
+ combined: Option<String>,
+ std_out_err: Option<(String, String)>,
+ exit_code: Option<i32>,
+ signal: Option<i32>,
+ asserted_stdout: RefCell<bool>,
+ asserted_stderr: RefCell<bool>,
+ asserted_combined: RefCell<bool>,
+ asserted_exit_code: RefCell<bool>,
+ // keep alive for the duration of the output reference
+ _deno_dir: TempDir,
+}
+
+impl Drop for TestCommandOutput {
+ // assert that the output and exit code were asserted
+ fn drop(&mut self) {
+ fn panic_unasserted_output(text: &str) {
+ println!("OUTPUT\n{text}\nOUTPUT");
+ panic!(concat!(
+ "The non-empty text of the command was not asserted. ",
+ "Call `output.skip_output_check()` to skip if necessary.",
+ ),);
+ }
+
+ if std::thread::panicking() {
+ return;
+ }
+
+ // either the combined output needs to be asserted or both stdout and stderr
+ if let Some(combined) = &self.combined {
+ if !*self.asserted_combined.borrow() && !combined.is_empty() {
+ panic_unasserted_output(combined);
+ }
+ }
+ if let Some((stdout, stderr)) = &self.std_out_err {
+ if !*self.asserted_stdout.borrow() && !stdout.is_empty() {
+ panic_unasserted_output(stdout);
+ }
+ if !*self.asserted_stderr.borrow() && !stderr.is_empty() {
+ panic_unasserted_output(stderr);
+ }
+ }
+
+ // now ensure the exit code was asserted
+ if !*self.asserted_exit_code.borrow() && self.exit_code != Some(0) {
+ self.print_output();
+ panic!(
+ "The non-zero exit code of the command was not asserted: {:?}",
+ self.exit_code,
+ )
+ }
+ }
+}
+
+impl TestCommandOutput {
+ pub fn skip_output_check(&self) -> &Self {
+ *self.asserted_combined.borrow_mut() = true;
+ self.skip_stdout_check();
+ self.skip_stderr_check();
+ self
+ }
+
+ pub fn skip_stdout_check(&self) -> &Self {
+ *self.asserted_stdout.borrow_mut() = true;
+ self
+ }
+
+ pub fn skip_stderr_check(&self) -> &Self {
+ *self.asserted_stderr.borrow_mut() = true;
+ self
+ }
+
+ pub fn skip_exit_code_check(&self) -> &Self {
+ *self.asserted_exit_code.borrow_mut() = true;
+ self
+ }
+
+ pub fn exit_code(&self) -> Option<i32> {
+ self.skip_exit_code_check();
+ self.exit_code
+ }
+
+ pub fn signal(&self) -> Option<i32> {
+ self.signal
+ }
+
+ pub fn combined_output(&self) -> &str {
+ self.skip_output_check();
+ self
+ .combined
+ .as_deref()
+ .expect("not available since .split_output() was called")
+ }
+
+ pub fn stdout(&self) -> &str {
+ *self.asserted_stdout.borrow_mut() = true;
+ self
+ .std_out_err
+ .as_ref()
+ .map(|(stdout, _)| stdout.as_str())
+ .expect("call .split_output() on the builder")
+ }
+
+ pub fn stderr(&self) -> &str {
+ *self.asserted_stderr.borrow_mut() = true;
+ self
+ .std_out_err
+ .as_ref()
+ .map(|(_, stderr)| stderr.as_str())
+ .expect("call .split_output() on the builder")
+ }
+
+ #[track_caller]
+ pub fn assert_exit_code(&self, expected_exit_code: i32) -> &Self {
+ let actual_exit_code = self.exit_code();
+
+ if let Some(exit_code) = &actual_exit_code {
+ if *exit_code != expected_exit_code {
+ self.print_output();
+ panic!(
+ "bad exit code, expected: {:?}, actual: {:?}",
+ expected_exit_code, exit_code,
+ );
+ }
+ } else {
+ self.print_output();
+ if let Some(signal) = self.signal() {
+ panic!(
+ "process terminated by signal, expected exit code: {:?}, actual signal: {:?}",
+ actual_exit_code,
+ signal,
+ );
+ } else {
+ panic!(
+ "process terminated without status code on non unix platform, expected exit code: {:?}",
+ actual_exit_code,
+ );
+ }
+ }
+
+ self
+ }
+
+ pub fn print_output(&self) {
+ if let Some(combined) = &self.combined {
+ println!("OUTPUT\n{combined}\nOUTPUT");
+ } else if let Some((stdout, stderr)) = &self.std_out_err {
+ println!("STDOUT OUTPUT\n{stdout}\nSTDOUT OUTPUT");
+ println!("STDERR OUTPUT\n{stderr}\nSTDERR OUTPUT");
+ }
+ }
+
+ #[track_caller]
+ pub fn assert_matches_text(&self, expected_text: impl AsRef<str>) -> &Self {
+ self.inner_assert_matches_text(self.combined_output(), expected_text)
+ }
+
+ #[track_caller]
+ pub fn assert_matches_file(&self, file_path: impl AsRef<Path>) -> &Self {
+ self.inner_assert_matches_file(self.combined_output(), file_path)
+ }
+
+ #[track_caller]
+ pub fn assert_stdout_matches_text(
+ &self,
+ expected_text: impl AsRef<str>,
+ ) -> &Self {
+ self.inner_assert_matches_text(self.stdout(), expected_text)
+ }
+
+ #[track_caller]
+ pub fn assert_stdout_matches_file(
+ &self,
+ file_path: impl AsRef<Path>,
+ ) -> &Self {
+ self.inner_assert_matches_file(self.stdout(), file_path)
+ }
+
+ #[track_caller]
+ pub fn assert_stderr_matches_text(
+ &self,
+ expected_text: impl AsRef<str>,
+ ) -> &Self {
+ self.inner_assert_matches_text(self.stderr(), expected_text)
+ }
+
+ #[track_caller]
+ pub fn assert_stderr_matches_file(
+ &self,
+ file_path: impl AsRef<Path>,
+ ) -> &Self {
+ self.inner_assert_matches_file(self.stderr(), file_path)
+ }
+
+ #[track_caller]
+ fn inner_assert_matches_text(
+ &self,
+ actual: &str,
+ expected: impl AsRef<str>,
+ ) -> &Self {
+ assert_wildcard_match(actual, expected.as_ref());
+ self
+ }
+
+ #[track_caller]
+ fn inner_assert_matches_file(
+ &self,
+ actual: &str,
+ file_path: impl AsRef<Path>,
+ ) -> &Self {
+ let output_path = testdata_path().join(file_path);
+ println!("output path {}", output_path);
+ let expected_text = output_path.read_to_string();
+ self.inner_assert_matches_text(actual, expected_text)
+ }
+}
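For orientation, a minimal sketch of how the `TestContext` and `TestCommandBuilder` API above is driven from an integration test; the `hello.ts` file and its expected output are illustrative:

// Illustrative only; `hello.ts` is a placeholder written into the temp cwd.
use test_server::TestContextBuilder;

#[test]
fn builder_api_example() {
  let context = TestContextBuilder::new().use_temp_cwd().build();
  context.temp_dir().write("hello.ts", "console.log('hi');");
  let output = context.new_command().args("run hello.ts").run();
  output.assert_exit_code(0);
  // The leading [WILDCARD] tolerates any preamble printed before the log line.
  output.assert_matches_text("[WILDCARD]hi\n");
}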
diff --git a/tests/util/server/src/factory.rs b/tests/util/server/src/factory.rs
new file mode 100644
index 000000000..5b796fbc1
--- /dev/null
+++ b/tests/util/server/src/factory.rs
@@ -0,0 +1,98 @@
+// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
+use glob::glob;
+use std::collections::HashSet;
+use std::path::PathBuf;
+
+/// Generate a unit test factory verified and backed by a glob.
+#[macro_export]
+macro_rules! unit_test_factory {
+ ($test_fn:ident, $base:literal, $glob:literal, [ $( $test:ident $(= $($path:ident)/+)? ),+ $(,)? ]) => {
+ #[test]
+ fn check_test_glob() {
+ $crate::factory::check_test_glob($base, $glob, [ $( ( stringify!($test), stringify!( $( $($path)/+ )? ) ) ),+ ].as_slice());
+ }
+
+ $(
+ #[allow(non_snake_case)]
+ #[test]
+ fn $test() {
+ $test_fn($crate::factory::get_path(stringify!($test), stringify!( $( $($path)/+ )?)))
+ }
+ )+
+ };
+ (__test__ $($prefix:ident)* $test:ident) => {
+ #[allow(non_snake_case)]
+ #[test]
+ fn $test() {
+ $test_fn(stringify!($($prefix)/+ $test))
+ }
+ };
+}
+
+pub fn get_path(test: &'static str, path: &'static str) -> String {
+ if path.is_empty() {
+ test.to_owned()
+ } else {
+ path.replace(' ', "")
+ }
+}
+
+/// Validate that the glob matches the list of tests specified.
+pub fn check_test_glob(
+ base: &'static str,
+ glob_pattern: &'static str,
+ files: &[(&'static str, &'static str)],
+) {
+ let base_dir = PathBuf::from(base)
+ .canonicalize()
+ .unwrap()
+ .to_string_lossy()
+ // Strip Windows slashes
+ .replace('\\', "/");
+ let mut found = HashSet::new();
+ let mut list = vec![];
+ for file in glob(&format!("{}/{}", base, glob_pattern))
+ .expect("Failed to read test path")
+ {
+ let mut file = file
+ .expect("Invalid file from glob")
+ .canonicalize()
+ .unwrap();
+ file.set_extension("");
+ let name = file.file_name().unwrap().to_string_lossy();
+ // Strip windows slashes
+ let file = file.to_string_lossy().replace('\\', "/");
+ let file = file
+ .strip_prefix(&base_dir)
+ .expect("File {file} did not start with {base_dir} prefix");
+ let file = file.strip_prefix('/').unwrap().to_owned();
+ if file.contains('/') {
+ list.push(format!("{}={}", name, file))
+ } else {
+ list.push(file.clone());
+ }
+ found.insert(file);
+ }
+
+ let mut error = false;
+ for (test, path) in files {
+ // Remove spaces from the macro
+ let path = if path.is_empty() {
+ (*test).to_owned()
+ } else {
+ path.replace(' ', "")
+ };
+ if found.contains(&path) {
+ found.remove(&path);
+ } else {
+ error = true;
+ }
+ }
+
+ if error || !found.is_empty() {
+ panic!(
+ "Glob did not match provided list of files. Expected: \n[\n {}\n]",
+ list.join(",\n ")
+ );
+ }
+}
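A sketch of how `unit_test_factory!` is intended to be invoked; the `tests/specs` base directory, the glob, and the test names are hypothetical:

// Hypothetical layout: assumes tests/specs/basic.ts and tests/specs/sub/nested.ts exist.
fn run_spec(path: String) {
  // ... locate and execute the spec file identified by `path` ...
  let _ = path;
}

// Expands to #[test] fns `basic` and `nested`, plus a `check_test_glob` test
// that fails when this list drifts out of sync with the files the glob matches.
test_server::unit_test_factory!(run_spec, "tests/specs", "**/*.ts", [
  basic,
  nested = sub / nested,
]);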
diff --git a/tests/util/server/src/fs.rs b/tests/util/server/src/fs.rs
new file mode 100644
index 000000000..0e47a7503
--- /dev/null
+++ b/tests/util/server/src/fs.rs
@@ -0,0 +1,450 @@
+// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
+
+use pretty_assertions::assert_eq;
+use std::borrow::Cow;
+use std::ffi::OsStr;
+use std::fs;
+use std::path::Path;
+use std::path::PathBuf;
+use std::process::Command;
+use std::sync::Arc;
+
+use anyhow::Context;
+use lsp_types::Url;
+use serde::de::DeserializeOwned;
+use serde::Serialize;
+
+use crate::assertions::assert_wildcard_match;
+use crate::testdata_path;
+
+/// Represents a path on the file system, which can be used
+/// to perform specific actions.
+#[derive(Clone, Debug, Default, PartialEq, Eq)]
+pub struct PathRef(PathBuf);
+
+impl AsRef<Path> for PathRef {
+ fn as_ref(&self) -> &Path {
+ self.as_path()
+ }
+}
+
+impl AsRef<OsStr> for PathRef {
+ fn as_ref(&self) -> &OsStr {
+ self.as_path().as_ref()
+ }
+}
+
+impl std::fmt::Display for PathRef {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ write!(f, "{}", self.as_path().display())
+ }
+}
+
+impl PathRef {
+ pub fn new(path: impl AsRef<Path>) -> Self {
+ Self(path.as_ref().to_path_buf())
+ }
+
+ pub fn parent(&self) -> PathRef {
+ PathRef(self.as_path().parent().unwrap().to_path_buf())
+ }
+
+ pub fn uri_dir(&self) -> Url {
+ Url::from_directory_path(self.as_path()).unwrap()
+ }
+
+ pub fn uri_file(&self) -> Url {
+ Url::from_file_path(self.as_path()).unwrap()
+ }
+
+ pub fn as_path(&self) -> &Path {
+ self.0.as_path()
+ }
+
+ pub fn to_path_buf(&self) -> PathBuf {
+ self.0.to_path_buf()
+ }
+
+ pub fn to_string_lossy(&self) -> Cow<str> {
+ self.0.to_string_lossy()
+ }
+
+ pub fn exists(&self) -> bool {
+ self.0.exists()
+ }
+
+ pub fn try_exists(&self) -> std::io::Result<bool> {
+ self.0.try_exists()
+ }
+
+ pub fn is_dir(&self) -> bool {
+ self.0.is_dir()
+ }
+
+ pub fn is_file(&self) -> bool {
+ self.0.is_file()
+ }
+
+ pub fn join(&self, path: impl AsRef<Path>) -> PathRef {
+ PathRef(self.as_path().join(path))
+ }
+
+ pub fn with_extension(&self, ext: impl AsRef<OsStr>) -> PathRef {
+ PathRef(self.as_path().with_extension(ext))
+ }
+
+ pub fn canonicalize(&self) -> PathRef {
+ PathRef(strip_unc_prefix(self.as_path().canonicalize().unwrap()))
+ }
+
+ pub fn create_dir_all(&self) {
+ fs::create_dir_all(self).unwrap();
+ }
+
+ pub fn remove_file(&self) {
+ fs::remove_file(self).unwrap();
+ }
+
+ pub fn remove_dir_all(&self) {
+ fs::remove_dir_all(self).unwrap();
+ }
+
+ pub fn read_to_string(&self) -> String {
+ self.read_to_string_if_exists().unwrap()
+ }
+
+ pub fn read_to_string_if_exists(&self) -> Result<String, anyhow::Error> {
+ fs::read_to_string(self)
+ .with_context(|| format!("Could not read file: {}", self))
+ }
+
+ pub fn read_to_bytes_if_exists(&self) -> Result<Vec<u8>, anyhow::Error> {
+ fs::read(self).with_context(|| format!("Could not read file: {}", self))
+ }
+
+ pub fn read_json<TValue: DeserializeOwned>(&self) -> TValue {
+ serde_json::from_str(&self.read_to_string()).unwrap()
+ }
+
+ pub fn read_json_value(&self) -> serde_json::Value {
+ serde_json::from_str(&self.read_to_string()).unwrap()
+ }
+
+ pub fn rename(&self, to: impl AsRef<Path>) {
+ fs::rename(self, self.join(to)).unwrap();
+ }
+
+ pub fn write(&self, text: impl AsRef<str>) {
+ fs::write(self, text.as_ref()).unwrap();
+ }
+
+ pub fn write_json<TValue: Serialize>(&self, value: &TValue) {
+ let text = serde_json::to_string_pretty(value).unwrap();
+ self.write(text);
+ }
+
+ pub fn symlink_dir(
+ &self,
+ oldpath: impl AsRef<Path>,
+ newpath: impl AsRef<Path>,
+ ) {
+ #[cfg(unix)]
+ {
+ use std::os::unix::fs::symlink;
+ symlink(self.as_path().join(oldpath), self.as_path().join(newpath))
+ .unwrap();
+ }
+ #[cfg(not(unix))]
+ {
+ use std::os::windows::fs::symlink_dir;
+ symlink_dir(self.as_path().join(oldpath), self.as_path().join(newpath))
+ .unwrap();
+ }
+ }
+
+ pub fn symlink_file(
+ &self,
+ oldpath: impl AsRef<Path>,
+ newpath: impl AsRef<Path>,
+ ) {
+ #[cfg(unix)]
+ {
+ use std::os::unix::fs::symlink;
+ symlink(self.as_path().join(oldpath), self.as_path().join(newpath))
+ .unwrap();
+ }
+ #[cfg(not(unix))]
+ {
+ use std::os::windows::fs::symlink_file;
+ symlink_file(self.as_path().join(oldpath), self.as_path().join(newpath))
+ .unwrap();
+ }
+ }
+
+ pub fn read_dir(&self) -> fs::ReadDir {
+ fs::read_dir(self.as_path())
+ .with_context(|| format!("Reading {}", self.as_path().display()))
+ .unwrap()
+ }
+
+ pub fn copy(&self, to: &impl AsRef<Path>) {
+ std::fs::copy(self.as_path(), to)
+ .with_context(|| format!("Copying {} to {}", self, to.as_ref().display()))
+ .unwrap();
+ }
+
+ /// Copies this directory to another directory.
+ ///
+ /// Note: Does not handle symlinks.
+ pub fn copy_to_recursive(&self, to: &PathRef) {
+ to.create_dir_all();
+ let read_dir = self.read_dir();
+
+ for entry in read_dir {
+ let entry = entry.unwrap();
+ let file_type = entry.file_type().unwrap();
+ let new_from = self.join(entry.file_name());
+ let new_to = to.join(entry.file_name());
+
+ if file_type.is_dir() {
+ new_from.copy_to_recursive(&new_to);
+ } else if file_type.is_file() {
+ new_from.copy(&new_to);
+ }
+ }
+ }
+
+ pub fn make_dir_readonly(&self) {
+ self.create_dir_all();
+ if cfg!(windows) {
+ Command::new("attrib").arg("+r").arg(self).output().unwrap();
+ } else if cfg!(unix) {
+ Command::new("chmod").arg("555").arg(self).output().unwrap();
+ }
+ }
+
+ #[track_caller]
+ pub fn assert_matches_file(&self, wildcard_file: impl AsRef<Path>) -> &Self {
+ let wildcard_file = testdata_path().join(wildcard_file);
+ println!("output path {}", wildcard_file);
+ let expected_text = wildcard_file.read_to_string();
+ self.assert_matches_text(&expected_text)
+ }
+
+ #[track_caller]
+ pub fn assert_matches_text(&self, wildcard_text: impl AsRef<str>) -> &Self {
+ let actual = self.read_to_string();
+ assert_wildcard_match(&actual, wildcard_text.as_ref());
+ self
+ }
+
+ #[track_caller]
+ pub fn assert_matches_json(&self, expected: serde_json::Value) {
+ let actual_json = self.read_json_value();
+ if actual_json != expected {
+ let actual_text = serde_json::to_string_pretty(&actual_json).unwrap();
+ let expected_text = serde_json::to_string_pretty(&expected).unwrap();
+ assert_eq!(actual_text, expected_text);
+ }
+ }
+}
+
+#[cfg(not(windows))]
+#[inline]
+fn strip_unc_prefix(path: PathBuf) -> PathBuf {
+ path
+}
+
+/// Strips the unc prefix (ex. \\?\) from Windows paths.
+///
+/// Lifted from deno_core for use in the tests.
+#[cfg(windows)]
+fn strip_unc_prefix(path: PathBuf) -> PathBuf {
+ use std::path::Component;
+ use std::path::Prefix;
+
+ let mut components = path.components();
+ match components.next() {
+ Some(Component::Prefix(prefix)) => {
+ match prefix.kind() {
+ // \\?\device
+ Prefix::Verbatim(device) => {
+ let mut path = PathBuf::new();
+ path.push(format!(r"\\{}\", device.to_string_lossy()));
+ path.extend(components.filter(|c| !matches!(c, Component::RootDir)));
+ path
+ }
+ // \\?\c:\path
+ Prefix::VerbatimDisk(_) => {
+ let mut path = PathBuf::new();
+ path.push(prefix.as_os_str().to_string_lossy().replace(r"\\?\", ""));
+ path.extend(components);
+ path
+ }
+ // \\?\UNC\hostname\share_name\path
+ Prefix::VerbatimUNC(hostname, share_name) => {
+ let mut path = PathBuf::new();
+ path.push(format!(
+ r"\\{}\{}\",
+ hostname.to_string_lossy(),
+ share_name.to_string_lossy()
+ ));
+ path.extend(components.filter(|c| !matches!(c, Component::RootDir)));
+ path
+ }
+ _ => path,
+ }
+ }
+ _ => path,
+ }
+}
+
+enum TempDirInner {
+ TempDir {
+ path_ref: PathRef,
+ // kept alive for the duration of the temp dir
+ _dir: tempfile::TempDir,
+ },
+ Path(PathRef),
+ Symlinked {
+ symlink: Arc<TempDirInner>,
+ target: Arc<TempDirInner>,
+ },
+}
+
+impl TempDirInner {
+ pub fn path(&self) -> &PathRef {
+ match self {
+ Self::Path(path_ref) => path_ref,
+ Self::TempDir { path_ref, .. } => path_ref,
+ Self::Symlinked { symlink, .. } => symlink.path(),
+ }
+ }
+
+ pub fn target_path(&self) -> &PathRef {
+ match self {
+ TempDirInner::Symlinked { target, .. } => target.target_path(),
+ _ => self.path(),
+ }
+ }
+}
+
+impl Drop for TempDirInner {
+ fn drop(&mut self) {
+ if let Self::Path(path) = self {
+ _ = fs::remove_dir_all(path);
+ }
+ }
+}
+
+/// For creating temporary directories in tests.
+///
+/// This was done because `tempfiles::TempDir` was very slow on Windows.
+///
+/// Note: Do not use this in actual code as this does not protect against
+/// "insecure temporary file" security vulnerabilities.
+#[derive(Clone)]
+pub struct TempDir(Arc<TempDirInner>);
+
+impl Default for TempDir {
+ fn default() -> Self {
+ Self::new()
+ }
+}
+
+impl TempDir {
+ pub fn new() -> Self {
+ Self::new_inner(&std::env::temp_dir(), None)
+ }
+
+ pub fn new_with_prefix(prefix: &str) -> Self {
+ Self::new_inner(&std::env::temp_dir(), Some(prefix))
+ }
+
+ pub fn new_in(parent_dir: &Path) -> Self {
+ Self::new_inner(parent_dir, None)
+ }
+
+ pub fn new_with_path(path: &Path) -> Self {
+ Self(Arc::new(TempDirInner::Path(PathRef(path.to_path_buf()))))
+ }
+
+ pub fn new_symlinked(target: TempDir) -> Self {
+ let target_path = target.path();
+ let path = target_path.parent().join(format!(
+ "{}_symlinked",
+ target_path.as_path().file_name().unwrap().to_str().unwrap()
+ ));
+ target.symlink_dir(target.path(), &path);
+ TempDir(Arc::new(TempDirInner::Symlinked {
+ target: target.0,
+ symlink: Self::new_with_path(path.as_path()).0,
+ }))
+ }
+
+ /// Create a new temporary directory with the given prefix as part of its name, if specified.
+ fn new_inner(parent_dir: &Path, prefix: Option<&str>) -> Self {
+ let mut builder = tempfile::Builder::new();
+ builder.prefix(prefix.unwrap_or("deno-cli-test"));
+ let dir = builder
+ .tempdir_in(parent_dir)
+ .expect("Failed to create a temporary directory");
+ Self(Arc::new(TempDirInner::TempDir {
+ path_ref: PathRef(dir.path().to_path_buf()),
+ _dir: dir,
+ }))
+ }
+
+ pub fn uri(&self) -> Url {
+ Url::from_directory_path(self.path()).unwrap()
+ }
+
+ pub fn path(&self) -> &PathRef {
+ self.0.path()
+ }
+
+ /// The resolved final target path if this is a symlink.
+ pub fn target_path(&self) -> &PathRef {
+ self.0.target_path()
+ }
+
+ pub fn create_dir_all(&self, path: impl AsRef<Path>) {
+ self.target_path().join(path).create_dir_all()
+ }
+
+ pub fn remove_file(&self, path: impl AsRef<Path>) {
+ self.target_path().join(path).remove_file()
+ }
+
+ pub fn remove_dir_all(&self, path: impl AsRef<Path>) {
+ self.target_path().join(path).remove_dir_all()
+ }
+
+ pub fn read_to_string(&self, path: impl AsRef<Path>) -> String {
+ self.target_path().join(path).read_to_string()
+ }
+
+ pub fn rename(&self, from: impl AsRef<Path>, to: impl AsRef<Path>) {
+ self.target_path().join(from).rename(to)
+ }
+
+ pub fn write(&self, path: impl AsRef<Path>, text: impl AsRef<str>) {
+ self.target_path().join(path).write(text)
+ }
+
+ pub fn symlink_dir(
+ &self,
+ oldpath: impl AsRef<Path>,
+ newpath: impl AsRef<Path>,
+ ) {
+ self.target_path().symlink_dir(oldpath, newpath)
+ }
+
+ pub fn symlink_file(
+ &self,
+ oldpath: impl AsRef<Path>,
+ newpath: impl AsRef<Path>,
+ ) {
+ self.target_path().symlink_file(oldpath, newpath)
+ }
+}
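A small sketch of the `TempDir` and `PathRef` helpers above in use; the file name and JSON content are made up:

// Illustrative only.
use test_server::TempDir;

#[test]
fn temp_dir_example() {
  let temp = TempDir::new();
  temp.write("config.json", r#"{ "enabled": true }"#);
  let config = temp.path().join("config.json");
  assert!(config.is_file());
  config.assert_matches_json(serde_json::json!({ "enabled": true }));
  // The backing directory is removed when the last clone of `temp` is dropped.
}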
diff --git a/tests/util/server/src/https.rs b/tests/util/server/src/https.rs
new file mode 100644
index 000000000..576df6d52
--- /dev/null
+++ b/tests/util/server/src/https.rs
@@ -0,0 +1,133 @@
+// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
+use anyhow::anyhow;
+use futures::Stream;
+use futures::StreamExt;
+use rustls::Certificate;
+use rustls::PrivateKey;
+use rustls_tokio_stream::rustls;
+use rustls_tokio_stream::TlsStream;
+use std::io;
+use std::num::NonZeroUsize;
+use std::result::Result;
+use std::sync::Arc;
+use tokio::net::TcpStream;
+
+use crate::get_tcp_listener_stream;
+use crate::testdata_path;
+
+pub const TLS_BUFFER_SIZE: Option<NonZeroUsize> = NonZeroUsize::new(65536);
+
+#[derive(Default)]
+pub enum SupportedHttpVersions {
+ #[default]
+ All,
+ Http1Only,
+ Http2Only,
+}
+
+pub fn get_tls_listener_stream_from_tcp(
+ tls_config: Arc<rustls::ServerConfig>,
+ mut tcp: impl Stream<Item = Result<TcpStream, std::io::Error>> + Unpin + 'static,
+) -> impl Stream<Item = Result<TlsStream, std::io::Error>> + Unpin {
+ async_stream::stream! {
+ while let Some(result) = tcp.next().await {
+ match result {
+ Ok(tcp) => yield Ok(TlsStream::new_server_side(tcp, tls_config.clone(), TLS_BUFFER_SIZE)),
+ Err(e) => yield Err(e),
+ };
+ }
+ }.boxed_local()
+}
+
+pub async fn get_tls_listener_stream(
+ name: &'static str,
+ port: u16,
+ http: SupportedHttpVersions,
+) -> impl Stream<Item = Result<TlsStream, std::io::Error>> + Unpin {
+ let cert_file = "tls/localhost.crt";
+ let key_file = "tls/localhost.key";
+ let ca_cert_file = "tls/RootCA.pem";
+ let tls_config = get_tls_config(cert_file, key_file, ca_cert_file, http)
+ .await
+ .unwrap();
+
+ let tcp = get_tcp_listener_stream(name, port).await;
+ get_tls_listener_stream_from_tcp(tls_config, tcp)
+}
+
+pub async fn get_tls_config(
+ cert: &str,
+ key: &str,
+ ca: &str,
+ http_versions: SupportedHttpVersions,
+) -> io::Result<Arc<rustls::ServerConfig>> {
+ let cert_path = testdata_path().join(cert);
+ let key_path = testdata_path().join(key);
+ let ca_path = testdata_path().join(ca);
+
+ let cert_file = std::fs::File::open(cert_path)?;
+ let key_file = std::fs::File::open(key_path)?;
+ let ca_file = std::fs::File::open(ca_path)?;
+
+ let certs: Vec<Certificate> = {
+ let mut cert_reader = io::BufReader::new(cert_file);
+ rustls_pemfile::certs(&mut cert_reader)
+ .unwrap()
+ .into_iter()
+ .map(Certificate)
+ .collect()
+ };
+
+ let mut ca_cert_reader = io::BufReader::new(ca_file);
+ let ca_cert = rustls_pemfile::certs(&mut ca_cert_reader)
+ .expect("Cannot load CA certificate")
+ .remove(0);
+
+ let mut key_reader = io::BufReader::new(key_file);
+ let key = {
+ let pkcs8_key = rustls_pemfile::pkcs8_private_keys(&mut key_reader)
+ .expect("Cannot load key file");
+ let rsa_key = rustls_pemfile::rsa_private_keys(&mut key_reader)
+ .expect("Cannot load key file");
+ if !pkcs8_key.is_empty() {
+ Some(pkcs8_key[0].clone())
+ } else if !rsa_key.is_empty() {
+ Some(rsa_key[0].clone())
+ } else {
+ None
+ }
+ };
+
+ match key {
+ Some(key) => {
+ let mut root_cert_store = rustls::RootCertStore::empty();
+ root_cert_store.add(&rustls::Certificate(ca_cert)).unwrap();
+
+ // Allow (but do not require) client authentication.
+
+ let mut config = rustls::ServerConfig::builder()
+ .with_safe_defaults()
+ .with_client_cert_verifier(Arc::new(
+ rustls::server::AllowAnyAnonymousOrAuthenticatedClient::new(
+ root_cert_store,
+ ),
+ ))
+ .with_single_cert(certs, PrivateKey(key))
+ .map_err(|e| anyhow!("Error setting cert: {:?}", e))
+ .unwrap();
+
+ match http_versions {
+ SupportedHttpVersions::All => {
+ config.alpn_protocols = vec!["h2".into(), "http/1.1".into()];
+ }
+ SupportedHttpVersions::Http1Only => {}
+ SupportedHttpVersions::Http2Only => {
+ config.alpn_protocols = vec!["h2".into()];
+ }
+ }
+
+ Ok(Arc::new(config))
+ }
+ None => Err(io::Error::new(io::ErrorKind::Other, "Cannot find key")),
+ }
+}
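Because `lib.rs` below declares `mod https;` without re-exporting it, these TLS helpers are consumed from inside the crate (e.g. by the servers module). A rough sketch of that shape, with an arbitrary port and label:

// Sketch of in-crate usage; port 4999 and the "example" label are placeholders.
async fn accept_tls_connections() {
  use futures::StreamExt;

  let mut tls = crate::https::get_tls_listener_stream(
    "example",
    4999,
    crate::https::SupportedHttpVersions::Http1Only,
  )
  .await;
  while let Some(result) = tls.next().await {
    match result {
      // Each item is a rustls_tokio_stream::TlsStream ready to hand to a handler.
      Ok(_tls_stream) => { /* serve the connection */ }
      Err(err) => eprintln!("tls accept error: {err}"),
    }
  }
}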
diff --git a/tests/util/server/src/lib.rs b/tests/util/server/src/lib.rs
new file mode 100644
index 000000000..65dfe61ec
--- /dev/null
+++ b/tests/util/server/src/lib.rs
@@ -0,0 +1,1277 @@
+// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
+// Usage: provide a port as argument to run hyper_hello benchmark server
+// otherwise this starts multiple servers on many ports for test endpoints.
+use futures::FutureExt;
+use futures::Stream;
+use futures::StreamExt;
+use once_cell::sync::Lazy;
+use pretty_assertions::assert_eq;
+use pty::Pty;
+use regex::Regex;
+use serde::Serialize;
+use std::collections::HashMap;
+use std::env;
+use std::io::Write;
+use std::path::PathBuf;
+use std::process::Child;
+use std::process::Command;
+use std::process::Output;
+use std::process::Stdio;
+use std::result::Result;
+use std::sync::Mutex;
+use std::sync::MutexGuard;
+use tokio::net::TcpStream;
+use url::Url;
+
+pub mod assertions;
+mod builders;
+pub mod factory;
+mod fs;
+mod https;
+pub mod lsp;
+mod macros;
+mod npm;
+pub mod pty;
+pub mod servers;
+pub mod spawn;
+
+pub use builders::DenoChild;
+pub use builders::TestCommandBuilder;
+pub use builders::TestCommandOutput;
+pub use builders::TestContext;
+pub use builders::TestContextBuilder;
+pub use fs::PathRef;
+pub use fs::TempDir;
+
+pub const PERMISSION_VARIANTS: [&str; 5] =
+ ["read", "write", "env", "net", "run"];
+pub const PERMISSION_DENIED_PATTERN: &str = "PermissionDenied";
+
+static GUARD: Lazy<Mutex<HttpServerCount>> =
+ Lazy::new(|| Mutex::new(HttpServerCount::default()));
+
+pub fn env_vars_for_npm_tests() -> Vec<(String, String)> {
+ vec![
+ ("NPM_CONFIG_REGISTRY".to_string(), npm_registry_url()),
+ ("NO_COLOR".to_string(), "1".to_string()),
+ ]
+}
+
+pub fn env_vars_for_jsr_tests() -> Vec<(String, String)> {
+ vec![
+ ("JSR_URL".to_string(), jsr_registry_url()),
+ ("NO_COLOR".to_string(), "1".to_string()),
+ ]
+}
+
+pub fn root_path() -> PathRef {
+ PathRef::new(
+ PathBuf::from(concat!(env!("CARGO_MANIFEST_DIR")))
+ .parent()
+ .unwrap()
+ .parent()
+ .unwrap()
+ .parent()
+ .unwrap(),
+ )
+}
+
+pub fn prebuilt_path() -> PathRef {
+ third_party_path().join("prebuilt")
+}
+
+pub fn tests_path() -> PathRef {
+ root_path().join("tests")
+}
+
+pub fn testdata_path() -> PathRef {
+ tests_path().join("testdata")
+}
+
+pub fn third_party_path() -> PathRef {
+ root_path().join("third_party")
+}
+
+pub fn ffi_tests_path() -> PathRef {
+ root_path().join("tests").join("ffi")
+}
+
+pub fn napi_tests_path() -> PathRef {
+ root_path().join("tests").join("napi")
+}
+
+pub fn deno_config_path() -> PathRef {
+ root_path().join("tests").join("config").join("deno.json")
+}
+
+/// Test server registry url.
+pub fn npm_registry_url() -> String {
+ "http://localhost:4545/npm/registry/".to_string()
+}
+
+pub fn npm_registry_unset_url() -> String {
+ "http://NPM_CONFIG_REGISTRY.is.unset".to_string()
+}
+
+pub fn jsr_registry_url() -> String {
+ "http://127.0.0.1:4250/".to_string()
+}
+
+pub fn jsr_registry_unset_url() -> String {
+ "http://JSR_URL.is.unset".to_string()
+}
+
+pub fn std_path() -> PathRef {
+ root_path().join("tests").join("util").join("std")
+}
+
+pub fn std_file_url() -> String {
+ Url::from_directory_path(std_path()).unwrap().to_string()
+}
+
+pub fn target_dir() -> PathRef {
+ let current_exe = std::env::current_exe().unwrap();
+ let target_dir = current_exe.parent().unwrap().parent().unwrap();
+ PathRef::new(target_dir)
+}
+
+pub fn deno_exe_path() -> PathRef {
+ // Something like /Users/rld/src/deno/target/debug/deps/deno
+ let mut p = target_dir().join("deno").to_path_buf();
+ if cfg!(windows) {
+ p.set_extension("exe");
+ }
+ PathRef::new(p)
+}
+
+pub fn denort_exe_path() -> PathRef {
+ let mut p = target_dir().join("denort").to_path_buf();
+ if cfg!(windows) {
+ p.set_extension("exe");
+ }
+ PathRef::new(p)
+}
+
+pub fn prebuilt_tool_path(tool: &str) -> PathRef {
+ let mut exe = tool.to_string();
+ exe.push_str(if cfg!(windows) { ".exe" } else { "" });
+ prebuilt_path().join(platform_dir_name()).join(exe)
+}
+
+pub fn platform_dir_name() -> &'static str {
+ if cfg!(target_os = "linux") {
+ "linux64"
+ } else if cfg!(target_os = "macos") {
+ "mac"
+ } else if cfg!(target_os = "windows") {
+ "win"
+ } else {
+ unreachable!()
+ }
+}
+
+pub fn test_server_path() -> PathBuf {
+ let mut p = target_dir().join("test_server").to_path_buf();
+ if cfg!(windows) {
+ p.set_extension("exe");
+ }
+ p
+}
+
+fn ensure_test_server_built() {
+ // if the test server doesn't exist then remind the developer to build first
+ if !test_server_path().exists() {
+ panic!(
+ "Test server not found. Please cargo build before running the tests."
+ );
+ }
+}
+
+/// Returns a [`Stream`] of [`TcpStream`]s accepted from the given port.
+async fn get_tcp_listener_stream(
+ name: &'static str,
+ port: u16,
+) -> impl Stream<Item = Result<TcpStream, std::io::Error>> + Unpin + Send {
+ let host_and_port = &format!("localhost:{port}");
+
+ // Listen on ALL addresses that localhost can resolve to.
+ let accept = |listener: tokio::net::TcpListener| {
+ async {
+ let result = listener.accept().await;
+ Some((result.map(|r| r.0), listener))
+ }
+ .boxed()
+ };
+
+ let mut addresses = vec![];
+ let listeners = tokio::net::lookup_host(host_and_port)
+ .await
+ .expect(host_and_port)
+ .inspect(|address| addresses.push(*address))
+ .map(tokio::net::TcpListener::bind)
+ .collect::<futures::stream::FuturesUnordered<_>>()
+ .collect::<Vec<_>>()
+ .await
+ .into_iter()
+ .map(|s| s.unwrap())
+ .map(|listener| futures::stream::unfold(listener, accept))
+ .collect::<Vec<_>>();
+
+ // Eye catcher for HttpServerCount
+ println!("ready: {name} on {:?}", addresses);
+
+ futures::stream::select_all(listeners)
+}
+
+#[derive(Default)]
+struct HttpServerCount {
+ count: usize,
+ test_server: Option<Child>,
+}
+
+impl HttpServerCount {
+ fn inc(&mut self) {
+ self.count += 1;
+ if self.test_server.is_none() {
+ assert_eq!(self.count, 1);
+
+ println!("test_server starting...");
+ let mut test_server = Command::new(test_server_path())
+ .current_dir(testdata_path())
+ .stdout(Stdio::piped())
+ .spawn()
+ .expect("failed to execute test_server");
+ let stdout = test_server.stdout.as_mut().unwrap();
+ use std::io::BufRead;
+ use std::io::BufReader;
+ let lines = BufReader::new(stdout).lines();
+
+ // Wait for all the servers to report being ready.
+ let mut ready_count = 0;
+ for maybe_line in lines {
+ if let Ok(line) = maybe_line {
+ if line.starts_with("ready:") {
+ ready_count += 1;
+ }
+ if ready_count == 12 {
+ break;
+ }
+ } else {
+ panic!("{}", maybe_line.unwrap_err());
+ }
+ }
+ self.test_server = Some(test_server);
+ }
+ }
+
+ fn dec(&mut self) {
+ assert!(self.count > 0);
+ self.count -= 1;
+ if self.count == 0 {
+ let mut test_server = self.test_server.take().unwrap();
+ match test_server.try_wait() {
+ Ok(None) => {
+ test_server.kill().expect("failed to kill test_server");
+ let _ = test_server.wait();
+ }
+ Ok(Some(status)) => {
+ panic!("test_server exited unexpectedly {status}")
+ }
+ Err(e) => panic!("test_server error: {e}"),
+ }
+ }
+ }
+}
+
+impl Drop for HttpServerCount {
+ fn drop(&mut self) {
+ assert_eq!(self.count, 0);
+ assert!(self.test_server.is_none());
+ }
+}
+
+fn lock_http_server<'a>() -> MutexGuard<'a, HttpServerCount> {
+ let r = GUARD.lock();
+ if let Err(poison_err) = r {
+ // If the lock was poisoned by a panic, ignore it; this is test-only code.
+ poison_err.into_inner()
+ } else {
+ r.unwrap()
+ }
+}
+
+pub struct HttpServerGuard {}
+
+impl Drop for HttpServerGuard {
+ fn drop(&mut self) {
+ let mut g = lock_http_server();
+ g.dec();
+ }
+}
+
+/// Adds a reference to a shared target/debug/test_server subprocess. When the
+/// last instance of the HttpServerGuard is dropped, the subprocess will be
+/// killed.
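+///
+/// A minimal usage sketch (illustrative only; the port is the test server's
+/// default):
+///
+/// ```ignore
+/// let _guard = http_server(); // starts (or reuses) the shared test server
+/// // ... run code that talks to http://localhost:4545/ ...
+/// // the subprocess is killed once the last guard is dropped
+/// ```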
+pub fn http_server() -> HttpServerGuard {
+ ensure_test_server_built();
+ let mut g = lock_http_server();
+ g.inc();
+ HttpServerGuard {}
+}
+
+/// Helper function to strip ANSI escape codes.
+pub fn strip_ansi_codes(s: &str) -> std::borrow::Cow<str> {
+ console_static_text::ansi::strip_ansi_codes(s)
+}
+
+pub fn run(
+ cmd: &[&str],
+ input: Option<&[&str]>,
+ envs: Option<Vec<(String, String)>>,
+ current_dir: Option<&str>,
+ expect_success: bool,
+) {
+ let mut process_builder = Command::new(cmd[0]);
+ process_builder.args(&cmd[1..]).stdin(Stdio::piped());
+
+ if let Some(dir) = current_dir {
+ process_builder.current_dir(dir);
+ }
+ if let Some(envs) = envs {
+ process_builder.envs(envs);
+ }
+ let mut prog = process_builder.spawn().expect("failed to spawn script");
+ if let Some(lines) = input {
+ let stdin = prog.stdin.as_mut().expect("failed to get stdin");
+ stdin
+ .write_all(lines.join("\n").as_bytes())
+ .expect("failed to write to stdin");
+ }
+ let status = prog.wait().expect("failed to wait on child");
+ if expect_success != status.success() {
+ panic!("Unexpected exit code: {:?}", status.code());
+ }
+}
+
+pub fn run_collect(
+ cmd: &[&str],
+ input: Option<&[&str]>,
+ envs: Option<Vec<(String, String)>>,
+ current_dir: Option<&str>,
+ expect_success: bool,
+) -> (String, String) {
+ let mut process_builder = Command::new(cmd[0]);
+ process_builder
+ .args(&cmd[1..])
+ .stdin(Stdio::piped())
+ .stdout(Stdio::piped())
+ .stderr(Stdio::piped());
+ if let Some(dir) = current_dir {
+ process_builder.current_dir(dir);
+ }
+ if let Some(envs) = envs {
+ process_builder.envs(envs);
+ }
+ let mut prog = process_builder.spawn().expect("failed to spawn script");
+ if let Some(lines) = input {
+ let stdin = prog.stdin.as_mut().expect("failed to get stdin");
+ stdin
+ .write_all(lines.join("\n").as_bytes())
+ .expect("failed to write to stdin");
+ }
+ let Output {
+ stdout,
+ stderr,
+ status,
+ } = prog.wait_with_output().expect("failed to wait on child");
+ let stdout = String::from_utf8(stdout).unwrap();
+ let stderr = String::from_utf8(stderr).unwrap();
+ if expect_success != status.success() {
+ eprintln!("stdout: <<<{stdout}>>>");
+ eprintln!("stderr: <<<{stderr}>>>");
+ panic!("Unexpected exit code: {:?}", status.code());
+ }
+ (stdout, stderr)
+}
+
+pub fn run_and_collect_output(
+ expect_success: bool,
+ args: &str,
+ input: Option<Vec<&str>>,
+ envs: Option<Vec<(String, String)>>,
+ need_http_server: bool,
+) -> (String, String) {
+ run_and_collect_output_with_args(
+ expect_success,
+ args.split_whitespace().collect(),
+ input,
+ envs,
+ need_http_server,
+ )
+}
+
+pub fn run_and_collect_output_with_args(
+ expect_success: bool,
+ args: Vec<&str>,
+ input: Option<Vec<&str>>,
+ envs: Option<Vec<(String, String)>>,
+ need_http_server: bool,
+) -> (String, String) {
+ let mut deno_process_builder = deno_cmd()
+ .args_vec(args)
+ .current_dir(testdata_path())
+ .stdin(Stdio::piped())
+ .piped_output();
+ if let Some(envs) = envs {
+ deno_process_builder = deno_process_builder.envs(envs);
+ }
+ let _http_guard = if need_http_server {
+ Some(http_server())
+ } else {
+ None
+ };
+ let mut deno = deno_process_builder
+ .spawn()
+ .expect("failed to spawn script");
+ if let Some(lines) = input {
+ let stdin = deno.stdin.as_mut().expect("failed to get stdin");
+ stdin
+ .write_all(lines.join("\n").as_bytes())
+ .expect("failed to write to stdin");
+ }
+ let Output {
+ stdout,
+ stderr,
+ status,
+ } = deno.wait_with_output().expect("failed to wait on child");
+ let stdout = String::from_utf8(stdout).unwrap();
+ let stderr = String::from_utf8(stderr).unwrap();
+ if expect_success != status.success() {
+ eprintln!("stdout: <<<{stdout}>>>");
+ eprintln!("stderr: <<<{stderr}>>>");
+ panic!("Unexpected exit code: {:?}", status.code());
+ }
+ (stdout, stderr)
+}
+
+pub fn new_deno_dir() -> TempDir {
+ TempDir::new()
+}
+
+pub fn deno_cmd() -> TestCommandBuilder {
+ let deno_dir = new_deno_dir();
+ deno_cmd_with_deno_dir(&deno_dir)
+}
+
+pub fn deno_cmd_with_deno_dir(deno_dir: &TempDir) -> TestCommandBuilder {
+ TestCommandBuilder::new(deno_dir.clone())
+ .env("DENO_DIR", deno_dir.path())
+ .env("NPM_CONFIG_REGISTRY", npm_registry_unset_url())
+ .env("JSR_URL", jsr_registry_unset_url())
+}
+
+pub fn run_powershell_script_file(
+ script_file_path: &str,
+ args: Vec<&str>,
+) -> std::result::Result<(), i64> {
+ let deno_dir = new_deno_dir();
+ let mut command = Command::new("powershell.exe");
+
+ command
+ .env("DENO_DIR", deno_dir.path())
+ .current_dir(testdata_path())
+ .arg("-file")
+ .arg(script_file_path);
+
+ for arg in args {
+ command.arg(arg);
+ }
+
+ let output = command.output().expect("failed to spawn script");
+ let stdout = String::from_utf8(output.stdout).unwrap();
+ let stderr = String::from_utf8(output.stderr).unwrap();
+ println!("{stdout}");
+ if !output.status.success() {
+ panic!(
+ "{script_file_path} executed with failing error code\n{stdout}{stderr}"
+ );
+ }
+
+ Ok(())
+}
+
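+/// Declarative description of a single CLI integration test run, usually
+/// constructed via the `itest!` macro but usable directly. A minimal sketch
+/// (the args and expected output below are hypothetical):
+///
+/// ```ignore
+/// let output = CheckOutputIntegrationTest {
+///   args: "run --quiet hello.ts",
+///   output_str: Some("Hello\n"),
+///   ..Default::default()
+/// }
+/// .output();
+/// output.assert_exit_code(0);
+/// ```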
+#[derive(Debug, Default)]
+pub struct CheckOutputIntegrationTest<'a> {
+ pub args: &'a str,
+ pub args_vec: Vec<&'a str>,
+ pub output: &'a str,
+ pub input: Option<&'a str>,
+ pub output_str: Option<&'a str>,
+ pub exit_code: i32,
+ pub http_server: bool,
+ pub envs: Vec<(String, String)>,
+ pub env_clear: bool,
+ pub skip_strip_ansi: bool,
+ pub temp_cwd: bool,
+ /// Copies the files in the specified directory (relative to the "testdata"
+ /// directory) to the temp folder and runs the test from there. This is useful
+ /// when the test creates files in the testdata directory (e.g. a node_modules folder).
+ pub copy_temp_dir: Option<&'a str>,
+ /// Relative to "testdata" directory
+ pub cwd: Option<&'a str>,
+}
+
+impl<'a> CheckOutputIntegrationTest<'a> {
+ pub fn output(&self) -> TestCommandOutput {
+ let mut context_builder = TestContextBuilder::default();
+ if self.temp_cwd {
+ context_builder = context_builder.use_temp_cwd();
+ }
+ if let Some(dir) = &self.copy_temp_dir {
+ context_builder = context_builder.use_copy_temp_dir(dir);
+ }
+ if self.http_server {
+ context_builder = context_builder.use_http_server();
+ }
+
+ let context = context_builder.build();
+
+ let mut command_builder = context.new_command();
+
+ if !self.args.is_empty() {
+ command_builder = command_builder.args(self.args);
+ }
+ if !self.args_vec.is_empty() {
+ command_builder = command_builder.args_vec(self.args_vec.clone());
+ }
+ if let Some(input) = &self.input {
+ command_builder = command_builder.stdin_text(input);
+ }
+ for (key, value) in &self.envs {
+ command_builder = command_builder.env(key, value);
+ }
+ if self.env_clear {
+ command_builder = command_builder.env_clear();
+ }
+ if self.skip_strip_ansi {
+ command_builder = command_builder.skip_strip_ansi();
+ }
+ if let Some(cwd) = &self.cwd {
+ command_builder = command_builder.current_dir(cwd);
+ }
+
+ command_builder.run()
+ }
+}
+
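+/// Returns true when `text` matches `pattern`, where the pattern may contain
+/// `[WILDCARD]` tokens (match any text) and `[UNORDERED_START]`/`[UNORDERED_END]`
+/// blocks (lines that may appear in any order). On failure the detailed match
+/// output is printed to stderr. A small sketch:
+///
+/// ```ignore
+/// assert!(wildcard_match("foo[WILDCARD]baz", "foobarbaz"));
+/// ```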
+pub fn wildcard_match(pattern: &str, text: &str) -> bool {
+ match wildcard_match_detailed(pattern, text) {
+ WildcardMatchResult::Success => true,
+ WildcardMatchResult::Fail(debug_output) => {
+ eprintln!("{}", debug_output);
+ false
+ }
+ }
+}
+
+pub enum WildcardMatchResult {
+ Success,
+ Fail(String),
+}
+
+pub fn wildcard_match_detailed(
+ pattern: &str,
+ text: &str,
+) -> WildcardMatchResult {
+ fn annotate_whitespace(text: &str) -> String {
+ text.replace('\t', "\u{2192}").replace(' ', "\u{00B7}")
+ }
+
+ // Normalize line endings
+ let original_text = text.replace("\r\n", "\n");
+ let mut current_text = original_text.as_str();
+ let pattern = pattern.replace("\r\n", "\n");
+ let mut output_lines = Vec::new();
+
+ let parts = parse_wildcard_pattern_text(&pattern).unwrap();
+
+ let mut was_last_wildcard = false;
+ for (i, part) in parts.iter().enumerate() {
+ match part {
+ WildcardPatternPart::Wildcard => {
+ output_lines.push("<WILDCARD />".to_string());
+ }
+ WildcardPatternPart::Text(search_text) => {
+ let is_last = i + 1 == parts.len();
+ let search_index = if is_last && was_last_wildcard {
+ // search from the end of the file
+ current_text.rfind(search_text)
+ } else {
+ current_text.find(search_text)
+ };
+ match search_index {
+ Some(found_index) if was_last_wildcard || found_index == 0 => {
+ output_lines.push(format!(
+ "<FOUND>{}</FOUND>",
+ colors::gray(annotate_whitespace(search_text))
+ ));
+ current_text = &current_text[found_index + search_text.len()..];
+ }
+ Some(index) => {
+ output_lines.push(
+ "==== FOUND SEARCH TEXT IN WRONG POSITION ====".to_string(),
+ );
+ output_lines.push(colors::gray(annotate_whitespace(search_text)));
+ output_lines
+ .push("==== HAD UNKNOWN PRECEEDING TEXT ====".to_string());
+ output_lines
+ .push(colors::red(annotate_whitespace(&current_text[..index])));
+ return WildcardMatchResult::Fail(output_lines.join("\n"));
+ }
+ None => {
+ let mut max_found_index = 0;
+ for (index, _) in search_text.char_indices() {
+ let sub_string = &search_text[..index];
+ if let Some(found_index) = current_text.find(sub_string) {
+ if was_last_wildcard || found_index == 0 {
+ max_found_index = index;
+ } else {
+ break;
+ }
+ } else {
+ break;
+ }
+ }
+ if !was_last_wildcard && max_found_index > 0 {
+ output_lines.push(format!(
+ "<FOUND>{}</FOUND>",
+ colors::gray(annotate_whitespace(
+ &search_text[..max_found_index]
+ ))
+ ));
+ }
+ output_lines
+ .push("==== COULD NOT FIND SEARCH TEXT ====".to_string());
+ output_lines.push(colors::green(annotate_whitespace(
+ if was_last_wildcard {
+ search_text
+ } else {
+ &search_text[max_found_index..]
+ },
+ )));
+ if was_last_wildcard && max_found_index > 0 {
+ output_lines.push(format!(
+ "==== MAX FOUND ====\n{}",
+ colors::red(annotate_whitespace(
+ &search_text[..max_found_index]
+ ))
+ ));
+ }
+ let actual_next_text = &current_text[max_found_index..];
+ let max_next_text_len = 40;
+ let next_text_len =
+ std::cmp::min(max_next_text_len, actual_next_text.len());
+ output_lines.push(format!(
+ "==== NEXT ACTUAL TEXT ====\n{}{}",
+ colors::red(annotate_whitespace(
+ &actual_next_text[..next_text_len]
+ )),
+ if actual_next_text.len() > max_next_text_len {
+ "[TRUNCATED]"
+ } else {
+ ""
+ },
+ ));
+ return WildcardMatchResult::Fail(output_lines.join("\n"));
+ }
+ }
+ }
+ WildcardPatternPart::UnorderedLines(expected_lines) => {
+ assert!(!was_last_wildcard, "unsupported");
+ let mut actual_lines = Vec::with_capacity(expected_lines.len());
+ for _ in 0..expected_lines.len() {
+ match current_text.find('\n') {
+ Some(end_line_index) => {
+ actual_lines.push(&current_text[..end_line_index]);
+ current_text = &current_text[end_line_index + 1..];
+ }
+ None => {
+ break;
+ }
+ }
+ }
+ actual_lines.sort_unstable();
+ let mut expected_lines = expected_lines.clone();
+ expected_lines.sort_unstable();
+
+ if actual_lines.len() != expected_lines.len() {
+ output_lines
+ .push("==== HAD WRONG NUMBER OF UNORDERED LINES ====".to_string());
+ output_lines.push("# ACTUAL".to_string());
+ output_lines.extend(
+ actual_lines
+ .iter()
+ .map(|l| colors::green(annotate_whitespace(l))),
+ );
+ output_lines.push("# EXPECTED".to_string());
+ output_lines.extend(
+ expected_lines
+ .iter()
+ .map(|l| colors::green(annotate_whitespace(l))),
+ );
+ return WildcardMatchResult::Fail(output_lines.join("\n"));
+ }
+ for (actual, expected) in actual_lines.iter().zip(expected_lines.iter())
+ {
+ if actual != expected {
+ output_lines
+ .push("==== UNORDERED LINE DID NOT MATCH ====".to_string());
+ output_lines.push(format!(
+ " ACTUAL: {}",
+ colors::red(annotate_whitespace(actual))
+ ));
+ output_lines.push(format!(
+ "EXPECTED: {}",
+ colors::green(annotate_whitespace(expected))
+ ));
+ return WildcardMatchResult::Fail(output_lines.join("\n"));
+ } else {
+ output_lines.push(format!(
+ "<FOUND>{}</FOUND>",
+ colors::gray(annotate_whitespace(expected))
+ ));
+ }
+ }
+ }
+ }
+ was_last_wildcard = matches!(part, WildcardPatternPart::Wildcard);
+ }
+
+ if was_last_wildcard || current_text.is_empty() {
+ WildcardMatchResult::Success
+ } else {
+ output_lines.push("==== HAD TEXT AT END OF FILE ====".to_string());
+ output_lines.push(colors::red(annotate_whitespace(current_text)));
+ WildcardMatchResult::Fail(output_lines.join("\n"))
+ }
+}
+
+#[derive(Debug)]
+enum WildcardPatternPart<'a> {
+ Wildcard,
+ Text(&'a str),
+ UnorderedLines(Vec<&'a str>),
+}
+
+fn parse_wildcard_pattern_text(
+ text: &str,
+) -> Result<Vec<WildcardPatternPart>, monch::ParseErrorFailureError> {
+ use monch::*;
+
+ fn parse_unordered_lines(input: &str) -> ParseResult<Vec<&str>> {
+ const END_TEXT: &str = "\n[UNORDERED_END]\n";
+ let (input, _) = tag("[UNORDERED_START]\n")(input)?;
+ match input.find(END_TEXT) {
+ Some(end_index) => ParseResult::Ok((
+ &input[end_index + END_TEXT.len()..],
+ input[..end_index].lines().collect::<Vec<_>>(),
+ )),
+ None => ParseError::fail(input, "Could not find [UNORDERED_END]"),
+ }
+ }
+
+ enum InnerPart<'a> {
+ Wildcard,
+ UnorderedLines(Vec<&'a str>),
+ Char,
+ }
+
+ struct Parser<'a> {
+ current_input: &'a str,
+ last_text_input: &'a str,
+ parts: Vec<WildcardPatternPart<'a>>,
+ }
+
+ impl<'a> Parser<'a> {
+ fn parse(mut self) -> ParseResult<'a, Vec<WildcardPatternPart<'a>>> {
+ while !self.current_input.is_empty() {
+ let (next_input, inner_part) = or3(
+ map(tag("[WILDCARD]"), |_| InnerPart::Wildcard),
+ map(parse_unordered_lines, |lines| {
+ InnerPart::UnorderedLines(lines)
+ }),
+ map(next_char, |_| InnerPart::Char),
+ )(self.current_input)?;
+ match inner_part {
+ InnerPart::Wildcard => {
+ self.queue_previous_text(next_input);
+ self.parts.push(WildcardPatternPart::Wildcard);
+ }
+ InnerPart::UnorderedLines(expected_lines) => {
+ self.queue_previous_text(next_input);
+ self
+ .parts
+ .push(WildcardPatternPart::UnorderedLines(expected_lines));
+ }
+ InnerPart::Char => {
+ // ignore
+ }
+ }
+ self.current_input = next_input;
+ }
+
+ self.queue_previous_text("");
+
+ ParseResult::Ok(("", self.parts))
+ }
+
+ fn queue_previous_text(&mut self, next_input: &'a str) {
+ let previous_text = &self.last_text_input
+ [..self.last_text_input.len() - self.current_input.len()];
+ if !previous_text.is_empty() {
+ self.parts.push(WildcardPatternPart::Text(previous_text));
+ }
+ self.last_text_input = next_input;
+ }
+ }
+
+ with_failure_handling(|input| {
+ Parser {
+ current_input: input,
+ last_text_input: input,
+ parts: Vec::new(),
+ }
+ .parse()
+ })(text)
+}
+
+pub fn with_pty(deno_args: &[&str], action: impl FnMut(Pty)) {
+ let context = TestContextBuilder::default().use_temp_cwd().build();
+ context.new_command().args_vec(deno_args).with_pty(action);
+}
+
+pub struct WrkOutput {
+ pub latency: f64,
+ pub requests: u64,
+}
+
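+/// Parses the summary printed by the `wrk` HTTP benchmarking tool, extracting
+/// the requests/sec figure and the 99th percentile latency normalized to
+/// milliseconds. It expects lines roughly of this shape (values are
+/// illustrative):
+///
+/// ```text
+///     99%    6.25ms
+/// Requests/sec:   1837
+/// ```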
+pub fn parse_wrk_output(output: &str) -> WrkOutput {
+ static REQUESTS_RX: Lazy<Regex> =
+ lazy_regex::lazy_regex!(r"Requests/sec:\s+(\d+)");
+ static LATENCY_RX: Lazy<Regex> =
+ lazy_regex::lazy_regex!(r"\s+99%(?:\s+(\d+.\d+)([a-z]+))");
+
+ let mut requests = None;
+ let mut latency = None;
+
+ for line in output.lines() {
+ if requests.is_none() {
+ if let Some(cap) = REQUESTS_RX.captures(line) {
+ requests =
+ Some(str::parse::<u64>(cap.get(1).unwrap().as_str()).unwrap());
+ }
+ }
+ if latency.is_none() {
+ if let Some(cap) = LATENCY_RX.captures(line) {
+ let time = cap.get(1).unwrap();
+ let unit = cap.get(2).unwrap();
+
+ latency = Some(
+ str::parse::<f64>(time.as_str()).unwrap()
+ * match unit.as_str() {
+ "ms" => 1.0,
+ "us" => 0.001,
+ "s" => 1000.0,
+ _ => unreachable!(),
+ },
+ );
+ }
+ }
+ }
+
+ WrkOutput {
+ requests: requests.unwrap(),
+ latency: latency.unwrap(),
+ }
+}
+
+#[derive(Debug, Clone, Serialize)]
+pub struct StraceOutput {
+ pub percent_time: f64,
+ pub seconds: f64,
+ pub usecs_per_call: Option<u64>,
+ pub calls: u64,
+ pub errors: u64,
+}
+
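+/// Parses the per-syscall summary table produced by `strace -c` (columns:
+/// % time, seconds, usecs/call, calls, errors, syscall), returning one entry
+/// per syscall plus a `"total"` entry for the summary line.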
+pub fn parse_strace_output(output: &str) -> HashMap<String, StraceOutput> {
+ let mut summary = HashMap::new();
+
+ // Filter out non-relevant lines. See the error log at
+ // https://github.com/denoland/deno/pull/3715/checks?check_run_id=397365887
+ // This is checked in testdata/strace_summary2.out
+ let mut lines = output.lines().filter(|line| {
+ !line.is_empty()
+ && !line.contains("detached ...")
+ && !line.contains("unfinished ...")
+ && !line.contains("????")
+ });
+ let count = lines.clone().count();
+
+ if count < 4 {
+ return summary;
+ }
+
+ let total_line = lines.next_back().unwrap();
+ lines.next_back(); // Drop separator
+ let data_lines = lines.skip(2);
+
+ for line in data_lines {
+ let syscall_fields = line.split_whitespace().collect::<Vec<_>>();
+ let len = syscall_fields.len();
+ let syscall_name = syscall_fields.last().unwrap();
+ if (5..=6).contains(&len) {
+ summary.insert(
+ syscall_name.to_string(),
+ StraceOutput {
+ percent_time: str::parse::<f64>(syscall_fields[0]).unwrap(),
+ seconds: str::parse::<f64>(syscall_fields[1]).unwrap(),
+ usecs_per_call: Some(str::parse::<u64>(syscall_fields[2]).unwrap()),
+ calls: str::parse::<u64>(syscall_fields[3]).unwrap(),
+ errors: if syscall_fields.len() < 6 {
+ 0
+ } else {
+ str::parse::<u64>(syscall_fields[4]).unwrap()
+ },
+ },
+ );
+ }
+ }
+
+ let total_fields = total_line.split_whitespace().collect::<Vec<_>>();
+
+ let mut usecs_call_offset = 0;
+ summary.insert(
+ "total".to_string(),
+ StraceOutput {
+ percent_time: str::parse::<f64>(total_fields[0]).unwrap(),
+ seconds: str::parse::<f64>(total_fields[1]).unwrap(),
+ usecs_per_call: if total_fields.len() > 5 {
+ usecs_call_offset = 1;
+ Some(str::parse::<u64>(total_fields[2]).unwrap())
+ } else {
+ None
+ },
+ calls: str::parse::<u64>(total_fields[2 + usecs_call_offset]).unwrap(),
+ errors: str::parse::<u64>(total_fields[3 + usecs_call_offset]).unwrap(),
+ },
+ );
+
+ summary
+}
+
+pub fn parse_max_mem(output: &str) -> Option<u64> {
+ // Takes the output from "time -v" as input and extracts the 'maximum
+ // resident set size' and returns it in bytes.
+ for line in output.lines() {
+ if line
+ .to_lowercase()
+ .contains("maximum resident set size (kbytes)")
+ {
+ let value = line.split(": ").nth(1).unwrap();
+ return Some(str::parse::<u64>(value).unwrap() * 1024);
+ }
+ }
+
+ None
+}
+
+pub(crate) mod colors {
+ use std::io::Write;
+
+ use termcolor::Ansi;
+ use termcolor::Color;
+ use termcolor::ColorSpec;
+ use termcolor::WriteColor;
+
+ pub fn bold<S: AsRef<str>>(s: S) -> String {
+ let mut style_spec = ColorSpec::new();
+ style_spec.set_bold(true);
+ style(s, style_spec)
+ }
+
+ pub fn red<S: AsRef<str>>(s: S) -> String {
+ fg_color(s, Color::Red)
+ }
+
+ pub fn bold_red<S: AsRef<str>>(s: S) -> String {
+ bold_fg_color(s, Color::Red)
+ }
+
+ pub fn green<S: AsRef<str>>(s: S) -> String {
+ fg_color(s, Color::Green)
+ }
+
+ pub fn bold_green<S: AsRef<str>>(s: S) -> String {
+ bold_fg_color(s, Color::Green)
+ }
+
+ pub fn bold_blue<S: AsRef<str>>(s: S) -> String {
+ bold_fg_color(s, Color::Blue)
+ }
+
+ pub fn gray<S: AsRef<str>>(s: S) -> String {
+ fg_color(s, Color::Ansi256(245))
+ }
+
+ fn bold_fg_color<S: AsRef<str>>(s: S, color: Color) -> String {
+ let mut style_spec = ColorSpec::new();
+ style_spec.set_bold(true);
+ style_spec.set_fg(Some(color));
+ style(s, style_spec)
+ }
+
+ fn fg_color<S: AsRef<str>>(s: S, color: Color) -> String {
+ let mut style_spec = ColorSpec::new();
+ style_spec.set_fg(Some(color));
+ style(s, style_spec)
+ }
+
+ fn style<S: AsRef<str>>(s: S, colorspec: ColorSpec) -> String {
+ let mut v = Vec::new();
+ let mut ansi_writer = Ansi::new(&mut v);
+ ansi_writer.set_color(&colorspec).unwrap();
+ ansi_writer.write_all(s.as_ref().as_bytes()).unwrap();
+ ansi_writer.reset().unwrap();
+ String::from_utf8_lossy(&v).into_owned()
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use pretty_assertions::assert_eq;
+
+ #[test]
+ fn parse_wrk_output_1() {
+ const TEXT: &str = include_str!("./testdata/wrk1.txt");
+ let wrk = parse_wrk_output(TEXT);
+ assert_eq!(wrk.requests, 1837);
+ assert!((wrk.latency - 6.25).abs() < f64::EPSILON);
+ }
+
+ #[test]
+ fn parse_wrk_output_2() {
+ const TEXT: &str = include_str!("./testdata/wrk2.txt");
+ let wrk = parse_wrk_output(TEXT);
+ assert_eq!(wrk.requests, 53435);
+ assert!((wrk.latency - 6.22).abs() < f64::EPSILON);
+ }
+
+ #[test]
+ fn parse_wrk_output_3() {
+ const TEXT: &str = include_str!("./testdata/wrk3.txt");
+ let wrk = parse_wrk_output(TEXT);
+ assert_eq!(wrk.requests, 96037);
+ assert!((wrk.latency - 6.36).abs() < f64::EPSILON);
+ }
+
+ #[test]
+ fn strace_parse_1() {
+ const TEXT: &str = include_str!("./testdata/strace_summary.out");
+ let strace = parse_strace_output(TEXT);
+
+ // first syscall line
+ let munmap = strace.get("munmap").unwrap();
+ assert_eq!(munmap.calls, 60);
+ assert_eq!(munmap.errors, 0);
+
+ // line with errors
+ assert_eq!(strace.get("mkdir").unwrap().errors, 2);
+
+ // last syscall line
+ let prlimit = strace.get("prlimit64").unwrap();
+ assert_eq!(prlimit.calls, 2);
+ assert!((prlimit.percent_time - 0.0).abs() < f64::EPSILON);
+
+ // summary line
+ assert_eq!(strace.get("total").unwrap().calls, 704);
+ assert_eq!(strace.get("total").unwrap().errors, 5);
+ assert_eq!(strace.get("total").unwrap().usecs_per_call, None);
+ }
+
+ #[test]
+ fn strace_parse_2() {
+ const TEXT: &str = include_str!("./testdata/strace_summary2.out");
+ let strace = parse_strace_output(TEXT);
+
+ // first syscall line
+ let futex = strace.get("futex").unwrap();
+ assert_eq!(futex.calls, 449);
+ assert_eq!(futex.errors, 94);
+
+ // summary line
+ assert_eq!(strace.get("total").unwrap().calls, 821);
+ assert_eq!(strace.get("total").unwrap().errors, 107);
+ assert_eq!(strace.get("total").unwrap().usecs_per_call, None);
+ }
+
+ #[test]
+ fn strace_parse_3() {
+ const TEXT: &str = include_str!("./testdata/strace_summary3.out");
+ let strace = parse_strace_output(TEXT);
+
+ // first syscall line
+ let futex = strace.get("mprotect").unwrap();
+ assert_eq!(futex.calls, 90);
+ assert_eq!(futex.errors, 0);
+
+ // summary line
+ assert_eq!(strace.get("total").unwrap().calls, 543);
+ assert_eq!(strace.get("total").unwrap().errors, 36);
+ assert_eq!(strace.get("total").unwrap().usecs_per_call, Some(6));
+ }
+
+ #[test]
+ fn parse_parse_wildcard_match_text() {
+ let result =
+ parse_wildcard_pattern_text("[UNORDERED_START]\ntesting\ntesting")
+ .err()
+ .unwrap();
+ assert_contains!(result.to_string(), "Could not find [UNORDERED_END]");
+ }
+
+ #[test]
+ fn test_wildcard_match() {
+ let fixtures = vec![
+ ("foobarbaz", "foobarbaz", true),
+ ("[WILDCARD]", "foobarbaz", true),
+ ("foobar", "foobarbaz", false),
+ ("foo[WILDCARD]baz", "foobarbaz", true),
+ ("foo[WILDCARD]baz", "foobazbar", false),
+ ("foo[WILDCARD]baz[WILDCARD]qux", "foobarbazqatqux", true),
+ ("foo[WILDCARD]", "foobar", true),
+ ("foo[WILDCARD]baz[WILDCARD]", "foobarbazqat", true),
+ // check with different line endings
+ ("foo[WILDCARD]\nbaz[WILDCARD]\n", "foobar\nbazqat\n", true),
+ (
+ "foo[WILDCARD]\nbaz[WILDCARD]\n",
+ "foobar\r\nbazqat\r\n",
+ true,
+ ),
+ (
+ "foo[WILDCARD]\r\nbaz[WILDCARD]\n",
+ "foobar\nbazqat\r\n",
+ true,
+ ),
+ (
+ "foo[WILDCARD]\r\nbaz[WILDCARD]\r\n",
+ "foobar\nbazqat\n",
+ true,
+ ),
+ (
+ "foo[WILDCARD]\r\nbaz[WILDCARD]\r\n",
+ "foobar\r\nbazqat\r\n",
+ true,
+ ),
+ ];
+
+ // Iterate through the fixture lists, testing each one
+ for (pattern, string, expected) in fixtures {
+ let actual = wildcard_match(pattern, string);
+ dbg!(pattern, string, expected);
+ assert_eq!(actual, expected);
+ }
+ }
+
+ #[test]
+ fn test_wildcard_match2() {
+ // foo, bar, baz, qux, quux, quuz, corge, grault, garply, waldo, fred, plugh, xyzzy
+
+ assert!(wildcard_match("foo[WILDCARD]baz", "foobarbaz"));
+ assert!(!wildcard_match("foo[WILDCARD]baz", "foobazbar"));
+
+ let multiline_pattern = "[WILDCARD]
+foo:
+[WILDCARD]baz[WILDCARD]";
+
+ fn multi_line_builder(input: &str, leading_text: Option<&str>) -> String {
+ // If there is leading text, add a newline so it's on its own line
+ let head = match leading_text {
+ Some(v) => format!("{v}\n"),
+ None => "".to_string(),
+ };
+ format!(
+ "{head}foo:
+quuz {input} corge
+grault"
+ )
+ }
+
+ // Validate multi-line string builder
+ assert_eq!(
+ "QUUX=qux
+foo:
+quuz BAZ corge
+grault",
+ multi_line_builder("BAZ", Some("QUUX=qux"))
+ );
+
+ // Correct input & leading line
+ assert!(wildcard_match(
+ multiline_pattern,
+ &multi_line_builder("baz", Some("QUX=quux")),
+ ));
+
+ // Should fail when there is no leading line
+ assert!(!wildcard_match(
+ multiline_pattern,
+ &multi_line_builder("baz", None),
+ ));
+
+ // Incorrect input & leading line
+ assert!(!wildcard_match(
+ multiline_pattern,
+ &multi_line_builder("garply", Some("QUX=quux")),
+ ));
+
+ // Incorrect input & no leading line
+ assert!(!wildcard_match(
+ multiline_pattern,
+ &multi_line_builder("garply", None),
+ ));
+ }
+
+ #[test]
+ fn test_wildcard_match_unordered_lines() {
+ // matching
+ assert!(wildcard_match(
+ concat!("[UNORDERED_START]\n", "B\n", "A\n", "[UNORDERED_END]\n"),
+ concat!("A\n", "B\n",)
+ ));
+ // different line
+ assert!(!wildcard_match(
+ concat!("[UNORDERED_START]\n", "Ba\n", "A\n", "[UNORDERED_END]\n"),
+ concat!("A\n", "B\n",)
+ ));
+ // different number of lines
+ assert!(!wildcard_match(
+ concat!(
+ "[UNORDERED_START]\n",
+ "B\n",
+ "A\n",
+ "C\n",
+ "[UNORDERED_END]\n"
+ ),
+ concat!("A\n", "B\n",)
+ ));
+ }
+
+ #[test]
+ fn max_mem_parse() {
+ const TEXT: &str = include_str!("./testdata/time.out");
+ let size = parse_max_mem(TEXT);
+
+ assert_eq!(size, Some(120380 * 1024));
+ }
+}
diff --git a/tests/util/server/src/lsp.rs b/tests/util/server/src/lsp.rs
new file mode 100644
index 000000000..6b8256fc1
--- /dev/null
+++ b/tests/util/server/src/lsp.rs
@@ -0,0 +1,1104 @@
+// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
+
+use crate::deno_exe_path;
+use crate::jsr_registry_url;
+use crate::npm_registry_url;
+use crate::PathRef;
+
+use super::TempDir;
+
+use anyhow::Result;
+use lsp_types as lsp;
+use lsp_types::ClientCapabilities;
+use lsp_types::ClientInfo;
+use lsp_types::CodeActionCapabilityResolveSupport;
+use lsp_types::CodeActionClientCapabilities;
+use lsp_types::CodeActionKindLiteralSupport;
+use lsp_types::CodeActionLiteralSupport;
+use lsp_types::CompletionClientCapabilities;
+use lsp_types::CompletionItemCapability;
+use lsp_types::FoldingRangeClientCapabilities;
+use lsp_types::InitializeParams;
+use lsp_types::TextDocumentClientCapabilities;
+use lsp_types::TextDocumentSyncClientCapabilities;
+use lsp_types::Url;
+use lsp_types::WorkspaceClientCapabilities;
+use once_cell::sync::Lazy;
+use parking_lot::Condvar;
+use parking_lot::Mutex;
+use regex::Regex;
+use serde::de;
+use serde::Deserialize;
+use serde::Serialize;
+use serde_json::json;
+use serde_json::to_value;
+use serde_json::Value;
+use std::collections::HashSet;
+use std::io;
+use std::io::BufRead;
+use std::io::BufReader;
+use std::io::Write;
+use std::path::Path;
+use std::process::Child;
+use std::process::ChildStdin;
+use std::process::ChildStdout;
+use std::process::Command;
+use std::process::Stdio;
+use std::sync::mpsc;
+use std::sync::Arc;
+use std::time::Duration;
+use std::time::Instant;
+
+static CONTENT_LENGTH_REG: Lazy<Regex> =
+ lazy_regex::lazy_regex!(r"(?i)^content-length:\s+(\d+)");
+
+#[derive(Clone, Debug, Deserialize, Serialize)]
+pub struct LspResponseError {
+ code: i32,
+ message: String,
+ data: Option<Value>,
+}
+
+#[derive(Clone, Debug)]
+pub enum LspMessage {
+ Notification(String, Option<Value>),
+ Request(u64, String, Option<Value>),
+ Response(u64, Option<Value>, Option<LspResponseError>),
+}
+
+impl<'a> From<&'a [u8]> for LspMessage {
+ fn from(s: &'a [u8]) -> Self {
+ let value: Value = serde_json::from_slice(s).unwrap();
+ let obj = value.as_object().unwrap();
+ if obj.contains_key("id") && obj.contains_key("method") {
+ let id = obj.get("id").unwrap().as_u64().unwrap();
+ let method = obj.get("method").unwrap().as_str().unwrap().to_string();
+ Self::Request(id, method, obj.get("params").cloned())
+ } else if obj.contains_key("id") {
+ let id = obj.get("id").unwrap().as_u64().unwrap();
+ let maybe_error: Option<LspResponseError> = obj
+ .get("error")
+ .map(|v| serde_json::from_value(v.clone()).unwrap());
+ Self::Response(id, obj.get("result").cloned(), maybe_error)
+ } else {
+ assert!(obj.contains_key("method"));
+ let method = obj.get("method").unwrap().as_str().unwrap().to_string();
+ Self::Notification(method, obj.get("params").cloned())
+ }
+ }
+}
+
+#[derive(Debug, Deserialize)]
+struct DiagnosticBatchNotificationParams {
+ batch_index: usize,
+ messages_len: usize,
+}
+
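+/// Reads a single LSP message framed as `Content-Length: <n>\r\n\r\n<body>`,
+/// returning the raw body bytes, or `None` on EOF.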
+fn read_message<R>(reader: &mut R) -> Result<Option<Vec<u8>>>
+where
+ R: io::Read + io::BufRead,
+{
+ let mut content_length = 0_usize;
+ loop {
+ let mut buf = String::new();
+ if reader.read_line(&mut buf)? == 0 {
+ return Ok(None);
+ }
+ if let Some(captures) = CONTENT_LENGTH_REG.captures(&buf) {
+ let content_length_match = captures
+ .get(1)
+ .ok_or_else(|| anyhow::anyhow!("missing capture"))?;
+ content_length = content_length_match.as_str().parse::<usize>()?;
+ }
+ if &buf == "\r\n" {
+ break;
+ }
+ }
+
+ let mut msg_buf = vec![0_u8; content_length];
+ reader.read_exact(&mut msg_buf)?;
+ Ok(Some(msg_buf))
+}
+
+struct LspStdoutReader {
+ pending_messages: Arc<(Mutex<Vec<LspMessage>>, Condvar)>,
+ read_messages: Vec<LspMessage>,
+}
+
+impl LspStdoutReader {
+ pub fn new(mut buf_reader: io::BufReader<ChildStdout>) -> Self {
+ let messages: Arc<(Mutex<Vec<LspMessage>>, Condvar)> = Default::default();
+ std::thread::spawn({
+ let messages = messages.clone();
+ move || {
+ while let Ok(Some(msg_buf)) = read_message(&mut buf_reader) {
+ let msg = LspMessage::from(msg_buf.as_slice());
+ let cvar = &messages.1;
+ {
+ let mut messages = messages.0.lock();
+ messages.push(msg);
+ }
+ cvar.notify_all();
+ }
+ }
+ });
+
+ LspStdoutReader {
+ pending_messages: messages,
+ read_messages: Vec::new(),
+ }
+ }
+
+ pub fn pending_len(&self) -> usize {
+ self.pending_messages.0.lock().len()
+ }
+
+ pub fn output_pending_messages(&self) {
+ let messages = self.pending_messages.0.lock();
+ eprintln!("{:?}", messages);
+ }
+
+ pub fn had_message(&self, is_match: impl Fn(&LspMessage) -> bool) -> bool {
+ self.read_messages.iter().any(&is_match)
+ || self.pending_messages.0.lock().iter().any(&is_match)
+ }
+
+ pub fn read_message<R>(
+ &mut self,
+ mut get_match: impl FnMut(&LspMessage) -> Option<R>,
+ ) -> R {
+ let (msg_queue, cvar) = &*self.pending_messages;
+ let mut msg_queue = msg_queue.lock();
+ loop {
+ for i in 0..msg_queue.len() {
+ let msg = &msg_queue[i];
+ if let Some(result) = get_match(msg) {
+ let msg = msg_queue.remove(i);
+ self.read_messages.push(msg);
+ return result;
+ }
+ }
+ cvar.wait(&mut msg_queue);
+ }
+ }
+
+ pub fn read_latest_message<R>(
+ &mut self,
+ mut get_match: impl FnMut(&LspMessage) -> Option<R>,
+ ) -> R {
+ let (msg_queue, cvar) = &*self.pending_messages;
+ let mut msg_queue = msg_queue.lock();
+ loop {
+ for i in (0..msg_queue.len()).rev() {
+ let msg = &msg_queue[i];
+ if let Some(result) = get_match(msg) {
+ let msg = msg_queue.remove(i);
+ self.read_messages.push(msg);
+ return result;
+ }
+ }
+ cvar.wait(&mut msg_queue);
+ }
+ }
+}
+
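+/// Builder for the LSP `initialize` request parameters, pre-populated with the
+/// capabilities the test client advertises. It is normally driven through
+/// `LspClient::initialize`; a hedged sketch (the option toggled is arbitrary):
+///
+/// ```ignore
+/// client.initialize(|builder| {
+///   builder.set_unstable(true);
+/// });
+/// ```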
+pub struct InitializeParamsBuilder {
+ params: InitializeParams,
+}
+
+impl InitializeParamsBuilder {
+ #[allow(clippy::new_without_default)]
+ pub fn new(config: Value) -> Self {
+ let mut config_as_options = json!({});
+ if let Some(object) = config.as_object() {
+ if let Some(deno) = object.get("deno") {
+ if let Some(deno) = deno.as_object() {
+ config_as_options = json!(deno.clone());
+ }
+ }
+ let config_as_options = config_as_options.as_object_mut().unwrap();
+ if let Some(typescript) = object.get("typescript") {
+ config_as_options.insert("typescript".to_string(), typescript.clone());
+ }
+ if let Some(javascript) = object.get("javascript") {
+ config_as_options.insert("javascript".to_string(), javascript.clone());
+ }
+ }
+ Self {
+ params: InitializeParams {
+ process_id: None,
+ client_info: Some(ClientInfo {
+ name: "test-harness".to_string(),
+ version: Some("1.0.0".to_string()),
+ }),
+ root_uri: None,
+ initialization_options: Some(config_as_options),
+ capabilities: ClientCapabilities {
+ text_document: Some(TextDocumentClientCapabilities {
+ code_action: Some(CodeActionClientCapabilities {
+ code_action_literal_support: Some(CodeActionLiteralSupport {
+ code_action_kind: CodeActionKindLiteralSupport {
+ value_set: vec![
+ "quickfix".to_string(),
+ "refactor".to_string(),
+ ],
+ },
+ }),
+ is_preferred_support: Some(true),
+ data_support: Some(true),
+ disabled_support: Some(true),
+ resolve_support: Some(CodeActionCapabilityResolveSupport {
+ properties: vec!["edit".to_string()],
+ }),
+ ..Default::default()
+ }),
+ completion: Some(CompletionClientCapabilities {
+ completion_item: Some(CompletionItemCapability {
+ snippet_support: Some(true),
+ ..Default::default()
+ }),
+ ..Default::default()
+ }),
+ folding_range: Some(FoldingRangeClientCapabilities {
+ line_folding_only: Some(true),
+ ..Default::default()
+ }),
+ synchronization: Some(TextDocumentSyncClientCapabilities {
+ dynamic_registration: Some(true),
+ will_save: Some(true),
+ will_save_wait_until: Some(true),
+ did_save: Some(true),
+ }),
+ ..Default::default()
+ }),
+ workspace: Some(WorkspaceClientCapabilities {
+ configuration: Some(true),
+ workspace_folders: Some(true),
+ ..Default::default()
+ }),
+ experimental: Some(json!({
+ "testingApi": true
+ })),
+ ..Default::default()
+ },
+ ..Default::default()
+ },
+ }
+ }
+
+ pub fn set_maybe_root_uri(&mut self, value: Option<Url>) -> &mut Self {
+ self.params.root_uri = value;
+ self
+ }
+
+ pub fn set_root_uri(&mut self, value: Url) -> &mut Self {
+ self.set_maybe_root_uri(Some(value))
+ }
+
+ pub fn set_workspace_folders(
+ &mut self,
+ folders: Vec<lsp_types::WorkspaceFolder>,
+ ) -> &mut Self {
+ self.params.workspace_folders = Some(folders);
+ self
+ }
+
+ pub fn enable_inlay_hints(&mut self) -> &mut Self {
+ let options = self.initialization_options_mut();
+ options.insert(
+ "inlayHints".to_string(),
+ json!({
+ "parameterNames": {
+ "enabled": "all"
+ },
+ "parameterTypes": {
+ "enabled": true
+ },
+ "variableTypes": {
+ "enabled": true
+ },
+ "propertyDeclarationTypes": {
+ "enabled": true
+ },
+ "functionLikeReturnTypes": {
+ "enabled": true
+ },
+ "enumMemberValues": {
+ "enabled": true
+ }
+ }),
+ );
+ self
+ }
+
+ pub fn disable_testing_api(&mut self) -> &mut Self {
+ let obj = self
+ .params
+ .capabilities
+ .experimental
+ .as_mut()
+ .unwrap()
+ .as_object_mut()
+ .unwrap();
+ obj.insert("testingApi".to_string(), false.into());
+ let options = self.initialization_options_mut();
+ options.remove("testing");
+ self
+ }
+
+ pub fn set_cache(&mut self, value: impl AsRef<str>) -> &mut Self {
+ let options = self.initialization_options_mut();
+ options.insert("cache".to_string(), value.as_ref().to_string().into());
+ self
+ }
+
+ pub fn set_code_lens(
+ &mut self,
+ value: Option<serde_json::Value>,
+ ) -> &mut Self {
+ let options = self.initialization_options_mut();
+ if let Some(value) = value {
+ options.insert("codeLens".to_string(), value);
+ } else {
+ options.remove("codeLens");
+ }
+ self
+ }
+
+ pub fn set_config(&mut self, value: impl AsRef<str>) -> &mut Self {
+ let options = self.initialization_options_mut();
+ options.insert("config".to_string(), value.as_ref().to_string().into());
+ self
+ }
+
+ pub fn set_disable_paths(&mut self, value: Vec<String>) -> &mut Self {
+ let options = self.initialization_options_mut();
+ options.insert("disablePaths".to_string(), value.into());
+ self
+ }
+
+ pub fn set_enable_paths(&mut self, value: Vec<String>) -> &mut Self {
+ let options = self.initialization_options_mut();
+ options.insert("enablePaths".to_string(), value.into());
+ self
+ }
+
+ pub fn set_deno_enable(&mut self, value: bool) -> &mut Self {
+ let options = self.initialization_options_mut();
+ options.insert("enable".to_string(), value.into());
+ self
+ }
+
+ pub fn set_import_map(&mut self, value: impl AsRef<str>) -> &mut Self {
+ let options = self.initialization_options_mut();
+ options.insert("importMap".to_string(), value.as_ref().to_string().into());
+ self
+ }
+
+ pub fn set_preload_limit(&mut self, arg: usize) -> &mut Self {
+ let options = self.initialization_options_mut();
+ options.insert("documentPreloadLimit".to_string(), arg.into());
+ self
+ }
+
+ pub fn set_tls_certificate(&mut self, value: impl AsRef<str>) -> &mut Self {
+ let options = self.initialization_options_mut();
+ options.insert(
+ "tlsCertificate".to_string(),
+ value.as_ref().to_string().into(),
+ );
+ self
+ }
+
+ pub fn set_unstable(&mut self, value: bool) -> &mut Self {
+ let options = self.initialization_options_mut();
+ options.insert("unstable".to_string(), value.into());
+ self
+ }
+
+ pub fn add_test_server_suggestions(&mut self) -> &mut Self {
+ self.set_suggest_imports_hosts(vec![(
+ "http://localhost:4545/".to_string(),
+ true,
+ )])
+ }
+
+ pub fn set_suggest_imports_hosts(
+ &mut self,
+ values: Vec<(String, bool)>,
+ ) -> &mut Self {
+ let options = self.initialization_options_mut();
+ let suggest = options.get_mut("suggest").unwrap().as_object_mut().unwrap();
+ let imports = suggest.get_mut("imports").unwrap().as_object_mut().unwrap();
+ let hosts = imports.get_mut("hosts").unwrap().as_object_mut().unwrap();
+ hosts.clear();
+ for (key, value) in values {
+ hosts.insert(key, value.into());
+ }
+ self
+ }
+
+ pub fn with_capabilities(
+ &mut self,
+ mut action: impl FnMut(&mut ClientCapabilities),
+ ) -> &mut Self {
+ action(&mut self.params.capabilities);
+ self
+ }
+
+ fn initialization_options_mut(
+ &mut self,
+ ) -> &mut serde_json::Map<String, serde_json::Value> {
+ let options = self.params.initialization_options.as_mut().unwrap();
+ options.as_object_mut().unwrap()
+ }
+
+ pub fn build(&self) -> InitializeParams {
+ self.params.clone()
+ }
+}
+
+pub struct LspClientBuilder {
+ print_stderr: bool,
+ capture_stderr: bool,
+ deno_exe: PathRef,
+ root_dir: PathRef,
+ use_diagnostic_sync: bool,
+ deno_dir: TempDir,
+}
+
+impl LspClientBuilder {
+ #[allow(clippy::new_without_default)]
+ pub fn new() -> Self {
+ Self::new_with_dir(TempDir::new())
+ }
+
+ pub fn new_with_dir(deno_dir: TempDir) -> Self {
+ Self {
+ print_stderr: false,
+ capture_stderr: false,
+ deno_exe: deno_exe_path(),
+ root_dir: deno_dir.path().clone(),
+ use_diagnostic_sync: true,
+ deno_dir,
+ }
+ }
+
+ pub fn deno_exe(mut self, exe_path: impl AsRef<Path>) -> Self {
+ self.deno_exe = PathRef::new(exe_path);
+ self
+ }
+
+ // not deprecated, this is just here so you don't accidentally
+ // commit code with this enabled
+ #[deprecated]
+ pub fn print_stderr(mut self) -> Self {
+ self.print_stderr = true;
+ self
+ }
+
+ pub fn capture_stderr(mut self) -> Self {
+ self.capture_stderr = true;
+ self
+ }
+
+ /// Whether to use the synchronization messages to better sync diagnostics
+ /// between the test client and server.
+ pub fn use_diagnostic_sync(mut self, value: bool) -> Self {
+ self.use_diagnostic_sync = value;
+ self
+ }
+
+ pub fn set_root_dir(mut self, root_dir: PathRef) -> Self {
+ self.root_dir = root_dir;
+ self
+ }
+
+ pub fn build(&self) -> LspClient {
+ self.build_result().unwrap()
+ }
+
+ pub fn build_result(&self) -> Result<LspClient> {
+ let deno_dir = self.deno_dir.clone();
+ let mut command = Command::new(&self.deno_exe);
+ command
+ .env("DENO_DIR", deno_dir.path())
+ .env("NPM_CONFIG_REGISTRY", npm_registry_url())
+ .env("JSR_URL", jsr_registry_url())
+ // turn on diagnostic synchronization communication
+ .env(
+ "DENO_DONT_USE_INTERNAL_LSP_DIAGNOSTIC_SYNC_FLAG",
+ if self.use_diagnostic_sync { "1" } else { "" },
+ )
+ .env("DENO_NO_UPDATE_CHECK", "1")
+ .arg("lsp")
+ .stdin(Stdio::piped())
+ .stdout(Stdio::piped());
+ if self.capture_stderr {
+ command.stderr(Stdio::piped());
+ } else if !self.print_stderr {
+ command.stderr(Stdio::null());
+ }
+ let mut child = command.spawn()?;
+ let stdout = child.stdout.take().unwrap();
+ let buf_reader = io::BufReader::new(stdout);
+ let reader = LspStdoutReader::new(buf_reader);
+
+ let stdin = child.stdin.take().unwrap();
+ let writer = io::BufWriter::new(stdin);
+
+ let stderr_lines_rx = if self.capture_stderr {
+ let stderr = child.stderr.take().unwrap();
+ let print_stderr = self.print_stderr;
+ let (tx, rx) = mpsc::channel::<String>();
+ std::thread::spawn(move || {
+ let stderr = BufReader::new(stderr);
+ for line in stderr.lines() {
+ match line {
+ Ok(line) => {
+ if print_stderr {
+ eprintln!("{}", line);
+ }
+ tx.send(line).unwrap();
+ }
+ Err(err) => {
+ panic!("failed to read line from stderr: {:#}", err);
+ }
+ }
+ }
+ });
+ Some(rx)
+ } else {
+ None
+ };
+
+ Ok(LspClient {
+ child,
+ reader,
+ request_id: 1,
+ start: Instant::now(),
+ root_dir: self.root_dir.clone(),
+ writer,
+ deno_dir,
+ stderr_lines_rx,
+ config: json!("{}"),
+ supports_workspace_configuration: false,
+ })
+ }
+}
+
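+/// Test harness around a spawned `deno lsp` subprocess, communicating over
+/// stdin/stdout. A minimal hedged usage sketch (the file URI and contents are
+/// hypothetical):
+///
+/// ```ignore
+/// let mut client = LspClientBuilder::new().build();
+/// client.initialize_default();
+/// let diagnostics = client.did_open(json!({
+///   "textDocument": {
+///     "uri": "file:///a/file.ts",
+///     "languageId": "typescript",
+///     "version": 1,
+///     "text": "console.log(Deno.version);"
+///   }
+/// }));
+/// client.shutdown();
+/// ```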
+pub struct LspClient {
+ child: Child,
+ reader: LspStdoutReader,
+ request_id: u64,
+ start: Instant,
+ writer: io::BufWriter<ChildStdin>,
+ deno_dir: TempDir,
+ root_dir: PathRef,
+ stderr_lines_rx: Option<mpsc::Receiver<String>>,
+ config: serde_json::Value,
+ supports_workspace_configuration: bool,
+}
+
+impl Drop for LspClient {
+ fn drop(&mut self) {
+ match self.child.try_wait() {
+ Ok(None) => {
+ self.child.kill().unwrap();
+ let _ = self.child.wait();
+ }
+ Ok(Some(status)) => panic!("deno lsp exited unexpectedly {status}"),
+ Err(e) => panic!("pebble error: {e}"),
+ }
+ }
+}
+
+impl LspClient {
+ pub fn deno_dir(&self) -> &TempDir {
+ &self.deno_dir
+ }
+
+ pub fn duration(&self) -> Duration {
+ self.start.elapsed()
+ }
+
+ pub fn queue_is_empty(&self) -> bool {
+ self.reader.pending_len() == 0
+ }
+
+ pub fn queue_len(&self) -> usize {
+ self.reader.output_pending_messages();
+ self.reader.pending_len()
+ }
+
+ #[track_caller]
+ pub fn wait_until_stderr_line(&self, condition: impl Fn(&str) -> bool) {
+ let timeout_time =
+ Instant::now().checked_add(Duration::from_secs(5)).unwrap();
+ let lines_rx = self
+ .stderr_lines_rx
+ .as_ref()
+ .expect("must setup with client_builder.capture_stderr()");
+ let mut found_lines = Vec::new();
+ while Instant::now() < timeout_time {
+ if let Ok(line) = lines_rx.try_recv() {
+ if condition(&line) {
+ return;
+ }
+ found_lines.push(line);
+ }
+ std::thread::sleep(Duration::from_millis(20));
+ }
+
+ eprintln!("==== STDERR OUTPUT ====");
+ for line in found_lines {
+ eprintln!("{}", line)
+ }
+ eprintln!("== END STDERR OUTPUT ==");
+
+ panic!("Timed out waiting on condition.")
+ }
+
+ pub fn initialize_default(&mut self) {
+ self.initialize(|_| {})
+ }
+
+ pub fn initialize(
+ &mut self,
+ do_build: impl Fn(&mut InitializeParamsBuilder),
+ ) {
+ self.initialize_with_config(
+ do_build,
+ json!({ "deno": {
+ "enable": true,
+ "cache": null,
+ "certificateStores": null,
+ "codeLens": {
+ "implementations": true,
+ "references": true,
+ "test": true,
+ },
+ "config": null,
+ "importMap": null,
+ "lint": true,
+ "suggest": {
+ "autoImports": true,
+ "completeFunctionCalls": false,
+ "names": true,
+ "paths": true,
+ "imports": {
+ "hosts": {},
+ },
+ },
+ "testing": {
+ "args": [
+ "--allow-all"
+ ],
+ "enable": true,
+ },
+ "tlsCertificate": null,
+ "unsafelyIgnoreCertificateErrors": null,
+ "unstable": false,
+ } }),
+ )
+ }
+
+ pub fn initialize_with_config(
+ &mut self,
+ do_build: impl Fn(&mut InitializeParamsBuilder),
+ mut config: Value,
+ ) {
+ let mut builder = InitializeParamsBuilder::new(config.clone());
+ builder.set_root_uri(self.root_dir.uri_dir());
+ do_build(&mut builder);
+ let params: InitializeParams = builder.build();
+ // `config` must be updated to account for the builder changes.
+ // TODO(nayeemrmn): Remove config-related methods from builder.
+ if let Some(options) = &params.initialization_options {
+ if let Some(options) = options.as_object() {
+ if let Some(config) = config.as_object_mut() {
+ let mut deno = options.clone();
+ let typescript = options.get("typescript");
+ let javascript = options.get("javascript");
+ deno.remove("typescript");
+ deno.remove("javascript");
+ config.insert("deno".to_string(), json!(deno));
+ if let Some(typescript) = typescript {
+ config.insert("typescript".to_string(), typescript.clone());
+ }
+ if let Some(javascript) = javascript {
+ config.insert("javascript".to_string(), javascript.clone());
+ }
+ }
+ }
+ }
+ self.supports_workspace_configuration = match &params.capabilities.workspace
+ {
+ Some(workspace) => workspace.configuration == Some(true),
+ _ => false,
+ };
+ self.write_request("initialize", params);
+ self.write_notification("initialized", json!({}));
+ self.config = config;
+ if self.supports_workspace_configuration {
+ self.handle_configuration_request();
+ }
+ }
+
+ pub fn did_open(&mut self, params: Value) -> CollectedDiagnostics {
+ self.did_open_raw(params);
+ self.read_diagnostics()
+ }
+
+ pub fn did_open_raw(&mut self, params: Value) {
+ self.write_notification("textDocument/didOpen", params);
+ }
+
+ pub fn change_configuration(&mut self, config: Value) {
+ self.config = config;
+ if self.supports_workspace_configuration {
+ self.write_notification(
+ "workspace/didChangeConfiguration",
+ json!({ "settings": {} }),
+ );
+ self.handle_configuration_request();
+ } else {
+ self.write_notification(
+ "workspace/didChangeConfiguration",
+ json!({ "settings": &self.config }),
+ );
+ }
+ }
+
+ pub fn handle_configuration_request(&mut self) {
+ let (id, method, args) = self.read_request::<Value>();
+ assert_eq!(method, "workspace/configuration");
+ let params = args.as_ref().unwrap().as_object().unwrap();
+ let items = params.get("items").unwrap().as_array().unwrap();
+ let config_object = self.config.as_object().unwrap();
+ let mut result = vec![];
+ for item in items {
+ let item = item.as_object().unwrap();
+ let section = item.get("section").unwrap().as_str().unwrap();
+ result.push(config_object.get(section).cloned().unwrap_or_default());
+ }
+ self.write_response(id, result);
+ }
+
+ pub fn did_save(&mut self, params: Value) {
+ self.write_notification("textDocument/didSave", params);
+ }
+
+ pub fn did_change_watched_files(&mut self, params: Value) {
+ self.write_notification("workspace/didChangeWatchedFiles", params);
+ }
+
+ fn get_latest_diagnostic_batch_index(&mut self) -> usize {
+ let result = self
+ .write_request("deno/internalLatestDiagnosticBatchIndex", json!(null));
+ result.as_u64().unwrap() as usize
+ }
+
+ /// Reads the latest diagnostics. It's assumed that diagnostic
+ /// synchronization (see `use_diagnostic_sync`) is enabled.
+ pub fn read_diagnostics(&mut self) -> CollectedDiagnostics {
+ // wait for three (deno, lint, and typescript diagnostics) batch
+ // notification messages for that index
+ let mut read = 0;
+ let mut total_messages_len = 0;
+ while read < 3 {
+ let (method, response) =
+ self.read_notification::<DiagnosticBatchNotificationParams>();
+ assert_eq!(method, "deno/internalTestDiagnosticBatch");
+ let response = response.unwrap();
+ if response.batch_index == self.get_latest_diagnostic_batch_index() {
+ read += 1;
+ total_messages_len += response.messages_len;
+ }
+ }
+
+ // now read the latest diagnostic messages
+ let mut all_diagnostics = Vec::with_capacity(total_messages_len);
+ let mut seen_files = HashSet::new();
+ for _ in 0..total_messages_len {
+ let (method, response) =
+ self.read_latest_notification::<lsp::PublishDiagnosticsParams>();
+ assert_eq!(method, "textDocument/publishDiagnostics");
+ let response = response.unwrap();
+ if seen_files.insert(response.uri.to_string()) {
+ all_diagnostics.push(response);
+ }
+ }
+
+ CollectedDiagnostics(all_diagnostics)
+ }
+
+ pub fn shutdown(&mut self) {
+ self.write_request("shutdown", json!(null));
+ self.write_notification("exit", json!(null));
+ }
+
+ // It's flaky to assert that a notification was received because it
+ // might arrive a little later, so we only provide a method for asserting
+ // that there is no notification.
+ pub fn assert_no_notification(&mut self, searching_method: &str) {
+ assert!(!self.reader.had_message(|message| match message {
+ LspMessage::Notification(method, _) => method == searching_method,
+ _ => false,
+ }))
+ }
+
+ pub fn read_notification<R>(&mut self) -> (String, Option<R>)
+ where
+ R: de::DeserializeOwned,
+ {
+ self.reader.read_message(|msg| match msg {
+ LspMessage::Notification(method, maybe_params) => {
+ let params = serde_json::from_value(maybe_params.clone()?).ok()?;
+ Some((method.to_string(), params))
+ }
+ _ => None,
+ })
+ }
+
+ pub fn read_latest_notification<R>(&mut self) -> (String, Option<R>)
+ where
+ R: de::DeserializeOwned,
+ {
+ self.reader.read_latest_message(|msg| match msg {
+ LspMessage::Notification(method, maybe_params) => {
+ let params = serde_json::from_value(maybe_params.clone()?).ok()?;
+ Some((method.to_string(), params))
+ }
+ _ => None,
+ })
+ }
+
+ pub fn read_notification_with_method<R>(
+ &mut self,
+ expected_method: &str,
+ ) -> Option<R>
+ where
+ R: de::DeserializeOwned,
+ {
+ self.reader.read_message(|msg| match msg {
+ LspMessage::Notification(method, maybe_params) => {
+ if method != expected_method {
+ None
+ } else {
+ serde_json::from_value(maybe_params.clone()?).ok()
+ }
+ }
+ _ => None,
+ })
+ }
+
+ pub fn read_request<R>(&mut self) -> (u64, String, Option<R>)
+ where
+ R: de::DeserializeOwned,
+ {
+ self.reader.read_message(|msg| match msg {
+ LspMessage::Request(id, method, maybe_params) => Some((
+ *id,
+ method.to_owned(),
+ maybe_params
+ .clone()
+ .map(|p| serde_json::from_value(p).unwrap()),
+ )),
+ _ => None,
+ })
+ }
+
+ fn write(&mut self, value: Value) {
+ let value_str = value.to_string();
+ let msg = format!(
+ "Content-Length: {}\r\n\r\n{}",
+ value_str.as_bytes().len(),
+ value_str
+ );
+ self.writer.write_all(msg.as_bytes()).unwrap();
+ self.writer.flush().unwrap();
+ }
+
+ pub fn get_completion(
+ &mut self,
+ uri: impl AsRef<str>,
+ position: (usize, usize),
+ context: Value,
+ ) -> lsp::CompletionResponse {
+ self.write_request_with_res_as::<lsp::CompletionResponse>(
+ "textDocument/completion",
+ json!({
+ "textDocument": {
+ "uri": uri.as_ref(),
+ },
+ "position": { "line": position.0, "character": position.1 },
+ "context": context,
+ }),
+ )
+ }
+
+ pub fn get_completion_list(
+ &mut self,
+ uri: impl AsRef<str>,
+ position: (usize, usize),
+ context: Value,
+ ) -> lsp::CompletionList {
+ let res = self.get_completion(uri, position, context);
+ if let lsp::CompletionResponse::List(list) = res {
+ list
+ } else {
+ panic!("unexpected response");
+ }
+ }
+
+ pub fn write_request_with_res_as<R>(
+ &mut self,
+ method: impl AsRef<str>,
+ params: impl Serialize,
+ ) -> R
+ where
+ R: de::DeserializeOwned,
+ {
+ let result = self.write_request(method, params);
+ serde_json::from_value(result).unwrap()
+ }
+
+ pub fn write_request(
+ &mut self,
+ method: impl AsRef<str>,
+ params: impl Serialize,
+ ) -> Value {
+ let value = if to_value(&params).unwrap().is_null() {
+ json!({
+ "jsonrpc": "2.0",
+ "id": self.request_id,
+ "method": method.as_ref(),
+ })
+ } else {
+ json!({
+ "jsonrpc": "2.0",
+ "id": self.request_id,
+ "method": method.as_ref(),
+ "params": params,
+ })
+ };
+ self.write(value);
+
+ self.reader.read_message(|msg| match msg {
+ LspMessage::Response(id, maybe_result, maybe_error) => {
+ assert_eq!(*id, self.request_id);
+ self.request_id += 1;
+ if let Some(error) = maybe_error {
+ panic!("LSP ERROR: {error:?}");
+ }
+ Some(maybe_result.clone().unwrap())
+ }
+ _ => None,
+ })
+ }
+
+ pub fn write_response<V>(&mut self, id: u64, result: V)
+ where
+ V: Serialize,
+ {
+ let value = json!({
+ "jsonrpc": "2.0",
+ "id": id,
+ "result": result
+ });
+ self.write(value);
+ }
+
+ pub fn write_notification<S, V>(&mut self, method: S, params: V)
+ where
+ S: AsRef<str>,
+ V: Serialize,
+ {
+ let value = json!({
+ "jsonrpc": "2.0",
+ "method": method.as_ref(),
+ "params": params,
+ });
+ self.write(value);
+ }
+}
+
+#[derive(Debug, Clone)]
+pub struct CollectedDiagnostics(Vec<lsp::PublishDiagnosticsParams>);
+
+impl CollectedDiagnostics {
+ /// Gets the diagnostics that the editor will see after all the publishes.
+ pub fn all(&self) -> Vec<lsp::Diagnostic> {
+ self
+ .all_messages()
+ .into_iter()
+ .flat_map(|m| m.diagnostics)
+ .collect()
+ }
+
+ /// Gets the messages that the editor will see after all the publishes.
+ pub fn all_messages(&self) -> Vec<lsp::PublishDiagnosticsParams> {
+ self.0.clone()
+ }
+
+ pub fn messages_with_source(
+ &self,
+ source: &str,
+ ) -> lsp::PublishDiagnosticsParams {
+ self
+ .all_messages()
+ .iter()
+ .find(|p| {
+ p.diagnostics
+ .iter()
+ .any(|d| d.source == Some(source.to_string()))
+ })
+ .map(ToOwned::to_owned)
+ .unwrap()
+ }
+
+ #[track_caller]
+ pub fn messages_with_file_and_source(
+ &self,
+ specifier: &str,
+ source: &str,
+ ) -> lsp::PublishDiagnosticsParams {
+ let specifier = Url::parse(specifier).unwrap();
+ self
+ .all_messages()
+ .iter()
+ .find(|p| {
+ p.uri == specifier
+ && p
+ .diagnostics
+ .iter()
+ .any(|d| d.source == Some(source.to_string()))
+ })
+ .map(ToOwned::to_owned)
+ .unwrap()
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn test_read_message() {
+ let msg1 = b"content-length: 11\r\n\r\nhello world";
+ let mut reader1 = std::io::Cursor::new(msg1);
+ assert_eq!(read_message(&mut reader1).unwrap().unwrap(), b"hello world");
+
+ let msg2 = b"content-length: 5\r\n\r\nhello world";
+ let mut reader2 = std::io::Cursor::new(msg2);
+ assert_eq!(read_message(&mut reader2).unwrap().unwrap(), b"hello");
+ }
+
+ #[test]
+ #[should_panic(expected = "failed to fill whole buffer")]
+ fn test_invalid_read_message() {
+ let msg1 = b"content-length: 12\r\n\r\nhello world";
+ let mut reader1 = std::io::Cursor::new(msg1);
+ read_message(&mut reader1).unwrap();
+ }
+}
diff --git a/tests/util/server/src/macros.rs b/tests/util/server/src/macros.rs
new file mode 100644
index 000000000..7cfedcc7e
--- /dev/null
+++ b/tests/util/server/src/macros.rs
@@ -0,0 +1,86 @@
+// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
+
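+/// Declares a `#[test]` that runs a `CheckOutputIntegrationTest` built from
+/// the given fields. A hedged sketch (the args and output file below are
+/// hypothetical):
+///
+/// ```ignore
+/// itest!(hello_world {
+///   args: "run --quiet hello.ts",
+///   output: "hello.ts.out",
+/// });
+/// ```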
+#[macro_export]
+macro_rules! itest(
+($name:ident {$( $key:ident: $value:expr,)*}) => {
+ #[test]
+ fn $name() {
+ let test = $crate::CheckOutputIntegrationTest {
+ $(
+ $key: $value,
+ )*
+ .. Default::default()
+ };
+ let output = test.output();
+ output.assert_exit_code(test.exit_code);
+ if !test.output.is_empty() {
+ assert!(test.output_str.is_none());
+ output.assert_matches_file(test.output);
+ } else {
+ output.assert_matches_text(test.output_str.unwrap_or(""));
+ }
+ }
+}
+);
+
+#[macro_export]
+macro_rules! itest_flaky(
+($name:ident {$( $key:ident: $value:expr,)*}) => {
+ #[flaky_test::flaky_test]
+ fn $name() {
+ let test = $crate::CheckOutputIntegrationTest {
+ $(
+ $key: $value,
+ )*
+ .. Default::default()
+ };
+ let output = test.output();
+ output.assert_exit_code(test.exit_code);
+ if !test.output.is_empty() {
+ assert!(test.output_str.is_none());
+ output.assert_matches_file(test.output);
+ } else {
+ output.assert_matches_text(test.output_str.unwrap_or(""));
+ }
+ }
+}
+);
+
+#[macro_export]
+macro_rules! context(
+({$( $key:ident: $value:expr,)*}) => {
+ $crate::TestContext::create($crate::TestContextOptions {
+ $(
+ $key: $value,
+ )*
+ .. Default::default()
+ })
+}
+);
+
+#[macro_export]
+macro_rules! itest_steps(
+($name:ident {$( $key:ident: $value:expr,)*}) => {
+ #[test]
+ fn $name() {
+ ($crate::CheckOutputIntegrationTestSteps {
+ $(
+ $key: $value,
+ )*
+ .. Default::default()
+ }).run()
+ }
+}
+);
+
+#[macro_export]
+macro_rules! command_step(
+({$( $key:ident: $value:expr,)*}) => {
+ $crate::CheckOutputIntegrationTestCommandStep {
+ $(
+ $key: $value,
+ )*
+ .. Default::default()
+ }
+}
+);
diff --git a/tests/util/server/src/npm.rs b/tests/util/server/src/npm.rs
new file mode 100644
index 000000000..7469e9b9e
--- /dev/null
+++ b/tests/util/server/src/npm.rs
@@ -0,0 +1,179 @@
+// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
+
+use std::collections::HashMap;
+use std::fs;
+
+use anyhow::Context;
+use anyhow::Result;
+use base64::prelude::BASE64_STANDARD;
+use base64::Engine;
+use flate2::write::GzEncoder;
+use flate2::Compression;
+use once_cell::sync::Lazy;
+use parking_lot::Mutex;
+use tar::Builder;
+
+use crate::testdata_path;
+
+pub static CUSTOM_NPM_PACKAGE_CACHE: Lazy<CustomNpmPackageCache> =
+ Lazy::new(CustomNpmPackageCache::default);
+
+struct CustomNpmPackage {
+ pub registry_file: String,
+ pub tarballs: HashMap<String, Vec<u8>>,
+}
+
+/// Creates tarballs and a registry json file for npm packages
+/// in the `testdata/npm/registry/@denotest` directory.
+#[derive(Default)]
+pub struct CustomNpmPackageCache(Mutex<HashMap<String, CustomNpmPackage>>);
+
+impl CustomNpmPackageCache {
+ pub fn tarball_bytes(
+ &self,
+ name: &str,
+ version: &str,
+ ) -> Result<Option<Vec<u8>>> {
+ Ok(
+ self
+ .get_package_property(name, |p| p.tarballs.get(version).cloned())?
+ .flatten(),
+ )
+ }
+
+ pub fn registry_file(&self, name: &str) -> Result<Option<Vec<u8>>> {
+ self.get_package_property(name, |p| p.registry_file.as_bytes().to_vec())
+ }
+
+ fn get_package_property<TResult>(
+ &self,
+ package_name: &str,
+ func: impl FnOnce(&CustomNpmPackage) -> TResult,
+ ) -> Result<Option<TResult>> {
+    // it's ok if multiple threads race here; at worst they duplicate the same work
+ if !self.0.lock().contains_key(package_name) {
+ match get_npm_package(package_name)? {
+ Some(package) => {
+ self.0.lock().insert(package_name.to_string(), package);
+ }
+ None => return Ok(None),
+ }
+ }
+ Ok(self.0.lock().get(package_name).map(func))
+ }
+}
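+
+// Usage sketch (hedged): the package name and version below are illustrative;
+// a lookup only returns `Some` when a matching folder exists under
+// `testdata/npm/registry/`.
+//
+// let tarball = CUSTOM_NPM_PACKAGE_CACHE.tarball_bytes("@denotest/example", "1.0.0")?;
+// let registry_json = CUSTOM_NPM_PACKAGE_CACHE.registry_file("@denotest/example")?;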
+
+fn get_npm_package(package_name: &str) -> Result<Option<CustomNpmPackage>> {
+ let package_folder = testdata_path().join("npm/registry").join(package_name);
+ if !package_folder.exists() {
+ return Ok(None);
+ }
+
+ // read all the package's versions
+ let mut tarballs = HashMap::new();
+ let mut versions = serde_json::Map::new();
+ let mut latest_version = semver::Version::parse("0.0.0").unwrap();
+ for entry in fs::read_dir(&package_folder)? {
+ let entry = entry?;
+ let file_type = entry.file_type()?;
+ if !file_type.is_dir() {
+ continue;
+ }
+ let version = entry.file_name().to_string_lossy().to_string();
+ let version_folder = package_folder.join(&version);
+
+ // create the tarball
+ let mut tarball_bytes = Vec::new();
+ {
+ let mut encoder =
+ GzEncoder::new(&mut tarball_bytes, Compression::default());
+ {
+ let mut builder = Builder::new(&mut encoder);
+ builder
+ .append_dir_all("package", &version_folder)
+ .with_context(|| {
+ format!("Error adding tarball for directory: {}", version_folder)
+ })?;
+ builder.finish()?;
+ }
+ encoder.finish()?;
+ }
+
+ // get tarball hash
+ let tarball_checksum = get_tarball_checksum(&tarball_bytes);
+
+ // create the registry file JSON for this version
+ let mut dist = serde_json::Map::new();
+ dist.insert(
+ "integrity".to_string(),
+ format!("sha512-{tarball_checksum}").into(),
+ );
+ dist.insert("shasum".to_string(), "dummy-value".into());
+ dist.insert(
+ "tarball".to_string(),
+ format!(
+ "http://localhost:4545/npm/registry/{package_name}/{version}.tgz"
+ )
+ .into(),
+ );
+
+ tarballs.insert(version.clone(), tarball_bytes);
+ let package_json_path = version_folder.join("package.json");
+ let package_json_text = fs::read_to_string(&package_json_path)
+ .with_context(|| {
+ format!("Error reading package.json at {}", package_json_path)
+ })?;
+ let mut version_info: serde_json::Map<String, serde_json::Value> =
+ serde_json::from_str(&package_json_text)?;
+ version_info.insert("dist".to_string(), dist.into());
+
+ if let Some(maybe_optional_deps) = version_info.get("optionalDependencies")
+ {
+ if let Some(optional_deps) = maybe_optional_deps.as_object() {
+ if let Some(maybe_deps) = version_info.get("dependencies") {
+ if let Some(deps) = maybe_deps.as_object() {
+ let mut cloned_deps = deps.to_owned();
+ for (key, value) in optional_deps {
+ cloned_deps.insert(key.to_string(), value.to_owned());
+ }
+ version_info.insert(
+ "dependencies".to_string(),
+ serde_json::to_value(cloned_deps).unwrap(),
+ );
+ }
+ } else {
+ version_info.insert(
+ "dependencies".to_string(),
+ serde_json::to_value(optional_deps).unwrap(),
+ );
+ }
+ }
+ }
+
+ versions.insert(version.clone(), version_info.into());
+ let version = semver::Version::parse(&version)?;
+ if version.cmp(&latest_version).is_gt() {
+ latest_version = version;
+ }
+ }
+
+ let mut dist_tags = serde_json::Map::new();
+ dist_tags.insert("latest".to_string(), latest_version.to_string().into());
+
+ // create the registry file for this package
+ let mut registry_file = serde_json::Map::new();
+ registry_file.insert("name".to_string(), package_name.to_string().into());
+ registry_file.insert("versions".to_string(), versions.into());
+ registry_file.insert("dist-tags".to_string(), dist_tags.into());
+ Ok(Some(CustomNpmPackage {
+ registry_file: serde_json::to_string(&registry_file).unwrap(),
+ tarballs,
+ }))
+}
+
+fn get_tarball_checksum(bytes: &[u8]) -> String {
+ use sha2::Digest;
+ let mut hasher = sha2::Sha512::new();
+ hasher.update(bytes);
+ BASE64_STANDARD.encode(hasher.finalize())
+}
diff --git a/tests/util/server/src/pty.rs b/tests/util/server/src/pty.rs
new file mode 100644
index 000000000..3e3331b84
--- /dev/null
+++ b/tests/util/server/src/pty.rs
@@ -0,0 +1,770 @@
+// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
+
+use std::borrow::Cow;
+use std::collections::HashMap;
+use std::collections::HashSet;
+use std::io::Read;
+use std::io::Write;
+use std::path::Path;
+use std::time::Duration;
+use std::time::Instant;
+
+use crate::strip_ansi_codes;
+
+/// Things to keep in mind when writing pty tests:
+///
+/// - Consecutive writes cause issues where you might write while a prompt
+///   is not showing. So when you write, always `.expect(...)` on the output.
+/// - Similar to the last point, using `.expect(...)` can help make the test
+///   more deterministic. If the test is flaky, try adding more `.expect(...)`s.
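+///
+/// A minimal sketch of the intended flow (the program handle, working
+/// directory, input and expected output here are illustrative):
+///
+/// ```ignore
+/// let mut pty = Pty::new(&deno_exe, &["repl"], &cwd, None);
+/// pty.write_line("1 + 1");
+/// pty.expect("2");
+/// ```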
+pub struct Pty {
+ pty: Box<dyn SystemPty>,
+ read_bytes: Vec<u8>,
+ last_index: usize,
+}
+
+impl Pty {
+ pub fn new(
+ program: &Path,
+ args: &[&str],
+ cwd: &Path,
+ env_vars: Option<HashMap<String, String>>,
+ ) -> Self {
+ let pty = create_pty(program, args, cwd, env_vars);
+ let mut pty = Self {
+ pty,
+ read_bytes: Vec::new(),
+ last_index: 0,
+ };
+ if args.is_empty() || args[0] == "repl" && !args.contains(&"--quiet") {
+ // wait for the repl to start up before writing to it
+ pty.read_until_condition_with_timeout(
+ |pty| {
+ pty
+ .all_output()
+ .contains("exit using ctrl+d, ctrl+c, or close()")
+ },
+        // it sometimes takes a while to start up on the CI, so use a longer timeout
+ Duration::from_secs(60),
+ );
+ }
+
+ pty
+ }
+
+ pub fn is_supported() -> bool {
+ let is_windows = cfg!(windows);
+ if is_windows && std::env::var("CI").is_ok() {
+ // the pty tests don't really start up on the windows CI for some reason
+ // so ignore them for now
+ eprintln!("Ignoring windows CI.");
+ false
+ } else {
+ true
+ }
+ }
+
+ #[track_caller]
+ pub fn write_raw(&mut self, line: impl AsRef<str>) {
+ let line = if cfg!(windows) {
+ line.as_ref().replace('\n', "\r\n")
+ } else {
+ line.as_ref().to_string()
+ };
+ if let Err(err) = self.pty.write(line.as_bytes()) {
+ panic!("{:#}", err)
+ }
+ self.pty.flush().unwrap();
+ }
+
+ #[track_caller]
+ pub fn write_line(&mut self, line: impl AsRef<str>) {
+ self.write_line_raw(&line);
+
+ // expect what was written to show up in the output
+ // due to "pty echo"
+ for line in line.as_ref().lines() {
+ self.expect(line);
+ }
+ }
+
+ /// Writes a line without checking if it's in the output.
+ #[track_caller]
+ pub fn write_line_raw(&mut self, line: impl AsRef<str>) {
+ self.write_raw(format!("{}\n", line.as_ref()));
+ }
+
+ #[track_caller]
+ pub fn read_until(&mut self, end_text: impl AsRef<str>) -> String {
+ self.read_until_with_advancing(|text| {
+ text
+ .find(end_text.as_ref())
+ .map(|index| index + end_text.as_ref().len())
+ })
+ }
+
+ #[track_caller]
+ pub fn expect(&mut self, text: impl AsRef<str>) {
+ self.read_until(text.as_ref());
+ }
+
+ #[track_caller]
+ pub fn expect_any(&mut self, texts: &[&str]) {
+ self.read_until_with_advancing(|text| {
+ for find_text in texts {
+ if let Some(index) = text.find(find_text) {
+ return Some(index);
+ }
+ }
+ None
+ });
+ }
+
+  /// Consumes output, expecting to find all of the given texts before a timeout is hit.
+ #[track_caller]
+ pub fn expect_all(&mut self, texts: &[&str]) {
+ let mut pending_texts: HashSet<&&str> = HashSet::from_iter(texts);
+ let mut max_index: Option<usize> = None;
+ self.read_until_with_advancing(|text| {
+ for pending_text in pending_texts.clone() {
+ if let Some(index) = text.find(pending_text) {
+ let index = index + pending_text.len();
+ match &max_index {
+ Some(current) => {
+ if *current < index {
+ max_index = Some(index);
+ }
+ }
+ None => {
+ max_index = Some(index);
+ }
+ }
+ pending_texts.remove(pending_text);
+ }
+ }
+ if pending_texts.is_empty() {
+ max_index
+ } else {
+ None
+ }
+ });
+ }
+
+ /// Expects the raw text to be found, which may include ANSI codes.
+ /// Note: this expects the raw bytes in any output that has already
+ /// occurred or may occur within the next few seconds.
+ #[track_caller]
+ pub fn expect_raw_in_current_output(&mut self, text: impl AsRef<str>) {
+ self.read_until_condition(|pty| {
+ let data = String::from_utf8_lossy(&pty.read_bytes);
+ data.contains(text.as_ref())
+ });
+ }
+
+ pub fn all_output(&self) -> Cow<str> {
+ String::from_utf8_lossy(&self.read_bytes)
+ }
+
+ #[track_caller]
+ fn read_until_with_advancing(
+ &mut self,
+ mut condition: impl FnMut(&str) -> Option<usize>,
+ ) -> String {
+ let mut final_text = String::new();
+ self.read_until_condition(|pty| {
+ let text = pty.next_text();
+ if let Some(end_index) = condition(&text) {
+ pty.last_index += end_index;
+ final_text = text[..end_index].to_string();
+ true
+ } else {
+ false
+ }
+ });
+ final_text
+ }
+
+ #[track_caller]
+ fn read_until_condition(&mut self, condition: impl FnMut(&mut Self) -> bool) {
+ self.read_until_condition_with_timeout(condition, Duration::from_secs(15));
+ }
+
+ #[track_caller]
+ fn read_until_condition_with_timeout(
+ &mut self,
+ condition: impl FnMut(&mut Self) -> bool,
+ timeout_duration: Duration,
+ ) {
+ if self.try_read_until_condition_with_timeout(condition, timeout_duration) {
+ return;
+ }
+
+ panic!("Timed out.")
+ }
+
+  /// Reads until the specified condition is met or the timeout duration
+  /// elapses, returning `true` on success or `false` on timeout.
+ fn try_read_until_condition_with_timeout(
+ &mut self,
+ mut condition: impl FnMut(&mut Self) -> bool,
+ timeout_duration: Duration,
+ ) -> bool {
+ let timeout_time = Instant::now().checked_add(timeout_duration).unwrap();
+ while Instant::now() < timeout_time {
+ self.fill_more_bytes();
+ if condition(self) {
+ return true;
+ }
+ }
+
+ let text = self.next_text();
+ eprintln!(
+ "------ Start Full Text ------\n{:?}\n------- End Full Text -------",
+ String::from_utf8_lossy(&self.read_bytes)
+ );
+ eprintln!("Next text: {:?}", text);
+
+ false
+ }
+
+ fn next_text(&self) -> String {
+ let text = String::from_utf8_lossy(&self.read_bytes).to_string();
+ let text = strip_ansi_codes(&text);
+ text[self.last_index..].to_string()
+ }
+
+ fn fill_more_bytes(&mut self) {
+ let mut buf = [0; 256];
+ match self.pty.read(&mut buf) {
+ Ok(count) if count > 0 => {
+ self.read_bytes.extend(&buf[..count]);
+ }
+ _ => {
+ std::thread::sleep(Duration::from_millis(15));
+ }
+ }
+ }
+}
+
+trait SystemPty: Read + Write {}
+
+impl SystemPty for std::fs::File {}
+
+#[cfg(unix)]
+fn setup_pty(fd: i32) {
+ use nix::fcntl::fcntl;
+ use nix::fcntl::FcntlArg;
+ use nix::fcntl::OFlag;
+ use nix::sys::termios;
+ use nix::sys::termios::tcgetattr;
+ use nix::sys::termios::tcsetattr;
+ use nix::sys::termios::SetArg;
+
+ let mut term = tcgetattr(fd).unwrap();
+ // disable cooked mode
+ term.local_flags.remove(termios::LocalFlags::ICANON);
+ tcsetattr(fd, SetArg::TCSANOW, &term).unwrap();
+
+ // turn on non-blocking mode so we get timeouts
+ let flags = fcntl(fd, FcntlArg::F_GETFL).unwrap();
+ let new_flags = OFlag::from_bits_truncate(flags) | OFlag::O_NONBLOCK;
+ fcntl(fd, FcntlArg::F_SETFL(new_flags)).unwrap();
+}
+
+#[cfg(unix)]
+fn create_pty(
+ program: &Path,
+ args: &[&str],
+ cwd: &Path,
+ env_vars: Option<HashMap<String, String>>,
+) -> Box<dyn SystemPty> {
+ use crate::pty::unix::UnixPty;
+ use std::os::unix::process::CommandExt;
+
+ // Manually open pty main/secondary sides in the test process. Since we're not actually
+ // changing uid/gid here, this is the easiest way to do it.
+
+ // SAFETY: Posix APIs
+ let (fdm, fds) = unsafe {
+ let fdm = libc::posix_openpt(libc::O_RDWR);
+ if fdm < 0 {
+ panic!("posix_openpt failed");
+ }
+ let res = libc::grantpt(fdm);
+ if res != 0 {
+ panic!("grantpt failed");
+ }
+ let res = libc::unlockpt(fdm);
+ if res != 0 {
+ panic!("unlockpt failed");
+ }
+ let fds = libc::open(libc::ptsname(fdm), libc::O_RDWR);
+    if fds < 0 {
+ panic!("open(ptsname) failed");
+ }
+ (fdm, fds)
+ };
+
+ // SAFETY: Posix APIs
+ unsafe {
+ let cmd = std::process::Command::new(program)
+ .current_dir(cwd)
+ .args(args)
+ .envs(env_vars.unwrap_or_default())
+ .pre_exec(move || {
+ // Close parent's main handle
+ libc::close(fdm);
+ libc::dup2(fds, 0);
+ libc::dup2(fds, 1);
+ libc::dup2(fds, 2);
+ // Note that we could close `fds` here as well, but this is a short-lived process and
+        // we're just not going to worry about "leaking" it.
+ Ok(())
+ })
+ .spawn()
+ .unwrap();
+
+ // Close child's secondary handle
+ libc::close(fds);
+ setup_pty(fdm);
+
+ use std::os::fd::FromRawFd;
+ let pid = nix::unistd::Pid::from_raw(cmd.id() as _);
+ let file = std::fs::File::from_raw_fd(fdm);
+ Box::new(UnixPty { pid, file })
+ }
+}
+
+#[cfg(unix)]
+mod unix {
+ use std::io::Read;
+ use std::io::Write;
+
+ use super::SystemPty;
+
+ pub struct UnixPty {
+ pub pid: nix::unistd::Pid,
+ pub file: std::fs::File,
+ }
+
+ impl Drop for UnixPty {
+ fn drop(&mut self) {
+ use nix::sys::signal::kill;
+ use nix::sys::signal::Signal;
+ kill(self.pid, Signal::SIGTERM).unwrap()
+ }
+ }
+
+ impl SystemPty for UnixPty {}
+
+ impl Read for UnixPty {
+ fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
+ self.file.read(buf)
+ }
+ }
+
+ impl Write for UnixPty {
+ fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
+ self.file.write(buf)
+ }
+
+ fn flush(&mut self) -> std::io::Result<()> {
+ self.file.flush()
+ }
+ }
+}
+
+#[cfg(target_os = "windows")]
+fn create_pty(
+ program: &Path,
+ args: &[&str],
+ cwd: &Path,
+ env_vars: Option<HashMap<String, String>>,
+) -> Box<dyn SystemPty> {
+ let pty = windows::WinPseudoConsole::new(program, args, cwd, env_vars);
+ Box::new(pty)
+}
+
+#[cfg(target_os = "windows")]
+mod windows {
+ use std::collections::HashMap;
+ use std::io::ErrorKind;
+ use std::io::Read;
+ use std::path::Path;
+ use std::ptr;
+ use std::time::Duration;
+
+ use winapi::shared::minwindef::FALSE;
+ use winapi::shared::minwindef::LPVOID;
+ use winapi::shared::minwindef::TRUE;
+ use winapi::shared::winerror::S_OK;
+ use winapi::um::consoleapi::ClosePseudoConsole;
+ use winapi::um::consoleapi::CreatePseudoConsole;
+ use winapi::um::fileapi::FlushFileBuffers;
+ use winapi::um::fileapi::ReadFile;
+ use winapi::um::fileapi::WriteFile;
+ use winapi::um::handleapi::DuplicateHandle;
+ use winapi::um::handleapi::INVALID_HANDLE_VALUE;
+ use winapi::um::namedpipeapi::CreatePipe;
+ use winapi::um::namedpipeapi::PeekNamedPipe;
+ use winapi::um::processthreadsapi::CreateProcessW;
+ use winapi::um::processthreadsapi::DeleteProcThreadAttributeList;
+ use winapi::um::processthreadsapi::GetCurrentProcess;
+ use winapi::um::processthreadsapi::InitializeProcThreadAttributeList;
+ use winapi::um::processthreadsapi::UpdateProcThreadAttribute;
+ use winapi::um::processthreadsapi::LPPROC_THREAD_ATTRIBUTE_LIST;
+ use winapi::um::processthreadsapi::PROCESS_INFORMATION;
+ use winapi::um::synchapi::WaitForSingleObject;
+ use winapi::um::winbase::CREATE_UNICODE_ENVIRONMENT;
+ use winapi::um::winbase::EXTENDED_STARTUPINFO_PRESENT;
+ use winapi::um::winbase::INFINITE;
+ use winapi::um::winbase::STARTUPINFOEXW;
+ use winapi::um::wincontypes::COORD;
+ use winapi::um::wincontypes::HPCON;
+ use winapi::um::winnt::DUPLICATE_SAME_ACCESS;
+ use winapi::um::winnt::HANDLE;
+
+ use super::SystemPty;
+
+ macro_rules! assert_win_success {
+ ($expression:expr) => {
+ let success = $expression;
+ if success != TRUE {
+ panic!("{}", std::io::Error::last_os_error().to_string())
+ }
+ };
+ }
+
+ macro_rules! handle_err {
+ ($expression:expr) => {
+ let success = $expression;
+ if success != TRUE {
+ return Err(std::io::Error::last_os_error());
+ }
+ };
+ }
+
+ pub struct WinPseudoConsole {
+ stdin_write_handle: WinHandle,
+ stdout_read_handle: WinHandle,
+ // keep these alive for the duration of the pseudo console
+ _process_handle: WinHandle,
+ _thread_handle: WinHandle,
+ _attribute_list: ProcThreadAttributeList,
+ }
+
+ impl WinPseudoConsole {
+ pub fn new(
+ program: &Path,
+ args: &[&str],
+ cwd: &Path,
+ maybe_env_vars: Option<HashMap<String, String>>,
+ ) -> Self {
+ // https://docs.microsoft.com/en-us/windows/console/creating-a-pseudoconsole-session
+ // SAFETY:
+ // Generous use of winapi to create a PTY (thus large unsafe block).
+ unsafe {
+ let mut size: COORD = std::mem::zeroed();
+ size.X = 800;
+ size.Y = 500;
+ let mut console_handle = std::ptr::null_mut();
+ let (stdin_read_handle, stdin_write_handle) = create_pipe();
+ let (stdout_read_handle, stdout_write_handle) = create_pipe();
+
+ let result = CreatePseudoConsole(
+ size,
+ stdin_read_handle.as_raw_handle(),
+ stdout_write_handle.as_raw_handle(),
+ 0,
+ &mut console_handle,
+ );
+ assert_eq!(result, S_OK);
+
+ let mut environment_vars = maybe_env_vars.map(get_env_vars);
+ let mut attribute_list = ProcThreadAttributeList::new(console_handle);
+ let mut startup_info: STARTUPINFOEXW = std::mem::zeroed();
+ startup_info.StartupInfo.cb =
+ std::mem::size_of::<STARTUPINFOEXW>() as u32;
+ startup_info.lpAttributeList = attribute_list.as_mut_ptr();
+
+ let mut proc_info: PROCESS_INFORMATION = std::mem::zeroed();
+ let command = format!(
+ "\"{}\" {}",
+ program.to_string_lossy(),
+ args
+ .iter()
+ .map(|a| format!("\"{}\"", a))
+ .collect::<Vec<_>>()
+ .join(" ")
+ )
+ .trim()
+ .to_string();
+ let mut application_str = to_windows_str(&program.to_string_lossy());
+ let mut command_str = to_windows_str(&command);
+ let cwd = cwd.to_string_lossy().replace('/', "\\");
+ let mut cwd = to_windows_str(&cwd);
+
+ assert_win_success!(CreateProcessW(
+ application_str.as_mut_ptr(),
+ command_str.as_mut_ptr(),
+ ptr::null_mut(),
+ ptr::null_mut(),
+ FALSE,
+ EXTENDED_STARTUPINFO_PRESENT | CREATE_UNICODE_ENVIRONMENT,
+ environment_vars
+ .as_mut()
+ .map(|v| v.as_mut_ptr() as LPVOID)
+ .unwrap_or(ptr::null_mut()),
+ cwd.as_mut_ptr(),
+ &mut startup_info.StartupInfo,
+ &mut proc_info,
+ ));
+
+ // close the handles that the pseudoconsole now has
+ drop(stdin_read_handle);
+ drop(stdout_write_handle);
+
+ // start a thread that will close the pseudoconsole on process exit
+ let thread_handle = WinHandle::new(proc_info.hThread);
+ std::thread::spawn({
+ let thread_handle = thread_handle.duplicate();
+ let console_handle = WinHandle::new(console_handle);
+ move || {
+ WaitForSingleObject(thread_handle.as_raw_handle(), INFINITE);
+ // wait for the reading thread to catch up
+ std::thread::sleep(Duration::from_millis(200));
+ // close the console handle which will close the
+ // stdout pipe for the reader
+ ClosePseudoConsole(console_handle.into_raw_handle());
+ }
+ });
+
+ Self {
+ stdin_write_handle,
+ stdout_read_handle,
+ _process_handle: WinHandle::new(proc_info.hProcess),
+ _thread_handle: thread_handle,
+ _attribute_list: attribute_list,
+ }
+ }
+ }
+ }
+
+ impl Read for WinPseudoConsole {
+ fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
+ // don't do a blocking read in order to support timing out
+ let mut bytes_available = 0;
+ // SAFETY: winapi call
+ handle_err!(unsafe {
+ PeekNamedPipe(
+ self.stdout_read_handle.as_raw_handle(),
+ ptr::null_mut(),
+ 0,
+ ptr::null_mut(),
+ &mut bytes_available,
+ ptr::null_mut(),
+ )
+ });
+ if bytes_available == 0 {
+ return Err(std::io::Error::new(ErrorKind::WouldBlock, "Would block."));
+ }
+
+ let mut bytes_read = 0;
+ // SAFETY: winapi call
+ handle_err!(unsafe {
+ ReadFile(
+ self.stdout_read_handle.as_raw_handle(),
+ buf.as_mut_ptr() as _,
+ buf.len() as u32,
+ &mut bytes_read,
+ ptr::null_mut(),
+ )
+ });
+
+ Ok(bytes_read as usize)
+ }
+ }
+
+ impl SystemPty for WinPseudoConsole {}
+
+ impl std::io::Write for WinPseudoConsole {
+ fn write(&mut self, buffer: &[u8]) -> std::io::Result<usize> {
+ let mut bytes_written = 0;
+ // SAFETY:
+ // winapi call
+ handle_err!(unsafe {
+ WriteFile(
+ self.stdin_write_handle.as_raw_handle(),
+ buffer.as_ptr() as *const _,
+ buffer.len() as u32,
+ &mut bytes_written,
+ ptr::null_mut(),
+ )
+ });
+ Ok(bytes_written as usize)
+ }
+
+ fn flush(&mut self) -> std::io::Result<()> {
+ // SAFETY: winapi call
+ handle_err!(unsafe {
+ FlushFileBuffers(self.stdin_write_handle.as_raw_handle())
+ });
+ Ok(())
+ }
+ }
+
+ struct WinHandle {
+ inner: HANDLE,
+ }
+
+ impl WinHandle {
+ pub fn new(handle: HANDLE) -> Self {
+ WinHandle { inner: handle }
+ }
+
+ pub fn duplicate(&self) -> WinHandle {
+ // SAFETY: winapi call
+ let process_handle = unsafe { GetCurrentProcess() };
+ let mut duplicate_handle = ptr::null_mut();
+ // SAFETY: winapi call
+ assert_win_success!(unsafe {
+ DuplicateHandle(
+ process_handle,
+ self.inner,
+ process_handle,
+ &mut duplicate_handle,
+ 0,
+ 0,
+ DUPLICATE_SAME_ACCESS,
+ )
+ });
+
+ WinHandle::new(duplicate_handle)
+ }
+
+ pub fn as_raw_handle(&self) -> HANDLE {
+ self.inner
+ }
+
+ pub fn into_raw_handle(self) -> HANDLE {
+ let handle = self.inner;
+ // skip the drop implementation in order to not close the handle
+ std::mem::forget(self);
+ handle
+ }
+ }
+
+ // SAFETY: These handles are ok to send across threads.
+ unsafe impl Send for WinHandle {}
+ // SAFETY: These handles are ok to send across threads.
+ unsafe impl Sync for WinHandle {}
+
+ impl Drop for WinHandle {
+ fn drop(&mut self) {
+ if !self.inner.is_null() && self.inner != INVALID_HANDLE_VALUE {
+ // SAFETY: winapi call
+ unsafe {
+ winapi::um::handleapi::CloseHandle(self.inner);
+ }
+ }
+ }
+ }
+
+ struct ProcThreadAttributeList {
+ buffer: Vec<u8>,
+ }
+
+ impl ProcThreadAttributeList {
+ pub fn new(console_handle: HPCON) -> Self {
+ // SAFETY:
+ // Generous use of unsafe winapi calls to create a ProcThreadAttributeList.
+ unsafe {
+ // discover size required for the list
+ let mut size = 0;
+ let attribute_count = 1;
+ assert_eq!(
+ InitializeProcThreadAttributeList(
+ ptr::null_mut(),
+ attribute_count,
+ 0,
+ &mut size
+ ),
+ FALSE
+ );
+
+ let mut buffer = vec![0u8; size];
+ let attribute_list_ptr = buffer.as_mut_ptr() as _;
+
+ assert_win_success!(InitializeProcThreadAttributeList(
+ attribute_list_ptr,
+ attribute_count,
+ 0,
+ &mut size,
+ ));
+
+ const PROC_THREAD_ATTRIBUTE_PSEUDOCONSOLE: usize = 0x00020016;
+ assert_win_success!(UpdateProcThreadAttribute(
+ attribute_list_ptr,
+ 0,
+ PROC_THREAD_ATTRIBUTE_PSEUDOCONSOLE,
+ console_handle,
+ std::mem::size_of::<HPCON>(),
+ ptr::null_mut(),
+ ptr::null_mut(),
+ ));
+
+ ProcThreadAttributeList { buffer }
+ }
+ }
+
+ pub fn as_mut_ptr(&mut self) -> LPPROC_THREAD_ATTRIBUTE_LIST {
+ self.buffer.as_mut_slice().as_mut_ptr() as *mut _
+ }
+ }
+
+ impl Drop for ProcThreadAttributeList {
+ fn drop(&mut self) {
+ // SAFETY: winapi call
+ unsafe { DeleteProcThreadAttributeList(self.as_mut_ptr()) };
+ }
+ }
+
+ fn create_pipe() -> (WinHandle, WinHandle) {
+ let mut read_handle = std::ptr::null_mut();
+ let mut write_handle = std::ptr::null_mut();
+
+ // SAFETY: Creating an anonymous pipe with winapi.
+ assert_win_success!(unsafe {
+ CreatePipe(&mut read_handle, &mut write_handle, ptr::null_mut(), 0)
+ });
+
+ (WinHandle::new(read_handle), WinHandle::new(write_handle))
+ }
+
+ fn to_windows_str(str: &str) -> Vec<u16> {
+ use std::os::windows::prelude::OsStrExt;
+ std::ffi::OsStr::new(str)
+ .encode_wide()
+ .chain(Some(0))
+ .collect()
+ }
+
+ fn get_env_vars(env_vars: HashMap<String, String>) -> Vec<u16> {
+ // See lpEnvironment: https://docs.microsoft.com/en-us/windows/win32/api/processthreadsapi/nf-processthreadsapi-createprocessw
+ let mut parts = env_vars
+ .into_iter()
+ // each environment variable is in the form `name=value\0`
+ .map(|(key, value)| format!("{key}={value}\0"))
+ .collect::<Vec<_>>();
+
+    // all strings in an environment block must be sorted alphabetically
+    // by name, case-insensitively
+ // https://docs.microsoft.com/en-us/windows/win32/procthread/changing-environment-variables
+ parts.sort_by_key(|part| part.to_lowercase());
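+
+    // e.g. (illustrative values) { "FOO": "bar", "Path": "C:\\" } becomes the
+    // UTF-16 encoding of "FOO=bar\0Path=C:\\\0" plus the block terminator
+    // appended below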
+
+ // the entire block is terminated by NULL (\0)
+ format!("{}\0", parts.join(""))
+ .encode_utf16()
+ .collect::<Vec<_>>()
+ }
+}
diff --git a/tests/util/server/src/servers/grpc.rs b/tests/util/server/src/servers/grpc.rs
new file mode 100644
index 000000000..144afc06a
--- /dev/null
+++ b/tests/util/server/src/servers/grpc.rs
@@ -0,0 +1,103 @@
+// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
+
+use futures::StreamExt;
+use h2;
+use hyper::header::HeaderName;
+use hyper::header::HeaderValue;
+use rustls_tokio_stream::TlsStream;
+use tokio::net::TcpStream;
+use tokio::task::LocalSet;
+
+use super::get_tcp_listener_stream;
+use super::get_tls_listener_stream;
+use super::SupportedHttpVersions;
+
+pub async fn h2_grpc_server(h2_grpc_port: u16, h2s_grpc_port: u16) {
+ let mut tcp = get_tcp_listener_stream("grpc", h2_grpc_port).await;
+ let mut tls = get_tls_listener_stream(
+ "grpc (tls)",
+ h2s_grpc_port,
+ SupportedHttpVersions::Http2Only,
+ )
+ .await;
+
+ async fn serve(socket: TcpStream) -> Result<(), anyhow::Error> {
+ let mut connection = h2::server::handshake(socket).await?;
+
+ while let Some(result) = connection.accept().await {
+ let (request, respond) = result?;
+ tokio::spawn(async move {
+ let _ = handle_request(request, respond).await;
+ });
+ }
+
+ Ok(())
+ }
+
+ async fn serve_tls(socket: TlsStream) -> Result<(), anyhow::Error> {
+ let mut connection = h2::server::handshake(socket).await?;
+
+ while let Some(result) = connection.accept().await {
+ let (request, respond) = result?;
+ tokio::spawn(async move {
+ let _ = handle_request(request, respond).await;
+ });
+ }
+
+ Ok(())
+ }
+
+ async fn handle_request(
+ mut request: hyper::Request<h2::RecvStream>,
+ mut respond: h2::server::SendResponse<bytes::Bytes>,
+ ) -> Result<(), anyhow::Error> {
+ let body = request.body_mut();
+ while let Some(data) = body.data().await {
+ let data = data?;
+ let _ = body.flow_control().release_capacity(data.len());
+ }
+
+ let maybe_recv_trailers = body.trailers().await?;
+
+ let response = hyper::Response::new(());
+ let mut send = respond.send_response(response, false)?;
+ send.send_data(bytes::Bytes::from_static(b"hello "), false)?;
+ send.send_data(bytes::Bytes::from_static(b"world\n"), false)?;
+ let mut trailers = hyper::HeaderMap::new();
+ trailers.insert(
+ HeaderName::from_static("abc"),
+ HeaderValue::from_static("def"),
+ );
+ trailers.insert(
+ HeaderName::from_static("opr"),
+ HeaderValue::from_static("stv"),
+ );
+ if let Some(recv_trailers) = maybe_recv_trailers {
+ for (key, value) in recv_trailers {
+ trailers.insert(key.unwrap(), value);
+ }
+ }
+ send.send_trailers(trailers)?;
+
+ Ok(())
+ }
+
+ let local_set = LocalSet::new();
+ local_set.spawn_local(async move {
+ while let Some(Ok(tcp)) = tcp.next().await {
+ tokio::spawn(async move {
+ let _ = serve(tcp).await;
+ });
+ }
+ });
+
+ local_set.spawn_local(async move {
+ while let Some(Ok(tls)) = tls.next().await {
+ tokio::spawn(async move {
+ let _ = serve_tls(tls).await;
+ });
+ }
+ });
+
+ local_set.await;
+}
diff --git a/tests/util/server/src/servers/hyper_utils.rs b/tests/util/server/src/servers/hyper_utils.rs
new file mode 100644
index 000000000..ea15bba0e
--- /dev/null
+++ b/tests/util/server/src/servers/hyper_utils.rs
@@ -0,0 +1,154 @@
+// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
+
+use bytes::Bytes;
+use futures::Future;
+use futures::FutureExt;
+use futures::Stream;
+use futures::StreamExt;
+use http;
+use http::Request;
+use http::Response;
+use http_body_util::combinators::UnsyncBoxBody;
+use hyper_util::rt::TokioIo;
+use std::convert::Infallible;
+use std::io;
+use std::net::SocketAddr;
+use std::pin::Pin;
+use std::result::Result;
+use tokio::net::TcpListener;
+
+#[derive(Debug, Clone, Copy)]
+pub enum ServerKind {
+ Auto,
+ OnlyHttp1,
+ OnlyHttp2,
+}
+
+#[derive(Debug, Clone, Copy)]
+pub struct ServerOptions {
+ pub error_msg: &'static str,
+ pub addr: SocketAddr,
+ pub kind: ServerKind,
+}
+
+type HandlerOutput =
+ Result<Response<UnsyncBoxBody<Bytes, Infallible>>, anyhow::Error>;
+
+pub async fn run_server<F, S>(options: ServerOptions, handler: F)
+where
+ F: Fn(Request<hyper::body::Incoming>) -> S + Copy + 'static,
+ S: Future<Output = HandlerOutput> + 'static,
+{
+ let fut: Pin<Box<dyn Future<Output = Result<(), anyhow::Error>>>> =
+ async move {
+ let listener = TcpListener::bind(options.addr).await?;
+ loop {
+ let (stream, _) = listener.accept().await?;
+ let io = TokioIo::new(stream);
+ deno_unsync::spawn(hyper_serve_connection(
+ io,
+ handler,
+ options.error_msg,
+ options.kind,
+ ));
+ }
+ }
+ .boxed_local();
+
+ if let Err(e) = fut.await {
+ let err_str = e.to_string();
+ if !err_str.contains("early eof") {
+ eprintln!("{}: {:?}", options.error_msg, e);
+ }
+ }
+}
+
+pub async fn run_server_with_acceptor<'a, A, F, S>(
+ mut acceptor: Pin<Box<A>>,
+ handler: F,
+ error_msg: &'static str,
+ kind: ServerKind,
+) where
+ A: Stream<Item = io::Result<rustls_tokio_stream::TlsStream>> + ?Sized,
+ F: Fn(Request<hyper::body::Incoming>) -> S + Copy + 'static,
+ S: Future<Output = HandlerOutput> + 'static,
+{
+ let fut: Pin<Box<dyn Future<Output = Result<(), anyhow::Error>>>> =
+ async move {
+ while let Some(result) = acceptor.next().await {
+ let stream = result?;
+ let io = TokioIo::new(stream);
+ deno_unsync::spawn(hyper_serve_connection(
+ io, handler, error_msg, kind,
+ ));
+ }
+ Ok(())
+ }
+ .boxed_local();
+
+ if let Err(e) = fut.await {
+ let err_str = e.to_string();
+ if !err_str.contains("early eof") {
+ eprintln!("{}: {:?}", error_msg, e);
+ }
+ }
+}
+
+async fn hyper_serve_connection<I, F, S>(
+ io: I,
+ handler: F,
+ error_msg: &'static str,
+ kind: ServerKind,
+) where
+ I: hyper::rt::Read + hyper::rt::Write + Unpin + 'static,
+ F: Fn(Request<hyper::body::Incoming>) -> S + Copy + 'static,
+ S: Future<Output = HandlerOutput> + 'static,
+{
+ let service = hyper::service::service_fn(handler);
+
+ let result: Result<(), anyhow::Error> = match kind {
+ ServerKind::Auto => {
+ let builder =
+ hyper_util::server::conn::auto::Builder::new(DenoUnsyncExecutor);
+ builder
+ .serve_connection(io, service)
+ .await
+ .map_err(|e| anyhow::anyhow!("{}", e))
+ }
+ ServerKind::OnlyHttp1 => {
+ let builder = hyper::server::conn::http1::Builder::new();
+ builder
+ .serve_connection(io, service)
+ .await
+ .map_err(|e| e.into())
+ }
+ ServerKind::OnlyHttp2 => {
+ let builder =
+ hyper::server::conn::http2::Builder::new(DenoUnsyncExecutor);
+ builder
+ .serve_connection(io, service)
+ .await
+ .map_err(|e| e.into())
+ }
+ };
+
+ if let Err(e) = result {
+ let err_str = e.to_string();
+ if !err_str.contains("early eof") {
+ eprintln!("{}: {:?}", error_msg, e);
+ }
+ }
+}
+
+#[derive(Clone)]
+struct DenoUnsyncExecutor;
+
+impl<Fut> hyper::rt::Executor<Fut> for DenoUnsyncExecutor
+where
+ Fut: Future + 'static,
+ Fut::Output: 'static,
+{
+ fn execute(&self, fut: Fut) {
+ deno_unsync::spawn(fut);
+ }
+}
diff --git a/tests/util/server/src/servers/mod.rs b/tests/util/server/src/servers/mod.rs
new file mode 100644
index 000000000..f828f1bd4
--- /dev/null
+++ b/tests/util/server/src/servers/mod.rs
@@ -0,0 +1,1536 @@
+// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
+// Usage: provide a port as an argument to run only the hyper_hello benchmark
+// server; otherwise this starts multiple servers on many ports for test endpoints.
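+//
+// For example (port number illustrative), `test_server 8000` would serve only
+// the hello benchmark on port 8000, while running `test_server` with no
+// arguments starts the full set of test fixture servers below.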
+use base64::prelude::BASE64_STANDARD;
+use base64::Engine;
+use bytes::Bytes;
+use denokv_proto::datapath::AtomicWrite;
+use denokv_proto::datapath::AtomicWriteOutput;
+use denokv_proto::datapath::AtomicWriteStatus;
+use denokv_proto::datapath::ReadRangeOutput;
+use denokv_proto::datapath::SnapshotRead;
+use denokv_proto::datapath::SnapshotReadOutput;
+use denokv_proto::datapath::SnapshotReadStatus;
+use futures::FutureExt;
+use futures::Stream;
+use futures::StreamExt;
+use http;
+use http::HeaderValue;
+use http::Method;
+use http::Request;
+use http::Response;
+use http::StatusCode;
+use http_body_util::combinators::UnsyncBoxBody;
+use http_body_util::BodyExt;
+use http_body_util::Empty;
+use http_body_util::Full;
+use pretty_assertions::assert_eq;
+use prost::Message;
+use std::collections::HashMap;
+use std::convert::Infallible;
+use std::env;
+use std::net::Ipv6Addr;
+use std::net::SocketAddr;
+use std::net::SocketAddrV6;
+use std::path::PathBuf;
+use std::result::Result;
+use std::time::Duration;
+use tokio::io::AsyncWriteExt;
+use tokio::net::TcpStream;
+
+mod grpc;
+mod hyper_utils;
+mod registry;
+mod ws;
+
+use hyper_utils::run_server;
+use hyper_utils::run_server_with_acceptor;
+use hyper_utils::ServerKind;
+use hyper_utils::ServerOptions;
+
+use super::https::get_tls_listener_stream;
+use super::https::SupportedHttpVersions;
+use super::npm::CUSTOM_NPM_PACKAGE_CACHE;
+use super::std_path;
+use super::testdata_path;
+
+const PORT: u16 = 4545;
+const TEST_AUTH_TOKEN: &str = "abcdef123456789";
+const TEST_BASIC_AUTH_USERNAME: &str = "testuser123";
+const TEST_BASIC_AUTH_PASSWORD: &str = "testpassabc";
+const KV_DATABASE_ID: &str = "11111111-1111-1111-1111-111111111111";
+const KV_ACCESS_TOKEN: &str = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa";
+const KV_DATABASE_TOKEN: &str = "MOCKMOCKMOCKMOCKMOCKMOCKMOCK";
+const REDIRECT_PORT: u16 = 4546;
+const ANOTHER_REDIRECT_PORT: u16 = 4547;
+const DOUBLE_REDIRECTS_PORT: u16 = 4548;
+const INF_REDIRECTS_PORT: u16 = 4549;
+const REDIRECT_ABSOLUTE_PORT: u16 = 4550;
+const AUTH_REDIRECT_PORT: u16 = 4551;
+const TLS_CLIENT_AUTH_PORT: u16 = 4552;
+const BASIC_AUTH_REDIRECT_PORT: u16 = 4554;
+const TLS_PORT: u16 = 4557;
+const HTTPS_PORT: u16 = 5545;
+const H1_ONLY_TLS_PORT: u16 = 5546;
+const H2_ONLY_TLS_PORT: u16 = 5547;
+const H1_ONLY_PORT: u16 = 5548;
+const H2_ONLY_PORT: u16 = 5549;
+const HTTPS_CLIENT_AUTH_PORT: u16 = 5552;
+const WS_PORT: u16 = 4242;
+const WSS_PORT: u16 = 4243;
+const WSS2_PORT: u16 = 4249;
+const WS_CLOSE_PORT: u16 = 4244;
+const WS_PING_PORT: u16 = 4245;
+const H2_GRPC_PORT: u16 = 4246;
+const H2S_GRPC_PORT: u16 = 4247;
+const REGISTRY_SERVER_PORT: u16 = 4250;
+
+// Use the single-threaded scheduler. The hyper server is used as a point of
+// comparison for the (single-threaded!) benchmarks in cli/bench. We're not
+// comparing apples to apples if we use the default multi-threaded scheduler.
+#[tokio::main(flavor = "current_thread")]
+pub async fn run_all_servers() {
+ if let Some(port) = env::args().nth(1) {
+ return hyper_hello(port.parse::<u16>().unwrap()).await;
+ }
+
+ let redirect_server_fut = wrap_redirect_server(REDIRECT_PORT);
+ let double_redirects_server_fut =
+ wrap_double_redirect_server(DOUBLE_REDIRECTS_PORT);
+ let inf_redirects_server_fut = wrap_inf_redirect_server(INF_REDIRECTS_PORT);
+ let another_redirect_server_fut =
+ wrap_another_redirect_server(ANOTHER_REDIRECT_PORT);
+ let auth_redirect_server_fut = wrap_auth_redirect_server(AUTH_REDIRECT_PORT);
+ let basic_auth_redirect_server_fut =
+ wrap_basic_auth_redirect_server(BASIC_AUTH_REDIRECT_PORT);
+ let abs_redirect_server_fut =
+ wrap_abs_redirect_server(REDIRECT_ABSOLUTE_PORT);
+
+ let ws_server_fut = ws::run_ws_server(WS_PORT);
+ let ws_ping_server_fut = ws::run_ws_ping_server(WS_PING_PORT);
+ let wss_server_fut = ws::run_wss_server(WSS_PORT);
+ let ws_close_server_fut = ws::run_ws_close_server(WS_CLOSE_PORT);
+ let wss2_server_fut = ws::run_wss2_server(WSS2_PORT);
+
+ let tls_server_fut = run_tls_server(TLS_PORT);
+ let tls_client_auth_server_fut =
+ run_tls_client_auth_server(TLS_CLIENT_AUTH_PORT);
+ let client_auth_server_https_fut =
+ wrap_client_auth_https_server(HTTPS_CLIENT_AUTH_PORT);
+ let main_server_fut = wrap_main_server(PORT);
+ let main_server_ipv6_fut = wrap_main_ipv6_server(PORT);
+ let main_server_https_fut = wrap_main_https_server(HTTPS_PORT);
+ let h1_only_server_tls_fut = wrap_https_h1_only_tls_server(H1_ONLY_TLS_PORT);
+ let h2_only_server_tls_fut = wrap_https_h2_only_tls_server(H2_ONLY_TLS_PORT);
+ let h1_only_server_fut = wrap_http_h1_only_server(H1_ONLY_PORT);
+ let h2_only_server_fut = wrap_http_h2_only_server(H2_ONLY_PORT);
+ let h2_grpc_server_fut = grpc::h2_grpc_server(H2_GRPC_PORT, H2S_GRPC_PORT);
+
+ let registry_server_fut = registry::registry_server(REGISTRY_SERVER_PORT);
+
+ let server_fut = async {
+ futures::join!(
+ redirect_server_fut,
+ ws_server_fut,
+ ws_ping_server_fut,
+ wss_server_fut,
+ wss2_server_fut,
+ tls_server_fut,
+ tls_client_auth_server_fut,
+ ws_close_server_fut,
+ another_redirect_server_fut,
+ auth_redirect_server_fut,
+ basic_auth_redirect_server_fut,
+ inf_redirects_server_fut,
+ double_redirects_server_fut,
+ abs_redirect_server_fut,
+ main_server_fut,
+ main_server_ipv6_fut,
+ main_server_https_fut,
+ client_auth_server_https_fut,
+ h1_only_server_tls_fut,
+ h2_only_server_tls_fut,
+ h1_only_server_fut,
+ h2_only_server_fut,
+ h2_grpc_server_fut,
+ registry_server_fut,
+ )
+ }
+ .boxed_local();
+
+ server_fut.await;
+}
+
+fn empty_body() -> UnsyncBoxBody<Bytes, Infallible> {
+ UnsyncBoxBody::new(Empty::new())
+}
+
+fn string_body(str_: &str) -> UnsyncBoxBody<Bytes, Infallible> {
+ UnsyncBoxBody::new(Full::new(Bytes::from(str_.to_string())))
+}
+
+fn json_body(value: serde_json::Value) -> UnsyncBoxBody<Bytes, Infallible> {
+ let str_ = value.to_string();
+ string_body(&str_)
+}
+
+/// Benchmark server that just serves "hello world" responses.
+async fn hyper_hello(port: u16) {
+ println!("hyper hello");
+ let addr = SocketAddr::from(([127, 0, 0, 1], port));
+ let handler = move |_: Request<hyper::body::Incoming>| async move {
+ Ok::<_, anyhow::Error>(Response::new(UnsyncBoxBody::new(
+ http_body_util::Full::new(Bytes::from("Hello World!")),
+ )))
+ };
+ run_server(
+ ServerOptions {
+ addr,
+ error_msg: "server error",
+ kind: ServerKind::Auto,
+ },
+ handler,
+ )
+ .await;
+}
+
+fn redirect_resp(url: String) -> Response<UnsyncBoxBody<Bytes, Infallible>> {
+ let mut redirect_resp = Response::new(UnsyncBoxBody::new(Empty::new()));
+ *redirect_resp.status_mut() = StatusCode::MOVED_PERMANENTLY;
+ redirect_resp.headers_mut().insert(
+ http::header::LOCATION,
+ HeaderValue::from_str(&url[..]).unwrap(),
+ );
+
+ redirect_resp
+}
+
+async fn redirect(
+ req: Request<hyper::body::Incoming>,
+) -> Result<Response<UnsyncBoxBody<Bytes, Infallible>>, anyhow::Error> {
+ let p = req.uri().path();
+ assert_eq!(&p[0..1], "/");
+ let url = format!("http://localhost:{PORT}{p}");
+
+ Ok(redirect_resp(url))
+}
+
+async fn double_redirects(
+ req: Request<hyper::body::Incoming>,
+) -> Result<Response<UnsyncBoxBody<Bytes, Infallible>>, anyhow::Error> {
+ let p = req.uri().path();
+ assert_eq!(&p[0..1], "/");
+ let url = format!("http://localhost:{REDIRECT_PORT}{p}");
+
+ Ok(redirect_resp(url))
+}
+
+async fn inf_redirects(
+ req: Request<hyper::body::Incoming>,
+) -> Result<Response<UnsyncBoxBody<Bytes, Infallible>>, anyhow::Error> {
+ let p = req.uri().path();
+ assert_eq!(&p[0..1], "/");
+ let url = format!("http://localhost:{INF_REDIRECTS_PORT}{p}");
+
+ Ok(redirect_resp(url))
+}
+
+async fn another_redirect(
+ req: Request<hyper::body::Incoming>,
+) -> Result<Response<UnsyncBoxBody<Bytes, Infallible>>, anyhow::Error> {
+ let p = req.uri().path();
+ assert_eq!(&p[0..1], "/");
+ let url = format!("http://localhost:{PORT}/subdir{p}");
+
+ Ok(redirect_resp(url))
+}
+
+async fn auth_redirect(
+ req: Request<hyper::body::Incoming>,
+) -> Result<Response<UnsyncBoxBody<Bytes, Infallible>>, anyhow::Error> {
+ if let Some(auth) = req
+ .headers()
+ .get("authorization")
+ .map(|v| v.to_str().unwrap())
+ {
+ if auth.to_lowercase() == format!("bearer {TEST_AUTH_TOKEN}") {
+ let p = req.uri().path();
+ assert_eq!(&p[0..1], "/");
+ let url = format!("http://localhost:{PORT}{p}");
+ return Ok(redirect_resp(url));
+ }
+ }
+
+ let mut resp = Response::new(UnsyncBoxBody::new(Empty::new()));
+ *resp.status_mut() = StatusCode::NOT_FOUND;
+ Ok(resp)
+}
+
+async fn basic_auth_redirect(
+ req: Request<hyper::body::Incoming>,
+) -> Result<Response<UnsyncBoxBody<Bytes, Infallible>>, anyhow::Error> {
+ if let Some(auth) = req
+ .headers()
+ .get("authorization")
+ .map(|v| v.to_str().unwrap())
+ {
+ let credentials =
+ format!("{TEST_BASIC_AUTH_USERNAME}:{TEST_BASIC_AUTH_PASSWORD}");
+ if auth == format!("Basic {}", BASE64_STANDARD.encode(credentials)) {
+ let p = req.uri().path();
+ assert_eq!(&p[0..1], "/");
+ let url = format!("http://localhost:{PORT}{p}");
+ return Ok(redirect_resp(url));
+ }
+ }
+
+ let mut resp = Response::new(UnsyncBoxBody::new(Empty::new()));
+ *resp.status_mut() = StatusCode::NOT_FOUND;
+ Ok(resp)
+}
+
+/// Returns a [`Stream`] of [`TcpStream`]s accepted from the given port.
+async fn get_tcp_listener_stream(
+ name: &'static str,
+ port: u16,
+) -> impl Stream<Item = Result<TcpStream, std::io::Error>> + Unpin + Send {
+ let host_and_port = &format!("localhost:{port}");
+
+  // Listen on ALL addresses that localhost can resolve to.
+ let accept = |listener: tokio::net::TcpListener| {
+ async {
+ let result = listener.accept().await;
+ Some((result.map(|r| r.0), listener))
+ }
+ .boxed()
+ };
+
+ let mut addresses = vec![];
+ let listeners = tokio::net::lookup_host(host_and_port)
+ .await
+ .expect(host_and_port)
+ .inspect(|address| addresses.push(*address))
+ .map(tokio::net::TcpListener::bind)
+ .collect::<futures::stream::FuturesUnordered<_>>()
+ .collect::<Vec<_>>()
+ .await
+ .into_iter()
+ .map(|s| s.unwrap())
+ .map(|listener| futures::stream::unfold(listener, accept))
+ .collect::<Vec<_>>();
+
+ // Eye catcher for HttpServerCount
+ println!("ready: {name} on {:?}", addresses);
+
+ futures::stream::select_all(listeners)
+}
+
+/// This server responds with 'PASS' if client authentication was successful. Try it by running
+/// test_server and
+/// curl --key tests/testdata/tls/localhost.key \
+///   --cert tests/testdata/tls/localhost.crt \
+/// --cacert tests/testdata/tls/RootCA.crt https://localhost:4552/
+async fn run_tls_client_auth_server(port: u16) {
+ let mut tls =
+ get_tls_listener_stream("tls client auth", port, Default::default()).await;
+ while let Some(Ok(mut tls_stream)) = tls.next().await {
+ tokio::spawn(async move {
+ let Ok(handshake) = tls_stream.handshake().await else {
+ eprintln!("Failed to handshake");
+ return;
+ };
+ // We only need to check for the presence of client certificates
+      // here. Rustls ensures that they are valid and signed by the CA.
+ let response = match handshake.has_peer_certificates {
+ true => b"PASS",
+ false => b"FAIL",
+ };
+ tls_stream.write_all(response).await.unwrap();
+ });
+ }
+}
+
+/// This server unconditionally responds with 'PASS'. Try it by running
+/// test_server and
+/// curl --cacert tests/testdata/tls/RootCA.crt https://localhost:4557/
+async fn run_tls_server(port: u16) {
+ let mut tls = get_tls_listener_stream("tls", port, Default::default()).await;
+ while let Some(Ok(mut tls_stream)) = tls.next().await {
+ tokio::spawn(async move {
+ tls_stream.write_all(b"PASS").await.unwrap();
+ });
+ }
+}
+
+async fn absolute_redirect(
+ req: Request<hyper::body::Incoming>,
+) -> Result<Response<UnsyncBoxBody<Bytes, Infallible>>, anyhow::Error> {
+ let path = req.uri().path();
+
+ if path == "/" {
+ // We have to manually extract query params here,
+ // as `req.uri()` returns `PathAndQuery` only,
+ // and we cannot use `Url::parse(req.uri()).query_pairs()`,
+    // as it requires the URL to have a proper base.
+ let query_params: HashMap<_, _> = req
+ .uri()
+ .query()
+ .unwrap_or_default()
+ .split('&')
+ .filter_map(|s| {
+ s.split_once('=').map(|t| (t.0.to_owned(), t.1.to_owned()))
+ })
+ .collect();
+
+ if let Some(url) = query_params.get("redirect_to") {
+ println!("URL: {url:?}");
+ let redirect = redirect_resp(url.to_owned());
+ return Ok(redirect);
+ }
+ }
+
+ if path.starts_with("/REDIRECT") {
+ let url = &req.uri().path()[9..];
+ println!("URL: {url:?}");
+ let redirect = redirect_resp(url.to_string());
+ return Ok(redirect);
+ }
+
+ if path.starts_with("/a/b/c") {
+ if let Some(x_loc) = req.headers().get("x-location") {
+ let loc = x_loc.to_str().unwrap();
+ return Ok(redirect_resp(loc.to_string()));
+ }
+ }
+
+ let file_path = testdata_path().join(&req.uri().path()[1..]);
+ if file_path.is_dir() || !file_path.exists() {
+ let mut not_found_resp = Response::new(UnsyncBoxBody::new(Empty::new()));
+ *not_found_resp.status_mut() = StatusCode::NOT_FOUND;
+ return Ok(not_found_resp);
+ }
+
+ let file = tokio::fs::read(file_path).await.unwrap();
+ let file_resp = custom_headers(req.uri().path(), file);
+ Ok(file_resp)
+}
+
+async fn main_server(
+ req: Request<hyper::body::Incoming>,
+) -> Result<Response<UnsyncBoxBody<Bytes, Infallible>>, anyhow::Error> {
+ return match (req.method(), req.uri().path()) {
+ (_, "/echo_server") => {
+ let (parts, body) = req.into_parts();
+ let mut response = Response::new(UnsyncBoxBody::new(Full::new(
+ body.collect().await?.to_bytes(),
+ )));
+
+ if let Some(status) = parts.headers.get("x-status") {
+ *response.status_mut() =
+ StatusCode::from_bytes(status.as_bytes()).unwrap();
+ }
+ response.headers_mut().extend(parts.headers);
+ Ok(response)
+ }
+ (&Method::POST, "/echo_multipart_file") => {
+ let body = req.into_body();
+ let bytes = &body.collect().await.unwrap().to_bytes()[0..];
+ let start = b"--boundary\t \r\n\
+ Content-Disposition: form-data; name=\"field_1\"\r\n\
+ \r\n\
+ value_1 \r\n\
+ \r\n--boundary\r\n\
+ Content-Disposition: form-data; name=\"file\"; \
+ filename=\"file.bin\"\r\n\
+ Content-Type: application/octet-stream\r\n\
+ \r\n";
+ let end = b"\r\n--boundary--\r\n";
+ let b = [start as &[u8], bytes, end].concat();
+
+ let mut response =
+ Response::new(UnsyncBoxBody::new(Full::new(Bytes::from(b))));
+ response.headers_mut().insert(
+ "content-type",
+ HeaderValue::from_static("multipart/form-data;boundary=boundary"),
+ );
+ Ok(response)
+ }
+ (&Method::GET, "/ghost_ws_client") => {
+ use tokio::io::AsyncReadExt;
+
+ let mut tcp_stream = TcpStream::connect("localhost:4248").await.unwrap();
+ #[cfg(unix)]
+ // SAFETY: set socket keep alive.
+ unsafe {
+ use std::os::fd::AsRawFd;
+
+ let fd = tcp_stream.as_raw_fd();
+ let mut val: libc::c_int = 1;
+ let r = libc::setsockopt(
+ fd,
+ libc::SOL_SOCKET,
+ libc::SO_KEEPALIVE,
+ &mut val as *mut _ as *mut libc::c_void,
+ std::mem::size_of_val(&val) as libc::socklen_t,
+ );
+ assert_eq!(r, 0);
+ }
+
+ // Typical websocket handshake request.
+ let headers = [
+ "GET / HTTP/1.1",
+ "Host: localhost",
+ "Upgrade: websocket",
+ "Connection: Upgrade",
+ "Sec-WebSocket-Key: x3JJHMbDL1EzLkh9GBhXDw==",
+ "Sec-WebSocket-Version: 13",
+ "\r\n",
+ ]
+ .join("\r\n");
+ tcp_stream.write_all(headers.as_bytes()).await.unwrap();
+
+ let mut buf = [0u8; 200];
+ let n = tcp_stream.read(&mut buf).await.unwrap();
+ assert!(n > 0);
+
+ // Ghost the server:
+ // - Close the read half of the connection.
+ // - forget the TcpStream.
+ let tcp_stream = tcp_stream.into_std().unwrap();
+ let _ = tcp_stream.shutdown(std::net::Shutdown::Read);
+ std::mem::forget(tcp_stream);
+
+ let res = Response::new(empty_body());
+ Ok(res)
+ }
+ (_, "/multipart_form_data.txt") => {
+ let b = "Preamble\r\n\
+ --boundary\t \r\n\
+ Content-Disposition: form-data; name=\"field_1\"\r\n\
+ \r\n\
+ value_1 \r\n\
+ \r\n--boundary\r\n\
+ Content-Disposition: form-data; name=\"field_2\";\
+ filename=\"file.js\"\r\n\
+ Content-Type: text/javascript\r\n\
+ \r\n\
+ console.log(\"Hi\")\
+ \r\n--boundary--\r\n\
+ Epilogue";
+ let mut res = Response::new(string_body(b));
+ res.headers_mut().insert(
+ "content-type",
+ HeaderValue::from_static("multipart/form-data;boundary=boundary"),
+ );
+ Ok(res)
+ }
+ (_, "/multipart_form_bad_content_type") => {
+ let b = "Preamble\r\n\
+ --boundary\t \r\n\
+ Content-Disposition: form-data; name=\"field_1\"\r\n\
+ \r\n\
+ value_1 \r\n\
+ \r\n--boundary\r\n\
+ Content-Disposition: form-data; name=\"field_2\";\
+ filename=\"file.js\"\r\n\
+ Content-Type: text/javascript\r\n\
+ \r\n\
+ console.log(\"Hi\")\
+ \r\n--boundary--\r\n\
+ Epilogue";
+ let mut res = Response::new(string_body(b));
+ res.headers_mut().insert(
+ "content-type",
+ HeaderValue::from_static("multipart/form-datatststs;boundary=boundary"),
+ );
+ Ok(res)
+ }
+ (_, "/bad_redirect") => {
+ let mut res = Response::new(empty_body());
+ *res.status_mut() = StatusCode::FOUND;
+ Ok(res)
+ }
+ (_, "/server_error") => {
+ let mut res = Response::new(empty_body());
+ *res.status_mut() = StatusCode::INTERNAL_SERVER_ERROR;
+ Ok(res)
+ }
+ (_, "/x_deno_warning.js") => {
+ let mut res = Response::new(empty_body());
+ *res.status_mut() = StatusCode::MOVED_PERMANENTLY;
+ res
+ .headers_mut()
+ .insert("X-Deno-Warning", HeaderValue::from_static("foobar"));
+ res.headers_mut().insert(
+ "location",
+ HeaderValue::from_bytes(b"/lsp/x_deno_warning_redirect.js").unwrap(),
+ );
+ Ok(res)
+ }
+ (_, "/non_ascii_redirect") => {
+ let mut res = Response::new(empty_body());
+ *res.status_mut() = StatusCode::MOVED_PERMANENTLY;
+ res.headers_mut().insert(
+ "location",
+ HeaderValue::from_bytes(b"/redirect\xae").unwrap(),
+ );
+ Ok(res)
+ }
+ (_, "/etag_script.ts") => {
+ let if_none_match = req.headers().get("if-none-match");
+ if if_none_match == Some(&HeaderValue::from_static("33a64df551425fcc55e"))
+ {
+ let mut resp = Response::new(empty_body());
+ *resp.status_mut() = StatusCode::NOT_MODIFIED;
+ resp.headers_mut().insert(
+ "Content-type",
+ HeaderValue::from_static("application/typescript"),
+ );
+ resp
+ .headers_mut()
+ .insert("ETag", HeaderValue::from_static("33a64df551425fcc55e"));
+
+ Ok(resp)
+ } else {
+ let mut resp = Response::new(string_body("console.log('etag')"));
+ resp.headers_mut().insert(
+ "Content-type",
+ HeaderValue::from_static("application/typescript"),
+ );
+ resp
+ .headers_mut()
+ .insert("ETag", HeaderValue::from_static("33a64df551425fcc55e"));
+ Ok(resp)
+ }
+ }
+ (_, "/xTypeScriptTypes.js") => {
+ let mut res = Response::new(string_body("export const foo = 'foo';"));
+ res.headers_mut().insert(
+ "Content-type",
+ HeaderValue::from_static("application/javascript"),
+ );
+ res.headers_mut().insert(
+ "X-TypeScript-Types",
+ HeaderValue::from_static("./xTypeScriptTypes.d.ts"),
+ );
+ Ok(res)
+ }
+ (_, "/xTypeScriptTypes.jsx") => {
+ let mut res = Response::new(string_body("export const foo = 'foo';"));
+ res
+ .headers_mut()
+ .insert("Content-type", HeaderValue::from_static("text/jsx"));
+ res.headers_mut().insert(
+ "X-TypeScript-Types",
+ HeaderValue::from_static("./xTypeScriptTypes.d.ts"),
+ );
+ Ok(res)
+ }
+ (_, "/xTypeScriptTypes.ts") => {
+ let mut res =
+ Response::new(string_body("export const foo: string = 'foo';"));
+ res.headers_mut().insert(
+ "Content-type",
+ HeaderValue::from_static("application/typescript"),
+ );
+ res.headers_mut().insert(
+ "X-TypeScript-Types",
+ HeaderValue::from_static("./xTypeScriptTypes.d.ts"),
+ );
+ Ok(res)
+ }
+ (_, "/xTypeScriptTypes.d.ts") => {
+ let mut res = Response::new(string_body("export const foo: 'foo';"));
+ res.headers_mut().insert(
+ "Content-type",
+ HeaderValue::from_static("application/typescript"),
+ );
+ Ok(res)
+ }
+ (_, "/run/type_directives_redirect.js") => {
+ let mut res = Response::new(string_body("export const foo = 'foo';"));
+ res.headers_mut().insert(
+ "Content-type",
+ HeaderValue::from_static("application/javascript"),
+ );
+ res.headers_mut().insert(
+ "X-TypeScript-Types",
+ HeaderValue::from_static(
+ "http://localhost:4547/xTypeScriptTypesRedirect.d.ts",
+ ),
+ );
+ Ok(res)
+ }
+ (_, "/run/type_headers_deno_types.foo.js") => {
+ let mut res = Response::new(string_body(
+ "export function foo(text) { console.log(text); }",
+ ));
+ res.headers_mut().insert(
+ "Content-type",
+ HeaderValue::from_static("application/javascript"),
+ );
+ res.headers_mut().insert(
+ "X-TypeScript-Types",
+ HeaderValue::from_static(
+ "http://localhost:4545/run/type_headers_deno_types.d.ts",
+ ),
+ );
+ Ok(res)
+ }
+ (_, "/run/type_headers_deno_types.d.ts") => {
+ let mut res =
+ Response::new(string_body("export function foo(text: number): void;"));
+ res.headers_mut().insert(
+ "Content-type",
+ HeaderValue::from_static("application/typescript"),
+ );
+ Ok(res)
+ }
+ (_, "/run/type_headers_deno_types.foo.d.ts") => {
+ let mut res =
+ Response::new(string_body("export function foo(text: string): void;"));
+ res.headers_mut().insert(
+ "Content-type",
+ HeaderValue::from_static("application/typescript"),
+ );
+ Ok(res)
+ }
+ (_, "/subdir/xTypeScriptTypesRedirect.d.ts") => {
+ let mut res = Response::new(string_body(
+ "import './xTypeScriptTypesRedirected.d.ts';",
+ ));
+ res.headers_mut().insert(
+ "Content-type",
+ HeaderValue::from_static("application/typescript"),
+ );
+ Ok(res)
+ }
+ (_, "/subdir/xTypeScriptTypesRedirected.d.ts") => {
+ let mut res = Response::new(string_body("export const foo: 'foo';"));
+ res.headers_mut().insert(
+ "Content-type",
+ HeaderValue::from_static("application/typescript"),
+ );
+ Ok(res)
+ }
+ (_, "/referenceTypes.js") => {
+ let mut res = Response::new(string_body("/// <reference types=\"./xTypeScriptTypes.d.ts\" />\r\nexport const foo = \"foo\";\r\n"));
+ res.headers_mut().insert(
+ "Content-type",
+ HeaderValue::from_static("application/javascript"),
+ );
+ Ok(res)
+ }
+ (_, "/subdir/file_with_:_in_name.ts") => {
+ let mut res = Response::new(string_body(
+ "console.log('Hello from file_with_:_in_name.ts');",
+ ));
+ res.headers_mut().insert(
+ "Content-type",
+ HeaderValue::from_static("application/typescript"),
+ );
+ Ok(res)
+ }
+ (_, "/v1/extensionless") => {
+ let mut res =
+ Response::new(string_body(r#"export * from "/subdir/mod1.ts";"#));
+ res.headers_mut().insert(
+ "content-type",
+ HeaderValue::from_static("application/typescript"),
+ );
+ Ok(res)
+ }
+ (_, "/subdir/no_js_ext@1.0.0") => {
+ let mut res = Response::new(string_body(
+ r#"import { printHello } from "./mod2.ts";
+ printHello();
+ "#,
+ ));
+ res.headers_mut().insert(
+ "Content-type",
+ HeaderValue::from_static("application/javascript"),
+ );
+ Ok(res)
+ }
+ (_, "/.well-known/deno-import-intellisense.json") => {
+ let file_path =
+ testdata_path().join("lsp/registries/deno-import-intellisense.json");
+ if let Ok(body) = tokio::fs::read(file_path).await {
+ Ok(custom_headers(
+ "/.well-known/deno-import-intellisense.json",
+ body,
+ ))
+ } else {
+ Ok(Response::new(empty_body()))
+ }
+ }
+ (_, "/http_version") => {
+ let version = format!("{:?}", req.version());
+ Ok(Response::new(string_body(&version)))
+ }
+ (_, "/content_length") => {
+ let content_length = format!("{:?}", req.headers().get("content-length"));
+ Ok(Response::new(string_body(&content_length)))
+ }
+ (_, "/jsx/jsx-runtime") | (_, "/jsx/jsx-dev-runtime") => {
+ let mut res = Response::new(string_body(
+ r#"export function jsx(
+ _type,
+ _props,
+ _key,
+ _source,
+ _self,
+ ) {}
+ export const jsxs = jsx;
+ export const jsxDEV = jsx;
+ export const Fragment = Symbol("Fragment");
+ console.log("imported", import.meta.url);
+ "#,
+ ));
+ res.headers_mut().insert(
+ "Content-type",
+ HeaderValue::from_static("application/javascript"),
+ );
+ Ok(res)
+ }
+ (_, "/dynamic") => {
+ let mut res = Response::new(string_body(
+ &serde_json::to_string_pretty(&std::time::SystemTime::now()).unwrap(),
+ ));
+ res
+ .headers_mut()
+ .insert("cache-control", HeaderValue::from_static("no-cache"));
+ Ok(res)
+ }
+ (_, "/dynamic_cache") => {
+ let mut res = Response::new(string_body(
+ &serde_json::to_string_pretty(&std::time::SystemTime::now()).unwrap(),
+ ));
+ res.headers_mut().insert(
+ "cache-control",
+ HeaderValue::from_static("public, max-age=604800, immutable"),
+ );
+ Ok(res)
+ }
+ (_, "/dynamic_module.ts") => {
+ let mut res = Response::new(string_body(&format!(
+ r#"export const time = {};"#,
+ std::time::SystemTime::now().elapsed().unwrap().as_nanos()
+ )));
+ res.headers_mut().insert(
+ "Content-type",
+ HeaderValue::from_static("application/typescript"),
+ );
+ Ok(res)
+ }
+ (_, "/echo_accept") => {
+ let accept = req.headers().get("accept").map(|v| v.to_str().unwrap());
+ let res =
+ Response::new(json_body(serde_json::json!({ "accept": accept })));
+ Ok(res)
+ }
+ (_, "/search_params") => {
+ let query = req.uri().query().map(|s| s.to_string());
+ let res = Response::new(string_body(&query.unwrap_or_default()));
+ Ok(res)
+ }
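+    // The three /kv_remote_authorize* endpoints and the two /kv_blackhole/*
+    // endpoints below emulate a remote Deno KV backend: a client first trades
+    // `Authorization: Bearer <KV_ACCESS_TOKEN>` for database metadata whose
+    // endpoint points at /kv_blackhole, then authenticates snapshot_read and
+    // atomic_write calls with `Bearer <KV_DATABASE_TOKEN>` and gets back
+    // empty/successful protobuf responses. The *_invalid_format and
+    // *_invalid_version variants deliberately return malformed metadata.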
+ (&Method::POST, "/kv_remote_authorize") => {
+ if req
+ .headers()
+ .get("authorization")
+ .and_then(|x| x.to_str().ok())
+ .unwrap_or_default()
+ != format!("Bearer {}", KV_ACCESS_TOKEN)
+ {
+ return Ok(
+ Response::builder()
+ .status(StatusCode::UNAUTHORIZED)
+ .body(empty_body())
+ .unwrap(),
+ );
+ }
+
+ Ok(
+ Response::builder()
+ .header("content-type", "application/json")
+ .body(json_body(serde_json::json!({
+ "version": 1,
+ "databaseId": KV_DATABASE_ID,
+ "endpoints": [
+ {
+ "url": format!("http://localhost:{}/kv_blackhole", PORT),
+ "consistency": "strong",
+ }
+ ],
+ "token": KV_DATABASE_TOKEN,
+ "expiresAt": "2099-01-01T00:00:00Z",
+ })))
+ .unwrap(),
+ )
+ }
+ (&Method::POST, "/kv_remote_authorize_invalid_format") => {
+ if req
+ .headers()
+ .get("authorization")
+ .and_then(|x| x.to_str().ok())
+ .unwrap_or_default()
+ != format!("Bearer {}", KV_ACCESS_TOKEN)
+ {
+ return Ok(
+ Response::builder()
+ .status(StatusCode::UNAUTHORIZED)
+ .body(empty_body())
+ .unwrap(),
+ );
+ }
+
+ Ok(
+ Response::builder()
+ .header("content-type", "application/json")
+ .body(json_body(serde_json::json!({
+ "version": 1,
+ "databaseId": KV_DATABASE_ID,
+ })))
+ .unwrap(),
+ )
+ }
+ (&Method::POST, "/kv_remote_authorize_invalid_version") => {
+ if req
+ .headers()
+ .get("authorization")
+ .and_then(|x| x.to_str().ok())
+ .unwrap_or_default()
+ != format!("Bearer {}", KV_ACCESS_TOKEN)
+ {
+ return Ok(
+ Response::builder()
+ .status(StatusCode::UNAUTHORIZED)
+ .body(empty_body())
+ .unwrap(),
+ );
+ }
+
+ Ok(
+ Response::builder()
+ .header("content-type", "application/json")
+ .body(json_body(serde_json::json!({
+ "version": 1000,
+ "databaseId": KV_DATABASE_ID,
+ "endpoints": [
+ {
+ "url": format!("http://localhost:{}/kv_blackhole", PORT),
+ "consistency": "strong",
+ }
+ ],
+ "token": KV_DATABASE_TOKEN,
+ "expiresAt": "2099-01-01T00:00:00Z",
+ })))
+ .unwrap(),
+ )
+ }
+ (&Method::POST, "/kv_blackhole/snapshot_read") => {
+ if req
+ .headers()
+ .get("authorization")
+ .and_then(|x| x.to_str().ok())
+ .unwrap_or_default()
+ != format!("Bearer {}", KV_DATABASE_TOKEN)
+ {
+ return Ok(
+ Response::builder()
+ .status(StatusCode::UNAUTHORIZED)
+ .body(empty_body())
+ .unwrap(),
+ );
+ }
+
+ let body = req
+ .into_body()
+ .collect()
+ .await
+ .unwrap_or_default()
+ .to_bytes();
+ let Ok(body): Result<SnapshotRead, _> = prost::Message::decode(&body[..])
+ else {
+ return Ok(
+ Response::builder()
+ .status(StatusCode::BAD_REQUEST)
+ .body(empty_body())
+ .unwrap(),
+ );
+ };
+ if body.ranges.is_empty() {
+ return Ok(
+ Response::builder()
+ .status(StatusCode::BAD_REQUEST)
+ .body(empty_body())
+ .unwrap(),
+ );
+ }
+ Ok(
+ Response::builder()
+ .body(UnsyncBoxBody::new(Full::new(Bytes::from(
+ SnapshotReadOutput {
+ ranges: body
+ .ranges
+ .iter()
+ .map(|_| ReadRangeOutput { values: vec![] })
+ .collect(),
+ read_disabled: false,
+ read_is_strongly_consistent: true,
+ status: SnapshotReadStatus::SrSuccess.into(),
+ }
+ .encode_to_vec(),
+ ))))
+ .unwrap(),
+ )
+ }
+ (&Method::POST, "/kv_blackhole/atomic_write") => {
+ if req
+ .headers()
+ .get("authorization")
+ .and_then(|x| x.to_str().ok())
+ .unwrap_or_default()
+ != format!("Bearer {}", KV_DATABASE_TOKEN)
+ {
+ return Ok(
+ Response::builder()
+ .status(StatusCode::UNAUTHORIZED)
+ .body(empty_body())
+ .unwrap(),
+ );
+ }
+
+ let body = req
+ .into_body()
+ .collect()
+ .await
+ .unwrap_or_default()
+ .to_bytes();
+ let Ok(_body): Result<AtomicWrite, _> = prost::Message::decode(&body[..])
+ else {
+ return Ok(
+ Response::builder()
+ .status(StatusCode::BAD_REQUEST)
+ .body(empty_body())
+ .unwrap(),
+ );
+ };
+ Ok(
+ Response::builder()
+ .body(UnsyncBoxBody::new(Full::new(Bytes::from(
+ AtomicWriteOutput {
+ status: AtomicWriteStatus::AwSuccess.into(),
+ versionstamp: vec![0u8; 10],
+ failed_checks: vec![],
+ }
+ .encode_to_vec(),
+ ))))
+ .unwrap(),
+ )
+ }
+ (&Method::GET, "/upgrade/sleep/release-latest.txt") => {
+ tokio::time::sleep(Duration::from_secs(95)).await;
+ return Ok(
+ Response::builder()
+ .status(StatusCode::OK)
+ .body(string_body("99999.99.99"))
+ .unwrap(),
+ );
+ }
+ (&Method::GET, "/upgrade/sleep/canary-latest.txt") => {
+ tokio::time::sleep(Duration::from_secs(95)).await;
+ return Ok(
+ Response::builder()
+ .status(StatusCode::OK)
+ .body(string_body("bda3850f84f24b71e02512c1ba2d6bf2e3daa2fd"))
+ .unwrap(),
+ );
+ }
+ (&Method::GET, "/release-latest.txt") => {
+ return Ok(
+ Response::builder()
+ .status(StatusCode::OK)
+        // use a Deno version that will never exist
+ .body(string_body("99999.99.99"))
+ .unwrap(),
+ );
+ }
+ (
+ &Method::GET,
+ "/canary-latest.txt"
+ | "/canary-x86_64-apple-darwin-latest.txt"
+ | "/canary-aarch64-apple-darwin-latest.txt"
+ | "/canary-x86_64-unknown-linux-gnu-latest.txt"
+ | "/canary-aarch64-unknown-linux-gnu-latest.txt"
+ | "/canary-x86_64-unknown-linux-musl-latest.txt"
+ | "/canary-aarch64-unknown-linux-musl-latest.txt"
+ | "/canary-x86_64-pc-windows-msvc-latest.txt",
+ ) => {
+ return Ok(
+ Response::builder()
+ .status(StatusCode::OK)
+ .body(string_body("bda3850f84f24b71e02512c1ba2d6bf2e3daa2fd"))
+ .unwrap(),
+ );
+ }
+ _ => {
+ let mut file_path = testdata_path().to_path_buf();
+ file_path.push(&req.uri().path()[1..].replace("%2f", "/"));
+ if let Ok(file) = tokio::fs::read(&file_path).await {
+ let file_resp = custom_headers(req.uri().path(), file);
+ return Ok(file_resp);
+ }
+
+ // serve npm registry files
+ if let Some(suffix) = req
+ .uri()
+ .path()
+ .strip_prefix("/npm/registry/@denotest/")
+ .or_else(|| req.uri().path().strip_prefix("/npm/registry/@denotest%2f"))
+ {
+        // serve all requests to /npm/registry/@denotest using the file
+        // system at that path
+ match handle_custom_npm_registry_path(suffix) {
+ Ok(Some(response)) => return Ok(response),
+ Ok(None) => {} // ignore, not found
+ Err(err) => {
+ return Response::builder()
+ .status(StatusCode::INTERNAL_SERVER_ERROR)
+ .body(string_body(&format!("{err:#}")))
+ .map_err(|e| e.into());
+ }
+ }
+ } else if req.uri().path().starts_with("/npm/registry/") {
+ // otherwise, serve based on registry.json and tgz files
+ let is_tarball = req.uri().path().ends_with(".tgz");
+ if !is_tarball {
+ file_path.push("registry.json");
+ }
+ if let Ok(file) = tokio::fs::read(&file_path).await {
+ let file_resp = custom_headers(req.uri().path(), file);
+ return Ok(file_resp);
+ } else if should_download_npm_packages() {
+ if let Err(err) =
+ download_npm_registry_file(req.uri(), &file_path, is_tarball).await
+ {
+ return Response::builder()
+ .status(StatusCode::INTERNAL_SERVER_ERROR)
+ .body(string_body(&format!("{err:#}")))
+ .map_err(|e| e.into());
+ };
+
+ // serve the file
+ if let Ok(file) = tokio::fs::read(&file_path).await {
+ let file_resp = custom_headers(req.uri().path(), file);
+ return Ok(file_resp);
+ }
+ }
+ } else if let Some(suffix) = req.uri().path().strip_prefix("/deno_std/") {
+ let file_path = std_path().join(suffix);
+ if let Ok(file) = tokio::fs::read(&file_path).await {
+ let file_resp = custom_headers(req.uri().path(), file);
+ return Ok(file_resp);
+ }
+ } else if let Some(suffix) = req.uri().path().strip_prefix("/sleep/") {
+ let duration = suffix.parse::<u64>().unwrap();
+ tokio::time::sleep(Duration::from_millis(duration)).await;
+ return Response::builder()
+ .status(StatusCode::OK)
+ .header("content-type", "application/typescript")
+ .body(empty_body())
+ .map_err(|e| e.into());
+ }
+
+ Response::builder()
+ .status(StatusCode::NOT_FOUND)
+ .body(empty_body())
+ .map_err(|e| e.into())
+ }
+ };
+}
+
+fn handle_custom_npm_registry_path(
+ path: &str,
+) -> Result<Option<Response<UnsyncBoxBody<Bytes, Infallible>>>, anyhow::Error> {
+ let parts = path
+ .split('/')
+ .filter(|p| !p.is_empty())
+ .collect::<Vec<_>>();
+ let cache = &CUSTOM_NPM_PACKAGE_CACHE;
+ let package_name = format!("@denotest/{}", parts[0]);
+ if parts.len() == 2 {
+ if let Some(file_bytes) =
+ cache.tarball_bytes(&package_name, parts[1].trim_end_matches(".tgz"))?
+ {
+ let file_resp = custom_headers("file.tgz", file_bytes);
+ return Ok(Some(file_resp));
+ }
+ } else if parts.len() == 1 {
+ if let Some(registry_file) = cache.registry_file(&package_name)? {
+ let file_resp = custom_headers("registry.json", registry_file);
+ return Ok(Some(file_resp));
+ }
+ }
+
+ Ok(None)
+}
+
+fn should_download_npm_packages() -> bool {
+  // when this env var is set to "1", the server will download and save npm
+  // packages to the testdata/npm/registry directory
+ std::env::var("DENO_TEST_UTIL_UPDATE_NPM") == Ok("1".to_string())
+}
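+
+// Illustrative invocation (the exact test filter is up to the caller; any run
+// that requests missing /npm/registry/ files will hit the download path
+// below):
+//
+//   DENO_TEST_UTIL_UPDATE_NPM=1 cargo test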
+
+async fn download_npm_registry_file(
+ uri: &hyper::Uri,
+ file_path: &PathBuf,
+ is_tarball: bool,
+) -> Result<(), anyhow::Error> {
+ let url_parts = uri
+ .path()
+ .strip_prefix("/npm/registry/")
+ .unwrap()
+ .split('/')
+ .collect::<Vec<_>>();
+ let package_name = if url_parts[0].starts_with('@') {
+ url_parts.into_iter().take(2).collect::<Vec<_>>().join("/")
+ } else {
+ url_parts.into_iter().take(1).collect::<Vec<_>>().join("/")
+ };
+ let url = if is_tarball {
+ let file_name = file_path.file_name().unwrap().to_string_lossy();
+ format!("https://registry.npmjs.org/{package_name}/-/{file_name}")
+ } else {
+ format!("https://registry.npmjs.org/{package_name}")
+ };
+ let client = reqwest::Client::new();
+ let response = client.get(url).send().await?;
+ let bytes = response.bytes().await?;
+ let bytes = if is_tarball {
+ bytes.to_vec()
+ } else {
+ String::from_utf8(bytes.to_vec())
+ .unwrap()
+ .replace(
+ &format!("https://registry.npmjs.org/{package_name}/-/"),
+ &format!("http://localhost:4545/npm/registry/{package_name}/"),
+ )
+ .into_bytes()
+ };
+ std::fs::create_dir_all(file_path.parent().unwrap())?;
+ std::fs::write(file_path, bytes)?;
+ Ok(())
+}
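+
+// For example (the package name is illustrative), a tarball URL in the
+// downloaded registry JSON such as
+//   https://registry.npmjs.org/chalk/-/chalk-5.0.1.tgz
+// is rewritten above to point back at this test server:
+//   http://localhost:4545/npm/registry/chalk/chalk-5.0.1.tgz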
+
+async fn wrap_redirect_server(port: u16) {
+ let redirect_addr = SocketAddr::from(([127, 0, 0, 1], port));
+ run_server(
+ ServerOptions {
+ addr: redirect_addr,
+ error_msg: "Redirect error",
+ kind: ServerKind::Auto,
+ },
+ redirect,
+ )
+ .await;
+}
+
+async fn wrap_double_redirect_server(port: u16) {
+ let double_redirects_addr = SocketAddr::from(([127, 0, 0, 1], port));
+ run_server(
+ ServerOptions {
+ addr: double_redirects_addr,
+ error_msg: "Double redirect error",
+ kind: ServerKind::Auto,
+ },
+ double_redirects,
+ )
+ .await;
+}
+
+async fn wrap_inf_redirect_server(port: u16) {
+ let inf_redirects_addr = SocketAddr::from(([127, 0, 0, 1], port));
+ run_server(
+ ServerOptions {
+ addr: inf_redirects_addr,
+ error_msg: "Inf redirect error",
+ kind: ServerKind::Auto,
+ },
+ inf_redirects,
+ )
+ .await;
+}
+
+async fn wrap_another_redirect_server(port: u16) {
+ let another_redirect_addr = SocketAddr::from(([127, 0, 0, 1], port));
+ run_server(
+ ServerOptions {
+ addr: another_redirect_addr,
+ error_msg: "Another redirect error",
+ kind: ServerKind::Auto,
+ },
+ another_redirect,
+ )
+ .await;
+}
+
+async fn wrap_auth_redirect_server(port: u16) {
+ let auth_redirect_addr = SocketAddr::from(([127, 0, 0, 1], port));
+ run_server(
+ ServerOptions {
+ addr: auth_redirect_addr,
+ error_msg: "Auth redirect error",
+ kind: ServerKind::Auto,
+ },
+ auth_redirect,
+ )
+ .await;
+}
+
+async fn wrap_basic_auth_redirect_server(port: u16) {
+ let basic_auth_redirect_addr = SocketAddr::from(([127, 0, 0, 1], port));
+ run_server(
+ ServerOptions {
+ addr: basic_auth_redirect_addr,
+ error_msg: "Basic auth redirect error",
+ kind: ServerKind::Auto,
+ },
+ basic_auth_redirect,
+ )
+ .await;
+}
+
+async fn wrap_abs_redirect_server(port: u16) {
+ let abs_redirect_addr = SocketAddr::from(([127, 0, 0, 1], port));
+ run_server(
+ ServerOptions {
+ addr: abs_redirect_addr,
+ error_msg: "Absolute redirect error",
+ kind: ServerKind::Auto,
+ },
+ absolute_redirect,
+ )
+ .await;
+}
+
+async fn wrap_main_server(port: u16) {
+ let main_server_addr = SocketAddr::from(([127, 0, 0, 1], port));
+ wrap_main_server_for_addr(&main_server_addr).await
+}
+
+// necessary because on Windows the npm binary will resolve localhost to ::1
+async fn wrap_main_ipv6_server(port: u16) {
+ let ipv6_loopback = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1);
+ let main_server_addr =
+ SocketAddr::V6(SocketAddrV6::new(ipv6_loopback, port, 0, 0));
+ wrap_main_server_for_addr(&main_server_addr).await
+}
+
+async fn wrap_main_server_for_addr(main_server_addr: &SocketAddr) {
+ run_server(
+ ServerOptions {
+ addr: *main_server_addr,
+ kind: ServerKind::Auto,
+ error_msg: "HTTP server error",
+ },
+ main_server,
+ )
+ .await;
+}
+
+async fn wrap_main_https_server(port: u16) {
+ let tls = get_tls_listener_stream("https", port, Default::default()).await;
+ let tls_acceptor = tls.boxed_local();
+ run_server_with_acceptor(
+ tls_acceptor,
+ main_server,
+ "HTTPS server error",
+ ServerKind::Auto,
+ )
+ .await
+}
+
+async fn wrap_https_h1_only_tls_server(port: u16) {
+ let tls = get_tls_listener_stream(
+ "https (h1 only)",
+ port,
+ SupportedHttpVersions::Http1Only,
+ )
+ .await;
+
+ run_server_with_acceptor(
+ tls.boxed_local(),
+ main_server,
+ "HTTP1 only TLS server error",
+ ServerKind::OnlyHttp1,
+ )
+ .await
+}
+
+async fn wrap_https_h2_only_tls_server(port: u16) {
+ let tls = get_tls_listener_stream(
+ "https (h2 only)",
+ port,
+ SupportedHttpVersions::Http2Only,
+ )
+ .await;
+
+ run_server_with_acceptor(
+ tls.boxed_local(),
+ main_server,
+ "HTTP2 only TLS server error",
+ ServerKind::OnlyHttp2,
+ )
+ .await
+}
+
+async fn wrap_http_h1_only_server(port: u16) {
+ let main_server_http_addr = SocketAddr::from(([127, 0, 0, 1], port));
+ run_server(
+ ServerOptions {
+ addr: main_server_http_addr,
+ error_msg: "HTTP1 only server error:",
+ kind: ServerKind::OnlyHttp1,
+ },
+ main_server,
+ )
+ .await;
+}
+
+async fn wrap_http_h2_only_server(port: u16) {
+ let main_server_http_addr = SocketAddr::from(([127, 0, 0, 1], port));
+ run_server(
+ ServerOptions {
+ addr: main_server_http_addr,
+ error_msg: "HTTP1 only server error:",
+ kind: ServerKind::OnlyHttp2,
+ },
+ main_server,
+ )
+ .await;
+}
+
+async fn wrap_client_auth_https_server(port: u16) {
+ let mut tls =
+ get_tls_listener_stream("https_client_auth", port, Default::default())
+ .await;
+
+ let tls = async_stream::stream! {
+ while let Some(Ok(mut tls)) = tls.next().await {
+ let handshake = tls.handshake().await?;
+ // We only need to check for the presence of client certificates
+      // here. Rustls ensures that they are valid and signed by the CA.
+ match handshake.has_peer_certificates {
+ true => { yield Ok(tls); },
+ false => { eprintln!("https_client_auth: no valid client certificate"); },
+ };
+ }
+ };
+
+ run_server_with_acceptor(
+ tls.boxed_local(),
+ main_server,
+ "Auth TLS server error",
+ ServerKind::Auto,
+ )
+ .await
+}
+
+fn custom_headers(
+ p: &str,
+ body: Vec<u8>,
+) -> Response<UnsyncBoxBody<Bytes, Infallible>> {
+ let mut response = Response::new(UnsyncBoxBody::new(
+ http_body_util::Full::new(Bytes::from(body)),
+ ));
+
+ if p.ends_with("/run/import_compression/brotli") {
+ response
+ .headers_mut()
+ .insert("Content-Encoding", HeaderValue::from_static("br"));
+ response.headers_mut().insert(
+ "Content-Type",
+ HeaderValue::from_static("application/javascript"),
+ );
+ response
+ .headers_mut()
+ .insert("Content-Length", HeaderValue::from_static("26"));
+ return response;
+ }
+ if p.ends_with("/run/import_compression/gziped") {
+ response
+ .headers_mut()
+ .insert("Content-Encoding", HeaderValue::from_static("gzip"));
+ response.headers_mut().insert(
+ "Content-Type",
+ HeaderValue::from_static("application/javascript"),
+ );
+ response
+ .headers_mut()
+ .insert("Content-Length", HeaderValue::from_static("39"));
+ return response;
+ }
+
+ if p.contains("/encoding/") {
+ let charset = p
+ .split_terminator('/')
+ .last()
+ .unwrap()
+ .trim_end_matches(".ts");
+
+ response.headers_mut().insert(
+ "Content-Type",
+ HeaderValue::from_str(
+ &format!("application/typescript;charset={charset}")[..],
+ )
+ .unwrap(),
+ );
+ return response;
+ }
+
+ let content_type = if p.contains(".t1.") {
+ Some("text/typescript")
+ } else if p.contains(".t2.") {
+ Some("video/vnd.dlna.mpeg-tts")
+ } else if p.contains(".t3.") {
+ Some("video/mp2t")
+ } else if p.contains(".t4.") {
+ Some("application/x-typescript")
+ } else if p.contains(".j1.") {
+ Some("text/javascript")
+ } else if p.contains(".j2.") {
+ Some("application/ecmascript")
+ } else if p.contains(".j3.") {
+ Some("text/ecmascript")
+ } else if p.contains(".j4.") {
+ Some("application/x-javascript")
+ } else if p.contains("form_urlencoded") {
+ Some("application/x-www-form-urlencoded")
+ } else if p.contains("unknown_ext") || p.contains("no_ext") {
+ Some("text/typescript")
+ } else if p.contains("mismatch_ext") || p.contains("no_js_ext") {
+ Some("text/javascript")
+ } else if p.ends_with(".ts") || p.ends_with(".tsx") {
+ Some("application/typescript")
+ } else if p.ends_with(".js") || p.ends_with(".jsx") {
+ Some("application/javascript")
+ } else if p.ends_with(".json") {
+ Some("application/json")
+ } else if p.ends_with(".wasm") {
+ Some("application/wasm")
+ } else if p.ends_with(".tgz") {
+ Some("application/gzip")
+ } else {
+ None
+ };
+
+ if let Some(t) = content_type {
+ response
+ .headers_mut()
+ .insert("Content-Type", HeaderValue::from_str(t).unwrap());
+ return response;
+ }
+
+ response
+}
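+
+// A few illustrative mappings produced by `custom_headers` (the paths are
+// hypothetical):
+//
+//   /encoding/utf-16le.ts            -> Content-Type: application/typescript;charset=utf-16le
+//   /run/import_compression/gziped   -> Content-Encoding: gzip, Content-Length: 39
+//   /npm/registry/foo/foo-1.0.0.tgz  -> Content-Type: application/gzip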
diff --git a/tests/util/server/src/servers/registry.rs b/tests/util/server/src/servers/registry.rs
new file mode 100644
index 000000000..0efe06217
--- /dev/null
+++ b/tests/util/server/src/servers/registry.rs
@@ -0,0 +1,182 @@
+// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
+
+use crate::testdata_path;
+
+use super::run_server;
+use super::ServerKind;
+use super::ServerOptions;
+use bytes::Bytes;
+use http_body_util::combinators::UnsyncBoxBody;
+use http_body_util::Empty;
+use http_body_util::Full;
+use hyper::body::Incoming;
+use hyper::Request;
+use hyper::Response;
+use hyper::StatusCode;
+use once_cell::sync::Lazy;
+use serde_json::json;
+use std::collections::BTreeMap;
+use std::collections::HashMap;
+use std::convert::Infallible;
+use std::net::SocketAddr;
+use std::path::Path;
+use std::sync::Mutex;
+
+pub async fn registry_server(port: u16) {
+ let registry_server_addr = SocketAddr::from(([127, 0, 0, 1], port));
+
+ run_server(
+ ServerOptions {
+ addr: registry_server_addr,
+ error_msg: "Registry server error",
+ kind: ServerKind::Auto,
+ },
+ registry_server_handler,
+ )
+ .await
+}
+
+async fn registry_server_handler(
+ req: Request<Incoming>,
+) -> Result<Response<UnsyncBoxBody<Bytes, Infallible>>, anyhow::Error> {
+ let path = req.uri().path();
+
+ // TODO(bartlomieju): add a proper router here
+ if path.starts_with("/api/scope/") {
+ let body = serde_json::to_string_pretty(&json!({})).unwrap();
+ let res = Response::new(UnsyncBoxBody::new(Full::from(body)));
+ return Ok(res);
+ } else if path.starts_with("/api/scopes/") {
+ let body = serde_json::to_string_pretty(&json!({
+ "id": "sdfwqer-sffg-qwerasdf",
+ "status": "success",
+ "error": null
+ }))
+ .unwrap();
+ let res = Response::new(UnsyncBoxBody::new(Full::from(body)));
+ return Ok(res);
+ } else if path.starts_with("/api/publish_status/") {
+ let body = serde_json::to_string_pretty(&json!({
+ "id": "sdfwqer-qwer-qwerasdf",
+ "status": "success",
+ "error": null
+ }))
+ .unwrap();
+ let res = Response::new(UnsyncBoxBody::new(Full::from(body)));
+ return Ok(res);
+ }
+
+ // serve the registry package files
+ let mut file_path =
+ testdata_path().to_path_buf().join("jsr").join("registry");
+ file_path.push(&req.uri().path()[1..].replace("%2f", "/"));
+ if let Ok(body) = tokio::fs::read(&file_path).await {
+ let body = if let Some(version) = file_path
+ .file_name()
+ .unwrap()
+ .to_string_lossy()
+ .strip_suffix("_meta.json")
+ {
+      // fill the manifest with checksums computed from the files in the
+      // version directory so they don't need to be maintained manually in
+      // the testdata directory
+ let mut meta: serde_json::Value = serde_json::from_slice(&body)?;
+ let mut manifest =
+ manifest_sorted(meta.get("manifest").cloned().unwrap_or(json!({})));
+ let version_dir = file_path.parent().unwrap().join(version);
+ fill_manifest_at_dir(&mut manifest, &version_dir);
+ meta
+ .as_object_mut()
+ .unwrap()
+ .insert("manifest".to_string(), json!(manifest));
+ serde_json::to_string(&meta).unwrap().into_bytes()
+ } else {
+ body
+ };
+ return Ok(Response::new(UnsyncBoxBody::new(
+ http_body_util::Full::new(Bytes::from(body)),
+ )));
+ }
+
+ let empty_body = UnsyncBoxBody::new(Empty::new());
+ let res = Response::builder()
+ .status(StatusCode::NOT_FOUND)
+ .body(empty_body)?;
+ Ok(res)
+}
+
+fn manifest_sorted(
+ meta: serde_json::Value,
+) -> BTreeMap<String, serde_json::Value> {
+ let mut manifest = BTreeMap::new();
+ if let serde_json::Value::Object(files) = meta {
+ for (file, checksum) in files {
+ manifest.insert(file.clone(), checksum.clone());
+ }
+ }
+ manifest
+}
+
+fn fill_manifest_at_dir(
+ manifest: &mut BTreeMap<String, serde_json::Value>,
+ dir: &Path,
+) {
+ let file_system_manifest = get_manifest_entries_for_dir(dir);
+ for (file_path, value) in file_system_manifest {
+ manifest.entry(file_path).or_insert(value);
+ }
+}
+
+static DIR_MANIFEST_CACHE: Lazy<
+ Mutex<HashMap<String, BTreeMap<String, serde_json::Value>>>,
+> = Lazy::new(Default::default);
+
+fn get_manifest_entries_for_dir(
+ dir: &Path,
+) -> BTreeMap<String, serde_json::Value> {
+ fn inner_fill(
+ root_dir: &Path,
+ dir: &Path,
+ manifest: &mut BTreeMap<String, serde_json::Value>,
+ ) {
+ for entry in std::fs::read_dir(dir).unwrap() {
+ let entry = entry.unwrap();
+ let path = entry.path();
+ if path.is_file() {
+ let file_bytes = std::fs::read(&path).unwrap();
+ let checksum = format!("sha256-{}", get_checksum(&file_bytes));
+ let relative_path = path
+ .to_string_lossy()
+ .strip_prefix(&root_dir.to_string_lossy().to_string())
+ .unwrap()
+ .replace('\\', "/");
+ manifest.insert(
+ relative_path,
+ json!({
+ "size": file_bytes.len(),
+ "checksum": checksum,
+ }),
+ );
+ } else if path.is_dir() {
+ inner_fill(root_dir, &path, manifest);
+ }
+ }
+ }
+
+ DIR_MANIFEST_CACHE
+ .lock()
+ .unwrap()
+ .entry(dir.to_string_lossy().to_string())
+ .or_insert_with(|| {
+ let mut manifest = BTreeMap::new();
+ inner_fill(dir, dir, &mut manifest);
+ manifest
+ })
+ .clone()
+}
+
+fn get_checksum(bytes: &[u8]) -> String {
+ use sha2::Digest;
+ let mut hasher = sha2::Sha256::new();
+ hasher.update(bytes);
+ format!("{:x}", hasher.finalize())
+}
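+
+// The entries filled in above end up shaped roughly like this (values are
+// illustrative):
+//
+//   "/mod.ts": { "size": 137, "checksum": "sha256-<hex digest of the file>" }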
diff --git a/tests/util/server/src/servers/ws.rs b/tests/util/server/src/servers/ws.rs
new file mode 100644
index 000000000..815119b6a
--- /dev/null
+++ b/tests/util/server/src/servers/ws.rs
@@ -0,0 +1,268 @@
+// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
+
+use anyhow::anyhow;
+use bytes::Bytes;
+use fastwebsockets::FragmentCollector;
+use fastwebsockets::Frame;
+use fastwebsockets::OpCode;
+use fastwebsockets::Role;
+use fastwebsockets::WebSocket;
+use futures::future::join3;
+use futures::future::poll_fn;
+use futures::Future;
+use futures::StreamExt;
+use h2::server::Handshake;
+use h2::server::SendResponse;
+use h2::Reason;
+use h2::RecvStream;
+use hyper::upgrade::Upgraded;
+use hyper::Method;
+use hyper::Request;
+use hyper::Response;
+use hyper::StatusCode;
+use hyper_util::rt::TokioIo;
+use pretty_assertions::assert_eq;
+use std::pin::Pin;
+use std::result::Result;
+use tokio::io::AsyncReadExt;
+use tokio::io::AsyncWriteExt;
+
+use super::get_tcp_listener_stream;
+use super::get_tls_listener_stream;
+use super::SupportedHttpVersions;
+
+pub async fn run_ws_server(port: u16) {
+ let mut tcp = get_tcp_listener_stream("ws", port).await;
+ while let Some(Ok(stream)) = tcp.next().await {
+ spawn_ws_server(stream, |ws| Box::pin(echo_websocket_handler(ws)));
+ }
+}
+
+pub async fn run_ws_ping_server(port: u16) {
+ let mut tcp = get_tcp_listener_stream("ws (ping)", port).await;
+ while let Some(Ok(stream)) = tcp.next().await {
+ spawn_ws_server(stream, |ws| Box::pin(ping_websocket_handler(ws)));
+ }
+}
+
+pub async fn run_wss_server(port: u16) {
+ let mut tls = get_tls_listener_stream("wss", port, Default::default()).await;
+ while let Some(Ok(tls_stream)) = tls.next().await {
+ tokio::spawn(async move {
+ spawn_ws_server(tls_stream, |ws| Box::pin(echo_websocket_handler(ws)));
+ });
+ }
+}
+
+pub async fn run_ws_close_server(port: u16) {
+ let mut tcp = get_tcp_listener_stream("ws (close)", port).await;
+ while let Some(Ok(stream)) = tcp.next().await {
+ spawn_ws_server(stream, |ws| Box::pin(close_websocket_handler(ws)));
+ }
+}
+
+pub async fn run_wss2_server(port: u16) {
+ let mut tls = get_tls_listener_stream(
+ "wss2 (tls)",
+ port,
+ SupportedHttpVersions::Http2Only,
+ )
+ .await;
+ while let Some(Ok(tls)) = tls.next().await {
+ tokio::spawn(async move {
+ let mut h2 = h2::server::Builder::new();
+ h2.enable_connect_protocol();
+ // Using Bytes is pretty alloc-heavy but this is a test server
+ let server: Handshake<_, Bytes> = h2.handshake(tls);
+ let mut server = match server.await {
+ Ok(server) => server,
+ Err(e) => {
+ println!("Failed to handshake h2: {e:?}");
+ return;
+ }
+ };
+ loop {
+ let Some(conn) = server.accept().await else {
+ break;
+ };
+ let (recv, send) = match conn {
+ Ok(conn) => conn,
+ Err(e) => {
+ println!("Failed to accept a connection: {e:?}");
+ break;
+ }
+ };
+ tokio::spawn(handle_wss_stream(recv, send));
+ }
+ });
+ }
+}
+
+async fn echo_websocket_handler(
+ ws: fastwebsockets::WebSocket<TokioIo<Upgraded>>,
+) -> Result<(), anyhow::Error> {
+ let mut ws = FragmentCollector::new(ws);
+
+ loop {
+ let frame = ws.read_frame().await.unwrap();
+ match frame.opcode {
+ OpCode::Close => break,
+ OpCode::Text | OpCode::Binary => {
+ ws.write_frame(frame).await.unwrap();
+ }
+ _ => {}
+ }
+ }
+
+ Ok(())
+}
+
+type WsHandler =
+ fn(
+ fastwebsockets::WebSocket<TokioIo<Upgraded>>,
+ ) -> Pin<Box<dyn Future<Output = Result<(), anyhow::Error>> + Send>>;
+
+fn spawn_ws_server<S>(stream: S, handler: WsHandler)
+where
+ S: tokio::io::AsyncRead + tokio::io::AsyncWrite + Unpin + Send + 'static,
+{
+ let service = hyper::service::service_fn(
+ move |mut req: http::Request<hyper::body::Incoming>| async move {
+ let (response, upgrade_fut) = fastwebsockets::upgrade::upgrade(&mut req)
+ .map_err(|e| anyhow!("Error upgrading websocket connection: {}", e))?;
+
+ tokio::spawn(async move {
+ let ws = upgrade_fut
+ .await
+ .map_err(|e| anyhow!("Error upgrading websocket connection: {}", e))
+ .unwrap();
+
+ if let Err(e) = handler(ws).await {
+ eprintln!("Error in websocket connection: {}", e);
+ }
+ });
+
+ Ok::<_, anyhow::Error>(response)
+ },
+ );
+
+ let io = TokioIo::new(stream);
+ tokio::spawn(async move {
+ let conn = hyper::server::conn::http1::Builder::new()
+ .serve_connection(io, service)
+ .with_upgrades();
+
+ if let Err(e) = conn.await {
+ eprintln!("websocket server error: {e:?}");
+ }
+ });
+}
+
+async fn handle_wss_stream(
+ recv: Request<RecvStream>,
+ mut send: SendResponse<Bytes>,
+) -> Result<(), h2::Error> {
+ if recv.method() != Method::CONNECT {
+ eprintln!("wss2: refusing non-CONNECT stream");
+ send.send_reset(Reason::REFUSED_STREAM);
+ return Ok(());
+ }
+ let Some(protocol) = recv.extensions().get::<h2::ext::Protocol>() else {
+ eprintln!("wss2: refusing no-:protocol stream");
+ send.send_reset(Reason::REFUSED_STREAM);
+ return Ok(());
+ };
+ if protocol.as_str() != "websocket" && protocol.as_str() != "WebSocket" {
+ eprintln!("wss2: refusing non-websocket stream");
+ send.send_reset(Reason::REFUSED_STREAM);
+ return Ok(());
+ }
+ let mut body = recv.into_body();
+ let mut response = Response::new(());
+ *response.status_mut() = StatusCode::OK;
+ let mut resp = send.send_response(response, false)?;
+ // Use a duplex stream to talk to fastwebsockets because it's just faster to implement
+ let (a, b) = tokio::io::duplex(65536);
+ let f1 = tokio::spawn(tokio::task::unconstrained(async move {
+ let ws = WebSocket::after_handshake(a, Role::Server);
+ let mut ws = FragmentCollector::new(ws);
+ loop {
+ let frame = ws.read_frame().await.unwrap();
+ if frame.opcode == OpCode::Close {
+ break;
+ }
+ ws.write_frame(frame).await.unwrap();
+ }
+ }));
+ let (mut br, mut bw) = tokio::io::split(b);
+ let f2 = tokio::spawn(tokio::task::unconstrained(async move {
+ loop {
+ let Some(Ok(data)) = poll_fn(|cx| body.poll_data(cx)).await else {
+ return;
+ };
+ body.flow_control().release_capacity(data.len()).unwrap();
+ let Ok(_) = bw.write_all(&data).await else {
+ break;
+ };
+ }
+ }));
+ let f3 = tokio::spawn(tokio::task::unconstrained(async move {
+ loop {
+ let mut buf = [0; 65536];
+ let n = br.read(&mut buf).await.unwrap();
+ if n == 0 {
+ break;
+ }
+ resp.reserve_capacity(n);
+ poll_fn(|cx| resp.poll_capacity(cx)).await;
+ resp
+ .send_data(Bytes::copy_from_slice(&buf[0..n]), false)
+ .unwrap();
+ }
+ resp.send_data(Bytes::new(), true).unwrap();
+ }));
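+  // Plumbing of the three tasks: f2 copies HTTP/2 DATA frames from the request
+  // body into one end of the duplex pipe, f1 runs the websocket echo loop
+  // against the other end, and f3 streams whatever the echo loop writes back
+  // out as DATA frames on the response.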
+ _ = join3(f1, f2, f3).await;
+ Ok(())
+}
+
+async fn close_websocket_handler(
+ ws: fastwebsockets::WebSocket<TokioIo<Upgraded>>,
+) -> Result<(), anyhow::Error> {
+ let mut ws = FragmentCollector::new(ws);
+
+ ws.write_frame(Frame::close_raw(vec![].into()))
+ .await
+ .unwrap();
+
+ Ok(())
+}
+
+async fn ping_websocket_handler(
+ ws: fastwebsockets::WebSocket<TokioIo<Upgraded>>,
+) -> Result<(), anyhow::Error> {
+ let mut ws = FragmentCollector::new(ws);
+
+ for i in 0..9 {
+ ws.write_frame(Frame::new(true, OpCode::Ping, None, vec![].into()))
+ .await
+ .unwrap();
+
+ let frame = ws.read_frame().await.unwrap();
+ assert_eq!(frame.opcode, OpCode::Pong);
+ assert!(frame.payload.is_empty());
+
+ ws.write_frame(Frame::text(
+ format!("hello {}", i).as_bytes().to_vec().into(),
+ ))
+ .await
+ .unwrap();
+
+ let frame = ws.read_frame().await.unwrap();
+ assert_eq!(frame.opcode, OpCode::Text);
+ assert_eq!(frame.payload, format!("hello {}", i).as_bytes());
+ }
+
+ ws.write_frame(Frame::close(1000, b"")).await.unwrap();
+
+ Ok(())
+}
diff --git a/tests/util/server/src/spawn.rs b/tests/util/server/src/spawn.rs
new file mode 100644
index 000000000..bfd83e9b2
--- /dev/null
+++ b/tests/util/server/src/spawn.rs
@@ -0,0 +1,71 @@
+// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
+use anyhow::Error;
+use std::convert::Infallible;
+
+/// For unix targets, we just replace our current process with the desired command.
+#[cfg(unix)]
+pub fn exec_replace_inner(
+ cmd: &str,
+ args: &[&str],
+) -> Result<Infallible, Error> {
+ use std::ffi::CStr;
+ use std::ffi::CString;
+
+ let args = args
+ .iter()
+ .map(|arg| CString::new(*arg).unwrap())
+ .collect::<Vec<_>>();
+ let args: Vec<&CStr> =
+ args.iter().map(|arg| arg.as_ref()).collect::<Vec<_>>();
+
+ let err = nix::unistd::execvp(&CString::new(cmd).unwrap(), &args)
+ .expect_err("Impossible");
+ Err(err.into())
+}
+
+#[cfg(windows)]
+pub fn exec_replace_inner(
+ cmd: &str,
+ args: &[&str],
+) -> Result<Infallible, Error> {
+ use std::os::windows::io::AsRawHandle;
+ use std::process::Command;
+ use win32job::ExtendedLimitInfo;
+ use win32job::Job;
+
+ // Use a job to ensure the child process's lifetime does not exceed the current process's lifetime.
+ // This ensures that if the current process is terminated (e.g., via ctrl+c or task manager),
+ // the child process is automatically reaped.
+
+ // For more information about this technique, see Raymond Chen's blog post:
+ // https://devblogs.microsoft.com/oldnewthing/20131209-00/?p=2433
+ // Note: While our implementation is not perfect, it serves its purpose for test code.
+
+ // In the future, we may directly obtain the main thread's handle from Rust code and use it
+ // to create a suspended process that we can then resume:
+ // https://github.com/rust-lang/rust/issues/96723
+
+  // Create the child process, then create the job and assign both the current
+  // process and the child to it. A more reliable approach would be to create
+  // the child suspended and assign it to the job before resuming it.
+ let mut child = Command::new(cmd).args(&args[1..]).spawn()?;
+
+ let mut info = ExtendedLimitInfo::default();
+ info.limit_kill_on_job_close();
+ let job = Job::create_with_limit_info(&info)?;
+ job.assign_current_process()?;
+ let handle = child.as_raw_handle();
+ job.assign_process(handle as _)?;
+
+ let exit = child.wait()?;
+ std::process::exit(exit.code().unwrap_or(1));
+}
+
+/// Runs a command, replacing the current process on Unix. On Windows, this
+/// function blocks until the child exits and then exits with its exit code.
+///
+/// In either case, the only way this function returns is if it fails to launch the child
+/// process.
+pub fn exec_replace(command: &str, args: &[&str]) -> Result<Infallible, Error> {
+ exec_replace_inner(command, args)
+}
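+
+// Hypothetical usage from a test harness (note that `args` repeats the program
+// name as its first element, matching the execvp convention above):
+//
+//   let _ = exec_replace("cargo", &["cargo", "build", "-p", "test_server"]);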
diff --git a/tests/util/server/src/test_server.rs b/tests/util/server/src/test_server.rs
new file mode 100644
index 000000000..b0f74d606
--- /dev/null
+++ b/tests/util/server/src/test_server.rs
@@ -0,0 +1,5 @@
+// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
+
+fn main() {
+ test_server::servers::run_all_servers();
+}
diff --git a/tests/util/server/src/testdata/strace_summary.out b/tests/util/server/src/testdata/strace_summary.out
new file mode 100644
index 000000000..7984b175a
--- /dev/null
+++ b/tests/util/server/src/testdata/strace_summary.out
@@ -0,0 +1,39 @@
+% time seconds usecs/call calls errors syscall
+------ ----------- ----------- --------- --------- ----------------
+ 65.76 0.005881 98 60 munmap
+ 13.79 0.001233 2 462 mprotect
+ 7.13 0.000638 11 56 mmap
+ 3.57 0.000319 22 14 openat
+ 1.65 0.000148 10 14 fstat
+ 1.58 0.000141 7 20 read
+ 1.53 0.000137 7 18 close
+ 1.49 0.000133 16 8 madvise
+ 1.10 0.000098 98 1 execve
+ 0.30 0.000027 9 3 prctl
+ 0.29 0.000026 26 1 1 access
+ 0.25 0.000022 11 2 2 mkdir
+ 0.23 0.000021 7 3 write
+ 0.18 0.000016 4 4 set_robust_list
+ 0.16 0.000014 7 2 brk
+ 0.15 0.000013 13 1 pipe2
+ 0.11 0.000010 3 3 clone
+ 0.11 0.000010 3 3 sigaltstack
+ 0.10 0.000009 4 2 stat
+ 0.10 0.000009 9 1 arch_prctl
+ 0.10 0.000009 9 1 epoll_create1
+ 0.09 0.000008 8 1 epoll_ctl
+ 0.08 0.000007 3 2 getrandom
+ 0.04 0.000004 4 1 getcwd
+ 0.04 0.000004 2 2 sched_getaffinity
+ 0.03 0.000003 3 1 1 ioctl
+ 0.03 0.000003 1 3 futex
+ 0.00 0.000000 0 1 open
+ 0.00 0.000000 0 5 rt_sigaction
+ 0.00 0.000000 0 1 rt_sigprocmask
+ 0.00 0.000000 0 1 fcntl
+ 0.00 0.000000 0 1 1 readlink
+ 0.00 0.000000 0 1 set_tid_address
+ 0.00 0.000000 0 3 epoll_wait
+ 0.00 0.000000 0 2 prlimit64
+------ ----------- ----------- --------- --------- ----------------
+100.00 0.008943 704 5 total
diff --git a/tests/util/server/src/testdata/strace_summary2.out b/tests/util/server/src/testdata/strace_summary2.out
new file mode 100644
index 000000000..798a06665
--- /dev/null
+++ b/tests/util/server/src/testdata/strace_summary2.out
@@ -0,0 +1,37 @@
+17697 ????( <detached ...>
+% time seconds usecs/call calls errors syscall
+------ ----------- ----------- --------- --------- ----------------
+ 63.19 0.030363 68 449 94 futex
+ 34.70 0.016672 16672 1 epoll_wait
+ 1.58 0.000761 6 129 mprotect
+ 0.40 0.000193 3 58 madvise
+ 0.11 0.000055 3 17 brk
+ 0.01 0.000003 0 32 mmap
+ 0.00 0.000000 0 20 1 read
+ 0.00 0.000000 0 1 write
+ 0.00 0.000000 0 14 open
+ 0.00 0.000000 0 17 close
+ 0.00 0.000000 0 10 fstat
+ 0.00 0.000000 0 10 munmap
+ 0.00 0.000000 0 5 rt_sigaction
+ 0.00 0.000000 0 1 rt_sigprocmask
+ 0.00 0.000000 0 4 4 ioctl
+ 0.00 0.000000 0 8 8 access
+ 0.00 0.000000 0 6 sched_yield
+ 0.00 0.000000 0 3 clone
+ 0.00 0.000000 0 1 execve
+ 0.00 0.000000 0 3 fcntl
+ 0.00 0.000000 0 5 getcwd
+ 0.00 0.000000 0 2 getrlimit
+ 0.00 0.000000 0 9 sigaltstack
+ 0.00 0.000000 0 3 prctl
+ 0.00 0.000000 0 1 arch_prctl
+ 0.00 0.000000 0 3 sched_getaffinity
+ 0.00 0.000000 0 1 set_tid_address
+ 0.00 0.000000 0 1 epoll_ctl
+ 0.00 0.000000 0 4 set_robust_list
+ 0.00 0.000000 0 1 epoll_create1
+ 0.00 0.000000 0 1 pipe2
+ 0.00 0.000000 0 1 getrandom
+------ ----------- ----------- --------- --------- ----------------
+100.00 0.048047 821 107 total
diff --git a/tests/util/server/src/testdata/strace_summary3.out b/tests/util/server/src/testdata/strace_summary3.out
new file mode 100644
index 000000000..c0cb844ca
--- /dev/null
+++ b/tests/util/server/src/testdata/strace_summary3.out
@@ -0,0 +1,48 @@
+% time seconds usecs/call calls errors syscall
+------ ----------- ----------- --------- --------- ------------------
+ 14.12 0.000501 5 90 mprotect
+ 12.23 0.000434 62 7 clone3
+ 8.51 0.000302 13 22 rt_sigprocmask
+ 7.10 0.000252 7 32 read
+ 7.02 0.000249 6 39 madvise
+ 6.31 0.000224 8 26 7 openat
+ 5.69 0.000202 5 34 mmap
+ 5.10 0.000181 4 39 2 newfstatat
+ 4.40 0.000156 4 39 fcntl
+ 4.17 0.000148 5 27 brk
+ 3.27 0.000116 4 26 close
+ 3.16 0.000112 14 8 rseq
+ 3.04 0.000108 15 7 prctl
+ 2.56 0.000091 11 8 set_robust_list
+ 2.20 0.000078 6 12 gettid
+ 1.69 0.000060 5 11 munmap
+ 1.55 0.000055 5 10 write
+ 1.38 0.000049 3 14 lseek
+ 1.01 0.000036 7 5 3 ioctl
+ 0.90 0.000032 6 5 getpid
+ 0.82 0.000029 4 7 getcwd
+ 0.65 0.000023 5 4 sched_getaffinity
+ 0.51 0.000018 18 1 1 pkey_alloc
+ 0.45 0.000016 8 2 unlink
+ 0.45 0.000016 1 16 9 statx
+ 0.31 0.000011 1 6 prlimit64
+ 0.31 0.000011 2 4 getrandom
+ 0.25 0.000009 9 1 uname
+ 0.23 0.000008 1 6 rt_sigaction
+ 0.23 0.000008 4 2 geteuid
+ 0.20 0.000007 7 1 ftruncate
+ 0.11 0.000004 1 3 sigaltstack
+ 0.08 0.000003 3 1 getppid
+ 0.00 0.000000 0 1 poll
+ 0.00 0.000000 0 4 pread64
+ 0.00 0.000000 0 1 1 access
+ 0.00 0.000000 0 1 socketpair
+ 0.00 0.000000 0 1 execve
+ 0.00 0.000000 0 13 12 readlink
+ 0.00 0.000000 0 2 1 arch_prctl
+ 0.00 0.000000 0 1 set_tid_address
+ 0.00 0.000000 0 2 epoll_ctl
+ 0.00 0.000000 0 1 eventfd2
+ 0.00 0.000000 0 1 epoll_create1
+------ ----------- ----------- --------- --------- ------------------
+100.00 0.003549 6 543 36 total \ No newline at end of file
diff --git a/tests/util/server/src/testdata/time.out b/tests/util/server/src/testdata/time.out
new file mode 100644
index 000000000..3ff409bd7
--- /dev/null
+++ b/tests/util/server/src/testdata/time.out
@@ -0,0 +1,18 @@
+Hello
+ Command being timed: "./target/debug/deno tests/003_relative_import.ts"
+ User time (seconds): 2.43
+ System time (seconds): 0.05
+ Percent of CPU this job got: 156%
+ Elapsed (wall clock) time (h:mm:ss or m:ss): 0:01.59
+ Average shared text size (kbytes): 0
+ Average unshared data size (kbytes): 0
+ Average stack size (kbytes): 0
+ Average total size (kbytes): 0
+ Maximum resident set size (kbytes): 120380
+ Average resident set size (kbytes): 0
+ Major (requiring I/O) page faults: 0
+ Minor (reclaiming a frame) page faults: 41452
+ Voluntary context switches: 75
+ Involuntary context switches: 42
+ Swaps: 0
+ File system inputs: 0 \ No newline at end of file
diff --git a/tests/util/server/src/testdata/wrk1.txt b/tests/util/server/src/testdata/wrk1.txt
new file mode 100644
index 000000000..8ad7cf739
--- /dev/null
+++ b/tests/util/server/src/testdata/wrk1.txt
@@ -0,0 +1,14 @@
+Running 10s test @ http://127.0.0.1:4500/
+ 2 threads and 10 connections
+ Thread Stats Avg Stdev Max +/- Stdev
+ Latency 5.08ms 1.37ms 34.96ms 96.63%
+ Req/Sec 0.92k 51.83 1.00k 78.50%
+ Latency Distribution
+ 50% 1.96ms
+ 75% 2.02ms
+ 90% 2.43ms
+ 99% 6.25ms
+ 18381 requests in 10.00s, 0.89MB read
+ Socket errors: connect 0, read 18381, write 0, timeout 0
+Requests/sec: 1837.86
+Transfer/sec: 91.53KB
diff --git a/tests/util/server/src/testdata/wrk2.txt b/tests/util/server/src/testdata/wrk2.txt
new file mode 100644
index 000000000..4b68c6c8a
--- /dev/null
+++ b/tests/util/server/src/testdata/wrk2.txt
@@ -0,0 +1,13 @@
+Running 10s test @ http://127.0.0.1:4544/
+ 2 threads and 10 connections
+ Thread Stats Avg Stdev Max +/- Stdev
+ Latency 402.90us 1.15ms 1.25us 94.86%
+ Req/Sec 26.86k 2.01k 31.81k 78.71%
+ Latency Distribution
+ 50% 2.03ms
+ 75% 2.10ms
+ 90% 2.43ms
+ 99% 6.22ms
+ 539721 requests in 10.10s, 26.25MB read
+Requests/sec: 53435.75
+Transfer/sec: 2.60MB
diff --git a/tests/util/server/src/testdata/wrk3.txt b/tests/util/server/src/testdata/wrk3.txt
new file mode 100644
index 000000000..4c115a096
--- /dev/null
+++ b/tests/util/server/src/testdata/wrk3.txt
@@ -0,0 +1,13 @@
+Running 10s test @ http://127.0.0.1:4544/
+ 2 threads and 10 connections
+ Thread Stats Avg Stdev Max +/- Stdev
+ Latency 26.55ms 152.26ms 1.63s 97.45%
+ Req/Sec 48.26k 3.13k 61.41k 93.00%
+ Latency Distribution
+ 50% 1.98ms
+ 75% 2.06ms
+ 90% 2.47ms
+ 99% 6.36ms
+ 960491 requests in 10.00s, 80.61MB read
+Requests/sec: 96037.58
+Transfer/sec: 8.06MB