Diffstat (limited to 'cli')
 cli/Cargo.toml                 |   7 +
 cli/bench/deno_http_proxy.ts   |  20 +
 cli/bench/deno_tcp.ts          |  29 +
 cli/bench/deno_tcp_proxy.ts    |  30 +
 cli/bench/http.rs              | 303 +
 cli/bench/main.rs              | 462 +
 cli/bench/node_http.js         |   9 +
 cli/bench/node_http_proxy.js   |  22 +
 cli/bench/node_tcp.js          |  18 +
 cli/bench/node_tcp_promise.js  |  25 +
 cli/bench/node_tcp_proxy.js    |  68 +
 cli/bench/throughput.rs        |  62 +
 cli/tests/integration_tests.rs |   5 -
 13 files changed, 1055 insertions(+), 5 deletions(-)
diff --git a/cli/Cargo.toml b/cli/Cargo.toml
index d7149b79c..751a7c461 100644
--- a/cli/Cargo.toml
+++ b/cli/Cargo.toml
@@ -14,6 +14,11 @@ default-run = "deno"
name = "deno"
path = "main.rs"
+[[bench]]
+name = "deno_bench"
+harness = false
+path = "./bench/main.rs"
+
[build-dependencies]
deno_core = { path = "../core", version = "0.54.0" }
deno_web = { path = "../op_crates/web", version = "0.4.0" }
@@ -78,6 +83,8 @@ fwdansi = "1.1.0"
nix = "0.17.0"
[dev-dependencies]
+# Used in benchmark
+chrono = "0.4"
os_pipe = "0.9.2"
# Used for testing inspector. Keep in-sync with warp.
tokio-tungstenite = { version = "0.10.1", features = ["connect"] }
diff --git a/cli/bench/deno_http_proxy.ts b/cli/bench/deno_http_proxy.ts
new file mode 100644
index 000000000..6e5141377
--- /dev/null
+++ b/cli/bench/deno_http_proxy.ts
@@ -0,0 +1,20 @@
+// Copyright 2018-2020 the Deno authors. All rights reserved. MIT license.
+import { serve, ServerRequest } from "../std/http/server.ts";
+
+const addr = Deno.args[0] || "127.0.0.1:4500";
+const originAddr = Deno.args[1] || "127.0.0.1:4501";
+const server = serve(addr);
+
+async function proxyRequest(req: ServerRequest): Promise<void> {
+ const url = `http://${originAddr}${req.url}`;
+ const resp = await fetch(url, {
+ method: req.method,
+ headers: req.headers,
+ });
+ req.respond(resp);
+}
+
+console.log(`Proxy listening on http://${addr}/`);
+for await (const req of server) {
+ proxyRequest(req);
+}
diff --git a/cli/bench/deno_tcp.ts b/cli/bench/deno_tcp.ts
new file mode 100644
index 000000000..a204e7bfb
--- /dev/null
+++ b/cli/bench/deno_tcp.ts
@@ -0,0 +1,29 @@
+// Used for benchmarking Deno's networking.
+// TODO Replace this with a real HTTP server once
+// https://github.com/denoland/deno/issues/726 is completed.
+// Note: this is a keep-alive server.
+const addr = Deno.args[0] || "127.0.0.1:4500";
+const [hostname, port] = addr.split(":");
+const listener = Deno.listen({ hostname, port: Number(port) });
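+// Canned 12-byte response ("Hello World\n"); it is written back for every
+// read, regardless of the request contents.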
+const response = new TextEncoder().encode(
+ "HTTP/1.1 200 OK\r\nContent-Length: 12\r\n\r\nHello World\n",
+);
+async function handle(conn: Deno.Conn): Promise<void> {
+ const buffer = new Uint8Array(1024);
+ try {
+ while (true) {
+ const r = await conn.read(buffer);
+ if (r === null) {
+ break;
+ }
+ await conn.write(response);
+ }
+ } finally {
+ conn.close();
+ }
+}
+
+console.log("Listening on", addr);
+for await (const conn of listener) {
+ handle(conn);
+}
diff --git a/cli/bench/deno_tcp_proxy.ts b/cli/bench/deno_tcp_proxy.ts
new file mode 100644
index 000000000..db693b690
--- /dev/null
+++ b/cli/bench/deno_tcp_proxy.ts
@@ -0,0 +1,30 @@
+// Used for benchmarking Deno's tcp proxy performance.
+const addr = Deno.args[0] || "127.0.0.1:4500";
+const originAddr = Deno.args[1] || "127.0.0.1:4501";
+
+const [hostname, port] = addr.split(":");
+const [originHostname, originPort] = originAddr.split(":");
+
+const listener = Deno.listen({ hostname, port: Number(port) });
+
+async function handle(conn: Deno.Conn): Promise<void> {
+ const origin = await Deno.connect({
+ hostname: originHostname,
+ port: Number(originPort),
+ });
+ try {
+ await Promise.all([Deno.copy(conn, origin), Deno.copy(origin, conn)]);
+ } catch (err) {
+ if (err.message !== "read error" && err.message !== "write error") {
+ throw err;
+ }
+ } finally {
+ conn.close();
+ origin.close();
+ }
+}
+
+console.log(`Proxy listening on http://${addr}/`);
+for await (const conn of listener) {
+ handle(conn);
+}
diff --git a/cli/bench/http.rs b/cli/bench/http.rs
new file mode 100644
index 000000000..f9acf20da
--- /dev/null
+++ b/cli/bench/http.rs
@@ -0,0 +1,303 @@
+// Copyright 2018-2020 the Deno authors. All rights reserved. MIT license.
+
+use super::Result;
+use std::{
+ collections::HashMap, path::PathBuf, process::Command, time::Duration,
+};
+pub use test_util::{parse_wrk_output, WrkOutput as HttpBenchmarkResult};
+
+// Some of the benchmarks in this file have been renamed. In case the history
+// somehow gets messed up:
+// "node_http" was once called "node"
+// "deno_tcp" was once called "deno"
+// "deno_http" was once called "deno_net_http"
+
+const DURATION: &str = "20s";
+
+pub(crate) fn benchmark(
+ target_path: &PathBuf,
+) -> Result<HashMap<String, HttpBenchmarkResult>> {
+ let deno_exe = test_util::deno_exe_path();
+ let deno_exe = deno_exe.to_str().unwrap();
+
+ let hyper_hello_exe = target_path.join("test_server");
+ let hyper_hello_exe = hyper_hello_exe.to_str().unwrap();
+
+ let core_http_bin_ops_exe = target_path.join("examples/http_bench_bin_ops");
+ let core_http_bin_ops_exe = core_http_bin_ops_exe.to_str().unwrap();
+
+ let core_http_json_ops_exe = target_path.join("examples/http_bench_json_ops");
+ let core_http_json_ops_exe = core_http_json_ops_exe.to_str().unwrap();
+
+ let mut res = HashMap::new();
+
+ // "deno_tcp" was once called "deno"
+ res.insert("deno_tcp".to_string(), deno_tcp(deno_exe)?);
+ // res.insert("deno_udp".to_string(), deno_udp(deno_exe)?);
+ res.insert("deno_http".to_string(), deno_http(deno_exe)?);
+ // TODO(ry) deno_proxy disabled to make fetch() standards compliant.
+ // res.insert("deno_proxy".to_string(), deno_http_proxy(deno_exe) hyper_hello_exe))
+ res.insert(
+ "deno_proxy_tcp".to_string(),
+ deno_tcp_proxy(deno_exe, hyper_hello_exe)?,
+ );
+ // "core_http_bin_ops" was once called "deno_core_single"
+ // "core_http_bin_ops" was once called "deno_core_http_bench"
+ res.insert(
+ "core_http_bin_ops".to_string(),
+ core_http_bin_ops(core_http_bin_ops_exe)?,
+ );
+ res.insert(
+ "core_http_json_ops".to_string(),
+ core_http_json_ops(core_http_json_ops_exe)?,
+ );
+ // "node_http" was once called "node"
+ res.insert("node_http".to_string(), node_http()?);
+ res.insert("node_proxy".to_string(), node_http_proxy(hyper_hello_exe)?);
+ res.insert(
+ "node_proxy_tcp".to_string(),
+ node_tcp_proxy(hyper_hello_exe)?,
+ );
+ res.insert("node_tcp".to_string(), node_tcp()?);
+ res.insert("hyper".to_string(), hyper_http(hyper_hello_exe)?);
+
+ Ok(res)
+}
+
+fn run(
+ server_cmd: &[&str],
+ port: u16,
+ env: Option<Vec<(String, String)>>,
+ origin_cmd: Option<&[&str]>,
+) -> Result<HttpBenchmarkResult> {
+ // Wait for port 4544 to become available.
+ // TODO Need to use SO_REUSEPORT with tokio::net::TcpListener.
+ std::thread::sleep(Duration::from_secs(5));
+
+ let mut origin = None;
+ if let Some(cmd) = origin_cmd {
+ let mut com = Command::new(cmd[0]);
+ com.args(&cmd[1..]);
+ if let Some(env) = env.clone() {
+ com.envs(env);
+ }
+ origin = Some(com.spawn()?);
+ };
+
+ println!("{}", server_cmd.join(" "));
+ let mut server = {
+ let mut com = Command::new(server_cmd[0]);
+ com.args(&server_cmd[1..]);
+ if let Some(env) = env {
+ com.envs(env);
+ }
+ com.spawn()?
+ };
+
+ std::thread::sleep(Duration::from_secs(5)); // wait for server to wake up. TODO racy.
+
+ let wrk = test_util::prebuilt_tool_path("wrk");
+ assert!(wrk.is_file());
+
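+  // Builds a command like `wrk -d 20s --latency http://127.0.0.1:4544/`;
+  // wrk's stdout is later parsed into requests/sec and latency figures.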
+ let wrk_cmd = &[
+ wrk.to_str().unwrap(),
+ "-d",
+ DURATION,
+ "--latency",
+ &format!("http://127.0.0.1:{}/", port),
+ ];
+ println!("{}", wrk_cmd.join(" "));
+ let output = test_util::run_collect(wrk_cmd, None, None, None, true).0;
+
+ println!("{}", output);
+ assert!(
+ server.try_wait()?.map_or(true, |s| s.success()),
+ "server ended with error"
+ );
+
+ server.kill()?;
+ if let Some(mut origin) = origin {
+ origin.kill()?;
+ }
+
+ Ok(parse_wrk_output(&output))
+}
+
+fn get_port() -> u16 {
+ static mut NEXT_PORT: u16 = 4544;
+
+ let port = unsafe { NEXT_PORT };
+
+ unsafe {
+ NEXT_PORT += 1;
+ }
+
+ port
+}
+
+fn server_addr(port: u16) -> String {
+ format!("0.0.0.0:{}", port)
+}
+
+fn deno_tcp(deno_exe: &str) -> Result<HttpBenchmarkResult> {
+ let port = get_port();
+ println!("http_benchmark testing DENO tcp.");
+ run(
+ &[
+ deno_exe,
+ "run",
+ "--allow-net",
+ "cli/bench/deno_tcp.ts",
+ &server_addr(port),
+ ],
+ port,
+ None,
+ None,
+ )
+}
+
+fn deno_tcp_proxy(
+ deno_exe: &str,
+ hyper_exe: &str,
+) -> Result<HttpBenchmarkResult> {
+ let port = get_port();
+ let origin_port = get_port();
+
+ println!("http_proxy_benchmark testing DENO using net/tcp.");
+ run(
+ &[
+ deno_exe,
+ "run",
+ "--allow-net",
+ "--reload",
+ "--unstable",
+ "cli/bench/deno_tcp_proxy.ts",
+ &server_addr(port),
+ &server_addr(origin_port),
+ ],
+ port,
+ None,
+ Some(&[hyper_exe, &origin_port.to_string()]),
+ )
+}
+
+fn deno_http(deno_exe: &str) -> Result<HttpBenchmarkResult> {
+ let port = get_port();
+ println!("http_benchmark testing DENO using net/http.");
+ run(
+ &[
+ deno_exe,
+ "run",
+ "--allow-net",
+ "--reload",
+ "--unstable",
+ "std/http/http_bench.ts",
+ &server_addr(port),
+ ],
+ port,
+ None,
+ None,
+ )
+}
+
+#[allow(dead_code)]
+fn deno_http_proxy(
+ deno_exe: &str,
+ hyper_exe: &str,
+) -> Result<HttpBenchmarkResult> {
+ let port = get_port();
+ let origin_port = get_port();
+
+ println!("http_proxy_benchmark testing DENO using net/http.");
+ run(
+ &[
+ deno_exe,
+ "run",
+ "--allow-net",
+ "--reload",
+ "--unstable",
+ "cli/bench/deno_http_proxy.ts",
+ &server_addr(port),
+ &server_addr(origin_port),
+ ],
+ port,
+ None,
+ Some(&[hyper_exe, &origin_port.to_string()]),
+ )
+}
+
+fn core_http_bin_ops(exe: &str) -> Result<HttpBenchmarkResult> {
+ println!("http_benchmark testing CORE http_bench_bin_ops");
+ run(&[exe], 4544, None, None)
+}
+
+fn core_http_json_ops(exe: &str) -> Result<HttpBenchmarkResult> {
+ println!("http_benchmark testing CORE http_bench_json_ops");
+ run(&[exe], 4544, None, None)
+}
+
+fn node_http() -> Result<HttpBenchmarkResult> {
+ let port = get_port();
+ println!("http_benchmark testing NODE.");
+ run(
+ &["node", "cli/bench/node_http.js", &port.to_string()],
+ port,
+ None,
+ None,
+ )
+}
+
+fn node_http_proxy(hyper_exe: &str) -> Result<HttpBenchmarkResult> {
+ let port = get_port();
+ let origin_port = get_port();
+ let origin_port = origin_port.to_string();
+
+ println!("http_proxy_benchmark testing NODE.");
+ run(
+ &[
+ "node",
+ "cli/bench/node_http_proxy.js",
+ &port.to_string(),
+ &origin_port,
+ ],
+ port,
+ None,
+ Some(&[hyper_exe, &origin_port]),
+ )
+}
+
+fn node_tcp_proxy(exe: &str) -> Result<HttpBenchmarkResult> {
+ let port = get_port();
+ let origin_port = get_port();
+ let origin_port = origin_port.to_string();
+
+ println!("http_proxy_benchmark testing NODE tcp.");
+ run(
+ &[
+ "node",
+ "cli/bench/node_tcp_proxy.js",
+ &port.to_string(),
+ &origin_port,
+ ],
+ port,
+ None,
+ Some(&[exe, &origin_port]),
+ )
+}
+
+fn node_tcp() -> Result<HttpBenchmarkResult> {
+ let port = get_port();
+ println!("http_benchmark testing node_tcp.js");
+ run(
+ &["node", "cli/bench/node_tcp.js", &port.to_string()],
+ port,
+ None,
+ None,
+ )
+}
+
+fn hyper_http(exe: &str) -> Result<HttpBenchmarkResult> {
+ let port = get_port();
+ println!("http_benchmark testing RUST hyper");
+ run(&[exe, &format!("{}", port)], port, None, None)
+}
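
get_port() in http.rs above hands out sequential ports through a static mut counter, which is only sound because the benchmarks run one at a time on the main thread. A lock-free equivalent, shown purely as an illustrative sketch and not as part of this patch, could use an atomic counter instead:

    use std::sync::atomic::{AtomicU16, Ordering};

    // Hypothetical stand-in for get_port(); not part of this patch.
    static NEXT_PORT: AtomicU16 = AtomicU16::new(4544);

    fn get_port() -> u16 {
      // fetch_add returns the previous value, so ports still start at 4544
      // and increase by one per call.
      NEXT_PORT.fetch_add(1, Ordering::Relaxed)
    }
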
diff --git a/cli/bench/main.rs b/cli/bench/main.rs
new file mode 100644
index 000000000..b2ae59d4f
--- /dev/null
+++ b/cli/bench/main.rs
@@ -0,0 +1,462 @@
+// Copyright 2018-2020 the Deno authors. All rights reserved. MIT license.
+
+use serde_json::{self, map::Map, Number, Value};
+use std::{
+ convert::From,
+ env, fs,
+ path::PathBuf,
+ process::{Command, Stdio},
+};
+
+mod http;
+mod throughput;
+
+fn read_json(filename: &str) -> Result<serde_json::Value> {
+ let f = fs::File::open(filename)?;
+ Ok(serde_json::from_reader(f)?)
+}
+
+fn write_json(filename: &str, value: &serde_json::Value) -> Result<()> {
+ let f = fs::File::create(filename)?;
+ serde_json::to_writer(f, value)?;
+ Ok(())
+}
+
+/// List of (benchmark name, arguments, expected return code) tuples.
+const EXEC_TIME_BENCHMARKS: &[(&str, &[&str], Option<i32>)] = &[
+ ("hello", &["run", "cli/tests/002_hello.ts"], None),
+ (
+ "relative_import",
+ &["run", "cli/tests/003_relative_import.ts"],
+ None,
+ ),
+ ("error_001", &["run", "cli/tests/error_001.ts"], Some(1)),
+ (
+ "cold_hello",
+ &["run", "--reload", "cli/tests/002_hello.ts"],
+ None,
+ ),
+ (
+ "cold_relative_import",
+ &["run", "--reload", "cli/tests/003_relative_import.ts"],
+ None,
+ ),
+ (
+ "workers_startup",
+ &["run", "--allow-read", "cli/tests/workers_startup_bench.ts"],
+ None,
+ ),
+ (
+ "workers_round_robin",
+ &[
+ "run",
+ "--allow-read",
+ "cli/tests/workers_round_robin_bench.ts",
+ ],
+ None,
+ ),
+ (
+ "text_decoder",
+ &["run", "cli/tests/text_decoder_perf.js"],
+ None,
+ ),
+ (
+ "text_encoder",
+ &["run", "cli/tests/text_encoder_perf.js"],
+ None,
+ ),
+ (
+ "check",
+ &["cache", "--reload", "std/examples/chat/server_test.ts"],
+ None,
+ ),
+ (
+ "no_check",
+ &[
+ "cache",
+ "--reload",
+ "--no-check",
+ "std/examples/chat/server_test.ts",
+ ],
+ None,
+ ),
+];
+
+const RESULT_KEYS: &[&str] =
+ &["mean", "stddev", "user", "system", "min", "max"];
+fn run_exec_time(deno_exe: &PathBuf, target_dir: &PathBuf) -> Result<Value> {
+ let hyperfine_exe = test_util::prebuilt_tool_path("hyperfine");
+
+ let benchmark_file = target_dir.join("hyperfine_results.json");
+ let benchmark_file = benchmark_file.to_str().unwrap();
+
+ let mut command = [
+ hyperfine_exe.to_str().unwrap(),
+ "--export-json",
+ benchmark_file,
+ "--warmup",
+ "3",
+ ]
+ .iter()
+ .map(|s| s.to_string())
+ .collect::<Vec<_>>();
+
+ for (_, args, return_code) in EXEC_TIME_BENCHMARKS {
+ let ret_code_test = if let Some(code) = return_code {
+ // Bash test which asserts the return code value of the previous command
+ // $? contains the return code of the previous command
+ format!("; test $? -eq {}", code)
+ } else {
+ "".to_string()
+ };
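+    // e.g. for ("error_001", ..., Some(1)) this pushes the command string
+    // "<deno_exe> run cli/tests/error_001.ts ; test $? -eq 1"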
+ command.push(format!(
+ "{} {} {}",
+ deno_exe.to_str().unwrap(),
+ args.join(" "),
+ ret_code_test
+ ));
+ }
+
+ test_util::run(
+ &command.iter().map(|s| s.as_ref()).collect::<Vec<_>>(),
+ None,
+ None,
+ None,
+ true,
+ );
+
+ let mut results = Map::new();
+ let hyperfine_results = read_json(benchmark_file)?;
+ for ((name, _, _), data) in EXEC_TIME_BENCHMARKS.iter().zip(
+ hyperfine_results
+ .as_object()
+ .unwrap()
+ .get("results")
+ .unwrap()
+ .as_array()
+ .unwrap(),
+ ) {
+ let data = data.as_object().unwrap().clone();
+ results.insert(
+ name.to_string(),
+ Value::Object(
+ data
+ .into_iter()
+ .filter(|(key, _)| RESULT_KEYS.contains(&key.as_str()))
+ .collect::<Map<String, Value>>(),
+ ),
+ );
+ }
+
+ Ok(Value::Object(results))
+}
+
+const BINARY_TARGET_FILES: &[&str] =
+ &["CLI_SNAPSHOT.bin", "COMPILER_SNAPSHOT.bin"];
+fn get_binary_sizes(target_dir: &PathBuf) -> Result<Value> {
+ let mut sizes = Map::new();
+ let mut mtimes = std::collections::HashMap::new();
+
+ sizes.insert(
+ "deno".to_string(),
+ Value::Number(Number::from(test_util::deno_exe_path().metadata()?.len())),
+ );
+
+ // Because cargo's OUT_DIR is not predictable, search the build tree for
+ // snapshot related files.
+ for file in walkdir::WalkDir::new(target_dir) {
+ if file.is_err() {
+ continue;
+ }
+ let file = file.unwrap();
+ let filename = file.file_name().to_str().unwrap().to_string();
+
+ if !BINARY_TARGET_FILES.contains(&filename.as_str()) {
+ continue;
+ }
+
+ let meta = file.metadata()?;
+ let file_mtime = meta.modified()?;
+
+ // If multiple copies of a file are found, use the most recent one.
+ if let Some(stored_mtime) = mtimes.get(&filename) {
+ if *stored_mtime > file_mtime {
+ continue;
+ }
+ }
+
+ mtimes.insert(filename.clone(), file_mtime);
+ sizes.insert(filename, Value::Number(Number::from(meta.len())));
+ }
+
+ Ok(Value::Object(sizes))
+}
+
+const BUNDLES: &[(&str, &str)] = &[
+ ("file_server", "./std/http/file_server.ts"),
+ ("gist", "./std/examples/gist.ts"),
+];
+fn bundle_benchmark(deno_exe: &PathBuf) -> Result<Value> {
+ let mut sizes = Map::new();
+
+ for (name, url) in BUNDLES {
+ let path = format!("{}.bundle.js", name);
+ test_util::run(
+ &[
+ deno_exe.to_str().unwrap(),
+ "bundle",
+ "--unstable",
+ url,
+ &path,
+ ],
+ None,
+ None,
+ None,
+ true,
+ );
+
+ let file = PathBuf::from(path);
+ assert!(file.is_file());
+ sizes.insert(
+ name.to_string(),
+ Value::Number(Number::from(file.metadata()?.len())),
+ );
+ let _ = fs::remove_file(file);
+ }
+
+ Ok(Value::Object(sizes))
+}
+
+fn run_throughput(deno_exe: &PathBuf) -> Result<Value> {
+ let mut m = Map::new();
+
+ m.insert("100M_tcp".to_string(), throughput::tcp(deno_exe, 100)?);
+ m.insert("100M_cat".to_string(), throughput::cat(deno_exe, 100)?);
+ m.insert("10M_tcp".to_string(), throughput::tcp(deno_exe, 10)?);
+ m.insert("10M_cat".to_string(), throughput::cat(deno_exe, 10)?);
+
+ Ok(Value::Object(m))
+}
+
+fn run_http(
+ target_dir: &PathBuf,
+ new_data: &mut Map<String, Value>,
+) -> Result<()> {
+ let stats = http::benchmark(target_dir)?;
+
+ new_data.insert(
+ "req_per_sec".to_string(),
+ Value::Object(
+ stats
+ .iter()
+ .map(|(name, result)| {
+ (name.clone(), Value::Number(Number::from(result.requests)))
+ })
+ .collect::<Map<String, Value>>(),
+ ),
+ );
+
+ new_data.insert(
+ "max_latency".to_string(),
+ Value::Object(
+ stats
+ .iter()
+ .map(|(name, result)| {
+ (
+ name.clone(),
+ Value::Number(Number::from_f64(result.latency).unwrap()),
+ )
+ })
+ .collect::<Map<String, Value>>(),
+ ),
+ );
+
+ Ok(())
+}
+
+fn run_strace_benchmarks(
+ deno_exe: &PathBuf,
+ new_data: &mut Map<String, Value>,
+) -> Result<()> {
+ use std::io::Read;
+
+ let mut thread_count = Map::new();
+ let mut syscall_count = Map::new();
+
+ for (name, args, _) in EXEC_TIME_BENCHMARKS {
+ let mut file = tempfile::NamedTempFile::new()?;
+
+ Command::new("strace")
+ .args(&[
+ "-c",
+ "-f",
+ "-o",
+ file.path().to_str().unwrap(),
+ deno_exe.to_str().unwrap(),
+ ])
+ .args(args.iter())
+ .stdout(Stdio::null())
+ .spawn()?
+ .wait()?;
+
+ let mut output = String::new();
+ file.as_file_mut().read_to_string(&mut output)?;
+
+ let strace_result = test_util::parse_strace_output(&output);
+ thread_count.insert(
+ name.to_string(),
+ Value::Number(Number::from(
+ strace_result.get("clone").unwrap().calls + 1,
+ )),
+ );
+ syscall_count.insert(
+ name.to_string(),
+ Value::Number(Number::from(strace_result.get("total").unwrap().calls)),
+ );
+ }
+
+ new_data.insert("thread_count".to_string(), Value::Object(thread_count));
+ new_data.insert("syscall_count".to_string(), Value::Object(syscall_count));
+
+ Ok(())
+}
+
+fn run_max_mem_benchmark(deno_exe: &PathBuf) -> Result<Value> {
+ let mut results = Map::new();
+
+ for (name, args, return_code) in EXEC_TIME_BENCHMARKS {
+ let proc = Command::new("time")
+ .args(&["-v", deno_exe.to_str().unwrap()])
+ .args(args.iter())
+ .stdout(Stdio::null())
+ .stderr(Stdio::piped())
+ .spawn()?;
+
+ let proc_result = proc.wait_with_output()?;
+ if let Some(code) = return_code {
+ assert_eq!(proc_result.status.code().unwrap(), *code);
+ }
+ let out = String::from_utf8(proc_result.stderr)?;
+
+ results.insert(
+ name.to_string(),
+ Value::Number(Number::from(test_util::parse_max_mem(&out).unwrap())),
+ );
+ }
+
+ Ok(Value::Object(results))
+}
+
+/*
+ TODO(SyrupThinker)
+ Switch to the #[bench] attribute once it is stabilized.
+ Until then, #[test] tests in this target won't run, because we replace the
+ default harness with our own runner here.
+*/
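+// With `harness = false` in cli/Cargo.toml, `cargo bench` builds this file as
+// a plain binary and runs it with a `--bench` argument, hence the early
+// return below when that flag is absent.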
+fn main() -> Result<()> {
+ if env::args().find(|s| s == "--bench").is_none() {
+ return Ok(());
+ }
+
+ println!("Starting Deno benchmark");
+
+ let target_dir = test_util::target_dir();
+ let deno_exe = test_util::deno_exe_path();
+
+ env::set_current_dir(&test_util::root_path())?;
+
+ let mut new_data: Map<String, Value> = Map::new();
+
+ new_data.insert(
+ "created_at".to_string(),
+ Value::String(
+ chrono::Utc::now().to_rfc3339_opts(chrono::SecondsFormat::Secs, true),
+ ),
+ );
+ new_data.insert(
+ "sha1".to_string(),
+ Value::String(
+ test_util::run_collect(
+ &["git", "rev-parse", "HEAD"],
+ None,
+ None,
+ None,
+ true,
+ )
+ .0
+ .trim()
+ .to_string(),
+ ),
+ );
+
+ // TODO(ry) The "benchmark" benchmark should actually be called "exec_time".
+ // When this is changed, the historical data in gh-pages branch needs to be
+ // changed too.
+ new_data.insert(
+ "benchmark".to_string(),
+ run_exec_time(&deno_exe, &target_dir)?,
+ );
+
+ new_data.insert("binary_size".to_string(), get_binary_sizes(&target_dir)?);
+ new_data.insert("bundle_size".to_string(), bundle_benchmark(&deno_exe)?);
+
+  // The throughput benchmarks cannot run on Windows because it lacks `nc` and
+  // shell pipes.
+ if cfg!(not(target_os = "windows")) {
+ new_data.insert("throughput".to_string(), run_throughput(&deno_exe)?);
+ run_http(&target_dir, &mut new_data)?;
+ }
+
+ if cfg!(target_os = "linux") {
+ run_strace_benchmarks(&deno_exe, &mut new_data)?;
+ new_data
+ .insert("max_memory".to_string(), run_max_mem_benchmark(&deno_exe)?);
+ }
+
+ println!("===== <BENCHMARK RESULTS>");
+ serde_json::to_writer_pretty(std::io::stdout(), &new_data)?;
+ println!("\n===== </BENCHMARK RESULTS>");
+
+ if let Some(filename) = target_dir.join("bench.json").to_str() {
+ write_json(filename, &Value::Object(new_data))?;
+ } else {
+ eprintln!("Cannot write bench.json, path is invalid");
+ }
+
+ Ok(())
+}
+
+#[derive(Debug)]
+enum Error {
+ Io(std::io::Error),
+ Serde(serde_json::error::Error),
+ FromUtf8(std::string::FromUtf8Error),
+ Walkdir(walkdir::Error),
+}
+
+impl From<std::io::Error> for Error {
+ fn from(ioe: std::io::Error) -> Self {
+ Error::Io(ioe)
+ }
+}
+
+impl From<serde_json::error::Error> for Error {
+ fn from(sje: serde_json::error::Error) -> Self {
+ Error::Serde(sje)
+ }
+}
+
+impl From<std::string::FromUtf8Error> for Error {
+ fn from(fue: std::string::FromUtf8Error) -> Self {
+ Error::FromUtf8(fue)
+ }
+}
+
+impl From<walkdir::Error> for Error {
+ fn from(wde: walkdir::Error) -> Self {
+ Error::Walkdir(wde)
+ }
+}
+
+pub(crate) type Result<T> = std::result::Result<T, Error>;
diff --git a/cli/bench/node_http.js b/cli/bench/node_http.js
new file mode 100644
index 000000000..189098e4c
--- /dev/null
+++ b/cli/bench/node_http.js
@@ -0,0 +1,9 @@
+// Copyright 2018-2020 the Deno authors. All rights reserved. MIT license.
+const http = require("http");
+const port = process.argv[2] || "4544";
+console.log("port", port);
+http
+ .Server((req, res) => {
+ res.end("Hello World");
+ })
+ .listen(port);
diff --git a/cli/bench/node_http_proxy.js b/cli/bench/node_http_proxy.js
new file mode 100644
index 000000000..b984c484f
--- /dev/null
+++ b/cli/bench/node_http_proxy.js
@@ -0,0 +1,22 @@
+// Copyright 2018-2020 the Deno authors. All rights reserved. MIT license.
+const http = require("http");
+const port = process.argv[2] || "4544";
+const originPort = process.argv[3] || "4545";
+console.log("port", port);
+http
+ .Server((req, res) => {
+ const options = {
+ port: originPort,
+ path: req.url,
+ method: req.method,
+ headers: req.headers,
+ };
+
+ const proxy = http.request(options, (proxyRes) => {
+ res.writeHead(proxyRes.statusCode, proxyRes.headers);
+ proxyRes.pipe(res, { end: true });
+ });
+
+ req.pipe(proxy, { end: true });
+ })
+ .listen(port);
diff --git a/cli/bench/node_tcp.js b/cli/bench/node_tcp.js
new file mode 100644
index 000000000..22e2a5161
--- /dev/null
+++ b/cli/bench/node_tcp.js
@@ -0,0 +1,18 @@
+// Copyright 2018-2020 the Deno authors. All rights reserved. MIT license.
+// Note: this is a keep-alive server.
+const { Server } = require("net");
+const port = process.argv[2] || "4544";
+console.log("port", port);
+
+const response = Buffer.from(
+ "HTTP/1.1 200 OK\r\nContent-Length: 12\r\n\r\nHello World\n",
+);
+
+Server((socket) => {
+ socket.on("data", (_) => {
+ socket.write(response);
+ });
+ socket.on("error", (_) => {
+ socket.destroy();
+ });
+}).listen(port);
diff --git a/cli/bench/node_tcp_promise.js b/cli/bench/node_tcp_promise.js
new file mode 100644
index 000000000..36709d2b9
--- /dev/null
+++ b/cli/bench/node_tcp_promise.js
@@ -0,0 +1,25 @@
+// Copyright 2018-2020 the Deno authors. All rights reserved. MIT license.
+// Note: this is a keep-alive server.
+const { Server } = require("net");
+const port = process.argv[2] || "4544";
+console.log("port", port);
+
+const response = Buffer.from(
+ "HTTP/1.1 200 OK\r\nContent-Length: 12\r\n\r\nHello World\n",
+);
+
+function write(socket, buffer) {
+ const p = new Promise((resolve, _) => {
+ socket.write(buffer, resolve);
+ });
+ return Promise.resolve(p);
+}
+
+Server(async (socket) => {
+ socket.on("error", (_) => {
+ socket.destroy();
+ });
+ for await (const _ of socket) {
+ await write(socket, response);
+ }
+}).listen(port);
diff --git a/cli/bench/node_tcp_proxy.js b/cli/bench/node_tcp_proxy.js
new file mode 100644
index 000000000..d693dc5c8
--- /dev/null
+++ b/cli/bench/node_tcp_proxy.js
@@ -0,0 +1,68 @@
+const net = require("net");
+
+process.on("uncaughtException", function (error) {
+ console.error(error);
+});
+
+if (process.argv.length != 4) {
+ console.log("usage: %s <localport> <remoteport>", process.argv[1]);
+ process.exit();
+}
+
+const localport = process.argv[2];
+const remoteport = process.argv[3];
+
+const remotehost = "127.0.0.1";
+
+const server = net.createServer(function (localsocket) {
+ const remotesocket = new net.Socket();
+
+ remotesocket.connect(remoteport, remotehost);
+
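+  // Manual backpressure: write() returning false means the destination's
+  // internal buffer is full, so the source socket is paused until the
+  // destination emits "drain".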
+ localsocket.on("data", function (data) {
+ const flushed = remotesocket.write(data);
+ if (!flushed) {
+ localsocket.pause();
+ }
+ });
+
+ remotesocket.on("data", function (data) {
+ const flushed = localsocket.write(data);
+ if (!flushed) {
+ remotesocket.pause();
+ }
+ });
+
+ localsocket.on("drain", function () {
+ remotesocket.resume();
+ });
+
+ remotesocket.on("drain", function () {
+ localsocket.resume();
+ });
+
+ localsocket.on("close", function () {
+ remotesocket.end();
+ });
+
+ remotesocket.on("close", function () {
+ localsocket.end();
+ });
+
+ localsocket.on("error", function () {
+ localsocket.end();
+ });
+
+ remotesocket.on("error", function () {
+ remotesocket.end();
+ });
+});
+
+server.listen(localport);
+
+console.log(
+ "redirecting connections from 127.0.0.1:%d to %s:%d",
+ localport,
+ remotehost,
+ remoteport,
+);
diff --git a/cli/bench/throughput.rs b/cli/bench/throughput.rs
new file mode 100644
index 000000000..0be46f142
--- /dev/null
+++ b/cli/bench/throughput.rs
@@ -0,0 +1,62 @@
+// Copyright 2018-2020 the Deno authors. All rights reserved. MIT license.
+
+use super::Result;
+use serde_json::{Number, Value};
+use std::{
+ path::PathBuf,
+ process::Command,
+ time::{Duration, Instant},
+};
+
+const MB: usize = 1024 * 1024;
+const SERVER_ADDR: &str = "0.0.0.0:4544";
+const CLIENT_ADDR: &str = "127.0.0.1 4544";
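+// Space-separated on purpose: this string is spliced into an `nc <host> <port>`
+// shell command below rather than parsed as a socket address.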
+
+pub(crate) fn cat(deno_exe: &PathBuf, megs: usize) -> Result<Value> {
+ let size = megs * MB;
+ let shell_cmd = format!(
+ "{} run --allow-read cli/tests/cat.ts /dev/zero | head -c {}",
+ deno_exe.to_str().unwrap(),
+ size
+ );
+ println!("{}", shell_cmd);
+ let cmd = &["sh", "-c", &shell_cmd];
+
+ let start = Instant::now();
+ let _ = test_util::run_collect(cmd, None, None, None, true);
+ let end = Instant::now();
+
+ Ok(Value::Number(
+ Number::from_f64((end - start).as_secs_f64()).unwrap(),
+ ))
+}
+
+pub(crate) fn tcp(deno_exe: &PathBuf, megs: usize) -> Result<Value> {
+ let size = megs * MB;
+
+ let shell_cmd = format!("head -c {} /dev/zero | nc {}", size, CLIENT_ADDR);
+ println!("{}", shell_cmd);
+ let cmd = &["sh", "-c", &shell_cmd];
+
+ // Run deno echo server in the background.
+ let mut echo_server = Command::new(deno_exe.to_str().unwrap())
+ .args(&[
+ "run",
+ "--allow-net",
+ "cli/tests/echo_server.ts",
+ SERVER_ADDR,
+ ])
+ .spawn()?;
+
+ std::thread::sleep(Duration::from_secs(5)); // wait for deno to wake up. TODO racy.
+
+ let start = Instant::now();
+ let _ = test_util::run_collect(cmd, None, None, None, true);
+ let end = Instant::now();
+
+ echo_server.kill()?;
+
+ Ok(Value::Number(
+ Number::from_f64((end - start).as_secs_f64()).unwrap(),
+ ))
+}
diff --git a/cli/tests/integration_tests.rs b/cli/tests/integration_tests.rs
index 4184b0b0a..c0dcdb606 100644
--- a/cli/tests/integration_tests.rs
+++ b/cli/tests/integration_tests.rs
@@ -273,11 +273,6 @@ grault",
}
#[test]
-fn benchmark_test() {
- util::run_python_script("tools/benchmark_test.py")
-}
-
-#[test]
fn deno_dir_test() {
use std::fs::remove_dir_all;
let _g = util::http_server();