-rw-r--r--  Cargo.lock               |   1
-rw-r--r--  cli/Cargo.toml           |   1
-rw-r--r--  cli/ops/bench.rs         |  52
-rw-r--r--  cli/tools/bench.rs       | 173
-rw-r--r--  cli/tools/test.rs        |  52
-rw-r--r--  runtime/js/40_testing.js | 197
6 files changed, 261 insertions(+), 215 deletions(-)
diff --git a/Cargo.lock b/Cargo.lock
index e104af655..28077bc56 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -772,6 +772,7 @@ dependencies = [
"fwdansi",
"http",
"import_map",
+ "indexmap",
"jsonc-parser",
"libc",
"log",
diff --git a/cli/Cargo.toml b/cli/Cargo.toml
index fc09642e9..062f0cf70 100644
--- a/cli/Cargo.toml
+++ b/cli/Cargo.toml
@@ -72,6 +72,7 @@ eszip = "=0.20.0"
fancy-regex = "=0.9.0"
http = "=0.2.6"
import_map = "=0.9.0"
+indexmap = "1.8.1"
jsonc-parser = { version = "=0.19.0", features = ["serde"] }
libc = "=0.2.126"
log = { version = "=0.4.16", features = ["serde"] }
diff --git a/cli/ops/bench.rs b/cli/ops/bench.rs
index e028aa6b1..1c62452d4 100644
--- a/cli/ops/bench.rs
+++ b/cli/ops/bench.rs
@@ -1,4 +1,6 @@
+use crate::tools::bench::BenchDescription;
use crate::tools::bench::BenchEvent;
+use crate::tools::test::TestFilter;
use deno_core::error::generic_error;
use deno_core::error::AnyError;
use deno_core::op;
@@ -8,22 +10,32 @@ use deno_core::OpState;
use deno_runtime::permissions::create_child_permissions;
use deno_runtime::permissions::ChildPermissionsArg;
use deno_runtime::permissions::Permissions;
+use serde::Deserialize;
+use serde::Serialize;
+use std::sync::atomic::AtomicUsize;
+use std::sync::atomic::Ordering;
use std::time;
use tokio::sync::mpsc::UnboundedSender;
use uuid::Uuid;
-pub fn init(sender: UnboundedSender<BenchEvent>, unstable: bool) -> Extension {
+pub fn init(
+ sender: UnboundedSender<BenchEvent>,
+ filter: TestFilter,
+ unstable: bool,
+) -> Extension {
Extension::builder()
.ops(vec![
op_pledge_test_permissions::decl(),
op_restore_test_permissions::decl(),
op_get_bench_origin::decl(),
+ op_register_bench::decl(),
op_dispatch_bench_event::decl(),
op_bench_now::decl(),
op_bench_check_unstable::decl(),
])
.state(move |state| {
state.put(sender.clone());
+ state.put(filter.clone());
state.put(Unstable(unstable));
Ok(())
})
@@ -97,6 +109,44 @@ fn op_get_bench_origin(state: &mut OpState) -> String {
state.borrow::<ModuleSpecifier>().to_string()
}
+#[derive(Debug, Deserialize)]
+#[serde(rename_all = "camelCase")]
+struct BenchInfo {
+ name: String,
+ origin: String,
+ baseline: bool,
+ group: Option<String>,
+}
+
+#[derive(Debug, Serialize)]
+#[serde(rename_all = "camelCase")]
+struct BenchRegisterResult {
+ id: usize,
+ filtered_out: bool,
+}
+
+static NEXT_ID: AtomicUsize = AtomicUsize::new(0);
+
+#[op]
+fn op_register_bench(
+ state: &mut OpState,
+ info: BenchInfo,
+) -> Result<BenchRegisterResult, AnyError> {
+ let id = NEXT_ID.fetch_add(1, Ordering::SeqCst);
+ let filter = state.borrow::<TestFilter>().clone();
+ let filtered_out = !filter.includes(&info.name);
+ let description = BenchDescription {
+ id,
+ name: info.name,
+ origin: info.origin,
+ baseline: info.baseline,
+ group: info.group,
+ };
+ let sender = state.borrow::<UnboundedSender<BenchEvent>>().clone();
+ sender.send(BenchEvent::Register(description)).ok();
+ Ok(BenchRegisterResult { id, filtered_out })
+}
+
#[op]
fn op_dispatch_bench_event(state: &mut OpState, event: BenchEvent) {
let sender = state.borrow::<UnboundedSender<BenchEvent>>().clone();
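
Note: op_register_bench above hands out ids from a process-wide atomic counter and reports whether the bench name matches the filter; a minimal, self-contained sketch of that registration pattern (not part of the patch, names are illustrative, and only the substring case of the real TestFilter is mirrored):

    use std::sync::atomic::{AtomicUsize, Ordering};

    static NEXT_ID: AtomicUsize = AtomicUsize::new(0);

    // Allocate a unique id and decide whether the bench is filtered out.
    fn register(name: &str, filter_substring: Option<&str>) -> (usize, bool) {
        let id = NEXT_ID.fetch_add(1, Ordering::SeqCst);
        let filtered_out = filter_substring.map_or(false, |s| !name.contains(s));
        (id, filtered_out)
    }

    fn main() {
        assert_eq!(register("parse_url", Some("parse")), (0, false));
        assert_eq!(register("serialize", Some("parse")), (1, true));
    }

Also worth noting from the hunk above: BenchEvent::Register is sent even for filtered-out benches; only the JS side skips running them.
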
diff --git a/cli/tools/bench.rs b/cli/tools/bench.rs
index afd85a8d0..d9e9c38c3 100644
--- a/cli/tools/bench.rs
+++ b/cli/tools/bench.rs
@@ -21,18 +21,21 @@ use crate::ops;
use crate::proc_state::ProcState;
use crate::resolver::ImportMapResolver;
use crate::resolver::JsxResolver;
+use crate::tools::test::format_test_error;
+use crate::tools::test::TestFilter;
use deno_core::error::generic_error;
use deno_core::error::AnyError;
+use deno_core::error::JsError;
use deno_core::futures::future;
use deno_core::futures::stream;
use deno_core::futures::FutureExt;
use deno_core::futures::StreamExt;
-use deno_core::serde_json::json;
use deno_core::ModuleSpecifier;
use deno_graph::ModuleKind;
use deno_runtime::permissions::Permissions;
use deno_runtime::tokio_util::run_basic;
+use indexmap::IndexMap;
use log::Level;
use serde::Deserialize;
use serde::Serialize;
@@ -50,12 +53,6 @@ struct BenchSpecifierOptions {
#[derive(Debug, Clone, PartialEq, Deserialize)]
#[serde(rename_all = "camelCase")]
-pub enum BenchOutput {
- Console(String),
-}
-
-#[derive(Debug, Clone, PartialEq, Deserialize)]
-#[serde(rename_all = "camelCase")]
pub struct BenchPlan {
pub total: usize,
pub origin: String,
@@ -67,28 +64,30 @@ pub struct BenchPlan {
#[serde(rename_all = "camelCase")]
pub enum BenchEvent {
Plan(BenchPlan),
- Output(BenchOutput),
- Wait(BenchMetadata),
- Result(String, BenchResult),
+ Output(String),
+ Register(BenchDescription),
+ Wait(usize),
+ Result(usize, BenchResult),
}
#[derive(Debug, Clone, PartialEq, Deserialize)]
#[serde(rename_all = "camelCase")]
pub enum BenchResult {
- Ok(BenchMeasurement),
- Failed(BenchFailure),
+ Ok(BenchStats),
+ Failed(Box<JsError>),
}
-#[derive(Debug, Clone, Serialize)]
+#[derive(Debug, Clone)]
pub struct BenchReport {
pub total: usize,
pub failed: usize,
- pub failures: Vec<BenchFailure>,
- pub measurements: Vec<BenchMeasurement>,
+ pub failures: Vec<(BenchDescription, Box<JsError>)>,
+ pub measurements: Vec<(BenchDescription, BenchStats)>,
}
#[derive(Debug, Clone, PartialEq, Deserialize, Eq, Hash)]
-pub struct BenchMetadata {
+pub struct BenchDescription {
+ pub id: usize,
pub name: String,
pub origin: String,
pub baseline: bool,
@@ -96,22 +95,6 @@ pub struct BenchMetadata {
}
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
-pub struct BenchMeasurement {
- pub name: String,
- pub baseline: bool,
- pub stats: BenchStats,
- pub group: Option<String>,
-}
-
-#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
-pub struct BenchFailure {
- pub name: String,
- pub error: String,
- pub baseline: bool,
- pub group: Option<String>,
-}
-
-#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct BenchStats {
pub n: u64,
pub min: f64,
@@ -142,9 +125,10 @@ pub trait BenchReporter {
fn report_group_summary(&mut self);
fn report_plan(&mut self, plan: &BenchPlan);
fn report_end(&mut self, report: &BenchReport);
- fn report_wait(&mut self, wait: &BenchMetadata);
- fn report_output(&mut self, output: &BenchOutput);
- fn report_result(&mut self, result: &BenchResult);
+ fn report_register(&mut self, desc: &BenchDescription);
+ fn report_wait(&mut self, desc: &BenchDescription);
+ fn report_output(&mut self, output: &str);
+ fn report_result(&mut self, desc: &BenchDescription, result: &BenchResult);
}
struct ConsoleReporter {
@@ -152,8 +136,8 @@ struct ConsoleReporter {
show_output: bool,
has_ungrouped: bool,
group: Option<String>,
- baseline: Option<BenchMeasurement>,
- group_measurements: Vec<BenchMeasurement>,
+ baseline: bool,
+ group_measurements: Vec<(BenchDescription, BenchStats)>,
options: Option<mitata::reporter::Options>,
}
@@ -163,7 +147,7 @@ impl ConsoleReporter {
show_output,
group: None,
options: None,
- baseline: None,
+ baseline: false,
name: String::new(),
has_ungrouped: false,
group_measurements: Vec::new(),
@@ -181,7 +165,7 @@ impl BenchReporter for ConsoleReporter {
self.report_group_summary();
self.group = None;
- self.baseline = None;
+ self.baseline = false;
self.name = String::new();
self.group_measurements.clear();
self.options = Some(mitata::reporter::Options::new(
@@ -218,10 +202,12 @@ impl BenchReporter for ConsoleReporter {
);
}
- fn report_wait(&mut self, wait: &BenchMetadata) {
- self.name = wait.name.clone();
+ fn report_register(&mut self, _desc: &BenchDescription) {}
+
+ fn report_wait(&mut self, desc: &BenchDescription) {
+ self.name = desc.name.clone();
- match &wait.group {
+ match &desc.group {
None => {
self.has_ungrouped = true;
}
@@ -249,56 +235,52 @@ impl BenchReporter for ConsoleReporter {
}
}
- fn report_output(&mut self, output: &BenchOutput) {
+ fn report_output(&mut self, output: &str) {
if self.show_output {
- match output {
- BenchOutput::Console(line) => {
- print!("{} {}", colors::gray(format!("{}:", self.name)), line)
- }
- }
+ print!("{} {}", colors::gray(format!("{}:", self.name)), output)
}
}
- fn report_result(&mut self, result: &BenchResult) {
+ fn report_result(&mut self, desc: &BenchDescription, result: &BenchResult) {
let options = self.options.as_ref().unwrap();
match result {
- BenchResult::Ok(bench) => {
- let mut bench = bench.to_owned();
+ BenchResult::Ok(stats) => {
+ let mut desc = desc.clone();
- if bench.baseline && self.baseline.is_none() {
- self.baseline = Some(bench.clone());
+ if desc.baseline && !self.baseline {
+ self.baseline = true;
} else {
- bench.baseline = false;
+ desc.baseline = false;
}
- self.group_measurements.push(bench.clone());
-
println!(
"{}",
mitata::reporter::benchmark(
- &bench.name,
+ &desc.name,
&mitata::reporter::BenchmarkStats {
- avg: bench.stats.avg,
- min: bench.stats.min,
- max: bench.stats.max,
- p75: bench.stats.p75,
- p99: bench.stats.p99,
- p995: bench.stats.p995,
+ avg: stats.avg,
+ min: stats.min,
+ max: stats.max,
+ p75: stats.p75,
+ p99: stats.p99,
+ p995: stats.p995,
},
options
)
);
+
+ self.group_measurements.push((desc, stats.clone()));
}
- BenchResult::Failed(failure) => {
+ BenchResult::Failed(js_error) => {
println!(
"{}",
mitata::reporter::benchmark_error(
- &failure.name,
+ &desc.name,
&mitata::reporter::Error {
stack: None,
- message: failure.error.clone(),
+ message: format_test_error(js_error),
},
options
)
@@ -314,8 +296,7 @@ impl BenchReporter for ConsoleReporter {
};
if 2 <= self.group_measurements.len()
- && (self.group.is_some()
- || (self.group.is_none() && self.baseline.is_some()))
+ && (self.group.is_some() || (self.group.is_none() && self.baseline))
{
println!(
"\n{}",
@@ -323,18 +304,18 @@ impl BenchReporter for ConsoleReporter {
&self
.group_measurements
.iter()
- .map(|b| mitata::reporter::GroupBenchmark {
- name: b.name.clone(),
- baseline: b.baseline,
- group: b.group.as_deref().unwrap_or("").to_owned(),
+ .map(|(d, s)| mitata::reporter::GroupBenchmark {
+ name: d.name.clone(),
+ baseline: d.baseline,
+ group: d.group.as_deref().unwrap_or("").to_owned(),
stats: mitata::reporter::BenchmarkStats {
- avg: b.stats.avg,
- min: b.stats.min,
- max: b.stats.max,
- p75: b.stats.p75,
- p99: b.stats.p99,
- p995: b.stats.p995,
+ avg: s.avg,
+ min: s.min,
+ max: s.max,
+ p75: s.p75,
+ p99: s.p99,
+ p995: s.p995,
},
})
.collect::<Vec<mitata::reporter::GroupBenchmark>>(),
@@ -343,7 +324,7 @@ impl BenchReporter for ConsoleReporter {
);
}
- self.baseline = None;
+ self.baseline = false;
self.group_measurements.clear();
}
@@ -380,11 +361,12 @@ async fn bench_specifier(
channel: UnboundedSender<BenchEvent>,
options: BenchSpecifierOptions,
) -> Result<(), AnyError> {
+ let filter = TestFilter::from_flag(&options.filter);
let mut worker = create_main_worker(
&ps,
specifier.clone(),
permissions,
- vec![ops::bench::init(channel.clone(), ps.flags.unstable)],
+ vec![ops::bench::init(channel.clone(), filter, ps.flags.unstable)],
Default::default(),
);
@@ -413,12 +395,7 @@ async fn bench_specifier(
let bench_result = worker.js_runtime.execute_script(
&located_script_name!(),
- &format!(
- r#"Deno[Deno.internal].runBenchmarks({})"#,
- json!({
- "filter": options.filter,
- }),
- ),
+ r#"Deno[Deno.internal].runBenchmarks()"#,
)?;
worker.js_runtime.resolve_value(bench_result).await?;
@@ -462,6 +439,7 @@ async fn bench_specifiers(
let mut used_only = false;
let mut report = BenchReport::new();
let mut reporter = create_reporter(log_level != Some(Level::Error));
+ let mut benches = IndexMap::new();
while let Some(event) = receiver.recv().await {
match event {
@@ -474,27 +452,32 @@ async fn bench_specifiers(
reporter.report_plan(&plan);
}
- BenchEvent::Wait(metadata) => {
- reporter.report_wait(&metadata);
+ BenchEvent::Register(desc) => {
+ reporter.report_register(&desc);
+ benches.insert(desc.id, desc);
+ }
+
+ BenchEvent::Wait(id) => {
+ reporter.report_wait(benches.get(&id).unwrap());
}
BenchEvent::Output(output) => {
reporter.report_output(&output);
}
- BenchEvent::Result(_origin, result) => {
- match &result {
- BenchResult::Ok(bench) => {
- report.measurements.push(bench.clone());
+ BenchEvent::Result(id, result) => {
+ let desc = benches.get(&id).unwrap();
+ reporter.report_result(desc, &result);
+ match result {
+ BenchResult::Ok(stats) => {
+ report.measurements.push((desc.clone(), stats));
}
BenchResult::Failed(failure) => {
report.failed += 1;
- report.failures.push(failure.clone());
+ report.failures.push((desc.clone(), failure));
}
};
-
- reporter.report_result(&result);
}
}
}
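
The collector keys registered descriptions by id in an IndexMap rather than a HashMap, presumably so that lookups for Wait/Result events stay constant-time while iteration still follows registration order. A small sketch of that property, assuming the indexmap crate added in cli/Cargo.toml:

    use indexmap::IndexMap;

    fn main() {
        let mut benches: IndexMap<usize, &str> = IndexMap::new();
        benches.insert(0, "parse_url");
        benches.insert(1, "serialize");

        // Lookup by id, as done for BenchEvent::Wait(id) and BenchEvent::Result(id, _).
        assert_eq!(benches.get(&1), Some(&"serialize"));

        // Iteration order is insertion (registration) order, unlike HashMap.
        let names: Vec<&str> = benches.values().copied().collect();
        assert_eq!(names, vec!["parse_url", "serialize"]);
    }
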
diff --git a/cli/tools/test.rs b/cli/tools/test.rs
index 4424ee28d..7dcc23dd1 100644
--- a/cli/tools/test.rs
+++ b/cli/tools/test.rs
@@ -77,6 +77,58 @@ pub enum TestMode {
Both,
}
+// TODO(nayeemrmn): This is only used for benches right now.
+#[derive(Clone, Debug, Default)]
+pub struct TestFilter {
+ pub substring: Option<String>,
+ pub regex: Option<Regex>,
+ pub include: Option<Vec<String>>,
+ pub exclude: Vec<String>,
+}
+
+impl TestFilter {
+ pub fn includes(&self, name: &String) -> bool {
+ if let Some(substring) = &self.substring {
+ if !name.contains(substring) {
+ return false;
+ }
+ }
+ if let Some(regex) = &self.regex {
+ if !regex.is_match(name) {
+ return false;
+ }
+ }
+ if let Some(include) = &self.include {
+ if !include.contains(name) {
+ return false;
+ }
+ }
+ if self.exclude.contains(name) {
+ return false;
+ }
+ true
+ }
+
+ pub fn from_flag(flag: &Option<String>) -> Self {
+ let mut substring = None;
+ let mut regex = None;
+ if let Some(flag) = flag {
+ if flag.starts_with('/') && flag.ends_with('/') {
+ let rs = flag.trim_start_matches('/').trim_end_matches('/');
+ regex =
+ Some(Regex::new(rs).unwrap_or_else(|_| Regex::new("$^").unwrap()));
+ } else {
+ substring = Some(flag.clone());
+ }
+ }
+ Self {
+ substring,
+ regex,
+ ..Default::default()
+ }
+ }
+}
+
#[derive(Debug, Clone, PartialEq, Deserialize, Eq, Hash)]
#[serde(rename_all = "camelCase")]
pub struct TestLocation {
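
A sketch of how TestFilter::from_flag interprets a --filter value, written as a hypothetical unit test placed next to the impl above: slash-delimited values become a regex, anything else a substring, and an invalid regex falls back to "$^", which matches nothing.

    #[test]
    fn filter_from_flag_sketch() {
        let substring = TestFilter::from_flag(&Some("parse".to_string()));
        assert!(substring.includes(&"parse url".to_string()));
        assert!(!substring.includes(&"serialize".to_string()));

        let regex = TestFilter::from_flag(&Some("/^bench_[0-9]+$/".to_string()));
        assert!(regex.includes(&"bench_42".to_string()));
        assert!(!regex.includes(&"bench_x".to_string()));

        // An invalid pattern degrades to a filter that matches no names.
        let broken = TestFilter::from_flag(&Some("/[unclosed/".to_string()));
        assert!(!broken.includes(&"anything".to_string()));
    }
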
diff --git a/runtime/js/40_testing.js b/runtime/js/40_testing.js
index 47af45110..8c7d69fb0 100644
--- a/runtime/js/40_testing.js
+++ b/runtime/js/40_testing.js
@@ -4,11 +4,10 @@
((window) => {
const core = window.Deno.core;
const { setExitHandler } = window.__bootstrap.os;
- const { Console, inspectArgs } = window.__bootstrap.console;
+ const { Console } = window.__bootstrap.console;
const { serializePermissions } = window.__bootstrap.permissions;
const { assert } = window.__bootstrap.infra;
const {
- AggregateErrorPrototype,
ArrayFrom,
ArrayPrototypeFilter,
ArrayPrototypeJoin,
@@ -537,8 +536,23 @@
};
}
+ /**
+ * @typedef {{
+ * id: number,
+ * name: string,
+ * fn: BenchFunction,
+ * origin: string,
+ * filteredOut: boolean,
+ * ignore: boolean,
+ * only: boolean,
+ * sanitizeExit: boolean,
+ * permissions: PermissionOptions,
+ * }} BenchDescription
+ */
+
const tests = [];
- const benches = [];
+ /** @type {BenchDescription[]} */
+ const benchDescs = [];
// Main test function provided by Deno.
function test(
@@ -655,12 +669,11 @@
maybeFn,
) {
core.opSync("op_bench_check_unstable");
- let benchDef;
+ let benchDesc;
const defaults = {
ignore: false,
+ baseline: false,
only: false,
- sanitizeOps: true,
- sanitizeResources: true,
sanitizeExit: true,
permissions: null,
};
@@ -670,7 +683,7 @@
throw new TypeError("The bench name can't be empty");
}
if (typeof optionsOrFn === "function") {
- benchDef = { fn: optionsOrFn, name: nameOrFnOrOptions, ...defaults };
+ benchDesc = { fn: optionsOrFn, name: nameOrFnOrOptions, ...defaults };
} else {
if (!maybeFn || typeof maybeFn !== "function") {
throw new TypeError("Missing bench function");
@@ -685,7 +698,7 @@
"Unexpected 'name' field in options, bench name is already provided as the first argument.",
);
}
- benchDef = {
+ benchDesc = {
...defaults,
...optionsOrFn,
fn: maybeFn,
@@ -702,7 +715,7 @@
if (maybeFn != undefined) {
throw new TypeError("Unexpected third argument to Deno.bench()");
}
- benchDef = {
+ benchDesc = {
...defaults,
fn: nameOrFnOrOptions,
name: nameOrFnOrOptions.name,
@@ -732,28 +745,18 @@
if (!name) {
throw new TypeError("The bench name can't be empty");
}
- benchDef = { ...defaults, ...nameOrFnOrOptions, fn, name };
+ benchDesc = { ...defaults, ...nameOrFnOrOptions, fn, name };
}
+ benchDesc.origin = getBenchOrigin();
const AsyncFunction = (async () => {}).constructor;
- benchDef.async = AsyncFunction === benchDef.fn.constructor;
+ benchDesc.async = AsyncFunction === benchDesc.fn.constructor;
- ArrayPrototypePush(benches, benchDef);
- }
-
- function formatError(error) {
- if (ObjectPrototypeIsPrototypeOf(AggregateErrorPrototype, error)) {
- const message = error
- .errors
- .map((error) =>
- inspectArgs([error]).replace(/^(?!\s*$)/gm, " ".repeat(4))
- )
- .join("\n");
-
- return error.name + "\n" + message + error.stack;
- }
+ const { id, filteredOut } = core.opSync("op_register_bench", benchDesc);
+ benchDesc.id = id;
+ benchDesc.filteredOut = filteredOut;
- return inspectArgs([error]);
+ ArrayPrototypePush(benchDescs, benchDesc);
}
/**
@@ -848,7 +851,8 @@
};
}
- async function benchMeasure(timeBudget, fn, step, sync) {
+ async function benchMeasure(timeBudget, desc) {
+ const fn = desc.fn;
let n = 0;
let avg = 0;
let wavg = 0;
@@ -859,11 +863,10 @@
// warmup step
let c = 0;
- step.warmup = true;
let iterations = 20;
let budget = 10 * 1e6;
- if (sync) {
+ if (!desc.async) {
while (budget > 0 || iterations-- > 0) {
const t1 = benchNow();
@@ -890,13 +893,11 @@
wavg /= c;
// measure step
- step.warmup = false;
-
if (wavg > lowPrecisionThresholdInNs) {
let iterations = 10;
let budget = timeBudget * 1e6;
- if (sync) {
+ if (!desc.async) {
while (budget > 0 || iterations-- > 0) {
const t1 = benchNow();
@@ -906,7 +907,7 @@
n++;
avg += iterationTime;
budget -= iterationTime;
- all.push(iterationTime);
+ ArrayPrototypePush(all, iterationTime);
if (iterationTime < min) min = iterationTime;
if (iterationTime > max) max = iterationTime;
}
@@ -920,7 +921,7 @@
n++;
avg += iterationTime;
budget -= iterationTime;
- all.push(iterationTime);
+ ArrayPrototypePush(all, iterationTime);
if (iterationTime < min) min = iterationTime;
if (iterationTime > max) max = iterationTime;
}
@@ -929,7 +930,7 @@
let iterations = 10;
let budget = timeBudget * 1e6;
- if (sync) {
+ if (!desc.async) {
while (budget > 0 || iterations-- > 0) {
const t1 = benchNow();
for (let c = 0; c < lowPrecisionThresholdInNs; c++) fn();
@@ -937,7 +938,7 @@
n++;
avg += iterationTime;
- all.push(iterationTime);
+ ArrayPrototypePush(all, iterationTime);
if (iterationTime < min) min = iterationTime;
if (iterationTime > max) max = iterationTime;
budget -= iterationTime * lowPrecisionThresholdInNs;
@@ -962,21 +963,15 @@
return benchStats(n, wavg > lowPrecisionThresholdInNs, avg, min, max, all);
}
- async function runBench(bench) {
- const step = new BenchStep({
- name: bench.name,
- sanitizeExit: bench.sanitizeExit,
- warmup: false,
- });
-
+ async function runBench(desc) {
let token = null;
try {
- if (bench.permissions) {
- token = pledgePermissions(bench.permissions);
+ if (desc.permissions) {
+ token = pledgePermissions(desc.permissions);
}
- if (bench.sanitizeExit) {
+ if (desc.sanitizeExit) {
setExitHandler((exitCode) => {
assert(
false,
@@ -986,24 +981,31 @@
}
const benchTimeInMs = 500;
- const fn = bench.fn.bind(null, step);
- const stats = await benchMeasure(benchTimeInMs, fn, step, !bench.async);
+ const stats = await benchMeasure(benchTimeInMs, desc);
- return { ok: { stats, ...bench } };
+ return { ok: stats };
} catch (error) {
- return { failed: { ...bench, error: formatError(error) } };
+ return { failed: core.destructureError(error) };
} finally {
if (bench.sanitizeExit) setExitHandler(null);
if (token !== null) restorePermissions(token);
}
}
+ let origin = null;
+
function getTestOrigin() {
- return core.opSync("op_get_test_origin");
+ if (origin == null) {
+ origin = core.opSync("op_get_test_origin");
+ }
+ return origin;
}
function getBenchOrigin() {
- return core.opSync("op_get_bench_origin");
+ if (origin == null) {
+ origin = core.opSync("op_get_bench_origin");
+ }
+ return origin;
}
function reportTestPlan(plan) {
@@ -1036,30 +1038,6 @@
});
}
- function reportBenchPlan(plan) {
- core.opSync("op_dispatch_bench_event", {
- plan,
- });
- }
-
- function reportBenchConsoleOutput(console) {
- core.opSync("op_dispatch_bench_event", {
- output: { console },
- });
- }
-
- function reportBenchWait(description) {
- core.opSync("op_dispatch_bench_event", {
- wait: description,
- });
- }
-
- function reportBenchResult(origin, result) {
- core.opSync("op_dispatch_bench_event", {
- result: [origin, result],
- });
- }
-
function benchNow() {
return core.opSync("op_bench_now");
}
@@ -1120,50 +1098,52 @@
}
}
- async function runBenchmarks({
- filter = null,
- } = {}) {
+ async function runBenchmarks() {
core.setMacrotaskCallback(handleOpSanitizerDelayMacrotask);
const origin = getBenchOrigin();
const originalConsole = globalThis.console;
- globalThis.console = new Console(reportBenchConsoleOutput);
+ globalThis.console = new Console((s) => {
+ core.opSync("op_dispatch_bench_event", { output: s });
+ });
- const only = ArrayPrototypeFilter(benches, (bench) => bench.only);
+ const only = ArrayPrototypeFilter(benchDescs, (bench) => bench.only);
const filtered = ArrayPrototypeFilter(
- only.length > 0 ? only : benches,
- createTestFilter(filter),
+ only.length > 0 ? only : benchDescs,
+ (desc) => !desc.filteredOut && !desc.ignore,
);
let groups = new Set();
- const benchmarks = ArrayPrototypeFilter(filtered, (bench) => !bench.ignore);
-
// make sure ungrouped benchmarks are placed above grouped
groups.add(undefined);
- for (const bench of benchmarks) {
- bench.group ||= undefined;
- groups.add(bench.group);
+ for (const desc of filtered) {
+ desc.group ||= undefined;
+ groups.add(desc.group);
}
groups = ArrayFrom(groups);
ArrayPrototypeSort(
- benchmarks,
+ filtered,
(a, b) => groups.indexOf(a.group) - groups.indexOf(b.group),
);
- reportBenchPlan({
- origin,
- total: benchmarks.length,
- usedOnly: only.length > 0,
- names: ArrayPrototypeMap(benchmarks, (bench) => bench.name),
+ core.opSync("op_dispatch_bench_event", {
+ plan: {
+ origin,
+ total: filtered.length,
+ usedOnly: only.length > 0,
+ names: ArrayPrototypeMap(filtered, (desc) => desc.name),
+ },
});
- for (const bench of benchmarks) {
- bench.baseline = !!bench.baseline;
- reportBenchWait({ origin, ...bench });
- reportBenchResult(origin, await runBench(bench));
+ for (const desc of filtered) {
+ desc.baseline = !!desc.baseline;
+ core.opSync("op_dispatch_bench_event", { wait: desc.id });
+ core.opSync("op_dispatch_bench_event", {
+ result: [desc.id, await runBench(desc)],
+ });
}
globalThis.console = originalConsole;
@@ -1364,27 +1344,6 @@
}
}
- /**
- * @typedef {{
- * name: string;
- * sanitizeExit: boolean,
- * warmup: boolean,
- * }} BenchStepParams
- */
- class BenchStep {
- /** @type {BenchStepParams} */
- #params;
-
- /** @param params {BenchStepParams} */
- constructor(params) {
- this.#params = params;
- }
-
- get name() {
- return this.#params.name;
- }
- }
-
/** @param parentStep {TestStep} */
function createTestContext(parentStep) {
return {