author    | Bartek Iwańczuk <biwanczuk@gmail.com> | 2023-07-28 17:27:10 +0200
committer | GitHub <noreply@github.com> | 2023-07-28 11:27:10 -0400
commit    | a9951e360cf9937c84bb884ccfefbbd304b401e3 (patch)
tree      | cea09f6b92137fa2ca5412b3ea069b8a971817f0 /cli/tools/bench/mod.rs
parent    | 279030f2b8564ceed76c0ed08b14ea97e2258215 (diff)
refactor(cli/tools): split bench into multiple modules (#19974)
I was asked to add "iter/s" to the benchmark output; before attempting
that, I wanted to split this into multiple modules.
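
For context, "iter/s" is just the reciprocal of the average time per iteration that the `BenchStats` struct in this diff already tracks. A minimal sketch of the conversion, assuming `avg` holds nanoseconds per iteration (the helper below is hypothetical and not part of this commit):

fn iters_per_second(avg_ns: f64) -> f64 {
  // Guard against degenerate stats; avoids dividing by zero.
  if avg_ns <= 0.0 {
    return 0.0;
  }
  // 1e9 ns per second divided by ns per iteration = iterations per second.
  1_000_000_000.0 / avg_ns
}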
Diffstat (limited to 'cli/tools/bench/mod.rs')
-rw-r--r-- | cli/tools/bench/mod.rs | 506
1 file changed, 506 insertions, 0 deletions
diff --git a/cli/tools/bench/mod.rs b/cli/tools/bench/mod.rs
new file mode 100644
index 000000000..34ce46bc3
--- /dev/null
+++ b/cli/tools/bench/mod.rs
@@ -0,0 +1,506 @@
+// Copyright 2018-2023 the Deno authors. All rights reserved. MIT license.
+
+use crate::args::BenchFlags;
+use crate::args::CliOptions;
+use crate::args::Flags;
+use crate::colors;
+use crate::display::write_json_to_stdout;
+use crate::factory::CliFactory;
+use crate::factory::CliFactoryBuilder;
+use crate::graph_util::graph_valid_with_cli_options;
+use crate::graph_util::has_graph_root_local_dependent_changed;
+use crate::module_loader::ModuleLoadPreparer;
+use crate::ops;
+use crate::tools::test::format_test_error;
+use crate::tools::test::TestFilter;
+use crate::util::file_watcher;
+use crate::util::fs::collect_specifiers;
+use crate::util::path::is_supported_ext;
+use crate::version::get_user_agent;
+use crate::worker::CliMainWorkerFactory;
+
+use deno_core::error::generic_error;
+use deno_core::error::AnyError;
+use deno_core::error::JsError;
+use deno_core::futures::future;
+use deno_core::futures::stream;
+use deno_core::futures::StreamExt;
+use deno_core::located_script_name;
+use deno_core::serde_v8;
+use deno_core::task::spawn;
+use deno_core::task::spawn_blocking;
+use deno_core::v8;
+use deno_core::ModuleSpecifier;
+use deno_runtime::permissions::Permissions;
+use deno_runtime::permissions::PermissionsContainer;
+use deno_runtime::tokio_util::create_and_run_current_thread;
+use indexmap::IndexMap;
+use indexmap::IndexSet;
+use log::Level;
+use serde::Deserialize;
+use serde::Serialize;
+use std::collections::HashSet;
+use std::path::Path;
+use std::sync::Arc;
+use tokio::sync::mpsc::unbounded_channel;
+use tokio::sync::mpsc::UnboundedSender;
+
+mod mitata;
+mod reporters;
+
+use reporters::BenchReporter;
+use reporters::ConsoleReporter;
+use reporters::JsonReporter;
+
+#[derive(Debug, Clone)]
+struct BenchSpecifierOptions {
+  filter: TestFilter,
+  json: bool,
+  log_level: Option<log::Level>,
+}
+
+#[derive(Debug, Clone, Eq, PartialEq, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct BenchPlan {
+  pub total: usize,
+  pub origin: String,
+  pub used_only: bool,
+  pub names: Vec<String>,
+}
+
+#[derive(Debug, Clone, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub enum BenchEvent {
+  Plan(BenchPlan),
+  Output(String),
+  Register(BenchDescription),
+  Wait(usize),
+  Result(usize, BenchResult),
+}
+
+#[derive(Debug, Clone, Deserialize, Serialize)]
+#[serde(rename_all = "camelCase")]
+pub enum BenchResult {
+  Ok(BenchStats),
+  Failed(Box<JsError>),
+}
+
+#[derive(Debug, Clone)]
+pub struct BenchReport {
+  pub total: usize,
+  pub failed: usize,
+  pub failures: Vec<(BenchDescription, Box<JsError>)>,
+  pub measurements: Vec<(BenchDescription, BenchStats)>,
+}
+
+#[derive(Debug, Clone, PartialEq, Deserialize, Eq, Hash)]
+pub struct BenchDescription {
+  pub id: usize,
+  pub name: String,
+  pub origin: String,
+  pub baseline: bool,
+  pub group: Option<String>,
+  pub ignore: bool,
+  pub only: bool,
+  pub warmup: bool,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct BenchStats {
+  pub n: u64,
+  pub min: f64,
+  pub max: f64,
+  pub avg: f64,
+  pub p75: f64,
+  pub p99: f64,
+  pub p995: f64,
+  pub p999: f64,
+}
+
+impl BenchReport {
+  pub fn new() -> Self {
+    Self {
+      total: 0,
+      failed: 0,
+      failures: Vec::new(),
+      measurements: Vec::new(),
+    }
+  }
+}
+
+fn create_reporter(
+  show_output: bool,
+  json: bool,
+) -> Box<dyn BenchReporter + Send> {
+  if json {
+    return Box::new(JsonReporter::new());
+  }
+  Box::new(ConsoleReporter::new(show_output))
+}
+
+/// Type check a collection of module and document specifiers.
+async fn check_specifiers(
+  cli_options: &CliOptions,
+  module_load_preparer: &ModuleLoadPreparer,
+  specifiers: Vec<ModuleSpecifier>,
+) -> Result<(), AnyError> {
+  let lib = cli_options.ts_type_lib_window();
+  module_load_preparer
+    .prepare_module_load(
+      specifiers,
+      false,
+      lib,
+      PermissionsContainer::allow_all(),
+    )
+    .await?;
+  Ok(())
+}
+
+/// Run a single specifier as an executable bench module.
+async fn bench_specifier(
+  worker_factory: Arc<CliMainWorkerFactory>,
+  permissions: Permissions,
+  specifier: ModuleSpecifier,
+  sender: UnboundedSender<BenchEvent>,
+  filter: TestFilter,
+) -> Result<(), AnyError> {
+  let mut worker = worker_factory
+    .create_custom_worker(
+      specifier.clone(),
+      PermissionsContainer::new(permissions),
+      vec![ops::bench::deno_bench::init_ops(sender.clone())],
+      Default::default(),
+    )
+    .await?;
+
+  // We execute the main module as a side module so that import.meta.main is not set.
+  worker.execute_side_module_possibly_with_npm().await?;
+
+  let mut worker = worker.into_main_worker();
+  worker.dispatch_load_event(located_script_name!())?;
+
+  let benchmarks = {
+    let state_rc = worker.js_runtime.op_state();
+    let mut state = state_rc.borrow_mut();
+    std::mem::take(&mut state.borrow_mut::<ops::bench::BenchContainer>().0)
+  };
+  let (only, no_only): (Vec<_>, Vec<_>) =
+    benchmarks.into_iter().partition(|(d, _)| d.only);
+  let used_only = !only.is_empty();
+  let benchmarks = if used_only { only } else { no_only };
+  let mut benchmarks = benchmarks
+    .into_iter()
+    .filter(|(d, _)| filter.includes(&d.name) && !d.ignore)
+    .collect::<Vec<_>>();
+  let mut groups = IndexSet::<Option<String>>::new();
+  // make sure ungrouped benchmarks are placed above grouped
+  groups.insert(None);
+  for (desc, _) in &benchmarks {
+    groups.insert(desc.group.clone());
+  }
+  benchmarks.sort_by(|(d1, _), (d2, _)| {
+    groups
+      .get_index_of(&d1.group)
+      .unwrap()
+      .partial_cmp(&groups.get_index_of(&d2.group).unwrap())
+      .unwrap()
+  });
+  sender.send(BenchEvent::Plan(BenchPlan {
+    origin: specifier.to_string(),
+    total: benchmarks.len(),
+    used_only,
+    names: benchmarks.iter().map(|(d, _)| d.name.clone()).collect(),
+  }))?;
+  for (desc, function) in benchmarks {
+    sender.send(BenchEvent::Wait(desc.id))?;
+    let result = worker.js_runtime.call_and_await(&function).await?;
+    let scope = &mut worker.js_runtime.handle_scope();
+    let result = v8::Local::new(scope, result);
+    let result = serde_v8::from_v8::<BenchResult>(scope, result)?;
+    sender.send(BenchEvent::Result(desc.id, result))?;
+  }
+
+  // Ignore `defaultPrevented` of the `beforeunload` event. We don't allow the
+  // event loop to continue beyond what's needed to await results.
+  worker.dispatch_beforeunload_event(located_script_name!())?;
+  worker.dispatch_unload_event(located_script_name!())?;
+  Ok(())
+}
+
+/// Run a collection of benchmark specifiers.
+async fn bench_specifiers(
+  worker_factory: Arc<CliMainWorkerFactory>,
+  permissions: &Permissions,
+  specifiers: Vec<ModuleSpecifier>,
+  options: BenchSpecifierOptions,
+) -> Result<(), AnyError> {
+  let (sender, mut receiver) = unbounded_channel::<BenchEvent>();
+  let log_level = options.log_level;
+  let option_for_handles = options.clone();
+
+  let join_handles = specifiers.into_iter().map(move |specifier| {
+    let worker_factory = worker_factory.clone();
+    let permissions = permissions.clone();
+    let specifier = specifier;
+    let sender = sender.clone();
+    let options = option_for_handles.clone();
+    spawn_blocking(move || {
+      let future = bench_specifier(
+        worker_factory,
+        permissions,
+        specifier,
+        sender,
+        options.filter,
+      );
+      create_and_run_current_thread(future)
+    })
+  });
+
+  let join_stream = stream::iter(join_handles)
+    .buffer_unordered(1)
+    .collect::<Vec<Result<Result<(), AnyError>, tokio::task::JoinError>>>();
+
+  let handler = {
+    spawn(async move {
+      let mut used_only = false;
+      let mut report = BenchReport::new();
+      let mut reporter =
+        create_reporter(log_level != Some(Level::Error), options.json);
+      let mut benches = IndexMap::new();
+
+      while let Some(event) = receiver.recv().await {
+        match event {
+          BenchEvent::Plan(plan) => {
+            report.total += plan.total;
+            if plan.used_only {
+              used_only = true;
+            }
+
+            reporter.report_plan(&plan);
+          }
+
+          BenchEvent::Register(desc) => {
+            reporter.report_register(&desc);
+            benches.insert(desc.id, desc);
+          }
+
+          BenchEvent::Wait(id) => {
+            reporter.report_wait(benches.get(&id).unwrap());
+          }
+
+          BenchEvent::Output(output) => {
+            reporter.report_output(&output);
+          }
+
+          BenchEvent::Result(id, result) => {
+            let desc = benches.get(&id).unwrap();
+            reporter.report_result(desc, &result);
+            match result {
+              BenchResult::Ok(stats) => {
+                report.measurements.push((desc.clone(), stats));
+              }
+
+              BenchResult::Failed(failure) => {
+                report.failed += 1;
+                report.failures.push((desc.clone(), failure));
+              }
+            };
+          }
+        }
+      }
+
+      reporter.report_end(&report);
+
+      if used_only {
+        return Err(generic_error(
+          "Bench failed because the \"only\" option was used",
+        ));
+      }
+
+      if report.failed > 0 {
+        return Err(generic_error("Bench failed"));
+      }
+
+      Ok(())
+    })
+  };
+
+  let (join_results, result) = future::join(join_stream, handler).await;
+
+  // propagate any errors
+  for join_result in join_results {
+    join_result??;
+  }
+
+  result??;
+
+  Ok(())
+}
+
+/// Checks if the path has a basename and extension Deno supports for benches.
+fn is_supported_bench_path(path: &Path) -> bool {
+  if let Some(name) = path.file_stem() {
+    let basename = name.to_string_lossy();
+    (basename.ends_with("_bench")
+      || basename.ends_with(".bench")
+      || basename == "bench")
+      && is_supported_ext(path)
+  } else {
+    false
+  }
+}
+
+pub async fn run_benchmarks(
+  flags: Flags,
+  bench_flags: BenchFlags,
+) -> Result<(), AnyError> {
+  let cli_options = CliOptions::from_flags(flags)?;
+  let bench_options = cli_options.resolve_bench_options(bench_flags)?;
+  let factory = CliFactory::from_cli_options(Arc::new(cli_options));
+  let cli_options = factory.cli_options();
+  // Various bench files should not share the same permissions in terms of
+  // `PermissionsContainer` - otherwise granting/revoking permissions in one
+  // file would have impact on other files, which is undesirable.
+  let permissions =
+    Permissions::from_options(&cli_options.permissions_options())?;
+
+  let specifiers =
+    collect_specifiers(&bench_options.files, is_supported_bench_path)?;
+
+  if specifiers.is_empty() {
+    return Err(generic_error("No bench modules found"));
+  }
+
+  check_specifiers(
+    cli_options,
+    factory.module_load_preparer().await?,
+    specifiers.clone(),
+  )
+  .await?;
+
+  if bench_options.no_run {
+    return Ok(());
+  }
+
+  let log_level = cli_options.log_level();
+  let worker_factory =
+    Arc::new(factory.create_cli_main_worker_factory().await?);
+  bench_specifiers(
+    worker_factory,
+    &permissions,
+    specifiers,
+    BenchSpecifierOptions {
+      filter: TestFilter::from_flag(&bench_options.filter),
+      json: bench_options.json,
+      log_level,
+    },
+  )
+  .await?;
+
+  Ok(())
+}
+
+// TODO(bartlomieju): heavy duplication of code with `cli/tools/test.rs`
+pub async fn run_benchmarks_with_watch(
+  flags: Flags,
+  bench_flags: BenchFlags,
+) -> Result<(), AnyError> {
+  file_watcher::watch_func(
+    flags,
+    file_watcher::PrintConfig {
+      job_name: "Bench".to_string(),
+      clear_screen: bench_flags
+        .watch
+        .as_ref()
+        .map(|w| !w.no_clear_screen)
+        .unwrap_or(true),
+    },
+    move |flags, sender, changed_paths| {
+      let bench_flags = bench_flags.clone();
+      Ok(async move {
+        let factory = CliFactoryBuilder::new()
+          .with_watcher(sender.clone())
+          .build_from_flags(flags)
+          .await?;
+        let cli_options = factory.cli_options();
+        let bench_options = cli_options.resolve_bench_options(bench_flags)?;
+
+        let _ = sender.send(cli_options.watch_paths());
+        let _ = sender.send(bench_options.files.include.clone());
+
+        let graph_kind = cli_options.type_check_mode().as_graph_kind();
+        let module_graph_builder = factory.module_graph_builder().await?;
+        let module_load_preparer = factory.module_load_preparer().await?;
+
+        let bench_modules =
+          collect_specifiers(&bench_options.files, is_supported_bench_path)?;
+
+        // Various bench files should not share the same permissions in terms of
+        // `PermissionsContainer` - otherwise granting/revoking permissions in one
+        // file would have impact on other files, which is undesirable.
+        let permissions =
+          Permissions::from_options(&cli_options.permissions_options())?;
+
+        let graph = module_graph_builder
+          .create_graph(graph_kind, bench_modules.clone())
+          .await?;
+        graph_valid_with_cli_options(&graph, &bench_modules, cli_options)?;
+
+        let bench_modules_to_reload = if let Some(changed_paths) = changed_paths
+        {
+          let changed_specifiers = changed_paths
+            .into_iter()
+            .filter_map(|p| ModuleSpecifier::from_file_path(p).ok())
+            .collect::<HashSet<_>>();
+          let mut result = Vec::new();
+          for bench_module_specifier in bench_modules {
+            if has_graph_root_local_dependent_changed(
+              &graph,
+              &bench_module_specifier,
+              &changed_specifiers,
+            ) {
+              result.push(bench_module_specifier.clone());
+            }
+          }
+          result
+        } else {
+          bench_modules.clone()
+        };
+
+        let worker_factory =
+          Arc::new(factory.create_cli_main_worker_factory().await?);
+
+        let specifiers =
+          collect_specifiers(&bench_options.files, is_supported_bench_path)?
+            .into_iter()
+            .filter(|specifier| bench_modules_to_reload.contains(specifier))
+            .collect::<Vec<ModuleSpecifier>>();
+
+        check_specifiers(cli_options, module_load_preparer, specifiers.clone())
+          .await?;
+
+        if bench_options.no_run {
+          return Ok(());
+        }
+
+        let log_level = cli_options.log_level();
+        bench_specifiers(
+          worker_factory,
+          &permissions,
+          specifiers,
+          BenchSpecifierOptions {
+            filter: TestFilter::from_flag(&bench_options.filter),
+            json: bench_options.json,
+            log_level,
+          },
+        )
+        .await?;
+
+        Ok(())
+      })
+    },
+  )
+  .await?;
+
+  Ok(())
+}
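
Note: this view is limited to cli/tools/bench/mod.rs, so the new reporters module introduced by the refactor is not shown. Judging only from the call sites in the handler loop of bench_specifiers() above, the BenchReporter trait it exports presumably looks roughly like the sketch below; the exact receiver and parameter types are an assumption, not the committed code:

// Sketch inferred from the call sites in this diff; the real trait lives in
// cli/tools/bench/reporters.rs, which the diffstat above excludes.
pub trait BenchReporter {
  fn report_plan(&mut self, plan: &BenchPlan);
  fn report_register(&mut self, desc: &BenchDescription);
  fn report_wait(&mut self, desc: &BenchDescription);
  fn report_output(&mut self, output: &str);
  fn report_result(&mut self, desc: &BenchDescription, result: &BenchResult);
  fn report_end(&mut self, report: &BenchReport);
}

create_reporter() then hides the choice between JsonReporter (note the write_json_to_stdout import above) and the mitata-backed ConsoleReporter behind this trait object, which is what makes the planned "iter/s" column a reporter-local change.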