From 66424032a2c78c6010c0a1a1b22a26d081166660 Mon Sep 17 00:00:00 2001 From: David Sherret Date: Mon, 19 Feb 2024 10:28:41 -0500 Subject: feat(unstable/lint): no-slow-types for JSR packages (#22430) 1. Renames zap/fast-check to instead be a `no-slow-types` lint rule. 1. This lint rule is automatically run when doing `deno lint` for packages (deno.json files with a name, version, and exports field) 1. This lint rules still occurs on publish. It can be skipped by running with `--no-slow-types` --- cli/Cargo.toml | 4 +- cli/args/flags.rs | 10 +- cli/graph_util.rs | 24 +- cli/lsp/diagnostics.rs | 6 +- cli/tools/lint.rs | 586 -------------------------- cli/tools/lint/mod.rs | 798 ++++++++++++++++++++++++++++++++++++ cli/tools/lint/no_slow_types.rs | 38 ++ cli/tools/registry/diagnostics.rs | 20 +- cli/tools/registry/graph.rs | 117 ------ cli/tools/registry/mod.rs | 195 ++++----- cli/tools/registry/publish_order.rs | 42 +- 11 files changed, 979 insertions(+), 861 deletions(-) delete mode 100644 cli/tools/lint.rs create mode 100644 cli/tools/lint/mod.rs create mode 100644 cli/tools/lint/no_slow_types.rs (limited to 'cli') diff --git a/cli/Cargo.toml b/cli/Cargo.toml index 349967708..a7d6780e2 100644 --- a/cli/Cargo.toml +++ b/cli/Cargo.toml @@ -64,11 +64,11 @@ winres.workspace = true [dependencies] deno_ast = { workspace = true, features = ["bundler", "cjs", "codegen", "proposal", "react", "sourcemap", "transforms", "typescript", "view", "visit"] } deno_cache_dir = { workspace = true } -deno_config = "=0.9.2" +deno_config = "=0.10.0" deno_core = { workspace = true, features = ["include_js_files_for_snapshotting"] } deno_doc = { version = "=0.107.0", features = ["html"] } deno_emit = "=0.37.0" -deno_graph = "=0.66.0" +deno_graph = "=0.66.2" deno_lint = { version = "=0.56.0", features = ["docs"] } deno_lockfile.workspace = true deno_npm = "=0.17.0" diff --git a/cli/args/flags.rs b/cli/args/flags.rs index 3b6810a95..0ce521296 100644 --- a/cli/args/flags.rs +++ b/cli/args/flags.rs @@ -301,7 +301,7 @@ pub struct VendorFlags { pub struct PublishFlags { pub token: Option, pub dry_run: bool, - pub no_zap: bool, + pub allow_slow_types: bool, } #[derive(Clone, Debug, Eq, PartialEq)] @@ -2389,9 +2389,9 @@ fn publish_subcommand() -> Command { .action(ArgAction::SetTrue), ) .arg( - Arg::new("no-zap") - .long("no-zap") - .help("Skip Zap compatibility validation") + Arg::new("allow-slow-types") + .long("allow-slow-types") + .help("Allow publishing with slow types") .action(ArgAction::SetTrue), ) }) @@ -3828,7 +3828,7 @@ fn publish_parse(flags: &mut Flags, matches: &mut ArgMatches) { flags.subcommand = DenoSubcommand::Publish(PublishFlags { token: matches.remove_one("token"), dry_run: matches.get_flag("dry-run"), - no_zap: matches.get_flag("no-zap"), + allow_slow_types: matches.get_flag("allow-slow-types"), }); } diff --git a/cli/graph_util.rs b/cli/graph_util.rs index b9027afa3..380d82cbf 100644 --- a/cli/graph_util.rs +++ b/cli/graph_util.rs @@ -23,6 +23,7 @@ use crate::util::sync::TaskQueue; use crate::util::sync::TaskQueuePermit; use deno_config::ConfigFile; +use deno_config::WorkspaceMemberConfig; use deno_core::anyhow::bail; use deno_core::anyhow::Context; use deno_core::error::custom_error; @@ -277,7 +278,7 @@ impl ModuleGraphBuilder { graph_kind: GraphKind, roots: Vec, loader: &mut dyn Loader, - ) -> Result { + ) -> Result { self .create_graph_with_options(CreateGraphOptions { is_dynamic: false, @@ -289,10 +290,29 @@ impl ModuleGraphBuilder { .await } + pub async fn create_publish_graph( + &self, + 
packages: &[WorkspaceMemberConfig], + ) -> Result { + let mut roots = Vec::new(); + for package in packages { + roots.extend(package.config_file.resolve_export_value_urls()?); + } + self + .create_graph_with_options(CreateGraphOptions { + is_dynamic: false, + graph_kind: deno_graph::GraphKind::All, + roots, + workspace_fast_check: true, + loader: None, + }) + .await + } + pub async fn create_graph_with_options( &self, options: CreateGraphOptions<'_>, - ) -> Result { + ) -> Result { let mut graph = ModuleGraph::new(options.graph_kind); self diff --git a/cli/lsp/diagnostics.rs b/cli/lsp/diagnostics.rs index 0b3faa551..e3e206a52 100644 --- a/cli/lsp/diagnostics.rs +++ b/cli/lsp/diagnostics.rs @@ -796,7 +796,11 @@ fn generate_lint_diagnostics( let documents = snapshot .documents .documents(DocumentsFilter::OpenDiagnosable); - let lint_rules = get_configured_rules(lint_options.rules.clone()); + let lint_rules = get_configured_rules( + lint_options.rules.clone(), + config.config_file.as_ref(), + ) + .rules; let mut diagnostics_vec = Vec::new(); for document in documents { let settings = diff --git a/cli/tools/lint.rs b/cli/tools/lint.rs deleted file mode 100644 index 32b47e453..000000000 --- a/cli/tools/lint.rs +++ /dev/null @@ -1,586 +0,0 @@ -// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. - -//! This module provides file linting utilities using -//! [`deno_lint`](https://github.com/denoland/deno_lint). -use crate::args::Flags; -use crate::args::LintFlags; -use crate::args::LintOptions; -use crate::args::LintReporterKind; -use crate::args::LintRulesConfig; -use crate::colors; -use crate::factory::CliFactory; -use crate::tools::fmt::run_parallelized; -use crate::util::file_watcher; -use crate::util::fs::canonicalize_path; -use crate::util::fs::specifier_from_file_path; -use crate::util::fs::FileCollector; -use crate::util::path::is_script_ext; -use crate::util::sync::AtomicFlag; -use deno_ast::diagnostics::Diagnostic; -use deno_ast::MediaType; -use deno_ast::ParsedSource; -use deno_config::glob::FilePatterns; -use deno_core::anyhow::bail; -use deno_core::error::generic_error; -use deno_core::error::AnyError; -use deno_core::serde_json; -use deno_lint::diagnostic::LintDiagnostic; -use deno_lint::linter::LintFileOptions; -use deno_lint::linter::Linter; -use deno_lint::linter::LinterBuilder; -use deno_lint::rules; -use deno_lint::rules::LintRule; -use log::debug; -use log::info; -use serde::Serialize; -use std::fs; -use std::io::stdin; -use std::io::Read; -use std::path::Path; -use std::path::PathBuf; -use std::sync::Arc; -use std::sync::Mutex; - -use crate::cache::IncrementalCache; - -static STDIN_FILE_NAME: &str = "$deno$stdin.ts"; - -fn create_reporter(kind: LintReporterKind) -> Box { - match kind { - LintReporterKind::Pretty => Box::new(PrettyLintReporter::new()), - LintReporterKind::Json => Box::new(JsonLintReporter::new()), - LintReporterKind::Compact => Box::new(CompactLintReporter::new()), - } -} - -pub async fn lint(flags: Flags, lint_flags: LintFlags) -> Result<(), AnyError> { - if let Some(watch_flags) = &lint_flags.watch { - if lint_flags.is_stdin() { - return Err(generic_error( - "Lint watch on standard input is not supported.", - )); - } - file_watcher::watch_func( - flags, - file_watcher::PrintConfig::new("Lint", !watch_flags.no_clear_screen), - move |flags, watcher_communicator, changed_paths| { - let lint_flags = lint_flags.clone(); - Ok(async move { - let factory = CliFactory::from_flags(flags).await?; - let cli_options = factory.cli_options(); - let 
lint_options = cli_options.resolve_lint_options(lint_flags)?; - let files = collect_lint_files(lint_options.files.clone()).and_then( - |files| { - if files.is_empty() { - Err(generic_error("No target files found.")) - } else { - Ok(files) - } - }, - )?; - _ = watcher_communicator.watch_paths(files.clone()); - - let lint_paths = if let Some(paths) = changed_paths { - // lint all files on any changed (https://github.com/denoland/deno/issues/12446) - files - .iter() - .any(|path| { - canonicalize_path(path) - .map(|p| paths.contains(&p)) - .unwrap_or(false) - }) - .then_some(files) - .unwrap_or_else(|| [].to_vec()) - } else { - files - }; - - lint_files(factory, lint_options, lint_paths).await?; - Ok(()) - }) - }, - ) - .await?; - } else { - let factory = CliFactory::from_flags(flags).await?; - let cli_options = factory.cli_options(); - let is_stdin = lint_flags.is_stdin(); - let lint_options = cli_options.resolve_lint_options(lint_flags)?; - let files = &lint_options.files; - let success = if is_stdin { - let reporter_kind = lint_options.reporter_kind; - let reporter_lock = Arc::new(Mutex::new(create_reporter(reporter_kind))); - let lint_rules = get_config_rules_err_empty(lint_options.rules)?; - let file_path = cli_options.initial_cwd().join(STDIN_FILE_NAME); - let r = lint_stdin(&file_path, lint_rules); - let success = handle_lint_result( - &file_path.to_string_lossy(), - r, - reporter_lock.clone(), - ); - reporter_lock.lock().unwrap().close(1); - success - } else { - let target_files = - collect_lint_files(files.clone()).and_then(|files| { - if files.is_empty() { - Err(generic_error("No target files found.")) - } else { - Ok(files) - } - })?; - debug!("Found {} files", target_files.len()); - lint_files(factory, lint_options, target_files).await? - }; - if !success { - std::process::exit(1); - } - } - - Ok(()) -} - -async fn lint_files( - factory: CliFactory, - lint_options: LintOptions, - paths: Vec, -) -> Result { - let caches = factory.caches()?; - let lint_rules = get_config_rules_err_empty(lint_options.rules)?; - let incremental_cache = Arc::new(IncrementalCache::new( - caches.lint_incremental_cache_db(), - // use a hash of the rule names in order to bust the cache - &{ - // ensure this is stable by sorting it - let mut names = lint_rules.iter().map(|r| r.code()).collect::>(); - names.sort_unstable(); - names - }, - &paths, - )); - let target_files_len = paths.len(); - let reporter_kind = lint_options.reporter_kind; - let reporter_lock = - Arc::new(Mutex::new(create_reporter(reporter_kind.clone()))); - let has_error = Arc::new(AtomicFlag::default()); - - run_parallelized(paths, { - let has_error = has_error.clone(); - let lint_rules = lint_rules.clone(); - let reporter_lock = reporter_lock.clone(); - let incremental_cache = incremental_cache.clone(); - move |file_path| { - let file_text = fs::read_to_string(&file_path)?; - - // don't bother rechecking this file if it didn't have any diagnostics before - if incremental_cache.is_file_same(&file_path, &file_text) { - return Ok(()); - } - - let r = lint_file(&file_path, file_text, lint_rules); - if let Ok((file_diagnostics, file_source)) = &r { - if file_diagnostics.is_empty() { - // update the incremental cache if there were no diagnostics - incremental_cache - .update_file(&file_path, file_source.text_info().text_str()) - } - } - - let success = handle_lint_result( - &file_path.to_string_lossy(), - r, - reporter_lock.clone(), - ); - if !success { - has_error.raise(); - } - - Ok(()) - } - }) - .await?; - 
incremental_cache.wait_completion().await; - reporter_lock.lock().unwrap().close(target_files_len); - - Ok(!has_error.is_raised()) -} - -fn collect_lint_files(files: FilePatterns) -> Result, AnyError> { - FileCollector::new(|path, _| is_script_ext(path)) - .ignore_git_folder() - .ignore_node_modules() - .ignore_vendor_folder() - .collect_file_patterns(files) -} - -pub fn print_rules_list(json: bool, maybe_rules_tags: Option>) { - let lint_rules = if maybe_rules_tags.is_none() { - rules::get_all_rules() - } else { - rules::get_filtered_rules(maybe_rules_tags, None, None) - }; - - if json { - let json_rules: Vec = lint_rules - .iter() - .map(|rule| { - serde_json::json!({ - "code": rule.code(), - "tags": rule.tags(), - "docs": rule.docs(), - }) - }) - .collect(); - let json_str = serde_json::to_string_pretty(&json_rules).unwrap(); - println!("{json_str}"); - } else { - // The rules should still be printed even if `--quiet` option is enabled, - // so use `println!` here instead of `info!`. - println!("Available rules:"); - for rule in lint_rules.iter() { - print!(" - {}", colors::cyan(rule.code())); - if rule.tags().is_empty() { - println!(); - } else { - println!(" [{}]", colors::gray(rule.tags().join(", "))) - } - println!( - "{}", - colors::gray(format!( - " help: https://lint.deno.land/#{}", - rule.code() - )) - ); - println!(); - } - } -} - -pub fn create_linter(rules: Vec<&'static dyn LintRule>) -> Linter { - LinterBuilder::default() - .ignore_file_directive("deno-lint-ignore-file") - .ignore_diagnostic_directive("deno-lint-ignore") - .rules(rules) - .build() -} - -fn lint_file( - file_path: &Path, - source_code: String, - lint_rules: Vec<&'static dyn LintRule>, -) -> Result<(Vec, ParsedSource), AnyError> { - let specifier = specifier_from_file_path(file_path)?; - let media_type = MediaType::from_specifier(&specifier); - - let linter = create_linter(lint_rules); - - let (source, file_diagnostics) = linter.lint_file(LintFileOptions { - specifier, - media_type, - source_code: source_code.clone(), - })?; - - Ok((file_diagnostics, source)) -} - -/// Lint stdin and write result to stdout. -/// Treats input as TypeScript. -/// Compatible with `--json` flag. 
-fn lint_stdin( - file_path: &Path, - lint_rules: Vec<&'static dyn LintRule>, -) -> Result<(Vec, ParsedSource), AnyError> { - let mut source_code = String::new(); - if stdin().read_to_string(&mut source_code).is_err() { - return Err(generic_error("Failed to read from stdin")); - } - - let linter = create_linter(lint_rules); - - let (source, file_diagnostics) = linter.lint_file(LintFileOptions { - specifier: specifier_from_file_path(file_path)?, - source_code: source_code.clone(), - media_type: MediaType::TypeScript, - })?; - - Ok((file_diagnostics, source)) -} - -fn handle_lint_result( - file_path: &str, - result: Result<(Vec, ParsedSource), AnyError>, - reporter_lock: Arc>>, -) -> bool { - let mut reporter = reporter_lock.lock().unwrap(); - - match result { - Ok((mut file_diagnostics, source)) => { - file_diagnostics.sort_by(|a, b| match a.specifier.cmp(&b.specifier) { - std::cmp::Ordering::Equal => a.range.start.cmp(&b.range.start), - file_order => file_order, - }); - for d in file_diagnostics.iter() { - reporter.visit_diagnostic(d, &source); - } - file_diagnostics.is_empty() - } - Err(err) => { - reporter.visit_error(file_path, &err); - false - } - } -} - -trait LintReporter { - fn visit_diagnostic(&mut self, d: &LintDiagnostic, source: &ParsedSource); - fn visit_error(&mut self, file_path: &str, err: &AnyError); - fn close(&mut self, check_count: usize); -} - -#[derive(Serialize)] -struct LintError { - file_path: String, - message: String, -} - -struct PrettyLintReporter { - lint_count: u32, -} - -impl PrettyLintReporter { - fn new() -> PrettyLintReporter { - PrettyLintReporter { lint_count: 0 } - } -} - -impl LintReporter for PrettyLintReporter { - fn visit_diagnostic(&mut self, d: &LintDiagnostic, _source: &ParsedSource) { - self.lint_count += 1; - - eprintln!("{}", d.display()); - } - - fn visit_error(&mut self, file_path: &str, err: &AnyError) { - eprintln!("Error linting: {file_path}"); - eprintln!(" {err}"); - } - - fn close(&mut self, check_count: usize) { - match self.lint_count { - 1 => info!("Found 1 problem"), - n if n > 1 => info!("Found {} problems", self.lint_count), - _ => (), - } - - match check_count { - n if n <= 1 => info!("Checked {} file", n), - n if n > 1 => info!("Checked {} files", n), - _ => unreachable!(), - } - } -} - -struct CompactLintReporter { - lint_count: u32, -} - -impl CompactLintReporter { - fn new() -> CompactLintReporter { - CompactLintReporter { lint_count: 0 } - } -} - -impl LintReporter for CompactLintReporter { - fn visit_diagnostic(&mut self, d: &LintDiagnostic, _source: &ParsedSource) { - self.lint_count += 1; - - let line_and_column = d.text_info.line_and_column_display(d.range.start); - eprintln!( - "{}: line {}, col {} - {} ({})", - d.specifier, - line_and_column.line_number, - line_and_column.column_number, - d.message, - d.code - ) - } - - fn visit_error(&mut self, file_path: &str, err: &AnyError) { - eprintln!("Error linting: {file_path}"); - eprintln!(" {err}"); - } - - fn close(&mut self, check_count: usize) { - match self.lint_count { - 1 => info!("Found 1 problem"), - n if n > 1 => info!("Found {} problems", self.lint_count), - _ => (), - } - - match check_count { - n if n <= 1 => info!("Checked {} file", n), - n if n > 1 => info!("Checked {} files", n), - _ => unreachable!(), - } - } -} - -// WARNING: Ensure doesn't change because it's used in the JSON output -#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize)] -#[serde(rename_all = "camelCase")] -pub struct JsonDiagnosticLintPosition { - /// The 1-indexed line number. 
- pub line: usize, - /// The 0-indexed column index. - pub col: usize, - pub byte_pos: usize, -} - -impl JsonDiagnosticLintPosition { - pub fn new(byte_index: usize, loc: deno_ast::LineAndColumnIndex) -> Self { - JsonDiagnosticLintPosition { - line: loc.line_index + 1, - col: loc.column_index, - byte_pos: byte_index, - } - } -} - -// WARNING: Ensure doesn't change because it's used in the JSON output -#[derive(Debug, Clone, PartialEq, Eq, Serialize)] -struct JsonLintDiagnosticRange { - pub start: JsonDiagnosticLintPosition, - pub end: JsonDiagnosticLintPosition, -} - -// WARNING: Ensure doesn't change because it's used in the JSON output -#[derive(Clone, Serialize)] -struct JsonLintDiagnostic { - pub filename: String, - pub range: JsonLintDiagnosticRange, - pub message: String, - pub code: String, - pub hint: Option, -} - -#[derive(Serialize)] -struct JsonLintReporter { - diagnostics: Vec, - errors: Vec, -} - -impl JsonLintReporter { - fn new() -> JsonLintReporter { - JsonLintReporter { - diagnostics: Vec::new(), - errors: Vec::new(), - } - } -} - -impl LintReporter for JsonLintReporter { - fn visit_diagnostic(&mut self, d: &LintDiagnostic, _source: &ParsedSource) { - self.diagnostics.push(JsonLintDiagnostic { - filename: d.specifier.to_string(), - range: JsonLintDiagnosticRange { - start: JsonDiagnosticLintPosition::new( - d.range.start.as_byte_index(d.text_info.range().start), - d.text_info.line_and_column_index(d.range.start), - ), - end: JsonDiagnosticLintPosition::new( - d.range.end.as_byte_index(d.text_info.range().start), - d.text_info.line_and_column_index(d.range.end), - ), - }, - message: d.message.clone(), - code: d.code.clone(), - hint: d.hint.clone(), - }); - } - - fn visit_error(&mut self, file_path: &str, err: &AnyError) { - self.errors.push(LintError { - file_path: file_path.to_string(), - message: err.to_string(), - }); - } - - fn close(&mut self, _check_count: usize) { - sort_diagnostics(&mut self.diagnostics); - let json = serde_json::to_string_pretty(&self); - println!("{}", json.unwrap()); - } -} - -fn sort_diagnostics(diagnostics: &mut [JsonLintDiagnostic]) { - // Sort so that we guarantee a deterministic output which is useful for tests - diagnostics.sort_by(|a, b| { - use std::cmp::Ordering; - let file_order = a.filename.cmp(&b.filename); - match file_order { - Ordering::Equal => { - let line_order = a.range.start.line.cmp(&b.range.start.line); - match line_order { - Ordering::Equal => a.range.start.col.cmp(&b.range.start.col), - _ => line_order, - } - } - _ => file_order, - } - }); -} - -fn get_config_rules_err_empty( - rules: LintRulesConfig, -) -> Result, AnyError> { - let lint_rules = get_configured_rules(rules); - if lint_rules.is_empty() { - bail!("No rules have been configured") - } - Ok(lint_rules) -} - -pub fn get_configured_rules( - rules: LintRulesConfig, -) -> Vec<&'static dyn LintRule> { - if rules.tags.is_none() && rules.include.is_none() && rules.exclude.is_none() - { - rules::get_recommended_rules() - } else { - rules::get_filtered_rules( - rules.tags.or_else(|| Some(vec!["recommended".to_string()])), - rules.exclude, - rules.include, - ) - } -} - -#[cfg(test)] -mod test { - use deno_lint::rules::get_recommended_rules; - - use super::*; - use crate::args::LintRulesConfig; - - #[test] - fn recommended_rules_when_no_tags_in_config() { - let rules_config = LintRulesConfig { - exclude: Some(vec!["no-debugger".to_string()]), - include: None, - tags: None, - }; - let rules = get_configured_rules(rules_config); - let mut rule_names = rules - .into_iter() - 
.map(|r| r.code().to_string()) - .collect::>(); - rule_names.sort(); - let mut recommended_rule_names = get_recommended_rules() - .into_iter() - .map(|r| r.code().to_string()) - .filter(|n| n != "no-debugger") - .collect::>(); - recommended_rule_names.sort(); - assert_eq!(rule_names, recommended_rule_names); - } -} diff --git a/cli/tools/lint/mod.rs b/cli/tools/lint/mod.rs new file mode 100644 index 000000000..e4a88f91c --- /dev/null +++ b/cli/tools/lint/mod.rs @@ -0,0 +1,798 @@ +// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. + +//! This module provides file linting utilities using +//! [`deno_lint`](https://github.com/denoland/deno_lint). +use deno_ast::diagnostics::Diagnostic; +use deno_ast::MediaType; +use deno_ast::ModuleSpecifier; +use deno_ast::ParsedSource; +use deno_ast::SourceRange; +use deno_ast::SourceTextInfo; +use deno_config::glob::FilePatterns; +use deno_core::anyhow::bail; +use deno_core::error::generic_error; +use deno_core::error::AnyError; +use deno_core::parking_lot::Mutex; +use deno_core::serde_json; +use deno_graph::FastCheckDiagnostic; +use deno_lint::diagnostic::LintDiagnostic; +use deno_lint::linter::LintFileOptions; +use deno_lint::linter::Linter; +use deno_lint::linter::LinterBuilder; +use deno_lint::rules; +use deno_lint::rules::LintRule; +use log::debug; +use log::info; +use serde::Serialize; +use std::borrow::Cow; +use std::collections::HashSet; +use std::fs; +use std::io::stdin; +use std::io::Read; +use std::path::Path; +use std::path::PathBuf; +use std::sync::Arc; + +use crate::args::Flags; +use crate::args::LintFlags; +use crate::args::LintOptions; +use crate::args::LintReporterKind; +use crate::args::LintRulesConfig; +use crate::cache::IncrementalCache; +use crate::colors; +use crate::factory::CliFactory; +use crate::tools::fmt::run_parallelized; +use crate::util::file_watcher; +use crate::util::fs::canonicalize_path; +use crate::util::fs::specifier_from_file_path; +use crate::util::fs::FileCollector; +use crate::util::path::is_script_ext; +use crate::util::sync::AtomicFlag; + +pub mod no_slow_types; + +static STDIN_FILE_NAME: &str = "$deno$stdin.ts"; + +fn create_reporter(kind: LintReporterKind) -> Box { + match kind { + LintReporterKind::Pretty => Box::new(PrettyLintReporter::new()), + LintReporterKind::Json => Box::new(JsonLintReporter::new()), + LintReporterKind::Compact => Box::new(CompactLintReporter::new()), + } +} + +pub async fn lint(flags: Flags, lint_flags: LintFlags) -> Result<(), AnyError> { + if let Some(watch_flags) = &lint_flags.watch { + if lint_flags.is_stdin() { + return Err(generic_error( + "Lint watch on standard input is not supported.", + )); + } + file_watcher::watch_func( + flags, + file_watcher::PrintConfig::new("Lint", !watch_flags.no_clear_screen), + move |flags, watcher_communicator, changed_paths| { + let lint_flags = lint_flags.clone(); + Ok(async move { + let factory = CliFactory::from_flags(flags).await?; + let cli_options = factory.cli_options(); + let lint_options = cli_options.resolve_lint_options(lint_flags)?; + let files = collect_lint_files(lint_options.files.clone()).and_then( + |files| { + if files.is_empty() { + Err(generic_error("No target files found.")) + } else { + Ok(files) + } + }, + )?; + _ = watcher_communicator.watch_paths(files.clone()); + + let lint_paths = if let Some(paths) = changed_paths { + // lint all files on any changed (https://github.com/denoland/deno/issues/12446) + files + .iter() + .any(|path| { + canonicalize_path(path) + .map(|p| paths.contains(&p)) + 
.unwrap_or(false) + }) + .then_some(files) + .unwrap_or_else(|| [].to_vec()) + } else { + files + }; + + lint_files(factory, lint_options, lint_paths).await?; + Ok(()) + }) + }, + ) + .await?; + } else { + let factory = CliFactory::from_flags(flags).await?; + let cli_options = factory.cli_options(); + let is_stdin = lint_flags.is_stdin(); + let lint_options = cli_options.resolve_lint_options(lint_flags)?; + let files = &lint_options.files; + let success = if is_stdin { + let reporter_kind = lint_options.reporter_kind; + let reporter_lock = Arc::new(Mutex::new(create_reporter(reporter_kind))); + let lint_rules = get_config_rules_err_empty( + lint_options.rules, + cli_options.maybe_config_file().as_ref(), + )?; + let file_path = cli_options.initial_cwd().join(STDIN_FILE_NAME); + let r = lint_stdin(&file_path, lint_rules.rules); + let success = handle_lint_result( + &file_path.to_string_lossy(), + r, + reporter_lock.clone(), + ); + reporter_lock.lock().close(1); + success + } else { + let target_files = + collect_lint_files(files.clone()).and_then(|files| { + if files.is_empty() { + Err(generic_error("No target files found.")) + } else { + Ok(files) + } + })?; + debug!("Found {} files", target_files.len()); + lint_files(factory, lint_options, target_files).await? + }; + if !success { + std::process::exit(1); + } + } + + Ok(()) +} + +async fn lint_files( + factory: CliFactory, + lint_options: LintOptions, + paths: Vec, +) -> Result { + let caches = factory.caches()?; + let maybe_config_file = factory.cli_options().maybe_config_file().as_ref(); + let lint_rules = + get_config_rules_err_empty(lint_options.rules, maybe_config_file)?; + let incremental_cache = Arc::new(IncrementalCache::new( + caches.lint_incremental_cache_db(), + &lint_rules.incremental_cache_state(), + &paths, + )); + let target_files_len = paths.len(); + let reporter_kind = lint_options.reporter_kind; + // todo(dsherret): abstract away this lock behind a performant interface + let reporter_lock = + Arc::new(Mutex::new(create_reporter(reporter_kind.clone()))); + let has_error = Arc::new(AtomicFlag::default()); + + let mut futures = Vec::with_capacity(2); + if lint_rules.no_slow_types { + if let Some(config_file) = maybe_config_file { + let members = config_file.to_workspace_members()?; + let has_error = has_error.clone(); + let reporter_lock = reporter_lock.clone(); + let module_graph_builder = factory.module_graph_builder().await?.clone(); + let path_urls = paths + .iter() + .filter_map(|p| ModuleSpecifier::from_file_path(p).ok()) + .collect::>(); + futures.push(deno_core::unsync::spawn(async move { + let graph = module_graph_builder.create_publish_graph(&members).await?; + // todo(dsherret): this isn't exactly correct as linting isn't properly + // setup to handle workspaces. Iterating over the workspace members + // should be done at a higher level because it also needs to take into + // account the config per workspace member. 
+ for member in &members { + let export_urls = member.config_file.resolve_export_value_urls()?; + if !export_urls.iter().any(|url| path_urls.contains(url)) { + continue; // entrypoint is not specified, so skip + } + let diagnostics = no_slow_types::collect_no_slow_type_diagnostics( + &export_urls, + &graph, + ); + if !diagnostics.is_empty() { + has_error.raise(); + let mut reporter = reporter_lock.lock(); + for diagnostic in &diagnostics { + reporter + .visit_diagnostic(LintOrCliDiagnostic::FastCheck(diagnostic)); + } + } + } + Ok(()) + })); + } + } + + futures.push({ + let has_error = has_error.clone(); + let lint_rules = lint_rules.rules.clone(); + let reporter_lock = reporter_lock.clone(); + let incremental_cache = incremental_cache.clone(); + deno_core::unsync::spawn(async move { + run_parallelized(paths, { + move |file_path| { + let file_text = fs::read_to_string(&file_path)?; + + // don't bother rechecking this file if it didn't have any diagnostics before + if incremental_cache.is_file_same(&file_path, &file_text) { + return Ok(()); + } + + let r = lint_file(&file_path, file_text, lint_rules); + if let Ok((file_diagnostics, file_source)) = &r { + if file_diagnostics.is_empty() { + // update the incremental cache if there were no diagnostics + incremental_cache + .update_file(&file_path, file_source.text_info().text_str()) + } + } + + let success = handle_lint_result( + &file_path.to_string_lossy(), + r, + reporter_lock.clone(), + ); + if !success { + has_error.raise(); + } + + Ok(()) + } + }) + .await + }) + }); + + deno_core::futures::future::try_join_all(futures).await?; + + incremental_cache.wait_completion().await; + reporter_lock.lock().close(target_files_len); + + Ok(!has_error.is_raised()) +} + +fn collect_lint_files(files: FilePatterns) -> Result, AnyError> { + FileCollector::new(|path, _| is_script_ext(path)) + .ignore_git_folder() + .ignore_node_modules() + .ignore_vendor_folder() + .collect_file_patterns(files) +} + +pub fn print_rules_list(json: bool, maybe_rules_tags: Option>) { + let lint_rules = if maybe_rules_tags.is_none() { + rules::get_all_rules() + } else { + rules::get_filtered_rules(maybe_rules_tags, None, None) + }; + + if json { + let json_rules: Vec = lint_rules + .iter() + .map(|rule| { + serde_json::json!({ + "code": rule.code(), + "tags": rule.tags(), + "docs": rule.docs(), + }) + }) + .collect(); + let json_str = serde_json::to_string_pretty(&json_rules).unwrap(); + println!("{json_str}"); + } else { + // The rules should still be printed even if `--quiet` option is enabled, + // so use `println!` here instead of `info!`. 
+ println!("Available rules:"); + for rule in lint_rules.iter() { + print!(" - {}", colors::cyan(rule.code())); + if rule.tags().is_empty() { + println!(); + } else { + println!(" [{}]", colors::gray(rule.tags().join(", "))) + } + println!( + "{}", + colors::gray(format!( + " help: https://lint.deno.land/#{}", + rule.code() + )) + ); + println!(); + } + } +} + +pub fn create_linter(rules: Vec<&'static dyn LintRule>) -> Linter { + LinterBuilder::default() + .ignore_file_directive("deno-lint-ignore-file") + .ignore_diagnostic_directive("deno-lint-ignore") + .rules(rules) + .build() +} + +fn lint_file( + file_path: &Path, + source_code: String, + lint_rules: Vec<&'static dyn LintRule>, +) -> Result<(Vec, ParsedSource), AnyError> { + let specifier = specifier_from_file_path(file_path)?; + let media_type = MediaType::from_specifier(&specifier); + + let linter = create_linter(lint_rules); + + let (source, file_diagnostics) = linter.lint_file(LintFileOptions { + specifier, + media_type, + source_code: source_code.clone(), + })?; + + Ok((file_diagnostics, source)) +} + +/// Lint stdin and write result to stdout. +/// Treats input as TypeScript. +/// Compatible with `--json` flag. +fn lint_stdin( + file_path: &Path, + lint_rules: Vec<&'static dyn LintRule>, +) -> Result<(Vec, ParsedSource), AnyError> { + let mut source_code = String::new(); + if stdin().read_to_string(&mut source_code).is_err() { + return Err(generic_error("Failed to read from stdin")); + } + + let linter = create_linter(lint_rules); + + let (source, file_diagnostics) = linter.lint_file(LintFileOptions { + specifier: specifier_from_file_path(file_path)?, + source_code: source_code.clone(), + media_type: MediaType::TypeScript, + })?; + + Ok((file_diagnostics, source)) +} + +fn handle_lint_result( + file_path: &str, + result: Result<(Vec, ParsedSource), AnyError>, + reporter_lock: Arc>>, +) -> bool { + let mut reporter = reporter_lock.lock(); + + match result { + Ok((mut file_diagnostics, _source)) => { + file_diagnostics.sort_by(|a, b| match a.specifier.cmp(&b.specifier) { + std::cmp::Ordering::Equal => a.range.start.cmp(&b.range.start), + file_order => file_order, + }); + for d in &file_diagnostics { + reporter.visit_diagnostic(LintOrCliDiagnostic::Lint(d)); + } + file_diagnostics.is_empty() + } + Err(err) => { + reporter.visit_error(file_path, &err); + false + } + } +} + +#[derive(Clone, Copy)] +pub enum LintOrCliDiagnostic<'a> { + Lint(&'a LintDiagnostic), + FastCheck(&'a FastCheckDiagnostic), +} + +impl<'a> LintOrCliDiagnostic<'a> { + pub fn specifier(&self) -> &ModuleSpecifier { + match self { + LintOrCliDiagnostic::Lint(d) => &d.specifier, + LintOrCliDiagnostic::FastCheck(d) => d.specifier(), + } + } + + pub fn range(&self) -> Option<(&SourceTextInfo, SourceRange)> { + match self { + LintOrCliDiagnostic::Lint(d) => Some((&d.text_info, d.range)), + LintOrCliDiagnostic::FastCheck(d) => { + d.range().map(|r| (&r.text_info, r.range)) + } + } + } +} + +impl<'a> deno_ast::diagnostics::Diagnostic for LintOrCliDiagnostic<'a> { + fn level(&self) -> deno_ast::diagnostics::DiagnosticLevel { + match self { + LintOrCliDiagnostic::Lint(d) => d.level(), + LintOrCliDiagnostic::FastCheck(d) => d.level(), + } + } + + fn code(&self) -> Cow<'_, str> { + match self { + LintOrCliDiagnostic::Lint(d) => d.code(), + LintOrCliDiagnostic::FastCheck(_) => Cow::Borrowed("no-slow-types"), + } + } + + fn message(&self) -> Cow<'_, str> { + match self { + LintOrCliDiagnostic::Lint(d) => d.message(), + LintOrCliDiagnostic::FastCheck(d) => d.message(), + } + } 
+ + fn location(&self) -> deno_ast::diagnostics::DiagnosticLocation { + match self { + LintOrCliDiagnostic::Lint(d) => d.location(), + LintOrCliDiagnostic::FastCheck(d) => d.location(), + } + } + + fn snippet(&self) -> Option> { + match self { + LintOrCliDiagnostic::Lint(d) => d.snippet(), + LintOrCliDiagnostic::FastCheck(d) => d.snippet(), + } + } + + fn hint(&self) -> Option> { + match self { + LintOrCliDiagnostic::Lint(d) => d.hint(), + LintOrCliDiagnostic::FastCheck(d) => d.hint(), + } + } + + fn snippet_fixed( + &self, + ) -> Option> { + match self { + LintOrCliDiagnostic::Lint(d) => d.snippet_fixed(), + LintOrCliDiagnostic::FastCheck(d) => d.snippet_fixed(), + } + } + + fn info(&self) -> Cow<'_, [Cow<'_, str>]> { + match self { + LintOrCliDiagnostic::Lint(d) => d.info(), + LintOrCliDiagnostic::FastCheck(d) => d.info(), + } + } + + fn docs_url(&self) -> Option> { + match self { + LintOrCliDiagnostic::Lint(d) => d.docs_url(), + LintOrCliDiagnostic::FastCheck(d) => d.docs_url(), + } + } +} + +trait LintReporter { + fn visit_diagnostic(&mut self, d: LintOrCliDiagnostic); + fn visit_error(&mut self, file_path: &str, err: &AnyError); + fn close(&mut self, check_count: usize); +} + +#[derive(Serialize)] +struct LintError { + file_path: String, + message: String, +} + +struct PrettyLintReporter { + lint_count: u32, +} + +impl PrettyLintReporter { + fn new() -> PrettyLintReporter { + PrettyLintReporter { lint_count: 0 } + } +} + +impl LintReporter for PrettyLintReporter { + fn visit_diagnostic(&mut self, d: LintOrCliDiagnostic) { + self.lint_count += 1; + + eprintln!("{}", d.display()); + } + + fn visit_error(&mut self, file_path: &str, err: &AnyError) { + eprintln!("Error linting: {file_path}"); + eprintln!(" {err}"); + } + + fn close(&mut self, check_count: usize) { + match self.lint_count { + 1 => info!("Found 1 problem"), + n if n > 1 => info!("Found {} problems", self.lint_count), + _ => (), + } + + match check_count { + n if n <= 1 => info!("Checked {} file", n), + n if n > 1 => info!("Checked {} files", n), + _ => unreachable!(), + } + } +} + +struct CompactLintReporter { + lint_count: u32, +} + +impl CompactLintReporter { + fn new() -> CompactLintReporter { + CompactLintReporter { lint_count: 0 } + } +} + +impl LintReporter for CompactLintReporter { + fn visit_diagnostic(&mut self, d: LintOrCliDiagnostic) { + self.lint_count += 1; + + match d.range() { + Some((text_info, range)) => { + let line_and_column = text_info.line_and_column_display(range.start); + eprintln!( + "{}: line {}, col {} - {} ({})", + d.specifier(), + line_and_column.line_number, + line_and_column.column_number, + d.message(), + d.code(), + ) + } + None => { + eprintln!("{}: {} ({})", d.specifier(), d.message(), d.code()) + } + } + } + + fn visit_error(&mut self, file_path: &str, err: &AnyError) { + eprintln!("Error linting: {file_path}"); + eprintln!(" {err}"); + } + + fn close(&mut self, check_count: usize) { + match self.lint_count { + 1 => info!("Found 1 problem"), + n if n > 1 => info!("Found {} problems", self.lint_count), + _ => (), + } + + match check_count { + n if n <= 1 => info!("Checked {} file", n), + n if n > 1 => info!("Checked {} files", n), + _ => unreachable!(), + } + } +} + +// WARNING: Ensure doesn't change because it's used in the JSON output +#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize)] +#[serde(rename_all = "camelCase")] +pub struct JsonDiagnosticLintPosition { + /// The 1-indexed line number. + pub line: usize, + /// The 0-indexed column index. 
+ pub col: usize, + pub byte_pos: usize, +} + +impl JsonDiagnosticLintPosition { + pub fn new(byte_index: usize, loc: deno_ast::LineAndColumnIndex) -> Self { + JsonDiagnosticLintPosition { + line: loc.line_index + 1, + col: loc.column_index, + byte_pos: byte_index, + } + } +} + +// WARNING: Ensure doesn't change because it's used in the JSON output +#[derive(Debug, Clone, PartialEq, Eq, Serialize)] +struct JsonLintDiagnosticRange { + pub start: JsonDiagnosticLintPosition, + pub end: JsonDiagnosticLintPosition, +} + +// WARNING: Ensure doesn't change because it's used in the JSON output +#[derive(Clone, Serialize)] +struct JsonLintDiagnostic { + pub filename: String, + pub range: Option, + pub message: String, + pub code: String, + pub hint: Option, +} + +#[derive(Serialize)] +struct JsonLintReporter { + diagnostics: Vec, + errors: Vec, +} + +impl JsonLintReporter { + fn new() -> JsonLintReporter { + JsonLintReporter { + diagnostics: Vec::new(), + errors: Vec::new(), + } + } +} + +impl LintReporter for JsonLintReporter { + fn visit_diagnostic(&mut self, d: LintOrCliDiagnostic) { + self.diagnostics.push(JsonLintDiagnostic { + filename: d.specifier().to_string(), + range: d.range().map(|(text_info, range)| JsonLintDiagnosticRange { + start: JsonDiagnosticLintPosition::new( + range.start.as_byte_index(text_info.range().start), + text_info.line_and_column_index(range.start), + ), + end: JsonDiagnosticLintPosition::new( + range.end.as_byte_index(text_info.range().start), + text_info.line_and_column_index(range.end), + ), + }), + message: d.message().to_string(), + code: d.code().to_string(), + hint: d.hint().map(|h| h.to_string()), + }); + } + + fn visit_error(&mut self, file_path: &str, err: &AnyError) { + self.errors.push(LintError { + file_path: file_path.to_string(), + message: err.to_string(), + }); + } + + fn close(&mut self, _check_count: usize) { + sort_diagnostics(&mut self.diagnostics); + let json = serde_json::to_string_pretty(&self); + println!("{}", json.unwrap()); + } +} + +fn sort_diagnostics(diagnostics: &mut [JsonLintDiagnostic]) { + // Sort so that we guarantee a deterministic output which is useful for tests + diagnostics.sort_by(|a, b| { + use std::cmp::Ordering; + let file_order = a.filename.cmp(&b.filename); + match file_order { + Ordering::Equal => match &a.range { + Some(a_range) => match &b.range { + Some(b_range) => { + let line_order = a_range.start.line.cmp(&b_range.start.line); + match line_order { + Ordering::Equal => a_range.start.col.cmp(&b_range.start.col), + _ => line_order, + } + } + None => Ordering::Less, + }, + None => match &b.range { + Some(_) => Ordering::Greater, + None => Ordering::Equal, + }, + }, + _ => file_order, + } + }); +} + +fn get_config_rules_err_empty( + rules: LintRulesConfig, + maybe_config_file: Option<&deno_config::ConfigFile>, +) -> Result { + let lint_rules = get_configured_rules(rules, maybe_config_file); + if lint_rules.rules.is_empty() { + bail!("No rules have been configured") + } + Ok(lint_rules) +} + +#[derive(Debug, Clone)] +pub struct ConfiguredRules { + pub rules: Vec<&'static dyn LintRule>, + // cli specific rules + pub no_slow_types: bool, +} + +impl ConfiguredRules { + fn incremental_cache_state(&self) -> Vec<&str> { + // use a hash of the rule names in order to bust the cache + let mut names = self.rules.iter().map(|r| r.code()).collect::>(); + // ensure this is stable by sorting it + names.sort_unstable(); + if self.no_slow_types { + names.push("no-slow-types"); + } + names + } +} + +pub fn get_configured_rules( + rules: 
LintRulesConfig, + maybe_config_file: Option<&deno_config::ConfigFile>, +) -> ConfiguredRules { + const NO_SLOW_TYPES_NAME: &str = "no-slow-types"; + let implicit_no_slow_types = maybe_config_file + .map(|c| c.is_package() || !c.json.workspaces.is_empty()) + .unwrap_or(false); + if rules.tags.is_none() && rules.include.is_none() && rules.exclude.is_none() + { + ConfiguredRules { + rules: rules::get_recommended_rules(), + no_slow_types: implicit_no_slow_types, + } + } else { + let no_slow_types = implicit_no_slow_types + && !rules + .exclude + .as_ref() + .map(|exclude| exclude.iter().any(|i| i == NO_SLOW_TYPES_NAME)) + .unwrap_or(false); + let rules = rules::get_filtered_rules( + rules.tags.or_else(|| Some(vec!["recommended".to_string()])), + rules.exclude.map(|exclude| { + exclude + .into_iter() + .filter(|c| c != NO_SLOW_TYPES_NAME) + .collect() + }), + rules.include.map(|include| { + include + .into_iter() + .filter(|c| c != NO_SLOW_TYPES_NAME) + .collect() + }), + ); + ConfiguredRules { + rules, + no_slow_types, + } + } +} + +#[cfg(test)] +mod test { + use deno_lint::rules::get_recommended_rules; + + use super::*; + use crate::args::LintRulesConfig; + + #[test] + fn recommended_rules_when_no_tags_in_config() { + let rules_config = LintRulesConfig { + exclude: Some(vec!["no-debugger".to_string()]), + include: None, + tags: None, + }; + let rules = get_configured_rules(rules_config, None); + let mut rule_names = rules + .rules + .into_iter() + .map(|r| r.code().to_string()) + .collect::>(); + rule_names.sort(); + let mut recommended_rule_names = get_recommended_rules() + .into_iter() + .map(|r| r.code().to_string()) + .filter(|n| n != "no-debugger") + .collect::>(); + recommended_rule_names.sort(); + assert_eq!(rule_names, recommended_rule_names); + } +} diff --git a/cli/tools/lint/no_slow_types.rs b/cli/tools/lint/no_slow_types.rs new file mode 100644 index 000000000..6bb108a88 --- /dev/null +++ b/cli/tools/lint/no_slow_types.rs @@ -0,0 +1,38 @@ +// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. + +use deno_ast::diagnostics::Diagnostic; +use deno_ast::ModuleSpecifier; +use deno_graph::FastCheckDiagnostic; +use deno_graph::ModuleGraph; + +/// Collects diagnostics from the module graph for the +/// given package's export URLs. 
+pub fn collect_no_slow_type_diagnostics( + package_export_urls: &[ModuleSpecifier], + graph: &ModuleGraph, +) -> Vec { + let mut js_exports = package_export_urls + .iter() + .filter_map(|url| graph.get(url).and_then(|m| m.js())); + // fast check puts the same diagnostics in each entrypoint for the + // package (since it's all or nothing), so we only need to check + // the first one JS entrypoint + let Some(module) = js_exports.next() else { + // could happen if all the exports are JSON + return vec![]; + }; + + if let Some(diagnostics) = module.fast_check_diagnostics() { + let mut diagnostics = diagnostics.clone(); + diagnostics.sort_by_cached_key(|d| { + ( + d.specifier().clone(), + d.range().map(|r| r.range), + d.code().to_string(), + ) + }); + diagnostics + } else { + Vec::new() + } +} diff --git a/cli/tools/registry/diagnostics.rs b/cli/tools/registry/diagnostics.rs index aeb5d61e2..b605c293b 100644 --- a/cli/tools/registry/diagnostics.rs +++ b/cli/tools/registry/diagnostics.rs @@ -30,7 +30,7 @@ pub struct PublishDiagnosticsCollector { impl PublishDiagnosticsCollector { pub fn print_and_error(&self) -> Result<(), AnyError> { let mut errors = 0; - let mut has_zap_errors = false; + let mut has_slow_types_errors = false; let diagnostics = self.diagnostics.lock().unwrap().take(); for diagnostic in diagnostics { eprint!("{}", diagnostic.display()); @@ -38,17 +38,23 @@ impl PublishDiagnosticsCollector { errors += 1; } if matches!(diagnostic, PublishDiagnostic::FastCheck(..)) { - has_zap_errors = true; + has_slow_types_errors = true; } } if errors > 0 { - if has_zap_errors { + if has_slow_types_errors { eprintln!( - "This package contains Zap errors. Although conforming to Zap will" + "This package contains errors for slow types. Fixing these errors will:\n" ); - eprintln!("significantly improve the type checking performance of your library,"); - eprintln!("you can choose to skip it by providing the --no-zap flag."); - eprintln!(); + eprintln!( + " 1. Significantly improve your package users' type checking performance." + ); + eprintln!(" 2. Improve the automatic documentation generation."); + eprintln!(" 3. Enable automatic .d.ts generation for Node.js."); + eprintln!( + "\nDon't want to bother? You can choose to skip this step by" + ); + eprintln!("providing the --allow-slow-types flag.\n"); } Err(anyhow!( diff --git a/cli/tools/registry/graph.rs b/cli/tools/registry/graph.rs index 3445d55e7..0310a97c6 100644 --- a/cli/tools/registry/graph.rs +++ b/cli/tools/registry/graph.rs @@ -1,17 +1,9 @@ // Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. 
use std::collections::HashSet; -use std::collections::VecDeque; use std::sync::Arc; -use deno_ast::ModuleSpecifier; use deno_ast::SourceTextInfo; -use deno_config::ConfigFile; -use deno_config::WorkspaceConfig; -use deno_core::anyhow::bail; -use deno_core::anyhow::Context; -use deno_core::error::AnyError; -use deno_graph::FastCheckDiagnostic; use deno_graph::ModuleEntryRef; use deno_graph::ModuleGraph; use deno_graph::ResolutionResolved; @@ -21,55 +13,6 @@ use lsp_types::Url; use super::diagnostics::PublishDiagnostic; use super::diagnostics::PublishDiagnosticsCollector; -#[derive(Debug)] -pub struct MemberRoots { - pub name: String, - pub dir_url: ModuleSpecifier, - pub exports: Vec, -} - -pub fn get_workspace_member_roots( - config: &WorkspaceConfig, -) -> Result, AnyError> { - let mut members = Vec::with_capacity(config.members.len()); - let mut seen_names = HashSet::with_capacity(config.members.len()); - for member in &config.members { - if !seen_names.insert(&member.package_name) { - bail!( - "Cannot have two workspace packages with the same name ('{}' at {})", - member.package_name, - member.path.display(), - ); - } - members.push(MemberRoots { - name: member.package_name.clone(), - dir_url: member.config_file.specifier.join("./").unwrap().clone(), - exports: resolve_config_file_roots_from_exports(&member.config_file)?, - }); - } - Ok(members) -} - -pub fn resolve_config_file_roots_from_exports( - config_file: &ConfigFile, -) -> Result, AnyError> { - let exports_config = config_file - .to_exports_config() - .with_context(|| { - format!("Failed to parse exports at {}", config_file.specifier) - })? - .into_map(); - let mut exports = Vec::with_capacity(exports_config.len()); - for (_, value) in exports_config { - let entry_point = - config_file.specifier.join(&value).with_context(|| { - format!("Failed to join {} with {}", config_file.specifier, value) - })?; - exports.push(entry_point); - } - Ok(exports) -} - pub fn collect_invalid_external_imports( graph: &ModuleGraph, diagnostics_collector: &PublishDiagnosticsCollector, @@ -142,63 +85,3 @@ pub fn collect_invalid_external_imports( } } } - -/// Collects diagnostics from the module graph for the given packages. -/// Returns true if any diagnostics were collected. -pub fn collect_fast_check_type_graph_diagnostics( - graph: &ModuleGraph, - packages: &[MemberRoots], - diagnostics_collector: &PublishDiagnosticsCollector, -) -> bool { - let mut had_diagnostic = false; - let mut seen_modules = HashSet::with_capacity(graph.specifiers_count()); - for package in packages { - let mut pending = VecDeque::new(); - for export in &package.exports { - if seen_modules.insert(export.clone()) { - pending.push_back(export.clone()); - } - } - - 'analyze_package: while let Some(specifier) = pending.pop_front() { - let Ok(Some(module)) = graph.try_get_prefer_types(&specifier) else { - continue; - }; - let Some(es_module) = module.js() else { - continue; - }; - if let Some(diagnostics) = es_module.fast_check_diagnostics() { - for diagnostic in diagnostics { - had_diagnostic = true; - diagnostics_collector - .push(PublishDiagnostic::FastCheck(diagnostic.clone())); - if matches!( - diagnostic, - FastCheckDiagnostic::UnsupportedJavaScriptEntrypoint { .. 
} - ) { - break 'analyze_package; // no need to keep analyzing this package - } - } - } - - // analyze the next dependencies - for dep in es_module.dependencies_prefer_fast_check().values() { - let Some(specifier) = graph.resolve_dependency_from_dep(dep, true) - else { - continue; - }; - - let dep_in_same_package = - specifier.as_str().starts_with(package.dir_url.as_str()); - if dep_in_same_package { - let is_new = seen_modules.insert(specifier.clone()); - if is_new { - pending.push_back(specifier.clone()); - } - } - } - } - } - - had_diagnostic -} diff --git a/cli/tools/registry/mod.rs b/cli/tools/registry/mod.rs index 9205d9b26..586115f27 100644 --- a/cli/tools/registry/mod.rs +++ b/cli/tools/registry/mod.rs @@ -8,6 +8,7 @@ use std::sync::Arc; use base64::prelude::BASE64_STANDARD; use base64::Engine; use deno_config::ConfigFile; +use deno_config::WorkspaceMemberConfig; use deno_core::anyhow::bail; use deno_core::anyhow::Context; use deno_core::error::AnyError; @@ -33,12 +34,10 @@ use crate::factory::CliFactory; use crate::graph_util::ModuleGraphBuilder; use crate::http_util::HttpClient; use crate::tools::check::CheckOptions; +use crate::tools::lint::no_slow_types; +use crate::tools::registry::diagnostics::PublishDiagnostic; use crate::tools::registry::diagnostics::PublishDiagnosticsCollector; -use crate::tools::registry::graph::collect_fast_check_type_graph_diagnostics; use crate::tools::registry::graph::collect_invalid_external_imports; -use crate::tools::registry::graph::get_workspace_member_roots; -use crate::tools::registry::graph::resolve_config_file_roots_from_exports; -use crate::tools::registry::graph::MemberRoots; use crate::util::display::human_size; use crate::util::import_map::ImportMapUnfurler; @@ -80,16 +79,8 @@ impl PreparedPublishPackage { static SUGGESTED_ENTRYPOINTS: [&str; 4] = ["mod.ts", "mod.js", "index.ts", "index.js"]; -fn get_deno_json_package_name( - deno_json: &ConfigFile, -) -> Result { - match deno_json.json.name.clone() { - Some(name) => Ok(name), - None => bail!("{} is missing 'name' field", deno_json.specifier), - } -} - async fn prepare_publish( + package_name: &str, deno_json: &ConfigFile, source_cache: Arc, graph: Arc, @@ -101,7 +92,6 @@ async fn prepare_publish( let Some(version) = deno_json.json.version.clone() else { bail!("{} is missing 'version' field", deno_json.specifier); }; - let name = get_deno_json_package_name(deno_json)?; if deno_json.json.exports.is_none() { let mut suggested_entrypoint = None; @@ -118,22 +108,22 @@ async fn prepare_publish( "version": "{}", "exports": "{}" }}"#, - name, + package_name, version, suggested_entrypoint.unwrap_or("") ); bail!( "You did not specify an entrypoint to \"{}\" package in {}. 
Add `exports` mapping in the configuration file, eg:\n{}", - name, + package_name, deno_json.specifier, exports_content ); } - let Some(name) = name.strip_prefix('@') else { + let Some(name_no_at) = package_name.strip_prefix('@') else { bail!("Invalid package name, use '@/ format"); }; - let Some((scope, package_name)) = name.split_once('/') else { + let Some((scope, name_no_scope)) = name_no_at.split_once('/') else { bail!("Invalid package name, use '@/ format"); }; let file_patterns = deno_json.to_publish_config()?.map(|c| c.files); @@ -152,11 +142,11 @@ async fn prepare_publish( }) .await??; - log::debug!("Tarball size ({}): {}", name, tarball.bytes.len()); + log::debug!("Tarball size ({}): {}", package_name, tarball.bytes.len()); Ok(Rc::new(PreparedPublishPackage { scope: scope.to_string(), - package: package_name.to_string(), + package: name_no_scope.to_string(), version: version.to_string(), tarball, // the config file is always at the root of a publishing dir, @@ -660,77 +650,44 @@ struct PreparePackagesData { async fn prepare_packages_for_publishing( cli_factory: &CliFactory, - no_zap: bool, + allow_slow_types: bool, diagnostics_collector: &PublishDiagnosticsCollector, deno_json: ConfigFile, import_map: Arc, ) -> Result { - let maybe_workspace_config = deno_json.to_workspace_config()?; + let members = deno_json.to_workspace_members()?; let module_graph_builder = cli_factory.module_graph_builder().await?.as_ref(); let source_cache = cli_factory.parsed_source_cache(); let type_checker = cli_factory.type_checker().await?; let cli_options = cli_factory.cli_options(); - let Some(workspace_config) = maybe_workspace_config else { - let roots = resolve_config_file_roots_from_exports(&deno_json)?; - let graph = build_and_check_graph_for_publish( - module_graph_builder, - type_checker, - cli_options, - no_zap, - diagnostics_collector, - &[MemberRoots { - name: get_deno_json_package_name(&deno_json)?, - dir_url: deno_json.specifier.join("./").unwrap().clone(), - exports: roots, - }], - ) - .await?; - let package = prepare_publish( - &deno_json, - source_cache.clone(), - graph, - import_map, - diagnostics_collector, - ) - .await?; - let package_name = format!("@{}/{}", package.scope, package.package); - let publish_order_graph = - PublishOrderGraph::new_single(package_name.clone()); - let package_by_name = HashMap::from([(package_name, package)]); - return Ok(PreparePackagesData { - publish_order_graph, - package_by_name, - }); - }; + if members.len() > 1 { + println!("Publishing a workspace..."); + } - println!("Publishing a workspace..."); // create the module graph - let roots = get_workspace_member_roots(&workspace_config)?; let graph = build_and_check_graph_for_publish( module_graph_builder, type_checker, cli_options, - no_zap, + allow_slow_types, diagnostics_collector, - &roots, + &members, ) .await?; - let mut package_by_name = - HashMap::with_capacity(workspace_config.members.len()); + let mut package_by_name = HashMap::with_capacity(members.len()); let publish_order_graph = - publish_order::build_publish_order_graph(&graph, &roots)?; + publish_order::build_publish_order_graph(&graph, &members)?; - let results = workspace_config - .members - .iter() - .cloned() + let results = members + .into_iter() .map(|member| { let import_map = import_map.clone(); let graph = graph.clone(); async move { let package = prepare_publish( + &member.package_name, &member.config_file, source_cache.clone(), graph, @@ -761,64 +718,69 @@ async fn build_and_check_graph_for_publish( module_graph_builder: 
&ModuleGraphBuilder, type_checker: &TypeChecker, cli_options: &CliOptions, - no_zap: bool, + allow_slow_types: bool, diagnostics_collector: &PublishDiagnosticsCollector, - packages: &[MemberRoots], + packages: &[WorkspaceMemberConfig], ) -> Result, deno_core::anyhow::Error> { - let graph = Arc::new( - module_graph_builder - .create_graph_with_options(crate::graph_util::CreateGraphOptions { - is_dynamic: false, - // All because we're going to use this same graph to determine the publish order later - graph_kind: deno_graph::GraphKind::All, - roots: packages - .iter() - .flat_map(|r| r.exports.iter()) - .cloned() - .collect(), - workspace_fast_check: true, - loader: None, - }) - .await?, - ); + let graph = + Arc::new(module_graph_builder.create_publish_graph(packages).await?); graph.valid()?; + // todo(dsherret): move to lint rule collect_invalid_external_imports(&graph, diagnostics_collector); - let mut has_fast_check_diagnostics = false; - if !no_zap { - log::info!("Checking fast check type graph for errors..."); - has_fast_check_diagnostics = collect_fast_check_type_graph_diagnostics( - &graph, - packages, - diagnostics_collector, + if allow_slow_types { + log::info!( + concat!( + "{} Publishing a library with slow types is not recommended. ", + "This may lead to poor type checking performance for users of ", + "your package, may affect the quality of automatic documentation ", + "generation, and your package will not be shipped with a .d.ts ", + "file for Node.js users." + ), + colors::yellow("Warning"), ); - } + } else { + log::info!("Checking for slow types in the public API..."); + let mut any_pkg_had_diagnostics = false; + for package in packages { + let export_urls = package.config_file.resolve_export_value_urls()?; + let diagnostics = + no_slow_types::collect_no_slow_type_diagnostics(&export_urls, &graph); + if !diagnostics.is_empty() { + any_pkg_had_diagnostics = true; + for diagnostic in diagnostics { + diagnostics_collector.push(PublishDiagnostic::FastCheck(diagnostic)); + } + } + } - if !has_fast_check_diagnostics { - log::info!("Ensuring type checks..."); - let diagnostics = type_checker - .check_diagnostics( - graph.clone(), - CheckOptions { - lib: cli_options.ts_type_lib_window(), - log_ignored_options: false, - reload: cli_options.reload_flag(), - }, - ) - .await?; - if !diagnostics.is_empty() { - bail!( - concat!( - "{:#}\n\n", - "You may have discovered a bug in Deno's fast check implementation. ", - "Fast check is still early days and we would appreciate if you log a ", - "bug if you believe this is one: https://github.com/denoland/deno/issues/" - ), - diagnostics - ); + if !any_pkg_had_diagnostics { + // this is a temporary measure until we know that fast check is reliable and stable + let check_diagnostics = type_checker + .check_diagnostics( + graph.clone(), + CheckOptions { + lib: cli_options.ts_type_lib_window(), + log_ignored_options: false, + reload: cli_options.reload_flag(), + }, + ) + .await?; + if !check_diagnostics.is_empty() { + bail!( + concat!( + "Failed ensuring public API type output is valid.\n\n", + "{:#}\n\n", + "You may have discovered a bug in Deno. 
Please open an issue at: ", + "https://github.com/denoland/deno/issues/" + ), + check_diagnostics + ); + } } } + Ok(graph) } @@ -852,7 +814,7 @@ pub async fn publish( let prepared_data = prepare_packages_for_publishing( &cli_factory, - publish_flags.no_zap, + publish_flags.allow_slow_types, &diagnostics_collector, config_file.clone(), import_map, @@ -866,10 +828,7 @@ pub async fn publish( } if publish_flags.dry_run { - log::warn!( - "{} Aborting due to --dry-run", - crate::colors::yellow("Warning") - ); + log::warn!("{} Aborting due to --dry-run", colors::yellow("Warning")); return Ok(()); } diff --git a/cli/tools/registry/publish_order.rs b/cli/tools/registry/publish_order.rs index bb423b2b5..ad0f72272 100644 --- a/cli/tools/registry/publish_order.rs +++ b/cli/tools/registry/publish_order.rs @@ -4,12 +4,12 @@ use std::collections::HashMap; use std::collections::HashSet; use std::collections::VecDeque; +use deno_ast::ModuleSpecifier; +use deno_config::WorkspaceMemberConfig; use deno_core::anyhow::bail; use deno_core::error::AnyError; use deno_graph::ModuleGraph; -use super::graph::MemberRoots; - pub struct PublishOrderGraph { packages: HashMap>, in_degree: HashMap, @@ -17,14 +17,6 @@ pub struct PublishOrderGraph { } impl PublishOrderGraph { - pub fn new_single(package_name: String) -> Self { - Self { - packages: HashMap::from([(package_name.clone(), HashSet::new())]), - in_degree: HashMap::from([(package_name.clone(), 0)]), - reverse_map: HashMap::from([(package_name, Vec::new())]), - } - } - pub fn next(&mut self) -> Vec { let mut package_names_with_depth = self .in_degree @@ -122,22 +114,26 @@ impl PublishOrderGraph { pub fn build_publish_order_graph( graph: &ModuleGraph, - roots: &[MemberRoots], + roots: &[WorkspaceMemberConfig], ) -> Result { - let packages = build_pkg_deps(graph, roots); + let packages = build_pkg_deps(graph, roots)?; Ok(build_publish_order_graph_from_pkgs_deps(packages)) } fn build_pkg_deps( graph: &deno_graph::ModuleGraph, - roots: &[MemberRoots], -) -> HashMap> { + roots: &[WorkspaceMemberConfig], +) -> Result>, AnyError> { let mut members = HashMap::with_capacity(roots.len()); let mut seen_modules = HashSet::with_capacity(graph.modules().count()); - for root in roots { + let roots = roots + .iter() + .map(|r| (ModuleSpecifier::from_file_path(&r.dir_path).unwrap(), r)) + .collect::>(); + for (root_dir_url, root) in &roots { let mut deps = HashSet::new(); let mut pending = VecDeque::new(); - pending.extend(root.exports.clone()); + pending.extend(root.config_file.resolve_export_value_urls()?); while let Some(specifier) = pending.pop_front() { let Some(module) = graph.get(&specifier).and_then(|m| m.js()) else { continue; @@ -163,23 +159,23 @@ fn build_pkg_deps( if specifier.scheme() != "file" { continue; } - if specifier.as_str().starts_with(root.dir_url.as_str()) { + if specifier.as_str().starts_with(root_dir_url.as_str()) { if seen_modules.insert(specifier.clone()) { pending.push_back(specifier.clone()); } } else { - let found_root = roots - .iter() - .find(|root| specifier.as_str().starts_with(root.dir_url.as_str())); + let found_root = roots.iter().find(|(dir_url, _)| { + specifier.as_str().starts_with(dir_url.as_str()) + }); if let Some(root) = found_root { - deps.insert(root.name.clone()); + deps.insert(root.1.package_name.clone()); } } } } - members.insert(root.name.clone(), deps); + members.insert(root.package_name.clone(), deps); } - members + Ok(members) } fn build_publish_order_graph_from_pkgs_deps( -- cgit v1.2.3
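For reference, a minimal TypeScript sketch of the kind of package code the new `no-slow-types` rule targets (a hypothetical `mod.ts` entrypoint of a package whose deno.json declares `name`, `version`, and `exports`; this example is illustrative and not part of the change above):

```ts
// mod.ts — hypothetical entrypoint of a JSR package.

// Reported by no-slow-types: the return type of an exported function is
// inferred, so producing the package's type declarations requires running
// full TypeScript inference on the implementation.
export function add(a: number, b: number) {
  return a + b;
}

// Accepted: the public API is explicitly annotated, so fast check can emit
// declarations (and documentation) without running inference.
export function sum(values: number[]): number {
  return values.reduce((total, value) => total + value, 0);
}
```

With this change, `deno lint` reports such diagnostics automatically for packages like the one sketched above, the rule can be excluded like any other lint rule (the patch filters `no-slow-types` out of the include/exclude lists handed to deno_lint), and `deno publish --allow-slow-types` skips the check at publish time.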