author    David Sherret <dsherret@users.noreply.github.com>    2024-02-19 10:28:41 -0500
committer GitHub <noreply@github.com>                          2024-02-19 15:28:41 +0000
commit    66424032a2c78c6010c0a1a1b22a26d081166660 (patch)
tree      610e95beba5685ef1ba322375bf31a3fd6c5a187 /cli
parent    2b279ad630651e973d5a31586f58809f005bc925 (diff)
feat(unstable/lint): no-slow-types for JSR packages (#22430)
1. Renames zap/fast-check to a `no-slow-types` lint rule.
2. The lint rule runs automatically during `deno lint` for packages (deno.json files with a name, version, and exports field).
3. The lint rule still runs on publish. It can be skipped by passing `--allow-slow-types`.
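As an illustrative sketch of that behavior (the package name, version, and entrypoint below are hypothetical, not taken from this commit), a deno.json containing those three fields is treated as a package, so the rule runs implicitly during `deno lint`, and the publish flag added in this change opts out of the check:

    // deno.json -- "name" + "version" + "exports" make this a package
    {
      "name": "@scope/my_pkg",
      "version": "0.1.0",
      "exports": "./mod.ts"
    }

    deno lint                        # no-slow-types runs automatically for this package
    deno publish --allow-slow-types  # publishes without the slow-type check

Per the `get_configured_rules` changes below, listing "no-slow-types" under `lint.rules.exclude` in that same file also disables the implicit rule for `deno lint`.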
Diffstat (limited to 'cli')
-rw-r--r--  cli/Cargo.toml                                             4
-rw-r--r--  cli/args/flags.rs                                         10
-rw-r--r--  cli/graph_util.rs                                         24
-rw-r--r--  cli/lsp/diagnostics.rs                                     6
-rw-r--r--  cli/tools/lint/mod.rs (renamed from cli/tools/lint.rs)   410
-rw-r--r--  cli/tools/lint/no_slow_types.rs                           38
-rw-r--r--  cli/tools/registry/diagnostics.rs                         20
-rw-r--r--  cli/tools/registry/graph.rs                              117
-rw-r--r--  cli/tools/registry/mod.rs                                195
-rw-r--r--  cli/tools/registry/publish_order.rs                       42
10 files changed, 492 insertions, 374 deletions
diff --git a/cli/Cargo.toml b/cli/Cargo.toml
index 349967708..a7d6780e2 100644
--- a/cli/Cargo.toml
+++ b/cli/Cargo.toml
@@ -64,11 +64,11 @@ winres.workspace = true
[dependencies]
deno_ast = { workspace = true, features = ["bundler", "cjs", "codegen", "proposal", "react", "sourcemap", "transforms", "typescript", "view", "visit"] }
deno_cache_dir = { workspace = true }
-deno_config = "=0.9.2"
+deno_config = "=0.10.0"
deno_core = { workspace = true, features = ["include_js_files_for_snapshotting"] }
deno_doc = { version = "=0.107.0", features = ["html"] }
deno_emit = "=0.37.0"
-deno_graph = "=0.66.0"
+deno_graph = "=0.66.2"
deno_lint = { version = "=0.56.0", features = ["docs"] }
deno_lockfile.workspace = true
deno_npm = "=0.17.0"
diff --git a/cli/args/flags.rs b/cli/args/flags.rs
index 3b6810a95..0ce521296 100644
--- a/cli/args/flags.rs
+++ b/cli/args/flags.rs
@@ -301,7 +301,7 @@ pub struct VendorFlags {
pub struct PublishFlags {
pub token: Option<String>,
pub dry_run: bool,
- pub no_zap: bool,
+ pub allow_slow_types: bool,
}
#[derive(Clone, Debug, Eq, PartialEq)]
@@ -2389,9 +2389,9 @@ fn publish_subcommand() -> Command {
.action(ArgAction::SetTrue),
)
.arg(
- Arg::new("no-zap")
- .long("no-zap")
- .help("Skip Zap compatibility validation")
+ Arg::new("allow-slow-types")
+ .long("allow-slow-types")
+ .help("Allow publishing with slow types")
.action(ArgAction::SetTrue),
)
})
@@ -3828,7 +3828,7 @@ fn publish_parse(flags: &mut Flags, matches: &mut ArgMatches) {
flags.subcommand = DenoSubcommand::Publish(PublishFlags {
token: matches.remove_one("token"),
dry_run: matches.get_flag("dry-run"),
- no_zap: matches.get_flag("no-zap"),
+ allow_slow_types: matches.get_flag("allow-slow-types"),
});
}
diff --git a/cli/graph_util.rs b/cli/graph_util.rs
index b9027afa3..380d82cbf 100644
--- a/cli/graph_util.rs
+++ b/cli/graph_util.rs
@@ -23,6 +23,7 @@ use crate::util::sync::TaskQueue;
use crate::util::sync::TaskQueuePermit;
use deno_config::ConfigFile;
+use deno_config::WorkspaceMemberConfig;
use deno_core::anyhow::bail;
use deno_core::anyhow::Context;
use deno_core::error::custom_error;
@@ -277,7 +278,7 @@ impl ModuleGraphBuilder {
graph_kind: GraphKind,
roots: Vec<ModuleSpecifier>,
loader: &mut dyn Loader,
- ) -> Result<deno_graph::ModuleGraph, AnyError> {
+ ) -> Result<ModuleGraph, AnyError> {
self
.create_graph_with_options(CreateGraphOptions {
is_dynamic: false,
@@ -289,10 +290,29 @@ impl ModuleGraphBuilder {
.await
}
+ pub async fn create_publish_graph(
+ &self,
+ packages: &[WorkspaceMemberConfig],
+ ) -> Result<ModuleGraph, AnyError> {
+ let mut roots = Vec::new();
+ for package in packages {
+ roots.extend(package.config_file.resolve_export_value_urls()?);
+ }
+ self
+ .create_graph_with_options(CreateGraphOptions {
+ is_dynamic: false,
+ graph_kind: deno_graph::GraphKind::All,
+ roots,
+ workspace_fast_check: true,
+ loader: None,
+ })
+ .await
+ }
+
pub async fn create_graph_with_options(
&self,
options: CreateGraphOptions<'_>,
- ) -> Result<deno_graph::ModuleGraph, AnyError> {
+ ) -> Result<ModuleGraph, AnyError> {
let mut graph = ModuleGraph::new(options.graph_kind);
self
diff --git a/cli/lsp/diagnostics.rs b/cli/lsp/diagnostics.rs
index 0b3faa551..e3e206a52 100644
--- a/cli/lsp/diagnostics.rs
+++ b/cli/lsp/diagnostics.rs
@@ -796,7 +796,11 @@ fn generate_lint_diagnostics(
let documents = snapshot
.documents
.documents(DocumentsFilter::OpenDiagnosable);
- let lint_rules = get_configured_rules(lint_options.rules.clone());
+ let lint_rules = get_configured_rules(
+ lint_options.rules.clone(),
+ config.config_file.as_ref(),
+ )
+ .rules;
let mut diagnostics_vec = Vec::new();
for document in documents {
let settings =
diff --git a/cli/tools/lint.rs b/cli/tools/lint/mod.rs
index 32b47e453..e4a88f91c 100644
--- a/cli/tools/lint.rs
+++ b/cli/tools/lint/mod.rs
@@ -2,28 +2,19 @@
//! This module provides file linting utilities using
//! [`deno_lint`](https://github.com/denoland/deno_lint).
-use crate::args::Flags;
-use crate::args::LintFlags;
-use crate::args::LintOptions;
-use crate::args::LintReporterKind;
-use crate::args::LintRulesConfig;
-use crate::colors;
-use crate::factory::CliFactory;
-use crate::tools::fmt::run_parallelized;
-use crate::util::file_watcher;
-use crate::util::fs::canonicalize_path;
-use crate::util::fs::specifier_from_file_path;
-use crate::util::fs::FileCollector;
-use crate::util::path::is_script_ext;
-use crate::util::sync::AtomicFlag;
use deno_ast::diagnostics::Diagnostic;
use deno_ast::MediaType;
+use deno_ast::ModuleSpecifier;
use deno_ast::ParsedSource;
+use deno_ast::SourceRange;
+use deno_ast::SourceTextInfo;
use deno_config::glob::FilePatterns;
use deno_core::anyhow::bail;
use deno_core::error::generic_error;
use deno_core::error::AnyError;
+use deno_core::parking_lot::Mutex;
use deno_core::serde_json;
+use deno_graph::FastCheckDiagnostic;
use deno_lint::diagnostic::LintDiagnostic;
use deno_lint::linter::LintFileOptions;
use deno_lint::linter::Linter;
@@ -33,15 +24,32 @@ use deno_lint::rules::LintRule;
use log::debug;
use log::info;
use serde::Serialize;
+use std::borrow::Cow;
+use std::collections::HashSet;
use std::fs;
use std::io::stdin;
use std::io::Read;
use std::path::Path;
use std::path::PathBuf;
use std::sync::Arc;
-use std::sync::Mutex;
+use crate::args::Flags;
+use crate::args::LintFlags;
+use crate::args::LintOptions;
+use crate::args::LintReporterKind;
+use crate::args::LintRulesConfig;
use crate::cache::IncrementalCache;
+use crate::colors;
+use crate::factory::CliFactory;
+use crate::tools::fmt::run_parallelized;
+use crate::util::file_watcher;
+use crate::util::fs::canonicalize_path;
+use crate::util::fs::specifier_from_file_path;
+use crate::util::fs::FileCollector;
+use crate::util::path::is_script_ext;
+use crate::util::sync::AtomicFlag;
+
+pub mod no_slow_types;
static STDIN_FILE_NAME: &str = "$deno$stdin.ts";
@@ -110,15 +118,18 @@ pub async fn lint(flags: Flags, lint_flags: LintFlags) -> Result<(), AnyError> {
let success = if is_stdin {
let reporter_kind = lint_options.reporter_kind;
let reporter_lock = Arc::new(Mutex::new(create_reporter(reporter_kind)));
- let lint_rules = get_config_rules_err_empty(lint_options.rules)?;
+ let lint_rules = get_config_rules_err_empty(
+ lint_options.rules,
+ cli_options.maybe_config_file().as_ref(),
+ )?;
let file_path = cli_options.initial_cwd().join(STDIN_FILE_NAME);
- let r = lint_stdin(&file_path, lint_rules);
+ let r = lint_stdin(&file_path, lint_rules.rules);
let success = handle_lint_result(
&file_path.to_string_lossy(),
r,
reporter_lock.clone(),
);
- reporter_lock.lock().unwrap().close(1);
+ reporter_lock.lock().close(1);
success
} else {
let target_files =
@@ -146,61 +157,105 @@ async fn lint_files(
paths: Vec<PathBuf>,
) -> Result<bool, AnyError> {
let caches = factory.caches()?;
- let lint_rules = get_config_rules_err_empty(lint_options.rules)?;
+ let maybe_config_file = factory.cli_options().maybe_config_file().as_ref();
+ let lint_rules =
+ get_config_rules_err_empty(lint_options.rules, maybe_config_file)?;
let incremental_cache = Arc::new(IncrementalCache::new(
caches.lint_incremental_cache_db(),
- // use a hash of the rule names in order to bust the cache
- &{
- // ensure this is stable by sorting it
- let mut names = lint_rules.iter().map(|r| r.code()).collect::<Vec<_>>();
- names.sort_unstable();
- names
- },
+ &lint_rules.incremental_cache_state(),
&paths,
));
let target_files_len = paths.len();
let reporter_kind = lint_options.reporter_kind;
+ // todo(dsherret): abstract away this lock behind a performant interface
let reporter_lock =
Arc::new(Mutex::new(create_reporter(reporter_kind.clone())));
let has_error = Arc::new(AtomicFlag::default());
- run_parallelized(paths, {
+ let mut futures = Vec::with_capacity(2);
+ if lint_rules.no_slow_types {
+ if let Some(config_file) = maybe_config_file {
+ let members = config_file.to_workspace_members()?;
+ let has_error = has_error.clone();
+ let reporter_lock = reporter_lock.clone();
+ let module_graph_builder = factory.module_graph_builder().await?.clone();
+ let path_urls = paths
+ .iter()
+ .filter_map(|p| ModuleSpecifier::from_file_path(p).ok())
+ .collect::<HashSet<_>>();
+ futures.push(deno_core::unsync::spawn(async move {
+ let graph = module_graph_builder.create_publish_graph(&members).await?;
+ // todo(dsherret): this isn't exactly correct as linting isn't properly
+ // setup to handle workspaces. Iterating over the workspace members
+ // should be done at a higher level because it also needs to take into
+ // account the config per workspace member.
+ for member in &members {
+ let export_urls = member.config_file.resolve_export_value_urls()?;
+ if !export_urls.iter().any(|url| path_urls.contains(url)) {
+ continue; // entrypoint is not specified, so skip
+ }
+ let diagnostics = no_slow_types::collect_no_slow_type_diagnostics(
+ &export_urls,
+ &graph,
+ );
+ if !diagnostics.is_empty() {
+ has_error.raise();
+ let mut reporter = reporter_lock.lock();
+ for diagnostic in &diagnostics {
+ reporter
+ .visit_diagnostic(LintOrCliDiagnostic::FastCheck(diagnostic));
+ }
+ }
+ }
+ Ok(())
+ }));
+ }
+ }
+
+ futures.push({
let has_error = has_error.clone();
- let lint_rules = lint_rules.clone();
+ let lint_rules = lint_rules.rules.clone();
let reporter_lock = reporter_lock.clone();
let incremental_cache = incremental_cache.clone();
- move |file_path| {
- let file_text = fs::read_to_string(&file_path)?;
+ deno_core::unsync::spawn(async move {
+ run_parallelized(paths, {
+ move |file_path| {
+ let file_text = fs::read_to_string(&file_path)?;
+
+ // don't bother rechecking this file if it didn't have any diagnostics before
+ if incremental_cache.is_file_same(&file_path, &file_text) {
+ return Ok(());
+ }
- // don't bother rechecking this file if it didn't have any diagnostics before
- if incremental_cache.is_file_same(&file_path, &file_text) {
- return Ok(());
- }
+ let r = lint_file(&file_path, file_text, lint_rules);
+ if let Ok((file_diagnostics, file_source)) = &r {
+ if file_diagnostics.is_empty() {
+ // update the incremental cache if there were no diagnostics
+ incremental_cache
+ .update_file(&file_path, file_source.text_info().text_str())
+ }
+ }
- let r = lint_file(&file_path, file_text, lint_rules);
- if let Ok((file_diagnostics, file_source)) = &r {
- if file_diagnostics.is_empty() {
- // update the incremental cache if there were no diagnostics
- incremental_cache
- .update_file(&file_path, file_source.text_info().text_str())
+ let success = handle_lint_result(
+ &file_path.to_string_lossy(),
+ r,
+ reporter_lock.clone(),
+ );
+ if !success {
+ has_error.raise();
+ }
+
+ Ok(())
}
- }
+ })
+ .await
+ })
+ });
- let success = handle_lint_result(
- &file_path.to_string_lossy(),
- r,
- reporter_lock.clone(),
- );
- if !success {
- has_error.raise();
- }
+ deno_core::futures::future::try_join_all(futures).await?;
- Ok(())
- }
- })
- .await?;
incremental_cache.wait_completion().await;
- reporter_lock.lock().unwrap().close(target_files_len);
+ reporter_lock.lock().close(target_files_len);
Ok(!has_error.is_raised())
}
@@ -311,16 +366,16 @@ fn handle_lint_result(
result: Result<(Vec<LintDiagnostic>, ParsedSource), AnyError>,
reporter_lock: Arc<Mutex<Box<dyn LintReporter + Send>>>,
) -> bool {
- let mut reporter = reporter_lock.lock().unwrap();
+ let mut reporter = reporter_lock.lock();
match result {
- Ok((mut file_diagnostics, source)) => {
+ Ok((mut file_diagnostics, _source)) => {
file_diagnostics.sort_by(|a, b| match a.specifier.cmp(&b.specifier) {
std::cmp::Ordering::Equal => a.range.start.cmp(&b.range.start),
file_order => file_order,
});
- for d in file_diagnostics.iter() {
- reporter.visit_diagnostic(d, &source);
+ for d in &file_diagnostics {
+ reporter.visit_diagnostic(LintOrCliDiagnostic::Lint(d));
}
file_diagnostics.is_empty()
}
@@ -331,8 +386,99 @@ fn handle_lint_result(
}
}
+#[derive(Clone, Copy)]
+pub enum LintOrCliDiagnostic<'a> {
+ Lint(&'a LintDiagnostic),
+ FastCheck(&'a FastCheckDiagnostic),
+}
+
+impl<'a> LintOrCliDiagnostic<'a> {
+ pub fn specifier(&self) -> &ModuleSpecifier {
+ match self {
+ LintOrCliDiagnostic::Lint(d) => &d.specifier,
+ LintOrCliDiagnostic::FastCheck(d) => d.specifier(),
+ }
+ }
+
+ pub fn range(&self) -> Option<(&SourceTextInfo, SourceRange)> {
+ match self {
+ LintOrCliDiagnostic::Lint(d) => Some((&d.text_info, d.range)),
+ LintOrCliDiagnostic::FastCheck(d) => {
+ d.range().map(|r| (&r.text_info, r.range))
+ }
+ }
+ }
+}
+
+impl<'a> deno_ast::diagnostics::Diagnostic for LintOrCliDiagnostic<'a> {
+ fn level(&self) -> deno_ast::diagnostics::DiagnosticLevel {
+ match self {
+ LintOrCliDiagnostic::Lint(d) => d.level(),
+ LintOrCliDiagnostic::FastCheck(d) => d.level(),
+ }
+ }
+
+ fn code(&self) -> Cow<'_, str> {
+ match self {
+ LintOrCliDiagnostic::Lint(d) => d.code(),
+ LintOrCliDiagnostic::FastCheck(_) => Cow::Borrowed("no-slow-types"),
+ }
+ }
+
+ fn message(&self) -> Cow<'_, str> {
+ match self {
+ LintOrCliDiagnostic::Lint(d) => d.message(),
+ LintOrCliDiagnostic::FastCheck(d) => d.message(),
+ }
+ }
+
+ fn location(&self) -> deno_ast::diagnostics::DiagnosticLocation {
+ match self {
+ LintOrCliDiagnostic::Lint(d) => d.location(),
+ LintOrCliDiagnostic::FastCheck(d) => d.location(),
+ }
+ }
+
+ fn snippet(&self) -> Option<deno_ast::diagnostics::DiagnosticSnippet<'_>> {
+ match self {
+ LintOrCliDiagnostic::Lint(d) => d.snippet(),
+ LintOrCliDiagnostic::FastCheck(d) => d.snippet(),
+ }
+ }
+
+ fn hint(&self) -> Option<Cow<'_, str>> {
+ match self {
+ LintOrCliDiagnostic::Lint(d) => d.hint(),
+ LintOrCliDiagnostic::FastCheck(d) => d.hint(),
+ }
+ }
+
+ fn snippet_fixed(
+ &self,
+ ) -> Option<deno_ast::diagnostics::DiagnosticSnippet<'_>> {
+ match self {
+ LintOrCliDiagnostic::Lint(d) => d.snippet_fixed(),
+ LintOrCliDiagnostic::FastCheck(d) => d.snippet_fixed(),
+ }
+ }
+
+ fn info(&self) -> Cow<'_, [Cow<'_, str>]> {
+ match self {
+ LintOrCliDiagnostic::Lint(d) => d.info(),
+ LintOrCliDiagnostic::FastCheck(d) => d.info(),
+ }
+ }
+
+ fn docs_url(&self) -> Option<Cow<'_, str>> {
+ match self {
+ LintOrCliDiagnostic::Lint(d) => d.docs_url(),
+ LintOrCliDiagnostic::FastCheck(d) => d.docs_url(),
+ }
+ }
+}
+
trait LintReporter {
- fn visit_diagnostic(&mut self, d: &LintDiagnostic, source: &ParsedSource);
+ fn visit_diagnostic(&mut self, d: LintOrCliDiagnostic);
fn visit_error(&mut self, file_path: &str, err: &AnyError);
fn close(&mut self, check_count: usize);
}
@@ -354,7 +500,7 @@ impl PrettyLintReporter {
}
impl LintReporter for PrettyLintReporter {
- fn visit_diagnostic(&mut self, d: &LintDiagnostic, _source: &ParsedSource) {
+ fn visit_diagnostic(&mut self, d: LintOrCliDiagnostic) {
self.lint_count += 1;
eprintln!("{}", d.display());
@@ -391,18 +537,25 @@ impl CompactLintReporter {
}
impl LintReporter for CompactLintReporter {
- fn visit_diagnostic(&mut self, d: &LintDiagnostic, _source: &ParsedSource) {
+ fn visit_diagnostic(&mut self, d: LintOrCliDiagnostic) {
self.lint_count += 1;
- let line_and_column = d.text_info.line_and_column_display(d.range.start);
- eprintln!(
- "{}: line {}, col {} - {} ({})",
- d.specifier,
- line_and_column.line_number,
- line_and_column.column_number,
- d.message,
- d.code
- )
+ match d.range() {
+ Some((text_info, range)) => {
+ let line_and_column = text_info.line_and_column_display(range.start);
+ eprintln!(
+ "{}: line {}, col {} - {} ({})",
+ d.specifier(),
+ line_and_column.line_number,
+ line_and_column.column_number,
+ d.message(),
+ d.code(),
+ )
+ }
+ None => {
+ eprintln!("{}: {} ({})", d.specifier(), d.message(), d.code())
+ }
+ }
}
fn visit_error(&mut self, file_path: &str, err: &AnyError) {
@@ -457,7 +610,7 @@ struct JsonLintDiagnosticRange {
#[derive(Clone, Serialize)]
struct JsonLintDiagnostic {
pub filename: String,
- pub range: JsonLintDiagnosticRange,
+ pub range: Option<JsonLintDiagnosticRange>,
pub message: String,
pub code: String,
pub hint: Option<String>,
@@ -479,22 +632,22 @@ impl JsonLintReporter {
}
impl LintReporter for JsonLintReporter {
- fn visit_diagnostic(&mut self, d: &LintDiagnostic, _source: &ParsedSource) {
+ fn visit_diagnostic(&mut self, d: LintOrCliDiagnostic) {
self.diagnostics.push(JsonLintDiagnostic {
- filename: d.specifier.to_string(),
- range: JsonLintDiagnosticRange {
+ filename: d.specifier().to_string(),
+ range: d.range().map(|(text_info, range)| JsonLintDiagnosticRange {
start: JsonDiagnosticLintPosition::new(
- d.range.start.as_byte_index(d.text_info.range().start),
- d.text_info.line_and_column_index(d.range.start),
+ range.start.as_byte_index(text_info.range().start),
+ text_info.line_and_column_index(range.start),
),
end: JsonDiagnosticLintPosition::new(
- d.range.end.as_byte_index(d.text_info.range().start),
- d.text_info.line_and_column_index(d.range.end),
+ range.end.as_byte_index(text_info.range().start),
+ text_info.line_and_column_index(range.end),
),
- },
- message: d.message.clone(),
- code: d.code.clone(),
- hint: d.hint.clone(),
+ }),
+ message: d.message().to_string(),
+ code: d.code().to_string(),
+ hint: d.hint().map(|h| h.to_string()),
});
}
@@ -518,13 +671,22 @@ fn sort_diagnostics(diagnostics: &mut [JsonLintDiagnostic]) {
use std::cmp::Ordering;
let file_order = a.filename.cmp(&b.filename);
match file_order {
- Ordering::Equal => {
- let line_order = a.range.start.line.cmp(&b.range.start.line);
- match line_order {
- Ordering::Equal => a.range.start.col.cmp(&b.range.start.col),
- _ => line_order,
- }
- }
+ Ordering::Equal => match &a.range {
+ Some(a_range) => match &b.range {
+ Some(b_range) => {
+ let line_order = a_range.start.line.cmp(&b_range.start.line);
+ match line_order {
+ Ordering::Equal => a_range.start.col.cmp(&b_range.start.col),
+ _ => line_order,
+ }
+ }
+ None => Ordering::Less,
+ },
+ None => match &b.range {
+ Some(_) => Ordering::Greater,
+ None => Ordering::Equal,
+ },
+ },
_ => file_order,
}
});
@@ -532,26 +694,75 @@ fn sort_diagnostics(diagnostics: &mut [JsonLintDiagnostic]) {
fn get_config_rules_err_empty(
rules: LintRulesConfig,
-) -> Result<Vec<&'static dyn LintRule>, AnyError> {
- let lint_rules = get_configured_rules(rules);
- if lint_rules.is_empty() {
+ maybe_config_file: Option<&deno_config::ConfigFile>,
+) -> Result<ConfiguredRules, AnyError> {
+ let lint_rules = get_configured_rules(rules, maybe_config_file);
+ if lint_rules.rules.is_empty() {
bail!("No rules have been configured")
}
Ok(lint_rules)
}
+#[derive(Debug, Clone)]
+pub struct ConfiguredRules {
+ pub rules: Vec<&'static dyn LintRule>,
+ // cli specific rules
+ pub no_slow_types: bool,
+}
+
+impl ConfiguredRules {
+ fn incremental_cache_state(&self) -> Vec<&str> {
+ // use a hash of the rule names in order to bust the cache
+ let mut names = self.rules.iter().map(|r| r.code()).collect::<Vec<_>>();
+ // ensure this is stable by sorting it
+ names.sort_unstable();
+ if self.no_slow_types {
+ names.push("no-slow-types");
+ }
+ names
+ }
+}
+
pub fn get_configured_rules(
rules: LintRulesConfig,
-) -> Vec<&'static dyn LintRule> {
+ maybe_config_file: Option<&deno_config::ConfigFile>,
+) -> ConfiguredRules {
+ const NO_SLOW_TYPES_NAME: &str = "no-slow-types";
+ let implicit_no_slow_types = maybe_config_file
+ .map(|c| c.is_package() || !c.json.workspaces.is_empty())
+ .unwrap_or(false);
if rules.tags.is_none() && rules.include.is_none() && rules.exclude.is_none()
{
- rules::get_recommended_rules()
+ ConfiguredRules {
+ rules: rules::get_recommended_rules(),
+ no_slow_types: implicit_no_slow_types,
+ }
} else {
- rules::get_filtered_rules(
+ let no_slow_types = implicit_no_slow_types
+ && !rules
+ .exclude
+ .as_ref()
+ .map(|exclude| exclude.iter().any(|i| i == NO_SLOW_TYPES_NAME))
+ .unwrap_or(false);
+ let rules = rules::get_filtered_rules(
rules.tags.or_else(|| Some(vec!["recommended".to_string()])),
- rules.exclude,
- rules.include,
- )
+ rules.exclude.map(|exclude| {
+ exclude
+ .into_iter()
+ .filter(|c| c != NO_SLOW_TYPES_NAME)
+ .collect()
+ }),
+ rules.include.map(|include| {
+ include
+ .into_iter()
+ .filter(|c| c != NO_SLOW_TYPES_NAME)
+ .collect()
+ }),
+ );
+ ConfiguredRules {
+ rules,
+ no_slow_types,
+ }
}
}
@@ -569,8 +780,9 @@ mod test {
include: None,
tags: None,
};
- let rules = get_configured_rules(rules_config);
+ let rules = get_configured_rules(rules_config, None);
let mut rule_names = rules
+ .rules
.into_iter()
.map(|r| r.code().to_string())
.collect::<Vec<_>>();
diff --git a/cli/tools/lint/no_slow_types.rs b/cli/tools/lint/no_slow_types.rs
new file mode 100644
index 000000000..6bb108a88
--- /dev/null
+++ b/cli/tools/lint/no_slow_types.rs
@@ -0,0 +1,38 @@
+// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
+
+use deno_ast::diagnostics::Diagnostic;
+use deno_ast::ModuleSpecifier;
+use deno_graph::FastCheckDiagnostic;
+use deno_graph::ModuleGraph;
+
+/// Collects diagnostics from the module graph for the
+/// given package's export URLs.
+pub fn collect_no_slow_type_diagnostics(
+ package_export_urls: &[ModuleSpecifier],
+ graph: &ModuleGraph,
+) -> Vec<FastCheckDiagnostic> {
+ let mut js_exports = package_export_urls
+ .iter()
+ .filter_map(|url| graph.get(url).and_then(|m| m.js()));
+ // fast check puts the same diagnostics in each entrypoint for the
+ // package (since it's all or nothing), so we only need to check
+ // the first one JS entrypoint
+ let Some(module) = js_exports.next() else {
+ // could happen if all the exports are JSON
+ return vec![];
+ };
+
+ if let Some(diagnostics) = module.fast_check_diagnostics() {
+ let mut diagnostics = diagnostics.clone();
+ diagnostics.sort_by_cached_key(|d| {
+ (
+ d.specifier().clone(),
+ d.range().map(|r| r.range),
+ d.code().to_string(),
+ )
+ });
+ diagnostics
+ } else {
+ Vec::new()
+ }
+}
diff --git a/cli/tools/registry/diagnostics.rs b/cli/tools/registry/diagnostics.rs
index aeb5d61e2..b605c293b 100644
--- a/cli/tools/registry/diagnostics.rs
+++ b/cli/tools/registry/diagnostics.rs
@@ -30,7 +30,7 @@ pub struct PublishDiagnosticsCollector {
impl PublishDiagnosticsCollector {
pub fn print_and_error(&self) -> Result<(), AnyError> {
let mut errors = 0;
- let mut has_zap_errors = false;
+ let mut has_slow_types_errors = false;
let diagnostics = self.diagnostics.lock().unwrap().take();
for diagnostic in diagnostics {
eprint!("{}", diagnostic.display());
@@ -38,17 +38,23 @@ impl PublishDiagnosticsCollector {
errors += 1;
}
if matches!(diagnostic, PublishDiagnostic::FastCheck(..)) {
- has_zap_errors = true;
+ has_slow_types_errors = true;
}
}
if errors > 0 {
- if has_zap_errors {
+ if has_slow_types_errors {
eprintln!(
- "This package contains Zap errors. Although conforming to Zap will"
+ "This package contains errors for slow types. Fixing these errors will:\n"
);
- eprintln!("significantly improve the type checking performance of your library,");
- eprintln!("you can choose to skip it by providing the --no-zap flag.");
- eprintln!();
+ eprintln!(
+ " 1. Significantly improve your package users' type checking performance."
+ );
+ eprintln!(" 2. Improve the automatic documentation generation.");
+ eprintln!(" 3. Enable automatic .d.ts generation for Node.js.");
+ eprintln!(
+ "\nDon't want to bother? You can choose to skip this step by"
+ );
+ eprintln!("providing the --allow-slow-types flag.\n");
}
Err(anyhow!(
diff --git a/cli/tools/registry/graph.rs b/cli/tools/registry/graph.rs
index 3445d55e7..0310a97c6 100644
--- a/cli/tools/registry/graph.rs
+++ b/cli/tools/registry/graph.rs
@@ -1,17 +1,9 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
use std::collections::HashSet;
-use std::collections::VecDeque;
use std::sync::Arc;
-use deno_ast::ModuleSpecifier;
use deno_ast::SourceTextInfo;
-use deno_config::ConfigFile;
-use deno_config::WorkspaceConfig;
-use deno_core::anyhow::bail;
-use deno_core::anyhow::Context;
-use deno_core::error::AnyError;
-use deno_graph::FastCheckDiagnostic;
use deno_graph::ModuleEntryRef;
use deno_graph::ModuleGraph;
use deno_graph::ResolutionResolved;
@@ -21,55 +13,6 @@ use lsp_types::Url;
use super::diagnostics::PublishDiagnostic;
use super::diagnostics::PublishDiagnosticsCollector;
-#[derive(Debug)]
-pub struct MemberRoots {
- pub name: String,
- pub dir_url: ModuleSpecifier,
- pub exports: Vec<ModuleSpecifier>,
-}
-
-pub fn get_workspace_member_roots(
- config: &WorkspaceConfig,
-) -> Result<Vec<MemberRoots>, AnyError> {
- let mut members = Vec::with_capacity(config.members.len());
- let mut seen_names = HashSet::with_capacity(config.members.len());
- for member in &config.members {
- if !seen_names.insert(&member.package_name) {
- bail!(
- "Cannot have two workspace packages with the same name ('{}' at {})",
- member.package_name,
- member.path.display(),
- );
- }
- members.push(MemberRoots {
- name: member.package_name.clone(),
- dir_url: member.config_file.specifier.join("./").unwrap().clone(),
- exports: resolve_config_file_roots_from_exports(&member.config_file)?,
- });
- }
- Ok(members)
-}
-
-pub fn resolve_config_file_roots_from_exports(
- config_file: &ConfigFile,
-) -> Result<Vec<ModuleSpecifier>, AnyError> {
- let exports_config = config_file
- .to_exports_config()
- .with_context(|| {
- format!("Failed to parse exports at {}", config_file.specifier)
- })?
- .into_map();
- let mut exports = Vec::with_capacity(exports_config.len());
- for (_, value) in exports_config {
- let entry_point =
- config_file.specifier.join(&value).with_context(|| {
- format!("Failed to join {} with {}", config_file.specifier, value)
- })?;
- exports.push(entry_point);
- }
- Ok(exports)
-}
-
pub fn collect_invalid_external_imports(
graph: &ModuleGraph,
diagnostics_collector: &PublishDiagnosticsCollector,
@@ -142,63 +85,3 @@ pub fn collect_invalid_external_imports(
}
}
}
-
-/// Collects diagnostics from the module graph for the given packages.
-/// Returns true if any diagnostics were collected.
-pub fn collect_fast_check_type_graph_diagnostics(
- graph: &ModuleGraph,
- packages: &[MemberRoots],
- diagnostics_collector: &PublishDiagnosticsCollector,
-) -> bool {
- let mut had_diagnostic = false;
- let mut seen_modules = HashSet::with_capacity(graph.specifiers_count());
- for package in packages {
- let mut pending = VecDeque::new();
- for export in &package.exports {
- if seen_modules.insert(export.clone()) {
- pending.push_back(export.clone());
- }
- }
-
- 'analyze_package: while let Some(specifier) = pending.pop_front() {
- let Ok(Some(module)) = graph.try_get_prefer_types(&specifier) else {
- continue;
- };
- let Some(es_module) = module.js() else {
- continue;
- };
- if let Some(diagnostics) = es_module.fast_check_diagnostics() {
- for diagnostic in diagnostics {
- had_diagnostic = true;
- diagnostics_collector
- .push(PublishDiagnostic::FastCheck(diagnostic.clone()));
- if matches!(
- diagnostic,
- FastCheckDiagnostic::UnsupportedJavaScriptEntrypoint { .. }
- ) {
- break 'analyze_package; // no need to keep analyzing this package
- }
- }
- }
-
- // analyze the next dependencies
- for dep in es_module.dependencies_prefer_fast_check().values() {
- let Some(specifier) = graph.resolve_dependency_from_dep(dep, true)
- else {
- continue;
- };
-
- let dep_in_same_package =
- specifier.as_str().starts_with(package.dir_url.as_str());
- if dep_in_same_package {
- let is_new = seen_modules.insert(specifier.clone());
- if is_new {
- pending.push_back(specifier.clone());
- }
- }
- }
- }
- }
-
- had_diagnostic
-}
diff --git a/cli/tools/registry/mod.rs b/cli/tools/registry/mod.rs
index 9205d9b26..586115f27 100644
--- a/cli/tools/registry/mod.rs
+++ b/cli/tools/registry/mod.rs
@@ -8,6 +8,7 @@ use std::sync::Arc;
use base64::prelude::BASE64_STANDARD;
use base64::Engine;
use deno_config::ConfigFile;
+use deno_config::WorkspaceMemberConfig;
use deno_core::anyhow::bail;
use deno_core::anyhow::Context;
use deno_core::error::AnyError;
@@ -33,12 +34,10 @@ use crate::factory::CliFactory;
use crate::graph_util::ModuleGraphBuilder;
use crate::http_util::HttpClient;
use crate::tools::check::CheckOptions;
+use crate::tools::lint::no_slow_types;
+use crate::tools::registry::diagnostics::PublishDiagnostic;
use crate::tools::registry::diagnostics::PublishDiagnosticsCollector;
-use crate::tools::registry::graph::collect_fast_check_type_graph_diagnostics;
use crate::tools::registry::graph::collect_invalid_external_imports;
-use crate::tools::registry::graph::get_workspace_member_roots;
-use crate::tools::registry::graph::resolve_config_file_roots_from_exports;
-use crate::tools::registry::graph::MemberRoots;
use crate::util::display::human_size;
use crate::util::import_map::ImportMapUnfurler;
@@ -80,16 +79,8 @@ impl PreparedPublishPackage {
static SUGGESTED_ENTRYPOINTS: [&str; 4] =
["mod.ts", "mod.js", "index.ts", "index.js"];
-fn get_deno_json_package_name(
- deno_json: &ConfigFile,
-) -> Result<String, AnyError> {
- match deno_json.json.name.clone() {
- Some(name) => Ok(name),
- None => bail!("{} is missing 'name' field", deno_json.specifier),
- }
-}
-
async fn prepare_publish(
+ package_name: &str,
deno_json: &ConfigFile,
source_cache: Arc<ParsedSourceCache>,
graph: Arc<deno_graph::ModuleGraph>,
@@ -101,7 +92,6 @@ async fn prepare_publish(
let Some(version) = deno_json.json.version.clone() else {
bail!("{} is missing 'version' field", deno_json.specifier);
};
- let name = get_deno_json_package_name(deno_json)?;
if deno_json.json.exports.is_none() {
let mut suggested_entrypoint = None;
@@ -118,22 +108,22 @@ async fn prepare_publish(
"version": "{}",
"exports": "{}"
}}"#,
- name,
+ package_name,
version,
suggested_entrypoint.unwrap_or("<path_to_entrypoint>")
);
bail!(
"You did not specify an entrypoint to \"{}\" package in {}. Add `exports` mapping in the configuration file, eg:\n{}",
- name,
+ package_name,
deno_json.specifier,
exports_content
);
}
- let Some(name) = name.strip_prefix('@') else {
+ let Some(name_no_at) = package_name.strip_prefix('@') else {
bail!("Invalid package name, use '@<scope_name>/<package_name> format");
};
- let Some((scope, package_name)) = name.split_once('/') else {
+ let Some((scope, name_no_scope)) = name_no_at.split_once('/') else {
bail!("Invalid package name, use '@<scope_name>/<package_name> format");
};
let file_patterns = deno_json.to_publish_config()?.map(|c| c.files);
@@ -152,11 +142,11 @@ async fn prepare_publish(
})
.await??;
- log::debug!("Tarball size ({}): {}", name, tarball.bytes.len());
+ log::debug!("Tarball size ({}): {}", package_name, tarball.bytes.len());
Ok(Rc::new(PreparedPublishPackage {
scope: scope.to_string(),
- package: package_name.to_string(),
+ package: name_no_scope.to_string(),
version: version.to_string(),
tarball,
// the config file is always at the root of a publishing dir,
@@ -660,77 +650,44 @@ struct PreparePackagesData {
async fn prepare_packages_for_publishing(
cli_factory: &CliFactory,
- no_zap: bool,
+ allow_slow_types: bool,
diagnostics_collector: &PublishDiagnosticsCollector,
deno_json: ConfigFile,
import_map: Arc<ImportMap>,
) -> Result<PreparePackagesData, AnyError> {
- let maybe_workspace_config = deno_json.to_workspace_config()?;
+ let members = deno_json.to_workspace_members()?;
let module_graph_builder = cli_factory.module_graph_builder().await?.as_ref();
let source_cache = cli_factory.parsed_source_cache();
let type_checker = cli_factory.type_checker().await?;
let cli_options = cli_factory.cli_options();
- let Some(workspace_config) = maybe_workspace_config else {
- let roots = resolve_config_file_roots_from_exports(&deno_json)?;
- let graph = build_and_check_graph_for_publish(
- module_graph_builder,
- type_checker,
- cli_options,
- no_zap,
- diagnostics_collector,
- &[MemberRoots {
- name: get_deno_json_package_name(&deno_json)?,
- dir_url: deno_json.specifier.join("./").unwrap().clone(),
- exports: roots,
- }],
- )
- .await?;
- let package = prepare_publish(
- &deno_json,
- source_cache.clone(),
- graph,
- import_map,
- diagnostics_collector,
- )
- .await?;
- let package_name = format!("@{}/{}", package.scope, package.package);
- let publish_order_graph =
- PublishOrderGraph::new_single(package_name.clone());
- let package_by_name = HashMap::from([(package_name, package)]);
- return Ok(PreparePackagesData {
- publish_order_graph,
- package_by_name,
- });
- };
+ if members.len() > 1 {
+ println!("Publishing a workspace...");
+ }
- println!("Publishing a workspace...");
// create the module graph
- let roots = get_workspace_member_roots(&workspace_config)?;
let graph = build_and_check_graph_for_publish(
module_graph_builder,
type_checker,
cli_options,
- no_zap,
+ allow_slow_types,
diagnostics_collector,
- &roots,
+ &members,
)
.await?;
- let mut package_by_name =
- HashMap::with_capacity(workspace_config.members.len());
+ let mut package_by_name = HashMap::with_capacity(members.len());
let publish_order_graph =
- publish_order::build_publish_order_graph(&graph, &roots)?;
+ publish_order::build_publish_order_graph(&graph, &members)?;
- let results = workspace_config
- .members
- .iter()
- .cloned()
+ let results = members
+ .into_iter()
.map(|member| {
let import_map = import_map.clone();
let graph = graph.clone();
async move {
let package = prepare_publish(
+ &member.package_name,
&member.config_file,
source_cache.clone(),
graph,
@@ -761,64 +718,69 @@ async fn build_and_check_graph_for_publish(
module_graph_builder: &ModuleGraphBuilder,
type_checker: &TypeChecker,
cli_options: &CliOptions,
- no_zap: bool,
+ allow_slow_types: bool,
diagnostics_collector: &PublishDiagnosticsCollector,
- packages: &[MemberRoots],
+ packages: &[WorkspaceMemberConfig],
) -> Result<Arc<deno_graph::ModuleGraph>, deno_core::anyhow::Error> {
- let graph = Arc::new(
- module_graph_builder
- .create_graph_with_options(crate::graph_util::CreateGraphOptions {
- is_dynamic: false,
- // All because we're going to use this same graph to determine the publish order later
- graph_kind: deno_graph::GraphKind::All,
- roots: packages
- .iter()
- .flat_map(|r| r.exports.iter())
- .cloned()
- .collect(),
- workspace_fast_check: true,
- loader: None,
- })
- .await?,
- );
+ let graph =
+ Arc::new(module_graph_builder.create_publish_graph(packages).await?);
graph.valid()?;
+ // todo(dsherret): move to lint rule
collect_invalid_external_imports(&graph, diagnostics_collector);
- let mut has_fast_check_diagnostics = false;
- if !no_zap {
- log::info!("Checking fast check type graph for errors...");
- has_fast_check_diagnostics = collect_fast_check_type_graph_diagnostics(
- &graph,
- packages,
- diagnostics_collector,
+ if allow_slow_types {
+ log::info!(
+ concat!(
+ "{} Publishing a library with slow types is not recommended. ",
+ "This may lead to poor type checking performance for users of ",
+ "your package, may affect the quality of automatic documentation ",
+ "generation, and your package will not be shipped with a .d.ts ",
+ "file for Node.js users."
+ ),
+ colors::yellow("Warning"),
);
- }
+ } else {
+ log::info!("Checking for slow types in the public API...");
+ let mut any_pkg_had_diagnostics = false;
+ for package in packages {
+ let export_urls = package.config_file.resolve_export_value_urls()?;
+ let diagnostics =
+ no_slow_types::collect_no_slow_type_diagnostics(&export_urls, &graph);
+ if !diagnostics.is_empty() {
+ any_pkg_had_diagnostics = true;
+ for diagnostic in diagnostics {
+ diagnostics_collector.push(PublishDiagnostic::FastCheck(diagnostic));
+ }
+ }
+ }
- if !has_fast_check_diagnostics {
- log::info!("Ensuring type checks...");
- let diagnostics = type_checker
- .check_diagnostics(
- graph.clone(),
- CheckOptions {
- lib: cli_options.ts_type_lib_window(),
- log_ignored_options: false,
- reload: cli_options.reload_flag(),
- },
- )
- .await?;
- if !diagnostics.is_empty() {
- bail!(
- concat!(
- "{:#}\n\n",
- "You may have discovered a bug in Deno's fast check implementation. ",
- "Fast check is still early days and we would appreciate if you log a ",
- "bug if you believe this is one: https://github.com/denoland/deno/issues/"
- ),
- diagnostics
- );
+ if !any_pkg_had_diagnostics {
+ // this is a temporary measure until we know that fast check is reliable and stable
+ let check_diagnostics = type_checker
+ .check_diagnostics(
+ graph.clone(),
+ CheckOptions {
+ lib: cli_options.ts_type_lib_window(),
+ log_ignored_options: false,
+ reload: cli_options.reload_flag(),
+ },
+ )
+ .await?;
+ if !check_diagnostics.is_empty() {
+ bail!(
+ concat!(
+ "Failed ensuring public API type output is valid.\n\n",
+ "{:#}\n\n",
+ "You may have discovered a bug in Deno. Please open an issue at: ",
+ "https://github.com/denoland/deno/issues/"
+ ),
+ check_diagnostics
+ );
+ }
}
}
+
Ok(graph)
}
@@ -852,7 +814,7 @@ pub async fn publish(
let prepared_data = prepare_packages_for_publishing(
&cli_factory,
- publish_flags.no_zap,
+ publish_flags.allow_slow_types,
&diagnostics_collector,
config_file.clone(),
import_map,
@@ -866,10 +828,7 @@ pub async fn publish(
}
if publish_flags.dry_run {
- log::warn!(
- "{} Aborting due to --dry-run",
- crate::colors::yellow("Warning")
- );
+ log::warn!("{} Aborting due to --dry-run", colors::yellow("Warning"));
return Ok(());
}
diff --git a/cli/tools/registry/publish_order.rs b/cli/tools/registry/publish_order.rs
index bb423b2b5..ad0f72272 100644
--- a/cli/tools/registry/publish_order.rs
+++ b/cli/tools/registry/publish_order.rs
@@ -4,12 +4,12 @@ use std::collections::HashMap;
use std::collections::HashSet;
use std::collections::VecDeque;
+use deno_ast::ModuleSpecifier;
+use deno_config::WorkspaceMemberConfig;
use deno_core::anyhow::bail;
use deno_core::error::AnyError;
use deno_graph::ModuleGraph;
-use super::graph::MemberRoots;
-
pub struct PublishOrderGraph {
packages: HashMap<String, HashSet<String>>,
in_degree: HashMap<String, usize>,
@@ -17,14 +17,6 @@ pub struct PublishOrderGraph {
}
impl PublishOrderGraph {
- pub fn new_single(package_name: String) -> Self {
- Self {
- packages: HashMap::from([(package_name.clone(), HashSet::new())]),
- in_degree: HashMap::from([(package_name.clone(), 0)]),
- reverse_map: HashMap::from([(package_name, Vec::new())]),
- }
- }
-
pub fn next(&mut self) -> Vec<String> {
let mut package_names_with_depth = self
.in_degree
@@ -122,22 +114,26 @@ impl PublishOrderGraph {
pub fn build_publish_order_graph(
graph: &ModuleGraph,
- roots: &[MemberRoots],
+ roots: &[WorkspaceMemberConfig],
) -> Result<PublishOrderGraph, AnyError> {
- let packages = build_pkg_deps(graph, roots);
+ let packages = build_pkg_deps(graph, roots)?;
Ok(build_publish_order_graph_from_pkgs_deps(packages))
}
fn build_pkg_deps(
graph: &deno_graph::ModuleGraph,
- roots: &[MemberRoots],
-) -> HashMap<String, HashSet<String>> {
+ roots: &[WorkspaceMemberConfig],
+) -> Result<HashMap<String, HashSet<String>>, AnyError> {
let mut members = HashMap::with_capacity(roots.len());
let mut seen_modules = HashSet::with_capacity(graph.modules().count());
- for root in roots {
+ let roots = roots
+ .iter()
+ .map(|r| (ModuleSpecifier::from_file_path(&r.dir_path).unwrap(), r))
+ .collect::<Vec<_>>();
+ for (root_dir_url, root) in &roots {
let mut deps = HashSet::new();
let mut pending = VecDeque::new();
- pending.extend(root.exports.clone());
+ pending.extend(root.config_file.resolve_export_value_urls()?);
while let Some(specifier) = pending.pop_front() {
let Some(module) = graph.get(&specifier).and_then(|m| m.js()) else {
continue;
@@ -163,23 +159,23 @@ fn build_pkg_deps(
if specifier.scheme() != "file" {
continue;
}
- if specifier.as_str().starts_with(root.dir_url.as_str()) {
+ if specifier.as_str().starts_with(root_dir_url.as_str()) {
if seen_modules.insert(specifier.clone()) {
pending.push_back(specifier.clone());
}
} else {
- let found_root = roots
- .iter()
- .find(|root| specifier.as_str().starts_with(root.dir_url.as_str()));
+ let found_root = roots.iter().find(|(dir_url, _)| {
+ specifier.as_str().starts_with(dir_url.as_str())
+ });
if let Some(root) = found_root {
- deps.insert(root.name.clone());
+ deps.insert(root.1.package_name.clone());
}
}
}
}
- members.insert(root.name.clone(), deps);
+ members.insert(root.package_name.clone(), deps);
}
- members
+ Ok(members)
}
fn build_publish_order_graph_from_pkgs_deps(