-rw-r--r--  Cargo.lock                      12
-rw-r--r--  cli/Cargo.toml                   1
-rw-r--r--  cli/config_file.rs               4
-rw-r--r--  cli/deno_dir.rs                 12
-rw-r--r--  cli/lsp/language_server.rs       2
-rw-r--r--  cli/main.rs                      3
-rw-r--r--  cli/tools/fmt.rs               253
-rw-r--r--  cli/tools/incremental_cache.rs 371
-rw-r--r--  cli/tools/lint.rs               33
-rw-r--r--  cli/tools/mod.rs                 1
-rw-r--r--  ext/webstorage/lib.rs            2
11 files changed, 645 insertions(+), 49 deletions(-)
diff --git a/Cargo.lock b/Cargo.lock
index abb320355..5fccb2a93 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -795,6 +795,7 @@ dependencies = [
"tower-lsp",
"trust-dns-client",
"trust-dns-server",
+ "twox-hash",
"typed-arena",
"uuid",
"walkdir",
@@ -4724,6 +4725,17 @@ dependencies = [
]
[[package]]
+name = "twox-hash"
+version = "1.6.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4ee73e6e4924fe940354b8d4d98cad5231175d615cd855b758adc658c0aac6a0"
+dependencies = [
+ "cfg-if",
+ "rand",
+ "static_assertions",
+]
+
+[[package]]
name = "typed-arena"
version = "2.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
diff --git a/cli/Cargo.toml b/cli/Cargo.toml
index 826f16dda..6005a634f 100644
--- a/cli/Cargo.toml
+++ b/cli/Cargo.toml
@@ -95,6 +95,7 @@ text_lines = "=0.4.1"
tokio = { version = "=1.17", features = ["full"] }
tokio-util = "=0.7.0"
tower-lsp = "=0.16.0"
+twox-hash = "=1.6.2"
typed-arena = "2.0.1"
uuid = { version = "=0.8.2", features = ["v4", "serde"] }
walkdir = "=2.3.2"
diff --git a/cli/config_file.rs b/cli/config_file.rs
index 2c1ec7907..ba1d427a1 100644
--- a/cli/config_file.rs
+++ b/cli/config_file.rs
@@ -480,7 +480,7 @@ pub struct LintConfig {
pub files: FilesConfig,
}
-#[derive(Clone, Copy, Debug, Deserialize)]
+#[derive(Clone, Copy, Debug, Serialize, Deserialize)]
#[serde(deny_unknown_fields, rename_all = "camelCase")]
pub enum ProseWrap {
Always,
@@ -488,7 +488,7 @@ pub enum ProseWrap {
Preserve,
}
-#[derive(Clone, Debug, Default, Deserialize)]
+#[derive(Clone, Debug, Default, Serialize, Deserialize)]
#[serde(default, deny_unknown_fields, rename_all = "camelCase")]
pub struct FmtOptionsConfig {
pub use_tabs: Option<bool>,
diff --git a/cli/deno_dir.rs b/cli/deno_dir.rs
index ca6f8b81f..ea46474d0 100644
--- a/cli/deno_dir.rs
+++ b/cli/deno_dir.rs
@@ -44,6 +44,18 @@ impl DenoDir {
Ok(deno_dir)
}
+
+ /// Path for the incremental cache used for formatting.
+ pub fn fmt_incremental_cache_db_file_path(&self) -> PathBuf {
+ // bump this version name to invalidate the entire cache
+ self.root.join("fmt_incremental_cache_v1")
+ }
+
+ /// Path for the incremental cache used for linting.
+ pub fn lint_incremental_cache_db_file_path(&self) -> PathBuf {
+ // bump this version name to invalidate the entire cache
+ self.root.join("lint_incremental_cache_v1")
+ }
}
/// To avoid the poorly managed dirs crate
diff --git a/cli/lsp/language_server.rs b/cli/lsp/language_server.rs
index 8b452586a..1b9eeed3d 100644
--- a/cli/lsp/language_server.rs
+++ b/cli/lsp/language_server.rs
@@ -1154,7 +1154,7 @@ impl Inner {
Some(Err(err)) => Err(anyhow!("{}", err)),
None => {
// it's not a js/ts file, so attempt to format its contents
- format_file(&file_path, document.content().as_str(), fmt_options)
+ format_file(&file_path, document.content().as_str(), &fmt_options)
}
};
diff --git a/cli/main.rs b/cli/main.rs
index e8a949e3d..7f86967aa 100644
--- a/cli/main.rs
+++ b/cli/main.rs
@@ -908,7 +908,8 @@ async fn format_command(
return Ok(0);
}
- tools::fmt::format(ps.flags.as_ref(), fmt_flags, maybe_fmt_config).await?;
+ tools::fmt::format(ps.flags.as_ref(), fmt_flags, maybe_fmt_config, &ps.dir)
+ .await?;
Ok(0)
}
diff --git a/cli/tools/fmt.rs b/cli/tools/fmt.rs
index 401fe836e..4fe99c616 100644
--- a/cli/tools/fmt.rs
+++ b/cli/tools/fmt.rs
@@ -11,6 +11,7 @@ use crate::colors;
use crate::config_file::FmtConfig;
use crate::config_file::FmtOptionsConfig;
use crate::config_file::ProseWrap;
+use crate::deno_dir::DenoDir;
use crate::diff::diff;
use crate::file_watcher;
use crate::file_watcher::ResolutionResult;
@@ -40,11 +41,14 @@ use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering;
use std::sync::Arc;
+use super::incremental_cache::IncrementalCache;
+
/// Format JavaScript/TypeScript files.
pub async fn format(
flags: &Flags,
fmt_flags: FmtFlags,
maybe_fmt_config: Option<FmtConfig>,
+ deno_dir: &DenoDir,
) -> Result<(), AnyError> {
let FmtFlags {
files,
@@ -132,11 +136,18 @@ pub async fn format(
}
};
let operation = |(paths, fmt_options): (Vec<PathBuf>, FmtOptionsConfig)| async move {
+ let incremental_cache = Arc::new(IncrementalCache::new(
+ &deno_dir.fmt_incremental_cache_db_file_path(),
+ &fmt_options,
+ &paths,
+ ));
if check {
- check_source_files(paths, fmt_options).await?;
+ check_source_files(paths, fmt_options, incremental_cache.clone()).await?;
} else {
- format_source_files(paths, fmt_options).await?;
+ format_source_files(paths, fmt_options, incremental_cache.clone())
+ .await?;
}
+ incremental_cache.wait_completion().await;
Ok(())
};
@@ -234,18 +245,18 @@ pub fn format_json(
pub fn format_file(
file_path: &Path,
file_text: &str,
- fmt_options: FmtOptionsConfig,
+ fmt_options: &FmtOptionsConfig,
) -> Result<Option<String>, AnyError> {
let ext = get_extension(file_path).unwrap_or_default();
if matches!(
ext.as_str(),
"md" | "mkd" | "mkdn" | "mdwn" | "mdown" | "markdown"
) {
- format_markdown(file_text, &fmt_options)
+ format_markdown(file_text, fmt_options)
} else if matches!(ext.as_str(), "json" | "jsonc") {
- format_json(file_text, &fmt_options)
+ format_json(file_text, fmt_options)
} else {
- let config = get_resolved_typescript_config(&fmt_options);
+ let config = get_resolved_typescript_config(fmt_options);
dprint_plugin_typescript::format_text(file_path, file_text, &config)
}
}
@@ -263,6 +274,7 @@ pub fn format_parsed_source(
async fn check_source_files(
paths: Vec<PathBuf>,
fmt_options: FmtOptionsConfig,
+ incremental_cache: Arc<IncrementalCache>,
) -> Result<(), AnyError> {
let not_formatted_files_count = Arc::new(AtomicUsize::new(0));
let checked_files_count = Arc::new(AtomicUsize::new(0));
@@ -277,7 +289,12 @@ async fn check_source_files(
checked_files_count.fetch_add(1, Ordering::Relaxed);
let file_text = read_file_contents(&file_path)?.text;
- match format_file(&file_path, &file_text, fmt_options.clone()) {
+ // skip checking the file if we know it's formatted
+ if incremental_cache.is_file_same(&file_path, &file_text) {
+ return Ok(());
+ }
+
+ match format_file(&file_path, &file_text, &fmt_options) {
Ok(Some(formatted_text)) => {
not_formatted_files_count.fetch_add(1, Ordering::Relaxed);
let _g = output_lock.lock();
@@ -286,7 +303,14 @@ async fn check_source_files(
info!("{} {}:", colors::bold("from"), file_path.display());
info!("{}", diff);
}
- Ok(None) => {}
+ Ok(None) => {
+ // When checking formatting, only update the incremental cache when
+ // the file is already formatted, since we don't bother checking for
+ // stable formatting here. Doing this during `--check` means CIs that
+ // cache the DENO_DIR also get the benefit of incremental formatting.
+ }
Err(e) => {
let _g = output_lock.lock();
eprintln!("Error checking: {}", file_path.to_string_lossy());
@@ -318,6 +342,7 @@ async fn check_source_files(
async fn format_source_files(
paths: Vec<PathBuf>,
fmt_options: FmtOptionsConfig,
+ incremental_cache: Arc<IncrementalCache>,
) -> Result<(), AnyError> {
let formatted_files_count = Arc::new(AtomicUsize::new(0));
let checked_files_count = Arc::new(AtomicUsize::new(0));
@@ -330,8 +355,19 @@ async fn format_source_files(
checked_files_count.fetch_add(1, Ordering::Relaxed);
let file_contents = read_file_contents(&file_path)?;
- match format_file(&file_path, &file_contents.text, fmt_options.clone()) {
+ // skip formatting the file if we know it's formatted
+ if incremental_cache.is_file_same(&file_path, &file_contents.text) {
+ return Ok(());
+ }
+
+ match format_ensure_stable(
+ &file_path,
+ &file_contents.text,
+ &fmt_options,
+ format_file,
+ ) {
Ok(Some(formatted_text)) => {
+ incremental_cache.update_file(&file_path, &formatted_text);
write_file_contents(
&file_path,
FileContents {
@@ -343,7 +379,9 @@ async fn format_source_files(
let _g = output_lock.lock();
info!("{}", file_path.to_string_lossy());
}
- Ok(None) => {}
+ Ok(None) => {
+ incremental_cache.update_file(&file_path, &file_contents.text);
+ }
Err(e) => {
let _g = output_lock.lock();
eprintln!("Error formatting: {}", file_path.to_string_lossy());
@@ -372,6 +410,66 @@ async fn format_source_files(
Ok(())
}
+/// When storing any formatted text in the incremental cache, we want
+/// to ensure that formatting the stored text again produces the same
+/// text. This is to prevent "double format" issues where a user formats
+/// their code locally and the check then fails on the CI afterwards.
+fn format_ensure_stable(
+ file_path: &Path,
+ file_text: &str,
+ fmt_options: &FmtOptionsConfig,
+ fmt_func: impl Fn(
+ &Path,
+ &str,
+ &FmtOptionsConfig,
+ ) -> Result<Option<String>, AnyError>,
+) -> Result<Option<String>, AnyError> {
+ let formatted_text = fmt_func(file_path, file_text, fmt_options)?;
+
+ match formatted_text {
+ Some(mut current_text) => {
+ let mut count = 0;
+ loop {
+ match fmt_func(file_path, &current_text, fmt_options) {
+ Ok(Some(next_pass_text)) => {
+ // just in case
+ if next_pass_text == current_text {
+ return Ok(Some(next_pass_text));
+ }
+ current_text = next_pass_text;
+ }
+ Ok(None) => {
+ return Ok(Some(current_text));
+ }
+ Err(err) => {
+ panic!(
+ concat!(
+ "Formatting succeeded initially, but failed when ensuring a ",
+ "stable format. This indicates a bug in the formatter where ",
+ "the text it produces is not syntactically correct. As a temporary ",
+ "workaround you can ignore this file.\n\n{:#}"
+ ),
+ err,
+ )
+ }
+ }
+ count += 1;
+ if count == 5 {
+ panic!(
+ concat!(
+ "Formatting not stable. Bailed after {} tries. This indicates a bug ",
+ "in the formatter where it formats the file differently each time. As a ",
+ "temporary workaround you can ignore this file."
+ ),
+ count
+ )
+ }
+ }
+ }
+ None => Ok(None),
+ }
+}
+
/// Format stdin and write result to stdout.
/// Treats input as TypeScript or as set by `--ext` flag.
/// Compatible with `--check` flag.
@@ -386,7 +484,7 @@ pub fn format_stdin(
let file_path = PathBuf::from(format!("_stdin.{}", fmt_flags.ext));
let fmt_options = resolve_fmt_options(&fmt_flags, fmt_options);
- let formatted_text = format_file(&file_path, &source, fmt_options)?;
+ let formatted_text = format_file(&file_path, &source, &fmt_options)?;
if fmt_flags.check {
if formatted_text.is_some() {
println!("Not formatted stdin");
@@ -628,37 +726,106 @@ fn is_contain_git(path: &Path) -> bool {
path.components().any(|c| c.as_os_str() == ".git")
}
-#[test]
-fn test_is_supported_ext_fmt() {
- assert!(!is_supported_ext_fmt(Path::new("tests/subdir/redirects")));
- assert!(is_supported_ext_fmt(Path::new("README.md")));
- assert!(is_supported_ext_fmt(Path::new("readme.MD")));
- assert!(is_supported_ext_fmt(Path::new("readme.mkd")));
- assert!(is_supported_ext_fmt(Path::new("readme.mkdn")));
- assert!(is_supported_ext_fmt(Path::new("readme.mdwn")));
- assert!(is_supported_ext_fmt(Path::new("readme.mdown")));
- assert!(is_supported_ext_fmt(Path::new("readme.markdown")));
- assert!(is_supported_ext_fmt(Path::new("lib/typescript.d.ts")));
- assert!(is_supported_ext_fmt(Path::new("testdata/001_hello.js")));
- assert!(is_supported_ext_fmt(Path::new("testdata/002_hello.ts")));
- assert!(is_supported_ext_fmt(Path::new("foo.jsx")));
- assert!(is_supported_ext_fmt(Path::new("foo.tsx")));
- assert!(is_supported_ext_fmt(Path::new("foo.TS")));
- assert!(is_supported_ext_fmt(Path::new("foo.TSX")));
- assert!(is_supported_ext_fmt(Path::new("foo.JS")));
- assert!(is_supported_ext_fmt(Path::new("foo.JSX")));
- assert!(is_supported_ext_fmt(Path::new("foo.mjs")));
- assert!(!is_supported_ext_fmt(Path::new("foo.mjsx")));
- assert!(is_supported_ext_fmt(Path::new("foo.jsonc")));
- assert!(is_supported_ext_fmt(Path::new("foo.JSONC")));
- assert!(is_supported_ext_fmt(Path::new("foo.json")));
- assert!(is_supported_ext_fmt(Path::new("foo.JsON")));
-}
+#[cfg(test)]
+mod test {
+ use super::*;
+
+ #[test]
+ fn test_is_supported_ext_fmt() {
+ assert!(!is_supported_ext_fmt(Path::new("tests/subdir/redirects")));
+ assert!(is_supported_ext_fmt(Path::new("README.md")));
+ assert!(is_supported_ext_fmt(Path::new("readme.MD")));
+ assert!(is_supported_ext_fmt(Path::new("readme.mkd")));
+ assert!(is_supported_ext_fmt(Path::new("readme.mkdn")));
+ assert!(is_supported_ext_fmt(Path::new("readme.mdwn")));
+ assert!(is_supported_ext_fmt(Path::new("readme.mdown")));
+ assert!(is_supported_ext_fmt(Path::new("readme.markdown")));
+ assert!(is_supported_ext_fmt(Path::new("lib/typescript.d.ts")));
+ assert!(is_supported_ext_fmt(Path::new("testdata/001_hello.js")));
+ assert!(is_supported_ext_fmt(Path::new("testdata/002_hello.ts")));
+ assert!(is_supported_ext_fmt(Path::new("foo.jsx")));
+ assert!(is_supported_ext_fmt(Path::new("foo.tsx")));
+ assert!(is_supported_ext_fmt(Path::new("foo.TS")));
+ assert!(is_supported_ext_fmt(Path::new("foo.TSX")));
+ assert!(is_supported_ext_fmt(Path::new("foo.JS")));
+ assert!(is_supported_ext_fmt(Path::new("foo.JSX")));
+ assert!(is_supported_ext_fmt(Path::new("foo.mjs")));
+ assert!(!is_supported_ext_fmt(Path::new("foo.mjsx")));
+ assert!(is_supported_ext_fmt(Path::new("foo.jsonc")));
+ assert!(is_supported_ext_fmt(Path::new("foo.JSONC")));
+ assert!(is_supported_ext_fmt(Path::new("foo.json")));
+ assert!(is_supported_ext_fmt(Path::new("foo.JsON")));
+ }
+
+ #[test]
+ fn test_is_located_in_git() {
+ assert!(is_contain_git(Path::new("test/.git")));
+ assert!(is_contain_git(Path::new(".git/bad.json")));
+ assert!(is_contain_git(Path::new("test/.git/bad.json")));
+ assert!(!is_contain_git(Path::new("test/bad.git/bad.json")));
+ }
+
+ #[test]
+ #[should_panic(expected = "Formatting not stable. Bailed after 5 tries.")]
+ fn test_format_ensure_stable_unstable_format() {
+ format_ensure_stable(
+ &PathBuf::from("mod.ts"),
+ "1",
+ &Default::default(),
+ |_, file_text, _| Ok(Some(format!("1{}", file_text))),
+ )
+ .unwrap();
+ }
+
+ #[test]
+ fn test_format_ensure_stable_error_first() {
+ let err = format_ensure_stable(
+ &PathBuf::from("mod.ts"),
+ "1",
+ &Default::default(),
+ |_, _, _| bail!("Error formatting."),
+ )
+ .unwrap_err();
+
+ assert_eq!(err.to_string(), "Error formatting.");
+ }
-#[test]
-fn test_is_located_in_git() {
- assert!(is_contain_git(Path::new("test/.git")));
- assert!(is_contain_git(Path::new(".git/bad.json")));
- assert!(is_contain_git(Path::new("test/.git/bad.json")));
- assert!(!is_contain_git(Path::new("test/bad.git/bad.json")));
+ #[test]
+ #[should_panic(expected = "Formatting succeeded initially, but failed when")]
+ fn test_format_ensure_stable_error_second() {
+ format_ensure_stable(
+ &PathBuf::from("mod.ts"),
+ "1",
+ &Default::default(),
+ |_, file_text, _| {
+ if file_text == "1" {
+ Ok(Some("11".to_string()))
+ } else {
+ bail!("Error formatting.")
+ }
+ },
+ )
+ .unwrap();
+ }
+
+ #[test]
+ fn test_format_stable_after_two() {
+ let result = format_ensure_stable(
+ &PathBuf::from("mod.ts"),
+ "1",
+ &Default::default(),
+ |_, file_text, _| {
+ if file_text == "1" {
+ Ok(Some("11".to_string()))
+ } else if file_text == "11" {
+ Ok(None)
+ } else {
+ unreachable!();
+ }
+ },
+ )
+ .unwrap();
+
+ assert_eq!(result, Some("11".to_string()));
+ }
}
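
`format_ensure_stable` above is a fixed-point iteration: the formatter is re-applied to its own output until the text stops changing, and it bails with a panic (since that indicates a formatter bug) after five passes. A minimal standalone sketch of the same idea, with a hypothetical `formatter` closure standing in for the dprint call:

/// Re-apply `formatter` to its own output until it converges.
/// Returns None if no fixed point is reached within `max_passes`.
fn fixed_point(
  mut text: String,
  max_passes: usize,
  formatter: impl Fn(&str) -> String,
) -> Option<String> {
  for _ in 0..max_passes {
    let next = formatter(&text);
    if next == text {
      return Some(next); // stable: formatting again changes nothing
    }
    text = next;
  }
  None // never converged; fmt.rs treats this as a formatter bug
}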
diff --git a/cli/tools/incremental_cache.rs b/cli/tools/incremental_cache.rs
new file mode 100644
index 000000000..476c46b29
--- /dev/null
+++ b/cli/tools/incremental_cache.rs
@@ -0,0 +1,371 @@
+use std::collections::HashMap;
+use std::path::Path;
+use std::path::PathBuf;
+
+use deno_core::error::AnyError;
+use deno_core::parking_lot::Mutex;
+use deno_core::serde_json;
+use deno_runtime::deno_webstorage::rusqlite::params;
+use deno_runtime::deno_webstorage::rusqlite::Connection;
+use serde::Serialize;
+use tokio::task::JoinHandle;
+
+/// Cache used to skip formatting/linting a file again when we
+/// know it is already formatted or has no lint diagnostics.
+pub struct IncrementalCache(Option<IncrementalCacheInner>);
+
+impl IncrementalCache {
+ pub fn new<TState: Serialize>(
+ db_file_path: &Path,
+ state: &TState,
+ initial_file_paths: &[PathBuf],
+ ) -> Self {
+ // if creating the incremental cache fails, then we
+ // treat it as not having a cache
+ let result =
+ IncrementalCacheInner::new(db_file_path, state, initial_file_paths);
+ IncrementalCache(match result {
+ Ok(inner) => Some(inner),
+ Err(err) => {
+ log::debug!("Creating the incremental cache failed.\n{:#}", err);
+ // Maybe the cache file is corrupt. Attempt to remove
+ // the cache file for next time
+ let _ = std::fs::remove_file(db_file_path);
+ None
+ }
+ })
+ }
+
+ pub fn is_file_same(&self, file_path: &Path, file_text: &str) -> bool {
+ if let Some(inner) = &self.0 {
+ inner.is_file_same(file_path, file_text)
+ } else {
+ false
+ }
+ }
+
+ pub fn update_file(&self, file_path: &Path, file_text: &str) {
+ if let Some(inner) = &self.0 {
+ inner.update_file(file_path, file_text)
+ }
+ }
+
+ pub async fn wait_completion(&self) {
+ if let Some(inner) = &self.0 {
+ inner.wait_completion().await;
+ }
+ }
+}
+
+enum ReceiverMessage {
+ Update(PathBuf, u64),
+ Exit,
+}
+
+struct IncrementalCacheInner {
+ previous_hashes: HashMap<PathBuf, u64>,
+ sender: tokio::sync::mpsc::UnboundedSender<ReceiverMessage>,
+ handle: Mutex<Option<JoinHandle<()>>>,
+}
+
+impl IncrementalCacheInner {
+ pub fn new<TState: Serialize>(
+ db_file_path: &Path,
+ state: &TState,
+ initial_file_paths: &[PathBuf],
+ ) -> Result<Self, AnyError> {
+ let state_hash =
+ fast_insecure_hash(serde_json::to_string(state).unwrap().as_bytes());
+ let sql_cache = SqlIncrementalCache::new(db_file_path, state_hash)?;
+ Ok(Self::from_sql_incremental_cache(
+ sql_cache,
+ initial_file_paths,
+ ))
+ }
+
+ fn from_sql_incremental_cache(
+ cache: SqlIncrementalCache,
+ initial_file_paths: &[PathBuf],
+ ) -> Self {
+ let mut previous_hashes = HashMap::new();
+ for path in initial_file_paths {
+ if let Some(hash) = cache.get_source_hash(path) {
+ previous_hashes.insert(path.to_path_buf(), hash);
+ }
+ }
+
+ let (sender, mut receiver) =
+ tokio::sync::mpsc::unbounded_channel::<ReceiverMessage>();
+
+ // sqlite isn't `Sync`, so we do all the updating on a dedicated task
+ let handle = tokio::task::spawn(async move {
+ while let Some(message) = receiver.recv().await {
+ match message {
+ ReceiverMessage::Update(path, hash) => {
+ let _ = cache.set_source_hash(&path, hash);
+ }
+ ReceiverMessage::Exit => break,
+ }
+ }
+ });
+
+ IncrementalCacheInner {
+ previous_hashes,
+ sender,
+ handle: Mutex::new(Some(handle)),
+ }
+ }
+
+ pub fn is_file_same(&self, file_path: &Path, file_text: &str) -> bool {
+ match self.previous_hashes.get(file_path) {
+ Some(hash) => *hash == fast_insecure_hash(file_text.as_bytes()),
+ None => false,
+ }
+ }
+
+ pub fn update_file(&self, file_path: &Path, file_text: &str) {
+ let hash = fast_insecure_hash(file_text.as_bytes());
+ if let Some(previous_hash) = self.previous_hashes.get(file_path) {
+ if *previous_hash == hash {
+ return; // do not bother updating the db file because nothing has changed
+ }
+ }
+ let _ = self
+ .sender
+ .send(ReceiverMessage::Update(file_path.to_path_buf(), hash));
+ }
+
+ pub async fn wait_completion(&self) {
+ if self.sender.send(ReceiverMessage::Exit).is_err() {
+ return;
+ }
+ let handle = self.handle.lock().take();
+ if let Some(handle) = handle {
+ handle.await.unwrap();
+ }
+ }
+}
+
+struct SqlIncrementalCache {
+ conn: Connection,
+ /// A hash of the state used to produce the formatting/linting, other
+ /// than the CLI version. This is a hash of the configuration, which
+ /// ensures a file is re-formatted/linted when the configuration changes.
+ state_hash: u64,
+}
+
+impl SqlIncrementalCache {
+ pub fn new(db_file_path: &Path, state_hash: u64) -> Result<Self, AnyError> {
+ let conn = Connection::open(db_file_path)?;
+ Self::from_connection(conn, state_hash, crate::version::deno())
+ }
+
+ fn from_connection(
+ conn: Connection,
+ state_hash: u64,
+ cli_version: String,
+ ) -> Result<Self, AnyError> {
+ run_pragma(&conn)?;
+ create_tables(&conn, cli_version)?;
+
+ Ok(Self { conn, state_hash })
+ }
+
+ pub fn get_source_hash(&self, path: &Path) -> Option<u64> {
+ match self.get_source_hash_result(path) {
+ Ok(option) => option,
+ Err(err) => {
+ if cfg!(debug_assertions) {
+ panic!("Error retrieving hash: {}", err);
+ } else {
+ // fail silently when not debugging
+ None
+ }
+ }
+ }
+ }
+
+ fn get_source_hash_result(
+ &self,
+ path: &Path,
+ ) -> Result<Option<u64>, AnyError> {
+ let query = "
+ SELECT
+ source_hash
+ FROM
+ incrementalcache
+ WHERE
+ file_path=?1
+ AND state_hash=?2
+ LIMIT 1";
+ let mut stmt = self.conn.prepare_cached(query)?;
+ let mut rows = stmt
+ .query(params![path.to_string_lossy(), self.state_hash.to_string()])?;
+ if let Some(row) = rows.next()? {
+ let hash: String = row.get(0)?;
+ Ok(Some(hash.parse::<u64>()?))
+ } else {
+ Ok(None)
+ }
+ }
+
+ pub fn set_source_hash(
+ &self,
+ path: &Path,
+ source_hash: u64,
+ ) -> Result<(), AnyError> {
+ let sql = "
+ INSERT OR REPLACE INTO
+ incrementalcache (file_path, state_hash, source_hash)
+ VALUES
+ (?1, ?2, ?3)";
+ let mut stmt = self.conn.prepare_cached(sql)?;
+ stmt.execute(params![
+ path.to_string_lossy(),
+ &self.state_hash.to_string(),
+ &source_hash.to_string(),
+ ])?;
+ Ok(())
+ }
+}
+
+fn run_pragma(conn: &Connection) -> Result<(), AnyError> {
+ // Enable write-ahead-logging and tweak some other stuff
+ let initial_pragmas = "
+ -- enable write-ahead-logging mode
+ PRAGMA journal_mode=WAL;
+ PRAGMA synchronous=NORMAL;
+ PRAGMA temp_store=memory;
+ PRAGMA page_size=4096;
+ PRAGMA mmap_size=6000000;
+ PRAGMA optimize;
+ ";
+
+ conn.execute_batch(initial_pragmas)?;
+ Ok(())
+}
+
+fn create_tables(
+ conn: &Connection,
+ cli_version: String,
+) -> Result<(), AnyError> {
+ // INT doesn't store up to u64, so use TEXT
+ conn.execute(
+ "CREATE TABLE IF NOT EXISTS incrementalcache (
+ file_path TEXT PRIMARY KEY,
+ state_hash TEXT NOT NULL,
+ source_hash TEXT NOT NULL
+ )",
+ [],
+ )?;
+ conn.execute(
+ "CREATE TABLE IF NOT EXISTS info (
+ key TEXT PRIMARY KEY,
+ value TEXT NOT NULL
+ )",
+ [],
+ )?;
+
+ // delete the cache when the CLI version changes
+ let data_cli_version: Option<String> = conn
+ .query_row(
+ "SELECT value FROM info WHERE key='CLI_VERSION' LIMIT 1",
+ [],
+ |row| row.get(0),
+ )
+ .ok();
+ if data_cli_version != Some(cli_version.to_string()) {
+ conn.execute("DELETE FROM incrementalcache", params![])?;
+ let mut stmt = conn
+ .prepare("INSERT OR REPLACE INTO info (key, value) VALUES (?1, ?2)")?;
+ stmt.execute(params!["CLI_VERSION", &cli_version])?;
+ }
+
+ Ok(())
+}
+
+/// Very fast, non-cryptographic (insecure) hash.
+fn fast_insecure_hash(bytes: &[u8]) -> u64 {
+ use std::hash::Hasher;
+ use twox_hash::XxHash64;
+
+ let mut hasher = XxHash64::default();
+ hasher.write(bytes);
+ hasher.finish()
+}
+
+#[cfg(test)]
+mod test {
+ use std::path::PathBuf;
+
+ use super::*;
+
+ #[test]
+ pub fn sql_cache_general_use() {
+ let conn = Connection::open_in_memory().unwrap();
+ let cache =
+ SqlIncrementalCache::from_connection(conn, 1, "1.0.0".to_string())
+ .unwrap();
+ let path = PathBuf::from("/mod.ts");
+
+ assert_eq!(cache.get_source_hash(&path), None);
+ cache.set_source_hash(&path, 2).unwrap();
+ assert_eq!(cache.get_source_hash(&path), Some(2));
+
+ // try changing the cli version (should clear)
+ let conn = cache.conn;
+ let mut cache =
+ SqlIncrementalCache::from_connection(conn, 1, "2.0.0".to_string())
+ .unwrap();
+ assert_eq!(cache.get_source_hash(&path), None);
+
+ // add back the file to the cache
+ cache.set_source_hash(&path, 2).unwrap();
+ assert_eq!(cache.get_source_hash(&path), Some(2));
+
+ // try changing the state hash
+ cache.state_hash = 2;
+ assert_eq!(cache.get_source_hash(&path), None);
+ cache.state_hash = 1;
+
+ // should return now that everything is back
+ assert_eq!(cache.get_source_hash(&path), Some(2));
+
+ // recreating the cache should not remove the data because the CLI version and state hash are the same
+ let conn = cache.conn;
+ let cache =
+ SqlIncrementalCache::from_connection(conn, 1, "2.0.0".to_string())
+ .unwrap();
+ assert_eq!(cache.get_source_hash(&path), Some(2));
+
+ // now try replacing and using another path
+ cache.set_source_hash(&path, 3).unwrap();
+ cache.set_source_hash(&path, 4).unwrap();
+ let path2 = PathBuf::from("/mod2.ts");
+ cache.set_source_hash(&path2, 5).unwrap();
+ assert_eq!(cache.get_source_hash(&path), Some(4));
+ assert_eq!(cache.get_source_hash(&path2), Some(5));
+ }
+
+ #[tokio::test]
+ pub async fn incremental_cache_general_use() {
+ let conn = Connection::open_in_memory().unwrap();
+ let sql_cache =
+ SqlIncrementalCache::from_connection(conn, 1, "1.0.0".to_string())
+ .unwrap();
+ let file_path = PathBuf::from("/mod.ts");
+ let file_text = "test";
+ let file_hash = fast_insecure_hash(file_text.as_bytes());
+ sql_cache.set_source_hash(&file_path, file_hash).unwrap();
+ let cache = IncrementalCacheInner::from_sql_incremental_cache(
+ sql_cache,
+ &[file_path.clone()],
+ );
+
+ assert!(cache.is_file_same(&file_path, "test"));
+ assert!(!cache.is_file_same(&file_path, "other"));
+
+ // just ensure this doesn't panic
+ cache.update_file(&file_path, "other");
+ }
+}
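
Condensed, the call pattern both fmt and lint use with this cache is: construct with the state and file list, probe per file, record clean files, then drain the writer task. A sketch using the names from fmt.rs above (error handling elided):

let cache = Arc::new(IncrementalCache::new(
  &deno_dir.fmt_incremental_cache_db_file_path(),
  &fmt_options, // serialized to JSON and hashed into the state hash
  &paths,       // previous hashes for these paths are preloaded
));
for path in &paths {
  let text = std::fs::read_to_string(path)?;
  if cache.is_file_same(path, &text) {
    continue; // hash matches the last clean run, so skip the file
  }
  // ... format or lint the file, then record the clean text ...
  cache.update_file(path, &text); // queued to the dedicated writer task
}
cache.wait_completion().await; // flush queued updates before exiting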
diff --git a/cli/tools/lint.rs b/cli/tools/lint.rs
index bca64d1f4..4f5ad0fe8 100644
--- a/cli/tools/lint.rs
+++ b/cli/tools/lint.rs
@@ -34,6 +34,8 @@ use std::path::PathBuf;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, Mutex};
+use super::incremental_cache::IncrementalCache;
+
static STDIN_FILE_NAME: &str = "_stdin.ts";
#[derive(Clone, Debug)]
@@ -147,6 +149,17 @@ pub async fn lint(flags: Flags, lint_flags: LintFlags) -> Result<(), AnyError> {
};
let operation = |paths: Vec<PathBuf>| async {
+ let incremental_cache = Arc::new(IncrementalCache::new(
+ &ps.dir.lint_incremental_cache_db_file_path(),
+ // use a hash of the rule names in order to bust the cache
+ &{
+ // ensure this is stable by sorting it
+ let mut names = lint_rules.iter().map(|r| r.code()).collect::<Vec<_>>();
+ names.sort_unstable();
+ names
+ },
+ &paths,
+ ));
let target_files_len = paths.len();
let reporter_kind = reporter_kind.clone();
let reporter_lock = Arc::new(Mutex::new(create_reporter(reporter_kind)));
@@ -154,8 +167,23 @@ pub async fn lint(flags: Flags, lint_flags: LintFlags) -> Result<(), AnyError> {
let has_error = has_error.clone();
let lint_rules = lint_rules.clone();
let reporter_lock = reporter_lock.clone();
+ let incremental_cache = incremental_cache.clone();
move |file_path| {
- let r = lint_file(file_path.clone(), lint_rules.clone());
+ let file_text = fs::read_to_string(&file_path)?;
+
+ // don't bother rechecking this file if it didn't have any diagnostics before
+ if incremental_cache.is_file_same(&file_path, &file_text) {
+ return Ok(());
+ }
+
+ let r = lint_file(file_path.clone(), file_text, lint_rules.clone());
+ if let Ok((file_diagnostics, file_text)) = &r {
+ if file_diagnostics.is_empty() {
+ // update the incremental cache if there were no diagnostics
+ incremental_cache.update_file(&file_path, file_text)
+ }
+ }
+
handle_lint_result(
&file_path.to_string_lossy(),
r,
@@ -167,6 +195,7 @@ pub async fn lint(flags: Flags, lint_flags: LintFlags) -> Result<(), AnyError> {
}
})
.await?;
+ incremental_cache.wait_completion().await;
reporter_lock.lock().unwrap().close(target_files_len);
Ok(())
@@ -262,10 +291,10 @@ pub fn create_linter(
fn lint_file(
file_path: PathBuf,
+ source_code: String,
lint_rules: Vec<Arc<dyn LintRule>>,
) -> Result<(Vec<LintDiagnostic>, String), AnyError> {
let file_name = file_path.to_string_lossy().to_string();
- let source_code = fs::read_to_string(&file_path)?;
let media_type = MediaType::from(&file_path);
let linter = create_linter(media_type, lint_rules);
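
Note the lint cache's state is the sorted list of rule codes rather than the formatter options: any `Serialize` value works as cache-busting state as long as it serializes deterministically, which is why the codes are sorted first. A sketch of the resulting hashing (the rule codes here are illustrative):

// Without the sort, the same rule set could serialize in a different
// order between runs and needlessly invalidate the cache.
let mut names = vec!["no-explicit-any", "ban-types"]; // illustrative codes
names.sort_unstable(); // deterministic order => deterministic JSON
let json = serde_json::to_string(&names).unwrap();
let state_hash = fast_insecure_hash(json.as_bytes()); // as in IncrementalCache::new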
diff --git a/cli/tools/mod.rs b/cli/tools/mod.rs
index 7c5d79744..a6eaeb70e 100644
--- a/cli/tools/mod.rs
+++ b/cli/tools/mod.rs
@@ -4,6 +4,7 @@ pub mod bench;
pub mod coverage;
pub mod doc;
pub mod fmt;
+pub mod incremental_cache;
pub mod installer;
pub mod lint;
pub mod repl;
diff --git a/ext/webstorage/lib.rs b/ext/webstorage/lib.rs
index 47d720ffb..6284a59bc 100644
--- a/ext/webstorage/lib.rs
+++ b/ext/webstorage/lib.rs
@@ -13,6 +13,8 @@ use rusqlite::OptionalExtension;
use std::fmt;
use std::path::PathBuf;
+pub use rusqlite;
+
#[derive(Clone)]
struct OriginStorageDir(PathBuf);
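
The `pub use rusqlite;` re-export is what lets cli/tools/incremental_cache.rs open its cache through `deno_runtime::deno_webstorage::rusqlite` and share the one bundled SQLite, rather than adding a second rusqlite dependency to the CLI.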