summary | refs | log | tree | commit | diff
path: root/cli/cache/incremental.rs
diff options
context:
space:
mode:
author Matt Mastracci <matthew@mastracci.com> 2023-03-27 16:01:52 -0600
committer GitHub <noreply@github.com> 2023-03-27 22:01:52 +0000
commit 86c3c4f34397a29c2bf1847bddfea562a2369a4f (patch)
tree bfbabf6f6d55dc14db47c4e06d4aae50277ab5ca /cli/cache/incremental.rs
parent 8c051dbd1a075ad3c228f78b29b13f0e455972a7 (diff)
feat(core): initialize SQLite off-main-thread (#18401)
This gets SQLite off the flamegraph and reduces initialization time by somewhere between 0.2ms and 0.5ms. In addition, I took the opportunity to move all the cache management code to a single place and reduce duplication. While the PR has a net gain of lines, much of that is just being a bit more deliberate with how we're recovering from errors. The existing caches had various policies for dealing with cache corruption, so I've unified them and tried to isolate the decisions we make for recovery in a single place (see `open_connection` in `CacheDB`). The policy I chose was: 1. Retry twice to open on-disk caches 2. If that fails, try to delete the file and recreate it on-disk 3. If we fail to delete the file or re-create a new cache, use a fallback strategy that can be chosen per-cache: InMemory (temporary cache for the process run), BlackHole (ignore writes, return empty reads), or Error (fail on every operation). The caches all use the same general code now, and share the cache failure recovery policy. In addition, it cleans up a TODO in the `NodeAnalysisCache`.
Diffstat (limited to 'cli/cache/incremental.rs')
-rw-r--r-- cli/cache/incremental.rs | 166
1 file changed, 53 insertions(+), 113 deletions(-)
diff --git a/cli/cache/incremental.rs b/cli/cache/incremental.rs
index d5298071f..deb30cdd1 100644
--- a/cli/cache/incremental.rs
+++ b/cli/cache/incremental.rs
@@ -8,57 +8,49 @@ use deno_core::error::AnyError;
use deno_core::parking_lot::Mutex;
use deno_core::serde_json;
use deno_runtime::deno_webstorage::rusqlite::params;
-use deno_runtime::deno_webstorage::rusqlite::Connection;
use serde::Serialize;
use tokio::task::JoinHandle;
+use super::cache_db::CacheDB;
+use super::cache_db::CacheDBConfiguration;
+use super::cache_db::CacheFailure;
use super::common::FastInsecureHasher;
-use super::common::INITIAL_PRAGMAS;
+
+pub static INCREMENTAL_CACHE_DB: CacheDBConfiguration = CacheDBConfiguration {
+ table_initializer: "CREATE TABLE IF NOT EXISTS incrementalcache (
+ file_path TEXT PRIMARY KEY,
+ state_hash TEXT NOT NULL,
+ source_hash TEXT NOT NULL
+ );",
+ on_version_change: "DELETE FROM incrementalcache;",
+ preheat_queries: &[],
+ // If the cache fails, just ignore all caching attempts
+ on_failure: CacheFailure::Blackhole,
+};
/// Cache used to skip formatting/linting a file again when we
/// know it is already formatted or has no lint diagnostics.
-pub struct IncrementalCache(Option<IncrementalCacheInner>);
+pub struct IncrementalCache(IncrementalCacheInner);
impl IncrementalCache {
pub fn new<TState: Serialize>(
- db_file_path: &Path,
+ db: CacheDB,
state: &TState,
initial_file_paths: &[PathBuf],
) -> Self {
- // if creating the incremental cache fails, then we
- // treat it as not having a cache
- let result =
- IncrementalCacheInner::new(db_file_path, state, initial_file_paths);
- IncrementalCache(match result {
- Ok(inner) => Some(inner),
- Err(err) => {
- log::debug!("Creating the incremental cache failed.\n{:#}", err);
- // Maybe the cache file is corrupt. Attempt to remove
- // the cache file for next time
- let _ = std::fs::remove_file(db_file_path);
- None
- }
- })
+ IncrementalCache(IncrementalCacheInner::new(db, state, initial_file_paths))
}
pub fn is_file_same(&self, file_path: &Path, file_text: &str) -> bool {
- if let Some(inner) = &self.0 {
- inner.is_file_same(file_path, file_text)
- } else {
- false
- }
+ self.0.is_file_same(file_path, file_text)
}
pub fn update_file(&self, file_path: &Path, file_text: &str) {
- if let Some(inner) = &self.0 {
- inner.update_file(file_path, file_text)
- }
+ self.0.update_file(file_path, file_text)
}
pub async fn wait_completion(&self) {
- if let Some(inner) = &self.0 {
- inner.wait_completion().await;
- }
+ self.0.wait_completion().await;
}
}
@@ -75,18 +67,15 @@ struct IncrementalCacheInner {
impl IncrementalCacheInner {
pub fn new<TState: Serialize>(
- db_file_path: &Path,
+ db: CacheDB,
state: &TState,
initial_file_paths: &[PathBuf],
- ) -> Result<Self, AnyError> {
+ ) -> Self {
let state_hash = FastInsecureHasher::new()
.write_str(&serde_json::to_string(state).unwrap())
.finish();
- let sql_cache = SqlIncrementalCache::new(db_file_path, state_hash)?;
- Ok(Self::from_sql_incremental_cache(
- sql_cache,
- initial_file_paths,
- ))
+ let sql_cache = SqlIncrementalCache::new(db, state_hash);
+ Self::from_sql_incremental_cache(sql_cache, initial_file_paths)
}
fn from_sql_incremental_cache(
@@ -155,7 +144,7 @@ impl IncrementalCacheInner {
}
struct SqlIncrementalCache {
- conn: Connection,
+ conn: CacheDB,
/// A hash of the state used to produce the formatting/linting other than
/// the CLI version. This state is a hash of the configuration and ensures
/// we format/lint a file when the configuration changes.
@@ -163,20 +152,8 @@ struct SqlIncrementalCache {
}
impl SqlIncrementalCache {
- pub fn new(db_file_path: &Path, state_hash: u64) -> Result<Self, AnyError> {
- log::debug!("Loading incremental cache.");
- let conn = Connection::open(db_file_path)?;
- Self::from_connection(conn, state_hash, crate::version::deno())
- }
-
- fn from_connection(
- conn: Connection,
- state_hash: u64,
- cli_version: &'static str,
- ) -> Result<Self, AnyError> {
- initialize(&conn, cli_version)?;
-
- Ok(Self { conn, state_hash })
+ pub fn new(conn: CacheDB, state_hash: u64) -> Self {
+ Self { conn, state_hash }
}
pub fn get_source_hash(&self, path: &Path) -> Option<u64> {
@@ -206,15 +183,15 @@ impl SqlIncrementalCache {
file_path=?1
AND state_hash=?2
LIMIT 1";
- let mut stmt = self.conn.prepare_cached(query)?;
- let mut rows = stmt
- .query(params![path.to_string_lossy(), self.state_hash.to_string()])?;
- if let Some(row) = rows.next()? {
- let hash: String = row.get(0)?;
- Ok(Some(hash.parse::<u64>()?))
- } else {
- Ok(None)
- }
+ let res = self.conn.query_row(
+ query,
+ params![path.to_string_lossy(), self.state_hash.to_string()],
+ |row| {
+ let hash: String = row.get(0)?;
+ Ok(hash.parse::<u64>()?)
+ },
+ )?;
+ Ok(res)
}
pub fn set_source_hash(
@@ -227,53 +204,18 @@ impl SqlIncrementalCache {
incrementalcache (file_path, state_hash, source_hash)
VALUES
(?1, ?2, ?3)";
- let mut stmt = self.conn.prepare_cached(sql)?;
- stmt.execute(params![
- path.to_string_lossy(),
- &self.state_hash.to_string(),
- &source_hash,
- ])?;
+ self.conn.execute(
+ sql,
+ params![
+ path.to_string_lossy(),
+ &self.state_hash.to_string(),
+ &source_hash,
+ ],
+ )?;
Ok(())
}
}
-fn initialize(
- conn: &Connection,
- cli_version: &'static str,
-) -> Result<(), AnyError> {
- // INT doesn't store up to u64, so use TEXT for source_hash
- let query = format!(
- "{INITIAL_PRAGMAS}
- CREATE TABLE IF NOT EXISTS incrementalcache (
- file_path TEXT PRIMARY KEY,
- state_hash TEXT NOT NULL,
- source_hash TEXT NOT NULL
- );
- CREATE TABLE IF NOT EXISTS info (
- key TEXT PRIMARY KEY,
- value TEXT NOT NULL
- );"
- );
- conn.execute_batch(&query)?;
-
- // delete the cache when the CLI version changes
- let data_cli_version: Option<String> = conn
- .query_row(
- "SELECT value FROM info WHERE key='CLI_VERSION' LIMIT 1",
- [],
- |row| row.get(0),
- )
- .ok();
- if data_cli_version.as_deref() != Some(cli_version) {
- conn.execute("DELETE FROM incrementalcache", params![])?;
- let mut stmt = conn
- .prepare("INSERT OR REPLACE INTO info (key, value) VALUES (?1, ?2)")?;
- stmt.execute(params!["CLI_VERSION", cli_version])?;
- }
-
- Ok(())
-}
-
#[cfg(test)]
mod test {
use std::path::PathBuf;
@@ -282,8 +224,8 @@ mod test {
#[test]
pub fn sql_cache_general_use() {
- let conn = Connection::open_in_memory().unwrap();
- let cache = SqlIncrementalCache::from_connection(conn, 1, "1.0.0").unwrap();
+ let conn = CacheDB::in_memory(&INCREMENTAL_CACHE_DB, "1.0.0");
+ let cache = SqlIncrementalCache::new(conn, 1);
let path = PathBuf::from("/mod.ts");
assert_eq!(cache.get_source_hash(&path), None);
@@ -291,9 +233,8 @@ mod test {
assert_eq!(cache.get_source_hash(&path), Some(2));
// try changing the cli version (should clear)
- let conn = cache.conn;
- let mut cache =
- SqlIncrementalCache::from_connection(conn, 1, "2.0.0").unwrap();
+ let conn = cache.conn.recreate_with_version("2.0.0");
+ let mut cache = SqlIncrementalCache::new(conn, 1);
assert_eq!(cache.get_source_hash(&path), None);
// add back the file to the cache
@@ -309,8 +250,8 @@ mod test {
assert_eq!(cache.get_source_hash(&path), Some(2));
// recreating the cache should not remove the data because the CLI version and state hash is the same
- let conn = cache.conn;
- let cache = SqlIncrementalCache::from_connection(conn, 1, "2.0.0").unwrap();
+ let conn = cache.conn.recreate_with_version("2.0.0");
+ let cache = SqlIncrementalCache::new(conn, 1);
assert_eq!(cache.get_source_hash(&path), Some(2));
// now try replacing and using another path
@@ -324,9 +265,8 @@ mod test {
#[tokio::test]
pub async fn incremental_cache_general_use() {
- let conn = Connection::open_in_memory().unwrap();
- let sql_cache =
- SqlIncrementalCache::from_connection(conn, 1, "1.0.0").unwrap();
+ let conn = CacheDB::in_memory(&INCREMENTAL_CACHE_DB, "1.0.0");
+ let sql_cache = SqlIncrementalCache::new(conn, 1);
let file_path = PathBuf::from("/mod.ts");
let file_text = "test";
let file_hash = FastInsecureHasher::new().write_str(file_text).finish();