author    David Sherret <dsherret@users.noreply.github.com>  2022-07-12 18:58:39 -0400
committer GitHub <noreply@github.com>                        2022-07-12 18:58:39 -0400
commit    0c87dd1e9898d7ac93e274d3611ee491a107d47a (patch)
tree      f626332706ccd12e0719f9b84d6b234d5483659b /cli/cache
parent    76107649804e674268becd693b7b2a954eecb3da (diff)
perf: use emit from swc instead of tsc (#15118)
Diffstat (limited to 'cli/cache')
-rw-r--r--  cli/cache/check.rs        215
-rw-r--r--  cli/cache/common.rs        31
-rw-r--r--  cli/cache/disk_cache.rs   388
-rw-r--r--  cli/cache/emit.rs          71
-rw-r--r--  cli/cache/incremental.rs  350
-rw-r--r--  cli/cache/mod.rs          150
6 files changed, 1205 insertions, 0 deletions
diff --git a/cli/cache/check.rs b/cli/cache/check.rs
new file mode 100644
index 000000000..0ff86ef6c
--- /dev/null
+++ b/cli/cache/check.rs
@@ -0,0 +1,215 @@
+// Copyright 2018-2022 the Deno authors. All rights reserved. MIT license.
+
+use std::path::Path;
+
+use deno_ast::ModuleSpecifier;
+use deno_core::anyhow::Context;
+use deno_core::error::AnyError;
+use deno_runtime::deno_webstorage::rusqlite::params;
+use deno_runtime::deno_webstorage::rusqlite::Connection;
+
+use super::common::run_sqlite_pragma;
+
+/// The cache used to tell whether type checking should occur again.
+///
+/// This simply stores a hash of the inputs of each successful type check
+/// and only clears them out when the CLI version changes.
+pub struct TypeCheckCache {
+ conn: Connection,
+}
+
+impl TypeCheckCache {
+ pub fn new(db_file_path: &Path) -> Result<Self, AnyError> {
+ let conn = Connection::open(db_file_path).with_context(|| {
+ format!(
+ concat!(
+ "Error opening type checking cache at {} -- ",
+ "Perhaps it's corrupt. Maybe try deleting it."
+ ),
+ db_file_path.display()
+ )
+ })?;
+ Self::from_connection(conn, crate::version::deno())
+ }
+
+ fn from_connection(
+ conn: Connection,
+ cli_version: String,
+ ) -> Result<Self, AnyError> {
+ run_sqlite_pragma(&conn)?;
+ create_tables(&conn, cli_version)?;
+
+ Ok(Self { conn })
+ }
+
+ pub fn has_check_hash(&self, hash: u64) -> bool {
+ match self.has_check_hash_result(hash) {
+ Ok(val) => val,
+ Err(err) => {
+ if cfg!(debug_assertions) {
+ panic!("Error retrieving hash: {}", err);
+ } else {
+ log::debug!("Error retrieving hash: {}", err);
+ // fail silently when not debugging
+ false
+ }
+ }
+ }
+ }
+
+ fn has_check_hash_result(&self, hash: u64) -> Result<bool, AnyError> {
+ let query = "SELECT * FROM checkcache WHERE check_hash=?1 LIMIT 1";
+ let mut stmt = self.conn.prepare_cached(query)?;
+ Ok(stmt.exists(params![hash.to_string()])?)
+ }
+
+ pub fn add_check_hash(&self, check_hash: u64) {
+ if let Err(err) = self.add_check_hash_result(check_hash) {
+ if cfg!(debug_assertions) {
+ panic!("Error saving check hash: {}", err);
+ } else {
+ log::debug!("Error saving check hash: {}", err);
+ }
+ }
+ }
+
+ fn add_check_hash_result(&self, check_hash: u64) -> Result<(), AnyError> {
+ let sql = "
+ INSERT OR REPLACE INTO
+ checkcache (check_hash)
+ VALUES
+ (?1)";
+ let mut stmt = self.conn.prepare_cached(sql)?;
+ stmt.execute(params![check_hash.to_string()])?;
+ Ok(())
+ }
+
+ pub fn get_tsbuildinfo(&self, specifier: &ModuleSpecifier) -> Option<String> {
+ let mut stmt = self
+ .conn
+ .prepare_cached("SELECT text FROM tsbuildinfo WHERE specifier=?1 LIMIT 1")
+ .ok()?;
+ let mut rows = stmt.query(params![specifier.to_string()]).ok()?;
+ let row = rows.next().ok().flatten()?;
+
+ row.get(0).ok()
+ }
+
+ pub fn set_tsbuildinfo(&self, specifier: &ModuleSpecifier, text: &str) {
+ if let Err(err) = self.set_tsbuildinfo_result(specifier, text) {
+ // this should never error, but if it ever does, don't fail hard
+ if cfg!(debug_assertions) {
+ panic!("Error saving tsbuildinfo: {}", err);
+ } else {
+ log::debug!("Error saving tsbuildinfo: {}", err);
+ }
+ }
+ }
+
+ fn set_tsbuildinfo_result(
+ &self,
+ specifier: &ModuleSpecifier,
+ text: &str,
+ ) -> Result<(), AnyError> {
+ let mut stmt = self.conn.prepare_cached(
+ "INSERT OR REPLACE INTO tsbuildinfo (specifier, text) VALUES (?1, ?2)",
+ )?;
+ stmt.execute(params![specifier.to_string(), text])?;
+ Ok(())
+ }
+}
+
+fn create_tables(
+ conn: &Connection,
+ cli_version: String,
+) -> Result<(), AnyError> {
+ // sqlite's INTEGER is signed 64-bit and can't store a full u64, so use TEXT
+ conn.execute(
+ "CREATE TABLE IF NOT EXISTS checkcache (
+ check_hash TEXT PRIMARY KEY
+ )",
+ [],
+ )?;
+ conn.execute(
+ "CREATE TABLE IF NOT EXISTS tsbuildinfo (
+ specifier TEXT PRIMARY KEY,
+ text TEXT NOT NULL
+ )",
+ [],
+ )?;
+ conn.execute(
+ "CREATE TABLE IF NOT EXISTS info (
+ key TEXT PRIMARY KEY,
+ value TEXT NOT NULL
+ )",
+ [],
+ )?;
+
+ // delete the cache when the CLI version changes
+ let data_cli_version: Option<String> = conn
+ .query_row(
+ "SELECT value FROM info WHERE key='CLI_VERSION' LIMIT 1",
+ [],
+ |row| row.get(0),
+ )
+ .ok();
+ if data_cli_version != Some(cli_version.to_string()) {
+ conn.execute("DELETE FROM checkcache", params![])?;
+ conn.execute("DELETE FROM tsbuildinfo", params![])?;
+ let mut stmt = conn
+ .prepare("INSERT OR REPLACE INTO info (key, value) VALUES (?1, ?2)")?;
+ stmt.execute(params!["CLI_VERSION", &cli_version])?;
+ }
+
+ Ok(())
+}
+
+#[cfg(test)]
+mod test {
+ use super::*;
+
+ #[test]
+ pub fn check_cache_general_use() {
+ let conn = Connection::open_in_memory().unwrap();
+ let cache =
+ TypeCheckCache::from_connection(conn, "1.0.0".to_string()).unwrap();
+
+ assert!(!cache.has_check_hash(1));
+ cache.add_check_hash(1);
+ assert!(cache.has_check_hash(1));
+ assert!(!cache.has_check_hash(2));
+
+ let specifier1 = ModuleSpecifier::parse("file:///test.json").unwrap();
+ assert_eq!(cache.get_tsbuildinfo(&specifier1), None);
+ cache.set_tsbuildinfo(&specifier1, "test");
+ assert_eq!(cache.get_tsbuildinfo(&specifier1), Some("test".to_string()));
+
+ // try changing the cli version (should clear)
+ let conn = cache.conn;
+ let cache =
+ TypeCheckCache::from_connection(conn, "2.0.0".to_string()).unwrap();
+ assert!(!cache.has_check_hash(1));
+ cache.add_check_hash(1);
+ assert!(cache.has_check_hash(1));
+ assert_eq!(cache.get_tsbuildinfo(&specifier1), None);
+ cache.set_tsbuildinfo(&specifier1, "test");
+ assert_eq!(cache.get_tsbuildinfo(&specifier1), Some("test".to_string()));
+
+ // recreating the cache should not remove the data because the CLI version is the same
+ let conn = cache.conn;
+ let cache =
+ TypeCheckCache::from_connection(conn, "2.0.0".to_string()).unwrap();
+ assert!(cache.has_check_hash(1));
+ assert!(!cache.has_check_hash(2));
+ assert_eq!(cache.get_tsbuildinfo(&specifier1), Some("test".to_string()));
+
+ // adding when already exists should not cause issue
+ cache.add_check_hash(1);
+ assert!(cache.has_check_hash(1));
+ cache.set_tsbuildinfo(&specifier1, "other");
+ assert_eq!(
+ cache.get_tsbuildinfo(&specifier1),
+ Some("other".to_string())
+ );
+ }
+}
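A minimal usage sketch of the TypeCheckCache added above (the db file name
and the hash value are illustrative, not taken from this commit):

    use std::path::Path;
    use deno_core::error::AnyError;

    fn check_if_needed(deno_dir: &Path, inputs_hash: u64) -> Result<(), AnyError> {
      // Illustrative location; the CLI decides where this db actually lives.
      let cache = TypeCheckCache::new(&deno_dir.join("check_cache_v1"))?;
      if !cache.has_check_hash(inputs_hash) {
        // ... run type checking here, then record the successful check ...
        cache.add_check_hash(inputs_hash);
      }
      Ok(())
    }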
diff --git a/cli/cache/common.rs b/cli/cache/common.rs
new file mode 100644
index 000000000..c01c1ab9a
--- /dev/null
+++ b/cli/cache/common.rs
@@ -0,0 +1,31 @@
+// Copyright 2018-2022 the Deno authors. All rights reserved. MIT license.
+
+use deno_core::error::AnyError;
+use deno_runtime::deno_webstorage::rusqlite::Connection;
+
+/// A very fast, non-cryptographic hash (xxHash64).
+pub fn fast_insecure_hash(bytes: &[u8]) -> u64 {
+ use std::hash::Hasher;
+ use twox_hash::XxHash64;
+
+ let mut hasher = XxHash64::default();
+ hasher.write(bytes);
+ hasher.finish()
+}
+
+/// Runs the common sqlite pragmas.
+pub fn run_sqlite_pragma(conn: &Connection) -> Result<(), AnyError> {
+ // Enable write-ahead logging and tune some performance-related settings
+ let initial_pragmas = "
+ -- enable write-ahead-logging mode
+ PRAGMA journal_mode=WAL;
+ PRAGMA synchronous=NORMAL;
+ PRAGMA temp_store=memory;
+ PRAGMA page_size=4096;
+ PRAGMA mmap_size=6000000;
+ PRAGMA optimize;
+ ";
+
+ conn.execute_batch(initial_pragmas)?;
+ Ok(())
+}
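Both caches key their entries on this hash. A small sketch of deriving and
storing a source hash (the input string is illustrative):

    fn example() {
      let source = "export const a = 1;";
      let hash: u64 = fast_insecure_hash(source.as_bytes());
      // Stored as TEXT because sqlite's INTEGER can't hold a full u64.
      let stored = hash.to_string();
      assert_eq!(stored.parse::<u64>().unwrap(), hash);
    }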
diff --git a/cli/cache/disk_cache.rs b/cli/cache/disk_cache.rs
new file mode 100644
index 000000000..01352c398
--- /dev/null
+++ b/cli/cache/disk_cache.rs
@@ -0,0 +1,388 @@
+// Copyright 2018-2022 the Deno authors. All rights reserved. MIT license.
+
+use crate::fs_util;
+use crate::http_cache::url_to_filename;
+
+use super::CacheType;
+use super::Cacher;
+use super::EmitMetadata;
+
+use deno_ast::ModuleSpecifier;
+use deno_core::error::AnyError;
+use deno_core::serde_json;
+use deno_core::url::Host;
+use deno_core::url::Url;
+use std::ffi::OsStr;
+use std::fs;
+use std::io;
+use std::path::Component;
+use std::path::Path;
+use std::path::PathBuf;
+use std::path::Prefix;
+use std::str;
+
+#[derive(Clone)]
+pub struct DiskCache {
+ pub location: PathBuf,
+}
+
+fn with_io_context<T: AsRef<str>>(
+ e: &std::io::Error,
+ context: T,
+) -> std::io::Error {
+ std::io::Error::new(e.kind(), format!("{} (for '{}')", e, context.as_ref()))
+}
+
+impl DiskCache {
+ /// `location` must be an absolute path.
+ pub fn new(location: &Path) -> Self {
+ assert!(location.is_absolute());
+ Self {
+ location: location.to_owned(),
+ }
+ }
+
+ /// Ensures the given cache directory exists, creating it if necessary.
+ pub fn ensure_dir_exists(&self, path: &Path) -> io::Result<()> {
+ if path.is_dir() {
+ return Ok(());
+ }
+ fs::create_dir_all(&path).map_err(|e| {
+ io::Error::new(e.kind(), format!(
+ "Could not create TypeScript compiler cache location: {:?}\nCheck the permission of the directory.",
+ path
+ ))
+ })
+ }
+
+ fn get_cache_filename(&self, url: &Url) -> Option<PathBuf> {
+ let mut out = PathBuf::new();
+
+ let scheme = url.scheme();
+ out.push(scheme);
+
+ match scheme {
+ "wasm" => {
+ let host = url.host_str().unwrap();
+ let host_port = match url.port() {
+ // Windows doesn't support ":" in filenames, so we represent the port
+ // with a special string.
+ Some(port) => format!("{}_PORT{}", host, port),
+ None => host.to_string(),
+ };
+ out.push(host_port);
+
+ for path_seg in url.path_segments().unwrap() {
+ out.push(path_seg);
+ }
+ }
+ "http" | "https" | "data" | "blob" => out = url_to_filename(url)?,
+ "file" => {
+ let path = match url.to_file_path() {
+ Ok(path) => path,
+ Err(_) => return None,
+ };
+ let mut path_components = path.components();
+
+ if cfg!(target_os = "windows") {
+ if let Some(Component::Prefix(prefix_component)) =
+ path_components.next()
+ {
+ // Windows doesn't support ":" in filenames, so we need to extract the disk prefix
+ // Example: file:///C:/deno/js/unit_test_runner.ts
+ // it should produce: file\c\deno\js\unit_test_runner.ts
+ match prefix_component.kind() {
+ Prefix::Disk(disk_byte) | Prefix::VerbatimDisk(disk_byte) => {
+ let disk = (disk_byte as char).to_string();
+ out.push(disk);
+ }
+ Prefix::UNC(server, share)
+ | Prefix::VerbatimUNC(server, share) => {
+ out.push("UNC");
+ let host = Host::parse(server.to_str().unwrap()).unwrap();
+ let host = host.to_string().replace(':', "_");
+ out.push(host);
+ out.push(share);
+ }
+ _ => unreachable!(),
+ }
+ }
+ }
+
+ // Must be relative, so strip the leading forward slash
+ let mut remaining_components = path_components.as_path();
+ if let Ok(stripped) = remaining_components.strip_prefix("/") {
+ remaining_components = stripped;
+ };
+
+ out = out.join(remaining_components);
+ }
+ _ => return None,
+ };
+
+ Some(out)
+ }
+
+ pub fn get_cache_filename_with_extension(
+ &self,
+ url: &Url,
+ extension: &str,
+ ) -> Option<PathBuf> {
+ let base = self.get_cache_filename(url)?;
+
+ match base.extension() {
+ None => Some(base.with_extension(extension)),
+ Some(ext) => {
+ let original_extension = OsStr::to_str(ext).unwrap();
+ let final_extension = format!("{}.{}", original_extension, extension);
+ Some(base.with_extension(final_extension))
+ }
+ }
+ }
+
+ pub fn get(&self, filename: &Path) -> std::io::Result<Vec<u8>> {
+ let path = self.location.join(filename);
+ fs::read(&path)
+ }
+
+ pub fn set(&self, filename: &Path, data: &[u8]) -> std::io::Result<()> {
+ let path = self.location.join(filename);
+ match path.parent() {
+ Some(parent) => self.ensure_dir_exists(parent),
+ None => Ok(()),
+ }?;
+ fs_util::atomic_write_file(&path, data, crate::http_cache::CACHE_PERM)
+ .map_err(|e| with_io_context(&e, format!("{:#?}", &path)))
+ }
+
+ fn get_emit_metadata(
+ &self,
+ specifier: &ModuleSpecifier,
+ ) -> Option<EmitMetadata> {
+ let filename = self.get_cache_filename_with_extension(specifier, "meta")?;
+ let bytes = self.get(&filename).ok()?;
+ serde_json::from_slice(&bytes).ok()
+ }
+
+ fn set_emit_metadata(
+ &self,
+ specifier: &ModuleSpecifier,
+ data: EmitMetadata,
+ ) -> Result<(), AnyError> {
+ let filename = self
+ .get_cache_filename_with_extension(specifier, "meta")
+ .unwrap();
+ let bytes = serde_json::to_vec(&data)?;
+ self.set(&filename, &bytes).map_err(|e| e.into())
+ }
+}
+
+// todo(13302): remove and replace with sqlite database
+impl Cacher for DiskCache {
+ fn get(
+ &self,
+ cache_type: CacheType,
+ specifier: &ModuleSpecifier,
+ ) -> Option<String> {
+ let extension = match cache_type {
+ CacheType::Emit => "js",
+ CacheType::SourceMap => "js.map",
+ CacheType::Version => {
+ return self.get_emit_metadata(specifier).map(|d| d.version_hash)
+ }
+ };
+ let filename =
+ self.get_cache_filename_with_extension(specifier, extension)?;
+ self
+ .get(&filename)
+ .ok()
+ .and_then(|b| String::from_utf8(b).ok())
+ }
+
+ fn set(
+ &self,
+ cache_type: CacheType,
+ specifier: &ModuleSpecifier,
+ value: String,
+ ) -> Result<(), AnyError> {
+ let extension = match cache_type {
+ CacheType::Emit => "js",
+ CacheType::SourceMap => "js.map",
+ CacheType::Version => {
+ let data = if let Some(mut data) = self.get_emit_metadata(specifier) {
+ data.version_hash = value;
+ data
+ } else {
+ EmitMetadata {
+ version_hash: value,
+ }
+ };
+ return self.set_emit_metadata(specifier, data);
+ }
+ };
+ let filename = self
+ .get_cache_filename_with_extension(specifier, extension)
+ .unwrap();
+ self.set(&filename, value.as_bytes()).map_err(|e| e.into())
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use test_util::TempDir;
+
+ #[test]
+ fn test_create_cache_if_dir_exists() {
+ let cache_location = TempDir::new();
+ let mut cache_path = cache_location.path().to_owned();
+ cache_path.push("foo");
+ let cache = DiskCache::new(&cache_path);
+ cache
+ .ensure_dir_exists(&cache.location)
+ .expect("should create the cache directory");
+ assert!(cache_path.is_dir());
+ }
+
+ #[test]
+ fn test_create_cache_if_dir_not_exists() {
+ let temp_dir = TempDir::new();
+ let mut cache_location = temp_dir.path().to_owned();
+ assert!(fs::remove_dir(&cache_location).is_ok());
+ cache_location.push("foo");
+ assert!(!cache_location.is_dir());
+ let cache = DiskCache::new(&cache_location);
+ cache
+ .ensure_dir_exists(&cache.location)
+ .expect("should create the cache directory");
+ assert!(cache_location.is_dir());
+ }
+
+ #[test]
+ fn test_get_cache_filename() {
+ let cache_location = if cfg!(target_os = "windows") {
+ PathBuf::from(r"C:\deno_dir\")
+ } else {
+ PathBuf::from("/deno_dir/")
+ };
+
+ let cache = DiskCache::new(&cache_location);
+
+ let mut test_cases = vec![
+ (
+ "http://deno.land/std/http/file_server.ts",
+ "http/deno.land/d8300752800fe3f0beda9505dc1c3b5388beb1ee45afd1f1e2c9fc0866df15cf",
+ ),
+ (
+ "http://localhost:8000/std/http/file_server.ts",
+ "http/localhost_PORT8000/d8300752800fe3f0beda9505dc1c3b5388beb1ee45afd1f1e2c9fc0866df15cf",
+ ),
+ (
+ "https://deno.land/std/http/file_server.ts",
+ "https/deno.land/d8300752800fe3f0beda9505dc1c3b5388beb1ee45afd1f1e2c9fc0866df15cf",
+ ),
+ ("wasm://wasm/d1c677ea", "wasm/wasm/d1c677ea"),
+ ];
+
+ if cfg!(target_os = "windows") {
+ test_cases.push(("file:///D:/a/1/s/format.ts", "file/D/a/1/s/format.ts"));
+ // IPv4 localhost
+ test_cases.push((
+ "file://127.0.0.1/d$/a/1/s/format.ts",
+ "file/UNC/127.0.0.1/d$/a/1/s/format.ts",
+ ));
+ // IPv6 localhost
+ test_cases.push((
+ "file://[0:0:0:0:0:0:0:1]/d$/a/1/s/format.ts",
+ "file/UNC/[__1]/d$/a/1/s/format.ts",
+ ));
+ // shared folder
+ test_cases.push((
+ "file://comp/t-share/a/1/s/format.ts",
+ "file/UNC/comp/t-share/a/1/s/format.ts",
+ ));
+ } else {
+ test_cases.push((
+ "file:///std/http/file_server.ts",
+ "file/std/http/file_server.ts",
+ ));
+ }
+
+ for test_case in &test_cases {
+ let cache_filename =
+ cache.get_cache_filename(&Url::parse(test_case.0).unwrap());
+ assert_eq!(cache_filename, Some(PathBuf::from(test_case.1)));
+ }
+ }
+
+ #[test]
+ fn test_get_cache_filename_with_extension() {
+ let p = if cfg!(target_os = "windows") {
+ "C:\\foo"
+ } else {
+ "/foo"
+ };
+ let cache = DiskCache::new(&PathBuf::from(p));
+
+ let mut test_cases = vec![
+ (
+ "http://deno.land/std/http/file_server.ts",
+ "js",
+ "http/deno.land/d8300752800fe3f0beda9505dc1c3b5388beb1ee45afd1f1e2c9fc0866df15cf.js",
+ ),
+ (
+ "http://deno.land/std/http/file_server.ts",
+ "js.map",
+ "http/deno.land/d8300752800fe3f0beda9505dc1c3b5388beb1ee45afd1f1e2c9fc0866df15cf.js.map",
+ ),
+ ];
+
+ if cfg!(target_os = "windows") {
+ test_cases.push((
+ "file:///D:/std/http/file_server",
+ "js",
+ "file/D/std/http/file_server.js",
+ ));
+ } else {
+ test_cases.push((
+ "file:///std/http/file_server",
+ "js",
+ "file/std/http/file_server.js",
+ ));
+ }
+
+ for test_case in &test_cases {
+ assert_eq!(
+ cache.get_cache_filename_with_extension(
+ &Url::parse(test_case.0).unwrap(),
+ test_case.1
+ ),
+ Some(PathBuf::from(test_case.2))
+ )
+ }
+ }
+
+ #[test]
+ fn test_get_cache_filename_invalid_urls() {
+ let cache_location = if cfg!(target_os = "windows") {
+ PathBuf::from(r"C:\deno_dir\")
+ } else {
+ PathBuf::from("/deno_dir/")
+ };
+
+ let cache = DiskCache::new(&cache_location);
+
+ let mut test_cases = vec!["unknown://localhost/test.ts"];
+
+ if cfg!(target_os = "windows") {
+ test_cases.push("file://");
+ test_cases.push("file:///");
+ }
+
+ for test_case in &test_cases {
+ let cache_filename =
+ cache.get_cache_filename(&Url::parse(test_case).unwrap());
+ assert_eq!(cache_filename, None);
+ }
+ }
+}
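A sketch of how DiskCache maps a module URL to an on-disk emit path,
mirroring the test expectations above (Unix-style cache location assumed):

    use deno_core::url::Url;
    use std::path::PathBuf;

    fn example() {
      let cache = DiskCache::new(&PathBuf::from("/deno_dir/gen"));
      let url = Url::parse("https://deno.land/std/http/file_server.ts").unwrap();
      // Yields something like Some("https/deno.land/<sha256 of the URL path>.js").
      let emit_path = cache.get_cache_filename_with_extension(&url, "js");
      println!("{:?}", emit_path);
    }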
diff --git a/cli/cache/emit.rs b/cli/cache/emit.rs
new file mode 100644
index 000000000..e1469b862
--- /dev/null
+++ b/cli/cache/emit.rs
@@ -0,0 +1,71 @@
+// Copyright 2018-2022 the Deno authors. All rights reserved. MIT license.
+
+use deno_ast::ModuleSpecifier;
+use deno_core::error::AnyError;
+
+use super::CacheType;
+use super::Cacher;
+
+/// Emit cache for a single file.
+#[derive(Debug, Clone, PartialEq)]
+pub struct SpecifierEmitCacheData {
+ pub source_hash: String,
+ pub text: String,
+ pub map: Option<String>,
+}
+
+pub trait EmitCache {
+ /// Gets the emit data from the cache.
+ fn get_emit_data(
+ &self,
+ specifier: &ModuleSpecifier,
+ ) -> Option<SpecifierEmitCacheData>;
+ /// Sets the emit data in the cache.
+ fn set_emit_data(
+ &self,
+ specifier: ModuleSpecifier,
+ data: SpecifierEmitCacheData,
+ ) -> Result<(), AnyError>;
+ /// Gets the stored hash of the source of the provided specifier
+ /// to tell if the emit is out of sync with the source.
+ /// TODO(13302): this is actually not reliable and should be removed
+ /// once we switch to an sqlite db
+ fn get_source_hash(&self, specifier: &ModuleSpecifier) -> Option<String>;
+ /// Gets the emitted JavaScript of the TypeScript source.
+ /// TODO(13302): remove this once switching to an sqlite db
+ fn get_emit_text(&self, specifier: &ModuleSpecifier) -> Option<String>;
+}
+
+impl<T: Cacher> EmitCache for T {
+ fn get_emit_data(
+ &self,
+ specifier: &ModuleSpecifier,
+ ) -> Option<SpecifierEmitCacheData> {
+ Some(SpecifierEmitCacheData {
+ source_hash: self.get_source_hash(specifier)?,
+ text: self.get_emit_text(specifier)?,
+ map: self.get(CacheType::SourceMap, specifier),
+ })
+ }
+
+ fn get_source_hash(&self, specifier: &ModuleSpecifier) -> Option<String> {
+ self.get(CacheType::Version, specifier)
+ }
+
+ fn get_emit_text(&self, specifier: &ModuleSpecifier) -> Option<String> {
+ self.get(CacheType::Emit, specifier)
+ }
+
+ fn set_emit_data(
+ &self,
+ specifier: ModuleSpecifier,
+ data: SpecifierEmitCacheData,
+ ) -> Result<(), AnyError> {
+ self.set(CacheType::Version, &specifier, data.source_hash)?;
+ self.set(CacheType::Emit, &specifier, data.text)?;
+ if let Some(map) = data.map {
+ self.set(CacheType::SourceMap, &specifier, map)?;
+ }
+ Ok(())
+ }
+}
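Because of the blanket impl above, any Cacher (currently DiskCache) gets the
EmitCache API for free. A round-trip sketch (specifier and values illustrative):

    use deno_ast::ModuleSpecifier;
    use deno_core::error::AnyError;

    fn round_trip(cache: &impl Cacher) -> Result<(), AnyError> {
      let specifier = ModuleSpecifier::parse("file:///mod.ts").unwrap();
      cache.set_emit_data(
        specifier.clone(),
        SpecifierEmitCacheData {
          source_hash: "12345".to_string(), // hash of the TypeScript source
          text: "export const a = 1;".to_string(), // the swc emit
          map: None, // no source map in this sketch
        },
      )?;
      // The emit is reused only while the stored hash matches the source.
      assert!(cache.get_emit_data(&specifier).is_some());
      Ok(())
    }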
diff --git a/cli/cache/incremental.rs b/cli/cache/incremental.rs
new file mode 100644
index 000000000..b5fff0734
--- /dev/null
+++ b/cli/cache/incremental.rs
@@ -0,0 +1,350 @@
+// Copyright 2018-2022 the Deno authors. All rights reserved. MIT license.
+
+use std::collections::HashMap;
+use std::path::Path;
+use std::path::PathBuf;
+
+use deno_core::error::AnyError;
+use deno_core::parking_lot::Mutex;
+use deno_core::serde_json;
+use deno_runtime::deno_webstorage::rusqlite::params;
+use deno_runtime::deno_webstorage::rusqlite::Connection;
+use serde::Serialize;
+use tokio::task::JoinHandle;
+
+use super::common::fast_insecure_hash;
+use super::common::run_sqlite_pragma;
+
+/// Cache used to skip formatting/linting a file again when we
+/// know it is already formatted or has no lint diagnostics.
+pub struct IncrementalCache(Option<IncrementalCacheInner>);
+
+impl IncrementalCache {
+ pub fn new<TState: Serialize>(
+ db_file_path: &Path,
+ state: &TState,
+ initial_file_paths: &[PathBuf],
+ ) -> Self {
+ // if creating the incremental cache fails, then we
+ // treat it as not having a cache
+ let result =
+ IncrementalCacheInner::new(db_file_path, state, initial_file_paths);
+ IncrementalCache(match result {
+ Ok(inner) => Some(inner),
+ Err(err) => {
+ log::debug!("Creating the incremental cache failed.\n{:#}", err);
+ // The cache file may be corrupt. Remove it so it can
+ // be recreated on the next run.
+ let _ = std::fs::remove_file(db_file_path);
+ None
+ }
+ })
+ }
+
+ pub fn is_file_same(&self, file_path: &Path, file_text: &str) -> bool {
+ if let Some(inner) = &self.0 {
+ inner.is_file_same(file_path, file_text)
+ } else {
+ false
+ }
+ }
+
+ pub fn update_file(&self, file_path: &Path, file_text: &str) {
+ if let Some(inner) = &self.0 {
+ inner.update_file(file_path, file_text)
+ }
+ }
+
+ pub async fn wait_completion(&self) {
+ if let Some(inner) = &self.0 {
+ inner.wait_completion().await;
+ }
+ }
+}
+
+enum ReceiverMessage {
+ Update(PathBuf, u64),
+ Exit,
+}
+
+struct IncrementalCacheInner {
+ previous_hashes: HashMap<PathBuf, u64>,
+ sender: tokio::sync::mpsc::UnboundedSender<ReceiverMessage>,
+ handle: Mutex<Option<JoinHandle<()>>>,
+}
+
+impl IncrementalCacheInner {
+ pub fn new<TState: Serialize>(
+ db_file_path: &Path,
+ state: &TState,
+ initial_file_paths: &[PathBuf],
+ ) -> Result<Self, AnyError> {
+ let state_hash =
+ fast_insecure_hash(serde_json::to_string(state).unwrap().as_bytes());
+ let sql_cache = SqlIncrementalCache::new(db_file_path, state_hash)?;
+ Ok(Self::from_sql_incremental_cache(
+ sql_cache,
+ initial_file_paths,
+ ))
+ }
+
+ fn from_sql_incremental_cache(
+ cache: SqlIncrementalCache,
+ initial_file_paths: &[PathBuf],
+ ) -> Self {
+ let mut previous_hashes = HashMap::new();
+ for path in initial_file_paths {
+ if let Some(hash) = cache.get_source_hash(path) {
+ previous_hashes.insert(path.to_path_buf(), hash);
+ }
+ }
+
+ let (sender, mut receiver) =
+ tokio::sync::mpsc::unbounded_channel::<ReceiverMessage>();
+
+ // sqlite isn't `Sync`, so we do all the updating on a dedicated task
+ let handle = tokio::task::spawn(async move {
+ while let Some(message) = receiver.recv().await {
+ match message {
+ ReceiverMessage::Update(path, hash) => {
+ let _ = cache.set_source_hash(&path, hash);
+ }
+ ReceiverMessage::Exit => break,
+ }
+ }
+ });
+
+ IncrementalCacheInner {
+ previous_hashes,
+ sender,
+ handle: Mutex::new(Some(handle)),
+ }
+ }
+
+ pub fn is_file_same(&self, file_path: &Path, file_text: &str) -> bool {
+ match self.previous_hashes.get(file_path) {
+ Some(hash) => *hash == fast_insecure_hash(file_text.as_bytes()),
+ None => false,
+ }
+ }
+
+ pub fn update_file(&self, file_path: &Path, file_text: &str) {
+ let hash = fast_insecure_hash(file_text.as_bytes());
+ if let Some(previous_hash) = self.previous_hashes.get(file_path) {
+ if *previous_hash == hash {
+ return; // do not bother updating the db file because nothing has changed
+ }
+ }
+ let _ = self
+ .sender
+ .send(ReceiverMessage::Update(file_path.to_path_buf(), hash));
+ }
+
+ pub async fn wait_completion(&self) {
+ if self.sender.send(ReceiverMessage::Exit).is_err() {
+ return;
+ }
+ let handle = self.handle.lock().take();
+ if let Some(handle) = handle {
+ handle.await.unwrap();
+ }
+ }
+}
+
+struct SqlIncrementalCache {
+ conn: Connection,
+ /// A hash of the state, other than the CLI version, used to produce the
+ /// formatting/linting (i.e. a hash of the configuration). It ensures we
+ /// re-format/re-lint a file when the configuration changes.
+ state_hash: u64,
+}
+
+impl SqlIncrementalCache {
+ pub fn new(db_file_path: &Path, state_hash: u64) -> Result<Self, AnyError> {
+ let conn = Connection::open(db_file_path)?;
+ Self::from_connection(conn, state_hash, crate::version::deno())
+ }
+
+ fn from_connection(
+ conn: Connection,
+ state_hash: u64,
+ cli_version: String,
+ ) -> Result<Self, AnyError> {
+ run_sqlite_pragma(&conn)?;
+ create_tables(&conn, cli_version)?;
+
+ Ok(Self { conn, state_hash })
+ }
+
+ pub fn get_source_hash(&self, path: &Path) -> Option<u64> {
+ match self.get_source_hash_result(path) {
+ Ok(option) => option,
+ Err(err) => {
+ if cfg!(debug_assertions) {
+ panic!("Error retrieving hash: {}", err);
+ } else {
+ // fail silently when not debugging
+ None
+ }
+ }
+ }
+ }
+
+ fn get_source_hash_result(
+ &self,
+ path: &Path,
+ ) -> Result<Option<u64>, AnyError> {
+ let query = "
+ SELECT
+ source_hash
+ FROM
+ incrementalcache
+ WHERE
+ file_path=?1
+ AND state_hash=?2
+ LIMIT 1";
+ let mut stmt = self.conn.prepare_cached(query)?;
+ let mut rows = stmt
+ .query(params![path.to_string_lossy(), self.state_hash.to_string()])?;
+ if let Some(row) = rows.next()? {
+ let hash: String = row.get(0)?;
+ Ok(Some(hash.parse::<u64>()?))
+ } else {
+ Ok(None)
+ }
+ }
+
+ pub fn set_source_hash(
+ &self,
+ path: &Path,
+ source_hash: u64,
+ ) -> Result<(), AnyError> {
+ let sql = "
+ INSERT OR REPLACE INTO
+ incrementalcache (file_path, state_hash, source_hash)
+ VALUES
+ (?1, ?2, ?3)";
+ let mut stmt = self.conn.prepare_cached(sql)?;
+ stmt.execute(params![
+ path.to_string_lossy(),
+ &self.state_hash.to_string(),
+ &source_hash.to_string(),
+ ])?;
+ Ok(())
+ }
+}
+
+fn create_tables(
+ conn: &Connection,
+ cli_version: String,
+) -> Result<(), AnyError> {
+ // sqlite's INTEGER is signed 64-bit and can't store a full u64, so use TEXT
+ conn.execute(
+ "CREATE TABLE IF NOT EXISTS incrementalcache (
+ file_path TEXT PRIMARY KEY,
+ state_hash TEXT NOT NULL,
+ source_hash TEXT NOT NULL
+ )",
+ [],
+ )?;
+ conn.execute(
+ "CREATE TABLE IF NOT EXISTS info (
+ key TEXT PRIMARY KEY,
+ value TEXT NOT NULL
+ )",
+ [],
+ )?;
+
+ // delete the cache when the CLI version changes
+ let data_cli_version: Option<String> = conn
+ .query_row(
+ "SELECT value FROM info WHERE key='CLI_VERSION' LIMIT 1",
+ [],
+ |row| row.get(0),
+ )
+ .ok();
+ if data_cli_version != Some(cli_version.to_string()) {
+ conn.execute("DELETE FROM incrementalcache", params![])?;
+ let mut stmt = conn
+ .prepare("INSERT OR REPLACE INTO info (key, value) VALUES (?1, ?2)")?;
+ stmt.execute(params!["CLI_VERSION", &cli_version])?;
+ }
+
+ Ok(())
+}
+
+#[cfg(test)]
+mod test {
+ use std::path::PathBuf;
+
+ use super::*;
+
+ #[test]
+ pub fn sql_cache_general_use() {
+ let conn = Connection::open_in_memory().unwrap();
+ let cache =
+ SqlIncrementalCache::from_connection(conn, 1, "1.0.0".to_string())
+ .unwrap();
+ let path = PathBuf::from("/mod.ts");
+
+ assert_eq!(cache.get_source_hash(&path), None);
+ cache.set_source_hash(&path, 2).unwrap();
+ assert_eq!(cache.get_source_hash(&path), Some(2));
+
+ // try changing the cli version (should clear)
+ let conn = cache.conn;
+ let mut cache =
+ SqlIncrementalCache::from_connection(conn, 1, "2.0.0".to_string())
+ .unwrap();
+ assert_eq!(cache.get_source_hash(&path), None);
+
+ // add back the file to the cache
+ cache.set_source_hash(&path, 2).unwrap();
+ assert_eq!(cache.get_source_hash(&path), Some(2));
+
+ // try changing the state hash
+ cache.state_hash = 2;
+ assert_eq!(cache.get_source_hash(&path), None);
+ cache.state_hash = 1;
+
+ // should return now that everything is back
+ assert_eq!(cache.get_source_hash(&path), Some(2));
+
+ // recreating the cache should not remove the data because the CLI version and state hash are the same
+ let conn = cache.conn;
+ let cache =
+ SqlIncrementalCache::from_connection(conn, 1, "2.0.0".to_string())
+ .unwrap();
+ assert_eq!(cache.get_source_hash(&path), Some(2));
+
+ // now try replacing and using another path
+ cache.set_source_hash(&path, 3).unwrap();
+ cache.set_source_hash(&path, 4).unwrap();
+ let path2 = PathBuf::from("/mod2.ts");
+ cache.set_source_hash(&path2, 5).unwrap();
+ assert_eq!(cache.get_source_hash(&path), Some(4));
+ assert_eq!(cache.get_source_hash(&path2), Some(5));
+ }
+
+ #[tokio::test]
+ pub async fn incremental_cache_general_use() {
+ let conn = Connection::open_in_memory().unwrap();
+ let sql_cache =
+ SqlIncrementalCache::from_connection(conn, 1, "1.0.0".to_string())
+ .unwrap();
+ let file_path = PathBuf::from("/mod.ts");
+ let file_text = "test";
+ let file_hash = fast_insecure_hash(file_text.as_bytes());
+ sql_cache.set_source_hash(&file_path, file_hash).unwrap();
+ let cache = IncrementalCacheInner::from_sql_incremental_cache(
+ sql_cache,
+ &[file_path.clone()],
+ );
+
+ assert!(cache.is_file_same(&file_path, "test"));
+ assert!(!cache.is_file_same(&file_path, "other"));
+
+ // just ensure this doesn't panic
+ cache.update_file(&file_path, "other");
+ }
+}
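A sketch of how a consumer like `deno fmt` might drive the IncrementalCache
(the state struct and db path are illustrative, not this commit's callers):

    use std::path::PathBuf;

    #[derive(serde::Serialize)]
    struct FmtState {
      use_tabs: bool, // hypothetical config field hashed into state_hash
    }

    async fn format_files(files: Vec<PathBuf>) {
      let cache = IncrementalCache::new(
        &PathBuf::from("/deno_dir/fmt_incremental_cache_v1"),
        &FmtState { use_tabs: false },
        &files,
      );
      for path in &files {
        let text = std::fs::read_to_string(path).unwrap();
        if cache.is_file_same(path, &text) {
          continue; // unchanged since the last successful run
        }
        // ... format the file and write it back ...
        cache.update_file(path, &text);
      }
      // Flush pending writes queued on the background sqlite task.
      cache.wait_completion().await;
    }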
diff --git a/cli/cache/mod.rs b/cli/cache/mod.rs
new file mode 100644
index 000000000..f363d8fa8
--- /dev/null
+++ b/cli/cache/mod.rs
@@ -0,0 +1,150 @@
+// Copyright 2018-2022 the Deno authors. All rights reserved. MIT license.
+
+use crate::errors::get_error_class_name;
+use crate::file_fetcher::FileFetcher;
+
+use deno_core::error::AnyError;
+use deno_core::futures::FutureExt;
+use deno_core::serde::Deserialize;
+use deno_core::serde::Serialize;
+use deno_core::ModuleSpecifier;
+use deno_graph::source::CacheInfo;
+use deno_graph::source::LoadFuture;
+use deno_graph::source::LoadResponse;
+use deno_graph::source::Loader;
+use deno_runtime::permissions::Permissions;
+use std::sync::Arc;
+
+mod check;
+mod common;
+mod disk_cache;
+mod emit;
+mod incremental;
+
+pub use check::TypeCheckCache;
+pub use disk_cache::DiskCache;
+pub use emit::EmitCache;
+pub use emit::SpecifierEmitCacheData;
+pub use incremental::IncrementalCache;
+
+#[derive(Debug, Deserialize, Serialize)]
+pub struct EmitMetadata {
+ pub version_hash: String,
+}
+
+pub enum CacheType {
+ Emit,
+ SourceMap,
+ Version,
+}
+
+/// A trait which provides a concise interface for getting and setting
+/// values in a cache.
+pub trait Cacher {
+ /// Get a value from the cache.
+ fn get(
+ &self,
+ cache_type: CacheType,
+ specifier: &ModuleSpecifier,
+ ) -> Option<String>;
+ /// Set a value in the cache.
+ fn set(
+ &self,
+ cache_type: CacheType,
+ specifier: &ModuleSpecifier,
+ value: String,
+ ) -> Result<(), AnyError>;
+}
+
+/// A "wrapper" for the FileFetcher and DiskCache for the Deno CLI that provides
+/// a concise interface to the DENO_DIR when building module graphs.
+pub struct FetchCacher {
+ disk_cache: DiskCache,
+ dynamic_permissions: Permissions,
+ file_fetcher: Arc<FileFetcher>,
+ root_permissions: Permissions,
+}
+
+impl FetchCacher {
+ pub fn new(
+ disk_cache: DiskCache,
+ file_fetcher: FileFetcher,
+ root_permissions: Permissions,
+ dynamic_permissions: Permissions,
+ ) -> Self {
+ let file_fetcher = Arc::new(file_fetcher);
+
+ Self {
+ disk_cache,
+ dynamic_permissions,
+ file_fetcher,
+ root_permissions,
+ }
+ }
+}
+
+impl Loader for FetchCacher {
+ fn get_cache_info(&self, specifier: &ModuleSpecifier) -> Option<CacheInfo> {
+ let local = self.file_fetcher.get_local_path(specifier)?;
+ if local.is_file() {
+ let location = &self.disk_cache.location;
+ let emit = self
+ .disk_cache
+ .get_cache_filename_with_extension(specifier, "js")
+ .map(|p| location.join(p))
+ .filter(|p| p.is_file());
+ let map = self
+ .disk_cache
+ .get_cache_filename_with_extension(specifier, "js.map")
+ .map(|p| location.join(p))
+ .filter(|p| p.is_file());
+ Some(CacheInfo {
+ local: Some(local),
+ emit,
+ map,
+ })
+ } else {
+ None
+ }
+ }
+
+ fn load(
+ &mut self,
+ specifier: &ModuleSpecifier,
+ is_dynamic: bool,
+ ) -> LoadFuture {
+ let specifier = specifier.clone();
+ let mut permissions = if is_dynamic {
+ self.dynamic_permissions.clone()
+ } else {
+ self.root_permissions.clone()
+ };
+ let file_fetcher = self.file_fetcher.clone();
+
+ async move {
+ file_fetcher
+ .fetch(&specifier, &mut permissions)
+ .await
+ .map_or_else(
+ |err| {
+ if let Some(err) = err.downcast_ref::<std::io::Error>() {
+ if err.kind() == std::io::ErrorKind::NotFound {
+ return Ok(None);
+ }
+ } else if get_error_class_name(&err) == "NotFound" {
+ return Ok(None);
+ }
+ Err(err)
+ },
+ |file| {
+ Ok(Some(LoadResponse::Module {
+ specifier: file.specifier,
+ maybe_headers: file.maybe_headers,
+ content: file.source,
+ }))
+ },
+ )
+ }
+ .boxed()
+ }
+}
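FetchCacher is what hands the DiskCache and FileFetcher to deno_graph as a
Loader. A construction sketch (the permission values are illustrative):

    use deno_runtime::permissions::Permissions;

    fn make_loader(disk_cache: DiskCache, file_fetcher: FileFetcher) -> FetchCacher {
      FetchCacher::new(
        disk_cache,
        file_fetcher,
        Permissions::allow_all(), // root permissions
        Permissions::allow_all(), // permissions applied to dynamic imports
      )
    }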