From 0c87dd1e9898d7ac93e274d3611ee491a107d47a Mon Sep 17 00:00:00 2001 From: David Sherret Date: Tue, 12 Jul 2022 18:58:39 -0400 Subject: perf: use emit from swc instead of tsc (#15118) --- cli/cache.rs | 140 -------- cli/cache/check.rs | 215 ++++++++++++ cli/cache/common.rs | 31 ++ cli/cache/disk_cache.rs | 388 +++++++++++++++++++++ cli/cache/emit.rs | 71 ++++ cli/cache/incremental.rs | 350 +++++++++++++++++++ cli/cache/mod.rs | 150 ++++++++ cli/deno_dir.rs | 9 +- cli/disk_cache.rs | 387 -------------------- cli/emit.rs | 329 +++++------------ cli/graph_util.rs | 6 +- cli/main.rs | 11 +- cli/proc_state.rs | 70 ++-- cli/tests/integration/check_tests.rs | 63 ++++ cli/tests/integration/mod.rs | 46 --- cli/tests/integration/run_tests.rs | 82 +++-- .../testdata/check/cache_config_on_off/deno.json | 5 + .../testdata/check/cache_config_on_off/main.ts | 1 + cli/tests/testdata/coverage/branch_expected.lcov | 4 +- cli/tests/testdata/coverage/branch_expected.out | 3 +- cli/tests/testdata/coverage/complex_expected.lcov | 22 +- cli/tests/testdata/coverage/complex_expected.out | 4 +- cli/tests/testdata/run/remote_type_error/main.ts | 3 + cli/tests/testdata/run/remote_type_error/remote.ts | 5 + cli/tools/coverage/mod.rs | 14 +- cli/tools/fmt.rs | 2 +- cli/tools/incremental_cache.rs | 371 -------------------- cli/tools/lint.rs | 2 +- cli/tools/mod.rs | 1 - cli/tsc.rs | 112 +----- cli/tsc/99_main_compiler.js | 16 +- 31 files changed, 1526 insertions(+), 1387 deletions(-) delete mode 100644 cli/cache.rs create mode 100644 cli/cache/check.rs create mode 100644 cli/cache/common.rs create mode 100644 cli/cache/disk_cache.rs create mode 100644 cli/cache/emit.rs create mode 100644 cli/cache/incremental.rs create mode 100644 cli/cache/mod.rs delete mode 100644 cli/disk_cache.rs create mode 100644 cli/tests/testdata/check/cache_config_on_off/deno.json create mode 100644 cli/tests/testdata/check/cache_config_on_off/main.ts create mode 100644 cli/tests/testdata/run/remote_type_error/main.ts 
create mode 100644 cli/tests/testdata/run/remote_type_error/remote.ts delete mode 100644 cli/tools/incremental_cache.rs (limited to 'cli') diff --git a/cli/cache.rs b/cli/cache.rs deleted file mode 100644 index bdb38e393..000000000 --- a/cli/cache.rs +++ /dev/null @@ -1,140 +0,0 @@ -// Copyright 2018-2022 the Deno authors. All rights reserved. MIT license. - -use crate::disk_cache::DiskCache; -use crate::errors::get_error_class_name; -use crate::file_fetcher::FileFetcher; - -use deno_core::error::AnyError; -use deno_core::futures::FutureExt; -use deno_core::serde::Deserialize; -use deno_core::serde::Serialize; -use deno_core::ModuleSpecifier; -use deno_graph::source::CacheInfo; -use deno_graph::source::LoadFuture; -use deno_graph::source::LoadResponse; -use deno_graph::source::Loader; -use deno_runtime::permissions::Permissions; -use std::sync::Arc; - -#[derive(Debug, Deserialize, Serialize)] -pub struct EmitMetadata { - pub version_hash: String, -} - -pub enum CacheType { - Emit, - SourceMap, - TypeScriptBuildInfo, - Version, -} - -/// A trait which provides a concise implementation to getting and setting -/// values in a cache. -pub trait Cacher { - /// Get a value from the cache. - fn get( - &self, - cache_type: CacheType, - specifier: &ModuleSpecifier, - ) -> Option; - /// Set a value in the cache. - fn set( - &self, - cache_type: CacheType, - specifier: &ModuleSpecifier, - value: String, - ) -> Result<(), AnyError>; -} - -/// A "wrapper" for the FileFetcher and DiskCache for the Deno CLI that provides -/// a concise interface to the DENO_DIR when building module graphs. 
-pub struct FetchCacher { - disk_cache: DiskCache, - dynamic_permissions: Permissions, - file_fetcher: Arc, - root_permissions: Permissions, -} - -impl FetchCacher { - pub fn new( - disk_cache: DiskCache, - file_fetcher: FileFetcher, - root_permissions: Permissions, - dynamic_permissions: Permissions, - ) -> Self { - let file_fetcher = Arc::new(file_fetcher); - - Self { - disk_cache, - dynamic_permissions, - file_fetcher, - root_permissions, - } - } -} - -impl Loader for FetchCacher { - fn get_cache_info(&self, specifier: &ModuleSpecifier) -> Option { - let local = self.file_fetcher.get_local_path(specifier)?; - if local.is_file() { - let location = &self.disk_cache.location; - let emit = self - .disk_cache - .get_cache_filename_with_extension(specifier, "js") - .map(|p| location.join(p)) - .filter(|p| p.is_file()); - let map = self - .disk_cache - .get_cache_filename_with_extension(specifier, "js.map") - .map(|p| location.join(p)) - .filter(|p| p.is_file()); - Some(CacheInfo { - local: Some(local), - emit, - map, - }) - } else { - None - } - } - - fn load( - &mut self, - specifier: &ModuleSpecifier, - is_dynamic: bool, - ) -> LoadFuture { - let specifier = specifier.clone(); - let mut permissions = if is_dynamic { - self.dynamic_permissions.clone() - } else { - self.root_permissions.clone() - }; - let file_fetcher = self.file_fetcher.clone(); - - async move { - file_fetcher - .fetch(&specifier, &mut permissions) - .await - .map_or_else( - |err| { - if let Some(err) = err.downcast_ref::() { - if err.kind() == std::io::ErrorKind::NotFound { - return Ok(None); - } - } else if get_error_class_name(&err) == "NotFound" { - return Ok(None); - } - Err(err) - }, - |file| { - Ok(Some(LoadResponse::Module { - specifier: file.specifier, - maybe_headers: file.maybe_headers, - content: file.source, - })) - }, - ) - } - .boxed() - } -} diff --git a/cli/cache/check.rs b/cli/cache/check.rs new file mode 100644 index 000000000..0ff86ef6c --- /dev/null +++ b/cli/cache/check.rs @@ 
-0,0 +1,215 @@ +// Copyright 2018-2022 the Deno authors. All rights reserved. MIT license. + +use std::path::Path; + +use deno_ast::ModuleSpecifier; +use deno_core::anyhow::Context; +use deno_core::error::AnyError; +use deno_runtime::deno_webstorage::rusqlite::params; +use deno_runtime::deno_webstorage::rusqlite::Connection; + +use super::common::run_sqlite_pragma; + +/// The cache used to tell whether type checking should occur again. +/// +/// This simply stores a hash of the inputs of each successful type check +/// and only clears them out when changing CLI versions. +pub struct TypeCheckCache { + conn: Connection, +} + +impl TypeCheckCache { + pub fn new(db_file_path: &Path) -> Result { + let conn = Connection::open(db_file_path).with_context(|| { + format!( + concat!( + "Error opening type checking cache at {} -- ", + "Perhaps it's corrupt. Maybe try deleting it." + ), + db_file_path.display() + ) + })?; + Self::from_connection(conn, crate::version::deno()) + } + + fn from_connection( + conn: Connection, + cli_version: String, + ) -> Result { + run_sqlite_pragma(&conn)?; + create_tables(&conn, cli_version)?; + + Ok(Self { conn }) + } + + pub fn has_check_hash(&self, hash: u64) -> bool { + match self.hash_check_hash_result(hash) { + Ok(val) => val, + Err(err) => { + if cfg!(debug_assertions) { + panic!("Error retrieving hash: {}", err); + } else { + log::debug!("Error retrieving hash: {}", err); + // fail silently when not debugging + false + } + } + } + } + + fn hash_check_hash_result(&self, hash: u64) -> Result { + let query = "SELECT * FROM checkcache WHERE check_hash=?1 LIMIT 1"; + let mut stmt = self.conn.prepare_cached(query)?; + Ok(stmt.exists(params![hash.to_string()])?) 
+ } + + pub fn add_check_hash(&self, check_hash: u64) { + if let Err(err) = self.add_check_hash_result(check_hash) { + if cfg!(debug_assertions) { + panic!("Error saving check hash: {}", err); + } else { + log::debug!("Error saving check hash: {}", err); + } + } + } + + fn add_check_hash_result(&self, check_hash: u64) -> Result<(), AnyError> { + let sql = " + INSERT OR REPLACE INTO + checkcache (check_hash) + VALUES + (?1)"; + let mut stmt = self.conn.prepare_cached(sql)?; + stmt.execute(params![&check_hash.to_string(),])?; + Ok(()) + } + + pub fn get_tsbuildinfo(&self, specifier: &ModuleSpecifier) -> Option { + let mut stmt = self + .conn + .prepare_cached("SELECT text FROM tsbuildinfo WHERE specifier=?1 LIMIT 1") + .ok()?; + let mut rows = stmt.query(params![specifier.to_string()]).ok()?; + let row = rows.next().ok().flatten()?; + + row.get(0).ok() + } + + pub fn set_tsbuildinfo(&self, specifier: &ModuleSpecifier, text: &str) { + if let Err(err) = self.set_tsbuildinfo_result(specifier, text) { + // should never error here, but if it ever does don't fail + if cfg!(debug_assertions) { + panic!("Error saving tsbuildinfo: {}", err); + } else { + log::debug!("Error saving tsbuildinfo: {}", err); + } + } + } + + fn set_tsbuildinfo_result( + &self, + specifier: &ModuleSpecifier, + text: &str, + ) -> Result<(), AnyError> { + let mut stmt = self.conn.prepare_cached( + "INSERT OR REPLACE INTO tsbuildinfo (specifier, text) VALUES (?1, ?2)", + )?; + stmt.execute(params![specifier.to_string(), text])?; + Ok(()) + } +} + +fn create_tables( + conn: &Connection, + cli_version: String, +) -> Result<(), AnyError> { + // INT doesn't store up to u64, so use TEXT + conn.execute( + "CREATE TABLE IF NOT EXISTS checkcache ( + check_hash TEXT PRIMARY KEY + )", + [], + )?; + conn.execute( + "CREATE TABLE IF NOT EXISTS tsbuildinfo ( + specifier TEXT PRIMARY KEY, + text TEXT NOT NULL + )", + [], + )?; + conn.execute( + "CREATE TABLE IF NOT EXISTS info ( + key TEXT PRIMARY KEY, + value TEXT 
NOT NULL + )", + [], + )?; + + // delete the cache when the CLI version changes + let data_cli_version: Option = conn + .query_row( + "SELECT value FROM info WHERE key='CLI_VERSION' LIMIT 1", + [], + |row| row.get(0), + ) + .ok(); + if data_cli_version != Some(cli_version.to_string()) { + conn.execute("DELETE FROM checkcache", params![])?; + conn.execute("DELETE FROM tsbuildinfo", params![])?; + let mut stmt = conn + .prepare("INSERT OR REPLACE INTO info (key, value) VALUES (?1, ?2)")?; + stmt.execute(params!["CLI_VERSION", &cli_version])?; + } + + Ok(()) +} + +#[cfg(test)] +mod test { + use super::*; + + #[test] + pub fn check_cache_general_use() { + let conn = Connection::open_in_memory().unwrap(); + let cache = + TypeCheckCache::from_connection(conn, "1.0.0".to_string()).unwrap(); + + assert!(!cache.has_check_hash(1)); + cache.add_check_hash(1); + assert!(cache.has_check_hash(1)); + assert!(!cache.has_check_hash(2)); + + let specifier1 = ModuleSpecifier::parse("file:///test.json").unwrap(); + assert_eq!(cache.get_tsbuildinfo(&specifier1), None); + cache.set_tsbuildinfo(&specifier1, "test"); + assert_eq!(cache.get_tsbuildinfo(&specifier1), Some("test".to_string())); + + // try changing the cli version (should clear) + let conn = cache.conn; + let cache = + TypeCheckCache::from_connection(conn, "2.0.0".to_string()).unwrap(); + assert!(!cache.has_check_hash(1)); + cache.add_check_hash(1); + assert!(cache.has_check_hash(1)); + assert_eq!(cache.get_tsbuildinfo(&specifier1), None); + cache.set_tsbuildinfo(&specifier1, "test"); + assert_eq!(cache.get_tsbuildinfo(&specifier1), Some("test".to_string())); + + // recreating the cache should not remove the data because the CLI version and state hash is the same + let conn = cache.conn; + let cache = + TypeCheckCache::from_connection(conn, "2.0.0".to_string()).unwrap(); + assert!(cache.has_check_hash(1)); + assert!(!cache.has_check_hash(2)); + assert_eq!(cache.get_tsbuildinfo(&specifier1), Some("test".to_string())); + + // 
adding when already exists should not cause issue + cache.add_check_hash(1); + assert!(cache.has_check_hash(1)); + cache.set_tsbuildinfo(&specifier1, "other"); + assert_eq!( + cache.get_tsbuildinfo(&specifier1), + Some("other".to_string()) + ); + } +} diff --git a/cli/cache/common.rs b/cli/cache/common.rs new file mode 100644 index 000000000..c01c1ab9a --- /dev/null +++ b/cli/cache/common.rs @@ -0,0 +1,31 @@ +// Copyright 2018-2022 the Deno authors. All rights reserved. MIT license. + +use deno_core::error::AnyError; +use deno_runtime::deno_webstorage::rusqlite::Connection; + +/// Very fast non-cryptographically secure hash. +pub fn fast_insecure_hash(bytes: &[u8]) -> u64 { + use std::hash::Hasher; + use twox_hash::XxHash64; + + let mut hasher = XxHash64::default(); + hasher.write(bytes); + hasher.finish() +} + +/// Runs the common sqlite pragma. +pub fn run_sqlite_pragma(conn: &Connection) -> Result<(), AnyError> { + // Enable write-ahead-logging and tweak some other stuff + let initial_pragmas = " + -- enable write-ahead-logging mode + PRAGMA journal_mode=WAL; + PRAGMA synchronous=NORMAL; + PRAGMA temp_store=memory; + PRAGMA page_size=4096; + PRAGMA mmap_size=6000000; + PRAGMA optimize; + "; + + conn.execute_batch(initial_pragmas)?; + Ok(()) +} diff --git a/cli/cache/disk_cache.rs b/cli/cache/disk_cache.rs new file mode 100644 index 000000000..01352c398 --- /dev/null +++ b/cli/cache/disk_cache.rs @@ -0,0 +1,388 @@ +// Copyright 2018-2022 the Deno authors. All rights reserved. MIT license. 
+ +use crate::fs_util; +use crate::http_cache::url_to_filename; + +use super::CacheType; +use super::Cacher; +use super::EmitMetadata; + +use deno_ast::ModuleSpecifier; +use deno_core::error::AnyError; +use deno_core::serde_json; +use deno_core::url::Host; +use deno_core::url::Url; +use std::ffi::OsStr; +use std::fs; +use std::io; +use std::path::Component; +use std::path::Path; +use std::path::PathBuf; +use std::path::Prefix; +use std::str; + +#[derive(Clone)] +pub struct DiskCache { + pub location: PathBuf, +} + +fn with_io_context>( + e: &std::io::Error, + context: T, +) -> std::io::Error { + std::io::Error::new(e.kind(), format!("{} (for '{}')", e, context.as_ref())) +} + +impl DiskCache { + /// `location` must be an absolute path. + pub fn new(location: &Path) -> Self { + assert!(location.is_absolute()); + Self { + location: location.to_owned(), + } + } + + /// Ensures the location of the cache. + pub fn ensure_dir_exists(&self, path: &Path) -> io::Result<()> { + if path.is_dir() { + return Ok(()); + } + fs::create_dir_all(&path).map_err(|e| { + io::Error::new(e.kind(), format!( + "Could not create TypeScript compiler cache location: {:?}\nCheck the permission of the directory.", + path + )) + }) + } + + fn get_cache_filename(&self, url: &Url) -> Option { + let mut out = PathBuf::new(); + + let scheme = url.scheme(); + out.push(scheme); + + match scheme { + "wasm" => { + let host = url.host_str().unwrap(); + let host_port = match url.port() { + // Windows doesn't support ":" in filenames, so we represent port using a + // special string. 
+ Some(port) => format!("{}_PORT{}", host, port), + None => host.to_string(), + }; + out.push(host_port); + + for path_seg in url.path_segments().unwrap() { + out.push(path_seg); + } + } + "http" | "https" | "data" | "blob" => out = url_to_filename(url)?, + "file" => { + let path = match url.to_file_path() { + Ok(path) => path, + Err(_) => return None, + }; + let mut path_components = path.components(); + + if cfg!(target_os = "windows") { + if let Some(Component::Prefix(prefix_component)) = + path_components.next() + { + // Windows doesn't support ":" in filenames, so we need to extract disk prefix + // Example: file:///C:/deno/js/unit_test_runner.ts + // it should produce: file\c\deno\js\unit_test_runner.ts + match prefix_component.kind() { + Prefix::Disk(disk_byte) | Prefix::VerbatimDisk(disk_byte) => { + let disk = (disk_byte as char).to_string(); + out.push(disk); + } + Prefix::UNC(server, share) + | Prefix::VerbatimUNC(server, share) => { + out.push("UNC"); + let host = Host::parse(server.to_str().unwrap()).unwrap(); + let host = host.to_string().replace(':', "_"); + out.push(host); + out.push(share); + } + _ => unreachable!(), + } + } + } + + // Must be relative, so strip forward slash + let mut remaining_components = path_components.as_path(); + if let Ok(stripped) = remaining_components.strip_prefix("/") { + remaining_components = stripped; + }; + + out = out.join(remaining_components); + } + _ => return None, + }; + + Some(out) + } + + pub fn get_cache_filename_with_extension( + &self, + url: &Url, + extension: &str, + ) -> Option { + let base = self.get_cache_filename(url)?; + + match base.extension() { + None => Some(base.with_extension(extension)), + Some(ext) => { + let original_extension = OsStr::to_str(ext).unwrap(); + let final_extension = format!("{}.{}", original_extension, extension); + Some(base.with_extension(final_extension)) + } + } + } + + pub fn get(&self, filename: &Path) -> std::io::Result> { + let path = self.location.join(filename); + 
fs::read(&path) + } + + pub fn set(&self, filename: &Path, data: &[u8]) -> std::io::Result<()> { + let path = self.location.join(filename); + match path.parent() { + Some(parent) => self.ensure_dir_exists(parent), + None => Ok(()), + }?; + fs_util::atomic_write_file(&path, data, crate::http_cache::CACHE_PERM) + .map_err(|e| with_io_context(&e, format!("{:#?}", &path))) + } + + fn get_emit_metadata( + &self, + specifier: &ModuleSpecifier, + ) -> Option { + let filename = self.get_cache_filename_with_extension(specifier, "meta")?; + let bytes = self.get(&filename).ok()?; + serde_json::from_slice(&bytes).ok() + } + + fn set_emit_metadata( + &self, + specifier: &ModuleSpecifier, + data: EmitMetadata, + ) -> Result<(), AnyError> { + let filename = self + .get_cache_filename_with_extension(specifier, "meta") + .unwrap(); + let bytes = serde_json::to_vec(&data)?; + self.set(&filename, &bytes).map_err(|e| e.into()) + } +} + +// todo(13302): remove and replace with sqlite database +impl Cacher for DiskCache { + fn get( + &self, + cache_type: CacheType, + specifier: &ModuleSpecifier, + ) -> Option { + let extension = match cache_type { + CacheType::Emit => "js", + CacheType::SourceMap => "js.map", + CacheType::Version => { + return self.get_emit_metadata(specifier).map(|d| d.version_hash) + } + }; + let filename = + self.get_cache_filename_with_extension(specifier, extension)?; + self + .get(&filename) + .ok() + .and_then(|b| String::from_utf8(b).ok()) + } + + fn set( + &self, + cache_type: CacheType, + specifier: &ModuleSpecifier, + value: String, + ) -> Result<(), AnyError> { + let extension = match cache_type { + CacheType::Emit => "js", + CacheType::SourceMap => "js.map", + CacheType::Version => { + let data = if let Some(mut data) = self.get_emit_metadata(specifier) { + data.version_hash = value; + data + } else { + EmitMetadata { + version_hash: value, + } + }; + return self.set_emit_metadata(specifier, data); + } + }; + let filename = self + 
.get_cache_filename_with_extension(specifier, extension) + .unwrap(); + self.set(&filename, value.as_bytes()).map_err(|e| e.into()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use test_util::TempDir; + + #[test] + fn test_create_cache_if_dir_exits() { + let cache_location = TempDir::new(); + let mut cache_path = cache_location.path().to_owned(); + cache_path.push("foo"); + let cache = DiskCache::new(&cache_path); + cache + .ensure_dir_exists(&cache.location) + .expect("Testing expect:"); + assert!(cache_path.is_dir()); + } + + #[test] + fn test_create_cache_if_dir_not_exits() { + let temp_dir = TempDir::new(); + let mut cache_location = temp_dir.path().to_owned(); + assert!(fs::remove_dir(&cache_location).is_ok()); + cache_location.push("foo"); + assert!(!cache_location.is_dir()); + let cache = DiskCache::new(&cache_location); + cache + .ensure_dir_exists(&cache.location) + .expect("Testing expect:"); + assert!(cache_location.is_dir()); + } + + #[test] + fn test_get_cache_filename() { + let cache_location = if cfg!(target_os = "windows") { + PathBuf::from(r"C:\deno_dir\") + } else { + PathBuf::from("/deno_dir/") + }; + + let cache = DiskCache::new(&cache_location); + + let mut test_cases = vec![ + ( + "http://deno.land/std/http/file_server.ts", + "http/deno.land/d8300752800fe3f0beda9505dc1c3b5388beb1ee45afd1f1e2c9fc0866df15cf", + ), + ( + "http://localhost:8000/std/http/file_server.ts", + "http/localhost_PORT8000/d8300752800fe3f0beda9505dc1c3b5388beb1ee45afd1f1e2c9fc0866df15cf", + ), + ( + "https://deno.land/std/http/file_server.ts", + "https/deno.land/d8300752800fe3f0beda9505dc1c3b5388beb1ee45afd1f1e2c9fc0866df15cf", + ), + ("wasm://wasm/d1c677ea", "wasm/wasm/d1c677ea"), + ]; + + if cfg!(target_os = "windows") { + test_cases.push(("file:///D:/a/1/s/format.ts", "file/D/a/1/s/format.ts")); + // IPv4 localhost + test_cases.push(( + "file://127.0.0.1/d$/a/1/s/format.ts", + "file/UNC/127.0.0.1/d$/a/1/s/format.ts", + )); + // IPv6 localhost + test_cases.push(( 
+ "file://[0:0:0:0:0:0:0:1]/d$/a/1/s/format.ts", + "file/UNC/[__1]/d$/a/1/s/format.ts", + )); + // shared folder + test_cases.push(( + "file://comp/t-share/a/1/s/format.ts", + "file/UNC/comp/t-share/a/1/s/format.ts", + )); + } else { + test_cases.push(( + "file:///std/http/file_server.ts", + "file/std/http/file_server.ts", + )); + } + + for test_case in &test_cases { + let cache_filename = + cache.get_cache_filename(&Url::parse(test_case.0).unwrap()); + assert_eq!(cache_filename, Some(PathBuf::from(test_case.1))); + } + } + + #[test] + fn test_get_cache_filename_with_extension() { + let p = if cfg!(target_os = "windows") { + "C:\\foo" + } else { + "/foo" + }; + let cache = DiskCache::new(&PathBuf::from(p)); + + let mut test_cases = vec![ + ( + "http://deno.land/std/http/file_server.ts", + "js", + "http/deno.land/d8300752800fe3f0beda9505dc1c3b5388beb1ee45afd1f1e2c9fc0866df15cf.js", + ), + ( + "http://deno.land/std/http/file_server.ts", + "js.map", + "http/deno.land/d8300752800fe3f0beda9505dc1c3b5388beb1ee45afd1f1e2c9fc0866df15cf.js.map", + ), + ]; + + if cfg!(target_os = "windows") { + test_cases.push(( + "file:///D:/std/http/file_server", + "js", + "file/D/std/http/file_server.js", + )); + } else { + test_cases.push(( + "file:///std/http/file_server", + "js", + "file/std/http/file_server.js", + )); + } + + for test_case in &test_cases { + assert_eq!( + cache.get_cache_filename_with_extension( + &Url::parse(test_case.0).unwrap(), + test_case.1 + ), + Some(PathBuf::from(test_case.2)) + ) + } + } + + #[test] + fn test_get_cache_filename_invalid_urls() { + let cache_location = if cfg!(target_os = "windows") { + PathBuf::from(r"C:\deno_dir\") + } else { + PathBuf::from("/deno_dir/") + }; + + let cache = DiskCache::new(&cache_location); + + let mut test_cases = vec!["unknown://localhost/test.ts"]; + + if cfg!(target_os = "windows") { + test_cases.push("file://"); + test_cases.push("file:///"); + } + + for test_case in &test_cases { + let cache_filename = + 
cache.get_cache_filename(&Url::parse(test_case).unwrap()); + assert_eq!(cache_filename, None); + } + } +} diff --git a/cli/cache/emit.rs b/cli/cache/emit.rs new file mode 100644 index 000000000..e1469b862 --- /dev/null +++ b/cli/cache/emit.rs @@ -0,0 +1,71 @@ +// Copyright 2018-2022 the Deno authors. All rights reserved. MIT license. + +use deno_ast::ModuleSpecifier; +use deno_core::error::AnyError; + +use super::CacheType; +use super::Cacher; + +/// Emit cache for a single file. +#[derive(Debug, Clone, PartialEq)] +pub struct SpecifierEmitCacheData { + pub source_hash: String, + pub text: String, + pub map: Option, +} + +pub trait EmitCache { + /// Gets the emit data from the cache. + fn get_emit_data( + &self, + specifier: &ModuleSpecifier, + ) -> Option; + /// Sets the emit data in the cache. + fn set_emit_data( + &self, + specifier: ModuleSpecifier, + data: SpecifierEmitCacheData, + ) -> Result<(), AnyError>; + /// Gets the stored hash of the source of the provider specifier + /// to tell if the emit is out of sync with the source. + /// TODO(13302): this is actually not reliable and should be removed + /// once switching to an sqlite db + fn get_source_hash(&self, specifier: &ModuleSpecifier) -> Option; + /// Gets the emitted JavaScript of the TypeScript source. 
+ /// TODO(13302): remove this once switching to an sqlite db + fn get_emit_text(&self, specifier: &ModuleSpecifier) -> Option; +} + +impl EmitCache for T { + fn get_emit_data( + &self, + specifier: &ModuleSpecifier, + ) -> Option { + Some(SpecifierEmitCacheData { + source_hash: self.get_source_hash(specifier)?, + text: self.get_emit_text(specifier)?, + map: self.get(CacheType::SourceMap, specifier), + }) + } + + fn get_source_hash(&self, specifier: &ModuleSpecifier) -> Option { + self.get(CacheType::Version, specifier) + } + + fn get_emit_text(&self, specifier: &ModuleSpecifier) -> Option { + self.get(CacheType::Emit, specifier) + } + + fn set_emit_data( + &self, + specifier: ModuleSpecifier, + data: SpecifierEmitCacheData, + ) -> Result<(), AnyError> { + self.set(CacheType::Version, &specifier, data.source_hash)?; + self.set(CacheType::Emit, &specifier, data.text)?; + if let Some(map) = data.map { + self.set(CacheType::SourceMap, &specifier, map)?; + } + Ok(()) + } +} diff --git a/cli/cache/incremental.rs b/cli/cache/incremental.rs new file mode 100644 index 000000000..b5fff0734 --- /dev/null +++ b/cli/cache/incremental.rs @@ -0,0 +1,350 @@ +// Copyright 2018-2022 the Deno authors. All rights reserved. MIT license. + +use std::collections::HashMap; +use std::path::Path; +use std::path::PathBuf; + +use deno_core::error::AnyError; +use deno_core::parking_lot::Mutex; +use deno_core::serde_json; +use deno_runtime::deno_webstorage::rusqlite::params; +use deno_runtime::deno_webstorage::rusqlite::Connection; +use serde::Serialize; +use tokio::task::JoinHandle; + +use super::common::fast_insecure_hash; +use super::common::run_sqlite_pragma; + +/// Cache used to skip formatting/linting a file again when we +/// know it is already formatted or has no lint diagnostics. 
+pub struct IncrementalCache(Option); + +impl IncrementalCache { + pub fn new( + db_file_path: &Path, + state: &TState, + initial_file_paths: &[PathBuf], + ) -> Self { + // if creating the incremental cache fails, then we + // treat it as not having a cache + let result = + IncrementalCacheInner::new(db_file_path, state, initial_file_paths); + IncrementalCache(match result { + Ok(inner) => Some(inner), + Err(err) => { + log::debug!("Creating the incremental cache failed.\n{:#}", err); + // Maybe the cache file is corrupt. Attempt to remove + // the cache file for next time + let _ = std::fs::remove_file(db_file_path); + None + } + }) + } + + pub fn is_file_same(&self, file_path: &Path, file_text: &str) -> bool { + if let Some(inner) = &self.0 { + inner.is_file_same(file_path, file_text) + } else { + false + } + } + + pub fn update_file(&self, file_path: &Path, file_text: &str) { + if let Some(inner) = &self.0 { + inner.update_file(file_path, file_text) + } + } + + pub async fn wait_completion(&self) { + if let Some(inner) = &self.0 { + inner.wait_completion().await; + } + } +} + +enum ReceiverMessage { + Update(PathBuf, u64), + Exit, +} + +struct IncrementalCacheInner { + previous_hashes: HashMap, + sender: tokio::sync::mpsc::UnboundedSender, + handle: Mutex>>, +} + +impl IncrementalCacheInner { + pub fn new( + db_file_path: &Path, + state: &TState, + initial_file_paths: &[PathBuf], + ) -> Result { + let state_hash = + fast_insecure_hash(serde_json::to_string(state).unwrap().as_bytes()); + let sql_cache = SqlIncrementalCache::new(db_file_path, state_hash)?; + Ok(Self::from_sql_incremental_cache( + sql_cache, + initial_file_paths, + )) + } + + fn from_sql_incremental_cache( + cache: SqlIncrementalCache, + initial_file_paths: &[PathBuf], + ) -> Self { + let mut previous_hashes = HashMap::new(); + for path in initial_file_paths { + if let Some(hash) = cache.get_source_hash(path) { + previous_hashes.insert(path.to_path_buf(), hash); + } + } + + let (sender, mut 
receiver) = + tokio::sync::mpsc::unbounded_channel::(); + + // sqlite isn't `Sync`, so we do all the updating on a dedicated task + let handle = tokio::task::spawn(async move { + while let Some(message) = receiver.recv().await { + match message { + ReceiverMessage::Update(path, hash) => { + let _ = cache.set_source_hash(&path, hash); + } + ReceiverMessage::Exit => break, + } + } + }); + + IncrementalCacheInner { + previous_hashes, + sender, + handle: Mutex::new(Some(handle)), + } + } + + pub fn is_file_same(&self, file_path: &Path, file_text: &str) -> bool { + match self.previous_hashes.get(file_path) { + Some(hash) => *hash == fast_insecure_hash(file_text.as_bytes()), + None => false, + } + } + + pub fn update_file(&self, file_path: &Path, file_text: &str) { + let hash = fast_insecure_hash(file_text.as_bytes()); + if let Some(previous_hash) = self.previous_hashes.get(file_path) { + if *previous_hash == hash { + return; // do not bother updating the db file because nothing has changed + } + } + let _ = self + .sender + .send(ReceiverMessage::Update(file_path.to_path_buf(), hash)); + } + + pub async fn wait_completion(&self) { + if self.sender.send(ReceiverMessage::Exit).is_err() { + return; + } + let handle = self.handle.lock().take(); + if let Some(handle) = handle { + handle.await.unwrap(); + } + } +} + +struct SqlIncrementalCache { + conn: Connection, + /// A hash of the state used to produce the formatting/linting other than + /// the CLI version. This state is a hash of the configuration and ensures + /// we format/lint a file when the configuration changes. 
+ state_hash: u64, +} + +impl SqlIncrementalCache { + pub fn new(db_file_path: &Path, state_hash: u64) -> Result { + let conn = Connection::open(db_file_path)?; + Self::from_connection(conn, state_hash, crate::version::deno()) + } + + fn from_connection( + conn: Connection, + state_hash: u64, + cli_version: String, + ) -> Result { + run_sqlite_pragma(&conn)?; + create_tables(&conn, cli_version)?; + + Ok(Self { conn, state_hash }) + } + + pub fn get_source_hash(&self, path: &Path) -> Option { + match self.get_source_hash_result(path) { + Ok(option) => option, + Err(err) => { + if cfg!(debug_assertions) { + panic!("Error retrieving hash: {}", err); + } else { + // fail silently when not debugging + None + } + } + } + } + + fn get_source_hash_result( + &self, + path: &Path, + ) -> Result, AnyError> { + let query = " + SELECT + source_hash + FROM + incrementalcache + WHERE + file_path=?1 + AND state_hash=?2 + LIMIT 1"; + let mut stmt = self.conn.prepare_cached(query)?; + let mut rows = stmt + .query(params![path.to_string_lossy(), self.state_hash.to_string()])?; + if let Some(row) = rows.next()? 
{ + let hash: String = row.get(0)?; + Ok(Some(hash.parse::()?)) + } else { + Ok(None) + } + } + + pub fn set_source_hash( + &self, + path: &Path, + source_hash: u64, + ) -> Result<(), AnyError> { + let sql = " + INSERT OR REPLACE INTO + incrementalcache (file_path, state_hash, source_hash) + VALUES + (?1, ?2, ?3)"; + let mut stmt = self.conn.prepare_cached(sql)?; + stmt.execute(params![ + path.to_string_lossy(), + &self.state_hash.to_string(), + &source_hash.to_string(), + ])?; + Ok(()) + } +} + +fn create_tables( + conn: &Connection, + cli_version: String, +) -> Result<(), AnyError> { + // INT doesn't store up to u64, so use TEXT + conn.execute( + "CREATE TABLE IF NOT EXISTS incrementalcache ( + file_path TEXT PRIMARY KEY, + state_hash TEXT NOT NULL, + source_hash TEXT NOT NULL + )", + [], + )?; + conn.execute( + "CREATE TABLE IF NOT EXISTS info ( + key TEXT PRIMARY KEY, + value TEXT NOT NULL + )", + [], + )?; + + // delete the cache when the CLI version changes + let data_cli_version: Option = conn + .query_row( + "SELECT value FROM info WHERE key='CLI_VERSION' LIMIT 1", + [], + |row| row.get(0), + ) + .ok(); + if data_cli_version != Some(cli_version.to_string()) { + conn.execute("DELETE FROM incrementalcache", params![])?; + let mut stmt = conn + .prepare("INSERT OR REPLACE INTO info (key, value) VALUES (?1, ?2)")?; + stmt.execute(params!["CLI_VERSION", &cli_version])?; + } + + Ok(()) +} + +#[cfg(test)] +mod test { + use std::path::PathBuf; + + use super::*; + + #[test] + pub fn sql_cache_general_use() { + let conn = Connection::open_in_memory().unwrap(); + let cache = + SqlIncrementalCache::from_connection(conn, 1, "1.0.0".to_string()) + .unwrap(); + let path = PathBuf::from("/mod.ts"); + + assert_eq!(cache.get_source_hash(&path), None); + cache.set_source_hash(&path, 2).unwrap(); + assert_eq!(cache.get_source_hash(&path), Some(2)); + + // try changing the cli version (should clear) + let conn = cache.conn; + let mut cache = + 
SqlIncrementalCache::from_connection(conn, 1, "2.0.0".to_string()) + .unwrap(); + assert_eq!(cache.get_source_hash(&path), None); + + // add back the file to the cache + cache.set_source_hash(&path, 2).unwrap(); + assert_eq!(cache.get_source_hash(&path), Some(2)); + + // try changing the state hash + cache.state_hash = 2; + assert_eq!(cache.get_source_hash(&path), None); + cache.state_hash = 1; + + // should return now that everything is back + assert_eq!(cache.get_source_hash(&path), Some(2)); + + // recreating the cache should not remove the data because the CLI version and state hash is the same + let conn = cache.conn; + let cache = + SqlIncrementalCache::from_connection(conn, 1, "2.0.0".to_string()) + .unwrap(); + assert_eq!(cache.get_source_hash(&path), Some(2)); + + // now try replacing and using another path + cache.set_source_hash(&path, 3).unwrap(); + cache.set_source_hash(&path, 4).unwrap(); + let path2 = PathBuf::from("/mod2.ts"); + cache.set_source_hash(&path2, 5).unwrap(); + assert_eq!(cache.get_source_hash(&path), Some(4)); + assert_eq!(cache.get_source_hash(&path2), Some(5)); + } + + #[tokio::test] + pub async fn incremental_cache_general_use() { + let conn = Connection::open_in_memory().unwrap(); + let sql_cache = + SqlIncrementalCache::from_connection(conn, 1, "1.0.0".to_string()) + .unwrap(); + let file_path = PathBuf::from("/mod.ts"); + let file_text = "test"; + let file_hash = fast_insecure_hash(file_text.as_bytes()); + sql_cache.set_source_hash(&file_path, file_hash).unwrap(); + let cache = IncrementalCacheInner::from_sql_incremental_cache( + sql_cache, + &[file_path.clone()], + ); + + assert!(cache.is_file_same(&file_path, "test")); + assert!(!cache.is_file_same(&file_path, "other")); + + // just ensure this doesn't panic + cache.update_file(&file_path, "other"); + } +} diff --git a/cli/cache/mod.rs b/cli/cache/mod.rs new file mode 100644 index 000000000..f363d8fa8 --- /dev/null +++ b/cli/cache/mod.rs @@ -0,0 +1,150 @@ +// Copyright 2018-2022 
the Deno authors. All rights reserved. MIT license. + +use crate::errors::get_error_class_name; +use crate::file_fetcher::FileFetcher; + +use deno_core::error::AnyError; +use deno_core::futures::FutureExt; +use deno_core::serde::Deserialize; +use deno_core::serde::Serialize; +use deno_core::ModuleSpecifier; +use deno_graph::source::CacheInfo; +use deno_graph::source::LoadFuture; +use deno_graph::source::LoadResponse; +use deno_graph::source::Loader; +use deno_runtime::permissions::Permissions; +use std::sync::Arc; + +mod check; +mod common; +mod disk_cache; +mod emit; +mod incremental; + +pub use check::TypeCheckCache; +pub use disk_cache::DiskCache; +pub use emit::EmitCache; +pub use emit::SpecifierEmitCacheData; +pub use incremental::IncrementalCache; + +#[derive(Debug, Deserialize, Serialize)] +pub struct EmitMetadata { + pub version_hash: String, +} + +pub enum CacheType { + Emit, + SourceMap, + Version, +} + +/// A trait which provides a concise implementation to getting and setting +/// values in a cache. +pub trait Cacher { + /// Get a value from the cache. + fn get( + &self, + cache_type: CacheType, + specifier: &ModuleSpecifier, + ) -> Option; + /// Set a value in the cache. + fn set( + &self, + cache_type: CacheType, + specifier: &ModuleSpecifier, + value: String, + ) -> Result<(), AnyError>; +} + +/// A "wrapper" for the FileFetcher and DiskCache for the Deno CLI that provides +/// a concise interface to the DENO_DIR when building module graphs. 
+pub struct FetchCacher { + disk_cache: DiskCache, + dynamic_permissions: Permissions, + file_fetcher: Arc, + root_permissions: Permissions, +} + +impl FetchCacher { + pub fn new( + disk_cache: DiskCache, + file_fetcher: FileFetcher, + root_permissions: Permissions, + dynamic_permissions: Permissions, + ) -> Self { + let file_fetcher = Arc::new(file_fetcher); + + Self { + disk_cache, + dynamic_permissions, + file_fetcher, + root_permissions, + } + } +} + +impl Loader for FetchCacher { + fn get_cache_info(&self, specifier: &ModuleSpecifier) -> Option { + let local = self.file_fetcher.get_local_path(specifier)?; + if local.is_file() { + let location = &self.disk_cache.location; + let emit = self + .disk_cache + .get_cache_filename_with_extension(specifier, "js") + .map(|p| location.join(p)) + .filter(|p| p.is_file()); + let map = self + .disk_cache + .get_cache_filename_with_extension(specifier, "js.map") + .map(|p| location.join(p)) + .filter(|p| p.is_file()); + Some(CacheInfo { + local: Some(local), + emit, + map, + }) + } else { + None + } + } + + fn load( + &mut self, + specifier: &ModuleSpecifier, + is_dynamic: bool, + ) -> LoadFuture { + let specifier = specifier.clone(); + let mut permissions = if is_dynamic { + self.dynamic_permissions.clone() + } else { + self.root_permissions.clone() + }; + let file_fetcher = self.file_fetcher.clone(); + + async move { + file_fetcher + .fetch(&specifier, &mut permissions) + .await + .map_or_else( + |err| { + if let Some(err) = err.downcast_ref::() { + if err.kind() == std::io::ErrorKind::NotFound { + return Ok(None); + } + } else if get_error_class_name(&err) == "NotFound" { + return Ok(None); + } + Err(err) + }, + |file| { + Ok(Some(LoadResponse::Module { + specifier: file.specifier, + maybe_headers: file.maybe_headers, + content: file.source, + })) + }, + ) + } + .boxed() + } +} diff --git a/cli/deno_dir.rs b/cli/deno_dir.rs index c6a5eca29..0a1864f2c 100644 --- a/cli/deno_dir.rs +++ b/cli/deno_dir.rs @@ -1,6 +1,7 @@ // 
Copyright 2018-2022 the Deno authors. All rights reserved. MIT license. -use crate::disk_cache::DiskCache; +use crate::cache::DiskCache; + use std::path::PathBuf; /// `DenoDir` serves as coordinator for multiple `DiskCache`s containing them @@ -56,6 +57,12 @@ impl DenoDir { // bump this version name to invalidate the entire cache self.root.join("lint_incremental_cache_v1") } + + /// Path for the incremental cache used for type checking. + pub fn type_checking_cache_db_file_path(&self) -> PathBuf { + // bump this version name to invalidate the entire cache + self.root.join("check_cache_v1") + } } /// To avoid the poorly managed dirs crate diff --git a/cli/disk_cache.rs b/cli/disk_cache.rs deleted file mode 100644 index c23f7f4df..000000000 --- a/cli/disk_cache.rs +++ /dev/null @@ -1,387 +0,0 @@ -// Copyright 2018-2022 the Deno authors. All rights reserved. MIT license. - -use crate::cache::CacheType; -use crate::cache::Cacher; -use crate::cache::EmitMetadata; -use crate::fs_util; -use crate::http_cache::url_to_filename; -use deno_ast::ModuleSpecifier; -use deno_core::error::AnyError; -use deno_core::serde_json; -use deno_core::url::{Host, Url}; -use std::ffi::OsStr; -use std::fs; -use std::io; -use std::path::Component; -use std::path::Path; -use std::path::PathBuf; -use std::path::Prefix; -use std::str; - -#[derive(Clone)] -pub struct DiskCache { - pub location: PathBuf, -} - -fn with_io_context>( - e: &std::io::Error, - context: T, -) -> std::io::Error { - std::io::Error::new(e.kind(), format!("{} (for '{}')", e, context.as_ref())) -} - -impl DiskCache { - /// `location` must be an absolute path. - pub fn new(location: &Path) -> Self { - assert!(location.is_absolute()); - Self { - location: location.to_owned(), - } - } - - /// Ensures the location of the cache. 
- pub fn ensure_dir_exists(&self, path: &Path) -> io::Result<()> { - if path.is_dir() { - return Ok(()); - } - fs::create_dir_all(&path).map_err(|e| { - io::Error::new(e.kind(), format!( - "Could not create TypeScript compiler cache location: {:?}\nCheck the permission of the directory.", - path - )) - }) - } - - fn get_cache_filename(&self, url: &Url) -> Option { - let mut out = PathBuf::new(); - - let scheme = url.scheme(); - out.push(scheme); - - match scheme { - "wasm" => { - let host = url.host_str().unwrap(); - let host_port = match url.port() { - // Windows doesn't support ":" in filenames, so we represent port using a - // special string. - Some(port) => format!("{}_PORT{}", host, port), - None => host.to_string(), - }; - out.push(host_port); - - for path_seg in url.path_segments().unwrap() { - out.push(path_seg); - } - } - "http" | "https" | "data" | "blob" => out = url_to_filename(url)?, - "file" => { - let path = match url.to_file_path() { - Ok(path) => path, - Err(_) => return None, - }; - let mut path_components = path.components(); - - if cfg!(target_os = "windows") { - if let Some(Component::Prefix(prefix_component)) = - path_components.next() - { - // Windows doesn't support ":" in filenames, so we need to extract disk prefix - // Example: file:///C:/deno/js/unit_test_runner.ts - // it should produce: file\c\deno\js\unit_test_runner.ts - match prefix_component.kind() { - Prefix::Disk(disk_byte) | Prefix::VerbatimDisk(disk_byte) => { - let disk = (disk_byte as char).to_string(); - out.push(disk); - } - Prefix::UNC(server, share) - | Prefix::VerbatimUNC(server, share) => { - out.push("UNC"); - let host = Host::parse(server.to_str().unwrap()).unwrap(); - let host = host.to_string().replace(':', "_"); - out.push(host); - out.push(share); - } - _ => unreachable!(), - } - } - } - - // Must be relative, so strip forward slash - let mut remaining_components = path_components.as_path(); - if let Ok(stripped) = remaining_components.strip_prefix("/") { - 
remaining_components = stripped; - }; - - out = out.join(remaining_components); - } - _ => return None, - }; - - Some(out) - } - - pub fn get_cache_filename_with_extension( - &self, - url: &Url, - extension: &str, - ) -> Option { - let base = self.get_cache_filename(url)?; - - match base.extension() { - None => Some(base.with_extension(extension)), - Some(ext) => { - let original_extension = OsStr::to_str(ext).unwrap(); - let final_extension = format!("{}.{}", original_extension, extension); - Some(base.with_extension(final_extension)) - } - } - } - - pub fn get(&self, filename: &Path) -> std::io::Result> { - let path = self.location.join(filename); - fs::read(&path) - } - - pub fn set(&self, filename: &Path, data: &[u8]) -> std::io::Result<()> { - let path = self.location.join(filename); - match path.parent() { - Some(parent) => self.ensure_dir_exists(parent), - None => Ok(()), - }?; - fs_util::atomic_write_file(&path, data, crate::http_cache::CACHE_PERM) - .map_err(|e| with_io_context(&e, format!("{:#?}", &path))) - } - - fn get_emit_metadata( - &self, - specifier: &ModuleSpecifier, - ) -> Option { - let filename = self.get_cache_filename_with_extension(specifier, "meta")?; - let bytes = self.get(&filename).ok()?; - serde_json::from_slice(&bytes).ok() - } - - fn set_emit_metadata( - &self, - specifier: &ModuleSpecifier, - data: EmitMetadata, - ) -> Result<(), AnyError> { - let filename = self - .get_cache_filename_with_extension(specifier, "meta") - .unwrap(); - let bytes = serde_json::to_vec(&data)?; - self.set(&filename, &bytes).map_err(|e| e.into()) - } -} - -// todo(13302): remove and replace with sqlite database -impl Cacher for DiskCache { - fn get( - &self, - cache_type: CacheType, - specifier: &ModuleSpecifier, - ) -> Option { - let extension = match cache_type { - CacheType::Emit => "js", - CacheType::SourceMap => "js.map", - CacheType::TypeScriptBuildInfo => "buildinfo", - CacheType::Version => { - return self.get_emit_metadata(specifier).map(|d| 
d.version_hash) - } - }; - let filename = - self.get_cache_filename_with_extension(specifier, extension)?; - self - .get(&filename) - .ok() - .and_then(|b| String::from_utf8(b).ok()) - } - - fn set( - &self, - cache_type: CacheType, - specifier: &ModuleSpecifier, - value: String, - ) -> Result<(), AnyError> { - let extension = match cache_type { - CacheType::Emit => "js", - CacheType::SourceMap => "js.map", - CacheType::TypeScriptBuildInfo => "buildinfo", - CacheType::Version => { - let data = if let Some(mut data) = self.get_emit_metadata(specifier) { - data.version_hash = value; - data - } else { - EmitMetadata { - version_hash: value, - } - }; - return self.set_emit_metadata(specifier, data); - } - }; - let filename = self - .get_cache_filename_with_extension(specifier, extension) - .unwrap(); - self.set(&filename, value.as_bytes()).map_err(|e| e.into()) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use test_util::TempDir; - - #[test] - fn test_create_cache_if_dir_exits() { - let cache_location = TempDir::new(); - let mut cache_path = cache_location.path().to_owned(); - cache_path.push("foo"); - let cache = DiskCache::new(&cache_path); - cache - .ensure_dir_exists(&cache.location) - .expect("Testing expect:"); - assert!(cache_path.is_dir()); - } - - #[test] - fn test_create_cache_if_dir_not_exits() { - let temp_dir = TempDir::new(); - let mut cache_location = temp_dir.path().to_owned(); - assert!(fs::remove_dir(&cache_location).is_ok()); - cache_location.push("foo"); - assert!(!cache_location.is_dir()); - let cache = DiskCache::new(&cache_location); - cache - .ensure_dir_exists(&cache.location) - .expect("Testing expect:"); - assert!(cache_location.is_dir()); - } - - #[test] - fn test_get_cache_filename() { - let cache_location = if cfg!(target_os = "windows") { - PathBuf::from(r"C:\deno_dir\") - } else { - PathBuf::from("/deno_dir/") - }; - - let cache = DiskCache::new(&cache_location); - - let mut test_cases = vec![ - ( - 
"http://deno.land/std/http/file_server.ts", - "http/deno.land/d8300752800fe3f0beda9505dc1c3b5388beb1ee45afd1f1e2c9fc0866df15cf", - ), - ( - "http://localhost:8000/std/http/file_server.ts", - "http/localhost_PORT8000/d8300752800fe3f0beda9505dc1c3b5388beb1ee45afd1f1e2c9fc0866df15cf", - ), - ( - "https://deno.land/std/http/file_server.ts", - "https/deno.land/d8300752800fe3f0beda9505dc1c3b5388beb1ee45afd1f1e2c9fc0866df15cf", - ), - ("wasm://wasm/d1c677ea", "wasm/wasm/d1c677ea"), - ]; - - if cfg!(target_os = "windows") { - test_cases.push(("file:///D:/a/1/s/format.ts", "file/D/a/1/s/format.ts")); - // IPv4 localhost - test_cases.push(( - "file://127.0.0.1/d$/a/1/s/format.ts", - "file/UNC/127.0.0.1/d$/a/1/s/format.ts", - )); - // IPv6 localhost - test_cases.push(( - "file://[0:0:0:0:0:0:0:1]/d$/a/1/s/format.ts", - "file/UNC/[__1]/d$/a/1/s/format.ts", - )); - // shared folder - test_cases.push(( - "file://comp/t-share/a/1/s/format.ts", - "file/UNC/comp/t-share/a/1/s/format.ts", - )); - } else { - test_cases.push(( - "file:///std/http/file_server.ts", - "file/std/http/file_server.ts", - )); - } - - for test_case in &test_cases { - let cache_filename = - cache.get_cache_filename(&Url::parse(test_case.0).unwrap()); - assert_eq!(cache_filename, Some(PathBuf::from(test_case.1))); - } - } - - #[test] - fn test_get_cache_filename_with_extension() { - let p = if cfg!(target_os = "windows") { - "C:\\foo" - } else { - "/foo" - }; - let cache = DiskCache::new(&PathBuf::from(p)); - - let mut test_cases = vec![ - ( - "http://deno.land/std/http/file_server.ts", - "js", - "http/deno.land/d8300752800fe3f0beda9505dc1c3b5388beb1ee45afd1f1e2c9fc0866df15cf.js", - ), - ( - "http://deno.land/std/http/file_server.ts", - "js.map", - "http/deno.land/d8300752800fe3f0beda9505dc1c3b5388beb1ee45afd1f1e2c9fc0866df15cf.js.map", - ), - ]; - - if cfg!(target_os = "windows") { - test_cases.push(( - "file:///D:/std/http/file_server", - "js", - "file/D/std/http/file_server.js", - )); - } else { - 
test_cases.push(( - "file:///std/http/file_server", - "js", - "file/std/http/file_server.js", - )); - } - - for test_case in &test_cases { - assert_eq!( - cache.get_cache_filename_with_extension( - &Url::parse(test_case.0).unwrap(), - test_case.1 - ), - Some(PathBuf::from(test_case.2)) - ) - } - } - - #[test] - fn test_get_cache_filename_invalid_urls() { - let cache_location = if cfg!(target_os = "windows") { - PathBuf::from(r"C:\deno_dir\") - } else { - PathBuf::from("/deno_dir/") - }; - - let cache = DiskCache::new(&cache_location); - - let mut test_cases = vec!["unknown://localhost/test.ts"]; - - if cfg!(target_os = "windows") { - test_cases.push("file://"); - test_cases.push("file:///"); - } - - for test_case in &test_cases { - let cache_filename = - cache.get_cache_filename(&Url::parse(test_case).unwrap()); - assert_eq!(cache_filename, None); - } - } -} diff --git a/cli/emit.rs b/cli/emit.rs index 329eb4f5d..a530dbcb9 100644 --- a/cli/emit.rs +++ b/cli/emit.rs @@ -9,8 +9,9 @@ use crate::args::ConfigFile; use crate::args::EmitConfigOptions; use crate::args::TsConfig; use crate::args::TypeCheckMode; -use crate::cache::CacheType; -use crate::cache::Cacher; +use crate::cache::EmitCache; +use crate::cache::SpecifierEmitCacheData; +use crate::cache::TypeCheckCache; use crate::colors; use crate::diagnostics::Diagnostics; use crate::graph_util::GraphData; @@ -35,97 +36,12 @@ use deno_graph::ModuleGraph; use deno_graph::ModuleGraphError; use deno_graph::ModuleKind; use deno_graph::ResolutionError; -use std::collections::HashMap; use std::collections::HashSet; use std::fmt; use std::result; use std::sync::Arc; use std::time::Instant; -/// Emit cache for a single file. -#[derive(Debug, Clone, PartialEq)] -pub struct SpecifierEmitCacheData { - pub source_hash: String, - pub text: String, - pub map: Option, -} - -pub trait EmitCache { - /// Gets the emit data from the cache. 
- fn get_emit_data( - &self, - specifier: &ModuleSpecifier, - ) -> Option; - /// Gets the stored hash of the source of the provider specifier - /// to tell if the emit is out of sync with the source. - /// TODO(13302): this is actually not reliable and should be removed - /// once switching to an sqlite db - fn get_source_hash(&self, specifier: &ModuleSpecifier) -> Option; - /// Gets the emitted JavaScript of the TypeScript source. - /// TODO(13302): remove this once switching to an sqlite db - fn get_emit_text(&self, specifier: &ModuleSpecifier) -> Option; - /// Sets the emit data in the cache. - fn set_emit_data( - &self, - specifier: ModuleSpecifier, - data: SpecifierEmitCacheData, - ) -> Result<(), AnyError>; - /// Gets the .tsbuildinfo file from the cache. - fn get_tsbuildinfo(&self, specifier: &ModuleSpecifier) -> Option; - /// Sets the .tsbuildinfo file in the cache. - fn set_tsbuildinfo( - &self, - specifier: ModuleSpecifier, - text: String, - ) -> Result<(), AnyError>; -} - -impl EmitCache for T { - fn get_emit_data( - &self, - specifier: &ModuleSpecifier, - ) -> Option { - Some(SpecifierEmitCacheData { - source_hash: self.get_source_hash(specifier)?, - text: self.get_emit_text(specifier)?, - map: self.get(CacheType::SourceMap, specifier), - }) - } - - fn get_source_hash(&self, specifier: &ModuleSpecifier) -> Option { - self.get(CacheType::Version, specifier) - } - - fn get_emit_text(&self, specifier: &ModuleSpecifier) -> Option { - self.get(CacheType::Emit, specifier) - } - - fn set_emit_data( - &self, - specifier: ModuleSpecifier, - data: SpecifierEmitCacheData, - ) -> Result<(), AnyError> { - self.set(CacheType::Version, &specifier, data.source_hash)?; - self.set(CacheType::Emit, &specifier, data.text)?; - if let Some(map) = data.map { - self.set(CacheType::SourceMap, &specifier, map)?; - } - Ok(()) - } - - fn get_tsbuildinfo(&self, specifier: &ModuleSpecifier) -> Option { - self.get(CacheType::TypeScriptBuildInfo, specifier) - } - - fn set_tsbuildinfo( 
- &self, - specifier: ModuleSpecifier, - text: String, - ) -> Result<(), AnyError> { - self.set(CacheType::TypeScriptBuildInfo, &specifier, text) - } -} - /// A structure representing stats from an emit operation for a graph. #[derive(Clone, Debug, Default, Eq, PartialEq)] pub struct Stats(pub Vec<(String, u32)>); @@ -236,13 +152,17 @@ pub fn get_ts_config_for_emit( let mut ts_config = TsConfig::new(json!({ "allowJs": true, "allowSyntheticDefaultImports": true, + "checkJs": false, "experimentalDecorators": true, "incremental": true, "jsx": "react", + "jsxFactory": "React.createElement", + "jsxFragmentFactory": "React.Fragment", "isolatedModules": true, "lib": lib, "module": "esnext", "resolveJsonModule": true, + "sourceMap": false, "strict": true, "target": "esnext", "tsBuildInfoFile": "deno:///.tsbuildinfo", @@ -378,10 +298,6 @@ pub struct CheckOptions { pub type_check_mode: TypeCheckMode, /// Set the debug flag on the TypeScript type checker. pub debug: bool, - /// If true, any files emitted will be cached, even if there are diagnostics - /// produced. If false, if there are diagnostics, caching emitted files will - /// be skipped. - pub emit_with_diagnostics: bool, /// The module specifier to the configuration file, passed to tsc so that /// configuration related diagnostics are properly formed. pub maybe_config_specifier: Option, @@ -389,46 +305,43 @@ pub struct CheckOptions { pub ts_config: TsConfig, /// If true, `Check ` will be written to stdout for each root. pub log_checks: bool, - /// If true, valid existing emits and `.tsbuildinfo` files will be ignored. + /// If true, valid `.tsbuildinfo` files will be ignored and type checking + /// will always occur. pub reload: bool, - pub reload_exclusions: HashSet, } -/// The result of a check or emit of a module graph. Note that the actual -/// emitted sources are stored in the cache and are not returned in the result. +/// The result of a check of a module graph. 
#[derive(Debug, Default)] -pub struct CheckEmitResult { +pub struct CheckResult { pub diagnostics: Diagnostics, pub stats: Stats, } -/// Given a set of roots and graph data, type check the module graph and -/// optionally emit modules, updating the cache as appropriate. Emitting is -/// determined by the `ts_config` supplied in the options, and if emitting, the -/// files are stored in the cache. +/// Given a set of roots and graph data, type check the module graph. /// /// It is expected that it is determined if a check and/or emit is validated /// before the function is called. -pub fn check_and_maybe_emit( +pub fn check( roots: &[(ModuleSpecifier, ModuleKind)], graph_data: Arc>, - cache: &dyn EmitCache, + cache: &TypeCheckCache, options: CheckOptions, -) -> Result { +) -> Result { let check_js = options.ts_config.get_check_js(); let segment_graph_data = { let graph_data = graph_data.read(); graph_data.graph_segment(roots).unwrap() }; - if valid_emit( - &segment_graph_data, - cache, - &options.ts_config, - options.reload, - &options.reload_exclusions, - ) { + let check_hash = match get_check_hash(&segment_graph_data, &options) { + CheckHashResult::NoFiles => return Ok(Default::default()), + CheckHashResult::Hash(hash) => hash, + }; + + // do not type check if we know this is type checked + if !options.reload && cache.has_check_hash(check_hash) { return Ok(Default::default()); } + let root_names = get_tsc_roots(roots, &segment_graph_data, check_js); if options.log_checks { for (root, _) in roots { @@ -454,12 +367,11 @@ pub fn check_and_maybe_emit( options.ts_config.as_bytes(), version::deno().as_bytes().to_owned(), ]; - let config_bytes = options.ts_config.as_bytes(); let response = tsc::exec(tsc::Request { config: options.ts_config, debug: options.debug, - graph_data: graph_data.clone(), + graph_data, hash_data, maybe_config_specifier: options.maybe_config_specifier, maybe_tsbuildinfo, @@ -478,105 +390,15 @@ pub fn check_and_maybe_emit( response.diagnostics }; - 
// sometimes we want to emit when there are diagnostics, and sometimes we - // don't. tsc will always return an emit if there are diagnostics - if (diagnostics.is_empty() || options.emit_with_diagnostics) - && !response.emitted_files.is_empty() - { - if let Some(info) = &response.maybe_tsbuildinfo { - // while we retrieve the build info for just the first module, it can be - // used for all the roots in the graph, so we will cache it for all roots - for (root, _) in roots { - cache.set_tsbuildinfo(root.clone(), info.to_string())?; - } - } - - struct SpecifierEmitData { - pub version_hash: String, - pub text: Option, - pub map: Option, - } - - impl SpecifierEmitData { - fn into_cache_data(self) -> Option { - self.text.map(|text| SpecifierEmitCacheData { - source_hash: self.version_hash, - text, - map: self.map, - }) - } - } - - // combine the emitted files into groups based on their specifier and media type - let mut emit_data_items: HashMap = - HashMap::with_capacity(response.emitted_files.len()); - for emit in response.emitted_files.into_iter() { - if let Some(specifiers) = emit.maybe_specifiers { - assert!(specifiers.len() == 1); - // The emitted specifier might not be the file specifier we want, so we - // resolve it via the graph. - let graph_data = graph_data.read(); - let specifier = graph_data.follow_redirect(&specifiers[0]); - let (source_bytes, media_type, ts_check) = - match graph_data.get(&specifier) { - Some(ModuleEntry::Module { - code, - media_type, - ts_check, - .. - }) => (code.as_bytes(), *media_type, *ts_check), - _ => { - log::debug!("skipping emit for {}", specifier); - continue; - } - }; - // Sometimes if `tsc` sees a CommonJS file or a JSON module, it will - // _helpfully_ output it, which we don't really want to do unless - // someone has enabled check_js. 
- if matches!(media_type, MediaType::Json) - || (!check_js - && !ts_check - && matches!( - media_type, - MediaType::JavaScript | MediaType::Cjs | MediaType::Mjs - )) - { - log::debug!("skipping emit for {}", specifier); - continue; - } - - let mut emit_data_item = emit_data_items - .entry(specifier.clone()) - .or_insert_with(|| SpecifierEmitData { - version_hash: get_version(source_bytes, &config_bytes), - text: None, - map: None, - }); - - match emit.media_type { - MediaType::JavaScript | MediaType::Mjs | MediaType::Cjs => { - emit_data_item.text = Some(emit.data); - } - MediaType::SourceMap => { - emit_data_item.map = Some(emit.data); - } - _ => unreachable!( - "unexpected media_type {} {}", - emit.media_type, specifier - ), - } - } - } + if let Some(tsbuildinfo) = response.maybe_tsbuildinfo { + cache.set_tsbuildinfo(&roots[0].0, &tsbuildinfo); + } - // now insert these items into the cache - for (specifier, data) in emit_data_items.into_iter() { - if let Some(cache_data) = data.into_cache_data() { - cache.set_emit_data(specifier, cache_data)?; - } - } + if diagnostics.is_empty() { + cache.add_check_hash(check_hash); } - Ok(CheckEmitResult { + Ok(CheckResult { diagnostics, stats: response.stats, }) @@ -590,12 +412,12 @@ pub struct EmitOptions { /// Given a module graph, emit any appropriate modules and cache them. // TODO(nayeemrmn): This would ideally take `GraphData` like -// `check_and_maybe_emit()`, but the AST isn't stored in that. Cleanup. +// `check()`, but the AST isn't stored in that. Cleanup. 
pub fn emit( graph: &ModuleGraph, cache: &dyn EmitCache, options: EmitOptions, -) -> Result { +) -> Result { let start = Instant::now(); let config_bytes = options.ts_config.as_bytes(); let include_js = options.ts_config.get_check_js(); @@ -623,7 +445,7 @@ pub fn emit( let transpiled_source = module .maybe_parsed_source .as_ref() - .map(|ps| ps.transpile(&emit_options)) + .map(|source| source.transpile(&emit_options)) .unwrap()?; emit_count += 1; cache.set_emit_data( @@ -642,26 +464,41 @@ pub fn emit( ("Total time".to_string(), start.elapsed().as_millis() as u32), ]); - Ok(CheckEmitResult { + Ok(CheckResult { diagnostics: Diagnostics::default(), stats, }) } -/// Check a module graph to determine if the graph contains anything that -/// is required to be emitted to be valid. It determines what modules in the -/// graph are emittable and for those that are emittable, if there is currently -/// a valid emit in the cache. -fn valid_emit( +enum CheckHashResult { + Hash(u64), + NoFiles, +} + +/// Gets a hash of the inputs for type checking. 
This can then +/// be used to tell if type checking can be skipped on a subsequent run. +fn get_check_hash( graph_data: &GraphData, - cache: &dyn EmitCache, - ts_config: &TsConfig, - reload: bool, - reload_exclusions: &HashSet, -) -> bool { - let config_bytes = ts_config.as_bytes(); - let check_js = ts_config.get_check_js(); - for (specifier, module_entry) in graph_data.entries() { + options: &CheckOptions, +) -> CheckHashResult { + // twox hash is insecure, but fast so it works for our purposes + use std::hash::Hasher; + use twox_hash::XxHash64; + + let mut hasher = XxHash64::default(); + hasher.write_u8(match options.type_check_mode { + TypeCheckMode::All => 0, + TypeCheckMode::Local => 1, + TypeCheckMode::None => 2, + }); + hasher.write(&options.ts_config.as_bytes()); + + let check_js = options.ts_config.get_check_js(); + let mut sorted_entries = graph_data.entries().collect::>(); + sorted_entries.sort_by_key(|(s, _)| s.as_str()); // make it deterministic + let mut has_file = false; + let mut has_file_to_type_check = false; + for (specifier, module_entry) in sorted_entries { if let ModuleEntry::Module { code, media_type, @@ -669,13 +506,26 @@ fn valid_emit( .. 
} = module_entry { + if *ts_check { + has_file_to_type_check = true; + } + match media_type { MediaType::TypeScript + | MediaType::Dts + | MediaType::Dmts + | MediaType::Dcts | MediaType::Mts | MediaType::Cts - | MediaType::Tsx - | MediaType::Jsx => {} - MediaType::JavaScript | MediaType::Mjs | MediaType::Cjs => { + | MediaType::Tsx => { + has_file = true; + has_file_to_type_check = true; + } + MediaType::JavaScript + | MediaType::Mjs + | MediaType::Cjs + | MediaType::Jsx => { + has_file = true; if !check_js && !ts_check { continue; } @@ -683,25 +533,20 @@ fn valid_emit( MediaType::Json | MediaType::TsBuildInfo | MediaType::SourceMap - | MediaType::Dts - | MediaType::Dmts - | MediaType::Dcts | MediaType::Wasm | MediaType::Unknown => continue, } - if reload && !reload_exclusions.contains(specifier) { - return false; - } - if let Some(source_hash) = cache.get_source_hash(specifier) { - if source_hash != get_version(code.as_bytes(), &config_bytes) { - return false; - } - } else { - return false; - } + hasher.write(specifier.as_str().as_bytes()); + hasher.write(code.as_bytes()); } } - true + + if !has_file || !check_js && !has_file_to_type_check { + // no files to type check + CheckHashResult::NoFiles + } else { + CheckHashResult::Hash(hasher.finish()) + } } /// An adapter struct to make a deno_graph::ModuleGraphError display as expected diff --git a/cli/graph_util.rs b/cli/graph_util.rs index 991115319..de418edd7 100644 --- a/cli/graph_util.rs +++ b/cli/graph_util.rs @@ -162,8 +162,10 @@ impl GraphData { } } - pub fn entries(&self) -> HashMap<&ModuleSpecifier, &ModuleEntry> { - self.modules.iter().collect() + pub fn entries( + &self, + ) -> impl Iterator { + self.modules.iter() } /// Walk dependencies from `roots` and return every encountered specifier. 
diff --git a/cli/main.rs b/cli/main.rs index c6188fec8..4ed44c9bd 100644 --- a/cli/main.rs +++ b/cli/main.rs @@ -9,7 +9,6 @@ mod compat; mod deno_dir; mod diagnostics; mod diff; -mod disk_cache; mod display; mod emit; mod errors; @@ -59,6 +58,7 @@ use crate::args::TypeCheckMode; use crate::args::UninstallFlags; use crate::args::UpgradeFlags; use crate::args::VendorFlags; +use crate::cache::TypeCheckCache; use crate::emit::TsConfigType; use crate::file_fetcher::File; use crate::file_watcher::ResolutionResult; @@ -661,19 +661,20 @@ async fn create_graph_and_maybe_check( eprintln!("{}", ignored_options); } let maybe_config_specifier = ps.options.maybe_config_file_specifier(); - let check_result = emit::check_and_maybe_emit( + // todo: don't use anything on failure + let cache = + TypeCheckCache::new(&ps.dir.type_checking_cache_db_file_path())?; + let check_result = emit::check( &graph.roots, Arc::new(RwLock::new(graph.as_ref().into())), - &ps.dir.gen_cache, + &cache, emit::CheckOptions { type_check_mode: ps.options.type_check_mode(), debug, - emit_with_diagnostics: false, maybe_config_specifier, ts_config: ts_config_result.ts_config, log_checks: true, reload: ps.options.reload_flag(), - reload_exclusions: Default::default(), }, )?; debug!("{}", check_result.stats); diff --git a/cli/proc_state.rs b/cli/proc_state.rs index 1a32b01a4..75630e47b 100644 --- a/cli/proc_state.rs +++ b/cli/proc_state.rs @@ -5,11 +5,12 @@ use crate::args::DenoSubcommand; use crate::args::Flags; use crate::args::TypeCheckMode; use crate::cache; +use crate::cache::EmitCache; +use crate::cache::TypeCheckCache; use crate::compat; use crate::compat::NodeEsmResolver; use crate::deno_dir; use crate::emit; -use crate::emit::EmitCache; use crate::emit::TsConfigType; use crate::emit::TsTypeLib; use crate::file_fetcher::FileFetcher; @@ -394,7 +395,7 @@ impl ProcState { // should be skipped. 
let reload_exclusions: HashSet = { let graph_data = self.graph_data.read(); - graph_data.entries().into_keys().cloned().collect() + graph_data.entries().map(|(s, _)| s).cloned().collect() }; { @@ -426,36 +427,45 @@ impl ProcState { log::warn!("{}", ignored_options); } - if self.options.type_check_mode() == TypeCheckMode::None { - let options = emit::EmitOptions { - ts_config: ts_config_result.ts_config, - reload: self.options.reload_flag(), - reload_exclusions, - }; - let emit_result = emit::emit(&graph, &self.dir.gen_cache, options)?; - log::debug!("{}", emit_result.stats); - } else { - let maybe_config_specifier = self.options.maybe_config_file_specifier(); - let options = emit::CheckOptions { - type_check_mode: self.options.type_check_mode(), - debug: self.options.log_level() == Some(log::Level::Debug), - emit_with_diagnostics: false, - maybe_config_specifier, - ts_config: ts_config_result.ts_config, - log_checks: true, - reload: self.options.reload_flag(), - reload_exclusions, + // start type checking if necessary + let type_checking_task = + if self.options.type_check_mode() != TypeCheckMode::None { + let maybe_config_specifier = self.options.maybe_config_file_specifier(); + let roots = roots.clone(); + let options = emit::CheckOptions { + type_check_mode: self.options.type_check_mode(), + debug: self.options.log_level() == Some(log::Level::Debug), + maybe_config_specifier, + ts_config: ts_config_result.ts_config.clone(), + log_checks: true, + reload: self.options.reload_flag() + && !roots.iter().all(|r| reload_exclusions.contains(&r.0)), + }; + // todo(THIS PR): don't use a cache on failure + let check_cache = + TypeCheckCache::new(&self.dir.type_checking_cache_db_file_path())?; + let graph_data = self.graph_data.clone(); + Some(tokio::task::spawn_blocking(move || { + emit::check(&roots, graph_data, &check_cache, options) + })) + } else { + None }; - let emit_result = emit::check_and_maybe_emit( - &roots, - self.graph_data.clone(), - &self.dir.gen_cache, - 
options, - )?; - if !emit_result.diagnostics.is_empty() { - return Err(anyhow!(emit_result.diagnostics)); + + let options = emit::EmitOptions { + ts_config: ts_config_result.ts_config, + reload: self.options.reload_flag(), + reload_exclusions, + }; + let emit_result = emit::emit(&graph, &self.dir.gen_cache, options)?; + log::debug!("{}", emit_result.stats); + + if let Some(type_checking_task) = type_checking_task { + let type_check_result = type_checking_task.await??; + if !type_check_result.diagnostics.is_empty() { + return Err(anyhow!(type_check_result.diagnostics)); } - log::debug!("{}", emit_result.stats); + log::debug!("{}", type_check_result.stats); } if self.options.type_check_mode() != TypeCheckMode::None { diff --git a/cli/tests/integration/check_tests.rs b/cli/tests/integration/check_tests.rs index 8000ddc9d..5ceaffe51 100644 --- a/cli/tests/integration/check_tests.rs +++ b/cli/tests/integration/check_tests.rs @@ -1,7 +1,11 @@ // Copyright 2018-2022 the Deno authors. All rights reserved. MIT license. 
+use std::process::Stdio; + use crate::itest; +use test_util as util; + itest!(_095_check_with_bare_import { args: "check 095_cache_with_bare_import.ts", output: "095_cache_with_bare_import.ts.out", @@ -43,3 +47,62 @@ itest!(declaration_header_file_with_no_exports { args: "check --quiet declaration_header_file_with_no_exports.ts", output_str: Some(""), }); + +#[test] +fn cache_switching_config_then_no_config() { + let deno_dir = util::new_deno_dir(); + assert!(does_type_checking(&deno_dir, true)); + assert!(does_type_checking(&deno_dir, false)); + + // should now not do type checking even when it changes + // configs because it previously did + assert!(!does_type_checking(&deno_dir, true)); + assert!(!does_type_checking(&deno_dir, false)); + + fn does_type_checking(deno_dir: &util::TempDir, with_config: bool) -> bool { + let mut cmd = util::deno_cmd_with_deno_dir(deno_dir); + cmd + .current_dir(util::testdata_path()) + .stderr(Stdio::piped()) + .arg("check") + .arg("check/cache_config_on_off/main.ts"); + if with_config { + cmd + .arg("--config") + .arg("check/cache_config_on_off/deno.json"); + } + let output = cmd.spawn().unwrap().wait_with_output().unwrap(); + assert!(output.status.success()); + + let stderr = std::str::from_utf8(&output.stderr).unwrap(); + stderr.contains("Check") + } +} + +#[test] +fn reload_flag() { + // should do type checking whenever someone specifies --reload + let deno_dir = util::new_deno_dir(); + assert!(does_type_checking(&deno_dir, false)); + assert!(!does_type_checking(&deno_dir, false)); + assert!(does_type_checking(&deno_dir, true)); + assert!(does_type_checking(&deno_dir, true)); + assert!(!does_type_checking(&deno_dir, false)); + + fn does_type_checking(deno_dir: &util::TempDir, reload: bool) -> bool { + let mut cmd = util::deno_cmd_with_deno_dir(deno_dir); + cmd + .current_dir(util::testdata_path()) + .stderr(Stdio::piped()) + .arg("check") + .arg("check/cache_config_on_off/main.ts"); + if reload { + cmd.arg("--reload"); + } + 
let output = cmd.spawn().unwrap().wait_with_output().unwrap(); + assert!(output.status.success()); + + let stderr = std::str::from_utf8(&output.stderr).unwrap(); + stderr.contains("Check") + } +} diff --git a/cli/tests/integration/mod.rs b/cli/tests/integration/mod.rs index 42ae24142..277b6a5d6 100644 --- a/cli/tests/integration/mod.rs +++ b/cli/tests/integration/mod.rs @@ -158,12 +158,6 @@ fn cache_test() { .expect("Failed to spawn script"); assert!(output.status.success()); - let out = std::str::from_utf8(&output.stderr).unwrap(); - // Check if file and dependencies are written successfully - assert!(out.contains("host.writeFile(\"deno://subdir/print_hello.js\")")); - assert!(out.contains("host.writeFile(\"deno://subdir/mod2.js\")")); - assert!(out.contains("host.writeFile(\"deno://006_url_imports.js\")")); - let prg = util::deno_exe_path(); let output = Command::new(&prg) .env("DENO_DIR", deno_dir.path()) @@ -369,46 +363,6 @@ fn ts_no_recheck_on_redirect() { assert!(std::str::from_utf8(&output.stderr).unwrap().is_empty()); } -#[test] -fn ts_reload() { - let hello_ts = util::testdata_path().join("002_hello.ts"); - assert!(hello_ts.is_file()); - - let deno_dir = TempDir::new(); - let mut initial = util::deno_cmd_with_deno_dir(&deno_dir) - .current_dir(util::testdata_path()) - .arg("cache") - .arg("--check=all") - .arg(&hello_ts) - .spawn() - .expect("failed to spawn script"); - let status_initial = - initial.wait().expect("failed to wait for child process"); - assert!(status_initial.success()); - - let output = util::deno_cmd_with_deno_dir(&deno_dir) - .current_dir(util::testdata_path()) - .arg("cache") - .arg("--check=all") - .arg("--reload") - .arg("-L") - .arg("debug") - .arg(&hello_ts) - .output() - .expect("failed to spawn script"); - - // check the output of the the bundle program. 
- let output_path = hello_ts.canonicalize().unwrap(); - assert!( - dbg!(std::str::from_utf8(&output.stderr).unwrap().trim()).contains( - &format!( - "host.getSourceFile(\"{}\", Latest)", - url::Url::from_file_path(&output_path).unwrap().as_str() - ) - ) - ); -} - #[test] fn timeout_clear() { // https://github.com/denoland/deno/issues/7599 diff --git a/cli/tests/integration/run_tests.rs b/cli/tests/integration/run_tests.rs index d9c20907d..e8bf3682a 100644 --- a/cli/tests/integration/run_tests.rs +++ b/cli/tests/integration/run_tests.rs @@ -2,8 +2,10 @@ use deno_core::url; use std::process::Command; +use std::process::Stdio; use test_util as util; use test_util::TempDir; +use util::assert_contains; itest!(stdout_write_all { args: "run --quiet stdout_write_all.ts", @@ -268,7 +270,7 @@ fn webstorage_location_shares_origin() { .arg("--location") .arg("https://example.com/a.ts") .arg("webstorage/fixture.ts") - .stdout(std::process::Stdio::piped()) + .stdout(Stdio::piped()) .spawn() .unwrap() .wait_with_output() @@ -283,7 +285,7 @@ fn webstorage_location_shares_origin() { .arg("--location") .arg("https://example.com/b.ts") .arg("webstorage/logger.ts") - .stdout(std::process::Stdio::piped()) + .stdout(Stdio::piped()) .spawn() .unwrap() .wait_with_output() @@ -305,7 +307,7 @@ fn webstorage_config_file() { .arg("--config") .arg("webstorage/config_a.jsonc") .arg("webstorage/fixture.ts") - .stdout(std::process::Stdio::piped()) + .stdout(Stdio::piped()) .spawn() .unwrap() .wait_with_output() @@ -320,7 +322,7 @@ fn webstorage_config_file() { .arg("--config") .arg("webstorage/config_b.jsonc") .arg("webstorage/logger.ts") - .stdout(std::process::Stdio::piped()) + .stdout(Stdio::piped()) .spawn() .unwrap() .wait_with_output() @@ -335,7 +337,7 @@ fn webstorage_config_file() { .arg("--config") .arg("webstorage/config_a.jsonc") .arg("webstorage/logger.ts") - .stdout(std::process::Stdio::piped()) + .stdout(Stdio::piped()) .spawn() .unwrap() .wait_with_output() @@ -359,7 +361,7 @@ fn 
webstorage_location_precedes_config() { .arg("--config") .arg("webstorage/config_a.jsonc") .arg("webstorage/fixture.ts") - .stdout(std::process::Stdio::piped()) + .stdout(Stdio::piped()) .spawn() .unwrap() .wait_with_output() @@ -376,7 +378,7 @@ fn webstorage_location_precedes_config() { .arg("--config") .arg("webstorage/config_b.jsonc") .arg("webstorage/logger.ts") - .stdout(std::process::Stdio::piped()) + .stdout(Stdio::piped()) .spawn() .unwrap() .wait_with_output() @@ -396,7 +398,7 @@ fn webstorage_main_module() { .current_dir(util::testdata_path()) .arg("run") .arg("webstorage/fixture.ts") - .stdout(std::process::Stdio::piped()) + .stdout(Stdio::piped()) .spawn() .unwrap() .wait_with_output() @@ -409,7 +411,7 @@ fn webstorage_main_module() { .current_dir(util::testdata_path()) .arg("run") .arg("webstorage/logger.ts") - .stdout(std::process::Stdio::piped()) + .stdout(Stdio::piped()) .spawn() .unwrap() .wait_with_output() @@ -422,7 +424,7 @@ fn webstorage_main_module() { .current_dir(util::testdata_path()) .arg("run") .arg("webstorage/fixture.ts") - .stdout(std::process::Stdio::piped()) + .stdout(Stdio::piped()) .spawn() .unwrap() .wait_with_output() @@ -1632,8 +1634,8 @@ fn no_validate_asm() { .current_dir(util::testdata_path()) .arg("run") .arg("no_validate_asm.js") - .stderr(std::process::Stdio::piped()) - .stdout(std::process::Stdio::piped()) + .stderr(Stdio::piped()) + .stdout(Stdio::piped()) .spawn() .unwrap() .wait_with_output() @@ -1650,7 +1652,7 @@ fn exec_path() { .arg("run") .arg("--allow-read") .arg("exec_path.ts") - .stdout(std::process::Stdio::piped()) + .stdout(Stdio::piped()) .spawn() .unwrap() .wait_with_output() @@ -1776,7 +1778,7 @@ fn rust_log() { .current_dir(util::testdata_path()) .arg("run") .arg("001_hello.js") - .stderr(std::process::Stdio::piped()) + .stderr(Stdio::piped()) .spawn() .unwrap() .wait_with_output() @@ -1790,7 +1792,7 @@ fn rust_log() { .arg("run") .arg("001_hello.js") .env("RUST_LOG", "debug") - 
.stderr(std::process::Stdio::piped()) + .stderr(Stdio::piped()) .spawn() .unwrap() .wait_with_output() @@ -1810,7 +1812,7 @@ fn dont_cache_on_check_fail() { .arg("--check=all") .arg("--reload") .arg("error_003_typescript.ts") - .stderr(std::process::Stdio::piped()) + .stderr(Stdio::piped()) .spawn() .unwrap() .wait_with_output() @@ -1824,7 +1826,7 @@ fn dont_cache_on_check_fail() { .arg("run") .arg("--check=all") .arg("error_003_typescript.ts") - .stderr(std::process::Stdio::piped()) + .stderr(Stdio::piped()) .spawn() .unwrap() .wait_with_output() @@ -2374,8 +2376,8 @@ fn issue12740() { .current_dir(util::testdata_path()) .arg("run") .arg(&mod1_path) - .stderr(std::process::Stdio::null()) - .stdout(std::process::Stdio::null()) + .stderr(Stdio::null()) + .stdout(Stdio::null()) .spawn() .unwrap() .wait() @@ -2387,8 +2389,8 @@ fn issue12740() { .current_dir(util::testdata_path()) .arg("run") .arg(&mod1_path) - .stderr(std::process::Stdio::null()) - .stdout(std::process::Stdio::null()) + .stderr(Stdio::null()) + .stdout(Stdio::null()) .spawn() .unwrap() .wait() @@ -2411,8 +2413,8 @@ fn issue12807() { .arg("run") .arg("--check") .arg(&mod1_path) - .stderr(std::process::Stdio::null()) - .stdout(std::process::Stdio::null()) + .stderr(Stdio::null()) + .stdout(Stdio::null()) .spawn() .unwrap() .wait() @@ -2425,8 +2427,8 @@ fn issue12807() { .arg("run") .arg("--check") .arg(&mod1_path) - .stderr(std::process::Stdio::null()) - .stdout(std::process::Stdio::null()) + .stderr(Stdio::null()) + .stdout(Stdio::null()) .spawn() .unwrap() .wait() @@ -2663,6 +2665,36 @@ itest!(js_root_with_ts_check { exit_code: 1, }); +#[test] +fn check_local_then_remote() { + let _http_guard = util::http_server(); + let deno_dir = util::new_deno_dir(); + let output = util::deno_cmd_with_deno_dir(&deno_dir) + .current_dir(util::testdata_path()) + .arg("run") + .arg("--check") + .arg("run/remote_type_error/main.ts") + .spawn() + .unwrap() + .wait_with_output() + .unwrap(); + 
assert!(output.status.success()); + let output = util::deno_cmd_with_deno_dir(&deno_dir) + .current_dir(util::testdata_path()) + .arg("run") + .arg("--check=all") + .arg("run/remote_type_error/main.ts") + .env("NO_COLOR", "1") + .stderr(Stdio::piped()) + .spawn() + .unwrap() + .wait_with_output() + .unwrap(); + assert!(!output.status.success()); + let stderr = std::str::from_utf8(&output.stderr).unwrap(); + assert_contains!(stderr, "Type 'string' is not assignable to type 'number'."); +} + itest!(no_prompt_flag { args: "run --quiet --unstable --no-prompt no_prompt.ts", output_str: Some(""), diff --git a/cli/tests/testdata/check/cache_config_on_off/deno.json b/cli/tests/testdata/check/cache_config_on_off/deno.json new file mode 100644 index 000000000..8ad9c9801 --- /dev/null +++ b/cli/tests/testdata/check/cache_config_on_off/deno.json @@ -0,0 +1,5 @@ +{ + "compilerOptions": { + "strict": false + } +} diff --git a/cli/tests/testdata/check/cache_config_on_off/main.ts b/cli/tests/testdata/check/cache_config_on_off/main.ts new file mode 100644 index 000000000..0f3785f91 --- /dev/null +++ b/cli/tests/testdata/check/cache_config_on_off/main.ts @@ -0,0 +1 @@ +console.log(5); diff --git a/cli/tests/testdata/coverage/branch_expected.lcov b/cli/tests/testdata/coverage/branch_expected.lcov index 31da70224..fb3454210 100644 --- a/cli/tests/testdata/coverage/branch_expected.lcov +++ b/cli/tests/testdata/coverage/branch_expected.lcov @@ -11,7 +11,7 @@ BRH:0 DA:1,1 DA:2,2 DA:3,2 -DA:4,2 +DA:4,0 DA:5,0 DA:6,0 DA:7,2 @@ -22,6 +22,6 @@ DA:12,0 DA:13,0 DA:14,0 DA:15,0 -LH:5 +LH:4 LF:14 end_of_record diff --git a/cli/tests/testdata/coverage/branch_expected.out b/cli/tests/testdata/coverage/branch_expected.out index 2ff5e911e..630ea93b2 100644 --- a/cli/tests/testdata/coverage/branch_expected.out +++ b/cli/tests/testdata/coverage/branch_expected.out @@ -1,4 +1,5 @@ -cover [WILDCARD]/coverage/branch.ts ... 35.714% (5/14) +cover [WILDCARD]/coverage/branch.ts ... 
28.571% (4/14) + 4 | } else { 5 | return false; 6 | } -----|----- diff --git a/cli/tests/testdata/coverage/complex_expected.lcov b/cli/tests/testdata/coverage/complex_expected.lcov index 7a3cd8d92..c6f9a2578 100644 --- a/cli/tests/testdata/coverage/complex_expected.lcov +++ b/cli/tests/testdata/coverage/complex_expected.lcov @@ -11,44 +11,62 @@ FNF:4 FNH:2 BRF:0 BRH:0 +DA:13,1 +DA:14,1 +DA:15,1 +DA:16,1 DA:17,2 DA:18,2 DA:19,2 DA:20,2 +DA:21,2 DA:22,2 DA:23,2 DA:24,2 DA:25,2 DA:26,2 DA:27,2 +DA:29,1 +DA:30,1 +DA:31,1 DA:32,1 DA:33,1 DA:34,1 DA:35,1 +DA:36,1 DA:37,2 DA:38,2 DA:39,2 DA:40,2 DA:41,2 DA:42,2 +DA:44,1 +DA:45,1 DA:46,0 DA:47,0 DA:48,0 DA:49,0 +DA:50,0 DA:51,0 DA:52,0 DA:53,0 DA:54,0 DA:55,0 DA:56,0 +DA:58,1 +DA:59,1 DA:60,1 +DA:62,1 +DA:63,1 DA:64,0 DA:65,0 DA:66,0 DA:67,0 DA:68,0 +DA:70,1 DA:71,0 +DA:73,1 DA:74,1 -LH:22 -LF:38 +LH:39 +LF:56 end_of_record diff --git a/cli/tests/testdata/coverage/complex_expected.out b/cli/tests/testdata/coverage/complex_expected.out index 3f7c89e9b..aeff4cd60 100644 --- a/cli/tests/testdata/coverage/complex_expected.out +++ b/cli/tests/testdata/coverage/complex_expected.out @@ -1,9 +1,9 @@ -cover [WILDCARD]/coverage/complex.ts ... 57.895% (22/38) +cover [WILDCARD]/coverage/complex.ts ... 
69.643% (39/56) 46 | export function unused( 47 | foo: string, 48 | bar: string, 49 | baz: string, ------|----- + 50 | ): Complex { 51 | return complex( 52 | foo, 53 | bar, diff --git a/cli/tests/testdata/run/remote_type_error/main.ts b/cli/tests/testdata/run/remote_type_error/main.ts new file mode 100644 index 000000000..00f8a52df --- /dev/null +++ b/cli/tests/testdata/run/remote_type_error/main.ts @@ -0,0 +1,3 @@ +import { doAction } from "http://localhost:4545/run/remote_type_error/remote.ts"; + +doAction(); diff --git a/cli/tests/testdata/run/remote_type_error/remote.ts b/cli/tests/testdata/run/remote_type_error/remote.ts new file mode 100644 index 000000000..6e9bf4adb --- /dev/null +++ b/cli/tests/testdata/run/remote_type_error/remote.ts @@ -0,0 +1,5 @@ +export function doAction() { + // this is an intentional type error + const val: number = "test"; + console.log(val); +} diff --git a/cli/tools/coverage/mod.rs b/cli/tools/coverage/mod.rs index bba7271f5..d2c6c1894 100644 --- a/cli/tools/coverage/mod.rs +++ b/cli/tools/coverage/mod.rs @@ -2,6 +2,7 @@ use crate::args::CoverageFlags; use crate::args::Flags; +use crate::cache::EmitCache; use crate::colors; use crate::fs_util::collect_files; use crate::proc_state::ProcState; @@ -676,16 +677,9 @@ pub async fn cover_files( | MediaType::Mts | MediaType::Cts | MediaType::Tsx => { - let emit_path = ps - .dir - .gen_cache - .get_cache_filename_with_extension(&file.specifier, "js") - .unwrap_or_else(|| { - unreachable!("Unable to get cache filename: {}", &file.specifier) - }); - match ps.dir.gen_cache.get(&emit_path) { - Ok(b) => String::from_utf8(b).unwrap(), - Err(_) => { + match ps.dir.gen_cache.get_emit_text(&file.specifier) { + Some(source) => source, + None => { return Err(anyhow!( "Missing transpiled source code for: \"{}\". 
Before generating coverage report, run `deno test --coverage` to ensure consistent state.", diff --git a/cli/tools/fmt.rs b/cli/tools/fmt.rs index 585bf6e92..334e46c34 100644 --- a/cli/tools/fmt.rs +++ b/cli/tools/fmt.rs @@ -39,7 +39,7 @@ use std::sync::atomic::AtomicUsize; use std::sync::atomic::Ordering; use std::sync::Arc; -use super::incremental_cache::IncrementalCache; +use crate::cache::IncrementalCache; /// Format JavaScript/TypeScript files. pub async fn format( diff --git a/cli/tools/incremental_cache.rs b/cli/tools/incremental_cache.rs deleted file mode 100644 index 476c46b29..000000000 --- a/cli/tools/incremental_cache.rs +++ /dev/null @@ -1,371 +0,0 @@ -use std::collections::HashMap; -use std::path::Path; -use std::path::PathBuf; - -use deno_core::error::AnyError; -use deno_core::parking_lot::Mutex; -use deno_core::serde_json; -use deno_runtime::deno_webstorage::rusqlite::params; -use deno_runtime::deno_webstorage::rusqlite::Connection; -use serde::Serialize; -use tokio::task::JoinHandle; - -/// Cache used to skip formatting/linting a file again when we -/// know it is already formatted or has no lint diagnostics. -pub struct IncrementalCache(Option); - -impl IncrementalCache { - pub fn new( - db_file_path: &Path, - state: &TState, - initial_file_paths: &[PathBuf], - ) -> Self { - // if creating the incremental cache fails, then we - // treat it as not having a cache - let result = - IncrementalCacheInner::new(db_file_path, state, initial_file_paths); - IncrementalCache(match result { - Ok(inner) => Some(inner), - Err(err) => { - log::debug!("Creating the incremental cache failed.\n{:#}", err); - // Maybe the cache file is corrupt. 
Attempt to remove - // the cache file for next time - let _ = std::fs::remove_file(db_file_path); - None - } - }) - } - - pub fn is_file_same(&self, file_path: &Path, file_text: &str) -> bool { - if let Some(inner) = &self.0 { - inner.is_file_same(file_path, file_text) - } else { - false - } - } - - pub fn update_file(&self, file_path: &Path, file_text: &str) { - if let Some(inner) = &self.0 { - inner.update_file(file_path, file_text) - } - } - - pub async fn wait_completion(&self) { - if let Some(inner) = &self.0 { - inner.wait_completion().await; - } - } -} - -enum ReceiverMessage { - Update(PathBuf, u64), - Exit, -} - -struct IncrementalCacheInner { - previous_hashes: HashMap, - sender: tokio::sync::mpsc::UnboundedSender, - handle: Mutex>>, -} - -impl IncrementalCacheInner { - pub fn new( - db_file_path: &Path, - state: &TState, - initial_file_paths: &[PathBuf], - ) -> Result { - let state_hash = - fast_insecure_hash(serde_json::to_string(state).unwrap().as_bytes()); - let sql_cache = SqlIncrementalCache::new(db_file_path, state_hash)?; - Ok(Self::from_sql_incremental_cache( - sql_cache, - initial_file_paths, - )) - } - - fn from_sql_incremental_cache( - cache: SqlIncrementalCache, - initial_file_paths: &[PathBuf], - ) -> Self { - let mut previous_hashes = HashMap::new(); - for path in initial_file_paths { - if let Some(hash) = cache.get_source_hash(path) { - previous_hashes.insert(path.to_path_buf(), hash); - } - } - - let (sender, mut receiver) = - tokio::sync::mpsc::unbounded_channel::(); - - // sqlite isn't `Sync`, so we do all the updating on a dedicated task - let handle = tokio::task::spawn(async move { - while let Some(message) = receiver.recv().await { - match message { - ReceiverMessage::Update(path, hash) => { - let _ = cache.set_source_hash(&path, hash); - } - ReceiverMessage::Exit => break, - } - } - }); - - IncrementalCacheInner { - previous_hashes, - sender, - handle: Mutex::new(Some(handle)), - } - } - - pub fn is_file_same(&self, file_path: 
&Path, file_text: &str) -> bool { - match self.previous_hashes.get(file_path) { - Some(hash) => *hash == fast_insecure_hash(file_text.as_bytes()), - None => false, - } - } - - pub fn update_file(&self, file_path: &Path, file_text: &str) { - let hash = fast_insecure_hash(file_text.as_bytes()); - if let Some(previous_hash) = self.previous_hashes.get(file_path) { - if *previous_hash == hash { - return; // do not bother updating the db file because nothing has changed - } - } - let _ = self - .sender - .send(ReceiverMessage::Update(file_path.to_path_buf(), hash)); - } - - pub async fn wait_completion(&self) { - if self.sender.send(ReceiverMessage::Exit).is_err() { - return; - } - let handle = self.handle.lock().take(); - if let Some(handle) = handle { - handle.await.unwrap(); - } - } -} - -struct SqlIncrementalCache { - conn: Connection, - /// A hash of the state used to produce the formatting/linting other than - /// the CLI version. This state is a hash of the configuration and ensures - /// we format/lint a file when the configuration changes. 
- state_hash: u64, -} - -impl SqlIncrementalCache { - pub fn new(db_file_path: &Path, state_hash: u64) -> Result { - let conn = Connection::open(db_file_path)?; - Self::from_connection(conn, state_hash, crate::version::deno()) - } - - fn from_connection( - conn: Connection, - state_hash: u64, - cli_version: String, - ) -> Result { - run_pragma(&conn)?; - create_tables(&conn, cli_version)?; - - Ok(Self { conn, state_hash }) - } - - pub fn get_source_hash(&self, path: &Path) -> Option { - match self.get_source_hash_result(path) { - Ok(option) => option, - Err(err) => { - if cfg!(debug_assertions) { - panic!("Error retrieving hash: {}", err); - } else { - // fail silently when not debugging - None - } - } - } - } - - fn get_source_hash_result( - &self, - path: &Path, - ) -> Result, AnyError> { - let query = " - SELECT - source_hash - FROM - incrementalcache - WHERE - file_path=?1 - AND state_hash=?2 - LIMIT 1"; - let mut stmt = self.conn.prepare_cached(query)?; - let mut rows = stmt - .query(params![path.to_string_lossy(), self.state_hash.to_string()])?; - if let Some(row) = rows.next()? 
{ - let hash: String = row.get(0)?; - Ok(Some(hash.parse::()?)) - } else { - Ok(None) - } - } - - pub fn set_source_hash( - &self, - path: &Path, - source_hash: u64, - ) -> Result<(), AnyError> { - let sql = " - INSERT OR REPLACE INTO - incrementalcache (file_path, state_hash, source_hash) - VALUES - (?1, ?2, ?3)"; - let mut stmt = self.conn.prepare_cached(sql)?; - stmt.execute(params![ - path.to_string_lossy(), - &self.state_hash.to_string(), - &source_hash.to_string(), - ])?; - Ok(()) - } -} - -fn run_pragma(conn: &Connection) -> Result<(), AnyError> { - // Enable write-ahead-logging and tweak some other stuff - let initial_pragmas = " - -- enable write-ahead-logging mode - PRAGMA journal_mode=WAL; - PRAGMA synchronous=NORMAL; - PRAGMA temp_store=memory; - PRAGMA page_size=4096; - PRAGMA mmap_size=6000000; - PRAGMA optimize; - "; - - conn.execute_batch(initial_pragmas)?; - Ok(()) -} - -fn create_tables( - conn: &Connection, - cli_version: String, -) -> Result<(), AnyError> { - // INT doesn't store up to u64, so use TEXT - conn.execute( - "CREATE TABLE IF NOT EXISTS incrementalcache ( - file_path TEXT PRIMARY KEY, - state_hash TEXT NOT NULL, - source_hash TEXT NOT NULL - )", - [], - )?; - conn.execute( - "CREATE TABLE IF NOT EXISTS info ( - key TEXT PRIMARY KEY, - value TEXT NOT NULL - )", - [], - )?; - - // delete the cache when the CLI version changes - let data_cli_version: Option = conn - .query_row( - "SELECT value FROM info WHERE key='CLI_VERSION' LIMIT 1", - [], - |row| row.get(0), - ) - .ok(); - if data_cli_version != Some(cli_version.to_string()) { - conn.execute("DELETE FROM incrementalcache", params![])?; - let mut stmt = conn - .prepare("INSERT OR REPLACE INTO info (key, value) VALUES (?1, ?2)")?; - stmt.execute(params!["CLI_VERSION", &cli_version])?; - } - - Ok(()) -} - -/// Very fast non-cryptographically secure hash. 
-fn fast_insecure_hash(bytes: &[u8]) -> u64 { - use std::hash::Hasher; - use twox_hash::XxHash64; - - let mut hasher = XxHash64::default(); - hasher.write(bytes); - hasher.finish() -} - -#[cfg(test)] -mod test { - use std::path::PathBuf; - - use super::*; - - #[test] - pub fn sql_cache_general_use() { - let conn = Connection::open_in_memory().unwrap(); - let cache = - SqlIncrementalCache::from_connection(conn, 1, "1.0.0".to_string()) - .unwrap(); - let path = PathBuf::from("/mod.ts"); - - assert_eq!(cache.get_source_hash(&path), None); - cache.set_source_hash(&path, 2).unwrap(); - assert_eq!(cache.get_source_hash(&path), Some(2)); - - // try changing the cli version (should clear) - let conn = cache.conn; - let mut cache = - SqlIncrementalCache::from_connection(conn, 1, "2.0.0".to_string()) - .unwrap(); - assert_eq!(cache.get_source_hash(&path), None); - - // add back the file to the cache - cache.set_source_hash(&path, 2).unwrap(); - assert_eq!(cache.get_source_hash(&path), Some(2)); - - // try changing the state hash - cache.state_hash = 2; - assert_eq!(cache.get_source_hash(&path), None); - cache.state_hash = 1; - - // should return now that everything is back - assert_eq!(cache.get_source_hash(&path), Some(2)); - - // recreating the cache should not remove the data because the CLI version and state hash is the same - let conn = cache.conn; - let cache = - SqlIncrementalCache::from_connection(conn, 1, "2.0.0".to_string()) - .unwrap(); - assert_eq!(cache.get_source_hash(&path), Some(2)); - - // now try replacing and using another path - cache.set_source_hash(&path, 3).unwrap(); - cache.set_source_hash(&path, 4).unwrap(); - let path2 = PathBuf::from("/mod2.ts"); - cache.set_source_hash(&path2, 5).unwrap(); - assert_eq!(cache.get_source_hash(&path), Some(4)); - assert_eq!(cache.get_source_hash(&path2), Some(5)); - } - - #[tokio::test] - pub async fn incremental_cache_general_use() { - let conn = Connection::open_in_memory().unwrap(); - let sql_cache = - 
SqlIncrementalCache::from_connection(conn, 1, "1.0.0".to_string()) - .unwrap(); - let file_path = PathBuf::from("/mod.ts"); - let file_text = "test"; - let file_hash = fast_insecure_hash(file_text.as_bytes()); - sql_cache.set_source_hash(&file_path, file_hash).unwrap(); - let cache = IncrementalCacheInner::from_sql_incremental_cache( - sql_cache, - &[file_path.clone()], - ); - - assert!(cache.is_file_same(&file_path, "test")); - assert!(!cache.is_file_same(&file_path, "other")); - - // just ensure this doesn't panic - cache.update_file(&file_path, "other"); - } -} diff --git a/cli/tools/lint.rs b/cli/tools/lint.rs index 61129b752..1c79ff8f6 100644 --- a/cli/tools/lint.rs +++ b/cli/tools/lint.rs @@ -41,7 +41,7 @@ use std::sync::atomic::Ordering; use std::sync::Arc; use std::sync::Mutex; -use super::incremental_cache::IncrementalCache; +use crate::cache::IncrementalCache; static STDIN_FILE_NAME: &str = "_stdin.ts"; diff --git a/cli/tools/mod.rs b/cli/tools/mod.rs index a6eaeb70e..7c5d79744 100644 --- a/cli/tools/mod.rs +++ b/cli/tools/mod.rs @@ -4,7 +4,6 @@ pub mod bench; pub mod coverage; pub mod doc; pub mod fmt; -pub mod incremental_cache; pub mod installer; pub mod lint; pub mod repl; diff --git a/cli/tsc.rs b/cli/tsc.rs index 4065c6354..e3001583b 100644 --- a/cli/tsc.rs +++ b/cli/tsc.rs @@ -252,8 +252,6 @@ pub struct Request { pub struct Response { /// Any diagnostics that have been returned from the checker. pub diagnostics: Diagnostics, - /// Any files that were emitted during the check. - pub emitted_files: Vec, /// If there was any build info associated with the exec request. pub maybe_tsbuildinfo: Option, /// Statistics from the check. 
@@ -263,7 +261,6 @@ pub struct Response { #[derive(Debug)] struct State { hash_data: Vec>, - emitted_files: Vec, graph_data: Arc>, maybe_config_specifier: Option, maybe_tsbuildinfo: Option, @@ -283,7 +280,6 @@ impl State { ) -> Self { State { hash_data, - emitted_files: Default::default(), graph_data, maybe_config_specifier, maybe_tsbuildinfo, @@ -337,10 +333,6 @@ struct EmitArgs { /// The _internal_ filename for the file. This will be used to determine how /// the file is cached and stored. file_name: String, - /// A string representation of the specifier that was associated with a - /// module. This should be present on every module that represents a module - /// that was requested to be transformed. - maybe_specifiers: Option>, } #[op] @@ -349,43 +341,9 @@ fn op_emit(state: &mut OpState, args: EmitArgs) -> bool { match args.file_name.as_ref() { "deno:///.tsbuildinfo" => state.maybe_tsbuildinfo = Some(args.data), _ => { - let media_type = MediaType::from(&args.file_name); - let media_type = if matches!( - media_type, - MediaType::JavaScript - | MediaType::Mjs - | MediaType::Cjs - | MediaType::Dts - | MediaType::Dmts - | MediaType::Dcts - | MediaType::SourceMap - | MediaType::TsBuildInfo - ) { - media_type - } else { - MediaType::JavaScript - }; - state.emitted_files.push(EmittedFile { - data: args.data, - maybe_specifiers: if let Some(specifiers) = &args.maybe_specifiers { - let specifiers = specifiers - .iter() - .map(|s| { - if let Some(data_specifier) = state.remapped_specifiers.get(s) { - data_specifier.clone() - } else if let Some(remapped_specifier) = state.root_map.get(s) { - remapped_specifier.clone() - } else { - normalize_specifier(s).unwrap() - } - }) - .collect(); - Some(specifiers) - } else { - None - }, - media_type, - }) + if cfg!(debug_assertions) { + panic!("Unhandled emit write: {}", args.file_name); + } } } @@ -703,13 +661,11 @@ pub fn exec(request: Request) -> Result { if let Some(response) = state.maybe_response { let diagnostics = 
response.diagnostics; - let emitted_files = state.emitted_files; let maybe_tsbuildinfo = state.maybe_tsbuildinfo; let stats = response.stats; Ok(Response { diagnostics, - emitted_files, maybe_tsbuildinfo, stats, }) @@ -907,64 +863,6 @@ mod tests { } } - #[tokio::test] - async fn test_emit() { - let mut state = setup(None, None, None).await; - let actual = op_emit::call( - &mut state, - EmitArgs { - data: "some file content".to_string(), - file_name: "cache:///some/file.js".to_string(), - maybe_specifiers: Some(vec!["file:///some/file.ts".to_string()]), - }, - ); - assert!(actual); - let state = state.borrow::(); - assert_eq!(state.emitted_files.len(), 1); - assert!(state.maybe_tsbuildinfo.is_none()); - assert_eq!( - state.emitted_files[0], - EmittedFile { - data: "some file content".to_string(), - maybe_specifiers: Some(vec![resolve_url_or_path( - "file:///some/file.ts" - ) - .unwrap()]), - media_type: MediaType::JavaScript, - } - ); - } - - #[tokio::test] - async fn test_emit_strange_specifier() { - let mut state = setup(None, None, None).await; - let actual = op_emit::call( - &mut state, - EmitArgs { - data: "some file content".to_string(), - file_name: "deno:///some.file.ts?q=.json".to_string(), - maybe_specifiers: Some( - vec!["file:///some/file.ts?q=.json".to_string()], - ), - }, - ); - assert!(actual); - let state = state.borrow::(); - assert_eq!(state.emitted_files.len(), 1); - assert!(state.maybe_tsbuildinfo.is_none()); - assert_eq!( - state.emitted_files[0], - EmittedFile { - data: "some file content".to_string(), - maybe_specifiers: Some(vec![resolve_url_or_path( - "file:///some/file.ts?q=.json" - ) - .unwrap()]), - media_type: MediaType::JavaScript, - } - ); - } - #[tokio::test] async fn test_emit_tsbuildinfo() { let mut state = setup(None, None, None).await; @@ -973,12 +871,10 @@ mod tests { EmitArgs { data: "some file content".to_string(), file_name: "deno:///.tsbuildinfo".to_string(), - maybe_specifiers: None, }, ); assert!(actual); let state = 
state.borrow::(); - assert_eq!(state.emitted_files.len(), 0); assert_eq!( state.maybe_tsbuildinfo, Some("some file content".to_string()) @@ -1169,7 +1065,6 @@ mod tests { .expect("exec should not have errored"); eprintln!("diagnostics {:#?}", actual.diagnostics); assert!(actual.diagnostics.is_empty()); - assert!(actual.emitted_files.is_empty()); assert!(actual.maybe_tsbuildinfo.is_some()); assert_eq!(actual.stats.0.len(), 12); } @@ -1182,7 +1077,6 @@ mod tests { .expect("exec should not have errored"); eprintln!("diagnostics {:#?}", actual.diagnostics); assert!(actual.diagnostics.is_empty()); - assert!(actual.emitted_files.is_empty()); assert!(actual.maybe_tsbuildinfo.is_some()); assert_eq!(actual.stats.0.len(), 12); } diff --git a/cli/tsc/99_main_compiler.js b/cli/tsc/99_main_compiler.js index 85ab38ccc..1c6679e84 100644 --- a/cli/tsc/99_main_compiler.js +++ b/cli/tsc/99_main_compiler.js @@ -336,15 +336,11 @@ delete Object.prototype.__proto__; getDefaultLibLocation() { return ASSETS; }, - writeFile(fileName, data, _writeByteOrderMark, _onError, sourceFiles) { + writeFile(fileName, data, _writeByteOrderMark, _onError, _sourceFiles) { debug(`host.writeFile("${fileName}")`); - let maybeSpecifiers; - if (sourceFiles) { - maybeSpecifiers = sourceFiles.map((sf) => sf.moduleName); - } return core.opSync( "op_emit", - { maybeSpecifiers, fileName, data }, + { fileName, data }, ); }, getCurrentDirectory() { @@ -557,16 +553,18 @@ delete Object.prototype.__proto__; configFileParsingDiagnostics, }); - const { diagnostics: emitDiagnostics } = program.emit(); - const diagnostics = [ ...program.getConfigFileParsingDiagnostics(), ...program.getSyntacticDiagnostics(), ...program.getOptionsDiagnostics(), ...program.getGlobalDiagnostics(), ...program.getSemanticDiagnostics(), - ...emitDiagnostics, ].filter(({ code }) => !IGNORED_DIAGNOSTICS.includes(code)); + + // emit the tsbuildinfo file + // @ts-ignore: emitBuildInfo is not exposed 
(https://github.com/microsoft/TypeScript/issues/49871) + program.emitBuildInfo(host.writeFile); + performanceProgram({ program }); core.opSync("op_respond", { -- cgit v1.2.3