Diffstat (limited to 'cli/util')
-rw-r--r-- | cli/util/checksum.rs      |  32
-rw-r--r-- | cli/util/diff.rs          | 227
-rw-r--r-- | cli/util/display.rs       |  97
-rw-r--r-- | cli/util/file_watcher.rs  | 374
-rw-r--r-- | cli/util/fs.rs            | 661
-rw-r--r-- | cli/util/logger.rs        |  79
-rw-r--r-- | cli/util/mod.rs           |  14
-rw-r--r-- | cli/util/path.rs          | 452
-rw-r--r-- | cli/util/progress_bar.rs  | 143
-rw-r--r-- | cli/util/text_encoding.rs | 162
-rw-r--r-- | cli/util/unix.rs          |  45
-rw-r--r-- | cli/util/windows.rs       |  90
12 files changed, 2376 insertions, 0 deletions
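
Before the per-file diffs, a quick orientation on the first file: cli/util/checksum.rs exposes a single gen() helper that SHA-256-hashes a list of byte sources and returns the lowercase hex digest. A minimal caller sketch (the function name and inputs are illustrative assumptions, not taken from this commit):

  use crate::util::checksum;

  // Hypothetical helper: derive a cache key from a specifier plus a version tag.
  fn cache_key_for(specifier: &str, version: &str) -> String {
    // gen() accepts any slice of byte-like values and hashes them in order.
    let key = checksum::gen(&[specifier.as_bytes(), version.as_bytes()]);
    debug_assert_eq!(key.len(), 64); // a hex-encoded SHA-256 digest is always 64 chars
    key
  }
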
diff --git a/cli/util/checksum.rs b/cli/util/checksum.rs new file mode 100644 index 000000000..c0e41356d --- /dev/null +++ b/cli/util/checksum.rs @@ -0,0 +1,32 @@ +// Copyright 2018-2022 the Deno authors. All rights reserved. MIT license. + +use ring::digest::Context; +use ring::digest::SHA256; + +pub fn gen(v: &[impl AsRef<[u8]>]) -> String { + let mut ctx = Context::new(&SHA256); + for src in v { + ctx.update(src.as_ref()); + } + let digest = ctx.finish(); + let out: Vec<String> = digest + .as_ref() + .iter() + .map(|byte| format!("{:02x}", byte)) + .collect(); + out.join("") +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_gen() { + let actual = gen(&[b"hello world"]); + assert_eq!( + actual, + "b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9" + ); + } +} diff --git a/cli/util/diff.rs b/cli/util/diff.rs new file mode 100644 index 000000000..048464162 --- /dev/null +++ b/cli/util/diff.rs @@ -0,0 +1,227 @@ +// Copyright 2018-2022 the Deno authors. All rights reserved. MIT license. + +use crate::colors; +use dissimilar::{diff as difference, Chunk}; +use std::fmt::Write as _; + +/// Print diff of the same file_path, before and after formatting. +/// +/// Diff format is loosely based on GitHub diff formatting. +pub fn diff(orig_text: &str, edit_text: &str) -> String { + if orig_text == edit_text { + return String::new(); + } + + // normalize newlines as it adds too much noise if they differ + let orig_text = orig_text.replace("\r\n", "\n"); + let edit_text = edit_text.replace("\r\n", "\n"); + + if orig_text == edit_text { + return " | Text differed by line endings.\n".to_string(); + } + + DiffBuilder::build(&orig_text, &edit_text) +} + +struct DiffBuilder { + output: String, + line_number_width: usize, + orig_line: usize, + edit_line: usize, + orig: String, + edit: String, + has_changes: bool, +} + +impl DiffBuilder { + pub fn build(orig_text: &str, edit_text: &str) -> String { + let mut diff_builder = DiffBuilder { + output: String::new(), + orig_line: 1, + edit_line: 1, + orig: String::new(), + edit: String::new(), + has_changes: false, + line_number_width: { + let line_count = std::cmp::max( + orig_text.split('\n').count(), + edit_text.split('\n').count(), + ); + line_count.to_string().chars().count() + }, + }; + + let chunks = difference(orig_text, edit_text); + diff_builder.handle_chunks(chunks); + diff_builder.output + } + + fn handle_chunks<'a>(&'a mut self, chunks: Vec<Chunk<'a>>) { + for chunk in chunks { + match chunk { + Chunk::Delete(s) => { + let split = s.split('\n').enumerate(); + for (i, s) in split { + if i > 0 { + self.orig.push('\n'); + } + self.orig.push_str(&fmt_rem_text_highlight(s)); + } + self.has_changes = true + } + Chunk::Insert(s) => { + let split = s.split('\n').enumerate(); + for (i, s) in split { + if i > 0 { + self.edit.push('\n'); + } + self.edit.push_str(&fmt_add_text_highlight(s)); + } + self.has_changes = true + } + Chunk::Equal(s) => { + let split = s.split('\n').enumerate(); + for (i, s) in split { + if i > 0 { + self.flush_changes(); + } + self.orig.push_str(&fmt_rem_text(s)); + self.edit.push_str(&fmt_add_text(s)); + } + } + } + } + + self.flush_changes(); + } + + fn flush_changes(&mut self) { + if self.has_changes { + self.write_line_diff(); + + self.orig_line += self.orig.split('\n').count(); + self.edit_line += self.edit.split('\n').count(); + self.has_changes = false; + } else { + self.orig_line += 1; + self.edit_line += 1; + } + + self.orig.clear(); + self.edit.clear(); + } + + fn write_line_diff(&mut self) { 
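    // Flushes the two buffers: first the original lines, each prefixed with a
    // right-aligned line number, a gray '|' and a red '-'; then the edited lines
    // with the same layout and a green '+'.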
+ let split = self.orig.split('\n').enumerate(); + for (i, s) in split { + write!( + self.output, + "{:width$}{} ", + self.orig_line + i, + colors::gray(" |"), + width = self.line_number_width + ) + .unwrap(); + self.output.push_str(&fmt_rem()); + self.output.push_str(s); + self.output.push('\n'); + } + + let split = self.edit.split('\n').enumerate(); + for (i, s) in split { + write!( + self.output, + "{:width$}{} ", + self.edit_line + i, + colors::gray(" |"), + width = self.line_number_width + ) + .unwrap(); + self.output.push_str(&fmt_add()); + self.output.push_str(s); + self.output.push('\n'); + } + } +} + +fn fmt_add() -> String { + colors::green_bold("+").to_string() +} + +fn fmt_add_text(x: &str) -> String { + colors::green(x).to_string() +} + +fn fmt_add_text_highlight(x: &str) -> String { + colors::black_on_green(x).to_string() +} + +fn fmt_rem() -> String { + colors::red_bold("-").to_string() +} + +fn fmt_rem_text(x: &str) -> String { + colors::red(x).to_string() +} + +fn fmt_rem_text_highlight(x: &str) -> String { + colors::white_on_red(x).to_string() +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_diff() { + run_test( + "console.log('Hello World')", + "console.log(\"Hello World\");", + concat!( + "1 | -console.log('Hello World')\n", + "1 | +console.log(\"Hello World\");\n", + ), + ); + + run_test( + "\n\n\n\nconsole.log(\n'Hello World'\n)", + "console.log(\n\"Hello World\"\n);", + concat!( + "1 | -\n", + "2 | -\n", + "3 | -\n", + "4 | -\n", + "5 | -console.log(\n", + "1 | +console.log(\n", + "6 | -'Hello World'\n", + "2 | +\"Hello World\"\n", + "7 | -)\n3 | +);\n", + ), + ); + } + + #[test] + fn test_eof_newline_missing() { + run_test( + "test\nsome line text test", + "test\nsome line text test\n", + concat!( + "2 | -some line text test\n", + "2 | +some line text test\n", + "3 | +\n", + ), + ); + } + + #[test] + fn test_newlines_differing() { + run_test("test\n", "test\r\n", " | Text differed by line endings.\n"); + } + + fn run_test(diff_text1: &str, diff_text2: &str, expected_output: &str) { + assert_eq!( + test_util::strip_ansi_codes(&diff(diff_text1, diff_text2,)), + expected_output, + ); + } +} diff --git a/cli/util/display.rs b/cli/util/display.rs new file mode 100644 index 000000000..f13965e28 --- /dev/null +++ b/cli/util/display.rs @@ -0,0 +1,97 @@ +// Copyright 2018-2022 the Deno authors. All rights reserved. MIT license. + +use deno_core::error::AnyError; +use deno_core::serde_json; +use std::io::Write; + +/// A function that converts a float to a string the represents a human +/// readable version of that number. +pub fn human_size(size: f64) -> String { + let negative = if size.is_sign_positive() { "" } else { "-" }; + let size = size.abs(); + let units = ["B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"]; + if size < 1_f64 { + return format!("{}{}{}", negative, size, "B"); + } + let delimiter = 1024_f64; + let exponent = std::cmp::min( + (size.ln() / delimiter.ln()).floor() as i32, + (units.len() - 1) as i32, + ); + let pretty_bytes = format!("{:.2}", size / delimiter.powi(exponent)) + .parse::<f64>() + .unwrap() + * 1_f64; + let unit = units[exponent as usize]; + format!("{}{}{}", negative, pretty_bytes, unit) +} + +/// A function that converts a milisecond elapsed time to a string that +/// represents a human readable version of that time. 
+pub fn human_elapsed(elapsed: u128) -> String { + if elapsed < 1_000 { + return format!("{}ms", elapsed); + } + if elapsed < 1_000 * 60 { + return format!("{}s", elapsed / 1000); + } + + let seconds = elapsed / 1_000; + let minutes = seconds / 60; + let seconds_remainder = seconds % 60; + format!("{}m{}s", minutes, seconds_remainder) +} + +pub fn write_to_stdout_ignore_sigpipe( + bytes: &[u8], +) -> Result<(), std::io::Error> { + use std::io::ErrorKind; + + match std::io::stdout().write_all(bytes) { + Ok(()) => Ok(()), + Err(e) => match e.kind() { + ErrorKind::BrokenPipe => Ok(()), + _ => Err(e), + }, + } +} + +pub fn write_json_to_stdout<T>(value: &T) -> Result<(), AnyError> +where + T: ?Sized + serde::ser::Serialize, +{ + let mut writer = std::io::BufWriter::new(std::io::stdout()); + serde_json::to_writer_pretty(&mut writer, value)?; + writeln!(&mut writer)?; + Ok(()) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_human_size() { + assert_eq!(human_size(1_f64), "1B"); + assert_eq!(human_size((12 * 1024) as f64), "12KB"); + assert_eq!(human_size((24_i64 * 1024 * 1024) as f64), "24MB"); + assert_eq!(human_size((24_i64 * 1024 * 1024 * 1024) as f64), "24GB"); + assert_eq!( + human_size((24_i64 * 1024 * 1024 * 1024 * 1024) as f64), + "24TB" + ); + assert_eq!(human_size(0_f64), "0B"); + assert_eq!(human_size(-10_f64), "-10B"); + } + + #[test] + fn test_human_elapsed() { + assert_eq!(human_elapsed(1), "1ms"); + assert_eq!(human_elapsed(256), "256ms"); + assert_eq!(human_elapsed(1000), "1s"); + assert_eq!(human_elapsed(1001), "1s"); + assert_eq!(human_elapsed(1020), "1s"); + assert_eq!(human_elapsed(70 * 1000), "1m10s"); + assert_eq!(human_elapsed(86 * 1000 + 100), "1m26s"); + } +} diff --git a/cli/util/file_watcher.rs b/cli/util/file_watcher.rs new file mode 100644 index 000000000..5158437a0 --- /dev/null +++ b/cli/util/file_watcher.rs @@ -0,0 +1,374 @@ +// Copyright 2018-2022 the Deno authors. All rights reserved. MIT license. + +use crate::colors; +use crate::util::fs::canonicalize_path; + +use deno_core::error::AnyError; +use deno_core::error::JsError; +use deno_core::futures::Future; +use deno_runtime::fmt_errors::format_js_error; +use log::info; +use notify::event::Event as NotifyEvent; +use notify::event::EventKind; +use notify::Error as NotifyError; +use notify::RecommendedWatcher; +use notify::RecursiveMode; +use notify::Watcher; +use std::collections::HashSet; +use std::path::PathBuf; +use std::sync::Arc; +use std::time::Duration; +use tokio::select; +use tokio::sync::mpsc; +use tokio::sync::mpsc::UnboundedReceiver; +use tokio::time::sleep; + +const CLEAR_SCREEN: &str = "\x1B[2J\x1B[1;1H"; +const DEBOUNCE_INTERVAL: Duration = Duration::from_millis(200); + +struct DebouncedReceiver { + // The `recv()` call could be used in a tokio `select!` macro, + // and so we store this state on the struct to ensure we don't + // lose items if a `recv()` never completes + received_items: HashSet<PathBuf>, + receiver: UnboundedReceiver<Vec<PathBuf>>, +} + +impl DebouncedReceiver { + fn new_with_sender() -> (Arc<mpsc::UnboundedSender<Vec<PathBuf>>>, Self) { + let (sender, receiver) = mpsc::unbounded_channel(); + ( + Arc::new(sender), + Self { + receiver, + received_items: HashSet::new(), + }, + ) + } + + async fn recv(&mut self) -> Option<Vec<PathBuf>> { + if self.received_items.is_empty() { + self + .received_items + .extend(self.receiver.recv().await?.into_iter()); + } + + loop { + select! 
{ + items = self.receiver.recv() => { + self.received_items.extend(items?); + } + _ = sleep(DEBOUNCE_INTERVAL) => { + return Some(self.received_items.drain().collect()); + } + } + } + } +} + +async fn error_handler<F>(watch_future: F) +where + F: Future<Output = Result<(), AnyError>>, +{ + let result = watch_future.await; + if let Err(err) = result { + let error_string = match err.downcast_ref::<JsError>() { + Some(e) => format_js_error(e), + None => format!("{:?}", err), + }; + eprintln!( + "{}: {}", + colors::red_bold("error"), + error_string.trim_start_matches("error: ") + ); + } +} + +pub enum ResolutionResult<T> { + Restart { + paths_to_watch: Vec<PathBuf>, + result: Result<T, AnyError>, + }, + Ignore, +} + +async fn next_restart<R, T, F>( + resolver: &mut R, + debounced_receiver: &mut DebouncedReceiver, +) -> (Vec<PathBuf>, Result<T, AnyError>) +where + R: FnMut(Option<Vec<PathBuf>>) -> F, + F: Future<Output = ResolutionResult<T>>, +{ + loop { + let changed = debounced_receiver.recv().await; + match resolver(changed).await { + ResolutionResult::Ignore => { + log::debug!("File change ignored") + } + ResolutionResult::Restart { + paths_to_watch, + result, + } => { + return (paths_to_watch, result); + } + } + } +} + +pub struct PrintConfig { + /// printing watcher status to terminal. + pub job_name: String, + /// determine whether to clear the terminal screen + pub clear_screen: bool, +} + +fn create_print_after_restart_fn(clear_screen: bool) -> impl Fn() { + move || { + if clear_screen { + eprint!("{}", CLEAR_SCREEN); + } + info!( + "{} File change detected! Restarting!", + colors::intense_blue("Watcher"), + ); + } +} + +/// Creates a file watcher, which will call `resolver` with every file change. +/// +/// - `resolver` is used for resolving file paths to be watched at every restarting +/// of the watcher, and can also return a value to be passed to `operation`. +/// It returns a [`ResolutionResult`], which can either instruct the watcher to restart or ignore the change. +/// This always contains paths to watch; +/// +/// - `operation` is the actual operation we want to run every time the watcher detects file +/// changes. For example, in the case where we would like to bundle, then `operation` would +/// have the logic for it like bundling the code. +pub async fn watch_func<R, O, T, F1, F2>( + mut resolver: R, + mut operation: O, + print_config: PrintConfig, +) -> Result<(), AnyError> +where + R: FnMut(Option<Vec<PathBuf>>) -> F1, + O: FnMut(T) -> F2, + F1: Future<Output = ResolutionResult<T>>, + F2: Future<Output = Result<(), AnyError>>, +{ + let (sender, mut receiver) = DebouncedReceiver::new_with_sender(); + + let PrintConfig { + job_name, + clear_screen, + } = print_config; + + // Store previous data. If module resolution fails at some point, the watcher will try to + // continue watching files using these data. + let mut paths_to_watch; + let mut resolution_result; + + let print_after_restart = create_print_after_restart_fn(clear_screen); + + match resolver(None).await { + ResolutionResult::Ignore => { + // The only situation where it makes sense to ignore the initial 'change' + // is if the command isn't supposed to do anything until something changes, + // e.g. a variant of `deno test` which doesn't run the entire test suite to start with, + // but instead does nothing until you make a change. + // + // In that case, this is probably the correct output. 
+ info!( + "{} Waiting for file changes...", + colors::intense_blue("Watcher"), + ); + + let (paths, result) = next_restart(&mut resolver, &mut receiver).await; + paths_to_watch = paths; + resolution_result = result; + + print_after_restart(); + } + ResolutionResult::Restart { + paths_to_watch: paths, + result, + } => { + paths_to_watch = paths; + resolution_result = result; + } + }; + + info!("{} {} started.", colors::intense_blue("Watcher"), job_name,); + + loop { + let mut watcher = new_watcher(sender.clone())?; + add_paths_to_watcher(&mut watcher, &paths_to_watch); + + match resolution_result { + Ok(operation_arg) => { + let fut = error_handler(operation(operation_arg)); + select! { + (paths, result) = next_restart(&mut resolver, &mut receiver) => { + if result.is_ok() { + paths_to_watch = paths; + } + resolution_result = result; + + print_after_restart(); + continue; + }, + _ = fut => {}, + }; + + info!( + "{} {} finished. Restarting on file change...", + colors::intense_blue("Watcher"), + job_name, + ); + } + Err(error) => { + eprintln!("{}: {}", colors::red_bold("error"), error); + info!( + "{} {} failed. Restarting on file change...", + colors::intense_blue("Watcher"), + job_name, + ); + } + } + + let (paths, result) = next_restart(&mut resolver, &mut receiver).await; + if result.is_ok() { + paths_to_watch = paths; + } + resolution_result = result; + + print_after_restart(); + + drop(watcher); + } +} + +/// Creates a file watcher. +/// +/// - `operation` is the actual operation we want to run every time the watcher detects file +/// changes. For example, in the case where we would like to bundle, then `operation` would +/// have the logic for it like bundling the code. +pub async fn watch_func2<T: Clone, O, F>( + mut paths_to_watch_receiver: UnboundedReceiver<Vec<PathBuf>>, + mut operation: O, + operation_args: T, + print_config: PrintConfig, +) -> Result<(), AnyError> +where + O: FnMut(T) -> Result<F, AnyError>, + F: Future<Output = Result<(), AnyError>>, +{ + let (watcher_sender, mut watcher_receiver) = + DebouncedReceiver::new_with_sender(); + + let PrintConfig { + job_name, + clear_screen, + } = print_config; + + let print_after_restart = create_print_after_restart_fn(clear_screen); + + info!("{} {} started.", colors::intense_blue("Watcher"), job_name,); + + fn consume_paths_to_watch( + watcher: &mut RecommendedWatcher, + receiver: &mut UnboundedReceiver<Vec<PathBuf>>, + ) { + loop { + match receiver.try_recv() { + Ok(paths) => { + add_paths_to_watcher(watcher, &paths); + } + Err(e) => match e { + mpsc::error::TryRecvError::Empty => { + break; + } + // there must be at least one receiver alive + _ => unreachable!(), + }, + } + } + } + + loop { + let mut watcher = new_watcher(watcher_sender.clone())?; + consume_paths_to_watch(&mut watcher, &mut paths_to_watch_receiver); + + let receiver_future = async { + loop { + let maybe_paths = paths_to_watch_receiver.recv().await; + add_paths_to_watcher(&mut watcher, &maybe_paths.unwrap()); + } + }; + let operation_future = error_handler(operation(operation_args.clone())?); + + select! { + _ = receiver_future => {}, + _ = watcher_receiver.recv() => { + print_after_restart(); + continue; + }, + _ = operation_future => { + // TODO(bartlomieju): print exit code here? + info!( + "{} {} finished. 
Restarting on file change...", + colors::intense_blue("Watcher"), + job_name, + ); + consume_paths_to_watch(&mut watcher, &mut paths_to_watch_receiver); + }, + }; + + let receiver_future = async { + loop { + let maybe_paths = paths_to_watch_receiver.recv().await; + add_paths_to_watcher(&mut watcher, &maybe_paths.unwrap()); + } + }; + select! { + _ = receiver_future => {}, + _ = watcher_receiver.recv() => { + print_after_restart(); + continue; + }, + }; + } +} + +fn new_watcher( + sender: Arc<mpsc::UnboundedSender<Vec<PathBuf>>>, +) -> Result<RecommendedWatcher, AnyError> { + let watcher = Watcher::new( + move |res: Result<NotifyEvent, NotifyError>| { + if let Ok(event) = res { + if matches!( + event.kind, + EventKind::Create(_) | EventKind::Modify(_) | EventKind::Remove(_) + ) { + let paths = event + .paths + .iter() + .filter_map(|path| canonicalize_path(path).ok()) + .collect(); + sender.send(paths).unwrap(); + } + } + }, + Default::default(), + )?; + + Ok(watcher) +} + +fn add_paths_to_watcher(watcher: &mut RecommendedWatcher, paths: &[PathBuf]) { + // Ignore any error e.g. `PathNotFound` + for path in paths { + let _ = watcher.watch(path, RecursiveMode::Recursive); + } + log::debug!("Watching paths: {:?}", paths); +} diff --git a/cli/util/fs.rs b/cli/util/fs.rs new file mode 100644 index 000000000..35cdae4fa --- /dev/null +++ b/cli/util/fs.rs @@ -0,0 +1,661 @@ +// Copyright 2018-2022 the Deno authors. All rights reserved. MIT license. + +use deno_core::anyhow::Context; +use deno_core::error::AnyError; +pub use deno_core::normalize_path; +use deno_core::ModuleSpecifier; +use deno_runtime::deno_crypto::rand; +use deno_runtime::deno_node::PathClean; +use std::env::current_dir; +use std::fs::OpenOptions; +use std::io::Error; +use std::io::ErrorKind; +use std::io::Write; +use std::path::Path; +use std::path::PathBuf; +use std::time::Duration; +use walkdir::WalkDir; + +use super::path::specifier_to_file_path; + +pub fn atomic_write_file<T: AsRef<[u8]>>( + filename: &Path, + data: T, + mode: u32, +) -> std::io::Result<()> { + let rand: String = (0..4) + .map(|_| format!("{:02x}", rand::random::<u8>())) + .collect(); + let extension = format!("{}.tmp", rand); + let tmp_file = filename.with_extension(extension); + write_file(&tmp_file, data, mode)?; + std::fs::rename(tmp_file, filename)?; + Ok(()) +} + +pub fn write_file<T: AsRef<[u8]>>( + filename: &Path, + data: T, + mode: u32, +) -> std::io::Result<()> { + write_file_2(filename, data, true, mode, true, false) +} + +pub fn write_file_2<T: AsRef<[u8]>>( + filename: &Path, + data: T, + update_mode: bool, + mode: u32, + is_create: bool, + is_append: bool, +) -> std::io::Result<()> { + let mut file = OpenOptions::new() + .read(false) + .write(true) + .append(is_append) + .truncate(!is_append) + .create(is_create) + .open(filename)?; + + if update_mode { + #[cfg(unix)] + { + use std::os::unix::fs::PermissionsExt; + let mode = mode & 0o777; + let permissions = PermissionsExt::from_mode(mode); + file.set_permissions(permissions)?; + } + #[cfg(not(unix))] + let _ = mode; + } + + file.write_all(data.as_ref()) +} + +/// Similar to `std::fs::canonicalize()` but strips UNC prefixes on Windows. 
+pub fn canonicalize_path(path: &Path) -> Result<PathBuf, Error> { + let path = path.canonicalize()?; + #[cfg(windows)] + return Ok(strip_unc_prefix(path)); + #[cfg(not(windows))] + return Ok(path); +} + +/// Canonicalizes a path which might be non-existent by going up the +/// ancestors until it finds a directory that exists, canonicalizes +/// that path, then adds back the remaining path components. +/// +/// Note: When using this, you should be aware that a symlink may +/// subsequently be created along this path by some other code. +pub fn canonicalize_path_maybe_not_exists( + path: &Path, +) -> Result<PathBuf, Error> { + let path = path.to_path_buf().clean(); + let mut path = path.as_path(); + let mut names_stack = Vec::new(); + loop { + match canonicalize_path(path) { + Ok(mut canonicalized_path) => { + for name in names_stack.into_iter().rev() { + canonicalized_path = canonicalized_path.join(name); + } + return Ok(canonicalized_path); + } + Err(err) if err.kind() == ErrorKind::NotFound => { + names_stack.push(path.file_name().unwrap()); + path = path.parent().unwrap(); + } + Err(err) => return Err(err), + } + } +} + +#[cfg(windows)] +fn strip_unc_prefix(path: PathBuf) -> PathBuf { + use std::path::Component; + use std::path::Prefix; + + let mut components = path.components(); + match components.next() { + Some(Component::Prefix(prefix)) => { + match prefix.kind() { + // \\?\device + Prefix::Verbatim(device) => { + let mut path = PathBuf::new(); + path.push(format!(r"\\{}\", device.to_string_lossy())); + path.extend(components.filter(|c| !matches!(c, Component::RootDir))); + path + } + // \\?\c:\path + Prefix::VerbatimDisk(_) => { + let mut path = PathBuf::new(); + path.push(prefix.as_os_str().to_string_lossy().replace(r"\\?\", "")); + path.extend(components); + path + } + // \\?\UNC\hostname\share_name\path + Prefix::VerbatimUNC(hostname, share_name) => { + let mut path = PathBuf::new(); + path.push(format!( + r"\\{}\{}\", + hostname.to_string_lossy(), + share_name.to_string_lossy() + )); + path.extend(components.filter(|c| !matches!(c, Component::RootDir))); + path + } + _ => path, + } + } + _ => path, + } +} + +pub fn resolve_from_cwd(path: &Path) -> Result<PathBuf, AnyError> { + let resolved_path = if path.is_absolute() { + path.to_owned() + } else { + let cwd = + current_dir().context("Failed to get current working directory")?; + cwd.join(path) + }; + + Ok(normalize_path(&resolved_path)) +} + +/// Collects file paths that satisfy the given predicate, by recursively walking `files`. +/// If the walker visits a path that is listed in `ignore`, it skips descending into the directory. +pub fn collect_files<P>( + files: &[PathBuf], + ignore: &[PathBuf], + predicate: P, +) -> Result<Vec<PathBuf>, AnyError> +where + P: Fn(&Path) -> bool, +{ + let mut target_files = Vec::new(); + + // retain only the paths which exist and ignore the rest + let canonicalized_ignore: Vec<PathBuf> = ignore + .iter() + .filter_map(|i| canonicalize_path(i).ok()) + .collect(); + + for file in files { + for entry in WalkDir::new(file) + .into_iter() + .filter_entry(|e| { + canonicalize_path(e.path()).map_or(false, |c| { + !canonicalized_ignore.iter().any(|i| c.starts_with(i)) + }) + }) + .filter_map(|e| match e { + Ok(e) if !e.file_type().is_dir() && predicate(e.path()) => Some(e), + _ => None, + }) + { + target_files.push(canonicalize_path(entry.path())?) + } + } + + Ok(target_files) +} + +/// Collects module specifiers that satisfy the given predicate as a file path, by recursively walking `include`. 
+/// Specifiers that start with http and https are left intact. +pub fn collect_specifiers<P>( + include: Vec<String>, + ignore: &[PathBuf], + predicate: P, +) -> Result<Vec<ModuleSpecifier>, AnyError> +where + P: Fn(&Path) -> bool, +{ + let mut prepared = vec![]; + + let root_path = current_dir()?; + for path in include { + let lowercase_path = path.to_lowercase(); + if lowercase_path.starts_with("http://") + || lowercase_path.starts_with("https://") + { + let url = ModuleSpecifier::parse(&path)?; + prepared.push(url); + continue; + } + + let p = if lowercase_path.starts_with("file://") { + specifier_to_file_path(&ModuleSpecifier::parse(&path)?)? + } else { + root_path.join(path) + }; + let p = normalize_path(&p); + if p.is_dir() { + let test_files = collect_files(&[p], ignore, &predicate).unwrap(); + let mut test_files_as_urls = test_files + .iter() + .map(|f| ModuleSpecifier::from_file_path(f).unwrap()) + .collect::<Vec<ModuleSpecifier>>(); + + test_files_as_urls.sort(); + prepared.extend(test_files_as_urls); + } else { + let url = ModuleSpecifier::from_file_path(p).unwrap(); + prepared.push(url); + } + } + + Ok(prepared) +} + +/// Asynchronously removes a directory and all its descendants, but does not error +/// when the directory does not exist. +pub async fn remove_dir_all_if_exists(path: &Path) -> std::io::Result<()> { + let result = tokio::fs::remove_dir_all(path).await; + match result { + Err(err) if err.kind() == std::io::ErrorKind::NotFound => Ok(()), + _ => result, + } +} + +/// Copies a directory to another directory. +/// +/// Note: Does not handle symlinks. +pub fn copy_dir_recursive(from: &Path, to: &Path) -> Result<(), AnyError> { + std::fs::create_dir_all(to) + .with_context(|| format!("Creating {}", to.display()))?; + let read_dir = std::fs::read_dir(from) + .with_context(|| format!("Reading {}", from.display()))?; + + for entry in read_dir { + let entry = entry?; + let file_type = entry.file_type()?; + let new_from = from.join(entry.file_name()); + let new_to = to.join(entry.file_name()); + + if file_type.is_dir() { + copy_dir_recursive(&new_from, &new_to).with_context(|| { + format!("Dir {} to {}", new_from.display(), new_to.display()) + })?; + } else if file_type.is_file() { + std::fs::copy(&new_from, &new_to).with_context(|| { + format!("Copying {} to {}", new_from.display(), new_to.display()) + })?; + } + } + + Ok(()) +} + +/// Hardlinks the files in one directory to another directory. +/// +/// Note: Does not handle symlinks. +pub fn hard_link_dir_recursive(from: &Path, to: &Path) -> Result<(), AnyError> { + std::fs::create_dir_all(to) + .with_context(|| format!("Creating {}", to.display()))?; + let read_dir = std::fs::read_dir(from) + .with_context(|| format!("Reading {}", from.display()))?; + + for entry in read_dir { + let entry = entry?; + let file_type = entry.file_type()?; + let new_from = from.join(entry.file_name()); + let new_to = to.join(entry.file_name()); + + if file_type.is_dir() { + hard_link_dir_recursive(&new_from, &new_to).with_context(|| { + format!("Dir {} to {}", new_from.display(), new_to.display()) + })?; + } else if file_type.is_file() { + // note: chance for race conditions here between attempting to create, + // then removing, then attempting to create. There doesn't seem to be + // a way to hard link with overwriting in Rust, but maybe there is some + // way with platform specific code. The workaround here is to handle + // scenarios where something else might create or remove files. 
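      // Retry flow below: try hard_link; on AlreadyExists remove the stale link
      // (tolerating NotFound plus a short sleep if another process removed it
      // first), then hard_link once more, again tolerating AlreadyExists.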
+ if let Err(err) = std::fs::hard_link(&new_from, &new_to) { + if err.kind() == ErrorKind::AlreadyExists { + if let Err(err) = std::fs::remove_file(&new_to) { + if err.kind() == ErrorKind::NotFound { + // Assume another process/thread created this hard link to the file we are wanting + // to remove then sleep a little bit to let the other process/thread move ahead + // faster to reduce contention. + std::thread::sleep(Duration::from_millis(10)); + } else { + return Err(err).with_context(|| { + format!( + "Removing file to hard link {} to {}", + new_from.display(), + new_to.display() + ) + }); + } + } + + // Always attempt to recreate the hardlink. In contention scenarios, the other process + // might have been killed or exited after removing the file, but before creating the hardlink + if let Err(err) = std::fs::hard_link(&new_from, &new_to) { + // Assume another process/thread created this hard link to the file we are wanting + // to now create then sleep a little bit to let the other process/thread move ahead + // faster to reduce contention. + if err.kind() == ErrorKind::AlreadyExists { + std::thread::sleep(Duration::from_millis(10)); + } else { + return Err(err).with_context(|| { + format!( + "Hard linking {} to {}", + new_from.display(), + new_to.display() + ) + }); + } + } + } else { + return Err(err).with_context(|| { + format!( + "Hard linking {} to {}", + new_from.display(), + new_to.display() + ) + }); + } + } + } + } + + Ok(()) +} + +pub fn symlink_dir(oldpath: &Path, newpath: &Path) -> Result<(), AnyError> { + let err_mapper = |err: Error| { + Error::new( + err.kind(), + format!( + "{}, symlink '{}' -> '{}'", + err, + oldpath.display(), + newpath.display() + ), + ) + }; + #[cfg(unix)] + { + use std::os::unix::fs::symlink; + symlink(oldpath, newpath).map_err(err_mapper)?; + } + #[cfg(not(unix))] + { + use std::os::windows::fs::symlink_dir; + symlink_dir(oldpath, newpath).map_err(err_mapper)?; + } + Ok(()) +} + +/// Gets the total size (in bytes) of a directory. +pub fn dir_size(path: &Path) -> std::io::Result<u64> { + let entries = std::fs::read_dir(path)?; + let mut total = 0; + for entry in entries { + let entry = entry?; + total += match entry.metadata()? { + data if data.is_dir() => dir_size(&entry.path())?, + data => data.len(), + }; + } + Ok(total) +} + +#[cfg(test)] +mod tests { + use super::*; + use test_util::TempDir; + + #[test] + fn resolve_from_cwd_child() { + let cwd = current_dir().unwrap(); + assert_eq!(resolve_from_cwd(Path::new("a")).unwrap(), cwd.join("a")); + } + + #[test] + fn resolve_from_cwd_dot() { + let cwd = current_dir().unwrap(); + assert_eq!(resolve_from_cwd(Path::new(".")).unwrap(), cwd); + } + + #[test] + fn resolve_from_cwd_parent() { + let cwd = current_dir().unwrap(); + assert_eq!(resolve_from_cwd(Path::new("a/..")).unwrap(), cwd); + } + + #[test] + fn test_normalize_path() { + assert_eq!(normalize_path(Path::new("a/../b")), PathBuf::from("b")); + assert_eq!(normalize_path(Path::new("a/./b/")), PathBuf::from("a/b/")); + assert_eq!( + normalize_path(Path::new("a/./b/../c")), + PathBuf::from("a/c") + ); + + if cfg!(windows) { + assert_eq!( + normalize_path(Path::new("C:\\a\\.\\b\\..\\c")), + PathBuf::from("C:\\a\\c") + ); + } + } + + // TODO: Get a good expected value here for Windows. 
+ #[cfg(not(windows))] + #[test] + fn resolve_from_cwd_absolute() { + let expected = Path::new("/a"); + assert_eq!(resolve_from_cwd(expected).unwrap(), expected); + } + + #[test] + fn test_collect_files() { + fn create_files(dir_path: &Path, files: &[&str]) { + std::fs::create_dir(dir_path).expect("Failed to create directory"); + for f in files { + let path = dir_path.join(f); + std::fs::write(path, "").expect("Failed to create file"); + } + } + + // dir.ts + // ├── a.ts + // ├── b.js + // ├── child + // │ ├── e.mjs + // │ ├── f.mjsx + // │ ├── .foo.TS + // │ └── README.md + // ├── c.tsx + // ├── d.jsx + // └── ignore + // ├── g.d.ts + // └── .gitignore + + let t = TempDir::new(); + + let root_dir_path = t.path().join("dir.ts"); + let root_dir_files = ["a.ts", "b.js", "c.tsx", "d.jsx"]; + create_files(&root_dir_path, &root_dir_files); + + let child_dir_path = root_dir_path.join("child"); + let child_dir_files = ["e.mjs", "f.mjsx", ".foo.TS", "README.md"]; + create_files(&child_dir_path, &child_dir_files); + + let ignore_dir_path = root_dir_path.join("ignore"); + let ignore_dir_files = ["g.d.ts", ".gitignore"]; + create_files(&ignore_dir_path, &ignore_dir_files); + + let result = collect_files(&[root_dir_path], &[ignore_dir_path], |path| { + // exclude dotfiles + path + .file_name() + .and_then(|f| f.to_str()) + .map_or(false, |f| !f.starts_with('.')) + }) + .unwrap(); + let expected = [ + "a.ts", + "b.js", + "e.mjs", + "f.mjsx", + "README.md", + "c.tsx", + "d.jsx", + ]; + for e in expected.iter() { + assert!(result.iter().any(|r| r.ends_with(e))); + } + assert_eq!(result.len(), expected.len()); + } + + #[test] + fn test_collect_specifiers() { + fn create_files(dir_path: &Path, files: &[&str]) { + std::fs::create_dir(dir_path).expect("Failed to create directory"); + for f in files { + let path = dir_path.join(f); + std::fs::write(path, "").expect("Failed to create file"); + } + } + + // dir.ts + // ├── a.ts + // ├── b.js + // ├── child + // │ ├── e.mjs + // │ ├── f.mjsx + // │ ├── .foo.TS + // │ └── README.md + // ├── c.tsx + // ├── d.jsx + // └── ignore + // ├── g.d.ts + // └── .gitignore + + let t = TempDir::new(); + + let root_dir_path = t.path().join("dir.ts"); + let root_dir_files = ["a.ts", "b.js", "c.tsx", "d.jsx"]; + create_files(&root_dir_path, &root_dir_files); + + let child_dir_path = root_dir_path.join("child"); + let child_dir_files = ["e.mjs", "f.mjsx", ".foo.TS", "README.md"]; + create_files(&child_dir_path, &child_dir_files); + + let ignore_dir_path = root_dir_path.join("ignore"); + let ignore_dir_files = ["g.d.ts", ".gitignore"]; + create_files(&ignore_dir_path, &ignore_dir_files); + + let predicate = |path: &Path| { + // exclude dotfiles + path + .file_name() + .and_then(|f| f.to_str()) + .map_or(false, |f| !f.starts_with('.')) + }; + + let result = collect_specifiers( + vec![ + "http://localhost:8080".to_string(), + root_dir_path.to_str().unwrap().to_string(), + "https://localhost:8080".to_string(), + ], + &[ignore_dir_path], + predicate, + ) + .unwrap(); + + let root_dir_url = ModuleSpecifier::from_file_path( + canonicalize_path(&root_dir_path).unwrap(), + ) + .unwrap() + .to_string(); + let expected: Vec<ModuleSpecifier> = [ + "http://localhost:8080", + &format!("{}/a.ts", root_dir_url), + &format!("{}/b.js", root_dir_url), + &format!("{}/c.tsx", root_dir_url), + &format!("{}/child/README.md", root_dir_url), + &format!("{}/child/e.mjs", root_dir_url), + &format!("{}/child/f.mjsx", root_dir_url), + &format!("{}/d.jsx", root_dir_url), + "https://localhost:8080", + ] + 
.iter() + .map(|f| ModuleSpecifier::parse(f).unwrap()) + .collect::<Vec<_>>(); + + assert_eq!(result, expected); + + let scheme = if cfg!(target_os = "windows") { + "file:///" + } else { + "file://" + }; + let result = collect_specifiers( + vec![format!( + "{}{}", + scheme, + root_dir_path + .join("child") + .to_str() + .unwrap() + .replace('\\', "/") + )], + &[], + predicate, + ) + .unwrap(); + + let expected: Vec<ModuleSpecifier> = [ + &format!("{}/child/README.md", root_dir_url), + &format!("{}/child/e.mjs", root_dir_url), + &format!("{}/child/f.mjsx", root_dir_url), + ] + .iter() + .map(|f| ModuleSpecifier::parse(f).unwrap()) + .collect::<Vec<_>>(); + + assert_eq!(result, expected); + } + + #[cfg(windows)] + #[test] + fn test_strip_unc_prefix() { + run_test(r"C:\", r"C:\"); + run_test(r"C:\test\file.txt", r"C:\test\file.txt"); + + run_test(r"\\?\C:\", r"C:\"); + run_test(r"\\?\C:\test\file.txt", r"C:\test\file.txt"); + + run_test(r"\\.\C:\", r"\\.\C:\"); + run_test(r"\\.\C:\Test\file.txt", r"\\.\C:\Test\file.txt"); + + run_test(r"\\?\UNC\localhost\", r"\\localhost"); + run_test(r"\\?\UNC\localhost\c$\", r"\\localhost\c$"); + run_test( + r"\\?\UNC\localhost\c$\Windows\file.txt", + r"\\localhost\c$\Windows\file.txt", + ); + run_test(r"\\?\UNC\wsl$\deno.json", r"\\wsl$\deno.json"); + + run_test(r"\\?\server1", r"\\server1"); + run_test(r"\\?\server1\e$\", r"\\server1\e$\"); + run_test( + r"\\?\server1\e$\test\file.txt", + r"\\server1\e$\test\file.txt", + ); + + fn run_test(input: &str, expected: &str) { + assert_eq!( + strip_unc_prefix(PathBuf::from(input)), + PathBuf::from(expected) + ); + } + } +} diff --git a/cli/util/logger.rs b/cli/util/logger.rs new file mode 100644 index 000000000..caa027c04 --- /dev/null +++ b/cli/util/logger.rs @@ -0,0 +1,79 @@ +// Copyright 2018-2022 the Deno authors. All rights reserved. MIT license. 
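// Wraps env_logger for the CLI: silences noisy dependencies (rustyline, wgpu,
// gfx), keeps the deno::lsp::performance debug target available, and prefixes
// DEBUG/TRACE output with its level and target while INFO and above print verbatim.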
+ +use std::io::Write; + +struct CliLogger(env_logger::Logger); + +impl CliLogger { + pub fn new(logger: env_logger::Logger) -> Self { + Self(logger) + } + + pub fn filter(&self) -> log::LevelFilter { + self.0.filter() + } +} + +impl log::Log for CliLogger { + fn enabled(&self, metadata: &log::Metadata) -> bool { + self.0.enabled(metadata) + } + + fn log(&self, record: &log::Record) { + if self.enabled(record.metadata()) { + self.0.log(record); + } + } + + fn flush(&self) { + self.0.flush(); + } +} + +pub fn init(maybe_level: Option<log::Level>) { + let log_level = maybe_level.unwrap_or(log::Level::Info); + let logger = env_logger::Builder::from_env( + env_logger::Env::default() + .default_filter_or(log_level.to_level_filter().to_string()), + ) + // https://github.com/denoland/deno/issues/6641 + .filter_module("rustyline", log::LevelFilter::Off) + // wgpu crates (gfx_backend), have a lot of useless INFO and WARN logs + .filter_module("wgpu", log::LevelFilter::Error) + .filter_module("gfx", log::LevelFilter::Error) + // used to make available the lsp_debug which is then filtered out at runtime + // in the cli logger + .filter_module("deno::lsp::performance", log::LevelFilter::Debug) + .format(|buf, record| { + let mut target = record.target().to_string(); + if let Some(line_no) = record.line() { + target.push(':'); + target.push_str(&line_no.to_string()); + } + if record.level() <= log::Level::Info + || (record.target() == "deno::lsp::performance" + && record.level() == log::Level::Debug) + { + // Print ERROR, WARN, INFO and lsp_debug logs as they are + writeln!(buf, "{}", record.args()) + } else { + // Add prefix to DEBUG or TRACE logs + writeln!( + buf, + "{} RS - {} - {}", + record.level(), + target, + record.args() + ) + } + }) + .build(); + + let cli_logger = CliLogger::new(logger); + let max_level = cli_logger.filter(); + let r = log::set_boxed_logger(Box::new(cli_logger)); + if r.is_ok() { + log::set_max_level(max_level); + } + r.expect("Could not install logger."); +} diff --git a/cli/util/mod.rs b/cli/util/mod.rs new file mode 100644 index 000000000..176991d32 --- /dev/null +++ b/cli/util/mod.rs @@ -0,0 +1,14 @@ +// Copyright 2018-2022 the Deno authors. All rights reserved. MIT license. + +// Note: Only add code in this folder that has no application specific logic +pub mod checksum; +pub mod diff; +pub mod display; +pub mod file_watcher; +pub mod fs; +pub mod logger; +pub mod path; +pub mod progress_bar; +pub mod text_encoding; +pub mod unix; +pub mod windows; diff --git a/cli/util/path.rs b/cli/util/path.rs new file mode 100644 index 000000000..6df982f4e --- /dev/null +++ b/cli/util/path.rs @@ -0,0 +1,452 @@ +use std::borrow::Cow; +use std::path::Path; +use std::path::PathBuf; + +use deno_ast::ModuleSpecifier; +use deno_core::error::uri_error; +use deno_core::error::AnyError; + +/// Checks if the path has extension Deno supports. +pub fn is_supported_ext(path: &Path) -> bool { + if let Some(ext) = get_extension(path) { + matches!( + ext.as_str(), + "ts" | "tsx" | "js" | "jsx" | "mjs" | "mts" | "cjs" | "cts" + ) + } else { + false + } +} + +/// Get the extension of a file in lowercase. +pub fn get_extension(file_path: &Path) -> Option<String> { + return file_path + .extension() + .and_then(|e| e.to_str()) + .map(|e| e.to_lowercase()); +} + +/// Attempts to convert a specifier to a file path. By default, uses the Url +/// crate's `to_file_path()` method, but falls back to try and resolve unix-style +/// paths on Windows. 
+pub fn specifier_to_file_path( + specifier: &ModuleSpecifier, +) -> Result<PathBuf, AnyError> { + let result = if cfg!(windows) { + match specifier.to_file_path() { + Ok(path) => Ok(path), + Err(()) => { + // This might be a unix-style path which is used in the tests even on Windows. + // Attempt to see if we can convert it to a `PathBuf`. This code should be removed + // once/if https://github.com/servo/rust-url/issues/730 is implemented. + if specifier.scheme() == "file" + && specifier.host().is_none() + && specifier.port().is_none() + && specifier.path_segments().is_some() + { + let path_str = specifier.path(); + match String::from_utf8( + percent_encoding::percent_decode(path_str.as_bytes()).collect(), + ) { + Ok(path_str) => Ok(PathBuf::from(path_str)), + Err(_) => Err(()), + } + } else { + Err(()) + } + } + } + } else { + specifier.to_file_path() + }; + match result { + Ok(path) => Ok(path), + Err(()) => Err(uri_error(format!( + "Invalid file path.\n Specifier: {}", + specifier + ))), + } +} + +/// Ensures a specifier that will definitely be a directory has a trailing slash. +pub fn ensure_directory_specifier( + mut specifier: ModuleSpecifier, +) -> ModuleSpecifier { + let path = specifier.path(); + if !path.ends_with('/') { + let new_path = format!("{}/", path); + specifier.set_path(&new_path); + } + specifier +} + +/// Gets the parent of this module specifier. +pub fn specifier_parent(specifier: &ModuleSpecifier) -> ModuleSpecifier { + let mut specifier = specifier.clone(); + // don't use specifier.segments() because it will strip the leading slash + let mut segments = specifier.path().split('/').collect::<Vec<_>>(); + if segments.iter().all(|s| s.is_empty()) { + return specifier; + } + if let Some(last) = segments.last() { + if last.is_empty() { + segments.pop(); + } + segments.pop(); + let new_path = format!("{}/", segments.join("/")); + specifier.set_path(&new_path); + } + specifier +} + +/// `from.make_relative(to)` but with fixes. +pub fn relative_specifier( + from: &ModuleSpecifier, + to: &ModuleSpecifier, +) -> Option<String> { + let is_dir = to.path().ends_with('/'); + + if is_dir && from == to { + return Some("./".to_string()); + } + + // workaround using parent directory until https://github.com/servo/rust-url/pull/754 is merged + let from = if !from.path().ends_with('/') { + if let Some(end_slash) = from.path().rfind('/') { + let mut new_from = from.clone(); + new_from.set_path(&from.path()[..end_slash + 1]); + Cow::Owned(new_from) + } else { + Cow::Borrowed(from) + } + } else { + Cow::Borrowed(from) + }; + + // workaround for url crate not adding a trailing slash for a directory + // it seems to be fixed once a version greater than 2.2.2 is released + let mut text = from.make_relative(to)?; + if is_dir && !text.ends_with('/') && to.query().is_none() { + text.push('/'); + } + + Some(if text.starts_with("../") || text.starts_with("./") { + text + } else { + format!("./{}", text) + }) +} + +/// This function checks if input path has trailing slash or not. If input path +/// has trailing slash it will return true else it will return false. +pub fn path_has_trailing_slash(path: &Path) -> bool { + if let Some(path_str) = path.to_str() { + if cfg!(windows) { + path_str.ends_with('\\') + } else { + path_str.ends_with('/') + } + } else { + false + } +} + +/// Gets a path with the specified file stem suffix. +/// +/// Ex. 
`file.ts` with suffix `_2` returns `file_2.ts` +pub fn path_with_stem_suffix(path: &Path, suffix: &str) -> PathBuf { + if let Some(file_name) = path.file_name().map(|f| f.to_string_lossy()) { + if let Some(file_stem) = path.file_stem().map(|f| f.to_string_lossy()) { + if let Some(ext) = path.extension().map(|f| f.to_string_lossy()) { + return if file_stem.to_lowercase().ends_with(".d") { + path.with_file_name(format!( + "{}{}.{}.{}", + &file_stem[..file_stem.len() - ".d".len()], + suffix, + // maintain casing + &file_stem[file_stem.len() - "d".len()..], + ext + )) + } else { + path.with_file_name(format!("{}{}.{}", file_stem, suffix, ext)) + }; + } + } + + path.with_file_name(format!("{}{}", file_name, suffix)) + } else { + path.with_file_name(suffix) + } +} + +/// Gets if the provided character is not supported on all +/// kinds of file systems. +pub fn is_banned_path_char(c: char) -> bool { + matches!(c, '<' | '>' | ':' | '"' | '|' | '?' | '*') +} + +/// Gets a safe local directory name for the provided url. +/// +/// For example: +/// https://deno.land:8080/path -> deno.land_8080/path +pub fn root_url_to_safe_local_dirname(root: &ModuleSpecifier) -> PathBuf { + fn sanitize_segment(text: &str) -> String { + text + .chars() + .map(|c| if is_banned_segment_char(c) { '_' } else { c }) + .collect() + } + + fn is_banned_segment_char(c: char) -> bool { + matches!(c, '/' | '\\') || is_banned_path_char(c) + } + + let mut result = String::new(); + if let Some(domain) = root.domain() { + result.push_str(&sanitize_segment(domain)); + } + if let Some(port) = root.port() { + if !result.is_empty() { + result.push('_'); + } + result.push_str(&port.to_string()); + } + let mut result = PathBuf::from(result); + if let Some(segments) = root.path_segments() { + for segment in segments.filter(|s| !s.is_empty()) { + result = result.join(sanitize_segment(segment)); + } + } + + result +} + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn test_is_supported_ext() { + assert!(!is_supported_ext(Path::new("tests/subdir/redirects"))); + assert!(!is_supported_ext(Path::new("README.md"))); + assert!(is_supported_ext(Path::new("lib/typescript.d.ts"))); + assert!(is_supported_ext(Path::new("testdata/run/001_hello.js"))); + assert!(is_supported_ext(Path::new("testdata/run/002_hello.ts"))); + assert!(is_supported_ext(Path::new("foo.jsx"))); + assert!(is_supported_ext(Path::new("foo.tsx"))); + assert!(is_supported_ext(Path::new("foo.TS"))); + assert!(is_supported_ext(Path::new("foo.TSX"))); + assert!(is_supported_ext(Path::new("foo.JS"))); + assert!(is_supported_ext(Path::new("foo.JSX"))); + assert!(is_supported_ext(Path::new("foo.mjs"))); + assert!(is_supported_ext(Path::new("foo.mts"))); + assert!(is_supported_ext(Path::new("foo.cjs"))); + assert!(is_supported_ext(Path::new("foo.cts"))); + assert!(!is_supported_ext(Path::new("foo.mjsx"))); + } + + #[test] + fn test_specifier_to_file_path() { + run_success_test("file:///", "/"); + run_success_test("file:///test", "/test"); + run_success_test("file:///dir/test/test.txt", "/dir/test/test.txt"); + run_success_test( + "file:///dir/test%20test/test.txt", + "/dir/test test/test.txt", + ); + + fn run_success_test(specifier: &str, expected_path: &str) { + let result = + specifier_to_file_path(&ModuleSpecifier::parse(specifier).unwrap()) + .unwrap(); + assert_eq!(result, PathBuf::from(expected_path)); + } + } + + #[test] + fn test_ensure_directory_specifier() { + run_test("file:///", "file:///"); + run_test("file:///test", "file:///test/"); + run_test("file:///test/", 
"file:///test/"); + run_test("file:///test/other", "file:///test/other/"); + run_test("file:///test/other/", "file:///test/other/"); + + fn run_test(specifier: &str, expected: &str) { + let result = + ensure_directory_specifier(ModuleSpecifier::parse(specifier).unwrap()); + assert_eq!(result.to_string(), expected); + } + } + + #[test] + fn test_specifier_parent() { + run_test("file:///", "file:///"); + run_test("file:///test", "file:///"); + run_test("file:///test/", "file:///"); + run_test("file:///test/other", "file:///test/"); + run_test("file:///test/other.txt", "file:///test/"); + run_test("file:///test/other/", "file:///test/"); + + fn run_test(specifier: &str, expected: &str) { + let result = + specifier_parent(&ModuleSpecifier::parse(specifier).unwrap()); + assert_eq!(result.to_string(), expected); + } + } + + #[test] + fn test_relative_specifier() { + let fixtures: Vec<(&str, &str, Option<&str>)> = vec![ + ("file:///from", "file:///to", Some("./to")), + ("file:///from", "file:///from/other", Some("./from/other")), + ("file:///from", "file:///from/other/", Some("./from/other/")), + ("file:///from", "file:///other/from", Some("./other/from")), + ("file:///from/", "file:///other/from", Some("../other/from")), + ("file:///from", "file:///other/from/", Some("./other/from/")), + ( + "file:///from", + "file:///to/other.txt", + Some("./to/other.txt"), + ), + ( + "file:///from/test", + "file:///to/other.txt", + Some("../to/other.txt"), + ), + ( + "file:///from/other.txt", + "file:///to/other.txt", + Some("../to/other.txt"), + ), + ( + "https://deno.land/x/a/b/d.ts", + "https://deno.land/x/a/b/c.ts", + Some("./c.ts"), + ), + ( + "https://deno.land/x/a/b/d.ts", + "https://deno.land/x/a/c.ts", + Some("../c.ts"), + ), + ( + "https://deno.land/x/a/b/d.ts", + "https://deno.land/x/a/b/c/d.ts", + Some("./c/d.ts"), + ), + ( + "https://deno.land/x/a/b/c/", + "https://deno.land/x/a/b/c/d.ts", + Some("./d.ts"), + ), + ( + "https://deno.land/x/a/b/c/", + "https://deno.land/x/a/b/c/d/e.ts", + Some("./d/e.ts"), + ), + ( + "https://deno.land/x/a/b/c/f.ts", + "https://deno.land/x/a/b/c/d/e.ts", + Some("./d/e.ts"), + ), + ( + "https://deno.land/x/a/b/d.ts", + "https://deno.land/x/a/c.ts?foo=bar", + Some("../c.ts?foo=bar"), + ), + ( + "https://deno.land/x/a/b/d.ts?foo=bar", + "https://deno.land/x/a/b/c.ts", + Some("./c.ts"), + ), + ("file:///a/b/d.ts", "file:///a/b/c.ts", Some("./c.ts")), + ("https://deno.land/x/a/b/c.ts", "file:///a/b/c.ts", None), + ( + "https://deno.land/", + "https://deno.land/x/a/b/c.ts", + Some("./x/a/b/c.ts"), + ), + ( + "https://deno.land/x/d/e/f.ts", + "https://deno.land/x/a/b/c.ts", + Some("../../a/b/c.ts"), + ), + ]; + for (from_str, to_str, expected) in fixtures { + let from = ModuleSpecifier::parse(from_str).unwrap(); + let to = ModuleSpecifier::parse(to_str).unwrap(); + let actual = relative_specifier(&from, &to); + assert_eq!( + actual.as_deref(), + expected, + "from: \"{}\" to: \"{}\"", + from_str, + to_str + ); + } + } + + #[test] + fn test_path_has_trailing_slash() { + #[cfg(not(windows))] + { + run_test("/Users/johndoe/Desktop/deno-project/target/", true); + run_test(r"/Users/johndoe/deno-project/target//", true); + run_test("/Users/johndoe/Desktop/deno-project", false); + run_test(r"/Users/johndoe/deno-project\", false); + } + + #[cfg(windows)] + { + run_test(r"C:\test\deno-project\", true); + run_test(r"C:\test\deno-project\\", true); + run_test(r"C:\test\file.txt", false); + run_test(r"C:\test\file.txt/", false); + } + + fn run_test(path_str: &str, expected: bool) { + 
let path = Path::new(path_str); + let result = path_has_trailing_slash(path); + assert_eq!(result, expected); + } + } + + #[test] + fn test_path_with_stem_suffix() { + assert_eq!( + path_with_stem_suffix(&PathBuf::from("/"), "_2"), + PathBuf::from("/_2") + ); + assert_eq!( + path_with_stem_suffix(&PathBuf::from("/test"), "_2"), + PathBuf::from("/test_2") + ); + assert_eq!( + path_with_stem_suffix(&PathBuf::from("/test.txt"), "_2"), + PathBuf::from("/test_2.txt") + ); + assert_eq!( + path_with_stem_suffix(&PathBuf::from("/test/subdir"), "_2"), + PathBuf::from("/test/subdir_2") + ); + assert_eq!( + path_with_stem_suffix(&PathBuf::from("/test/subdir.other.txt"), "_2"), + PathBuf::from("/test/subdir.other_2.txt") + ); + assert_eq!( + path_with_stem_suffix(&PathBuf::from("/test.d.ts"), "_2"), + PathBuf::from("/test_2.d.ts") + ); + assert_eq!( + path_with_stem_suffix(&PathBuf::from("/test.D.TS"), "_2"), + PathBuf::from("/test_2.D.TS") + ); + assert_eq!( + path_with_stem_suffix(&PathBuf::from("/test.d.mts"), "_2"), + PathBuf::from("/test_2.d.mts") + ); + assert_eq!( + path_with_stem_suffix(&PathBuf::from("/test.d.cts"), "_2"), + PathBuf::from("/test_2.d.cts") + ); + } +} diff --git a/cli/util/progress_bar.rs b/cli/util/progress_bar.rs new file mode 100644 index 000000000..5b49fb279 --- /dev/null +++ b/cli/util/progress_bar.rs @@ -0,0 +1,143 @@ +// Copyright 2018-2022 the Deno authors. All rights reserved. MIT license. + +use crate::colors; +use deno_core::parking_lot::Mutex; +use indexmap::IndexSet; +use std::sync::Arc; +use std::time::Duration; + +#[derive(Clone, Debug, Default)] +pub struct ProgressBar(Arc<Mutex<ProgressBarInner>>); + +#[derive(Debug)] +struct ProgressBarInner { + pb: Option<indicatif::ProgressBar>, + is_tty: bool, + in_flight: IndexSet<String>, +} + +impl Default for ProgressBarInner { + fn default() -> Self { + Self { + pb: None, + is_tty: colors::is_tty(), + in_flight: IndexSet::default(), + } + } +} + +impl ProgressBarInner { + fn get_or_create_pb(&mut self) -> indicatif::ProgressBar { + if let Some(pb) = self.pb.as_ref() { + return pb.clone(); + } + + let pb = indicatif::ProgressBar::new_spinner(); + pb.enable_steady_tick(Duration::from_millis(120)); + pb.set_prefix("Download"); + pb.set_style( + indicatif::ProgressStyle::with_template( + "{prefix:.green} {spinner:.green} {msg}", + ) + .unwrap() + .tick_strings(&["⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"]), + ); + self.pb = Some(pb); + self.pb.as_ref().unwrap().clone() + } + + fn add_in_flight(&mut self, msg: &str) { + if self.in_flight.contains(msg) { + return; + } + + self.in_flight.insert(msg.to_string()); + } + + /// Returns if removed "in-flight" was last entry and progress + /// bar needs to be updated. 
+ fn remove_in_flight(&mut self, msg: &str) -> bool { + if !self.in_flight.contains(msg) { + return false; + } + + let mut is_last = false; + if let Some(last) = self.in_flight.last() { + is_last = last == msg; + } + self.in_flight.remove(msg); + is_last + } + + fn update_progress_bar(&mut self) { + let pb = self.get_or_create_pb(); + if let Some(msg) = self.in_flight.last() { + pb.set_message(msg.clone()); + } + } +} + +pub struct UpdateGuard { + pb: ProgressBar, + msg: String, + noop: bool, +} + +impl Drop for UpdateGuard { + fn drop(&mut self) { + if self.noop { + return; + } + + let mut inner = self.pb.0.lock(); + if inner.remove_in_flight(&self.msg) { + inner.update_progress_bar(); + } + } +} + +impl ProgressBar { + pub fn update(&self, msg: &str) -> UpdateGuard { + let mut guard = UpdateGuard { + pb: self.clone(), + msg: msg.to_string(), + noop: false, + }; + let mut inner = self.0.lock(); + + // If we're not running in TTY we're just gonna fallback + // to using logger crate. + if !inner.is_tty { + log::log!(log::Level::Info, "{} {}", colors::green("Download"), msg); + guard.noop = true; + return guard; + } + + inner.add_in_flight(msg); + inner.update_progress_bar(); + guard + } + + pub fn clear(&self) { + let mut inner = self.0.lock(); + + if let Some(pb) = inner.pb.as_ref() { + pb.finish_and_clear(); + inner.pb = None; + } + } + + pub fn clear_guard(&self) -> ClearGuard { + ClearGuard { pb: self.clone() } + } +} + +pub struct ClearGuard { + pb: ProgressBar, +} + +impl Drop for ClearGuard { + fn drop(&mut self) { + self.pb.clear(); + } +} diff --git a/cli/util/text_encoding.rs b/cli/util/text_encoding.rs new file mode 100644 index 000000000..c16a1289d --- /dev/null +++ b/cli/util/text_encoding.rs @@ -0,0 +1,162 @@ +// Copyright 2018-2022 the Deno authors. All rights reserved. MIT license. + +use encoding_rs::*; +use std::borrow::Cow; +use std::io::Error; +use std::io::ErrorKind; + +pub const BOM_CHAR: char = '\u{FEFF}'; + +/// Attempts to detect the character encoding of the provided bytes. +/// +/// Supports UTF-8, UTF-16 Little Endian and UTF-16 Big Endian. +pub fn detect_charset(bytes: &'_ [u8]) -> &'static str { + const UTF16_LE_BOM: &[u8] = b"\xFF\xFE"; + const UTF16_BE_BOM: &[u8] = b"\xFE\xFF"; + + if bytes.starts_with(UTF16_LE_BOM) { + "utf-16le" + } else if bytes.starts_with(UTF16_BE_BOM) { + "utf-16be" + } else { + // Assume everything else is utf-8 + "utf-8" + } +} + +/// Attempts to convert the provided bytes to a UTF-8 string. +/// +/// Supports all encodings supported by the encoding_rs crate, which includes +/// all encodings specified in the WHATWG Encoding Standard, and only those +/// encodings (see: <https://encoding.spec.whatwg.org/>). +pub fn convert_to_utf8<'a>( + bytes: &'a [u8], + charset: &'_ str, +) -> Result<Cow<'a, str>, Error> { + match Encoding::for_label(charset.as_bytes()) { + Some(encoding) => encoding + .decode_without_bom_handling_and_without_replacement(bytes) + .ok_or_else(|| ErrorKind::InvalidData.into()), + None => Err(Error::new( + ErrorKind::InvalidInput, + format!("Unsupported charset: {}", charset), + )), + } +} + +/// Strips the byte order mark from the provided text if it exists. +pub fn strip_bom(text: &str) -> &str { + if text.starts_with(BOM_CHAR) { + &text[BOM_CHAR.len_utf8()..] 
+ } else { + text + } +} + +static SOURCE_MAP_PREFIX: &str = + "//# sourceMappingURL=data:application/json;base64,"; + +pub fn source_map_from_code(code: &str) -> Option<Vec<u8>> { + let last_line = code.rsplit(|u| u == '\n').next()?; + if last_line.starts_with(SOURCE_MAP_PREFIX) { + let input = last_line.split_at(SOURCE_MAP_PREFIX.len()).1; + let decoded_map = base64::decode(input) + .expect("Unable to decode source map from emitted file."); + Some(decoded_map) + } else { + None + } +} + +pub fn code_without_source_map(mut code: String) -> String { + if let Some(last_line_index) = code.rfind('\n') { + if code[last_line_index + 1..].starts_with(SOURCE_MAP_PREFIX) { + code.truncate(last_line_index + 1); + code + } else { + code + } + } else { + code + } +} + +#[cfg(test)] +mod tests { + use super::*; + + fn test_detection(test_data: &[u8], expected_charset: &str) { + let detected_charset = detect_charset(test_data); + assert_eq!( + expected_charset.to_lowercase(), + detected_charset.to_lowercase() + ); + } + + #[test] + fn test_detection_utf8_no_bom() { + let test_data = "Hello UTF-8 it is \u{23F0} for Deno!" + .to_owned() + .into_bytes(); + test_detection(&test_data, "utf-8"); + } + + #[test] + fn test_detection_utf16_little_endian() { + let test_data = b"\xFF\xFEHello UTF-16LE".to_owned().to_vec(); + test_detection(&test_data, "utf-16le"); + } + + #[test] + fn test_detection_utf16_big_endian() { + let test_data = b"\xFE\xFFHello UTF-16BE".to_owned().to_vec(); + test_detection(&test_data, "utf-16be"); + } + + #[test] + fn test_decoding_unsupported_charset() { + let test_data = Vec::new(); + let result = convert_to_utf8(&test_data, "utf-32le"); + assert!(result.is_err()); + let err = result.expect_err("Err expected"); + assert!(err.kind() == ErrorKind::InvalidInput); + } + + #[test] + fn test_decoding_invalid_utf8() { + let test_data = b"\xFE\xFE\xFF\xFF".to_vec(); + let result = convert_to_utf8(&test_data, "utf-8"); + assert!(result.is_err()); + let err = result.expect_err("Err expected"); + assert!(err.kind() == ErrorKind::InvalidData); + } + + #[test] + fn test_source_without_source_map() { + run_test("", ""); + run_test("\n", "\n"); + run_test("\r\n", "\r\n"); + run_test("a", "a"); + run_test("a\n", "a\n"); + run_test("a\r\n", "a\r\n"); + run_test("a\r\nb", "a\r\nb"); + run_test("a\nb\n", "a\nb\n"); + run_test("a\r\nb\r\n", "a\r\nb\r\n"); + run_test( + "test\n//# sourceMappingURL=data:application/json;base64,test", + "test\n", + ); + run_test( + "test\r\n//# sourceMappingURL=data:application/json;base64,test", + "test\r\n", + ); + run_test( + "\n//# sourceMappingURL=data:application/json;base64,test", + "\n", + ); + + fn run_test(input: &str, output: &str) { + assert_eq!(code_without_source_map(input.to_string()), output); + } + } +} diff --git a/cli/util/unix.rs b/cli/util/unix.rs new file mode 100644 index 000000000..f282f6cfe --- /dev/null +++ b/cli/util/unix.rs @@ -0,0 +1,45 @@ +// Copyright 2018-2022 the Deno authors. All rights reserved. MIT license. + +/// Raise soft file descriptor limit to hard file descriptor limit. +/// This is the difference between `ulimit -n` and `ulimit -n -H`. +pub fn raise_fd_limit() { + #[cfg(unix)] + // TODO(bartlomieju): + #[allow(clippy::undocumented_unsafe_blocks)] + unsafe { + let mut limits = libc::rlimit { + rlim_cur: 0, + rlim_max: 0, + }; + + if 0 != libc::getrlimit(libc::RLIMIT_NOFILE, &mut limits) { + return; + } + + if limits.rlim_cur == libc::RLIM_INFINITY { + return; + } + + // No hard limit? 
Do a binary search for the effective soft limit. + if limits.rlim_max == libc::RLIM_INFINITY { + let mut min = limits.rlim_cur; + let mut max = 1 << 20; + + while min + 1 < max { + limits.rlim_cur = min + (max - min) / 2; + match libc::setrlimit(libc::RLIMIT_NOFILE, &limits) { + 0 => min = limits.rlim_cur, + _ => max = limits.rlim_cur, + } + } + + return; + } + + // Raise the soft limit to the hard limit. + if limits.rlim_cur < limits.rlim_max { + limits.rlim_cur = limits.rlim_max; + libc::setrlimit(libc::RLIMIT_NOFILE, &limits); + } + } +} diff --git a/cli/util/windows.rs b/cli/util/windows.rs new file mode 100644 index 000000000..0801ff2f5 --- /dev/null +++ b/cli/util/windows.rs @@ -0,0 +1,90 @@ +// Copyright 2018-2022 the Deno authors. All rights reserved. MIT license. + +/// Ensures that stdin, stdout, and stderr are open and have valid HANDLEs +/// associated with them. There are many places where a `std::fs::File` is +/// constructed from a stdio handle; if the handle is null this causes a panic. +pub fn ensure_stdio_open() { + #[cfg(windows)] + // SAFETY: winapi calls + unsafe { + use std::mem::size_of; + use winapi::shared::minwindef::DWORD; + use winapi::shared::minwindef::FALSE; + use winapi::shared::minwindef::TRUE; + use winapi::shared::ntdef::NULL; + use winapi::shared::winerror::ERROR_INVALID_HANDLE; + use winapi::um::errhandlingapi::GetLastError; + use winapi::um::fileapi::CreateFileA; + use winapi::um::fileapi::OPEN_EXISTING; + use winapi::um::handleapi::GetHandleInformation; + use winapi::um::handleapi::INVALID_HANDLE_VALUE; + use winapi::um::minwinbase::SECURITY_ATTRIBUTES; + use winapi::um::processenv::GetStdHandle; + use winapi::um::processenv::SetStdHandle; + use winapi::um::winbase::STD_ERROR_HANDLE; + use winapi::um::winbase::STD_INPUT_HANDLE; + use winapi::um::winbase::STD_OUTPUT_HANDLE; + use winapi::um::winnt::FILE_ATTRIBUTE_NORMAL; + use winapi::um::winnt::FILE_GENERIC_READ; + use winapi::um::winnt::FILE_GENERIC_WRITE; + use winapi::um::winnt::FILE_READ_ATTRIBUTES; + use winapi::um::winnt::FILE_SHARE_READ; + use winapi::um::winnt::FILE_SHARE_WRITE; + + for std_handle in [STD_INPUT_HANDLE, STD_OUTPUT_HANDLE, STD_ERROR_HANDLE] { + // Check whether stdio handle is open. + let is_valid = match GetStdHandle(std_handle) { + NULL | INVALID_HANDLE_VALUE => false, + handle => { + // The stdio handle is open; check whether its handle is valid. + let mut flags: DWORD = 0; + match GetHandleInformation(handle, &mut flags) { + TRUE => true, + FALSE if GetLastError() == ERROR_INVALID_HANDLE => false, + FALSE => { + panic!("GetHandleInformation failed (error {})", GetLastError()); + } + _ => unreachable!(), + } + } + }; + + if !is_valid { + // Open NUL device. + let desired_access = match std_handle { + STD_INPUT_HANDLE => FILE_GENERIC_READ, + _ => FILE_GENERIC_WRITE | FILE_READ_ATTRIBUTES, + }; + let security_attributes = SECURITY_ATTRIBUTES { + nLength: size_of::<SECURITY_ATTRIBUTES>() as DWORD, + lpSecurityDescriptor: NULL, + bInheritHandle: TRUE, + }; + let file_handle = CreateFileA( + b"\\\\?\\NUL\0" as *const _ as *mut _, + desired_access, + FILE_SHARE_READ | FILE_SHARE_WRITE, + &security_attributes as *const _ as *mut _, + OPEN_EXISTING, + FILE_ATTRIBUTE_NORMAL, + NULL, + ); + match file_handle { + NULL => unreachable!(), + INVALID_HANDLE_VALUE => { + panic!("Could not open NUL device (error {})", GetLastError()); + } + _ => {} + } + + // Assign the opened NUL handle to the missing stdio handle. 
+ let success = SetStdHandle(std_handle, file_handle); + match success { + TRUE => {} + FALSE => panic!("SetStdHandle failed (error {})", GetLastError()), + _ => unreachable!(), + } + } + } + } +} |
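
As a closing illustration, here is a sketch of how a caller might wire up watch_func from cli/util/file_watcher.rs (shown earlier). The ./main.ts path and both closures are assumptions for the example, not part of this commit:

  use crate::util::file_watcher::{watch_func, PrintConfig, ResolutionResult};
  use deno_core::error::AnyError;
  use std::path::PathBuf;

  async fn watch_example() -> Result<(), AnyError> {
    // The resolver decides which paths to watch and what value to hand to
    // `operation` after every debounced batch of file-system events.
    let resolver = |_changed: Option<Vec<PathBuf>>| async {
      ResolutionResult::Restart {
        paths_to_watch: vec![PathBuf::from("./main.ts")],
        result: Ok(PathBuf::from("./main.ts")),
      }
    };
    // The operation runs once per restart; watch_func loops until the process exits.
    let operation = |entry: PathBuf| async move {
      println!("change detected, re-running {}", entry.display());
      Ok::<(), AnyError>(())
    };
    watch_func(
      resolver,
      operation,
      PrintConfig {
        job_name: "Example".to_string(),
        clear_screen: false,
      },
    )
    .await
  }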