From 7c3da2ec1c12c8c16b04d89b7aec5e04cd2ff3be Mon Sep 17 00:00:00 2001 From: Divy Srivastava Date: Mon, 14 Oct 2024 11:10:27 +0530 Subject: fix(ext/webgpu): allow GL backend on Windows (#26206) It should be supported according to [this](https://github.com/gfx-rs/wgpu?tab=readme-ov-file#supported-platforms). Fixes https://github.com/denoland/deno/issues/26144 --- ext/webgpu/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'ext') diff --git a/ext/webgpu/lib.rs b/ext/webgpu/lib.rs index df2ab323a..9cf4ca914 100644 --- a/ext/webgpu/lib.rs +++ b/ext/webgpu/lib.rs @@ -44,7 +44,7 @@ mod macros { #[cfg(all(not(target_arch = "wasm32"), windows))] wgpu_types::Backend::Dx12 => $($c)*.$method:: $params, #[cfg(any( - all(unix, not(target_os = "macos"), not(target_os = "ios")), + all(not(target_os = "macos"), not(target_os = "ios")), feature = "angle", target_arch = "wasm32" ))] -- cgit v1.2.3 From 68b388a93a3efe443fc5e306e883847bfb8551db Mon Sep 17 00:00:00 2001 From: Divy Srivastava Date: Mon, 14 Oct 2024 14:00:02 +0530 Subject: fix(ext/node): allow writing to tty columns (#26201) Behave similar to Node.js where modifying `stdout.columns` doesn't really resize the terminal. Ref https://github.com/nodejs/node/issues/17529 Fixes https://github.com/denoland/deno/issues/26196 --- ext/node/polyfills/_process/streams.mjs | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) (limited to 'ext') diff --git a/ext/node/polyfills/_process/streams.mjs b/ext/node/polyfills/_process/streams.mjs index 7936e82aa..3573956c9 100644 --- a/ext/node/polyfills/_process/streams.mjs +++ b/ext/node/polyfills/_process/streams.mjs @@ -66,14 +66,19 @@ export function createWritableStdioStream(writer, name, warmup = false) { // We cannot call `writer?.isTerminal()` eagerly here let getIsTTY = () => writer?.isTerminal(); + const getColumns = () => + stream._columns || + (writer?.isTerminal() ? 
Deno.consoleSize?.().columns : undefined); ObjectDefineProperties(stream, { columns: { __proto__: null, enumerable: true, configurable: true, - get: () => - writer?.isTerminal() ? Deno.consoleSize?.().columns : undefined, + get: () => getColumns(), + set: (value) => { + stream._columns = value; + }, }, rows: { __proto__: null, -- cgit v1.2.3 From bbad7c592282dace88c77b0e089d53cb32878673 Mon Sep 17 00:00:00 2001 From: Divy Srivastava Date: Mon, 14 Oct 2024 14:24:26 +0530 Subject: fix(ext/node): compute pem length (upper bound) for key exports (#26231) Fixes https://github.com/denoland/deno/issues/26188 --- ext/node/ops/crypto/keys.rs | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'ext') diff --git a/ext/node/ops/crypto/keys.rs b/ext/node/ops/crypto/keys.rs index 867b34e04..ac62f5cca 100644 --- a/ext/node/ops/crypto/keys.rs +++ b/ext/node/ops/crypto/keys.rs @@ -2024,7 +2024,9 @@ pub fn op_node_export_public_key_pem( _ => unreachable!("export_der would have errored"), }; - let mut out = vec![0; 2048]; + let pem_len = der::pem::encapsulated_len(label, LineEnding::LF, data.len()) + .map_err(|_| type_error("very large data"))?; + let mut out = vec![0; pem_len]; let mut writer = PemWriter::new(label, LineEnding::LF, &mut out)?; writer.write(&data)?; let len = writer.finish()?; @@ -2063,7 +2065,9 @@ pub fn op_node_export_private_key_pem( _ => unreachable!("export_der would have errored"), }; - let mut out = vec![0; 2048]; + let pem_len = der::pem::encapsulated_len(label, LineEnding::LF, data.len()) + .map_err(|_| type_error("very large data"))?; + let mut out = vec![0; pem_len]; let mut writer = PemWriter::new(label, LineEnding::LF, &mut out)?; writer.write(&data)?; let len = writer.finish()?; -- cgit v1.2.3 From dfbf03eee798da4f42a1bffeebd8edd1d0095406 Mon Sep 17 00:00:00 2001 From: Divy Srivastava Date: Mon, 14 Oct 2024 18:01:51 +0530 Subject: perf: use fast calls for microtask ops (#26236) Updates deno_core to 0.312.0 --- ext/fs/std_fs.rs | 2 
+- ext/node/ops/require.rs | 2 +- ext/node/ops/worker_threads.rs | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) (limited to 'ext') diff --git a/ext/fs/std_fs.rs b/ext/fs/std_fs.rs index 41a8569ba..1a83c97c5 100644 --- a/ext/fs/std_fs.rs +++ b/ext/fs/std_fs.rs @@ -929,7 +929,7 @@ fn exists(path: &Path) -> bool { } fn realpath(path: &Path) -> FsResult { - Ok(deno_core::strip_unc_prefix(path.canonicalize()?)) + Ok(deno_path_util::strip_unc_prefix(path.canonicalize()?)) } fn read_dir(path: &Path) -> FsResult> { diff --git a/ext/node/ops/require.rs b/ext/node/ops/require.rs index 547336981..7d85ee853 100644 --- a/ext/node/ops/require.rs +++ b/ext/node/ops/require.rs @@ -295,7 +295,7 @@ where let path = ensure_read_permission::

(state, &path)?; let fs = state.borrow::(); let canonicalized_path = - deno_core::strip_unc_prefix(fs.realpath_sync(&path)?); + deno_path_util::strip_unc_prefix(fs.realpath_sync(&path)?); Ok(canonicalized_path.to_string_lossy().into_owned()) } diff --git a/ext/node/ops/worker_threads.rs b/ext/node/ops/worker_threads.rs index 4c50092f2..5f139c5dc 100644 --- a/ext/node/ops/worker_threads.rs +++ b/ext/node/ops/worker_threads.rs @@ -52,7 +52,7 @@ where let path = ensure_read_permission::

(state, &path)?; let fs = state.borrow::(); let canonicalized_path = - deno_core::strip_unc_prefix(fs.realpath_sync(&path)?); + deno_path_util::strip_unc_prefix(fs.realpath_sync(&path)?); Url::from_file_path(canonicalized_path) .map_err(|e| generic_error(format!("URL from Path-String: {:#?}", e)))? }; -- cgit v1.2.3 From cb385d9e4acbd81235c3784d7e56b49c3fa41dd3 Mon Sep 17 00:00:00 2001 From: Leo Kettmeir Date: Mon, 14 Oct 2024 13:53:17 -0700 Subject: refactor(ext/webstorage): use concrete error types (#26173) --- ext/webstorage/Cargo.toml | 1 + ext/webstorage/lib.rs | 77 ++++++++++++++++------------------------------- 2 files changed, 27 insertions(+), 51 deletions(-) (limited to 'ext') diff --git a/ext/webstorage/Cargo.toml b/ext/webstorage/Cargo.toml index 08ed8b0f2..bf9bbf23b 100644 --- a/ext/webstorage/Cargo.toml +++ b/ext/webstorage/Cargo.toml @@ -17,3 +17,4 @@ path = "lib.rs" deno_core.workspace = true deno_web.workspace = true rusqlite.workspace = true +thiserror.workspace = true diff --git a/ext/webstorage/lib.rs b/ext/webstorage/lib.rs index 99e61a180..40946f05a 100644 --- a/ext/webstorage/lib.rs +++ b/ext/webstorage/lib.rs @@ -2,10 +2,8 @@ // NOTE to all: use **cached** prepared statements when interfacing with SQLite. 
-use std::fmt; use std::path::PathBuf; -use deno_core::error::AnyError; use deno_core::op2; use deno_core::OpState; use rusqlite::params; @@ -14,6 +12,18 @@ use rusqlite::OptionalExtension; pub use rusqlite; +#[derive(Debug, thiserror::Error)] +pub enum WebStorageError { + #[error("LocalStorage is not supported in this context.")] + ContextNotSupported, + #[error(transparent)] + Sqlite(#[from] rusqlite::Error), + #[error(transparent)] + Io(std::io::Error), + #[error("Exceeded maximum storage size")] + StorageExceeded, +} + #[derive(Clone)] struct OriginStorageDir(PathBuf); @@ -51,15 +61,13 @@ struct SessionStorage(Connection); fn get_webstorage( state: &mut OpState, persistent: bool, -) -> Result<&Connection, AnyError> { +) -> Result<&Connection, WebStorageError> { let conn = if persistent { if state.try_borrow::().is_none() { - let path = state.try_borrow::().ok_or_else(|| { - DomExceptionNotSupportedError::new( - "LocalStorage is not supported in this context.", - ) - })?; - std::fs::create_dir_all(&path.0)?; + let path = state + .try_borrow::() + .ok_or(WebStorageError::ContextNotSupported)?; + std::fs::create_dir_all(&path.0).map_err(WebStorageError::Io)?; let conn = Connection::open(path.0.join("local_storage"))?; // Enable write-ahead-logging and tweak some other stuff. 
let initial_pragmas = " @@ -106,7 +114,7 @@ fn get_webstorage( pub fn op_webstorage_length( state: &mut OpState, persistent: bool, -) -> Result { +) -> Result { let conn = get_webstorage(state, persistent)?; let mut stmt = conn.prepare_cached("SELECT COUNT(*) FROM data")?; @@ -121,7 +129,7 @@ pub fn op_webstorage_key( state: &mut OpState, #[smi] index: u32, persistent: bool, -) -> Result, AnyError> { +) -> Result, WebStorageError> { let conn = get_webstorage(state, persistent)?; let mut stmt = @@ -135,14 +143,9 @@ pub fn op_webstorage_key( } #[inline] -fn size_check(input: usize) -> Result<(), AnyError> { +fn size_check(input: usize) -> Result<(), WebStorageError> { if input >= MAX_STORAGE_BYTES { - return Err( - deno_web::DomExceptionQuotaExceededError::new( - "Exceeded maximum storage size", - ) - .into(), - ); + return Err(WebStorageError::StorageExceeded); } Ok(()) @@ -154,7 +157,7 @@ pub fn op_webstorage_set( #[string] key: &str, #[string] value: &str, persistent: bool, -) -> Result<(), AnyError> { +) -> Result<(), WebStorageError> { let conn = get_webstorage(state, persistent)?; size_check(key.len() + value.len())?; @@ -178,7 +181,7 @@ pub fn op_webstorage_get( state: &mut OpState, #[string] key_name: String, persistent: bool, -) -> Result, AnyError> { +) -> Result, WebStorageError> { let conn = get_webstorage(state, persistent)?; let mut stmt = conn.prepare_cached("SELECT value FROM data WHERE key = ?")?; @@ -194,7 +197,7 @@ pub fn op_webstorage_remove( state: &mut OpState, #[string] key_name: &str, persistent: bool, -) -> Result<(), AnyError> { +) -> Result<(), WebStorageError> { let conn = get_webstorage(state, persistent)?; let mut stmt = conn.prepare_cached("DELETE FROM data WHERE key = ?")?; @@ -207,7 +210,7 @@ pub fn op_webstorage_remove( pub fn op_webstorage_clear( state: &mut OpState, persistent: bool, -) -> Result<(), AnyError> { +) -> Result<(), WebStorageError> { let conn = get_webstorage(state, persistent)?; let mut stmt = 
conn.prepare_cached("DELETE FROM data")?; @@ -221,7 +224,7 @@ pub fn op_webstorage_clear( pub fn op_webstorage_iterate_keys( state: &mut OpState, persistent: bool, -) -> Result, AnyError> { +) -> Result, WebStorageError> { let conn = get_webstorage(state, persistent)?; let mut stmt = conn.prepare_cached("SELECT key FROM data")?; @@ -232,31 +235,3 @@ pub fn op_webstorage_iterate_keys( Ok(keys) } - -#[derive(Debug)] -pub struct DomExceptionNotSupportedError { - pub msg: String, -} - -impl DomExceptionNotSupportedError { - pub fn new(msg: &str) -> Self { - DomExceptionNotSupportedError { - msg: msg.to_string(), - } - } -} - -impl fmt::Display for DomExceptionNotSupportedError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.pad(&self.msg) - } -} - -impl std::error::Error for DomExceptionNotSupportedError {} - -pub fn get_not_supported_error_class_name( - e: &AnyError, -) -> Option<&'static str> { - e.downcast_ref::() - .map(|_| "DOMExceptionNotSupportedError") -} -- cgit v1.2.3 From 48cbf85add1a9a5c3e583c68c80d16420e606502 Mon Sep 17 00:00:00 2001 From: Leo Kettmeir Date: Mon, 14 Oct 2024 14:15:31 -0700 Subject: refactor(ext/url): use concrete error types (#26172) --- ext/url/Cargo.toml | 1 + ext/url/lib.rs | 2 ++ ext/url/urlpattern.rs | 28 ++++++++++++++-------------- 3 files changed, 17 insertions(+), 14 deletions(-) (limited to 'ext') diff --git a/ext/url/Cargo.toml b/ext/url/Cargo.toml index 13aca9953..ca4fa444d 100644 --- a/ext/url/Cargo.toml +++ b/ext/url/Cargo.toml @@ -15,6 +15,7 @@ path = "lib.rs" [dependencies] deno_core.workspace = true +thiserror.workspace = true urlpattern = "0.3.0" [dev-dependencies] diff --git a/ext/url/lib.rs b/ext/url/lib.rs index 6869d656b..f8946532a 100644 --- a/ext/url/lib.rs +++ b/ext/url/lib.rs @@ -15,6 +15,8 @@ use std::path::PathBuf; use crate::urlpattern::op_urlpattern_parse; use crate::urlpattern::op_urlpattern_process_match_input; +pub use urlpattern::UrlPatternError; + deno_core::extension!( deno_url, deps = 
[deno_webidl], diff --git a/ext/url/urlpattern.rs b/ext/url/urlpattern.rs index b6d9a1382..7d4e8ee71 100644 --- a/ext/url/urlpattern.rs +++ b/ext/url/urlpattern.rs @@ -1,7 +1,5 @@ // Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. -use deno_core::error::type_error; -use deno_core::error::AnyError; use deno_core::op2; use urlpattern::quirks; @@ -9,21 +7,23 @@ use urlpattern::quirks::MatchInput; use urlpattern::quirks::StringOrInit; use urlpattern::quirks::UrlPattern; +#[derive(Debug, thiserror::Error)] +#[error(transparent)] +pub struct UrlPatternError(urlpattern::Error); + #[op2] #[serde] pub fn op_urlpattern_parse( #[serde] input: StringOrInit, #[string] base_url: Option, #[serde] options: urlpattern::UrlPatternOptions, -) -> Result { - let init = urlpattern::quirks::process_construct_pattern_input( - input, - base_url.as_deref(), - ) - .map_err(|e| type_error(e.to_string()))?; +) -> Result { + let init = + quirks::process_construct_pattern_input(input, base_url.as_deref()) + .map_err(UrlPatternError)?; - let pattern = urlpattern::quirks::parse_pattern(init, options) - .map_err(|e| type_error(e.to_string()))?; + let pattern = + quirks::parse_pattern(init, options).map_err(UrlPatternError)?; Ok(pattern) } @@ -33,14 +33,14 @@ pub fn op_urlpattern_parse( pub fn op_urlpattern_process_match_input( #[serde] input: StringOrInit, #[string] base_url: Option, -) -> Result, AnyError> { - let res = urlpattern::quirks::process_match_input(input, base_url.as_deref()) - .map_err(|e| type_error(e.to_string()))?; +) -> Result, UrlPatternError> { + let res = quirks::process_match_input(input, base_url.as_deref()) + .map_err(UrlPatternError)?; let (input, inputs) = match res { Some((input, inputs)) => (input, inputs), None => return Ok(None), }; - Ok(urlpattern::quirks::parse_match_input(input).map(|input| (input, inputs))) + Ok(quirks::parse_match_input(input).map(|input| (input, inputs))) } -- cgit v1.2.3 From 8dbe77dd29a3791db58fda392dea45d8249b6bc9 Mon 
Sep 17 00:00:00 2001 From: Mohammad Sulaiman Date: Tue, 15 Oct 2024 01:04:18 +0300 Subject: fix(console/ext/repl): support using parseFloat() (#25900) Fixes #21428 Co-authored-by: tannal --- ext/console/01_console.js | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) (limited to 'ext') diff --git a/ext/console/01_console.js b/ext/console/01_console.js index d9acc958a..a977a2719 100644 --- a/ext/console/01_console.js +++ b/ext/console/01_console.js @@ -84,6 +84,7 @@ const { NumberIsInteger, NumberIsNaN, NumberParseInt, + NumberParseFloat, NumberPrototypeToFixed, NumberPrototypeToString, NumberPrototypeValueOf, @@ -3010,20 +3011,18 @@ function inspectArgs(args, inspectOptions = { __proto__: null }) { } else if (ArrayPrototypeIncludes(["d", "i"], char)) { // Format as an integer. const value = args[a++]; - if (typeof value == "bigint") { - formattedArg = `${value}n`; - } else if (typeof value == "number") { - formattedArg = `${NumberParseInt(String(value))}`; - } else { + if (typeof value === "symbol") { formattedArg = "NaN"; + } else { + formattedArg = `${NumberParseInt(value)}`; } } else if (char == "f") { // Format as a floating point value. const value = args[a++]; - if (typeof value == "number") { - formattedArg = `${value}`; - } else { + if (typeof value === "symbol") { formattedArg = "NaN"; + } else { + formattedArg = `${NumberParseFloat(value)}`; } } else if (ArrayPrototypeIncludes(["O", "o"], char)) { // Format as an object. 
-- cgit v1.2.3 From ee7d4501435f0ebd655c8b50bd6e41ca19e71abc Mon Sep 17 00:00:00 2001 From: Leo Kettmeir Date: Mon, 14 Oct 2024 15:05:49 -0700 Subject: refactor(ext/ffi): use concrete error types (#26170) --- ext/ffi/Cargo.toml | 2 + ext/ffi/call.rs | 54 +++++++++----- ext/ffi/callback.rs | 51 +++++++++---- ext/ffi/dlfcn.rs | 48 ++++++++---- ext/ffi/ir.rs | 142 ++++++++++++++++++++--------------- ext/ffi/lib.rs | 7 ++ ext/ffi/repr.rs | 211 +++++++++++++++++++++++++++++++++++----------------- ext/ffi/static.rs | 31 +++++--- 8 files changed, 362 insertions(+), 184 deletions(-) (limited to 'ext') diff --git a/ext/ffi/Cargo.toml b/ext/ffi/Cargo.toml index 23a2aee1c..496e8634e 100644 --- a/ext/ffi/Cargo.toml +++ b/ext/ffi/Cargo.toml @@ -24,6 +24,8 @@ log.workspace = true serde.workspace = true serde-value = "0.7" serde_json = "1.0" +thiserror.workspace = true +tokio.workspace = true [target.'cfg(windows)'.dependencies] winapi = { workspace = true, features = ["errhandlingapi", "minwindef", "ntdef", "winbase", "winnt"] } diff --git a/ext/ffi/call.rs b/ext/ffi/call.rs index 3572b9e81..d337b29b0 100644 --- a/ext/ffi/call.rs +++ b/ext/ffi/call.rs @@ -7,9 +7,6 @@ use crate::symbol::NativeType; use crate::symbol::Symbol; use crate::FfiPermissions; use crate::ForeignFunction; -use deno_core::anyhow::anyhow; -use deno_core::error::type_error; -use deno_core::error::AnyError; use deno_core::op2; use deno_core::serde_json::Value; use deno_core::serde_v8::ExternalPointer; @@ -24,6 +21,20 @@ use std::ffi::c_void; use std::future::Future; use std::rc::Rc; +#[derive(Debug, thiserror::Error)] +pub enum CallError { + #[error(transparent)] + IR(#[from] IRError), + #[error("Nonblocking FFI call failed: {0}")] + NonblockingCallFailure(#[source] tokio::task::JoinError), + #[error("Invalid FFI symbol name: '{0}'")] + InvalidSymbol(String), + #[error(transparent)] + Permission(deno_core::error::AnyError), + #[error(transparent)] + Callback(#[from] super::CallbackError), +} + // SAFETY: Makes 
an FFI call unsafe fn ffi_call_rtype_struct( cif: &libffi::middle::Cif, @@ -45,7 +56,7 @@ pub(crate) fn ffi_call_sync<'scope>( args: v8::FunctionCallbackArguments, symbol: &Symbol, out_buffer: Option, -) -> Result +) -> Result where 'scope: 'scope, { @@ -201,7 +212,7 @@ fn ffi_call( parameter_types: &[NativeType], result_type: NativeType, out_buffer: Option, -) -> Result { +) -> FfiValue { let call_args: Vec = call_args .iter() .enumerate() @@ -214,7 +225,7 @@ fn ffi_call( // SAFETY: types in the `Cif` match the actual calling convention and // types of symbol. unsafe { - Ok(match result_type { + match result_type { NativeType::Void => { cif.call::<()>(fun_ptr, &call_args); FfiValue::Value(Value::from(())) @@ -267,7 +278,7 @@ fn ffi_call( ffi_call_rtype_struct(cif, &fun_ptr, call_args, out_buffer.unwrap().0); FfiValue::Value(Value::Null) } - }) + } } } @@ -280,14 +291,16 @@ pub fn op_ffi_call_ptr_nonblocking( #[serde] def: ForeignFunction, parameters: v8::Local, out_buffer: Option>, -) -> Result>, AnyError> +) -> Result>, CallError> where FP: FfiPermissions + 'static, { { let mut state = state.borrow_mut(); let permissions = state.borrow_mut::(); - permissions.check_partial_no_path()?; + permissions + .check_partial_no_path() + .map_err(CallError::Permission)?; }; let symbol = PtrSymbol::new(pointer, &def)?; @@ -309,7 +322,7 @@ where Ok(async move { let result = join_handle .await - .map_err(|err| anyhow!("Nonblocking FFI call failed: {}", err))??; + .map_err(CallError::NonblockingCallFailure)?; // SAFETY: Same return type declared to libffi; trust user to have it right beyond that. 
Ok(result) }) @@ -325,16 +338,17 @@ pub fn op_ffi_call_nonblocking( #[string] symbol: String, parameters: v8::Local, out_buffer: Option>, -) -> Result>, AnyError> { +) -> Result>, CallError> { let symbol = { let state = state.borrow(); - let resource = state.resource_table.get::(rid)?; + let resource = state + .resource_table + .get::(rid) + .map_err(CallError::Permission)?; let symbols = &resource.symbols; *symbols .get(&symbol) - .ok_or_else(|| { - type_error(format!("Invalid FFI symbol name: '{symbol}'")) - })? + .ok_or_else(|| CallError::InvalidSymbol(symbol))? .clone() }; @@ -362,7 +376,7 @@ pub fn op_ffi_call_nonblocking( Ok(async move { let result = join_handle .await - .map_err(|err| anyhow!("Nonblocking FFI call failed: {}", err))??; + .map_err(CallError::NonblockingCallFailure)?; // SAFETY: Same return type declared to libffi; trust user to have it right beyond that. Ok(result) }) @@ -377,14 +391,16 @@ pub fn op_ffi_call_ptr( #[serde] def: ForeignFunction, parameters: v8::Local, out_buffer: Option>, -) -> Result +) -> Result where FP: FfiPermissions + 'static, { { let mut state = state.borrow_mut(); let permissions = state.borrow_mut::(); - permissions.check_partial_no_path()?; + permissions + .check_partial_no_path() + .map_err(CallError::Permission)?; }; let symbol = PtrSymbol::new(pointer, &def)?; @@ -399,7 +415,7 @@ where &def.parameters, def.result.clone(), out_buffer_ptr, - )?; + ); // SAFETY: Same return type declared to libffi; trust user to have it right beyond that. Ok(result) } diff --git a/ext/ffi/callback.rs b/ext/ffi/callback.rs index 6fa166f52..f33e0413a 100644 --- a/ext/ffi/callback.rs +++ b/ext/ffi/callback.rs @@ -3,7 +3,6 @@ use crate::symbol::NativeType; use crate::FfiPermissions; use crate::ForeignFunction; -use deno_core::error::AnyError; use deno_core::op2; use deno_core::v8; use deno_core::v8::TryCatch; @@ -34,6 +33,16 @@ thread_local! 
{ static LOCAL_THREAD_ID: RefCell = const { RefCell::new(0) }; } +#[derive(Debug, thiserror::Error)] +pub enum CallbackError { + #[error(transparent)] + Resource(deno_core::error::AnyError), + #[error(transparent)] + Permission(deno_core::error::AnyError), + #[error(transparent)] + Other(deno_core::error::AnyError), +} + #[derive(Clone)] pub struct PtrSymbol { pub cif: libffi::middle::Cif, @@ -44,7 +53,7 @@ impl PtrSymbol { pub fn new( fn_ptr: *mut c_void, def: &ForeignFunction, - ) -> Result { + ) -> Result { let ptr = libffi::middle::CodePtr::from_ptr(fn_ptr as _); let cif = libffi::middle::Cif::new( def @@ -52,8 +61,13 @@ impl PtrSymbol { .clone() .into_iter() .map(libffi::middle::Type::try_from) - .collect::, _>>()?, - def.result.clone().try_into()?, + .collect::, _>>() + .map_err(CallbackError::Other)?, + def + .result + .clone() + .try_into() + .map_err(CallbackError::Other)?, ); Ok(Self { cif, ptr }) @@ -522,10 +536,12 @@ unsafe fn do_ffi_callback( pub fn op_ffi_unsafe_callback_ref( state: Rc>, #[smi] rid: ResourceId, -) -> Result>, AnyError> { +) -> Result, CallbackError> { let state = state.borrow(); - let callback_resource = - state.resource_table.get::(rid)?; + let callback_resource = state + .resource_table + .get::(rid) + .map_err(CallbackError::Resource)?; Ok(async move { let info: &mut CallbackInfo = @@ -536,7 +552,6 @@ pub fn op_ffi_unsafe_callback_ref( .into_future() .or_cancel(callback_resource.cancel.clone()) .await; - Ok(()) }) } @@ -552,12 +567,14 @@ pub fn op_ffi_unsafe_callback_create( scope: &mut v8::HandleScope<'scope>, #[serde] args: RegisterCallbackArgs, cb: v8::Local, -) -> Result, AnyError> +) -> Result, CallbackError> where FP: FfiPermissions + 'static, { let permissions = state.borrow_mut::(); - permissions.check_partial_no_path()?; + permissions + .check_partial_no_path() + .map_err(CallbackError::Permission)?; let thread_id: u32 = LOCAL_THREAD_ID.with(|s| { let value = *s.borrow(); @@ -593,8 +610,10 @@ where .parameters .into_iter() 
.map(libffi::middle::Type::try_from) - .collect::, _>>()?, - libffi::middle::Type::try_from(args.result)?, + .collect::, _>>() + .map_err(CallbackError::Other)?, + libffi::middle::Type::try_from(args.result) + .map_err(CallbackError::Other)?, ); // SAFETY: CallbackInfo is leaked, is not null and stays valid as long as the callback exists. @@ -624,14 +643,16 @@ pub fn op_ffi_unsafe_callback_close( state: &mut OpState, scope: &mut v8::HandleScope, #[smi] rid: ResourceId, -) -> Result<(), AnyError> { +) -> Result<(), CallbackError> { // SAFETY: This drops the closure and the callback info associated with it. // Any retained function pointers to the closure become dangling pointers. // It is up to the user to know that it is safe to call the `close()` on the // UnsafeCallback instance. unsafe { - let callback_resource = - state.resource_table.take::(rid)?; + let callback_resource = state + .resource_table + .take::(rid) + .map_err(CallbackError::Resource)?; let info = Box::from_raw(callback_resource.info); let _ = v8::Global::from_raw(scope, info.callback); let _ = v8::Global::from_raw(scope, info.context); diff --git a/ext/ffi/dlfcn.rs b/ext/ffi/dlfcn.rs index 10199bf85..53bdcbc5c 100644 --- a/ext/ffi/dlfcn.rs +++ b/ext/ffi/dlfcn.rs @@ -6,8 +6,6 @@ use crate::symbol::Symbol; use crate::turbocall; use crate::turbocall::Turbocall; use crate::FfiPermissions; -use deno_core::error::generic_error; -use deno_core::error::AnyError; use deno_core::op2; use deno_core::v8; use deno_core::GarbageCollected; @@ -21,6 +19,22 @@ use std::collections::HashMap; use std::ffi::c_void; use std::rc::Rc; +#[derive(Debug, thiserror::Error)] +pub enum DlfcnError { + #[error("Failed to register symbol {symbol}: {error}")] + RegisterSymbol { + symbol: String, + #[source] + error: dlopen2::Error, + }, + #[error(transparent)] + Dlopen(#[from] dlopen2::Error), + #[error(transparent)] + Permission(deno_core::error::AnyError), + #[error(transparent)] + Other(deno_core::error::AnyError), +} + pub 
struct DynamicLibraryResource { lib: Library, pub symbols: HashMap>, @@ -37,7 +51,7 @@ impl Resource for DynamicLibraryResource { } impl DynamicLibraryResource { - pub fn get_static(&self, symbol: String) -> Result<*mut c_void, AnyError> { + pub fn get_static(&self, symbol: String) -> Result<*mut c_void, DlfcnError> { // By default, Err returned by this function does not tell // which symbol wasn't exported. So we'll modify the error // message to include the name of symbol. @@ -45,9 +59,7 @@ impl DynamicLibraryResource { // SAFETY: The obtained T symbol is the size of a pointer. match unsafe { self.lib.symbol::<*mut c_void>(&symbol) } { Ok(value) => Ok(Ok(value)), - Err(err) => Err(generic_error(format!( - "Failed to register symbol {symbol}: {err}" - ))), + Err(error) => Err(DlfcnError::RegisterSymbol { symbol, error }), }? } } @@ -116,12 +128,14 @@ pub fn op_ffi_load<'scope, FP>( scope: &mut v8::HandleScope<'scope>, state: &mut OpState, #[serde] args: FfiLoadArgs, -) -> Result, AnyError> +) -> Result, DlfcnError> where FP: FfiPermissions + 'static, { let permissions = state.borrow_mut::(); - let path = permissions.check_partial_with_path(&args.path)?; + let path = permissions + .check_partial_with_path(&args.path) + .map_err(DlfcnError::Permission)?; let lib = Library::open(&path).map_err(|e| { dlopen2::Error::OpeningLibraryError(std::io::Error::new( @@ -152,15 +166,16 @@ where // SAFETY: The obtained T symbol is the size of a pointer. 
match unsafe { resource.lib.symbol::<*const c_void>(symbol) } { Ok(value) => Ok(value), - Err(err) => if foreign_fn.optional { + Err(error) => if foreign_fn.optional { let null: v8::Local = v8::null(scope).into(); let func_key = v8::String::new(scope, &symbol_key).unwrap(); obj.set(scope, func_key.into(), null); break 'register_symbol; } else { - Err(generic_error(format!( - "Failed to register symbol {symbol}: {err}" - ))) + Err(DlfcnError::RegisterSymbol { + symbol: symbol.to_owned(), + error, + }) }, }?; @@ -171,8 +186,13 @@ where .clone() .into_iter() .map(libffi::middle::Type::try_from) - .collect::, _>>()?, - foreign_fn.result.clone().try_into()?, + .collect::, _>>() + .map_err(DlfcnError::Other)?, + foreign_fn + .result + .clone() + .try_into() + .map_err(DlfcnError::Other)?, ); let func_key = v8::String::new(scope, &symbol_key).unwrap(); diff --git a/ext/ffi/ir.rs b/ext/ffi/ir.rs index ebf64945b..2e8084216 100644 --- a/ext/ffi/ir.rs +++ b/ext/ffi/ir.rs @@ -1,13 +1,55 @@ // Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. 
use crate::symbol::NativeType; -use deno_core::error::type_error; -use deno_core::error::AnyError; use deno_core::v8; use libffi::middle::Arg; use std::ffi::c_void; use std::ptr; +#[derive(Debug, thiserror::Error)] +pub enum IRError { + #[error("Invalid FFI u8 type, expected boolean")] + InvalidU8ExpectedBoolean, + #[error("Invalid FFI u8 type, expected unsigned integer")] + InvalidU8ExpectedUnsignedInteger, + #[error("Invalid FFI i8 type, expected integer")] + InvalidI8, + #[error("Invalid FFI u16 type, expected unsigned integer")] + InvalidU16, + #[error("Invalid FFI i16 type, expected integer")] + InvalidI16, + #[error("Invalid FFI u32 type, expected unsigned integer")] + InvalidU32, + #[error("Invalid FFI i32 type, expected integer")] + InvalidI32, + #[error("Invalid FFI u64 type, expected unsigned integer")] + InvalidU64, + #[error("Invalid FFI i64 type, expected integer")] + InvalidI64, + #[error("Invalid FFI usize type, expected unsigned integer")] + InvalidUsize, + #[error("Invalid FFI isize type, expected integer")] + InvalidIsize, + #[error("Invalid FFI f32 type, expected number")] + InvalidF32, + #[error("Invalid FFI f64 type, expected number")] + InvalidF64, + #[error("Invalid FFI pointer type, expected null, or External")] + InvalidPointerType, + #[error( + "Invalid FFI buffer type, expected null, ArrayBuffer, or ArrayBufferView" + )] + InvalidBufferType, + #[error("Invalid FFI ArrayBufferView, expected data in the buffer")] + InvalidArrayBufferView, + #[error("Invalid FFI ArrayBuffer, expected data in buffer")] + InvalidArrayBuffer, + #[error("Invalid FFI struct type, expected ArrayBuffer, or ArrayBufferView")] + InvalidStructType, + #[error("Invalid FFI function type, expected null, or External")] + InvalidFunctionType, +} + pub struct OutBuffer(pub *mut u8); // SAFETY: OutBuffer is allocated by us in 00_ffi.js and is guaranteed to be @@ -126,9 +168,9 @@ unsafe impl Send for NativeValue {} #[inline] pub fn ffi_parse_bool_arg( arg: v8::Local, -) -> 
Result { +) -> Result { let bool_value = v8::Local::::try_from(arg) - .map_err(|_| type_error("Invalid FFI u8 type, expected boolean"))? + .map_err(|_| IRError::InvalidU8ExpectedBoolean)? .is_true(); Ok(NativeValue { bool_value }) } @@ -136,9 +178,9 @@ pub fn ffi_parse_bool_arg( #[inline] pub fn ffi_parse_u8_arg( arg: v8::Local, -) -> Result { +) -> Result { let u8_value = v8::Local::::try_from(arg) - .map_err(|_| type_error("Invalid FFI u8 type, expected unsigned integer"))? + .map_err(|_| IRError::InvalidU8ExpectedUnsignedInteger)? .value() as u8; Ok(NativeValue { u8_value }) } @@ -146,9 +188,9 @@ pub fn ffi_parse_u8_arg( #[inline] pub fn ffi_parse_i8_arg( arg: v8::Local, -) -> Result { +) -> Result { let i8_value = v8::Local::::try_from(arg) - .map_err(|_| type_error("Invalid FFI i8 type, expected integer"))? + .map_err(|_| IRError::InvalidI8)? .value() as i8; Ok(NativeValue { i8_value }) } @@ -156,9 +198,9 @@ pub fn ffi_parse_i8_arg( #[inline] pub fn ffi_parse_u16_arg( arg: v8::Local, -) -> Result { +) -> Result { let u16_value = v8::Local::::try_from(arg) - .map_err(|_| type_error("Invalid FFI u16 type, expected unsigned integer"))? + .map_err(|_| IRError::InvalidU16)? .value() as u16; Ok(NativeValue { u16_value }) } @@ -166,9 +208,9 @@ pub fn ffi_parse_u16_arg( #[inline] pub fn ffi_parse_i16_arg( arg: v8::Local, -) -> Result { +) -> Result { let i16_value = v8::Local::::try_from(arg) - .map_err(|_| type_error("Invalid FFI i16 type, expected integer"))? + .map_err(|_| IRError::InvalidI16)? .value() as i16; Ok(NativeValue { i16_value }) } @@ -176,9 +218,9 @@ pub fn ffi_parse_i16_arg( #[inline] pub fn ffi_parse_u32_arg( arg: v8::Local, -) -> Result { +) -> Result { let u32_value = v8::Local::::try_from(arg) - .map_err(|_| type_error("Invalid FFI u32 type, expected unsigned integer"))? + .map_err(|_| IRError::InvalidU32)? 
.value(); Ok(NativeValue { u32_value }) } @@ -186,9 +228,9 @@ pub fn ffi_parse_u32_arg( #[inline] pub fn ffi_parse_i32_arg( arg: v8::Local, -) -> Result { +) -> Result { let i32_value = v8::Local::::try_from(arg) - .map_err(|_| type_error("Invalid FFI i32 type, expected integer"))? + .map_err(|_| IRError::InvalidI32)? .value(); Ok(NativeValue { i32_value }) } @@ -197,7 +239,7 @@ pub fn ffi_parse_i32_arg( pub fn ffi_parse_u64_arg( scope: &mut v8::HandleScope, arg: v8::Local, -) -> Result { +) -> Result { // Order of checking: // 1. BigInt: Uncommon and not supported by Fast API, so optimise slow call for this case. // 2. Number: Common, supported by Fast API, so let that be the optimal case. @@ -207,9 +249,7 @@ pub fn ffi_parse_u64_arg( } else if let Ok(value) = v8::Local::::try_from(arg) { value.integer_value(scope).unwrap() as u64 } else { - return Err(type_error( - "Invalid FFI u64 type, expected unsigned integer", - )); + return Err(IRError::InvalidU64); }; Ok(NativeValue { u64_value }) } @@ -218,7 +258,7 @@ pub fn ffi_parse_u64_arg( pub fn ffi_parse_i64_arg( scope: &mut v8::HandleScope, arg: v8::Local, -) -> Result { +) -> Result { // Order of checking: // 1. BigInt: Uncommon and not supported by Fast API, so optimise slow call for this case. // 2. Number: Common, supported by Fast API, so let that be the optimal case. @@ -228,7 +268,7 @@ pub fn ffi_parse_i64_arg( } else if let Ok(value) = v8::Local::::try_from(arg) { value.integer_value(scope).unwrap() } else { - return Err(type_error("Invalid FFI i64 type, expected integer")); + return Err(IRError::InvalidI64); }; Ok(NativeValue { i64_value }) } @@ -237,7 +277,7 @@ pub fn ffi_parse_i64_arg( pub fn ffi_parse_usize_arg( scope: &mut v8::HandleScope, arg: v8::Local, -) -> Result { +) -> Result { // Order of checking: // 1. BigInt: Uncommon and not supported by Fast API, so optimise slow call for this case. // 2. Number: Common, supported by Fast API, so let that be the optimal case. 
@@ -247,7 +287,7 @@ pub fn ffi_parse_usize_arg( } else if let Ok(value) = v8::Local::::try_from(arg) { value.integer_value(scope).unwrap() as usize } else { - return Err(type_error("Invalid FFI usize type, expected integer")); + return Err(IRError::InvalidUsize); }; Ok(NativeValue { usize_value }) } @@ -256,7 +296,7 @@ pub fn ffi_parse_usize_arg( pub fn ffi_parse_isize_arg( scope: &mut v8::HandleScope, arg: v8::Local, -) -> Result { +) -> Result { // Order of checking: // 1. BigInt: Uncommon and not supported by Fast API, so optimise slow call for this case. // 2. Number: Common, supported by Fast API, so let that be the optimal case. @@ -266,7 +306,7 @@ pub fn ffi_parse_isize_arg( } else if let Ok(value) = v8::Local::::try_from(arg) { value.integer_value(scope).unwrap() as isize } else { - return Err(type_error("Invalid FFI isize type, expected integer")); + return Err(IRError::InvalidIsize); }; Ok(NativeValue { isize_value }) } @@ -274,9 +314,9 @@ pub fn ffi_parse_isize_arg( #[inline] pub fn ffi_parse_f32_arg( arg: v8::Local, -) -> Result { +) -> Result { let f32_value = v8::Local::::try_from(arg) - .map_err(|_| type_error("Invalid FFI f32 type, expected number"))? + .map_err(|_| IRError::InvalidF32)? .value() as f32; Ok(NativeValue { f32_value }) } @@ -284,9 +324,9 @@ pub fn ffi_parse_f32_arg( #[inline] pub fn ffi_parse_f64_arg( arg: v8::Local, -) -> Result { +) -> Result { let f64_value = v8::Local::::try_from(arg) - .map_err(|_| type_error("Invalid FFI f64 type, expected number"))? + .map_err(|_| IRError::InvalidF64)? 
.value(); Ok(NativeValue { f64_value }) } @@ -295,15 +335,13 @@ pub fn ffi_parse_f64_arg( pub fn ffi_parse_pointer_arg( _scope: &mut v8::HandleScope, arg: v8::Local, -) -> Result { +) -> Result { let pointer = if let Ok(value) = v8::Local::::try_from(arg) { value.value() } else if arg.is_null() { ptr::null_mut() } else { - return Err(type_error( - "Invalid FFI pointer type, expected null, or External", - )); + return Err(IRError::InvalidPointerType); }; Ok(NativeValue { pointer }) } @@ -312,7 +350,7 @@ pub fn ffi_parse_pointer_arg( pub fn ffi_parse_buffer_arg( scope: &mut v8::HandleScope, arg: v8::Local, -) -> Result { +) -> Result { // Order of checking: // 1. ArrayBuffer: Fairly common and not supported by Fast API, optimise this case. // 2. ArrayBufferView: Common and supported by Fast API @@ -328,9 +366,7 @@ pub fn ffi_parse_buffer_arg( let byte_offset = value.byte_offset(); let pointer = value .buffer(scope) - .ok_or_else(|| { - type_error("Invalid FFI ArrayBufferView, expected data in the buffer") - })? + .ok_or(IRError::InvalidArrayBufferView)? .data(); if let Some(non_null) = pointer { // SAFETY: Pointer is non-null, and V8 guarantees that the byte_offset @@ -342,9 +378,7 @@ pub fn ffi_parse_buffer_arg( } else if arg.is_null() { ptr::null_mut() } else { - return Err(type_error( - "Invalid FFI buffer type, expected null, ArrayBuffer, or ArrayBufferView", - )); + return Err(IRError::InvalidBufferType); }; Ok(NativeValue { pointer }) } @@ -353,7 +387,7 @@ pub fn ffi_parse_buffer_arg( pub fn ffi_parse_struct_arg( scope: &mut v8::HandleScope, arg: v8::Local, -) -> Result { +) -> Result { // Order of checking: // 1. ArrayBuffer: Fairly common and not supported by Fast API, optimise this case. // 2. 
ArrayBufferView: Common and supported by Fast API @@ -362,31 +396,23 @@ pub fn ffi_parse_struct_arg( if let Some(non_null) = value.data() { non_null.as_ptr() } else { - return Err(type_error( - "Invalid FFI ArrayBuffer, expected data in buffer", - )); + return Err(IRError::InvalidArrayBuffer); } } else if let Ok(value) = v8::Local::::try_from(arg) { let byte_offset = value.byte_offset(); let pointer = value .buffer(scope) - .ok_or_else(|| { - type_error("Invalid FFI ArrayBufferView, expected data in the buffer") - })? + .ok_or(IRError::InvalidArrayBufferView)? .data(); if let Some(non_null) = pointer { // SAFETY: Pointer is non-null, and V8 guarantees that the byte_offset // is within the buffer backing store. unsafe { non_null.as_ptr().add(byte_offset) } } else { - return Err(type_error( - "Invalid FFI ArrayBufferView, expected data in buffer", - )); + return Err(IRError::InvalidArrayBufferView); } } else { - return Err(type_error( - "Invalid FFI struct type, expected ArrayBuffer, or ArrayBufferView", - )); + return Err(IRError::InvalidStructType); }; Ok(NativeValue { pointer }) } @@ -395,15 +421,13 @@ pub fn ffi_parse_struct_arg( pub fn ffi_parse_function_arg( _scope: &mut v8::HandleScope, arg: v8::Local, -) -> Result { +) -> Result { let pointer = if let Ok(value) = v8::Local::::try_from(arg) { value.value() } else if arg.is_null() { ptr::null_mut() } else { - return Err(type_error( - "Invalid FFI function type, expected null, or External", - )); + return Err(IRError::InvalidFunctionType); }; Ok(NativeValue { pointer }) } @@ -412,7 +436,7 @@ pub fn ffi_parse_args<'scope>( scope: &mut v8::HandleScope<'scope>, args: v8::Local, parameter_types: &[NativeType], -) -> Result, AnyError> +) -> Result, IRError> where 'scope: 'scope, { diff --git a/ext/ffi/lib.rs b/ext/ffi/lib.rs index 77ec3c85e..237f8c3b0 100644 --- a/ext/ffi/lib.rs +++ b/ext/ffi/lib.rs @@ -29,6 +29,13 @@ use repr::*; use symbol::NativeType; use symbol::Symbol; +pub use call::CallError; +pub use 
callback::CallbackError; +pub use dlfcn::DlfcnError; +pub use ir::IRError; +pub use r#static::StaticError; +pub use repr::ReprError; + #[cfg(not(target_pointer_width = "64"))] compile_error!("platform not supported"); diff --git a/ext/ffi/repr.rs b/ext/ffi/repr.rs index 315e6d53b..2f04f4feb 100644 --- a/ext/ffi/repr.rs +++ b/ext/ffi/repr.rs @@ -1,9 +1,6 @@ // Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. use crate::FfiPermissions; -use deno_core::error::range_error; -use deno_core::error::type_error; -use deno_core::error::AnyError; use deno_core::op2; use deno_core::v8; use deno_core::OpState; @@ -12,16 +9,58 @@ use std::ffi::c_void; use std::ffi::CStr; use std::ptr; +#[derive(Debug, thiserror::Error)] +pub enum ReprError { + #[error("Invalid pointer to offset, pointer is null")] + InvalidOffset, + #[error("Invalid ArrayBuffer pointer, pointer is null")] + InvalidArrayBuffer, + #[error("Destination length is smaller than source length")] + DestinationLengthTooShort, + #[error("Invalid CString pointer, pointer is null")] + InvalidCString, + #[error("Invalid CString pointer, string exceeds max length")] + CStringTooLong, + #[error("Invalid bool pointer, pointer is null")] + InvalidBool, + #[error("Invalid u8 pointer, pointer is null")] + InvalidU8, + #[error("Invalid i8 pointer, pointer is null")] + InvalidI8, + #[error("Invalid u16 pointer, pointer is null")] + InvalidU16, + #[error("Invalid i16 pointer, pointer is null")] + InvalidI16, + #[error("Invalid u32 pointer, pointer is null")] + InvalidU32, + #[error("Invalid i32 pointer, pointer is null")] + InvalidI32, + #[error("Invalid u64 pointer, pointer is null")] + InvalidU64, + #[error("Invalid i64 pointer, pointer is null")] + InvalidI64, + #[error("Invalid f32 pointer, pointer is null")] + InvalidF32, + #[error("Invalid f64 pointer, pointer is null")] + InvalidF64, + #[error("Invalid pointer pointer, pointer is null")] + InvalidPointer, + #[error(transparent)] + 
Permission(deno_core::error::AnyError), +} + #[op2(fast)] pub fn op_ffi_ptr_create( state: &mut OpState, #[bigint] ptr_number: usize, -) -> Result<*mut c_void, AnyError> +) -> Result<*mut c_void, ReprError> where FP: FfiPermissions + 'static, { let permissions = state.borrow_mut::(); - permissions.check_partial_no_path()?; + permissions + .check_partial_no_path() + .map_err(ReprError::Permission)?; Ok(ptr_number as *mut c_void) } @@ -31,12 +70,14 @@ pub fn op_ffi_ptr_equals( state: &mut OpState, a: *const c_void, b: *const c_void, -) -> Result +) -> Result where FP: FfiPermissions + 'static, { let permissions = state.borrow_mut::(); - permissions.check_partial_no_path()?; + permissions + .check_partial_no_path() + .map_err(ReprError::Permission)?; Ok(a == b) } @@ -45,12 +86,14 @@ where pub fn op_ffi_ptr_of( state: &mut OpState, #[anybuffer] buf: *const u8, -) -> Result<*mut c_void, AnyError> +) -> Result<*mut c_void, ReprError> where FP: FfiPermissions + 'static, { let permissions = state.borrow_mut::(); - permissions.check_partial_no_path()?; + permissions + .check_partial_no_path() + .map_err(ReprError::Permission)?; Ok(buf as *mut c_void) } @@ -59,12 +102,14 @@ where pub fn op_ffi_ptr_of_exact( state: &mut OpState, buf: v8::Local, -) -> Result<*mut c_void, AnyError> +) -> Result<*mut c_void, ReprError> where FP: FfiPermissions + 'static, { let permissions = state.borrow_mut::(); - permissions.check_partial_no_path()?; + permissions + .check_partial_no_path() + .map_err(ReprError::Permission)?; let Some(buf) = buf.get_backing_store() else { return Ok(0 as _); @@ -80,15 +125,17 @@ pub fn op_ffi_ptr_offset( state: &mut OpState, ptr: *mut c_void, #[number] offset: isize, -) -> Result<*mut c_void, AnyError> +) -> Result<*mut c_void, ReprError> where FP: FfiPermissions + 'static, { let permissions = state.borrow_mut::(); - permissions.check_partial_no_path()?; + permissions + .check_partial_no_path() + .map_err(ReprError::Permission)?; if ptr.is_null() { - return 
Err(type_error("Invalid pointer to offset, pointer is null")); + return Err(ReprError::InvalidOffset); } // TODO(mmastrac): Create a RawPointer that can safely do pointer math. @@ -110,12 +157,14 @@ unsafe extern "C" fn noop_deleter_callback( pub fn op_ffi_ptr_value( state: &mut OpState, ptr: *mut c_void, -) -> Result +) -> Result where FP: FfiPermissions + 'static, { let permissions = state.borrow_mut::(); - permissions.check_partial_no_path()?; + permissions + .check_partial_no_path() + .map_err(ReprError::Permission)?; Ok(ptr as usize) } @@ -127,15 +176,17 @@ pub fn op_ffi_get_buf( ptr: *mut c_void, #[number] offset: isize, #[number] len: usize, -) -> Result, AnyError> +) -> Result, ReprError> where FP: FfiPermissions + 'static, { let permissions = state.borrow_mut::(); - permissions.check_partial_no_path()?; + permissions + .check_partial_no_path() + .map_err(ReprError::Permission)?; if ptr.is_null() { - return Err(type_error("Invalid ArrayBuffer pointer, pointer is null")); + return Err(ReprError::InvalidArrayBuffer); } // SAFETY: Trust the user to have provided a real pointer, offset, and a valid matching size to it. Since this is a foreign pointer, we should not do any deletion. 
@@ -144,7 +195,7 @@ where ptr.offset(offset), len, noop_deleter_callback, - std::ptr::null_mut(), + ptr::null_mut(), ) } .make_shared(); @@ -159,19 +210,19 @@ pub fn op_ffi_buf_copy_into( #[number] offset: isize, #[anybuffer] dst: &mut [u8], #[number] len: usize, -) -> Result<(), AnyError> +) -> Result<(), ReprError> where FP: FfiPermissions + 'static, { let permissions = state.borrow_mut::(); - permissions.check_partial_no_path()?; + permissions + .check_partial_no_path() + .map_err(ReprError::Permission)?; if src.is_null() { - Err(type_error("Invalid ArrayBuffer pointer, pointer is null")) + Err(ReprError::InvalidArrayBuffer) } else if dst.len() < len { - Err(range_error( - "Destination length is smaller than source length", - )) + Err(ReprError::DestinationLengthTooShort) } else { let src = src as *const c_void; @@ -190,24 +241,24 @@ pub fn op_ffi_cstr_read( state: &mut OpState, ptr: *mut c_void, #[number] offset: isize, -) -> Result, AnyError> +) -> Result, ReprError> where FP: FfiPermissions + 'static, { let permissions = state.borrow_mut::(); - permissions.check_partial_no_path()?; + permissions + .check_partial_no_path() + .map_err(ReprError::Permission)?; if ptr.is_null() { - return Err(type_error("Invalid CString pointer, pointer is null")); + return Err(ReprError::InvalidCString); } let cstr = // SAFETY: Pointer and offset are user provided. 
unsafe { CStr::from_ptr(ptr.offset(offset) as *const c_char) }.to_bytes(); let value = v8::String::new_from_utf8(scope, cstr, v8::NewStringType::Normal) - .ok_or_else(|| { - type_error("Invalid CString pointer, string exceeds max length") - })?; + .ok_or_else(|| ReprError::CStringTooLong)?; Ok(value) } @@ -216,15 +267,17 @@ pub fn op_ffi_read_bool( state: &mut OpState, ptr: *mut c_void, #[number] offset: isize, -) -> Result +) -> Result where FP: FfiPermissions + 'static, { let permissions = state.borrow_mut::(); - permissions.check_partial_no_path()?; + permissions + .check_partial_no_path() + .map_err(ReprError::Permission)?; if ptr.is_null() { - return Err(type_error("Invalid bool pointer, pointer is null")); + return Err(ReprError::InvalidBool); } // SAFETY: ptr and offset are user provided. @@ -236,15 +289,17 @@ pub fn op_ffi_read_u8( state: &mut OpState, ptr: *mut c_void, #[number] offset: isize, -) -> Result +) -> Result where FP: FfiPermissions + 'static, { let permissions = state.borrow_mut::(); - permissions.check_partial_no_path()?; + permissions + .check_partial_no_path() + .map_err(ReprError::Permission)?; if ptr.is_null() { - return Err(type_error("Invalid u8 pointer, pointer is null")); + return Err(ReprError::InvalidU8); } // SAFETY: ptr and offset are user provided. @@ -258,15 +313,17 @@ pub fn op_ffi_read_i8( state: &mut OpState, ptr: *mut c_void, #[number] offset: isize, -) -> Result +) -> Result where FP: FfiPermissions + 'static, { let permissions = state.borrow_mut::(); - permissions.check_partial_no_path()?; + permissions + .check_partial_no_path() + .map_err(ReprError::Permission)?; if ptr.is_null() { - return Err(type_error("Invalid i8 pointer, pointer is null")); + return Err(ReprError::InvalidI8); } // SAFETY: ptr and offset are user provided. 
@@ -280,15 +337,17 @@ pub fn op_ffi_read_u16( state: &mut OpState, ptr: *mut c_void, #[number] offset: isize, -) -> Result +) -> Result where FP: FfiPermissions + 'static, { let permissions = state.borrow_mut::(); - permissions.check_partial_no_path()?; + permissions + .check_partial_no_path() + .map_err(ReprError::Permission)?; if ptr.is_null() { - return Err(type_error("Invalid u16 pointer, pointer is null")); + return Err(ReprError::InvalidU16); } // SAFETY: ptr and offset are user provided. @@ -302,15 +361,17 @@ pub fn op_ffi_read_i16( state: &mut OpState, ptr: *mut c_void, #[number] offset: isize, -) -> Result +) -> Result where FP: FfiPermissions + 'static, { let permissions = state.borrow_mut::(); - permissions.check_partial_no_path()?; + permissions + .check_partial_no_path() + .map_err(ReprError::Permission)?; if ptr.is_null() { - return Err(type_error("Invalid i16 pointer, pointer is null")); + return Err(ReprError::InvalidI16); } // SAFETY: ptr and offset are user provided. @@ -324,15 +385,17 @@ pub fn op_ffi_read_u32( state: &mut OpState, ptr: *mut c_void, #[number] offset: isize, -) -> Result +) -> Result where FP: FfiPermissions + 'static, { let permissions = state.borrow_mut::(); - permissions.check_partial_no_path()?; + permissions + .check_partial_no_path() + .map_err(ReprError::Permission)?; if ptr.is_null() { - return Err(type_error("Invalid u32 pointer, pointer is null")); + return Err(ReprError::InvalidU32); } // SAFETY: ptr and offset are user provided. @@ -344,15 +407,17 @@ pub fn op_ffi_read_i32( state: &mut OpState, ptr: *mut c_void, #[number] offset: isize, -) -> Result +) -> Result where FP: FfiPermissions + 'static, { let permissions = state.borrow_mut::(); - permissions.check_partial_no_path()?; + permissions + .check_partial_no_path() + .map_err(ReprError::Permission)?; if ptr.is_null() { - return Err(type_error("Invalid i32 pointer, pointer is null")); + return Err(ReprError::InvalidI32); } // SAFETY: ptr and offset are user provided. 
@@ -367,15 +432,17 @@ pub fn op_ffi_read_u64( // Note: The representation of 64-bit integers is function-wide. We cannot // choose to take this parameter as a number while returning a bigint. #[bigint] offset: isize, -) -> Result +) -> Result where FP: FfiPermissions + 'static, { let permissions = state.borrow_mut::(); - permissions.check_partial_no_path()?; + permissions + .check_partial_no_path() + .map_err(ReprError::Permission)?; if ptr.is_null() { - return Err(type_error("Invalid u64 pointer, pointer is null")); + return Err(ReprError::InvalidU64); } let value = @@ -393,15 +460,17 @@ pub fn op_ffi_read_i64( // Note: The representation of 64-bit integers is function-wide. We cannot // choose to take this parameter as a number while returning a bigint. #[bigint] offset: isize, -) -> Result +) -> Result where FP: FfiPermissions + 'static, { let permissions = state.borrow_mut::(); - permissions.check_partial_no_path()?; + permissions + .check_partial_no_path() + .map_err(ReprError::Permission)?; if ptr.is_null() { - return Err(type_error("Invalid i64 pointer, pointer is null")); + return Err(ReprError::InvalidI64); } let value = @@ -416,15 +485,17 @@ pub fn op_ffi_read_f32( state: &mut OpState, ptr: *mut c_void, #[number] offset: isize, -) -> Result +) -> Result where FP: FfiPermissions + 'static, { let permissions = state.borrow_mut::(); - permissions.check_partial_no_path()?; + permissions + .check_partial_no_path() + .map_err(ReprError::Permission)?; if ptr.is_null() { - return Err(type_error("Invalid f32 pointer, pointer is null")); + return Err(ReprError::InvalidF32); } // SAFETY: ptr and offset are user provided. 
@@ -436,15 +507,17 @@ pub fn op_ffi_read_f64( state: &mut OpState, ptr: *mut c_void, #[number] offset: isize, -) -> Result +) -> Result where FP: FfiPermissions + 'static, { let permissions = state.borrow_mut::(); - permissions.check_partial_no_path()?; + permissions + .check_partial_no_path() + .map_err(ReprError::Permission)?; if ptr.is_null() { - return Err(type_error("Invalid f64 pointer, pointer is null")); + return Err(ReprError::InvalidF64); } // SAFETY: ptr and offset are user provided. @@ -456,15 +529,17 @@ pub fn op_ffi_read_ptr( state: &mut OpState, ptr: *mut c_void, #[number] offset: isize, -) -> Result<*mut c_void, AnyError> +) -> Result<*mut c_void, ReprError> where FP: FfiPermissions + 'static, { let permissions = state.borrow_mut::(); - permissions.check_partial_no_path()?; + permissions + .check_partial_no_path() + .map_err(ReprError::Permission)?; if ptr.is_null() { - return Err(type_error("Invalid pointer pointer, pointer is null")); + return Err(ReprError::InvalidPointer); } // SAFETY: ptr and offset are user provided. 
diff --git a/ext/ffi/static.rs b/ext/ffi/static.rs index f08605754..61b405933 100644 --- a/ext/ffi/static.rs +++ b/ext/ffi/static.rs @@ -2,14 +2,24 @@ use crate::dlfcn::DynamicLibraryResource; use crate::symbol::NativeType; -use deno_core::error::type_error; -use deno_core::error::AnyError; use deno_core::op2; use deno_core::v8; use deno_core::OpState; use deno_core::ResourceId; use std::ptr; +#[derive(Debug, thiserror::Error)] +pub enum StaticError { + #[error(transparent)] + Dlfcn(super::DlfcnError), + #[error("Invalid FFI static type 'void'")] + InvalidTypeVoid, + #[error("Invalid FFI static type 'struct'")] + InvalidTypeStruct, + #[error(transparent)] + Resource(deno_core::error::AnyError), +} + #[op2] pub fn op_ffi_get_static<'scope>( scope: &mut v8::HandleScope<'scope>, @@ -18,24 +28,27 @@ pub fn op_ffi_get_static<'scope>( #[string] name: String, #[serde] static_type: NativeType, optional: bool, -) -> Result, AnyError> { - let resource = state.resource_table.get::(rid)?; +) -> Result, StaticError> { + let resource = state + .resource_table + .get::(rid) + .map_err(StaticError::Resource)?; let data_ptr = match resource.get_static(name) { - Ok(data_ptr) => Ok(data_ptr), + Ok(data_ptr) => data_ptr, Err(err) => { if optional { let null: v8::Local = v8::null(scope).into(); return Ok(null); } else { - Err(err) + return Err(StaticError::Dlfcn(err)); } } - }?; + }; Ok(match static_type { NativeType::Void => { - return Err(type_error("Invalid FFI static type 'void'")); + return Err(StaticError::InvalidTypeVoid); } NativeType::Bool => { // SAFETY: ptr is user provided @@ -132,7 +145,7 @@ pub fn op_ffi_get_static<'scope>( external } NativeType::Struct(_) => { - return Err(type_error("Invalid FFI static type 'struct'")); + return Err(StaticError::InvalidTypeStruct); } }) } -- cgit v1.2.3 From 4c9eee3ebe383f0aa8f082dd6831f609cd5d5abb Mon Sep 17 00:00:00 2001 From: David Sherret Date: Mon, 14 Oct 2024 23:25:18 -0400 Subject: perf(http): cache webidl.converters lookups in 
ext/fetch/23_response.js (#26256) --- ext/fetch/23_response.js | 105 ++++++++++++++++++++++++++--------------------- 1 file changed, 58 insertions(+), 47 deletions(-) (limited to 'ext') diff --git a/ext/fetch/23_response.js b/ext/fetch/23_response.js index ff4ad5fac..278dcb7de 100644 --- a/ext/fetch/23_response.js +++ b/ext/fetch/23_response.js @@ -61,6 +61,15 @@ const _mimeType = Symbol("mime type"); const _body = Symbol("body"); const _brand = webidl.brand; +// it's slightly faster to cache these +const webidlConvertersBodyInitDomString = + webidl.converters["BodyInit_DOMString?"]; +const webidlConvertersUSVString = webidl.converters["USVString"]; +const webidlConvertersUnsignedShort = webidl.converters["unsigned short"]; +const webidlConvertersAny = webidl.converters["any"]; +const webidlConvertersByteString = webidl.converters["ByteString"]; +const webidlConvertersHeadersInit = webidl.converters["HeadersInit"]; + /** * @typedef InnerResponse * @property {"basic" | "cors" | "default" | "error" | "opaque" | "opaqueredirect"} type @@ -259,8 +268,8 @@ class Response { */ static redirect(url, status = 302) { const prefix = "Failed to execute 'Response.redirect'"; - url = webidl.converters["USVString"](url, prefix, "Argument 1"); - status = webidl.converters["unsigned short"](status, prefix, "Argument 2"); + url = webidlConvertersUSVString(url, prefix, "Argument 1"); + status = webidlConvertersUnsignedShort(status, prefix, "Argument 2"); const baseURL = getLocationHref(); const parsedURL = new URL(url, baseURL); @@ -286,8 +295,8 @@ class Response { */ static json(data = undefined, init = { __proto__: null }) { const prefix = "Failed to execute 'Response.json'"; - data = webidl.converters.any(data); - init = webidl.converters["ResponseInit_fast"](init, prefix, "Argument 2"); + data = webidlConvertersAny(data); + init = webidlConvertersResponseInitFast(init, prefix, "Argument 2"); const str = serializeJSValueToJSONString(data); const res = extractBody(str); @@ -313,8 
+322,8 @@ class Response { } const prefix = "Failed to construct 'Response'"; - body = webidl.converters["BodyInit_DOMString?"](body, prefix, "Argument 1"); - init = webidl.converters["ResponseInit_fast"](init, prefix, "Argument 2"); + body = webidlConvertersBodyInitDomString(body, prefix, "Argument 1"); + init = webidlConvertersResponseInitFast(init, prefix, "Argument 2"); this[_response] = newInnerResponse(); this[_headers] = headersFromHeaderList( @@ -443,47 +452,49 @@ webidl.converters["Response"] = webidl.createInterfaceConverter( "Response", ResponsePrototype, ); -webidl.converters["ResponseInit"] = webidl.createDictionaryConverter( - "ResponseInit", - [{ - key: "status", - defaultValue: 200, - converter: webidl.converters["unsigned short"], - }, { - key: "statusText", - defaultValue: "", - converter: webidl.converters["ByteString"], - }, { - key: "headers", - converter: webidl.converters["HeadersInit"], - }], -); -webidl.converters["ResponseInit_fast"] = function ( - init, - prefix, - context, - opts, -) { - if (init === undefined || init === null) { - return { status: 200, statusText: "", headers: undefined }; - } - // Fast path, if not a proxy - if (typeof init === "object" && !core.isProxy(init)) { - // Not a proxy fast path - const status = init.status !== undefined - ? webidl.converters["unsigned short"](init.status) - : 200; - const statusText = init.statusText !== undefined - ? webidl.converters["ByteString"](init.statusText) - : ""; - const headers = init.headers !== undefined - ? 
webidl.converters["HeadersInit"](init.headers) - : undefined; - return { status, statusText, headers }; - } - // Slow default path - return webidl.converters["ResponseInit"](init, prefix, context, opts); -}; +const webidlConvertersResponseInit = webidl.converters["ResponseInit"] = webidl + .createDictionaryConverter( + "ResponseInit", + [{ + key: "status", + defaultValue: 200, + converter: webidlConvertersUnsignedShort, + }, { + key: "statusText", + defaultValue: "", + converter: webidlConvertersByteString, + }, { + key: "headers", + converter: webidlConvertersHeadersInit, + }], + ); +const webidlConvertersResponseInitFast = webidl + .converters["ResponseInit_fast"] = function ( + init, + prefix, + context, + opts, + ) { + if (init === undefined || init === null) { + return { status: 200, statusText: "", headers: undefined }; + } + // Fast path, if not a proxy + if (typeof init === "object" && !core.isProxy(init)) { + // Not a proxy fast path + const status = init.status !== undefined + ? webidlConvertersUnsignedShort(init.status) + : 200; + const statusText = init.statusText !== undefined + ? webidlConvertersByteString(init.statusText) + : ""; + const headers = init.headers !== undefined + ? 
webidlConvertersHeadersInit(init.headers) + : undefined; + return { status, statusText, headers }; + } + // Slow default path + return webidlConvertersResponseInit(init, prefix, context, opts); + }; /** * @param {Response} response -- cgit v1.2.3 From 7f3747f2ef7cc9e1d45aa33360afdfe62cd20c56 Mon Sep 17 00:00:00 2001 From: David Sherret Date: Mon, 14 Oct 2024 23:25:47 -0400 Subject: perf(http): avoid clone getting request method and url (#26250) --- ext/http/fly_accept_encoding.rs | 2 +- ext/http/http_next.rs | 2 +- ext/http/request_properties.rs | 27 +++++++++++++-------------- 3 files changed, 15 insertions(+), 16 deletions(-) (limited to 'ext') diff --git a/ext/http/fly_accept_encoding.rs b/ext/http/fly_accept_encoding.rs index 94e336876..4d6fd2231 100644 --- a/ext/http/fly_accept_encoding.rs +++ b/ext/http/fly_accept_encoding.rs @@ -119,7 +119,7 @@ fn encodings_iter_inner<'s>( }; Some(Ok((encoding, qval))) }) - .map(|r| r?) // flatten Result = match request_properties.authority { Some(authority) => v8::String::new_from_utf8( scope, - authority.as_ref(), + authority.as_bytes(), v8::NewStringType::Normal, ) .unwrap() diff --git a/ext/http/request_properties.rs b/ext/http/request_properties.rs index 1422c7417..39d35a79f 100644 --- a/ext/http/request_properties.rs +++ b/ext/http/request_properties.rs @@ -34,8 +34,8 @@ pub struct HttpConnectionProperties { pub stream_type: NetworkStreamType, } -pub struct HttpRequestProperties { - pub authority: Option, +pub struct HttpRequestProperties<'a> { + pub authority: Option>, } /// Pluggable trait to determine listen, connection and request properties @@ -84,11 +84,11 @@ pub trait HttpPropertyExtractor { ) -> NetworkStream; /// Determines the request properties. 
- fn request_properties( - connection_properties: &HttpConnectionProperties, - uri: &Uri, - headers: &HeaderMap, - ) -> HttpRequestProperties; + fn request_properties<'a>( + connection_properties: &'a HttpConnectionProperties, + uri: &'a Uri, + headers: &'a HeaderMap, + ) -> HttpRequestProperties<'a>; } pub struct DefaultHttpPropertyExtractor {} @@ -180,18 +180,17 @@ impl HttpPropertyExtractor for DefaultHttpPropertyExtractor { } } - fn request_properties( - connection_properties: &HttpConnectionProperties, - uri: &Uri, - headers: &HeaderMap, - ) -> HttpRequestProperties { + fn request_properties<'a>( + connection_properties: &'a HttpConnectionProperties, + uri: &'a Uri, + headers: &'a HeaderMap, + ) -> HttpRequestProperties<'a> { let authority = req_host( uri, headers, connection_properties.stream_type, connection_properties.local_port.unwrap_or_default(), - ) - .map(|s| s.into_owned()); + ); HttpRequestProperties { authority } } -- cgit v1.2.3 From c7153838ec332d474c32b464b94f99382028bbbc Mon Sep 17 00:00:00 2001 From: Divy Srivastava Date: Tue, 15 Oct 2024 14:47:12 +0530 Subject: fix(ext/node): implement TCP.setNoDelay (#26263) Fixes https://github.com/denoland/deno/issues/26177 The significant delay was caused by Nagel's algorithm + delayed ACKs in Linux kernels. 
Here's the [kernel patch](https://lwn.net/Articles/502585/) which added 40ms `tcp_default_delack_min` ``` $ deno run -A pg-bench.mjs # main Tue Oct 15 2024 12:27:22 GMT+0530 (India Standard Time): 42ms $ target/release/deno run -A pg-bench.mjs # this patch Tue Oct 15 2024 12:28:02 GMT+0530 (India Standard Time): 1ms ``` ```js import { Buffer } from "node:buffer"; import pg from 'pg' const { Client } = pg const client = new Client({ connectionString: 'postgresql://postgres:postgres@127.0.0.1:5432/postgres' }) await client.connect() async function fetch() { const startPerf = performance.now(); const res = await client.query(`select $1::int as int, $2 as string, $3::timestamp with time zone as timestamp, $4 as null, $5::bool as boolean, $6::bytea as bytea, $7::jsonb as json `, [ 1337, 'wat', new Date().toISOString(), null, false, Buffer.from('awesome'), JSON.stringify([{ some: 'json' }, { array: 'object' }]) ]) console.log(`${new Date()}: ${Math.round(performance.now() - startPerf)}ms`) } for(;;) await fetch(); ``` --- ext/node/polyfills/internal_binding/tcp_wrap.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'ext') diff --git a/ext/node/polyfills/internal_binding/tcp_wrap.ts b/ext/node/polyfills/internal_binding/tcp_wrap.ts index 973a1d1c0..1321cc627 100644 --- a/ext/node/polyfills/internal_binding/tcp_wrap.ts +++ b/ext/node/polyfills/internal_binding/tcp_wrap.ts @@ -299,8 +299,8 @@ export class TCP extends ConnectionWrap { * @param noDelay * @return An error status code. 
*/ - setNoDelay(_noDelay: boolean): number { - // TODO(bnoordhuis) https://github.com/denoland/deno/pull/13103 + setNoDelay(noDelay: boolean): number { + this[kStreamBaseField].setNoDelay(noDelay); return 0; } -- cgit v1.2.3 From c5a7f98d82b64408764dfd269a17b5ddb6974737 Mon Sep 17 00:00:00 2001 From: Toby Ealden Date: Tue, 15 Oct 2024 19:05:10 +0100 Subject: fix(ext/node): handle http2 server ending stream (#26235) Closes #24845 --- ext/node/ops/http2.rs | 10 ++++------ ext/node/polyfills/http2.ts | 12 ++++++++++-- 2 files changed, 14 insertions(+), 8 deletions(-) (limited to 'ext') diff --git a/ext/node/ops/http2.rs b/ext/node/ops/http2.rs index 9595cb33d..705a8ecdc 100644 --- a/ext/node/ops/http2.rs +++ b/ext/node/ops/http2.rs @@ -488,13 +488,11 @@ pub async fn op_http2_client_get_response_body_chunk( loop { let result = poll_fn(|cx| poll_data_or_trailers(cx, &mut body)).await; if let Err(err) = result { - let reason = err.reason(); - if let Some(reason) = reason { - if reason == Reason::CANCEL { - return Ok((None, false, true)); - } + match err.reason() { + Some(Reason::NO_ERROR) => return Ok((None, true, false)), + Some(Reason::CANCEL) => return Ok((None, false, true)), + _ => return Err(err.into()), } - return Err(err.into()); } match result.unwrap() { DataOrTrailers::Data(data) => { diff --git a/ext/node/polyfills/http2.ts b/ext/node/polyfills/http2.ts index a9ced2bd9..dc2379aeb 100644 --- a/ext/node/polyfills/http2.ts +++ b/ext/node/polyfills/http2.ts @@ -882,6 +882,7 @@ export class ClientHttp2Stream extends Duplex { trailersReady: false, endAfterHeaders: false, shutdownWritableCalled: false, + serverEndedCall: false, }; this[kDenoResponse] = undefined; this[kDenoRid] = undefined; @@ -1109,7 +1110,9 @@ export class ClientHttp2Stream extends Duplex { } debugHttp2(">>> chunk", chunk, finished, this[kDenoResponse].bodyRid); - if (chunk === null) { + if (finished || chunk === null) { + this[kState].serverEndedCall = true; + const trailerList = await 
op_http2_client_get_response_trailers( this[kDenoResponse].bodyRid, ); @@ -1237,7 +1240,9 @@ export class ClientHttp2Stream extends Duplex { this[kSession] = undefined; session[kMaybeDestroy](); - callback(err); + if (callback) { + callback(err); + } } [kMaybeDestroy](code = constants.NGHTTP2_NO_ERROR) { @@ -1280,6 +1285,9 @@ function shutdownWritable(stream, callback, streamRid) { if (state.flags & STREAM_FLAGS_HAS_TRAILERS) { onStreamTrailers(stream); callback(); + } else if (state.serverEndedCall) { + debugHttp2(">>> stream finished"); + callback(); } else { op_http2_client_send_data(streamRid, new Uint8Array(), true) .then(() => { -- cgit v1.2.3 From 7211028f1e3b0ef2eaa4053d66b8bd9972731131 Mon Sep 17 00:00:00 2001 From: Kenta Moriuchi Date: Wed, 16 Oct 2024 03:32:27 +0900 Subject: fix(ext/node): use primordials in `ext/node/polyfills/internal/buffer.mjs` (#24993) Towards #24236 --- ext/node/polyfills/internal/buffer.mjs | 575 +++++++++++++++++++-------------- 1 file changed, 331 insertions(+), 244 deletions(-) (limited to 'ext') diff --git a/ext/node/polyfills/internal/buffer.mjs b/ext/node/polyfills/internal/buffer.mjs index 6687f7394..dd549221f 100644 --- a/ext/node/polyfills/internal/buffer.mjs +++ b/ext/node/polyfills/internal/buffer.mjs @@ -2,10 +2,59 @@ // Copyright Joyent and Node contributors. All rights reserved. MIT license. // Copyright Feross Aboukhadijeh, and other contributors. All rights reserved. MIT license. 
-// TODO(petamoriken): enable prefer-primordials for node polyfills -// deno-lint-ignore-file prefer-primordials - -import { core } from "ext:core/mod.js"; +import { core, primordials } from "ext:core/mod.js"; +const { + isAnyArrayBuffer, + isArrayBuffer, + isDataView, + isSharedArrayBuffer, + isTypedArray, +} = core; +const { + ArrayBufferPrototypeGetByteLength, + ArrayBufferPrototypeGetDetached, + ArrayIsArray, + ArrayPrototypeSlice, + BigInt, + DataViewPrototypeGetByteLength, + Float32Array, + Float64Array, + MathFloor, + MathMin, + Number, + NumberIsInteger, + NumberIsNaN, + NumberMAX_SAFE_INTEGER, + NumberMIN_SAFE_INTEGER, + NumberPrototypeToString, + ObjectCreate, + ObjectDefineProperty, + ObjectPrototypeIsPrototypeOf, + ObjectSetPrototypeOf, + RangeError, + SafeRegExp, + String, + StringFromCharCode, + StringPrototypeCharCodeAt, + StringPrototypeIncludes, + StringPrototypeReplace, + StringPrototypeToLowerCase, + StringPrototypeTrim, + SymbolFor, + SymbolToPrimitive, + TypeError, + TypeErrorPrototype, + TypedArrayPrototypeCopyWithin, + TypedArrayPrototypeFill, + TypedArrayPrototypeGetBuffer, + TypedArrayPrototypeGetByteLength, + TypedArrayPrototypeGetByteOffset, + TypedArrayPrototypeSet, + TypedArrayPrototypeSlice, + TypedArrayPrototypeSubarray, + Uint8Array, + Uint8ArrayPrototype, +} = primordials; import { op_is_ascii, op_is_utf8, op_transcode } from "ext:core/ops"; import { TextDecoder, TextEncoder } from "ext:deno_web/08_text_encoding.js"; @@ -24,11 +73,6 @@ import { hexToBytes, utf16leToBytes, } from "ext:deno_node/internal_binding/_utils.ts"; -import { - isAnyArrayBuffer, - isArrayBufferView, - isTypedArray, -} from "ext:deno_node/internal/util/types.ts"; import { normalizeEncoding } from "ext:deno_node/internal/util.mjs"; import { validateBuffer } from "ext:deno_node/internal/validators.mjs"; import { isUint8Array } from "ext:deno_node/internal/util/types.ts"; @@ -50,9 +94,13 @@ const utf8Encoder = new TextEncoder(); // Temporary buffers to convert 
numbers. const float32Array = new Float32Array(1); -const uInt8Float32Array = new Uint8Array(float32Array.buffer); +const uInt8Float32Array = new Uint8Array( + TypedArrayPrototypeGetBuffer(float32Array), +); const float64Array = new Float64Array(1); -const uInt8Float64Array = new Uint8Array(float64Array.buffer); +const uInt8Float64Array = new Uint8Array( + TypedArrayPrototypeGetBuffer(float64Array), +); // Check endianness. float32Array[0] = -1; // 0xBF800000 @@ -64,10 +112,7 @@ export const kMaxLength = 2147483647; export const kStringMaxLength = 536870888; const MAX_UINT32 = 2 ** 32; -const customInspectSymbol = - typeof Symbol === "function" && typeof Symbol["for"] === "function" - ? Symbol["for"]("nodejs.util.inspect.custom") - : null; +const customInspectSymbol = SymbolFor("nodejs.util.inspect.custom"); export const INSPECT_MAX_BYTES = 50; @@ -76,23 +121,25 @@ export const constants = { MAX_STRING_LENGTH: kStringMaxLength, }; -Object.defineProperty(Buffer.prototype, "parent", { +ObjectDefineProperty(Buffer.prototype, "parent", { + __proto__: null, enumerable: true, get: function () { - if (!Buffer.isBuffer(this)) { + if (!BufferIsBuffer(this)) { return void 0; } - return this.buffer; + return TypedArrayPrototypeGetBuffer(this); }, }); -Object.defineProperty(Buffer.prototype, "offset", { +ObjectDefineProperty(Buffer.prototype, "offset", { + __proto__: null, enumerable: true, get: function () { - if (!Buffer.isBuffer(this)) { + if (!BufferIsBuffer(this)) { return void 0; } - return this.byteOffset; + return TypedArrayPrototypeGetByteOffset(this); }, }); @@ -103,10 +150,21 @@ function createBuffer(length) { ); } const buf = new Uint8Array(length); - Object.setPrototypeOf(buf, Buffer.prototype); + ObjectSetPrototypeOf(buf, BufferPrototype); return buf; } +/** + * @param {ArrayBufferLike} O + * @returns {boolean} + */ +function isDetachedBuffer(O) { + if (isSharedArrayBuffer(O)) { + return false; + } + return ArrayBufferPrototypeGetDetached(O); +} + export function 
Buffer(arg, encodingOrOffset, length) { if (typeof arg === "number") { if (typeof encodingOrOffset === "string") { @@ -133,6 +191,7 @@ function _from(value, encodingOrOffset, length) { return fromArrayBuffer(value, encodingOrOffset, length); } + // deno-lint-ignore prefer-primordials const valueOf = value.valueOf && value.valueOf(); if ( valueOf != null && @@ -147,8 +206,8 @@ function _from(value, encodingOrOffset, length) { return b; } - if (typeof value[Symbol.toPrimitive] === "function") { - const primitive = value[Symbol.toPrimitive]("string"); + if (typeof value[SymbolToPrimitive] === "function") { + const primitive = value[SymbolToPrimitive]("string"); if (typeof primitive === "string") { return fromString(primitive, encodingOrOffset); } @@ -162,13 +221,19 @@ function _from(value, encodingOrOffset, length) { ); } -Buffer.from = function from(value, encodingOrOffset, length) { +const BufferFrom = Buffer.from = function from( + value, + encodingOrOffset, + length, +) { return _from(value, encodingOrOffset, length); }; -Object.setPrototypeOf(Buffer.prototype, Uint8Array.prototype); +const BufferPrototype = Buffer.prototype; + +ObjectSetPrototypeOf(Buffer.prototype, Uint8ArrayPrototype); -Object.setPrototypeOf(Buffer, Uint8Array); +ObjectSetPrototypeOf(Buffer, Uint8Array); function assertSize(size) { validateNumber(size, "size", 0, kMaxLength); @@ -186,6 +251,7 @@ function _alloc(size, fill, encoding) { encoding, ); } + // deno-lint-ignore prefer-primordials return buffer.fill(fill, encoding); } return buffer; @@ -212,13 +278,14 @@ function fromString(string, encoding) { if (typeof encoding !== "string" || encoding === "") { encoding = "utf8"; } - if (!Buffer.isEncoding(encoding)) { + if (!BufferIsEncoding(encoding)) { throw new codes.ERR_UNKNOWN_ENCODING(encoding); } const length = byteLength(string, encoding) | 0; let buf = createBuffer(length); const actual = buf.write(string, encoding); if (actual !== length) { + // deno-lint-ignore prefer-primordials buf = 
buf.slice(0, actual); } return buf; @@ -226,11 +293,12 @@ function fromString(string, encoding) { function fromArrayLike(obj) { const buf = new Uint8Array(obj); - Object.setPrototypeOf(buf, Buffer.prototype); + ObjectSetPrototypeOf(buf, BufferPrototype); return buf; } function fromObject(obj) { + // deno-lint-ignore prefer-primordials if (obj.length !== undefined || isAnyArrayBuffer(obj.buffer)) { if (typeof obj.length !== "number") { return createBuffer(0); @@ -239,7 +307,7 @@ function fromObject(obj) { return fromArrayLike(obj); } - if (obj.type === "Buffer" && Array.isArray(obj.data)) { + if (obj.type === "Buffer" && ArrayIsArray(obj.data)) { return fromArrayLike(obj.data); } } @@ -248,7 +316,7 @@ function checked(length) { if (length >= kMaxLength) { throw new RangeError( "Attempt to allocate Buffer larger than maximum size: 0x" + - kMaxLength.toString(16) + " bytes", + NumberPrototypeToString(kMaxLength, 16) + " bytes", ); } return length | 0; @@ -256,25 +324,33 @@ function checked(length) { export function SlowBuffer(length) { assertSize(length); - return Buffer.alloc(+length); + return _alloc(+length); } -Object.setPrototypeOf(SlowBuffer.prototype, Uint8Array.prototype); +ObjectSetPrototypeOf(SlowBuffer.prototype, Uint8ArrayPrototype); -Object.setPrototypeOf(SlowBuffer, Uint8Array); +ObjectSetPrototypeOf(SlowBuffer, Uint8Array); -Buffer.isBuffer = function isBuffer(b) { - return b != null && b._isBuffer === true && b !== Buffer.prototype; +const BufferIsBuffer = Buffer.isBuffer = function isBuffer(b) { + return b != null && b._isBuffer === true && b !== BufferPrototype; }; -Buffer.compare = function compare(a, b) { - if (isInstance(a, Uint8Array)) { - a = Buffer.from(a, a.offset, a.byteLength); +const BufferCompare = Buffer.compare = function compare(a, b) { + if (isUint8Array(a)) { + a = BufferFrom( + a, + TypedArrayPrototypeGetByteOffset(a), + TypedArrayPrototypeGetByteLength(a), + ); } - if (isInstance(b, Uint8Array)) { - b = Buffer.from(b, b.offset, 
b.byteLength); + if (isUint8Array(b)) { + b = BufferFrom( + b, + TypedArrayPrototypeGetByteOffset(b), + TypedArrayPrototypeGetByteLength(b), + ); } - if (!Buffer.isBuffer(a) || !Buffer.isBuffer(b)) { + if (!BufferIsBuffer(a) || !BufferIsBuffer(b)) { throw new TypeError( 'The "buf1", "buf2" arguments must be one of type Buffer or Uint8Array', ); @@ -284,7 +360,7 @@ Buffer.compare = function compare(a, b) { } let x = a.length; let y = b.length; - for (let i = 0, len = Math.min(x, y); i < len; ++i) { + for (let i = 0, len = MathMin(x, y); i < len; ++i) { if (a[i] !== b[i]) { x = a[i]; y = b[i]; @@ -300,18 +376,18 @@ Buffer.compare = function compare(a, b) { return 0; }; -Buffer.isEncoding = function isEncoding(encoding) { +const BufferIsEncoding = Buffer.isEncoding = function isEncoding(encoding) { return typeof encoding === "string" && encoding.length !== 0 && normalizeEncoding(encoding) !== undefined; }; Buffer.concat = function concat(list, length) { - if (!Array.isArray(list)) { + if (!ArrayIsArray(list)) { throw new codes.ERR_INVALID_ARG_TYPE("list", "Array", list); } if (list.length === 0) { - return Buffer.alloc(0); + return _alloc(0); } if (length === undefined) { @@ -325,7 +401,7 @@ Buffer.concat = function concat(list, length) { validateOffset(length, "length"); } - const buffer = Buffer.allocUnsafe(length); + const buffer = _allocUnsafe(length); let pos = 0; for (let i = 0; i < list.length; i++) { const buf = list[i]; @@ -346,7 +422,7 @@ Buffer.concat = function concat(list, length) { // Zero-fill the remaining bytes if the specified `length` was more than // the actual total length, i.e. if we have some remaining allocated bytes // there were not initialized. 
- buffer.fill(0, pos, length); + TypedArrayPrototypeFill(buffer, 0, pos, length); } return buffer; @@ -354,7 +430,18 @@ Buffer.concat = function concat(list, length) { function byteLength(string, encoding) { if (typeof string !== "string") { - if (isArrayBufferView(string) || isAnyArrayBuffer(string)) { + if (isTypedArray(string)) { + return TypedArrayPrototypeGetByteLength(string); + } + if (isDataView(string)) { + return DataViewPrototypeGetByteLength(string); + } + if (isArrayBuffer(string)) { + return ArrayBufferPrototypeGetByteLength(string); + } + if (isSharedArrayBuffer(string)) { + // TODO(petamoriken): add SharedArayBuffer to primordials + // deno-lint-ignore prefer-primordials return string.byteLength; } @@ -463,6 +550,7 @@ Buffer.prototype.toString = function toString(encoding, start, end) { throw new codes.ERR_UNKNOWN_ENCODING(encoding); } + // deno-lint-ignore prefer-primordials return ops.slice(this, start, end); }; @@ -479,22 +567,29 @@ Buffer.prototype.equals = function equals(b) { if (this === b) { return true; } - return Buffer.compare(this, b) === 0; + return BufferCompare(this, b) === 0; }; -Buffer.prototype.inspect = function inspect() { - let str = ""; - const max = INSPECT_MAX_BYTES; - str = this.toString("hex", 0, max).replace(/(.{2})/g, "$1 ").trim(); - if (this.length > max) { - str += " ... "; - } - return ""; -}; +const SPACER_PATTERN = new SafeRegExp(/(.{2})/g); -if (customInspectSymbol) { - Buffer.prototype[customInspectSymbol] = Buffer.prototype.inspect; -} +Buffer.prototype[customInspectSymbol] = + Buffer.prototype.inspect = + function inspect() { + let str = ""; + const max = INSPECT_MAX_BYTES; + str = StringPrototypeTrim( + StringPrototypeReplace( + // deno-lint-ignore prefer-primordials + this.toString("hex", 0, max), + SPACER_PATTERN, + "$1 ", + ), + ); + if (this.length > max) { + str += " ... 
"; + } + return ""; + }; Buffer.prototype.compare = function compare( target, @@ -503,10 +598,14 @@ Buffer.prototype.compare = function compare( thisStart, thisEnd, ) { - if (isInstance(target, Uint8Array)) { - target = Buffer.from(target, target.offset, target.byteLength); + if (isUint8Array(target)) { + target = BufferFrom( + target, + TypedArrayPrototypeGetByteOffset(target), + TypedArrayPrototypeGetByteLength(target), + ); } - if (!Buffer.isBuffer(target)) { + if (!BufferIsBuffer(target)) { throw new codes.ERR_INVALID_ARG_TYPE( "target", ["Buffer", "Uint8Array"], @@ -563,8 +662,9 @@ Buffer.prototype.compare = function compare( } let x = thisEnd - thisStart; let y = end - start; - const len = Math.min(x, y); - const thisCopy = this.slice(thisStart, thisEnd); + const len = MathMin(x, y); + const thisCopy = TypedArrayPrototypeSlice(this, thisStart, thisEnd); + // deno-lint-ignore prefer-primordials const targetCopy = target.slice(start, end); for (let i = 0; i < len; ++i) { if (thisCopy[i] !== targetCopy[i]) { @@ -594,7 +694,8 @@ function bidirectionalIndexOf(buffer, val, byteOffset, encoding, dir) { byteOffset = -0x80000000; } byteOffset = +byteOffset; - if (Number.isNaN(byteOffset)) { + if (NumberIsNaN(byteOffset)) { + // deno-lint-ignore prefer-primordials byteOffset = dir ? 
0 : (buffer.length || buffer.byteLength); } dir = !!dir; @@ -614,6 +715,7 @@ function bidirectionalIndexOf(buffer, val, byteOffset, encoding, dir) { if (ops === undefined) { throw new codes.ERR_UNKNOWN_ENCODING(encoding); } + // deno-lint-ignore prefer-primordials return ops.indexOf(buffer, val, byteOffset, dir); } @@ -630,6 +732,7 @@ function bidirectionalIndexOf(buffer, val, byteOffset, encoding, dir) { } Buffer.prototype.includes = function includes(val, byteOffset, encoding) { + // deno-lint-ignore prefer-primordials return this.indexOf(val, byteOffset, encoding) !== -1; }; @@ -649,7 +752,7 @@ Buffer.prototype.asciiSlice = function asciiSlice(offset, length) { if (offset === 0 && length === this.length) { return bytesToAscii(this); } else { - return bytesToAscii(this.slice(offset, length)); + return bytesToAscii(TypedArrayPrototypeSlice(this, offset, length)); } }; @@ -664,7 +767,9 @@ Buffer.prototype.base64Slice = function base64Slice( if (offset === 0 && length === this.length) { return forgivingBase64Encode(this); } else { - return forgivingBase64Encode(this.slice(offset, length)); + return forgivingBase64Encode( + TypedArrayPrototypeSlice(this, offset, length), + ); } }; @@ -683,7 +788,9 @@ Buffer.prototype.base64urlSlice = function base64urlSlice( if (offset === 0 && length === this.length) { return forgivingBase64UrlEncode(this); } else { - return forgivingBase64UrlEncode(this.slice(offset, length)); + return forgivingBase64UrlEncode( + TypedArrayPrototypeSlice(this, offset, length), + ); } }; @@ -728,7 +835,7 @@ Buffer.prototype.ucs2Slice = function ucs2Slice(offset, length) { if (offset === 0 && length === this.length) { return bytesToUtf16le(this); } else { - return bytesToUtf16le(this.slice(offset, length)); + return bytesToUtf16le(TypedArrayPrototypeSlice(this, offset, length)); } }; @@ -747,9 +854,9 @@ Buffer.prototype.utf8Slice = function utf8Slice(string, offset, length) { Buffer.prototype.utf8Write = function utf8Write(string, offset, length) { 
offset = offset || 0; - const maxLength = Math.min(length || Infinity, this.length - offset); + const maxLength = MathMin(length || Infinity, this.length - offset); const buf = offset || maxLength < this.length - ? this.subarray(offset, maxLength + offset) + ? TypedArrayPrototypeSubarray(this, offset, maxLength + offset) : this; return utf8Encoder.encodeInto(string, buf).written; }; @@ -801,7 +908,7 @@ Buffer.prototype.write = function write(string, offset, length, encoding) { Buffer.prototype.toJSON = function toJSON() { return { type: "Buffer", - data: Array.prototype.slice.call(this._arr || this, 0), + data: ArrayPrototypeSlice(this._arr || this, 0), }; }; function fromArrayBuffer(obj, byteOffset, length) { @@ -810,11 +917,12 @@ function fromArrayBuffer(obj, byteOffset, length) { byteOffset = 0; } else { byteOffset = +byteOffset; - if (Number.isNaN(byteOffset)) { + if (NumberIsNaN(byteOffset)) { byteOffset = 0; } } + // deno-lint-ignore prefer-primordials const maxLength = obj.byteLength - byteOffset; if (maxLength < 0) { @@ -836,7 +944,7 @@ function fromArrayBuffer(obj, byteOffset, length) { } const buffer = new Uint8Array(obj, byteOffset, length); - Object.setPrototypeOf(buffer, Buffer.prototype); + ObjectSetPrototypeOf(buffer, BufferPrototype); return buffer; } @@ -844,6 +952,7 @@ function _base64Slice(buf, start, end) { if (start === 0 && end === buf.length) { return forgivingBase64Encode(buf); } else { + // deno-lint-ignore prefer-primordials return forgivingBase64Encode(buf.slice(start, end)); } } @@ -852,9 +961,10 @@ const decoder = new TextDecoder(); function _utf8Slice(buf, start, end) { try { + // deno-lint-ignore prefer-primordials return decoder.decode(buf.slice(start, end)); } catch (err) { - if (err instanceof TypeError) { + if (ObjectPrototypeIsPrototypeOf(TypeErrorPrototype, err)) { throw new NodeError("ERR_STRING_TOO_LONG", "String too long"); } throw err; @@ -863,9 +973,9 @@ function _utf8Slice(buf, start, end) { function _latin1Slice(buf, 
start, end) { let ret = ""; - end = Math.min(buf.length, end); + end = MathMin(buf.length, end); for (let i = start; i < end; ++i) { - ret += String.fromCharCode(buf[i]); + ret += StringFromCharCode(buf[i]); } return ret; } @@ -994,42 +1104,38 @@ Buffer.prototype.readUint32BE = Buffer.prototype.readUInt32BE = readUInt32BE; Buffer.prototype.readBigUint64LE = Buffer.prototype.readBigUInt64LE = - defineBigIntMethod( - function readBigUInt64LE(offset) { - offset = offset >>> 0; - validateNumber(offset, "offset"); - const first = this[offset]; - const last = this[offset + 7]; - if (first === void 0 || last === void 0) { - boundsError(offset, this.length - 8); - } - const lo = first + this[++offset] * 2 ** 8 + - this[++offset] * 2 ** 16 + - this[++offset] * 2 ** 24; - const hi = this[++offset] + this[++offset] * 2 ** 8 + - this[++offset] * 2 ** 16 + last * 2 ** 24; - return BigInt(lo) + (BigInt(hi) << BigInt(32)); - }, - ); + function readBigUInt64LE(offset) { + offset = offset >>> 0; + validateNumber(offset, "offset"); + const first = this[offset]; + const last = this[offset + 7]; + if (first === void 0 || last === void 0) { + boundsError(offset, this.length - 8); + } + const lo = first + this[++offset] * 2 ** 8 + + this[++offset] * 2 ** 16 + + this[++offset] * 2 ** 24; + const hi = this[++offset] + this[++offset] * 2 ** 8 + + this[++offset] * 2 ** 16 + last * 2 ** 24; + return BigInt(lo) + (BigInt(hi) << 32n); + }; Buffer.prototype.readBigUint64BE = Buffer.prototype.readBigUInt64BE = - defineBigIntMethod( - function readBigUInt64BE(offset) { - offset = offset >>> 0; - validateNumber(offset, "offset"); - const first = this[offset]; - const last = this[offset + 7]; - if (first === void 0 || last === void 0) { - boundsError(offset, this.length - 8); - } - const hi = first * 2 ** 24 + this[++offset] * 2 ** 16 + - this[++offset] * 2 ** 8 + this[++offset]; - const lo = this[++offset] * 2 ** 24 + this[++offset] * 2 ** 16 + - this[++offset] * 2 ** 8 + last; - return 
(BigInt(hi) << BigInt(32)) + BigInt(lo); - }, - ); + function readBigUInt64BE(offset) { + offset = offset >>> 0; + validateNumber(offset, "offset"); + const first = this[offset]; + const last = this[offset + 7]; + if (first === void 0 || last === void 0) { + boundsError(offset, this.length - 8); + } + const hi = first * 2 ** 24 + this[++offset] * 2 ** 16 + + this[++offset] * 2 ** 8 + this[++offset]; + const lo = this[++offset] * 2 ** 24 + this[++offset] * 2 ** 16 + + this[++offset] * 2 ** 8 + last; + return (BigInt(hi) << 32n) + BigInt(lo); + }; Buffer.prototype.readIntLE = function readIntLE( offset, @@ -1148,43 +1254,39 @@ Buffer.prototype.readInt32BE = function readInt32BE(offset = 0) { last; }; -Buffer.prototype.readBigInt64LE = defineBigIntMethod( - function readBigInt64LE(offset) { - offset = offset >>> 0; - validateNumber(offset, "offset"); - const first = this[offset]; - const last = this[offset + 7]; - if (first === void 0 || last === void 0) { - boundsError(offset, this.length - 8); - } - const val = this[offset + 4] + this[offset + 5] * 2 ** 8 + - this[offset + 6] * 2 ** 16 + (last << 24); - return (BigInt(val) << BigInt(32)) + - BigInt( - first + this[++offset] * 2 ** 8 + this[++offset] * 2 ** 16 + - this[++offset] * 2 ** 24, - ); - }, -); +Buffer.prototype.readBigInt64LE = function readBigInt64LE(offset) { + offset = offset >>> 0; + validateNumber(offset, "offset"); + const first = this[offset]; + const last = this[offset + 7]; + if (first === void 0 || last === void 0) { + boundsError(offset, this.length - 8); + } + const val = this[offset + 4] + this[offset + 5] * 2 ** 8 + + this[offset + 6] * 2 ** 16 + (last << 24); + return (BigInt(val) << 32n) + + BigInt( + first + this[++offset] * 2 ** 8 + this[++offset] * 2 ** 16 + + this[++offset] * 2 ** 24, + ); +}; -Buffer.prototype.readBigInt64BE = defineBigIntMethod( - function readBigInt64BE(offset) { - offset = offset >>> 0; - validateNumber(offset, "offset"); - const first = this[offset]; - const last = 
this[offset + 7]; - if (first === void 0 || last === void 0) { - boundsError(offset, this.length - 8); - } - const val = (first << 24) + this[++offset] * 2 ** 16 + - this[++offset] * 2 ** 8 + this[++offset]; - return (BigInt(val) << BigInt(32)) + - BigInt( - this[++offset] * 2 ** 24 + this[++offset] * 2 ** 16 + - this[++offset] * 2 ** 8 + last, - ); - }, -); +Buffer.prototype.readBigInt64BE = function readBigInt64BE(offset) { + offset = offset >>> 0; + validateNumber(offset, "offset"); + const first = this[offset]; + const last = this[offset + 7]; + if (first === void 0 || last === void 0) { + boundsError(offset, this.length - 8); + } + const val = (first << 24) + this[++offset] * 2 ** 16 + + this[++offset] * 2 ** 8 + this[++offset]; + return (BigInt(val) << 32n) + + BigInt( + this[++offset] * 2 ** 24 + this[++offset] * 2 ** 16 + + this[++offset] * 2 ** 8 + last, + ); +}; Buffer.prototype.readFloatLE = function readFloatLE(offset) { return bigEndian @@ -1293,7 +1395,7 @@ Buffer.prototype.writeUint32BE = function wrtBigUInt64LE(buf, value, offset, min, max) { checkIntBI(value, min, max, buf, offset, 7); - let lo = Number(value & BigInt(4294967295)); + let lo = Number(value & 4294967295n); buf[offset++] = lo; lo = lo >> 8; buf[offset++] = lo; @@ -1301,7 +1403,7 @@ function wrtBigUInt64LE(buf, value, offset, min, max) { buf[offset++] = lo; lo = lo >> 8; buf[offset++] = lo; - let hi = Number(value >> BigInt(32) & BigInt(4294967295)); + let hi = Number(value >> 32n & 4294967295n); buf[offset++] = hi; hi = hi >> 8; buf[offset++] = hi; @@ -1314,7 +1416,7 @@ function wrtBigUInt64LE(buf, value, offset, min, max) { function wrtBigUInt64BE(buf, value, offset, min, max) { checkIntBI(value, min, max, buf, offset, 7); - let lo = Number(value & BigInt(4294967295)); + let lo = Number(value & 4294967295n); buf[offset + 7] = lo; lo = lo >> 8; buf[offset + 6] = lo; @@ -1322,7 +1424,7 @@ function wrtBigUInt64BE(buf, value, offset, min, max) { buf[offset + 5] = lo; lo = lo >> 8; 
buf[offset + 4] = lo; - let hi = Number(value >> BigInt(32) & BigInt(4294967295)); + let hi = Number(value >> 32n & 4294967295n); buf[offset + 3] = hi; hi = hi >> 8; buf[offset + 2] = hi; @@ -1335,31 +1437,27 @@ function wrtBigUInt64BE(buf, value, offset, min, max) { Buffer.prototype.writeBigUint64LE = Buffer.prototype.writeBigUInt64LE = - defineBigIntMethod( - function writeBigUInt64LE(value, offset = 0) { - return wrtBigUInt64LE( - this, - value, - offset, - BigInt(0), - BigInt("0xffffffffffffffff"), - ); - }, - ); + function writeBigUInt64LE(value, offset = 0) { + return wrtBigUInt64LE( + this, + value, + offset, + 0n, + 0xffffffffffffffffn, + ); + }; Buffer.prototype.writeBigUint64BE = Buffer.prototype.writeBigUInt64BE = - defineBigIntMethod( - function writeBigUInt64BE(value, offset = 0) { - return wrtBigUInt64BE( - this, - value, - offset, - BigInt(0), - BigInt("0xffffffffffffffff"), - ); - }, - ); + function writeBigUInt64BE(value, offset = 0) { + return wrtBigUInt64BE( + this, + value, + offset, + 0n, + 0xffffffffffffffffn, + ); + }; Buffer.prototype.writeIntLE = function writeIntLE( value, @@ -1450,29 +1548,25 @@ Buffer.prototype.writeInt32BE = function writeInt32BE(value, offset = 0) { return writeU_Int32BE(this, value, offset, -0x80000000, 0x7fffffff); }; -Buffer.prototype.writeBigInt64LE = defineBigIntMethod( - function writeBigInt64LE(value, offset = 0) { - return wrtBigUInt64LE( - this, - value, - offset, - -BigInt("0x8000000000000000"), - BigInt("0x7fffffffffffffff"), - ); - }, -); +Buffer.prototype.writeBigInt64LE = function writeBigInt64LE(value, offset = 0) { + return wrtBigUInt64LE( + this, + value, + offset, + -0x8000000000000000n, + 0x7fffffffffffffffn, + ); +}; -Buffer.prototype.writeBigInt64BE = defineBigIntMethod( - function writeBigInt64BE(value, offset = 0) { - return wrtBigUInt64BE( - this, - value, - offset, - -BigInt("0x8000000000000000"), - BigInt("0x7fffffffffffffff"), - ); - }, -); +Buffer.prototype.writeBigInt64BE = function 
writeBigInt64BE(value, offset = 0) { + return wrtBigUInt64BE( + this, + value, + offset, + -0x8000000000000000n, + 0x7fffffffffffffffn, + ); +}; Buffer.prototype.writeFloatLE = function writeFloatLE( value, @@ -1600,14 +1694,12 @@ Buffer.prototype.copy = function copy( } const len = sourceEnd - sourceStart; - if ( - this === target && typeof Uint8Array.prototype.copyWithin === "function" - ) { - this.copyWithin(targetStart, sourceStart, sourceEnd); + if (this === target) { + TypedArrayPrototypeCopyWithin(this, targetStart, sourceStart, sourceEnd); } else { - Uint8Array.prototype.set.call( + TypedArrayPrototypeSet( target, - this.subarray(sourceStart, sourceEnd), + TypedArrayPrototypeSubarray(this, sourceStart, sourceEnd), targetStart, ); } @@ -1627,11 +1719,11 @@ Buffer.prototype.fill = function fill(val, start, end, encoding) { if (encoding !== void 0 && typeof encoding !== "string") { throw new TypeError("encoding must be a string"); } - if (typeof encoding === "string" && !Buffer.isEncoding(encoding)) { + if (typeof encoding === "string" && !BufferIsEncoding(encoding)) { throw new TypeError("Unknown encoding: " + encoding); } if (val.length === 1) { - const code = val.charCodeAt(0); + const code = StringPrototypeCharCodeAt(val, 0); if (encoding === "utf8" && code < 128 || encoding === "latin1") { val = code; } @@ -1658,7 +1750,7 @@ Buffer.prototype.fill = function fill(val, start, end, encoding) { this[i] = val; } } else { - const bytes = Buffer.isBuffer(val) ? val : Buffer.from(val, encoding); + const bytes = BufferIsBuffer(val) ? val : BufferFrom(val, encoding); const len = bytes.length; if (len === 0) { throw new codes.ERR_INVALID_ARG_VALUE( @@ -1685,7 +1777,7 @@ function checkIntBI(value, min, max, buf, offset, byteLength2) { const n = typeof min === "bigint" ? 
"n" : ""; let range; if (byteLength2 > 3) { - if (min === 0 || min === BigInt(0)) { + if (min === 0 || min === 0n) { range = `>= 0${n} and < 2${n} ** ${(byteLength2 + 1) * 8}${n}`; } else { range = `>= -(2${n} ** ${(byteLength2 + 1) * 8 - 1}${n}) and < 2 ** ${ @@ -1710,7 +1802,7 @@ function checkIntBI(value, min, max, buf, offset, byteLength2) { function blitBuffer(src, dst, offset, byteLength = Infinity) { const srcLength = src.length; // Establish the number of bytes to be written - const bytesToWrite = Math.min( + const bytesToWrite = MathMin( // If byte length is defined in the call, then it sets an upper bound, // otherwise it is Infinity and is never chosen. byteLength, @@ -1730,15 +1822,9 @@ function blitBuffer(src, dst, offset, byteLength = Infinity) { return bytesToWrite; } -function isInstance(obj, type) { - return obj instanceof type || - obj != null && obj.constructor != null && - obj.constructor.name != null && obj.constructor.name === type.name; -} - const hexSliceLookupTable = function () { const alphabet = "0123456789abcdef"; - const table = new Array(256); + const table = []; for (let i = 0; i < 16; ++i) { const i16 = i * 16; for (let j = 0; j < 16; ++j) { @@ -1748,14 +1834,6 @@ const hexSliceLookupTable = function () { return table; }(); -function defineBigIntMethod(fn) { - return typeof BigInt === "undefined" ? 
BufferBigIntNotDefined : fn; -} - -function BufferBigIntNotDefined() { - throw new Error("BigInt not supported"); -} - export function readUInt48LE(buf, offset = 0) { validateNumber(offset, "offset"); const first = buf[offset]; @@ -2079,10 +2157,10 @@ export function byteLengthUtf8(str) { function base64ByteLength(str, bytes) { // Handle padding - if (str.charCodeAt(bytes - 1) === 0x3D) { + if (StringPrototypeCharCodeAt(str, bytes - 1) === 0x3D) { bytes--; } - if (bytes > 1 && str.charCodeAt(bytes - 1) === 0x3D) { + if (bytes > 1 && StringPrototypeCharCodeAt(str, bytes - 1) === 0x3D) { bytes--; } @@ -2090,7 +2168,7 @@ function base64ByteLength(str, bytes) { return (bytes * 3) >>> 2; } -export const encodingsMap = Object.create(null); +export const encodingsMap = ObjectCreate(null); for (let i = 0; i < encodings.length; ++i) { encodingsMap[encodings[i]] = i; } @@ -2220,7 +2298,7 @@ export const encodingOps = { }; export function getEncodingOps(encoding) { - encoding = String(encoding).toLowerCase(); + encoding = StringPrototypeToLowerCase(String(encoding)); switch (encoding.length) { case 4: if (encoding === "utf8") return encodingOps.utf8; @@ -2260,6 +2338,14 @@ export function getEncodingOps(encoding) { } } +/** + * @param {Buffer} source + * @param {Buffer} target + * @param {number} targetStart + * @param {number} sourceStart + * @param {number} sourceEnd + * @returns {number} + */ export function _copyActual( source, target, @@ -2278,6 +2364,7 @@ export function _copyActual( } if (sourceStart !== 0 || sourceEnd < source.length) { + // deno-lint-ignore prefer-primordials source = new Uint8Array(source.buffer, source.byteOffset + sourceStart, nb); } @@ -2287,7 +2374,7 @@ export function _copyActual( } export function boundsError(value, length, type) { - if (Math.floor(value) !== value) { + if (MathFloor(value) !== value) { validateNumber(value, type); throw new codes.ERR_OUT_OF_RANGE(type || "offset", "an integer", value); } @@ -2310,7 +2397,7 @@ export function 
validateNumber(value, name, min = undefined, max) { if ( (min != null && value < min) || (max != null && value > max) || - ((min != null || max != null) && Number.isNaN(value)) + ((min != null || max != null) && NumberIsNaN(value)) ) { throw new codes.ERR_OUT_OF_RANGE( name, @@ -2344,11 +2431,11 @@ function checkInt(value, min, max, buf, offset, byteLength) { export function toInteger(n, defaultVal) { n = +n; if ( - !Number.isNaN(n) && - n >= Number.MIN_SAFE_INTEGER && - n <= Number.MAX_SAFE_INTEGER + !NumberIsNaN(n) && + n >= NumberMIN_SAFE_INTEGER && + n <= NumberMAX_SAFE_INTEGER ) { - return ((n % 1) === 0 ? n : Math.floor(n)); + return ((n % 1) === 0 ? n : MathFloor(n)); } return defaultVal; } @@ -2421,7 +2508,7 @@ export function writeU_Int48BE(buf, value, offset, min, max) { value = +value; checkInt(value, min, max, buf, offset, 5); - const newVal = Math.floor(value * 2 ** -32); + const newVal = MathFloor(value * 2 ** -32); buf[offset++] = newVal >>> 8; buf[offset++] = newVal; buf[offset + 3] = value; @@ -2439,7 +2526,7 @@ export function writeU_Int40BE(buf, value, offset, min, max) { value = +value; checkInt(value, min, max, buf, offset, 4); - buf[offset++] = Math.floor(value * 2 ** -32); + buf[offset++] = MathFloor(value * 2 ** -32); buf[offset + 3] = value; value = value >>> 8; buf[offset + 2] = value; @@ -2482,12 +2569,12 @@ export function validateOffset( value, name, min = 0, - max = Number.MAX_SAFE_INTEGER, + max = NumberMAX_SAFE_INTEGER, ) { if (typeof value !== "number") { throw new codes.ERR_INVALID_ARG_TYPE(name, "number", value); } - if (!Number.isInteger(value)) { + if (!NumberIsInteger(value)) { throw new codes.ERR_OUT_OF_RANGE(name, "an integer", value); } if (value < min || value > max) { @@ -2500,7 +2587,7 @@ export function writeU_Int48LE(buf, value, offset, min, max) { value = +value; checkInt(value, min, max, buf, offset, 5); - const newVal = Math.floor(value * 2 ** -32); + const newVal = MathFloor(value * 2 ** -32); buf[offset++] = value; 
value = value >>> 8; buf[offset++] = value; @@ -2526,7 +2613,7 @@ export function writeU_Int40LE(buf, value, offset, min, max) { buf[offset++] = value; value = value >>> 8; buf[offset++] = value; - buf[offset++] = Math.floor(newVal * 2 ** -32); + buf[offset++] = MathFloor(newVal * 2 ** -32); return offset; } @@ -2560,14 +2647,14 @@ export function writeU_Int24LE(buf, value, offset, min, max) { export function isUtf8(input) { if (isTypedArray(input)) { - if (input.buffer.detached) { + if (isDetachedBuffer(TypedArrayPrototypeGetBuffer(input))) { throw new ERR_INVALID_STATE("Cannot validate on a detached buffer"); } return op_is_utf8(input); } if (isAnyArrayBuffer(input)) { - if (input.detached) { + if (isDetachedBuffer(input)) { throw new ERR_INVALID_STATE("Cannot validate on a detached buffer"); } return op_is_utf8(new Uint8Array(input)); @@ -2582,14 +2669,14 @@ export function isUtf8(input) { export function isAscii(input) { if (isTypedArray(input)) { - if (input.buffer.detached) { + if (isDetachedBuffer(TypedArrayPrototypeGetBuffer(input))) { throw new ERR_INVALID_STATE("Cannot validate on a detached buffer"); } return op_is_ascii(input); } if (isAnyArrayBuffer(input)) { - if (input.detached) { + if (isDetachedBuffer(input)) { throw new ERR_INVALID_STATE("Cannot validate on a detached buffer"); } return op_is_ascii(new Uint8Array(input)); @@ -2636,7 +2723,7 @@ export function transcode(source, fromEnco, toEnco) { const result = op_transcode(new Uint8Array(source), fromEnco, toEnco); return Buffer.from(result, toEnco); } catch (err) { - if (err.message.includes("Unable to transcode Buffer")) { + if (StringPrototypeIncludes(err.message, "Unable to transcode Buffer")) { throw illegalArgumentError; } else { throw err; -- cgit v1.2.3 From 3065dadea3792539f050346c534afc0a7821c2b6 Mon Sep 17 00:00:00 2001 From: Leo Kettmeir Date: Tue, 15 Oct 2024 14:36:45 -0700 Subject: fix(ext/console): apply coloring for console.table (#26280) Fixes #26159 --- ext/console/01_console.js 
| 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'ext') diff --git a/ext/console/01_console.js b/ext/console/01_console.js index a977a2719..3c07cf64a 100644 --- a/ext/console/01_console.js +++ b/ext/console/01_console.js @@ -3256,7 +3256,7 @@ class Console { const stringifyValue = (value) => inspectValueWithQuotes(value, { - ...getDefaultInspectOptions(), + ...getConsoleInspectOptions(noColorStdout()), depth: 1, compact: true, }); -- cgit v1.2.3 From 82d13fd45b6fa8da5d390e26a349522e93811639 Mon Sep 17 00:00:00 2001 From: Leo Kettmeir Date: Tue, 15 Oct 2024 15:36:11 -0700 Subject: refactor(ext/io): use concrete error types (#26187) --- ext/io/bi_pipe.rs | 22 ++++++++++++---------- ext/io/fs.rs | 45 ++++++++++++++++++--------------------------- ext/io/lib.rs | 12 ++++++------ 3 files changed, 36 insertions(+), 43 deletions(-) (limited to 'ext') diff --git a/ext/io/bi_pipe.rs b/ext/io/bi_pipe.rs index 402e383ac..b6fc70ca2 100644 --- a/ext/io/bi_pipe.rs +++ b/ext/io/bi_pipe.rs @@ -2,7 +2,6 @@ use std::rc::Rc; -use deno_core::error::AnyError; use deno_core::AsyncRefCell; use deno_core::AsyncResult; use deno_core::CancelHandle; @@ -71,13 +70,16 @@ impl BiPipeResource { pub async fn read( self: Rc, data: &mut [u8], - ) -> Result { + ) -> Result { let mut rd = RcRef::map(&self, |r| &r.read_half).borrow_mut().await; let cancel_handle = RcRef::map(&self, |r| &r.cancel); - Ok(rd.read(data).try_or_cancel(cancel_handle).await?) + rd.read(data).try_or_cancel(cancel_handle).await } - pub async fn write(self: Rc, data: &[u8]) -> Result { + pub async fn write( + self: Rc, + data: &[u8], + ) -> Result { let mut wr = RcRef::map(self, |r| &r.write_half).borrow_mut().await; let nwritten = wr.write(data).await?; wr.flush().await?; @@ -270,8 +272,8 @@ impl_async_write!(for BiPipe -> self.write_end); /// Creates both sides of a bidirectional pipe, returning the raw /// handles to the underlying OS resources. 
-pub fn bi_pipe_pair_raw() -> Result<(RawBiPipeHandle, RawBiPipeHandle), AnyError> -{ +pub fn bi_pipe_pair_raw( +) -> Result<(RawBiPipeHandle, RawBiPipeHandle), std::io::Error> { #[cfg(unix)] { // SockFlag is broken on macOS @@ -293,7 +295,7 @@ pub fn bi_pipe_pair_raw() -> Result<(RawBiPipeHandle, RawBiPipeHandle), AnyError ) }; if ret != 0 { - return Err(std::io::Error::last_os_error().into()); + return Err(std::io::Error::last_os_error()); } if cfg!(target_os = "macos") { @@ -389,7 +391,7 @@ pub fn bi_pipe_pair_raw() -> Result<(RawBiPipeHandle, RawBiPipeHandle), AnyError continue; } - return Err(err.into()); + return Err(err); } break (path, hd1); @@ -411,7 +413,7 @@ pub fn bi_pipe_pair_raw() -> Result<(RawBiPipeHandle, RawBiPipeHandle), AnyError 0, ); if hd2 == INVALID_HANDLE_VALUE { - return Err(io::Error::last_os_error().into()); + return Err(io::Error::last_os_error()); } // Will not block because we have create the pair. @@ -419,7 +421,7 @@ pub fn bi_pipe_pair_raw() -> Result<(RawBiPipeHandle, RawBiPipeHandle), AnyError let err = std::io::Error::last_os_error(); if err.raw_os_error() != Some(ERROR_PIPE_CONNECTED as i32) { CloseHandle(hd2); - return Err(err.into()); + return Err(err); } } diff --git a/ext/io/fs.rs b/ext/io/fs.rs index 3798c1429..06fc3da09 100644 --- a/ext/io/fs.rs +++ b/ext/io/fs.rs @@ -6,10 +6,6 @@ use std::rc::Rc; use std::time::SystemTime; use std::time::UNIX_EPOCH; -use deno_core::error::custom_error; -use deno_core::error::not_supported; -use deno_core::error::resource_unavailable; -use deno_core::error::AnyError; use deno_core::BufMutView; use deno_core::BufView; use deno_core::OpState; @@ -59,15 +55,16 @@ impl From for FsError { } } -impl From for AnyError { +impl From for deno_core::error::AnyError { fn from(err: FsError) -> Self { match err { - FsError::Io(err) => AnyError::from(err), - FsError::FileBusy => resource_unavailable(), - FsError::NotSupported => not_supported(), - FsError::NotCapable(err) => { - custom_error("NotCapable", 
format!("permission denied: {err}")) - } + FsError::Io(err) => err.into(), + FsError::FileBusy => deno_core::error::resource_unavailable(), + FsError::NotSupported => deno_core::error::not_supported(), + FsError::NotCapable(err) => deno_core::error::custom_error( + "NotCapable", + format!("permission denied: {err}"), + ), } } } @@ -266,9 +263,9 @@ impl FileResource { state: &OpState, rid: ResourceId, f: F, - ) -> Result + ) -> Result where - F: FnOnce(Rc) -> Result, + F: FnOnce(Rc) -> Result, { let resource = state.resource_table.get::(rid)?; f(resource) @@ -277,7 +274,7 @@ impl FileResource { pub fn get_file( state: &OpState, rid: ResourceId, - ) -> Result, AnyError> { + ) -> Result, deno_core::error::AnyError> { let resource = state.resource_table.get::(rid)?; Ok(resource.file()) } @@ -286,9 +283,9 @@ impl FileResource { state: &OpState, rid: ResourceId, f: F, - ) -> Result + ) -> Result where - F: FnOnce(Rc) -> Result, + F: FnOnce(Rc) -> Result, { Self::with_resource(state, rid, |r| f(r.file.clone())) } @@ -303,10 +300,7 @@ impl deno_core::Resource for FileResource { Cow::Borrowed(&self.name) } - fn read( - self: Rc, - limit: usize, - ) -> deno_core::AsyncResult { + fn read(self: Rc, limit: usize) -> deno_core::AsyncResult { Box::pin(async move { self .file @@ -319,8 +313,8 @@ impl deno_core::Resource for FileResource { fn read_byob( self: Rc, - buf: deno_core::BufMutView, - ) -> deno_core::AsyncResult<(usize, deno_core::BufMutView)> { + buf: BufMutView, + ) -> deno_core::AsyncResult<(usize, BufMutView)> { Box::pin(async move { self .file @@ -333,17 +327,14 @@ impl deno_core::Resource for FileResource { fn write( self: Rc, - buf: deno_core::BufView, + buf: BufView, ) -> deno_core::AsyncResult { Box::pin(async move { self.file.clone().write(buf).await.map_err(|err| err.into()) }) } - fn write_all( - self: Rc, - buf: deno_core::BufView, - ) -> deno_core::AsyncResult<()> { + fn write_all(self: Rc, buf: BufView) -> deno_core::AsyncResult<()> { Box::pin(async move { 
self .file diff --git a/ext/io/lib.rs b/ext/io/lib.rs index a07d64ae3..5d183aa46 100644 --- a/ext/io/lib.rs +++ b/ext/io/lib.rs @@ -1,6 +1,5 @@ // Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. -use deno_core::error::AnyError; use deno_core::op2; use deno_core::unsync::spawn_blocking; use deno_core::unsync::TaskQueue; @@ -48,6 +47,7 @@ use winapi::um::processenv::GetStdHandle; #[cfg(windows)] use winapi::um::winbase; +use deno_core::futures::TryFutureExt; #[cfg(windows)] use parking_lot::Condvar; #[cfg(windows)] @@ -348,13 +348,13 @@ where RcRef::map(self, |r| &r.stream).borrow_mut() } - async fn write(self: Rc, data: &[u8]) -> Result { + async fn write(self: Rc, data: &[u8]) -> Result { let mut stream = self.borrow_mut().await; let nwritten = stream.write(data).await?; Ok(nwritten) } - async fn shutdown(self: Rc) -> Result<(), AnyError> { + async fn shutdown(self: Rc) -> Result<(), io::Error> { let mut stream = self.borrow_mut().await; stream.shutdown().await?; Ok(()) @@ -396,7 +396,7 @@ where self.cancel_handle.cancel() } - async fn read(self: Rc, data: &mut [u8]) -> Result { + async fn read(self: Rc, data: &mut [u8]) -> Result { let mut rd = self.borrow_mut().await; let nread = rd.read(data).try_or_cancel(self.cancel_handle()).await?; Ok(nread) @@ -417,7 +417,7 @@ impl Resource for ChildStdinResource { deno_core::impl_writable!(); fn shutdown(self: Rc) -> AsyncResult<()> { - Box::pin(self.shutdown()) + Box::pin(self.shutdown().map_err(|e| e.into())) } } @@ -1010,7 +1010,7 @@ pub fn op_print( state: &mut OpState, #[string] msg: &str, is_err: bool, -) -> Result<(), AnyError> { +) -> Result<(), deno_core::error::AnyError> { let rid = if is_err { 2 } else { 1 }; FileResource::with_file(state, rid, move |file| { Ok(file.write_all_sync(msg.as_bytes())?) 
-- cgit v1.2.3 From 661882f10df198faf0df8f85493ee53ab64fbc97 Mon Sep 17 00:00:00 2001 From: David Sherret Date: Wed, 16 Oct 2024 00:43:20 -0400 Subject: perf(http): make heap allocation for path conditional (#26289) Code: ```js Deno.serve({ port: 8085 }, request => { return new Response(request.url); }); ``` Before: ``` % wrk -d60s http://localhost:8085/path/testing\?testing=5 Running 1m test @ http://localhost:8085/path/testing?testing=5 2 threads and 10 connections Thread Stats Avg Stdev Max +/- Stdev Latency 56.01us 18.34us 3.28ms 93.84% Req/Sec 81.80k 3.13k 88.26k 90.77% 9783713 requests in 1.00m, 1.67GB read Requests/sec: 162789.89 Transfer/sec: 28.41MB ``` After: ``` % wrk -d60s http://localhost:8085/path/testing\?testing=5 Running 1m test @ http://localhost:8085/path/testing?testing=5 2 threads and 10 connections Thread Stats Avg Stdev Max +/- Stdev Latency 55.44us 15.20us 2.42ms 90.41% Req/Sec 82.71k 2.92k 88.10k 89.93% 9892916 requests in 1.00m, 1.69GB read Requests/sec: 164607.06 Transfer/sec: 28.73MB ``` --- ext/http/http_next.rs | 24 +++++++++++++++++------- 1 file changed, 17 insertions(+), 7 deletions(-) (limited to 'ext') diff --git a/ext/http/http_next.rs b/ext/http/http_next.rs index a58c4be5c..abc54c899 100644 --- a/ext/http/http_next.rs +++ b/ext/http/http_next.rs @@ -305,15 +305,25 @@ where }; // Only extract the path part - we handle authority elsewhere - let path = match &request_parts.uri.path_and_query() { - Some(path_and_query) => path_and_query.to_string(), - None => "".to_owned(), + let path = match request_parts.uri.path_and_query() { + Some(path_and_query) => { + let path = path_and_query.as_str(); + if matches!(path.as_bytes().first(), Some(b'/' | b'*')) { + Cow::Borrowed(path) + } else { + Cow::Owned(format!("/{}", path)) + } + } + None => Cow::Borrowed(""), }; - let path: v8::Local = - v8::String::new_from_utf8(scope, path.as_ref(), v8::NewStringType::Normal) - .unwrap() - .into(); + let path: v8::Local = v8::String::new_from_utf8( + 
scope, + path.as_bytes(), + v8::NewStringType::Normal, + ) + .unwrap() + .into(); let peer_address: v8::Local = v8::String::new_from_utf8( scope, -- cgit v1.2.3 From 21fa953f320c66a897822c4c731b2fae5f07c78b Mon Sep 17 00:00:00 2001 From: Divy Srivastava Date: Wed, 16 Oct 2024 14:27:28 +0530 Subject: fix(ext/node): timingSafeEqual account for AB byteOffset (#26292) Fixes https://github.com/denoland/deno/issues/26276 --- .../polyfills/internal_binding/_timingSafeEqual.ts | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) (limited to 'ext') diff --git a/ext/node/polyfills/internal_binding/_timingSafeEqual.ts b/ext/node/polyfills/internal_binding/_timingSafeEqual.ts index ff141fdbf..559b7685b 100644 --- a/ext/node/polyfills/internal_binding/_timingSafeEqual.ts +++ b/ext/node/polyfills/internal_binding/_timingSafeEqual.ts @@ -5,10 +5,11 @@ import { Buffer } from "node:buffer"; -function assert(cond) { - if (!cond) { - throw new Error("assertion failed"); +function toDataView(ab: ArrayBufferLike | ArrayBufferView): DataView { + if (ArrayBuffer.isView(ab)) { + return new DataView(ab.buffer, ab.byteOffset, ab.byteLength); } + return new DataView(ab); } /** Compare to array buffers or data views in a way that timing based attacks @@ -21,13 +22,11 @@ function stdTimingSafeEqual( return false; } if (!(a instanceof DataView)) { - a = new DataView(ArrayBuffer.isView(a) ? a.buffer : a); + a = toDataView(a); } if (!(b instanceof DataView)) { - b = new DataView(ArrayBuffer.isView(b) ? 
b.buffer : b); + b = toDataView(b); } - assert(a instanceof DataView); - assert(b instanceof DataView); const length = a.byteLength; let out = 0; let i = -1; @@ -41,7 +40,11 @@ export const timingSafeEqual = ( a: Buffer | DataView | ArrayBuffer, b: Buffer | DataView | ArrayBuffer, ): boolean => { - if (a instanceof Buffer) a = new DataView(a.buffer); - if (a instanceof Buffer) b = new DataView(a.buffer); + if (a instanceof Buffer) { + a = new DataView(a.buffer, a.byteOffset, a.byteLength); + } + if (b instanceof Buffer) { + b = new DataView(b.buffer, b.byteOffset, b.byteLength); + } return stdTimingSafeEqual(a, b); }; -- cgit v1.2.3 From d59599fc187c559ee231882773e1c5a2b932fc3d Mon Sep 17 00:00:00 2001 From: Yoshiya Hinosawa Date: Wed, 16 Oct 2024 20:58:44 +0900 Subject: fix(ext/node): fix dns.lookup result ordering (#26264) partially unblocks #25470 This PR aligns the resolution of `localhost` hostname to Node.js behavior. In Node.js `dns.lookup("localhost", (_, addr) => console.log(addr))` prints ipv6 address `::1`, but it prints ipv4 address `127.0.0.1` in Deno. That difference causes some errors in the work of enabling `createConnection` option in `http.request` (#25470). This PR fixes the issue by aligning `dns.lookup` behavior to Node.js. This PR also changes the following behaviors (resolving TODOs): - `http.createServer` now listens on ipv6 address `[::]` by default on linux/mac - `net.createServer` now listens on ipv6 address `[::]` by default on linux/mac These changes are also alignments to Node.js behaviors. 
--- ext/node/polyfills/http.ts | 6 +++--- ext/node/polyfills/internal/dns/utils.ts | 14 ++----------- ext/node/polyfills/internal_binding/cares_wrap.ts | 13 +++++++++--- ext/node/polyfills/net.ts | 24 +++++++---------------- 4 files changed, 22 insertions(+), 35 deletions(-) (limited to 'ext') diff --git a/ext/node/polyfills/http.ts b/ext/node/polyfills/http.ts index f3f6f86ed..349caeea6 100644 --- a/ext/node/polyfills/http.ts +++ b/ext/node/polyfills/http.ts @@ -50,6 +50,7 @@ import { urlToHttpOptions } from "ext:deno_node/internal/url.ts"; import { kEmptyObject } from "ext:deno_node/internal/util.mjs"; import { constants, TCP } from "ext:deno_node/internal_binding/tcp_wrap.ts"; import { notImplemented, warnNotImplemented } from "ext:deno_node/_utils.ts"; +import { isWindows } from "ext:deno_node/_util/os.ts"; import { connResetException, ERR_HTTP_HEADERS_SENT, @@ -1586,9 +1587,8 @@ export class ServerImpl extends EventEmitter { port = options.port | 0; } - // TODO(bnoordhuis) Node prefers [::] when host is omitted, - // we on the other hand default to 0.0.0.0. - let hostname = options.host ?? "0.0.0.0"; + // Use 0.0.0.0 for Windows, and [::] for other platforms. + let hostname = options.host ?? (isWindows ? 
"0.0.0.0" : "[::]"); if (hostname == "localhost") { hostname = "127.0.0.1"; } diff --git a/ext/node/polyfills/internal/dns/utils.ts b/ext/node/polyfills/internal/dns/utils.ts index 226fce93d..1e0c3d9ed 100644 --- a/ext/node/polyfills/internal/dns/utils.ts +++ b/ext/node/polyfills/internal/dns/utils.ts @@ -416,20 +416,10 @@ export function emitInvalidHostnameWarning(hostname: string) { ); } -let dnsOrder = getOptionValue("--dns-result-order") || "ipv4first"; +let dnsOrder = getOptionValue("--dns-result-order") || "verbatim"; export function getDefaultVerbatim() { - switch (dnsOrder) { - case "verbatim": { - return true; - } - case "ipv4first": { - return false; - } - default: { - return false; - } - } + return dnsOrder !== "ipv4first"; } /** diff --git a/ext/node/polyfills/internal_binding/cares_wrap.ts b/ext/node/polyfills/internal_binding/cares_wrap.ts index 6feb7faf0..cbfea40b2 100644 --- a/ext/node/polyfills/internal_binding/cares_wrap.ts +++ b/ext/node/polyfills/internal_binding/cares_wrap.ts @@ -75,11 +75,18 @@ export function getaddrinfo( const recordTypes: ("A" | "AAAA")[] = []; - if (family === 0 || family === 4) { + if (family === 6) { + recordTypes.push("AAAA"); + } else if (family === 4) { recordTypes.push("A"); - } - if (family === 0 || family === 6) { + } else if (family === 0 && hostname === "localhost") { + // Ipv6 is preferred over Ipv4 for localhost recordTypes.push("AAAA"); + recordTypes.push("A"); + } else if (family === 0) { + // Only get Ipv4 addresses for the other hostnames + // This simulates what `getaddrinfo` does when the family is not specified + recordTypes.push("A"); } (async () => { diff --git a/ext/node/polyfills/net.ts b/ext/node/polyfills/net.ts index 48e1d0de8..35d273be9 100644 --- a/ext/node/polyfills/net.ts +++ b/ext/node/polyfills/net.ts @@ -1871,23 +1871,13 @@ function _setupListenHandle( // Try to bind to the unspecified IPv6 address, see if IPv6 is available if (!address && typeof fd !== "number") { - // TODO(@bartlomieju): 
differs from Node which tries to bind to IPv6 first - // when no address is provided. - // - // Forcing IPv4 as a workaround for Deno not aligning with Node on - // implicit binding on Windows. - // - // REF: https://github.com/denoland/deno/issues/10762 - // rval = _createServerHandle(DEFAULT_IPV6_ADDR, port, 6, fd, flags); - - // if (typeof rval === "number") { - // rval = null; - address = DEFAULT_IPV4_ADDR; - addressType = 4; - // } else { - // address = DEFAULT_IPV6_ADDR; - // addressType = 6; - // } + if (isWindows) { + address = DEFAULT_IPV4_ADDR; + addressType = 4; + } else { + address = DEFAULT_IPV6_ADDR; + addressType = 6; + } } if (rval === null) { -- cgit v1.2.3 From f7dba52133904a9e4ca0468e2c98894992ea2e42 Mon Sep 17 00:00:00 2001 From: Nathan Whitaker <17734409+nathanwhit@users.noreply.github.com> Date: Wed, 16 Oct 2024 11:25:25 -0700 Subject: fix(child_process): map node `--no-warnings` flag to `--quiet` (#26288) Closes https://github.com/denoland/deno/issues/25899 --- ext/node/polyfills/child_process.ts | 2 ++ ext/node/polyfills/internal/child_process.ts | 8 ++++++-- 2 files changed, 8 insertions(+), 2 deletions(-) (limited to 'ext') diff --git a/ext/node/polyfills/child_process.ts b/ext/node/polyfills/child_process.ts index c37dfc410..eda718ff3 100644 --- a/ext/node/polyfills/child_process.ts +++ b/ext/node/polyfills/child_process.ts @@ -132,6 +132,8 @@ export function fork( rm = 2; } execArgv.splice(index, rm); + } else if (flag.startsWith("--no-warnings")) { + execArgv[index] = "--quiet"; } else { index++; } diff --git a/ext/node/polyfills/internal/child_process.ts b/ext/node/polyfills/internal/child_process.ts index 6f209b719..65d825fd2 100644 --- a/ext/node/polyfills/internal/child_process.ts +++ b/ext/node/polyfills/internal/child_process.ts @@ -1191,8 +1191,12 @@ function toDenoArgs(args: string[]): string[] { } if (flagInfo === undefined) { - // Not a known flag that expects a value. Just copy it to the output. 
- denoArgs.push(arg); + if (arg === "--no-warnings") { + denoArgs.push("--quiet"); + } else { + // Not a known flag that expects a value. Just copy it to the output. + denoArgs.push(arg); + } continue; } -- cgit v1.2.3 From 3385d1252e4eae093234d0a075f4a564308ba48e Mon Sep 17 00:00:00 2001 From: denobot <33910674+denobot@users.noreply.github.com> Date: Wed, 16 Oct 2024 19:48:42 -0400 Subject: chore: forward v2.0.1 release commit to main (#26338) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This is the release commit being forwarded back to main for 2.0.1 Co-authored-by: bartlomieju Co-authored-by: Bartek Iwańczuk --- ext/broadcast_channel/Cargo.toml | 2 +- ext/cache/Cargo.toml | 2 +- ext/canvas/Cargo.toml | 2 +- ext/console/Cargo.toml | 2 +- ext/cron/Cargo.toml | 2 +- ext/crypto/Cargo.toml | 2 +- ext/fetch/Cargo.toml | 2 +- ext/ffi/Cargo.toml | 2 +- ext/fs/Cargo.toml | 2 +- ext/http/Cargo.toml | 2 +- ext/io/Cargo.toml | 2 +- ext/kv/Cargo.toml | 2 +- ext/napi/Cargo.toml | 2 +- ext/net/Cargo.toml | 2 +- ext/node/Cargo.toml | 2 +- ext/tls/Cargo.toml | 2 +- ext/url/Cargo.toml | 2 +- ext/web/Cargo.toml | 2 +- ext/webgpu/Cargo.toml | 2 +- ext/webidl/Cargo.toml | 2 +- ext/websocket/Cargo.toml | 2 +- ext/webstorage/Cargo.toml | 2 +- 22 files changed, 22 insertions(+), 22 deletions(-) (limited to 'ext') diff --git a/ext/broadcast_channel/Cargo.toml b/ext/broadcast_channel/Cargo.toml index b19c4ce15..95d480d24 100644 --- a/ext/broadcast_channel/Cargo.toml +++ b/ext/broadcast_channel/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_broadcast_channel" -version = "0.165.0" +version = "0.166.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/cache/Cargo.toml b/ext/cache/Cargo.toml index 9d876fcb7..d0f260d9c 100644 --- a/ext/cache/Cargo.toml +++ b/ext/cache/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_cache" -version = "0.103.0" +version = "0.104.0" authors.workspace = true 
edition.workspace = true license.workspace = true diff --git a/ext/canvas/Cargo.toml b/ext/canvas/Cargo.toml index 78c674348..e88567ef1 100644 --- a/ext/canvas/Cargo.toml +++ b/ext/canvas/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_canvas" -version = "0.40.0" +version = "0.41.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/console/Cargo.toml b/ext/console/Cargo.toml index 5c143ca18..d83afb178 100644 --- a/ext/console/Cargo.toml +++ b/ext/console/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_console" -version = "0.171.0" +version = "0.172.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/cron/Cargo.toml b/ext/cron/Cargo.toml index 10f09b57c..9aafa0b48 100644 --- a/ext/cron/Cargo.toml +++ b/ext/cron/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_cron" -version = "0.51.0" +version = "0.52.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/crypto/Cargo.toml b/ext/crypto/Cargo.toml index c81c8f6a7..143f4b009 100644 --- a/ext/crypto/Cargo.toml +++ b/ext/crypto/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_crypto" -version = "0.185.0" +version = "0.186.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/fetch/Cargo.toml b/ext/fetch/Cargo.toml index cc9e4f03d..c8e2c858b 100644 --- a/ext/fetch/Cargo.toml +++ b/ext/fetch/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_fetch" -version = "0.195.0" +version = "0.196.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/ffi/Cargo.toml b/ext/ffi/Cargo.toml index 496e8634e..9bca9d98f 100644 --- a/ext/ffi/Cargo.toml +++ b/ext/ffi/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_ffi" -version = "0.158.0" +version = "0.159.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/fs/Cargo.toml b/ext/fs/Cargo.toml index 606c00ad8..904483c79 100644 --- 
a/ext/fs/Cargo.toml +++ b/ext/fs/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_fs" -version = "0.81.0" +version = "0.82.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/http/Cargo.toml b/ext/http/Cargo.toml index b7637bec3..f8e3bb38a 100644 --- a/ext/http/Cargo.toml +++ b/ext/http/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_http" -version = "0.169.0" +version = "0.170.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/io/Cargo.toml b/ext/io/Cargo.toml index d45834a8f..5a87977e0 100644 --- a/ext/io/Cargo.toml +++ b/ext/io/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_io" -version = "0.81.0" +version = "0.82.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/kv/Cargo.toml b/ext/kv/Cargo.toml index 14c3514ff..8aee80c57 100644 --- a/ext/kv/Cargo.toml +++ b/ext/kv/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_kv" -version = "0.79.0" +version = "0.80.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/napi/Cargo.toml b/ext/napi/Cargo.toml index ade789ff8..5405d6280 100644 --- a/ext/napi/Cargo.toml +++ b/ext/napi/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_napi" -version = "0.102.0" +version = "0.103.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/net/Cargo.toml b/ext/net/Cargo.toml index 9f72456b9..4ba4fb315 100644 --- a/ext/net/Cargo.toml +++ b/ext/net/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_net" -version = "0.163.0" +version = "0.164.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/node/Cargo.toml b/ext/node/Cargo.toml index c5f07210b..a6b778c1a 100644 --- a/ext/node/Cargo.toml +++ b/ext/node/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_node" -version = "0.108.0" +version = "0.109.0" authors.workspace = true edition.workspace = true license.workspace = true diff 
--git a/ext/tls/Cargo.toml b/ext/tls/Cargo.toml index 9f7bffe67..9c3d5b804 100644 --- a/ext/tls/Cargo.toml +++ b/ext/tls/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_tls" -version = "0.158.0" +version = "0.159.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/url/Cargo.toml b/ext/url/Cargo.toml index ca4fa444d..f2e81becf 100644 --- a/ext/url/Cargo.toml +++ b/ext/url/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_url" -version = "0.171.0" +version = "0.172.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/web/Cargo.toml b/ext/web/Cargo.toml index 4120e978e..3d01e573d 100644 --- a/ext/web/Cargo.toml +++ b/ext/web/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_web" -version = "0.202.0" +version = "0.203.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/webgpu/Cargo.toml b/ext/webgpu/Cargo.toml index 4c709b9c3..262351b81 100644 --- a/ext/webgpu/Cargo.toml +++ b/ext/webgpu/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_webgpu" -version = "0.138.0" +version = "0.139.0" authors = ["the Deno authors"] edition.workspace = true license = "MIT" diff --git a/ext/webidl/Cargo.toml b/ext/webidl/Cargo.toml index 8a25be366..d5d4d9278 100644 --- a/ext/webidl/Cargo.toml +++ b/ext/webidl/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_webidl" -version = "0.171.0" +version = "0.172.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/websocket/Cargo.toml b/ext/websocket/Cargo.toml index 6bef387b4..5bf217f13 100644 --- a/ext/websocket/Cargo.toml +++ b/ext/websocket/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_websocket" -version = "0.176.0" +version = "0.177.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/webstorage/Cargo.toml b/ext/webstorage/Cargo.toml index bf9bbf23b..2707ccfe9 100644 --- a/ext/webstorage/Cargo.toml +++ 
b/ext/webstorage/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_webstorage" -version = "0.166.0" +version = "0.167.0" authors.workspace = true edition.workspace = true license.workspace = true -- cgit v1.2.3 From 458d6278d2835896018086da773fd72b7af8ed66 Mon Sep 17 00:00:00 2001 From: Nathan Whitaker <17734409+nathanwhit@users.noreply.github.com> Date: Wed, 16 Oct 2024 17:42:15 -0700 Subject: fix(node/http): normalize header names in `ServerResponse` (#26339) Fixes https://github.com/denoland/deno/issues/26115. We weren't normalizing the headers to lower case, so code that attempted to delete the `Content-Length` header (but used a different case) wasn't actually removing the header. --- ext/node/polyfills/http.ts | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) (limited to 'ext') diff --git a/ext/node/polyfills/http.ts b/ext/node/polyfills/http.ts index 349caeea6..e117a0ec2 100644 --- a/ext/node/polyfills/http.ts +++ b/ext/node/polyfills/http.ts @@ -72,7 +72,7 @@ import { STATUS_CODES } from "node:_http_server"; import { methods as METHODS } from "node:_http_common"; const { internalRidSymbol } = core; -const { ArrayIsArray } = primordials; +const { ArrayIsArray, StringPrototypeToLowerCase } = primordials; type Chunk = string | Buffer | Uint8Array; @@ -1270,20 +1270,21 @@ export class ServerResponse extends NodeWritable { if (Array.isArray(value)) { this.#hasNonStringHeaders = true; } - this.#headers[name] = value; + this.#headers[StringPrototypeToLowerCase(name)] = value; return this; } appendHeader(name: string, value: string | string[]) { - if (this.#headers[name] === undefined) { + const key = StringPrototypeToLowerCase(name); + if (this.#headers[key] === undefined) { if (Array.isArray(value)) this.#hasNonStringHeaders = true; - this.#headers[name] = value; + this.#headers[key] = value; } else { this.#hasNonStringHeaders = true; - if (!Array.isArray(this.#headers[name])) { - this.#headers[name] = [this.#headers[name]]; + if 
(!Array.isArray(this.#headers[key])) { + this.#headers[key] = [this.#headers[key]]; } - const header = this.#headers[name]; + const header = this.#headers[key]; if (Array.isArray(value)) { header.push(...value); } else { @@ -1294,10 +1295,10 @@ export class ServerResponse extends NodeWritable { } getHeader(name: string) { - return this.#headers[name]; + return this.#headers[StringPrototypeToLowerCase(name)]; } removeHeader(name: string) { - delete this.#headers[name]; + delete this.#headers[StringPrototypeToLowerCase(name)]; } getHeaderNames() { return Object.keys(this.#headers); -- cgit v1.2.3 From 167f674c7cbb9632000c1feb0b747ba098b01c12 Mon Sep 17 00:00:00 2001 From: Nathan Whitaker <17734409+nathanwhit@users.noreply.github.com> Date: Wed, 16 Oct 2024 19:58:11 -0700 Subject: fix: don't warn on ignored signals on windows (#26332) Closes #26183. The warnings are super noisy and not actionable for the user --- ext/node/polyfills/process.ts | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'ext') diff --git a/ext/node/polyfills/process.ts b/ext/node/polyfills/process.ts index 3dc6ce61a..2605fa6d1 100644 --- a/ext/node/polyfills/process.ts +++ b/ext/node/polyfills/process.ts @@ -520,9 +520,7 @@ Process.prototype.on = function ( } else if ( event !== "SIGBREAK" && event !== "SIGINT" && Deno.build.os === "windows" ) { - // Ignores all signals except SIGBREAK and SIGINT on windows. - // deno-lint-ignore no-console - console.warn(`Ignoring signal "${event}" on Windows`); + // TODO(#26331): Ignores all signals except SIGBREAK and SIGINT on windows. 
} else { EventEmitter.prototype.on.call(this, event, listener); Deno.addSignalListener(event as Deno.Signal, listener); -- cgit v1.2.3 From a61ba3c6995bef58f508a34e537932284692c294 Mon Sep 17 00:00:00 2001 From: Nathan Whitaker <17734409+nathanwhit@users.noreply.github.com> Date: Wed, 16 Oct 2024 20:56:57 -0700 Subject: fix(net): don't try to set nodelay on upgrade streams (#26342) Fixes https://github.com/denoland/deno/issues/26341. We try to call `op_set_nodelay` on an `UpgradeStream`, which doesn't support that operation. --- ext/http/00_serve.ts | 8 ++++++-- ext/net/01_net.js | 15 +++++++++++++++ ext/node/polyfills/http.ts | 4 ++-- ext/node/polyfills/internal_binding/tcp_wrap.ts | 4 +++- 4 files changed, 26 insertions(+), 5 deletions(-) (limited to 'ext') diff --git a/ext/http/00_serve.ts b/ext/http/00_serve.ts index 3b9b085a2..1b70cf212 100644 --- a/ext/http/00_serve.ts +++ b/ext/http/00_serve.ts @@ -76,7 +76,11 @@ import { ReadableStreamPrototype, resourceForReadableStream, } from "ext:deno_web/06_streams.js"; -import { listen, listenOptionApiName, TcpConn } from "ext:deno_net/01_net.js"; +import { + listen, + listenOptionApiName, + UpgradedConn, +} from "ext:deno_net/01_net.js"; import { hasTlsKeyPairOptions, listenTls } from "ext:deno_net/02_tls.js"; import { SymbolAsyncDispose } from "ext:deno_web/00_infra.js"; @@ -189,7 +193,7 @@ class InnerRequest { const upgradeRid = op_http_upgrade_raw(external); - const conn = new TcpConn( + const conn = new UpgradedConn( upgradeRid, underlyingConn?.remoteAddr, underlyingConn?.localAddr, diff --git a/ext/net/01_net.js b/ext/net/01_net.js index 5b894947e..c3e5f9e5c 100644 --- a/ext/net/01_net.js +++ b/ext/net/01_net.js @@ -194,6 +194,20 @@ class Conn { } } +class UpgradedConn extends Conn { + #rid = 0; + + constructor(rid, remoteAddr, localAddr) { + super(rid, remoteAddr, localAddr); + ObjectDefineProperty(this, internalRidSymbol, { + __proto__: null, + enumerable: false, + value: rid, + }); + this.#rid = rid; + } +} + 
class TcpConn extends Conn { #rid = 0; @@ -601,5 +615,6 @@ export { resolveDns, TcpConn, UnixConn, + UpgradedConn, validatePort, }; diff --git a/ext/node/polyfills/http.ts b/ext/node/polyfills/http.ts index e117a0ec2..20bef3009 100644 --- a/ext/node/polyfills/http.ts +++ b/ext/node/polyfills/http.ts @@ -67,7 +67,7 @@ import { headersEntries } from "ext:deno_fetch/20_headers.js"; import { timerId } from "ext:deno_web/03_abort_signal.js"; import { clearTimeout as webClearTimeout } from "ext:deno_web/02_timers.js"; import { resourceForReadableStream } from "ext:deno_web/06_streams.js"; -import { TcpConn } from "ext:deno_net/01_net.js"; +import { UpgradedConn } from "ext:deno_net/01_net.js"; import { STATUS_CODES } from "node:_http_server"; import { methods as METHODS } from "node:_http_common"; @@ -517,7 +517,7 @@ class ClientRequest extends OutgoingMessage { ); assert(typeof res.remoteAddrIp !== "undefined"); assert(typeof res.remoteAddrIp !== "undefined"); - const conn = new TcpConn( + const conn = new UpgradedConn( upgradeRid, { transport: "tcp", diff --git a/ext/node/polyfills/internal_binding/tcp_wrap.ts b/ext/node/polyfills/internal_binding/tcp_wrap.ts index 1321cc627..2856f808a 100644 --- a/ext/node/polyfills/internal_binding/tcp_wrap.ts +++ b/ext/node/polyfills/internal_binding/tcp_wrap.ts @@ -300,7 +300,9 @@ export class TCP extends ConnectionWrap { * @return An error status code. 
*/ setNoDelay(noDelay: boolean): number { - this[kStreamBaseField].setNoDelay(noDelay); + if ("setNoDelay" in this[kStreamBaseField]) { + this[kStreamBaseField].setNoDelay(noDelay); + } return 0; } -- cgit v1.2.3 From ed13efc4ac3ed5262d025bd7228785553fecff1c Mon Sep 17 00:00:00 2001 From: Leo Kettmeir Date: Thu, 17 Oct 2024 09:43:04 -0700 Subject: refactor(ext/net): use concrete error type (#26227) --- ext/cache/lib.rs | 2 +- ext/net/Cargo.toml | 1 + ext/net/io.rs | 45 ++++++---- ext/net/ops.rs | 218 ++++++++++++++++++++++++++++++------------------ ext/net/ops_tls.rs | 107 +++++++++++++----------- ext/net/ops_unix.rs | 67 ++++++++------- ext/net/resolve_addr.rs | 5 +- 7 files changed, 261 insertions(+), 184 deletions(-) (limited to 'ext') diff --git a/ext/cache/lib.rs b/ext/cache/lib.rs index 08661c349..b9cc5427c 100644 --- a/ext/cache/lib.rs +++ b/ext/cache/lib.rs @@ -28,7 +28,7 @@ pub enum CacheError { Resource(deno_core::error::AnyError), #[error(transparent)] Other(deno_core::error::AnyError), - #[error(transparent)] + #[error("{0}")] Io(#[from] std::io::Error), } diff --git a/ext/net/Cargo.toml b/ext/net/Cargo.toml index 4ba4fb315..a57862072 100644 --- a/ext/net/Cargo.toml +++ b/ext/net/Cargo.toml @@ -21,6 +21,7 @@ pin-project.workspace = true rustls-tokio-stream.workspace = true serde.workspace = true socket2.workspace = true +thiserror.workspace = true tokio.workspace = true trust-dns-proto = "0.23" trust-dns-resolver = { version = "0.23", features = ["tokio-runtime", "serde-config"] } diff --git a/ext/net/io.rs b/ext/net/io.rs index f3aed3fcb..2907fa398 100644 --- a/ext/net/io.rs +++ b/ext/net/io.rs @@ -1,7 +1,6 @@ // Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. 
-use deno_core::error::generic_error; -use deno_core::error::AnyError; +use deno_core::futures::TryFutureExt; use deno_core::AsyncMutFuture; use deno_core::AsyncRefCell; use deno_core::AsyncResult; @@ -69,25 +68,36 @@ where pub async fn read( self: Rc, data: &mut [u8], - ) -> Result { + ) -> Result { let mut rd = self.rd_borrow_mut().await; let nread = rd.read(data).try_or_cancel(self.cancel_handle()).await?; Ok(nread) } - pub async fn write(self: Rc, data: &[u8]) -> Result { + pub async fn write( + self: Rc, + data: &[u8], + ) -> Result { let mut wr = self.wr_borrow_mut().await; let nwritten = wr.write(data).await?; Ok(nwritten) } - pub async fn shutdown(self: Rc) -> Result<(), AnyError> { + pub async fn shutdown(self: Rc) -> Result<(), std::io::Error> { let mut wr = self.wr_borrow_mut().await; wr.shutdown().await?; Ok(()) } } +#[derive(Debug, thiserror::Error)] +pub enum MapError { + #[error("{0}")] + Io(std::io::Error), + #[error("Unable to get resources")] + NoResources, +} + pub type TcpStreamResource = FullDuplexResource; @@ -100,7 +110,7 @@ impl Resource for TcpStreamResource { } fn shutdown(self: Rc) -> AsyncResult<()> { - Box::pin(self.shutdown()) + Box::pin(self.shutdown().map_err(Into::into)) } fn close(self: Rc) { @@ -109,31 +119,30 @@ impl Resource for TcpStreamResource { } impl TcpStreamResource { - pub fn set_nodelay(self: Rc, nodelay: bool) -> Result<(), AnyError> { - self.map_socket(Box::new(move |socket| Ok(socket.set_nodelay(nodelay)?))) + pub fn set_nodelay(self: Rc, nodelay: bool) -> Result<(), MapError> { + self.map_socket(Box::new(move |socket| socket.set_nodelay(nodelay))) } pub fn set_keepalive( self: Rc, keepalive: bool, - ) -> Result<(), AnyError> { - self - .map_socket(Box::new(move |socket| Ok(socket.set_keepalive(keepalive)?))) + ) -> Result<(), MapError> { + self.map_socket(Box::new(move |socket| socket.set_keepalive(keepalive))) } #[allow(clippy::type_complexity)] fn map_socket( self: Rc, - map: Box Result<(), AnyError>>, - ) -> 
Result<(), AnyError> { + map: Box Result<(), std::io::Error>>, + ) -> Result<(), MapError> { if let Some(wr) = RcRef::map(self, |r| &r.wr).try_borrow() { let stream = wr.as_ref().as_ref(); let socket = socket2::SockRef::from(stream); - return map(socket); + return map(socket).map_err(MapError::Io); } - Err(generic_error("Unable to get resources")) + Err(MapError::NoResources) } } @@ -153,7 +162,9 @@ impl UnixStreamResource { unreachable!() } #[allow(clippy::unused_async)] - pub async fn shutdown(self: Rc) -> Result<(), AnyError> { + pub async fn shutdown( + self: Rc, + ) -> Result<(), deno_core::error::AnyError> { unreachable!() } pub fn cancel_read_ops(&self) { @@ -170,7 +181,7 @@ impl Resource for UnixStreamResource { } fn shutdown(self: Rc) -> AsyncResult<()> { - Box::pin(self.shutdown()) + Box::pin(self.shutdown().map_err(Into::into)) } fn close(self: Rc) { diff --git a/ext/net/ops.rs b/ext/net/ops.rs index 5248493f4..0f92dead0 100644 --- a/ext/net/ops.rs +++ b/ext/net/ops.rs @@ -6,10 +6,6 @@ use crate::resolve_addr::resolve_addr; use crate::resolve_addr::resolve_addr_sync; use crate::tcp::TcpListener; use crate::NetPermissions; -use deno_core::error::bad_resource; -use deno_core::error::custom_error; -use deno_core::error::generic_error; -use deno_core::error::AnyError; use deno_core::op2; use deno_core::CancelFuture; @@ -43,6 +39,7 @@ use trust_dns_proto::rr::record_type::RecordType; use trust_dns_resolver::config::NameServerConfigGroup; use trust_dns_resolver::config::ResolverConfig; use trust_dns_resolver::config::ResolverOpts; +use trust_dns_resolver::error::ResolveError; use trust_dns_resolver::error::ResolveErrorKind; use trust_dns_resolver::system_conf; use trust_dns_resolver::AsyncResolver; @@ -68,11 +65,69 @@ impl From for IpAddr { } } -pub(crate) fn accept_err(e: std::io::Error) -> AnyError { +#[derive(Debug, thiserror::Error)] +pub enum NetError { + #[error("Listener has been closed")] + ListenerClosed, + #[error("Listener already in use")] + 
ListenerBusy, + #[error("Socket has been closed")] + SocketClosed, + #[error("Socket has been closed")] + SocketClosedNotConnected, + #[error("Socket already in use")] + SocketBusy, + #[error("{0}")] + Io(#[from] std::io::Error), + #[error("Another accept task is ongoing")] + AcceptTaskOngoing, + #[error("{0}")] + Permission(deno_core::error::AnyError), + #[error("{0}")] + Resource(deno_core::error::AnyError), + #[error("No resolved address found")] + NoResolvedAddress, + #[error("{0}")] + AddrParse(#[from] std::net::AddrParseError), + #[error("{0}")] + Map(crate::io::MapError), + #[error("{0}")] + Canceled(#[from] deno_core::Canceled), + #[error("{0}")] + DnsNotFound(ResolveError), + #[error("{0}")] + DnsNotConnected(ResolveError), + #[error("{0}")] + DnsTimedOut(ResolveError), + #[error("{0}")] + Dns(#[from] ResolveError), + #[error("Provided record type is not supported")] + UnsupportedRecordType, + #[error("File name or path {0:?} is not valid UTF-8")] + InvalidUtf8(std::ffi::OsString), + #[error("unexpected key type")] + UnexpectedKeyType, + #[error("Invalid hostname: '{0}'")] + InvalidHostname(String), // TypeError + #[error("TCP stream is currently in use")] + TcpStreamBusy, + #[error("{0}")] + Rustls(#[from] deno_tls::rustls::Error), + #[error("{0}")] + Tls(#[from] deno_tls::TlsError), + #[error("Error creating TLS certificate: Deno.listenTls requires a key")] + ListenTlsRequiresKey, // InvalidData + #[error("{0}")] + RootCertStore(deno_core::anyhow::Error), + #[error("{0}")] + Reunite(tokio::net::tcp::ReuniteError), +} + +pub(crate) fn accept_err(e: std::io::Error) -> NetError { if let std::io::ErrorKind::Interrupted = e.kind() { - bad_resource("Listener has been closed") + NetError::ListenerClosed } else { - e.into() + NetError::Io(e) } } @@ -81,15 +136,15 @@ pub(crate) fn accept_err(e: std::io::Error) -> AnyError { pub async fn op_net_accept_tcp( state: Rc>, #[smi] rid: ResourceId, -) -> Result<(ResourceId, IpAddr, IpAddr), AnyError> { +) -> 
Result<(ResourceId, IpAddr, IpAddr), NetError> { let resource = state .borrow() .resource_table .get::>(rid) - .map_err(|_| bad_resource("Listener has been closed"))?; + .map_err(|_| NetError::ListenerClosed)?; let listener = RcRef::map(&resource, |r| &r.listener) .try_borrow_mut() - .ok_or_else(|| custom_error("Busy", "Another accept task is ongoing"))?; + .ok_or_else(|| NetError::AcceptTaskOngoing)?; let cancel = RcRef::map(resource, |r| &r.cancel); let (tcp_stream, _socket_addr) = listener .accept() @@ -112,12 +167,12 @@ pub async fn op_net_recv_udp( state: Rc>, #[smi] rid: ResourceId, #[buffer] mut buf: JsBuffer, -) -> Result<(usize, IpAddr), AnyError> { +) -> Result<(usize, IpAddr), NetError> { let resource = state .borrow_mut() .resource_table .get::(rid) - .map_err(|_| bad_resource("Socket has been closed"))?; + .map_err(|_| NetError::SocketClosed)?; let socket = RcRef::map(&resource, |r| &r.socket).borrow().await; let cancel_handle = RcRef::map(&resource, |r| &r.cancel); let (nread, remote_addr) = socket @@ -134,27 +189,29 @@ pub async fn op_net_send_udp( #[smi] rid: ResourceId, #[serde] addr: IpAddr, #[buffer] zero_copy: JsBuffer, -) -> Result +) -> Result where NP: NetPermissions + 'static, { { let mut s = state.borrow_mut(); - s.borrow_mut::().check_net( - &(&addr.hostname, Some(addr.port)), - "Deno.DatagramConn.send()", - )?; + s.borrow_mut::() + .check_net( + &(&addr.hostname, Some(addr.port)), + "Deno.DatagramConn.send()", + ) + .map_err(NetError::Permission)?; } let addr = resolve_addr(&addr.hostname, addr.port) .await? 
.next() - .ok_or_else(|| generic_error("No resolved address found"))?; + .ok_or(NetError::NoResolvedAddress)?; let resource = state .borrow_mut() .resource_table .get::(rid) - .map_err(|_| bad_resource("Socket has been closed"))?; + .map_err(|_| NetError::SocketClosed)?; let socket = RcRef::map(&resource, |r| &r.socket).borrow().await; let nwritten = socket.send_to(&zero_copy, &addr).await?; @@ -167,12 +224,12 @@ pub async fn op_net_join_multi_v4_udp( #[smi] rid: ResourceId, #[string] address: String, #[string] multi_interface: String, -) -> Result<(), AnyError> { +) -> Result<(), NetError> { let resource = state .borrow_mut() .resource_table .get::(rid) - .map_err(|_| bad_resource("Socket has been closed"))?; + .map_err(|_| NetError::SocketClosed)?; let socket = RcRef::map(&resource, |r| &r.socket).borrow().await; let addr = Ipv4Addr::from_str(address.as_str())?; @@ -189,12 +246,12 @@ pub async fn op_net_join_multi_v6_udp( #[smi] rid: ResourceId, #[string] address: String, #[smi] multi_interface: u32, -) -> Result<(), AnyError> { +) -> Result<(), NetError> { let resource = state .borrow_mut() .resource_table .get::(rid) - .map_err(|_| bad_resource("Socket has been closed"))?; + .map_err(|_| NetError::SocketClosed)?; let socket = RcRef::map(&resource, |r| &r.socket).borrow().await; let addr = Ipv6Addr::from_str(address.as_str())?; @@ -210,12 +267,12 @@ pub async fn op_net_leave_multi_v4_udp( #[smi] rid: ResourceId, #[string] address: String, #[string] multi_interface: String, -) -> Result<(), AnyError> { +) -> Result<(), NetError> { let resource = state .borrow_mut() .resource_table .get::(rid) - .map_err(|_| bad_resource("Socket has been closed"))?; + .map_err(|_| NetError::SocketClosed)?; let socket = RcRef::map(&resource, |r| &r.socket).borrow().await; let addr = Ipv4Addr::from_str(address.as_str())?; @@ -232,12 +289,12 @@ pub async fn op_net_leave_multi_v6_udp( #[smi] rid: ResourceId, #[string] address: String, #[smi] multi_interface: u32, -) -> Result<(), 
AnyError> { +) -> Result<(), NetError> { let resource = state .borrow_mut() .resource_table .get::(rid) - .map_err(|_| bad_resource("Socket has been closed"))?; + .map_err(|_| NetError::SocketClosed)?; let socket = RcRef::map(&resource, |r| &r.socket).borrow().await; let addr = Ipv6Addr::from_str(address.as_str())?; @@ -253,16 +310,16 @@ pub async fn op_net_set_multi_loopback_udp( #[smi] rid: ResourceId, is_v4_membership: bool, loopback: bool, -) -> Result<(), AnyError> { +) -> Result<(), NetError> { let resource = state .borrow_mut() .resource_table .get::(rid) - .map_err(|_| bad_resource("Socket has been closed"))?; + .map_err(|_| NetError::SocketClosed)?; let socket = RcRef::map(&resource, |r| &r.socket).borrow().await; if is_v4_membership { - socket.set_multicast_loop_v4(loopback)? + socket.set_multicast_loop_v4(loopback)?; } else { socket.set_multicast_loop_v6(loopback)?; } @@ -275,12 +332,12 @@ pub async fn op_net_set_multi_ttl_udp( state: Rc>, #[smi] rid: ResourceId, #[smi] ttl: u32, -) -> Result<(), AnyError> { +) -> Result<(), NetError> { let resource = state .borrow_mut() .resource_table .get::(rid) - .map_err(|_| bad_resource("Socket has been closed"))?; + .map_err(|_| NetError::SocketClosed)?; let socket = RcRef::map(&resource, |r| &r.socket).borrow().await; socket.set_multicast_ttl_v4(ttl)?; @@ -293,7 +350,7 @@ pub async fn op_net_set_multi_ttl_udp( pub async fn op_net_connect_tcp( state: Rc>, #[serde] addr: IpAddr, -) -> Result<(ResourceId, IpAddr, IpAddr), AnyError> +) -> Result<(ResourceId, IpAddr, IpAddr), NetError> where NP: NetPermissions + 'static, { @@ -304,7 +361,7 @@ where pub async fn op_net_connect_tcp_inner( state: Rc>, addr: IpAddr, -) -> Result<(ResourceId, IpAddr, IpAddr), AnyError> +) -> Result<(ResourceId, IpAddr, IpAddr), NetError> where NP: NetPermissions + 'static, { @@ -312,13 +369,14 @@ where let mut state_ = state.borrow_mut(); state_ .borrow_mut::() - .check_net(&(&addr.hostname, Some(addr.port)), "Deno.connect()")?; + 
.check_net(&(&addr.hostname, Some(addr.port)), "Deno.connect()") + .map_err(NetError::Permission)?; } let addr = resolve_addr(&addr.hostname, addr.port) .await? .next() - .ok_or_else(|| generic_error("No resolved address found"))?; + .ok_or_else(|| NetError::NoResolvedAddress)?; let tcp_stream = TcpStream::connect(&addr).await?; let local_addr = tcp_stream.local_addr()?; let remote_addr = tcp_stream.peer_addr()?; @@ -353,7 +411,7 @@ pub fn op_net_listen_tcp( #[serde] addr: IpAddr, reuse_port: bool, load_balanced: bool, -) -> Result<(ResourceId, IpAddr), AnyError> +) -> Result<(ResourceId, IpAddr), NetError> where NP: NetPermissions + 'static, { @@ -362,10 +420,11 @@ where } state .borrow_mut::() - .check_net(&(&addr.hostname, Some(addr.port)), "Deno.listen()")?; + .check_net(&(&addr.hostname, Some(addr.port)), "Deno.listen()") + .map_err(NetError::Permission)?; let addr = resolve_addr_sync(&addr.hostname, addr.port)? .next() - .ok_or_else(|| generic_error("No resolved address found"))?; + .ok_or_else(|| NetError::NoResolvedAddress)?; let listener = if load_balanced { TcpListener::bind_load_balanced(addr) @@ -384,16 +443,17 @@ fn net_listen_udp( addr: IpAddr, reuse_address: bool, loopback: bool, -) -> Result<(ResourceId, IpAddr), AnyError> +) -> Result<(ResourceId, IpAddr), NetError> where NP: NetPermissions + 'static, { state .borrow_mut::() - .check_net(&(&addr.hostname, Some(addr.port)), "Deno.listenDatagram()")?; + .check_net(&(&addr.hostname, Some(addr.port)), "Deno.listenDatagram()") + .map_err(NetError::Permission)?; let addr = resolve_addr_sync(&addr.hostname, addr.port)? 
.next() - .ok_or_else(|| generic_error("No resolved address found"))?; + .ok_or_else(|| NetError::NoResolvedAddress)?; let domain = if addr.is_ipv4() { Domain::IPV4 @@ -453,7 +513,7 @@ pub fn op_net_listen_udp( #[serde] addr: IpAddr, reuse_address: bool, loopback: bool, -) -> Result<(ResourceId, IpAddr), AnyError> +) -> Result<(ResourceId, IpAddr), NetError> where NP: NetPermissions + 'static, { @@ -468,7 +528,7 @@ pub fn op_node_unstable_net_listen_udp( #[serde] addr: IpAddr, reuse_address: bool, loopback: bool, -) -> Result<(ResourceId, IpAddr), AnyError> +) -> Result<(ResourceId, IpAddr), NetError> where NP: NetPermissions + 'static, { @@ -551,7 +611,7 @@ pub struct NameServer { pub async fn op_dns_resolve( state: Rc>, #[serde] args: ResolveAddrArgs, -) -> Result, AnyError> +) -> Result, NetError> where NP: NetPermissions + 'static, { @@ -587,7 +647,9 @@ where let socker_addr = &ns.socket_addr; let ip = socker_addr.ip().to_string(); let port = socker_addr.port(); - perm.check_net(&(ip, Some(port)), "Deno.resolveDns()")?; + perm + .check_net(&(ip, Some(port)), "Deno.resolveDns()") + .map_err(NetError::Permission)?; } } @@ -618,22 +680,17 @@ where }; lookup - .map_err(|e| { - let message = format!("{e}"); - match e.kind() { - ResolveErrorKind::NoRecordsFound { .. } => { - custom_error("NotFound", message) - } - ResolveErrorKind::Message("No connections available") => { - custom_error("NotConnected", message) - } - ResolveErrorKind::Timeout => custom_error("TimedOut", message), - _ => generic_error(message), + .map_err(|e| match e.kind() { + ResolveErrorKind::NoRecordsFound { .. } => NetError::DnsNotFound(e), + ResolveErrorKind::Message("No connections available") => { + NetError::DnsNotConnected(e) } + ResolveErrorKind::Timeout => NetError::DnsTimedOut(e), + _ => NetError::Dns(e), })? 
.iter() .filter_map(|rdata| rdata_to_return_record(record_type)(rdata).transpose()) - .collect::, AnyError>>() + .collect::, NetError>>() } #[op2(fast)] @@ -641,7 +698,7 @@ pub fn op_set_nodelay( state: &mut OpState, #[smi] rid: ResourceId, nodelay: bool, -) -> Result<(), AnyError> { +) -> Result<(), NetError> { op_set_nodelay_inner(state, rid, nodelay) } @@ -650,10 +707,12 @@ pub fn op_set_nodelay_inner( state: &mut OpState, rid: ResourceId, nodelay: bool, -) -> Result<(), AnyError> { - let resource: Rc = - state.resource_table.get::(rid)?; - resource.set_nodelay(nodelay) +) -> Result<(), NetError> { + let resource: Rc = state + .resource_table + .get::(rid) + .map_err(NetError::Resource)?; + resource.set_nodelay(nodelay).map_err(NetError::Map) } #[op2(fast)] @@ -661,7 +720,7 @@ pub fn op_set_keepalive( state: &mut OpState, #[smi] rid: ResourceId, keepalive: bool, -) -> Result<(), AnyError> { +) -> Result<(), NetError> { op_set_keepalive_inner(state, rid, keepalive) } @@ -670,17 +729,19 @@ pub fn op_set_keepalive_inner( state: &mut OpState, rid: ResourceId, keepalive: bool, -) -> Result<(), AnyError> { - let resource: Rc = - state.resource_table.get::(rid)?; - resource.set_keepalive(keepalive) +) -> Result<(), NetError> { + let resource: Rc = state + .resource_table + .get::(rid) + .map_err(NetError::Resource)?; + resource.set_keepalive(keepalive).map_err(NetError::Map) } fn rdata_to_return_record( ty: RecordType, -) -> impl Fn(&RData) -> Result, AnyError> { +) -> impl Fn(&RData) -> Result, NetError> { use RecordType::*; - move |r: &RData| -> Result, AnyError> { + move |r: &RData| -> Result, NetError> { let record = match ty { A => r.as_a().map(ToString::to_string).map(DnsReturnRecord::A), AAAA => r @@ -761,12 +822,7 @@ fn rdata_to_return_record( .collect(); DnsReturnRecord::Txt(texts) }), - _ => { - return Err(custom_error( - "NotSupported", - "Provided record type is not supported", - )) - } + _ => return Err(NetError::UnsupportedRecordType), }; Ok(record) } @@ 
-985,7 +1041,7 @@ mod tests { &mut self, _host: &(T, Option), _api_name: &str, - ) -> Result<(), AnyError> { + ) -> Result<(), deno_core::error::AnyError> { Ok(()) } @@ -993,7 +1049,7 @@ mod tests { &mut self, p: &str, _api_name: &str, - ) -> Result { + ) -> Result { Ok(PathBuf::from(p)) } @@ -1001,7 +1057,7 @@ mod tests { &mut self, p: &str, _api_name: &str, - ) -> Result { + ) -> Result { Ok(PathBuf::from(p)) } @@ -1009,7 +1065,7 @@ mod tests { &mut self, p: &'a Path, _api_name: &str, - ) -> Result, AnyError> { + ) -> Result, deno_core::error::AnyError> { Ok(Cow::Borrowed(p)) } } @@ -1091,7 +1147,7 @@ mod tests { let vals = result.unwrap(); rid = rid.or(Some(vals.0)); } - }; + } let rid = rid.unwrap(); let state = runtime.op_state(); diff --git a/ext/net/ops_tls.rs b/ext/net/ops_tls.rs index a68d144b5..c7d65dd85 100644 --- a/ext/net/ops_tls.rs +++ b/ext/net/ops_tls.rs @@ -2,6 +2,7 @@ use crate::io::TcpStreamResource; use crate::ops::IpAddr; +use crate::ops::NetError; use crate::ops::TlsHandshakeInfo; use crate::raw::NetworkListenerResource; use crate::resolve_addr::resolve_addr; @@ -10,13 +11,7 @@ use crate::tcp::TcpListener; use crate::DefaultTlsOptions; use crate::NetPermissions; use crate::UnsafelyIgnoreCertificateErrors; -use deno_core::anyhow::anyhow; -use deno_core::anyhow::bail; -use deno_core::error::bad_resource; -use deno_core::error::custom_error; -use deno_core::error::generic_error; -use deno_core::error::invalid_hostname; -use deno_core::error::AnyError; +use deno_core::futures::TryFutureExt; use deno_core::op2; use deno_core::v8; use deno_core::AsyncRefCell; @@ -118,20 +113,23 @@ impl TlsStreamResource { pub async fn read( self: Rc, data: &mut [u8], - ) -> Result { + ) -> Result { let mut rd = RcRef::map(&self, |r| &r.rd).borrow_mut().await; let cancel_handle = RcRef::map(&self, |r| &r.cancel_handle); - Ok(rd.read(data).try_or_cancel(cancel_handle).await?) 
+ rd.read(data).try_or_cancel(cancel_handle).await } - pub async fn write(self: Rc, data: &[u8]) -> Result { + pub async fn write( + self: Rc, + data: &[u8], + ) -> Result { let mut wr = RcRef::map(self, |r| &r.wr).borrow_mut().await; let nwritten = wr.write(data).await?; wr.flush().await?; Ok(nwritten) } - pub async fn shutdown(self: Rc) -> Result<(), AnyError> { + pub async fn shutdown(self: Rc) -> Result<(), std::io::Error> { let mut wr = RcRef::map(self, |r| &r.wr).borrow_mut().await; wr.shutdown().await?; Ok(()) @@ -139,7 +137,7 @@ impl TlsStreamResource { pub async fn handshake( self: &Rc, - ) -> Result { + ) -> Result { if let Some(tls_info) = &*self.handshake_info.borrow() { return Ok(tls_info.clone()); } @@ -164,7 +162,7 @@ impl Resource for TlsStreamResource { } fn shutdown(self: Rc) -> AsyncResult<()> { - Box::pin(self.shutdown()) + Box::pin(self.shutdown().map_err(Into::into)) } fn close(self: Rc) { @@ -201,7 +199,7 @@ pub fn op_tls_key_null() -> TlsKeysHolder { pub fn op_tls_key_static( #[string] cert: &str, #[string] key: &str, -) -> Result { +) -> Result { let cert = load_certs(&mut BufReader::new(cert.as_bytes()))?; let key = load_private_keys(key.as_bytes())? 
.into_iter() @@ -236,9 +234,9 @@ pub fn op_tls_cert_resolver_resolve( #[cppgc] lookup: &TlsKeyLookup, #[string] sni: String, #[cppgc] key: &TlsKeysHolder, -) -> Result<(), AnyError> { +) -> Result<(), NetError> { let TlsKeys::Static(key) = key.take() else { - bail!("unexpected key type"); + return Err(NetError::UnexpectedKeyType); }; lookup.resolve(sni, Ok(key)); Ok(()) @@ -258,7 +256,7 @@ pub fn op_tls_cert_resolver_resolve_error( pub fn op_tls_start( state: Rc>, #[serde] args: StartTlsArgs, -) -> Result<(ResourceId, IpAddr, IpAddr), AnyError> +) -> Result<(ResourceId, IpAddr, IpAddr), NetError> where NP: NetPermissions + 'static, { @@ -271,7 +269,9 @@ where { let mut s = state.borrow_mut(); let permissions = s.borrow_mut::(); - permissions.check_net(&(&hostname, Some(0)), "Deno.startTls()")?; + permissions + .check_net(&(&hostname, Some(0)), "Deno.startTls()") + .map_err(NetError::Permission)?; } let ca_certs = args @@ -281,7 +281,7 @@ where .collect::>(); let hostname_dns = ServerName::try_from(hostname.to_string()) - .map_err(|_| invalid_hostname(&hostname))?; + .map_err(|_| NetError::InvalidHostname(hostname))?; let unsafely_ignore_certificate_errors = state .borrow() @@ -291,19 +291,21 @@ where let root_cert_store = state .borrow() .borrow::() - .root_cert_store()?; + .root_cert_store() + .map_err(NetError::RootCertStore)?; let resource_rc = state .borrow_mut() .resource_table - .take::(rid)?; + .take::(rid) + .map_err(NetError::Resource)?; // This TCP connection might be used somewhere else. If it's the case, we cannot proceed with the // process of starting a TLS connection on top of this TCP connection, so we just return a Busy error. 
// See also: https://github.com/denoland/deno/pull/16242 - let resource = Rc::try_unwrap(resource_rc) - .map_err(|_| custom_error("Busy", "TCP stream is currently in use"))?; + let resource = + Rc::try_unwrap(resource_rc).map_err(|_| NetError::TcpStreamBusy)?; let (read_half, write_half) = resource.into_inner(); - let tcp_stream = read_half.reunite(write_half)?; + let tcp_stream = read_half.reunite(write_half).map_err(NetError::Reunite)?; let local_addr = tcp_stream.local_addr()?; let remote_addr = tcp_stream.peer_addr()?; @@ -345,7 +347,7 @@ pub async fn op_net_connect_tls( #[serde] addr: IpAddr, #[serde] args: ConnectTlsArgs, #[cppgc] key_pair: &TlsKeysHolder, -) -> Result<(ResourceId, IpAddr, IpAddr), AnyError> +) -> Result<(ResourceId, IpAddr, IpAddr), NetError> where NP: NetPermissions + 'static, { @@ -359,9 +361,14 @@ where let mut s = state.borrow_mut(); let permissions = s.borrow_mut::(); permissions - .check_net(&(&addr.hostname, Some(addr.port)), "Deno.connectTls()")?; + .check_net(&(&addr.hostname, Some(addr.port)), "Deno.connectTls()") + .map_err(NetError::Permission)?; if let Some(path) = cert_file { - Some(permissions.check_read(path, "Deno.connectTls()")?) + Some( + permissions + .check_read(path, "Deno.connectTls()") + .map_err(NetError::Permission)?, + ) } else { None } @@ -382,17 +389,18 @@ where let root_cert_store = state .borrow() .borrow::() - .root_cert_store()?; + .root_cert_store() + .map_err(NetError::RootCertStore)?; let hostname_dns = if let Some(server_name) = args.server_name { ServerName::try_from(server_name) } else { ServerName::try_from(addr.hostname.clone()) } - .map_err(|_| invalid_hostname(&addr.hostname))?; + .map_err(|_| NetError::InvalidHostname(addr.hostname.clone()))?; let connect_addr = resolve_addr(&addr.hostname, addr.port) .await? 
.next() - .ok_or_else(|| generic_error("No resolved address found"))?; + .ok_or_else(|| NetError::NoResolvedAddress)?; let tcp_stream = TcpStream::connect(connect_addr).await?; let local_addr = tcp_stream.local_addr()?; let remote_addr = tcp_stream.peer_addr()?; @@ -444,7 +452,7 @@ pub fn op_net_listen_tls( #[serde] addr: IpAddr, #[serde] args: ListenTlsArgs, #[cppgc] keys: &TlsKeysHolder, -) -> Result<(ResourceId, IpAddr), AnyError> +) -> Result<(ResourceId, IpAddr), NetError> where NP: NetPermissions + 'static, { @@ -455,12 +463,13 @@ where { let permissions = state.borrow_mut::(); permissions - .check_net(&(&addr.hostname, Some(addr.port)), "Deno.listenTls()")?; + .check_net(&(&addr.hostname, Some(addr.port)), "Deno.listenTls()") + .map_err(NetError::Permission)?; } let bind_addr = resolve_addr_sync(&addr.hostname, addr.port)? .next() - .ok_or_else(|| generic_error("No resolved address found"))?; + .ok_or(NetError::NoResolvedAddress)?; let tcp_listener = if args.load_balanced { TcpListener::bind_load_balanced(bind_addr) @@ -475,28 +484,24 @@ where .map(|s| s.into_bytes()) .collect(); let listener = match keys.take() { - TlsKeys::Null => Err(anyhow!("Deno.listenTls requires a key")), + TlsKeys::Null => return Err(NetError::ListenTlsRequiresKey), TlsKeys::Static(TlsKey(cert, key)) => { let mut tls_config = ServerConfig::builder() .with_no_client_auth() - .with_single_cert(cert, key) - .map_err(|e| anyhow!(e))?; + .with_single_cert(cert, key)?; tls_config.alpn_protocols = alpn; - Ok(TlsListener { + TlsListener { tcp_listener, tls_config: Some(tls_config.into()), server_config_provider: None, - }) + } } - TlsKeys::Resolver(resolver) => Ok(TlsListener { + TlsKeys::Resolver(resolver) => TlsListener { tcp_listener, tls_config: None, server_config_provider: Some(resolver.into_server_config_provider(alpn)), - }), - } - .map_err(|e| { - custom_error("InvalidData", "Error creating TLS certificate").context(e) - })?; + }, + }; let tls_listener_resource = 
NetworkListenerResource::new(listener); @@ -510,23 +515,23 @@ where pub async fn op_net_accept_tls( state: Rc>, #[smi] rid: ResourceId, -) -> Result<(ResourceId, IpAddr, IpAddr), AnyError> { +) -> Result<(ResourceId, IpAddr, IpAddr), NetError> { let resource = state .borrow() .resource_table .get::>(rid) - .map_err(|_| bad_resource("Listener has been closed"))?; + .map_err(|_| NetError::ListenerClosed)?; let cancel_handle = RcRef::map(&resource, |r| &r.cancel); let listener = RcRef::map(&resource, |r| &r.listener) .try_borrow_mut() - .ok_or_else(|| custom_error("Busy", "Another accept task is ongoing"))?; + .ok_or_else(|| NetError::AcceptTaskOngoing)?; let (tls_stream, remote_addr) = match listener.accept().try_or_cancel(&cancel_handle).await { Ok(tuple) => tuple, Err(err) if err.kind() == ErrorKind::Interrupted => { - return Err(bad_resource("Listener has been closed")); + return Err(NetError::ListenerClosed); } Err(err) => return Err(err.into()), }; @@ -547,11 +552,11 @@ pub async fn op_net_accept_tls( pub async fn op_tls_handshake( state: Rc>, #[smi] rid: ResourceId, -) -> Result { +) -> Result { let resource = state .borrow() .resource_table .get::(rid) - .map_err(|_| bad_resource("Listener has been closed"))?; - resource.handshake().await + .map_err(|_| NetError::ListenerClosed)?; + resource.handshake().await.map_err(Into::into) } diff --git a/ext/net/ops_unix.rs b/ext/net/ops_unix.rs index 95293284f..04ae84906 100644 --- a/ext/net/ops_unix.rs +++ b/ext/net/ops_unix.rs @@ -1,11 +1,9 @@ // Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. 
use crate::io::UnixStreamResource; +use crate::ops::NetError; use crate::raw::NetworkListenerResource; use crate::NetPermissions; -use deno_core::error::bad_resource; -use deno_core::error::custom_error; -use deno_core::error::AnyError; use deno_core::op2; use deno_core::AsyncRefCell; use deno_core::CancelHandle; @@ -26,11 +24,8 @@ use tokio::net::UnixListener; pub use tokio::net::UnixStream; /// A utility function to map OsStrings to Strings -pub fn into_string(s: std::ffi::OsString) -> Result { - s.into_string().map_err(|s| { - let message = format!("File name or path {s:?} is not valid UTF-8"); - custom_error("InvalidData", message) - }) +pub fn into_string(s: std::ffi::OsString) -> Result { + s.into_string().map_err(NetError::InvalidUtf8) } pub struct UnixDatagramResource { @@ -63,15 +58,15 @@ pub struct UnixListenArgs { pub async fn op_net_accept_unix( state: Rc>, #[smi] rid: ResourceId, -) -> Result<(ResourceId, Option, Option), AnyError> { +) -> Result<(ResourceId, Option, Option), NetError> { let resource = state .borrow() .resource_table .get::>(rid) - .map_err(|_| bad_resource("Listener has been closed"))?; + .map_err(|_| NetError::ListenerClosed)?; let listener = RcRef::map(&resource, |r| &r.listener) .try_borrow_mut() - .ok_or_else(|| custom_error("Busy", "Listener already in use"))?; + .ok_or(NetError::ListenerBusy)?; let cancel = RcRef::map(resource, |r| &r.cancel); let (unix_stream, _socket_addr) = listener .accept() @@ -95,7 +90,7 @@ pub async fn op_net_accept_unix( pub async fn op_net_connect_unix( state: Rc>, #[string] address_path: String, -) -> Result<(ResourceId, Option, Option), AnyError> +) -> Result<(ResourceId, Option, Option), NetError> where NP: NetPermissions + 'static, { @@ -103,10 +98,12 @@ where let mut state_ = state.borrow_mut(); let address_path = state_ .borrow_mut::() - .check_read(&address_path, "Deno.connect()")?; + .check_read(&address_path, "Deno.connect()") + .map_err(NetError::Permission)?; _ = state_ .borrow_mut::() - 
.check_write_path(&address_path, "Deno.connect()")?; + .check_write_path(&address_path, "Deno.connect()") + .map_err(NetError::Permission)?; address_path }; let unix_stream = UnixStream::connect(&address_path).await?; @@ -127,15 +124,15 @@ pub async fn op_net_recv_unixpacket( state: Rc>, #[smi] rid: ResourceId, #[buffer] mut buf: JsBuffer, -) -> Result<(usize, Option), AnyError> { +) -> Result<(usize, Option), NetError> { let resource = state .borrow() .resource_table .get::(rid) - .map_err(|_| bad_resource("Socket has been closed"))?; + .map_err(|_| NetError::SocketClosed)?; let socket = RcRef::map(&resource, |r| &r.socket) .try_borrow_mut() - .ok_or_else(|| custom_error("Busy", "Socket already in use"))?; + .ok_or(NetError::SocketBusy)?; let cancel = RcRef::map(resource, |r| &r.cancel); let (nread, remote_addr) = socket.recv_from(&mut buf).try_or_cancel(cancel).await?; @@ -150,24 +147,25 @@ pub async fn op_net_send_unixpacket( #[smi] rid: ResourceId, #[string] address_path: String, #[buffer] zero_copy: JsBuffer, -) -> Result +) -> Result where NP: NetPermissions + 'static, { let address_path = { let mut s = state.borrow_mut(); s.borrow_mut::() - .check_write(&address_path, "Deno.DatagramConn.send()")? + .check_write(&address_path, "Deno.DatagramConn.send()") + .map_err(NetError::Permission)? 
}; let resource = state .borrow() .resource_table .get::(rid) - .map_err(|_| custom_error("NotConnected", "Socket has been closed"))?; + .map_err(|_| NetError::SocketClosedNotConnected)?; let socket = RcRef::map(&resource, |r| &r.socket) .try_borrow_mut() - .ok_or_else(|| custom_error("Busy", "Socket already in use"))?; + .ok_or(NetError::SocketBusy)?; let nwritten = socket.send_to(&zero_copy, address_path).await?; Ok(nwritten) @@ -179,14 +177,18 @@ pub fn op_net_listen_unix( state: &mut OpState, #[string] address_path: String, #[string] api_name: String, -) -> Result<(ResourceId, Option), AnyError> +) -> Result<(ResourceId, Option), NetError> where NP: NetPermissions + 'static, { let permissions = state.borrow_mut::(); let api_call_expr = format!("{}()", api_name); - let address_path = permissions.check_read(&address_path, &api_call_expr)?; - _ = permissions.check_write_path(&address_path, &api_call_expr)?; + let address_path = permissions + .check_read(&address_path, &api_call_expr) + .map_err(NetError::Permission)?; + _ = permissions + .check_write_path(&address_path, &api_call_expr) + .map_err(NetError::Permission)?; let listener = UnixListener::bind(address_path)?; let local_addr = listener.local_addr()?; let pathname = local_addr.as_pathname().map(pathstring).transpose()?; @@ -198,14 +200,17 @@ where pub fn net_listen_unixpacket( state: &mut OpState, address_path: String, -) -> Result<(ResourceId, Option), AnyError> +) -> Result<(ResourceId, Option), NetError> where NP: NetPermissions + 'static, { let permissions = state.borrow_mut::(); - let address_path = - permissions.check_read(&address_path, "Deno.listenDatagram()")?; - _ = permissions.check_write_path(&address_path, "Deno.listenDatagram()")?; + let address_path = permissions + .check_read(&address_path, "Deno.listenDatagram()") + .map_err(NetError::Permission)?; + _ = permissions + .check_write_path(&address_path, "Deno.listenDatagram()") + .map_err(NetError::Permission)?; let socket = 
UnixDatagram::bind(address_path)?; let local_addr = socket.local_addr()?; let pathname = local_addr.as_pathname().map(pathstring).transpose()?; @@ -222,7 +227,7 @@ where pub fn op_net_listen_unixpacket( state: &mut OpState, #[string] path: String, -) -> Result<(ResourceId, Option), AnyError> +) -> Result<(ResourceId, Option), NetError> where NP: NetPermissions + 'static, { @@ -235,13 +240,13 @@ where pub fn op_node_unstable_net_listen_unixpacket( state: &mut OpState, #[string] path: String, -) -> Result<(ResourceId, Option), AnyError> +) -> Result<(ResourceId, Option), NetError> where NP: NetPermissions + 'static, { net_listen_unixpacket::(state, path) } -pub fn pathstring(pathname: &Path) -> Result { +pub fn pathstring(pathname: &Path) -> Result { into_string(pathname.into()) } diff --git a/ext/net/resolve_addr.rs b/ext/net/resolve_addr.rs index 8bbdd5192..3a97081ea 100644 --- a/ext/net/resolve_addr.rs +++ b/ext/net/resolve_addr.rs @@ -1,6 +1,5 @@ // Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. 
-use deno_core::error::AnyError; use std::net::SocketAddr; use std::net::ToSocketAddrs; use tokio::net::lookup_host; @@ -9,7 +8,7 @@ use tokio::net::lookup_host; pub async fn resolve_addr( hostname: &str, port: u16, -) -> Result + '_, AnyError> { +) -> Result + '_, std::io::Error> { let addr_port_pair = make_addr_port_pair(hostname, port); let result = lookup_host(addr_port_pair).await?; Ok(result) @@ -19,7 +18,7 @@ pub async fn resolve_addr( pub fn resolve_addr_sync( hostname: &str, port: u16, -) -> Result, AnyError> { +) -> Result, std::io::Error> { let addr_port_pair = make_addr_port_pair(hostname, port); let result = addr_port_pair.to_socket_addrs()?; Ok(result) -- cgit v1.2.3 From 7c790da8260db24af12e700f191af551f4307476 Mon Sep 17 00:00:00 2001 From: Leo Kettmeir Date: Thu, 17 Oct 2024 10:59:02 -0700 Subject: refactor(ext/kv): use concrete error type (#26239) --- ext/kv/Cargo.toml | 1 + ext/kv/lib.rs | 288 ++++++++++++++++++++++++++++++++---------------------- 2 files changed, 174 insertions(+), 115 deletions(-) (limited to 'ext') diff --git a/ext/kv/Cargo.toml b/ext/kv/Cargo.toml index 8aee80c57..98c14dc31 100644 --- a/ext/kv/Cargo.toml +++ b/ext/kv/Cargo.toml @@ -36,6 +36,7 @@ prost.workspace = true rand.workspace = true rusqlite.workspace = true serde.workspace = true +thiserror.workspace = true url.workspace = true [build-dependencies] diff --git a/ext/kv/lib.rs b/ext/kv/lib.rs index 13e4f1662..a4ccfe3d6 100644 --- a/ext/kv/lib.rs +++ b/ext/kv/lib.rs @@ -12,15 +12,11 @@ use std::num::NonZeroU32; use std::rc::Rc; use std::time::Duration; -use anyhow::bail; use base64::prelude::BASE64_URL_SAFE; use base64::Engine; use chrono::DateTime; use chrono::Utc; -use deno_core::anyhow::Context; use deno_core::error::get_custom_error_class; -use deno_core::error::type_error; -use deno_core::error::AnyError; use deno_core::futures::StreamExt; use deno_core::op2; use deno_core::serde_v8::AnyValue; @@ -118,12 +114,72 @@ impl Resource for DatabaseWatcherResource { } } 
+#[derive(Debug, thiserror::Error)] +pub enum KvError { + #[error(transparent)] + DatabaseHandler(deno_core::error::AnyError), + #[error(transparent)] + Resource(deno_core::error::AnyError), + #[error("Too many ranges (max {0})")] + TooManyRanges(usize), + #[error("Too many entries (max {0})")] + TooManyEntries(usize), + #[error("Too many checks (max {0})")] + TooManyChecks(usize), + #[error("Too many mutations (max {0})")] + TooManyMutations(usize), + #[error("Too many keys (max {0})")] + TooManyKeys(usize), + #[error("limit must be greater than 0")] + InvalidLimit, + #[error("Invalid boundary key")] + InvalidBoundaryKey, + #[error("Key too large for read (max {0} bytes)")] + KeyTooLargeToRead(usize), + #[error("Key too large for write (max {0} bytes)")] + KeyTooLargeToWrite(usize), + #[error("Total mutation size too large (max {0} bytes)")] + TotalMutationTooLarge(usize), + #[error("Total key size too large (max {0} bytes)")] + TotalKeyTooLarge(usize), + #[error(transparent)] + Kv(deno_core::error::AnyError), + #[error(transparent)] + Io(#[from] std::io::Error), + #[error("Queue message not found")] + QueueMessageNotFound, + #[error("Start key is not in the keyspace defined by prefix")] + StartKeyNotInKeyspace, + #[error("End key is not in the keyspace defined by prefix")] + EndKeyNotInKeyspace, + #[error("Start key is greater than end key")] + StartKeyGreaterThanEndKey, + #[error("Invalid check")] + InvalidCheck(#[source] KvCheckError), + #[error("Invalid mutation")] + InvalidMutation(#[source] KvMutationError), + #[error("Invalid enqueue")] + InvalidEnqueue(#[source] std::io::Error), + #[error("key cannot be empty")] + EmptyKey, // TypeError + #[error("Value too large (max {0} bytes)")] + ValueTooLarge(usize), // TypeError + #[error("enqueue payload too large (max {0} bytes)")] + EnqueuePayloadTooLarge(usize), // TypeError + #[error("invalid cursor")] + InvalidCursor, + #[error("cursor out of bounds")] + CursorOutOfBounds, + #[error("Invalid range")] + 
InvalidRange, +} + #[op2(async)] #[smi] async fn op_kv_database_open( state: Rc>, #[string] path: Option, -) -> Result +) -> Result where DBH: DatabaseHandler + 'static, { @@ -134,7 +190,10 @@ where .check_or_exit(UNSTABLE_FEATURE_NAME, "Deno.openKv"); state.borrow::>().clone() }; - let db = handler.open(state.clone(), path).await?; + let db = handler + .open(state.clone(), path) + .await + .map_err(KvError::DatabaseHandler)?; let rid = state.borrow_mut().resource_table.add(DatabaseResource { db, cancel_handle: CancelHandle::new_rc(), @@ -184,8 +243,8 @@ enum ToV8Value { } impl TryFrom for KvValue { - type Error = AnyError; - fn try_from(value: FromV8Value) -> Result { + type Error = num_bigint::TryFromBigIntError; + fn try_from(value: FromV8Value) -> Result { Ok(match value { FromV8Value::V8(buf) => KvValue::V8(buf.to_vec()), FromV8Value::Bytes(buf) => KvValue::Bytes(buf.to_vec()), @@ -214,8 +273,8 @@ struct ToV8KvEntry { } impl TryFrom for ToV8KvEntry { - type Error = AnyError; - fn try_from(entry: KvEntry) -> Result { + type Error = std::io::Error; + fn try_from(entry: KvEntry) -> Result { Ok(ToV8KvEntry { key: decode_key(&entry.key)? 
.0 @@ -261,14 +320,16 @@ async fn op_kv_snapshot_read( #[smi] rid: ResourceId, #[serde] ranges: Vec, #[serde] consistency: V8Consistency, -) -> Result>, AnyError> +) -> Result>, KvError> where DBH: DatabaseHandler + 'static, { let db = { let state = state.borrow(); - let resource = - state.resource_table.get::>(rid)?; + let resource = state + .resource_table + .get::>(rid) + .map_err(KvError::Resource)?; resource.db.clone() }; @@ -278,10 +339,7 @@ where }; if ranges.len() > config.max_read_ranges { - return Err(type_error(format!( - "Too many ranges (max {})", - config.max_read_ranges - ))); + return Err(KvError::TooManyRanges(config.max_read_ranges)); } let mut total_entries = 0usize; @@ -300,33 +358,32 @@ where Ok(ReadRange { start, end, - limit: NonZeroU32::new(limit) - .with_context(|| "limit must be greater than 0")?, + limit: NonZeroU32::new(limit).ok_or(KvError::InvalidLimit)?, reverse, }) }) - .collect::, AnyError>>()?; + .collect::, KvError>>()?; if total_entries > config.max_read_entries { - return Err(type_error(format!( - "Too many entries (max {})", - config.max_read_entries - ))); + return Err(KvError::TooManyEntries(config.max_read_entries)); } let opts = SnapshotReadOptions { consistency: consistency.into(), }; - let output_ranges = db.snapshot_read(read_ranges, opts).await?; + let output_ranges = db + .snapshot_read(read_ranges, opts) + .await + .map_err(KvError::Kv)?; let output_ranges = output_ranges .into_iter() .map(|x| { x.entries .into_iter() .map(TryInto::try_into) - .collect::, AnyError>>() + .collect::, std::io::Error>>() }) - .collect::, AnyError>>()?; + .collect::, std::io::Error>>()?; Ok(output_ranges) } @@ -345,7 +402,7 @@ impl Resource for QueueMessageResource { async fn op_kv_dequeue_next_message( state: Rc>, #[smi] rid: ResourceId, -) -> Result, AnyError> +) -> Result, KvError> where DBH: DatabaseHandler + 'static, { @@ -358,17 +415,19 @@ where if get_custom_error_class(&err) == Some("BadResource") { return Ok(None); } else { - 
return Err(err); + return Err(KvError::Resource(err)); } } }; resource.db.clone() }; - let Some(mut handle) = db.dequeue_next_message().await? else { + let Some(mut handle) = + db.dequeue_next_message().await.map_err(KvError::Kv)? + else { return Ok(None); }; - let payload = handle.take_payload().await?.into(); + let payload = handle.take_payload().await.map_err(KvError::Kv)?.into(); let handle_rid = { let mut state = state.borrow_mut(); state.resource_table.add(QueueMessageResource { handle }) @@ -382,18 +441,18 @@ fn op_kv_watch( state: &mut OpState, #[smi] rid: ResourceId, #[serde] keys: Vec, -) -> Result +) -> Result where DBH: DatabaseHandler + 'static, { - let resource = state.resource_table.get::>(rid)?; + let resource = state + .resource_table + .get::>(rid) + .map_err(KvError::Resource)?; let config = state.borrow::>().clone(); if keys.len() > config.max_watched_keys { - return Err(type_error(format!( - "Too many keys (max {})", - config.max_watched_keys - ))); + return Err(KvError::TooManyKeys(config.max_watched_keys)); } let keys: Vec> = keys @@ -428,10 +487,13 @@ enum WatchEntry { async fn op_kv_watch_next( state: Rc>, #[smi] rid: ResourceId, -) -> Result>, AnyError> { +) -> Result>, KvError> { let resource = { let state = state.borrow(); - let resource = state.resource_table.get::(rid)?; + let resource = state + .resource_table + .get::(rid) + .map_err(KvError::Resource)?; resource.clone() }; @@ -457,7 +519,7 @@ async fn op_kv_watch_next( return Ok(None); }; - let entries = res?; + let entries = res.map_err(KvError::Kv)?; let entries = entries .into_iter() .map(|entry| { @@ -468,7 +530,7 @@ async fn op_kv_watch_next( WatchKeyOutput::Unchanged => WatchEntry::Unchanged, }) }) - .collect::>()?; + .collect::>()?; Ok(Some(entries)) } @@ -478,7 +540,7 @@ async fn op_kv_finish_dequeued_message( state: Rc>, #[smi] handle_rid: ResourceId, success: bool, -) -> Result<(), AnyError> +) -> Result<(), KvError> where DBH: DatabaseHandler + 'static, { @@ -487,9 +549,9 
@@ where let handle = state .resource_table .take::::DB as Database>::QMH>>(handle_rid) - .map_err(|_| type_error("Queue message not found"))?; + .map_err(|_| KvError::QueueMessageNotFound)?; Rc::try_unwrap(handle) - .map_err(|_| type_error("Queue message not found"))? + .map_err(|_| KvError::QueueMessageNotFound)? .handle }; // if we fail to finish the message, there is not much we can do and the @@ -500,32 +562,52 @@ where Ok(()) } +#[derive(Debug, thiserror::Error)] +pub enum KvCheckError { + #[error("invalid versionstamp")] + InvalidVersionstamp, + #[error(transparent)] + Io(std::io::Error), +} + type V8KvCheck = (KvKey, Option); -fn check_from_v8(value: V8KvCheck) -> Result { +fn check_from_v8(value: V8KvCheck) -> Result { let versionstamp = match value.1 { Some(data) => { let mut out = [0u8; 10]; if data.len() != out.len() * 2 { - bail!(type_error("invalid versionstamp")); + return Err(KvCheckError::InvalidVersionstamp); } faster_hex::hex_decode(&data, &mut out) - .map_err(|_| type_error("invalid versionstamp"))?; + .map_err(|_| KvCheckError::InvalidVersionstamp)?; Some(out) } None => None, }; Ok(Check { - key: encode_v8_key(value.0)?, + key: encode_v8_key(value.0).map_err(KvCheckError::Io)?, versionstamp, }) } +#[derive(Debug, thiserror::Error)] +pub enum KvMutationError { + #[error(transparent)] + BigInt(#[from] num_bigint::TryFromBigIntError), + #[error(transparent)] + Io(#[from] std::io::Error), + #[error("Invalid mutation '{0}' with value")] + InvalidMutationWithValue(String), + #[error("Invalid mutation '{0}' without value")] + InvalidMutationWithoutValue(String), +} + type V8KvMutation = (KvKey, String, Option, Option); fn mutation_from_v8( (value, current_timstamp): (V8KvMutation, DateTime), -) -> Result { +) -> Result { let key = encode_v8_key(value.0)?; let kind = match (value.1.as_str(), value.2) { ("set", Some(value)) => MutationKind::Set(value.try_into()?), @@ -542,10 +624,10 @@ fn mutation_from_v8( 
MutationKind::SetSuffixVersionstampedKey(value.try_into()?) } (op, Some(_)) => { - return Err(type_error(format!("Invalid mutation '{op}' with value"))) + return Err(KvMutationError::InvalidMutationWithValue(op.to_string())) } (op, None) => { - return Err(type_error(format!("Invalid mutation '{op}' without value"))) + return Err(KvMutationError::InvalidMutationWithoutValue(op.to_string())) } }; Ok(Mutation { @@ -562,7 +644,7 @@ type V8Enqueue = (JsBuffer, u64, Vec, Option>); fn enqueue_from_v8( value: V8Enqueue, current_timestamp: DateTime, -) -> Result { +) -> Result { Ok(Enqueue { payload: value.0.to_vec(), deadline: current_timestamp @@ -597,7 +679,7 @@ impl RawSelector { prefix: Option, start: Option, end: Option, - ) -> Result { + ) -> Result { let prefix = prefix.map(encode_v8_key).transpose()?; let start = start.map(encode_v8_key).transpose()?; let end = end.map(encode_v8_key).transpose()?; @@ -610,9 +692,7 @@ impl RawSelector { }), (Some(prefix), Some(start), None) => { if !start.starts_with(&prefix) || start.len() == prefix.len() { - return Err(type_error( - "Start key is not in the keyspace defined by prefix", - )); + return Err(KvError::StartKeyNotInKeyspace); } Ok(Self::Prefixed { prefix, @@ -622,9 +702,7 @@ impl RawSelector { } (Some(prefix), None, Some(end)) => { if !end.starts_with(&prefix) || end.len() == prefix.len() { - return Err(type_error( - "End key is not in the keyspace defined by prefix", - )); + return Err(KvError::EndKeyNotInKeyspace); } Ok(Self::Prefixed { prefix, @@ -634,7 +712,7 @@ impl RawSelector { } (None, Some(start), Some(end)) => { if start > end { - return Err(type_error("Start key is greater than end key")); + return Err(KvError::StartKeyGreaterThanEndKey); } Ok(Self::Range { start, end }) } @@ -642,7 +720,7 @@ impl RawSelector { let end = start.iter().copied().chain(Some(0)).collect(); Ok(Self::Range { start, end }) } - _ => Err(type_error("Invalid range")), + _ => Err(KvError::InvalidRange), } } @@ -701,10 +779,10 @@ fn 
common_prefix_for_bytes<'a>(a: &'a [u8], b: &'a [u8]) -> &'a [u8] { fn encode_cursor( selector: &RawSelector, boundary_key: &[u8], -) -> Result { +) -> Result { let common_prefix = selector.common_prefix(); if !boundary_key.starts_with(common_prefix) { - return Err(type_error("Invalid boundary key")); + return Err(KvError::InvalidBoundaryKey); } Ok(BASE64_URL_SAFE.encode(&boundary_key[common_prefix.len()..])) } @@ -713,7 +791,7 @@ fn decode_selector_and_cursor( selector: &RawSelector, reverse: bool, cursor: Option<&ByteString>, -) -> Result<(Vec, Vec), AnyError> { +) -> Result<(Vec, Vec), KvError> { let Some(cursor) = cursor else { return Ok((selector.range_start_key(), selector.range_end_key())); }; @@ -721,7 +799,7 @@ fn decode_selector_and_cursor( let common_prefix = selector.common_prefix(); let cursor = BASE64_URL_SAFE .decode(cursor) - .map_err(|_| type_error("invalid cursor"))?; + .map_err(|_| KvError::InvalidCursor)?; let first_key: Vec; let last_key: Vec; @@ -746,13 +824,13 @@ fn decode_selector_and_cursor( // Defend against out-of-bounds reading if let Some(start) = selector.start() { if &first_key[..] < start { - return Err(type_error("cursor out of bounds")); + return Err(KvError::CursorOutOfBounds); } } if let Some(end) = selector.end() { if &last_key[..] 
> end { - return Err(type_error("cursor out of bounds")); + return Err(KvError::CursorOutOfBounds); } } @@ -767,15 +845,17 @@ async fn op_kv_atomic_write( #[serde] checks: Vec, #[serde] mutations: Vec, #[serde] enqueues: Vec, -) -> Result, AnyError> +) -> Result, KvError> where DBH: DatabaseHandler + 'static, { let current_timestamp = chrono::Utc::now(); let db = { let state = state.borrow(); - let resource = - state.resource_table.get::>(rid)?; + let resource = state + .resource_table + .get::>(rid) + .map_err(KvError::Resource)?; resource.db.clone() }; @@ -785,34 +865,28 @@ where }; if checks.len() > config.max_checks { - return Err(type_error(format!( - "Too many checks (max {})", - config.max_checks - ))); + return Err(KvError::TooManyChecks(config.max_checks)); } if mutations.len() + enqueues.len() > config.max_mutations { - return Err(type_error(format!( - "Too many mutations (max {})", - config.max_mutations - ))); + return Err(KvError::TooManyMutations(config.max_mutations)); } let checks = checks .into_iter() .map(check_from_v8) - .collect::, AnyError>>() - .with_context(|| "invalid check")?; + .collect::, KvCheckError>>() + .map_err(KvError::InvalidCheck)?; let mutations = mutations .into_iter() .map(|mutation| mutation_from_v8((mutation, current_timestamp))) - .collect::, AnyError>>() - .with_context(|| "Invalid mutation")?; + .collect::, KvMutationError>>() + .map_err(KvError::InvalidMutation)?; let enqueues = enqueues .into_iter() .map(|e| enqueue_from_v8(e, current_timestamp)) - .collect::, AnyError>>() - .with_context(|| "invalid enqueue")?; + .collect::, std::io::Error>>() + .map_err(KvError::InvalidEnqueue)?; let mut total_payload_size = 0usize; let mut total_key_size = 0usize; @@ -823,7 +897,7 @@ where .chain(mutations.iter().map(|m| &m.key)) { if key.is_empty() { - return Err(type_error("key cannot be empty")); + return Err(KvError::EmptyKey); } total_payload_size += check_write_key_size(key, &config)?; @@ -847,17 +921,13 @@ where } if 
total_payload_size > config.max_total_mutation_size_bytes { - return Err(type_error(format!( - "Total mutation size too large (max {} bytes)", - config.max_total_mutation_size_bytes - ))); + return Err(KvError::TotalMutationTooLarge( + config.max_total_mutation_size_bytes, + )); } if total_key_size > config.max_total_key_size_bytes { - return Err(type_error(format!( - "Total key size too large (max {} bytes)", - config.max_total_key_size_bytes - ))); + return Err(KvError::TotalKeyTooLarge(config.max_total_key_size_bytes)); } let atomic_write = AtomicWrite { @@ -866,7 +936,7 @@ where enqueues, }; - let result = db.atomic_write(atomic_write).await?; + let result = db.atomic_write(atomic_write).await.map_err(KvError::Kv)?; Ok(result.map(|res| faster_hex::hex_string(&res.versionstamp))) } @@ -879,19 +949,16 @@ type EncodeCursorRangeSelector = (Option, Option, Option); fn op_kv_encode_cursor( #[serde] (prefix, start, end): EncodeCursorRangeSelector, #[serde] boundary_key: KvKey, -) -> Result { +) -> Result { let selector = RawSelector::from_tuple(prefix, start, end)?; let boundary_key = encode_v8_key(boundary_key)?; let cursor = encode_cursor(&selector, &boundary_key)?; Ok(cursor) } -fn check_read_key_size(key: &[u8], config: &KvConfig) -> Result<(), AnyError> { +fn check_read_key_size(key: &[u8], config: &KvConfig) -> Result<(), KvError> { if key.len() > config.max_read_key_size_bytes { - Err(type_error(format!( - "Key too large for read (max {} bytes)", - config.max_read_key_size_bytes - ))) + Err(KvError::KeyTooLargeToRead(config.max_read_key_size_bytes)) } else { Ok(()) } @@ -900,12 +967,9 @@ fn check_read_key_size(key: &[u8], config: &KvConfig) -> Result<(), AnyError> { fn check_write_key_size( key: &[u8], config: &KvConfig, -) -> Result { +) -> Result { if key.len() > config.max_write_key_size_bytes { - Err(type_error(format!( - "Key too large for write (max {} bytes)", - config.max_write_key_size_bytes - ))) + 
Err(KvError::KeyTooLargeToWrite(config.max_write_key_size_bytes)) } else { Ok(key.len()) } @@ -914,7 +978,7 @@ fn check_write_key_size( fn check_value_size( value: &KvValue, config: &KvConfig, -) -> Result { +) -> Result { let payload = match value { KvValue::Bytes(x) => x, KvValue::V8(x) => x, @@ -922,10 +986,7 @@ fn check_value_size( }; if payload.len() > config.max_value_size_bytes { - Err(type_error(format!( - "Value too large (max {} bytes)", - config.max_value_size_bytes - ))) + Err(KvError::ValueTooLarge(config.max_value_size_bytes)) } else { Ok(payload.len()) } @@ -934,12 +995,9 @@ fn check_value_size( fn check_enqueue_payload_size( payload: &[u8], config: &KvConfig, -) -> Result { +) -> Result { if payload.len() > config.max_value_size_bytes { - Err(type_error(format!( - "enqueue payload too large (max {} bytes)", - config.max_value_size_bytes - ))) + Err(KvError::EnqueuePayloadTooLarge(config.max_value_size_bytes)) } else { Ok(payload.len()) } -- cgit v1.2.3 From eca83fc9b45ab1e5a73bd7b13b05ee42ab1a4dcc Mon Sep 17 00:00:00 2001 From: Leo Kettmeir Date: Thu, 17 Oct 2024 12:05:38 -0700 Subject: refactor(ext/web): use concrete error types (#26185) --- ext/crypto/lib.rs | 5 +- ext/web/Cargo.toml | 1 + ext/web/blob.rs | 60 +++++++++++-------- ext/web/compression.rs | 64 +++++++++++--------- ext/web/lib.rs | 146 +++++++++++++++------------------------------ ext/web/message_port.rs | 46 +++++++++----- ext/web/stream_resource.rs | 36 +++++++---- 7 files changed, 178 insertions(+), 180 deletions(-) (limited to 'ext') diff --git a/ext/crypto/lib.rs b/ext/crypto/lib.rs index c96029bf4..b5bafc580 100644 --- a/ext/crypto/lib.rs +++ b/ext/crypto/lib.rs @@ -149,10 +149,7 @@ pub fn op_crypto_get_random_values( #[buffer] out: &mut [u8], ) -> Result<(), AnyError> { if out.len() > 65536 { - return Err( - deno_web::DomExceptionQuotaExceededError::new(&format!("The ArrayBufferView's byte length ({}) exceeds the number of bytes of entropy available via this API (65536)", 
out.len())) - .into(), - ); + return Err(custom_error("DOMExceptionQuotaExceededError", format!("The ArrayBufferView's byte length ({}) exceeds the number of bytes of entropy available via this API (65536)", out.len()))); } let maybe_seeded_rng = state.try_borrow_mut::(); diff --git a/ext/web/Cargo.toml b/ext/web/Cargo.toml index 3d01e573d..ad44f5320 100644 --- a/ext/web/Cargo.toml +++ b/ext/web/Cargo.toml @@ -23,6 +23,7 @@ encoding_rs.workspace = true flate2 = { workspace = true, features = ["default"] } futures.workspace = true serde = "1.0.149" +thiserror.workspace = true tokio.workspace = true uuid = { workspace = true, features = ["serde"] } diff --git a/ext/web/blob.rs b/ext/web/blob.rs index 392f36acb..bc64a0f27 100644 --- a/ext/web/blob.rs +++ b/ext/web/blob.rs @@ -7,8 +7,6 @@ use std::rc::Rc; use std::sync::Arc; use async_trait::async_trait; -use deno_core::error::type_error; -use deno_core::error::AnyError; use deno_core::op2; use deno_core::parking_lot::Mutex; use deno_core::url::Url; @@ -19,6 +17,18 @@ use serde::Deserialize; use serde::Serialize; use uuid::Uuid; +#[derive(Debug, thiserror::Error)] +pub enum BlobError { + #[error("Blob part not found")] + BlobPartNotFound, + #[error("start + len can not be larger than blob part size")] + SizeLargerThanBlobPart, + #[error("Blob URLs are not supported in this context")] + BlobURLsNotSupported, + #[error(transparent)] + Url(#[from] deno_core::url::ParseError), +} + use crate::Location; pub type PartMap = HashMap>; @@ -96,18 +106,18 @@ pub struct Blob { impl Blob { // TODO(lucacsonato): this should be a stream! 
- pub async fn read_all(&self) -> Result, AnyError> { + pub async fn read_all(&self) -> Vec { let size = self.size(); let mut bytes = Vec::with_capacity(size); for part in &self.parts { - let chunk = part.read().await?; + let chunk = part.read().await; bytes.extend_from_slice(chunk); } assert_eq!(bytes.len(), size); - Ok(bytes) + bytes } fn size(&self) -> usize { @@ -122,7 +132,7 @@ impl Blob { #[async_trait] pub trait BlobPart: Debug { // TODO(lucacsonato): this should be a stream! - async fn read(&self) -> Result<&[u8], AnyError>; + async fn read(&self) -> &[u8]; fn size(&self) -> usize; } @@ -137,8 +147,8 @@ impl From> for InMemoryBlobPart { #[async_trait] impl BlobPart for InMemoryBlobPart { - async fn read(&self) -> Result<&[u8], AnyError> { - Ok(&self.0) + async fn read(&self) -> &[u8] { + &self.0 } fn size(&self) -> usize { @@ -155,9 +165,9 @@ pub struct SlicedBlobPart { #[async_trait] impl BlobPart for SlicedBlobPart { - async fn read(&self) -> Result<&[u8], AnyError> { - let original = self.part.read().await?; - Ok(&original[self.start..self.start + self.len]) + async fn read(&self) -> &[u8] { + let original = self.part.read().await; + &original[self.start..self.start + self.len] } fn size(&self) -> usize { @@ -189,19 +199,17 @@ pub fn op_blob_slice_part( state: &mut OpState, #[serde] id: Uuid, #[serde] options: SliceOptions, -) -> Result { +) -> Result { let blob_store = state.borrow::>(); let part = blob_store .get_part(&id) - .ok_or_else(|| type_error("Blob part not found"))?; + .ok_or(BlobError::BlobPartNotFound)?; let SliceOptions { start, len } = options; let size = part.size(); if start + len > size { - return Err(type_error( - "start + len can not be larger than blob part size", - )); + return Err(BlobError::SizeLargerThanBlobPart); } let sliced_part = SlicedBlobPart { part, start, len }; @@ -215,14 +223,14 @@ pub fn op_blob_slice_part( pub async fn op_blob_read_part( state: Rc>, #[serde] id: Uuid, -) -> Result { +) -> Result { let part = { let 
state = state.borrow(); let blob_store = state.borrow::>(); blob_store.get_part(&id) } - .ok_or_else(|| type_error("Blob part not found"))?; - let buf = part.read().await?; + .ok_or(BlobError::BlobPartNotFound)?; + let buf = part.read().await; Ok(ToJsBuffer::from(buf.to_vec())) } @@ -238,13 +246,13 @@ pub fn op_blob_create_object_url( state: &mut OpState, #[string] media_type: String, #[serde] part_ids: Vec, -) -> Result { +) -> Result { let mut parts = Vec::with_capacity(part_ids.len()); let blob_store = state.borrow::>(); for part_id in part_ids { let part = blob_store .get_part(&part_id) - .ok_or_else(|| type_error("Blob part not found"))?; + .ok_or(BlobError::BlobPartNotFound)?; parts.push(part); } @@ -263,7 +271,7 @@ pub fn op_blob_create_object_url( pub fn op_blob_revoke_object_url( state: &mut OpState, #[string] url: &str, -) -> Result<(), AnyError> { +) -> Result<(), BlobError> { let url = Url::parse(url)?; let blob_store = state.borrow::>(); blob_store.remove_object_url(&url); @@ -287,15 +295,15 @@ pub struct ReturnBlobPart { pub fn op_blob_from_object_url( state: &mut OpState, #[string] url: String, -) -> Result, AnyError> { +) -> Result, BlobError> { let url = Url::parse(&url)?; if url.scheme() != "blob" { return Ok(None); } - let blob_store = state.try_borrow::>().ok_or_else(|| { - type_error("Blob URLs are not supported in this context.") - })?; + let blob_store = state + .try_borrow::>() + .ok_or(BlobError::BlobURLsNotSupported)?; if let Some(blob) = blob_store.get_object_url(url) { let parts = blob .parts diff --git a/ext/web/compression.rs b/ext/web/compression.rs index b9ae12ef1..696700991 100644 --- a/ext/web/compression.rs +++ b/ext/web/compression.rs @@ -1,7 +1,5 @@ // Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. 
-use deno_core::error::type_error; -use deno_core::error::AnyError; use deno_core::op2; use flate2::write::DeflateDecoder; use flate2::write::DeflateEncoder; @@ -13,6 +11,18 @@ use flate2::Compression; use std::cell::RefCell; use std::io::Write; +#[derive(Debug, thiserror::Error)] +pub enum CompressionError { + #[error("Unsupported format")] + UnsupportedFormat, + #[error("resource is closed")] + ResourceClosed, + #[error(transparent)] + IoTypeError(std::io::Error), + #[error(transparent)] + Io(std::io::Error), +} + #[derive(Debug)] struct CompressionResource(RefCell>); @@ -34,7 +44,7 @@ enum Inner { pub fn op_compression_new( #[string] format: &str, is_decoder: bool, -) -> Result { +) -> Result { let w = Vec::new(); let inner = match (format, is_decoder) { ("deflate", true) => Inner::DeflateDecoder(ZlibDecoder::new(w)), @@ -49,7 +59,7 @@ pub fn op_compression_new( ("gzip", false) => { Inner::GzEncoder(GzEncoder::new(w, Compression::default())) } - _ => return Err(type_error("Unsupported format")), + _ => return Err(CompressionError::UnsupportedFormat), }; Ok(CompressionResource(RefCell::new(Some(inner)))) } @@ -59,40 +69,38 @@ pub fn op_compression_new( pub fn op_compression_write( #[cppgc] resource: &CompressionResource, #[anybuffer] input: &[u8], -) -> Result, AnyError> { +) -> Result, CompressionError> { let mut inner = resource.0.borrow_mut(); - let inner = inner - .as_mut() - .ok_or_else(|| type_error("resource is closed"))?; + let inner = inner.as_mut().ok_or(CompressionError::ResourceClosed)?; let out: Vec = match &mut *inner { Inner::DeflateDecoder(d) => { - d.write_all(input).map_err(|e| type_error(e.to_string()))?; - d.flush()?; + d.write_all(input).map_err(CompressionError::IoTypeError)?; + d.flush().map_err(CompressionError::Io)?; d.get_mut().drain(..) 
} Inner::DeflateEncoder(d) => { - d.write_all(input).map_err(|e| type_error(e.to_string()))?; - d.flush()?; + d.write_all(input).map_err(CompressionError::IoTypeError)?; + d.flush().map_err(CompressionError::Io)?; d.get_mut().drain(..) } Inner::DeflateRawDecoder(d) => { - d.write_all(input).map_err(|e| type_error(e.to_string()))?; - d.flush()?; + d.write_all(input).map_err(CompressionError::IoTypeError)?; + d.flush().map_err(CompressionError::Io)?; d.get_mut().drain(..) } Inner::DeflateRawEncoder(d) => { - d.write_all(input).map_err(|e| type_error(e.to_string()))?; - d.flush()?; + d.write_all(input).map_err(CompressionError::IoTypeError)?; + d.flush().map_err(CompressionError::Io)?; d.get_mut().drain(..) } Inner::GzDecoder(d) => { - d.write_all(input).map_err(|e| type_error(e.to_string()))?; - d.flush()?; + d.write_all(input).map_err(CompressionError::IoTypeError)?; + d.flush().map_err(CompressionError::Io)?; d.get_mut().drain(..) } Inner::GzEncoder(d) => { - d.write_all(input).map_err(|e| type_error(e.to_string()))?; - d.flush()?; + d.write_all(input).map_err(CompressionError::IoTypeError)?; + d.flush().map_err(CompressionError::Io)?; d.get_mut().drain(..) 
} } @@ -105,27 +113,27 @@ pub fn op_compression_write( pub fn op_compression_finish( #[cppgc] resource: &CompressionResource, report_errors: bool, -) -> Result, AnyError> { +) -> Result, CompressionError> { let inner = resource .0 .borrow_mut() .take() - .ok_or_else(|| type_error("resource is closed"))?; + .ok_or(CompressionError::ResourceClosed)?; let out = match inner { Inner::DeflateDecoder(d) => { - d.finish().map_err(|e| type_error(e.to_string())) + d.finish().map_err(CompressionError::IoTypeError) } Inner::DeflateEncoder(d) => { - d.finish().map_err(|e| type_error(e.to_string())) + d.finish().map_err(CompressionError::IoTypeError) } Inner::DeflateRawDecoder(d) => { - d.finish().map_err(|e| type_error(e.to_string())) + d.finish().map_err(CompressionError::IoTypeError) } Inner::DeflateRawEncoder(d) => { - d.finish().map_err(|e| type_error(e.to_string())) + d.finish().map_err(CompressionError::IoTypeError) } - Inner::GzDecoder(d) => d.finish().map_err(|e| type_error(e.to_string())), - Inner::GzEncoder(d) => d.finish().map_err(|e| type_error(e.to_string())), + Inner::GzDecoder(d) => d.finish().map_err(CompressionError::IoTypeError), + Inner::GzEncoder(d) => d.finish().map_err(CompressionError::IoTypeError), }; match out { Err(err) => { diff --git a/ext/web/lib.rs b/ext/web/lib.rs index 3977379a5..4935af5bd 100644 --- a/ext/web/lib.rs +++ b/ext/web/lib.rs @@ -6,9 +6,6 @@ mod message_port; mod stream_resource; mod timers; -use deno_core::error::range_error; -use deno_core::error::type_error; -use deno_core::error::AnyError; use deno_core::op2; use deno_core::url::Url; use deno_core::v8; @@ -22,10 +19,14 @@ use encoding_rs::DecoderResult; use encoding_rs::Encoding; use std::borrow::Cow; use std::cell::RefCell; -use std::fmt; use std::path::PathBuf; use std::sync::Arc; +pub use blob::BlobError; +pub use compression::CompressionError; +pub use message_port::MessagePortError; +pub use stream_resource::StreamResourceError; + use crate::blob::op_blob_create_object_url; 
use crate::blob::op_blob_create_part; use crate::blob::op_blob_from_object_url; @@ -126,9 +127,27 @@ deno_core::extension!(deno_web, } ); +#[derive(Debug, thiserror::Error)] +pub enum WebError { + #[error("Failed to decode base64")] + Base64Decode, + #[error("The encoding label provided ('{0}') is invalid.")] + InvalidEncodingLabel(String), + #[error("buffer exceeds maximum length")] + BufferTooLong, + #[error("Value too large to decode")] + ValueTooLarge, + #[error("Provided buffer too small")] + BufferTooSmall, + #[error("The encoded data is not valid")] + DataInvalid, + #[error(transparent)] + DataError(#[from] v8::DataError), +} + #[op2] #[serde] -fn op_base64_decode(#[string] input: String) -> Result { +fn op_base64_decode(#[string] input: String) -> Result { let mut s = input.into_bytes(); let decoded_len = forgiving_base64_decode_inplace(&mut s)?; s.truncate(decoded_len); @@ -137,7 +156,7 @@ fn op_base64_decode(#[string] input: String) -> Result { #[op2] #[serde] -fn op_base64_atob(#[serde] mut s: ByteString) -> Result { +fn op_base64_atob(#[serde] mut s: ByteString) -> Result { let decoded_len = forgiving_base64_decode_inplace(&mut s)?; s.truncate(decoded_len); Ok(s) @@ -147,11 +166,9 @@ fn op_base64_atob(#[serde] mut s: ByteString) -> Result { #[inline] fn forgiving_base64_decode_inplace( input: &mut [u8], -) -> Result { - let error = - || DomExceptionInvalidCharacterError::new("Failed to decode base64"); - let decoded = - base64_simd::forgiving_decode_inplace(input).map_err(|_| error())?; +) -> Result { + let decoded = base64_simd::forgiving_decode_inplace(input) + .map_err(|_| WebError::Base64Decode)?; Ok(decoded.len()) } @@ -177,13 +194,9 @@ fn forgiving_base64_encode(s: &[u8]) -> String { #[string] fn op_encoding_normalize_label( #[string] label: String, -) -> Result { +) -> Result { let encoding = Encoding::for_label_no_replacement(label.as_bytes()) - .ok_or_else(|| { - range_error(format!( - "The encoding label provided ('{label}') is invalid." 
- )) - })?; + .ok_or(WebError::InvalidEncodingLabel(label))?; Ok(encoding.name().to_lowercase()) } @@ -192,7 +205,7 @@ fn op_encoding_decode_utf8<'a>( scope: &mut v8::HandleScope<'a>, #[anybuffer] zero_copy: &[u8], ignore_bom: bool, -) -> Result, AnyError> { +) -> Result, WebError> { let buf = &zero_copy; let buf = if !ignore_bom @@ -216,7 +229,7 @@ fn op_encoding_decode_utf8<'a>( // - https://github.com/v8/v8/blob/d68fb4733e39525f9ff0a9222107c02c28096e2a/include/v8.h#L3277-L3278 match v8::String::new_from_utf8(scope, buf, v8::NewStringType::Normal) { Some(text) => Ok(text), - None => Err(type_error("buffer exceeds maximum length")), + None => Err(WebError::BufferTooLong), } } @@ -227,12 +240,9 @@ fn op_encoding_decode_single( #[string] label: String, fatal: bool, ignore_bom: bool, -) -> Result { - let encoding = Encoding::for_label(label.as_bytes()).ok_or_else(|| { - range_error(format!( - "The encoding label provided ('{label}') is invalid." - )) - })?; +) -> Result { + let encoding = Encoding::for_label(label.as_bytes()) + .ok_or(WebError::InvalidEncodingLabel(label))?; let mut decoder = if ignore_bom { encoding.new_decoder_without_bom_handling() @@ -242,7 +252,7 @@ fn op_encoding_decode_single( let max_buffer_length = decoder .max_utf16_buffer_length(data.len()) - .ok_or_else(|| range_error("Value too large to decode."))?; + .ok_or(WebError::ValueTooLarge)?; let mut output = vec![0; max_buffer_length]; @@ -254,12 +264,8 @@ fn op_encoding_decode_single( output.truncate(written); Ok(output.into()) } - DecoderResult::OutputFull => { - Err(range_error("Provided buffer too small.")) - } - DecoderResult::Malformed(_, _) => { - Err(type_error("The encoded data is not valid.")) - } + DecoderResult::OutputFull => Err(WebError::BufferTooSmall), + DecoderResult::Malformed(_, _) => Err(WebError::DataInvalid), } } else { let (result, _, written, _) = @@ -269,7 +275,7 @@ fn op_encoding_decode_single( output.truncate(written); Ok(output.into()) } - CoderResult::OutputFull => 
Err(range_error("Provided buffer too small.")), + CoderResult::OutputFull => Err(WebError::BufferTooSmall), } } } @@ -280,12 +286,9 @@ fn op_encoding_new_decoder( #[string] label: &str, fatal: bool, ignore_bom: bool, -) -> Result { - let encoding = Encoding::for_label(label.as_bytes()).ok_or_else(|| { - range_error(format!( - "The encoding label provided ('{label}') is invalid." - )) - })?; +) -> Result { + let encoding = Encoding::for_label(label.as_bytes()) + .ok_or_else(|| WebError::InvalidEncodingLabel(label.to_string()))?; let decoder = if ignore_bom { encoding.new_decoder_without_bom_handling() @@ -305,13 +308,13 @@ fn op_encoding_decode( #[anybuffer] data: &[u8], #[cppgc] resource: &TextDecoderResource, stream: bool, -) -> Result { +) -> Result { let mut decoder = resource.decoder.borrow_mut(); let fatal = resource.fatal; let max_buffer_length = decoder .max_utf16_buffer_length(data.len()) - .ok_or_else(|| range_error("Value too large to decode."))?; + .ok_or(WebError::ValueTooLarge)?; let mut output = vec![0; max_buffer_length]; @@ -323,12 +326,8 @@ fn op_encoding_decode( output.truncate(written); Ok(output.into()) } - DecoderResult::OutputFull => { - Err(range_error("Provided buffer too small.")) - } - DecoderResult::Malformed(_, _) => { - Err(type_error("The encoded data is not valid.")) - } + DecoderResult::OutputFull => Err(WebError::BufferTooSmall), + DecoderResult::Malformed(_, _) => Err(WebError::DataInvalid), } } else { let (result, _, written, _) = @@ -338,7 +337,7 @@ fn op_encoding_decode( output.truncate(written); Ok(output.into()) } - CoderResult::OutputFull => Err(range_error("Provided buffer too small.")), + CoderResult::OutputFull => Err(WebError::BufferTooSmall), } } } @@ -356,7 +355,7 @@ fn op_encoding_encode_into( input: v8::Local, #[buffer] buffer: &mut [u8], #[buffer] out_buf: &mut [u32], -) -> Result<(), AnyError> { +) -> Result<(), WebError> { let s = v8::Local::::try_from(input)?; let mut nchars = 0; @@ -414,53 +413,4 @@ pub fn 
get_declaration() -> PathBuf { PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("lib.deno_web.d.ts") } -#[derive(Debug)] -pub struct DomExceptionQuotaExceededError { - pub msg: String, -} - -impl DomExceptionQuotaExceededError { - pub fn new(msg: &str) -> Self { - DomExceptionQuotaExceededError { - msg: msg.to_string(), - } - } -} - -#[derive(Debug)] -pub struct DomExceptionInvalidCharacterError { - pub msg: String, -} - -impl DomExceptionInvalidCharacterError { - pub fn new(msg: &str) -> Self { - DomExceptionInvalidCharacterError { - msg: msg.to_string(), - } - } -} - -impl fmt::Display for DomExceptionQuotaExceededError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.pad(&self.msg) - } -} -impl fmt::Display for DomExceptionInvalidCharacterError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.pad(&self.msg) - } -} - -impl std::error::Error for DomExceptionQuotaExceededError {} - -impl std::error::Error for DomExceptionInvalidCharacterError {} - -pub fn get_error_class_name(e: &AnyError) -> Option<&'static str> { - e.downcast_ref::() - .map(|_| "DOMExceptionQuotaExceededError") - .or_else(|| { - e.downcast_ref::() - .map(|_| "DOMExceptionInvalidCharacterError") - }) -} pub struct Location(pub Url); diff --git a/ext/web/message_port.rs b/ext/web/message_port.rs index fa299475d..1a4a09073 100644 --- a/ext/web/message_port.rs +++ b/ext/web/message_port.rs @@ -4,8 +4,6 @@ use std::borrow::Cow; use std::cell::RefCell; use std::rc::Rc; -use deno_core::error::type_error; -use deno_core::error::AnyError; use deno_core::op2; use deno_core::CancelFuture; @@ -23,6 +21,20 @@ use tokio::sync::mpsc::unbounded_channel; use tokio::sync::mpsc::UnboundedReceiver; use tokio::sync::mpsc::UnboundedSender; +#[derive(Debug, thiserror::Error)] +pub enum MessagePortError { + #[error("Invalid message port transfer")] + InvalidTransfer, + #[error("Message port is not ready for transfer")] + NotReady, + #[error("Can not transfer self message port")] + TransferSelf, 
+ #[error(transparent)] + Canceled(#[from] deno_core::Canceled), + #[error(transparent)] + Resource(deno_core::error::AnyError), +} + pub enum Transferable { MessagePort(MessagePort), ArrayBuffer(u32), @@ -40,7 +52,7 @@ impl MessagePort { &self, state: &mut OpState, data: JsMessageData, - ) -> Result<(), AnyError> { + ) -> Result<(), MessagePortError> { let transferables = deserialize_js_transferables(state, data.transferables)?; @@ -56,7 +68,7 @@ impl MessagePort { pub async fn recv( &self, state: Rc>, - ) -> Result, AnyError> { + ) -> Result, MessagePortError> { let rx = &self.rx; let maybe_data = poll_fn(|cx| { @@ -147,7 +159,7 @@ pub enum JsTransferable { pub fn deserialize_js_transferables( state: &mut OpState, js_transferables: Vec, -) -> Result, AnyError> { +) -> Result, MessagePortError> { let mut transferables = Vec::with_capacity(js_transferables.len()); for js_transferable in js_transferables { match js_transferable { @@ -155,10 +167,10 @@ pub fn deserialize_js_transferables( let resource = state .resource_table .take::(id) - .map_err(|_| type_error("Invalid message port transfer"))?; + .map_err(|_| MessagePortError::InvalidTransfer)?; resource.cancel.cancel(); - let resource = Rc::try_unwrap(resource) - .map_err(|_| type_error("Message port is not ready for transfer"))?; + let resource = + Rc::try_unwrap(resource).map_err(|_| MessagePortError::NotReady)?; transferables.push(Transferable::MessagePort(resource.port)); } JsTransferable::ArrayBuffer(id) => { @@ -202,16 +214,19 @@ pub fn op_message_port_post_message( state: &mut OpState, #[smi] rid: ResourceId, #[serde] data: JsMessageData, -) -> Result<(), AnyError> { +) -> Result<(), MessagePortError> { for js_transferable in &data.transferables { if let JsTransferable::MessagePort(id) = js_transferable { if *id == rid { - return Err(type_error("Can not transfer self message port")); + return Err(MessagePortError::TransferSelf); } } } - let resource = state.resource_table.get::(rid)?; + let resource = 
state + .resource_table + .get::(rid) + .map_err(MessagePortError::Resource)?; resource.port.send(state, data) } @@ -220,7 +235,7 @@ pub fn op_message_port_post_message( pub async fn op_message_port_recv_message( state: Rc>, #[smi] rid: ResourceId, -) -> Result, AnyError> { +) -> Result, MessagePortError> { let resource = { let state = state.borrow(); match state.resource_table.get::(rid) { @@ -237,8 +252,11 @@ pub async fn op_message_port_recv_message( pub fn op_message_port_recv_message_sync( state: &mut OpState, // Rc>, #[smi] rid: ResourceId, -) -> Result, AnyError> { - let resource = state.resource_table.get::(rid)?; +) -> Result, MessagePortError> { + let resource = state + .resource_table + .get::(rid) + .map_err(MessagePortError::Resource)?; let mut rx = resource.port.rx.borrow_mut(); match rx.try_recv() { diff --git a/ext/web/stream_resource.rs b/ext/web/stream_resource.rs index 78487883b..c44a385ea 100644 --- a/ext/web/stream_resource.rs +++ b/ext/web/stream_resource.rs @@ -1,7 +1,5 @@ // Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. use bytes::BytesMut; -use deno_core::error::type_error; -use deno_core::error::AnyError; use deno_core::external; use deno_core::op2; use deno_core::serde_v8::V8Slice; @@ -18,6 +16,7 @@ use deno_core::RcRef; use deno_core::Resource; use deno_core::ResourceId; use futures::future::poll_fn; +use futures::TryFutureExt; use std::borrow::Cow; use std::cell::RefCell; use std::cell::RefMut; @@ -31,6 +30,14 @@ use std::task::Context; use std::task::Poll; use std::task::Waker; +#[derive(Debug, thiserror::Error)] +pub enum StreamResourceError { + #[error(transparent)] + Canceled(#[from] deno_core::Canceled), + #[error("{0}")] + Js(String), +} + // How many buffers we'll allow in the channel before we stop allowing writes. 
const BUFFER_CHANNEL_SIZE: u16 = 1024; @@ -48,7 +55,7 @@ struct BoundedBufferChannelInner { buffers: [MaybeUninit>; BUFFER_CHANNEL_SIZE as _], ring_producer: u16, ring_consumer: u16, - error: Option, + error: Option, current_size: usize, // TODO(mmastrac): we can math this field instead of accounting for it len: usize, @@ -141,7 +148,10 @@ impl BoundedBufferChannelInner { self.len = 0; } - pub fn read(&mut self, limit: usize) -> Result, AnyError> { + pub fn read( + &mut self, + limit: usize, + ) -> Result, StreamResourceError> { // Empty buffers will return the error, if one exists, or None if self.len == 0 { if let Some(error) = self.error.take() { @@ -230,7 +240,7 @@ impl BoundedBufferChannelInner { Ok(()) } - pub fn write_error(&mut self, error: AnyError) { + pub fn write_error(&mut self, error: StreamResourceError) { self.error = Some(error); if let Some(waker) = self.read_waker.take() { waker.wake(); @@ -306,7 +316,10 @@ impl BoundedBufferChannel { self.inner.borrow_mut() } - pub fn read(&self, limit: usize) -> Result, AnyError> { + pub fn read( + &self, + limit: usize, + ) -> Result, StreamResourceError> { self.inner().read(limit) } @@ -314,7 +327,7 @@ impl BoundedBufferChannel { self.inner().write(buffer) } - pub fn write_error(&self, error: AnyError) { + pub fn write_error(&self, error: StreamResourceError) { self.inner().write_error(error) } @@ -358,7 +371,10 @@ impl ReadableStreamResource { RcRef::map(self, |s| &s.cancel_handle).clone() } - async fn read(self: Rc, limit: usize) -> Result { + async fn read( + self: Rc, + limit: usize, + ) -> Result { let cancel_handle = self.cancel_handle(); // Serialize all the reads using a task queue. 
let _read_permit = self.read_queue.acquire().await; @@ -387,7 +403,7 @@ impl Resource for ReadableStreamResource { } fn read(self: Rc, limit: usize) -> AsyncResult { - Box::pin(ReadableStreamResource::read(self, limit)) + Box::pin(ReadableStreamResource::read(self, limit).map_err(|e| e.into())) } fn close(self: Rc) { @@ -550,7 +566,7 @@ pub fn op_readable_stream_resource_write_error( ) -> bool { let sender = get_sender(sender); // We can always write an error, no polling required - sender.write_error(type_error(Cow::Owned(error))); + sender.write_error(StreamResourceError::Js(error)); !sender.closed() } -- cgit v1.2.3 From 9fde5cb5e045551fe344b3f60370744eea30ccb4 Mon Sep 17 00:00:00 2001 From: Nathan Whitaker <17734409+nathanwhit@users.noreply.github.com> Date: Thu, 17 Oct 2024 12:51:15 -0700 Subject: fix(node/fs): copyFile with `COPYFILE_EXCL` should not throw if the destination doesn't exist (#26360) Fixes #26313. We were checking for the NotFound error, but still calling the callback with the error / throwing. 
--- ext/node/polyfills/_fs/_fs_copy.ts | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'ext') diff --git a/ext/node/polyfills/_fs/_fs_copy.ts b/ext/node/polyfills/_fs/_fs_copy.ts index 2f8ddf4fc..0434bff4d 100644 --- a/ext/node/polyfills/_fs/_fs_copy.ts +++ b/ext/node/polyfills/_fs/_fs_copy.ts @@ -53,8 +53,9 @@ export function copyFile( }, (e) => { if (e instanceof Deno.errors.NotFound) { Deno.copyFile(srcStr, destStr).then(() => cb(null), cb); + } else { + cb(e); } - cb(e); }); } else { Deno.copyFile(srcStr, destStr).then(() => cb(null), cb); @@ -83,8 +84,9 @@ export function copyFileSync( } catch (e) { if (e instanceof Deno.errors.NotFound) { Deno.copyFileSync(srcStr, destStr); + } else { + throw e; } - throw e; } } else { Deno.copyFileSync(srcStr, destStr); -- cgit v1.2.3 From 8cfd9968fa9ef79777ac2e7dfcef8f1ed618b78b Mon Sep 17 00:00:00 2001 From: Leo Kettmeir Date: Thu, 17 Oct 2024 12:51:33 -0700 Subject: refactor(ext/napi): use concrete error types (#26186) --- ext/napi/Cargo.toml | 1 + ext/napi/lib.rs | 45 ++++++++++++++++++++++++++------------------- 2 files changed, 27 insertions(+), 19 deletions(-) (limited to 'ext') diff --git a/ext/napi/Cargo.toml b/ext/napi/Cargo.toml index 5405d6280..68d281ccc 100644 --- a/ext/napi/Cargo.toml +++ b/ext/napi/Cargo.toml @@ -17,3 +17,4 @@ path = "lib.rs" deno_core.workspace = true deno_permissions.workspace = true libloading = { version = "0.7" } +thiserror.workspace = true diff --git a/ext/napi/lib.rs b/ext/napi/lib.rs index 4500c66fd..0b2b3eb5e 100644 --- a/ext/napi/lib.rs +++ b/ext/napi/lib.rs @@ -6,8 +6,6 @@ #![deny(clippy::missing_safety_doc)] use core::ptr::NonNull; -use deno_core::error::type_error; -use deno_core::error::AnyError; use deno_core::op2; use deno_core::parking_lot::RwLock; use deno_core::url::Url; @@ -20,6 +18,18 @@ use std::path::PathBuf; use std::rc::Rc; use std::thread_local; +#[derive(Debug, thiserror::Error)] +pub enum NApiError { + #[error("Invalid path")] + InvalidPath, + 
#[error(transparent)] + LibLoading(#[from] libloading::Error), + #[error("Unable to find register Node-API module at {}", .0.display())] + ModuleNotFound(PathBuf), + #[error(transparent)] + Permission(deno_core::error::AnyError), +} + #[cfg(unix)] use libloading::os::unix::*; @@ -482,14 +492,20 @@ deno_core::extension!(deno_napi, pub trait NapiPermissions { #[must_use = "the resolved return value to mitigate time-of-check to time-of-use issues"] - fn check(&mut self, path: &str) -> std::result::Result; + fn check( + &mut self, + path: &str, + ) -> Result; } // NOTE(bartlomieju): for now, NAPI uses `--allow-ffi` flag, but that might // change in the future. impl NapiPermissions for deno_permissions::PermissionsContainer { #[inline(always)] - fn check(&mut self, path: &str) -> Result { + fn check( + &mut self, + path: &str, + ) -> Result { deno_permissions::PermissionsContainer::check_ffi(self, path) } } @@ -512,7 +528,7 @@ fn op_napi_open( global: v8::Local<'scope, v8::Object>, buffer_constructor: v8::Local<'scope, v8::Function>, report_error: v8::Local<'scope, v8::Function>, -) -> std::result::Result, AnyError> +) -> Result, NApiError> where NP: NapiPermissions + 'static, { @@ -521,7 +537,7 @@ where let (async_work_sender, cleanup_hooks, external_ops_tracker, path) = { let mut op_state = op_state.borrow_mut(); let permissions = op_state.borrow_mut::(); - let path = permissions.check(&path)?; + let path = permissions.check(&path).map_err(NApiError::Permission)?; let napi_state = op_state.borrow::(); ( op_state.borrow::().clone(), @@ -540,7 +556,7 @@ where let type_tag = v8::Global::new(scope, type_tag); let url_filename = - Url::from_file_path(&path).map_err(|_| type_error("Invalid path"))?; + Url::from_file_path(&path).map_err(|_| NApiError::InvalidPath)?; let env_shared = EnvShared::new(napi_wrap, type_tag, format!("{url_filename}\0")); @@ -565,17 +581,11 @@ where // SAFETY: opening a DLL calls dlopen #[cfg(unix)] - let library = match unsafe { 
Library::open(Some(&path), flags) } { - Ok(lib) => lib, - Err(e) => return Err(type_error(e.to_string())), - }; + let library = unsafe { Library::open(Some(&path), flags) }?; // SAFETY: opening a DLL calls dlopen #[cfg(not(unix))] - let library = match unsafe { Library::load_with_flags(&path, flags) } { - Ok(lib) => lib, - Err(e) => return Err(type_error(e.to_string())), - }; + let library = unsafe { Library::load_with_flags(&path, flags) }?; let maybe_module = MODULE_TO_REGISTER.with(|cell| { let mut slot = cell.borrow_mut(); @@ -610,10 +620,7 @@ where // SAFETY: we are going blind, calling the register function on the other side. unsafe { init(env_ptr, exports.into()) } } else { - return Err(type_error(format!( - "Unable to find register Node-API module at {}", - path.display() - ))); + return Err(NApiError::ModuleNotFound(path)); }; let exports = maybe_exports.unwrap_or(exports.into()); -- cgit v1.2.3 From 8f3eb9d0e7bbbfa67058c67c5e5c1484dee1ef9a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bartek=20Iwa=C5=84czuk?= Date: Thu, 17 Oct 2024 23:57:05 +0100 Subject: fix(ext/node): add null check for kStreamBaseField (#26368) It's not guaranteed that `kStreamBaseField` is not undefined, so added a check for it. Closes https://github.com/denoland/deno/issues/26363 --- ext/node/polyfills/internal_binding/tcp_wrap.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'ext') diff --git a/ext/node/polyfills/internal_binding/tcp_wrap.ts b/ext/node/polyfills/internal_binding/tcp_wrap.ts index 2856f808a..d9f1c5356 100644 --- a/ext/node/polyfills/internal_binding/tcp_wrap.ts +++ b/ext/node/polyfills/internal_binding/tcp_wrap.ts @@ -300,7 +300,7 @@ export class TCP extends ConnectionWrap { * @return An error status code. 
*/ setNoDelay(noDelay: boolean): number { - if ("setNoDelay" in this[kStreamBaseField]) { + if (this[kStreamBaseField] && "setNoDelay" in this[kStreamBaseField]) { this[kStreamBaseField].setNoDelay(noDelay); } return 0; -- cgit v1.2.3 From 3ae10a01e0c8b9c425276a33b98f661c1473cd59 Mon Sep 17 00:00:00 2001 From: denobot <33910674+denobot@users.noreply.github.com> Date: Thu, 17 Oct 2024 21:12:49 -0400 Subject: chore: forward v2.0.2 release commit to main (#26376) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This is the release commit being forwarded back to main for 2.0.2 Co-authored-by: bartlomieju Co-authored-by: Bartek Iwańczuk --- ext/broadcast_channel/Cargo.toml | 2 +- ext/cache/Cargo.toml | 2 +- ext/canvas/Cargo.toml | 2 +- ext/console/Cargo.toml | 2 +- ext/cron/Cargo.toml | 2 +- ext/crypto/Cargo.toml | 2 +- ext/fetch/Cargo.toml | 2 +- ext/ffi/Cargo.toml | 2 +- ext/fs/Cargo.toml | 2 +- ext/http/Cargo.toml | 2 +- ext/io/Cargo.toml | 2 +- ext/kv/Cargo.toml | 2 +- ext/napi/Cargo.toml | 2 +- ext/net/Cargo.toml | 2 +- ext/node/Cargo.toml | 2 +- ext/tls/Cargo.toml | 2 +- ext/url/Cargo.toml | 2 +- ext/web/Cargo.toml | 2 +- ext/webgpu/Cargo.toml | 2 +- ext/webidl/Cargo.toml | 2 +- ext/websocket/Cargo.toml | 2 +- ext/webstorage/Cargo.toml | 2 +- 22 files changed, 22 insertions(+), 22 deletions(-) (limited to 'ext') diff --git a/ext/broadcast_channel/Cargo.toml b/ext/broadcast_channel/Cargo.toml index 95d480d24..7ca058a44 100644 --- a/ext/broadcast_channel/Cargo.toml +++ b/ext/broadcast_channel/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_broadcast_channel" -version = "0.166.0" +version = "0.167.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/cache/Cargo.toml b/ext/cache/Cargo.toml index d0f260d9c..dee4d7274 100644 --- a/ext/cache/Cargo.toml +++ b/ext/cache/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_cache" -version = "0.104.0" +version = "0.105.0" authors.workspace = 
true edition.workspace = true license.workspace = true diff --git a/ext/canvas/Cargo.toml b/ext/canvas/Cargo.toml index e88567ef1..6ca3d76c0 100644 --- a/ext/canvas/Cargo.toml +++ b/ext/canvas/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_canvas" -version = "0.41.0" +version = "0.42.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/console/Cargo.toml b/ext/console/Cargo.toml index d83afb178..f83f7138d 100644 --- a/ext/console/Cargo.toml +++ b/ext/console/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_console" -version = "0.172.0" +version = "0.173.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/cron/Cargo.toml b/ext/cron/Cargo.toml index 9aafa0b48..ccd81de0c 100644 --- a/ext/cron/Cargo.toml +++ b/ext/cron/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_cron" -version = "0.52.0" +version = "0.53.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/crypto/Cargo.toml b/ext/crypto/Cargo.toml index 143f4b009..e3b59ed46 100644 --- a/ext/crypto/Cargo.toml +++ b/ext/crypto/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_crypto" -version = "0.186.0" +version = "0.187.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/fetch/Cargo.toml b/ext/fetch/Cargo.toml index c8e2c858b..ddb58a3f3 100644 --- a/ext/fetch/Cargo.toml +++ b/ext/fetch/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_fetch" -version = "0.196.0" +version = "0.197.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/ffi/Cargo.toml b/ext/ffi/Cargo.toml index 9bca9d98f..80b0180d0 100644 --- a/ext/ffi/Cargo.toml +++ b/ext/ffi/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_ffi" -version = "0.159.0" +version = "0.160.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/fs/Cargo.toml b/ext/fs/Cargo.toml index 904483c79..a33347f9c 100644 --- 
a/ext/fs/Cargo.toml +++ b/ext/fs/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_fs" -version = "0.82.0" +version = "0.83.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/http/Cargo.toml b/ext/http/Cargo.toml index f8e3bb38a..6fa7598cb 100644 --- a/ext/http/Cargo.toml +++ b/ext/http/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_http" -version = "0.170.0" +version = "0.171.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/io/Cargo.toml b/ext/io/Cargo.toml index 5a87977e0..8f407c820 100644 --- a/ext/io/Cargo.toml +++ b/ext/io/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_io" -version = "0.82.0" +version = "0.83.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/kv/Cargo.toml b/ext/kv/Cargo.toml index 98c14dc31..f7b28af67 100644 --- a/ext/kv/Cargo.toml +++ b/ext/kv/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_kv" -version = "0.80.0" +version = "0.81.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/napi/Cargo.toml b/ext/napi/Cargo.toml index 68d281ccc..ef2e41d57 100644 --- a/ext/napi/Cargo.toml +++ b/ext/napi/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_napi" -version = "0.103.0" +version = "0.104.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/net/Cargo.toml b/ext/net/Cargo.toml index a57862072..634dd7dda 100644 --- a/ext/net/Cargo.toml +++ b/ext/net/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_net" -version = "0.164.0" +version = "0.165.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/node/Cargo.toml b/ext/node/Cargo.toml index a6b778c1a..55d53d0ce 100644 --- a/ext/node/Cargo.toml +++ b/ext/node/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_node" -version = "0.109.0" +version = "0.110.0" authors.workspace = true edition.workspace = true license.workspace = true diff 
--git a/ext/tls/Cargo.toml b/ext/tls/Cargo.toml index 9c3d5b804..b060f2829 100644 --- a/ext/tls/Cargo.toml +++ b/ext/tls/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_tls" -version = "0.159.0" +version = "0.160.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/url/Cargo.toml b/ext/url/Cargo.toml index f2e81becf..ceef18f45 100644 --- a/ext/url/Cargo.toml +++ b/ext/url/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_url" -version = "0.172.0" +version = "0.173.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/web/Cargo.toml b/ext/web/Cargo.toml index ad44f5320..395ea182f 100644 --- a/ext/web/Cargo.toml +++ b/ext/web/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_web" -version = "0.203.0" +version = "0.204.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/webgpu/Cargo.toml b/ext/webgpu/Cargo.toml index 262351b81..8f22ca764 100644 --- a/ext/webgpu/Cargo.toml +++ b/ext/webgpu/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_webgpu" -version = "0.139.0" +version = "0.140.0" authors = ["the Deno authors"] edition.workspace = true license = "MIT" diff --git a/ext/webidl/Cargo.toml b/ext/webidl/Cargo.toml index d5d4d9278..d069b3212 100644 --- a/ext/webidl/Cargo.toml +++ b/ext/webidl/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_webidl" -version = "0.172.0" +version = "0.173.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/websocket/Cargo.toml b/ext/websocket/Cargo.toml index 5bf217f13..b265ba78a 100644 --- a/ext/websocket/Cargo.toml +++ b/ext/websocket/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_websocket" -version = "0.177.0" +version = "0.178.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/webstorage/Cargo.toml b/ext/webstorage/Cargo.toml index 2707ccfe9..0d6fcd6a0 100644 --- a/ext/webstorage/Cargo.toml +++ 
b/ext/webstorage/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_webstorage" -version = "0.167.0" +version = "0.168.0" authors.workspace = true edition.workspace = true license.workspace = true -- cgit v1.2.3 From 85a99eb405ef3ec5f8e478d93b2c866afbc53f95 Mon Sep 17 00:00:00 2001 From: Leo Kettmeir Date: Fri, 18 Oct 2024 06:38:17 -0700 Subject: refactor(ext/fs): use concrete error types (#26317) --- ext/fs/Cargo.toml | 1 + ext/fs/lib.rs | 2 + ext/fs/ops.rs | 573 ++++++++++++++++++++++++++++++++++++------------------ 3 files changed, 385 insertions(+), 191 deletions(-) (limited to 'ext') diff --git a/ext/fs/Cargo.toml b/ext/fs/Cargo.toml index a33347f9c..313c84fdb 100644 --- a/ext/fs/Cargo.toml +++ b/ext/fs/Cargo.toml @@ -28,6 +28,7 @@ libc.workspace = true rand.workspace = true rayon = "1.8.0" serde.workspace = true +thiserror.workspace = true [target.'cfg(unix)'.dependencies] nix.workspace = true diff --git a/ext/fs/lib.rs b/ext/fs/lib.rs index bd49078b2..cd2baf22a 100644 --- a/ext/fs/lib.rs +++ b/ext/fs/lib.rs @@ -14,6 +14,8 @@ pub use crate::interface::FileSystemRc; pub use crate::interface::FsDirEntry; pub use crate::interface::FsFileType; pub use crate::interface::OpenOptions; +pub use crate::ops::FsOpsError; +pub use crate::ops::OperationError; pub use crate::std_fs::RealFs; pub use crate::sync::MaybeSend; pub use crate::sync::MaybeSync; diff --git a/ext/fs/ops.rs b/ext/fs/ops.rs index b13d3a7d1..a3f59da4e 100644 --- a/ext/fs/ops.rs +++ b/ext/fs/ops.rs @@ -1,6 +1,8 @@ // Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. 
use std::cell::RefCell; +use std::error::Error; +use std::fmt::Formatter; use std::io; use std::io::SeekFrom; use std::path::Path; @@ -8,10 +10,6 @@ use std::path::PathBuf; use std::path::StripPrefixError; use std::rc::Rc; -use deno_core::anyhow::bail; -use deno_core::error::custom_error; -use deno_core::error::type_error; -use deno_core::error::AnyError; use deno_core::op2; use deno_core::CancelFuture; use deno_core::CancelHandle; @@ -34,6 +32,67 @@ use crate::interface::FsFileType; use crate::FsPermissions; use crate::OpenOptions; +#[derive(Debug, thiserror::Error)] +pub enum FsOpsError { + #[error("{0}")] + Io(#[source] std::io::Error), + #[error("{0}")] + OperationError(#[source] OperationError), + #[error(transparent)] + Permission(deno_core::error::AnyError), + #[error(transparent)] + Resource(deno_core::error::AnyError), + #[error("File name or path {0:?} is not valid UTF-8")] + InvalidUtf8(std::ffi::OsString), + #[error("{0}")] + StripPrefix(#[from] StripPrefixError), + #[error("{0}")] + Canceled(#[from] deno_core::Canceled), + #[error("Invalid seek mode: {0}")] + InvalidSeekMode(i32), + #[error("Invalid control character in prefix or suffix: {0:?}")] + InvalidControlCharacter(String), + #[error("Invalid character in prefix or suffix: {0:?}")] + InvalidCharacter(String), + #[cfg(windows)] + #[error("Invalid trailing character in suffix")] + InvalidTrailingCharacter, + #[error("Requires {err} access to {path}, {}", print_not_capable_info(*.standalone, .err))] + NotCapableAccess { + // NotCapable + standalone: bool, + err: &'static str, + path: String, + }, + #[error("permission denied: {0}")] + NotCapable(&'static str), // NotCapable + #[error(transparent)] + Other(deno_core::error::AnyError), +} + +impl From for FsOpsError { + fn from(err: FsError) -> Self { + match err { + FsError::Io(err) => FsOpsError::Io(err), + FsError::FileBusy => { + FsOpsError::Other(deno_core::error::resource_unavailable()) + } + FsError::NotSupported => { + 
FsOpsError::Other(deno_core::error::not_supported()) + } + FsError::NotCapable(err) => FsOpsError::NotCapable(err), + } + } +} + +fn print_not_capable_info(standalone: bool, err: &'static str) -> String { + if standalone { + format!("specify the required permissions during compilation using `deno compile --allow-{err}`") + } else { + format!("run again with the --allow-{err} flag") + } +} + fn sync_permission_check<'a, P: FsPermissions + 'static>( permissions: &'a mut P, api_name: &'static str, @@ -58,7 +117,7 @@ fn map_permission_error( operation: &'static str, error: FsError, path: &Path, -) -> AnyError { +) -> FsOpsError { match error { FsError::NotCapable(err) => { let path = format!("{path:?}"); @@ -67,14 +126,12 @@ fn map_permission_error( } else { (path.as_str(), "") }; - let msg = if deno_permissions::is_standalone() { - format!( - "Requires {err} access to {path}{truncated}, specify the required permissions during compilation using `deno compile --allow-{err}`") - } else { - format!( - "Requires {err} access to {path}{truncated}, run again with the --allow-{err} flag") - }; - custom_error("NotCapable", msg) + + FsOpsError::NotCapableAccess { + standalone: deno_permissions::is_standalone(), + err, + path: format!("{path}{truncated}"), + } } err => Err::<(), _>(err) .context_path(operation, path) @@ -85,7 +142,7 @@ fn map_permission_error( #[op2] #[string] -pub fn op_fs_cwd

(state: &mut OpState) -> Result +pub fn op_fs_cwd

(state: &mut OpState) -> Result where P: FsPermissions + 'static, { @@ -93,7 +150,8 @@ where let path = fs.cwd()?; state .borrow_mut::

() - .check_read_blind(&path, "CWD", "Deno.cwd()")?; + .check_read_blind(&path, "CWD", "Deno.cwd()") + .map_err(FsOpsError::Permission)?; let path_str = path_into_string(path.into_os_string())?; Ok(path_str) } @@ -102,13 +160,14 @@ where pub fn op_fs_chdir

( state: &mut OpState, #[string] directory: &str, -) -> Result<(), AnyError> +) -> Result<(), FsOpsError> where P: FsPermissions + 'static, { let d = state .borrow_mut::

() - .check_read(directory, "Deno.chdir()")?; + .check_read(directory, "Deno.chdir()") + .map_err(FsOpsError::Permission)?; state .borrow::() .chdir(&d) @@ -119,7 +178,7 @@ where pub fn op_fs_umask( state: &mut OpState, mask: Option, -) -> Result +) -> Result where { state.borrow::().umask(mask).context("umask") @@ -131,7 +190,7 @@ pub fn op_fs_open_sync

( state: &mut OpState, #[string] path: String, #[serde] options: Option, -) -> Result +) -> Result where P: FsPermissions + 'static, { @@ -158,7 +217,7 @@ pub async fn op_fs_open_async

( state: Rc>, #[string] path: String, #[serde] options: Option, -) -> Result +) -> Result where P: FsPermissions + 'static, { @@ -186,7 +245,7 @@ pub fn op_fs_mkdir_sync

( #[string] path: String, recursive: bool, mode: Option, -) -> Result<(), AnyError> +) -> Result<(), FsOpsError> where P: FsPermissions + 'static, { @@ -194,7 +253,8 @@ where let path = state .borrow_mut::

() - .check_write(&path, "Deno.mkdirSync()")?; + .check_write(&path, "Deno.mkdirSync()") + .map_err(FsOpsError::Permission)?; let fs = state.borrow::(); fs.mkdir_sync(&path, recursive, Some(mode)) @@ -209,7 +269,7 @@ pub async fn op_fs_mkdir_async

( #[string] path: String, recursive: bool, mode: Option, -) -> Result<(), AnyError> +) -> Result<(), FsOpsError> where P: FsPermissions + 'static, { @@ -217,7 +277,10 @@ where let (fs, path) = { let mut state = state.borrow_mut(); - let path = state.borrow_mut::

().check_write(&path, "Deno.mkdir()")?; + let path = state + .borrow_mut::

() + .check_write(&path, "Deno.mkdir()") + .map_err(FsOpsError::Permission)?; (state.borrow::().clone(), path) }; @@ -233,13 +296,14 @@ pub fn op_fs_chmod_sync

( state: &mut OpState, #[string] path: String, mode: u32, -) -> Result<(), AnyError> +) -> Result<(), FsOpsError> where P: FsPermissions + 'static, { let path = state .borrow_mut::

() - .check_write(&path, "Deno.chmodSync()")?; + .check_write(&path, "Deno.chmodSync()") + .map_err(FsOpsError::Permission)?; let fs = state.borrow::(); fs.chmod_sync(&path, mode).context_path("chmod", &path)?; Ok(()) @@ -250,13 +314,16 @@ pub async fn op_fs_chmod_async

( state: Rc>, #[string] path: String, mode: u32, -) -> Result<(), AnyError> +) -> Result<(), FsOpsError> where P: FsPermissions + 'static, { let (fs, path) = { let mut state = state.borrow_mut(); - let path = state.borrow_mut::

().check_write(&path, "Deno.chmod()")?; + let path = state + .borrow_mut::

() + .check_write(&path, "Deno.chmod()") + .map_err(FsOpsError::Permission)?; (state.borrow::().clone(), path) }; fs.chmod_async(path.clone(), mode) @@ -271,13 +338,14 @@ pub fn op_fs_chown_sync

( #[string] path: String, uid: Option, gid: Option, -) -> Result<(), AnyError> +) -> Result<(), FsOpsError> where P: FsPermissions + 'static, { let path = state .borrow_mut::

() - .check_write(&path, "Deno.chownSync()")?; + .check_write(&path, "Deno.chownSync()") + .map_err(FsOpsError::Permission)?; let fs = state.borrow::(); fs.chown_sync(&path, uid, gid) .context_path("chown", &path)?; @@ -290,13 +358,16 @@ pub async fn op_fs_chown_async

( #[string] path: String, uid: Option, gid: Option, -) -> Result<(), AnyError> +) -> Result<(), FsOpsError> where P: FsPermissions + 'static, { let (fs, path) = { let mut state = state.borrow_mut(); - let path = state.borrow_mut::

().check_write(&path, "Deno.chown()")?; + let path = state + .borrow_mut::

() + .check_write(&path, "Deno.chown()") + .map_err(FsOpsError::Permission)?; (state.borrow::().clone(), path) }; fs.chown_async(path.clone(), uid, gid) @@ -310,13 +381,14 @@ pub fn op_fs_remove_sync

( state: &mut OpState, #[string] path: &str, recursive: bool, -) -> Result<(), AnyError> +) -> Result<(), FsOpsError> where P: FsPermissions + 'static, { let path = state .borrow_mut::

() - .check_write(path, "Deno.removeSync()")?; + .check_write(path, "Deno.removeSync()") + .map_err(FsOpsError::Permission)?; let fs = state.borrow::(); fs.remove_sync(&path, recursive) @@ -330,7 +402,7 @@ pub async fn op_fs_remove_async

( state: Rc>, #[string] path: String, recursive: bool, -) -> Result<(), AnyError> +) -> Result<(), FsOpsError> where P: FsPermissions + 'static, { @@ -339,11 +411,13 @@ where let path = if recursive { state .borrow_mut::

() - .check_write(&path, "Deno.remove()")? + .check_write(&path, "Deno.remove()") + .map_err(FsOpsError::Permission)? } else { state .borrow_mut::

() - .check_write_partial(&path, "Deno.remove()")? + .check_write_partial(&path, "Deno.remove()") + .map_err(FsOpsError::Permission)? }; (state.borrow::().clone(), path) @@ -361,13 +435,17 @@ pub fn op_fs_copy_file_sync

( state: &mut OpState, #[string] from: &str, #[string] to: &str, -) -> Result<(), AnyError> +) -> Result<(), FsOpsError> where P: FsPermissions + 'static, { let permissions = state.borrow_mut::

(); - let from = permissions.check_read(from, "Deno.copyFileSync()")?; - let to = permissions.check_write(to, "Deno.copyFileSync()")?; + let from = permissions + .check_read(from, "Deno.copyFileSync()") + .map_err(FsOpsError::Permission)?; + let to = permissions + .check_write(to, "Deno.copyFileSync()") + .map_err(FsOpsError::Permission)?; let fs = state.borrow::(); fs.copy_file_sync(&from, &to) @@ -381,15 +459,19 @@ pub async fn op_fs_copy_file_async

( state: Rc>, #[string] from: String, #[string] to: String, -) -> Result<(), AnyError> +) -> Result<(), FsOpsError> where P: FsPermissions + 'static, { let (fs, from, to) = { let mut state = state.borrow_mut(); let permissions = state.borrow_mut::

(); - let from = permissions.check_read(&from, "Deno.copyFile()")?; - let to = permissions.check_write(&to, "Deno.copyFile()")?; + let from = permissions + .check_read(&from, "Deno.copyFile()") + .map_err(FsOpsError::Permission)?; + let to = permissions + .check_write(&to, "Deno.copyFile()") + .map_err(FsOpsError::Permission)?; (state.borrow::().clone(), from, to) }; @@ -405,13 +487,14 @@ pub fn op_fs_stat_sync

( state: &mut OpState, #[string] path: String, #[buffer] stat_out_buf: &mut [u32], -) -> Result<(), AnyError> +) -> Result<(), FsOpsError> where P: FsPermissions + 'static, { let path = state .borrow_mut::

() - .check_read(&path, "Deno.statSync()")?; + .check_read(&path, "Deno.statSync()") + .map_err(FsOpsError::Permission)?; let fs = state.borrow::(); let stat = fs.stat_sync(&path).context_path("stat", &path)?; let serializable_stat = SerializableStat::from(stat); @@ -424,14 +507,16 @@ where pub async fn op_fs_stat_async

( state: Rc>, #[string] path: String, -) -> Result +) -> Result where P: FsPermissions + 'static, { let (fs, path) = { let mut state = state.borrow_mut(); let permissions = state.borrow_mut::

(); - let path = permissions.check_read(&path, "Deno.stat()")?; + let path = permissions + .check_read(&path, "Deno.stat()") + .map_err(FsOpsError::Permission)?; (state.borrow::().clone(), path) }; let stat = fs @@ -446,13 +531,14 @@ pub fn op_fs_lstat_sync

( state: &mut OpState, #[string] path: String, #[buffer] stat_out_buf: &mut [u32], -) -> Result<(), AnyError> +) -> Result<(), FsOpsError> where P: FsPermissions + 'static, { let path = state .borrow_mut::

() - .check_read(&path, "Deno.lstatSync()")?; + .check_read(&path, "Deno.lstatSync()") + .map_err(FsOpsError::Permission)?; let fs = state.borrow::(); let stat = fs.lstat_sync(&path).context_path("lstat", &path)?; let serializable_stat = SerializableStat::from(stat); @@ -465,14 +551,16 @@ where pub async fn op_fs_lstat_async

( state: Rc>, #[string] path: String, -) -> Result +) -> Result where P: FsPermissions + 'static, { let (fs, path) = { let mut state = state.borrow_mut(); let permissions = state.borrow_mut::

(); - let path = permissions.check_read(&path, "Deno.lstat()")?; + let path = permissions + .check_read(&path, "Deno.lstat()") + .map_err(FsOpsError::Permission)?; (state.borrow::().clone(), path) }; let stat = fs @@ -487,15 +575,19 @@ where pub fn op_fs_realpath_sync

( state: &mut OpState, #[string] path: String, -) -> Result +) -> Result where P: FsPermissions + 'static, { let fs = state.borrow::().clone(); let permissions = state.borrow_mut::

(); - let path = permissions.check_read(&path, "Deno.realPathSync()")?; + let path = permissions + .check_read(&path, "Deno.realPathSync()") + .map_err(FsOpsError::Permission)?; if path.is_relative() { - permissions.check_read_blind(&fs.cwd()?, "CWD", "Deno.realPathSync()")?; + permissions + .check_read_blind(&fs.cwd()?, "CWD", "Deno.realPathSync()") + .map_err(FsOpsError::Permission)?; } let resolved_path = @@ -510,7 +602,7 @@ where pub async fn op_fs_realpath_async

( state: Rc>, #[string] path: String, -) -> Result +) -> Result where P: FsPermissions + 'static, { @@ -518,9 +610,13 @@ where let mut state = state.borrow_mut(); let fs = state.borrow::().clone(); let permissions = state.borrow_mut::

(); - let path = permissions.check_read(&path, "Deno.realPath()")?; + let path = permissions + .check_read(&path, "Deno.realPath()") + .map_err(FsOpsError::Permission)?; if path.is_relative() { - permissions.check_read_blind(&fs.cwd()?, "CWD", "Deno.realPath()")?; + permissions + .check_read_blind(&fs.cwd()?, "CWD", "Deno.realPath()") + .map_err(FsOpsError::Permission)?; } (fs, path) }; @@ -538,13 +634,14 @@ where pub fn op_fs_read_dir_sync

( state: &mut OpState, #[string] path: String, -) -> Result, AnyError> +) -> Result, FsOpsError> where P: FsPermissions + 'static, { let path = state .borrow_mut::

() - .check_read(&path, "Deno.readDirSync()")?; + .check_read(&path, "Deno.readDirSync()") + .map_err(FsOpsError::Permission)?; let fs = state.borrow::(); let entries = fs.read_dir_sync(&path).context_path("readdir", &path)?; @@ -557,7 +654,7 @@ where pub async fn op_fs_read_dir_async

( state: Rc>, #[string] path: String, -) -> Result, AnyError> +) -> Result, FsOpsError> where P: FsPermissions + 'static, { @@ -565,7 +662,8 @@ where let mut state = state.borrow_mut(); let path = state .borrow_mut::

() - .check_read(&path, "Deno.readDir()")?; + .check_read(&path, "Deno.readDir()") + .map_err(FsOpsError::Permission)?; (state.borrow::().clone(), path) }; @@ -582,14 +680,20 @@ pub fn op_fs_rename_sync

( state: &mut OpState, #[string] oldpath: String, #[string] newpath: String, -) -> Result<(), AnyError> +) -> Result<(), FsOpsError> where P: FsPermissions + 'static, { let permissions = state.borrow_mut::

(); - let _ = permissions.check_read(&oldpath, "Deno.renameSync()")?; - let oldpath = permissions.check_write(&oldpath, "Deno.renameSync()")?; - let newpath = permissions.check_write(&newpath, "Deno.renameSync()")?; + let _ = permissions + .check_read(&oldpath, "Deno.renameSync()") + .map_err(FsOpsError::Permission)?; + let oldpath = permissions + .check_write(&oldpath, "Deno.renameSync()") + .map_err(FsOpsError::Permission)?; + let newpath = permissions + .check_write(&newpath, "Deno.renameSync()") + .map_err(FsOpsError::Permission)?; let fs = state.borrow::(); fs.rename_sync(&oldpath, &newpath) @@ -603,16 +707,22 @@ pub async fn op_fs_rename_async

( state: Rc>, #[string] oldpath: String, #[string] newpath: String, -) -> Result<(), AnyError> +) -> Result<(), FsOpsError> where P: FsPermissions + 'static, { let (fs, oldpath, newpath) = { let mut state = state.borrow_mut(); let permissions = state.borrow_mut::

(); - _ = permissions.check_read(&oldpath, "Deno.rename()")?; - let oldpath = permissions.check_write(&oldpath, "Deno.rename()")?; - let newpath = permissions.check_write(&newpath, "Deno.rename()")?; + _ = permissions + .check_read(&oldpath, "Deno.rename()") + .map_err(FsOpsError::Permission)?; + let oldpath = permissions + .check_write(&oldpath, "Deno.rename()") + .map_err(FsOpsError::Permission)?; + let newpath = permissions + .check_write(&newpath, "Deno.rename()") + .map_err(FsOpsError::Permission)?; (state.borrow::().clone(), oldpath, newpath) }; @@ -628,15 +738,23 @@ pub fn op_fs_link_sync

( state: &mut OpState, #[string] oldpath: &str, #[string] newpath: &str, -) -> Result<(), AnyError> +) -> Result<(), FsOpsError> where P: FsPermissions + 'static, { let permissions = state.borrow_mut::

(); - _ = permissions.check_read(oldpath, "Deno.linkSync()")?; - let oldpath = permissions.check_write(oldpath, "Deno.linkSync()")?; - _ = permissions.check_read(newpath, "Deno.linkSync()")?; - let newpath = permissions.check_write(newpath, "Deno.linkSync()")?; + _ = permissions + .check_read(oldpath, "Deno.linkSync()") + .map_err(FsOpsError::Permission)?; + let oldpath = permissions + .check_write(oldpath, "Deno.linkSync()") + .map_err(FsOpsError::Permission)?; + _ = permissions + .check_read(newpath, "Deno.linkSync()") + .map_err(FsOpsError::Permission)?; + let newpath = permissions + .check_write(newpath, "Deno.linkSync()") + .map_err(FsOpsError::Permission)?; let fs = state.borrow::(); fs.link_sync(&oldpath, &newpath) @@ -650,17 +768,25 @@ pub async fn op_fs_link_async

( state: Rc>, #[string] oldpath: String, #[string] newpath: String, -) -> Result<(), AnyError> +) -> Result<(), FsOpsError> where P: FsPermissions + 'static, { let (fs, oldpath, newpath) = { let mut state = state.borrow_mut(); let permissions = state.borrow_mut::

(); - _ = permissions.check_read(&oldpath, "Deno.link()")?; - let oldpath = permissions.check_write(&oldpath, "Deno.link()")?; - _ = permissions.check_read(&newpath, "Deno.link()")?; - let newpath = permissions.check_write(&newpath, "Deno.link()")?; + _ = permissions + .check_read(&oldpath, "Deno.link()") + .map_err(FsOpsError::Permission)?; + let oldpath = permissions + .check_write(&oldpath, "Deno.link()") + .map_err(FsOpsError::Permission)?; + _ = permissions + .check_read(&newpath, "Deno.link()") + .map_err(FsOpsError::Permission)?; + let newpath = permissions + .check_write(&newpath, "Deno.link()") + .map_err(FsOpsError::Permission)?; (state.borrow::().clone(), oldpath, newpath) }; @@ -677,7 +803,7 @@ pub fn op_fs_symlink_sync

( #[string] oldpath: &str, #[string] newpath: &str, #[serde] file_type: Option, -) -> Result<(), AnyError> +) -> Result<(), FsOpsError> where P: FsPermissions + 'static, { @@ -685,8 +811,12 @@ where let newpath = PathBuf::from(newpath); let permissions = state.borrow_mut::

(); - permissions.check_write_all("Deno.symlinkSync()")?; - permissions.check_read_all("Deno.symlinkSync()")?; + permissions + .check_write_all("Deno.symlinkSync()") + .map_err(FsOpsError::Permission)?; + permissions + .check_read_all("Deno.symlinkSync()") + .map_err(FsOpsError::Permission)?; let fs = state.borrow::(); fs.symlink_sync(&oldpath, &newpath, file_type) @@ -701,7 +831,7 @@ pub async fn op_fs_symlink_async

( #[string] oldpath: String, #[string] newpath: String, #[serde] file_type: Option, -) -> Result<(), AnyError> +) -> Result<(), FsOpsError> where P: FsPermissions + 'static, { @@ -711,8 +841,12 @@ where let fs = { let mut state = state.borrow_mut(); let permissions = state.borrow_mut::

(); - permissions.check_write_all("Deno.symlink()")?; - permissions.check_read_all("Deno.symlink()")?; + permissions + .check_write_all("Deno.symlink()") + .map_err(FsOpsError::Permission)?; + permissions + .check_read_all("Deno.symlink()") + .map_err(FsOpsError::Permission)?; state.borrow::().clone() }; @@ -728,13 +862,14 @@ where pub fn op_fs_read_link_sync

( state: &mut OpState, #[string] path: String, -) -> Result +) -> Result where P: FsPermissions + 'static, { let path = state .borrow_mut::

() - .check_read(&path, "Deno.readLink()")?; + .check_read(&path, "Deno.readLink()") + .map_err(FsOpsError::Permission)?; let fs = state.borrow::(); @@ -748,7 +883,7 @@ where pub async fn op_fs_read_link_async

( state: Rc>, #[string] path: String, -) -> Result +) -> Result where P: FsPermissions + 'static, { @@ -756,7 +891,8 @@ where let mut state = state.borrow_mut(); let path = state .borrow_mut::

() - .check_read(&path, "Deno.readLink()")?; + .check_read(&path, "Deno.readLink()") + .map_err(FsOpsError::Permission)?; (state.borrow::().clone(), path) }; @@ -773,13 +909,14 @@ pub fn op_fs_truncate_sync

( state: &mut OpState, #[string] path: &str, #[number] len: u64, -) -> Result<(), AnyError> +) -> Result<(), FsOpsError> where P: FsPermissions + 'static, { let path = state .borrow_mut::

() - .check_write(path, "Deno.truncateSync()")?; + .check_write(path, "Deno.truncateSync()") + .map_err(FsOpsError::Permission)?; let fs = state.borrow::(); fs.truncate_sync(&path, len) @@ -793,7 +930,7 @@ pub async fn op_fs_truncate_async

( state: Rc>, #[string] path: String, #[number] len: u64, -) -> Result<(), AnyError> +) -> Result<(), FsOpsError> where P: FsPermissions + 'static, { @@ -801,7 +938,8 @@ where let mut state = state.borrow_mut(); let path = state .borrow_mut::

() - .check_write(&path, "Deno.truncate()")?; + .check_write(&path, "Deno.truncate()") + .map_err(FsOpsError::Permission)?; (state.borrow::().clone(), path) }; @@ -820,11 +958,14 @@ pub fn op_fs_utime_sync

( #[smi] atime_nanos: u32, #[number] mtime_secs: i64, #[smi] mtime_nanos: u32, -) -> Result<(), AnyError> +) -> Result<(), FsOpsError> where P: FsPermissions + 'static, { - let path = state.borrow_mut::

().check_write(path, "Deno.utime()")?; + let path = state + .borrow_mut::

() + .check_write(path, "Deno.utime()") + .map_err(FsOpsError::Permission)?; let fs = state.borrow::(); fs.utime_sync(&path, atime_secs, atime_nanos, mtime_secs, mtime_nanos) @@ -841,13 +982,16 @@ pub async fn op_fs_utime_async

( #[smi] atime_nanos: u32, #[number] mtime_secs: i64, #[smi] mtime_nanos: u32, -) -> Result<(), AnyError> +) -> Result<(), FsOpsError> where P: FsPermissions + 'static, { let (fs, path) = { let mut state = state.borrow_mut(); - let path = state.borrow_mut::

().check_write(&path, "Deno.utime()")?; + let path = state + .borrow_mut::

() + .check_write(&path, "Deno.utime()") + .map_err(FsOpsError::Permission)?; (state.borrow::().clone(), path) }; @@ -871,7 +1015,7 @@ pub fn op_fs_make_temp_dir_sync

( #[string] dir_arg: Option, #[string] prefix: Option, #[string] suffix: Option, -) -> Result +) -> Result where P: FsPermissions + 'static, { @@ -913,7 +1057,7 @@ pub async fn op_fs_make_temp_dir_async

( #[string] dir_arg: Option, #[string] prefix: Option, #[string] suffix: Option, -) -> Result +) -> Result where P: FsPermissions + 'static, { @@ -959,7 +1103,7 @@ pub fn op_fs_make_temp_file_sync

( #[string] dir_arg: Option, #[string] prefix: Option, #[string] suffix: Option, -) -> Result +) -> Result where P: FsPermissions + 'static, { @@ -1007,7 +1151,7 @@ pub async fn op_fs_make_temp_file_async

( #[string] dir_arg: Option, #[string] prefix: Option, #[string] suffix: Option, -) -> Result +) -> Result where P: FsPermissions + 'static, { @@ -1069,18 +1213,22 @@ fn make_temp_check_sync

( state: &mut OpState, dir: Option<&str>, api_name: &str, -) -> Result<(PathBuf, FileSystemRc), AnyError> +) -> Result<(PathBuf, FileSystemRc), FsOpsError> where P: FsPermissions + 'static, { let fs = state.borrow::().clone(); let dir = match dir { - Some(dir) => state.borrow_mut::

().check_write(dir, api_name)?, + Some(dir) => state + .borrow_mut::

() + .check_write(dir, api_name) + .map_err(FsOpsError::Permission)?, None => { let dir = fs.tmp_dir().context("tmpdir")?; state .borrow_mut::

() - .check_write_blind(&dir, "TMP", api_name)?; + .check_write_blind(&dir, "TMP", api_name) + .map_err(FsOpsError::Permission)?; dir } }; @@ -1091,19 +1239,23 @@ fn make_temp_check_async

( state: Rc>, dir: Option<&str>, api_name: &str, -) -> Result<(PathBuf, FileSystemRc), AnyError> +) -> Result<(PathBuf, FileSystemRc), FsOpsError> where P: FsPermissions + 'static, { let mut state = state.borrow_mut(); let fs = state.borrow::().clone(); let dir = match dir { - Some(dir) => state.borrow_mut::

().check_write(dir, api_name)?, + Some(dir) => state + .borrow_mut::

() + .check_write(dir, api_name) + .map_err(FsOpsError::Permission)?, None => { let dir = fs.tmp_dir().context("tmpdir")?; state .borrow_mut::

() - .check_write_blind(&dir, "TMP", api_name)?; + .check_write_blind(&dir, "TMP", api_name) + .map_err(FsOpsError::Permission)?; dir } }; @@ -1116,10 +1268,10 @@ where fn validate_temporary_filename_component( component: &str, #[allow(unused_variables)] suffix: bool, -) -> Result<(), AnyError> { +) -> Result<(), FsOpsError> { // Ban ASCII and Unicode control characters: these will often fail if let Some(c) = component.matches(|c: char| c.is_control()).next() { - bail!("Invalid control character in prefix or suffix: {:?}", c); + return Err(FsOpsError::InvalidControlCharacter(c.to_string())); } // Windows has the most restrictive filenames. As temp files aren't normal files, we just // use this set of banned characters for all platforms because wildcard-like files can also @@ -1135,13 +1287,13 @@ fn validate_temporary_filename_component( .matches(|c: char| "<>:\"/\\|?*".contains(c)) .next() { - bail!("Invalid character in prefix or suffix: {:?}", c); + return Err(FsOpsError::InvalidCharacter(c.to_string())); } // This check is only for Windows #[cfg(windows)] if suffix && component.ends_with(|c: char| ". ".contains(c)) { - bail!("Invalid trailing character in suffix"); + return Err(FsOpsError::InvalidTrailingCharacter); } Ok(()) @@ -1152,7 +1304,7 @@ fn tmp_name( dir: &Path, prefix: Option<&str>, suffix: Option<&str>, -) -> Result { +) -> Result { let prefix = prefix.unwrap_or(""); validate_temporary_filename_component(prefix, false)?; let suffix = suffix.unwrap_or(""); @@ -1179,7 +1331,7 @@ pub fn op_fs_write_file_sync

( create: bool, create_new: bool, #[buffer] data: JsBuffer, -) -> Result<(), AnyError> +) -> Result<(), FsOpsError> where P: FsPermissions + 'static, { @@ -1207,7 +1359,7 @@ pub async fn op_fs_write_file_async

( create_new: bool, #[buffer] data: JsBuffer, #[smi] cancel_rid: Option, -) -> Result<(), AnyError> +) -> Result<(), FsOpsError> where P: FsPermissions + 'static, { @@ -1255,7 +1407,7 @@ where pub fn op_fs_read_file_sync

( state: &mut OpState, #[string] path: String, -) -> Result +) -> Result where P: FsPermissions + 'static, { @@ -1277,7 +1429,7 @@ pub async fn op_fs_read_file_async

( state: Rc>, #[string] path: String, #[smi] cancel_rid: Option, -) -> Result +) -> Result where P: FsPermissions + 'static, { @@ -1318,7 +1470,7 @@ where pub fn op_fs_read_file_text_sync

( state: &mut OpState, #[string] path: String, -) -> Result +) -> Result where P: FsPermissions + 'static, { @@ -1340,7 +1492,7 @@ pub async fn op_fs_read_file_text_async

( state: Rc>, #[string] path: String, #[smi] cancel_rid: Option, -) -> Result +) -> Result where P: FsPermissions + 'static, { @@ -1377,13 +1529,13 @@ where Ok(str) } -fn to_seek_from(offset: i64, whence: i32) -> Result { +fn to_seek_from(offset: i64, whence: i32) -> Result { let seek_from = match whence { 0 => SeekFrom::Start(offset as u64), 1 => SeekFrom::Current(offset), 2 => SeekFrom::End(offset), _ => { - return Err(type_error(format!("Invalid seek mode: {whence}"))); + return Err(FsOpsError::InvalidSeekMode(whence)); } }; Ok(seek_from) @@ -1396,9 +1548,10 @@ pub fn op_fs_seek_sync( #[smi] rid: ResourceId, #[number] offset: i64, #[smi] whence: i32, -) -> Result { +) -> Result { let pos = to_seek_from(offset, whence)?; - let file = FileResource::get_file(state, rid)?; + let file = + FileResource::get_file(state, rid).map_err(FsOpsError::Resource)?; let cursor = file.seek_sync(pos)?; Ok(cursor) } @@ -1410,9 +1563,10 @@ pub async fn op_fs_seek_async( #[smi] rid: ResourceId, #[number] offset: i64, #[smi] whence: i32, -) -> Result { +) -> Result { let pos = to_seek_from(offset, whence)?; - let file = FileResource::get_file(&state.borrow(), rid)?; + let file = FileResource::get_file(&state.borrow(), rid) + .map_err(FsOpsError::Resource)?; let cursor = file.seek_async(pos).await?; Ok(cursor) } @@ -1421,8 +1575,9 @@ pub async fn op_fs_seek_async( pub fn op_fs_file_sync_data_sync( state: &mut OpState, #[smi] rid: ResourceId, -) -> Result<(), AnyError> { - let file = FileResource::get_file(state, rid)?; +) -> Result<(), FsOpsError> { + let file = + FileResource::get_file(state, rid).map_err(FsOpsError::Resource)?; file.datasync_sync()?; Ok(()) } @@ -1431,8 +1586,9 @@ pub fn op_fs_file_sync_data_sync( pub async fn op_fs_file_sync_data_async( state: Rc>, #[smi] rid: ResourceId, -) -> Result<(), AnyError> { - let file = FileResource::get_file(&state.borrow(), rid)?; +) -> Result<(), FsOpsError> { + let file = FileResource::get_file(&state.borrow(), rid) + 
.map_err(FsOpsError::Resource)?; file.datasync_async().await?; Ok(()) } @@ -1441,8 +1597,9 @@ pub async fn op_fs_file_sync_data_async( pub fn op_fs_file_sync_sync( state: &mut OpState, #[smi] rid: ResourceId, -) -> Result<(), AnyError> { - let file = FileResource::get_file(state, rid)?; +) -> Result<(), FsOpsError> { + let file = + FileResource::get_file(state, rid).map_err(FsOpsError::Resource)?; file.sync_sync()?; Ok(()) } @@ -1451,8 +1608,9 @@ pub fn op_fs_file_sync_sync( pub async fn op_fs_file_sync_async( state: Rc>, #[smi] rid: ResourceId, -) -> Result<(), AnyError> { - let file = FileResource::get_file(&state.borrow(), rid)?; +) -> Result<(), FsOpsError> { + let file = FileResource::get_file(&state.borrow(), rid) + .map_err(FsOpsError::Resource)?; file.sync_async().await?; Ok(()) } @@ -1462,8 +1620,9 @@ pub fn op_fs_file_stat_sync( state: &mut OpState, #[smi] rid: ResourceId, #[buffer] stat_out_buf: &mut [u32], -) -> Result<(), AnyError> { - let file = FileResource::get_file(state, rid)?; +) -> Result<(), FsOpsError> { + let file = + FileResource::get_file(state, rid).map_err(FsOpsError::Resource)?; let stat = file.stat_sync()?; let serializable_stat = SerializableStat::from(stat); serializable_stat.write(stat_out_buf); @@ -1475,8 +1634,9 @@ pub fn op_fs_file_stat_sync( pub async fn op_fs_file_stat_async( state: Rc>, #[smi] rid: ResourceId, -) -> Result { - let file = FileResource::get_file(&state.borrow(), rid)?; +) -> Result { + let file = FileResource::get_file(&state.borrow(), rid) + .map_err(FsOpsError::Resource)?; let stat = file.stat_async().await?; Ok(stat.into()) } @@ -1486,8 +1646,9 @@ pub fn op_fs_flock_sync( state: &mut OpState, #[smi] rid: ResourceId, exclusive: bool, -) -> Result<(), AnyError> { - let file = FileResource::get_file(state, rid)?; +) -> Result<(), FsOpsError> { + let file = + FileResource::get_file(state, rid).map_err(FsOpsError::Resource)?; file.lock_sync(exclusive)?; Ok(()) } @@ -1497,8 +1658,9 @@ pub async fn op_fs_flock_async( 
state: Rc>, #[smi] rid: ResourceId, exclusive: bool, -) -> Result<(), AnyError> { - let file = FileResource::get_file(&state.borrow(), rid)?; +) -> Result<(), FsOpsError> { + let file = FileResource::get_file(&state.borrow(), rid) + .map_err(FsOpsError::Resource)?; file.lock_async(exclusive).await?; Ok(()) } @@ -1507,8 +1669,9 @@ pub async fn op_fs_flock_async( pub fn op_fs_funlock_sync( state: &mut OpState, #[smi] rid: ResourceId, -) -> Result<(), AnyError> { - let file = FileResource::get_file(state, rid)?; +) -> Result<(), FsOpsError> { + let file = + FileResource::get_file(state, rid).map_err(FsOpsError::Resource)?; file.unlock_sync()?; Ok(()) } @@ -1517,8 +1680,9 @@ pub fn op_fs_funlock_sync( pub async fn op_fs_funlock_async( state: Rc>, #[smi] rid: ResourceId, -) -> Result<(), AnyError> { - let file = FileResource::get_file(&state.borrow(), rid)?; +) -> Result<(), FsOpsError> { + let file = FileResource::get_file(&state.borrow(), rid) + .map_err(FsOpsError::Resource)?; file.unlock_async().await?; Ok(()) } @@ -1528,8 +1692,9 @@ pub fn op_fs_ftruncate_sync( state: &mut OpState, #[smi] rid: ResourceId, #[number] len: u64, -) -> Result<(), AnyError> { - let file = FileResource::get_file(state, rid)?; +) -> Result<(), FsOpsError> { + let file = + FileResource::get_file(state, rid).map_err(FsOpsError::Resource)?; file.truncate_sync(len)?; Ok(()) } @@ -1539,8 +1704,9 @@ pub async fn op_fs_file_truncate_async( state: Rc>, #[smi] rid: ResourceId, #[number] len: u64, -) -> Result<(), AnyError> { - let file = FileResource::get_file(&state.borrow(), rid)?; +) -> Result<(), FsOpsError> { + let file = FileResource::get_file(&state.borrow(), rid) + .map_err(FsOpsError::Resource)?; file.truncate_async(len).await?; Ok(()) } @@ -1553,8 +1719,9 @@ pub fn op_fs_futime_sync( #[smi] atime_nanos: u32, #[number] mtime_secs: i64, #[smi] mtime_nanos: u32, -) -> Result<(), AnyError> { - let file = FileResource::get_file(state, rid)?; +) -> Result<(), FsOpsError> { + let file = + 
FileResource::get_file(state, rid).map_err(FsOpsError::Resource)?; file.utime_sync(atime_secs, atime_nanos, mtime_secs, mtime_nanos)?; Ok(()) } @@ -1567,42 +1734,64 @@ pub async fn op_fs_futime_async( #[smi] atime_nanos: u32, #[number] mtime_secs: i64, #[smi] mtime_nanos: u32, -) -> Result<(), AnyError> { - let file = FileResource::get_file(&state.borrow(), rid)?; +) -> Result<(), FsOpsError> { + let file = FileResource::get_file(&state.borrow(), rid) + .map_err(FsOpsError::Resource)?; file .utime_async(atime_secs, atime_nanos, mtime_secs, mtime_nanos) .await?; Ok(()) } -trait WithContext { - fn context>>( - self, - desc: E, - ) -> AnyError; +#[derive(Debug)] +pub struct OperationError { + operation: &'static str, + kind: OperationErrorKind, + pub err: FsError, } -impl WithContext for FsError { - fn context>>( - self, - desc: E, - ) -> AnyError { - match self { - FsError::Io(io) => { - AnyError::new(io::Error::new(io.kind(), desc)).context(io) +impl std::fmt::Display for OperationError { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + if let FsError::Io(e) = &self.err { + std::fmt::Display::fmt(&e, f)?; + f.write_str(": ")?; + } + + f.write_str(self.operation)?; + + match &self.kind { + OperationErrorKind::Bare => Ok(()), + OperationErrorKind::WithPath(path) => write!(f, " '{}'", path.display()), + OperationErrorKind::WithTwoPaths(from, to) => { + write!(f, " '{}' -> '{}'", from.display(), to.display()) } - _ => self.into(), } } } +impl std::error::Error for OperationError { + fn source(&self) -> Option<&(dyn Error + 'static)> { + if let FsError::Io(err) = &self.err { + Some(err) + } else { + None + } + } +} + +#[derive(Debug)] +pub enum OperationErrorKind { + Bare, + WithPath(PathBuf), + WithTwoPaths(PathBuf, PathBuf), +} + trait MapErrContext { type R; - fn context_fn(self, f: F) -> Self::R + fn context_fn(self, f: F) -> Self::R where - F: FnOnce() -> E, - E: Into>; + F: FnOnce(FsError) -> OperationError; fn context(self, desc: &'static str) -> 
Self::R; @@ -1617,25 +1806,29 @@ trait MapErrContext { } impl MapErrContext for Result { - type R = Result; + type R = Result; - fn context_fn(self, f: F) -> Self::R + fn context_fn(self, f: F) -> Self::R where - F: FnOnce() -> E, - E: Into>, + F: FnOnce(FsError) -> OperationError, { - self.map_err(|err| { - let message = f(); - err.context(message) - }) + self.map_err(|err| FsOpsError::OperationError(f(err))) } - fn context(self, desc: &'static str) -> Self::R { - self.context_fn(move || desc) + fn context(self, operation: &'static str) -> Self::R { + self.context_fn(move |err| OperationError { + operation, + kind: OperationErrorKind::Bare, + err, + }) } fn context_path(self, operation: &'static str, path: &Path) -> Self::R { - self.context_fn(|| format!("{operation} '{}'", path.display())) + self.context_fn(|err| OperationError { + operation, + kind: OperationErrorKind::WithPath(path.to_path_buf()), + err, + }) } fn context_two_path( @@ -1644,21 +1837,19 @@ impl MapErrContext for Result { oldpath: &Path, newpath: &Path, ) -> Self::R { - self.context_fn(|| { - format!( - "{operation} '{}' -> '{}'", - oldpath.display(), - newpath.display() - ) + self.context_fn(|err| OperationError { + operation, + kind: OperationErrorKind::WithTwoPaths( + oldpath.to_path_buf(), + newpath.to_path_buf(), + ), + err, }) } } -fn path_into_string(s: std::ffi::OsString) -> Result { - s.into_string().map_err(|s| { - let message = format!("File name or path {s:?} is not valid UTF-8"); - custom_error("InvalidData", message) - }) +fn path_into_string(s: std::ffi::OsString) -> Result { + s.into_string().map_err(FsOpsError::InvalidUtf8) } macro_rules! 
create_struct_writer { -- cgit v1.2.3 From 1bccf45ecb8b5ce2aa685d650c6654bf6c25e605 Mon Sep 17 00:00:00 2001 From: Nathan Whitaker <17734409+nathanwhit@users.noreply.github.com> Date: Fri, 18 Oct 2024 10:11:06 -0700 Subject: fix(ext/node): properly map reparse point error in readlink (#26375) --- ext/node/ops/winerror.rs | 1 + ext/node/polyfills/_fs/_fs_readlink.ts | 33 +++++++++++++++++++++------------ 2 files changed, 22 insertions(+), 12 deletions(-) (limited to 'ext') diff --git a/ext/node/ops/winerror.rs b/ext/node/ops/winerror.rs index c0d66f7d0..e9dbadb6f 100644 --- a/ext/node/ops/winerror.rs +++ b/ext/node/ops/winerror.rs @@ -66,6 +66,7 @@ pub fn op_node_sys_to_uv_error(err: i32) -> String { ERROR_INVALID_PARAMETER => "EINVAL", WSAEINVAL => "EINVAL", WSAEPFNOSUPPORT => "EINVAL", + ERROR_NOT_A_REPARSE_POINT => "EINVAL", ERROR_BEGINNING_OF_MEDIA => "EIO", ERROR_BUS_RESET => "EIO", ERROR_CRC => "EIO", diff --git a/ext/node/polyfills/_fs/_fs_readlink.ts b/ext/node/polyfills/_fs/_fs_readlink.ts index 5f2312798..08bea843f 100644 --- a/ext/node/polyfills/_fs/_fs_readlink.ts +++ b/ext/node/polyfills/_fs/_fs_readlink.ts @@ -4,13 +4,10 @@ // deno-lint-ignore-file prefer-primordials import { TextEncoder } from "ext:deno_web/08_text_encoding.js"; -import { - intoCallbackAPIWithIntercept, - MaybeEmpty, - notImplemented, -} from "ext:deno_node/_utils.ts"; +import { MaybeEmpty, notImplemented } from "ext:deno_node/_utils.ts"; import { pathFromURL } from "ext:deno_web/00_infra.js"; import { promisify } from "ext:deno_node/internal/util.mjs"; +import { denoErrorToNodeError } from "ext:deno_node/internal/errors.ts"; type ReadlinkCallback = ( err: MaybeEmpty, @@ -69,12 +66,17 @@ export function readlink( const encoding = getEncoding(optOrCallback); - intoCallbackAPIWithIntercept( - Deno.readLink, - (data: string): string | Uint8Array => maybeEncode(data, encoding), - cb, - path, - ); + Deno.readLink(path).then((data: string) => { + const res = maybeEncode(data, encoding); + 
if (cb) cb(null, res); + }, (err: Error) => { + if (cb) { + (cb as (e: Error) => void)(denoErrorToNodeError(err, { + syscall: "readlink", + path, + })); + } + }); } export const readlinkPromise = promisify(readlink) as ( @@ -88,5 +90,12 @@ export function readlinkSync( ): string | Uint8Array { path = path instanceof URL ? pathFromURL(path) : path; - return maybeEncode(Deno.readLinkSync(path), getEncoding(opt)); + try { + return maybeEncode(Deno.readLinkSync(path), getEncoding(opt)); + } catch (error) { + throw denoErrorToNodeError(error, { + syscall: "readlink", + path, + }); + } } -- cgit v1.2.3 From d047cab14b754d20a43c7119e327b451440aaed9 Mon Sep 17 00:00:00 2001 From: Leo Kettmeir Date: Fri, 18 Oct 2024 12:30:46 -0700 Subject: refactor(ext/websocket): use concrete error type (#26226) --- ext/http/http_next.rs | 6 +- ext/http/lib.rs | 8 +- ext/websocket/Cargo.toml | 1 + ext/websocket/lib.rs | 189 +++++++++++++++++++++++++++-------------------- 4 files changed, 118 insertions(+), 86 deletions(-) (limited to 'ext') diff --git a/ext/http/http_next.rs b/ext/http/http_next.rs index abc54c899..7a6cbfa45 100644 --- a/ext/http/http_next.rs +++ b/ext/http/http_next.rs @@ -246,7 +246,11 @@ pub async fn op_http_upgrade_websocket_next( // Stage 3: take the extracted raw network stream and upgrade it to a websocket, then return it let (stream, bytes) = extract_network_stream(upgraded); - ws_create_server_stream(&mut state.borrow_mut(), stream, bytes) + Ok(ws_create_server_stream( + &mut state.borrow_mut(), + stream, + bytes, + )) } #[op2(fast)] diff --git a/ext/http/lib.rs b/ext/http/lib.rs index 934f8a002..5461713aa 100644 --- a/ext/http/lib.rs +++ b/ext/http/lib.rs @@ -1053,9 +1053,11 @@ async fn op_http_upgrade_websocket( let (transport, bytes) = extract_network_stream(hyper_v014::upgrade::on(request).await?); - let ws_rid = - ws_create_server_stream(&mut state.borrow_mut(), transport, bytes)?; - Ok(ws_rid) + Ok(ws_create_server_stream( + &mut state.borrow_mut(), + 
transport, + bytes, + )) } // Needed so hyper can use non Send futures diff --git a/ext/websocket/Cargo.toml b/ext/websocket/Cargo.toml index b265ba78a..ec359100d 100644 --- a/ext/websocket/Cargo.toml +++ b/ext/websocket/Cargo.toml @@ -28,4 +28,5 @@ hyper-util.workspace = true once_cell.workspace = true rustls-tokio-stream.workspace = true serde.workspace = true +thiserror.workspace = true tokio.workspace = true diff --git a/ext/websocket/lib.rs b/ext/websocket/lib.rs index b8043516b..2a67ac5a1 100644 --- a/ext/websocket/lib.rs +++ b/ext/websocket/lib.rs @@ -1,10 +1,6 @@ // Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. use crate::stream::WebSocketStream; use bytes::Bytes; -use deno_core::anyhow::bail; -use deno_core::error::invalid_hostname; -use deno_core::error::type_error; -use deno_core::error::AnyError; use deno_core::futures::TryFutureExt; use deno_core::op2; use deno_core::unsync::spawn; @@ -43,7 +39,6 @@ use serde::Serialize; use std::borrow::Cow; use std::cell::Cell; use std::cell::RefCell; -use std::fmt; use std::future::Future; use std::num::NonZeroUsize; use std::path::PathBuf; @@ -75,11 +70,33 @@ static USE_WRITEV: Lazy = Lazy::new(|| { false }); +#[derive(Debug, thiserror::Error)] +pub enum WebsocketError { + #[error(transparent)] + Url(url::ParseError), + #[error(transparent)] + Permission(deno_core::error::AnyError), + #[error(transparent)] + Resource(deno_core::error::AnyError), + #[error(transparent)] + Uri(#[from] http::uri::InvalidUri), + #[error("{0}")] + Io(#[from] std::io::Error), + #[error(transparent)] + WebSocket(#[from] fastwebsockets::WebSocketError), + #[error("failed to connect to WebSocket: {0}")] + ConnectionFailed(#[from] HandshakeError), + #[error(transparent)] + Canceled(#[from] deno_core::Canceled), +} + #[derive(Clone)] pub struct WsRootStoreProvider(Option>); impl WsRootStoreProvider { - pub fn get_or_try_init(&self) -> Result, AnyError> { + pub fn get_or_try_init( + &self, + ) -> Result, 
deno_core::error::AnyError> { Ok(match &self.0 { Some(provider) => Some(provider.get_or_try_init()?.clone()), None => None, @@ -95,7 +112,7 @@ pub trait WebSocketPermissions { &mut self, _url: &url::Url, _api_name: &str, - ) -> Result<(), AnyError>; + ) -> Result<(), deno_core::error::AnyError>; } impl WebSocketPermissions for deno_permissions::PermissionsContainer { @@ -104,7 +121,7 @@ impl WebSocketPermissions for deno_permissions::PermissionsContainer { &mut self, url: &url::Url, api_name: &str, - ) -> Result<(), AnyError> { + ) -> Result<(), deno_core::error::AnyError> { deno_permissions::PermissionsContainer::check_net_url(self, url, api_name) } } @@ -137,13 +154,17 @@ pub fn op_ws_check_permission_and_cancel_handle( #[string] api_name: String, #[string] url: String, cancel_handle: bool, -) -> Result, AnyError> +) -> Result, WebsocketError> where WP: WebSocketPermissions + 'static, { state .borrow_mut::() - .check_net_url(&url::Url::parse(&url)?, &api_name)?; + .check_net_url( + &url::Url::parse(&url).map_err(WebsocketError::Url)?, + &api_name, + ) + .map_err(WebsocketError::Permission)?; if cancel_handle { let rid = state @@ -163,16 +184,46 @@ pub struct CreateResponse { extensions: String, } +#[derive(Debug, thiserror::Error)] +pub enum HandshakeError { + #[error("Missing path in url")] + MissingPath, + #[error("Invalid status code {0}")] + InvalidStatusCode(StatusCode), + #[error(transparent)] + Http(#[from] http::Error), + #[error(transparent)] + WebSocket(#[from] fastwebsockets::WebSocketError), + #[error("Didn't receive h2 alpn, aborting connection")] + NoH2Alpn, + #[error(transparent)] + Rustls(#[from] deno_tls::rustls::Error), + #[error(transparent)] + Io(#[from] std::io::Error), + #[error(transparent)] + H2(#[from] h2::Error), + #[error("Invalid hostname: '{0}'")] + InvalidHostname(String), + #[error(transparent)] + RootStoreError(deno_core::error::AnyError), + #[error(transparent)] + Tls(deno_tls::TlsError), + #[error(transparent)] + 
HeaderName(#[from] http::header::InvalidHeaderName), + #[error(transparent)] + HeaderValue(#[from] http::header::InvalidHeaderValue), +} + async fn handshake_websocket( state: &Rc>, uri: &Uri, protocols: &str, headers: Option>, -) -> Result<(WebSocket, http::HeaderMap), AnyError> { +) -> Result<(WebSocket, http::HeaderMap), HandshakeError> { let mut request = Request::builder().method(Method::GET).uri( uri .path_and_query() - .ok_or(type_error("Missing path in url".to_string()))? + .ok_or(HandshakeError::MissingPath)? .as_str(), ); @@ -194,7 +245,9 @@ async fn handshake_websocket( request = populate_common_request_headers(request, &user_agent, protocols, &headers)?; - let request = request.body(http_body_util::Empty::new())?; + let request = request + .body(http_body_util::Empty::new()) + .map_err(HandshakeError::Http)?; let domain = &uri.host().unwrap().to_string(); let port = &uri.port_u16().unwrap_or(match uri.scheme_str() { Some("wss") => 443, @@ -231,7 +284,7 @@ async fn handshake_websocket( async fn handshake_http1_ws( request: Request>, addr: &String, -) -> Result<(WebSocket, http::HeaderMap), AnyError> { +) -> Result<(WebSocket, http::HeaderMap), HandshakeError> { let tcp_socket = TcpStream::connect(addr).await?; handshake_connection(request, tcp_socket).await } @@ -241,11 +294,11 @@ async fn handshake_http1_wss( request: Request>, domain: &str, addr: &str, -) -> Result<(WebSocket, http::HeaderMap), AnyError> { +) -> Result<(WebSocket, http::HeaderMap), HandshakeError> { let tcp_socket = TcpStream::connect(addr).await?; let tls_config = create_ws_client_config(state, SocketUse::Http1Only)?; let dnsname = ServerName::try_from(domain.to_string()) - .map_err(|_| invalid_hostname(domain))?; + .map_err(|_| HandshakeError::InvalidHostname(domain.to_string()))?; let mut tls_connector = TlsStream::new_client_side( tcp_socket, ClientConnection::new(tls_config.into(), dnsname)?, @@ -266,11 +319,11 @@ async fn handshake_http2_wss( domain: &str, headers: &Option>, 
addr: &str, -) -> Result<(WebSocket, http::HeaderMap), AnyError> { +) -> Result<(WebSocket, http::HeaderMap), HandshakeError> { let tcp_socket = TcpStream::connect(addr).await?; let tls_config = create_ws_client_config(state, SocketUse::Http2Only)?; let dnsname = ServerName::try_from(domain.to_string()) - .map_err(|_| invalid_hostname(domain))?; + .map_err(|_| HandshakeError::InvalidHostname(domain.to_string()))?; // We need to better expose the underlying errors here let mut tls_connector = TlsStream::new_client_side( tcp_socket, @@ -279,7 +332,7 @@ async fn handshake_http2_wss( ); let handshake = tls_connector.handshake().await?; if handshake.alpn.is_none() { - bail!("Didn't receive h2 alpn, aborting connection"); + return Err(HandshakeError::NoH2Alpn); } let h2 = h2::client::Builder::new(); let (mut send, conn) = h2.handshake::<_, Bytes>(tls_connector).await?; @@ -298,7 +351,7 @@ async fn handshake_http2_wss( let (resp, send) = send.send_request(request.body(())?, false)?; let resp = resp.await?; if resp.status() != StatusCode::OK { - bail!("Invalid status code: {}", resp.status()); + return Err(HandshakeError::InvalidStatusCode(resp.status())); } let (http::response::Parts { headers, .. 
}, recv) = resp.into_parts(); let mut stream = WebSocket::after_handshake( @@ -317,7 +370,7 @@ async fn handshake_connection< >( request: Request>, socket: S, -) -> Result<(WebSocket, http::HeaderMap), AnyError> { +) -> Result<(WebSocket, http::HeaderMap), HandshakeError> { let (upgraded, response) = fastwebsockets::handshake::client(&LocalExecutor, request, socket).await?; @@ -332,7 +385,7 @@ async fn handshake_connection< pub fn create_ws_client_config( state: &Rc>, socket_use: SocketUse, -) -> Result { +) -> Result { let unsafely_ignore_certificate_errors: Option> = state .borrow() .try_borrow::() @@ -340,7 +393,8 @@ pub fn create_ws_client_config( let root_cert_store = state .borrow() .borrow::() - .get_or_try_init()?; + .get_or_try_init() + .map_err(HandshakeError::RootStoreError)?; create_client_config( root_cert_store, @@ -349,7 +403,7 @@ pub fn create_ws_client_config( TlsKeys::Null, socket_use, ) - .map_err(|e| e.into()) + .map_err(HandshakeError::Tls) } /// Headers common to both http/1.1 and h2 requests. 
@@ -358,7 +412,7 @@ fn populate_common_request_headers( user_agent: &str, protocols: &str, headers: &Option>, -) -> Result { +) -> Result { request = request .header("User-Agent", user_agent) .header("Sec-WebSocket-Version", "13"); @@ -369,10 +423,8 @@ fn populate_common_request_headers( if let Some(headers) = headers { for (key, value) in headers { - let name = HeaderName::from_bytes(key) - .map_err(|err| type_error(err.to_string()))?; - let v = HeaderValue::from_bytes(value) - .map_err(|err| type_error(err.to_string()))?; + let name = HeaderName::from_bytes(key)?; + let v = HeaderValue::from_bytes(value)?; let is_disallowed_header = matches!( name, @@ -402,14 +454,17 @@ pub async fn op_ws_create( #[string] protocols: String, #[smi] cancel_handle: Option, #[serde] headers: Option>, -) -> Result +) -> Result where WP: WebSocketPermissions + 'static, { { let mut s = state.borrow_mut(); s.borrow_mut::() - .check_net_url(&url::Url::parse(&url)?, &api_name) + .check_net_url( + &url::Url::parse(&url).map_err(WebsocketError::Url)?, + &api_name, + ) .expect( "Permission check should have been done in op_ws_check_permission", ); @@ -419,7 +474,8 @@ where let r = state .borrow_mut() .resource_table - .get::(cancel_rid)?; + .get::(cancel_rid) + .map_err(WebsocketError::Resource)?; Some(r.0.clone()) } else { None @@ -428,15 +484,11 @@ where let uri: Uri = url.parse()?; let handshake = handshake_websocket(&state, &uri, &protocols, headers) - .map_err(|err| { - AnyError::from(DomExceptionNetworkError::new(&format!( - "failed to connect to WebSocket: {err}" - ))) - }); + .map_err(WebsocketError::ConnectionFailed); let (stream, response) = match cancel_resource { - Some(rc) => handshake.try_or_cancel(rc).await, - None => handshake.await, - }?; + Some(rc) => handshake.try_or_cancel(rc).await?, + None => handshake.await?, + }; if let Some(cancel_rid) = cancel_handle { if let Ok(res) = state.borrow_mut().resource_table.take_any(cancel_rid) { @@ -521,14 +573,12 @@ impl 
ServerWebSocket { self: &Rc, lock: AsyncMutFuture>>, frame: Frame<'_>, - ) -> Result<(), AnyError> { + ) -> Result<(), WebsocketError> { let mut ws = lock.await; if ws.is_closed() { return Ok(()); } - ws.write_frame(frame) - .await - .map_err(|err| type_error(err.to_string()))?; + ws.write_frame(frame).await?; Ok(()) } } @@ -543,7 +593,7 @@ pub fn ws_create_server_stream( state: &mut OpState, transport: NetworkStream, read_buf: Bytes, -) -> Result { +) -> ResourceId { let mut ws = WebSocket::after_handshake( WebSocketStream::new( stream::WsStreamKind::Network(transport), @@ -555,8 +605,7 @@ pub fn ws_create_server_stream( ws.set_auto_close(true); ws.set_auto_pong(true); - let rid = state.resource_table.add(ServerWebSocket::new(ws)); - Ok(rid) + state.resource_table.add(ServerWebSocket::new(ws)) } fn send_binary(state: &mut OpState, rid: ResourceId, data: &[u8]) { @@ -626,11 +675,12 @@ pub async fn op_ws_send_binary_async( state: Rc>, #[smi] rid: ResourceId, #[buffer] data: JsBuffer, -) -> Result<(), AnyError> { +) -> Result<(), WebsocketError> { let resource = state .borrow_mut() .resource_table - .get::(rid)?; + .get::(rid) + .map_err(WebsocketError::Resource)?; let data = data.to_vec(); let lock = resource.reserve_lock(); resource @@ -644,11 +694,12 @@ pub async fn op_ws_send_text_async( state: Rc>, #[smi] rid: ResourceId, #[string] data: String, -) -> Result<(), AnyError> { +) -> Result<(), WebsocketError> { let resource = state .borrow_mut() .resource_table - .get::(rid)?; + .get::(rid) + .map_err(WebsocketError::Resource)?; let lock = resource.reserve_lock(); resource .write_frame( @@ -678,11 +729,12 @@ pub fn op_ws_get_buffered_amount( pub async fn op_ws_send_ping( state: Rc>, #[smi] rid: ResourceId, -) -> Result<(), AnyError> { +) -> Result<(), WebsocketError> { let resource = state .borrow_mut() .resource_table - .get::(rid)?; + .get::(rid) + .map_err(WebsocketError::Resource)?; let lock = resource.reserve_lock(); resource .write_frame( @@ -698,7 +750,7 @@ 
pub async fn op_ws_close( #[smi] rid: ResourceId, #[smi] code: Option, #[string] reason: Option, -) -> Result<(), AnyError> { +) -> Result<(), WebsocketError> { let Ok(resource) = state .borrow_mut() .resource_table @@ -713,8 +765,7 @@ pub async fn op_ws_close( resource.closed.set(true); let lock = resource.reserve_lock(); - resource.write_frame(lock, frame).await?; - Ok(()) + resource.write_frame(lock, frame).await } #[op2] @@ -868,32 +919,6 @@ pub fn get_declaration() -> PathBuf { PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("lib.deno_websocket.d.ts") } -#[derive(Debug)] -pub struct DomExceptionNetworkError { - pub msg: String, -} - -impl DomExceptionNetworkError { - pub fn new(msg: &str) -> Self { - DomExceptionNetworkError { - msg: msg.to_string(), - } - } -} - -impl fmt::Display for DomExceptionNetworkError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.pad(&self.msg) - } -} - -impl std::error::Error for DomExceptionNetworkError {} - -pub fn get_network_error_class_name(e: &AnyError) -> Option<&'static str> { - e.downcast_ref::() - .map(|_| "DOMExceptionNetworkError") -} - // Needed so hyper can use non Send futures #[derive(Clone)] struct LocalExecutor; -- cgit v1.2.3 From d48434e91fcb2945f103521fde782adad5742c63 Mon Sep 17 00:00:00 2001 From: Nathan Whitaker <17734409+nathanwhit@users.noreply.github.com> Date: Fri, 18 Oct 2024 14:39:32 -0700 Subject: fix(ext/node): stub HTTPParser internal binding (#26401) Fixes https://github.com/denoland/deno/issues/26394. 
--- ext/node/lib.rs | 1 + ext/node/polyfills/internal_binding/http_parser.ts | 159 +++++++++++++++++++++ ext/node/polyfills/internal_binding/mod.ts | 3 +- 3 files changed, 162 insertions(+), 1 deletion(-) create mode 100644 ext/node/polyfills/internal_binding/http_parser.ts (limited to 'ext') diff --git a/ext/node/lib.rs b/ext/node/lib.rs index 03462f36f..9b22add45 100644 --- a/ext/node/lib.rs +++ b/ext/node/lib.rs @@ -469,6 +469,7 @@ deno_core::extension!(deno_node, "internal_binding/constants.ts", "internal_binding/crypto.ts", "internal_binding/handle_wrap.ts", + "internal_binding/http_parser.ts", "internal_binding/mod.ts", "internal_binding/node_file.ts", "internal_binding/node_options.ts", diff --git a/ext/node/polyfills/internal_binding/http_parser.ts b/ext/node/polyfills/internal_binding/http_parser.ts new file mode 100644 index 000000000..ca4f896e2 --- /dev/null +++ b/ext/node/polyfills/internal_binding/http_parser.ts @@ -0,0 +1,159 @@ +// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. +// Copyright Joyent, Inc. and other Node contributors. +// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to permit +// persons to whom the Software is furnished to do so, subject to the +// following conditions: +// +// The above copyright notice and this permission notice shall be included +// in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN +// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE +// USE OR OTHER DEALINGS IN THE SOFTWARE. + +import { primordials } from "ext:core/mod.js"; +import { AsyncWrap } from "ext:deno_node/internal_binding/async_wrap.ts"; + +const { + ObjectDefineProperty, + ObjectEntries, + ObjectSetPrototypeOf, + SafeArrayIterator, +} = primordials; + +export const methods = [ + "DELETE", + "GET", + "HEAD", + "POST", + "PUT", + "CONNECT", + "OPTIONS", + "TRACE", + "COPY", + "LOCK", + "MKCOL", + "MOVE", + "PROPFIND", + "PROPPATCH", + "SEARCH", + "UNLOCK", + "BIND", + "REBIND", + "UNBIND", + "ACL", + "REPORT", + "MKACTIVITY", + "CHECKOUT", + "MERGE", + "M-SEARCH", + "NOTIFY", + "SUBSCRIBE", + "UNSUBSCRIBE", + "PATCH", + "PURGE", + "MKCALENDAR", + "LINK", + "UNLINK", + "SOURCE", + "QUERY", +]; + +export const allMethods = [ + "DELETE", + "GET", + "HEAD", + "POST", + "PUT", + "CONNECT", + "OPTIONS", + "TRACE", + "COPY", + "LOCK", + "MKCOL", + "MOVE", + "PROPFIND", + "PROPPATCH", + "SEARCH", + "UNLOCK", + "BIND", + "REBIND", + "UNBIND", + "ACL", + "REPORT", + "MKACTIVITY", + "CHECKOUT", + "MERGE", + "M-SEARCH", + "NOTIFY", + "SUBSCRIBE", + "UNSUBSCRIBE", + "PATCH", + "PURGE", + "MKCALENDAR", + "LINK", + "UNLINK", + "SOURCE", + "PRI", + "DESCRIBE", + "ANNOUNCE", + "SETUP", + "PLAY", + "PAUSE", + "TEARDOWN", + "GET_PARAMETER", + "SET_PARAMETER", + "REDIRECT", + "RECORD", + "FLUSH", + "QUERY", +]; + +export function HTTPParser() { +} + +ObjectSetPrototypeOf(HTTPParser.prototype, AsyncWrap.prototype); + +function defineProps(obj: object, props: Record) { + for (const entry of new SafeArrayIterator(ObjectEntries(props))) { + ObjectDefineProperty(obj, entry[0], { + value: entry[1], + enumerable: true, + writable: true, + configurable: true, + }); + } +} + +defineProps(HTTPParser, { + REQUEST: 1, + RESPONSE: 2, + 
kOnMessageBegin: 0, + kOnHeaders: 1, + kOnHeadersComplete: 2, + kOnBody: 3, + kOnMessageComplete: 4, + kOnExecute: 5, + kOnTimeout: 6, + kLenientNone: 0, + kLenientHeaders: 1, + kLenientChunkedLength: 2, + kLenientKeepAlive: 4, + kLenientTransferEncoding: 8, + kLenientVersion: 16, + kLenientDataAfterClose: 32, + kLenientOptionalLFAfterCR: 64, + kLenientOptionalCRLFAfterChunk: 128, + kLenientOptionalCRBeforeLF: 256, + kLenientSpacesAfterChunkSize: 512, + kLenientAll: 1023, +}); diff --git a/ext/node/polyfills/internal_binding/mod.ts b/ext/node/polyfills/internal_binding/mod.ts index f2d7f55bc..ebbfc629f 100644 --- a/ext/node/polyfills/internal_binding/mod.ts +++ b/ext/node/polyfills/internal_binding/mod.ts @@ -17,6 +17,7 @@ import * as types from "ext:deno_node/internal_binding/types.ts"; import * as udpWrap from "ext:deno_node/internal_binding/udp_wrap.ts"; import * as util from "ext:deno_node/internal_binding/util.ts"; import * as uv from "ext:deno_node/internal_binding/uv.ts"; +import * as httpParser from "ext:deno_node/internal_binding/http_parser.ts"; const modules = { "async_wrap": asyncWrap, @@ -32,7 +33,7 @@ const modules = { "fs_dir": {}, "fs_event_wrap": {}, "heap_utils": {}, - "http_parser": {}, + "http_parser": httpParser, icu: {}, inspector: {}, "js_stream": {}, -- cgit v1.2.3 From 8ca8174c81a3de35bcb02fc371c90f9d0a7303ab Mon Sep 17 00:00:00 2001 From: Leo Kettmeir Date: Fri, 18 Oct 2024 15:23:20 -0700 Subject: refactor(ext/crypto): use concrete error types (#26167) --- ext/crypto/Cargo.toml | 2 + ext/crypto/decrypt.rs | 124 ++++++++++--------- ext/crypto/ed25519.rs | 22 ++-- ext/crypto/encrypt.rs | 74 +++++++----- ext/crypto/export_key.rs | 79 +++++-------- ext/crypto/generate_key.rs | 51 +++++--- ext/crypto/import_key.rs | 288 +++++++++++++++++++++++++-------------------- ext/crypto/lib.rs | 272 ++++++++++++++++++++++++------------------ ext/crypto/shared.rs | 87 ++++++++------ ext/crypto/x25519.rs | 18 +-- ext/crypto/x448.rs | 19 +-- 11 files 
changed, 578 insertions(+), 458 deletions(-) (limited to 'ext') diff --git a/ext/crypto/Cargo.toml b/ext/crypto/Cargo.toml index e3b59ed46..2f970ca53 100644 --- a/ext/crypto/Cargo.toml +++ b/ext/crypto/Cargo.toml @@ -41,5 +41,7 @@ sha1.workspace = true sha2.workspace = true signature.workspace = true spki.workspace = true +thiserror.workspace = true +tokio.workspace = true uuid.workspace = true x25519-dalek = "2.0.0" diff --git a/ext/crypto/decrypt.rs b/ext/crypto/decrypt.rs index 9b104e178..114047518 100644 --- a/ext/crypto/decrypt.rs +++ b/ext/crypto/decrypt.rs @@ -16,9 +16,6 @@ use ctr::cipher::StreamCipher; use ctr::Ctr128BE; use ctr::Ctr32BE; use ctr::Ctr64BE; -use deno_core::error::custom_error; -use deno_core::error::type_error; -use deno_core::error::AnyError; use deno_core::op2; use deno_core::unsync::spawn_blocking; use deno_core::JsBuffer; @@ -73,12 +70,36 @@ pub enum DecryptAlgorithm { }, } +#[derive(Debug, thiserror::Error)] +pub enum DecryptError { + #[error(transparent)] + General(#[from] SharedError), + #[error(transparent)] + Pkcs1(#[from] rsa::pkcs1::Error), + #[error("Decryption failed")] + Failed, + #[error("invalid length")] + InvalidLength, + #[error("invalid counter length. 
Currently supported 32/64/128 bits")] + InvalidCounterLength, + #[error("tag length not equal to 128")] + InvalidTagLength, + #[error("invalid key or iv")] + InvalidKeyOrIv, + #[error("tried to decrypt too much data")] + TooMuchData, + #[error("iv length not equal to 12 or 16")] + InvalidIvLength, + #[error("{0}")] + Rsa(rsa::Error), +} + #[op2(async)] #[serde] pub async fn op_crypto_decrypt( #[serde] opts: DecryptOptions, #[buffer] data: JsBuffer, -) -> Result { +) -> Result { let key = opts.key; let fun = move || match opts.algorithm { DecryptAlgorithm::RsaOaep { hash, label } => { @@ -108,7 +129,7 @@ fn decrypt_rsa_oaep( hash: ShaHash, label: Vec, data: &[u8], -) -> Result, deno_core::anyhow::Error> { +) -> Result, DecryptError> { let key = key.as_rsa_private_key()?; let private_key = rsa::RsaPrivateKey::from_pkcs1_der(key)?; @@ -139,7 +160,7 @@ fn decrypt_rsa_oaep( private_key .decrypt(padding, data) - .map_err(|e| custom_error("DOMExceptionOperationError", e.to_string())) + .map_err(DecryptError::Rsa) } fn decrypt_aes_cbc( @@ -147,7 +168,7 @@ fn decrypt_aes_cbc( length: usize, iv: Vec, data: &[u8], -) -> Result, deno_core::anyhow::Error> { +) -> Result, DecryptError> { let key = key.as_secret_key()?; // 2. @@ -155,53 +176,32 @@ fn decrypt_aes_cbc( 128 => { // Section 10.3 Step 2 of RFC 2315 https://www.rfc-editor.org/rfc/rfc2315 type Aes128CbcDec = cbc::Decryptor; - let cipher = Aes128CbcDec::new_from_slices(key, &iv).map_err(|_| { - custom_error( - "DOMExceptionOperationError", - "Invalid key or iv".to_string(), - ) - })?; + let cipher = Aes128CbcDec::new_from_slices(key, &iv) + .map_err(|_| DecryptError::InvalidKeyOrIv)?; - cipher.decrypt_padded_vec_mut::(data).map_err(|_| { - custom_error( - "DOMExceptionOperationError", - "Decryption failed".to_string(), - ) - })? + cipher + .decrypt_padded_vec_mut::(data) + .map_err(|_| DecryptError::Failed)? 
} 192 => { // Section 10.3 Step 2 of RFC 2315 https://www.rfc-editor.org/rfc/rfc2315 type Aes192CbcDec = cbc::Decryptor; - let cipher = Aes192CbcDec::new_from_slices(key, &iv).map_err(|_| { - custom_error( - "DOMExceptionOperationError", - "Invalid key or iv".to_string(), - ) - })?; + let cipher = Aes192CbcDec::new_from_slices(key, &iv) + .map_err(|_| DecryptError::InvalidKeyOrIv)?; - cipher.decrypt_padded_vec_mut::(data).map_err(|_| { - custom_error( - "DOMExceptionOperationError", - "Decryption failed".to_string(), - ) - })? + cipher + .decrypt_padded_vec_mut::(data) + .map_err(|_| DecryptError::Failed)? } 256 => { // Section 10.3 Step 2 of RFC 2315 https://www.rfc-editor.org/rfc/rfc2315 type Aes256CbcDec = cbc::Decryptor; - let cipher = Aes256CbcDec::new_from_slices(key, &iv).map_err(|_| { - custom_error( - "DOMExceptionOperationError", - "Invalid key or iv".to_string(), - ) - })?; + let cipher = Aes256CbcDec::new_from_slices(key, &iv) + .map_err(|_| DecryptError::InvalidKeyOrIv)?; - cipher.decrypt_padded_vec_mut::(data).map_err(|_| { - custom_error( - "DOMExceptionOperationError", - "Decryption failed".to_string(), - ) - })? + cipher + .decrypt_padded_vec_mut::(data) + .map_err(|_| DecryptError::Failed)? 
} _ => unreachable!(), }; @@ -214,7 +214,7 @@ fn decrypt_aes_ctr_gen( key: &[u8], counter: &[u8], data: &[u8], -) -> Result, AnyError> +) -> Result, DecryptError> where B: KeyIvInit + StreamCipher, { @@ -223,7 +223,7 @@ where let mut plaintext = data.to_vec(); cipher .try_apply_keystream(&mut plaintext) - .map_err(|_| operation_error("tried to decrypt too much data"))?; + .map_err(|_| DecryptError::TooMuchData)?; Ok(plaintext) } @@ -235,12 +235,12 @@ fn decrypt_aes_gcm_gen>( length: usize, additional_data: Vec, plaintext: &mut [u8], -) -> Result<(), AnyError> { +) -> Result<(), DecryptError> { let nonce = Nonce::from_slice(nonce); match length { 128 => { let cipher = aes_gcm::AesGcm::::new_from_slice(key) - .map_err(|_| operation_error("Decryption failed"))?; + .map_err(|_| DecryptError::Failed)?; cipher .decrypt_in_place_detached( nonce, @@ -248,11 +248,11 @@ fn decrypt_aes_gcm_gen>( plaintext, tag, ) - .map_err(|_| operation_error("Decryption failed"))? + .map_err(|_| DecryptError::Failed)? } 192 => { let cipher = aes_gcm::AesGcm::::new_from_slice(key) - .map_err(|_| operation_error("Decryption failed"))?; + .map_err(|_| DecryptError::Failed)?; cipher .decrypt_in_place_detached( nonce, @@ -260,11 +260,11 @@ fn decrypt_aes_gcm_gen>( plaintext, tag, ) - .map_err(|_| operation_error("Decryption failed"))? + .map_err(|_| DecryptError::Failed)? } 256 => { let cipher = aes_gcm::AesGcm::::new_from_slice(key) - .map_err(|_| operation_error("Decryption failed"))?; + .map_err(|_| DecryptError::Failed)?; cipher .decrypt_in_place_detached( nonce, @@ -272,9 +272,9 @@ fn decrypt_aes_gcm_gen>( plaintext, tag, ) - .map_err(|_| operation_error("Decryption failed"))? + .map_err(|_| DecryptError::Failed)? 
} - _ => return Err(type_error("invalid length")), + _ => return Err(DecryptError::InvalidLength), }; Ok(()) @@ -286,7 +286,7 @@ fn decrypt_aes_ctr( counter: &[u8], ctr_length: usize, data: &[u8], -) -> Result, deno_core::anyhow::Error> { +) -> Result, DecryptError> { let key = key.as_secret_key()?; match ctr_length { @@ -294,23 +294,21 @@ fn decrypt_aes_ctr( 128 => decrypt_aes_ctr_gen::>(key, counter, data), 192 => decrypt_aes_ctr_gen::>(key, counter, data), 256 => decrypt_aes_ctr_gen::>(key, counter, data), - _ => Err(type_error("invalid length")), + _ => Err(DecryptError::InvalidLength), }, 64 => match key_length { 128 => decrypt_aes_ctr_gen::>(key, counter, data), 192 => decrypt_aes_ctr_gen::>(key, counter, data), 256 => decrypt_aes_ctr_gen::>(key, counter, data), - _ => Err(type_error("invalid length")), + _ => Err(DecryptError::InvalidLength), }, 128 => match key_length { 128 => decrypt_aes_ctr_gen::>(key, counter, data), 192 => decrypt_aes_ctr_gen::>(key, counter, data), 256 => decrypt_aes_ctr_gen::>(key, counter, data), - _ => Err(type_error("invalid length")), + _ => Err(DecryptError::InvalidLength), }, - _ => Err(type_error( - "invalid counter length. Currently supported 32/64/128 bits", - )), + _ => Err(DecryptError::InvalidCounterLength), } } @@ -321,7 +319,7 @@ fn decrypt_aes_gcm( iv: Vec, additional_data: Option>, data: &[u8], -) -> Result, AnyError> { +) -> Result, DecryptError> { let key = key.as_secret_key()?; let additional_data = additional_data.unwrap_or_default(); @@ -330,7 +328,7 @@ fn decrypt_aes_gcm( // Note that encryption won't fail, it instead truncates the tag // to the specified tag length as specified in the spec. 
if tag_length != 128 { - return Err(type_error("tag length not equal to 128")); + return Err(DecryptError::InvalidTagLength); } let sep = data.len() - (tag_length / 8); @@ -357,7 +355,7 @@ fn decrypt_aes_gcm( additional_data, &mut plaintext, )?, - _ => return Err(type_error("iv length not equal to 12 or 16")), + _ => return Err(DecryptError::InvalidIvLength), } Ok(plaintext) diff --git a/ext/crypto/ed25519.rs b/ext/crypto/ed25519.rs index 4f604fe51..da34b7d25 100644 --- a/ext/crypto/ed25519.rs +++ b/ext/crypto/ed25519.rs @@ -2,8 +2,6 @@ use base64::prelude::BASE64_URL_SAFE_NO_PAD; use base64::Engine; -use deno_core::error::custom_error; -use deno_core::error::AnyError; use deno_core::op2; use deno_core::ToJsBuffer; use elliptic_curve::pkcs8::PrivateKeyInfo; @@ -15,6 +13,16 @@ use spki::der::asn1::BitString; use spki::der::Decode; use spki::der::Encode; +#[derive(Debug, thiserror::Error)] +pub enum Ed25519Error { + #[error("Failed to export key")] + FailedExport, + #[error(transparent)] + Der(#[from] rsa::pkcs1::der::Error), + #[error(transparent)] + KeyRejected(#[from] ring::error::KeyRejected), +} + #[op2(fast)] pub fn op_crypto_generate_ed25519_keypair( #[buffer] pkey: &mut [u8], @@ -116,7 +124,7 @@ pub fn op_crypto_import_pkcs8_ed25519( #[serde] pub fn op_crypto_export_spki_ed25519( #[buffer] pubkey: &[u8], -) -> Result { +) -> Result { let key_info = spki::SubjectPublicKeyInfo { algorithm: spki::AlgorithmIdentifierOwned { // id-Ed25519 @@ -128,9 +136,7 @@ pub fn op_crypto_export_spki_ed25519( Ok( key_info .to_der() - .map_err(|_| { - custom_error("DOMExceptionOperationError", "Failed to export key") - })? + .map_err(|_| Ed25519Error::FailedExport)? 
.into(), ) } @@ -139,7 +145,7 @@ pub fn op_crypto_export_spki_ed25519( #[serde] pub fn op_crypto_export_pkcs8_ed25519( #[buffer] pkey: &[u8], -) -> Result { +) -> Result { use rsa::pkcs1::der::Encode; // This should probably use OneAsymmetricKey instead @@ -164,7 +170,7 @@ pub fn op_crypto_export_pkcs8_ed25519( #[string] pub fn op_crypto_jwk_x_ed25519( #[buffer] pkey: &[u8], -) -> Result { +) -> Result { let pair = Ed25519KeyPair::from_seed_unchecked(pkey)?; Ok(BASE64_URL_SAFE_NO_PAD.encode(pair.public_key().as_ref())) } diff --git a/ext/crypto/encrypt.rs b/ext/crypto/encrypt.rs index 204648e89..66b27657f 100644 --- a/ext/crypto/encrypt.rs +++ b/ext/crypto/encrypt.rs @@ -16,8 +16,6 @@ use aes_gcm::Nonce; use ctr::Ctr128BE; use ctr::Ctr32BE; use ctr::Ctr64BE; -use deno_core::error::type_error; -use deno_core::error::AnyError; use deno_core::op2; use deno_core::unsync::spawn_blocking; use deno_core::JsBuffer; @@ -73,12 +71,30 @@ pub enum EncryptAlgorithm { }, } +#[derive(Debug, thiserror::Error)] +pub enum EncryptError { + #[error(transparent)] + General(#[from] SharedError), + #[error("invalid length")] + InvalidLength, + #[error("invalid key or iv")] + InvalidKeyOrIv, + #[error("iv length not equal to 12 or 16")] + InvalidIvLength, + #[error("invalid counter length. 
Currently supported 32/64/128 bits")] + InvalidCounterLength, + #[error("tried to encrypt too much data")] + TooMuchData, + #[error("Encryption failed")] + Failed, +} + #[op2(async)] #[serde] pub async fn op_crypto_encrypt( #[serde] opts: EncryptOptions, #[buffer] data: JsBuffer, -) -> Result { +) -> Result { let key = opts.key; let fun = move || match opts.algorithm { EncryptAlgorithm::RsaOaep { hash, label } => { @@ -108,12 +124,12 @@ fn encrypt_rsa_oaep( hash: ShaHash, label: Vec, data: &[u8], -) -> Result, AnyError> { +) -> Result, EncryptError> { let label = String::from_utf8_lossy(&label).to_string(); let public_key = key.as_rsa_public_key()?; let public_key = rsa::RsaPublicKey::from_pkcs1_der(&public_key) - .map_err(|_| operation_error("failed to decode public key"))?; + .map_err(|_| SharedError::FailedDecodePublicKey)?; let mut rng = OsRng; let padding = match hash { ShaHash::Sha1 => rsa::Oaep { @@ -139,7 +155,7 @@ fn encrypt_rsa_oaep( }; let encrypted = public_key .encrypt(&mut rng, padding, data) - .map_err(|_| operation_error("Encryption failed"))?; + .map_err(|_| EncryptError::Failed)?; Ok(encrypted) } @@ -148,7 +164,7 @@ fn encrypt_aes_cbc( length: usize, iv: Vec, data: &[u8], -) -> Result, AnyError> { +) -> Result, EncryptError> { let key = key.as_secret_key()?; let ciphertext = match length { 128 => { @@ -156,7 +172,7 @@ fn encrypt_aes_cbc( type Aes128CbcEnc = cbc::Encryptor; let cipher = Aes128CbcEnc::new_from_slices(key, &iv) - .map_err(|_| operation_error("invalid key or iv".to_string()))?; + .map_err(|_| EncryptError::InvalidKeyOrIv)?; cipher.encrypt_padded_vec_mut::(data) } 192 => { @@ -164,7 +180,7 @@ fn encrypt_aes_cbc( type Aes192CbcEnc = cbc::Encryptor; let cipher = Aes192CbcEnc::new_from_slices(key, &iv) - .map_err(|_| operation_error("invalid key or iv".to_string()))?; + .map_err(|_| EncryptError::InvalidKeyOrIv)?; cipher.encrypt_padded_vec_mut::(data) } 256 => { @@ -172,10 +188,10 @@ fn encrypt_aes_cbc( type Aes256CbcEnc = cbc::Encryptor; 
let cipher = Aes256CbcEnc::new_from_slices(key, &iv) - .map_err(|_| operation_error("invalid key or iv".to_string()))?; + .map_err(|_| EncryptError::InvalidKeyOrIv)?; cipher.encrypt_padded_vec_mut::(data) } - _ => return Err(type_error("invalid length")), + _ => return Err(EncryptError::InvalidLength), }; Ok(ciphertext) } @@ -186,31 +202,31 @@ fn encrypt_aes_gcm_general>( length: usize, ciphertext: &mut [u8], additional_data: Vec, -) -> Result { +) -> Result { let nonce = Nonce::::from_slice(&iv); let tag = match length { 128 => { let cipher = aes_gcm::AesGcm::::new_from_slice(key) - .map_err(|_| operation_error("Encryption failed"))?; + .map_err(|_| EncryptError::Failed)?; cipher .encrypt_in_place_detached(nonce, &additional_data, ciphertext) - .map_err(|_| operation_error("Encryption failed"))? + .map_err(|_| EncryptError::Failed)? } 192 => { let cipher = aes_gcm::AesGcm::::new_from_slice(key) - .map_err(|_| operation_error("Encryption failed"))?; + .map_err(|_| EncryptError::Failed)?; cipher .encrypt_in_place_detached(nonce, &additional_data, ciphertext) - .map_err(|_| operation_error("Encryption failed"))? + .map_err(|_| EncryptError::Failed)? } 256 => { let cipher = aes_gcm::AesGcm::::new_from_slice(key) - .map_err(|_| operation_error("Encryption failed"))?; + .map_err(|_| EncryptError::Failed)?; cipher .encrypt_in_place_detached(nonce, &additional_data, ciphertext) - .map_err(|_| operation_error("Encryption failed"))? + .map_err(|_| EncryptError::Failed)? 
} - _ => return Err(type_error("invalid length")), + _ => return Err(EncryptError::InvalidLength), }; Ok(tag) @@ -223,7 +239,7 @@ fn encrypt_aes_gcm( iv: Vec, additional_data: Option>, data: &[u8], -) -> Result, AnyError> { +) -> Result, EncryptError> { let key = key.as_secret_key()?; let additional_data = additional_data.unwrap_or_default(); @@ -244,7 +260,7 @@ fn encrypt_aes_gcm( &mut ciphertext, additional_data, )?, - _ => return Err(type_error("iv length not equal to 12 or 16")), + _ => return Err(EncryptError::InvalidIvLength), }; // Truncated tag to the specified tag length. @@ -261,7 +277,7 @@ fn encrypt_aes_ctr_gen( key: &[u8], counter: &[u8], data: &[u8], -) -> Result, AnyError> +) -> Result, EncryptError> where B: KeyIvInit + StreamCipher, { @@ -270,7 +286,7 @@ where let mut ciphertext = data.to_vec(); cipher .try_apply_keystream(&mut ciphertext) - .map_err(|_| operation_error("tried to encrypt too much data"))?; + .map_err(|_| EncryptError::TooMuchData)?; Ok(ciphertext) } @@ -281,7 +297,7 @@ fn encrypt_aes_ctr( counter: &[u8], ctr_length: usize, data: &[u8], -) -> Result, AnyError> { +) -> Result, EncryptError> { let key = key.as_secret_key()?; match ctr_length { @@ -289,22 +305,20 @@ fn encrypt_aes_ctr( 128 => encrypt_aes_ctr_gen::>(key, counter, data), 192 => encrypt_aes_ctr_gen::>(key, counter, data), 256 => encrypt_aes_ctr_gen::>(key, counter, data), - _ => Err(type_error("invalid length")), + _ => Err(EncryptError::InvalidLength), }, 64 => match key_length { 128 => encrypt_aes_ctr_gen::>(key, counter, data), 192 => encrypt_aes_ctr_gen::>(key, counter, data), 256 => encrypt_aes_ctr_gen::>(key, counter, data), - _ => Err(type_error("invalid length")), + _ => Err(EncryptError::InvalidLength), }, 128 => match key_length { 128 => encrypt_aes_ctr_gen::>(key, counter, data), 192 => encrypt_aes_ctr_gen::>(key, counter, data), 256 => encrypt_aes_ctr_gen::>(key, counter, data), - _ => Err(type_error("invalid length")), + _ => Err(EncryptError::InvalidLength), 
}, - _ => Err(type_error( - "invalid counter length. Currently supported 32/64/128 bits", - )), + _ => Err(EncryptError::InvalidCounterLength), } } diff --git a/ext/crypto/export_key.rs b/ext/crypto/export_key.rs index 00ce7e11c..edf0d7239 100644 --- a/ext/crypto/export_key.rs +++ b/ext/crypto/export_key.rs @@ -4,8 +4,6 @@ use base64::prelude::BASE64_URL_SAFE_NO_PAD; use base64::Engine; use const_oid::AssociatedOid; use const_oid::ObjectIdentifier; -use deno_core::error::custom_error; -use deno_core::error::AnyError; use deno_core::op2; use deno_core::ToJsBuffer; use elliptic_curve::sec1::ToEncodedPoint; @@ -22,6 +20,16 @@ use spki::AlgorithmIdentifierOwned; use crate::shared::*; +#[derive(Debug, thiserror::Error)] +pub enum ExportKeyError { + #[error(transparent)] + General(#[from] SharedError), + #[error(transparent)] + Der(#[from] spki::der::Error), + #[error("Unsupported named curve")] + UnsupportedNamedCurve, +} + #[derive(Deserialize)] #[serde(rename_all = "camelCase")] pub struct ExportKeyOptions { @@ -99,7 +107,7 @@ pub enum ExportKeyResult { pub fn op_crypto_export_key( #[serde] opts: ExportKeyOptions, #[serde] key_data: V8RawKeyData, -) -> Result { +) -> Result { match opts.algorithm { ExportKeyAlgorithm::RsassaPkcs1v15 {} | ExportKeyAlgorithm::RsaPss {} @@ -125,7 +133,7 @@ fn bytes_to_b64(bytes: &[u8]) -> String { fn export_key_rsa( format: ExportKeyFormat, key_data: V8RawKeyData, -) -> Result { +) -> Result { match format { ExportKeyFormat::Spki => { let subject_public_key = &key_data.as_rsa_public_key()?; @@ -181,12 +189,7 @@ fn export_key_rsa( ExportKeyFormat::JwkPublic => { let public_key = key_data.as_rsa_public_key()?; let public_key = rsa::pkcs1::RsaPublicKey::from_der(&public_key) - .map_err(|_| { - custom_error( - "DOMExceptionOperationError", - "failed to decode public key", - ) - })?; + .map_err(|_| SharedError::FailedDecodePublicKey)?; Ok(ExportKeyResult::JwkPublicRsa { n: uint_to_b64(public_key.modulus), @@ -196,12 +199,7 @@ fn 
export_key_rsa( ExportKeyFormat::JwkPrivate => { let private_key = key_data.as_rsa_private_key()?; let private_key = rsa::pkcs1::RsaPrivateKey::from_der(private_key) - .map_err(|_| { - custom_error( - "DOMExceptionOperationError", - "failed to decode private key", - ) - })?; + .map_err(|_| SharedError::FailedDecodePrivateKey)?; Ok(ExportKeyResult::JwkPrivateRsa { n: uint_to_b64(private_key.modulus), @@ -214,14 +212,14 @@ fn export_key_rsa( qi: uint_to_b64(private_key.coefficient), }) } - _ => Err(unsupported_format()), + _ => Err(SharedError::UnsupportedFormat.into()), } } fn export_key_symmetric( format: ExportKeyFormat, key_data: V8RawKeyData, -) -> Result { +) -> Result { match format { ExportKeyFormat::JwkSecret => { let bytes = key_data.as_secret_key()?; @@ -230,7 +228,7 @@ fn export_key_symmetric( k: bytes_to_b64(bytes), }) } - _ => Err(unsupported_format()), + _ => Err(SharedError::UnsupportedFormat.into()), } } @@ -239,7 +237,7 @@ fn export_key_ec( key_data: V8RawKeyData, algorithm: ExportKeyAlgorithm, named_curve: EcNamedCurve, -) -> Result { +) -> Result { match format { ExportKeyFormat::Raw => { let subject_public_key = match named_curve { @@ -332,10 +330,7 @@ fn export_key_ec( y: bytes_to_b64(y), }) } else { - Err(custom_error( - "DOMExceptionOperationError", - "failed to decode public key", - )) + Err(SharedError::FailedDecodePublicKey.into()) } } EcNamedCurve::P384 => { @@ -350,10 +345,7 @@ fn export_key_ec( y: bytes_to_b64(y), }) } else { - Err(custom_error( - "DOMExceptionOperationError", - "failed to decode public key", - )) + Err(SharedError::FailedDecodePublicKey.into()) } } EcNamedCurve::P521 => { @@ -368,10 +360,7 @@ fn export_key_ec( y: bytes_to_b64(y), }) } else { - Err(custom_error( - "DOMExceptionOperationError", - "failed to decode public key", - )) + Err(SharedError::FailedDecodePublicKey.into()) } } }, @@ -380,13 +369,8 @@ fn export_key_ec( match named_curve { EcNamedCurve::P256 => { - let ec_key = - 
p256::SecretKey::from_pkcs8_der(private_key).map_err(|_| { - custom_error( - "DOMExceptionOperationError", - "failed to decode private key", - ) - })?; + let ec_key = p256::SecretKey::from_pkcs8_der(private_key) + .map_err(|_| SharedError::FailedDecodePrivateKey)?; let point = ec_key.public_key().to_encoded_point(false); if let elliptic_curve::sec1::Coordinates::Uncompressed { x, y } = @@ -398,18 +382,13 @@ fn export_key_ec( d: bytes_to_b64(&ec_key.to_bytes()), }) } else { - Err(data_error("expected valid public EC key")) + Err(SharedError::ExpectedValidPublicECKey.into()) } } EcNamedCurve::P384 => { - let ec_key = - p384::SecretKey::from_pkcs8_der(private_key).map_err(|_| { - custom_error( - "DOMExceptionOperationError", - "failed to decode private key", - ) - })?; + let ec_key = p384::SecretKey::from_pkcs8_der(private_key) + .map_err(|_| SharedError::FailedDecodePrivateKey)?; let point = ec_key.public_key().to_encoded_point(false); if let elliptic_curve::sec1::Coordinates::Uncompressed { x, y } = @@ -421,12 +400,12 @@ fn export_key_ec( d: bytes_to_b64(&ec_key.to_bytes()), }) } else { - Err(data_error("expected valid public EC key")) + Err(SharedError::ExpectedValidPublicECKey.into()) } } - _ => Err(not_supported_error("Unsupported namedCurve")), + _ => Err(ExportKeyError::UnsupportedNamedCurve), } } - ExportKeyFormat::JwkSecret => Err(unsupported_format()), + ExportKeyFormat::JwkSecret => Err(SharedError::UnsupportedFormat.into()), } } diff --git a/ext/crypto/generate_key.rs b/ext/crypto/generate_key.rs index 43aea2c70..3c0bd77c2 100644 --- a/ext/crypto/generate_key.rs +++ b/ext/crypto/generate_key.rs @@ -1,6 +1,5 @@ // Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. 
-use deno_core::error::AnyError; use deno_core::op2; use deno_core::unsync::spawn_blocking; use deno_core::ToJsBuffer; @@ -16,6 +15,26 @@ use serde::Deserialize; use crate::shared::*; +#[derive(Debug, thiserror::Error)] +pub enum GenerateKeyError { + #[error(transparent)] + General(#[from] SharedError), + #[error("Bad public exponent")] + BadPublicExponent, + #[error("Invalid HMAC key length")] + InvalidHMACKeyLength, + #[error("Failed to serialize RSA key")] + FailedRSAKeySerialization, + #[error("Invalid AES key length")] + InvalidAESKeyLength, + #[error("Failed to generate RSA key")] + FailedRSAKeyGeneration, + #[error("Failed to generate EC key")] + FailedECKeyGeneration, + #[error("Failed to generate key")] + FailedKeyGeneration, +} + // Allowlist for RSA public exponents. static PUB_EXPONENT_1: Lazy = Lazy::new(|| BigUint::from_u64(3).unwrap()); @@ -46,7 +65,7 @@ pub enum GenerateKeyOptions { #[serde] pub async fn op_crypto_generate_key( #[serde] opts: GenerateKeyOptions, -) -> Result { +) -> Result { let fun = || match opts { GenerateKeyOptions::Rsa { modulus_length, @@ -65,21 +84,21 @@ pub async fn op_crypto_generate_key( fn generate_key_rsa( modulus_length: u32, public_exponent: &[u8], -) -> Result, AnyError> { +) -> Result, GenerateKeyError> { let exponent = BigUint::from_bytes_be(public_exponent); if exponent != *PUB_EXPONENT_1 && exponent != *PUB_EXPONENT_2 { - return Err(operation_error("Bad public exponent")); + return Err(GenerateKeyError::BadPublicExponent); } let mut rng = OsRng; let private_key = RsaPrivateKey::new_with_exp(&mut rng, modulus_length as usize, &exponent) - .map_err(|_| operation_error("Failed to generate RSA key"))?; + .map_err(|_| GenerateKeyError::FailedRSAKeyGeneration)?; let private_key = private_key .to_pkcs1_der() - .map_err(|_| operation_error("Failed to serialize RSA key"))?; + .map_err(|_| GenerateKeyError::FailedRSAKeySerialization)?; Ok(private_key.as_bytes().to_vec()) } @@ -90,7 +109,9 @@ fn generate_key_ec_p521() -> Vec 
{ key.to_nonzero_scalar().to_bytes().to_vec() } -fn generate_key_ec(named_curve: EcNamedCurve) -> Result, AnyError> { +fn generate_key_ec( + named_curve: EcNamedCurve, +) -> Result, GenerateKeyError> { let curve = match named_curve { EcNamedCurve::P256 => &ring::signature::ECDSA_P256_SHA256_FIXED_SIGNING, EcNamedCurve::P384 => &ring::signature::ECDSA_P384_SHA384_FIXED_SIGNING, @@ -100,21 +121,21 @@ fn generate_key_ec(named_curve: EcNamedCurve) -> Result, AnyError> { let rng = ring::rand::SystemRandom::new(); let pkcs8 = EcdsaKeyPair::generate_pkcs8(curve, &rng) - .map_err(|_| operation_error("Failed to generate EC key"))?; + .map_err(|_| GenerateKeyError::FailedECKeyGeneration)?; Ok(pkcs8.as_ref().to_vec()) } -fn generate_key_aes(length: usize) -> Result, AnyError> { +fn generate_key_aes(length: usize) -> Result, GenerateKeyError> { if length % 8 != 0 || length > 256 { - return Err(operation_error("Invalid AES key length")); + return Err(GenerateKeyError::InvalidAESKeyLength); } let mut key = vec![0u8; length / 8]; let rng = ring::rand::SystemRandom::new(); rng .fill(&mut key) - .map_err(|_| operation_error("Failed to generate key"))?; + .map_err(|_| GenerateKeyError::FailedKeyGeneration)?; Ok(key) } @@ -122,7 +143,7 @@ fn generate_key_aes(length: usize) -> Result, AnyError> { fn generate_key_hmac( hash: ShaHash, length: Option, -) -> Result, AnyError> { +) -> Result, GenerateKeyError> { let hash = match hash { ShaHash::Sha1 => &ring::hmac::HMAC_SHA1_FOR_LEGACY_USE_ONLY, ShaHash::Sha256 => &ring::hmac::HMAC_SHA256, @@ -132,12 +153,12 @@ fn generate_key_hmac( let length = if let Some(length) = length { if length % 8 != 0 { - return Err(operation_error("Invalid HMAC key length")); + return Err(GenerateKeyError::InvalidHMACKeyLength); } let length = length / 8; if length > ring::digest::MAX_BLOCK_LEN { - return Err(operation_error("Invalid HMAC key length")); + return Err(GenerateKeyError::InvalidHMACKeyLength); } length @@ -149,7 +170,7 @@ fn generate_key_hmac( let 
mut key = vec![0u8; length]; rng .fill(&mut key) - .map_err(|_| operation_error("Failed to generate key"))?; + .map_err(|_| GenerateKeyError::FailedKeyGeneration)?; Ok(key) } diff --git a/ext/crypto/import_key.rs b/ext/crypto/import_key.rs index e30baea03..3463ca2be 100644 --- a/ext/crypto/import_key.rs +++ b/ext/crypto/import_key.rs @@ -1,7 +1,6 @@ // Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. use base64::Engine; -use deno_core::error::AnyError; use deno_core::op2; use deno_core::JsBuffer; use deno_core::ToJsBuffer; @@ -15,6 +14,70 @@ use spki::der::Decode; use crate::shared::*; +#[derive(Debug, thiserror::Error)] +pub enum ImportKeyError { + #[error(transparent)] + General(#[from] SharedError), + #[error("invalid modulus")] + InvalidModulus, + #[error("invalid public exponent")] + InvalidPublicExponent, + #[error("invalid private exponent")] + InvalidPrivateExponent, + #[error("invalid first prime factor")] + InvalidFirstPrimeFactor, + #[error("invalid second prime factor")] + InvalidSecondPrimeFactor, + #[error("invalid first CRT exponent")] + InvalidFirstCRTExponent, + #[error("invalid second CRT exponent")] + InvalidSecondCRTExponent, + #[error("invalid CRT coefficient")] + InvalidCRTCoefficient, + #[error("invalid b64 coordinate")] + InvalidB64Coordinate, + #[error("invalid RSA public key")] + InvalidRSAPublicKey, + #[error("invalid RSA private key")] + InvalidRSAPrivateKey, + #[error("unsupported algorithm")] + UnsupportedAlgorithm, + #[error("public key is invalid (too long)")] + PublicKeyTooLong, + #[error("private key is invalid (too long)")] + PrivateKeyTooLong, + #[error("invalid P-256 elliptic curve point")] + InvalidP256ECPoint, + #[error("invalid P-384 elliptic curve point")] + InvalidP384ECPoint, + #[error("invalid P-521 elliptic curve point")] + InvalidP521ECPoint, + #[error("invalid P-256 elliptic curve SPKI data")] + InvalidP256ECSPKIData, + #[error("invalid P-384 elliptic curve SPKI data")] + InvalidP384ECSPKIData, + 
#[error("invalid P-521 elliptic curve SPKI data")] + InvalidP521ECSPKIData, + #[error("curve mismatch")] + CurveMismatch, + #[error("Unsupported named curve")] + UnsupportedNamedCurve, + #[error("invalid key data")] + InvalidKeyData, + #[error("invalid JWK private key")] + InvalidJWKPrivateKey, + #[error(transparent)] + EllipticCurve(#[from] elliptic_curve::Error), + #[error("expected valid PKCS#8 data")] + ExpectedValidPkcs8Data, + #[error("malformed parameters")] + MalformedParameters, + #[error(transparent)] + Spki(#[from] spki::Error), + #[error(transparent)] + Der(#[from] rsa::pkcs1::der::Error), +} + #[derive(Deserialize)] #[serde(rename_all = "camelCase")] pub enum KeyData { @@ -93,7 +156,7 @@ pub enum ImportKeyResult { pub fn op_crypto_import_key( #[serde] opts: ImportKeyOptions, #[serde] key_data: KeyData, -) -> Result { +) -> Result { match opts { ImportKeyOptions::RsassaPkcs1v15 {} => import_key_rsassa(key_data), ImportKeyOptions::RsaPss {} => import_key_rsapss(key_data), @@ -117,21 +180,21 @@ const BASE64_URL_SAFE_FORGIVING: ); macro_rules! 
jwt_b64_int_or_err { - ($name:ident, $b64:expr, $err:expr) => { + ($name:ident, $b64:expr, $err:tt) => { let bytes = BASE64_URL_SAFE_FORGIVING .decode($b64) - .map_err(|_| data_error($err))?; - let $name = UintRef::new(&bytes).map_err(|_| data_error($err))?; + .map_err(|_| ImportKeyError::$err)?; + let $name = UintRef::new(&bytes).map_err(|_| ImportKeyError::$err)?; }; } fn import_key_rsa_jwk( key_data: KeyData, -) -> Result { +) -> Result { match key_data { KeyData::JwkPublicRsa { n, e } => { - jwt_b64_int_or_err!(modulus, &n, "invalid modulus"); - jwt_b64_int_or_err!(public_exponent, &e, "invalid public exponent"); + jwt_b64_int_or_err!(modulus, &n, InvalidModulus); + jwt_b64_int_or_err!(public_exponent, &e, InvalidPublicExponent); let public_key = rsa::pkcs1::RsaPublicKey { modulus, @@ -141,7 +204,7 @@ fn import_key_rsa_jwk( let mut data = Vec::new(); public_key .encode_to_vec(&mut data) - .map_err(|_| data_error("invalid rsa public key"))?; + .map_err(|_| ImportKeyError::InvalidRSAPublicKey)?; let public_exponent = public_key.public_exponent.as_bytes().to_vec().into(); @@ -163,14 +226,14 @@ fn import_key_rsa_jwk( dq, qi, } => { - jwt_b64_int_or_err!(modulus, &n, "invalid modulus"); - jwt_b64_int_or_err!(public_exponent, &e, "invalid public exponent"); - jwt_b64_int_or_err!(private_exponent, &d, "invalid private exponent"); - jwt_b64_int_or_err!(prime1, &p, "invalid first prime factor"); - jwt_b64_int_or_err!(prime2, &q, "invalid second prime factor"); - jwt_b64_int_or_err!(exponent1, &dp, "invalid first CRT exponent"); - jwt_b64_int_or_err!(exponent2, &dq, "invalid second CRT exponent"); - jwt_b64_int_or_err!(coefficient, &qi, "invalid CRT coefficient"); + jwt_b64_int_or_err!(modulus, &n, InvalidModulus); + jwt_b64_int_or_err!(public_exponent, &e, InvalidPublicExponent); + jwt_b64_int_or_err!(private_exponent, &d, InvalidPrivateExponent); + jwt_b64_int_or_err!(prime1, &p, InvalidFirstPrimeFactor); + jwt_b64_int_or_err!(prime2, &q, InvalidSecondPrimeFactor); + 
jwt_b64_int_or_err!(exponent1, &dp, InvalidFirstCRTExponent); + jwt_b64_int_or_err!(exponent2, &dq, InvalidSecondCRTExponent); + jwt_b64_int_or_err!(coefficient, &qi, InvalidCRTCoefficient); let private_key = rsa::pkcs1::RsaPrivateKey { modulus, @@ -187,7 +250,7 @@ fn import_key_rsa_jwk( let mut data = Vec::new(); private_key .encode_to_vec(&mut data) - .map_err(|_| data_error("invalid rsa private key"))?; + .map_err(|_| ImportKeyError::InvalidRSAPrivateKey)?; let public_exponent = private_key.public_exponent.as_bytes().to_vec().into(); @@ -205,37 +268,33 @@ fn import_key_rsa_jwk( fn import_key_rsassa( key_data: KeyData, -) -> Result { +) -> Result { match key_data { KeyData::Spki(data) => { // 2-3. - let pk_info = spki::SubjectPublicKeyInfoRef::try_from(&*data) - .map_err(|e| data_error(e.to_string()))?; + let pk_info = spki::SubjectPublicKeyInfoRef::try_from(&*data)?; // 4-5. let alg = pk_info.algorithm.oid; // 6-7. (skipped, only support rsaEncryption for interoperability) if alg != RSA_ENCRYPTION_OID { - return Err(data_error("unsupported algorithm")); + return Err(ImportKeyError::UnsupportedAlgorithm); } // 8-9. let public_key = rsa::pkcs1::RsaPublicKey::from_der( pk_info.subject_public_key.raw_bytes(), - ) - .map_err(|e| data_error(e.to_string()))?; + )?; - let bytes_consumed = public_key - .encoded_len() - .map_err(|e| data_error(e.to_string()))?; + let bytes_consumed = public_key.encoded_len()?; if bytes_consumed != rsa::pkcs1::der::Length::new( pk_info.subject_public_key.raw_bytes().len() as u16, ) { - return Err(data_error("public key is invalid (too long)")); + return Err(ImportKeyError::PublicKeyTooLong); } let data = pk_info.subject_public_key.raw_bytes().to_vec().into(); @@ -251,30 +310,26 @@ fn import_key_rsassa( } KeyData::Pkcs8(data) => { // 2-3. - let pk_info = PrivateKeyInfo::from_der(&data) - .map_err(|e| data_error(e.to_string()))?; + let pk_info = PrivateKeyInfo::from_der(&data)?; // 4-5. let alg = pk_info.algorithm.oid; // 6-7. 
(skipped, only support rsaEncryption for interoperability) if alg != RSA_ENCRYPTION_OID { - return Err(data_error("unsupported algorithm")); + return Err(ImportKeyError::UnsupportedAlgorithm); } // 8-9. let private_key = - rsa::pkcs1::RsaPrivateKey::from_der(pk_info.private_key) - .map_err(|e| data_error(e.to_string()))?; + rsa::pkcs1::RsaPrivateKey::from_der(pk_info.private_key)?; - let bytes_consumed = private_key - .encoded_len() - .map_err(|e| data_error(e.to_string()))?; + let bytes_consumed = private_key.encoded_len()?; if bytes_consumed != rsa::pkcs1::der::Length::new(pk_info.private_key.len() as u16) { - return Err(data_error("private key is invalid (too long)")); + return Err(ImportKeyError::PrivateKeyTooLong); } let data = pk_info.private_key.to_vec().into(); @@ -291,43 +346,39 @@ fn import_key_rsassa( KeyData::JwkPublicRsa { .. } | KeyData::JwkPrivateRsa { .. } => { import_key_rsa_jwk(key_data) } - _ => Err(unsupported_format()), + _ => Err(SharedError::UnsupportedFormat.into()), } } fn import_key_rsapss( key_data: KeyData, -) -> Result { +) -> Result { match key_data { KeyData::Spki(data) => { // 2-3. - let pk_info = spki::SubjectPublicKeyInfoRef::try_from(&*data) - .map_err(|e| data_error(e.to_string()))?; + let pk_info = spki::SubjectPublicKeyInfoRef::try_from(&*data)?; // 4-5. let alg = pk_info.algorithm.oid; // 6-7. (skipped, only support rsaEncryption for interoperability) if alg != RSA_ENCRYPTION_OID { - return Err(data_error("unsupported algorithm")); + return Err(ImportKeyError::UnsupportedAlgorithm); } // 8-9. 
let public_key = rsa::pkcs1::RsaPublicKey::from_der( pk_info.subject_public_key.raw_bytes(), - ) - .map_err(|e| data_error(e.to_string()))?; + )?; - let bytes_consumed = public_key - .encoded_len() - .map_err(|e| data_error(e.to_string()))?; + let bytes_consumed = public_key.encoded_len()?; if bytes_consumed != rsa::pkcs1::der::Length::new( pk_info.subject_public_key.raw_bytes().len() as u16, ) { - return Err(data_error("public key is invalid (too long)")); + return Err(ImportKeyError::PublicKeyTooLong); } let data = pk_info.subject_public_key.raw_bytes().to_vec().into(); @@ -343,30 +394,26 @@ fn import_key_rsapss( } KeyData::Pkcs8(data) => { // 2-3. - let pk_info = PrivateKeyInfo::from_der(&data) - .map_err(|e| data_error(e.to_string()))?; + let pk_info = PrivateKeyInfo::from_der(&data)?; // 4-5. let alg = pk_info.algorithm.oid; // 6-7. (skipped, only support rsaEncryption for interoperability) if alg != RSA_ENCRYPTION_OID { - return Err(data_error("unsupported algorithm")); + return Err(ImportKeyError::UnsupportedAlgorithm); } // 8-9. let private_key = - rsa::pkcs1::RsaPrivateKey::from_der(pk_info.private_key) - .map_err(|e| data_error(e.to_string()))?; + rsa::pkcs1::RsaPrivateKey::from_der(pk_info.private_key)?; - let bytes_consumed = private_key - .encoded_len() - .map_err(|e| data_error(e.to_string()))?; + let bytes_consumed = private_key.encoded_len()?; if bytes_consumed != rsa::pkcs1::der::Length::new(pk_info.private_key.len() as u16) { - return Err(data_error("private key is invalid (too long)")); + return Err(ImportKeyError::PrivateKeyTooLong); } let data = pk_info.private_key.to_vec().into(); @@ -383,43 +430,39 @@ fn import_key_rsapss( KeyData::JwkPublicRsa { .. } | KeyData::JwkPrivateRsa { .. } => { import_key_rsa_jwk(key_data) } - _ => Err(unsupported_format()), + _ => Err(SharedError::UnsupportedFormat.into()), } } fn import_key_rsaoaep( key_data: KeyData, -) -> Result { +) -> Result { match key_data { KeyData::Spki(data) => { // 2-3. 
- let pk_info = spki::SubjectPublicKeyInfoRef::try_from(&*data) - .map_err(|e| data_error(e.to_string()))?; + let pk_info = spki::SubjectPublicKeyInfoRef::try_from(&*data)?; // 4-5. let alg = pk_info.algorithm.oid; // 6-7. (skipped, only support rsaEncryption for interoperability) if alg != RSA_ENCRYPTION_OID { - return Err(data_error("unsupported algorithm")); + return Err(ImportKeyError::UnsupportedAlgorithm); } // 8-9. let public_key = rsa::pkcs1::RsaPublicKey::from_der( pk_info.subject_public_key.raw_bytes(), - ) - .map_err(|e| data_error(e.to_string()))?; + )?; - let bytes_consumed = public_key - .encoded_len() - .map_err(|e| data_error(e.to_string()))?; + let bytes_consumed = public_key.encoded_len()?; if bytes_consumed != rsa::pkcs1::der::Length::new( pk_info.subject_public_key.raw_bytes().len() as u16, ) { - return Err(data_error("public key is invalid (too long)")); + return Err(ImportKeyError::PublicKeyTooLong); } let data = pk_info.subject_public_key.raw_bytes().to_vec().into(); @@ -435,30 +478,26 @@ fn import_key_rsaoaep( } KeyData::Pkcs8(data) => { // 2-3. - let pk_info = PrivateKeyInfo::from_der(&data) - .map_err(|e| data_error(e.to_string()))?; + let pk_info = PrivateKeyInfo::from_der(&data)?; // 4-5. let alg = pk_info.algorithm.oid; // 6-7. (skipped, only support rsaEncryption for interoperability) if alg != RSA_ENCRYPTION_OID { - return Err(data_error("unsupported algorithm")); + return Err(ImportKeyError::UnsupportedAlgorithm); } // 8-9. 
let private_key = - rsa::pkcs1::RsaPrivateKey::from_der(pk_info.private_key) - .map_err(|e| data_error(e.to_string()))?; + rsa::pkcs1::RsaPrivateKey::from_der(pk_info.private_key)?; - let bytes_consumed = private_key - .encoded_len() - .map_err(|e| data_error(e.to_string()))?; + let bytes_consumed = private_key.encoded_len()?; if bytes_consumed != rsa::pkcs1::der::Length::new(pk_info.private_key.len() as u16) { - return Err(data_error("private key is invalid (too long)")); + return Err(ImportKeyError::PrivateKeyTooLong); } let data = pk_info.private_key.to_vec().into(); @@ -475,14 +514,14 @@ fn import_key_rsaoaep( KeyData::JwkPublicRsa { .. } | KeyData::JwkPrivateRsa { .. } => { import_key_rsa_jwk(key_data) } - _ => Err(unsupported_format()), + _ => Err(SharedError::UnsupportedFormat.into()), } } fn decode_b64url_to_field_bytes( b64: &str, -) -> Result, deno_core::anyhow::Error> { - jwt_b64_int_or_err!(val, b64, "invalid b64 coordinate"); +) -> Result, ImportKeyError> { + jwt_b64_int_or_err!(val, b64, InvalidB64Coordinate); let mut bytes = elliptic_curve::FieldBytes::::default(); let original_bytes = val.as_bytes(); @@ -495,7 +534,7 @@ fn decode_b64url_to_field_bytes( let val = new_bytes.as_slice(); if val.len() != bytes.len() { - return Err(data_error("invalid b64 coordinate")); + return Err(ImportKeyError::InvalidB64Coordinate); } bytes.copy_from_slice(val); @@ -506,7 +545,7 @@ fn import_key_ec_jwk_to_point( x: String, y: String, named_curve: EcNamedCurve, -) -> Result, deno_core::anyhow::Error> { +) -> Result, ImportKeyError> { let point_bytes = match named_curve { EcNamedCurve::P256 => { let x = decode_b64url_to_field_bytes::(&x)?; @@ -534,7 +573,7 @@ fn import_key_ec_jwk_to_point( fn import_key_ec_jwk( key_data: KeyData, named_curve: EcNamedCurve, -) -> Result { +) -> Result { match key_data { KeyData::JwkPublicEc { x, y } => { let point_bytes = import_key_ec_jwk_to_point(x, y, named_curve)?; @@ -550,21 +589,21 @@ fn import_key_ec_jwk( let pk = 
p256::SecretKey::from_bytes(&d)?; pk.to_pkcs8_der() - .map_err(|_| data_error("invalid JWK private key"))? + .map_err(|_| ImportKeyError::InvalidJWKPrivateKey)? } EcNamedCurve::P384 => { let d = decode_b64url_to_field_bytes::(&d)?; let pk = p384::SecretKey::from_bytes(&d)?; pk.to_pkcs8_der() - .map_err(|_| data_error("invalid JWK private key"))? + .map_err(|_| ImportKeyError::InvalidJWKPrivateKey)? } EcNamedCurve::P521 => { let d = decode_b64url_to_field_bytes::(&d)?; let pk = p521::SecretKey::from_bytes(&d)?; pk.to_pkcs8_der() - .map_err(|_| data_error("invalid JWK private key"))? + .map_err(|_| ImportKeyError::InvalidJWKPrivateKey)? } }; @@ -595,7 +634,7 @@ impl<'a> TryFrom> for ECParametersSpki { fn import_key_ec( key_data: KeyData, named_curve: EcNamedCurve, -) -> Result { +) -> Result { match key_data { KeyData::Raw(data) => { // The point is parsed and validated, ultimately the original data is @@ -604,28 +643,28 @@ fn import_key_ec( EcNamedCurve::P256 => { // 1-2. let point = p256::EncodedPoint::from_bytes(&data) - .map_err(|_| data_error("invalid P-256 elliptic curve point"))?; + .map_err(|_| ImportKeyError::InvalidP256ECPoint)?; // 3. if point.is_identity() { - return Err(data_error("invalid P-256 elliptic curve point")); + return Err(ImportKeyError::InvalidP256ECPoint); } } EcNamedCurve::P384 => { // 1-2. let point = p384::EncodedPoint::from_bytes(&data) - .map_err(|_| data_error("invalid P-384 elliptic curve point"))?; + .map_err(|_| ImportKeyError::InvalidP384ECPoint)?; // 3. if point.is_identity() { - return Err(data_error("invalid P-384 elliptic curve point")); + return Err(ImportKeyError::InvalidP384ECPoint); } } EcNamedCurve::P521 => { // 1-2. let point = p521::EncodedPoint::from_bytes(&data) - .map_err(|_| data_error("invalid P-521 elliptic curve point"))?; + .map_err(|_| ImportKeyError::InvalidP521ECPoint)?; // 3. 
if point.is_identity() { - return Err(data_error("invalid P-521 elliptic curve point")); + return Err(ImportKeyError::InvalidP521ECPoint); } } }; @@ -635,11 +674,11 @@ fn import_key_ec( } KeyData::Pkcs8(data) => { let pk = PrivateKeyInfo::from_der(data.as_ref()) - .map_err(|_| data_error("expected valid PKCS#8 data"))?; + .map_err(|_| ImportKeyError::ExpectedValidPkcs8Data)?; let named_curve_alg = pk .algorithm .parameters - .ok_or_else(|| data_error("malformed parameters"))? + .ok_or(ImportKeyError::MalformedParameters)? .try_into() .unwrap(); @@ -654,7 +693,7 @@ fn import_key_ec( }; if pk_named_curve != Some(named_curve) { - return Err(data_error("curve mismatch")); + return Err(ImportKeyError::CurveMismatch); } Ok(ImportKeyResult::Ec { @@ -663,14 +702,13 @@ fn import_key_ec( } KeyData::Spki(data) => { // 2-3. - let pk_info = spki::SubjectPublicKeyInfoRef::try_from(&*data) - .map_err(|e| data_error(e.to_string()))?; + let pk_info = spki::SubjectPublicKeyInfoRef::try_from(&*data)?; // 4. let alg = pk_info.algorithm.oid; // id-ecPublicKey if alg != elliptic_curve::ALGORITHM_OID { - return Err(data_error("unsupported algorithm")); + return Err(ImportKeyError::UnsupportedAlgorithm); } // 5-7. @@ -678,9 +716,9 @@ fn import_key_ec( pk_info .algorithm .parameters - .ok_or_else(|| data_error("malformed parameters"))?, + .ok_or(ImportKeyError::MalformedParameters)?, ) - .map_err(|_| data_error("malformed parameters"))?; + .map_err(|_| ImportKeyError::MalformedParameters)?; // 8-9. 
let named_curve_alg = params.named_curve_alg; @@ -704,36 +742,30 @@ fn import_key_ec( let bytes_consumed = match named_curve { EcNamedCurve::P256 => { - let point = - p256::EncodedPoint::from_bytes(&*encoded_key).map_err(|_| { - data_error("invalid P-256 elliptic curve SPKI data") - })?; + let point = p256::EncodedPoint::from_bytes(&*encoded_key) + .map_err(|_| ImportKeyError::InvalidP256ECSPKIData)?; if point.is_identity() { - return Err(data_error("invalid P-256 elliptic curve point")); + return Err(ImportKeyError::InvalidP256ECPoint); } point.as_bytes().len() } EcNamedCurve::P384 => { - let point = - p384::EncodedPoint::from_bytes(&*encoded_key).map_err(|_| { - data_error("invalid P-384 elliptic curve SPKI data") - })?; + let point = p384::EncodedPoint::from_bytes(&*encoded_key) + .map_err(|_| ImportKeyError::InvalidP384ECSPKIData)?; if point.is_identity() { - return Err(data_error("invalid P-384 elliptic curve point")); + return Err(ImportKeyError::InvalidP384ECPoint); } point.as_bytes().len() } EcNamedCurve::P521 => { - let point = - p521::EncodedPoint::from_bytes(&*encoded_key).map_err(|_| { - data_error("invalid P-521 elliptic curve SPKI data") - })?; + let point = p521::EncodedPoint::from_bytes(&*encoded_key) + .map_err(|_| ImportKeyError::InvalidP521ECSPKIData)?; if point.is_identity() { - return Err(data_error("invalid P-521 elliptic curve point")); + return Err(ImportKeyError::InvalidP521ECPoint); } point.as_bytes().len() @@ -741,15 +773,15 @@ fn import_key_ec( }; if bytes_consumed != pk_info.subject_public_key.raw_bytes().len() { - return Err(data_error("public key is invalid (too long)")); + return Err(ImportKeyError::PublicKeyTooLong); } // 11. 
if named_curve != pk_named_curve { - return Err(data_error("curve mismatch")); + return Err(ImportKeyError::CurveMismatch); } } else { - return Err(data_error("Unsupported named curve")); + return Err(ImportKeyError::UnsupportedNamedCurve); } Ok(ImportKeyResult::Ec { @@ -759,34 +791,38 @@ fn import_key_ec( KeyData::JwkPublicEc { .. } | KeyData::JwkPrivateEc { .. } => { import_key_ec_jwk(key_data, named_curve) } - _ => Err(unsupported_format()), + _ => Err(SharedError::UnsupportedFormat.into()), } } -fn import_key_aes(key_data: KeyData) -> Result { +fn import_key_aes( + key_data: KeyData, +) -> Result { Ok(match key_data { KeyData::JwkSecret { k } => { let data = BASE64_URL_SAFE_FORGIVING .decode(k) - .map_err(|_| data_error("invalid key data"))?; + .map_err(|_| ImportKeyError::InvalidKeyData)?; ImportKeyResult::Hmac { raw_data: RustRawKeyData::Secret(data.into()), } } - _ => return Err(unsupported_format()), + _ => return Err(SharedError::UnsupportedFormat.into()), }) } -fn import_key_hmac(key_data: KeyData) -> Result { +fn import_key_hmac( + key_data: KeyData, +) -> Result { Ok(match key_data { KeyData::JwkSecret { k } => { let data = BASE64_URL_SAFE_FORGIVING .decode(k) - .map_err(|_| data_error("invalid key data"))?; + .map_err(|_| ImportKeyError::InvalidKeyData)?; ImportKeyResult::Hmac { raw_data: RustRawKeyData::Secret(data.into()), } } - _ => return Err(unsupported_format()), + _ => return Err(SharedError::UnsupportedFormat.into()), }) } diff --git a/ext/crypto/lib.rs b/ext/crypto/lib.rs index b5bafc580..69dcd1413 100644 --- a/ext/crypto/lib.rs +++ b/ext/crypto/lib.rs @@ -6,10 +6,7 @@ use aes_kw::KekAes256; use base64::prelude::BASE64_URL_SAFE_NO_PAD; use base64::Engine; -use deno_core::error::custom_error; use deno_core::error::not_supported; -use deno_core::error::type_error; -use deno_core::error::AnyError; use deno_core::op2; use deno_core::ToJsBuffer; @@ -17,7 +14,6 @@ use deno_core::unsync::spawn_blocking; use deno_core::JsBuffer; use 
deno_core::OpState; use serde::Deserialize; -use shared::operation_error; use p256::elliptic_curve::sec1::FromEncodedPoint; use p256::pkcs8::DecodePrivateKey; @@ -67,15 +63,24 @@ mod x25519; mod x448; pub use crate::decrypt::op_crypto_decrypt; +pub use crate::decrypt::DecryptError; +pub use crate::ed25519::Ed25519Error; pub use crate::encrypt::op_crypto_encrypt; +pub use crate::encrypt::EncryptError; pub use crate::export_key::op_crypto_export_key; +pub use crate::export_key::ExportKeyError; pub use crate::generate_key::op_crypto_generate_key; +pub use crate::generate_key::GenerateKeyError; pub use crate::import_key::op_crypto_import_key; +pub use crate::import_key::ImportKeyError; use crate::key::Algorithm; use crate::key::CryptoHash; use crate::key::CryptoNamedCurve; use crate::key::HkdfOutput; +pub use crate::shared::SharedError; use crate::shared::V8RawKeyData; +pub use crate::x25519::X25519Error; +pub use crate::x448::X448Error; deno_core::extension!(deno_crypto, deps = [ deno_webidl, deno_web ], @@ -127,11 +132,63 @@ deno_core::extension!(deno_crypto, }, ); +#[derive(Debug, thiserror::Error)] +pub enum Error { + #[error(transparent)] + General(#[from] SharedError), + #[error(transparent)] + JoinError(#[from] tokio::task::JoinError), + #[error(transparent)] + Der(#[from] rsa::pkcs1::der::Error), + #[error("Missing argument hash")] + MissingArgumentHash, + #[error("Missing argument saltLength")] + MissingArgumentSaltLength, + #[error("unsupported algorithm")] + UnsupportedAlgorithm, + #[error(transparent)] + KeyRejected(#[from] ring::error::KeyRejected), + #[error(transparent)] + RSA(#[from] rsa::Error), + #[error(transparent)] + Pkcs1(#[from] rsa::pkcs1::Error), + #[error(transparent)] + Unspecified(#[from] ring::error::Unspecified), + #[error("Invalid key format")] + InvalidKeyFormat, + #[error(transparent)] + P256Ecdsa(#[from] p256::ecdsa::Error), + #[error("Unexpected error decoding private key")] + DecodePrivateKey, + #[error("Missing argument publicKey")] 
+ MissingArgumentPublicKey, + #[error("Missing argument namedCurve")] + MissingArgumentNamedCurve, + #[error("Missing argument info")] + MissingArgumentInfo, + #[error("The length provided for HKDF is too large")] + HKDFLengthTooLarge, + #[error(transparent)] + Base64Decode(#[from] base64::DecodeError), + #[error("Data must be multiple of 8 bytes")] + DataInvalidSize, + #[error("Invalid key length")] + InvalidKeyLength, + #[error("encryption error")] + EncryptionError, + #[error("decryption error - integrity check failed")] + DecryptionError, + #[error("The ArrayBufferView's byte length ({0}) exceeds the number of bytes of entropy available via this API (65536)")] + ArrayBufferViewLengthExceeded(usize), + #[error(transparent)] + Other(deno_core::error::AnyError), +} + #[op2] #[serde] pub fn op_crypto_base64url_decode( #[string] data: String, -) -> Result { +) -> Result { let data: Vec = BASE64_URL_SAFE_NO_PAD.decode(data)?; Ok(data.into()) } @@ -147,9 +204,9 @@ pub fn op_crypto_base64url_encode(#[buffer] data: JsBuffer) -> String { pub fn op_crypto_get_random_values( state: &mut OpState, #[buffer] out: &mut [u8], -) -> Result<(), AnyError> { +) -> Result<(), Error> { if out.len() > 65536 { - return Err(custom_error("DOMExceptionQuotaExceededError", format!("The ArrayBufferView's byte length ({}) exceeds the number of bytes of entropy available via this API (65536)", out.len()))); + return Err(Error::ArrayBufferViewLengthExceeded(out.len())); } let maybe_seeded_rng = state.try_borrow_mut::(); @@ -201,7 +258,7 @@ pub struct SignArg { pub async fn op_crypto_sign_key( #[serde] args: SignArg, #[buffer] zero_copy: JsBuffer, -) -> Result { +) -> Result { deno_core::unsync::spawn_blocking(move || { let data = &*zero_copy; let algorithm = args.algorithm; @@ -210,10 +267,7 @@ pub async fn op_crypto_sign_key( Algorithm::RsassaPkcs1v15 => { use rsa::pkcs1v15::SigningKey; let private_key = RsaPrivateKey::from_pkcs1_der(&args.key.data)?; - match args - .hash - .ok_or_else(|| 
type_error("Missing argument hash".to_string()))? - { + match args.hash.ok_or_else(|| Error::MissingArgumentHash)? { CryptoHash::Sha1 => { let signing_key = SigningKey::::new(private_key); signing_key.sign(data) @@ -236,15 +290,13 @@ pub async fn op_crypto_sign_key( Algorithm::RsaPss => { let private_key = RsaPrivateKey::from_pkcs1_der(&args.key.data)?; - let salt_len = args.salt_length.ok_or_else(|| { - type_error("Missing argument saltLength".to_string()) - })? as usize; + let salt_len = args + .salt_length + .ok_or_else(|| Error::MissingArgumentSaltLength)? + as usize; let mut rng = OsRng; - match args - .hash - .ok_or_else(|| type_error("Missing argument hash".to_string()))? - { + match args.hash.ok_or_else(|| Error::MissingArgumentHash)? { CryptoHash::Sha1 => { let signing_key = Pss::new_with_salt::(salt_len); let hashed = Sha1::digest(data); @@ -269,8 +321,10 @@ pub async fn op_crypto_sign_key( .to_vec() } Algorithm::Ecdsa => { - let curve: &EcdsaSigningAlgorithm = - args.named_curve.ok_or_else(not_supported)?.into(); + let curve: &EcdsaSigningAlgorithm = args + .named_curve + .ok_or_else(|| Error::Other(not_supported()))? + .into(); let rng = RingRand::SystemRandom::new(); let key_pair = EcdsaKeyPair::from_pkcs8(curve, &args.key.data, &rng)?; @@ -279,7 +333,7 @@ pub async fn op_crypto_sign_key( if let Some(hash) = args.hash { match hash { CryptoHash::Sha256 | CryptoHash::Sha384 => (), - _ => return Err(type_error("Unsupported algorithm")), + _ => return Err(Error::UnsupportedAlgorithm), } }; @@ -289,14 +343,17 @@ pub async fn op_crypto_sign_key( signature.as_ref().to_vec() } Algorithm::Hmac => { - let hash: HmacAlgorithm = args.hash.ok_or_else(not_supported)?.into(); + let hash: HmacAlgorithm = args + .hash + .ok_or_else(|| Error::Other(not_supported()))? 
+ .into(); let key = HmacKey::new(hash, &args.key.data); let signature = ring::hmac::sign(&key, data); signature.as_ref().to_vec() } - _ => return Err(type_error("Unsupported algorithm".to_string())), + _ => return Err(Error::UnsupportedAlgorithm), }; Ok(signature.into()) @@ -319,7 +376,7 @@ pub struct VerifyArg { pub async fn op_crypto_verify_key( #[serde] args: VerifyArg, #[buffer] zero_copy: JsBuffer, -) -> Result { +) -> Result { deno_core::unsync::spawn_blocking(move || { let data = &*zero_copy; let algorithm = args.algorithm; @@ -330,10 +387,7 @@ pub async fn op_crypto_verify_key( use rsa::pkcs1v15::VerifyingKey; let public_key = read_rsa_public_key(args.key)?; let signature: Signature = args.signature.as_ref().try_into()?; - match args - .hash - .ok_or_else(|| type_error("Missing argument hash".to_string()))? - { + match args.hash.ok_or_else(|| Error::MissingArgumentHash)? { CryptoHash::Sha1 => { let verifying_key = VerifyingKey::::new(public_key); verifying_key.verify(data, &signature).is_ok() @@ -356,14 +410,12 @@ pub async fn op_crypto_verify_key( let public_key = read_rsa_public_key(args.key)?; let signature = args.signature.as_ref(); - let salt_len = args.salt_length.ok_or_else(|| { - type_error("Missing argument saltLength".to_string()) - })? as usize; + let salt_len = args + .salt_length + .ok_or_else(|| Error::MissingArgumentSaltLength)? + as usize; - match args - .hash - .ok_or_else(|| type_error("Missing argument hash".to_string()))? - { + match args.hash.ok_or_else(|| Error::MissingArgumentHash)? { CryptoHash::Sha1 => { let pss = Pss::new_with_salt::(salt_len); let hashed = Sha1::digest(data); @@ -387,15 +439,22 @@ pub async fn op_crypto_verify_key( } } Algorithm::Hmac => { - let hash: HmacAlgorithm = args.hash.ok_or_else(not_supported)?.into(); + let hash: HmacAlgorithm = args + .hash + .ok_or_else(|| Error::Other(not_supported()))? 
+ .into(); let key = HmacKey::new(hash, &args.key.data); ring::hmac::verify(&key, data, &args.signature).is_ok() } Algorithm::Ecdsa => { - let signing_alg: &EcdsaSigningAlgorithm = - args.named_curve.ok_or_else(not_supported)?.into(); - let verify_alg: &EcdsaVerificationAlgorithm = - args.named_curve.ok_or_else(not_supported)?.into(); + let signing_alg: &EcdsaSigningAlgorithm = args + .named_curve + .ok_or_else(|| Error::Other(not_supported()))? + .into(); + let verify_alg: &EcdsaVerificationAlgorithm = args + .named_curve + .ok_or_else(|| Error::Other(not_supported()))? + .into(); let private_key; @@ -408,7 +467,7 @@ pub async fn op_crypto_verify_key( private_key.public_key().as_ref() } KeyType::Public => &*args.key.data, - _ => return Err(type_error("Invalid Key format".to_string())), + _ => return Err(Error::InvalidKeyFormat), }; let public_key = @@ -416,7 +475,7 @@ pub async fn op_crypto_verify_key( public_key.verify(data, &args.signature).is_ok() } - _ => return Err(type_error("Unsupported algorithm".to_string())), + _ => return Err(Error::UnsupportedAlgorithm), }; Ok(verification) @@ -444,70 +503,68 @@ pub struct DeriveKeyArg { pub async fn op_crypto_derive_bits( #[serde] args: DeriveKeyArg, #[buffer] zero_copy: Option, -) -> Result { +) -> Result { deno_core::unsync::spawn_blocking(move || { let algorithm = args.algorithm; match algorithm { Algorithm::Pbkdf2 => { - let zero_copy = zero_copy.ok_or_else(not_supported)?; + let zero_copy = + zero_copy.ok_or_else(|| Error::Other(not_supported()))?; let salt = &*zero_copy; // The caller must validate these cases. assert!(args.length > 0); assert!(args.length % 8 == 0); - let algorithm = match args.hash.ok_or_else(not_supported)? 
{ - CryptoHash::Sha1 => pbkdf2::PBKDF2_HMAC_SHA1, - CryptoHash::Sha256 => pbkdf2::PBKDF2_HMAC_SHA256, - CryptoHash::Sha384 => pbkdf2::PBKDF2_HMAC_SHA384, - CryptoHash::Sha512 => pbkdf2::PBKDF2_HMAC_SHA512, - }; + let algorithm = + match args.hash.ok_or_else(|| Error::Other(not_supported()))? { + CryptoHash::Sha1 => pbkdf2::PBKDF2_HMAC_SHA1, + CryptoHash::Sha256 => pbkdf2::PBKDF2_HMAC_SHA256, + CryptoHash::Sha384 => pbkdf2::PBKDF2_HMAC_SHA384, + CryptoHash::Sha512 => pbkdf2::PBKDF2_HMAC_SHA512, + }; // This will never panic. We have already checked length earlier. - let iterations = - NonZeroU32::new(args.iterations.ok_or_else(not_supported)?).unwrap(); + let iterations = NonZeroU32::new( + args + .iterations + .ok_or_else(|| Error::Other(not_supported()))?, + ) + .unwrap(); let secret = args.key.data; let mut out = vec![0; args.length / 8]; pbkdf2::derive(algorithm, iterations, salt, &secret, &mut out); Ok(out.into()) } Algorithm::Ecdh => { - let named_curve = args.named_curve.ok_or_else(|| { - type_error("Missing argument namedCurve".to_string()) - })?; + let named_curve = args + .named_curve + .ok_or_else(|| Error::MissingArgumentNamedCurve)?; let public_key = args .public_key - .ok_or_else(|| type_error("Missing argument publicKey"))?; + .ok_or_else(|| Error::MissingArgumentPublicKey)?; match named_curve { CryptoNamedCurve::P256 => { let secret_key = p256::SecretKey::from_pkcs8_der(&args.key.data) - .map_err(|_| { - type_error("Unexpected error decoding private key") - })?; + .map_err(|_| Error::DecodePrivateKey)?; let public_key = match public_key.r#type { KeyType::Private => { p256::SecretKey::from_pkcs8_der(&public_key.data) - .map_err(|_| { - type_error("Unexpected error decoding private key") - })? + .map_err(|_| Error::DecodePrivateKey)? 
.public_key() } KeyType::Public => { let point = p256::EncodedPoint::from_bytes(public_key.data) - .map_err(|_| { - type_error("Unexpected error decoding private key") - })?; + .map_err(|_| Error::DecodePrivateKey)?; let pk = p256::PublicKey::from_encoded_point(&point); // pk is a constant time Option. if pk.is_some().into() { pk.unwrap() } else { - return Err(type_error( - "Unexpected error decoding private key", - )); + return Err(Error::DecodePrivateKey); } } _ => unreachable!(), @@ -523,32 +580,24 @@ pub async fn op_crypto_derive_bits( } CryptoNamedCurve::P384 => { let secret_key = p384::SecretKey::from_pkcs8_der(&args.key.data) - .map_err(|_| { - type_error("Unexpected error decoding private key") - })?; + .map_err(|_| Error::DecodePrivateKey)?; let public_key = match public_key.r#type { KeyType::Private => { p384::SecretKey::from_pkcs8_der(&public_key.data) - .map_err(|_| { - type_error("Unexpected error decoding private key") - })? + .map_err(|_| Error::DecodePrivateKey)? .public_key() } KeyType::Public => { let point = p384::EncodedPoint::from_bytes(public_key.data) - .map_err(|_| { - type_error("Unexpected error decoding private key") - })?; + .map_err(|_| Error::DecodePrivateKey)?; let pk = p384::PublicKey::from_encoded_point(&point); // pk is a constant time Option. if pk.is_some().into() { pk.unwrap() } else { - return Err(type_error( - "Unexpected error decoding private key", - )); + return Err(Error::DecodePrivateKey); } } _ => unreachable!(), @@ -565,18 +614,18 @@ pub async fn op_crypto_derive_bits( } } Algorithm::Hkdf => { - let zero_copy = zero_copy.ok_or_else(not_supported)?; + let zero_copy = + zero_copy.ok_or_else(|| Error::Other(not_supported()))?; let salt = &*zero_copy; - let algorithm = match args.hash.ok_or_else(not_supported)? 
{ - CryptoHash::Sha1 => hkdf::HKDF_SHA1_FOR_LEGACY_USE_ONLY, - CryptoHash::Sha256 => hkdf::HKDF_SHA256, - CryptoHash::Sha384 => hkdf::HKDF_SHA384, - CryptoHash::Sha512 => hkdf::HKDF_SHA512, - }; - - let info = args - .info - .ok_or_else(|| type_error("Missing argument info".to_string()))?; + let algorithm = + match args.hash.ok_or_else(|| Error::Other(not_supported()))? { + CryptoHash::Sha1 => hkdf::HKDF_SHA1_FOR_LEGACY_USE_ONLY, + CryptoHash::Sha256 => hkdf::HKDF_SHA256, + CryptoHash::Sha384 => hkdf::HKDF_SHA384, + CryptoHash::Sha512 => hkdf::HKDF_SHA512, + }; + + let info = args.info.ok_or_else(|| Error::MissingArgumentInfo)?; // IKM let secret = args.key.data; // L @@ -585,23 +634,20 @@ pub async fn op_crypto_derive_bits( let salt = hkdf::Salt::new(algorithm, salt); let prk = salt.extract(&secret); let info = &[&*info]; - let okm = prk.expand(info, HkdfOutput(length)).map_err(|_e| { - custom_error( - "DOMExceptionOperationError", - "The length provided for HKDF is too large", - ) - })?; + let okm = prk + .expand(info, HkdfOutput(length)) + .map_err(|_e| Error::HKDFLengthTooLarge)?; let mut r = vec![0u8; length]; okm.fill(&mut r)?; Ok(r.into()) } - _ => Err(type_error("Unsupported algorithm".to_string())), + _ => Err(Error::UnsupportedAlgorithm), } }) .await? 
} -fn read_rsa_public_key(key_data: KeyData) -> Result { +fn read_rsa_public_key(key_data: KeyData) -> Result { let public_key = match key_data.r#type { KeyType::Private => { RsaPrivateKey::from_pkcs1_der(&key_data.data)?.to_public_key() @@ -614,7 +660,7 @@ fn read_rsa_public_key(key_data: KeyData) -> Result { #[op2] #[string] -pub fn op_crypto_random_uuid(state: &mut OpState) -> Result { +pub fn op_crypto_random_uuid(state: &mut OpState) -> Result { let maybe_seeded_rng = state.try_borrow_mut::(); let uuid = if let Some(seeded_rng) = maybe_seeded_rng { let mut bytes = [0u8; 16]; @@ -635,7 +681,7 @@ pub fn op_crypto_random_uuid(state: &mut OpState) -> Result { pub async fn op_crypto_subtle_digest( #[serde] algorithm: CryptoHash, #[buffer] data: JsBuffer, -) -> Result { +) -> Result { let output = spawn_blocking(move || { digest::digest(algorithm.into(), &data) .as_ref() @@ -659,7 +705,7 @@ pub struct WrapUnwrapKeyArg { pub fn op_crypto_wrap_key( #[serde] args: WrapUnwrapKeyArg, #[buffer] data: JsBuffer, -) -> Result { +) -> Result { let algorithm = args.algorithm; match algorithm { @@ -667,20 +713,20 @@ pub fn op_crypto_wrap_key( let key = args.key.as_secret_key()?; if data.len() % 8 != 0 { - return Err(type_error("Data must be multiple of 8 bytes")); + return Err(Error::DataInvalidSize); } let wrapped_key = match key.len() { 16 => KekAes128::new(key.into()).wrap_vec(&data), 24 => KekAes192::new(key.into()).wrap_vec(&data), 32 => KekAes256::new(key.into()).wrap_vec(&data), - _ => return Err(type_error("Invalid key length")), + _ => return Err(Error::InvalidKeyLength), } - .map_err(|_| operation_error("encryption error"))?; + .map_err(|_| Error::EncryptionError)?; Ok(wrapped_key.into()) } - _ => Err(type_error("Unsupported algorithm")), + _ => Err(Error::UnsupportedAlgorithm), } } @@ -689,29 +735,27 @@ pub fn op_crypto_wrap_key( pub fn op_crypto_unwrap_key( #[serde] args: WrapUnwrapKeyArg, #[buffer] data: JsBuffer, -) -> Result { +) -> Result { let algorithm = 
args.algorithm; match algorithm { Algorithm::AesKw => { let key = args.key.as_secret_key()?; if data.len() % 8 != 0 { - return Err(type_error("Data must be multiple of 8 bytes")); + return Err(Error::DataInvalidSize); } let unwrapped_key = match key.len() { 16 => KekAes128::new(key.into()).unwrap_vec(&data), 24 => KekAes192::new(key.into()).unwrap_vec(&data), 32 => KekAes256::new(key.into()).unwrap_vec(&data), - _ => return Err(type_error("Invalid key length")), + _ => return Err(Error::InvalidKeyLength), } - .map_err(|_| { - operation_error("decryption error - integrity check failed") - })?; + .map_err(|_| Error::DecryptionError)?; Ok(unwrapped_key.into()) } - _ => Err(type_error("Unsupported algorithm")), + _ => Err(Error::UnsupportedAlgorithm), } } diff --git a/ext/crypto/shared.rs b/ext/crypto/shared.rs index d06a268cd..f70d32856 100644 --- a/ext/crypto/shared.rs +++ b/ext/crypto/shared.rs @@ -2,9 +2,6 @@ use std::borrow::Cow; -use deno_core::error::custom_error; -use deno_core::error::type_error; -use deno_core::error::AnyError; use deno_core::JsBuffer; use deno_core::ToJsBuffer; use elliptic_curve::sec1::ToEncodedPoint; @@ -63,47 +60,73 @@ pub enum RustRawKeyData { Public(ToJsBuffer), } +#[derive(Debug, thiserror::Error)] +pub enum SharedError { + #[error("expected valid private key")] + ExpectedValidPrivateKey, + #[error("expected valid public key")] + ExpectedValidPublicKey, + #[error("expected valid private EC key")] + ExpectedValidPrivateECKey, + #[error("expected valid public EC key")] + ExpectedValidPublicECKey, + #[error("expected private key")] + ExpectedPrivateKey, + #[error("expected public key")] + ExpectedPublicKey, + #[error("expected secret key")] + ExpectedSecretKey, + #[error("failed to decode private key")] + FailedDecodePrivateKey, + #[error("failed to decode public key")] + FailedDecodePublicKey, + #[error("unsupported format")] + UnsupportedFormat, +} + impl V8RawKeyData { - pub fn as_rsa_public_key(&self) -> Result, AnyError> { + pub fn 
as_rsa_public_key(&self) -> Result, SharedError> { match self { V8RawKeyData::Public(data) => Ok(Cow::Borrowed(data)), V8RawKeyData::Private(data) => { let private_key = RsaPrivateKey::from_pkcs1_der(data) - .map_err(|_| type_error("expected valid private key"))?; + .map_err(|_| SharedError::ExpectedValidPrivateKey)?; let public_key_doc = private_key .to_public_key() .to_pkcs1_der() - .map_err(|_| type_error("expected valid public key"))?; + .map_err(|_| SharedError::ExpectedValidPublicKey)?; Ok(Cow::Owned(public_key_doc.as_bytes().into())) } - _ => Err(type_error("expected public key")), + _ => Err(SharedError::ExpectedPublicKey), } } - pub fn as_rsa_private_key(&self) -> Result<&[u8], AnyError> { + pub fn as_rsa_private_key(&self) -> Result<&[u8], SharedError> { match self { V8RawKeyData::Private(data) => Ok(data), - _ => Err(type_error("expected private key")), + _ => Err(SharedError::ExpectedPrivateKey), } } - pub fn as_secret_key(&self) -> Result<&[u8], AnyError> { + pub fn as_secret_key(&self) -> Result<&[u8], SharedError> { match self { V8RawKeyData::Secret(data) => Ok(data), - _ => Err(type_error("expected secret key")), + _ => Err(SharedError::ExpectedSecretKey), } } - pub fn as_ec_public_key_p256(&self) -> Result { + pub fn as_ec_public_key_p256( + &self, + ) -> Result { match self { V8RawKeyData::Public(data) => p256::PublicKey::from_sec1_bytes(data) .map(|p| p.to_encoded_point(false)) - .map_err(|_| type_error("expected valid public EC key")), + .map_err(|_| SharedError::ExpectedValidPublicECKey), V8RawKeyData::Private(data) => { let signing_key = p256::SecretKey::from_pkcs8_der(data) - .map_err(|_| type_error("expected valid private EC key"))?; + .map_err(|_| SharedError::ExpectedValidPrivateECKey)?; Ok(signing_key.public_key().to_encoded_point(false)) } // Should never reach here. 
@@ -111,14 +134,16 @@ impl V8RawKeyData { } } - pub fn as_ec_public_key_p384(&self) -> Result { + pub fn as_ec_public_key_p384( + &self, + ) -> Result { match self { V8RawKeyData::Public(data) => p384::PublicKey::from_sec1_bytes(data) .map(|p| p.to_encoded_point(false)) - .map_err(|_| type_error("expected valid public EC key")), + .map_err(|_| SharedError::ExpectedValidPublicECKey), V8RawKeyData::Private(data) => { let signing_key = p384::SecretKey::from_pkcs8_der(data) - .map_err(|_| type_error("expected valid private EC key"))?; + .map_err(|_| SharedError::ExpectedValidPrivateECKey)?; Ok(signing_key.public_key().to_encoded_point(false)) } // Should never reach here. @@ -126,16 +151,18 @@ impl V8RawKeyData { } } - pub fn as_ec_public_key_p521(&self) -> Result { + pub fn as_ec_public_key_p521( + &self, + ) -> Result { match self { V8RawKeyData::Public(data) => { // public_key is a serialized EncodedPoint p521::EncodedPoint::from_bytes(data) - .map_err(|_| type_error("expected valid public EC key")) + .map_err(|_| SharedError::ExpectedValidPublicECKey) } V8RawKeyData::Private(data) => { let signing_key = p521::SecretKey::from_pkcs8_der(data) - .map_err(|_| type_error("expected valid private EC key"))?; + .map_err(|_| SharedError::ExpectedValidPrivateECKey)?; Ok(signing_key.public_key().to_encoded_point(false)) } // Should never reach here. 
@@ -143,26 +170,10 @@ impl V8RawKeyData { } } - pub fn as_ec_private_key(&self) -> Result<&[u8], AnyError> { + pub fn as_ec_private_key(&self) -> Result<&[u8], SharedError> { match self { V8RawKeyData::Private(data) => Ok(data), - _ => Err(type_error("expected private key")), + _ => Err(SharedError::ExpectedPrivateKey), } } } - -pub fn data_error(msg: impl Into>) -> AnyError { - custom_error("DOMExceptionDataError", msg) -} - -pub fn not_supported_error(msg: impl Into>) -> AnyError { - custom_error("DOMExceptionNotSupportedError", msg) -} - -pub fn operation_error(msg: impl Into>) -> AnyError { - custom_error("DOMExceptionOperationError", msg) -} - -pub fn unsupported_format() -> AnyError { - not_supported_error("unsupported format") -} diff --git a/ext/crypto/x25519.rs b/ext/crypto/x25519.rs index cdbd1d7c8..d2c4d986b 100644 --- a/ext/crypto/x25519.rs +++ b/ext/crypto/x25519.rs @@ -1,8 +1,6 @@ // Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. use curve25519_dalek::montgomery::MontgomeryPoint; -use deno_core::error::custom_error; -use deno_core::error::AnyError; use deno_core::op2; use deno_core::ToJsBuffer; use elliptic_curve::pkcs8::PrivateKeyInfo; @@ -13,6 +11,14 @@ use spki::der::asn1::BitString; use spki::der::Decode; use spki::der::Encode; +#[derive(Debug, thiserror::Error)] +pub enum X25519Error { + #[error("Failed to export key")] + FailedExport, + #[error(transparent)] + Der(#[from] spki::der::Error), +} + #[op2(fast)] pub fn op_crypto_generate_x25519_keypair( #[buffer] pkey: &mut [u8], @@ -113,7 +119,7 @@ pub fn op_crypto_import_pkcs8_x25519( #[serde] pub fn op_crypto_export_spki_x25519( #[buffer] pubkey: &[u8], -) -> Result { +) -> Result { let key_info = spki::SubjectPublicKeyInfo { algorithm: spki::AlgorithmIdentifierRef { // id-X25519 @@ -125,9 +131,7 @@ pub fn op_crypto_export_spki_x25519( Ok( key_info .to_der() - .map_err(|_| { - custom_error("DOMExceptionOperationError", "Failed to export key") - })? 
+ .map_err(|_| X25519Error::FailedExport)? .into(), ) } @@ -136,7 +140,7 @@ pub fn op_crypto_export_spki_x25519( #[serde] pub fn op_crypto_export_pkcs8_x25519( #[buffer] pkey: &[u8], -) -> Result { +) -> Result { use rsa::pkcs1::der::Encode; // This should probably use OneAsymmetricKey instead diff --git a/ext/crypto/x448.rs b/ext/crypto/x448.rs index 3c8f24c31..89bf48e28 100644 --- a/ext/crypto/x448.rs +++ b/ext/crypto/x448.rs @@ -1,6 +1,5 @@ // Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. -use deno_core::error::custom_error; -use deno_core::error::AnyError; + use deno_core::op2; use deno_core::ToJsBuffer; use ed448_goldilocks::curve::MontgomeryPoint; @@ -13,6 +12,14 @@ use spki::der::asn1::BitString; use spki::der::Decode; use spki::der::Encode; +#[derive(Debug, thiserror::Error)] +pub enum X448Error { + #[error("Failed to export key")] + FailedExport, + #[error(transparent)] + Der(#[from] spki::der::Error), +} + #[op2(fast)] pub fn op_crypto_generate_x448_keypair( #[buffer] pkey: &mut [u8], @@ -56,7 +63,7 @@ const X448_OID: const_oid::ObjectIdentifier = #[serde] pub fn op_crypto_export_spki_x448( #[buffer] pubkey: &[u8], -) -> Result { +) -> Result { let key_info = spki::SubjectPublicKeyInfo { algorithm: spki::AlgorithmIdentifierRef { oid: X448_OID, @@ -67,9 +74,7 @@ pub fn op_crypto_export_spki_x448( Ok( key_info .to_der() - .map_err(|_| { - custom_error("DOMExceptionOperationError", "Failed to export key") - })? + .map_err(|_| X448Error::FailedExport)? 
.into(), ) } @@ -78,7 +83,7 @@ pub fn op_crypto_export_spki_x448( #[serde] pub fn op_crypto_export_pkcs8_x448( #[buffer] pkey: &[u8], -) -> Result { +) -> Result { use rsa::pkcs1::der::Encode; let pk_info = rsa::pkcs8::PrivateKeyInfo { -- cgit v1.2.3 From 2c3900370ac3e0b62f1e0dfb86a883c75952146d Mon Sep 17 00:00:00 2001 From: Leo Kettmeir Date: Fri, 18 Oct 2024 15:57:12 -0700 Subject: refactor(ext/http): use concrete error types (#26377) --- ext/http/http_next.rs | 132 ++++++++++++++++++++++++-------------- ext/http/lib.rs | 145 ++++++++++++++++++++++++++++-------------- ext/http/request_body.rs | 12 ++-- ext/http/service.rs | 13 ++-- ext/http/websocket_upgrade.rs | 60 +++++++++++------ 5 files changed, 235 insertions(+), 127 deletions(-) (limited to 'ext') diff --git a/ext/http/http_next.rs b/ext/http/http_next.rs index 7a6cbfa45..56c46de92 100644 --- a/ext/http/http_next.rs +++ b/ext/http/http_next.rs @@ -19,7 +19,6 @@ use crate::service::SignallingRc; use crate::websocket_upgrade::WebSocketUpgrade; use crate::LocalExecutor; use cache_control::CacheControl; -use deno_core::error::AnyError; use deno_core::external; use deno_core::futures::future::poll_fn; use deno_core::futures::TryFutureExt; @@ -146,12 +145,32 @@ macro_rules! 
clone_external { }}; } +#[derive(Debug, thiserror::Error)] +pub enum HttpNextError { + #[error(transparent)] + Resource(deno_core::error::AnyError), + #[error("{0}")] + Io(#[from] io::Error), + #[error(transparent)] + WebSocketUpgrade(crate::websocket_upgrade::WebSocketUpgradeError), + #[error("{0}")] + Hyper(#[from] hyper::Error), + #[error(transparent)] + JoinError(#[from] tokio::task::JoinError), + #[error(transparent)] + Canceled(#[from] deno_core::Canceled), + #[error(transparent)] + HttpPropertyExtractor(deno_core::error::AnyError), + #[error(transparent)] + UpgradeUnavailable(#[from] crate::service::UpgradeUnavailableError), +} + #[op2(fast)] #[smi] pub fn op_http_upgrade_raw( state: &mut OpState, external: *const c_void, -) -> Result { +) -> Result { // SAFETY: external is deleted before calling this op. let http = unsafe { take_external!(external, "op_http_upgrade_raw") }; @@ -177,7 +196,7 @@ pub fn op_http_upgrade_raw( upgraded.write_all(&bytes).await?; break upgraded; } - Err(err) => return Err(err), + Err(err) => return Err(HttpNextError::WebSocketUpgrade(err)), } }; @@ -193,7 +212,7 @@ pub fn op_http_upgrade_raw( } read_tx.write_all(&buf[..read]).await?; } - Ok::<_, AnyError>(()) + Ok::<_, HttpNextError>(()) }); spawn(async move { let mut buf = [0; 1024]; @@ -204,7 +223,7 @@ pub fn op_http_upgrade_raw( } upgraded_tx.write_all(&buf[..read]).await?; } - Ok::<_, AnyError>(()) + Ok::<_, HttpNextError>(()) }); Ok(()) @@ -223,7 +242,7 @@ pub async fn op_http_upgrade_websocket_next( state: Rc>, external: *const c_void, #[serde] headers: Vec<(ByteString, ByteString)>, -) -> Result { +) -> Result { let http = // SAFETY: external is deleted before calling this op. unsafe { take_external!(external, "op_http_upgrade_websocket_next") }; @@ -690,7 +709,7 @@ pub async fn op_http_set_response_body_resource( #[smi] stream_rid: ResourceId, auto_close: bool, status: u16, -) -> Result { +) -> Result { let http = // SAFETY: op is called with external. 
unsafe { clone_external!(external, "op_http_set_response_body_resource") }; @@ -705,9 +724,15 @@ pub async fn op_http_set_response_body_resource( let resource = { let mut state = state.borrow_mut(); if auto_close { - state.resource_table.take_any(stream_rid)? + state + .resource_table + .take_any(stream_rid) + .map_err(HttpNextError::Resource)? } else { - state.resource_table.get_any(stream_rid)? + state + .resource_table + .get_any(stream_rid) + .map_err(HttpNextError::Resource)? } }; @@ -814,17 +839,17 @@ async fn serve_http2_autodetect( io: impl HttpServeStream, svc: impl HttpService + 'static, cancel: Rc, -) -> Result<(), AnyError> { +) -> Result<(), HttpNextError> { let prefix = NetworkStreamPrefixCheck::new(io, HTTP2_PREFIX); let (matches, io) = prefix.match_prefix().await?; if matches { serve_http2_unconditional(io, svc, cancel) .await - .map_err(|e| e.into()) + .map_err(HttpNextError::Hyper) } else { serve_http11_unconditional(io, svc, cancel) .await - .map_err(|e| e.into()) + .map_err(HttpNextError::Hyper) } } @@ -833,7 +858,7 @@ fn serve_https( request_info: HttpConnectionProperties, lifetime: HttpLifetime, tx: tokio::sync::mpsc::Sender>, -) -> JoinHandle> { +) -> JoinHandle> { let HttpLifetime { server_state, connection_cancel_handle, @@ -852,11 +877,11 @@ fn serve_https( if Some(TLS_ALPN_HTTP_2) == handshake.as_deref() { serve_http2_unconditional(io, svc, listen_cancel_handle) .await - .map_err(|e| e.into()) + .map_err(HttpNextError::Hyper) } else if Some(TLS_ALPN_HTTP_11) == handshake.as_deref() { serve_http11_unconditional(io, svc, listen_cancel_handle) .await - .map_err(|e| e.into()) + .map_err(HttpNextError::Hyper) } else { serve_http2_autodetect(io, svc, listen_cancel_handle).await } @@ -870,7 +895,7 @@ fn serve_http( request_info: HttpConnectionProperties, lifetime: HttpLifetime, tx: tokio::sync::mpsc::Sender>, -) -> JoinHandle> { +) -> JoinHandle> { let HttpLifetime { server_state, connection_cancel_handle, @@ -891,7 +916,7 @@ fn serve_http_on( 
listen_properties: &HttpListenProperties, lifetime: HttpLifetime, tx: tokio::sync::mpsc::Sender>, -) -> JoinHandle> +) -> JoinHandle> where HTTP: HttpPropertyExtractor, { @@ -922,7 +947,7 @@ struct HttpLifetime { } struct HttpJoinHandle { - join_handle: AsyncRefCell>>>, + join_handle: AsyncRefCell>>>, connection_cancel_handle: Rc, listen_cancel_handle: Rc, rx: AsyncRefCell>>, @@ -982,12 +1007,13 @@ impl Drop for HttpJoinHandle { pub fn op_http_serve( state: Rc>, #[smi] listener_rid: ResourceId, -) -> Result<(ResourceId, &'static str, String), AnyError> +) -> Result<(ResourceId, &'static str, String), HttpNextError> where HTTP: HttpPropertyExtractor, { let listener = - HTTP::get_listener_for_rid(&mut state.borrow_mut(), listener_rid)?; + HTTP::get_listener_for_rid(&mut state.borrow_mut(), listener_rid) + .map_err(HttpNextError::Resource)?; let listen_properties = HTTP::listen_properties_from_listener(&listener)?; @@ -1002,7 +1028,8 @@ where loop { let conn = HTTP::accept_connection_from_listener(&listener) .try_or_cancel(listen_cancel_clone.clone()) - .await?; + .await + .map_err(HttpNextError::HttpPropertyExtractor)?; serve_http_on::( conn, &listen_properties_clone, @@ -1011,7 +1038,7 @@ where ); } #[allow(unreachable_code)] - Ok::<_, AnyError>(()) + Ok::<_, HttpNextError>(()) }); // Set the handle after we start the future @@ -1031,25 +1058,25 @@ where pub fn op_http_serve_on( state: Rc>, #[smi] connection_rid: ResourceId, -) -> Result<(ResourceId, &'static str, String), AnyError> +) -> Result<(ResourceId, &'static str, String), HttpNextError> where HTTP: HttpPropertyExtractor, { let connection = - HTTP::get_connection_for_rid(&mut state.borrow_mut(), connection_rid)?; + HTTP::get_connection_for_rid(&mut state.borrow_mut(), connection_rid) + .map_err(HttpNextError::Resource)?; let listen_properties = HTTP::listen_properties_from_connection(&connection)?; let (tx, rx) = tokio::sync::mpsc::channel(10); let resource: Rc = Rc::new(HttpJoinHandle::new(rx)); - let 
handle: JoinHandle> = - serve_http_on::( - connection, - &listen_properties, - resource.lifetime(), - tx, - ); + let handle = serve_http_on::( + connection, + &listen_properties, + resource.lifetime(), + tx, + ); // Set the handle after we start the future *RcRef::map(&resource, |this| &this.join_handle) @@ -1095,12 +1122,13 @@ pub fn op_http_try_wait( pub async fn op_http_wait( state: Rc>, #[smi] rid: ResourceId, -) -> Result<*const c_void, AnyError> { +) -> Result<*const c_void, HttpNextError> { // We will get the join handle initially, as we might be consuming requests still let join_handle = state .borrow_mut() .resource_table - .get::(rid)?; + .get::(rid) + .map_err(HttpNextError::Resource)?; let cancel = join_handle.listen_cancel_handle(); let next = async { @@ -1127,13 +1155,12 @@ pub async fn op_http_wait( // Filter out shutdown (ENOTCONN) errors if let Err(err) = res { - if let Some(err) = err.source() { - if let Some(err) = err.downcast_ref::() { - if err.kind() == io::ErrorKind::NotConnected { - return Ok(null()); - } + if let HttpNextError::Io(err) = &err { + if err.kind() == io::ErrorKind::NotConnected { + return Ok(null()); } } + return Err(err); } @@ -1146,7 +1173,7 @@ pub fn op_http_cancel( state: &mut OpState, #[smi] rid: ResourceId, graceful: bool, -) -> Result<(), AnyError> { +) -> Result<(), deno_core::error::AnyError> { let join_handle = state.resource_table.get::(rid)?; if graceful { @@ -1166,11 +1193,12 @@ pub async fn op_http_close( state: Rc>, #[smi] rid: ResourceId, graceful: bool, -) -> Result<(), AnyError> { +) -> Result<(), HttpNextError> { let join_handle = state .borrow_mut() .resource_table - .take::(rid)?; + .take::(rid) + .map_err(HttpNextError::Resource)?; if graceful { http_general_trace!("graceful shutdown"); @@ -1216,23 +1244,26 @@ impl UpgradeStream { } } - async fn read(self: Rc, buf: &mut [u8]) -> Result { + async fn read( + self: Rc, + buf: &mut [u8], + ) -> Result { let cancel_handle = RcRef::map(self.clone(), |this| 
&this.cancel_handle); async { let read = RcRef::map(self, |this| &this.read); let mut read = read.borrow_mut().await; - Ok(Pin::new(&mut *read).read(buf).await?) + Pin::new(&mut *read).read(buf).await } .try_or_cancel(cancel_handle) .await } - async fn write(self: Rc, buf: &[u8]) -> Result { + async fn write(self: Rc, buf: &[u8]) -> Result { let cancel_handle = RcRef::map(self.clone(), |this| &this.cancel_handle); async { let write = RcRef::map(self, |this| &this.write); let mut write = write.borrow_mut().await; - Ok(Pin::new(&mut *write).write(buf).await?) + Pin::new(&mut *write).write(buf).await } .try_or_cancel(cancel_handle) .await @@ -1242,7 +1273,7 @@ impl UpgradeStream { self: Rc, buf1: &[u8], buf2: &[u8], - ) -> Result { + ) -> Result { let mut wr = RcRef::map(self, |r| &r.write).borrow_mut().await; let total = buf1.len() + buf2.len(); @@ -1295,9 +1326,12 @@ pub async fn op_raw_write_vectored( #[smi] rid: ResourceId, #[buffer] buf1: JsBuffer, #[buffer] buf2: JsBuffer, -) -> Result { - let resource: Rc = - state.borrow().resource_table.get::(rid)?; +) -> Result { + let resource: Rc = state + .borrow() + .resource_table + .get::(rid) + .map_err(HttpNextError::Resource)?; let nwritten = resource.write_vectored(&buf1, &buf2).await?; Ok(nwritten) } diff --git a/ext/http/lib.rs b/ext/http/lib.rs index 5461713aa..6243804a1 100644 --- a/ext/http/lib.rs +++ b/ext/http/lib.rs @@ -6,8 +6,6 @@ use async_compression::Level; use base64::prelude::BASE64_STANDARD; use base64::Engine; use cache_control::CacheControl; -use deno_core::error::custom_error; -use deno_core::error::AnyError; use deno_core::futures::channel::mpsc; use deno_core::futures::channel::oneshot; use deno_core::futures::future::pending; @@ -89,11 +87,14 @@ mod service; mod websocket_upgrade; use fly_accept_encoding::Encoding; +pub use http_next::HttpNextError; pub use request_properties::DefaultHttpPropertyExtractor; pub use request_properties::HttpConnectionProperties; pub use 
request_properties::HttpListenProperties; pub use request_properties::HttpPropertyExtractor; pub use request_properties::HttpRequestProperties; +pub use service::UpgradeUnavailableError; +pub use websocket_upgrade::WebSocketUpgradeError; deno_core::extension!( deno_http, @@ -134,6 +135,38 @@ deno_core::extension!( esm = ["00_serve.ts", "01_http.js", "02_websocket.ts"], ); +#[derive(Debug, thiserror::Error)] +pub enum HttpError { + #[error(transparent)] + Resource(deno_core::error::AnyError), + #[error(transparent)] + Canceled(#[from] deno_core::Canceled), + #[error("{0}")] + HyperV014(#[source] Arc), + #[error("{0}")] + InvalidHeaderName(#[from] hyper_v014::header::InvalidHeaderName), + #[error("{0}")] + InvalidHeaderValue(#[from] hyper_v014::header::InvalidHeaderValue), + #[error("{0}")] + Http(#[from] hyper_v014::http::Error), + #[error("response headers already sent")] + ResponseHeadersAlreadySent, + #[error("connection closed while sending response")] + ConnectionClosedWhileSendingResponse, + #[error("already in use")] + AlreadyInUse, + #[error("{0}")] + Io(#[from] std::io::Error), + #[error("no response headers")] + NoResponseHeaders, + #[error("response already completed")] + ResponseAlreadyCompleted, + #[error("cannot upgrade because request body was used")] + UpgradeBodyUsed, + #[error(transparent)] + Other(deno_core::error::AnyError), +} + pub enum HttpSocketAddr { IpSocket(std::net::SocketAddr), #[cfg(unix)] @@ -216,7 +249,7 @@ impl HttpConnResource { String, String, )>, - AnyError, + HttpError, > { let fut = async { let (request_tx, request_rx) = oneshot::channel(); @@ -259,8 +292,8 @@ impl HttpConnResource { } /// A future that completes when this HTTP connection is closed or errors. 
- async fn closed(&self) -> Result<(), AnyError> { - self.closed_fut.clone().map_err(AnyError::from).await + async fn closed(&self) -> Result<(), HttpError> { + self.closed_fut.clone().map_err(HttpError::HyperV014).await } } @@ -280,14 +313,13 @@ pub fn http_create_conn_resource( io: S, addr: A, scheme: &'static str, -) -> Result +) -> ResourceId where S: AsyncRead + AsyncWrite + Unpin + Send + 'static, A: Into, { let conn = HttpConnResource::new(io, scheme, addr.into()); - let rid = state.resource_table.add(conn); - Ok(rid) + state.resource_table.add(conn) } /// An object that implements the `hyper::Service` trait, through which Hyper @@ -423,7 +455,9 @@ impl Resource for HttpStreamReadResource { // safely call `await` on it without creating a race condition. Some(_) => match body.as_mut().next().await.unwrap() { Ok(chunk) => assert!(chunk.is_empty()), - Err(err) => break Err(AnyError::from(err)), + Err(err) => { + break Err(HttpError::HyperV014(Arc::new(err)).into()) + } }, None => break Ok(BufView::empty()), } @@ -545,8 +579,12 @@ struct NextRequestResponse( async fn op_http_accept( state: Rc>, #[smi] rid: ResourceId, -) -> Result, AnyError> { - let conn = state.borrow().resource_table.get::(rid)?; +) -> Result, HttpError> { + let conn = state + .borrow() + .resource_table + .get::(rid) + .map_err(HttpError::Resource)?; match conn.accept().await { Ok(Some((read_stream, write_stream, method, url))) => { @@ -657,11 +695,12 @@ async fn op_http_write_headers( #[smi] status: u16, #[serde] headers: Vec<(ByteString, ByteString)>, #[serde] data: Option, -) -> Result<(), AnyError> { +) -> Result<(), HttpError> { let stream = state .borrow_mut() .resource_table - .get::(rid)?; + .get::(rid) + .map_err(HttpError::Resource)?; // Track supported encoding let encoding = stream.accept_encoding; @@ -708,14 +747,14 @@ async fn op_http_write_headers( let mut old_wr = RcRef::map(&stream, |r| &r.wr).borrow_mut().await; let response_tx = match replace(&mut *old_wr, new_wr) { 
HttpResponseWriter::Headers(response_tx) => response_tx, - _ => return Err(http_error("response headers already sent")), + _ => return Err(HttpError::ResponseHeadersAlreadySent), }; match response_tx.send(body) { Ok(_) => Ok(()), Err(_) => { stream.conn.closed().await?; - Err(http_error("connection closed while sending response")) + Err(HttpError::ConnectionClosedWhileSendingResponse) } } } @@ -725,11 +764,14 @@ async fn op_http_write_headers( fn op_http_headers( state: &mut OpState, #[smi] rid: u32, -) -> Result, AnyError> { - let stream = state.resource_table.get::(rid)?; +) -> Result, HttpError> { + let stream = state + .resource_table + .get::(rid) + .map_err(HttpError::Resource)?; let rd = RcRef::map(&stream, |r| &r.rd) .try_borrow() - .ok_or_else(|| http_error("already in use"))?; + .ok_or(HttpError::AlreadyInUse)?; match &*rd { HttpRequestReader::Headers(request) => Ok(req_headers(request.headers())), HttpRequestReader::Body(headers, _) => Ok(req_headers(headers)), @@ -741,7 +783,7 @@ fn http_response( data: Option, compressing: bool, encoding: Encoding, -) -> Result<(HttpResponseWriter, hyper_v014::Body), AnyError> { +) -> Result<(HttpResponseWriter, hyper_v014::Body), HttpError> { // Gzip, after level 1, doesn't produce significant size difference. 
// This default matches nginx default gzip compression level (1): // https://nginx.org/en/docs/http/ngx_http_gzip_module.html#gzip_comp_level @@ -878,25 +920,34 @@ async fn op_http_write_resource( state: Rc>, #[smi] rid: ResourceId, #[smi] stream: ResourceId, -) -> Result<(), AnyError> { +) -> Result<(), HttpError> { let http_stream = state .borrow() .resource_table - .get::(rid)?; + .get::(rid) + .map_err(HttpError::Resource)?; let mut wr = RcRef::map(&http_stream, |r| &r.wr).borrow_mut().await; - let resource = state.borrow().resource_table.get_any(stream)?; + let resource = state + .borrow() + .resource_table + .get_any(stream) + .map_err(HttpError::Resource)?; loop { match *wr { HttpResponseWriter::Headers(_) => { - return Err(http_error("no response headers")) + return Err(HttpError::NoResponseHeaders) } HttpResponseWriter::Closed => { - return Err(http_error("response already completed")) + return Err(HttpError::ResponseAlreadyCompleted) } _ => {} }; - let view = resource.clone().read(64 * 1024).await?; // 64KB + let view = resource + .clone() + .read(64 * 1024) + .await + .map_err(HttpError::Other)?; // 64KB if view.is_empty() { break; } @@ -937,16 +988,17 @@ async fn op_http_write( state: Rc>, #[smi] rid: ResourceId, #[buffer] buf: JsBuffer, -) -> Result<(), AnyError> { +) -> Result<(), HttpError> { let stream = state .borrow() .resource_table - .get::(rid)?; + .get::(rid) + .map_err(HttpError::Resource)?; let mut wr = RcRef::map(&stream, |r| &r.wr).borrow_mut().await; match &mut *wr { - HttpResponseWriter::Headers(_) => Err(http_error("no response headers")), - HttpResponseWriter::Closed => Err(http_error("response already completed")), + HttpResponseWriter::Headers(_) => Err(HttpError::NoResponseHeaders), + HttpResponseWriter::Closed => Err(HttpError::ResponseAlreadyCompleted), HttpResponseWriter::Body { writer, .. 
} => { let mut result = writer.write_all(&buf).await; if result.is_ok() { @@ -961,7 +1013,7 @@ async fn op_http_write( stream.conn.closed().await?; // If there was no connection error, drop body_tx. *wr = HttpResponseWriter::Closed; - Err(http_error("response already completed")) + Err(HttpError::ResponseAlreadyCompleted) } } } @@ -975,7 +1027,7 @@ async fn op_http_write( stream.conn.closed().await?; // If there was no connection error, drop body_tx. *wr = HttpResponseWriter::Closed; - Err(http_error("response already completed")) + Err(HttpError::ResponseAlreadyCompleted) } } } @@ -989,11 +1041,12 @@ async fn op_http_write( async fn op_http_shutdown( state: Rc>, #[smi] rid: ResourceId, -) -> Result<(), AnyError> { +) -> Result<(), HttpError> { let stream = state .borrow() .resource_table - .get::(rid)?; + .get::(rid) + .map_err(HttpError::Resource)?; let mut wr = RcRef::map(&stream, |r| &r.wr).borrow_mut().await; let wr = take(&mut *wr); match wr { @@ -1022,14 +1075,12 @@ async fn op_http_shutdown( #[op2] #[string] -fn op_http_websocket_accept_header( - #[string] key: String, -) -> Result { +fn op_http_websocket_accept_header(#[string] key: String) -> String { let digest = ring::digest::digest( &ring::digest::SHA1_FOR_LEGACY_USE_ONLY, format!("{key}258EAFA5-E914-47DA-95CA-C5AB0DC85B11").as_bytes(), ); - Ok(BASE64_STANDARD.encode(digest)) + BASE64_STANDARD.encode(digest) } #[op2(async)] @@ -1037,22 +1088,24 @@ fn op_http_websocket_accept_header( async fn op_http_upgrade_websocket( state: Rc>, #[smi] rid: ResourceId, -) -> Result { +) -> Result { let stream = state .borrow_mut() .resource_table - .get::(rid)?; + .get::(rid) + .map_err(HttpError::Resource)?; let mut rd = RcRef::map(&stream, |r| &r.rd).borrow_mut().await; let request = match &mut *rd { HttpRequestReader::Headers(request) => request, - _ => { - return Err(http_error("cannot upgrade because request body was used")) - } + _ => return Err(HttpError::UpgradeBodyUsed), }; - let (transport, bytes) = - 
extract_network_stream(hyper_v014::upgrade::on(request).await?); + let (transport, bytes) = extract_network_stream( + hyper_v014::upgrade::on(request) + .await + .map_err(|err| HttpError::HyperV014(Arc::new(err)))?, + ); Ok(ws_create_server_stream( &mut state.borrow_mut(), transport, @@ -1084,10 +1137,6 @@ where } } -fn http_error(message: &'static str) -> AnyError { - custom_error("Http", message) -} - /// Filters out the ever-surprising 'shutdown ENOTCONN' errors. fn filter_enotconn( result: Result<(), hyper_v014::Error>, diff --git a/ext/http/request_body.rs b/ext/http/request_body.rs index 45df12457..f1c3f358e 100644 --- a/ext/http/request_body.rs +++ b/ext/http/request_body.rs @@ -1,9 +1,9 @@ // Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. use bytes::Bytes; -use deno_core::error::AnyError; use deno_core::futures::stream::Peekable; use deno_core::futures::Stream; use deno_core::futures::StreamExt; +use deno_core::futures::TryFutureExt; use deno_core::AsyncRefCell; use deno_core::AsyncResult; use deno_core::BufView; @@ -22,7 +22,7 @@ use std::task::Poll; struct ReadFuture(Incoming); impl Stream for ReadFuture { - type Item = Result; + type Item = Result; fn poll_next( self: Pin<&mut Self>, @@ -37,13 +37,13 @@ impl Stream for ReadFuture { if let Ok(data) = frame.into_data() { // Ensure that we never yield an empty frame if !data.is_empty() { - break Poll::Ready(Some(Ok::<_, AnyError>(data))); + break Poll::Ready(Some(Ok(data))); } } // Loop again so we don't lose the waker continue; } - Some(Err(e)) => Poll::Ready(Some(Err(e.into()))), + Some(Err(e)) => Poll::Ready(Some(Err(e))), None => Poll::Ready(None), }; } @@ -58,7 +58,7 @@ impl HttpRequestBody { Self(AsyncRefCell::new(ReadFuture(body).peekable()), size_hint) } - async fn read(self: Rc, limit: usize) -> Result { + async fn read(self: Rc, limit: usize) -> Result { let peekable = RcRef::map(self, |this| &this.0); let mut peekable = peekable.borrow_mut().await; match Pin::new(&mut 
*peekable).peek_mut().await { @@ -82,7 +82,7 @@ impl Resource for HttpRequestBody { } fn read(self: Rc, limit: usize) -> AsyncResult { - Box::pin(HttpRequestBody::read(self, limit)) + Box::pin(HttpRequestBody::read(self, limit).map_err(Into::into)) } fn size_hint(&self) -> (u64, Option) { diff --git a/ext/http/service.rs b/ext/http/service.rs index 787e9babf..75f93d77c 100644 --- a/ext/http/service.rs +++ b/ext/http/service.rs @@ -2,7 +2,6 @@ use crate::request_properties::HttpConnectionProperties; use crate::response_body::ResponseBytesInner; use crate::response_body::ResponseStreamResult; -use deno_core::error::AnyError; use deno_core::futures::ready; use deno_core::BufView; use deno_core::OpState; @@ -206,6 +205,10 @@ pub(crate) async fn handle_request( Ok(response) } +#[derive(Debug, thiserror::Error)] +#[error("upgrade unavailable")] +pub struct UpgradeUnavailableError; + struct HttpRecordInner { server_state: SignallingRc, request_info: HttpConnectionProperties, @@ -344,14 +347,14 @@ impl HttpRecord { } /// Perform the Hyper upgrade on this record. - pub fn upgrade(&self) -> Result { + pub fn upgrade(&self) -> Result { // Manually perform the upgrade. We're peeking into hyper's underlying machinery here a bit self .self_mut() .request_parts .extensions .remove::() - .ok_or_else(|| AnyError::msg("upgrade unavailable")) + .ok_or(UpgradeUnavailableError) } /// Take the Hyper body from this record. 
@@ -515,7 +518,7 @@ pub struct HttpRecordResponse(ManuallyDrop>); impl Body for HttpRecordResponse { type Data = BufView; - type Error = AnyError; + type Error = deno_core::error::AnyError; fn poll_frame( self: Pin<&mut Self>, @@ -640,7 +643,7 @@ mod tests { } #[tokio::test] - async fn test_handle_request() -> Result<(), AnyError> { + async fn test_handle_request() -> Result<(), deno_core::error::AnyError> { let (tx, mut rx) = tokio::sync::mpsc::channel(10); let server_state = HttpServerState::new(); let server_state_check = server_state.clone(); diff --git a/ext/http/websocket_upgrade.rs b/ext/http/websocket_upgrade.rs index 4dead767a..af9504717 100644 --- a/ext/http/websocket_upgrade.rs +++ b/ext/http/websocket_upgrade.rs @@ -4,7 +4,6 @@ use std::marker::PhantomData; use bytes::Bytes; use bytes::BytesMut; -use deno_core::error::AnyError; use httparse::Status; use hyper::header::HeaderName; use hyper::header::HeaderValue; @@ -13,12 +12,30 @@ use memmem::Searcher; use memmem::TwoWaySearcher; use once_cell::sync::OnceCell; -use crate::http_error; +#[derive(Debug, thiserror::Error)] +pub enum WebSocketUpgradeError { + #[error("invalid headers")] + InvalidHeaders, + #[error("{0}")] + HttpParse(#[from] httparse::Error), + #[error("{0}")] + Http(#[from] http::Error), + #[error("{0}")] + Utf8(#[from] std::str::Utf8Error), + #[error("{0}")] + InvalidHeaderName(#[from] http::header::InvalidHeaderName), + #[error("{0}")] + InvalidHeaderValue(#[from] http::header::InvalidHeaderValue), + #[error("invalid HTTP status line")] + InvalidHttpStatusLine, + #[error("attempted to write to completed upgrade buffer")] + UpgradeBufferAlreadyCompleted, +} /// Given a buffer that ends in `\n\n` or `\r\n\r\n`, returns a parsed [`Request`]. 
fn parse_response( header_bytes: &[u8], -) -> Result<(usize, Response), AnyError> { +) -> Result<(usize, Response), WebSocketUpgradeError> { let mut headers = [httparse::EMPTY_HEADER; 16]; let status = httparse::parse_headers(header_bytes, &mut headers)?; match status { @@ -32,7 +49,7 @@ fn parse_response( } Ok((index, resp)) } - _ => Err(http_error("invalid headers")), + _ => Err(WebSocketUpgradeError::InvalidHeaders), } } @@ -69,11 +86,14 @@ pub struct WebSocketUpgrade { impl WebSocketUpgrade { /// Ensures that the status line starts with "HTTP/1.1 101 " which matches all of the node.js /// WebSocket libraries that are known. We don't care about the trailing status text. - fn validate_status(&self, status: &[u8]) -> Result<(), AnyError> { + fn validate_status( + &self, + status: &[u8], + ) -> Result<(), WebSocketUpgradeError> { if status.starts_with(b"HTTP/1.1 101 ") { Ok(()) } else { - Err(http_error("invalid HTTP status line")) + Err(WebSocketUpgradeError::InvalidHttpStatusLine) } } @@ -82,7 +102,7 @@ impl WebSocketUpgrade { pub fn write( &mut self, bytes: &[u8], - ) -> Result, Bytes)>, AnyError> { + ) -> Result, Bytes)>, WebSocketUpgradeError> { use WebSocketUpgradeState::*; match self.state { @@ -142,9 +162,7 @@ impl WebSocketUpgrade { Ok(None) } } - Complete => { - Err(http_error("attempted to write to completed upgrade buffer")) - } + Complete => Err(WebSocketUpgradeError::UpgradeBufferAlreadyCompleted), } } } @@ -157,8 +175,8 @@ mod tests { type ExpectedResponseAndHead = Option<(Response, &'static [u8])>; fn assert_response( - result: Result, Bytes)>, AnyError>, - expected: Result, + result: Result, Bytes)>, WebSocketUpgradeError>, + expected: Result, chunk_info: Option<(usize, usize)>, ) { let formatted = format!("{result:?}"); @@ -189,8 +207,8 @@ mod tests { "Expected Ok(None), was {formatted}", ), Err(e) => assert_eq!( - e, - result.err().map(|e| format!("{e:?}")).unwrap_or_default(), + format!("{e:?}"), + format!("{:?}", result.unwrap_err()), "Expected 
error, was {formatted}", ), } @@ -198,7 +216,7 @@ mod tests { fn validate_upgrade_all_at_once( s: &str, - expected: Result, + expected: Result, ) { let mut upgrade = WebSocketUpgrade::default(); let res = upgrade.write(s.as_bytes()); @@ -209,7 +227,7 @@ mod tests { fn validate_upgrade_chunks( s: &str, size: usize, - expected: Result, + expected: Result, ) { let chunk_info = Some((s.as_bytes().len(), size)); let mut upgrade = WebSocketUpgrade::default(); @@ -226,7 +244,7 @@ mod tests { fn validate_upgrade( s: &str, - expected: fn() -> Result, + expected: fn() -> Result, ) { validate_upgrade_all_at_once(s, expected()); validate_upgrade_chunks(s, 1, expected()); @@ -315,7 +333,7 @@ mod tests { #[test] fn upgrade_invalid_status() { validate_upgrade("HTTP/1.1 200 OK\nConnection: Upgrade\n\n", || { - Err("invalid HTTP status line") + Err(WebSocketUpgradeError::InvalidHttpStatusLine) }); } @@ -327,7 +345,11 @@ mod tests { .join("\n"); validate_upgrade( &format!("HTTP/1.1 101 Switching Protocols\n{headers}\n\n"), - || Err("too many headers"), + || { + Err(WebSocketUpgradeError::HttpParse( + httparse::Error::TooManyHeaders, + )) + }, ); } } -- cgit v1.2.3 From 6c4ef11f04cc35b61417bf08d5e7592d44197c75 Mon Sep 17 00:00:00 2001 From: Leo Kettmeir Date: Fri, 18 Oct 2024 18:20:58 -0700 Subject: refactor(ext/fetch): use concrete error types (#26220) --- ext/fetch/Cargo.toml | 1 + ext/fetch/fs_fetch_handler.rs | 5 +- ext/fetch/lib.rs | 244 ++++++++++++++++++++++++++---------------- ext/node/ops/http.rs | 95 +++++++++------- 4 files changed, 211 insertions(+), 134 deletions(-) (limited to 'ext') diff --git a/ext/fetch/Cargo.toml b/ext/fetch/Cargo.toml index ddb58a3f3..afffd3ffb 100644 --- a/ext/fetch/Cargo.toml +++ b/ext/fetch/Cargo.toml @@ -32,6 +32,7 @@ percent-encoding.workspace = true rustls-webpki.workspace = true serde.workspace = true serde_json.workspace = true +thiserror.workspace = true tokio.workspace = true tokio-rustls.workspace = true tokio-socks.workspace = true diff 
--git a/ext/fetch/fs_fetch_handler.rs b/ext/fetch/fs_fetch_handler.rs index 4c2b81f35..c236dd9c6 100644 --- a/ext/fetch/fs_fetch_handler.rs +++ b/ext/fetch/fs_fetch_handler.rs @@ -4,7 +4,6 @@ use crate::CancelHandle; use crate::CancelableResponseFuture; use crate::FetchHandler; -use deno_core::error::type_error; use deno_core::futures::FutureExt; use deno_core::futures::TryFutureExt; use deno_core::futures::TryStreamExt; @@ -42,9 +41,7 @@ impl FetchHandler for FsFetchHandler { .map_err(|_| ())?; Ok::<_, ()>(response) } - .map_err(move |_| { - type_error("NetworkError when attempting to fetch resource") - }) + .map_err(move |_| super::FetchError::NetworkError) .or_cancel(&cancel_handle) .boxed_local(); diff --git a/ext/fetch/lib.rs b/ext/fetch/lib.rs index 88f303852..4df8dc3d7 100644 --- a/ext/fetch/lib.rs +++ b/ext/fetch/lib.rs @@ -17,10 +17,6 @@ use std::sync::Arc; use std::task::Context; use std::task::Poll; -use deno_core::anyhow::anyhow; -use deno_core::anyhow::Error; -use deno_core::error::type_error; -use deno_core::error::AnyError; use deno_core::futures::stream::Peekable; use deno_core::futures::Future; use deno_core::futures::FutureExt; @@ -28,6 +24,7 @@ use deno_core::futures::Stream; use deno_core::futures::StreamExt; use deno_core::futures::TryFutureExt; use deno_core::op2; +use deno_core::url; use deno_core::url::Url; use deno_core::AsyncRefCell; use deno_core::AsyncResult; @@ -87,15 +84,18 @@ pub struct Options { pub root_cert_store_provider: Option>, pub proxy: Option, #[allow(clippy::type_complexity)] - pub request_builder_hook: - Option) -> Result<(), AnyError>>, + pub request_builder_hook: Option< + fn(&mut http::Request) -> Result<(), deno_core::error::AnyError>, + >, pub unsafely_ignore_certificate_errors: Option>, pub client_cert_chain_and_key: TlsKeys, pub file_fetch_handler: Rc, } impl Options { - pub fn root_cert_store(&self) -> Result, AnyError> { + pub fn root_cert_store( + &self, + ) -> Result, deno_core::error::AnyError> { Ok(match 
&self.root_cert_store_provider { Some(provider) => Some(provider.get_or_try_init()?.clone()), None => None, @@ -144,6 +144,51 @@ deno_core::extension!(deno_fetch, }, ); +#[derive(Debug, thiserror::Error)] +pub enum FetchError { + #[error(transparent)] + Resource(deno_core::error::AnyError), + #[error(transparent)] + Permission(deno_core::error::AnyError), + #[error("NetworkError when attempting to fetch resource")] + NetworkError, + #[error("Fetching files only supports the GET method: received {0}")] + FsNotGet(Method), + #[error("Invalid URL {0}")] + InvalidUrl(Url), + #[error(transparent)] + InvalidHeaderName(#[from] http::header::InvalidHeaderName), + #[error(transparent)] + InvalidHeaderValue(#[from] http::header::InvalidHeaderValue), + #[error("{0:?}")] + DataUrl(data_url::DataUrlError), + #[error("{0:?}")] + Base64(data_url::forgiving_base64::InvalidBase64), + #[error("Blob for the given URL not found.")] + BlobNotFound, + #[error("Url scheme '{0}' not supported")] + SchemeNotSupported(String), + #[error("Request was cancelled")] + RequestCanceled, + #[error(transparent)] + Http(#[from] http::Error), + #[error(transparent)] + ClientCreate(#[from] HttpClientCreateError), + #[error(transparent)] + Url(#[from] url::ParseError), + #[error(transparent)] + Method(#[from] http::method::InvalidMethod), + #[error(transparent)] + ClientSend(#[from] ClientSendError), + #[error(transparent)] + RequestBuilderHook(deno_core::error::AnyError), + #[error(transparent)] + Io(#[from] std::io::Error), + // Only used for node upgrade + #[error(transparent)] + Hyper(#[from] hyper::Error), +} + pub type CancelableResponseFuture = Pin>>; @@ -170,11 +215,7 @@ impl FetchHandler for DefaultFileFetchHandler { _state: &mut OpState, _url: &Url, ) -> (CancelableResponseFuture, Option>) { - let fut = async move { - Ok(Err(type_error( - "NetworkError when attempting to fetch resource", - ))) - }; + let fut = async move { Ok(Err(FetchError::NetworkError)) }; (Box::pin(fut), None) } } @@ 
-191,7 +232,7 @@ pub struct FetchReturn { pub fn get_or_create_client_from_state( state: &mut OpState, -) -> Result { +) -> Result { if let Some(client) = state.try_borrow::() { Ok(client.clone()) } else { @@ -204,11 +245,13 @@ pub fn get_or_create_client_from_state( pub fn create_client_from_options( options: &Options, -) -> Result { +) -> Result { create_http_client( &options.user_agent, CreateHttpClientOptions { - root_cert_store: options.root_cert_store()?, + root_cert_store: options + .root_cert_store() + .map_err(HttpClientCreateError::RootCertStore)?, ca_certs: vec![], proxy: options.proxy.clone(), unsafely_ignore_certificate_errors: options @@ -230,7 +273,9 @@ pub fn create_client_from_options( #[allow(clippy::type_complexity)] pub struct ResourceToBodyAdapter( Rc, - Option>>>>, + Option< + Pin>>>, + >, ); impl ResourceToBodyAdapter { @@ -246,7 +291,7 @@ unsafe impl Send for ResourceToBodyAdapter {} unsafe impl Sync for ResourceToBodyAdapter {} impl Stream for ResourceToBodyAdapter { - type Item = Result; + type Item = Result; fn poll_next( self: Pin<&mut Self>, @@ -276,7 +321,7 @@ impl Stream for ResourceToBodyAdapter { impl hyper::body::Body for ResourceToBodyAdapter { type Data = Bytes; - type Error = Error; + type Error = deno_core::error::AnyError; fn poll_frame( self: Pin<&mut Self>, @@ -301,13 +346,13 @@ pub trait FetchPermissions { &mut self, url: &Url, api_name: &str, - ) -> Result<(), AnyError>; + ) -> Result<(), deno_core::error::AnyError>; #[must_use = "the resolved return value to mitigate time-of-check to time-of-use issues"] fn check_read<'a>( &mut self, p: &'a Path, api_name: &str, - ) -> Result, AnyError>; + ) -> Result, deno_core::error::AnyError>; } impl FetchPermissions for deno_permissions::PermissionsContainer { @@ -316,7 +361,7 @@ impl FetchPermissions for deno_permissions::PermissionsContainer { &mut self, url: &Url, api_name: &str, - ) -> Result<(), AnyError> { + ) -> Result<(), deno_core::error::AnyError> { 
deno_permissions::PermissionsContainer::check_net_url(self, url, api_name) } @@ -325,7 +370,7 @@ impl FetchPermissions for deno_permissions::PermissionsContainer { &mut self, path: &'a Path, api_name: &str, - ) -> Result, AnyError> { + ) -> Result, deno_core::error::AnyError> { deno_permissions::PermissionsContainer::check_read_path( self, path, @@ -346,12 +391,15 @@ pub fn op_fetch( has_body: bool, #[buffer] data: Option, #[smi] resource: Option, -) -> Result +) -> Result where FP: FetchPermissions + 'static, { let (client, allow_host) = if let Some(rid) = client_rid { - let r = state.resource_table.get::(rid)?; + let r = state + .resource_table + .get::(rid) + .map_err(FetchError::Resource)?; (r.client.clone(), r.allow_host) } else { (get_or_create_client_from_state(state)?, false) @@ -364,20 +412,18 @@ where let scheme = url.scheme(); let (request_rid, cancel_handle_rid) = match scheme { "file" => { - let path = url.to_file_path().map_err(|_| { - type_error("NetworkError when attempting to fetch resource") - })?; + let path = url.to_file_path().map_err(|_| FetchError::NetworkError)?; let permissions = state.borrow_mut::(); - let path = permissions.check_read(&path, "fetch()")?; + let path = permissions + .check_read(&path, "fetch()") + .map_err(FetchError::Permission)?; let url = match path { Cow::Owned(path) => Url::from_file_path(path).unwrap(), Cow::Borrowed(_) => url, }; if method != Method::GET { - return Err(type_error(format!( - "Fetching files only supports the GET method: received {method}" - ))); + return Err(FetchError::FsNotGet(method)); } let Options { @@ -396,13 +442,15 @@ where } "http" | "https" => { let permissions = state.borrow_mut::(); - permissions.check_net_url(&url, "fetch()")?; + permissions + .check_net_url(&url, "fetch()") + .map_err(FetchError::Resource)?; let maybe_authority = extract_authority(&mut url); let uri = url .as_str() .parse::() - .map_err(|_| type_error(format!("Invalid URL {url}")))?; + .map_err(|_| 
FetchError::InvalidUrl(url.clone()))?; let mut con_len = None; let body = if has_body { @@ -416,7 +464,10 @@ where .boxed() } (_, Some(resource)) => { - let resource = state.resource_table.take_any(resource)?; + let resource = state + .resource_table + .take_any(resource) + .map_err(FetchError::Resource)?; match resource.size_hint() { (body_size, Some(n)) if body_size == n && body_size > 0 => { con_len = Some(body_size); @@ -453,10 +504,8 @@ where } for (key, value) in headers { - let name = HeaderName::from_bytes(&key) - .map_err(|err| type_error(err.to_string()))?; - let v = HeaderValue::from_bytes(&value) - .map_err(|err| type_error(err.to_string()))?; + let name = HeaderName::from_bytes(&key)?; + let v = HeaderValue::from_bytes(&value)?; if (name != HOST || allow_host) && name != CONTENT_LENGTH { request.headers_mut().append(name, v); @@ -474,20 +523,18 @@ where let options = state.borrow::(); if let Some(request_builder_hook) = options.request_builder_hook { request_builder_hook(&mut request) - .map_err(|err| type_error(err.to_string()))?; + .map_err(FetchError::RequestBuilderHook)?; } let cancel_handle = CancelHandle::new_rc(); let cancel_handle_ = cancel_handle.clone(); - let fut = { - async move { - client - .send(request) - .map_err(Into::into) - .or_cancel(cancel_handle_) - .await - } + let fut = async move { + client + .send(request) + .map_err(Into::into) + .or_cancel(cancel_handle_) + .await }; let request_rid = state.resource_table.add(FetchRequestResource { @@ -501,12 +548,10 @@ where (request_rid, Some(cancel_handle_rid)) } "data" => { - let data_url = DataUrl::process(url.as_str()) - .map_err(|e| type_error(format!("{e:?}")))?; + let data_url = + DataUrl::process(url.as_str()).map_err(FetchError::DataUrl)?; - let (body, _) = data_url - .decode_to_vec() - .map_err(|e| type_error(format!("{e:?}")))?; + let (body, _) = data_url.decode_to_vec().map_err(FetchError::Base64)?; let body = http_body_util::Full::new(body.into()) .map_err(|never| match never 
{}) .boxed(); @@ -528,11 +573,9 @@ where "blob" => { // Blob URL resolution happens in the JS side of fetch. If we got here is // because the URL isn't an object URL. - return Err(type_error("Blob for the given URL not found.")); - } - _ => { - return Err(type_error(format!("Url scheme '{scheme}' not supported"))) + return Err(FetchError::BlobNotFound); } + _ => return Err(FetchError::SchemeNotSupported(scheme.to_string())), }; Ok(FetchReturn { @@ -564,11 +607,12 @@ pub struct FetchResponse { pub async fn op_fetch_send( state: Rc>, #[smi] rid: ResourceId, -) -> Result { +) -> Result { let request = state .borrow_mut() .resource_table - .take::(rid)?; + .take::(rid) + .map_err(FetchError::Resource)?; let request = Rc::try_unwrap(request) .ok() @@ -581,22 +625,23 @@ pub async fn op_fetch_send( // If any error in the chain is a hyper body error, return that as a special result we can use to // reconstruct an error chain (eg: `new TypeError(..., { cause: new Error(...) })`). // TODO(mmastrac): it would be a lot easier if we just passed a v8::Global through here instead - let mut err_ref: &dyn std::error::Error = err.as_ref(); - while let Some(err_src) = std::error::Error::source(err_ref) { - if let Some(err_src) = err_src.downcast_ref::() { - if let Some(err_src) = std::error::Error::source(err_src) { - return Ok(FetchResponse { - error: Some((err.to_string(), err_src.to_string())), - ..Default::default() - }); + + if let FetchError::ClientSend(err_src) = &err { + if let Some(client_err) = std::error::Error::source(&err_src.source) { + if let Some(err_src) = client_err.downcast_ref::() { + if let Some(err_src) = std::error::Error::source(err_src) { + return Ok(FetchResponse { + error: Some((err.to_string(), err_src.to_string())), + ..Default::default() + }); + } } } - err_ref = err_src; } - return Err(type_error(err.to_string())); + return Err(err); } - Err(_) => return Err(type_error("Request was cancelled")), + Err(_) => return Err(FetchError::RequestCanceled), }; 
let status = res.status(); @@ -636,7 +681,7 @@ pub async fn op_fetch_send( } type CancelableResponseResult = - Result, AnyError>, Canceled>; + Result, FetchError>, Canceled>; pub struct FetchRequestResource { pub future: Pin>>, @@ -691,7 +736,7 @@ impl FetchResponseResource { } } - pub async fn upgrade(self) -> Result { + pub async fn upgrade(self) -> Result { let reader = self.response_reader.into_inner(); match reader { FetchResponseReader::Start(resp) => Ok(hyper::upgrade::on(resp).await?), @@ -746,7 +791,9 @@ impl Resource for FetchResponseResource { // safely call `await` on it without creating a race condition. Some(_) => match reader.as_mut().next().await.unwrap() { Ok(chunk) => assert!(chunk.is_empty()), - Err(err) => break Err(type_error(err.to_string())), + Err(err) => { + break Err(deno_core::error::type_error(err.to_string())) + } }, None => break Ok(BufView::empty()), } @@ -809,14 +856,16 @@ pub fn op_fetch_custom_client( state: &mut OpState, #[serde] args: CreateHttpClientArgs, #[cppgc] tls_keys: &TlsKeysHolder, -) -> Result +) -> Result where FP: FetchPermissions + 'static, { if let Some(proxy) = args.proxy.clone() { let permissions = state.borrow_mut::(); let url = Url::parse(&proxy.url)?; - permissions.check_net_url(&url, "Deno.createHttpClient()")?; + permissions + .check_net_url(&url, "Deno.createHttpClient()") + .map_err(FetchError::Permission)?; } let options = state.borrow::(); @@ -829,7 +878,9 @@ where let client = create_http_client( &options.user_agent, CreateHttpClientOptions { - root_cert_store: options.root_cert_store()?, + root_cert_store: options + .root_cert_store() + .map_err(HttpClientCreateError::RootCertStore)?, ca_certs, proxy: args.proxy, unsafely_ignore_certificate_errors: options @@ -887,19 +938,34 @@ impl Default for CreateHttpClientOptions { } } +#[derive(Debug, thiserror::Error)] +pub enum HttpClientCreateError { + #[error(transparent)] + Tls(deno_tls::TlsError), + #[error("Illegal characters in User-Agent: received {0}")] 
+ InvalidUserAgent(String), + #[error("invalid proxy url")] + InvalidProxyUrl, + #[error("Cannot create Http Client: either `http1` or `http2` needs to be set to true")] + HttpVersionSelectionInvalid, + #[error(transparent)] + RootCertStore(deno_core::error::AnyError), +} + /// Create new instance of async Client. This client supports /// proxies and doesn't follow redirects. pub fn create_http_client( user_agent: &str, options: CreateHttpClientOptions, -) -> Result { +) -> Result { let mut tls_config = deno_tls::create_client_config( options.root_cert_store, options.ca_certs, options.unsafely_ignore_certificate_errors, options.client_cert_chain_and_key.into(), deno_tls::SocketUse::Http, - )?; + ) + .map_err(HttpClientCreateError::Tls)?; // Proxy TLS should not send ALPN tls_config.alpn_protocols.clear(); @@ -919,9 +985,7 @@ pub fn create_http_client( http_connector.enforce_http(false); let user_agent = user_agent.parse::().map_err(|_| { - type_error(format!( - "Illegal characters in User-Agent: received {user_agent}" - )) + HttpClientCreateError::InvalidUserAgent(user_agent.to_string()) })?; let mut builder = @@ -932,7 +996,7 @@ pub fn create_http_client( let mut proxies = proxy::from_env(); if let Some(proxy) = options.proxy { let mut intercept = proxy::Intercept::all(&proxy.url) - .ok_or_else(|| type_error("invalid proxy url"))?; + .ok_or_else(|| HttpClientCreateError::InvalidProxyUrl)?; if let Some(basic_auth) = &proxy.basic_auth { intercept.set_auth(&basic_auth.username, &basic_auth.password); } @@ -964,7 +1028,7 @@ pub fn create_http_client( } (true, true) => {} (false, false) => { - return Err(type_error("Cannot create Http Client: either `http1` or `http2` needs to be set to true")) + return Err(HttpClientCreateError::HttpVersionSelectionInvalid) } } @@ -980,10 +1044,8 @@ pub fn create_http_client( #[op2] #[serde] -pub fn op_utf8_to_byte_string( - #[string] input: String, -) -> Result { - Ok(input.into()) +pub fn op_utf8_to_byte_string(#[string] input: 
String) -> ByteString { + input.into() } #[derive(Clone, Debug)] @@ -1003,7 +1065,7 @@ const STAR_STAR: HeaderValue = HeaderValue::from_static("*/*"); #[derive(Debug)] pub struct ClientSendError { uri: Uri, - source: hyper_util::client::legacy::Error, + pub source: hyper_util::client::legacy::Error, } impl ClientSendError { @@ -1075,12 +1137,14 @@ impl Client { .oneshot(req) .await .map_err(|e| ClientSendError { uri, source: e })?; - Ok(resp.map(|b| b.map_err(|e| anyhow!(e)).boxed())) + Ok(resp.map(|b| b.map_err(|e| deno_core::anyhow::anyhow!(e)).boxed())) } } -pub type ReqBody = http_body_util::combinators::BoxBody; -pub type ResBody = http_body_util::combinators::BoxBody; +pub type ReqBody = + http_body_util::combinators::BoxBody; +pub type ResBody = + http_body_util::combinators::BoxBody; /// Copied from https://github.com/seanmonstar/reqwest/blob/b9d62a0323d96f11672a61a17bf8849baec00275/src/async_impl/request.rs#L572 /// Check the request URL for a "username:password" type authority, and if diff --git a/ext/node/ops/http.rs b/ext/node/ops/http.rs index 773902ded..730e1e482 100644 --- a/ext/node/ops/http.rs +++ b/ext/node/ops/http.rs @@ -8,14 +8,12 @@ use std::task::Context; use std::task::Poll; use bytes::Bytes; -use deno_core::anyhow; -use deno_core::error::type_error; -use deno_core::error::AnyError; use deno_core::futures::stream::Peekable; use deno_core::futures::Future; use deno_core::futures::FutureExt; use deno_core::futures::Stream; use deno_core::futures::StreamExt; +use deno_core::futures::TryFutureExt; use deno_core::op2; use deno_core::serde::Serialize; use deno_core::unsync::spawn; @@ -33,6 +31,7 @@ use deno_core::Resource; use deno_core::ResourceId; use deno_fetch::get_or_create_client_from_state; use deno_fetch::FetchCancelHandle; +use deno_fetch::FetchError; use deno_fetch::FetchRequestResource; use deno_fetch::FetchReturn; use deno_fetch::HttpClientResource; @@ -59,12 +58,15 @@ pub fn op_node_http_request

( #[serde] headers: Vec<(ByteString, ByteString)>, #[smi] client_rid: Option, #[smi] body: Option, -) -> Result +) -> Result where P: crate::NodePermissions + 'static, { let client = if let Some(rid) = client_rid { - let r = state.resource_table.get::(rid)?; + let r = state + .resource_table + .get::(rid) + .map_err(FetchError::Resource)?; r.client.clone() } else { get_or_create_client_from_state(state)? @@ -76,15 +78,15 @@ where { let permissions = state.borrow_mut::

(); - permissions.check_net_url(&url, "ClientRequest")?; + permissions + .check_net_url(&url, "ClientRequest") + .map_err(FetchError::Permission)?; } let mut header_map = HeaderMap::new(); for (key, value) in headers { - let name = HeaderName::from_bytes(&key) - .map_err(|err| type_error(err.to_string()))?; - let v = HeaderValue::from_bytes(&value) - .map_err(|err| type_error(err.to_string()))?; + let name = HeaderName::from_bytes(&key)?; + let v = HeaderValue::from_bytes(&value)?; header_map.append(name, v); } @@ -92,7 +94,10 @@ where let (body, con_len) = if let Some(body) = body { ( BodyExt::boxed(NodeHttpResourceToBodyAdapter::new( - state.resource_table.take_any(body)?, + state + .resource_table + .take_any(body) + .map_err(FetchError::Resource)?, )), None, ) @@ -117,7 +122,7 @@ where *request.uri_mut() = url .as_str() .parse() - .map_err(|_| type_error("Invalid URL"))?; + .map_err(|_| FetchError::InvalidUrl(url.clone()))?; *request.headers_mut() = header_map; if let Some((username, password)) = maybe_authority { @@ -136,9 +141,9 @@ where let fut = async move { client .send(request) + .map_err(Into::into) .or_cancel(cancel_handle_) .await - .map(|res| res.map_err(|err| type_error(err.to_string()))) }; let request_rid = state.resource_table.add(FetchRequestResource { @@ -174,11 +179,12 @@ pub struct NodeHttpFetchResponse { pub async fn op_node_http_fetch_send( state: Rc>, #[smi] rid: ResourceId, -) -> Result { +) -> Result { let request = state .borrow_mut() .resource_table - .take::(rid)?; + .take::(rid) + .map_err(FetchError::Resource)?; let request = Rc::try_unwrap(request) .ok() @@ -191,22 +197,23 @@ pub async fn op_node_http_fetch_send( // If any error in the chain is a hyper body error, return that as a special result we can use to // reconstruct an error chain (eg: `new TypeError(..., { cause: new Error(...) })`). 
// TODO(mmastrac): it would be a lot easier if we just passed a v8::Global through here instead - let mut err_ref: &dyn std::error::Error = err.as_ref(); - while let Some(err) = std::error::Error::source(err_ref) { - if let Some(err) = err.downcast_ref::() { - if let Some(err) = std::error::Error::source(err) { - return Ok(NodeHttpFetchResponse { - error: Some(err.to_string()), - ..Default::default() - }); + + if let FetchError::ClientSend(err_src) = &err { + if let Some(client_err) = std::error::Error::source(&err_src.source) { + if let Some(err_src) = client_err.downcast_ref::() { + if let Some(err_src) = std::error::Error::source(err_src) { + return Ok(NodeHttpFetchResponse { + error: Some(err_src.to_string()), + ..Default::default() + }); + } } } - err_ref = err; } - return Err(type_error(err.to_string())); + return Err(err); } - Err(_) => return Err(type_error("request was cancelled")), + Err(_) => return Err(FetchError::RequestCanceled), }; let status = res.status(); @@ -250,11 +257,12 @@ pub async fn op_node_http_fetch_send( pub async fn op_node_http_fetch_response_upgrade( state: Rc>, #[smi] rid: ResourceId, -) -> Result { +) -> Result { let raw_response = state .borrow_mut() .resource_table - .take::(rid)?; + .take::(rid) + .map_err(FetchError::Resource)?; let raw_response = Rc::try_unwrap(raw_response) .expect("Someone is holding onto NodeHttpFetchResponseResource"); @@ -277,7 +285,7 @@ pub async fn op_node_http_fetch_response_upgrade( } read_tx.write_all(&buf[..read]).await?; } - Ok::<_, AnyError>(()) + Ok::<_, FetchError>(()) }); spawn(async move { let mut buf = [0; 1024]; @@ -288,7 +296,7 @@ pub async fn op_node_http_fetch_response_upgrade( } upgraded_tx.write_all(&buf[..read]).await?; } - Ok::<_, AnyError>(()) + Ok::<_, FetchError>(()) }); } @@ -318,23 +326,26 @@ impl UpgradeStream { } } - async fn read(self: Rc, buf: &mut [u8]) -> Result { + async fn read( + self: Rc, + buf: &mut [u8], + ) -> Result { let cancel_handle = RcRef::map(self.clone(), 
|this| &this.cancel_handle); async { let read = RcRef::map(self, |this| &this.read); let mut read = read.borrow_mut().await; - Ok(Pin::new(&mut *read).read(buf).await?) + Pin::new(&mut *read).read(buf).await } .try_or_cancel(cancel_handle) .await } - async fn write(self: Rc, buf: &[u8]) -> Result { + async fn write(self: Rc, buf: &[u8]) -> Result { let cancel_handle = RcRef::map(self.clone(), |this| &this.cancel_handle); async { let write = RcRef::map(self, |this| &this.write); let mut write = write.borrow_mut().await; - Ok(Pin::new(&mut *write).write(buf).await?) + Pin::new(&mut *write).write(buf).await } .try_or_cancel(cancel_handle) .await @@ -387,7 +398,7 @@ impl NodeHttpFetchResponseResource { } } - pub async fn upgrade(self) -> Result { + pub async fn upgrade(self) -> Result { let reader = self.response_reader.into_inner(); match reader { NodeHttpFetchResponseReader::Start(resp) => { @@ -445,7 +456,9 @@ impl Resource for NodeHttpFetchResponseResource { // safely call `await` on it without creating a race condition. 
Some(_) => match reader.as_mut().next().await.unwrap() { Ok(chunk) => assert!(chunk.is_empty()), - Err(err) => break Err(type_error(err.to_string())), + Err(err) => { + break Err(deno_core::error::type_error(err.to_string())) + } }, None => break Ok(BufView::empty()), } @@ -453,7 +466,7 @@ impl Resource for NodeHttpFetchResponseResource { }; let cancel_handle = RcRef::map(self, |r| &r.cancel); - fut.try_or_cancel(cancel_handle).await + fut.try_or_cancel(cancel_handle).await.map_err(Into::into) }) } @@ -469,7 +482,9 @@ impl Resource for NodeHttpFetchResponseResource { #[allow(clippy::type_complexity)] pub struct NodeHttpResourceToBodyAdapter( Rc, - Option>>>>, + Option< + Pin>>>, + >, ); impl NodeHttpResourceToBodyAdapter { @@ -485,7 +500,7 @@ unsafe impl Send for NodeHttpResourceToBodyAdapter {} unsafe impl Sync for NodeHttpResourceToBodyAdapter {} impl Stream for NodeHttpResourceToBodyAdapter { - type Item = Result; + type Item = Result; fn poll_next( self: Pin<&mut Self>, @@ -515,7 +530,7 @@ impl Stream for NodeHttpResourceToBodyAdapter { impl hyper::body::Body for NodeHttpResourceToBodyAdapter { type Data = Bytes; - type Error = anyhow::Error; + type Error = deno_core::anyhow::Error; fn poll_frame( self: Pin<&mut Self>, -- cgit v1.2.3 From 615e6b7cc2e89a2a5cc3ab614eecde5aa7de0596 Mon Sep 17 00:00:00 2001 From: Leo Kettmeir Date: Fri, 18 Oct 2024 18:53:04 -0700 Subject: refactor(ext/webgpu): use concrete error type (#26198) --- ext/webgpu/Cargo.toml | 1 + ext/webgpu/buffer.rs | 54 +++++++++++++++++++----------- ext/webgpu/bundle.rs | 67 +++++++++++++++++++------------------ ext/webgpu/byow.rs | 84 ++++++++++++++++++++++++++++++++++------------- ext/webgpu/error.rs | 28 ---------------- ext/webgpu/lib.rs | 59 ++++++++++++++++++++++----------- ext/webgpu/render_pass.rs | 72 +++++++++++++++++++++------------------- ext/webgpu/sampler.rs | 3 +- ext/webgpu/shader.rs | 3 +- ext/webgpu/surface.rs | 43 ++++++++++++++++-------- ext/webgpu/texture.rs | 5 ++- 11 files 
changed, 243 insertions(+), 176 deletions(-) (limited to 'ext') diff --git a/ext/webgpu/Cargo.toml b/ext/webgpu/Cargo.toml index 8f22ca764..7354919d4 100644 --- a/ext/webgpu/Cargo.toml +++ b/ext/webgpu/Cargo.toml @@ -25,6 +25,7 @@ serde = { workspace = true, features = ["derive"] } tokio = { workspace = true, features = ["full"] } wgpu-types = { workspace = true, features = ["serde"] } raw-window-handle = { workspace = true } +thiserror.workspace = true [target.'cfg(not(target_arch = "wasm32"))'.dependencies.wgpu-core] workspace = true diff --git a/ext/webgpu/buffer.rs b/ext/webgpu/buffer.rs index c6cd6f0a7..c2b53890e 100644 --- a/ext/webgpu/buffer.rs +++ b/ext/webgpu/buffer.rs @@ -1,7 +1,5 @@ // Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. -use deno_core::error::type_error; -use deno_core::error::AnyError; use deno_core::op2; use deno_core::OpState; use deno_core::Resource; @@ -13,9 +11,18 @@ use std::sync::Arc; use std::sync::Mutex; use std::time::Duration; -use super::error::DomExceptionOperationError; use super::error::WebGpuResult; +#[derive(Debug, thiserror::Error)] +pub enum BufferError { + #[error(transparent)] + Resource(deno_core::error::AnyError), + #[error("usage is not valid")] + InvalidUsage, + #[error(transparent)] + Access(wgpu_core::resource::BufferAccessError), +} + pub(crate) struct WebGpuBuffer( pub(crate) super::Instance, pub(crate) wgpu_core::id::BufferId, @@ -46,18 +53,19 @@ pub fn op_webgpu_create_buffer( #[number] size: u64, usage: u32, mapped_at_creation: bool, -) -> Result { +) -> Result { let instance = state.borrow::(); let device_resource = state .resource_table - .get::(device_rid)?; + .get::(device_rid) + .map_err(BufferError::Resource)?; let device = device_resource.1; let descriptor = wgpu_core::resource::BufferDescriptor { label: Some(label), size, usage: wgpu_types::BufferUsages::from_bits(usage) - .ok_or_else(|| type_error("usage is not valid"))?, + .ok_or(BufferError::InvalidUsage)?, 
mapped_at_creation, }; @@ -77,18 +85,21 @@ pub async fn op_webgpu_buffer_get_map_async( mode: u32, #[number] offset: u64, #[number] size: u64, -) -> Result { +) -> Result { let device; let done = Arc::new(Mutex::new(None)); { let state_ = state.borrow(); let instance = state_.borrow::(); - let buffer_resource = - state_.resource_table.get::(buffer_rid)?; + let buffer_resource = state_ + .resource_table + .get::(buffer_rid) + .map_err(BufferError::Resource)?; let buffer = buffer_resource.1; let device_resource = state_ .resource_table - .get::(device_rid)?; + .get::(device_rid) + .map_err(BufferError::Resource)?; device = device_resource.1; let done_ = done.clone(); @@ -120,9 +131,7 @@ pub async fn op_webgpu_buffer_get_map_async( let result = done.lock().unwrap().take(); match result { Some(Ok(())) => return Ok(WebGpuResult::empty()), - Some(Err(e)) => { - return Err(DomExceptionOperationError::new(&e.to_string()).into()) - } + Some(Err(e)) => return Err(BufferError::Access(e)), None => { { let state = state.borrow(); @@ -143,9 +152,12 @@ pub fn op_webgpu_buffer_get_mapped_range( #[number] offset: u64, #[number] size: Option, #[buffer] buf: &mut [u8], -) -> Result { +) -> Result { let instance = state.borrow::(); - let buffer_resource = state.resource_table.get::(buffer_rid)?; + let buffer_resource = state + .resource_table + .get::(buffer_rid) + .map_err(BufferError::Resource)?; let buffer = buffer_resource.1; let (slice_pointer, range_size) = @@ -154,7 +166,7 @@ pub fn op_webgpu_buffer_get_mapped_range( offset, size )) - .map_err(|e| DomExceptionOperationError::new(&e.to_string()))?; + .map_err(BufferError::Access)?; // SAFETY: guarantee to be safe from wgpu let slice = unsafe { @@ -176,12 +188,16 @@ pub fn op_webgpu_buffer_unmap( #[smi] buffer_rid: ResourceId, #[smi] mapped_rid: ResourceId, #[buffer] buf: Option<&[u8]>, -) -> Result { +) -> Result { let mapped_resource = state .resource_table - .take::(mapped_rid)?; + .take::(mapped_rid) + 
.map_err(BufferError::Resource)?; let instance = state.borrow::(); - let buffer_resource = state.resource_table.get::(buffer_rid)?; + let buffer_resource = state + .resource_table + .get::(buffer_rid) + .map_err(BufferError::Resource)?; let buffer = buffer_resource.1; if let Some(buf) = buf { diff --git a/ext/webgpu/bundle.rs b/ext/webgpu/bundle.rs index 57158271c..d9a5b2953 100644 --- a/ext/webgpu/bundle.rs +++ b/ext/webgpu/bundle.rs @@ -1,7 +1,5 @@ // Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. -use deno_core::error::type_error; -use deno_core::error::AnyError; use deno_core::op2; use deno_core::OpState; use deno_core::Resource; @@ -13,6 +11,14 @@ use std::rc::Rc; use super::error::WebGpuResult; +#[derive(Debug, thiserror::Error)] +pub enum BundleError { + #[error(transparent)] + Resource(deno_core::error::AnyError), + #[error("size must be larger than 0")] + InvalidSize, +} + struct WebGpuRenderBundleEncoder( RefCell, ); @@ -53,7 +59,7 @@ pub struct CreateRenderBundleEncoderArgs { pub fn op_webgpu_create_render_bundle_encoder( state: &mut OpState, #[serde] args: CreateRenderBundleEncoderArgs, -) -> Result { +) -> Result { let device_resource = state .resource_table .get::(args.device_rid)?; @@ -100,7 +106,7 @@ pub fn op_webgpu_render_bundle_encoder_finish( state: &mut OpState, #[smi] render_bundle_encoder_rid: ResourceId, #[string] label: Cow, -) -> Result { +) -> Result { let render_bundle_encoder_resource = state .resource_table @@ -131,7 +137,7 @@ pub fn op_webgpu_render_bundle_encoder_set_bind_group( #[buffer] dynamic_offsets_data: &[u32], #[number] dynamic_offsets_data_start: usize, #[number] dynamic_offsets_data_length: usize, -) -> Result { +) -> Result { let bind_group_resource = state .resource_table @@ -171,7 +177,7 @@ pub fn op_webgpu_render_bundle_encoder_push_debug_group( state: &mut OpState, #[smi] render_bundle_encoder_rid: ResourceId, #[string] group_label: &str, -) -> Result { +) -> Result { let 
render_bundle_encoder_resource = state .resource_table @@ -195,7 +201,7 @@ pub fn op_webgpu_render_bundle_encoder_push_debug_group( pub fn op_webgpu_render_bundle_encoder_pop_debug_group( state: &mut OpState, #[smi] render_bundle_encoder_rid: ResourceId, -) -> Result { +) -> Result { let render_bundle_encoder_resource = state .resource_table @@ -214,7 +220,7 @@ pub fn op_webgpu_render_bundle_encoder_insert_debug_marker( state: &mut OpState, #[smi] render_bundle_encoder_rid: ResourceId, #[string] marker_label: &str, -) -> Result { +) -> Result { let render_bundle_encoder_resource = state .resource_table @@ -239,7 +245,7 @@ pub fn op_webgpu_render_bundle_encoder_set_pipeline( state: &mut OpState, #[smi] render_bundle_encoder_rid: ResourceId, #[smi] pipeline: ResourceId, -) -> Result { +) -> Result { let render_pipeline_resource = state .resource_table @@ -266,18 +272,17 @@ pub fn op_webgpu_render_bundle_encoder_set_index_buffer( #[serde] index_format: wgpu_types::IndexFormat, #[number] offset: u64, #[number] size: u64, -) -> Result { +) -> Result { let buffer_resource = state .resource_table - .get::(buffer)?; - let render_bundle_encoder_resource = - state - .resource_table - .get::(render_bundle_encoder_rid)?; - let size = Some( - std::num::NonZeroU64::new(size) - .ok_or_else(|| type_error("size must be larger than 0"))?, - ); + .get::(buffer) + .map_err(BundleError::Resource)?; + let render_bundle_encoder_resource = state + .resource_table + .get::(render_bundle_encoder_rid) + .map_err(BundleError::Resource)?; + let size = + Some(std::num::NonZeroU64::new(size).ok_or(BundleError::InvalidSize)?); render_bundle_encoder_resource .0 @@ -296,19 +301,17 @@ pub fn op_webgpu_render_bundle_encoder_set_vertex_buffer( #[smi] buffer: ResourceId, #[number] offset: u64, #[number] size: Option, -) -> Result { +) -> Result { let buffer_resource = state .resource_table - .get::(buffer)?; - let render_bundle_encoder_resource = - state - .resource_table - 
.get::(render_bundle_encoder_rid)?; + .get::(buffer) + .map_err(BundleError::Resource)?; + let render_bundle_encoder_resource = state + .resource_table + .get::(render_bundle_encoder_rid) + .map_err(BundleError::Resource)?; let size = if let Some(size) = size { - Some( - std::num::NonZeroU64::new(size) - .ok_or_else(|| type_error("size must be larger than 0"))?, - ) + Some(std::num::NonZeroU64::new(size).ok_or(BundleError::InvalidSize)?) } else { None }; @@ -333,7 +336,7 @@ pub fn op_webgpu_render_bundle_encoder_draw( instance_count: u32, first_vertex: u32, first_instance: u32, -) -> Result { +) -> Result { let render_bundle_encoder_resource = state .resource_table @@ -360,7 +363,7 @@ pub fn op_webgpu_render_bundle_encoder_draw_indexed( first_index: u32, base_vertex: i32, first_instance: u32, -) -> Result { +) -> Result { let render_bundle_encoder_resource = state .resource_table @@ -385,7 +388,7 @@ pub fn op_webgpu_render_bundle_encoder_draw_indirect( #[smi] render_bundle_encoder_rid: ResourceId, #[smi] indirect_buffer: ResourceId, #[number] indirect_offset: u64, -) -> Result { +) -> Result { let buffer_resource = state .resource_table .get::(indirect_buffer)?; diff --git a/ext/webgpu/byow.rs b/ext/webgpu/byow.rs index 3a43f416e..c9e1177b1 100644 --- a/ext/webgpu/byow.rs +++ b/ext/webgpu/byow.rs @@ -1,7 +1,5 @@ // Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. -use deno_core::error::type_error; -use deno_core::error::AnyError; use deno_core::op2; use deno_core::OpState; use deno_core::ResourceId; @@ -16,6 +14,47 @@ use std::ptr::NonNull; use crate::surface::WebGpuSurface; +#[derive(Debug, thiserror::Error)] +pub enum ByowError { + #[error("Cannot create surface outside of WebGPU context. 
Did you forget to call `navigator.gpu.requestAdapter()`?")] + WebGPUNotInitiated, + #[error("Invalid parameters")] + InvalidParameters, + #[error(transparent)] + CreateSurface(wgpu_core::instance::CreateSurfaceError), + #[cfg(target_os = "windows")] + #[error("Invalid system on Windows")] + InvalidSystem, + #[cfg(target_os = "macos")] + #[error("Invalid system on macOS")] + InvalidSystem, + #[cfg(any( + target_os = "linux", + target_os = "freebsd", + target_os = "openbsd" + ))] + #[error("Invalid system on Linux/BSD")] + InvalidSystem, + #[cfg(any( + target_os = "windows", + target_os = "linux", + target_os = "freebsd", + target_os = "openbsd" + ))] + #[error("window is null")] + NullWindow, + #[cfg(any( + target_os = "linux", + target_os = "freebsd", + target_os = "openbsd" + ))] + #[error("display is null")] + NullDisplay, + #[cfg(target_os = "macos")] + #[error("ns_view is null")] + NSViewDisplay, +} + #[op2(fast)] #[smi] pub fn op_webgpu_surface_create( @@ -23,10 +62,10 @@ pub fn op_webgpu_surface_create( #[string] system: &str, p1: *const c_void, p2: *const c_void, -) -> Result { - let instance = state.try_borrow::().ok_or_else(|| { - type_error("Cannot create surface outside of WebGPU context. Did you forget to call `navigator.gpu.requestAdapter()`?") - })?; +) -> Result { + let instance = state + .try_borrow::() + .ok_or(ByowError::WebGPUNotInitiated)?; // Security note: // // The `p1` and `p2` parameters are pointers to platform-specific window @@ -41,13 +80,15 @@ pub fn op_webgpu_surface_create( // // - Only FFI can export v8::External to user code. if p1.is_null() { - return Err(type_error("Invalid parameters")); + return Err(ByowError::InvalidParameters); } let (win_handle, display_handle) = raw_window(system, p1, p2)?; // SAFETY: see above comment let surface = unsafe { - instance.instance_create_surface(display_handle, win_handle, None)? + instance + .instance_create_surface(display_handle, win_handle, None) + .map_err(ByowError::CreateSurface)? 
}; let rid = state @@ -66,15 +107,14 @@ fn raw_window( system: &str, _ns_window: *const c_void, ns_view: *const c_void, -) -> Result { +) -> Result { if system != "cocoa" { - return Err(type_error("Invalid system on macOS")); + return Err(ByowError::InvalidSystem); } let win_handle = raw_window_handle::RawWindowHandle::AppKit( raw_window_handle::AppKitWindowHandle::new( - NonNull::new(ns_view as *mut c_void) - .ok_or(type_error("ns_view is null"))?, + NonNull::new(ns_view as *mut c_void).ok_or(ByowError::NSViewDisplay)?, ), ); @@ -89,16 +129,16 @@ fn raw_window( system: &str, window: *const c_void, hinstance: *const c_void, -) -> Result { +) -> Result { use raw_window_handle::WindowsDisplayHandle; if system != "win32" { - return Err(type_error("Invalid system on Windows")); + return Err(ByowError::InvalidSystem); } let win_handle = { let mut handle = raw_window_handle::Win32WindowHandle::new( std::num::NonZeroIsize::new(window as isize) - .ok_or(type_error("window is null"))?, + .ok_or(ByowError::NullWindow)?, ); handle.hinstance = std::num::NonZeroIsize::new(hinstance as isize); @@ -115,7 +155,7 @@ fn raw_window( system: &str, window: *const c_void, display: *const c_void, -) -> Result { +) -> Result { let (win_handle, display_handle); if system == "x11" { win_handle = raw_window_handle::RawWindowHandle::Xlib( @@ -131,19 +171,17 @@ fn raw_window( } else if system == "wayland" { win_handle = raw_window_handle::RawWindowHandle::Wayland( raw_window_handle::WaylandWindowHandle::new( - NonNull::new(window as *mut c_void) - .ok_or(type_error("window is null"))?, + NonNull::new(window as *mut c_void).ok_or(ByowError::NullWindow)?, ), ); display_handle = raw_window_handle::RawDisplayHandle::Wayland( raw_window_handle::WaylandDisplayHandle::new( - NonNull::new(display as *mut c_void) - .ok_or(type_error("display is null"))?, + NonNull::new(display as *mut c_void).ok_or(ByowError::NullDisplay)?, ), ); } else { - return Err(type_error("Invalid system on Linux/BSD")); + 
return Err(ByowError::InvalidSystem); } Ok((win_handle, display_handle)) @@ -160,6 +198,6 @@ fn raw_window( _system: &str, _window: *const c_void, _display: *const c_void, -) -> Result { - Err(type_error("Unsupported platform")) +) -> Result { + Err(deno_core::error::type_error("Unsupported platform")) } diff --git a/ext/webgpu/error.rs b/ext/webgpu/error.rs index 5b55d506a..f08f76538 100644 --- a/ext/webgpu/error.rs +++ b/ext/webgpu/error.rs @@ -1,11 +1,9 @@ // Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. -use deno_core::error::AnyError; use deno_core::ResourceId; use serde::Serialize; use std::convert::From; use std::error::Error; -use std::fmt; use wgpu_core::binding_model::CreateBindGroupError; use wgpu_core::binding_model::CreateBindGroupLayoutError; use wgpu_core::binding_model::CreatePipelineLayoutError; @@ -286,29 +284,3 @@ impl From for WebGpuError { WebGpuError::Validation(fmt_err(&err)) } } - -#[derive(Debug)] -pub struct DomExceptionOperationError { - pub msg: String, -} - -impl DomExceptionOperationError { - pub fn new(msg: &str) -> Self { - DomExceptionOperationError { - msg: msg.to_string(), - } - } -} - -impl fmt::Display for DomExceptionOperationError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.pad(&self.msg) - } -} - -impl std::error::Error for DomExceptionOperationError {} - -pub fn get_error_class_name(e: &AnyError) -> Option<&'static str> { - e.downcast_ref::() - .map(|_| "DOMExceptionOperationError") -} diff --git a/ext/webgpu/lib.rs b/ext/webgpu/lib.rs index 9cf4ca914..5dc8278e4 100644 --- a/ext/webgpu/lib.rs +++ b/ext/webgpu/lib.rs @@ -2,7 +2,6 @@ #![cfg(not(target_arch = "wasm32"))] #![warn(unsafe_op_in_unsafe_fn)] -use deno_core::error::AnyError; use deno_core::op2; use deno_core::OpState; use deno_core::Resource; @@ -16,7 +15,6 @@ use std::rc::Rc; pub use wgpu_core; pub use wgpu_types; -use error::DomExceptionOperationError; use error::WebGpuResult; pub const UNSTABLE_FEATURE_NAME: &str = 
"webgpu"; @@ -85,6 +83,18 @@ pub mod shader; pub mod surface; pub mod texture; +#[derive(Debug, thiserror::Error)] +pub enum InitError { + #[error(transparent)] + Resource(deno_core::error::AnyError), + #[error(transparent)] + InvalidAdapter(wgpu_core::instance::InvalidAdapter), + #[error(transparent)] + RequestDevice(wgpu_core::instance::RequestDeviceError), + #[error(transparent)] + InvalidDevice(wgpu_core::device::InvalidDevice), +} + pub type Instance = std::sync::Arc; struct WebGpuAdapter(Instance, wgpu_core::id::AdapterId); @@ -400,7 +410,7 @@ pub fn op_webgpu_request_adapter( state: Rc>, #[serde] power_preference: Option, force_fallback_adapter: bool, -) -> Result { +) -> Result { let mut state = state.borrow_mut(); let backends = std::env::var("DENO_WEBGPU_BACKEND").map_or_else( @@ -441,10 +451,11 @@ pub fn op_webgpu_request_adapter( } }; let adapter_features = - gfx_select!(adapter => instance.adapter_features(adapter))?; + gfx_select!(adapter => instance.adapter_features(adapter)) + .map_err(InitError::InvalidAdapter)?; let features = deserialize_features(&adapter_features); - let adapter_limits = - gfx_select!(adapter => instance.adapter_limits(adapter))?; + let adapter_limits = gfx_select!(adapter => instance.adapter_limits(adapter)) + .map_err(InitError::InvalidAdapter)?; let instance = instance.clone(); @@ -663,10 +674,12 @@ pub fn op_webgpu_request_device( #[string] label: String, #[serde] required_features: GpuRequiredFeatures, #[serde] required_limits: Option, -) -> Result { +) -> Result { let mut state = state.borrow_mut(); - let adapter_resource = - state.resource_table.take::(adapter_rid)?; + let adapter_resource = state + .resource_table + .take::(adapter_rid) + .map_err(InitError::Resource)?; let adapter = adapter_resource.1; let instance = state.borrow::(); @@ -685,13 +698,14 @@ pub fn op_webgpu_request_device( )); adapter_resource.close(); if let Some(err) = maybe_err { - return Err(DomExceptionOperationError::new(&err.to_string()).into()); 
+ return Err(InitError::RequestDevice(err)); } - let device_features = - gfx_select!(device => instance.device_features(device))?; + let device_features = gfx_select!(device => instance.device_features(device)) + .map_err(InitError::InvalidDevice)?; let features = deserialize_features(&device_features); - let limits = gfx_select!(device => instance.device_limits(device))?; + let limits = gfx_select!(device => instance.device_limits(device)) + .map_err(InitError::InvalidDevice)?; let instance = instance.clone(); let instance2 = instance.clone(); @@ -722,14 +736,17 @@ pub struct GPUAdapterInfo { pub fn op_webgpu_request_adapter_info( state: Rc>, #[smi] adapter_rid: ResourceId, -) -> Result { +) -> Result { let state = state.borrow_mut(); - let adapter_resource = - state.resource_table.get::(adapter_rid)?; + let adapter_resource = state + .resource_table + .get::(adapter_rid) + .map_err(InitError::Resource)?; let adapter = adapter_resource.1; let instance = state.borrow::(); - let info = gfx_select!(adapter => instance.adapter_get_info(adapter))?; + let info = gfx_select!(adapter => instance.adapter_get_info(adapter)) + .map_err(InitError::InvalidAdapter)?; Ok(GPUAdapterInfo { vendor: info.vendor.to_string(), @@ -770,9 +787,11 @@ impl From for wgpu_types::QueryType { pub fn op_webgpu_create_query_set( state: &mut OpState, #[serde] args: CreateQuerySetArgs, -) -> Result { - let device_resource = - state.resource_table.get::(args.device_rid)?; +) -> Result { + let device_resource = state + .resource_table + .get::(args.device_rid) + .map_err(InitError::Resource)?; let device = device_resource.1; let instance = state.borrow::(); diff --git a/ext/webgpu/render_pass.rs b/ext/webgpu/render_pass.rs index c68be3d99..9b9d87d9f 100644 --- a/ext/webgpu/render_pass.rs +++ b/ext/webgpu/render_pass.rs @@ -1,7 +1,5 @@ // Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. 
-use deno_core::error::type_error; -use deno_core::error::AnyError; use deno_core::op2; use deno_core::OpState; use deno_core::Resource; @@ -12,6 +10,14 @@ use std::cell::RefCell; use super::error::WebGpuResult; +#[derive(Debug, thiserror::Error)] +pub enum RenderPassError { + #[error(transparent)] + Resource(deno_core::error::AnyError), + #[error("size must be larger than 0")] + InvalidSize, +} + pub(crate) struct WebGpuRenderPass( pub(crate) RefCell, ); @@ -38,7 +44,7 @@ pub struct RenderPassSetViewportArgs { pub fn op_webgpu_render_pass_set_viewport( state: &mut OpState, #[serde] args: RenderPassSetViewportArgs, -) -> Result { +) -> Result { let render_pass_resource = state .resource_table .get::(args.render_pass_rid)?; @@ -65,7 +71,7 @@ pub fn op_webgpu_render_pass_set_scissor_rect( y: u32, width: u32, height: u32, -) -> Result { +) -> Result { let render_pass_resource = state .resource_table .get::(render_pass_rid)?; @@ -87,7 +93,7 @@ pub fn op_webgpu_render_pass_set_blend_constant( state: &mut OpState, #[smi] render_pass_rid: ResourceId, #[serde] color: wgpu_types::Color, -) -> Result { +) -> Result { let render_pass_resource = state .resource_table .get::(render_pass_rid)?; @@ -106,7 +112,7 @@ pub fn op_webgpu_render_pass_set_stencil_reference( state: &mut OpState, #[smi] render_pass_rid: ResourceId, reference: u32, -) -> Result { +) -> Result { let render_pass_resource = state .resource_table .get::(render_pass_rid)?; @@ -125,7 +131,7 @@ pub fn op_webgpu_render_pass_begin_occlusion_query( state: &mut OpState, #[smi] render_pass_rid: ResourceId, query_index: u32, -) -> Result { +) -> Result { let render_pass_resource = state .resource_table .get::(render_pass_rid)?; @@ -143,7 +149,7 @@ pub fn op_webgpu_render_pass_begin_occlusion_query( pub fn op_webgpu_render_pass_end_occlusion_query( state: &mut OpState, #[smi] render_pass_rid: ResourceId, -) -> Result { +) -> Result { let render_pass_resource = state .resource_table .get::(render_pass_rid)?; @@ -161,7 
+167,7 @@ pub fn op_webgpu_render_pass_execute_bundles( state: &mut OpState, #[smi] render_pass_rid: ResourceId, #[serde] bundles: Vec, -) -> Result { +) -> Result { let bundles = bundles .iter() .map(|rid| { @@ -171,7 +177,7 @@ pub fn op_webgpu_render_pass_execute_bundles( .get::(*rid)?; Ok(render_bundle_resource.1) }) - .collect::, AnyError>>()?; + .collect::, deno_core::error::AnyError>>()?; let render_pass_resource = state .resource_table @@ -191,7 +197,7 @@ pub fn op_webgpu_render_pass_end( state: &mut OpState, #[smi] command_encoder_rid: ResourceId, #[smi] render_pass_rid: ResourceId, -) -> Result { +) -> Result { let command_encoder_resource = state .resource_table .get::( @@ -217,7 +223,7 @@ pub fn op_webgpu_render_pass_set_bind_group( #[buffer] dynamic_offsets_data: &[u32], #[number] dynamic_offsets_data_start: usize, #[number] dynamic_offsets_data_length: usize, -) -> Result { +) -> Result { let bind_group_resource = state .resource_table @@ -251,7 +257,7 @@ pub fn op_webgpu_render_pass_push_debug_group( state: &mut OpState, #[smi] render_pass_rid: ResourceId, #[string] group_label: &str, -) -> Result { +) -> Result { let render_pass_resource = state .resource_table .get::(render_pass_rid)?; @@ -270,7 +276,7 @@ pub fn op_webgpu_render_pass_push_debug_group( pub fn op_webgpu_render_pass_pop_debug_group( state: &mut OpState, #[smi] render_pass_rid: ResourceId, -) -> Result { +) -> Result { let render_pass_resource = state .resource_table .get::(render_pass_rid)?; @@ -288,7 +294,7 @@ pub fn op_webgpu_render_pass_insert_debug_marker( state: &mut OpState, #[smi] render_pass_rid: ResourceId, #[string] marker_label: &str, -) -> Result { +) -> Result { let render_pass_resource = state .resource_table .get::(render_pass_rid)?; @@ -308,7 +314,7 @@ pub fn op_webgpu_render_pass_set_pipeline( state: &mut OpState, #[smi] render_pass_rid: ResourceId, pipeline: u32, -) -> Result { +) -> Result { let render_pipeline_resource = state .resource_table @@ -334,19 +340,18 @@ 
pub fn op_webgpu_render_pass_set_index_buffer( #[serde] index_format: wgpu_types::IndexFormat, #[number] offset: u64, #[number] size: Option, -) -> Result { +) -> Result { let buffer_resource = state .resource_table - .get::(buffer)?; + .get::(buffer) + .map_err(RenderPassError::Resource)?; let render_pass_resource = state .resource_table - .get::(render_pass_rid)?; + .get::(render_pass_rid) + .map_err(RenderPassError::Resource)?; let size = if let Some(size) = size { - Some( - std::num::NonZeroU64::new(size) - .ok_or_else(|| type_error("size must be larger than 0"))?, - ) + Some(std::num::NonZeroU64::new(size).ok_or(RenderPassError::InvalidSize)?) } else { None }; @@ -370,19 +375,18 @@ pub fn op_webgpu_render_pass_set_vertex_buffer( buffer: u32, #[number] offset: u64, #[number] size: Option, -) -> Result { +) -> Result { let buffer_resource = state .resource_table - .get::(buffer)?; + .get::(buffer) + .map_err(RenderPassError::Resource)?; let render_pass_resource = state .resource_table - .get::(render_pass_rid)?; + .get::(render_pass_rid) + .map_err(RenderPassError::Resource)?; let size = if let Some(size) = size { - Some( - std::num::NonZeroU64::new(size) - .ok_or_else(|| type_error("size must be larger than 0"))?, - ) + Some(std::num::NonZeroU64::new(size).ok_or(RenderPassError::InvalidSize)?) 
} else { None }; @@ -407,7 +411,7 @@ pub fn op_webgpu_render_pass_draw( instance_count: u32, first_vertex: u32, first_instance: u32, -) -> Result { +) -> Result { let render_pass_resource = state .resource_table .get::(render_pass_rid)?; @@ -433,7 +437,7 @@ pub fn op_webgpu_render_pass_draw_indexed( first_index: u32, base_vertex: i32, first_instance: u32, -) -> Result { +) -> Result { let render_pass_resource = state .resource_table .get::(render_pass_rid)?; @@ -457,7 +461,7 @@ pub fn op_webgpu_render_pass_draw_indirect( #[smi] render_pass_rid: ResourceId, indirect_buffer: u32, #[number] indirect_offset: u64, -) -> Result { +) -> Result { let buffer_resource = state .resource_table .get::(indirect_buffer)?; @@ -481,7 +485,7 @@ pub fn op_webgpu_render_pass_draw_indexed_indirect( #[smi] render_pass_rid: ResourceId, indirect_buffer: u32, #[number] indirect_offset: u64, -) -> Result { +) -> Result { let buffer_resource = state .resource_table .get::(indirect_buffer)?; diff --git a/ext/webgpu/sampler.rs b/ext/webgpu/sampler.rs index 27c36802e..9fc1269ea 100644 --- a/ext/webgpu/sampler.rs +++ b/ext/webgpu/sampler.rs @@ -1,6 +1,5 @@ // Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. -use deno_core::error::AnyError; use deno_core::op2; use deno_core::OpState; use deno_core::Resource; @@ -47,7 +46,7 @@ pub struct CreateSamplerArgs { pub fn op_webgpu_create_sampler( state: &mut OpState, #[serde] args: CreateSamplerArgs, -) -> Result { +) -> Result { let instance = state.borrow::(); let device_resource = state .resource_table diff --git a/ext/webgpu/shader.rs b/ext/webgpu/shader.rs index 0b3991c5d..4653bd85b 100644 --- a/ext/webgpu/shader.rs +++ b/ext/webgpu/shader.rs @@ -1,6 +1,5 @@ // Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. 
-use deno_core::error::AnyError; use deno_core::op2; use deno_core::OpState; use deno_core::Resource; @@ -31,7 +30,7 @@ pub fn op_webgpu_create_shader_module( #[smi] device_rid: ResourceId, #[string] label: Cow, #[string] code: Cow, -) -> Result { +) -> Result { let instance = state.borrow::(); let device_resource = state .resource_table diff --git a/ext/webgpu/surface.rs b/ext/webgpu/surface.rs index 1f6d2c87d..297eaeb00 100644 --- a/ext/webgpu/surface.rs +++ b/ext/webgpu/surface.rs @@ -1,7 +1,6 @@ // Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. use super::WebGpuResult; -use deno_core::error::AnyError; use deno_core::op2; use deno_core::OpState; use deno_core::Resource; @@ -11,6 +10,16 @@ use std::borrow::Cow; use std::rc::Rc; use wgpu_types::SurfaceStatus; +#[derive(Debug, thiserror::Error)] +pub enum SurfaceError { + #[error(transparent)] + Resource(deno_core::error::AnyError), + #[error("Invalid Surface Status")] + InvalidStatus, + #[error(transparent)] + Surface(wgpu_core::present::SurfaceError), +} + pub struct WebGpuSurface(pub crate::Instance, pub wgpu_core::id::SurfaceId); impl Resource for WebGpuSurface { fn name(&self) -> Cow { @@ -41,7 +50,7 @@ pub struct SurfaceConfigureArgs { pub fn op_webgpu_surface_configure( state: &mut OpState, #[serde] args: SurfaceConfigureArgs, -) -> Result { +) -> Result { let instance = state.borrow::(); let device_resource = state .resource_table @@ -75,18 +84,22 @@ pub fn op_webgpu_surface_get_current_texture( state: &mut OpState, #[smi] device_rid: ResourceId, #[smi] surface_rid: ResourceId, -) -> Result { +) -> Result { let instance = state.borrow::(); let device_resource = state .resource_table - .get::(device_rid)?; + .get::(device_rid) + .map_err(SurfaceError::Resource)?; let device = device_resource.1; - let surface_resource = - state.resource_table.get::(surface_rid)?; + let surface_resource = state + .resource_table + .get::(surface_rid) + .map_err(SurfaceError::Resource)?; let surface = 
surface_resource.1; let output = - gfx_select!(device => instance.surface_get_current_texture(surface, None))?; + gfx_select!(device => instance.surface_get_current_texture(surface, None)) + .map_err(SurfaceError::Surface)?; match output.status { SurfaceStatus::Good | SurfaceStatus::Suboptimal => { @@ -98,7 +111,7 @@ pub fn op_webgpu_surface_get_current_texture( }); Ok(WebGpuResult::rid(rid)) } - _ => Err(AnyError::msg("Invalid Surface Status")), + _ => Err(SurfaceError::InvalidStatus), } } @@ -107,17 +120,21 @@ pub fn op_webgpu_surface_present( state: &mut OpState, #[smi] device_rid: ResourceId, #[smi] surface_rid: ResourceId, -) -> Result<(), AnyError> { +) -> Result<(), SurfaceError> { let instance = state.borrow::(); let device_resource = state .resource_table - .get::(device_rid)?; + .get::(device_rid) + .map_err(SurfaceError::Resource)?; let device = device_resource.1; - let surface_resource = - state.resource_table.get::(surface_rid)?; + let surface_resource = state + .resource_table + .get::(surface_rid) + .map_err(SurfaceError::Resource)?; let surface = surface_resource.1; - let _ = gfx_select!(device => instance.surface_present(surface))?; + let _ = gfx_select!(device => instance.surface_present(surface)) + .map_err(SurfaceError::Surface)?; Ok(()) } diff --git a/ext/webgpu/texture.rs b/ext/webgpu/texture.rs index 44edd1a88..f8a5e05a3 100644 --- a/ext/webgpu/texture.rs +++ b/ext/webgpu/texture.rs @@ -1,6 +1,5 @@ // Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. 
-use deno_core::error::AnyError; use deno_core::op2; use deno_core::OpState; use deno_core::Resource; @@ -62,7 +61,7 @@ pub struct CreateTextureArgs { pub fn op_webgpu_create_texture( state: &mut OpState, #[serde] args: CreateTextureArgs, -) -> Result { +) -> Result { let instance = state.borrow::(); let device_resource = state .resource_table @@ -111,7 +110,7 @@ pub struct CreateTextureViewArgs { pub fn op_webgpu_create_texture_view( state: &mut OpState, #[serde] args: CreateTextureViewArgs, -) -> Result { +) -> Result { let instance = state.borrow::(); let texture_resource = state .resource_table -- cgit v1.2.3 From 0710af034ffc7737cbfa689ff1826d9c1537f6da Mon Sep 17 00:00:00 2001 From: Divy Srivastava Date: Sat, 19 Oct 2024 08:42:59 +0530 Subject: perf: avoid multiple calls to runMicrotask (#26378) Improves HTTP throughput by 8-9k rps on Linux: this patch ``` Requests/sec: 145001.69 Transfer/sec: 20.74MB ``` main ``` Requests/sec: 137866.61 Transfer/sec: 19.72MB ``` The improvements comes from the reduced number of calls to `op_run_microtask` per request. Returning `true` from a macrotask callback already calls `op_run_microtask` so the extra call was redundant. 
Here's `--strace-ops` output for a single request: main ``` [ 4.667] op_http_wait : CompletedAsync Async [ 4.667] op_run_microtasks : Dispatched Slow [ 4.668] op_http_try_wait : Dispatched Slow [ 4.668] op_http_try_wait : Completed Slow [ 4.668] op_http_wait : Dispatched Async [ 4.668] op_http_set_response_header : Dispatched Slow [ 4.668] op_http_set_response_header : Completed Slow [ 4.669] op_http_set_response_body_text : Dispatched Slow [ 4.669] op_http_set_response_body_text : Completed Slow [ 4.669] op_run_microtasks : Completed Slow [ 4.669] op_has_tick_scheduled : Dispatched Slow [ 4.669] op_has_tick_scheduled : Completed Slow [ 4.669] op_run_microtasks : Dispatched Slow [ 4.669] op_run_microtasks : Completed Slow [ 4.669] op_run_microtasks : Dispatched Slow [ 4.669] op_run_microtasks : Completed Slow ``` this pr ``` [ 3.726] op_http_wait : CompletedAsync Async [ 3.727] op_run_microtasks : Dispatched Slow [ 3.727] op_http_try_wait : Dispatched Slow [ 3.727] op_http_try_wait : Completed Slow [ 3.727] op_http_wait : Dispatched Async [ 3.727] op_http_set_response_header : Dispatched Slow [ 3.728] op_http_set_response_header : Completed Slow [ 3.728] op_http_set_response_body_text : Dispatched Slow [ 3.728] op_http_set_response_body_text : Completed Slow [ 3.728] op_run_microtasks : Completed Slow [ 3.728] op_run_microtasks : Dispatched Slow [ 3.728] op_run_microtasks : Completed Slow ``` --- ext/node/polyfills/_next_tick.ts | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'ext') diff --git a/ext/node/polyfills/_next_tick.ts b/ext/node/polyfills/_next_tick.ts index 5ee27728d..62470c564 100644 --- a/ext/node/polyfills/_next_tick.ts +++ b/ext/node/polyfills/_next_tick.ts @@ -87,8 +87,7 @@ export function runNextTicks() { // runMicrotasks(); // if (!hasTickScheduled() && !hasRejectionToWarn()) // return; - if (!core.hasTickScheduled()) { - core.runMicrotasks(); + if (queue.isEmpty() || !core.hasTickScheduled()) { return true; } -- cgit v1.2.3 
From 473e3069de4bf5877a6f1140aa0462e05f745536 Mon Sep 17 00:00:00 2001 From: Leo Kettmeir Date: Sat, 19 Oct 2024 14:59:39 -0700 Subject: chore: update nix crate (#26422) Dedupes nix dependency, since `rustyline` depends on a newer version than what we currently use --- ext/fs/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'ext') diff --git a/ext/fs/Cargo.toml b/ext/fs/Cargo.toml index 313c84fdb..ab0bf22fd 100644 --- a/ext/fs/Cargo.toml +++ b/ext/fs/Cargo.toml @@ -31,7 +31,7 @@ serde.workspace = true thiserror.workspace = true [target.'cfg(unix)'.dependencies] -nix.workspace = true +nix = { workspace = true, features = ["user"] } [target.'cfg(windows)'.dependencies] winapi = { workspace = true, features = ["winbase"] } -- cgit v1.2.3 From afb33b3c2597c9ec943f71218b236486fbc86e23 Mon Sep 17 00:00:00 2001 From: jiang1997 Date: Mon, 21 Oct 2024 15:50:53 +0800 Subject: fix(ext/node): use primordials in `ext/node/polyfills/https.ts` (#26323) Towards https://github.com/denoland/deno/issues/24236 --- ext/node/polyfills/https.ts | 31 +++++++++++++++++++------------ 1 file changed, 19 insertions(+), 12 deletions(-) (limited to 'ext') diff --git a/ext/node/polyfills/https.ts b/ext/node/polyfills/https.ts index f60c5e471..dd24cf048 100644 --- a/ext/node/polyfills/https.ts +++ b/ext/node/polyfills/https.ts @@ -1,9 +1,6 @@ // Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. // Copyright Joyent and Node contributors. All rights reserved. MIT license. 
-// TODO(petamoriken): enable prefer-primordials for node polyfills -// deno-lint-ignore-file prefer-primordials - import { notImplemented } from "ext:deno_node/_utils.ts"; import { urlToHttpOptions } from "ext:deno_node/internal/url.ts"; import { @@ -17,6 +14,14 @@ import { type ServerHandler, ServerImpl as HttpServer } from "node:http"; import { validateObject } from "ext:deno_node/internal/validators.mjs"; import { kEmptyObject } from "ext:deno_node/internal/util.mjs"; import { Buffer } from "node:buffer"; +import { primordials } from "ext:core/mod.js"; +const { + ArrayPrototypeShift, + ArrayPrototypeUnshift, + ArrayIsArray, + ObjectPrototypeIsPrototypeOf, + ObjectAssign, +} = primordials; export class Server extends HttpServer { constructor(opts, requestListener?: ServerHandler) { @@ -29,11 +34,11 @@ export class Server extends HttpServer { validateObject(opts, "options"); } - if (opts.cert && Array.isArray(opts.cert)) { + if (opts.cert && ArrayIsArray(opts.cert)) { notImplemented("https.Server.opts.cert array type"); } - if (opts.key && Array.isArray(opts.key)) { + if (opts.key && ArrayIsArray(opts.key)) { notImplemented("https.Server.opts.key array type"); } @@ -42,10 +47,12 @@ export class Server extends HttpServer { _additionalServeOptions() { return { - cert: this._opts.cert instanceof Buffer + cert: ObjectPrototypeIsPrototypeOf(Buffer, this._opts.cert) + // deno-lint-ignore prefer-primordials ? this._opts.cert.toString() : this._opts.cert, - key: this._opts.key instanceof Buffer + key: ObjectPrototypeIsPrototypeOf(Buffer, this._opts.key) + // deno-lint-ignore prefer-primordials ? 
this._opts.key.toString() : this._opts.key, }; @@ -159,18 +166,18 @@ export function request(...args: any[]) { let options = {}; if (typeof args[0] === "string") { - const urlStr = args.shift(); + const urlStr = ArrayPrototypeShift(args); options = urlToHttpOptions(new URL(urlStr)); - } else if (args[0] instanceof URL) { - options = urlToHttpOptions(args.shift()); + } else if (ObjectPrototypeIsPrototypeOf(URL, args[0])) { + options = urlToHttpOptions(ArrayPrototypeShift(args)); } if (args[0] && typeof args[0] !== "function") { - Object.assign(options, args.shift()); + ObjectAssign(options, ArrayPrototypeShift(args)); } options._defaultAgent = globalAgent; - args.unshift(options); + ArrayPrototypeUnshift(args, options); return new HttpsClientRequest(args[0], args[1]); } -- cgit v1.2.3 From 9696e0b3780ede9bb59ab3cf3bf4f2df179d0b32 Mon Sep 17 00:00:00 2001 From: Leo Kettmeir Date: Tue, 22 Oct 2024 01:57:58 -0700 Subject: fix(ext/console): ignore casing for named colors in css parsing (#26466) --- ext/console/01_console.js | 1 + 1 file changed, 1 insertion(+) (limited to 'ext') diff --git a/ext/console/01_console.js b/ext/console/01_console.js index 3c07cf64a..3803492b9 100644 --- a/ext/console/01_console.js +++ b/ext/console/01_console.js @@ -2653,6 +2653,7 @@ const HSL_PATTERN = new SafeRegExp( ); function parseCssColor(colorString) { + colorString = StringPrototypeToLowerCase(colorString); if (colorKeywords.has(colorString)) { colorString = colorKeywords.get(colorString); } -- cgit v1.2.3 From 285635daa67274f961d29c1e7795222709dc9618 Mon Sep 17 00:00:00 2001 From: Yoshiya Hinosawa Date: Wed, 23 Oct 2024 11:28:04 +0900 Subject: fix(ext/node): map `ERROR_INVALID_NAME` to `ENOENT` on windows (#26475) In libuv on windows, `ERROR_INVALID_NAME` is mapped to `ENOENT`, but it is mapped to `EINVAL` in our compat implementation, which causes the issue #24899. 
ref: https://github.com/libuv/libuv/blob/d4ab6fbba4669935a6bc23645372dfe4ac29ab39/src/win/error.c#L138 closes #24899 closes #26411 closes #23635 closes #21165 closes #19067 --- ext/node/ops/winerror.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'ext') diff --git a/ext/node/ops/winerror.rs b/ext/node/ops/winerror.rs index e9dbadb6f..cb053774e 100644 --- a/ext/node/ops/winerror.rs +++ b/ext/node/ops/winerror.rs @@ -62,7 +62,7 @@ pub fn op_node_sys_to_uv_error(err: i32) -> String { WSAEHOSTUNREACH => "EHOSTUNREACH", ERROR_INSUFFICIENT_BUFFER => "EINVAL", ERROR_INVALID_DATA => "EINVAL", - ERROR_INVALID_NAME => "EINVAL", + ERROR_INVALID_NAME => "ENOENT", ERROR_INVALID_PARAMETER => "EINVAL", WSAEINVAL => "EINVAL", WSAEPFNOSUPPORT => "EINVAL", -- cgit v1.2.3 From 92ed4d38dbef98b9353d6dd6d96abb400be56f9f Mon Sep 17 00:00:00 2001 From: Satya Rohith Date: Wed, 23 Oct 2024 13:17:43 +0530 Subject: fix(node:tls): set TLSSocket.alpnProtocol for client connections (#26476) Towards https://github.com/denoland/deno/issues/26127 --- ext/node/polyfills/_tls_wrap.ts | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) (limited to 'ext') diff --git a/ext/node/polyfills/_tls_wrap.ts b/ext/node/polyfills/_tls_wrap.ts index a614b45df..e36fc637e 100644 --- a/ext/node/polyfills/_tls_wrap.ts +++ b/ext/node/polyfills/_tls_wrap.ts @@ -68,6 +68,7 @@ export class TLSSocket extends net.Socket { secureConnecting: boolean; _SNICallback: any; servername: string | null; + alpnProtocol: string | boolean | null; alpnProtocols: string[] | null; authorized: boolean; authorizationError: any; @@ -114,6 +115,7 @@ export class TLSSocket extends net.Socket { this.secureConnecting = true; this._SNICallback = null; this.servername = null; + this.alpnProtocol = null; this.alpnProtocols = tlsOptions.ALPNProtocols; this.authorized = false; this.authorizationError = null; @@ -151,10 +153,21 @@ export class TLSSocket extends net.Socket { handle.afterConnect = async (req: any, 
status: number) => { try { const conn = await Deno.startTls(handle[kStreamBaseField], options); + try { + const hs = await conn.handshake(); + if (hs.alpnProtocol) { + tlssock.alpnProtocol = hs.alpnProtocol; + } else { + tlssock.alpnProtocol = false; + } + } catch { + // Don't interrupt "secure" event to let the first read/write + // operation emit the error. + } handle[kStreamBaseField] = conn; tlssock.emit("secure"); tlssock.removeListener("end", onConnectEnd); - } catch { + } catch (_) { // TODO(kt3k): Handle this } return afterConnect.call(handle, req, status); @@ -269,6 +282,7 @@ export class ServerImpl extends EventEmitter { // Creates TCP handle and socket directly from Deno.TlsConn. // This works as TLS socket. We don't use TLSSocket class for doing // this because Deno.startTls only supports client side tcp connection. + // TODO(@satyarohith): set TLSSocket.alpnProtocol when we use TLSSocket class. const handle = new TCP(TCPConstants.SOCKET, await listener.accept()); const socket = new net.Socket({ handle }); this.emit("secureConnection", socket); -- cgit v1.2.3 From fa49fd404be82daf8ac7228bf54e780135f67b17 Mon Sep 17 00:00:00 2001 From: Divy Srivastava Date: Thu, 24 Oct 2024 09:41:38 +0530 Subject: fix(ext/ffi): return u64/i64 as bigints from nonblocking ffi calls (#26486) Fixes https://github.com/denoland/deno/issues/25194 --- ext/ffi/Cargo.toml | 1 + ext/ffi/call.rs | 27 +++++++++++++++------------ 2 files changed, 16 insertions(+), 12 deletions(-) (limited to 'ext') diff --git a/ext/ffi/Cargo.toml b/ext/ffi/Cargo.toml index 80b0180d0..59dc13ed9 100644 --- a/ext/ffi/Cargo.toml +++ b/ext/ffi/Cargo.toml @@ -21,6 +21,7 @@ dynasmrt = "1.2.3" libffi = "=3.2.0" libffi-sys = "=2.3.0" log.workspace = true +num-bigint.workspace = true serde.workspace = true serde-value = "0.7" serde_json = "1.0" diff --git a/ext/ffi/call.rs b/ext/ffi/call.rs index d337b29b0..ef61dc383 100644 --- a/ext/ffi/call.rs +++ b/ext/ffi/call.rs @@ -9,12 +9,14 @@ use crate::FfiPermissions; 
use crate::ForeignFunction; use deno_core::op2; use deno_core::serde_json::Value; +use deno_core::serde_v8::BigInt as V8BigInt; use deno_core::serde_v8::ExternalPointer; use deno_core::unsync::spawn_blocking; use deno_core::v8; use deno_core::OpState; use deno_core::ResourceId; use libffi::middle::Arg; +use num_bigint::BigInt; use serde::Serialize; use std::cell::RefCell; use std::ffi::c_void; @@ -202,6 +204,7 @@ where #[serde(untagged)] pub enum FfiValue { Value(Value), + BigInt(V8BigInt), External(ExternalPointer), } @@ -251,18 +254,18 @@ fn ffi_call( NativeType::I32 => { FfiValue::Value(Value::from(cif.call::(fun_ptr, &call_args))) } - NativeType::U64 => { - FfiValue::Value(Value::from(cif.call::(fun_ptr, &call_args))) - } - NativeType::I64 => { - FfiValue::Value(Value::from(cif.call::(fun_ptr, &call_args))) - } - NativeType::USize => { - FfiValue::Value(Value::from(cif.call::(fun_ptr, &call_args))) - } - NativeType::ISize => { - FfiValue::Value(Value::from(cif.call::(fun_ptr, &call_args))) - } + NativeType::U64 => FfiValue::BigInt(V8BigInt::from(BigInt::from( + cif.call::(fun_ptr, &call_args), + ))), + NativeType::I64 => FfiValue::BigInt(V8BigInt::from(BigInt::from( + cif.call::(fun_ptr, &call_args), + ))), + NativeType::USize => FfiValue::BigInt(V8BigInt::from(BigInt::from( + cif.call::(fun_ptr, &call_args), + ))), + NativeType::ISize => FfiValue::BigInt(V8BigInt::from(BigInt::from( + cif.call::(fun_ptr, &call_args), + ))), NativeType::F32 => { FfiValue::Value(Value::from(cif.call::(fun_ptr, &call_args))) } -- cgit v1.2.3 From 7c57105cc41cf16c5f48f85d85c7fd9bd3bb4d1f Mon Sep 17 00:00:00 2001 From: Nathan Whitaker <17734409+nathanwhit@users.noreply.github.com> Date: Wed, 23 Oct 2024 21:13:30 -0700 Subject: fix(ext/node): only set our end of child process pipe to nonblocking mode (#26495) Fixes playwright on linux, as reported in https://github.com/denoland/deno/issues/16899#issuecomment-2378268454. 
The issue was that we were opening the socket in nonblocking mode, which meant that subprocesses trying to use it would get a `EWOULDBLOCK` error (unexpectedly). The fix here is to only set nonblocking mode on our end (which we need to use asynchronously) --- ext/io/bi_pipe.rs | 21 +++++++++------------ 1 file changed, 9 insertions(+), 12 deletions(-) (limited to 'ext') diff --git a/ext/io/bi_pipe.rs b/ext/io/bi_pipe.rs index b6fc70ca2..3492e2f44 100644 --- a/ext/io/bi_pipe.rs +++ b/ext/io/bi_pipe.rs @@ -183,9 +183,10 @@ fn from_raw( ) -> Result<(BiPipeRead, BiPipeWrite), std::io::Error> { use std::os::fd::FromRawFd; // Safety: The fd is part of a pair of connected sockets - let unix_stream = tokio::net::UnixStream::from_std(unsafe { - std::os::unix::net::UnixStream::from_raw_fd(stream) - })?; + let unix_stream = + unsafe { std::os::unix::net::UnixStream::from_raw_fd(stream) }; + unix_stream.set_nonblocking(true)?; + let unix_stream = tokio::net::UnixStream::from_std(unix_stream)?; let (read, write) = unix_stream.into_split(); Ok((BiPipeRead { inner: read }, BiPipeWrite { inner: write })) } @@ -280,7 +281,7 @@ pub fn bi_pipe_pair_raw( // https://github.com/nix-rust/nix/issues/861 let mut fds = [-1, -1]; #[cfg(not(target_os = "macos"))] - let flags = libc::SOCK_CLOEXEC | libc::SOCK_NONBLOCK; + let flags = libc::SOCK_CLOEXEC; #[cfg(target_os = "macos")] let flags = 0; @@ -301,13 +302,13 @@ pub fn bi_pipe_pair_raw( if cfg!(target_os = "macos") { let fcntl = |fd: i32, flag: libc::c_int| -> Result<(), std::io::Error> { // SAFETY: libc call, fd is valid - let flags = unsafe { libc::fcntl(fd, libc::F_GETFL) }; + let flags = unsafe { libc::fcntl(fd, libc::F_GETFD) }; if flags == -1 { return Err(fail(fds)); } // SAFETY: libc call, fd is valid - let ret = unsafe { libc::fcntl(fd, libc::F_SETFL, flags | flag) }; + let ret = unsafe { libc::fcntl(fd, libc::F_SETFD, flags | flag) }; if ret == -1 { return Err(fail(fds)); } @@ -323,13 +324,9 @@ pub fn bi_pipe_pair_raw( 
std::io::Error::last_os_error() } - // SOCK_NONBLOCK is not supported on macOS. - (fcntl)(fds[0], libc::O_NONBLOCK)?; - (fcntl)(fds[1], libc::O_NONBLOCK)?; - // SOCK_CLOEXEC is not supported on macOS. - (fcntl)(fds[0], libc::FD_CLOEXEC)?; - (fcntl)(fds[1], libc::FD_CLOEXEC)?; + fcntl(fds[0], libc::FD_CLOEXEC)?; + fcntl(fds[1], libc::FD_CLOEXEC)?; } let fd1 = fds[0]; -- cgit v1.2.3 From 27df42f659ae7b77968d31363ade89addb516ea1 Mon Sep 17 00:00:00 2001 From: Nathan Whitaker <17734409+nathanwhit@users.noreply.github.com> Date: Wed, 23 Oct 2024 21:50:35 -0700 Subject: fix(ext/node): cancel pending ipc writes on channel close (#26504) Fixes the issue described in https://github.com/denoland/deno/issues/23882#issuecomment-2423316362. The parent was starting to send a message right before the process would exit, and the channel closed in the middle of the write. Unlike with reads, we weren't cancelling the pending writes, which resulted in a `Broken pipe` error surfacing to the user. --- ext/node/ops/ipc.rs | 9 ++++++++- ext/node/polyfills/internal/child_process.ts | 19 +++++++++++++++---- 2 files changed, 23 insertions(+), 5 deletions(-) (limited to 'ext') diff --git a/ext/node/ops/ipc.rs b/ext/node/ops/ipc.rs index 59b6fece1..c5e0f875d 100644 --- a/ext/node/ops/ipc.rs +++ b/ext/node/ops/ipc.rs @@ -216,10 +216,17 @@ mod impl_ { queue_ok.set_index(scope, 0, v); } Ok(async move { - stream.clone().write_msg_bytes(&serialized).await?; + let cancel = stream.cancel.clone(); + let result = stream + .clone() + .write_msg_bytes(&serialized) + .or_cancel(cancel) + .await; + // adjust count even on error stream .queued_bytes .fetch_sub(serialized.len(), std::sync::atomic::Ordering::Relaxed); + result??; Ok(()) }) } diff --git a/ext/node/polyfills/internal/child_process.ts b/ext/node/polyfills/internal/child_process.ts index 65d825fd2..cfff1079f 100644 --- a/ext/node/polyfills/internal/child_process.ts +++ b/ext/node/polyfills/internal/child_process.ts @@ -1339,7 +1339,7 @@ export 
function setupChannel(target: any, ipc: number) { } } - process.nextTick(handleMessage, msg); + nextTick(handleMessage, msg); } } catch (err) { if ( @@ -1400,7 +1400,7 @@ export function setupChannel(target: any, ipc: number) { if (!target.connected) { const err = new ERR_IPC_CHANNEL_CLOSED(); if (typeof callback === "function") { - process.nextTick(callback, err); + nextTick(callback, err); } else { nextTick(() => target.emit("error", err)); } @@ -1416,7 +1416,18 @@ export function setupChannel(target: any, ipc: number) { .then(() => { control.unrefCounted(); if (callback) { - process.nextTick(callback, null); + nextTick(callback, null); + } + }, (err: Error) => { + control.unrefCounted(); + if (err instanceof Deno.errors.Interrupted) { + // Channel closed on us mid-write. + } else { + if (typeof callback === "function") { + nextTick(callback, err); + } else { + nextTick(() => target.emit("error", err)); + } } }); return queueOk[0]; @@ -1433,7 +1444,7 @@ export function setupChannel(target: any, ipc: number) { target.connected = false; target[kCanDisconnect] = false; control[kControlDisconnect](); - process.nextTick(() => { + nextTick(() => { target.channel = null; core.close(ipc); target.emit("disconnect"); -- cgit v1.2.3 From 79a3ad2b950009f560641cea359d7deb6f7a61ac Mon Sep 17 00:00:00 2001 From: snek Date: Thu, 24 Oct 2024 09:13:54 +0200 Subject: feat: support node-api in denort (#26389) exposes node-api symbols in denort so that `deno compile` can run native addons. 
--- ext/napi/Cargo.toml | 9 + ext/napi/README.md | 114 + ext/napi/build.rs | 22 + ext/napi/generated_symbol_exports_list_linux.def | 1 + ext/napi/generated_symbol_exports_list_macos.def | 160 + ext/napi/generated_symbol_exports_list_windows.def | 162 + ext/napi/js_native_api.rs | 3616 ++++++++++++++++++++ ext/napi/lib.rs | 43 + ext/napi/node_api.rs | 1010 ++++++ ext/napi/sym/Cargo.toml | 21 + ext/napi/sym/README.md | 38 + ext/napi/sym/lib.rs | 31 + ext/napi/sym/symbol_exports.json | 164 + ext/napi/util.rs | 287 ++ ext/napi/uv.rs | 231 ++ 15 files changed, 5909 insertions(+) create mode 100644 ext/napi/build.rs create mode 100644 ext/napi/generated_symbol_exports_list_linux.def create mode 100644 ext/napi/generated_symbol_exports_list_macos.def create mode 100644 ext/napi/generated_symbol_exports_list_windows.def create mode 100644 ext/napi/js_native_api.rs create mode 100644 ext/napi/node_api.rs create mode 100644 ext/napi/sym/Cargo.toml create mode 100644 ext/napi/sym/README.md create mode 100644 ext/napi/sym/lib.rs create mode 100644 ext/napi/sym/symbol_exports.json create mode 100644 ext/napi/util.rs create mode 100644 ext/napi/uv.rs (limited to 'ext') diff --git a/ext/napi/Cargo.toml b/ext/napi/Cargo.toml index ef2e41d57..ed39d96df 100644 --- a/ext/napi/Cargo.toml +++ b/ext/napi/Cargo.toml @@ -16,5 +16,14 @@ path = "lib.rs" [dependencies] deno_core.workspace = true deno_permissions.workspace = true +libc.workspace = true libloading = { version = "0.7" } +log.workspace = true +napi_sym.workspace = true thiserror.workspace = true + +[target.'cfg(windows)'.dependencies] +windows-sys.workspace = true + +[dev-dependencies] +libuv-sys-lite = "=1.48.2" diff --git a/ext/napi/README.md b/ext/napi/README.md index e69de29bb..b47929524 100644 --- a/ext/napi/README.md +++ b/ext/napi/README.md @@ -0,0 +1,114 @@ +# napi + +This directory contains source for Deno's Node-API implementation. It depends on +`napi_sym` and `deno_napi`. 
+ +Files are generally organized the same as in Node.js's implementation to ease in +ensuring compatibility. + +## Adding a new function + +Add the symbol name to +[`cli/napi_sym/symbol_exports.json`](../napi_sym/symbol_exports.json). + +```diff +{ + "symbols": [ + ... + "napi_get_undefined", +- "napi_get_null" ++ "napi_get_null", ++ "napi_get_boolean" + ] +} +``` + +Determine where to place the implementation. `napi_get_boolean` is related to JS +values so we will place it in `js_native_api.rs`. If something is not clear, +just create a new file module. + +See [`napi_sym`](../napi_sym/) for writing the implementation: + +```rust +#[napi_sym::napi_sym] +fn napi_get_boolean( + env: *mut Env, + value: bool, + result: *mut napi_value, +) -> Result { + // ... + Ok(()) +} +``` + +Update the generated symbol lists using the script: + +``` +deno run --allow-write tools/napi/generate_symbols_lists.js +``` + +Add a test in [`/tests/napi`](../../tests/napi/). You can also refer to Node.js +test suite for Node-API. + +```js +// tests/napi/boolean_test.js +import { assertEquals, loadTestLibrary } from "./common.js"; +const lib = loadTestLibrary(); +Deno.test("napi get boolean", function () { + assertEquals(lib.test_get_boolean(true), true); + assertEquals(lib.test_get_boolean(false), false); +}); +``` + +```rust +// tests/napi/src/boolean.rs + +use napi_sys::Status::napi_ok; +use napi_sys::ValueType::napi_boolean; +use napi_sys::*; + +extern "C" fn test_boolean( + env: napi_env, + info: napi_callback_info, +) -> napi_value { + let (args, argc, _) = crate::get_callback_info!(env, info, 1); + assert_eq!(argc, 1); + + let mut ty = -1; + assert!(unsafe { napi_typeof(env, args[0], &mut ty) } == napi_ok); + assert_eq!(ty, napi_boolean); + + // Use napi_get_boolean here... 
+ + value +} + +pub fn init(env: napi_env, exports: napi_value) { + let properties = &[crate::new_property!(env, "test_boolean\0", test_boolean)]; + + unsafe { + napi_define_properties(env, exports, properties.len(), properties.as_ptr()) + }; +} +``` + +```diff +// tests/napi/src/lib.rs + ++ mod boolean; + +... + +#[no_mangle] +unsafe extern "C" fn napi_register_module_v1( + env: napi_env, + exports: napi_value, +) -> napi_value { + ... ++ boolean::init(env, exports); + + exports +} +``` + +Run the test using `cargo test -p tests/napi`. diff --git a/ext/napi/build.rs b/ext/napi/build.rs new file mode 100644 index 000000000..8705830a9 --- /dev/null +++ b/ext/napi/build.rs @@ -0,0 +1,22 @@ +// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. + +fn main() { + let symbols_file_name = match std::env::consts::OS { + "android" | "freebsd" | "openbsd" => { + "generated_symbol_exports_list_linux.def".to_string() + } + os => format!("generated_symbol_exports_list_{}.def", os), + }; + let symbols_path = std::path::Path::new(".") + .join(symbols_file_name) + .canonicalize() + .expect( + "Missing symbols list! 
Generate using tools/napi/generate_symbols_lists.js", + ); + + println!("cargo:rustc-rerun-if-changed={}", symbols_path.display()); + + let path = std::path::PathBuf::from(std::env::var("OUT_DIR").unwrap()) + .join("napi_symbol_path.txt"); + std::fs::write(path, symbols_path.as_os_str().as_encoded_bytes()).unwrap(); +} diff --git a/ext/napi/generated_symbol_exports_list_linux.def b/ext/napi/generated_symbol_exports_list_linux.def new file mode 100644 index 000000000..614880ebf --- /dev/null +++ b/ext/napi/generated_symbol_exports_list_linux.def @@ -0,0 +1 @@ +{ "node_api_create_syntax_error"; "napi_make_callback"; "napi_has_named_property"; "napi_async_destroy"; "napi_coerce_to_object"; "napi_get_arraybuffer_info"; "napi_detach_arraybuffer"; "napi_get_undefined"; "napi_reference_unref"; "napi_fatal_error"; "napi_open_callback_scope"; "napi_close_callback_scope"; "napi_get_value_uint32"; "napi_create_function"; "napi_create_arraybuffer"; "napi_get_value_int64"; "napi_get_all_property_names"; "napi_resolve_deferred"; "napi_is_detached_arraybuffer"; "napi_create_string_utf8"; "napi_create_threadsafe_function"; "node_api_throw_syntax_error"; "napi_create_bigint_int64"; "napi_wrap"; "napi_set_property"; "napi_get_value_bigint_int64"; "napi_open_handle_scope"; "napi_create_error"; "napi_create_buffer"; "napi_cancel_async_work"; "napi_is_exception_pending"; "napi_acquire_threadsafe_function"; "napi_create_external"; "napi_get_threadsafe_function_context"; "napi_get_null"; "napi_create_string_utf16"; "node_api_create_external_string_utf16"; "napi_get_value_bigint_uint64"; "napi_module_register"; "napi_is_typedarray"; "napi_create_external_buffer"; "napi_get_new_target"; "napi_get_instance_data"; "napi_close_handle_scope"; "napi_get_value_string_utf16"; "napi_get_property_names"; "napi_is_arraybuffer"; "napi_get_cb_info"; "napi_define_properties"; "napi_add_env_cleanup_hook"; "node_api_get_module_file_name"; "napi_get_node_version"; "napi_create_int64"; 
"napi_create_double"; "napi_get_and_clear_last_exception"; "napi_create_reference"; "napi_get_typedarray_info"; "napi_call_threadsafe_function"; "napi_get_last_error_info"; "napi_create_array_with_length"; "napi_coerce_to_number"; "napi_get_global"; "napi_is_error"; "napi_set_instance_data"; "napi_create_typedarray"; "napi_throw_type_error"; "napi_has_property"; "napi_get_value_external"; "napi_create_range_error"; "napi_typeof"; "napi_ref_threadsafe_function"; "napi_create_bigint_uint64"; "napi_get_prototype"; "napi_adjust_external_memory"; "napi_release_threadsafe_function"; "napi_delete_async_work"; "napi_create_string_latin1"; "node_api_create_external_string_latin1"; "napi_is_array"; "napi_unref_threadsafe_function"; "napi_throw_error"; "napi_has_own_property"; "napi_get_reference_value"; "napi_remove_env_cleanup_hook"; "napi_get_value_string_utf8"; "napi_is_promise"; "napi_get_boolean"; "napi_run_script"; "napi_get_element"; "napi_get_named_property"; "napi_get_buffer_info"; "napi_get_value_bool"; "napi_reference_ref"; "napi_create_object"; "napi_create_promise"; "napi_create_int32"; "napi_escape_handle"; "napi_open_escapable_handle_scope"; "napi_throw"; "napi_get_value_double"; "napi_set_named_property"; "napi_call_function"; "napi_create_date"; "napi_object_freeze"; "napi_get_uv_event_loop"; "napi_get_value_string_latin1"; "napi_reject_deferred"; "napi_add_finalizer"; "napi_create_array"; "napi_delete_reference"; "napi_get_date_value"; "napi_create_dataview"; "napi_get_version"; "napi_define_class"; "napi_is_date"; "napi_remove_wrap"; "napi_delete_property"; "napi_instanceof"; "napi_create_buffer_copy"; "napi_delete_element"; "napi_object_seal"; "napi_queue_async_work"; "napi_get_value_bigint_words"; "napi_is_buffer"; "napi_get_array_length"; "napi_get_property"; "napi_new_instance"; "napi_set_element"; "napi_create_bigint_words"; "napi_strict_equals"; "napi_is_dataview"; "napi_close_escapable_handle_scope"; "napi_get_dataview_info"; "napi_get_value_int32"; 
"napi_unwrap"; "napi_throw_range_error"; "napi_coerce_to_bool"; "napi_create_uint32"; "napi_has_element"; "napi_create_external_arraybuffer"; "napi_create_symbol"; "node_api_symbol_for"; "napi_coerce_to_string"; "napi_create_type_error"; "napi_fatal_exception"; "napi_create_async_work"; "napi_async_init"; "node_api_create_property_key_utf16"; "napi_type_tag_object"; "napi_check_object_type_tag"; "node_api_post_finalizer"; "napi_add_async_cleanup_hook"; "napi_remove_async_cleanup_hook"; "uv_mutex_init"; "uv_mutex_lock"; "uv_mutex_unlock"; "uv_mutex_destroy"; "uv_async_init"; "uv_async_send"; "uv_close"; }; \ No newline at end of file diff --git a/ext/napi/generated_symbol_exports_list_macos.def b/ext/napi/generated_symbol_exports_list_macos.def new file mode 100644 index 000000000..36b2f37fa --- /dev/null +++ b/ext/napi/generated_symbol_exports_list_macos.def @@ -0,0 +1,160 @@ +_node_api_create_syntax_error +_napi_make_callback +_napi_has_named_property +_napi_async_destroy +_napi_coerce_to_object +_napi_get_arraybuffer_info +_napi_detach_arraybuffer +_napi_get_undefined +_napi_reference_unref +_napi_fatal_error +_napi_open_callback_scope +_napi_close_callback_scope +_napi_get_value_uint32 +_napi_create_function +_napi_create_arraybuffer +_napi_get_value_int64 +_napi_get_all_property_names +_napi_resolve_deferred +_napi_is_detached_arraybuffer +_napi_create_string_utf8 +_napi_create_threadsafe_function +_node_api_throw_syntax_error +_napi_create_bigint_int64 +_napi_wrap +_napi_set_property +_napi_get_value_bigint_int64 +_napi_open_handle_scope +_napi_create_error +_napi_create_buffer +_napi_cancel_async_work +_napi_is_exception_pending +_napi_acquire_threadsafe_function +_napi_create_external +_napi_get_threadsafe_function_context +_napi_get_null +_napi_create_string_utf16 +_node_api_create_external_string_utf16 +_napi_get_value_bigint_uint64 +_napi_module_register +_napi_is_typedarray +_napi_create_external_buffer +_napi_get_new_target +_napi_get_instance_data 
+_napi_close_handle_scope +_napi_get_value_string_utf16 +_napi_get_property_names +_napi_is_arraybuffer +_napi_get_cb_info +_napi_define_properties +_napi_add_env_cleanup_hook +_node_api_get_module_file_name +_napi_get_node_version +_napi_create_int64 +_napi_create_double +_napi_get_and_clear_last_exception +_napi_create_reference +_napi_get_typedarray_info +_napi_call_threadsafe_function +_napi_get_last_error_info +_napi_create_array_with_length +_napi_coerce_to_number +_napi_get_global +_napi_is_error +_napi_set_instance_data +_napi_create_typedarray +_napi_throw_type_error +_napi_has_property +_napi_get_value_external +_napi_create_range_error +_napi_typeof +_napi_ref_threadsafe_function +_napi_create_bigint_uint64 +_napi_get_prototype +_napi_adjust_external_memory +_napi_release_threadsafe_function +_napi_delete_async_work +_napi_create_string_latin1 +_node_api_create_external_string_latin1 +_napi_is_array +_napi_unref_threadsafe_function +_napi_throw_error +_napi_has_own_property +_napi_get_reference_value +_napi_remove_env_cleanup_hook +_napi_get_value_string_utf8 +_napi_is_promise +_napi_get_boolean +_napi_run_script +_napi_get_element +_napi_get_named_property +_napi_get_buffer_info +_napi_get_value_bool +_napi_reference_ref +_napi_create_object +_napi_create_promise +_napi_create_int32 +_napi_escape_handle +_napi_open_escapable_handle_scope +_napi_throw +_napi_get_value_double +_napi_set_named_property +_napi_call_function +_napi_create_date +_napi_object_freeze +_napi_get_uv_event_loop +_napi_get_value_string_latin1 +_napi_reject_deferred +_napi_add_finalizer +_napi_create_array +_napi_delete_reference +_napi_get_date_value +_napi_create_dataview +_napi_get_version +_napi_define_class +_napi_is_date +_napi_remove_wrap +_napi_delete_property +_napi_instanceof +_napi_create_buffer_copy +_napi_delete_element +_napi_object_seal +_napi_queue_async_work +_napi_get_value_bigint_words +_napi_is_buffer +_napi_get_array_length +_napi_get_property 
+_napi_new_instance +_napi_set_element +_napi_create_bigint_words +_napi_strict_equals +_napi_is_dataview +_napi_close_escapable_handle_scope +_napi_get_dataview_info +_napi_get_value_int32 +_napi_unwrap +_napi_throw_range_error +_napi_coerce_to_bool +_napi_create_uint32 +_napi_has_element +_napi_create_external_arraybuffer +_napi_create_symbol +_node_api_symbol_for +_napi_coerce_to_string +_napi_create_type_error +_napi_fatal_exception +_napi_create_async_work +_napi_async_init +_node_api_create_property_key_utf16 +_napi_type_tag_object +_napi_check_object_type_tag +_node_api_post_finalizer +_napi_add_async_cleanup_hook +_napi_remove_async_cleanup_hook +_uv_mutex_init +_uv_mutex_lock +_uv_mutex_unlock +_uv_mutex_destroy +_uv_async_init +_uv_async_send +_uv_close \ No newline at end of file diff --git a/ext/napi/generated_symbol_exports_list_windows.def b/ext/napi/generated_symbol_exports_list_windows.def new file mode 100644 index 000000000..b7355112e --- /dev/null +++ b/ext/napi/generated_symbol_exports_list_windows.def @@ -0,0 +1,162 @@ +LIBRARY +EXPORTS + node_api_create_syntax_error + napi_make_callback + napi_has_named_property + napi_async_destroy + napi_coerce_to_object + napi_get_arraybuffer_info + napi_detach_arraybuffer + napi_get_undefined + napi_reference_unref + napi_fatal_error + napi_open_callback_scope + napi_close_callback_scope + napi_get_value_uint32 + napi_create_function + napi_create_arraybuffer + napi_get_value_int64 + napi_get_all_property_names + napi_resolve_deferred + napi_is_detached_arraybuffer + napi_create_string_utf8 + napi_create_threadsafe_function + node_api_throw_syntax_error + napi_create_bigint_int64 + napi_wrap + napi_set_property + napi_get_value_bigint_int64 + napi_open_handle_scope + napi_create_error + napi_create_buffer + napi_cancel_async_work + napi_is_exception_pending + napi_acquire_threadsafe_function + napi_create_external + napi_get_threadsafe_function_context + napi_get_null + napi_create_string_utf16 + 
node_api_create_external_string_utf16 + napi_get_value_bigint_uint64 + napi_module_register + napi_is_typedarray + napi_create_external_buffer + napi_get_new_target + napi_get_instance_data + napi_close_handle_scope + napi_get_value_string_utf16 + napi_get_property_names + napi_is_arraybuffer + napi_get_cb_info + napi_define_properties + napi_add_env_cleanup_hook + node_api_get_module_file_name + napi_get_node_version + napi_create_int64 + napi_create_double + napi_get_and_clear_last_exception + napi_create_reference + napi_get_typedarray_info + napi_call_threadsafe_function + napi_get_last_error_info + napi_create_array_with_length + napi_coerce_to_number + napi_get_global + napi_is_error + napi_set_instance_data + napi_create_typedarray + napi_throw_type_error + napi_has_property + napi_get_value_external + napi_create_range_error + napi_typeof + napi_ref_threadsafe_function + napi_create_bigint_uint64 + napi_get_prototype + napi_adjust_external_memory + napi_release_threadsafe_function + napi_delete_async_work + napi_create_string_latin1 + node_api_create_external_string_latin1 + napi_is_array + napi_unref_threadsafe_function + napi_throw_error + napi_has_own_property + napi_get_reference_value + napi_remove_env_cleanup_hook + napi_get_value_string_utf8 + napi_is_promise + napi_get_boolean + napi_run_script + napi_get_element + napi_get_named_property + napi_get_buffer_info + napi_get_value_bool + napi_reference_ref + napi_create_object + napi_create_promise + napi_create_int32 + napi_escape_handle + napi_open_escapable_handle_scope + napi_throw + napi_get_value_double + napi_set_named_property + napi_call_function + napi_create_date + napi_object_freeze + napi_get_uv_event_loop + napi_get_value_string_latin1 + napi_reject_deferred + napi_add_finalizer + napi_create_array + napi_delete_reference + napi_get_date_value + napi_create_dataview + napi_get_version + napi_define_class + napi_is_date + napi_remove_wrap + napi_delete_property + napi_instanceof + 
napi_create_buffer_copy + napi_delete_element + napi_object_seal + napi_queue_async_work + napi_get_value_bigint_words + napi_is_buffer + napi_get_array_length + napi_get_property + napi_new_instance + napi_set_element + napi_create_bigint_words + napi_strict_equals + napi_is_dataview + napi_close_escapable_handle_scope + napi_get_dataview_info + napi_get_value_int32 + napi_unwrap + napi_throw_range_error + napi_coerce_to_bool + napi_create_uint32 + napi_has_element + napi_create_external_arraybuffer + napi_create_symbol + node_api_symbol_for + napi_coerce_to_string + napi_create_type_error + napi_fatal_exception + napi_create_async_work + napi_async_init + node_api_create_property_key_utf16 + napi_type_tag_object + napi_check_object_type_tag + node_api_post_finalizer + napi_add_async_cleanup_hook + napi_remove_async_cleanup_hook + uv_mutex_init + uv_mutex_lock + uv_mutex_unlock + uv_mutex_destroy + uv_async_init + uv_async_send + uv_close \ No newline at end of file diff --git a/ext/napi/js_native_api.rs b/ext/napi/js_native_api.rs new file mode 100644 index 000000000..53a12d6eb --- /dev/null +++ b/ext/napi/js_native_api.rs @@ -0,0 +1,3616 @@ +// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. 
+ +#![allow(non_upper_case_globals)] +#![deny(unsafe_op_in_unsafe_fn)] + +const NAPI_VERSION: u32 = 9; + +use crate::*; +use libc::INT_MAX; + +use super::util::check_new_from_utf8; +use super::util::check_new_from_utf8_len; +use super::util::get_array_buffer_ptr; +use super::util::make_external_backing_store; +use super::util::napi_clear_last_error; +use super::util::napi_set_last_error; +use super::util::v8_name_from_property_descriptor; +use crate::check_arg; +use crate::check_env; +use crate::function::create_function; +use crate::function::create_function_template; +use crate::function::CallbackInfo; +use napi_sym::napi_sym; +use std::ptr::NonNull; + +#[derive(Debug, Clone, Copy, PartialEq)] +enum ReferenceOwnership { + Runtime, + Userland, +} + +enum ReferenceState { + Strong(v8::Global), + Weak(v8::Weak), +} + +struct Reference { + env: *mut Env, + state: ReferenceState, + ref_count: u32, + ownership: ReferenceOwnership, + finalize_cb: Option, + finalize_data: *mut c_void, + finalize_hint: *mut c_void, +} + +impl Reference { + fn new( + env: *mut Env, + value: v8::Local, + initial_ref_count: u32, + ownership: ReferenceOwnership, + finalize_cb: Option, + finalize_data: *mut c_void, + finalize_hint: *mut c_void, + ) -> Box { + let isolate = unsafe { (*env).isolate() }; + + let mut reference = Box::new(Reference { + env, + state: ReferenceState::Strong(v8::Global::new(isolate, value)), + ref_count: initial_ref_count, + ownership, + finalize_cb, + finalize_data, + finalize_hint, + }); + + if initial_ref_count == 0 { + reference.set_weak(); + } + + reference + } + + fn ref_(&mut self) -> u32 { + self.ref_count += 1; + if self.ref_count == 1 { + self.set_strong(); + } + self.ref_count + } + + fn unref(&mut self) -> u32 { + let old_ref_count = self.ref_count; + if self.ref_count > 0 { + self.ref_count -= 1; + } + if old_ref_count == 1 && self.ref_count == 0 { + self.set_weak(); + } + self.ref_count + } + + fn reset(&mut self) { + self.finalize_cb = None; + 
self.finalize_data = std::ptr::null_mut(); + self.finalize_hint = std::ptr::null_mut(); + } + + fn set_strong(&mut self) { + if let ReferenceState::Weak(w) = &self.state { + let isolate = unsafe { (*self.env).isolate() }; + if let Some(g) = w.to_global(isolate) { + self.state = ReferenceState::Strong(g); + } + } + } + + fn set_weak(&mut self) { + let reference = self as *mut Reference; + if let ReferenceState::Strong(g) = &self.state { + let cb = Box::new(move |_: &mut v8::Isolate| { + Reference::weak_callback(reference) + }); + let isolate = unsafe { (*self.env).isolate() }; + self.state = + ReferenceState::Weak(v8::Weak::with_finalizer(isolate, g, cb)); + } + } + + fn weak_callback(reference: *mut Reference) { + let reference = unsafe { &mut *reference }; + + let finalize_cb = reference.finalize_cb; + let finalize_data = reference.finalize_data; + let finalize_hint = reference.finalize_hint; + reference.reset(); + + // copy this value before the finalize callback, since + // it might free the reference (which would be a UAF) + let ownership = reference.ownership; + if let Some(finalize_cb) = finalize_cb { + unsafe { + finalize_cb(reference.env as _, finalize_data, finalize_hint); + } + } + + if ownership == ReferenceOwnership::Runtime { + unsafe { drop(Reference::from_raw(reference)) } + } + } + + fn into_raw(r: Box) -> *mut Reference { + Box::into_raw(r) + } + + unsafe fn from_raw(r: *mut Reference) -> Box { + unsafe { Box::from_raw(r) } + } + + unsafe fn remove(r: *mut Reference) { + let r = unsafe { &mut *r }; + if r.ownership == ReferenceOwnership::Userland { + r.reset(); + } else { + unsafe { drop(Reference::from_raw(r)) } + } + } +} + +#[napi_sym] +fn napi_get_last_error_info( + env: *mut Env, + result: *mut *const napi_extended_error_info, +) -> napi_status { + let env = check_env!(env); + check_arg!(env, result); + + if env.last_error.error_code == napi_ok { + napi_clear_last_error(env); + } else { + env.last_error.error_message = + 
ERROR_MESSAGES[env.last_error.error_code as usize].as_ptr(); + } + + unsafe { + *result = &env.last_error; + } + + napi_ok +} + +#[napi_sym] +fn napi_create_function<'s>( + env: &'s mut Env, + name: *const c_char, + length: usize, + cb: Option, + cb_info: napi_callback_info, + result: *mut napi_value<'s>, +) -> napi_status { + let env_ptr = env as *mut Env; + check_arg!(env, result); + check_arg!(env, cb); + + let name = if !name.is_null() { + match unsafe { check_new_from_utf8_len(env, name, length) } { + Ok(s) => Some(s), + Err(status) => return status, + } + } else { + None + }; + + unsafe { + *result = + create_function(&mut env.scope(), env_ptr, name, cb.unwrap(), cb_info) + .into(); + } + + napi_ok +} + +#[napi_sym] +#[allow(clippy::too_many_arguments)] +fn napi_define_class<'s>( + env: &'s mut Env, + utf8name: *const c_char, + length: usize, + constructor: Option, + callback_data: *mut c_void, + property_count: usize, + properties: *const napi_property_descriptor, + result: *mut napi_value<'s>, +) -> napi_status { + let env_ptr = env as *mut Env; + check_arg!(env, result); + check_arg!(env, constructor); + + if property_count > 0 { + check_arg!(env, properties); + } + + let name = match unsafe { check_new_from_utf8_len(env, utf8name, length) } { + Ok(string) => string, + Err(status) => return status, + }; + + let tpl = create_function_template( + &mut env.scope(), + env_ptr, + Some(name), + constructor.unwrap(), + callback_data, + ); + + let napi_properties: &[napi_property_descriptor] = if property_count > 0 { + unsafe { std::slice::from_raw_parts(properties, property_count) } + } else { + &[] + }; + let mut static_property_count = 0; + + for p in napi_properties { + if p.attributes & napi_static != 0 { + // Will be handled below + static_property_count += 1; + continue; + } + + let name = match unsafe { v8_name_from_property_descriptor(env_ptr, p) } { + Ok(name) => name, + Err(status) => return status, + }; + + let mut accessor_property = 
v8::PropertyAttribute::NONE; + + if p.attributes & napi_enumerable == 0 { + accessor_property = accessor_property | v8::PropertyAttribute::DONT_ENUM; + } + if p.attributes & napi_configurable == 0 { + accessor_property = + accessor_property | v8::PropertyAttribute::DONT_DELETE; + } + + if p.getter.is_some() || p.setter.is_some() { + let getter = p.getter.map(|g| { + create_function_template(&mut env.scope(), env_ptr, None, g, p.data) + }); + let setter = p.setter.map(|s| { + create_function_template(&mut env.scope(), env_ptr, None, s, p.data) + }); + if getter.is_some() + && setter.is_some() + && (p.attributes & napi_writable) == 0 + { + accessor_property = + accessor_property | v8::PropertyAttribute::READ_ONLY; + } + let proto = tpl.prototype_template(&mut env.scope()); + proto.set_accessor_property(name, getter, setter, accessor_property); + } else if let Some(method) = p.method { + let function = create_function_template( + &mut env.scope(), + env_ptr, + None, + method, + p.data, + ); + let proto = tpl.prototype_template(&mut env.scope()); + proto.set_with_attr(name, function.into(), accessor_property); + } else { + let proto = tpl.prototype_template(&mut env.scope()); + if (p.attributes & napi_writable) == 0 { + accessor_property = + accessor_property | v8::PropertyAttribute::READ_ONLY; + } + proto.set_with_attr(name, p.value.unwrap().into(), accessor_property); + } + } + + let value: v8::Local = + tpl.get_function(&mut env.scope()).unwrap().into(); + + unsafe { + *result = value.into(); + } + + if static_property_count > 0 { + let mut static_descriptors = Vec::with_capacity(static_property_count); + + for p in napi_properties { + if p.attributes & napi_static != 0 { + static_descriptors.push(*p); + } + } + + crate::status_call!(unsafe { + napi_define_properties( + env_ptr, + *result, + static_descriptors.len(), + static_descriptors.as_ptr(), + ) + }); + } + + napi_ok +} + +#[napi_sym] +fn napi_get_property_names( + env: *mut Env, + object: napi_value, + 
result: *mut napi_value, +) -> napi_status { + unsafe { + napi_get_all_property_names( + env, + object, + napi_key_include_prototypes, + napi_key_enumerable | napi_key_skip_symbols, + napi_key_numbers_to_strings, + result, + ) + } +} + +#[napi_sym] +fn napi_get_all_property_names<'s>( + env: &'s mut Env, + object: napi_value, + key_mode: napi_key_collection_mode, + key_filter: napi_key_filter, + key_conversion: napi_key_conversion, + result: *mut napi_value<'s>, +) -> napi_status { + check_arg!(env, result); + + let scope = &mut env.scope(); + + let Some(obj) = object.and_then(|o| o.to_object(scope)) else { + return napi_object_expected; + }; + + let mut filter = v8::PropertyFilter::ALL_PROPERTIES; + + if key_filter & napi_key_writable != 0 { + filter = filter | v8::PropertyFilter::ONLY_WRITABLE; + } + if key_filter & napi_key_enumerable != 0 { + filter = filter | v8::PropertyFilter::ONLY_ENUMERABLE; + } + if key_filter & napi_key_configurable != 0 { + filter = filter | v8::PropertyFilter::ONLY_CONFIGURABLE; + } + if key_filter & napi_key_skip_strings != 0 { + filter = filter | v8::PropertyFilter::SKIP_STRINGS; + } + if key_filter & napi_key_skip_symbols != 0 { + filter = filter | v8::PropertyFilter::SKIP_SYMBOLS; + } + + let key_mode = match key_mode { + napi_key_include_prototypes => v8::KeyCollectionMode::IncludePrototypes, + napi_key_own_only => v8::KeyCollectionMode::OwnOnly, + _ => return napi_invalid_arg, + }; + + let key_conversion = match key_conversion { + napi_key_keep_numbers => v8::KeyConversionMode::KeepNumbers, + napi_key_numbers_to_strings => v8::KeyConversionMode::ConvertToString, + _ => return napi_invalid_arg, + }; + + let filter = v8::GetPropertyNamesArgsBuilder::new() + .mode(key_mode) + .property_filter(filter) + .index_filter(v8::IndexFilter::IncludeIndices) + .key_conversion(key_conversion) + .build(); + + let property_names = match obj.get_property_names(scope, filter) { + Some(n) => n, + None => return napi_generic_failure, + }; + + unsafe 
{ + *result = property_names.into(); + } + + napi_ok +} + +#[napi_sym] +fn napi_set_property( + env: &mut Env, + object: napi_value, + key: napi_value, + value: napi_value, +) -> napi_status { + check_arg!(env, key); + check_arg!(env, value); + + let scope = &mut env.scope(); + + let Some(object) = object.and_then(|o| o.to_object(scope)) else { + return napi_object_expected; + }; + + if object.set(scope, key.unwrap(), value.unwrap()).is_none() { + return napi_generic_failure; + }; + + napi_ok +} + +#[napi_sym] +fn napi_has_property( + env: &mut Env, + object: napi_value, + key: napi_value, + result: *mut bool, +) -> napi_status { + check_arg!(env, key); + check_arg!(env, result); + + let scope = &mut env.scope(); + + let Some(object) = object.and_then(|o| o.to_object(scope)) else { + return napi_object_expected; + }; + + let Some(has) = object.has(scope, key.unwrap()) else { + return napi_generic_failure; + }; + + unsafe { + *result = has; + } + + napi_ok +} + +#[napi_sym] +fn napi_get_property<'s>( + env: &'s mut Env, + object: napi_value, + key: napi_value, + result: *mut napi_value<'s>, +) -> napi_status { + check_arg!(env, key); + check_arg!(env, result); + + let scope = &mut env.scope(); + + let Some(object) = object.and_then(|o| o.to_object(scope)) else { + return napi_object_expected; + }; + + let Some(value) = object.get(scope, key.unwrap()) else { + return napi_generic_failure; + }; + + unsafe { + *result = value.into(); + } + + napi_ok +} + +#[napi_sym] +fn napi_delete_property( + env: &mut Env, + object: napi_value, + key: napi_value, + result: *mut bool, +) -> napi_status { + check_arg!(env, key); + + let scope = &mut env.scope(); + + let Some(object) = object.and_then(|o| o.to_object(scope)) else { + return napi_object_expected; + }; + + let Some(deleted) = object.delete(scope, key.unwrap()) else { + return napi_generic_failure; + }; + + if !result.is_null() { + unsafe { + *result = deleted; + } + } + + napi_ok +} + +#[napi_sym] +fn 
napi_has_own_property( + env: &mut Env, + object: napi_value, + key: napi_value, + result: *mut bool, +) -> napi_status { + check_arg!(env, key); + check_arg!(env, result); + + let scope = &mut env.scope(); + + let Some(object) = object.and_then(|o| o.to_object(scope)) else { + return napi_object_expected; + }; + + let Ok(key) = v8::Local::::try_from(key.unwrap()) else { + return napi_name_expected; + }; + + let Some(has_own) = object.has_own_property(scope, key) else { + return napi_generic_failure; + }; + + unsafe { + *result = has_own; + } + + napi_ok +} + +#[napi_sym] +fn napi_has_named_property<'s>( + env: &'s mut Env, + object: napi_value<'s>, + utf8name: *const c_char, + result: *mut bool, +) -> napi_status { + let env_ptr = env as *mut Env; + check_arg!(env, result); + + let Some(object) = object.and_then(|o| o.to_object(&mut env.scope())) else { + return napi_object_expected; + }; + + let key = match unsafe { check_new_from_utf8(env_ptr, utf8name) } { + Ok(key) => key, + Err(status) => return status, + }; + + let Some(has_property) = object.has(&mut env.scope(), key.into()) else { + return napi_generic_failure; + }; + + unsafe { + *result = has_property; + } + + napi_ok +} + +#[napi_sym] +fn napi_set_named_property<'s>( + env: &'s mut Env, + object: napi_value<'s>, + utf8name: *const c_char, + value: napi_value<'s>, +) -> napi_status { + check_arg!(env, value); + let env_ptr = env as *mut Env; + + let Some(object) = object.and_then(|o| o.to_object(&mut env.scope())) else { + return napi_object_expected; + }; + + let key = match unsafe { check_new_from_utf8(env_ptr, utf8name) } { + Ok(key) => key, + Err(status) => return status, + }; + + let value = value.unwrap(); + + if !object + .set(&mut env.scope(), key.into(), value) + .unwrap_or(false) + { + return napi_generic_failure; + } + + napi_ok +} + +#[napi_sym] +fn napi_get_named_property<'s>( + env: &'s mut Env, + object: napi_value<'s>, + utf8name: *const c_char, + result: *mut napi_value<'s>, +) -> 
napi_status { + check_arg!(env, result); + let env_ptr = env as *mut Env; + + let Some(object) = object.and_then(|o| o.to_object(&mut env.scope())) else { + return napi_object_expected; + }; + + let key = match unsafe { check_new_from_utf8(env_ptr, utf8name) } { + Ok(key) => key, + Err(status) => return status, + }; + + let Some(value) = object.get(&mut env.scope(), key.into()) else { + return napi_generic_failure; + }; + + unsafe { + *result = value.into(); + } + + napi_ok +} + +#[napi_sym] +fn napi_set_element<'s>( + env: &'s mut Env, + object: napi_value<'s>, + index: u32, + value: napi_value<'s>, +) -> napi_status { + check_arg!(env, value); + + let scope = &mut env.scope(); + + let Some(object) = object.and_then(|o| o.to_object(scope)) else { + return napi_object_expected; + }; + + if !object + .set_index(scope, index, value.unwrap()) + .unwrap_or(false) + { + return napi_generic_failure; + } + + napi_ok +} + +#[napi_sym] +fn napi_has_element( + env: &mut Env, + object: napi_value, + index: u32, + result: *mut bool, +) -> napi_status { + check_arg!(env, result); + + let scope = &mut env.scope(); + + let Some(object) = object.and_then(|o| o.to_object(scope)) else { + return napi_object_expected; + }; + + let Some(has) = object.has_index(scope, index) else { + return napi_generic_failure; + }; + + unsafe { + *result = has; + } + + napi_ok +} + +#[napi_sym] +fn napi_get_element<'s>( + env: &'s mut Env, + object: napi_value, + index: u32, + result: *mut napi_value<'s>, +) -> napi_status { + check_arg!(env, result); + + let scope = &mut env.scope(); + + let Some(object) = object.and_then(|o| o.to_object(scope)) else { + return napi_object_expected; + }; + + let Some(value) = object.get_index(scope, index) else { + return napi_generic_failure; + }; + + unsafe { + *result = value.into(); + } + + napi_ok +} + +#[napi_sym] +fn napi_delete_element( + env: &mut Env, + object: napi_value, + index: u32, + result: *mut bool, +) -> napi_status { + let scope = &mut 
env.scope(); + + let Some(object) = object.and_then(|o| o.to_object(scope)) else { + return napi_object_expected; + }; + + let Some(deleted) = object.delete_index(scope, index) else { + return napi_generic_failure; + }; + + if !result.is_null() { + unsafe { + *result = deleted; + } + } + + napi_ok +} + +#[napi_sym] +fn napi_define_properties( + env: &mut Env, + object: napi_value, + property_count: usize, + properties: *const napi_property_descriptor, +) -> napi_status { + let env_ptr = env as *mut Env; + + if property_count > 0 { + check_arg!(env, properties); + } + + let scope = &mut env.scope(); + + let Some(object) = object.and_then(|o| o.to_object(scope)) else { + return napi_object_expected; + }; + + let properties = if property_count == 0 { + &[] + } else { + unsafe { std::slice::from_raw_parts(properties, property_count) } + }; + for property in properties { + let property_name = + match unsafe { v8_name_from_property_descriptor(env_ptr, property) } { + Ok(name) => name, + Err(status) => return status, + }; + + let writable = property.attributes & napi_writable != 0; + let enumerable = property.attributes & napi_enumerable != 0; + let configurable = property.attributes & napi_configurable != 0; + + if property.getter.is_some() || property.setter.is_some() { + let local_getter: v8::Local = if let Some(getter) = + property.getter + { + create_function(&mut env.scope(), env_ptr, None, getter, property.data) + .into() + } else { + v8::undefined(scope).into() + }; + let local_setter: v8::Local = if let Some(setter) = + property.setter + { + create_function(&mut env.scope(), env_ptr, None, setter, property.data) + .into() + } else { + v8::undefined(scope).into() + }; + + let mut desc = + v8::PropertyDescriptor::new_from_get_set(local_getter, local_setter); + desc.set_enumerable(enumerable); + desc.set_configurable(configurable); + + if !object + .define_property(scope, property_name, &desc) + .unwrap_or(false) + { + return napi_invalid_arg; + } + } else if let 
Some(method) = property.method { + let method: v8::Local = { + let function = create_function( + &mut env.scope(), + env_ptr, + None, + method, + property.data, + ); + function.into() + }; + + let mut desc = + v8::PropertyDescriptor::new_from_value_writable(method, writable); + desc.set_enumerable(enumerable); + desc.set_configurable(configurable); + + if !object + .define_property(scope, property_name, &desc) + .unwrap_or(false) + { + return napi_generic_failure; + } + } else { + let value = property.value.unwrap(); + + if enumerable & writable & configurable { + if !object + .create_data_property(scope, property_name, value) + .unwrap_or(false) + { + return napi_invalid_arg; + } + } else { + let mut desc = + v8::PropertyDescriptor::new_from_value_writable(value, writable); + desc.set_enumerable(enumerable); + desc.set_configurable(configurable); + + if !object + .define_property(scope, property_name, &desc) + .unwrap_or(false) + { + return napi_invalid_arg; + } + } + } + } + + napi_ok +} + +#[napi_sym] +fn napi_object_freeze(env: &mut Env, object: napi_value) -> napi_status { + let scope = &mut env.scope(); + + let Some(object) = object.and_then(|o| o.to_object(scope)) else { + return napi_object_expected; + }; + + if !object + .set_integrity_level(scope, v8::IntegrityLevel::Frozen) + .unwrap_or(false) + { + return napi_generic_failure; + } + + napi_ok +} + +#[napi_sym] +fn napi_object_seal(env: &mut Env, object: napi_value) -> napi_status { + let scope = &mut env.scope(); + + let Some(object) = object.and_then(|o| o.to_object(scope)) else { + return napi_object_expected; + }; + + if !object + .set_integrity_level(scope, v8::IntegrityLevel::Sealed) + .unwrap_or(false) + { + return napi_generic_failure; + } + + napi_ok +} + +#[napi_sym] +fn napi_is_array( + env: *mut Env, + value: napi_value, + result: *mut bool, +) -> napi_status { + let env = check_env!(env); + check_arg!(env, value); + check_arg!(env, result); + + let value = value.unwrap(); + + unsafe { + 
*result = value.is_array(); + } + + napi_clear_last_error(env) +} + +#[napi_sym] +fn napi_get_array_length( + env: &mut Env, + value: napi_value, + result: *mut u32, +) -> napi_status { + check_arg!(env, value); + check_arg!(env, result); + + let value = value.unwrap(); + + match v8::Local::::try_from(value) { + Ok(array) => { + unsafe { + *result = array.length(); + } + napi_ok + } + Err(_) => napi_array_expected, + } +} + +#[napi_sym] +fn napi_strict_equals( + env: &mut Env, + lhs: napi_value, + rhs: napi_value, + result: *mut bool, +) -> napi_status { + check_arg!(env, lhs); + check_arg!(env, rhs); + check_arg!(env, result); + + unsafe { + *result = lhs.unwrap().strict_equals(rhs.unwrap()); + } + + napi_ok +} + +#[napi_sym] +fn napi_get_prototype<'s>( + env: &'s mut Env, + object: napi_value, + result: *mut napi_value<'s>, +) -> napi_status { + check_arg!(env, result); + + let scope = &mut env.scope(); + + let Some(object) = object.and_then(|o| o.to_object(scope)) else { + return napi_object_expected; + }; + + let Some(proto) = object.get_prototype(scope) else { + return napi_generic_failure; + }; + + unsafe { + *result = proto.into(); + } + + napi_ok +} + +#[napi_sym] +fn napi_create_object( + env_ptr: *mut Env, + result: *mut napi_value, +) -> napi_status { + let env = check_env!(env_ptr); + check_arg!(env, result); + + unsafe { + *result = v8::Object::new(&mut env.scope()).into(); + } + + return napi_clear_last_error(env_ptr); +} + +#[napi_sym] +fn napi_create_array( + env_ptr: *mut Env, + result: *mut napi_value, +) -> napi_status { + let env = check_env!(env_ptr); + check_arg!(env, result); + + unsafe { + *result = v8::Array::new(&mut env.scope(), 0).into(); + } + + return napi_clear_last_error(env_ptr); +} + +#[napi_sym] +fn napi_create_array_with_length( + env_ptr: *mut Env, + length: usize, + result: *mut napi_value, +) -> napi_status { + let env = check_env!(env_ptr); + check_arg!(env, result); + + unsafe { + *result = v8::Array::new(&mut env.scope(), 
length as _).into(); + } + + return napi_clear_last_error(env_ptr); +} + +#[napi_sym] +fn napi_create_string_latin1( + env_ptr: *mut Env, + string: *const c_char, + length: usize, + result: *mut napi_value, +) -> napi_status { + let env = check_env!(env_ptr); + if length > 0 { + check_arg!(env, string); + } + crate::return_status_if_false!( + env, + (length == NAPI_AUTO_LENGTH) || length <= INT_MAX as _, + napi_invalid_arg + ); + + let buffer = if length > 0 { + unsafe { + std::slice::from_raw_parts( + string as _, + if length == NAPI_AUTO_LENGTH { + std::ffi::CStr::from_ptr(string).to_bytes().len() + } else { + length + }, + ) + } + } else { + &[] + }; + + let Some(string) = v8::String::new_from_one_byte( + &mut env.scope(), + buffer, + v8::NewStringType::Normal, + ) else { + return napi_set_last_error(env_ptr, napi_generic_failure); + }; + + unsafe { + *result = string.into(); + } + + return napi_clear_last_error(env_ptr); +} + +#[napi_sym] +pub(crate) fn napi_create_string_utf8( + env_ptr: *mut Env, + string: *const c_char, + length: usize, + result: *mut napi_value, +) -> napi_status { + let env = check_env!(env_ptr); + if length > 0 { + check_arg!(env, string); + } + crate::return_status_if_false!( + env, + (length == NAPI_AUTO_LENGTH) || length <= INT_MAX as _, + napi_invalid_arg + ); + + let buffer = if length > 0 { + unsafe { + std::slice::from_raw_parts( + string as _, + if length == NAPI_AUTO_LENGTH { + std::ffi::CStr::from_ptr(string).to_bytes().len() + } else { + length + }, + ) + } + } else { + &[] + }; + + let Some(string) = v8::String::new_from_utf8( + &mut env.scope(), + buffer, + v8::NewStringType::Normal, + ) else { + return napi_set_last_error(env_ptr, napi_generic_failure); + }; + + unsafe { + *result = string.into(); + } + + return napi_clear_last_error(env_ptr); +} + +#[napi_sym] +fn napi_create_string_utf16( + env_ptr: *mut Env, + string: *const u16, + length: usize, + result: *mut napi_value, +) -> napi_status { + let env = 
check_env!(env_ptr); + if length > 0 { + check_arg!(env, string); + } + crate::return_status_if_false!( + env, + (length == NAPI_AUTO_LENGTH) || length <= INT_MAX as _, + napi_invalid_arg + ); + + let buffer = if length > 0 { + unsafe { + std::slice::from_raw_parts( + string, + if length == NAPI_AUTO_LENGTH { + let mut length = 0; + while *(string.add(length)) != 0 { + length += 1; + } + length + } else { + length + }, + ) + } + } else { + &[] + }; + + let Some(string) = v8::String::new_from_two_byte( + &mut env.scope(), + buffer, + v8::NewStringType::Normal, + ) else { + return napi_set_last_error(env_ptr, napi_generic_failure); + }; + + unsafe { + *result = string.into(); + } + + return napi_clear_last_error(env_ptr); +} + +#[napi_sym] +fn node_api_create_external_string_latin1( + env_ptr: *mut Env, + string: *const c_char, + length: usize, + nogc_finalize_callback: Option, + finalize_hint: *mut c_void, + result: *mut napi_value, + copied: *mut bool, +) -> napi_status { + let status = + unsafe { napi_create_string_latin1(env_ptr, string, length, result) }; + + if status == napi_ok { + unsafe { + *copied = true; + } + + if let Some(finalize) = nogc_finalize_callback { + unsafe { + finalize(env_ptr as napi_env, string as *mut c_void, finalize_hint); + } + } + } + + status +} + +#[napi_sym] +fn node_api_create_external_string_utf16( + env_ptr: *mut Env, + string: *const u16, + length: usize, + nogc_finalize_callback: Option, + finalize_hint: *mut c_void, + result: *mut napi_value, + copied: *mut bool, +) -> napi_status { + let status = + unsafe { napi_create_string_utf16(env_ptr, string, length, result) }; + + if status == napi_ok { + unsafe { + *copied = true; + } + + if let Some(finalize) = nogc_finalize_callback { + unsafe { + finalize(env_ptr as napi_env, string as *mut c_void, finalize_hint); + } + } + } + + status +} + +#[napi_sym] +fn node_api_create_property_key_utf16( + env_ptr: *mut Env, + string: *const u16, + length: usize, + result: *mut napi_value, +) 
-> napi_status { + let env = check_env!(env_ptr); + if length > 0 { + check_arg!(env, string); + } + crate::return_status_if_false!( + env, + (length == NAPI_AUTO_LENGTH) || length <= INT_MAX as _, + napi_invalid_arg + ); + + let buffer = if length > 0 { + unsafe { + std::slice::from_raw_parts( + string, + if length == NAPI_AUTO_LENGTH { + let mut length = 0; + while *(string.add(length)) != 0 { + length += 1; + } + length + } else { + length + }, + ) + } + } else { + &[] + }; + + let Some(string) = v8::String::new_from_two_byte( + &mut env.scope(), + buffer, + v8::NewStringType::Internalized, + ) else { + return napi_set_last_error(env_ptr, napi_generic_failure); + }; + + unsafe { + *result = string.into(); + } + + return napi_clear_last_error(env_ptr); +} + +#[napi_sym] +fn napi_create_double( + env_ptr: *mut Env, + value: f64, + result: *mut napi_value, +) -> napi_status { + let env = check_env!(env_ptr); + check_arg!(env, result); + + unsafe { + *result = v8::Number::new(&mut env.scope(), value).into(); + } + + napi_clear_last_error(env_ptr) +} + +#[napi_sym] +fn napi_create_int32( + env_ptr: *mut Env, + value: i32, + result: *mut napi_value, +) -> napi_status { + let env = check_env!(env_ptr); + check_arg!(env, result); + + unsafe { + *result = v8::Integer::new(&mut env.scope(), value).into(); + } + + napi_clear_last_error(env_ptr) +} + +#[napi_sym] +fn napi_create_uint32( + env_ptr: *mut Env, + value: u32, + result: *mut napi_value, +) -> napi_status { + let env = check_env!(env_ptr); + check_arg!(env, result); + + unsafe { + *result = v8::Integer::new_from_unsigned(&mut env.scope(), value).into(); + } + + napi_clear_last_error(env_ptr) +} + +#[napi_sym] +fn napi_create_int64( + env_ptr: *mut Env, + value: i64, + result: *mut napi_value, +) -> napi_status { + let env = check_env!(env_ptr); + check_arg!(env, result); + + unsafe { + *result = v8::Number::new(&mut env.scope(), value as _).into(); + } + + napi_clear_last_error(env_ptr) +} + +#[napi_sym] +fn 
napi_create_bigint_int64( + env_ptr: *mut Env, + value: i64, + result: *mut napi_value, +) -> napi_status { + let env = check_env!(env_ptr); + check_arg!(env, result); + + unsafe { + *result = v8::BigInt::new_from_i64(&mut env.scope(), value).into(); + } + + napi_clear_last_error(env_ptr) +} + +#[napi_sym] +fn napi_create_bigint_uint64( + env_ptr: *mut Env, + value: u64, + result: *mut napi_value, +) -> napi_status { + let env = check_env!(env_ptr); + check_arg!(env, result); + + unsafe { + *result = v8::BigInt::new_from_u64(&mut env.scope(), value).into(); + } + + napi_clear_last_error(env_ptr) +} + +#[napi_sym] +fn napi_create_bigint_words<'s>( + env: &'s mut Env, + sign_bit: bool, + word_count: usize, + words: *const u64, + result: *mut napi_value<'s>, +) -> napi_status { + check_arg!(env, words); + check_arg!(env, result); + + if word_count > INT_MAX as _ { + return napi_invalid_arg; + } + + match v8::BigInt::new_from_words(&mut env.scope(), sign_bit, unsafe { + std::slice::from_raw_parts(words, word_count) + }) { + Some(value) => unsafe { + *result = value.into(); + }, + None => { + return napi_generic_failure; + } + } + + napi_ok +} + +#[napi_sym] +fn napi_get_boolean( + env: *mut Env, + value: bool, + result: *mut napi_value, +) -> napi_status { + let env = check_env!(env); + check_arg!(env, result); + + unsafe { + *result = v8::Boolean::new(env.isolate(), value).into(); + } + + return napi_clear_last_error(env); +} + +#[napi_sym] +fn napi_create_symbol( + env_ptr: *mut Env, + description: napi_value, + result: *mut napi_value, +) -> napi_status { + let env = check_env!(env_ptr); + check_arg!(env, result); + + let description = if let Some(d) = *description { + let Some(d) = d.to_string(&mut env.scope()) else { + return napi_set_last_error(env, napi_string_expected); + }; + Some(d) + } else { + None + }; + + unsafe { + *result = v8::Symbol::new(&mut env.scope(), description).into(); + } + + return napi_clear_last_error(env_ptr); +} + +#[napi_sym] +fn 
node_api_symbol_for( + env: *mut Env, + utf8description: *const c_char, + length: usize, + result: *mut napi_value, +) -> napi_status { + { + let env = check_env!(env); + check_arg!(env, result); + + let description_string = + match unsafe { check_new_from_utf8_len(env, utf8description, length) } { + Ok(s) => s, + Err(status) => return napi_set_last_error(env, status), + }; + + unsafe { + *result = + v8::Symbol::for_key(&mut env.scope(), description_string).into(); + } + } + + napi_clear_last_error(env) +} + +macro_rules! napi_create_error_impl { + ($env_ptr:ident, $code:ident, $msg:ident, $result:ident, $error:ident) => {{ + let env_ptr = $env_ptr; + let code = $code; + let msg = $msg; + let result = $result; + + let env = check_env!(env_ptr); + check_arg!(env, msg); + check_arg!(env, result); + + let Some(message) = + msg.and_then(|v| v8::Local::::try_from(v).ok()) + else { + return napi_set_last_error(env_ptr, napi_string_expected); + }; + + let error = v8::Exception::$error(&mut env.scope(), message); + + if let Some(code) = *code { + let error_obj: v8::Local = error.try_into().unwrap(); + let code_key = v8::String::new(&mut env.scope(), "code").unwrap(); + if !error_obj + .set(&mut env.scope(), code_key.into(), code) + .unwrap_or(false) + { + return napi_set_last_error(env_ptr, napi_generic_failure); + } + } + + unsafe { + *result = error.into(); + } + + return napi_clear_last_error(env_ptr); + }}; +} + +#[napi_sym] +fn napi_create_error( + env_ptr: *mut Env, + code: napi_value, + msg: napi_value, + result: *mut napi_value, +) -> napi_status { + napi_create_error_impl!(env_ptr, code, msg, result, error) +} + +#[napi_sym] +fn napi_create_type_error( + env_ptr: *mut Env, + code: napi_value, + msg: napi_value, + result: *mut napi_value, +) -> napi_status { + napi_create_error_impl!(env_ptr, code, msg, result, type_error) +} + +#[napi_sym] +fn napi_create_range_error( + env_ptr: *mut Env, + code: napi_value, + msg: napi_value, + result: *mut napi_value, +) -> 
napi_status { + napi_create_error_impl!(env_ptr, code, msg, result, range_error) +} + +#[napi_sym] +fn node_api_create_syntax_error( + env_ptr: *mut Env, + code: napi_value, + msg: napi_value, + result: *mut napi_value, +) -> napi_status { + napi_create_error_impl!(env_ptr, code, msg, result, syntax_error) +} + +pub fn get_value_type(value: v8::Local) -> Option { + if value.is_undefined() { + Some(napi_undefined) + } else if value.is_null() { + Some(napi_null) + } else if value.is_external() { + Some(napi_external) + } else if value.is_boolean() { + Some(napi_boolean) + } else if value.is_number() { + Some(napi_number) + } else if value.is_big_int() { + Some(napi_bigint) + } else if value.is_string() { + Some(napi_string) + } else if value.is_symbol() { + Some(napi_symbol) + } else if value.is_function() { + Some(napi_function) + } else if value.is_object() { + Some(napi_object) + } else { + None + } +} + +#[napi_sym] +fn napi_typeof( + env: *mut Env, + value: napi_value, + result: *mut napi_valuetype, +) -> napi_status { + let env = check_env!(env); + check_arg!(env, value); + check_arg!(env, result); + + let Some(ty) = get_value_type(value.unwrap()) else { + return napi_set_last_error(env, napi_invalid_arg); + }; + + unsafe { + *result = ty; + } + + napi_clear_last_error(env) +} + +#[napi_sym] +fn napi_get_undefined(env: *mut Env, result: *mut napi_value) -> napi_status { + let env = check_env!(env); + check_arg!(env, result); + + unsafe { + *result = v8::undefined(&mut env.scope()).into(); + } + + return napi_clear_last_error(env); +} + +#[napi_sym] +fn napi_get_null(env: *mut Env, result: *mut napi_value) -> napi_status { + let env = check_env!(env); + check_arg!(env, result); + + unsafe { + *result = v8::null(&mut env.scope()).into(); + } + + return napi_clear_last_error(env); +} + +#[napi_sym] +fn napi_get_cb_info( + env: *mut Env, + cbinfo: napi_callback_info, + argc: *mut i32, + argv: *mut napi_value, + this_arg: *mut napi_value, + data: *mut *mut c_void, 
+) -> napi_status { + let env = check_env!(env); + check_arg!(env, cbinfo); + + let cbinfo: &CallbackInfo = unsafe { &*(cbinfo as *const CallbackInfo) }; + let args = unsafe { &*(cbinfo.args as *const v8::FunctionCallbackArguments) }; + + if !argv.is_null() { + check_arg!(env, argc); + let argc = unsafe { *argc as usize }; + for i in 0..argc { + let arg = args.get(i as _); + unsafe { + *argv.add(i) = arg.into(); + } + } + } + + if !argc.is_null() { + unsafe { + *argc = args.length(); + } + } + + if !this_arg.is_null() { + unsafe { + *this_arg = args.this().into(); + } + } + + if !data.is_null() { + unsafe { + *data = cbinfo.cb_info; + } + } + + napi_clear_last_error(env); + napi_ok +} + +#[napi_sym] +fn napi_get_new_target( + env: *mut Env, + cbinfo: napi_callback_info, + result: *mut napi_value, +) -> napi_status { + let env = check_env!(env); + check_arg!(env, cbinfo); + check_arg!(env, result); + + let cbinfo: &CallbackInfo = unsafe { &*(cbinfo as *const CallbackInfo) }; + let args = unsafe { &*(cbinfo.args as *const v8::FunctionCallbackArguments) }; + + unsafe { + *result = args.new_target().into(); + } + + return napi_clear_last_error(env); +} + +#[napi_sym] +fn napi_call_function<'s>( + env: &'s mut Env, + recv: napi_value<'s>, + func: napi_value<'s>, + argc: usize, + argv: *const napi_value<'s>, + result: *mut napi_value<'s>, +) -> napi_status { + check_arg!(env, recv); + let args = if argc > 0 { + check_arg!(env, argv); + unsafe { + std::slice::from_raw_parts(argv as *mut v8::Local, argc) + } + } else { + &[] + }; + + let Some(func) = + func.and_then(|f| v8::Local::::try_from(f).ok()) + else { + return napi_function_expected; + }; + + let Some(v) = func.call(&mut env.scope(), recv.unwrap(), args) else { + return napi_generic_failure; + }; + + if !result.is_null() { + unsafe { + *result = v.into(); + } + } + + napi_ok +} + +#[napi_sym] +fn napi_get_global(env_ptr: *mut Env, result: *mut napi_value) -> napi_status { + let env = check_env!(env_ptr); + 
check_arg!(env, result); + + let global = v8::Local::new(&mut env.scope(), &env.global); + unsafe { + *result = global.into(); + } + + return napi_clear_last_error(env_ptr); +} + +#[napi_sym] +fn napi_throw(env: *mut Env, error: napi_value) -> napi_status { + let env = check_env!(env); + check_arg!(env, error); + + if env.last_exception.is_some() { + return napi_pending_exception; + } + + let error = error.unwrap(); + env.scope().throw_exception(error); + let error = v8::Global::new(&mut env.scope(), error); + env.last_exception = Some(error); + + napi_clear_last_error(env) +} + +macro_rules! napi_throw_error_impl { + ($env:ident, $code:ident, $msg:ident, $error:ident) => {{ + let env = check_env!($env); + let env_ptr = env as *mut Env; + let code = $code; + let msg = $msg; + + if env.last_exception.is_some() { + return napi_pending_exception; + } + + let str_ = match unsafe { check_new_from_utf8(env, msg) } { + Ok(s) => s, + Err(status) => return status, + }; + + let error = v8::Exception::$error(&mut env.scope(), str_); + + if !code.is_null() { + let error_obj: v8::Local = error.try_into().unwrap(); + let code = match unsafe { check_new_from_utf8(env_ptr, code) } { + Ok(s) => s, + Err(status) => return napi_set_last_error(env, status), + }; + let code_key = v8::String::new(&mut env.scope(), "code").unwrap(); + if !error_obj + .set(&mut env.scope(), code_key.into(), code.into()) + .unwrap_or(false) + { + return napi_set_last_error(env, napi_generic_failure); + } + } + + env.scope().throw_exception(error); + let error = v8::Global::new(&mut env.scope(), error); + env.last_exception = Some(error); + + napi_clear_last_error(env) + }}; +} + +#[napi_sym] +fn napi_throw_error( + env: *mut Env, + code: *const c_char, + msg: *const c_char, +) -> napi_status { + napi_throw_error_impl!(env, code, msg, error) +} + +#[napi_sym] +fn napi_throw_type_error( + env: *mut Env, + code: *const c_char, + msg: *const c_char, +) -> napi_status { + napi_throw_error_impl!(env, code, msg, 
type_error) +} + +#[napi_sym] +fn napi_throw_range_error( + env: *mut Env, + code: *const c_char, + msg: *const c_char, +) -> napi_status { + napi_throw_error_impl!(env, code, msg, range_error) +} + +#[napi_sym] +fn node_api_throw_syntax_error( + env: *mut Env, + code: *const c_char, + msg: *const c_char, +) -> napi_status { + napi_throw_error_impl!(env, code, msg, syntax_error) +} + +#[napi_sym] +fn napi_is_error( + env: *mut Env, + value: napi_value, + result: *mut bool, +) -> napi_status { + let env = check_env!(env); + check_arg!(env, value); + check_arg!(env, result); + + unsafe { + *result = value.unwrap().is_native_error(); + } + + return napi_clear_last_error(env); +} + +#[napi_sym] +fn napi_get_value_double( + env_ptr: *mut Env, + value: napi_value, + result: *mut f64, +) -> napi_status { + let env = check_env!(env_ptr); + check_arg!(env, value); + check_arg!(env, result); + + let Some(number) = + value.and_then(|v| v8::Local::::try_from(v).ok()) + else { + return napi_set_last_error(env_ptr, napi_number_expected); + }; + + unsafe { + *result = number.value(); + } + + return napi_clear_last_error(env_ptr); +} + +#[napi_sym] +fn napi_get_value_int32( + env_ptr: *mut Env, + value: napi_value, + result: *mut i32, +) -> napi_status { + let env = check_env!(env_ptr); + check_arg!(env, value); + check_arg!(env, result); + + let Some(value) = value.unwrap().int32_value(&mut env.scope()) else { + return napi_set_last_error(env, napi_number_expected); + }; + + unsafe { + *result = value; + } + + return napi_clear_last_error(env_ptr); +} + +#[napi_sym] +fn napi_get_value_uint32( + env_ptr: *mut Env, + value: napi_value, + result: *mut u32, +) -> napi_status { + let env = check_env!(env_ptr); + check_arg!(env, value); + check_arg!(env, result); + + let Some(value) = value.unwrap().uint32_value(&mut env.scope()) else { + return napi_set_last_error(env, napi_number_expected); + }; + + unsafe { + *result = value; + } + + return napi_clear_last_error(env_ptr); +} + 
+#[napi_sym] +fn napi_get_value_int64( + env_ptr: *mut Env, + value: napi_value, + result: *mut i64, +) -> napi_status { + let env = check_env!(env_ptr); + check_arg!(env, value); + check_arg!(env, result); + + let Some(number) = + value.and_then(|v| v8::Local::::try_from(v).ok()) + else { + return napi_set_last_error(env_ptr, napi_number_expected); + }; + + let value = number.value(); + + unsafe { + if value.is_finite() { + *result = value as _; + } else { + *result = 0; + } + } + + return napi_clear_last_error(env_ptr); +} + +#[napi_sym] +fn napi_get_value_bigint_int64( + env_ptr: *mut Env, + value: napi_value, + result: *mut i64, + lossless: *mut bool, +) -> napi_status { + let env = check_env!(env_ptr); + check_arg!(env, value); + check_arg!(env, result); + check_arg!(env, lossless); + + let Some(bigint) = + value.and_then(|v| v8::Local::::try_from(v).ok()) + else { + return napi_set_last_error(env_ptr, napi_bigint_expected); + }; + + let (result_, lossless_) = bigint.i64_value(); + + unsafe { + *result = result_; + *lossless = lossless_; + } + + return napi_clear_last_error(env_ptr); +} + +#[napi_sym] +fn napi_get_value_bigint_uint64( + env_ptr: *mut Env, + value: napi_value, + result: *mut u64, + lossless: *mut bool, +) -> napi_status { + let env = check_env!(env_ptr); + check_arg!(env, value); + check_arg!(env, result); + check_arg!(env, lossless); + + let Some(bigint) = + value.and_then(|v| v8::Local::::try_from(v).ok()) + else { + return napi_set_last_error(env_ptr, napi_bigint_expected); + }; + + let (result_, lossless_) = bigint.u64_value(); + + unsafe { + *result = result_; + *lossless = lossless_; + } + + return napi_clear_last_error(env_ptr); +} + +#[napi_sym] +fn napi_get_value_bigint_words( + env_ptr: *mut Env, + value: napi_value, + sign_bit: *mut i32, + word_count: *mut usize, + words: *mut u64, +) -> napi_status { + let env = check_env!(env_ptr); + check_arg!(env, value); + check_arg!(env, word_count); + + let Some(bigint) = + value.and_then(|v| 
v8::Local::::try_from(v).ok()) + else { + return napi_set_last_error(env_ptr, napi_bigint_expected); + }; + + let word_count_int; + + if sign_bit.is_null() && words.is_null() { + word_count_int = bigint.word_count(); + } else { + check_arg!(env, sign_bit); + check_arg!(env, words); + let out_words = + unsafe { std::slice::from_raw_parts_mut(words, *word_count) }; + let (sign, slice_) = bigint.to_words_array(out_words); + word_count_int = slice_.len(); + unsafe { + *sign_bit = sign as i32; + } + } + + unsafe { + *word_count = word_count_int; + } + + return napi_clear_last_error(env_ptr); +} + +#[napi_sym] +fn napi_get_value_bool( + env_ptr: *mut Env, + value: napi_value, + result: *mut bool, +) -> napi_status { + let env = check_env!(env_ptr); + check_arg!(env, value); + check_arg!(env, result); + + let Some(boolean) = + value.and_then(|v| v8::Local::::try_from(v).ok()) + else { + return napi_set_last_error(env_ptr, napi_boolean_expected); + }; + + unsafe { + *result = boolean.is_true(); + } + + return napi_clear_last_error(env_ptr); +} + +#[napi_sym] +fn napi_get_value_string_latin1( + env_ptr: *mut Env, + value: napi_value, + buf: *mut c_char, + bufsize: usize, + result: *mut usize, +) -> napi_status { + let env = check_env!(env_ptr); + check_arg!(env, value); + + let Some(value) = + value.and_then(|v| v8::Local::::try_from(v).ok()) + else { + return napi_set_last_error(env_ptr, napi_string_expected); + }; + + if buf.is_null() { + check_arg!(env, result); + unsafe { + *result = value.length(); + } + } else if bufsize != 0 { + let buffer = + unsafe { std::slice::from_raw_parts_mut(buf as _, bufsize - 1) }; + let copied = value.write_one_byte( + &mut env.scope(), + buffer, + 0, + v8::WriteOptions::NO_NULL_TERMINATION, + ); + unsafe { + buf.add(copied).write(0); + } + if !result.is_null() { + unsafe { + *result = copied; + } + } + } else if !result.is_null() { + unsafe { + *result = 0; + } + } + + napi_clear_last_error(env_ptr) +} + +#[napi_sym] +fn 
napi_get_value_string_utf8( + env_ptr: *mut Env, + value: napi_value, + buf: *mut u8, + bufsize: usize, + result: *mut usize, +) -> napi_status { + let env = check_env!(env_ptr); + check_arg!(env, value); + + let Some(value) = + value.and_then(|v| v8::Local::::try_from(v).ok()) + else { + return napi_set_last_error(env_ptr, napi_string_expected); + }; + + if buf.is_null() { + check_arg!(env, result); + unsafe { + *result = value.utf8_length(env.isolate()); + } + } else if bufsize != 0 { + let buffer = + unsafe { std::slice::from_raw_parts_mut(buf as _, bufsize - 1) }; + let copied = value.write_utf8( + &mut env.scope(), + buffer, + None, + v8::WriteOptions::REPLACE_INVALID_UTF8 + | v8::WriteOptions::NO_NULL_TERMINATION, + ); + unsafe { + buf.add(copied).write(0); + } + if !result.is_null() { + unsafe { + *result = copied; + } + } + } else if !result.is_null() { + unsafe { + *result = 0; + } + } + + napi_clear_last_error(env_ptr) +} + +#[napi_sym] +fn napi_get_value_string_utf16( + env_ptr: *mut Env, + value: napi_value, + buf: *mut u16, + bufsize: usize, + result: *mut usize, +) -> napi_status { + let env = check_env!(env_ptr); + check_arg!(env, value); + + let Some(value) = + value.and_then(|v| v8::Local::::try_from(v).ok()) + else { + return napi_set_last_error(env_ptr, napi_string_expected); + }; + + if buf.is_null() { + check_arg!(env, result); + unsafe { + *result = value.length(); + } + } else if bufsize != 0 { + let buffer = + unsafe { std::slice::from_raw_parts_mut(buf as _, bufsize - 1) }; + let copied = value.write( + &mut env.scope(), + buffer, + 0, + v8::WriteOptions::NO_NULL_TERMINATION, + ); + unsafe { + buf.add(copied).write(0); + } + if !result.is_null() { + unsafe { + *result = copied; + } + } + } else if !result.is_null() { + unsafe { + *result = 0; + } + } + + napi_clear_last_error(env_ptr) +} + +#[napi_sym] +fn napi_coerce_to_bool<'s>( + env: &'s mut Env, + value: napi_value, + result: *mut napi_value<'s>, +) -> napi_status { + check_arg!(env, 
value); + check_arg!(env, result); + + let coerced = value.unwrap().to_boolean(&mut env.scope()); + + unsafe { + *result = coerced.into(); + } + + napi_ok +} + +#[napi_sym] +fn napi_coerce_to_number<'s>( + env: &'s mut Env, + value: napi_value, + result: *mut napi_value<'s>, +) -> napi_status { + check_arg!(env, value); + check_arg!(env, result); + + let Some(coerced) = value.unwrap().to_number(&mut env.scope()) else { + return napi_number_expected; + }; + + unsafe { + *result = coerced.into(); + } + + napi_ok +} + +#[napi_sym] +fn napi_coerce_to_object<'s>( + env: &'s mut Env, + value: napi_value, + result: *mut napi_value<'s>, +) -> napi_status { + check_arg!(env, value); + check_arg!(env, result); + + let Some(coerced) = value.unwrap().to_object(&mut env.scope()) else { + return napi_object_expected; + }; + + unsafe { + *result = coerced.into(); + } + + napi_ok +} + +#[napi_sym] +fn napi_coerce_to_string<'s>( + env: &'s mut Env, + value: napi_value, + result: *mut napi_value<'s>, +) -> napi_status { + check_arg!(env, value); + check_arg!(env, result); + + let Some(coerced) = value.unwrap().to_string(&mut env.scope()) else { + return napi_string_expected; + }; + + unsafe { + *result = coerced.into(); + } + + napi_ok +} + +#[napi_sym] +fn napi_wrap( + env: &mut Env, + js_object: napi_value, + native_object: *mut c_void, + finalize_cb: Option, + finalize_hint: *mut c_void, + result: *mut napi_ref, +) -> napi_status { + check_arg!(env, js_object); + let env_ptr = env as *mut Env; + + let Some(obj) = + js_object.and_then(|v| v8::Local::::try_from(v).ok()) + else { + return napi_invalid_arg; + }; + + let napi_wrap = v8::Local::new(&mut env.scope(), &env.shared().napi_wrap); + + if obj + .has_private(&mut env.scope(), napi_wrap) + .unwrap_or(false) + { + return napi_invalid_arg; + } + + if !result.is_null() { + check_arg!(env, finalize_cb); + } + + let ownership = if result.is_null() { + ReferenceOwnership::Runtime + } else { + ReferenceOwnership::Userland + }; + let 
reference = Reference::new( + env_ptr, + obj.into(), + 0, + ownership, + finalize_cb, + native_object, + finalize_hint, + ); + + let reference = Reference::into_raw(reference) as *mut c_void; + + if !result.is_null() { + check_arg!(env, finalize_cb); + unsafe { + *result = reference; + } + } + + let external = v8::External::new(&mut env.scope(), reference); + assert!(obj + .set_private(&mut env.scope(), napi_wrap, external.into()) + .unwrap()); + + napi_ok +} + +fn unwrap( + env: &mut Env, + obj: napi_value, + result: *mut *mut c_void, + keep: bool, +) -> napi_status { + check_arg!(env, obj); + if keep { + check_arg!(env, result); + } + + let Some(obj) = obj.and_then(|v| v8::Local::::try_from(v).ok()) + else { + return napi_invalid_arg; + }; + + let napi_wrap = v8::Local::new(&mut env.scope(), &env.shared().napi_wrap); + let Some(val) = obj.get_private(&mut env.scope(), napi_wrap) else { + return napi_invalid_arg; + }; + + let Ok(external) = v8::Local::::try_from(val) else { + return napi_invalid_arg; + }; + + let reference = external.value() as *mut Reference; + let reference = unsafe { &mut *reference }; + + if !result.is_null() { + unsafe { + *result = reference.finalize_data; + } + } + + if !keep { + assert!(obj + .delete_private(&mut env.scope(), napi_wrap) + .unwrap_or(false)); + unsafe { Reference::remove(reference) }; + } + + napi_ok +} + +#[napi_sym] +fn napi_unwrap( + env: &mut Env, + obj: napi_value, + result: *mut *mut c_void, +) -> napi_status { + unwrap(env, obj, result, true) +} + +#[napi_sym] +fn napi_remove_wrap( + env: &mut Env, + obj: napi_value, + result: *mut *mut c_void, +) -> napi_status { + unwrap(env, obj, result, false) +} + +struct ExternalWrapper { + data: *mut c_void, + type_tag: Option, +} + +#[napi_sym] +fn napi_create_external<'s>( + env: &'s mut Env, + data: *mut c_void, + finalize_cb: Option, + finalize_hint: *mut c_void, + result: *mut napi_value<'s>, +) -> napi_status { + let env_ptr = env as *mut Env; + check_arg!(env, result); 
+ + let wrapper = Box::new(ExternalWrapper { + data, + type_tag: None, + }); + + let wrapper = Box::into_raw(wrapper); + let external = v8::External::new(&mut env.scope(), wrapper as _); + + if let Some(finalize_cb) = finalize_cb { + Reference::into_raw(Reference::new( + env_ptr, + external.into(), + 0, + ReferenceOwnership::Runtime, + Some(finalize_cb), + data, + finalize_hint, + )); + } + + unsafe { + *result = external.into(); + } + + napi_ok +} + +#[napi_sym] +fn napi_type_tag_object( + env: &mut Env, + object_or_external: napi_value, + type_tag: *const napi_type_tag, +) -> napi_status { + check_arg!(env, object_or_external); + check_arg!(env, type_tag); + + let val = object_or_external.unwrap(); + + if let Ok(external) = v8::Local::::try_from(val) { + let wrapper_ptr = external.value() as *mut ExternalWrapper; + let wrapper = unsafe { &mut *wrapper_ptr }; + if wrapper.type_tag.is_some() { + return napi_invalid_arg; + } + wrapper.type_tag = Some(unsafe { *type_tag }); + return napi_ok; + } + + let Some(object) = val.to_object(&mut env.scope()) else { + return napi_object_expected; + }; + + let key = v8::Local::new(&mut env.scope(), &env.shared().type_tag); + + if object.has_private(&mut env.scope(), key).unwrap_or(false) { + return napi_invalid_arg; + } + + let slice = unsafe { std::slice::from_raw_parts(type_tag as *const u64, 2) }; + let Some(tag) = v8::BigInt::new_from_words(&mut env.scope(), false, slice) + else { + return napi_generic_failure; + }; + + if !object + .set_private(&mut env.scope(), key, tag.into()) + .unwrap_or(false) + { + return napi_generic_failure; + } + + napi_ok +} + +#[napi_sym] +fn napi_check_object_type_tag( + env: &mut Env, + object_or_external: napi_value, + type_tag: *const napi_type_tag, + result: *mut bool, +) -> napi_status { + check_arg!(env, object_or_external); + check_arg!(env, type_tag); + check_arg!(env, result); + + let type_tag = unsafe { *type_tag }; + + let val = object_or_external.unwrap(); + + if let Ok(external) = 
v8::Local::::try_from(val) { + let wrapper_ptr = external.value() as *mut ExternalWrapper; + let wrapper = unsafe { &mut *wrapper_ptr }; + unsafe { + *result = match wrapper.type_tag { + Some(t) => t == type_tag, + None => false, + }; + }; + return napi_ok; + } + + let Some(object) = val.to_object(&mut env.scope()) else { + return napi_object_expected; + }; + + let key = v8::Local::new(&mut env.scope(), &env.shared().type_tag); + + let Some(val) = object.get_private(&mut env.scope(), key) else { + return napi_generic_failure; + }; + + unsafe { + *result = false; + } + + if let Ok(bigint) = v8::Local::::try_from(val) { + let mut words = [0u64; 2]; + let (sign, words) = bigint.to_words_array(&mut words); + if !sign { + let pass = if words.len() == 2 { + type_tag.lower == words[0] && type_tag.upper == words[1] + } else if words.len() == 1 { + type_tag.lower == words[0] && type_tag.upper == 0 + } else if words.is_empty() { + type_tag.lower == 0 && type_tag.upper == 0 + } else { + false + }; + unsafe { + *result = pass; + } + } + } + + napi_ok +} + +#[napi_sym] +fn napi_get_value_external( + env: *mut Env, + value: napi_value, + result: *mut *mut c_void, +) -> napi_status { + let env = check_env!(env); + check_arg!(env, value); + check_arg!(env, result); + + let Some(external) = + value.and_then(|v| v8::Local::::try_from(v).ok()) + else { + return napi_set_last_error(env, napi_invalid_arg); + }; + + let wrapper_ptr = external.value() as *const ExternalWrapper; + let wrapper = unsafe { &*wrapper_ptr }; + + unsafe { + *result = wrapper.data; + } + + napi_clear_last_error(env) +} + +#[napi_sym] +fn napi_create_reference( + env: *mut Env, + value: napi_value, + initial_refcount: u32, + result: *mut napi_ref, +) -> napi_status { + let env = check_env!(env); + check_arg!(env, value); + check_arg!(env, result); + + let value = value.unwrap(); + + let reference = Reference::new( + env, + value, + initial_refcount, + ReferenceOwnership::Userland, + None, + std::ptr::null_mut(), 
+ std::ptr::null_mut(), + ); + + let ptr = Reference::into_raw(reference); + + unsafe { + *result = ptr as _; + } + + napi_clear_last_error(env) +} + +#[napi_sym] +fn napi_delete_reference(env: *mut Env, ref_: napi_ref) -> napi_status { + let env = check_env!(env); + check_arg!(env, ref_); + + let reference = unsafe { Reference::from_raw(ref_ as _) }; + + drop(reference); + + napi_clear_last_error(env) +} + +#[napi_sym] +fn napi_reference_ref( + env: *mut Env, + ref_: napi_ref, + result: *mut u32, +) -> napi_status { + let env = check_env!(env); + check_arg!(env, ref_); + + let reference = unsafe { &mut *(ref_ as *mut Reference) }; + + let count = reference.ref_(); + + if !result.is_null() { + unsafe { + *result = count; + } + } + + napi_clear_last_error(env) +} + +#[napi_sym] +fn napi_reference_unref( + env: *mut Env, + ref_: napi_ref, + result: *mut u32, +) -> napi_status { + let env = check_env!(env); + check_arg!(env, ref_); + + let reference = unsafe { &mut *(ref_ as *mut Reference) }; + + if reference.ref_count == 0 { + return napi_set_last_error(env, napi_generic_failure); + } + + let count = reference.unref(); + + if !result.is_null() { + unsafe { + *result = count; + } + } + + napi_clear_last_error(env) +} + +#[napi_sym] +fn napi_get_reference_value( + env_ptr: *mut Env, + ref_: napi_ref, + result: *mut napi_value, +) -> napi_status { + let env = check_env!(env_ptr); + check_arg!(env, ref_); + check_arg!(env, result); + + let reference = unsafe { &mut *(ref_ as *mut Reference) }; + + let value = match &reference.state { + ReferenceState::Strong(g) => Some(v8::Local::new(&mut env.scope(), g)), + ReferenceState::Weak(w) => w.to_local(&mut env.scope()), + }; + + unsafe { + *result = value.into(); + } + + napi_clear_last_error(env_ptr) +} + +#[napi_sym] +fn napi_open_handle_scope( + env: *mut Env, + _result: *mut napi_handle_scope, +) -> napi_status { + let env = check_env!(env); + napi_clear_last_error(env) +} + +#[napi_sym] +fn napi_close_handle_scope( + 
env: *mut Env, + _scope: napi_handle_scope, +) -> napi_status { + let env = check_env!(env); + napi_clear_last_error(env) +} + +#[napi_sym] +fn napi_open_escapable_handle_scope( + env: *mut Env, + _result: *mut napi_escapable_handle_scope, +) -> napi_status { + let env = check_env!(env); + napi_clear_last_error(env) +} + +#[napi_sym] +fn napi_close_escapable_handle_scope( + env: *mut Env, + _scope: napi_escapable_handle_scope, +) -> napi_status { + let env = check_env!(env); + napi_clear_last_error(env) +} + +#[napi_sym] +fn napi_escape_handle<'s>( + env: *mut Env, + _scope: napi_escapable_handle_scope, + escapee: napi_value<'s>, + result: *mut napi_value<'s>, +) -> napi_status { + let env = check_env!(env); + + unsafe { + *result = escapee; + } + + napi_clear_last_error(env) +} + +#[napi_sym] +fn napi_new_instance<'s>( + env: &'s mut Env, + constructor: napi_value, + argc: usize, + argv: *const napi_value, + result: *mut napi_value<'s>, +) -> napi_status { + check_arg!(env, constructor); + if argc > 0 { + check_arg!(env, argv); + } + check_arg!(env, result); + + let Some(func) = + constructor.and_then(|v| v8::Local::::try_from(v).ok()) + else { + return napi_invalid_arg; + }; + + let args = if argc > 0 { + unsafe { + std::slice::from_raw_parts(argv as *mut v8::Local, argc) + } + } else { + &[] + }; + + let Some(value) = func.new_instance(&mut env.scope(), args) else { + return napi_pending_exception; + }; + + unsafe { + *result = value.into(); + } + + napi_ok +} + +#[napi_sym] +fn napi_instanceof( + env: &mut Env, + object: napi_value, + constructor: napi_value, + result: *mut bool, +) -> napi_status { + check_arg!(env, object); + check_arg!(env, result); + + let Some(ctor) = constructor.and_then(|v| v.to_object(&mut env.scope())) + else { + return napi_object_expected; + }; + + if !ctor.is_function() { + unsafe { + napi_throw_type_error( + env, + c"ERR_NAPI_CONS_FUNCTION".as_ptr(), + c"Constructor must be a function".as_ptr(), + ); + } + return 
napi_function_expected; + } + + let Some(res) = object.unwrap().instance_of(&mut env.scope(), ctor) else { + return napi_generic_failure; + }; + + unsafe { + *result = res; + } + + napi_ok +} + +#[napi_sym] +fn napi_is_exception_pending( + env_ptr: *mut Env, + result: *mut bool, +) -> napi_status { + let env = check_env!(env_ptr); + check_arg!(env, result); + + unsafe { + *result = env.last_exception.is_some(); + } + + napi_clear_last_error(env_ptr) +} + +#[napi_sym] +fn napi_get_and_clear_last_exception( + env_ptr: *mut Env, + result: *mut napi_value, +) -> napi_status { + let env = check_env!(env_ptr); + check_arg!(env, result); + + let ex: v8::Local = + if let Some(last_exception) = env.last_exception.take() { + v8::Local::new(&mut env.scope(), last_exception) + } else { + v8::undefined(&mut env.scope()).into() + }; + + unsafe { + *result = ex.into(); + } + + napi_clear_last_error(env_ptr) +} + +#[napi_sym] +fn napi_is_arraybuffer( + env: *mut Env, + value: napi_value, + result: *mut bool, +) -> napi_status { + let env = check_env!(env); + check_arg!(env, value); + check_arg!(env, result); + + unsafe { + *result = value.unwrap().is_array_buffer(); + } + + napi_clear_last_error(env) +} + +#[napi_sym] +fn napi_create_arraybuffer<'s>( + env: &'s mut Env, + len: usize, + data: *mut *mut c_void, + result: *mut napi_value<'s>, +) -> napi_status { + check_arg!(env, result); + + let buffer = v8::ArrayBuffer::new(&mut env.scope(), len); + + if !data.is_null() { + unsafe { + *data = get_array_buffer_ptr(buffer); + } + } + + unsafe { + *result = buffer.into(); + } + + napi_ok +} + +#[napi_sym] +fn napi_create_external_arraybuffer<'s>( + env: &'s mut Env, + data: *mut c_void, + byte_length: usize, + finalize_cb: napi_finalize, + finalize_hint: *mut c_void, + result: *mut napi_value<'s>, +) -> napi_status { + check_arg!(env, result); + + let store = make_external_backing_store( + env, + data, + byte_length, + std::ptr::null_mut(), + finalize_cb, + finalize_hint, + ); + + let 
ab = + v8::ArrayBuffer::with_backing_store(&mut env.scope(), &store.make_shared()); + let value: v8::Local = ab.into(); + + unsafe { + *result = value.into(); + } + + napi_ok +} + +#[napi_sym] +fn napi_get_arraybuffer_info( + env: *mut Env, + value: napi_value, + data: *mut *mut c_void, + length: *mut usize, +) -> napi_status { + let env = check_env!(env); + check_arg!(env, value); + + let Some(buf) = + value.and_then(|v| v8::Local::::try_from(v).ok()) + else { + return napi_set_last_error(env, napi_invalid_arg); + }; + + if !data.is_null() { + unsafe { + *data = get_array_buffer_ptr(buf); + } + } + + if !length.is_null() { + unsafe { + *length = buf.byte_length(); + } + } + + napi_ok +} + +#[napi_sym] +fn napi_is_typedarray( + env: *mut Env, + value: napi_value, + result: *mut bool, +) -> napi_status { + let env = check_env!(env); + check_arg!(env, value); + check_arg!(env, result); + + unsafe { + *result = value.unwrap().is_typed_array(); + } + + napi_ok +} + +#[napi_sym] +fn napi_create_typedarray<'s>( + env: &'s mut Env, + ty: napi_typedarray_type, + length: usize, + arraybuffer: napi_value, + byte_offset: usize, + result: *mut napi_value<'s>, +) -> napi_status { + check_arg!(env, arraybuffer); + check_arg!(env, result); + + let Some(ab) = + arraybuffer.and_then(|v| v8::Local::::try_from(v).ok()) + else { + return napi_arraybuffer_expected; + }; + + macro_rules! 
create { + ($TypedArray:ident, $size_of_element:expr) => {{ + let soe = $size_of_element; + if soe > 1 && byte_offset % soe != 0 { + let message = v8::String::new( + &mut env.scope(), + format!( + "start offset of {} should be multiple of {}", + stringify!($TypedArray), + soe + ) + .as_str(), + ) + .unwrap(); + let exc = v8::Exception::range_error(&mut env.scope(), message); + env.scope().throw_exception(exc); + return napi_pending_exception; + } + + if length * soe + byte_offset > ab.byte_length() { + let message = + v8::String::new(&mut env.scope(), "Invalid typed array length") + .unwrap(); + let exc = v8::Exception::range_error(&mut env.scope(), message); + env.scope().throw_exception(exc); + return napi_pending_exception; + } + + let Some(ta) = + v8::$TypedArray::new(&mut env.scope(), ab, byte_offset, length) + else { + return napi_generic_failure; + }; + ta.into() + }}; + } + + let typedarray: v8::Local = match ty { + napi_uint8_array => create!(Uint8Array, 1), + napi_uint8_clamped_array => create!(Uint8ClampedArray, 1), + napi_int8_array => create!(Int8Array, 1), + napi_uint16_array => create!(Uint16Array, 2), + napi_int16_array => create!(Int16Array, 2), + napi_uint32_array => create!(Uint32Array, 4), + napi_int32_array => create!(Int32Array, 4), + napi_float32_array => create!(Float32Array, 4), + napi_float64_array => create!(Float64Array, 8), + napi_bigint64_array => create!(BigInt64Array, 8), + napi_biguint64_array => create!(BigUint64Array, 8), + _ => { + return napi_invalid_arg; + } + }; + + unsafe { + *result = typedarray.into(); + } + + napi_ok +} + +#[napi_sym] +fn napi_get_typedarray_info( + env_ptr: *mut Env, + typedarray: napi_value, + type_: *mut napi_typedarray_type, + length: *mut usize, + data: *mut *mut c_void, + arraybuffer: *mut napi_value, + byte_offset: *mut usize, +) -> napi_status { + let env = check_env!(env_ptr); + check_arg!(env, typedarray); + + let Some(array) = + typedarray.and_then(|v| v8::Local::::try_from(v).ok()) + else { + 
return napi_set_last_error(env_ptr, napi_invalid_arg); + }; + + if !type_.is_null() { + let tatype = if array.is_int8_array() { + napi_int8_array + } else if array.is_uint8_array() { + napi_uint8_array + } else if array.is_uint8_clamped_array() { + napi_uint8_clamped_array + } else if array.is_int16_array() { + napi_int16_array + } else if array.is_uint16_array() { + napi_uint16_array + } else if array.is_int32_array() { + napi_int32_array + } else if array.is_uint32_array() { + napi_uint32_array + } else if array.is_float32_array() { + napi_float32_array + } else if array.is_float64_array() { + napi_float64_array + } else if array.is_big_int64_array() { + napi_bigint64_array + } else if array.is_big_uint64_array() { + napi_biguint64_array + } else { + unreachable!(); + }; + + unsafe { + *type_ = tatype; + } + } + + if !length.is_null() { + unsafe { + *length = array.length(); + } + } + + if !data.is_null() { + unsafe { + *data = array.data(); + } + } + + if !arraybuffer.is_null() { + let buf = array.buffer(&mut env.scope()).unwrap(); + unsafe { + *arraybuffer = buf.into(); + } + } + + if !byte_offset.is_null() { + unsafe { + *byte_offset = array.byte_offset(); + } + } + + napi_clear_last_error(env_ptr) +} + +#[napi_sym] +fn napi_create_dataview<'s>( + env: &'s mut Env, + byte_length: usize, + arraybuffer: napi_value<'s>, + byte_offset: usize, + result: *mut napi_value<'s>, +) -> napi_status { + check_arg!(env, arraybuffer); + check_arg!(env, result); + + let Some(buffer) = + arraybuffer.and_then(|v| v8::Local::::try_from(v).ok()) + else { + return napi_invalid_arg; + }; + + if byte_length + byte_offset > buffer.byte_length() { + unsafe { + return napi_throw_range_error( + env, + c"ERR_NAPI_INVALID_DATAVIEW_ARGS".as_ptr(), + c"byte_offset + byte_length should be less than or equal to the size in bytes of the array passed in".as_ptr(), + ); + } + } + + let dataview = + v8::DataView::new(&mut env.scope(), buffer, byte_offset, byte_length); + + unsafe { + *result = 
dataview.into(); + } + + napi_ok +} + +#[napi_sym] +fn napi_is_dataview( + env: *mut Env, + value: napi_value, + result: *mut bool, +) -> napi_status { + let env = check_env!(env); + check_arg!(env, value); + check_arg!(env, result); + + unsafe { + *result = value.unwrap().is_data_view(); + } + + napi_clear_last_error(env) +} + +#[napi_sym] +fn napi_get_dataview_info( + env_ptr: *mut Env, + dataview: napi_value, + byte_length: *mut usize, + data: *mut *mut c_void, + arraybuffer: *mut napi_value, + byte_offset: *mut usize, +) -> napi_status { + let env = check_env!(env_ptr); + check_arg!(env, dataview); + + let Some(array) = + dataview.and_then(|v| v8::Local::::try_from(v).ok()) + else { + return napi_invalid_arg; + }; + + if !byte_length.is_null() { + unsafe { + *byte_length = array.byte_length(); + } + } + + if !arraybuffer.is_null() { + let Some(buffer) = array.buffer(&mut env.scope()) else { + return napi_generic_failure; + }; + + unsafe { + *arraybuffer = buffer.into(); + } + } + + if !data.is_null() { + unsafe { + *data = array.data(); + } + } + + if !byte_offset.is_null() { + unsafe { + *byte_offset = array.byte_offset(); + } + } + + napi_clear_last_error(env_ptr) +} + +#[napi_sym] +fn napi_get_version(env: *mut Env, result: *mut u32) -> napi_status { + let env = check_env!(env); + check_arg!(env, result); + + unsafe { + *result = NAPI_VERSION; + } + + napi_clear_last_error(env) +} + +#[napi_sym] +fn napi_create_promise<'s>( + env: &'s mut Env, + deferred: *mut napi_deferred, + promise: *mut napi_value<'s>, +) -> napi_status { + check_arg!(env, deferred); + check_arg!(env, promise); + + let resolver = v8::PromiseResolver::new(&mut env.scope()).unwrap(); + + let global = v8::Global::new(&mut env.scope(), resolver); + let global_ptr = global.into_raw().as_ptr() as napi_deferred; + + let p = resolver.get_promise(&mut env.scope()); + + unsafe { + *deferred = global_ptr; + } + + unsafe { + *promise = p.into(); + } + + napi_ok +} + +#[napi_sym] +fn 
napi_resolve_deferred( + env: &mut Env, + deferred: napi_deferred, + result: napi_value, +) -> napi_status { + check_arg!(env, result); + check_arg!(env, deferred); + + // Make sure microtasks don't run and call back into JS + env + .scope() + .set_microtasks_policy(v8::MicrotasksPolicy::Explicit); + + let deferred_ptr = + unsafe { NonNull::new_unchecked(deferred as *mut v8::PromiseResolver) }; + let global = unsafe { v8::Global::from_raw(env.isolate(), deferred_ptr) }; + let resolver = v8::Local::new(&mut env.scope(), global); + + let success = resolver + .resolve(&mut env.scope(), result.unwrap()) + .unwrap_or(false); + + // Restore policy + env + .scope() + .set_microtasks_policy(v8::MicrotasksPolicy::Auto); + + if success { + napi_ok + } else { + napi_generic_failure + } +} + +#[napi_sym] +fn napi_reject_deferred( + env: &mut Env, + deferred: napi_deferred, + result: napi_value, +) -> napi_status { + check_arg!(env, result); + check_arg!(env, deferred); + + let deferred_ptr = + unsafe { NonNull::new_unchecked(deferred as *mut v8::PromiseResolver) }; + let global = unsafe { v8::Global::from_raw(env.isolate(), deferred_ptr) }; + let resolver = v8::Local::new(&mut env.scope(), global); + + if !resolver + .reject(&mut env.scope(), result.unwrap()) + .unwrap_or(false) + { + return napi_generic_failure; + } + + napi_ok +} + +#[napi_sym] +fn napi_is_promise( + env: *mut Env, + value: napi_value, + is_promise: *mut bool, +) -> napi_status { + let env = check_env!(env); + check_arg!(env, value); + check_arg!(env, is_promise); + + unsafe { + *is_promise = value.unwrap().is_promise(); + } + + napi_clear_last_error(env) +} + +#[napi_sym] +fn napi_create_date<'s>( + env: &'s mut Env, + time: f64, + result: *mut napi_value<'s>, +) -> napi_status { + check_arg!(env, result); + + let Some(date) = v8::Date::new(&mut env.scope(), time) else { + return napi_generic_failure; + }; + + unsafe { + *result = date.into(); + } + + napi_ok +} + +#[napi_sym] +fn napi_is_date( + env: *mut 
Env, + value: napi_value, + is_date: *mut bool, +) -> napi_status { + let env = check_env!(env); + check_arg!(env, value); + check_arg!(env, is_date); + + unsafe { + *is_date = value.unwrap().is_date(); + } + + napi_clear_last_error(env) +} + +#[napi_sym] +fn napi_get_date_value( + env: &mut Env, + value: napi_value, + result: *mut f64, +) -> napi_status { + check_arg!(env, result); + + let Some(date) = value.and_then(|v| v8::Local::::try_from(v).ok()) + else { + return napi_date_expected; + }; + + unsafe { + *result = date.value_of(); + } + + napi_ok +} + +#[napi_sym] +fn napi_run_script<'s>( + env: &'s mut Env, + script: napi_value, + result: *mut napi_value<'s>, +) -> napi_status { + check_arg!(env, script); + check_arg!(env, result); + + let Some(script) = + script.and_then(|v| v8::Local::::try_from(v).ok()) + else { + return napi_string_expected; + }; + + let Some(script) = v8::Script::compile(&mut env.scope(), script, None) else { + return napi_generic_failure; + }; + + let Some(rv) = script.run(&mut env.scope()) else { + return napi_generic_failure; + }; + + unsafe { + *result = rv.into(); + } + + napi_ok +} + +#[napi_sym] +fn napi_add_finalizer( + env_ptr: *mut Env, + value: napi_value, + finalize_data: *mut c_void, + finalize_cb: Option, + finalize_hint: *mut c_void, + result: *mut napi_ref, +) -> napi_status { + let env = check_env!(env_ptr); + check_arg!(env, value); + check_arg!(env, finalize_cb); + + let Some(value) = + value.and_then(|v| v8::Local::::try_from(v).ok()) + else { + return napi_set_last_error(env, napi_invalid_arg); + }; + + let ownership = if result.is_null() { + ReferenceOwnership::Runtime + } else { + ReferenceOwnership::Userland + }; + let reference = Reference::new( + env, + value.into(), + 0, + ownership, + finalize_cb, + finalize_data, + finalize_hint, + ); + + if !result.is_null() { + unsafe { + *result = Reference::into_raw(reference) as _; + } + } + + napi_clear_last_error(env_ptr) +} + +#[napi_sym] +fn node_api_post_finalizer( 
+ env: *mut Env, + _finalize_cb: napi_finalize, + _finalize_data: *mut c_void, + _finalize_hint: *mut c_void, +) -> napi_status { + napi_clear_last_error(env) +} + +#[napi_sym] +fn napi_adjust_external_memory( + env: *mut Env, + change_in_bytes: i64, + adjusted_value: *mut i64, +) -> napi_status { + let env = check_env!(env); + check_arg!(env, adjusted_value); + + unsafe { + *adjusted_value = env + .isolate() + .adjust_amount_of_external_allocated_memory(change_in_bytes); + } + + napi_clear_last_error(env) +} + +#[napi_sym] +fn napi_set_instance_data( + env: *mut Env, + data: *mut c_void, + finalize_cb: Option, + finalize_hint: *mut c_void, +) -> napi_status { + let env = check_env!(env); + + env.shared_mut().instance_data = Some(InstanceData { + data, + finalize_cb, + finalize_hint, + }); + + napi_clear_last_error(env) +} + +#[napi_sym] +fn napi_get_instance_data( + env: *mut Env, + data: *mut *mut c_void, +) -> napi_status { + let env = check_env!(env); + check_arg!(env, data); + + let instance_data = match &env.shared().instance_data { + Some(v) => v.data, + None => std::ptr::null_mut(), + }; + + unsafe { *data = instance_data }; + + napi_clear_last_error(env) +} + +#[napi_sym] +fn napi_detach_arraybuffer(env: *mut Env, value: napi_value) -> napi_status { + let env = check_env!(env); + check_arg!(env, value); + + let Some(ab) = + value.and_then(|v| v8::Local::::try_from(v).ok()) + else { + return napi_set_last_error(env, napi_arraybuffer_expected); + }; + + if !ab.is_detachable() { + return napi_set_last_error(env, napi_detachable_arraybuffer_expected); + } + + // Expected to crash for None. 
+ ab.detach(None).unwrap(); + + napi_clear_last_error(env); + napi_ok +} + +#[napi_sym] +fn napi_is_detached_arraybuffer( + env_ptr: *mut Env, + arraybuffer: napi_value, + result: *mut bool, +) -> napi_status { + let env = check_env!(env_ptr); + check_arg!(env, arraybuffer); + check_arg!(env, result); + + let is_detached = match arraybuffer + .and_then(|v| v8::Local::::try_from(v).ok()) + { + Some(ab) => ab.was_detached(), + None => false, + }; + + unsafe { + *result = is_detached; + } + + napi_clear_last_error(env) +} diff --git a/ext/napi/lib.rs b/ext/napi/lib.rs index 0b2b3eb5e..0d41bb40f 100644 --- a/ext/napi/lib.rs +++ b/ext/napi/lib.rs @@ -5,6 +5,22 @@ #![allow(clippy::undocumented_unsafe_blocks)] #![deny(clippy::missing_safety_doc)] +//! Symbols to be exported are now defined in this JSON file. +//! The `#[napi_sym]` macro checks for missing entries and panics. +//! +//! `./tools/napi/generate_symbols_list.js` is used to generate the LINK `cli/exports.def` on Windows, +//! which is also checked into git. +//! +//! To add a new napi function: +//! 1. Place `#[napi_sym]` on top of your implementation. +//! 2. Add the function's identifier to this JSON list. +//! 3. Finally, run `tools/napi/generate_symbols_list.js` to update `ext/napi/generated_symbol_exports_list_*.def`. 
+ +pub mod js_native_api; +pub mod node_api; +pub mod util; +pub mod uv; + use core::ptr::NonNull; use deno_core::op2; use deno_core::parking_lot::RwLock; @@ -631,3 +647,30 @@ where Ok(exports) } + +#[allow(clippy::print_stdout)] +pub fn print_linker_flags(name: &str) { + let symbols_path = + include_str!(concat!(env!("OUT_DIR"), "/napi_symbol_path.txt")); + + #[cfg(target_os = "windows")] + println!("cargo:rustc-link-arg-bin={name}=/DEF:{}", symbols_path); + + #[cfg(target_os = "macos")] + println!( + "cargo:rustc-link-arg-bin={name}=-Wl,-exported_symbols_list,{}", + symbols_path, + ); + + #[cfg(target_os = "linux")] + println!( + "cargo:rustc-link-arg-bin={name}=-Wl,--export-dynamic-symbol-list={}", + symbols_path, + ); + + #[cfg(target_os = "android")] + println!( + "cargo:rustc-link-arg-bin={name}=-Wl,--export-dynamic-symbol-list={}", + symbols_path, + ); +} diff --git a/ext/napi/node_api.rs b/ext/napi/node_api.rs new file mode 100644 index 000000000..186ae42c4 --- /dev/null +++ b/ext/napi/node_api.rs @@ -0,0 +1,1010 @@ +// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. 
+ +#![deny(unsafe_op_in_unsafe_fn)] + +use super::util::get_array_buffer_ptr; +use super::util::make_external_backing_store; +use super::util::napi_clear_last_error; +use super::util::napi_set_last_error; +use super::util::SendPtr; +use crate::check_arg; +use crate::check_env; +use crate::*; +use deno_core::parking_lot::Condvar; +use deno_core::parking_lot::Mutex; +use deno_core::V8CrossThreadTaskSpawner; +use napi_sym::napi_sym; +use std::sync::atomic::AtomicBool; +use std::sync::atomic::AtomicU8; +use std::sync::atomic::AtomicUsize; +use std::sync::atomic::Ordering; +use std::sync::Arc; + +#[napi_sym] +fn napi_module_register(module: *const NapiModule) -> napi_status { + MODULE_TO_REGISTER.with(|cell| { + let mut slot = cell.borrow_mut(); + let prev = slot.replace(module); + assert!(prev.is_none()); + }); + napi_ok +} + +#[napi_sym] +fn napi_add_env_cleanup_hook( + env: *mut Env, + fun: Option, + arg: *mut c_void, +) -> napi_status { + let env = check_env!(env); + check_arg!(env, fun); + + let fun = fun.unwrap(); + + env.add_cleanup_hook(fun, arg); + + napi_ok +} + +#[napi_sym] +fn napi_remove_env_cleanup_hook( + env: *mut Env, + fun: Option, + arg: *mut c_void, +) -> napi_status { + let env = check_env!(env); + check_arg!(env, fun); + + let fun = fun.unwrap(); + + env.remove_cleanup_hook(fun, arg); + + napi_ok +} + +struct AsyncCleanupHandle { + env: *mut Env, + hook: napi_async_cleanup_hook, + data: *mut c_void, +} + +unsafe extern "C" fn async_cleanup_handler(arg: *mut c_void) { + unsafe { + let handle = Box::::from_raw(arg as _); + (handle.hook)(arg, handle.data); + } +} + +#[napi_sym] +fn napi_add_async_cleanup_hook( + env: *mut Env, + hook: Option, + arg: *mut c_void, + remove_handle: *mut napi_async_cleanup_hook_handle, +) -> napi_status { + let env = check_env!(env); + check_arg!(env, hook); + + let hook = hook.unwrap(); + + let handle = Box::into_raw(Box::new(AsyncCleanupHandle { + env, + hook, + data: arg, + })) as *mut c_void; + + 
env.add_cleanup_hook(async_cleanup_handler, handle); + + if !remove_handle.is_null() { + unsafe { + *remove_handle = handle; + } + } + + napi_clear_last_error(env) +} + +#[napi_sym] +fn napi_remove_async_cleanup_hook( + remove_handle: napi_async_cleanup_hook_handle, +) -> napi_status { + if remove_handle.is_null() { + return napi_invalid_arg; + } + + let handle = + unsafe { Box::::from_raw(remove_handle as _) }; + + let env = unsafe { &mut *handle.env }; + + env.remove_cleanup_hook(async_cleanup_handler, remove_handle); + + napi_ok +} + +#[napi_sym] +fn napi_fatal_exception(env: &mut Env, err: napi_value) -> napi_status { + check_arg!(env, err); + + let report_error = v8::Local::new(&mut env.scope(), &env.report_error); + + let this = v8::undefined(&mut env.scope()); + if report_error + .call(&mut env.scope(), this.into(), &[err.unwrap()]) + .is_none() + { + return napi_generic_failure; + } + + napi_ok +} + +#[napi_sym] +#[allow(clippy::print_stderr)] +fn napi_fatal_error( + location: *const c_char, + location_len: usize, + message: *const c_char, + message_len: usize, +) -> napi_status { + let location = if location.is_null() { + None + } else { + unsafe { + Some(if location_len == NAPI_AUTO_LENGTH { + std::ffi::CStr::from_ptr(location).to_str().unwrap() + } else { + let slice = std::slice::from_raw_parts( + location as *const u8, + location_len as usize, + ); + std::str::from_utf8(slice).unwrap() + }) + } + }; + + let message = if message_len == NAPI_AUTO_LENGTH { + unsafe { std::ffi::CStr::from_ptr(message).to_str().unwrap() } + } else { + let slice = unsafe { + std::slice::from_raw_parts(message as *const u8, message_len as usize) + }; + std::str::from_utf8(slice).unwrap() + }; + + if let Some(location) = location { + eprintln!("NODE API FATAL ERROR: {} {}", location, message); + } else { + eprintln!("NODE API FATAL ERROR: {}", message); + } + + std::process::abort(); +} + +#[napi_sym] +fn napi_open_callback_scope( + env: *mut Env, + _resource_object: 
napi_value, + _context: napi_value, + result: *mut napi_callback_scope, +) -> napi_status { + let env = check_env!(env); + check_arg!(env, result); + + // we open scope automatically when it's needed + unsafe { + *result = std::ptr::null_mut(); + } + + napi_clear_last_error(env) +} + +#[napi_sym] +fn napi_close_callback_scope( + env: *mut Env, + scope: napi_callback_scope, +) -> napi_status { + let env = check_env!(env); + // we close scope automatically when it's needed + assert!(scope.is_null()); + napi_clear_last_error(env) +} + +// NOTE: we don't support "async_hooks::AsyncContext" so these APIs are noops. +#[napi_sym] +fn napi_async_init( + env: *mut Env, + _async_resource: napi_value, + _async_resource_name: napi_value, + result: *mut napi_async_context, +) -> napi_status { + let env = check_env!(env); + unsafe { + *result = ptr::null_mut(); + } + napi_clear_last_error(env) +} + +#[napi_sym] +fn napi_async_destroy( + env: *mut Env, + async_context: napi_async_context, +) -> napi_status { + let env = check_env!(env); + assert!(async_context.is_null()); + napi_clear_last_error(env) +} + +#[napi_sym] +fn napi_make_callback<'s>( + env: &'s mut Env, + _async_context: napi_async_context, + recv: napi_value, + func: napi_value, + argc: usize, + argv: *const napi_value<'s>, + result: *mut napi_value<'s>, +) -> napi_status { + check_arg!(env, recv); + if argc > 0 { + check_arg!(env, argv); + } + + let Some(recv) = recv.and_then(|v| v.to_object(&mut env.scope())) else { + return napi_object_expected; + }; + + let Some(func) = + func.and_then(|v| v8::Local::::try_from(v).ok()) + else { + return napi_function_expected; + }; + + let args = if argc > 0 { + unsafe { + std::slice::from_raw_parts(argv as *mut v8::Local, argc) + } + } else { + &[] + }; + + // TODO: async_context + + let Some(v) = func.call(&mut env.scope(), recv.into(), args) else { + return napi_generic_failure; + }; + + unsafe { + *result = v.into(); + } + + napi_ok +} + +#[napi_sym] +fn 
napi_create_buffer<'s>( + env: &'s mut Env, + length: usize, + data: *mut *mut c_void, + result: *mut napi_value<'s>, +) -> napi_status { + check_arg!(env, result); + + let ab = v8::ArrayBuffer::new(&mut env.scope(), length); + + let buffer_constructor = + v8::Local::new(&mut env.scope(), &env.buffer_constructor); + let Some(buffer) = + buffer_constructor.new_instance(&mut env.scope(), &[ab.into()]) + else { + return napi_generic_failure; + }; + + if !data.is_null() { + unsafe { + *data = get_array_buffer_ptr(ab); + } + } + + unsafe { + *result = buffer.into(); + } + + napi_ok +} + +#[napi_sym] +fn napi_create_external_buffer<'s>( + env: &'s mut Env, + length: usize, + data: *mut c_void, + finalize_cb: napi_finalize, + finalize_hint: *mut c_void, + result: *mut napi_value<'s>, +) -> napi_status { + check_arg!(env, result); + + let store = make_external_backing_store( + env, + data, + length, + ptr::null_mut(), + finalize_cb, + finalize_hint, + ); + + let ab = + v8::ArrayBuffer::with_backing_store(&mut env.scope(), &store.make_shared()); + + let buffer_constructor = + v8::Local::new(&mut env.scope(), &env.buffer_constructor); + let Some(buffer) = + buffer_constructor.new_instance(&mut env.scope(), &[ab.into()]) + else { + return napi_generic_failure; + }; + + unsafe { + *result = buffer.into(); + } + + napi_ok +} + +#[napi_sym] +fn napi_create_buffer_copy<'s>( + env: &'s mut Env, + length: usize, + data: *mut c_void, + result_data: *mut *mut c_void, + result: *mut napi_value<'s>, +) -> napi_status { + check_arg!(env, result); + + let ab = v8::ArrayBuffer::new(&mut env.scope(), length); + + let buffer_constructor = + v8::Local::new(&mut env.scope(), &env.buffer_constructor); + let Some(buffer) = + buffer_constructor.new_instance(&mut env.scope(), &[ab.into()]) + else { + return napi_generic_failure; + }; + + let ptr = get_array_buffer_ptr(ab); + unsafe { + std::ptr::copy(data, ptr, length); + } + + if !result_data.is_null() { + unsafe { + *result_data = ptr; + } + } 
+ + unsafe { + *result = buffer.into(); + } + + napi_ok +} + +#[napi_sym] +fn napi_is_buffer( + env: *mut Env, + value: napi_value, + result: *mut bool, +) -> napi_status { + let env = check_env!(env); + check_arg!(env, value); + check_arg!(env, result); + + let buffer_constructor = + v8::Local::new(&mut env.scope(), &env.buffer_constructor); + + let Some(is_buffer) = value + .unwrap() + .instance_of(&mut env.scope(), buffer_constructor.into()) + else { + return napi_set_last_error(env, napi_generic_failure); + }; + + unsafe { + *result = is_buffer; + } + + napi_clear_last_error(env) +} + +#[napi_sym] +fn napi_get_buffer_info( + env: *mut Env, + value: napi_value, + data: *mut *mut c_void, + length: *mut usize, +) -> napi_status { + let env = check_env!(env); + check_arg!(env, value); + + // NB: Any TypedArray instance seems to be accepted by this function + // in Node.js. + let Some(ta) = + value.and_then(|v| v8::Local::::try_from(v).ok()) + else { + return napi_set_last_error(env, napi_invalid_arg); + }; + + if !data.is_null() { + unsafe { + *data = ta.data(); + } + } + + if !length.is_null() { + unsafe { + *length = ta.byte_length(); + } + } + + napi_clear_last_error(env) +} + +#[napi_sym] +fn napi_get_node_version( + env: *mut Env, + result: *mut *const napi_node_version, +) -> napi_status { + let env = check_env!(env); + check_arg!(env, result); + + const NODE_VERSION: napi_node_version = napi_node_version { + major: 20, + minor: 11, + patch: 1, + release: c"Deno".as_ptr(), + }; + + unsafe { + *result = &NODE_VERSION as *const napi_node_version; + } + + napi_clear_last_error(env) +} + +struct AsyncWork { + state: AtomicU8, + env: *mut Env, + _async_resource: v8::Global, + _async_resource_name: String, + execute: napi_async_execute_callback, + complete: Option, + data: *mut c_void, +} + +impl AsyncWork { + const IDLE: u8 = 0; + const QUEUED: u8 = 1; + const RUNNING: u8 = 2; +} + +#[napi_sym] +pub(crate) fn napi_create_async_work( + env: *mut Env, + 
async_resource: napi_value, + async_resource_name: napi_value, + execute: Option, + complete: Option, + data: *mut c_void, + result: *mut napi_async_work, +) -> napi_status { + let env_ptr = env; + let env = check_env!(env); + check_arg!(env, execute); + check_arg!(env, result); + + let resource = if let Some(v) = *async_resource { + let Some(resource) = v.to_object(&mut env.scope()) else { + return napi_set_last_error(env, napi_object_expected); + }; + resource + } else { + v8::Object::new(&mut env.scope()) + }; + + let Some(resource_name) = + async_resource_name.and_then(|v| v.to_string(&mut env.scope())) + else { + return napi_set_last_error(env, napi_string_expected); + }; + + let resource_name = resource_name.to_rust_string_lossy(&mut env.scope()); + + let work = Box::new(AsyncWork { + state: AtomicU8::new(AsyncWork::IDLE), + env: env_ptr, + _async_resource: v8::Global::new(&mut env.scope(), resource), + _async_resource_name: resource_name, + execute: execute.unwrap(), + complete, + data, + }); + + unsafe { + *result = Box::into_raw(work) as _; + } + + napi_clear_last_error(env) +} + +#[napi_sym] +pub(crate) fn napi_delete_async_work( + env: *mut Env, + work: napi_async_work, +) -> napi_status { + let env = check_env!(env); + check_arg!(env, work); + + drop(unsafe { Box::::from_raw(work as _) }); + + napi_clear_last_error(env) +} + +#[napi_sym] +fn napi_get_uv_event_loop( + env_ptr: *mut Env, + uv_loop: *mut *mut (), +) -> napi_status { + let env = check_env!(env_ptr); + check_arg!(env, uv_loop); + unsafe { + *uv_loop = env_ptr.cast(); + } + 0 +} + +#[napi_sym] +pub(crate) fn napi_queue_async_work( + env: *mut Env, + work: napi_async_work, +) -> napi_status { + let env = check_env!(env); + check_arg!(env, work); + + let work = unsafe { &*(work as *mut AsyncWork) }; + + let result = + work + .state + .fetch_update(Ordering::SeqCst, Ordering::SeqCst, |state| { + // allow queue if idle or if running, but not if already queued. 
+ if state == AsyncWork::IDLE || state == AsyncWork::RUNNING { + Some(AsyncWork::QUEUED) + } else { + None + } + }); + + if result.is_err() { + return napi_clear_last_error(env); + } + + let work = SendPtr(work); + + env.add_async_work(move || { + let work = work.take(); + let work = unsafe { &*work }; + + let state = work.state.compare_exchange( + AsyncWork::QUEUED, + AsyncWork::RUNNING, + Ordering::SeqCst, + Ordering::SeqCst, + ); + + if state.is_ok() { + unsafe { + (work.execute)(work.env as _, work.data); + } + + // reset back to idle if its still marked as running + let _ = work.state.compare_exchange( + AsyncWork::RUNNING, + AsyncWork::IDLE, + Ordering::SeqCst, + Ordering::Relaxed, + ); + } + + if let Some(complete) = work.complete { + let status = if state.is_ok() { + napi_ok + } else if state == Err(AsyncWork::IDLE) { + napi_cancelled + } else { + napi_generic_failure + }; + + unsafe { + complete(work.env as _, status, work.data); + } + } + + // `complete` probably deletes this `work`, so don't use it here. 
+ }); + + napi_clear_last_error(env) +} + +#[napi_sym] +fn napi_cancel_async_work(env: *mut Env, work: napi_async_work) -> napi_status { + let env = check_env!(env); + check_arg!(env, work); + + let work = unsafe { &*(work as *mut AsyncWork) }; + + let _ = work.state.compare_exchange( + AsyncWork::QUEUED, + AsyncWork::IDLE, + Ordering::SeqCst, + Ordering::Relaxed, + ); + + napi_clear_last_error(env) +} + +extern "C" fn default_call_js_cb( + env: napi_env, + js_callback: napi_value, + _context: *mut c_void, + _data: *mut c_void, +) { + if let Some(js_callback) = *js_callback { + if let Ok(js_callback) = v8::Local::::try_from(js_callback) { + let env = unsafe { &mut *(env as *mut Env) }; + let scope = &mut env.scope(); + let recv = v8::undefined(scope); + js_callback.call(scope, recv.into(), &[]); + } + } +} + +struct TsFn { + env: *mut Env, + func: Option>, + max_queue_size: usize, + queue_size: Mutex, + queue_cond: Condvar, + thread_count: AtomicUsize, + thread_finalize_data: *mut c_void, + thread_finalize_cb: Option, + context: *mut c_void, + call_js_cb: napi_threadsafe_function_call_js, + _resource: v8::Global, + _resource_name: String, + is_closing: AtomicBool, + is_closed: Arc, + sender: V8CrossThreadTaskSpawner, + is_ref: AtomicBool, +} + +impl Drop for TsFn { + fn drop(&mut self) { + assert!(self + .is_closed + .compare_exchange(false, true, Ordering::Relaxed, Ordering::Relaxed) + .is_ok()); + + self.unref(); + + if let Some(finalizer) = self.thread_finalize_cb { + unsafe { + (finalizer)(self.env as _, self.thread_finalize_data, self.context); + } + } + } +} + +impl TsFn { + pub fn acquire(&self) -> napi_status { + if self.is_closing.load(Ordering::SeqCst) { + return napi_closing; + } + self.thread_count.fetch_add(1, Ordering::Relaxed); + napi_ok + } + + pub fn release( + tsfn: *mut TsFn, + mode: napi_threadsafe_function_release_mode, + ) -> napi_status { + let tsfn = unsafe { &mut *tsfn }; + + let result = tsfn.thread_count.fetch_update( + Ordering::Relaxed, 
+ Ordering::Relaxed, + |x| { + if x == 0 { + None + } else { + Some(x - 1) + } + }, + ); + + if result.is_err() { + return napi_invalid_arg; + } + + if (result == Ok(1) || mode == napi_tsfn_abort) + && tsfn + .is_closing + .compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst) + .is_ok() + { + tsfn.queue_cond.notify_all(); + let tsfnptr = SendPtr(tsfn); + // drop must be queued in order to preserve ordering consistent + // with Node.js and so that the finalizer runs on the main thread. + tsfn.sender.spawn(move |_| { + let tsfn = unsafe { Box::from_raw(tsfnptr.take() as *mut TsFn) }; + drop(tsfn); + }); + } + + napi_ok + } + + pub fn ref_(&self) -> napi_status { + if self + .is_ref + .compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst) + .is_ok() + { + let env = unsafe { &mut *self.env }; + env.threadsafe_function_ref(); + } + napi_ok + } + + pub fn unref(&self) -> napi_status { + if self + .is_ref + .compare_exchange(true, false, Ordering::SeqCst, Ordering::SeqCst) + .is_ok() + { + let env = unsafe { &mut *self.env }; + env.threadsafe_function_unref(); + } + + napi_ok + } + + pub fn call( + &self, + data: *mut c_void, + mode: napi_threadsafe_function_call_mode, + ) -> napi_status { + if self.is_closing.load(Ordering::SeqCst) { + return napi_closing; + } + + if self.max_queue_size > 0 { + let mut queue_size = self.queue_size.lock(); + while *queue_size >= self.max_queue_size { + if mode == napi_tsfn_blocking { + self.queue_cond.wait(&mut queue_size); + + if self.is_closing.load(Ordering::SeqCst) { + return napi_closing; + } + } else { + return napi_queue_full; + } + } + *queue_size += 1; + } + + let is_closed = self.is_closed.clone(); + let tsfn = SendPtr(self); + let data = SendPtr(data); + let context = SendPtr(self.context); + let call_js_cb = self.call_js_cb; + + self.sender.spawn(move |scope: &mut v8::HandleScope| { + let data = data.take(); + + // if is_closed then tsfn is freed, don't read from it. 
+ if is_closed.load(Ordering::Relaxed) { + unsafe { + call_js_cb( + std::ptr::null_mut(), + None::>.into(), + context.take() as _, + data as _, + ); + } + } else { + let tsfn = tsfn.take(); + + let tsfn = unsafe { &*tsfn }; + + if tsfn.max_queue_size > 0 { + let mut queue_size = tsfn.queue_size.lock(); + let size = *queue_size; + *queue_size -= 1; + if size == tsfn.max_queue_size { + tsfn.queue_cond.notify_one(); + } + } + + let func = tsfn.func.as_ref().map(|f| v8::Local::new(scope, f)); + + unsafe { + (tsfn.call_js_cb)( + tsfn.env as _, + func.into(), + tsfn.context, + data as _, + ); + } + } + }); + + napi_ok + } +} + +#[napi_sym] +#[allow(clippy::too_many_arguments)] +fn napi_create_threadsafe_function( + env: *mut Env, + func: napi_value, + async_resource: napi_value, + async_resource_name: napi_value, + max_queue_size: usize, + initial_thread_count: usize, + thread_finalize_data: *mut c_void, + thread_finalize_cb: Option, + context: *mut c_void, + call_js_cb: Option, + result: *mut napi_threadsafe_function, +) -> napi_status { + let env = check_env!(env); + check_arg!(env, async_resource_name); + if initial_thread_count == 0 { + return napi_set_last_error(env, napi_invalid_arg); + } + check_arg!(env, result); + + let func = if let Some(value) = *func { + let Ok(func) = v8::Local::::try_from(value) else { + return napi_set_last_error(env, napi_function_expected); + }; + Some(v8::Global::new(&mut env.scope(), func)) + } else { + check_arg!(env, call_js_cb); + None + }; + + let resource = if let Some(v) = *async_resource { + let Some(resource) = v.to_object(&mut env.scope()) else { + return napi_set_last_error(env, napi_object_expected); + }; + resource + } else { + v8::Object::new(&mut env.scope()) + }; + let resource = v8::Global::new(&mut env.scope(), resource); + + let Some(resource_name) = + async_resource_name.and_then(|v| v.to_string(&mut env.scope())) + else { + return napi_set_last_error(env, napi_string_expected); + }; + let resource_name = 
resource_name.to_rust_string_lossy(&mut env.scope()); + + let tsfn = Box::new(TsFn { + env, + func, + max_queue_size, + queue_size: Mutex::new(0), + queue_cond: Condvar::new(), + thread_count: AtomicUsize::new(initial_thread_count), + thread_finalize_data, + thread_finalize_cb, + context, + call_js_cb: call_js_cb.unwrap_or(default_call_js_cb), + _resource: resource, + _resource_name: resource_name, + is_closing: AtomicBool::new(false), + is_closed: Arc::new(AtomicBool::new(false)), + is_ref: AtomicBool::new(false), + sender: env.async_work_sender.clone(), + }); + + tsfn.ref_(); + + unsafe { + *result = Box::into_raw(tsfn) as _; + } + + napi_clear_last_error(env) +} + +/// Maybe called from any thread. +#[napi_sym] +fn napi_get_threadsafe_function_context( + func: napi_threadsafe_function, + result: *mut *const c_void, +) -> napi_status { + assert!(!func.is_null()); + let tsfn = unsafe { &*(func as *const TsFn) }; + unsafe { + *result = tsfn.context; + } + napi_ok +} + +#[napi_sym] +fn napi_call_threadsafe_function( + func: napi_threadsafe_function, + data: *mut c_void, + is_blocking: napi_threadsafe_function_call_mode, +) -> napi_status { + assert!(!func.is_null()); + let tsfn = unsafe { &*(func as *mut TsFn) }; + tsfn.call(data, is_blocking) +} + +#[napi_sym] +fn napi_acquire_threadsafe_function( + tsfn: napi_threadsafe_function, +) -> napi_status { + assert!(!tsfn.is_null()); + let tsfn = unsafe { &*(tsfn as *mut TsFn) }; + tsfn.acquire() +} + +#[napi_sym] +fn napi_release_threadsafe_function( + tsfn: napi_threadsafe_function, + mode: napi_threadsafe_function_release_mode, +) -> napi_status { + assert!(!tsfn.is_null()); + TsFn::release(tsfn as _, mode) +} + +#[napi_sym] +fn napi_unref_threadsafe_function( + _env: &mut Env, + func: napi_threadsafe_function, +) -> napi_status { + assert!(!func.is_null()); + let tsfn = unsafe { &*(func as *mut TsFn) }; + tsfn.unref() +} + +#[napi_sym] +fn napi_ref_threadsafe_function( + _env: &mut Env, + func: 
napi_threadsafe_function, +) -> napi_status { + assert!(!func.is_null()); + let tsfn = unsafe { &*(func as *mut TsFn) }; + tsfn.ref_() +} + +#[napi_sym] +fn node_api_get_module_file_name( + env: *mut Env, + result: *mut *const c_char, +) -> napi_status { + let env = check_env!(env); + check_arg!(env, result); + + unsafe { + *result = env.shared().filename.as_ptr() as _; + } + + napi_clear_last_error(env) +} diff --git a/ext/napi/sym/Cargo.toml b/ext/napi/sym/Cargo.toml new file mode 100644 index 000000000..7c9bf208c --- /dev/null +++ b/ext/napi/sym/Cargo.toml @@ -0,0 +1,21 @@ +# Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. + +[package] +name = "napi_sym" +version = "0.103.0" +authors.workspace = true +edition.workspace = true +license.workspace = true +readme = "README.md" +repository.workspace = true +description = "proc macro for writing N-API symbols" + +[lib] +path = "./lib.rs" +proc-macro = true + +[dependencies] +quote.workspace = true +serde.workspace = true +serde_json.workspace = true +syn.workspace = true diff --git a/ext/napi/sym/README.md b/ext/napi/sym/README.md new file mode 100644 index 000000000..66eb4bff2 --- /dev/null +++ b/ext/napi/sym/README.md @@ -0,0 +1,38 @@ +# napi_sym + +A proc_macro for Deno's Node-API implementation. It does the following things: + +- Marks the symbol as `#[no_mangle]` and rewrites it as + `unsafe extern "C" $name`. +- Asserts that the function symbol is present in + [`symbol_exports.json`](./symbol_exports.json). +- Maps `deno_napi::Result` to raw `napi_result`. + +```rust +use deno_napi::napi_value; +use deno_napi::Env; +use deno_napi::Error; +use deno_napi::Result; + +#[napi_sym::napi_sym] +fn napi_get_boolean( + env: *mut Env, + value: bool, + result: *mut napi_value, +) -> Result { + let _env: &mut Env = env.as_mut().ok_or(Error::InvalidArg)?; + // *result = ... 
+ Ok(()) +} +``` + +### `symbol_exports.json` + +A file containing the symbols that need to be put into the executable's dynamic +symbol table at link-time. + +This is done using `/DEF:` on Windows, `-exported_symbol,_` on macOS and +`--export-dynamic-symbol=` on Linux. See [`cli/build.rs`](../build.rs). + +On Windows, you need to generate the `.def` file by running +[`tools/napi/generate_symbols_lists.js`](../../tools/napi/generate_symbols_lists.js). diff --git a/ext/napi/sym/lib.rs b/ext/napi/sym/lib.rs new file mode 100644 index 000000000..e2826306b --- /dev/null +++ b/ext/napi/sym/lib.rs @@ -0,0 +1,31 @@ +// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. + +use proc_macro::TokenStream; +use quote::quote; +use serde::Deserialize; + +static NAPI_EXPORTS: &str = include_str!("./symbol_exports.json"); + +#[derive(Deserialize)] +struct SymbolExports { + pub symbols: Vec, +} + +#[proc_macro_attribute] +pub fn napi_sym(_attr: TokenStream, item: TokenStream) -> TokenStream { + let func = syn::parse::(item).expect("expected a function"); + + let exports: SymbolExports = + serde_json::from_str(NAPI_EXPORTS).expect("failed to parse exports"); + let name = &func.sig.ident; + assert!( + exports.symbols.contains(&name.to_string()), + "cli/napi/sym/symbol_exports.json is out of sync!" + ); + + TokenStream::from(quote! { + crate::napi_wrap! 
{ + #func + } + }) +} diff --git a/ext/napi/sym/symbol_exports.json b/ext/napi/sym/symbol_exports.json new file mode 100644 index 000000000..00946b8ed --- /dev/null +++ b/ext/napi/sym/symbol_exports.json @@ -0,0 +1,164 @@ +{ + "symbols": [ + "node_api_create_syntax_error", + "napi_make_callback", + "napi_has_named_property", + "napi_async_destroy", + "napi_coerce_to_object", + "napi_get_arraybuffer_info", + "napi_detach_arraybuffer", + "napi_get_undefined", + "napi_reference_unref", + "napi_fatal_error", + "napi_open_callback_scope", + "napi_close_callback_scope", + "napi_get_value_uint32", + "napi_create_function", + "napi_create_arraybuffer", + "napi_get_value_int64", + "napi_get_all_property_names", + "napi_resolve_deferred", + "napi_is_detached_arraybuffer", + "napi_create_string_utf8", + "napi_create_threadsafe_function", + "node_api_throw_syntax_error", + "napi_create_bigint_int64", + "napi_wrap", + "napi_set_property", + "napi_get_value_bigint_int64", + "napi_open_handle_scope", + "napi_create_error", + "napi_create_buffer", + "napi_cancel_async_work", + "napi_is_exception_pending", + "napi_acquire_threadsafe_function", + "napi_create_external", + "napi_get_threadsafe_function_context", + "napi_get_null", + "napi_create_string_utf16", + "node_api_create_external_string_utf16", + "napi_get_value_bigint_uint64", + "napi_module_register", + "napi_is_typedarray", + "napi_create_external_buffer", + "napi_get_new_target", + "napi_get_instance_data", + "napi_close_handle_scope", + "napi_get_value_string_utf16", + "napi_get_property_names", + "napi_is_arraybuffer", + "napi_get_cb_info", + "napi_define_properties", + "napi_add_env_cleanup_hook", + "node_api_get_module_file_name", + "napi_get_node_version", + "napi_create_int64", + "napi_create_double", + "napi_get_and_clear_last_exception", + "napi_create_reference", + "napi_get_typedarray_info", + "napi_call_threadsafe_function", + "napi_get_last_error_info", + "napi_create_array_with_length", + 
"napi_coerce_to_number", + "napi_get_global", + "napi_is_error", + "napi_set_instance_data", + "napi_create_typedarray", + "napi_throw_type_error", + "napi_has_property", + "napi_get_value_external", + "napi_create_range_error", + "napi_typeof", + "napi_ref_threadsafe_function", + "napi_create_bigint_uint64", + "napi_get_prototype", + "napi_adjust_external_memory", + "napi_release_threadsafe_function", + "napi_delete_async_work", + "napi_create_string_latin1", + "node_api_create_external_string_latin1", + "napi_is_array", + "napi_unref_threadsafe_function", + "napi_throw_error", + "napi_has_own_property", + "napi_get_reference_value", + "napi_remove_env_cleanup_hook", + "napi_get_value_string_utf8", + "napi_is_promise", + "napi_get_boolean", + "napi_run_script", + "napi_get_element", + "napi_get_named_property", + "napi_get_buffer_info", + "napi_get_value_bool", + "napi_reference_ref", + "napi_create_object", + "napi_create_promise", + "napi_create_int32", + "napi_escape_handle", + "napi_open_escapable_handle_scope", + "napi_throw", + "napi_get_value_double", + "napi_set_named_property", + "napi_call_function", + "napi_create_date", + "napi_object_freeze", + "napi_get_uv_event_loop", + "napi_get_value_string_latin1", + "napi_reject_deferred", + "napi_add_finalizer", + "napi_create_array", + "napi_delete_reference", + "napi_get_date_value", + "napi_create_dataview", + "napi_get_version", + "napi_define_class", + "napi_is_date", + "napi_remove_wrap", + "napi_delete_property", + "napi_instanceof", + "napi_create_buffer_copy", + "napi_delete_element", + "napi_object_seal", + "napi_queue_async_work", + "napi_get_value_bigint_words", + "napi_is_buffer", + "napi_get_array_length", + "napi_get_property", + "napi_new_instance", + "napi_set_element", + "napi_create_bigint_words", + "napi_strict_equals", + "napi_is_dataview", + "napi_close_escapable_handle_scope", + "napi_get_dataview_info", + "napi_get_value_int32", + "napi_unwrap", + "napi_throw_range_error", + 
"napi_coerce_to_bool", + "napi_create_uint32", + "napi_has_element", + "napi_create_external_arraybuffer", + "napi_create_symbol", + "node_api_symbol_for", + "napi_coerce_to_string", + "napi_create_type_error", + "napi_fatal_exception", + "napi_create_async_work", + "napi_async_init", + "node_api_create_property_key_utf16", + "napi_type_tag_object", + "napi_check_object_type_tag", + "node_api_post_finalizer", + "napi_add_async_cleanup_hook", + "napi_remove_async_cleanup_hook", + "uv_mutex_init", + "uv_mutex_lock", + "uv_mutex_unlock", + "uv_mutex_destroy", + "uv_async_init", + "uv_async_send", + "uv_close" + ] +} diff --git a/ext/napi/util.rs b/ext/napi/util.rs new file mode 100644 index 000000000..21e9d433a --- /dev/null +++ b/ext/napi/util.rs @@ -0,0 +1,287 @@ +// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. +use crate::*; +use libc::INT_MAX; + +#[repr(transparent)] +pub(crate) struct SendPtr(pub *const T); + +impl SendPtr { + // silly function to get around `clippy::redundant_locals` + pub fn take(self) -> *const T { + self.0 + } +} + +unsafe impl Send for SendPtr {} +unsafe impl Sync for SendPtr {} + +pub fn get_array_buffer_ptr(ab: v8::Local) -> *mut c_void { + match ab.data() { + Some(p) => p.as_ptr(), + None => std::ptr::null_mut(), + } +} + +struct BufferFinalizer { + env: *mut Env, + finalize_cb: napi_finalize, + finalize_data: *mut c_void, + finalize_hint: *mut c_void, +} + +impl Drop for BufferFinalizer { + fn drop(&mut self) { + unsafe { + (self.finalize_cb)(self.env as _, self.finalize_data, self.finalize_hint); + } + } +} + +pub(crate) extern "C" fn backing_store_deleter_callback( + data: *mut c_void, + _byte_length: usize, + deleter_data: *mut c_void, +) { + let mut finalizer = + unsafe { Box::::from_raw(deleter_data as _) }; + + finalizer.finalize_data = data; + + drop(finalizer); +} + +pub(crate) fn make_external_backing_store( + env: *mut Env, + data: *mut c_void, + byte_length: usize, + finalize_data: *mut c_void, + 
finalize_cb: napi_finalize, + finalize_hint: *mut c_void, +) -> v8::UniqueRef { + let finalizer = Box::new(BufferFinalizer { + env, + finalize_data, + finalize_cb, + finalize_hint, + }); + + unsafe { + v8::ArrayBuffer::new_backing_store_from_ptr( + data, + byte_length, + backing_store_deleter_callback, + Box::into_raw(finalizer) as _, + ) + } +} + +#[macro_export] +macro_rules! check_env { + ($env: expr) => {{ + let env = $env; + if env.is_null() { + return napi_invalid_arg; + } + unsafe { &mut *env } + }}; +} + +#[macro_export] +macro_rules! return_error_status_if_false { + ($env: expr, $condition: expr, $status: ident) => { + if !$condition { + return Err($crate::util::napi_set_last_error($env, $status).into()); + } + }; +} + +#[macro_export] +macro_rules! return_status_if_false { + ($env: expr, $condition: expr, $status: ident) => { + if !$condition { + return $crate::util::napi_set_last_error($env, $status); + } + }; +} + +pub(crate) unsafe fn check_new_from_utf8_len<'s>( + env: *mut Env, + str_: *const c_char, + len: usize, +) -> Result, napi_status> { + let env = unsafe { &mut *env }; + return_error_status_if_false!( + env, + (len == NAPI_AUTO_LENGTH) || len <= INT_MAX as _, + napi_invalid_arg + ); + return_error_status_if_false!(env, !str_.is_null(), napi_invalid_arg); + let string = if len == NAPI_AUTO_LENGTH { + let result = unsafe { std::ffi::CStr::from_ptr(str_ as *const _) }.to_str(); + return_error_status_if_false!(env, result.is_ok(), napi_generic_failure); + result.unwrap() + } else { + let string = unsafe { std::slice::from_raw_parts(str_ as *const u8, len) }; + let result = std::str::from_utf8(string); + return_error_status_if_false!(env, result.is_ok(), napi_generic_failure); + result.unwrap() + }; + let result = { + let env = unsafe { &mut *(env as *mut Env) }; + v8::String::new(&mut env.scope(), string) + }; + return_error_status_if_false!(env, result.is_some(), napi_generic_failure); + Ok(result.unwrap()) +} + +#[inline] +pub(crate) unsafe fn 
check_new_from_utf8<'s>( + env: *mut Env, + str_: *const c_char, +) -> Result, napi_status> { + unsafe { check_new_from_utf8_len(env, str_, NAPI_AUTO_LENGTH) } +} + +pub(crate) unsafe fn v8_name_from_property_descriptor<'s>( + env: *mut Env, + p: &'s napi_property_descriptor, +) -> Result, napi_status> { + if !p.utf8name.is_null() { + unsafe { check_new_from_utf8(env, p.utf8name).map(|v| v.into()) } + } else { + match *p.name { + Some(v) => match v.try_into() { + Ok(name) => Ok(name), + Err(_) => Err(napi_name_expected), + }, + None => Err(napi_name_expected), + } + } +} + +pub(crate) fn napi_clear_last_error(env: *mut Env) -> napi_status { + let env = unsafe { &mut *env }; + env.last_error.error_code = napi_ok; + env.last_error.engine_error_code = 0; + env.last_error.engine_reserved = std::ptr::null_mut(); + env.last_error.error_message = std::ptr::null_mut(); + napi_ok +} + +pub(crate) fn napi_set_last_error( + env: *mut Env, + error_code: napi_status, +) -> napi_status { + let env = unsafe { &mut *env }; + env.last_error.error_code = error_code; + error_code +} + +#[macro_export] +macro_rules! status_call { + ($call: expr) => { + let status = $call; + if status != napi_ok { + return status; + } + }; +} + +pub trait Nullable { + fn is_null(&self) -> bool; +} + +impl Nullable for *mut T { + fn is_null(&self) -> bool { + (*self).is_null() + } +} + +impl Nullable for *const T { + fn is_null(&self) -> bool { + (*self).is_null() + } +} + +impl Nullable for Option { + fn is_null(&self) -> bool { + self.is_none() + } +} + +impl<'s> Nullable for napi_value<'s> { + fn is_null(&self) -> bool { + self.is_none() + } +} + +#[macro_export] +macro_rules! check_arg { + ($env: expr, $ptr: expr) => { + $crate::return_status_if_false!( + $env, + !$crate::util::Nullable::is_null(&$ptr), + napi_invalid_arg + ); + }; +} + +#[macro_export] +macro_rules! napi_wrap { + ( $( # [ $attr:meta ] )* $vis:vis fn $name:ident $( < $( $x:lifetime ),* > )? ( $env:ident : & $( $lt:lifetime )? 
mut Env $( , $ident:ident : $ty:ty )* $(,)? ) -> napi_status $body:block ) => { + $( # [ $attr ] )* + #[no_mangle] + $vis unsafe extern "C" fn $name $( < $( $x ),* > )? ( env_ptr : *mut Env , $( $ident : $ty ),* ) -> napi_status { + let env: & $( $lt )? mut Env = $crate::check_env!(env_ptr); + + if env.last_exception.is_some() { + return napi_pending_exception; + } + + $crate::util::napi_clear_last_error(env); + + let scope_env = unsafe { &mut *env_ptr }; + let scope = &mut scope_env.scope(); + let try_catch = &mut v8::TryCatch::new(scope); + + #[inline(always)] + fn inner $( < $( $x ),* > )? ( $env: & $( $lt )? mut Env , $( $ident : $ty ),* ) -> napi_status $body + + log::trace!("NAPI ENTER: {}", stringify!($name)); + + let result = inner( env, $( $ident ),* ); + + log::trace!("NAPI EXIT: {} {}", stringify!($name), result); + + if let Some(exception) = try_catch.exception() { + let env = unsafe { &mut *env_ptr }; + let global = v8::Global::new(env.isolate(), exception); + env.last_exception = Some(global); + return $crate::util::napi_set_last_error(env_ptr, napi_pending_exception); + } + + if result != napi_ok { + return $crate::util::napi_set_last_error(env_ptr, result); + } + + return result; + } + }; + + ( $( # [ $attr:meta ] )* $vis:vis fn $name:ident $( < $( $x:lifetime ),* > )? ( $( $ident:ident : $ty:ty ),* $(,)? ) -> napi_status $body:block ) => { + $( # [ $attr ] )* + #[no_mangle] + $vis unsafe extern "C" fn $name $( < $( $x ),* > )? ( $( $ident : $ty ),* ) -> napi_status { + #[inline(always)] + fn inner $( < $( $x ),* > )? ( $( $ident : $ty ),* ) -> napi_status $body + + log::trace!("NAPI ENTER: {}", stringify!($name)); + + let result = inner( $( $ident ),* ); + + log::trace!("NAPI EXIT: {} {}", stringify!($name), result); + + result + } + }; +} diff --git a/ext/napi/uv.rs b/ext/napi/uv.rs new file mode 100644 index 000000000..6f728a92b --- /dev/null +++ b/ext/napi/uv.rs @@ -0,0 +1,231 @@ +// Copyright 2018-2024 the Deno authors. All rights reserved. 
MIT license. + +use crate::*; +use deno_core::parking_lot::Mutex; +use std::mem::MaybeUninit; +use std::ptr::addr_of_mut; + +#[allow(clippy::print_stderr)] +fn assert_ok(res: c_int) -> c_int { + if res != 0 { + eprintln!("bad result in uv polyfill: {res}"); + // don't panic because that might unwind into + // c/c++ + std::process::abort(); + } + res +} + +use js_native_api::napi_create_string_utf8; +use node_api::napi_create_async_work; +use node_api::napi_delete_async_work; +use node_api::napi_queue_async_work; +use std::ffi::c_int; + +const UV_MUTEX_SIZE: usize = { + #[cfg(unix)] + { + std::mem::size_of::() + } + #[cfg(windows)] + { + std::mem::size_of::( + ) + } +}; + +#[repr(C)] +struct uv_mutex_t { + mutex: Mutex<()>, + _padding: [MaybeUninit; const { + (UV_MUTEX_SIZE - size_of::>()) / size_of::() + }], +} + +#[no_mangle] +unsafe extern "C" fn uv_mutex_init(lock: *mut uv_mutex_t) -> c_int { + unsafe { + addr_of_mut!((*lock).mutex).write(Mutex::new(())); + 0 + } +} + +#[no_mangle] +unsafe extern "C" fn uv_mutex_lock(lock: *mut uv_mutex_t) { + unsafe { + let guard = (*lock).mutex.lock(); + // forget the guard so it doesn't unlock when it goes out of scope. 
+ // we're going to unlock it manually + std::mem::forget(guard); + } +} + +#[no_mangle] +unsafe extern "C" fn uv_mutex_unlock(lock: *mut uv_mutex_t) { + unsafe { + (*lock).mutex.force_unlock(); + } +} + +#[no_mangle] +unsafe extern "C" fn uv_mutex_destroy(_lock: *mut uv_mutex_t) { + // no cleanup required +} + +#[repr(C)] +#[derive(Clone, Copy, Debug)] +#[allow(dead_code)] +enum uv_handle_type { + UV_UNKNOWN_HANDLE = 0, + UV_ASYNC, + UV_CHECK, + UV_FS_EVENT, + UV_FS_POLL, + UV_HANDLE, + UV_IDLE, + UV_NAMED_PIPE, + UV_POLL, + UV_PREPARE, + UV_PROCESS, + UV_STREAM, + UV_TCP, + UV_TIMER, + UV_TTY, + UV_UDP, + UV_SIGNAL, + UV_FILE, + UV_HANDLE_TYPE_MAX, +} + +const UV_HANDLE_SIZE: usize = 96; + +#[repr(C)] +struct uv_handle_t { + // public members + pub data: *mut c_void, + pub r#loop: *mut uv_loop_t, + pub r#type: uv_handle_type, + + _padding: [MaybeUninit; const { + (UV_HANDLE_SIZE + - size_of::<*mut c_void>() + - size_of::<*mut uv_loop_t>() + - size_of::()) + / size_of::() + }], +} + +#[cfg(unix)] +const UV_ASYNC_SIZE: usize = 128; + +#[cfg(windows)] +const UV_ASYNC_SIZE: usize = 224; + +#[repr(C)] +struct uv_async_t { + // public members + pub data: *mut c_void, + pub r#loop: *mut uv_loop_t, + pub r#type: uv_handle_type, + // private + async_cb: uv_async_cb, + work: napi_async_work, + _padding: [MaybeUninit; const { + (UV_ASYNC_SIZE + - size_of::<*mut c_void>() + - size_of::<*mut uv_loop_t>() + - size_of::() + - size_of::() + - size_of::()) + / size_of::() + }], +} + +type uv_loop_t = Env; +type uv_async_cb = extern "C" fn(handle: *mut uv_async_t); +#[no_mangle] +unsafe extern "C" fn uv_async_init( + r#loop: *mut uv_loop_t, + // probably uninitialized + r#async: *mut uv_async_t, + async_cb: uv_async_cb, +) -> c_int { + unsafe { + addr_of_mut!((*r#async).r#loop).write(r#loop); + addr_of_mut!((*r#async).r#type).write(uv_handle_type::UV_ASYNC); + addr_of_mut!((*r#async).async_cb).write(async_cb); + + let mut resource_name: MaybeUninit = MaybeUninit::uninit(); + 
assert_ok(napi_create_string_utf8( + r#loop, + c"uv_async".as_ptr(), + usize::MAX, + resource_name.as_mut_ptr(), + )); + let resource_name = resource_name.assume_init(); + + let res = napi_create_async_work( + r#loop, + None::>.into(), + resource_name, + Some(async_exec_wrap), + None, + r#async.cast(), + addr_of_mut!((*r#async).work), + ); + -res + } +} + +#[no_mangle] +unsafe extern "C" fn uv_async_send(handle: *mut uv_async_t) -> c_int { + unsafe { -napi_queue_async_work((*handle).r#loop, (*handle).work) } +} + +type uv_close_cb = unsafe extern "C" fn(*mut uv_handle_t); + +#[no_mangle] +unsafe extern "C" fn uv_close(handle: *mut uv_handle_t, close: uv_close_cb) { + unsafe { + if handle.is_null() { + close(handle); + return; + } + if let uv_handle_type::UV_ASYNC = (*handle).r#type { + let handle: *mut uv_async_t = handle.cast(); + napi_delete_async_work((*handle).r#loop, (*handle).work); + } + close(handle); + } +} + +unsafe extern "C" fn async_exec_wrap(_env: napi_env, data: *mut c_void) { + let data: *mut uv_async_t = data.cast(); + unsafe { + ((*data).async_cb)(data); + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn sizes() { + assert_eq!( + std::mem::size_of::(), + UV_MUTEX_SIZE + ); + assert_eq!( + std::mem::size_of::(), + UV_HANDLE_SIZE + ); + assert_eq!( + std::mem::size_of::(), + UV_ASYNC_SIZE + ); + assert_eq!(std::mem::size_of::(), UV_MUTEX_SIZE); + assert_eq!(std::mem::size_of::(), UV_HANDLE_SIZE); + assert_eq!(std::mem::size_of::(), UV_ASYNC_SIZE); + } +} -- cgit v1.2.3 From ef53ce3ac40e2c545397669e61e4e92423555d94 Mon Sep 17 00:00:00 2001 From: Marvin Hagemeister Date: Thu, 24 Oct 2024 11:46:51 +0200 Subject: fix(node/util): support array formats in `styleText` (#26507) We missed adding support for an array of formats being passed to `util.styleText`. 
Fixes https://github.com/denoland/deno/issues/26496 --- ext/node/polyfills/internal/util/inspect.mjs | 13 +++++++++++++ 1 file changed, 13 insertions(+) (limited to 'ext') diff --git a/ext/node/polyfills/internal/util/inspect.mjs b/ext/node/polyfills/internal/util/inspect.mjs index 3a61c387c..ae797449b 100644 --- a/ext/node/polyfills/internal/util/inspect.mjs +++ b/ext/node/polyfills/internal/util/inspect.mjs @@ -565,6 +565,19 @@ export function stripVTControlCharacters(str) { export function styleText(format, text) { validateString(text, "text"); + + if (Array.isArray(format)) { + for (let i = 0; i < format.length; i++) { + const item = format[i]; + const formatCodes = inspect.colors[item]; + if (formatCodes == null) { + validateOneOf(item, "format", Object.keys(inspect.colors)); + } + text = `\u001b[${formatCodes[0]}m${text}\u001b[${formatCodes[1]}m`; + } + return text; + } + const formatCodes = inspect.colors[format]; if (formatCodes == null) { validateOneOf(format, "format", Object.keys(inspect.colors)); -- cgit v1.2.3 From c71e020668b40666aecfdffb1dbf979abcb41958 Mon Sep 17 00:00:00 2001 From: Leo Kettmeir Date: Thu, 24 Oct 2024 10:45:17 -0700 Subject: refactor(ext/node): use concrete error types (#26419) --- ext/io/fs.rs | 30 +++++++------- ext/node/lib.rs | 2 +- ext/node/ops/blocklist.rs | 53 ++++++++++++++++-------- ext/node/ops/fs.rs | 76 ++++++++++++++++++++++------------ ext/node/ops/http2.rs | 73 ++++++++++++++++++++++----------- ext/node/ops/idna.rs | 47 ++++++++++----------- ext/node/ops/ipc.rs | 50 +++++++++++++++-------- ext/node/ops/os/mod.rs | 54 ++++++++++++++++-------- ext/node/ops/os/priority.rs | 30 +++++++++----- ext/node/ops/process.rs | 3 +- ext/node/ops/require.rs | 93 +++++++++++++++++++++++++++--------------- ext/node/ops/util.rs | 3 +- ext/node/ops/v8.rs | 25 ++++++------ ext/node/ops/worker_threads.rs | 61 +++++++++++++++++++-------- ext/node/ops/zlib/brotli.rs | 77 +++++++++++++++++++++++----------- ext/node/ops/zlib/mod.rs | 76 
+++++++++++++++++----------------- ext/node/ops/zlib/mode.rs | 21 +++------- 17 files changed, 477 insertions(+), 297 deletions(-) (limited to 'ext') diff --git a/ext/io/fs.rs b/ext/io/fs.rs index 06fc3da09..885426520 100644 --- a/ext/io/fs.rs +++ b/ext/io/fs.rs @@ -1,6 +1,7 @@ // Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. use std::borrow::Cow; +use std::fmt::Formatter; use std::io; use std::rc::Rc; use std::time::SystemTime; @@ -21,6 +22,21 @@ pub enum FsError { NotCapable(&'static str), } +impl std::fmt::Display for FsError { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + match self { + FsError::Io(err) => std::fmt::Display::fmt(err, f), + FsError::FileBusy => f.write_str("file busy"), + FsError::NotSupported => f.write_str("not supported"), + FsError::NotCapable(err) => { + f.write_str(&format!("requires {err} access")) + } + } + } +} + +impl std::error::Error for FsError {} + impl FsError { pub fn kind(&self) -> io::ErrorKind { match self { @@ -55,20 +71,6 @@ impl From for FsError { } } -impl From for deno_core::error::AnyError { - fn from(err: FsError) -> Self { - match err { - FsError::Io(err) => err.into(), - FsError::FileBusy => deno_core::error::resource_unavailable(), - FsError::NotSupported => deno_core::error::not_supported(), - FsError::NotCapable(err) => deno_core::error::custom_error( - "NotCapable", - format!("permission denied: {err}"), - ), - } - } -} - impl From for FsError { fn from(err: JoinError) -> Self { if err.is_cancelled() { diff --git a/ext/node/lib.rs b/ext/node/lib.rs index 9b22add45..32624f38b 100644 --- a/ext/node/lib.rs +++ b/ext/node/lib.rs @@ -24,7 +24,7 @@ use once_cell::sync::Lazy; extern crate libz_sys as zlib; mod global; -mod ops; +pub mod ops; mod polyfill; pub use deno_package_json::PackageJson; diff --git a/ext/node/ops/blocklist.rs b/ext/node/ops/blocklist.rs index 332cdda8f..6c64d68ec 100644 --- a/ext/node/ops/blocklist.rs +++ b/ext/node/ops/blocklist.rs @@ -7,9 +7,6 @@ use 
std::net::Ipv4Addr; use std::net::Ipv6Addr; use std::net::SocketAddr; -use deno_core::anyhow::anyhow; -use deno_core::anyhow::bail; -use deno_core::error::AnyError; use deno_core::op2; use deno_core::OpState; @@ -27,13 +24,25 @@ impl deno_core::GarbageCollected for BlockListResource {} #[derive(Serialize)] struct SocketAddressSerialization(String, String); +#[derive(Debug, thiserror::Error)] +pub enum BlocklistError { + #[error("{0}")] + AddrParse(#[from] std::net::AddrParseError), + #[error("{0}")] + IpNetwork(#[from] ipnetwork::IpNetworkError), + #[error("Invalid address")] + InvalidAddress, + #[error("IP version mismatch between start and end addresses")] + IpVersionMismatch, +} + #[op2(fast)] pub fn op_socket_address_parse( state: &mut OpState, #[string] addr: &str, #[smi] port: u16, #[string] family: &str, -) -> Result { +) -> Result { let ip = addr.parse::()?; let parsed: SocketAddr = SocketAddr::new(ip, port); let parsed_ip_str = parsed.ip().to_string(); @@ -52,7 +61,7 @@ pub fn op_socket_address_parse( Ok(false) } } else { - Err(anyhow!("Invalid address")) + Err(BlocklistError::InvalidAddress) } } @@ -60,8 +69,8 @@ pub fn op_socket_address_parse( #[serde] pub fn op_socket_address_get_serialization( state: &mut OpState, -) -> Result { - Ok(state.take::()) +) -> SocketAddressSerialization { + state.take::() } #[op2] @@ -77,7 +86,7 @@ pub fn op_blocklist_new() -> BlockListResource { pub fn op_blocklist_add_address( #[cppgc] wrap: &BlockListResource, #[string] addr: &str, -) -> Result<(), AnyError> { +) -> Result<(), BlocklistError> { wrap.blocklist.borrow_mut().add_address(addr) } @@ -86,7 +95,7 @@ pub fn op_blocklist_add_range( #[cppgc] wrap: &BlockListResource, #[string] start: &str, #[string] end: &str, -) -> Result { +) -> Result { wrap.blocklist.borrow_mut().add_range(start, end) } @@ -95,7 +104,7 @@ pub fn op_blocklist_add_subnet( #[cppgc] wrap: &BlockListResource, #[string] addr: &str, #[smi] prefix: u8, -) -> Result<(), AnyError> { +) -> Result<(), 
BlocklistError> { wrap.blocklist.borrow_mut().add_subnet(addr, prefix) } @@ -104,7 +113,7 @@ pub fn op_blocklist_check( #[cppgc] wrap: &BlockListResource, #[string] addr: &str, #[string] r#type: &str, -) -> Result { +) -> Result { wrap.blocklist.borrow().check(addr, r#type) } @@ -123,7 +132,7 @@ impl BlockList { &mut self, addr: IpAddr, prefix: Option, - ) -> Result<(), AnyError> { + ) -> Result<(), BlocklistError> { match addr { IpAddr::V4(addr) => { let ipv4_prefix = prefix.unwrap_or(32); @@ -154,7 +163,7 @@ impl BlockList { Ok(()) } - pub fn add_address(&mut self, address: &str) -> Result<(), AnyError> { + pub fn add_address(&mut self, address: &str) -> Result<(), BlocklistError> { let ip: IpAddr = address.parse()?; self.map_addr_add_network(ip, None)?; Ok(()) @@ -164,7 +173,7 @@ impl BlockList { &mut self, start: &str, end: &str, - ) -> Result { + ) -> Result { let start_ip: IpAddr = start.parse()?; let end_ip: IpAddr = end.parse()?; @@ -193,25 +202,33 @@ impl BlockList { self.map_addr_add_network(IpAddr::V6(addr), None)?; } } - _ => bail!("IP version mismatch between start and end addresses"), + _ => return Err(BlocklistError::IpVersionMismatch), } Ok(true) } - pub fn add_subnet(&mut self, addr: &str, prefix: u8) -> Result<(), AnyError> { + pub fn add_subnet( + &mut self, + addr: &str, + prefix: u8, + ) -> Result<(), BlocklistError> { let ip: IpAddr = addr.parse()?; self.map_addr_add_network(ip, Some(prefix))?; Ok(()) } - pub fn check(&self, addr: &str, r#type: &str) -> Result { + pub fn check( + &self, + addr: &str, + r#type: &str, + ) -> Result { let addr: IpAddr = addr.parse()?; let family = r#type.to_lowercase(); if family == "ipv4" && addr.is_ipv4() || family == "ipv6" && addr.is_ipv6() { Ok(self.rules.iter().any(|net| net.contains(addr))) } else { - Err(anyhow!("Invalid address")) + Err(BlocklistError::InvalidAddress) } } } diff --git a/ext/node/ops/fs.rs b/ext/node/ops/fs.rs index 6253f32d0..6645792e7 100644 --- a/ext/node/ops/fs.rs +++ 
b/ext/node/ops/fs.rs @@ -3,7 +3,6 @@ use std::cell::RefCell; use std::rc::Rc; -use deno_core::error::AnyError; use deno_core::op2; use deno_core::OpState; use deno_fs::FileSystemRc; @@ -11,11 +10,27 @@ use serde::Serialize; use crate::NodePermissions; +#[derive(Debug, thiserror::Error)] +pub enum FsError { + #[error(transparent)] + Permission(deno_core::error::AnyError), + #[error("{0}")] + Io(#[from] std::io::Error), + #[cfg(windows)] + #[error("Path has no root.")] + PathHasNoRoot, + #[cfg(not(any(unix, windows)))] + #[error("Unsupported platform.")] + UnsupportedPlatform, + #[error(transparent)] + Fs(#[from] deno_io::fs::FsError), +} + #[op2(fast)] pub fn op_node_fs_exists_sync

( state: &mut OpState, #[string] path: String, -) -> Result +) -> Result where P: NodePermissions + 'static, { @@ -30,7 +45,7 @@ where pub async fn op_node_fs_exists

( state: Rc>, #[string] path: String, -) -> Result +) -> Result where P: NodePermissions + 'static, { @@ -38,7 +53,8 @@ where let mut state = state.borrow_mut(); let path = state .borrow_mut::

() - .check_read_with_api_name(&path, Some("node:fs.exists()"))?; + .check_read_with_api_name(&path, Some("node:fs.exists()")) + .map_err(FsError::Permission)?; (state.borrow::().clone(), path) }; @@ -50,16 +66,18 @@ pub fn op_node_cp_sync

( state: &mut OpState, #[string] path: &str, #[string] new_path: &str, -) -> Result<(), AnyError> +) -> Result<(), FsError> where P: NodePermissions + 'static, { let path = state .borrow_mut::

() - .check_read_with_api_name(path, Some("node:fs.cpSync"))?; + .check_read_with_api_name(path, Some("node:fs.cpSync")) + .map_err(FsError::Permission)?; let new_path = state .borrow_mut::

() - .check_write_with_api_name(new_path, Some("node:fs.cpSync"))?; + .check_write_with_api_name(new_path, Some("node:fs.cpSync")) + .map_err(FsError::Permission)?; let fs = state.borrow::(); fs.cp_sync(&path, &new_path)?; @@ -71,7 +89,7 @@ pub async fn op_node_cp

( state: Rc>, #[string] path: String, #[string] new_path: String, -) -> Result<(), AnyError> +) -> Result<(), FsError> where P: NodePermissions + 'static, { @@ -79,10 +97,12 @@ where let mut state = state.borrow_mut(); let path = state .borrow_mut::

() - .check_read_with_api_name(&path, Some("node:fs.cpSync"))?; + .check_read_with_api_name(&path, Some("node:fs.cpSync")) + .map_err(FsError::Permission)?; let new_path = state .borrow_mut::

() - .check_write_with_api_name(&new_path, Some("node:fs.cpSync"))?; + .check_write_with_api_name(&new_path, Some("node:fs.cpSync")) + .map_err(FsError::Permission)?; (state.borrow::().clone(), path, new_path) }; @@ -108,7 +128,7 @@ pub fn op_node_statfs

( state: Rc>, #[string] path: String, bigint: bool, -) -> Result +) -> Result where P: NodePermissions + 'static, { @@ -116,10 +136,12 @@ where let mut state = state.borrow_mut(); let path = state .borrow_mut::

() - .check_read_with_api_name(&path, Some("node:fs.statfs"))?; + .check_read_with_api_name(&path, Some("node:fs.statfs")) + .map_err(FsError::Permission)?; state .borrow_mut::

() - .check_sys("statfs", "node:fs.statfs")?; + .check_sys("statfs", "node:fs.statfs") + .map_err(FsError::Permission)?; path }; #[cfg(unix)] @@ -176,7 +198,6 @@ where } #[cfg(windows)] { - use deno_core::anyhow::anyhow; use std::ffi::OsStr; use std::os::windows::ffi::OsStrExt; use windows_sys::Win32::Storage::FileSystem::GetDiskFreeSpaceW; @@ -186,10 +207,7 @@ where // call below. #[allow(clippy::disallowed_methods)] let path = path.canonicalize()?; - let root = path - .ancestors() - .last() - .ok_or(anyhow!("Path has no root."))?; + let root = path.ancestors().last().ok_or(FsError::PathHasNoRoot)?; let mut root = OsStr::new(root).encode_wide().collect::>(); root.push(0); let mut sectors_per_cluster = 0; @@ -229,7 +247,7 @@ where { let _ = path; let _ = bigint; - Err(anyhow!("Unsupported platform.")) + Err(FsError::UnsupportedPlatform) } } @@ -241,13 +259,14 @@ pub fn op_node_lutimes_sync

( #[smi] atime_nanos: u32, #[number] mtime_secs: i64, #[smi] mtime_nanos: u32, -) -> Result<(), AnyError> +) -> Result<(), FsError> where P: NodePermissions + 'static, { let path = state .borrow_mut::

() - .check_write_with_api_name(path, Some("node:fs.lutimes"))?; + .check_write_with_api_name(path, Some("node:fs.lutimes")) + .map_err(FsError::Permission)?; let fs = state.borrow::(); fs.lutime_sync(&path, atime_secs, atime_nanos, mtime_secs, mtime_nanos)?; @@ -262,7 +281,7 @@ pub async fn op_node_lutimes

( #[smi] atime_nanos: u32, #[number] mtime_secs: i64, #[smi] mtime_nanos: u32, -) -> Result<(), AnyError> +) -> Result<(), FsError> where P: NodePermissions + 'static, { @@ -270,7 +289,8 @@ where let mut state = state.borrow_mut(); let path = state .borrow_mut::

() - .check_write_with_api_name(&path, Some("node:fs.lutimesSync"))?; + .check_write_with_api_name(&path, Some("node:fs.lutimesSync")) + .map_err(FsError::Permission)?; (state.borrow::().clone(), path) }; @@ -286,13 +306,14 @@ pub fn op_node_lchown_sync

( #[string] path: String, uid: Option, gid: Option, -) -> Result<(), AnyError> +) -> Result<(), FsError> where P: NodePermissions + 'static, { let path = state .borrow_mut::

() - .check_write_with_api_name(&path, Some("node:fs.lchownSync"))?; + .check_write_with_api_name(&path, Some("node:fs.lchownSync")) + .map_err(FsError::Permission)?; let fs = state.borrow::(); fs.lchown_sync(&path, uid, gid)?; Ok(()) @@ -304,7 +325,7 @@ pub async fn op_node_lchown

( #[string] path: String, uid: Option, gid: Option, -) -> Result<(), AnyError> +) -> Result<(), FsError> where P: NodePermissions + 'static, { @@ -312,7 +333,8 @@ where let mut state = state.borrow_mut(); let path = state .borrow_mut::

() - .check_write_with_api_name(&path, Some("node:fs.lchown"))?; + .check_write_with_api_name(&path, Some("node:fs.lchown")) + .map_err(FsError::Permission)?; (state.borrow::().clone(), path) }; fs.lchown_async(path, uid, gid).await?; diff --git a/ext/node/ops/http2.rs b/ext/node/ops/http2.rs index 705a8ecdc..53dada9f4 100644 --- a/ext/node/ops/http2.rs +++ b/ext/node/ops/http2.rs @@ -7,7 +7,6 @@ use std::rc::Rc; use std::task::Poll; use bytes::Bytes; -use deno_core::error::AnyError; use deno_core::futures::future::poll_fn; use deno_core::op2; use deno_core::serde::Serialize; @@ -110,17 +109,28 @@ impl Resource for Http2ServerSendResponse { } } +#[derive(Debug, thiserror::Error)] +pub enum Http2Error { + #[error(transparent)] + Resource(deno_core::error::AnyError), + #[error(transparent)] + UrlParse(#[from] url::ParseError), + #[error(transparent)] + H2(#[from] h2::Error), +} + #[op2(async)] #[serde] pub async fn op_http2_connect( state: Rc>, #[smi] rid: ResourceId, #[string] url: String, -) -> Result<(ResourceId, ResourceId), AnyError> { +) -> Result<(ResourceId, ResourceId), Http2Error> { // No permission check necessary because we're using an existing connection let network_stream = { let mut state = state.borrow_mut(); - take_network_stream_resource(&mut state.resource_table, rid)? + take_network_stream_resource(&mut state.resource_table, rid) + .map_err(Http2Error::Resource)? 
}; let url = Url::parse(&url)?; @@ -144,9 +154,10 @@ pub async fn op_http2_connect( pub async fn op_http2_listen( state: Rc>, #[smi] rid: ResourceId, -) -> Result { +) -> Result { let stream = - take_network_stream_resource(&mut state.borrow_mut().resource_table, rid)?; + take_network_stream_resource(&mut state.borrow_mut().resource_table, rid) + .map_err(Http2Error::Resource)?; let conn = h2::server::Builder::new().handshake(stream).await?; Ok( @@ -166,12 +177,13 @@ pub async fn op_http2_accept( #[smi] rid: ResourceId, ) -> Result< Option<(Vec<(ByteString, ByteString)>, ResourceId, ResourceId)>, - AnyError, + Http2Error, > { let resource = state .borrow() .resource_table - .get::(rid)?; + .get::(rid) + .map_err(Http2Error::Resource)?; let mut conn = RcRef::map(&resource, |r| &r.conn).borrow_mut().await; if let Some(res) = conn.accept().await { let (req, resp) = res?; @@ -233,11 +245,12 @@ pub async fn op_http2_send_response( #[smi] rid: ResourceId, #[smi] status: u16, #[serde] headers: Vec<(ByteString, ByteString)>, -) -> Result<(ResourceId, u32), AnyError> { +) -> Result<(ResourceId, u32), Http2Error> { let resource = state .borrow() .resource_table - .get::(rid)?; + .get::(rid) + .map_err(Http2Error::Resource)?; let mut send_response = RcRef::map(resource, |r| &r.send_response) .borrow_mut() .await; @@ -262,8 +275,12 @@ pub async fn op_http2_send_response( pub async fn op_http2_poll_client_connection( state: Rc>, #[smi] rid: ResourceId, -) -> Result<(), AnyError> { - let resource = state.borrow().resource_table.get::(rid)?; +) -> Result<(), Http2Error> { + let resource = state + .borrow() + .resource_table + .get::(rid) + .map_err(Http2Error::Resource)?; let cancel_handle = RcRef::map(resource.clone(), |this| &this.cancel_handle); let mut conn = RcRef::map(resource, |this| &this.conn).borrow_mut().await; @@ -289,11 +306,12 @@ pub async fn op_http2_client_request( // 4 strings of keys? 
#[serde] mut pseudo_headers: HashMap, #[serde] headers: Vec<(ByteString, ByteString)>, -) -> Result<(ResourceId, u32), AnyError> { +) -> Result<(ResourceId, u32), Http2Error> { let resource = state .borrow() .resource_table - .get::(client_rid)?; + .get::(client_rid) + .map_err(Http2Error::Resource)?; let url = resource.url.clone(); @@ -326,7 +344,10 @@ pub async fn op_http2_client_request( let resource = { let state = state.borrow(); - state.resource_table.get::(client_rid)? + state + .resource_table + .get::(client_rid) + .map_err(Http2Error::Resource)? }; let mut client = RcRef::map(&resource, |r| &r.client).borrow_mut().await; poll_fn(|cx| client.poll_ready(cx)).await?; @@ -345,11 +366,12 @@ pub async fn op_http2_client_send_data( #[smi] stream_rid: ResourceId, #[buffer] data: JsBuffer, end_of_stream: bool, -) -> Result<(), AnyError> { +) -> Result<(), Http2Error> { let resource = state .borrow() .resource_table - .get::(stream_rid)?; + .get::(stream_rid) + .map_err(Http2Error::Resource)?; let mut stream = RcRef::map(&resource, |r| &r.stream).borrow_mut().await; stream.send_data(data.to_vec().into(), end_of_stream)?; @@ -361,7 +383,7 @@ pub async fn op_http2_client_reset_stream( state: Rc>, #[smi] stream_rid: ResourceId, #[smi] code: u32, -) -> Result<(), AnyError> { +) -> Result<(), deno_core::error::AnyError> { let resource = state .borrow() .resource_table @@ -376,11 +398,12 @@ pub async fn op_http2_client_send_trailers( state: Rc>, #[smi] stream_rid: ResourceId, #[serde] trailers: Vec<(ByteString, ByteString)>, -) -> Result<(), AnyError> { +) -> Result<(), Http2Error> { let resource = state .borrow() .resource_table - .get::(stream_rid)?; + .get::(stream_rid) + .map_err(Http2Error::Resource)?; let mut stream = RcRef::map(&resource, |r| &r.stream).borrow_mut().await; let mut trailers_map = http::HeaderMap::new(); @@ -408,11 +431,12 @@ pub struct Http2ClientResponse { pub async fn op_http2_client_get_response( state: Rc>, #[smi] stream_rid: ResourceId, -) -> 
Result<(Http2ClientResponse, bool), AnyError> { +) -> Result<(Http2ClientResponse, bool), Http2Error> { let resource = state .borrow() .resource_table - .get::(stream_rid)?; + .get::(stream_rid) + .map_err(Http2Error::Resource)?; let mut response_future = RcRef::map(&resource, |r| &r.response).borrow_mut().await; @@ -478,11 +502,12 @@ fn poll_data_or_trailers( pub async fn op_http2_client_get_response_body_chunk( state: Rc>, #[smi] body_rid: ResourceId, -) -> Result<(Option>, bool, bool), AnyError> { +) -> Result<(Option>, bool, bool), Http2Error> { let resource = state .borrow() .resource_table - .get::(body_rid)?; + .get::(body_rid) + .map_err(Http2Error::Resource)?; let mut body = RcRef::map(&resource, |r| &r.body).borrow_mut().await; loop { @@ -525,7 +550,7 @@ pub async fn op_http2_client_get_response_body_chunk( pub async fn op_http2_client_get_response_trailers( state: Rc>, #[smi] body_rid: ResourceId, -) -> Result>, AnyError> { +) -> Result>, deno_core::error::AnyError> { let resource = state .borrow() .resource_table diff --git a/ext/node/ops/idna.rs b/ext/node/ops/idna.rs index 9c9450c70..a3d85e77c 100644 --- a/ext/node/ops/idna.rs +++ b/ext/node/ops/idna.rs @@ -1,7 +1,5 @@ // Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. 
-use deno_core::anyhow::Error; -use deno_core::error::range_error; use deno_core::op2; use std::borrow::Cow; @@ -11,19 +9,21 @@ use std::borrow::Cow; const PUNY_PREFIX: &str = "xn--"; -fn invalid_input_err() -> Error { - range_error("Invalid input") -} - -fn not_basic_err() -> Error { - range_error("Illegal input >= 0x80 (not a basic code point)") +#[derive(Debug, thiserror::Error)] +pub enum IdnaError { + #[error("Invalid input")] + InvalidInput, + #[error("Input would take more than 63 characters to encode")] + InputTooLong, + #[error("Illegal input >= 0x80 (not a basic code point)")] + IllegalInput, } /// map a domain by mapping each label with the given function -fn map_domain( +fn map_domain( domain: &str, - f: impl Fn(&str) -> Result, E>, -) -> Result { + f: impl Fn(&str) -> Result, IdnaError>, +) -> Result { let mut result = String::with_capacity(domain.len()); let mut domain = domain; @@ -48,7 +48,7 @@ fn map_domain( /// Maps a unicode domain to ascii by punycode encoding each label /// /// Note this is not IDNA2003 or IDNA2008 compliant, rather it matches node.js's punycode implementation -fn to_ascii(input: &str) -> Result { +fn to_ascii(input: &str) -> Result { if input.is_ascii() { return Ok(input.into()); } @@ -61,9 +61,7 @@ fn to_ascii(input: &str) -> Result { } else { idna::punycode::encode_str(label) .map(|encoded| [PUNY_PREFIX, &encoded].join("").into()) // add the prefix - .ok_or_else(|| { - Error::msg("Input would take more than 63 characters to encode") // only error possible per the docs - }) + .ok_or(IdnaError::InputTooLong) // only error possible per the docs } })?; @@ -74,13 +72,13 @@ fn to_ascii(input: &str) -> Result { /// Maps an ascii domain to unicode by punycode decoding each label /// /// Note this is not IDNA2003 or IDNA2008 compliant, rather it matches node.js's punycode implementation -fn to_unicode(input: &str) -> Result { +fn to_unicode(input: &str) -> Result { map_domain(input, |s| { if let Some(puny) = 
s.strip_prefix(PUNY_PREFIX) { // it's a punycode encoded label Ok( idna::punycode::decode_to_string(&puny.to_lowercase()) - .ok_or_else(invalid_input_err)? + .ok_or(IdnaError::InvalidInput)? .into(), ) } else { @@ -95,7 +93,7 @@ fn to_unicode(input: &str) -> Result { #[string] pub fn op_node_idna_punycode_to_ascii( #[string] domain: String, -) -> Result { +) -> Result { to_ascii(&domain) } @@ -105,7 +103,7 @@ pub fn op_node_idna_punycode_to_ascii( #[string] pub fn op_node_idna_punycode_to_unicode( #[string] domain: String, -) -> Result { +) -> Result { to_unicode(&domain) } @@ -115,8 +113,8 @@ pub fn op_node_idna_punycode_to_unicode( #[string] pub fn op_node_idna_domain_to_ascii( #[string] domain: String, -) -> Result { - idna::domain_to_ascii(&domain).map_err(|e| e.into()) +) -> Result { + idna::domain_to_ascii(&domain) } /// Converts a domain to Unicode as per the IDNA spec @@ -131,7 +129,7 @@ pub fn op_node_idna_domain_to_unicode(#[string] domain: String) -> String { #[string] pub fn op_node_idna_punycode_decode( #[string] domain: String, -) -> Result { +) -> Result { if domain.is_empty() { return Ok(domain); } @@ -147,11 +145,10 @@ pub fn op_node_idna_punycode_decode( .unwrap_or(domain.len() - 1); if !domain[..last_dash].is_ascii() { - return Err(not_basic_err()); + return Err(IdnaError::IllegalInput); } - idna::punycode::decode_to_string(&domain) - .ok_or_else(|| deno_core::error::range_error("Invalid input")) + idna::punycode::decode_to_string(&domain).ok_or(IdnaError::InvalidInput) } #[op2] diff --git a/ext/node/ops/ipc.rs b/ext/node/ops/ipc.rs index c5e0f875d..672cf0d70 100644 --- a/ext/node/ops/ipc.rs +++ b/ext/node/ops/ipc.rs @@ -17,8 +17,6 @@ mod impl_ { use std::task::Context; use std::task::Poll; - use deno_core::error::bad_resource_id; - use deno_core::error::AnyError; use deno_core::op2; use deno_core::serde; use deno_core::serde::Serializer; @@ -167,7 +165,7 @@ mod impl_ { #[smi] pub fn op_node_child_ipc_pipe( state: &mut OpState, - ) -> Result, 
AnyError> { + ) -> Result, io::Error> { let fd = match state.try_borrow_mut::() { Some(child_pipe_fd) => child_pipe_fd.0, None => return Ok(None), @@ -180,6 +178,18 @@ mod impl_ { )) } + #[derive(Debug, thiserror::Error)] + pub enum IpcError { + #[error(transparent)] + Resource(deno_core::error::AnyError), + #[error(transparent)] + IpcJsonStream(#[from] IpcJsonStreamError), + #[error(transparent)] + Canceled(#[from] deno_core::Canceled), + #[error("failed to serialize json value: {0}")] + SerdeJson(serde_json::Error), + } + #[op2(async)] pub fn op_node_ipc_write<'a>( scope: &mut v8::HandleScope<'a>, @@ -192,27 +202,23 @@ mod impl_ { // ideally we would just return `Result<(impl Future, bool), ..>`, but that's not // supported by `op2` currently. queue_ok: v8::Local<'a, v8::Array>, - ) -> Result>, AnyError> { + ) -> Result>, IpcError> { let mut serialized = Vec::with_capacity(64); let mut ser = serde_json::Serializer::new(&mut serialized); - serialize_v8_value(scope, value, &mut ser).map_err(|e| { - deno_core::error::type_error(format!( - "failed to serialize json value: {e}" - )) - })?; + serialize_v8_value(scope, value, &mut ser).map_err(IpcError::SerdeJson)?; serialized.push(b'\n'); let stream = state .borrow() .resource_table .get::(rid) - .map_err(|_| bad_resource_id())?; + .map_err(IpcError::Resource)?; let old = stream .queued_bytes .fetch_add(serialized.len(), std::sync::atomic::Ordering::Relaxed); if old + serialized.len() > 2 * INITIAL_CAPACITY { // sending messages too fast - let v = false.to_v8(scope)?; + let v = false.to_v8(scope).unwrap(); // Infallible queue_ok.set_index(scope, 0, v); } Ok(async move { @@ -246,12 +252,12 @@ mod impl_ { pub async fn op_node_ipc_read( state: Rc>, #[smi] rid: ResourceId, - ) -> Result { + ) -> Result { let stream = state .borrow() .resource_table .get::(rid) - .map_err(|_| bad_resource_id())?; + .map_err(IpcError::Resource)?; let cancel = stream.cancel.clone(); let mut stream = RcRef::map(stream, |r| 
&r.read_half).borrow_mut().await; @@ -407,7 +413,7 @@ mod impl_ { async fn write_msg_bytes( self: Rc, msg: &[u8], - ) -> Result<(), AnyError> { + ) -> Result<(), io::Error> { let mut write_half = RcRef::map(self, |r| &r.write_half).borrow_mut().await; write_half.write_all(msg).await?; @@ -462,6 +468,14 @@ mod impl_ { } } + #[derive(Debug, thiserror::Error)] + pub enum IpcJsonStreamError { + #[error("{0}")] + Io(#[source] std::io::Error), + #[error("{0}")] + SimdJson(#[source] simd_json::Error), + } + // JSON serialization stream over IPC pipe. // // `\n` is used as a delimiter between messages. @@ -482,7 +496,7 @@ mod impl_ { async fn read_msg( &mut self, - ) -> Result, AnyError> { + ) -> Result, IpcJsonStreamError> { let mut json = None; let nread = read_msg_inner( &mut self.pipe, @@ -490,7 +504,8 @@ mod impl_ { &mut json, &mut self.read_buffer, ) - .await?; + .await + .map_err(IpcJsonStreamError::Io)?; if nread == 0 { // EOF. return Ok(None); @@ -500,7 +515,8 @@ mod impl_ { Some(v) => v, None => { // Took more than a single read and some buffering. - simd_json::from_slice(&mut self.buffer[..nread])? + simd_json::from_slice(&mut self.buffer[..nread]) + .map_err(IpcJsonStreamError::SimdJson)? } }; diff --git a/ext/node/ops/os/mod.rs b/ext/node/ops/os/mod.rs index ca91895f2..b4c9eaa8c 100644 --- a/ext/node/ops/os/mod.rs +++ b/ext/node/ops/os/mod.rs @@ -1,28 +1,38 @@ // Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. use crate::NodePermissions; -use deno_core::error::type_error; -use deno_core::error::AnyError; use deno_core::op2; use deno_core::OpState; mod cpus; -mod priority; +pub mod priority; + +#[derive(Debug, thiserror::Error)] +pub enum OsError { + #[error(transparent)] + Priority(priority::PriorityError), + #[error(transparent)] + Permission(deno_core::error::AnyError), + #[error("Failed to get cpu info")] + FailedToGetCpuInfo, +} #[op2(fast)] pub fn op_node_os_get_priority

( state: &mut OpState, pid: u32, -) -> Result +) -> Result where P: NodePermissions + 'static, { { let permissions = state.borrow_mut::

(); - permissions.check_sys("getPriority", "node:os.getPriority()")?; + permissions + .check_sys("getPriority", "node:os.getPriority()") + .map_err(OsError::Permission)?; } - priority::get_priority(pid) + priority::get_priority(pid).map_err(OsError::Priority) } #[op2(fast)] @@ -30,21 +40,25 @@ pub fn op_node_os_set_priority

( state: &mut OpState, pid: u32, priority: i32, -) -> Result<(), AnyError> +) -> Result<(), OsError> where P: NodePermissions + 'static, { { let permissions = state.borrow_mut::

(); - permissions.check_sys("setPriority", "node:os.setPriority()")?; + permissions + .check_sys("setPriority", "node:os.setPriority()") + .map_err(OsError::Permission)?; } - priority::set_priority(pid, priority) + priority::set_priority(pid, priority).map_err(OsError::Priority) } #[op2] #[string] -pub fn op_node_os_username

(state: &mut OpState) -> Result +pub fn op_node_os_username

( + state: &mut OpState, +) -> Result where P: NodePermissions + 'static, { @@ -57,7 +71,9 @@ where } #[op2(fast)] -pub fn op_geteuid

(state: &mut OpState) -> Result +pub fn op_geteuid

( + state: &mut OpState, +) -> Result where P: NodePermissions + 'static, { @@ -76,7 +92,9 @@ where } #[op2(fast)] -pub fn op_getegid

(state: &mut OpState) -> Result +pub fn op_getegid

( + state: &mut OpState, +) -> Result where P: NodePermissions + 'static, { @@ -96,21 +114,25 @@ where #[op2] #[serde] -pub fn op_cpus

(state: &mut OpState) -> Result, AnyError> +pub fn op_cpus

(state: &mut OpState) -> Result, OsError> where P: NodePermissions + 'static, { { let permissions = state.borrow_mut::

(); - permissions.check_sys("cpus", "node:os.cpus()")?; + permissions + .check_sys("cpus", "node:os.cpus()") + .map_err(OsError::Permission)?; } - cpus::cpu_info().ok_or_else(|| type_error("Failed to get cpu info")) + cpus::cpu_info().ok_or(OsError::FailedToGetCpuInfo) } #[op2] #[string] -pub fn op_homedir

(state: &mut OpState) -> Result, AnyError> +pub fn op_homedir

( + state: &mut OpState, +) -> Result, deno_core::error::AnyError> where P: NodePermissions + 'static, { diff --git a/ext/node/ops/os/priority.rs b/ext/node/ops/os/priority.rs index 043928e2a..9a1ebcca7 100644 --- a/ext/node/ops/os/priority.rs +++ b/ext/node/ops/os/priority.rs @@ -1,12 +1,18 @@ // Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. -use deno_core::error::AnyError; - pub use impl_::*; +#[derive(Debug, thiserror::Error)] +pub enum PriorityError { + #[error("{0}")] + Io(#[from] std::io::Error), + #[cfg(windows)] + #[error("Invalid priority")] + InvalidPriority, +} + #[cfg(unix)] mod impl_ { - use super::*; use errno::errno; use errno::set_errno; use errno::Errno; @@ -16,7 +22,7 @@ mod impl_ { const PRIORITY_HIGH: i32 = -14; // Ref: https://github.com/libuv/libuv/blob/55376b044b74db40772e8a6e24d67a8673998e02/src/unix/core.c#L1533-L1547 - pub fn get_priority(pid: u32) -> Result { + pub fn get_priority(pid: u32) -> Result { set_errno(Errno(0)); match ( // SAFETY: libc::getpriority is unsafe @@ -29,7 +35,10 @@ mod impl_ { } } - pub fn set_priority(pid: u32, priority: i32) -> Result<(), AnyError> { + pub fn set_priority( + pid: u32, + priority: i32, + ) -> Result<(), super::PriorityError> { // SAFETY: libc::setpriority is unsafe match unsafe { libc::setpriority(PRIO_PROCESS, pid as id_t, priority) } { -1 => Err(std::io::Error::last_os_error().into()), @@ -40,8 +49,6 @@ mod impl_ { #[cfg(windows)] mod impl_ { - use super::*; - use deno_core::error::type_error; use winapi::shared::minwindef::DWORD; use winapi::shared::minwindef::FALSE; use winapi::shared::ntdef::NULL; @@ -67,7 +74,7 @@ mod impl_ { const PRIORITY_HIGHEST: i32 = -20; // Ported from: https://github.com/libuv/libuv/blob/a877ca2435134ef86315326ef4ef0c16bdbabf17/src/win/util.c#L1649-L1685 - pub fn get_priority(pid: u32) -> Result { + pub fn get_priority(pid: u32) -> Result { // SAFETY: Windows API calls unsafe { let handle = if pid == 0 { @@ -95,7 +102,10 @@ mod impl_ { } // 
Ported from: https://github.com/libuv/libuv/blob/a877ca2435134ef86315326ef4ef0c16bdbabf17/src/win/util.c#L1688-L1719 - pub fn set_priority(pid: u32, priority: i32) -> Result<(), AnyError> { + pub fn set_priority( + pid: u32, + priority: i32, + ) -> Result<(), super::PriorityError> { // SAFETY: Windows API calls unsafe { let handle = if pid == 0 { @@ -109,7 +119,7 @@ mod impl_ { #[allow(clippy::manual_range_contains)] let priority_class = if priority < PRIORITY_HIGHEST || priority > PRIORITY_LOW { - return Err(type_error("Invalid priority")); + return Err(super::PriorityError::InvalidPriority); } else if priority < PRIORITY_HIGH { REALTIME_PRIORITY_CLASS } else if priority < PRIORITY_ABOVE_NORMAL { diff --git a/ext/node/ops/process.rs b/ext/node/ops/process.rs index 0992c46c6..282567226 100644 --- a/ext/node/ops/process.rs +++ b/ext/node/ops/process.rs @@ -1,6 +1,5 @@ // Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. -use deno_core::error::AnyError; use deno_core::op2; use deno_core::OpState; use deno_permissions::PermissionsContainer; @@ -51,7 +50,7 @@ pub fn op_node_process_kill( state: &mut OpState, #[smi] pid: i32, #[smi] sig: i32, -) -> Result { +) -> Result { state .borrow_mut::() .check_run_all("process.kill")?; diff --git a/ext/node/ops/require.rs b/ext/node/ops/require.rs index 7d85ee853..f4607f4e8 100644 --- a/ext/node/ops/require.rs +++ b/ext/node/ops/require.rs @@ -1,8 +1,5 @@ // Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. 
-use deno_core::anyhow::Context; -use deno_core::error::generic_error; -use deno_core::error::AnyError; use deno_core::op2; use deno_core::url::Url; use deno_core::v8; @@ -30,7 +27,7 @@ use crate::NpmResolverRc; fn ensure_read_permission<'a, P>( state: &mut OpState, file_path: &'a Path, -) -> Result, AnyError> +) -> Result, deno_core::error::AnyError> where P: NodePermissions + 'static, { @@ -39,6 +36,32 @@ where resolver.ensure_read_permission(permissions, file_path) } +#[derive(Debug, thiserror::Error)] +pub enum RequireError { + #[error(transparent)] + UrlParse(#[from] url::ParseError), + #[error(transparent)] + Permission(deno_core::error::AnyError), + #[error(transparent)] + PackageExportsResolve( + #[from] node_resolver::errors::PackageExportsResolveError, + ), + #[error(transparent)] + PackageJsonLoad(#[from] node_resolver::errors::PackageJsonLoadError), + #[error(transparent)] + ClosestPkgJson(#[from] node_resolver::errors::ClosestPkgJsonError), + #[error(transparent)] + PackageImportsResolve( + #[from] node_resolver::errors::PackageImportsResolveError, + ), + #[error("failed to convert '{0}' to file path")] + FilePathConversion(Url), + #[error(transparent)] + Fs(#[from] deno_io::fs::FsError), + #[error("Unable to get CWD: {0}")] + UnableToGetCwd(deno_io::fs::FsError), +} + #[op2] #[serde] pub fn op_require_init_paths() -> Vec { @@ -95,7 +118,7 @@ pub fn op_require_init_paths() -> Vec { pub fn op_require_node_module_paths

( state: &mut OpState, #[string] from: String, -) -> Result, AnyError> +) -> Result, RequireError> where P: NodePermissions + 'static, { @@ -104,12 +127,12 @@ where let from = if from.starts_with("file:///") { url_to_file_path(&Url::parse(&from)?)? } else { - let current_dir = - &(fs.cwd().map_err(AnyError::from)).context("Unable to get CWD")?; - deno_path_util::normalize_path(current_dir.join(from)) + let current_dir = &fs.cwd().map_err(RequireError::UnableToGetCwd)?; + normalize_path(current_dir.join(from)) }; - let from = ensure_read_permission::

(state, &from)?; + let from = ensure_read_permission::

(state, &from) + .map_err(RequireError::Permission)?; if cfg!(windows) { // return root node_modules when path is 'D:\\'. @@ -264,7 +287,7 @@ pub fn op_require_path_is_absolute(#[string] p: String) -> bool { pub fn op_require_stat

( state: &mut OpState, #[string] path: String, -) -> Result +) -> Result where P: NodePermissions + 'static, { @@ -287,12 +310,13 @@ where pub fn op_require_real_path

( state: &mut OpState, #[string] request: String, -) -> Result +) -> Result where P: NodePermissions + 'static, { let path = PathBuf::from(request); - let path = ensure_read_permission::

(state, &path)?; + let path = ensure_read_permission::

(state, &path) + .map_err(RequireError::Permission)?; let fs = state.borrow::(); let canonicalized_path = deno_path_util::strip_unc_prefix(fs.realpath_sync(&path)?); @@ -319,12 +343,14 @@ pub fn op_require_path_resolve(#[serde] parts: Vec) -> String { #[string] pub fn op_require_path_dirname( #[string] request: String, -) -> Result { +) -> Result { let p = PathBuf::from(request); if let Some(parent) = p.parent() { Ok(parent.to_string_lossy().into_owned()) } else { - Err(generic_error("Path doesn't have a parent")) + Err(deno_core::error::generic_error( + "Path doesn't have a parent", + )) } } @@ -332,12 +358,14 @@ pub fn op_require_path_dirname( #[string] pub fn op_require_path_basename( #[string] request: String, -) -> Result { +) -> Result { let p = PathBuf::from(request); if let Some(path) = p.file_name() { Ok(path.to_string_lossy().into_owned()) } else { - Err(generic_error("Path doesn't have a file name")) + Err(deno_core::error::generic_error( + "Path doesn't have a file name", + )) } } @@ -348,7 +376,7 @@ pub fn op_require_try_self_parent_path

( has_parent: bool, #[string] maybe_parent_filename: Option, #[string] maybe_parent_id: Option, -) -> Result, AnyError> +) -> Result, deno_core::error::AnyError> where P: NodePermissions + 'static, { @@ -378,7 +406,7 @@ pub fn op_require_try_self

( state: &mut OpState, #[string] parent_path: Option, #[string] request: String, -) -> Result, AnyError> +) -> Result, RequireError> where P: NodePermissions + 'static, { @@ -440,12 +468,13 @@ where pub fn op_require_read_file

( state: &mut OpState, #[string] file_path: String, -) -> Result +) -> Result where P: NodePermissions + 'static, { let file_path = PathBuf::from(file_path); - let file_path = ensure_read_permission::

(state, &file_path)?; + let file_path = ensure_read_permission::

(state, &file_path) + .map_err(RequireError::Permission)?; let fs = state.borrow::(); Ok(fs.read_text_file_lossy_sync(&file_path, None)?) } @@ -472,7 +501,7 @@ pub fn op_require_resolve_exports

( #[string] name: String, #[string] expansion: String, #[string] parent_path: String, -) -> Result, AnyError> +) -> Result, RequireError> where P: NodePermissions + 'static, { @@ -525,16 +554,14 @@ where pub fn op_require_read_closest_package_json

( state: &mut OpState, #[string] filename: String, -) -> Result, AnyError> +) -> Result, node_resolver::errors::ClosestPkgJsonError> where P: NodePermissions + 'static, { let filename = PathBuf::from(filename); // permissions: allow reading the closest package.json files let node_resolver = state.borrow::().clone(); - node_resolver - .get_closest_package_json_from_path(&filename) - .map_err(AnyError::from) + node_resolver.get_closest_package_json_from_path(&filename) } #[op2] @@ -564,12 +591,13 @@ pub fn op_require_package_imports_resolve

( state: &mut OpState, #[string] referrer_filename: String, #[string] request: String, -) -> Result, AnyError> +) -> Result, RequireError> where P: NodePermissions + 'static, { let referrer_path = PathBuf::from(&referrer_filename); - let referrer_path = ensure_read_permission::

(state, &referrer_path)?; + let referrer_path = ensure_read_permission::

(state, &referrer_path) + .map_err(RequireError::Permission)?; let node_resolver = state.borrow::(); let Some(pkg) = node_resolver.get_closest_package_json_from_path(&referrer_path)? @@ -578,8 +606,7 @@ where }; if pkg.imports.is_some() { - let referrer_url = - deno_core::url::Url::from_file_path(&referrer_filename).unwrap(); + let referrer_url = Url::from_file_path(&referrer_filename).unwrap(); let url = node_resolver.package_imports_resolve( &request, Some(&referrer_url), @@ -604,17 +631,15 @@ pub fn op_require_break_on_next_statement(state: Rc>) { inspector.wait_for_session_and_break_on_next_statement() } -fn url_to_file_path_string(url: &Url) -> Result { +fn url_to_file_path_string(url: &Url) -> Result { let file_path = url_to_file_path(url)?; Ok(file_path.to_string_lossy().into_owned()) } -fn url_to_file_path(url: &Url) -> Result { +fn url_to_file_path(url: &Url) -> Result { match url.to_file_path() { Ok(file_path) => Ok(file_path), - Err(()) => { - deno_core::anyhow::bail!("failed to convert '{}' to file path", url) - } + Err(()) => Err(RequireError::FilePathConversion(url.clone())), } } diff --git a/ext/node/ops/util.rs b/ext/node/ops/util.rs index 533d51c92..1c177ac04 100644 --- a/ext/node/ops/util.rs +++ b/ext/node/ops/util.rs @@ -1,6 +1,5 @@ // Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. -use deno_core::error::AnyError; use deno_core::op2; use deno_core::OpState; use deno_core::ResourceHandle; @@ -22,7 +21,7 @@ enum HandleType { pub fn op_node_guess_handle_type( state: &mut OpState, rid: u32, -) -> Result { +) -> Result { let handle = state.resource_table.get_handle(rid)?; let handle_type = match handle { diff --git a/ext/node/ops/v8.rs b/ext/node/ops/v8.rs index 8813d2e18..61f67f11f 100644 --- a/ext/node/ops/v8.rs +++ b/ext/node/ops/v8.rs @@ -1,7 +1,5 @@ // Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. 
-use deno_core::error::generic_error; -use deno_core::error::type_error; -use deno_core::error::AnyError; + use deno_core::op2; use deno_core::v8; use deno_core::FastString; @@ -206,10 +204,9 @@ pub fn op_v8_write_value( scope: &mut v8::HandleScope, #[cppgc] ser: &Serializer, value: v8::Local, -) -> Result<(), AnyError> { +) { let context = scope.get_current_context(); ser.inner.write_value(context, value); - Ok(()) } struct DeserBuffer { @@ -271,11 +268,13 @@ pub fn op_v8_new_deserializer( scope: &mut v8::HandleScope, obj: v8::Local, buffer: v8::Local, -) -> Result, AnyError> { +) -> Result, deno_core::error::AnyError> { let offset = buffer.byte_offset(); let len = buffer.byte_length(); let backing_store = buffer.get_backing_store().ok_or_else(|| { - generic_error("deserialization buffer has no backing store") + deno_core::error::generic_error( + "deserialization buffer has no backing store", + ) })?; let (buf_slice, buf_ptr) = if let Some(data) = backing_store.data() { // SAFETY: the offset is valid for the underlying buffer because we're getting it directly from v8 @@ -317,10 +316,10 @@ pub fn op_v8_transfer_array_buffer_de( #[op2(fast)] pub fn op_v8_read_double( #[cppgc] deser: &Deserializer, -) -> Result { +) -> Result { let mut double = 0f64; if !deser.inner.read_double(&mut double) { - return Err(type_error("ReadDouble() failed")); + return Err(deno_core::error::type_error("ReadDouble() failed")); } Ok(double) } @@ -355,10 +354,10 @@ pub fn op_v8_read_raw_bytes( #[op2(fast)] pub fn op_v8_read_uint32( #[cppgc] deser: &Deserializer, -) -> Result { +) -> Result { let mut value = 0; if !deser.inner.read_uint32(&mut value) { - return Err(type_error("ReadUint32() failed")); + return Err(deno_core::error::type_error("ReadUint32() failed")); } Ok(value) @@ -368,10 +367,10 @@ pub fn op_v8_read_uint32( #[serde] pub fn op_v8_read_uint64( #[cppgc] deser: &Deserializer, -) -> Result<(u32, u32), AnyError> { +) -> Result<(u32, u32), deno_core::error::AnyError> { let mut 
val = 0; if !deser.inner.read_uint64(&mut val) { - return Err(type_error("ReadUint64() failed")); + return Err(deno_core::error::type_error("ReadUint64() failed")); } Ok(((val >> 32) as u32, val as u32)) diff --git a/ext/node/ops/worker_threads.rs b/ext/node/ops/worker_threads.rs index 5f139c5dc..e33cf9157 100644 --- a/ext/node/ops/worker_threads.rs +++ b/ext/node/ops/worker_threads.rs @@ -1,7 +1,5 @@ // Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. -use deno_core::error::generic_error; -use deno_core::error::AnyError; use deno_core::op2; use deno_core::url::Url; use deno_core::OpState; @@ -19,7 +17,7 @@ use crate::NodeResolverRc; fn ensure_read_permission<'a, P>( state: &mut OpState, file_path: &'a Path, -) -> Result, AnyError> +) -> Result, deno_core::error::AnyError> where P: NodePermissions + 'static, { @@ -28,12 +26,36 @@ where resolver.ensure_read_permission(permissions, file_path) } +#[derive(Debug, thiserror::Error)] +pub enum WorkerThreadsFilenameError { + #[error(transparent)] + Permission(deno_core::error::AnyError), + #[error("{0}")] + UrlParse(#[from] url::ParseError), + #[error("Relative path entries must start with '.' or '..'")] + InvalidRelativeUrl, + #[error("URL from Path-String")] + UrlFromPathString, + #[error("URL to Path-String")] + UrlToPathString, + #[error("URL to Path")] + UrlToPath, + #[error("File not found [{0:?}]")] + FileNotFound(PathBuf), + #[error("Neither ESM nor CJS")] + NeitherEsmNorCjs, + #[error("{0}")] + UrlToNodeResolution(node_resolver::errors::UrlToNodeResolutionError), + #[error(transparent)] + Fs(#[from] deno_io::fs::FsError), +} + #[op2] #[string] pub fn op_worker_threads_filename

( state: &mut OpState, #[string] specifier: String, -) -> Result +) -> Result where P: NodePermissions + 'static, { @@ -45,40 +67,47 @@ where } else { let path = PathBuf::from(&specifier); if path.is_relative() && !specifier.starts_with('.') { - return Err(generic_error( - "Relative path entries must start with '.' or '..'", - )); + return Err(WorkerThreadsFilenameError::InvalidRelativeUrl); } - let path = ensure_read_permission::

(state, &path)?; + let path = ensure_read_permission::

(state, &path) + .map_err(WorkerThreadsFilenameError::Permission)?; let fs = state.borrow::(); let canonicalized_path = deno_path_util::strip_unc_prefix(fs.realpath_sync(&path)?); Url::from_file_path(canonicalized_path) - .map_err(|e| generic_error(format!("URL from Path-String: {:#?}", e)))? + .map_err(|_| WorkerThreadsFilenameError::UrlFromPathString)? }; let url_path = url .to_file_path() - .map_err(|e| generic_error(format!("URL to Path-String: {:#?}", e)))?; - let url_path = ensure_read_permission::

(state, &url_path)?; + .map_err(|_| WorkerThreadsFilenameError::UrlToPathString)?; + let url_path = ensure_read_permission::

(state, &url_path) + .map_err(WorkerThreadsFilenameError::Permission)?; let fs = state.borrow::(); if !fs.exists_sync(&url_path) { - return Err(generic_error(format!("File not found [{:?}]", url_path))); + return Err(WorkerThreadsFilenameError::FileNotFound( + url_path.to_path_buf(), + )); } let node_resolver = state.borrow::(); - match node_resolver.url_to_node_resolution(url)? { + match node_resolver + .url_to_node_resolution(url) + .map_err(WorkerThreadsFilenameError::UrlToNodeResolution)? + { NodeResolution::Esm(u) => Ok(u.to_string()), NodeResolution::CommonJs(u) => wrap_cjs(u), - NodeResolution::BuiltIn(_) => Err(generic_error("Neither ESM nor CJS")), + NodeResolution::BuiltIn(_) => { + Err(WorkerThreadsFilenameError::NeitherEsmNorCjs) + } } } /// /// Wrap a CJS file-URL and the required setup in a stringified `data:`-URL /// -fn wrap_cjs(url: Url) -> Result { +fn wrap_cjs(url: Url) -> Result { let path = url .to_file_path() - .map_err(|e| generic_error(format!("URL to Path: {:#?}", e)))?; + .map_err(|_| WorkerThreadsFilenameError::UrlToPath)?; let filename = path.file_name().unwrap().to_string_lossy(); Ok(format!( "data:text/javascript,import {{ createRequire }} from \"node:module\";\ diff --git a/ext/node/ops/zlib/brotli.rs b/ext/node/ops/zlib/brotli.rs index 3e3905fc3..1a681ff7f 100644 --- a/ext/node/ops/zlib/brotli.rs +++ b/ext/node/ops/zlib/brotli.rs @@ -9,8 +9,6 @@ use brotli::BrotliDecompressStream; use brotli::BrotliResult; use brotli::BrotliState; use brotli::Decompressor; -use deno_core::error::type_error; -use deno_core::error::AnyError; use deno_core::op2; use deno_core::JsBuffer; use deno_core::OpState; @@ -19,7 +17,23 @@ use deno_core::ToJsBuffer; use std::cell::RefCell; use std::io::Read; -fn encoder_mode(mode: u32) -> Result { +#[derive(Debug, thiserror::Error)] +pub enum BrotliError { + #[error("Invalid encoder mode")] + InvalidEncoderMode, + #[error("Failed to compress")] + CompressFailed, + #[error("Failed to decompress")] + 
DecompressFailed, + #[error(transparent)] + Join(#[from] tokio::task::JoinError), + #[error(transparent)] + Resource(deno_core::error::AnyError), + #[error("{0}")] + Io(std::io::Error), +} + +fn encoder_mode(mode: u32) -> Result { Ok(match mode { 0 => BrotliEncoderMode::BROTLI_MODE_GENERIC, 1 => BrotliEncoderMode::BROTLI_MODE_TEXT, @@ -28,7 +42,7 @@ fn encoder_mode(mode: u32) -> Result { 4 => BrotliEncoderMode::BROTLI_FORCE_MSB_PRIOR, 5 => BrotliEncoderMode::BROTLI_FORCE_UTF8_PRIOR, 6 => BrotliEncoderMode::BROTLI_FORCE_SIGNED_PRIOR, - _ => return Err(type_error("Invalid encoder mode")), + _ => return Err(BrotliError::InvalidEncoderMode), }) } @@ -40,7 +54,7 @@ pub fn op_brotli_compress( #[smi] quality: i32, #[smi] lgwin: i32, #[smi] mode: u32, -) -> Result { +) -> Result { let mode = encoder_mode(mode)?; let mut out_size = out.len(); @@ -57,7 +71,7 @@ pub fn op_brotli_compress( &mut |_, _, _, _| (), ); if result != 1 { - return Err(type_error("Failed to compress")); + return Err(BrotliError::CompressFailed); } Ok(out_size) @@ -87,7 +101,7 @@ pub async fn op_brotli_compress_async( #[smi] quality: i32, #[smi] lgwin: i32, #[smi] mode: u32, -) -> Result { +) -> Result { let mode = encoder_mode(mode)?; tokio::task::spawn_blocking(move || { let input = &*input; @@ -107,7 +121,7 @@ pub async fn op_brotli_compress_async( &mut |_, _, _, _| (), ); if result != 1 { - return Err(type_error("Failed to compress")); + return Err(BrotliError::CompressFailed); } out.truncate(out_size); @@ -151,8 +165,11 @@ pub fn op_brotli_compress_stream( #[smi] rid: u32, #[buffer] input: &[u8], #[buffer] output: &mut [u8], -) -> Result { - let ctx = state.resource_table.get::(rid)?; +) -> Result { + let ctx = state + .resource_table + .get::(rid) + .map_err(BrotliError::Resource)?; let mut inst = ctx.inst.borrow_mut(); let mut output_offset = 0; @@ -168,7 +185,7 @@ pub fn op_brotli_compress_stream( &mut |_, _, _, _| (), ); if !result { - return Err(type_error("Failed to compress")); + return 
Err(BrotliError::CompressFailed); } Ok(output_offset) @@ -180,8 +197,11 @@ pub fn op_brotli_compress_stream_end( state: &mut OpState, #[smi] rid: u32, #[buffer] output: &mut [u8], -) -> Result { - let ctx = state.resource_table.get::(rid)?; +) -> Result { + let ctx = state + .resource_table + .get::(rid) + .map_err(BrotliError::Resource)?; let mut inst = ctx.inst.borrow_mut(); let mut output_offset = 0; @@ -197,13 +217,13 @@ pub fn op_brotli_compress_stream_end( &mut |_, _, _, _| (), ); if !result { - return Err(type_error("Failed to compress")); + return Err(BrotliError::CompressFailed); } Ok(output_offset) } -fn brotli_decompress(buffer: &[u8]) -> Result { +fn brotli_decompress(buffer: &[u8]) -> Result { let mut output = Vec::with_capacity(4096); let mut decompressor = Decompressor::new(buffer, buffer.len()); decompressor.read_to_end(&mut output)?; @@ -214,7 +234,7 @@ fn brotli_decompress(buffer: &[u8]) -> Result { #[serde] pub fn op_brotli_decompress( #[buffer] buffer: &[u8], -) -> Result { +) -> Result { brotli_decompress(buffer) } @@ -222,8 +242,11 @@ pub fn op_brotli_decompress( #[serde] pub async fn op_brotli_decompress_async( #[buffer] buffer: JsBuffer, -) -> Result { - tokio::task::spawn_blocking(move || brotli_decompress(&buffer)).await? +) -> Result { + tokio::task::spawn_blocking(move || { + brotli_decompress(&buffer).map_err(BrotliError::Io) + }) + .await? 
} struct BrotliDecompressCtx { @@ -252,8 +275,11 @@ pub fn op_brotli_decompress_stream( #[smi] rid: u32, #[buffer] input: &[u8], #[buffer] output: &mut [u8], -) -> Result { - let ctx = state.resource_table.get::(rid)?; +) -> Result { + let ctx = state + .resource_table + .get::(rid) + .map_err(BrotliError::Resource)?; let mut inst = ctx.inst.borrow_mut(); let mut output_offset = 0; @@ -268,7 +294,7 @@ pub fn op_brotli_decompress_stream( &mut inst, ); if matches!(result, BrotliResult::ResultFailure) { - return Err(type_error("Failed to decompress")); + return Err(BrotliError::DecompressFailed); } Ok(output_offset) @@ -280,8 +306,11 @@ pub fn op_brotli_decompress_stream_end( state: &mut OpState, #[smi] rid: u32, #[buffer] output: &mut [u8], -) -> Result { - let ctx = state.resource_table.get::(rid)?; +) -> Result { + let ctx = state + .resource_table + .get::(rid) + .map_err(BrotliError::Resource)?; let mut inst = ctx.inst.borrow_mut(); let mut output_offset = 0; @@ -296,7 +325,7 @@ pub fn op_brotli_decompress_stream_end( &mut inst, ); if matches!(result, BrotliResult::ResultFailure) { - return Err(type_error("Failed to decompress")); + return Err(BrotliError::DecompressFailed); } Ok(output_offset) diff --git a/ext/node/ops/zlib/mod.rs b/ext/node/ops/zlib/mod.rs index b1d6d21d2..e75ef050d 100644 --- a/ext/node/ops/zlib/mod.rs +++ b/ext/node/ops/zlib/mod.rs @@ -1,6 +1,5 @@ // Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. 
-use deno_core::error::type_error; -use deno_core::error::AnyError; + use deno_core::op2; use std::borrow::Cow; use std::cell::RefCell; @@ -8,7 +7,7 @@ use zlib::*; mod alloc; pub mod brotli; -mod mode; +pub mod mode; mod stream; use mode::Flush; @@ -17,11 +16,11 @@ use mode::Mode; use self::stream::StreamWrapper; #[inline] -fn check(condition: bool, msg: &str) -> Result<(), AnyError> { +fn check(condition: bool, msg: &str) -> Result<(), deno_core::error::AnyError> { if condition { Ok(()) } else { - Err(type_error(msg.to_string())) + Err(deno_core::error::type_error(msg.to_string())) } } @@ -56,7 +55,7 @@ impl ZlibInner { out_off: u32, out_len: u32, flush: Flush, - ) -> Result<(), AnyError> { + ) -> Result<(), deno_core::error::AnyError> { check(self.init_done, "write before init")?; check(!self.write_in_progress, "write already in progress")?; check(!self.pending_close, "close already in progress")?; @@ -65,11 +64,11 @@ impl ZlibInner { let next_in = input .get(in_off as usize..in_off as usize + in_len as usize) - .ok_or_else(|| type_error("invalid input range"))? + .ok_or_else(|| deno_core::error::type_error("invalid input range"))? .as_ptr() as *mut _; let next_out = out .get_mut(out_off as usize..out_off as usize + out_len as usize) - .ok_or_else(|| type_error("invalid output range"))? + .ok_or_else(|| deno_core::error::type_error("invalid output range"))? 
.as_mut_ptr(); self.strm.avail_in = in_len; @@ -81,7 +80,10 @@ impl ZlibInner { Ok(()) } - fn do_write(&mut self, flush: Flush) -> Result<(), AnyError> { + fn do_write( + &mut self, + flush: Flush, + ) -> Result<(), deno_core::error::AnyError> { self.flush = flush; match self.mode { Mode::Deflate | Mode::Gzip | Mode::DeflateRaw => { @@ -127,7 +129,7 @@ impl ZlibInner { self.mode = Mode::Inflate; } } else if next_expected_header_byte.is_some() { - return Err(type_error( + return Err(deno_core::error::type_error( "invalid number of gzip magic number bytes read", )); } @@ -181,7 +183,7 @@ impl ZlibInner { Ok(()) } - fn init_stream(&mut self) -> Result<(), AnyError> { + fn init_stream(&mut self) -> Result<(), deno_core::error::AnyError> { match self.mode { Mode::Gzip | Mode::Gunzip => self.window_bits += 16, Mode::Unzip => self.window_bits += 32, @@ -199,7 +201,7 @@ impl ZlibInner { Mode::Inflate | Mode::Gunzip | Mode::InflateRaw | Mode::Unzip => { self.strm.inflate_init(self.window_bits) } - Mode::None => return Err(type_error("Unknown mode")), + Mode::None => return Err(deno_core::error::type_error("Unknown mode")), }; self.write_in_progress = false; @@ -208,7 +210,7 @@ impl ZlibInner { Ok(()) } - fn close(&mut self) -> Result { + fn close(&mut self) -> Result { if self.write_in_progress { self.pending_close = true; return Ok(false); @@ -222,10 +224,8 @@ impl ZlibInner { Ok(true) } - fn reset_stream(&mut self) -> Result<(), AnyError> { + fn reset_stream(&mut self) { self.err = self.strm.reset(self.mode); - - Ok(()) } } @@ -243,7 +243,7 @@ impl deno_core::Resource for Zlib { #[op2] #[cppgc] -pub fn op_zlib_new(#[smi] mode: i32) -> Result { +pub fn op_zlib_new(#[smi] mode: i32) -> Result { let mode = Mode::try_from(mode)?; let inner = ZlibInner { @@ -256,12 +256,20 @@ pub fn op_zlib_new(#[smi] mode: i32) -> Result { }) } +#[derive(Debug, thiserror::Error)] +pub enum ZlibError { + #[error("zlib not initialized")] + NotInitialized, + #[error(transparent)] + Mode(#[from] 
mode::ModeError), + #[error(transparent)] + Other(#[from] deno_core::error::AnyError), +} + #[op2(fast)] -pub fn op_zlib_close(#[cppgc] resource: &Zlib) -> Result<(), AnyError> { +pub fn op_zlib_close(#[cppgc] resource: &Zlib) -> Result<(), ZlibError> { let mut resource = resource.inner.borrow_mut(); - let zlib = resource - .as_mut() - .ok_or_else(|| type_error("zlib not initialized"))?; + let zlib = resource.as_mut().ok_or(ZlibError::NotInitialized)?; // If there is a pending write, defer the close until the write is done. zlib.close()?; @@ -282,11 +290,9 @@ pub fn op_zlib_write( #[smi] out_off: u32, #[smi] out_len: u32, #[buffer] result: &mut [u32], -) -> Result { +) -> Result { let mut zlib = resource.inner.borrow_mut(); - let zlib = zlib - .as_mut() - .ok_or_else(|| type_error("zlib not initialized"))?; + let zlib = zlib.as_mut().ok_or(ZlibError::NotInitialized)?; let flush = Flush::try_from(flush)?; zlib.start_write(input, in_off, in_len, out, out_off, out_len, flush)?; @@ -307,11 +313,9 @@ pub fn op_zlib_init( #[smi] mem_level: i32, #[smi] strategy: i32, #[buffer] dictionary: &[u8], -) -> Result { +) -> Result { let mut zlib = resource.inner.borrow_mut(); - let zlib = zlib - .as_mut() - .ok_or_else(|| type_error("zlib not initialized"))?; + let zlib = zlib.as_mut().ok_or(ZlibError::NotInitialized)?; check((8..=15).contains(&window_bits), "invalid windowBits")?; check((-1..=9).contains(&level), "invalid level")?; @@ -348,13 +352,11 @@ pub fn op_zlib_init( #[op2(fast)] #[smi] -pub fn op_zlib_reset(#[cppgc] resource: &Zlib) -> Result { +pub fn op_zlib_reset(#[cppgc] resource: &Zlib) -> Result { let mut zlib = resource.inner.borrow_mut(); - let zlib = zlib - .as_mut() - .ok_or_else(|| type_error("zlib not initialized"))?; + let zlib = zlib.as_mut().ok_or(ZlibError::NotInitialized)?; - zlib.reset_stream()?; + zlib.reset_stream(); Ok(zlib.err) } @@ -362,12 +364,10 @@ pub fn op_zlib_reset(#[cppgc] resource: &Zlib) -> Result { #[op2(fast)] pub fn 
op_zlib_close_if_pending( #[cppgc] resource: &Zlib, -) -> Result<(), AnyError> { +) -> Result<(), ZlibError> { let pending_close = { let mut zlib = resource.inner.borrow_mut(); - let zlib = zlib - .as_mut() - .ok_or_else(|| type_error("zlib not initialized"))?; + let zlib = zlib.as_mut().ok_or(ZlibError::NotInitialized)?; zlib.write_in_progress = false; zlib.pending_close diff --git a/ext/node/ops/zlib/mode.rs b/ext/node/ops/zlib/mode.rs index 753300cc4..41565f9b1 100644 --- a/ext/node/ops/zlib/mode.rs +++ b/ext/node/ops/zlib/mode.rs @@ -1,19 +1,8 @@ // Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. -#[derive(Debug)] -pub enum Error { - BadArgument, -} - -impl std::fmt::Display for Error { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - Error::BadArgument => write!(f, "bad argument"), - } - } -} - -impl std::error::Error for Error {} +#[derive(Debug, thiserror::Error)] +#[error("bad argument")] +pub struct ModeError; macro_rules! repr_i32 { ($(#[$meta:meta])* $vis:vis enum $name:ident { @@ -25,12 +14,12 @@ macro_rules! repr_i32 { } impl core::convert::TryFrom for $name { - type Error = Error; + type Error = ModeError; fn try_from(v: i32) -> Result { match v { $(x if x == $name::$vname as i32 => Ok($name::$vname),)* - _ => Err(Error::BadArgument), + _ => Err(ModeError), } } } -- cgit v1.2.3 From 8dd6177c624649d75ffcacca77e7c4f48cea07a2 Mon Sep 17 00:00:00 2001 From: Nicola Bovolato <61934734+nicolabovolato@users.noreply.github.com> Date: Fri, 25 Oct 2024 00:02:26 +0200 Subject: fix(ext/node): refactor http.ServerResponse into function class (#26210) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit While testing, I found out that light-my-request relies on `ServerResponse.connection`, which is deprecated, so I added that and `socket`, the non deprecated property. 
It also relies on an undocumented `_header` property, apparently for [raw header processing](https://github.com/fastify/light-my-request/blob/v6.1.0/lib/response.js#L180-L186). I added it as an empty string, feel free to provide other approaches. Fixes #19901 Co-authored-by: Bartek Iwańczuk --- ext/node/polyfills/http.ts | 524 ++++++++++++++++++++++++++++----------------- 1 file changed, 324 insertions(+), 200 deletions(-) (limited to 'ext') diff --git a/ext/node/polyfills/http.ts b/ext/node/polyfills/http.ts index 20bef3009..e7a860ff1 100644 --- a/ext/node/polyfills/http.ts +++ b/ext/node/polyfills/http.ts @@ -34,6 +34,7 @@ import { finished, Readable as NodeReadable, Writable as NodeWritable, + WritableOptions as NodeWritableOptions, } from "node:stream"; import { kUniqueHeaders, @@ -70,6 +71,7 @@ import { resourceForReadableStream } from "ext:deno_web/06_streams.js"; import { UpgradedConn } from "ext:deno_net/01_net.js"; import { STATUS_CODES } from "node:_http_server"; import { methods as METHODS } from "node:_http_common"; +import { deprecate } from "node:util"; const { internalRidSymbol } = core; const { ArrayIsArray, StringPrototypeToLowerCase } = primordials; @@ -1184,49 +1186,95 @@ function onError(self, error, cb) { } } -export class ServerResponse extends NodeWritable { - statusCode = 200; - statusMessage?: string = undefined; - #headers: Record = { __proto__: null }; - #hasNonStringHeaders: boolean = false; - #readable: ReadableStream; - override writable = true; - // used by `npm:on-finished` - finished = false; - headersSent = false; - #resolve: (value: Response | PromiseLike) => void; +export type ServerResponse = { + statusCode: number; + statusMessage?: string; + + _headers: Record; + _hasNonStringHeaders: boolean; + + _readable: ReadableStream; + finished: boolean; + headersSent: boolean; + _resolve: (value: Response | PromiseLike) => void; + // deno-lint-ignore no-explicit-any + _socketOverride: any | null; // deno-lint-ignore no-explicit-any - 
#socketOverride: any | null = null; + socket: any | null; - static #enqueue(controller: ReadableStreamDefaultController, chunk: Chunk) { - try { - if (typeof chunk === "string") { - controller.enqueue(ENCODER.encode(chunk)); - } else { - controller.enqueue(chunk); - } - } catch (_) { - // The stream might have been closed. Ignore the error. - } - } + setHeader(name: string, value: string | string[]): void; + appendHeader(name: string, value: string | string[]): void; + getHeader(name: string): string | string[]; + removeHeader(name: string): void; + getHeaderNames(): string[]; + getHeaders(): Record; + hasHeader(name: string): boolean; - /** Returns true if the response body should be null with the given - * http status code */ - static #bodyShouldBeNull(status: number) { - return status === 101 || status === 204 || status === 205 || status === 304; - } + writeHead( + status: number, + statusMessage?: string, + headers?: + | Record + | Array<[string, string]>, + ): void; + writeHead( + status: number, + headers?: + | Record + | Array<[string, string]>, + ): void; - constructor( + _ensureHeaders(singleChunk?: Chunk): void; + + respond(final: boolean, singleChunk?: Chunk): void; + // deno-lint-ignore no-explicit-any + end(chunk?: any, encoding?: any, cb?: any): void; + + flushHeaders(): void; + _implicitHeader(): void; + + // Undocumented field used by `npm:light-my-request`. 
+ _header: string; + + assignSocket(socket): void; + detachSocket(socket): void; +} & { -readonly [K in keyof NodeWritable]: NodeWritable[K] }; + +type ServerResponseStatic = { + new ( resolve: (value: Response | PromiseLike) => void, socket: FakeSocket, - ) { - let controller: ReadableByteStreamController; - const readable = new ReadableStream({ - start(c) { - controller = c as ReadableByteStreamController; - }, - }); - super({ + ): ServerResponse; + _enqueue(controller: ReadableStreamDefaultController, chunk: Chunk): void; + _bodyShouldBeNull(statusCode: number): boolean; +}; + +export const ServerResponse = function ( + this: ServerResponse, + resolve: (value: Response | PromiseLike) => void, + socket: FakeSocket, +) { + this.statusCode = 200; + this.statusMessage = undefined; + this._headers = { __proto__: null }; + this._hasNonStringHeaders = false; + this.writable = true; + + // used by `npm:on-finished` + this.finished = false; + this.headersSent = false; + this._socketOverride = null; + + let controller: ReadableByteStreamController; + const readable = new ReadableStream({ + start(c) { + controller = c as ReadableByteStreamController; + }, + }); + + NodeWritable.call( + this, + { autoDestroy: true, defaultEncoding: "utf-8", emitClose: true, @@ -1235,16 +1283,16 @@ export class ServerResponse extends NodeWritable { write: (chunk, encoding, cb) => { // Writes chunks are directly written to the socket if // one is assigned via assignSocket() - if (this.#socketOverride && this.#socketOverride.writable) { - this.#socketOverride.write(chunk, encoding); + if (this._socketOverride && this._socketOverride.writable) { + this._socketOverride.write(chunk, encoding); return cb(); } if (!this.headersSent) { - ServerResponse.#enqueue(controller, chunk); + ServerResponse._enqueue(controller, chunk); this.respond(false); return cb(); } - ServerResponse.#enqueue(controller, chunk); + ServerResponse._enqueue(controller, chunk); return cb(); }, final: (cb) => { @@ -1260,193 
+1308,269 @@ export class ServerResponse extends NodeWritable { } return cb(null); }, - }); - this.#readable = readable; - this.#resolve = resolve; - this.socket = socket; + } satisfies NodeWritableOptions, + ); + + this._readable = readable; + this._resolve = resolve; + this.socket = socket; + + this._header = ""; +} as unknown as ServerResponseStatic; + +Object.setPrototypeOf(ServerResponse.prototype, NodeWritable.prototype); +Object.setPrototypeOf(ServerResponse, NodeWritable); + +ServerResponse._enqueue = function ( + this: ServerResponse, + controller: ReadableStreamDefaultController, + chunk: Chunk, +) { + try { + if (typeof chunk === "string") { + controller.enqueue(ENCODER.encode(chunk)); + } else { + controller.enqueue(chunk); + } + } catch (_) { + // The stream might have been closed. Ignore the error. } +}; - setHeader(name: string, value: string | string[]) { - if (Array.isArray(value)) { - this.#hasNonStringHeaders = true; - } - this.#headers[StringPrototypeToLowerCase(name)] = value; - return this; +/** Returns true if the response body should be null with the given + * http status code */ +ServerResponse._bodyShouldBeNull = function ( + this: ServerResponse, + status: number, +) { + return status === 101 || status === 204 || status === 205 || status === 304; +}; + +ServerResponse.prototype.setHeader = function ( + this: ServerResponse, + name: string, + value: string | string[], +) { + if (Array.isArray(value)) { + this._hasNonStringHeaders = true; } + this._headers[StringPrototypeToLowerCase(name)] = value; + return this; +}; - appendHeader(name: string, value: string | string[]) { - const key = StringPrototypeToLowerCase(name); - if (this.#headers[key] === undefined) { - if (Array.isArray(value)) this.#hasNonStringHeaders = true; - this.#headers[key] = value; +ServerResponse.prototype.appendHeader = function ( + this: ServerResponse, + name: string, + value: string | string[], +) { + const key = StringPrototypeToLowerCase(name); + if 
(this._headers[key] === undefined) { + if (Array.isArray(value)) this._hasNonStringHeaders = true; + this._headers[key] = value; + } else { + this._hasNonStringHeaders = true; + if (!Array.isArray(this._headers[key])) { + this._headers[key] = [this._headers[key]]; + } + const header = this._headers[key]; + if (Array.isArray(value)) { + header.push(...value); } else { - this.#hasNonStringHeaders = true; - if (!Array.isArray(this.#headers[key])) { - this.#headers[key] = [this.#headers[key]]; - } - const header = this.#headers[key]; - if (Array.isArray(value)) { - header.push(...value); - } else { - header.push(value); - } + header.push(value); } - return this; } + return this; +}; - getHeader(name: string) { - return this.#headers[StringPrototypeToLowerCase(name)]; - } - removeHeader(name: string) { - delete this.#headers[StringPrototypeToLowerCase(name)]; - } - getHeaderNames() { - return Object.keys(this.#headers); - } - getHeaders(): Record { - // @ts-ignore Ignore null __proto__ - return { __proto__: null, ...this.#headers }; - } - hasHeader(name: string) { - return Object.hasOwn(this.#headers, name); - } +ServerResponse.prototype.getHeader = function ( + this: ServerResponse, + name: string, +) { + return this._headers[StringPrototypeToLowerCase(name)]; +}; - writeHead( - status: number, - statusMessage?: string, - headers?: - | Record - | Array<[string, string]>, - ): this; - writeHead( - status: number, - headers?: - | Record - | Array<[string, string]>, - ): this; - writeHead( - status: number, - statusMessageOrHeaders?: - | string - | Record - | Array<[string, string]>, - maybeHeaders?: - | Record - | Array<[string, string]>, - ): this { - this.statusCode = status; - - let headers = null; - if (typeof statusMessageOrHeaders === "string") { - this.statusMessage = statusMessageOrHeaders; - if (maybeHeaders !== undefined) { - headers = maybeHeaders; - } - } else if (statusMessageOrHeaders !== undefined) { - headers = statusMessageOrHeaders; - } 
+ServerResponse.prototype.removeHeader = function ( + this: ServerResponse, + name: string, +) { + delete this._headers[StringPrototypeToLowerCase(name)]; +}; - if (headers !== null) { - if (ArrayIsArray(headers)) { - headers = headers as Array<[string, string]>; - for (let i = 0; i < headers.length; i++) { - this.appendHeader(headers[i][0], headers[i][1]); - } - } else { - headers = headers as Record; - for (const k in headers) { - if (Object.hasOwn(headers, k)) { - this.setHeader(k, headers[k]); - } +ServerResponse.prototype.getHeaderNames = function (this: ServerResponse) { + return Object.keys(this._headers); +}; + +ServerResponse.prototype.getHeaders = function ( + this: ServerResponse, +): Record { + return { __proto__: null, ...this._headers }; +}; + +ServerResponse.prototype.hasHeader = function ( + this: ServerResponse, + name: string, +) { + return Object.hasOwn(this._headers, name); +}; + +ServerResponse.prototype.writeHead = function ( + this: ServerResponse, + status: number, + statusMessageOrHeaders?: + | string + | Record + | Array<[string, string]>, + maybeHeaders?: + | Record + | Array<[string, string]>, +) { + this.statusCode = status; + + let headers = null; + if (typeof statusMessageOrHeaders === "string") { + this.statusMessage = statusMessageOrHeaders; + if (maybeHeaders !== undefined) { + headers = maybeHeaders; + } + } else if (statusMessageOrHeaders !== undefined) { + headers = statusMessageOrHeaders; + } + + if (headers !== null) { + if (ArrayIsArray(headers)) { + headers = headers as Array<[string, string]>; + for (let i = 0; i < headers.length; i++) { + this.appendHeader(headers[i][0], headers[i][1]); + } + } else { + headers = headers as Record; + for (const k in headers) { + if (Object.hasOwn(headers, k)) { + this.setHeader(k, headers[k]); } } } + } - return this; + return this; +}; + +ServerResponse.prototype._ensureHeaders = function ( + this: ServerResponse, + singleChunk?: Chunk, +) { + if (this.statusCode === 200 && 
this.statusMessage === undefined) { + this.statusMessage = "OK"; + } + if (typeof singleChunk === "string" && !this.hasHeader("content-type")) { + this.setHeader("content-type", "text/plain;charset=UTF-8"); } +}; - #ensureHeaders(singleChunk?: Chunk) { - if (this.statusCode === 200 && this.statusMessage === undefined) { - this.statusMessage = "OK"; - } - if ( - typeof singleChunk === "string" && - !this.hasHeader("content-type") - ) { - this.setHeader("content-type", "text/plain;charset=UTF-8"); - } - } - - respond(final: boolean, singleChunk?: Chunk) { - this.headersSent = true; - this.#ensureHeaders(singleChunk); - let body = singleChunk ?? (final ? null : this.#readable); - if (ServerResponse.#bodyShouldBeNull(this.statusCode)) { - body = null; - } - let headers: Record | [string, string][] = this - .#headers as Record; - if (this.#hasNonStringHeaders) { - headers = []; - // Guard is not needed as this is a null prototype object. - // deno-lint-ignore guard-for-in - for (const key in this.#headers) { - const entry = this.#headers[key]; - if (Array.isArray(entry)) { - for (const value of entry) { - headers.push([key, value]); - } - } else { - headers.push([key, entry]); +ServerResponse.prototype.respond = function ( + this: ServerResponse, + final: boolean, + singleChunk?: Chunk, +) { + this.headersSent = true; + this._ensureHeaders(singleChunk); + let body = singleChunk ?? (final ? null : this._readable); + if (ServerResponse._bodyShouldBeNull(this.statusCode)) { + body = null; + } + let headers: Record | [string, string][] = this + ._headers as Record; + if (this._hasNonStringHeaders) { + headers = []; + // Guard is not needed as this is a null prototype object. 
+ // deno-lint-ignore guard-for-in + for (const key in this._headers) { + const entry = this._headers[key]; + if (Array.isArray(entry)) { + for (const value of entry) { + headers.push([key, value]); } + } else { + headers.push([key, entry]); } } - this.#resolve( - new Response(body, { - headers, - status: this.statusCode, - statusText: this.statusMessage, - }), - ); } + this._resolve( + new Response(body, { + headers, + status: this.statusCode, + statusText: this.statusMessage, + }), + ); +}; +ServerResponse.prototype.end = function ( + this: ServerResponse, // deno-lint-ignore no-explicit-any - override end(chunk?: any, encoding?: any, cb?: any): this { - this.finished = true; - if (!chunk && "transfer-encoding" in this.#headers) { - // FIXME(bnoordhuis) Node sends a zero length chunked body instead, i.e., - // the trailing "0\r\n", but respondWith() just hangs when I try that. - this.#headers["content-length"] = "0"; - delete this.#headers["transfer-encoding"]; - } + chunk?: any, + // deno-lint-ignore no-explicit-any + encoding?: any, + // deno-lint-ignore no-explicit-any + cb?: any, +) { + this.finished = true; + if (!chunk && "transfer-encoding" in this._headers) { + // FIXME(bnoordhuis) Node sends a zero length chunked body instead, i.e., + // the trailing "0\r\n", but respondWith() just hangs when I try that. + this._headers["content-length"] = "0"; + delete this._headers["transfer-encoding"]; + } + + // @ts-expect-error The signature for cb is stricter than the one implemented here + NodeWritable.prototype.end.call(this, chunk, encoding, cb); +}; - // @ts-expect-error The signature for cb is stricter than the one implemented here - return super.end(chunk, encoding, cb); - } +ServerResponse.prototype.flushHeaders = function (this: ServerResponse) { + // no-op +}; - flushHeaders() { - // no-op - } +// Undocumented API used by `npm:compression`. 
+ServerResponse.prototype._implicitHeader = function (this: ServerResponse) { + this.writeHead(this.statusCode); +}; - // Undocumented API used by `npm:compression`. - _implicitHeader() { - this.writeHead(this.statusCode); +ServerResponse.prototype.assignSocket = function ( + this: ServerResponse, + socket, +) { + if (socket._httpMessage) { + throw new ERR_HTTP_SOCKET_ASSIGNED(); } + socket._httpMessage = this; + this._socketOverride = socket; +}; - assignSocket(socket) { - if (socket._httpMessage) { - throw new ERR_HTTP_SOCKET_ASSIGNED(); - } - socket._httpMessage = this; - this.#socketOverride = socket; - } +ServerResponse.prototype.detachSocket = function ( + this: ServerResponse, + socket, +) { + assert(socket._httpMessage === this); + socket._httpMessage = null; + this._socketOverride = null; +}; - detachSocket(socket) { - assert(socket._httpMessage === this); - socket._httpMessage = null; - this.#socketOverride = null; - } -} +Object.defineProperty(ServerResponse.prototype, "connection", { + get: deprecate( + function (this: ServerResponse) { + return this._socketOverride; + }, + "ServerResponse.prototype.connection is deprecated", + "DEP0066", + ), + set: deprecate( + // deno-lint-ignore no-explicit-any + function (this: ServerResponse, socket: any) { + this._socketOverride = socket; + }, + "ServerResponse.prototype.connection is deprecated", + "DEP0066", + ), +}); // TODO(@AaronO): optimize export class IncomingMessageForServer extends NodeReadable { -- cgit v1.2.3 From cead8af104af693558db07675a423761f174d260 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bartek=20Iwa=C5=84czuk?= Date: Fri, 25 Oct 2024 11:53:34 +0100 Subject: build: use 'fs' feature of 'nix' crate in ext/fs (#26533) Hot-fix to unblock `v2.0.3` release --- ext/fs/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'ext') diff --git a/ext/fs/Cargo.toml b/ext/fs/Cargo.toml index ab0bf22fd..f5daa1bcd 100644 --- a/ext/fs/Cargo.toml +++ b/ext/fs/Cargo.toml @@ -31,7 +31,7 @@ 
serde.workspace = true thiserror.workspace = true [target.'cfg(unix)'.dependencies] -nix = { workspace = true, features = ["user"] } +nix = { workspace = true, features = ["fs", "user"] } [target.'cfg(windows)'.dependencies] winapi = { workspace = true, features = ["winbase"] } -- cgit v1.2.3 From 730331622ee17cf603447f4eb53631b9cfd7bef1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bartek=20Iwa=C5=84czuk?= Date: Fri, 25 Oct 2024 14:57:40 +0100 Subject: chore: forward v2.0.3 commit to main (#26535) Forwarding v2.0.3 commit to `main` Co-authored-by: denobot <33910674+denobot@users.noreply.github.com> Co-authored-by: bartlomieju --- ext/broadcast_channel/Cargo.toml | 2 +- ext/cache/Cargo.toml | 2 +- ext/canvas/Cargo.toml | 2 +- ext/console/Cargo.toml | 2 +- ext/cron/Cargo.toml | 2 +- ext/crypto/Cargo.toml | 2 +- ext/fetch/Cargo.toml | 2 +- ext/ffi/Cargo.toml | 2 +- ext/fs/Cargo.toml | 2 +- ext/http/Cargo.toml | 2 +- ext/io/Cargo.toml | 2 +- ext/kv/Cargo.toml | 2 +- ext/napi/Cargo.toml | 2 +- ext/napi/sym/Cargo.toml | 2 +- ext/net/Cargo.toml | 2 +- ext/node/Cargo.toml | 2 +- ext/tls/Cargo.toml | 2 +- ext/url/Cargo.toml | 2 +- ext/web/Cargo.toml | 2 +- ext/webgpu/Cargo.toml | 2 +- ext/webidl/Cargo.toml | 2 +- ext/websocket/Cargo.toml | 2 +- ext/webstorage/Cargo.toml | 2 +- 23 files changed, 23 insertions(+), 23 deletions(-) (limited to 'ext') diff --git a/ext/broadcast_channel/Cargo.toml b/ext/broadcast_channel/Cargo.toml index 7ca058a44..845544f0c 100644 --- a/ext/broadcast_channel/Cargo.toml +++ b/ext/broadcast_channel/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_broadcast_channel" -version = "0.167.0" +version = "0.168.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/cache/Cargo.toml b/ext/cache/Cargo.toml index dee4d7274..a07a8f720 100644 --- a/ext/cache/Cargo.toml +++ b/ext/cache/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_cache" -version = "0.105.0" +version = "0.106.0" authors.workspace = true edition.workspace 
= true license.workspace = true diff --git a/ext/canvas/Cargo.toml b/ext/canvas/Cargo.toml index 6ca3d76c0..4c125ea9b 100644 --- a/ext/canvas/Cargo.toml +++ b/ext/canvas/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_canvas" -version = "0.42.0" +version = "0.43.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/console/Cargo.toml b/ext/console/Cargo.toml index f83f7138d..b89f67fec 100644 --- a/ext/console/Cargo.toml +++ b/ext/console/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_console" -version = "0.173.0" +version = "0.174.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/cron/Cargo.toml b/ext/cron/Cargo.toml index ccd81de0c..6c5547c35 100644 --- a/ext/cron/Cargo.toml +++ b/ext/cron/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_cron" -version = "0.53.0" +version = "0.54.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/crypto/Cargo.toml b/ext/crypto/Cargo.toml index 2f970ca53..e9761f48f 100644 --- a/ext/crypto/Cargo.toml +++ b/ext/crypto/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_crypto" -version = "0.187.0" +version = "0.188.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/fetch/Cargo.toml b/ext/fetch/Cargo.toml index afffd3ffb..316a6eea8 100644 --- a/ext/fetch/Cargo.toml +++ b/ext/fetch/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_fetch" -version = "0.197.0" +version = "0.198.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/ffi/Cargo.toml b/ext/ffi/Cargo.toml index 59dc13ed9..405030df2 100644 --- a/ext/ffi/Cargo.toml +++ b/ext/ffi/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_ffi" -version = "0.160.0" +version = "0.161.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/fs/Cargo.toml b/ext/fs/Cargo.toml index f5daa1bcd..2a1c5bf2c 100644 --- a/ext/fs/Cargo.toml +++ 
b/ext/fs/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_fs" -version = "0.83.0" +version = "0.84.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/http/Cargo.toml b/ext/http/Cargo.toml index 6fa7598cb..0bae99ef7 100644 --- a/ext/http/Cargo.toml +++ b/ext/http/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_http" -version = "0.171.0" +version = "0.172.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/io/Cargo.toml b/ext/io/Cargo.toml index 8f407c820..f7ffe46bc 100644 --- a/ext/io/Cargo.toml +++ b/ext/io/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_io" -version = "0.83.0" +version = "0.84.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/kv/Cargo.toml b/ext/kv/Cargo.toml index f7b28af67..a4e35193d 100644 --- a/ext/kv/Cargo.toml +++ b/ext/kv/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_kv" -version = "0.81.0" +version = "0.82.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/napi/Cargo.toml b/ext/napi/Cargo.toml index ed39d96df..622429f7c 100644 --- a/ext/napi/Cargo.toml +++ b/ext/napi/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_napi" -version = "0.104.0" +version = "0.105.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/napi/sym/Cargo.toml b/ext/napi/sym/Cargo.toml index 7c9bf208c..9fe2d839a 100644 --- a/ext/napi/sym/Cargo.toml +++ b/ext/napi/sym/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "napi_sym" -version = "0.103.0" +version = "0.104.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/net/Cargo.toml b/ext/net/Cargo.toml index 634dd7dda..a458dbe9e 100644 --- a/ext/net/Cargo.toml +++ b/ext/net/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_net" -version = "0.165.0" +version = "0.166.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git 
a/ext/node/Cargo.toml b/ext/node/Cargo.toml index 55d53d0ce..3743e6a85 100644 --- a/ext/node/Cargo.toml +++ b/ext/node/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_node" -version = "0.110.0" +version = "0.111.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/tls/Cargo.toml b/ext/tls/Cargo.toml index b060f2829..d163fd7eb 100644 --- a/ext/tls/Cargo.toml +++ b/ext/tls/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_tls" -version = "0.160.0" +version = "0.161.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/url/Cargo.toml b/ext/url/Cargo.toml index ceef18f45..ef7890e2a 100644 --- a/ext/url/Cargo.toml +++ b/ext/url/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_url" -version = "0.173.0" +version = "0.174.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/web/Cargo.toml b/ext/web/Cargo.toml index 395ea182f..0cb08e19c 100644 --- a/ext/web/Cargo.toml +++ b/ext/web/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_web" -version = "0.204.0" +version = "0.205.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/webgpu/Cargo.toml b/ext/webgpu/Cargo.toml index 7354919d4..84ee4a012 100644 --- a/ext/webgpu/Cargo.toml +++ b/ext/webgpu/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_webgpu" -version = "0.140.0" +version = "0.141.0" authors = ["the Deno authors"] edition.workspace = true license = "MIT" diff --git a/ext/webidl/Cargo.toml b/ext/webidl/Cargo.toml index d069b3212..a403db2fe 100644 --- a/ext/webidl/Cargo.toml +++ b/ext/webidl/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_webidl" -version = "0.173.0" +version = "0.174.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/websocket/Cargo.toml b/ext/websocket/Cargo.toml index ec359100d..b48b39c99 100644 --- a/ext/websocket/Cargo.toml +++ b/ext/websocket/Cargo.toml @@ -2,7 +2,7 @@ [package] 
name = "deno_websocket" -version = "0.178.0" +version = "0.179.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/webstorage/Cargo.toml b/ext/webstorage/Cargo.toml index 0d6fcd6a0..911e25241 100644 --- a/ext/webstorage/Cargo.toml +++ b/ext/webstorage/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_webstorage" -version = "0.168.0" +version = "0.169.0" authors.workspace = true edition.workspace = true license.workspace = true -- cgit v1.2.3 From 793b155cd31bfc79e4aac4447bd250c9a6d31f6b Mon Sep 17 00:00:00 2001 From: Mayank Kumar <103447831+mayank-Pareek@users.noreply.github.com> Date: Sat, 26 Oct 2024 13:42:14 -0400 Subject: fix(ext/node): use primordials in ext\node\polyfills\internal\crypto\_randomInt.ts (#26534) Towards #24236 --- ext/node/polyfills/internal/crypto/_randomInt.ts | 26 ++++++++++++++++-------- 1 file changed, 17 insertions(+), 9 deletions(-) (limited to 'ext') diff --git a/ext/node/polyfills/internal/crypto/_randomInt.ts b/ext/node/polyfills/internal/crypto/_randomInt.ts index 7f4d703ad..e08b3e963 100644 --- a/ext/node/polyfills/internal/crypto/_randomInt.ts +++ b/ext/node/polyfills/internal/crypto/_randomInt.ts @@ -1,9 +1,15 @@ // Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. 
-// TODO(petamoriken): enable prefer-primordials for node polyfills -// deno-lint-ignore-file prefer-primordials - import { op_node_random_int } from "ext:core/ops"; +import { primordials } from "ext:core/mod.js"; +const { + Error, + MathCeil, + MathFloor, + MathPow, + NumberIsSafeInteger, + RangeError, +} = primordials; export default function randomInt(max: number): number; export default function randomInt(min: number, max: number): number; @@ -23,7 +29,9 @@ export default function randomInt( cb?: (err: Error | null, n?: number) => void, ): number | void { if (typeof max === "number" && typeof min === "number") { - [max, min] = [min, max]; + const temp = max; + max = min; + min = temp; } if (min === undefined) min = 0; else if (typeof min === "function") { @@ -32,13 +40,13 @@ export default function randomInt( } if ( - !Number.isSafeInteger(min) || - typeof max === "number" && !Number.isSafeInteger(max) + !NumberIsSafeInteger(min) || + typeof max === "number" && !NumberIsSafeInteger(max) ) { throw new Error("max or min is not a Safe Number"); } - if (max - min > Math.pow(2, 48)) { + if (max - min > MathPow(2, 48)) { throw new RangeError("max - min should be less than 2^48!"); } @@ -46,8 +54,8 @@ export default function randomInt( throw new Error("Min is bigger than Max!"); } - min = Math.ceil(min); - max = Math.floor(max); + min = MathCeil(min); + max = MathFloor(max); const result = op_node_random_int(min, max); if (cb) { -- cgit v1.2.3 From c314b2d8577289078d6b00a0dd58f8f36ff6920a Mon Sep 17 00:00:00 2001 From: familyboat <84062528+familyboat@users.noreply.github.com> Date: Sun, 27 Oct 2024 11:04:35 +0800 Subject: fix(ext/node): add path to `fs.stat` and `fs.statSync` error (#26037) --- ext/node/polyfills/_fs/_fs_stat.ts | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) (limited to 'ext') diff --git a/ext/node/polyfills/_fs/_fs_stat.ts b/ext/node/polyfills/_fs/_fs_stat.ts index c4ed82d57..d00c81ffb 100644 --- 
a/ext/node/polyfills/_fs/_fs_stat.ts +++ b/ext/node/polyfills/_fs/_fs_stat.ts @@ -383,7 +383,10 @@ export function stat( Deno.stat(path).then( (stat) => callback(null, CFISBIS(stat, options.bigint)), - (err) => callback(denoErrorToNodeError(err, { syscall: "stat" })), + (err) => + callback( + denoErrorToNodeError(err, { syscall: "stat", path: getPathname(path) }), + ), ); } @@ -417,9 +420,16 @@ export function statSync( return; } if (err instanceof Error) { - throw denoErrorToNodeError(err, { syscall: "stat" }); + throw denoErrorToNodeError(err, { + syscall: "stat", + path: getPathname(path), + }); } else { throw err; } } } + +function getPathname(path: string | URL) { + return typeof path === "string" ? path : path.pathname; +} -- cgit v1.2.3 From 4e38fbd0a3e0ca139314e503494a8d4795007d8a Mon Sep 17 00:00:00 2001 From: snek Date: Mon, 28 Oct 2024 18:16:43 +0100 Subject: fix: report exceptions from nextTick (#26579) Fixes: https://github.com/denoland/deno/issues/24713 Fixes: https://github.com/denoland/deno/issues/25855 --- ext/node/polyfills/_next_tick.ts | 2 ++ 1 file changed, 2 insertions(+) (limited to 'ext') diff --git a/ext/node/polyfills/_next_tick.ts b/ext/node/polyfills/_next_tick.ts index 62470c564..af306a29c 100644 --- a/ext/node/polyfills/_next_tick.ts +++ b/ext/node/polyfills/_next_tick.ts @@ -62,6 +62,8 @@ export function processTicksAndRejections() { callback(...args); } } + } catch (e) { + reportError(e); } finally { // FIXME(bartlomieju): Deno currently doesn't support async hooks // if (destroyHooksExist()) -- cgit v1.2.3 From 46e5ed1a642edcd998d1e22c9b37e0c4aa298e1e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bartek=20Iwa=C5=84czuk?= Date: Tue, 29 Oct 2024 00:41:02 +0000 Subject: Revert "fix(ext/node): use primordials in `ext/node/polyfills/https.ts` (#26323)" (#26613) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit …s` (#26323)" This reverts commit afb33b3c2597c9ec943f71218b236486fbc86e23. 
Reverting because it caused a regression - https://github.com/denoland/deno/issues/26612. Closes https://github.com/denoland/deno/issues/26612. --- ext/node/polyfills/https.ts | 31 ++++++++++++------------------- 1 file changed, 12 insertions(+), 19 deletions(-) (limited to 'ext') diff --git a/ext/node/polyfills/https.ts b/ext/node/polyfills/https.ts index dd24cf048..f60c5e471 100644 --- a/ext/node/polyfills/https.ts +++ b/ext/node/polyfills/https.ts @@ -1,6 +1,9 @@ // Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. // Copyright Joyent and Node contributors. All rights reserved. MIT license. +// TODO(petamoriken): enable prefer-primordials for node polyfills +// deno-lint-ignore-file prefer-primordials + import { notImplemented } from "ext:deno_node/_utils.ts"; import { urlToHttpOptions } from "ext:deno_node/internal/url.ts"; import { @@ -14,14 +17,6 @@ import { type ServerHandler, ServerImpl as HttpServer } from "node:http"; import { validateObject } from "ext:deno_node/internal/validators.mjs"; import { kEmptyObject } from "ext:deno_node/internal/util.mjs"; import { Buffer } from "node:buffer"; -import { primordials } from "ext:core/mod.js"; -const { - ArrayPrototypeShift, - ArrayPrototypeUnshift, - ArrayIsArray, - ObjectPrototypeIsPrototypeOf, - ObjectAssign, -} = primordials; export class Server extends HttpServer { constructor(opts, requestListener?: ServerHandler) { @@ -34,11 +29,11 @@ export class Server extends HttpServer { validateObject(opts, "options"); } - if (opts.cert && ArrayIsArray(opts.cert)) { + if (opts.cert && Array.isArray(opts.cert)) { notImplemented("https.Server.opts.cert array type"); } - if (opts.key && ArrayIsArray(opts.key)) { + if (opts.key && Array.isArray(opts.key)) { notImplemented("https.Server.opts.key array type"); } @@ -47,12 +42,10 @@ export class Server extends HttpServer { _additionalServeOptions() { return { - cert: ObjectPrototypeIsPrototypeOf(Buffer, this._opts.cert) - // deno-lint-ignore 
prefer-primordials + cert: this._opts.cert instanceof Buffer ? this._opts.cert.toString() : this._opts.cert, - key: ObjectPrototypeIsPrototypeOf(Buffer, this._opts.key) - // deno-lint-ignore prefer-primordials + key: this._opts.key instanceof Buffer ? this._opts.key.toString() : this._opts.key, }; @@ -166,18 +159,18 @@ export function request(...args: any[]) { let options = {}; if (typeof args[0] === "string") { - const urlStr = ArrayPrototypeShift(args); + const urlStr = args.shift(); options = urlToHttpOptions(new URL(urlStr)); - } else if (ObjectPrototypeIsPrototypeOf(URL, args[0])) { - options = urlToHttpOptions(ArrayPrototypeShift(args)); + } else if (args[0] instanceof URL) { + options = urlToHttpOptions(args.shift()); } if (args[0] && typeof args[0] !== "function") { - ObjectAssign(options, ArrayPrototypeShift(args)); + Object.assign(options, args.shift()); } options._defaultAgent = globalAgent; - ArrayPrototypeUnshift(args, options); + args.unshift(options); return new HttpsClientRequest(args[0], args[1]); } -- cgit v1.2.3 From efb5e912e5ef6745fc7fdfb1a84b5bd362548d61 Mon Sep 17 00:00:00 2001 From: Volker Schlecht <47375452+VlkrS@users.noreply.github.com> Date: Tue, 29 Oct 2024 15:10:32 +0100 Subject: fix(ext/node): compatibility with {Free,Open}BSD (#26604) Ports for both BSDs contain patches to the same effect. 
See https://github.com/freebsd/freebsd-ports/blob/main/www/deno/files/patch-ext_node_ops_fs.rs and https://github.com/openbsd/ports/blob/8644910cae24458306b6a7c4ef4ef7811bc3d1f5/lang/deno/patches/patch-ext_node_ops_fs_rs --- ext/node/ops/fs.rs | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) (limited to 'ext') diff --git a/ext/node/ops/fs.rs b/ext/node/ops/fs.rs index 6645792e7..98b3c46a1 100644 --- a/ext/node/ops/fs.rs +++ b/ext/node/ops/fs.rs @@ -152,13 +152,21 @@ where let mut cpath = path.as_bytes().to_vec(); cpath.push(0); if bigint { - #[cfg(not(target_os = "macos"))] + #[cfg(not(any( + target_os = "macos", + target_os = "freebsd", + target_os = "openbsd" + )))] // SAFETY: `cpath` is NUL-terminated and result is pointer to valid statfs memory. let (code, result) = unsafe { let mut result: libc::statfs64 = std::mem::zeroed(); (libc::statfs64(cpath.as_ptr() as _, &mut result), result) }; - #[cfg(target_os = "macos")] + #[cfg(any( + target_os = "macos", + target_os = "freebsd", + target_os = "openbsd" + ))] // SAFETY: `cpath` is NUL-terminated and result is pointer to valid statfs memory. 
let (code, result) = unsafe { let mut result: libc::statfs = std::mem::zeroed(); @@ -168,7 +176,10 @@ where return Err(std::io::Error::last_os_error().into()); } Ok(StatFs { + #[cfg(not(target_os = "openbsd"))] typ: result.f_type as _, + #[cfg(target_os = "openbsd")] + typ: 0 as _, bsize: result.f_bsize as _, blocks: result.f_blocks as _, bfree: result.f_bfree as _, @@ -186,7 +197,10 @@ where return Err(std::io::Error::last_os_error().into()); } Ok(StatFs { + #[cfg(not(target_os = "openbsd"))] typ: result.f_type as _, + #[cfg(target_os = "openbsd")] + typ: 0 as _, bsize: result.f_bsize as _, blocks: result.f_blocks as _, bfree: result.f_bfree as _, -- cgit v1.2.3 From 51978a76540cac72dd35528dd023c3fc6d5eeca6 Mon Sep 17 00:00:00 2001 From: Volker Schlecht <47375452+VlkrS@users.noreply.github.com> Date: Tue, 29 Oct 2024 15:11:49 +0100 Subject: fix(ext/napi): export dynamic symbols list for {Free,Open}BSD (#26605) The two BSD ports are reusing the Linux code here. --- ext/napi/lib.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'ext') diff --git a/ext/napi/lib.rs b/ext/napi/lib.rs index 0d41bb40f..20f924bdb 100644 --- a/ext/napi/lib.rs +++ b/ext/napi/lib.rs @@ -662,7 +662,11 @@ pub fn print_linker_flags(name: &str) { symbols_path, ); - #[cfg(target_os = "linux")] + #[cfg(any( + target_os = "linux", + target_os = "freebsd", + target_os = "openbsd" + ))] println!( "cargo:rustc-link-arg-bin={name}=-Wl,--export-dynamic-symbol-list={}", symbols_path, -- cgit v1.2.3 From a69224ea5bd02f08108aac867c492754095f2d34 Mon Sep 17 00:00:00 2001 From: Yoshiya Hinosawa Date: Wed, 30 Oct 2024 02:41:16 +0900 Subject: Revert "fix(ext/node): fix dns.lookup result ordering (#26264)" (#26621) This reverts commit d59599fc187c559ee231882773e1c5a2b932fc3d. 
Closes #26588 --- ext/node/polyfills/http.ts | 6 +++--- ext/node/polyfills/internal/dns/utils.ts | 14 +++++++++++-- ext/node/polyfills/internal_binding/cares_wrap.ts | 13 +++--------- ext/node/polyfills/net.ts | 24 ++++++++++++++++------- 4 files changed, 35 insertions(+), 22 deletions(-) (limited to 'ext') diff --git a/ext/node/polyfills/http.ts b/ext/node/polyfills/http.ts index e7a860ff1..e8a189f70 100644 --- a/ext/node/polyfills/http.ts +++ b/ext/node/polyfills/http.ts @@ -51,7 +51,6 @@ import { urlToHttpOptions } from "ext:deno_node/internal/url.ts"; import { kEmptyObject } from "ext:deno_node/internal/util.mjs"; import { constants, TCP } from "ext:deno_node/internal_binding/tcp_wrap.ts"; import { notImplemented, warnNotImplemented } from "ext:deno_node/_utils.ts"; -import { isWindows } from "ext:deno_node/_util/os.ts"; import { connResetException, ERR_HTTP_HEADERS_SENT, @@ -1712,8 +1711,9 @@ export class ServerImpl extends EventEmitter { port = options.port | 0; } - // Use 0.0.0.0 for Windows, and [::] for other platforms. - let hostname = options.host ?? (isWindows ? "0.0.0.0" : "[::]"); + // TODO(bnoordhuis) Node prefers [::] when host is omitted, + // we on the other hand default to 0.0.0.0. + let hostname = options.host ?? 
"0.0.0.0"; if (hostname == "localhost") { hostname = "127.0.0.1"; } diff --git a/ext/node/polyfills/internal/dns/utils.ts b/ext/node/polyfills/internal/dns/utils.ts index 1e0c3d9ed..226fce93d 100644 --- a/ext/node/polyfills/internal/dns/utils.ts +++ b/ext/node/polyfills/internal/dns/utils.ts @@ -416,10 +416,20 @@ export function emitInvalidHostnameWarning(hostname: string) { ); } -let dnsOrder = getOptionValue("--dns-result-order") || "verbatim"; +let dnsOrder = getOptionValue("--dns-result-order") || "ipv4first"; export function getDefaultVerbatim() { - return dnsOrder !== "ipv4first"; + switch (dnsOrder) { + case "verbatim": { + return true; + } + case "ipv4first": { + return false; + } + default: { + return false; + } + } } /** diff --git a/ext/node/polyfills/internal_binding/cares_wrap.ts b/ext/node/polyfills/internal_binding/cares_wrap.ts index cbfea40b2..6feb7faf0 100644 --- a/ext/node/polyfills/internal_binding/cares_wrap.ts +++ b/ext/node/polyfills/internal_binding/cares_wrap.ts @@ -75,18 +75,11 @@ export function getaddrinfo( const recordTypes: ("A" | "AAAA")[] = []; - if (family === 6) { - recordTypes.push("AAAA"); - } else if (family === 4) { + if (family === 0 || family === 4) { recordTypes.push("A"); - } else if (family === 0 && hostname === "localhost") { - // Ipv6 is preferred over Ipv4 for localhost + } + if (family === 0 || family === 6) { recordTypes.push("AAAA"); - recordTypes.push("A"); - } else if (family === 0) { - // Only get Ipv4 addresses for the other hostnames - // This simulates what `getaddrinfo` does when the family is not specified - recordTypes.push("A"); } (async () => { diff --git a/ext/node/polyfills/net.ts b/ext/node/polyfills/net.ts index 35d273be9..48e1d0de8 100644 --- a/ext/node/polyfills/net.ts +++ b/ext/node/polyfills/net.ts @@ -1871,13 +1871,23 @@ function _setupListenHandle( // Try to bind to the unspecified IPv6 address, see if IPv6 is available if (!address && typeof fd !== "number") { - if (isWindows) { - address = 
DEFAULT_IPV4_ADDR; - addressType = 4; - } else { - address = DEFAULT_IPV6_ADDR; - addressType = 6; - } + // TODO(@bartlomieju): differs from Node which tries to bind to IPv6 first + // when no address is provided. + // + // Forcing IPv4 as a workaround for Deno not aligning with Node on + // implicit binding on Windows. + // + // REF: https://github.com/denoland/deno/issues/10762 + // rval = _createServerHandle(DEFAULT_IPV6_ADDR, port, 6, fd, flags); + + // if (typeof rval === "number") { + // rval = null; + address = DEFAULT_IPV4_ADDR; + addressType = 4; + // } else { + // address = DEFAULT_IPV6_ADDR; + // addressType = 6; + // } } if (rval === null) { -- cgit v1.2.3 From a1473d82c5612676c50af00ded0467dbb29bc0a8 Mon Sep 17 00:00:00 2001 From: denobot <33910674+denobot@users.noreply.github.com> Date: Wed, 30 Oct 2024 08:46:31 -0400 Subject: chore: forward v2.0.4 release commit to main (#26636) This is the release commit being forwarded back to main for 2.0.4 Co-authored-by: bartlomieju --- ext/broadcast_channel/Cargo.toml | 2 +- ext/cache/Cargo.toml | 2 +- ext/canvas/Cargo.toml | 2 +- ext/console/Cargo.toml | 2 +- ext/cron/Cargo.toml | 2 +- ext/crypto/Cargo.toml | 2 +- ext/fetch/Cargo.toml | 2 +- ext/ffi/Cargo.toml | 2 +- ext/fs/Cargo.toml | 2 +- ext/http/Cargo.toml | 2 +- ext/io/Cargo.toml | 2 +- ext/kv/Cargo.toml | 2 +- ext/napi/Cargo.toml | 2 +- ext/napi/sym/Cargo.toml | 2 +- ext/net/Cargo.toml | 2 +- ext/node/Cargo.toml | 2 +- ext/tls/Cargo.toml | 2 +- ext/url/Cargo.toml | 2 +- ext/web/Cargo.toml | 2 +- ext/webgpu/Cargo.toml | 2 +- ext/webidl/Cargo.toml | 2 +- ext/websocket/Cargo.toml | 2 +- ext/webstorage/Cargo.toml | 2 +- 23 files changed, 23 insertions(+), 23 deletions(-) (limited to 'ext') diff --git a/ext/broadcast_channel/Cargo.toml b/ext/broadcast_channel/Cargo.toml index 845544f0c..e6eeeed5e 100644 --- a/ext/broadcast_channel/Cargo.toml +++ b/ext/broadcast_channel/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_broadcast_channel" -version = "0.168.0" 
+version = "0.169.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/cache/Cargo.toml b/ext/cache/Cargo.toml index a07a8f720..7cae8d88a 100644 --- a/ext/cache/Cargo.toml +++ b/ext/cache/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_cache" -version = "0.106.0" +version = "0.107.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/canvas/Cargo.toml b/ext/canvas/Cargo.toml index 4c125ea9b..5d2c37199 100644 --- a/ext/canvas/Cargo.toml +++ b/ext/canvas/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_canvas" -version = "0.43.0" +version = "0.44.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/console/Cargo.toml b/ext/console/Cargo.toml index b89f67fec..b3543ed85 100644 --- a/ext/console/Cargo.toml +++ b/ext/console/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_console" -version = "0.174.0" +version = "0.175.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/cron/Cargo.toml b/ext/cron/Cargo.toml index 6c5547c35..0d52cb6a9 100644 --- a/ext/cron/Cargo.toml +++ b/ext/cron/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_cron" -version = "0.54.0" +version = "0.55.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/crypto/Cargo.toml b/ext/crypto/Cargo.toml index e9761f48f..95981bba3 100644 --- a/ext/crypto/Cargo.toml +++ b/ext/crypto/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_crypto" -version = "0.188.0" +version = "0.189.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/fetch/Cargo.toml b/ext/fetch/Cargo.toml index 316a6eea8..93fc88ae6 100644 --- a/ext/fetch/Cargo.toml +++ b/ext/fetch/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_fetch" -version = "0.198.0" +version = "0.199.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/ffi/Cargo.toml 
b/ext/ffi/Cargo.toml index 405030df2..58144790b 100644 --- a/ext/ffi/Cargo.toml +++ b/ext/ffi/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_ffi" -version = "0.161.0" +version = "0.162.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/fs/Cargo.toml b/ext/fs/Cargo.toml index 2a1c5bf2c..0ccde7f7e 100644 --- a/ext/fs/Cargo.toml +++ b/ext/fs/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_fs" -version = "0.84.0" +version = "0.85.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/http/Cargo.toml b/ext/http/Cargo.toml index 0bae99ef7..a1a4cb20b 100644 --- a/ext/http/Cargo.toml +++ b/ext/http/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_http" -version = "0.172.0" +version = "0.173.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/io/Cargo.toml b/ext/io/Cargo.toml index f7ffe46bc..7e858d298 100644 --- a/ext/io/Cargo.toml +++ b/ext/io/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_io" -version = "0.84.0" +version = "0.85.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/kv/Cargo.toml b/ext/kv/Cargo.toml index a4e35193d..bbd234901 100644 --- a/ext/kv/Cargo.toml +++ b/ext/kv/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_kv" -version = "0.82.0" +version = "0.83.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/napi/Cargo.toml b/ext/napi/Cargo.toml index 622429f7c..c31367118 100644 --- a/ext/napi/Cargo.toml +++ b/ext/napi/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_napi" -version = "0.105.0" +version = "0.106.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/napi/sym/Cargo.toml b/ext/napi/sym/Cargo.toml index 9fe2d839a..c23054eb4 100644 --- a/ext/napi/sym/Cargo.toml +++ b/ext/napi/sym/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "napi_sym" -version = "0.104.0" +version = "0.105.0" 
authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/net/Cargo.toml b/ext/net/Cargo.toml index a458dbe9e..5ffd1b450 100644 --- a/ext/net/Cargo.toml +++ b/ext/net/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_net" -version = "0.166.0" +version = "0.167.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/node/Cargo.toml b/ext/node/Cargo.toml index 3743e6a85..ed31dbf17 100644 --- a/ext/node/Cargo.toml +++ b/ext/node/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_node" -version = "0.111.0" +version = "0.112.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/tls/Cargo.toml b/ext/tls/Cargo.toml index d163fd7eb..6871faa5b 100644 --- a/ext/tls/Cargo.toml +++ b/ext/tls/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_tls" -version = "0.161.0" +version = "0.162.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/url/Cargo.toml b/ext/url/Cargo.toml index ef7890e2a..24d81abf8 100644 --- a/ext/url/Cargo.toml +++ b/ext/url/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_url" -version = "0.174.0" +version = "0.175.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/web/Cargo.toml b/ext/web/Cargo.toml index 0cb08e19c..a02cff98f 100644 --- a/ext/web/Cargo.toml +++ b/ext/web/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_web" -version = "0.205.0" +version = "0.206.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/webgpu/Cargo.toml b/ext/webgpu/Cargo.toml index 84ee4a012..3bb814c25 100644 --- a/ext/webgpu/Cargo.toml +++ b/ext/webgpu/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_webgpu" -version = "0.141.0" +version = "0.142.0" authors = ["the Deno authors"] edition.workspace = true license = "MIT" diff --git a/ext/webidl/Cargo.toml b/ext/webidl/Cargo.toml index a403db2fe..1685bb1fd 100644 --- 
a/ext/webidl/Cargo.toml +++ b/ext/webidl/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_webidl" -version = "0.174.0" +version = "0.175.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/websocket/Cargo.toml b/ext/websocket/Cargo.toml index b48b39c99..6a575cb41 100644 --- a/ext/websocket/Cargo.toml +++ b/ext/websocket/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_websocket" -version = "0.179.0" +version = "0.180.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/webstorage/Cargo.toml b/ext/webstorage/Cargo.toml index 911e25241..079ab2e4e 100644 --- a/ext/webstorage/Cargo.toml +++ b/ext/webstorage/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_webstorage" -version = "0.169.0" +version = "0.170.0" authors.workspace = true edition.workspace = true license.workspace = true -- cgit v1.2.3 From a346071dd2bd822a7c2abcb187406efe7d1d1471 Mon Sep 17 00:00:00 2001 From: Nathan Whitaker <17734409+nathanwhit@users.noreply.github.com> Date: Wed, 30 Oct 2024 13:13:32 -0700 Subject: fix(ext/node): return `this` from `http.Server.ref/unref()` (#26647) Fixes https://github.com/denoland/deno/issues/26642 --- ext/node/polyfills/http.ts | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'ext') diff --git a/ext/node/polyfills/http.ts b/ext/node/polyfills/http.ts index e8a189f70..9a920adee 100644 --- a/ext/node/polyfills/http.ts +++ b/ext/node/polyfills/http.ts @@ -1802,6 +1802,8 @@ export class ServerImpl extends EventEmitter { this.#server.ref(); } this.#unref = false; + + return this; } unref() { @@ -1809,6 +1811,8 @@ export class ServerImpl extends EventEmitter { this.#server.unref(); } this.#unref = true; + + return this; } close(cb?: (err?: Error) => void): this { -- cgit v1.2.3 From a8846eb70f96445753fe5f8f56e6155cb2d0fac6 Mon Sep 17 00:00:00 2001 From: David Sherret Date: Wed, 30 Oct 2024 16:43:22 -0400 Subject: fix: remove permission check in op_require_node_module_paths (#26645) 
--- ext/node/ops/require.rs | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) (limited to 'ext') diff --git a/ext/node/ops/require.rs b/ext/node/ops/require.rs index f4607f4e8..43867053b 100644 --- a/ext/node/ops/require.rs +++ b/ext/node/ops/require.rs @@ -131,9 +131,6 @@ where normalize_path(current_dir.join(from)) }; - let from = ensure_read_permission::

(state, &from) - .map_err(RequireError::Permission)?; - if cfg!(windows) { // return root node_modules when path is 'D:\\'. let from_str = from.to_str().unwrap(); @@ -154,7 +151,7 @@ where } let mut paths = Vec::with_capacity(from.components().count()); - let mut current_path = from.as_ref(); + let mut current_path = from.as_path(); let mut maybe_parent = Some(current_path); while let Some(parent) = maybe_parent { if !parent.ends_with("node_modules") { -- cgit v1.2.3 From 8bfd134da6d730cc1d182ab430b1713901d7a5b5 Mon Sep 17 00:00:00 2001 From: Divy Srivastava Date: Thu, 31 Oct 2024 10:10:07 +0530 Subject: fix: clamp smi in fast calls by default (#26506) Fixes https://github.com/denoland/deno/issues/26480 Ref https://github.com/denoland/deno_core/commit/d2945fb65bca56ebfa7bb80556a4c8f4330d2315 --- ext/node/ops/crypto/mod.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'ext') diff --git a/ext/node/ops/crypto/mod.rs b/ext/node/ops/crypto/mod.rs index 600d31558..0cf34511b 100644 --- a/ext/node/ops/crypto/mod.rs +++ b/ext/node/ops/crypto/mod.rs @@ -519,11 +519,11 @@ pub fn op_node_dh_compute_secret( } #[op2(fast)] -#[smi] +#[number] pub fn op_node_random_int( - #[smi] min: i32, - #[smi] max: i32, -) -> Result { + #[number] min: i64, + #[number] max: i64, +) -> Result { let mut rng = rand::thread_rng(); // Uniform distribution is required to avoid Modulo Bias // https://en.wikipedia.org/wiki/Fisher–Yates_shuffle#Modulo_bias -- cgit v1.2.3 From 951103fc8de952f32386121d3f0e4803fe267ea4 Mon Sep 17 00:00:00 2001 From: Nathan Whitaker <17734409+nathanwhit@users.noreply.github.com> Date: Thu, 31 Oct 2024 10:02:17 -0700 Subject: fix(ext/node): convert errors from `fs.readFile/fs.readFileSync` to node format (#26632) Fixes the original issue reported in #26404. storybook runs into other errors after this PR (the new errors will be fixed in other PRs). 
Some code used by a dependency of storybook does a [string comparison on the error message](https://github.com/chromaui/chromatic-cli/blob/ce30b2be343cb96a0826390b95ea42befb2be547/node-src/lib/getConfiguration.ts#L88-L92) thrown here to check for a file not found error. --- ext/node/polyfills/_fs/_fs_readFile.ts | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) (limited to 'ext') diff --git a/ext/node/polyfills/_fs/_fs_readFile.ts b/ext/node/polyfills/_fs/_fs_readFile.ts index 0f05ee167..cf7e0305d 100644 --- a/ext/node/polyfills/_fs/_fs_readFile.ts +++ b/ext/node/polyfills/_fs/_fs_readFile.ts @@ -19,6 +19,7 @@ import { TextEncodings, } from "ext:deno_node/_utils.ts"; import { FsFile } from "ext:deno_fs/30_fs.js"; +import { denoErrorToNodeError } from "ext:deno_node/internal/errors.ts"; function maybeDecode(data: Uint8Array, encoding: TextEncodings): string; function maybeDecode( @@ -87,7 +88,7 @@ export function readFile( } const buffer = maybeDecode(data, encoding); (cb as BinaryCallback)(null, buffer); - }, (err) => cb && cb(err)); + }, (err) => cb && cb(denoErrorToNodeError(err))); } } @@ -117,7 +118,12 @@ export function readFileSync( opt?: FileOptionsArgument, ): string | Buffer { path = path instanceof URL ? 
pathFromURL(path) : path; - const data = Deno.readFileSync(path); + let data; + try { + data = Deno.readFileSync(path); + } catch (err) { + throw denoErrorToNodeError(err); + } const encoding = getEncoding(opt); if (encoding && encoding !== "binary") { const text = maybeDecode(data, encoding); -- cgit v1.2.3 From 6d44952d4daaaab8ed9ec82212d2256c058eb05d Mon Sep 17 00:00:00 2001 From: Nathan Whitaker <17734409+nathanwhit@users.noreply.github.com> Date: Thu, 31 Oct 2024 10:02:31 -0700 Subject: fix(ext/node): resolve exports even if parent module filename isn't present (#26553) Fixes https://github.com/denoland/deno/issues/26505 I'm not exactly sure how this case comes about (I tried to write tests for it but couldn't manage to reproduce it), but what happens is the parent filename ends up null, and we bail out of resolving the specifier in package exports. I've checked, and in node the parent filename is also null (so that's not a bug on our part), but node continues to resolve even in that case. So this PR should match node's behavior more closely than we currently do. 
--- ext/node/ops/require.rs | 8 ++++++-- ext/node/polyfills/01_require.js | 6 +----- 2 files changed, 7 insertions(+), 7 deletions(-) (limited to 'ext') diff --git a/ext/node/ops/require.rs b/ext/node/ops/require.rs index 43867053b..7524fb43c 100644 --- a/ext/node/ops/require.rs +++ b/ext/node/ops/require.rs @@ -529,12 +529,16 @@ where return Ok(None); }; - let referrer = Url::from_file_path(parent_path).unwrap(); + let referrer = if parent_path.is_empty() { + None + } else { + Some(Url::from_file_path(parent_path).unwrap()) + }; let r = node_resolver.package_exports_resolve( &pkg.path, &format!(".{expansion}"), exports, - Some(&referrer), + referrer.as_ref(), NodeModuleKind::Cjs, REQUIRE_CONDITIONS, NodeResolutionMode::Execution, diff --git a/ext/node/polyfills/01_require.js b/ext/node/polyfills/01_require.js index 5b0980c31..7935903a8 100644 --- a/ext/node/polyfills/01_require.js +++ b/ext/node/polyfills/01_require.js @@ -523,17 +523,13 @@ function resolveExports( return; } - if (!parentPath) { - return false; - } - return op_require_resolve_exports( usesLocalNodeModulesDir, modulesPath, request, name, expansion, - parentPath, + parentPath ?? "", ) ?? false; } -- cgit v1.2.3 From 6c6bbeb97495e8c3e8eac7bea27bf836f02b575f Mon Sep 17 00:00:00 2001 From: Nathan Whitaker <17734409+nathanwhit@users.noreply.github.com> Date: Thu, 31 Oct 2024 22:18:33 -0700 Subject: fix(node): Implement `os.userInfo` properly, add missing `toPrimitive` (#24702) Fixes the implementation of `os.userInfo`, and adds a missing `toPrimitive` for `tmpdir`. This allows us to enable the corresponding node_compat test. 
--- ext/node/lib.rs | 2 +- ext/node/ops/os/mod.rs | 156 ++++++++++++++++++++++++++++++++-- ext/node/polyfills/internal/errors.ts | 13 --- ext/node/polyfills/os.ts | 53 +++++------- 4 files changed, 175 insertions(+), 49 deletions(-) (limited to 'ext') diff --git a/ext/node/lib.rs b/ext/node/lib.rs index 32624f38b..b34bea815 100644 --- a/ext/node/lib.rs +++ b/ext/node/lib.rs @@ -348,7 +348,7 @@ deno_core::extension!(deno_node, ops::http2::op_http2_send_response, ops::os::op_node_os_get_priority

, ops::os::op_node_os_set_priority

, - ops::os::op_node_os_username

, + ops::os::op_node_os_user_info

, ops::os::op_geteuid

, ops::os::op_getegid

, ops::os::op_cpus

, diff --git a/ext/node/ops/os/mod.rs b/ext/node/ops/os/mod.rs index b4c9eaa8c..ea7e6b99f 100644 --- a/ext/node/ops/os/mod.rs +++ b/ext/node/ops/os/mod.rs @@ -1,5 +1,7 @@ // Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. +use std::mem::MaybeUninit; + use crate::NodePermissions; use deno_core::op2; use deno_core::OpState; @@ -15,6 +17,8 @@ pub enum OsError { Permission(deno_core::error::AnyError), #[error("Failed to get cpu info")] FailedToGetCpuInfo, + #[error("Failed to get user info")] + FailedToGetUserInfo(#[source] std::io::Error), } #[op2(fast)] @@ -54,20 +58,162 @@ where priority::set_priority(pid, priority).map_err(OsError::Priority) } +#[derive(serde::Serialize)] +pub struct UserInfo { + username: String, + homedir: String, + shell: Option, +} + +#[cfg(unix)] +fn get_user_info(uid: u32) -> Result { + use std::ffi::CStr; + let mut pw: MaybeUninit = MaybeUninit::uninit(); + let mut result: *mut libc::passwd = std::ptr::null_mut(); + // SAFETY: libc call, no invariants + let max_buf_size = unsafe { libc::sysconf(libc::_SC_GETPW_R_SIZE_MAX) }; + let buf_size = if max_buf_size < 0 { + // from the man page + 16_384 + } else { + max_buf_size as usize + }; + let mut buf = { + let mut b = Vec::>::with_capacity(buf_size); + // SAFETY: MaybeUninit has no initialization invariants, and len == cap + unsafe { + b.set_len(buf_size); + } + b + }; + // SAFETY: libc call, args are correct + let s = unsafe { + libc::getpwuid_r( + uid, + pw.as_mut_ptr(), + buf.as_mut_ptr().cast(), + buf_size, + std::ptr::addr_of_mut!(result), + ) + }; + if result.is_null() { + if s != 0 { + return Err( + OsError::FailedToGetUserInfo(std::io::Error::last_os_error()), + ); + } else { + return Err(OsError::FailedToGetUserInfo(std::io::Error::from( + std::io::ErrorKind::NotFound, + ))); + } + } + // SAFETY: pw was initialized by the call to `getpwuid_r` above + let pw = unsafe { pw.assume_init() }; + // SAFETY: initialized above, pw alive until end of function, nul 
terminated + let username = unsafe { CStr::from_ptr(pw.pw_name) }; + // SAFETY: initialized above, pw alive until end of function, nul terminated + let homedir = unsafe { CStr::from_ptr(pw.pw_dir) }; + // SAFETY: initialized above, pw alive until end of function, nul terminated + let shell = unsafe { CStr::from_ptr(pw.pw_shell) }; + Ok(UserInfo { + username: username.to_string_lossy().into_owned(), + homedir: homedir.to_string_lossy().into_owned(), + shell: Some(shell.to_string_lossy().into_owned()), + }) +} + +#[cfg(windows)] +fn get_user_info(_uid: u32) -> Result { + use std::ffi::OsString; + use std::os::windows::ffi::OsStringExt; + + use windows_sys::Win32::Foundation::CloseHandle; + use windows_sys::Win32::Foundation::GetLastError; + use windows_sys::Win32::Foundation::ERROR_INSUFFICIENT_BUFFER; + use windows_sys::Win32::Foundation::HANDLE; + use windows_sys::Win32::System::Threading::GetCurrentProcess; + use windows_sys::Win32::System::Threading::OpenProcessToken; + use windows_sys::Win32::UI::Shell::GetUserProfileDirectoryW; + struct Handle(HANDLE); + impl Drop for Handle { + fn drop(&mut self) { + // SAFETY: win32 call + unsafe { + CloseHandle(self.0); + } + } + } + let mut token: MaybeUninit = MaybeUninit::uninit(); + + // Get a handle to the current process + // SAFETY: win32 call + unsafe { + if OpenProcessToken( + GetCurrentProcess(), + windows_sys::Win32::Security::TOKEN_READ, + token.as_mut_ptr(), + ) == 0 + { + return Err( + OsError::FailedToGetUserInfo(std::io::Error::last_os_error()), + ); + } + } + + // SAFETY: initialized by call above + let token = Handle(unsafe { token.assume_init() }); + + let mut bufsize = 0; + // get the size for the homedir buf (it'll end up in `bufsize`) + // SAFETY: win32 call + unsafe { + GetUserProfileDirectoryW(token.0, std::ptr::null_mut(), &mut bufsize); + let err = GetLastError(); + if err != ERROR_INSUFFICIENT_BUFFER { + return Err(OsError::FailedToGetUserInfo( + std::io::Error::from_raw_os_error(err as i32), + )); 
+ } + } + let mut path = vec![0; bufsize as usize]; + // Actually get the homedir + // SAFETY: path is `bufsize` elements + unsafe { + if GetUserProfileDirectoryW(token.0, path.as_mut_ptr(), &mut bufsize) == 0 { + return Err( + OsError::FailedToGetUserInfo(std::io::Error::last_os_error()), + ); + } + } + // remove trailing nul + path.pop(); + let homedir_wide = OsString::from_wide(&path); + let homedir = homedir_wide.to_string_lossy().into_owned(); + + Ok(UserInfo { + username: deno_whoami::username(), + homedir, + shell: None, + }) +} + #[op2] -#[string] -pub fn op_node_os_username

( +#[serde] +pub fn op_node_os_user_info

( state: &mut OpState, -) -> Result + #[smi] uid: u32, +) -> Result where P: NodePermissions + 'static, { { let permissions = state.borrow_mut::

(); - permissions.check_sys("username", "node:os.userInfo()")?; + permissions + .check_sys("userInfo", "node:os.userInfo()") + .map_err(OsError::Permission)?; } - Ok(deno_whoami::username()) + get_user_info(uid) } #[op2(fast)] diff --git a/ext/node/polyfills/internal/errors.ts b/ext/node/polyfills/internal/errors.ts index 51bd7a025..5a3d4437a 100644 --- a/ext/node/polyfills/internal/errors.ts +++ b/ext/node/polyfills/internal/errors.ts @@ -2558,19 +2558,6 @@ export class ERR_FS_RMDIR_ENOTDIR extends NodeSystemError { } } -export class ERR_OS_NO_HOMEDIR extends NodeSystemError { - constructor() { - const code = isWindows ? "ENOENT" : "ENOTDIR"; - const ctx: NodeSystemErrorCtx = { - message: "not a directory", - syscall: "home", - code, - errno: isWindows ? osConstants.errno.ENOENT : osConstants.errno.ENOTDIR, - }; - super(code, ctx, "Path is not a directory"); - } -} - export class ERR_HTTP_SOCKET_ASSIGNED extends NodeError { constructor() { super( diff --git a/ext/node/polyfills/os.ts b/ext/node/polyfills/os.ts index e47e8679e..edc89ed2c 100644 --- a/ext/node/polyfills/os.ts +++ b/ext/node/polyfills/os.ts @@ -28,16 +28,17 @@ import { op_homedir, op_node_os_get_priority, op_node_os_set_priority, - op_node_os_username, + op_node_os_user_info, } from "ext:core/ops"; import { validateIntegerRange } from "ext:deno_node/_utils.ts"; import process from "node:process"; import { isWindows } from "ext:deno_node/_util/os.ts"; -import { ERR_OS_NO_HOMEDIR } from "ext:deno_node/internal/errors.ts"; import { os } from "ext:deno_node/internal_binding/constants.ts"; import { osUptime } from "ext:runtime/30_os.js"; import { Buffer } from "ext:deno_node/internal/buffer.mjs"; +import { primordials } from "ext:core/mod.js"; +const { StringPrototypeEndsWith, StringPrototypeSlice } = primordials; export const constants = os; @@ -136,6 +137,8 @@ export function arch(): string { (uptime as any)[Symbol.toPrimitive] = (): number => uptime(); // deno-lint-ignore no-explicit-any (machine as 
any)[Symbol.toPrimitive] = (): string => machine(); +// deno-lint-ignore no-explicit-any +(tmpdir as any)[Symbol.toPrimitive] = (): string | null => tmpdir(); export function cpus(): CPUCoreInfo[] { return op_cpus(); @@ -268,26 +271,27 @@ export function setPriority(pid: number, priority?: number) { export function tmpdir(): string | null { /* This follows the node js implementation, but has a few differences: - * On windows, if none of the environment variables are defined, - we return null. - * On unix we use a plain Deno.env.get, instead of safeGetenv, + * We use a plain Deno.env.get, instead of safeGetenv, which special cases setuid binaries. - * Node removes a single trailing / or \, we remove all. */ if (isWindows) { - const temp = Deno.env.get("TEMP") || Deno.env.get("TMP"); - if (temp) { - return temp.replace(/(? 1 && StringPrototypeEndsWith(temp, "\\") && + !StringPrototypeEndsWith(temp, ":\\") + ) { + temp = StringPrototypeSlice(temp, 0, -1); } - return null; + + return temp; } else { // !isWindows - const temp = Deno.env.get("TMPDIR") || Deno.env.get("TMP") || + let temp = Deno.env.get("TMPDIR") || Deno.env.get("TMP") || Deno.env.get("TEMP") || "/tmp"; - return temp.replace(/(? 1 && StringPrototypeEndsWith(temp, "/")) { + temp = StringPrototypeSlice(temp, 0, -1); + } + return temp; } } @@ -320,7 +324,6 @@ export function uptime(): number { return osUptime(); } -/** Not yet implemented */ export function userInfo( options: UserInfoOptions = { encoding: "utf-8" }, ): UserInfo { @@ -331,20 +334,10 @@ export function userInfo( uid = -1; gid = -1; } - - // TODO(@crowlKats): figure out how to do this correctly: - // The value of homedir returned by os.userInfo() is provided by the operating system. - // This differs from the result of os.homedir(), which queries environment - // variables for the home directory before falling back to the operating system response. 
- let _homedir = homedir(); - if (!_homedir) { - throw new ERR_OS_NO_HOMEDIR(); - } - let shell = isWindows ? null : (Deno.env.get("SHELL") || null); - let username = op_node_os_username(); + let { username, homedir, shell } = op_node_os_user_info(uid); if (options?.encoding === "buffer") { - _homedir = _homedir ? Buffer.from(_homedir) : _homedir; + homedir = homedir ? Buffer.from(homedir) : homedir; shell = shell ? Buffer.from(shell) : shell; username = Buffer.from(username); } @@ -352,7 +345,7 @@ export function userInfo( return { uid, gid, - homedir: _homedir, + homedir, shell, username, }; -- cgit v1.2.3 From 4774eab64d5176e997b6431f03f075782321b3d9 Mon Sep 17 00:00:00 2001 From: Divy Srivastava Date: Fri, 1 Nov 2024 16:13:02 +0530 Subject: chore: upgrade to rust 1.82 and LLVM 19 (#26615) Upgrade to rust 1.82 and LLVM 19 . Removes one webusb test because `requestAdapter` not working on new ubuntu 24 runners --- ext/node/ops/crypto/sign.rs | 30 ++++++++++++------------------ 1 file changed, 12 insertions(+), 18 deletions(-) (limited to 'ext') diff --git a/ext/node/ops/crypto/sign.rs b/ext/node/ops/crypto/sign.rs index b7779a5d8..31744f86b 100644 --- a/ext/node/ops/crypto/sign.rs +++ b/ext/node/ops/crypto/sign.rs @@ -79,18 +79,15 @@ impl KeyObjectHandle { AsymmetricPrivateKey::RsaPss(key) => { let mut hash_algorithm = None; let mut salt_length = None; - match &key.details { - Some(details) => { - if details.hash_algorithm != details.mf1_hash_algorithm { - return Err(type_error( + if let Some(details) = &key.details { + if details.hash_algorithm != details.mf1_hash_algorithm { + return Err(type_error( "rsa-pss with different mf1 hash algorithm and hash algorithm is not supported", )); - } - hash_algorithm = Some(details.hash_algorithm); - salt_length = Some(details.salt_length as usize); } - None => {} - }; + hash_algorithm = Some(details.hash_algorithm); + salt_length = Some(details.salt_length as usize); + } if let Some(s) = pss_salt_length { salt_length = 
Some(s as usize); } @@ -215,18 +212,15 @@ impl KeyObjectHandle { AsymmetricPublicKey::RsaPss(key) => { let mut hash_algorithm = None; let mut salt_length = None; - match &key.details { - Some(details) => { - if details.hash_algorithm != details.mf1_hash_algorithm { - return Err(type_error( + if let Some(details) = &key.details { + if details.hash_algorithm != details.mf1_hash_algorithm { + return Err(type_error( "rsa-pss with different mf1 hash algorithm and hash algorithm is not supported", )); - } - hash_algorithm = Some(details.hash_algorithm); - salt_length = Some(details.salt_length as usize); } - None => {} - }; + hash_algorithm = Some(details.hash_algorithm); + salt_length = Some(details.salt_length as usize); + } if let Some(s) = pss_salt_length { salt_length = Some(s as usize); } -- cgit v1.2.3 From 826e42a5b5880c974ae019a7a21aade6a718062c Mon Sep 17 00:00:00 2001 From: David Sherret Date: Fri, 1 Nov 2024 12:27:00 -0400 Subject: fix: improved support for cjs and cts modules (#26558) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * cts support * better cjs/cts type checking * deno compile cjs/cts support * More efficient detect cjs (going towards stabilization) * Determination of whether .js, .ts, .jsx, or .tsx is cjs or esm is only done after loading * Support `import x = require(...);` Co-authored-by: Bartek Iwańczuk --- ext/node/lib.rs | 47 ++++++++++-------------------- ext/node/ops/require.rs | 62 +++++++++++++++++++++++----------------- ext/node/ops/worker_threads.rs | 40 ++++---------------------- ext/node/polyfills/01_require.js | 31 ++++++++++++++++++-- 4 files changed, 83 insertions(+), 97 deletions(-) (limited to 'ext') diff --git a/ext/node/lib.rs b/ext/node/lib.rs index b34bea815..db6d08e11 100644 --- a/ext/node/lib.rs +++ b/ext/node/lib.rs @@ -9,15 +9,11 @@ use std::path::Path; use std::path::PathBuf; use deno_core::error::AnyError; -use deno_core::located_script_name; use deno_core::op2; use 
deno_core::url::Url; #[allow(unused_imports)] use deno_core::v8; use deno_core::v8::ExternalReference; -use deno_core::JsRuntime; -use deno_fs::sync::MaybeSend; -use deno_fs::sync::MaybeSync; use node_resolver::NpmResolverRc; use once_cell::sync::Lazy; @@ -125,16 +121,17 @@ impl NodePermissions for deno_permissions::PermissionsContainer { } #[allow(clippy::disallowed_types)] -pub type NodeRequireResolverRc = - deno_fs::sync::MaybeArc; +pub type NodeRequireLoaderRc = std::rc::Rc; -pub trait NodeRequireResolver: std::fmt::Debug + MaybeSend + MaybeSync { +pub trait NodeRequireLoader { #[must_use = "the resolved return value to mitigate time-of-check to time-of-use issues"] fn ensure_read_permission<'a>( &self, permissions: &mut dyn NodePermissions, path: &'a Path, ) -> Result, AnyError>; + + fn load_text_file_lossy(&self, path: &Path) -> Result; } pub static NODE_ENV_VAR_ALLOWLIST: Lazy> = Lazy::new(|| { @@ -152,10 +149,12 @@ fn op_node_build_os() -> String { env!("TARGET").split('-').nth(2).unwrap().to_string() } +#[derive(Clone)] pub struct NodeExtInitServices { - pub node_require_resolver: NodeRequireResolverRc, + pub node_require_loader: NodeRequireLoaderRc, pub node_resolver: NodeResolverRc, pub npm_resolver: NpmResolverRc, + pub pkg_json_resolver: PackageJsonResolverRc, } deno_core::extension!(deno_node, @@ -639,9 +638,10 @@ deno_core::extension!(deno_node, state.put(options.fs.clone()); if let Some(init) = &options.maybe_init { - state.put(init.node_require_resolver.clone()); + state.put(init.node_require_loader.clone()); state.put(init.node_resolver.clone()); state.put(init.npm_resolver.clone()); + state.put(init.pkg_json_resolver.clone()); } }, global_template_middleware = global_template_middleware, @@ -761,33 +761,16 @@ deno_core::extension!(deno_node, }, ); -pub fn load_cjs_module( - js_runtime: &mut JsRuntime, - module: &str, - main: bool, - inspect_brk: bool, -) -> Result<(), AnyError> { - fn escape_for_single_quote_string(text: &str) -> String { - 
text.replace('\\', r"\\").replace('\'', r"\'") - } - - let source_code = format!( - r#"(function loadCjsModule(moduleName, isMain, inspectBrk) {{ - Deno[Deno.internal].node.loadCjsModule(moduleName, isMain, inspectBrk); - }})('{module}', {main}, {inspect_brk});"#, - main = main, - module = escape_for_single_quote_string(module), - inspect_brk = inspect_brk, - ); - - js_runtime.execute_script(located_script_name!(), source_code)?; - Ok(()) -} - pub type NodeResolver = node_resolver::NodeResolver; #[allow(clippy::disallowed_types)] pub type NodeResolverRc = deno_fs::sync::MaybeArc>; +pub type PackageJsonResolver = + node_resolver::PackageJsonResolver; +#[allow(clippy::disallowed_types)] +pub type PackageJsonResolverRc = deno_fs::sync::MaybeArc< + node_resolver::PackageJsonResolver, +>; #[derive(Debug)] pub struct DenoFsNodeResolverEnv { diff --git a/ext/node/ops/require.rs b/ext/node/ops/require.rs index 7524fb43c..30db8b629 100644 --- a/ext/node/ops/require.rs +++ b/ext/node/ops/require.rs @@ -9,6 +9,7 @@ use deno_core::OpState; use deno_fs::FileSystemRc; use deno_package_json::PackageJsonRc; use deno_path_util::normalize_path; +use deno_path_util::url_to_file_path; use node_resolver::NodeModuleKind; use node_resolver::NodeResolutionMode; use node_resolver::REQUIRE_CONDITIONS; @@ -19,9 +20,10 @@ use std::path::PathBuf; use std::rc::Rc; use crate::NodePermissions; -use crate::NodeRequireResolverRc; +use crate::NodeRequireLoaderRc; use crate::NodeResolverRc; use crate::NpmResolverRc; +use crate::PackageJsonResolverRc; #[must_use = "the resolved return value to mitigate time-of-check to time-of-use issues"] fn ensure_read_permission<'a, P>( @@ -31,9 +33,9 @@ fn ensure_read_permission<'a, P>( where P: NodePermissions + 'static, { - let resolver = state.borrow::().clone(); + let loader = state.borrow::().clone(); let permissions = state.borrow_mut::

(); - resolver.ensure_read_permission(permissions, file_path) + loader.ensure_read_permission(permissions, file_path) } #[derive(Debug, thiserror::Error)] @@ -54,10 +56,14 @@ pub enum RequireError { PackageImportsResolve( #[from] node_resolver::errors::PackageImportsResolveError, ), - #[error("failed to convert '{0}' to file path")] - FilePathConversion(Url), + #[error(transparent)] + FilePathConversion(#[from] deno_path_util::UrlToFilePathError), + #[error(transparent)] + UrlConversion(#[from] deno_path_util::PathToUrlError), #[error(transparent)] Fs(#[from] deno_io::fs::FsError), + #[error(transparent)] + ReadModule(deno_core::error::AnyError), #[error("Unable to get CWD: {0}")] UnableToGetCwd(deno_io::fs::FsError), } @@ -229,8 +235,11 @@ pub fn op_require_is_deno_dir_package( state: &mut OpState, #[string] path: String, ) -> bool { - let resolver = state.borrow::(); - resolver.in_npm_package_at_file_path(&PathBuf::from(path)) + let resolver = state.borrow::(); + match deno_path_util::url_from_file_path(&PathBuf::from(path)) { + Ok(specifier) => resolver.in_npm_package(&specifier), + Err(_) => false, + } } #[op2] @@ -411,8 +420,8 @@ where return Ok(None); } - let node_resolver = state.borrow::(); - let pkg = node_resolver + let pkg_json_resolver = state.borrow::(); + let pkg = pkg_json_resolver .get_closest_package_json_from_path(&PathBuf::from(parent_path.unwrap())) .ok() .flatten(); @@ -441,6 +450,7 @@ where let referrer = deno_core::url::Url::from_file_path(&pkg.path).unwrap(); if let Some(exports) = &pkg.exports { + let node_resolver = state.borrow::(); let r = node_resolver.package_exports_resolve( &pkg.path, &expansion, @@ -470,10 +480,13 @@ where P: NodePermissions + 'static, { let file_path = PathBuf::from(file_path); + // todo(dsherret): there's multiple borrows to NodeRequireLoaderRc here let file_path = ensure_read_permission::

(state, &file_path) .map_err(RequireError::Permission)?; - let fs = state.borrow::(); - Ok(fs.read_text_file_lossy_sync(&file_path, None)?) + let loader = state.borrow::(); + loader + .load_text_file_lossy(&file_path) + .map_err(RequireError::ReadModule) } #[op2] @@ -503,11 +516,12 @@ where P: NodePermissions + 'static, { let fs = state.borrow::(); - let npm_resolver = state.borrow::(); let node_resolver = state.borrow::(); + let pkg_json_resolver = state.borrow::(); let modules_path = PathBuf::from(&modules_path_str); - let pkg_path = if npm_resolver.in_npm_package_at_file_path(&modules_path) + let modules_specifier = deno_path_util::url_from_file_path(&modules_path)?; + let pkg_path = if node_resolver.in_npm_package(&modules_specifier) && !uses_local_node_modules_dir { modules_path @@ -521,7 +535,7 @@ where } }; let Some(pkg) = - node_resolver.load_package_json(&pkg_path.join("package.json"))? + pkg_json_resolver.load_package_json(&pkg_path.join("package.json"))? else { return Ok(None); }; @@ -561,8 +575,8 @@ where { let filename = PathBuf::from(filename); // permissions: allow reading the closest package.json files - let node_resolver = state.borrow::().clone(); - node_resolver.get_closest_package_json_from_path(&filename) + let pkg_json_resolver = state.borrow::(); + pkg_json_resolver.get_closest_package_json_from_path(&filename) } #[op2] @@ -574,13 +588,13 @@ pub fn op_require_read_package_scope

( where P: NodePermissions + 'static, { - let node_resolver = state.borrow::().clone(); + let pkg_json_resolver = state.borrow::(); let package_json_path = PathBuf::from(package_json_path); if package_json_path.file_name() != Some("package.json".as_ref()) { // permissions: do not allow reading a non-package.json file return None; } - node_resolver + pkg_json_resolver .load_package_json(&package_json_path) .ok() .flatten() @@ -599,14 +613,15 @@ where let referrer_path = PathBuf::from(&referrer_filename); let referrer_path = ensure_read_permission::

(state, &referrer_path) .map_err(RequireError::Permission)?; - let node_resolver = state.borrow::(); + let pkg_json_resolver = state.borrow::(); let Some(pkg) = - node_resolver.get_closest_package_json_from_path(&referrer_path)? + pkg_json_resolver.get_closest_package_json_from_path(&referrer_path)? else { return Ok(None); }; if pkg.imports.is_some() { + let node_resolver = state.borrow::(); let referrer_url = Url::from_file_path(&referrer_filename).unwrap(); let url = node_resolver.package_imports_resolve( &request, @@ -637,13 +652,6 @@ fn url_to_file_path_string(url: &Url) -> Result { Ok(file_path.to_string_lossy().into_owned()) } -fn url_to_file_path(url: &Url) -> Result { - match url.to_file_path() { - Ok(file_path) => Ok(file_path), - Err(()) => Err(RequireError::FilePathConversion(url.clone())), - } -} - #[op2(fast)] pub fn op_require_can_parse_as_esm( scope: &mut v8::HandleScope, diff --git a/ext/node/ops/worker_threads.rs b/ext/node/ops/worker_threads.rs index e33cf9157..d2e575882 100644 --- a/ext/node/ops/worker_threads.rs +++ b/ext/node/ops/worker_threads.rs @@ -4,14 +4,12 @@ use deno_core::op2; use deno_core::url::Url; use deno_core::OpState; use deno_fs::FileSystemRc; -use node_resolver::NodeResolution; use std::borrow::Cow; use std::path::Path; use std::path::PathBuf; use crate::NodePermissions; -use crate::NodeRequireResolverRc; -use crate::NodeResolverRc; +use crate::NodeRequireLoaderRc; #[must_use = "the resolved return value to mitigate time-of-check to time-of-use issues"] fn ensure_read_permission<'a, P>( @@ -21,9 +19,9 @@ fn ensure_read_permission<'a, P>( where P: NodePermissions + 'static, { - let resolver = state.borrow::().clone(); + let loader = state.borrow::().clone(); let permissions = state.borrow_mut::

(); - resolver.ensure_read_permission(permissions, file_path) + loader.ensure_read_permission(permissions, file_path) } #[derive(Debug, thiserror::Error)] @@ -42,14 +40,11 @@ pub enum WorkerThreadsFilenameError { UrlToPath, #[error("File not found [{0:?}]")] FileNotFound(PathBuf), - #[error("Neither ESM nor CJS")] - NeitherEsmNorCjs, - #[error("{0}")] - UrlToNodeResolution(node_resolver::errors::UrlToNodeResolutionError), #[error(transparent)] Fs(#[from] deno_io::fs::FsError), } +// todo(dsherret): we should remove this and do all this work inside op_create_worker #[op2] #[string] pub fn op_worker_threads_filename

( @@ -88,30 +83,5 @@ where url_path.to_path_buf(), )); } - let node_resolver = state.borrow::(); - match node_resolver - .url_to_node_resolution(url) - .map_err(WorkerThreadsFilenameError::UrlToNodeResolution)? - { - NodeResolution::Esm(u) => Ok(u.to_string()), - NodeResolution::CommonJs(u) => wrap_cjs(u), - NodeResolution::BuiltIn(_) => { - Err(WorkerThreadsFilenameError::NeitherEsmNorCjs) - } - } -} - -/// -/// Wrap a CJS file-URL and the required setup in a stringified `data:`-URL -/// -fn wrap_cjs(url: Url) -> Result { - let path = url - .to_file_path() - .map_err(|_| WorkerThreadsFilenameError::UrlToPath)?; - let filename = path.file_name().unwrap().to_string_lossy(); - Ok(format!( - "data:text/javascript,import {{ createRequire }} from \"node:module\";\ - const require = createRequire(\"{}\"); require(\"./{}\");", - url, filename, - )) + Ok(url.to_string()) } diff --git a/ext/node/polyfills/01_require.js b/ext/node/polyfills/01_require.js index 7935903a8..296d819aa 100644 --- a/ext/node/polyfills/01_require.js +++ b/ext/node/polyfills/01_require.js @@ -1071,13 +1071,35 @@ Module._extensions[".js"] = function (module, filename) { } else if (pkg?.type === "commonjs") { format = "commonjs"; } - } else if (StringPrototypeEndsWith(filename, ".cjs")) { - format = "commonjs"; } module._compile(content, filename, format); }; +Module._extensions[".ts"] = + Module._extensions[".jsx"] = + Module._extensions[".tsx"] = + function (module, filename) { + const content = op_require_read_file(filename); + + let format; + const pkg = op_require_read_closest_package_json(filename); + if (pkg?.type === "module") { + format = "module"; + } else if (pkg?.type === "commonjs") { + format = "commonjs"; + } + + module._compile(content, filename, format); + }; + +Module._extensions[".cjs"] = + Module._extensions[".cts"] = + function (module, filename) { + const content = op_require_read_file(filename); + module._compile(content, filename, "commonjs"); + }; + function 
loadESMFromCJS(module, filename, code) { const namespace = op_import_sync( url.pathToFileURL(filename).toString(), @@ -1087,7 +1109,10 @@ function loadESMFromCJS(module, filename, code) { module.exports = namespace; } -Module._extensions[".mjs"] = function (module, filename) { +Module._extensions[".mjs"] = Module._extensions[".mts"] = function ( + module, + filename, +) { loadESMFromCJS(module, filename); }; -- cgit v1.2.3 From fb1d33a7111e45e9b414cfe922a5db5ee4daf3ea Mon Sep 17 00:00:00 2001 From: Kenta Moriuchi Date: Tue, 5 Nov 2024 02:17:11 +0900 Subject: chore: update dlint to v0.68.0 for internal (#26711) --- ext/node/polyfills/_fs/_fs_common.ts | 1 + ext/node/polyfills/_fs/_fs_open.ts | 4 ++-- ext/node/polyfills/_fs/_fs_readv.ts | 1 + ext/node/polyfills/internal/crypto/keygen.ts | 1 + ext/node/polyfills/internal/crypto/random.ts | 1 + ext/node/polyfills/internal_binding/http_parser.ts | 1 + ext/node/polyfills/vm.js | 1 + 7 files changed, 8 insertions(+), 2 deletions(-) (limited to 'ext') diff --git a/ext/node/polyfills/_fs/_fs_common.ts b/ext/node/polyfills/_fs/_fs_common.ts index ac0bf5a55..a29548bb3 100644 --- a/ext/node/polyfills/_fs/_fs_common.ts +++ b/ext/node/polyfills/_fs/_fs_common.ts @@ -20,6 +20,7 @@ import { notImplemented, TextEncodings, } from "ext:deno_node/_utils.ts"; +import { type Buffer } from "node:buffer"; export type CallbackWithError = (err: ErrnoException | null) => void; diff --git a/ext/node/polyfills/_fs/_fs_open.ts b/ext/node/polyfills/_fs/_fs_open.ts index 8bd989790..31ca4bb61 100644 --- a/ext/node/polyfills/_fs/_fs_open.ts +++ b/ext/node/polyfills/_fs/_fs_open.ts @@ -147,8 +147,8 @@ export function open( export function openPromise( path: string | Buffer | URL, - flags?: openFlags = "r", - mode? 
= 0o666, + flags: openFlags = "r", + mode = 0o666, ): Promise { return new Promise((resolve, reject) => { open(path, flags, mode, (err, fd) => { diff --git a/ext/node/polyfills/_fs/_fs_readv.ts b/ext/node/polyfills/_fs/_fs_readv.ts index 384f5e319..2259f029a 100644 --- a/ext/node/polyfills/_fs/_fs_readv.ts +++ b/ext/node/polyfills/_fs/_fs_readv.ts @@ -15,6 +15,7 @@ import { maybeCallback } from "ext:deno_node/_fs/_fs_common.ts"; import { validateInteger } from "ext:deno_node/internal/validators.mjs"; import * as io from "ext:deno_io/12_io.js"; import { op_fs_seek_async, op_fs_seek_sync } from "ext:core/ops"; +import process from "node:process"; type Callback = ( err: ErrnoException | null, diff --git a/ext/node/polyfills/internal/crypto/keygen.ts b/ext/node/polyfills/internal/crypto/keygen.ts index a40c76c0d..44bfd8327 100644 --- a/ext/node/polyfills/internal/crypto/keygen.ts +++ b/ext/node/polyfills/internal/crypto/keygen.ts @@ -29,6 +29,7 @@ import { } from "ext:deno_node/internal/validators.mjs"; import { Buffer } from "node:buffer"; import { KeyFormat, KeyType } from "ext:deno_node/internal/crypto/types.ts"; +import process from "node:process"; import { op_node_generate_dh_group_key, diff --git a/ext/node/polyfills/internal/crypto/random.ts b/ext/node/polyfills/internal/crypto/random.ts index 4219414dc..a41b86819 100644 --- a/ext/node/polyfills/internal/crypto/random.ts +++ b/ext/node/polyfills/internal/crypto/random.ts @@ -38,6 +38,7 @@ import { ERR_INVALID_ARG_TYPE, ERR_OUT_OF_RANGE, } from "ext:deno_node/internal/errors.ts"; +import { Buffer } from "node:buffer"; export { default as randomBytes } from "ext:deno_node/internal/crypto/_randomBytes.ts"; export { diff --git a/ext/node/polyfills/internal_binding/http_parser.ts b/ext/node/polyfills/internal_binding/http_parser.ts index ca4f896e2..bad10d985 100644 --- a/ext/node/polyfills/internal_binding/http_parser.ts +++ b/ext/node/polyfills/internal_binding/http_parser.ts @@ -126,6 +126,7 @@ 
ObjectSetPrototypeOf(HTTPParser.prototype, AsyncWrap.prototype); function defineProps(obj: object, props: Record) { for (const entry of new SafeArrayIterator(ObjectEntries(props))) { ObjectDefineProperty(obj, entry[0], { + __proto__: null, value: entry[1], enumerable: true, writable: true, diff --git a/ext/node/polyfills/vm.js b/ext/node/polyfills/vm.js index 183ddad2f..b64c847c5 100644 --- a/ext/node/polyfills/vm.js +++ b/ext/node/polyfills/vm.js @@ -182,6 +182,7 @@ function getContextOptions(options) { let defaultContextNameIndex = 1; export function createContext( + // deno-lint-ignore prefer-primordials contextObject = {}, options = { __proto__: null }, ) { -- cgit v1.2.3 From fe9f0ee5934871175758857899fe64e56c397fd5 Mon Sep 17 00:00:00 2001 From: Leo Kettmeir Date: Mon, 4 Nov 2024 09:17:21 -0800 Subject: refactor(runtime/permissions): use concrete error types (#26464) --- ext/fetch/lib.rs | 23 ++---- ext/ffi/call.rs | 14 ++-- ext/ffi/callback.rs | 6 +- ext/ffi/dlfcn.rs | 6 +- ext/ffi/lib.rs | 11 ++- ext/ffi/repr.rs | 86 +++++--------------- ext/fs/lib.rs | 50 +++++++----- ext/fs/ops.rs | 217 ++++++++++++++----------------------------------- ext/kv/remote.rs | 13 +-- ext/kv/sqlite.rs | 12 +-- ext/napi/lib.rs | 15 ++-- ext/net/lib.rs | 17 ++-- ext/net/ops.rs | 36 ++++---- ext/node/lib.rs | 34 +++++--- ext/node/ops/fs.rs | 35 +++----- ext/node/ops/http.rs | 4 +- ext/node/ops/os/mod.rs | 14 +--- ext/websocket/lib.rs | 18 ++-- 18 files changed, 229 insertions(+), 382 deletions(-) (limited to 'ext') diff --git a/ext/fetch/lib.rs b/ext/fetch/lib.rs index 4df8dc3d7..7ef26431c 100644 --- a/ext/fetch/lib.rs +++ b/ext/fetch/lib.rs @@ -39,6 +39,7 @@ use deno_core::OpState; use deno_core::RcRef; use deno_core::Resource; use deno_core::ResourceId; +use deno_permissions::PermissionCheckError; use deno_tls::rustls::RootCertStore; use deno_tls::Proxy; use deno_tls::RootCertStoreProvider; @@ -149,7 +150,7 @@ pub enum FetchError { #[error(transparent)] 
Resource(deno_core::error::AnyError), #[error(transparent)] - Permission(deno_core::error::AnyError), + Permission(#[from] PermissionCheckError), #[error("NetworkError when attempting to fetch resource")] NetworkError, #[error("Fetching files only supports the GET method: received {0}")] @@ -346,13 +347,13 @@ pub trait FetchPermissions { &mut self, url: &Url, api_name: &str, - ) -> Result<(), deno_core::error::AnyError>; + ) -> Result<(), PermissionCheckError>; #[must_use = "the resolved return value to mitigate time-of-check to time-of-use issues"] fn check_read<'a>( &mut self, p: &'a Path, api_name: &str, - ) -> Result, deno_core::error::AnyError>; + ) -> Result, PermissionCheckError>; } impl FetchPermissions for deno_permissions::PermissionsContainer { @@ -361,7 +362,7 @@ impl FetchPermissions for deno_permissions::PermissionsContainer { &mut self, url: &Url, api_name: &str, - ) -> Result<(), deno_core::error::AnyError> { + ) -> Result<(), PermissionCheckError> { deno_permissions::PermissionsContainer::check_net_url(self, url, api_name) } @@ -370,7 +371,7 @@ impl FetchPermissions for deno_permissions::PermissionsContainer { &mut self, path: &'a Path, api_name: &str, - ) -> Result, deno_core::error::AnyError> { + ) -> Result, PermissionCheckError> { deno_permissions::PermissionsContainer::check_read_path( self, path, @@ -414,9 +415,7 @@ where "file" => { let path = url.to_file_path().map_err(|_| FetchError::NetworkError)?; let permissions = state.borrow_mut::(); - let path = permissions - .check_read(&path, "fetch()") - .map_err(FetchError::Permission)?; + let path = permissions.check_read(&path, "fetch()")?; let url = match path { Cow::Owned(path) => Url::from_file_path(path).unwrap(), Cow::Borrowed(_) => url, @@ -442,9 +441,7 @@ where } "http" | "https" => { let permissions = state.borrow_mut::(); - permissions - .check_net_url(&url, "fetch()") - .map_err(FetchError::Resource)?; + permissions.check_net_url(&url, "fetch()")?; let maybe_authority = 
extract_authority(&mut url); let uri = url @@ -863,9 +860,7 @@ where if let Some(proxy) = args.proxy.clone() { let permissions = state.borrow_mut::(); let url = Url::parse(&proxy.url)?; - permissions - .check_net_url(&url, "Deno.createHttpClient()") - .map_err(FetchError::Permission)?; + permissions.check_net_url(&url, "Deno.createHttpClient()")?; } let options = state.borrow::(); diff --git a/ext/ffi/call.rs b/ext/ffi/call.rs index ef61dc383..bbff0ee48 100644 --- a/ext/ffi/call.rs +++ b/ext/ffi/call.rs @@ -32,7 +32,9 @@ pub enum CallError { #[error("Invalid FFI symbol name: '{0}'")] InvalidSymbol(String), #[error(transparent)] - Permission(deno_core::error::AnyError), + Permission(#[from] deno_permissions::PermissionCheckError), + #[error(transparent)] + Resource(deno_core::error::AnyError), #[error(transparent)] Callback(#[from] super::CallbackError), } @@ -301,9 +303,7 @@ where { let mut state = state.borrow_mut(); let permissions = state.borrow_mut::(); - permissions - .check_partial_no_path() - .map_err(CallError::Permission)?; + permissions.check_partial_no_path()?; }; let symbol = PtrSymbol::new(pointer, &def)?; @@ -347,7 +347,7 @@ pub fn op_ffi_call_nonblocking( let resource = state .resource_table .get::(rid) - .map_err(CallError::Permission)?; + .map_err(CallError::Resource)?; let symbols = &resource.symbols; *symbols .get(&symbol) @@ -401,9 +401,7 @@ where { let mut state = state.borrow_mut(); let permissions = state.borrow_mut::(); - permissions - .check_partial_no_path() - .map_err(CallError::Permission)?; + permissions.check_partial_no_path()?; }; let symbol = PtrSymbol::new(pointer, &def)?; diff --git a/ext/ffi/callback.rs b/ext/ffi/callback.rs index f33e0413a..29583c800 100644 --- a/ext/ffi/callback.rs +++ b/ext/ffi/callback.rs @@ -38,7 +38,7 @@ pub enum CallbackError { #[error(transparent)] Resource(deno_core::error::AnyError), #[error(transparent)] - Permission(deno_core::error::AnyError), + Permission(#[from] 
deno_permissions::PermissionCheckError), #[error(transparent)] Other(deno_core::error::AnyError), } @@ -572,9 +572,7 @@ where FP: FfiPermissions + 'static, { let permissions = state.borrow_mut::(); - permissions - .check_partial_no_path() - .map_err(CallbackError::Permission)?; + permissions.check_partial_no_path()?; let thread_id: u32 = LOCAL_THREAD_ID.with(|s| { let value = *s.borrow(); diff --git a/ext/ffi/dlfcn.rs b/ext/ffi/dlfcn.rs index 53bdcbc5c..55909468f 100644 --- a/ext/ffi/dlfcn.rs +++ b/ext/ffi/dlfcn.rs @@ -30,7 +30,7 @@ pub enum DlfcnError { #[error(transparent)] Dlopen(#[from] dlopen2::Error), #[error(transparent)] - Permission(deno_core::error::AnyError), + Permission(#[from] deno_permissions::PermissionCheckError), #[error(transparent)] Other(deno_core::error::AnyError), } @@ -133,9 +133,7 @@ where FP: FfiPermissions + 'static, { let permissions = state.borrow_mut::(); - let path = permissions - .check_partial_with_path(&args.path) - .map_err(DlfcnError::Permission)?; + let path = permissions.check_partial_with_path(&args.path)?; let lib = Library::open(&path).map_err(|e| { dlopen2::Error::OpeningLibraryError(std::io::Error::new( diff --git a/ext/ffi/lib.rs b/ext/ffi/lib.rs index 237f8c3b0..73ec7757a 100644 --- a/ext/ffi/lib.rs +++ b/ext/ffi/lib.rs @@ -1,7 +1,5 @@ // Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. 
-use deno_core::error::AnyError; - use std::mem::size_of; use std::os::raw::c_char; use std::os::raw::c_short; @@ -31,6 +29,7 @@ use symbol::Symbol; pub use call::CallError; pub use callback::CallbackError; +use deno_permissions::PermissionCheckError; pub use dlfcn::DlfcnError; pub use ir::IRError; pub use r#static::StaticError; @@ -48,17 +47,17 @@ const _: () = { pub const UNSTABLE_FEATURE_NAME: &str = "ffi"; pub trait FfiPermissions { - fn check_partial_no_path(&mut self) -> Result<(), AnyError>; + fn check_partial_no_path(&mut self) -> Result<(), PermissionCheckError>; #[must_use = "the resolved return value to mitigate time-of-check to time-of-use issues"] fn check_partial_with_path( &mut self, path: &str, - ) -> Result; + ) -> Result; } impl FfiPermissions for deno_permissions::PermissionsContainer { #[inline(always)] - fn check_partial_no_path(&mut self) -> Result<(), AnyError> { + fn check_partial_no_path(&mut self) -> Result<(), PermissionCheckError> { deno_permissions::PermissionsContainer::check_ffi_partial_no_path(self) } @@ -66,7 +65,7 @@ impl FfiPermissions for deno_permissions::PermissionsContainer { fn check_partial_with_path( &mut self, path: &str, - ) -> Result { + ) -> Result { deno_permissions::PermissionsContainer::check_ffi_partial_with_path( self, path, ) diff --git a/ext/ffi/repr.rs b/ext/ffi/repr.rs index 2f04f4feb..fd8a2c8e7 100644 --- a/ext/ffi/repr.rs +++ b/ext/ffi/repr.rs @@ -46,7 +46,7 @@ pub enum ReprError { #[error("Invalid pointer pointer, pointer is null")] InvalidPointer, #[error(transparent)] - Permission(deno_core::error::AnyError), + Permission(#[from] deno_permissions::PermissionCheckError), } #[op2(fast)] @@ -58,9 +58,7 @@ where FP: FfiPermissions + 'static, { let permissions = state.borrow_mut::(); - permissions - .check_partial_no_path() - .map_err(ReprError::Permission)?; + permissions.check_partial_no_path()?; Ok(ptr_number as *mut c_void) } @@ -75,9 +73,7 @@ where FP: FfiPermissions + 'static, { let permissions = 
state.borrow_mut::(); - permissions - .check_partial_no_path() - .map_err(ReprError::Permission)?; + permissions.check_partial_no_path()?; Ok(a == b) } @@ -91,9 +87,7 @@ where FP: FfiPermissions + 'static, { let permissions = state.borrow_mut::(); - permissions - .check_partial_no_path() - .map_err(ReprError::Permission)?; + permissions.check_partial_no_path()?; Ok(buf as *mut c_void) } @@ -107,9 +101,7 @@ where FP: FfiPermissions + 'static, { let permissions = state.borrow_mut::(); - permissions - .check_partial_no_path() - .map_err(ReprError::Permission)?; + permissions.check_partial_no_path()?; let Some(buf) = buf.get_backing_store() else { return Ok(0 as _); @@ -130,9 +122,7 @@ where FP: FfiPermissions + 'static, { let permissions = state.borrow_mut::(); - permissions - .check_partial_no_path() - .map_err(ReprError::Permission)?; + permissions.check_partial_no_path()?; if ptr.is_null() { return Err(ReprError::InvalidOffset); @@ -162,9 +152,7 @@ where FP: FfiPermissions + 'static, { let permissions = state.borrow_mut::(); - permissions - .check_partial_no_path() - .map_err(ReprError::Permission)?; + permissions.check_partial_no_path()?; Ok(ptr as usize) } @@ -181,9 +169,7 @@ where FP: FfiPermissions + 'static, { let permissions = state.borrow_mut::(); - permissions - .check_partial_no_path() - .map_err(ReprError::Permission)?; + permissions.check_partial_no_path()?; if ptr.is_null() { return Err(ReprError::InvalidArrayBuffer); @@ -215,9 +201,7 @@ where FP: FfiPermissions + 'static, { let permissions = state.borrow_mut::(); - permissions - .check_partial_no_path() - .map_err(ReprError::Permission)?; + permissions.check_partial_no_path()?; if src.is_null() { Err(ReprError::InvalidArrayBuffer) @@ -246,9 +230,7 @@ where FP: FfiPermissions + 'static, { let permissions = state.borrow_mut::(); - permissions - .check_partial_no_path() - .map_err(ReprError::Permission)?; + permissions.check_partial_no_path()?; if ptr.is_null() { return Err(ReprError::InvalidCString); @@ 
-272,9 +254,7 @@ where FP: FfiPermissions + 'static, { let permissions = state.borrow_mut::(); - permissions - .check_partial_no_path() - .map_err(ReprError::Permission)?; + permissions.check_partial_no_path()?; if ptr.is_null() { return Err(ReprError::InvalidBool); @@ -294,9 +274,7 @@ where FP: FfiPermissions + 'static, { let permissions = state.borrow_mut::(); - permissions - .check_partial_no_path() - .map_err(ReprError::Permission)?; + permissions.check_partial_no_path()?; if ptr.is_null() { return Err(ReprError::InvalidU8); @@ -318,9 +296,7 @@ where FP: FfiPermissions + 'static, { let permissions = state.borrow_mut::(); - permissions - .check_partial_no_path() - .map_err(ReprError::Permission)?; + permissions.check_partial_no_path()?; if ptr.is_null() { return Err(ReprError::InvalidI8); @@ -342,9 +318,7 @@ where FP: FfiPermissions + 'static, { let permissions = state.borrow_mut::(); - permissions - .check_partial_no_path() - .map_err(ReprError::Permission)?; + permissions.check_partial_no_path()?; if ptr.is_null() { return Err(ReprError::InvalidU16); @@ -366,9 +340,7 @@ where FP: FfiPermissions + 'static, { let permissions = state.borrow_mut::(); - permissions - .check_partial_no_path() - .map_err(ReprError::Permission)?; + permissions.check_partial_no_path()?; if ptr.is_null() { return Err(ReprError::InvalidI16); @@ -390,9 +362,7 @@ where FP: FfiPermissions + 'static, { let permissions = state.borrow_mut::(); - permissions - .check_partial_no_path() - .map_err(ReprError::Permission)?; + permissions.check_partial_no_path()?; if ptr.is_null() { return Err(ReprError::InvalidU32); @@ -412,9 +382,7 @@ where FP: FfiPermissions + 'static, { let permissions = state.borrow_mut::(); - permissions - .check_partial_no_path() - .map_err(ReprError::Permission)?; + permissions.check_partial_no_path()?; if ptr.is_null() { return Err(ReprError::InvalidI32); @@ -437,9 +405,7 @@ where FP: FfiPermissions + 'static, { let permissions = state.borrow_mut::(); - permissions - 
.check_partial_no_path() - .map_err(ReprError::Permission)?; + permissions.check_partial_no_path()?; if ptr.is_null() { return Err(ReprError::InvalidU64); @@ -465,9 +431,7 @@ where FP: FfiPermissions + 'static, { let permissions = state.borrow_mut::(); - permissions - .check_partial_no_path() - .map_err(ReprError::Permission)?; + permissions.check_partial_no_path()?; if ptr.is_null() { return Err(ReprError::InvalidI64); @@ -490,9 +454,7 @@ where FP: FfiPermissions + 'static, { let permissions = state.borrow_mut::(); - permissions - .check_partial_no_path() - .map_err(ReprError::Permission)?; + permissions.check_partial_no_path()?; if ptr.is_null() { return Err(ReprError::InvalidF32); @@ -512,9 +474,7 @@ where FP: FfiPermissions + 'static, { let permissions = state.borrow_mut::(); - permissions - .check_partial_no_path() - .map_err(ReprError::Permission)?; + permissions.check_partial_no_path()?; if ptr.is_null() { return Err(ReprError::InvalidF64); @@ -534,9 +494,7 @@ where FP: FfiPermissions + 'static, { let permissions = state.borrow_mut::(); - permissions - .check_partial_no_path() - .map_err(ReprError::Permission)?; + permissions.check_partial_no_path()?; if ptr.is_null() { return Err(ReprError::InvalidPointer); diff --git a/ext/fs/lib.rs b/ext/fs/lib.rs index cd2baf22a..dd852e6be 100644 --- a/ext/fs/lib.rs +++ b/ext/fs/lib.rs @@ -22,8 +22,8 @@ pub use crate::sync::MaybeSync; use crate::ops::*; -use deno_core::error::AnyError; use deno_io::fs::FsError; +use deno_permissions::PermissionCheckError; use std::borrow::Cow; use std::path::Path; use std::path::PathBuf; @@ -42,45 +42,51 @@ pub trait FsPermissions { &mut self, path: &str, api_name: &str, - ) -> Result; + ) -> Result; #[must_use = "the resolved return value to mitigate time-of-check to time-of-use issues"] fn check_read_path<'a>( &mut self, path: &'a Path, api_name: &str, - ) -> Result, AnyError>; - fn check_read_all(&mut self, api_name: &str) -> Result<(), AnyError>; + ) -> Result, PermissionCheckError>; 
+ fn check_read_all( + &mut self, + api_name: &str, + ) -> Result<(), PermissionCheckError>; fn check_read_blind( &mut self, p: &Path, display: &str, api_name: &str, - ) -> Result<(), AnyError>; + ) -> Result<(), PermissionCheckError>; #[must_use = "the resolved return value to mitigate time-of-check to time-of-use issues"] fn check_write( &mut self, path: &str, api_name: &str, - ) -> Result; + ) -> Result; #[must_use = "the resolved return value to mitigate time-of-check to time-of-use issues"] fn check_write_path<'a>( &mut self, path: &'a Path, api_name: &str, - ) -> Result, AnyError>; + ) -> Result, PermissionCheckError>; #[must_use = "the resolved return value to mitigate time-of-check to time-of-use issues"] fn check_write_partial( &mut self, path: &str, api_name: &str, - ) -> Result; - fn check_write_all(&mut self, api_name: &str) -> Result<(), AnyError>; + ) -> Result; + fn check_write_all( + &mut self, + api_name: &str, + ) -> Result<(), PermissionCheckError>; fn check_write_blind( &mut self, p: &Path, display: &str, api_name: &str, - ) -> Result<(), AnyError>; + ) -> Result<(), PermissionCheckError>; fn check<'a>( &mut self, @@ -140,7 +146,7 @@ impl FsPermissions for deno_permissions::PermissionsContainer { &mut self, path: &str, api_name: &str, - ) -> Result { + ) -> Result { deno_permissions::PermissionsContainer::check_read(self, path, api_name) } @@ -148,7 +154,7 @@ impl FsPermissions for deno_permissions::PermissionsContainer { &mut self, path: &'a Path, api_name: &str, - ) -> Result, AnyError> { + ) -> Result, PermissionCheckError> { deno_permissions::PermissionsContainer::check_read_path( self, path, @@ -160,7 +166,7 @@ impl FsPermissions for deno_permissions::PermissionsContainer { path: &Path, display: &str, api_name: &str, - ) -> Result<(), AnyError> { + ) -> Result<(), PermissionCheckError> { deno_permissions::PermissionsContainer::check_read_blind( self, path, display, api_name, ) @@ -170,7 +176,7 @@ impl FsPermissions for 
deno_permissions::PermissionsContainer { &mut self, path: &str, api_name: &str, - ) -> Result { + ) -> Result { deno_permissions::PermissionsContainer::check_write(self, path, api_name) } @@ -178,7 +184,7 @@ impl FsPermissions for deno_permissions::PermissionsContainer { &mut self, path: &'a Path, api_name: &str, - ) -> Result, AnyError> { + ) -> Result, PermissionCheckError> { deno_permissions::PermissionsContainer::check_write_path( self, path, api_name, ) @@ -188,7 +194,7 @@ impl FsPermissions for deno_permissions::PermissionsContainer { &mut self, path: &str, api_name: &str, - ) -> Result { + ) -> Result { deno_permissions::PermissionsContainer::check_write_partial( self, path, api_name, ) @@ -199,17 +205,23 @@ impl FsPermissions for deno_permissions::PermissionsContainer { p: &Path, display: &str, api_name: &str, - ) -> Result<(), AnyError> { + ) -> Result<(), PermissionCheckError> { deno_permissions::PermissionsContainer::check_write_blind( self, p, display, api_name, ) } - fn check_read_all(&mut self, api_name: &str) -> Result<(), AnyError> { + fn check_read_all( + &mut self, + api_name: &str, + ) -> Result<(), PermissionCheckError> { deno_permissions::PermissionsContainer::check_read_all(self, api_name) } - fn check_write_all(&mut self, api_name: &str) -> Result<(), AnyError> { + fn check_write_all( + &mut self, + api_name: &str, + ) -> Result<(), PermissionCheckError> { deno_permissions::PermissionsContainer::check_write_all(self, api_name) } } diff --git a/ext/fs/ops.rs b/ext/fs/ops.rs index a3f59da4e..9b76b49e6 100644 --- a/ext/fs/ops.rs +++ b/ext/fs/ops.rs @@ -10,6 +10,12 @@ use std::path::PathBuf; use std::path::StripPrefixError; use std::rc::Rc; +use crate::interface::AccessCheckFn; +use crate::interface::FileSystemRc; +use crate::interface::FsDirEntry; +use crate::interface::FsFileType; +use crate::FsPermissions; +use crate::OpenOptions; use deno_core::op2; use deno_core::CancelFuture; use deno_core::CancelHandle; @@ -20,18 +26,12 @@ use 
deno_core::ToJsBuffer; use deno_io::fs::FileResource; use deno_io::fs::FsError; use deno_io::fs::FsStat; +use deno_permissions::PermissionCheckError; use rand::rngs::ThreadRng; use rand::thread_rng; use rand::Rng; use serde::Serialize; -use crate::interface::AccessCheckFn; -use crate::interface::FileSystemRc; -use crate::interface::FsDirEntry; -use crate::interface::FsFileType; -use crate::FsPermissions; -use crate::OpenOptions; - #[derive(Debug, thiserror::Error)] pub enum FsOpsError { #[error("{0}")] @@ -39,7 +39,7 @@ pub enum FsOpsError { #[error("{0}")] OperationError(#[source] OperationError), #[error(transparent)] - Permission(deno_core::error::AnyError), + Permission(#[from] PermissionCheckError), #[error(transparent)] Resource(deno_core::error::AnyError), #[error("File name or path {0:?} is not valid UTF-8")] @@ -150,8 +150,7 @@ where let path = fs.cwd()?; state .borrow_mut::

() - .check_read_blind(&path, "CWD", "Deno.cwd()") - .map_err(FsOpsError::Permission)?; + .check_read_blind(&path, "CWD", "Deno.cwd()")?; let path_str = path_into_string(path.into_os_string())?; Ok(path_str) } @@ -166,8 +165,7 @@ where { let d = state .borrow_mut::

() - .check_read(directory, "Deno.chdir()") - .map_err(FsOpsError::Permission)?; + .check_read(directory, "Deno.chdir()")?; state .borrow::() .chdir(&d) @@ -253,8 +251,7 @@ where let path = state .borrow_mut::

() - .check_write(&path, "Deno.mkdirSync()") - .map_err(FsOpsError::Permission)?; + .check_write(&path, "Deno.mkdirSync()")?; let fs = state.borrow::(); fs.mkdir_sync(&path, recursive, Some(mode)) @@ -277,10 +274,7 @@ where let (fs, path) = { let mut state = state.borrow_mut(); - let path = state - .borrow_mut::

() - .check_write(&path, "Deno.mkdir()") - .map_err(FsOpsError::Permission)?; + let path = state.borrow_mut::

().check_write(&path, "Deno.mkdir()")?; (state.borrow::().clone(), path) }; @@ -302,8 +296,7 @@ where { let path = state .borrow_mut::

() - .check_write(&path, "Deno.chmodSync()") - .map_err(FsOpsError::Permission)?; + .check_write(&path, "Deno.chmodSync()")?; let fs = state.borrow::(); fs.chmod_sync(&path, mode).context_path("chmod", &path)?; Ok(()) @@ -320,10 +313,7 @@ where { let (fs, path) = { let mut state = state.borrow_mut(); - let path = state - .borrow_mut::

() - .check_write(&path, "Deno.chmod()") - .map_err(FsOpsError::Permission)?; + let path = state.borrow_mut::

().check_write(&path, "Deno.chmod()")?; (state.borrow::().clone(), path) }; fs.chmod_async(path.clone(), mode) @@ -344,8 +334,7 @@ where { let path = state .borrow_mut::

() - .check_write(&path, "Deno.chownSync()") - .map_err(FsOpsError::Permission)?; + .check_write(&path, "Deno.chownSync()")?; let fs = state.borrow::(); fs.chown_sync(&path, uid, gid) .context_path("chown", &path)?; @@ -364,10 +353,7 @@ where { let (fs, path) = { let mut state = state.borrow_mut(); - let path = state - .borrow_mut::

() - .check_write(&path, "Deno.chown()") - .map_err(FsOpsError::Permission)?; + let path = state.borrow_mut::

().check_write(&path, "Deno.chown()")?; (state.borrow::().clone(), path) }; fs.chown_async(path.clone(), uid, gid) @@ -387,8 +373,7 @@ where { let path = state .borrow_mut::

() - .check_write(path, "Deno.removeSync()") - .map_err(FsOpsError::Permission)?; + .check_write(path, "Deno.removeSync()")?; let fs = state.borrow::(); fs.remove_sync(&path, recursive) @@ -411,13 +396,11 @@ where let path = if recursive { state .borrow_mut::

() - .check_write(&path, "Deno.remove()") - .map_err(FsOpsError::Permission)? + .check_write(&path, "Deno.remove()")? } else { state .borrow_mut::

() - .check_write_partial(&path, "Deno.remove()") - .map_err(FsOpsError::Permission)? + .check_write_partial(&path, "Deno.remove()")? }; (state.borrow::().clone(), path) @@ -440,12 +423,8 @@ where P: FsPermissions + 'static, { let permissions = state.borrow_mut::

(); - let from = permissions - .check_read(from, "Deno.copyFileSync()") - .map_err(FsOpsError::Permission)?; - let to = permissions - .check_write(to, "Deno.copyFileSync()") - .map_err(FsOpsError::Permission)?; + let from = permissions.check_read(from, "Deno.copyFileSync()")?; + let to = permissions.check_write(to, "Deno.copyFileSync()")?; let fs = state.borrow::(); fs.copy_file_sync(&from, &to) @@ -466,12 +445,8 @@ where let (fs, from, to) = { let mut state = state.borrow_mut(); let permissions = state.borrow_mut::

(); - let from = permissions - .check_read(&from, "Deno.copyFile()") - .map_err(FsOpsError::Permission)?; - let to = permissions - .check_write(&to, "Deno.copyFile()") - .map_err(FsOpsError::Permission)?; + let from = permissions.check_read(&from, "Deno.copyFile()")?; + let to = permissions.check_write(&to, "Deno.copyFile()")?; (state.borrow::().clone(), from, to) }; @@ -493,8 +468,7 @@ where { let path = state .borrow_mut::

() - .check_read(&path, "Deno.statSync()") - .map_err(FsOpsError::Permission)?; + .check_read(&path, "Deno.statSync()")?; let fs = state.borrow::(); let stat = fs.stat_sync(&path).context_path("stat", &path)?; let serializable_stat = SerializableStat::from(stat); @@ -514,9 +488,7 @@ where let (fs, path) = { let mut state = state.borrow_mut(); let permissions = state.borrow_mut::

(); - let path = permissions - .check_read(&path, "Deno.stat()") - .map_err(FsOpsError::Permission)?; + let path = permissions.check_read(&path, "Deno.stat()")?; (state.borrow::().clone(), path) }; let stat = fs @@ -537,8 +509,7 @@ where { let path = state .borrow_mut::

() - .check_read(&path, "Deno.lstatSync()") - .map_err(FsOpsError::Permission)?; + .check_read(&path, "Deno.lstatSync()")?; let fs = state.borrow::(); let stat = fs.lstat_sync(&path).context_path("lstat", &path)?; let serializable_stat = SerializableStat::from(stat); @@ -558,9 +529,7 @@ where let (fs, path) = { let mut state = state.borrow_mut(); let permissions = state.borrow_mut::

(); - let path = permissions - .check_read(&path, "Deno.lstat()") - .map_err(FsOpsError::Permission)?; + let path = permissions.check_read(&path, "Deno.lstat()")?; (state.borrow::().clone(), path) }; let stat = fs @@ -581,13 +550,9 @@ where { let fs = state.borrow::().clone(); let permissions = state.borrow_mut::

(); - let path = permissions - .check_read(&path, "Deno.realPathSync()") - .map_err(FsOpsError::Permission)?; + let path = permissions.check_read(&path, "Deno.realPathSync()")?; if path.is_relative() { - permissions - .check_read_blind(&fs.cwd()?, "CWD", "Deno.realPathSync()") - .map_err(FsOpsError::Permission)?; + permissions.check_read_blind(&fs.cwd()?, "CWD", "Deno.realPathSync()")?; } let resolved_path = @@ -610,13 +575,9 @@ where let mut state = state.borrow_mut(); let fs = state.borrow::().clone(); let permissions = state.borrow_mut::

(); - let path = permissions - .check_read(&path, "Deno.realPath()") - .map_err(FsOpsError::Permission)?; + let path = permissions.check_read(&path, "Deno.realPath()")?; if path.is_relative() { - permissions - .check_read_blind(&fs.cwd()?, "CWD", "Deno.realPath()") - .map_err(FsOpsError::Permission)?; + permissions.check_read_blind(&fs.cwd()?, "CWD", "Deno.realPath()")?; } (fs, path) }; @@ -640,8 +601,7 @@ where { let path = state .borrow_mut::

() - .check_read(&path, "Deno.readDirSync()") - .map_err(FsOpsError::Permission)?; + .check_read(&path, "Deno.readDirSync()")?; let fs = state.borrow::(); let entries = fs.read_dir_sync(&path).context_path("readdir", &path)?; @@ -662,8 +622,7 @@ where let mut state = state.borrow_mut(); let path = state .borrow_mut::

() - .check_read(&path, "Deno.readDir()") - .map_err(FsOpsError::Permission)?; + .check_read(&path, "Deno.readDir()")?; (state.borrow::().clone(), path) }; @@ -685,15 +644,9 @@ where P: FsPermissions + 'static, { let permissions = state.borrow_mut::

(); - let _ = permissions - .check_read(&oldpath, "Deno.renameSync()") - .map_err(FsOpsError::Permission)?; - let oldpath = permissions - .check_write(&oldpath, "Deno.renameSync()") - .map_err(FsOpsError::Permission)?; - let newpath = permissions - .check_write(&newpath, "Deno.renameSync()") - .map_err(FsOpsError::Permission)?; + let _ = permissions.check_read(&oldpath, "Deno.renameSync()")?; + let oldpath = permissions.check_write(&oldpath, "Deno.renameSync()")?; + let newpath = permissions.check_write(&newpath, "Deno.renameSync()")?; let fs = state.borrow::(); fs.rename_sync(&oldpath, &newpath) @@ -714,15 +667,9 @@ where let (fs, oldpath, newpath) = { let mut state = state.borrow_mut(); let permissions = state.borrow_mut::

(); - _ = permissions - .check_read(&oldpath, "Deno.rename()") - .map_err(FsOpsError::Permission)?; - let oldpath = permissions - .check_write(&oldpath, "Deno.rename()") - .map_err(FsOpsError::Permission)?; - let newpath = permissions - .check_write(&newpath, "Deno.rename()") - .map_err(FsOpsError::Permission)?; + _ = permissions.check_read(&oldpath, "Deno.rename()")?; + let oldpath = permissions.check_write(&oldpath, "Deno.rename()")?; + let newpath = permissions.check_write(&newpath, "Deno.rename()")?; (state.borrow::().clone(), oldpath, newpath) }; @@ -743,18 +690,10 @@ where P: FsPermissions + 'static, { let permissions = state.borrow_mut::

(); - _ = permissions - .check_read(oldpath, "Deno.linkSync()") - .map_err(FsOpsError::Permission)?; - let oldpath = permissions - .check_write(oldpath, "Deno.linkSync()") - .map_err(FsOpsError::Permission)?; - _ = permissions - .check_read(newpath, "Deno.linkSync()") - .map_err(FsOpsError::Permission)?; - let newpath = permissions - .check_write(newpath, "Deno.linkSync()") - .map_err(FsOpsError::Permission)?; + _ = permissions.check_read(oldpath, "Deno.linkSync()")?; + let oldpath = permissions.check_write(oldpath, "Deno.linkSync()")?; + _ = permissions.check_read(newpath, "Deno.linkSync()")?; + let newpath = permissions.check_write(newpath, "Deno.linkSync()")?; let fs = state.borrow::(); fs.link_sync(&oldpath, &newpath) @@ -775,18 +714,10 @@ where let (fs, oldpath, newpath) = { let mut state = state.borrow_mut(); let permissions = state.borrow_mut::

(); - _ = permissions - .check_read(&oldpath, "Deno.link()") - .map_err(FsOpsError::Permission)?; - let oldpath = permissions - .check_write(&oldpath, "Deno.link()") - .map_err(FsOpsError::Permission)?; - _ = permissions - .check_read(&newpath, "Deno.link()") - .map_err(FsOpsError::Permission)?; - let newpath = permissions - .check_write(&newpath, "Deno.link()") - .map_err(FsOpsError::Permission)?; + _ = permissions.check_read(&oldpath, "Deno.link()")?; + let oldpath = permissions.check_write(&oldpath, "Deno.link()")?; + _ = permissions.check_read(&newpath, "Deno.link()")?; + let newpath = permissions.check_write(&newpath, "Deno.link()")?; (state.borrow::().clone(), oldpath, newpath) }; @@ -811,12 +742,8 @@ where let newpath = PathBuf::from(newpath); let permissions = state.borrow_mut::

(); - permissions - .check_write_all("Deno.symlinkSync()") - .map_err(FsOpsError::Permission)?; - permissions - .check_read_all("Deno.symlinkSync()") - .map_err(FsOpsError::Permission)?; + permissions.check_write_all("Deno.symlinkSync()")?; + permissions.check_read_all("Deno.symlinkSync()")?; let fs = state.borrow::(); fs.symlink_sync(&oldpath, &newpath, file_type) @@ -841,12 +768,8 @@ where let fs = { let mut state = state.borrow_mut(); let permissions = state.borrow_mut::

(); - permissions - .check_write_all("Deno.symlink()") - .map_err(FsOpsError::Permission)?; - permissions - .check_read_all("Deno.symlink()") - .map_err(FsOpsError::Permission)?; + permissions.check_write_all("Deno.symlink()")?; + permissions.check_read_all("Deno.symlink()")?; state.borrow::().clone() }; @@ -868,8 +791,7 @@ where { let path = state .borrow_mut::

() - .check_read(&path, "Deno.readLink()") - .map_err(FsOpsError::Permission)?; + .check_read(&path, "Deno.readLink()")?; let fs = state.borrow::(); @@ -891,8 +813,7 @@ where let mut state = state.borrow_mut(); let path = state .borrow_mut::

() - .check_read(&path, "Deno.readLink()") - .map_err(FsOpsError::Permission)?; + .check_read(&path, "Deno.readLink()")?; (state.borrow::().clone(), path) }; @@ -915,8 +836,7 @@ where { let path = state .borrow_mut::

() - .check_write(path, "Deno.truncateSync()") - .map_err(FsOpsError::Permission)?; + .check_write(path, "Deno.truncateSync()")?; let fs = state.borrow::(); fs.truncate_sync(&path, len) @@ -938,8 +858,7 @@ where let mut state = state.borrow_mut(); let path = state .borrow_mut::

() - .check_write(&path, "Deno.truncate()") - .map_err(FsOpsError::Permission)?; + .check_write(&path, "Deno.truncate()")?; (state.borrow::().clone(), path) }; @@ -962,10 +881,7 @@ pub fn op_fs_utime_sync

( where P: FsPermissions + 'static, { - let path = state - .borrow_mut::

() - .check_write(path, "Deno.utime()") - .map_err(FsOpsError::Permission)?; + let path = state.borrow_mut::

().check_write(path, "Deno.utime()")?; let fs = state.borrow::(); fs.utime_sync(&path, atime_secs, atime_nanos, mtime_secs, mtime_nanos) @@ -988,10 +904,7 @@ where { let (fs, path) = { let mut state = state.borrow_mut(); - let path = state - .borrow_mut::

() - .check_write(&path, "Deno.utime()") - .map_err(FsOpsError::Permission)?; + let path = state.borrow_mut::

().check_write(&path, "Deno.utime()")?; (state.borrow::().clone(), path) }; @@ -1219,16 +1132,12 @@ where { let fs = state.borrow::().clone(); let dir = match dir { - Some(dir) => state - .borrow_mut::

() - .check_write(dir, api_name) - .map_err(FsOpsError::Permission)?, + Some(dir) => state.borrow_mut::

().check_write(dir, api_name)?, None => { let dir = fs.tmp_dir().context("tmpdir")?; state .borrow_mut::

() - .check_write_blind(&dir, "TMP", api_name) - .map_err(FsOpsError::Permission)?; + .check_write_blind(&dir, "TMP", api_name)?; dir } }; @@ -1246,16 +1155,12 @@ where let mut state = state.borrow_mut(); let fs = state.borrow::().clone(); let dir = match dir { - Some(dir) => state - .borrow_mut::

() - .check_write(dir, api_name) - .map_err(FsOpsError::Permission)?, + Some(dir) => state.borrow_mut::

().check_write(dir, api_name)?, None => { let dir = fs.tmp_dir().context("tmpdir")?; state .borrow_mut::

() - .check_write_blind(&dir, "TMP", api_name) - .map_err(FsOpsError::Permission)?; + .check_write_blind(&dir, "TMP", api_name)?; dir } }; diff --git a/ext/kv/remote.rs b/ext/kv/remote.rs index 922853588..4930aacfe 100644 --- a/ext/kv/remote.rs +++ b/ext/kv/remote.rs @@ -15,6 +15,7 @@ use deno_core::futures::Stream; use deno_core::OpState; use deno_fetch::create_http_client; use deno_fetch::CreateHttpClientOptions; +use deno_permissions::PermissionCheckError; use deno_tls::rustls::RootCertStore; use deno_tls::Proxy; use deno_tls::RootCertStoreProvider; @@ -45,17 +46,17 @@ impl HttpOptions { } pub trait RemoteDbHandlerPermissions { - fn check_env(&mut self, var: &str) -> Result<(), AnyError>; + fn check_env(&mut self, var: &str) -> Result<(), PermissionCheckError>; fn check_net_url( &mut self, url: &Url, api_name: &str, - ) -> Result<(), AnyError>; + ) -> Result<(), PermissionCheckError>; } impl RemoteDbHandlerPermissions for deno_permissions::PermissionsContainer { #[inline(always)] - fn check_env(&mut self, var: &str) -> Result<(), AnyError> { + fn check_env(&mut self, var: &str) -> Result<(), PermissionCheckError> { deno_permissions::PermissionsContainer::check_env(self, var) } @@ -64,7 +65,7 @@ impl RemoteDbHandlerPermissions for deno_permissions::PermissionsContainer { &mut self, url: &Url, api_name: &str, - ) -> Result<(), AnyError> { + ) -> Result<(), PermissionCheckError> { deno_permissions::PermissionsContainer::check_net_url(self, url, api_name) } } @@ -103,7 +104,9 @@ impl denokv_remote::RemotePermissions fn check_net_url(&self, url: &Url) -> Result<(), anyhow::Error> { let mut state = self.state.borrow_mut(); let permissions = state.borrow_mut::

(); - permissions.check_net_url(url, "Deno.openKv") + permissions + .check_net_url(url, "Deno.openKv") + .map_err(Into::into) } } diff --git a/ext/kv/sqlite.rs b/ext/kv/sqlite.rs index 0b4a3693c..9de520927 100644 --- a/ext/kv/sqlite.rs +++ b/ext/kv/sqlite.rs @@ -13,20 +13,20 @@ use std::sync::Arc; use std::sync::Mutex; use std::sync::OnceLock; +use crate::DatabaseHandler; use async_trait::async_trait; use deno_core::error::type_error; use deno_core::error::AnyError; use deno_core::unsync::spawn_blocking; use deno_core::OpState; use deno_path_util::normalize_path; +use deno_permissions::PermissionCheckError; pub use denokv_sqlite::SqliteBackendError; use denokv_sqlite::SqliteConfig; use denokv_sqlite::SqliteNotifier; use rand::SeedableRng; use rusqlite::OpenFlags; -use crate::DatabaseHandler; - static SQLITE_NOTIFIERS_MAP: OnceLock>> = OnceLock::new(); @@ -42,13 +42,13 @@ pub trait SqliteDbHandlerPermissions { &mut self, p: &str, api_name: &str, - ) -> Result; + ) -> Result; #[must_use = "the resolved return value to mitigate time-of-check to time-of-use issues"] fn check_write<'a>( &mut self, p: &'a Path, api_name: &str, - ) -> Result, AnyError>; + ) -> Result, PermissionCheckError>; } impl SqliteDbHandlerPermissions for deno_permissions::PermissionsContainer { @@ -57,7 +57,7 @@ impl SqliteDbHandlerPermissions for deno_permissions::PermissionsContainer { &mut self, p: &str, api_name: &str, - ) -> Result { + ) -> Result { deno_permissions::PermissionsContainer::check_read(self, p, api_name) } @@ -66,7 +66,7 @@ impl SqliteDbHandlerPermissions for deno_permissions::PermissionsContainer { &mut self, p: &'a Path, api_name: &str, - ) -> Result, AnyError> { + ) -> Result, PermissionCheckError> { deno_permissions::PermissionsContainer::check_write_path(self, p, api_name) } } diff --git a/ext/napi/lib.rs b/ext/napi/lib.rs index 20f924bdb..88b8c238d 100644 --- a/ext/napi/lib.rs +++ b/ext/napi/lib.rs @@ -43,7 +43,7 @@ pub enum NApiError { #[error("Unable to find register 
Node-API module at {}", .0.display())] ModuleNotFound(PathBuf), #[error(transparent)] - Permission(deno_core::error::AnyError), + Permission(#[from] PermissionCheckError), } #[cfg(unix)] @@ -55,6 +55,7 @@ use libloading::os::windows::*; // Expose common stuff for ease of use. // `use deno_napi::*` pub use deno_core::v8; +use deno_permissions::PermissionCheckError; pub use std::ffi::CStr; pub use std::os::raw::c_char; pub use std::os::raw::c_void; @@ -508,20 +509,14 @@ deno_core::extension!(deno_napi, pub trait NapiPermissions { #[must_use = "the resolved return value to mitigate time-of-check to time-of-use issues"] - fn check( - &mut self, - path: &str, - ) -> Result; + fn check(&mut self, path: &str) -> Result; } // NOTE(bartlomieju): for now, NAPI uses `--allow-ffi` flag, but that might // change in the future. impl NapiPermissions for deno_permissions::PermissionsContainer { #[inline(always)] - fn check( - &mut self, - path: &str, - ) -> Result { + fn check(&mut self, path: &str) -> Result { deno_permissions::PermissionsContainer::check_ffi(self, path) } } @@ -553,7 +548,7 @@ where let (async_work_sender, cleanup_hooks, external_ops_tracker, path) = { let mut op_state = op_state.borrow_mut(); let permissions = op_state.borrow_mut::(); - let path = permissions.check(&path).map_err(NApiError::Permission)?; + let path = permissions.check(&path)?; let napi_state = op_state.borrow::(); ( op_state.borrow::().clone(), diff --git a/ext/net/lib.rs b/ext/net/lib.rs index b039965d4..bf8f58aa2 100644 --- a/ext/net/lib.rs +++ b/ext/net/lib.rs @@ -11,6 +11,7 @@ mod tcp; use deno_core::error::AnyError; use deno_core::OpState; +use deno_permissions::PermissionCheckError; use deno_tls::rustls::RootCertStore; use deno_tls::RootCertStoreProvider; use std::borrow::Cow; @@ -25,25 +26,25 @@ pub trait NetPermissions { &mut self, host: &(T, Option), api_name: &str, - ) -> Result<(), AnyError>; + ) -> Result<(), PermissionCheckError>; #[must_use = "the resolved return value to mitigate 
time-of-check to time-of-use issues"] fn check_read( &mut self, p: &str, api_name: &str, - ) -> Result; + ) -> Result; #[must_use = "the resolved return value to mitigate time-of-check to time-of-use issues"] fn check_write( &mut self, p: &str, api_name: &str, - ) -> Result; + ) -> Result; #[must_use = "the resolved return value to mitigate time-of-check to time-of-use issues"] fn check_write_path<'a>( &mut self, p: &'a Path, api_name: &str, - ) -> Result, AnyError>; + ) -> Result, PermissionCheckError>; } impl NetPermissions for deno_permissions::PermissionsContainer { @@ -52,7 +53,7 @@ impl NetPermissions for deno_permissions::PermissionsContainer { &mut self, host: &(T, Option), api_name: &str, - ) -> Result<(), AnyError> { + ) -> Result<(), PermissionCheckError> { deno_permissions::PermissionsContainer::check_net(self, host, api_name) } @@ -61,7 +62,7 @@ impl NetPermissions for deno_permissions::PermissionsContainer { &mut self, path: &str, api_name: &str, - ) -> Result { + ) -> Result { deno_permissions::PermissionsContainer::check_read(self, path, api_name) } @@ -70,7 +71,7 @@ impl NetPermissions for deno_permissions::PermissionsContainer { &mut self, path: &str, api_name: &str, - ) -> Result { + ) -> Result { deno_permissions::PermissionsContainer::check_write(self, path, api_name) } @@ -79,7 +80,7 @@ impl NetPermissions for deno_permissions::PermissionsContainer { &mut self, path: &'a Path, api_name: &str, - ) -> Result, AnyError> { + ) -> Result, PermissionCheckError> { deno_permissions::PermissionsContainer::check_write_path( self, path, api_name, ) diff --git a/ext/net/ops.rs b/ext/net/ops.rs index 0f92dead0..35bcff8dc 100644 --- a/ext/net/ops.rs +++ b/ext/net/ops.rs @@ -81,8 +81,8 @@ pub enum NetError { Io(#[from] std::io::Error), #[error("Another accept task is ongoing")] AcceptTaskOngoing, - #[error("{0}")] - Permission(deno_core::error::AnyError), + #[error(transparent)] + Permission(#[from] deno_permissions::PermissionCheckError), #[error("{0}")] 
Resource(deno_core::error::AnyError), #[error("No resolved address found")] @@ -195,12 +195,10 @@ where { { let mut s = state.borrow_mut(); - s.borrow_mut::() - .check_net( - &(&addr.hostname, Some(addr.port)), - "Deno.DatagramConn.send()", - ) - .map_err(NetError::Permission)?; + s.borrow_mut::().check_net( + &(&addr.hostname, Some(addr.port)), + "Deno.DatagramConn.send()", + )?; } let addr = resolve_addr(&addr.hostname, addr.port) .await? @@ -369,8 +367,7 @@ where let mut state_ = state.borrow_mut(); state_ .borrow_mut::() - .check_net(&(&addr.hostname, Some(addr.port)), "Deno.connect()") - .map_err(NetError::Permission)?; + .check_net(&(&addr.hostname, Some(addr.port)), "Deno.connect()")?; } let addr = resolve_addr(&addr.hostname, addr.port) @@ -420,8 +417,7 @@ where } state .borrow_mut::() - .check_net(&(&addr.hostname, Some(addr.port)), "Deno.listen()") - .map_err(NetError::Permission)?; + .check_net(&(&addr.hostname, Some(addr.port)), "Deno.listen()")?; let addr = resolve_addr_sync(&addr.hostname, addr.port)? .next() .ok_or_else(|| NetError::NoResolvedAddress)?; @@ -449,8 +445,7 @@ where { state .borrow_mut::() - .check_net(&(&addr.hostname, Some(addr.port)), "Deno.listenDatagram()") - .map_err(NetError::Permission)?; + .check_net(&(&addr.hostname, Some(addr.port)), "Deno.listenDatagram()")?; let addr = resolve_addr_sync(&addr.hostname, addr.port)? 
.next() .ok_or_else(|| NetError::NoResolvedAddress)?; @@ -647,9 +642,7 @@ where let socker_addr = &ns.socket_addr; let ip = socker_addr.ip().to_string(); let port = socker_addr.port(); - perm - .check_net(&(ip, Some(port)), "Deno.resolveDns()") - .map_err(NetError::Permission)?; + perm.check_net(&(ip, Some(port)), "Deno.resolveDns()")?; } } @@ -834,6 +827,7 @@ mod tests { use deno_core::futures::FutureExt; use deno_core::JsRuntime; use deno_core::RuntimeOptions; + use deno_permissions::PermissionCheckError; use socket2::SockRef; use std::net::Ipv4Addr; use std::net::Ipv6Addr; @@ -1041,7 +1035,7 @@ mod tests { &mut self, _host: &(T, Option), _api_name: &str, - ) -> Result<(), deno_core::error::AnyError> { + ) -> Result<(), PermissionCheckError> { Ok(()) } @@ -1049,7 +1043,7 @@ mod tests { &mut self, p: &str, _api_name: &str, - ) -> Result { + ) -> Result { Ok(PathBuf::from(p)) } @@ -1057,7 +1051,7 @@ mod tests { &mut self, p: &str, _api_name: &str, - ) -> Result { + ) -> Result { Ok(PathBuf::from(p)) } @@ -1065,7 +1059,7 @@ mod tests { &mut self, p: &'a Path, _api_name: &str, - ) -> Result, deno_core::error::AnyError> { + ) -> Result, PermissionCheckError> { Ok(Cow::Borrowed(p)) } } diff --git a/ext/node/lib.rs b/ext/node/lib.rs index db6d08e11..b08b0493b 100644 --- a/ext/node/lib.rs +++ b/ext/node/lib.rs @@ -24,6 +24,7 @@ pub mod ops; mod polyfill; pub use deno_package_json::PackageJson; +use deno_permissions::PermissionCheckError; pub use node_resolver::PathClean; pub use ops::ipc::ChildPipeFd; pub use ops::ipc::IpcJsonStreamResource; @@ -45,10 +46,13 @@ pub trait NodePermissions { &mut self, url: &Url, api_name: &str, - ) -> Result<(), AnyError>; + ) -> Result<(), PermissionCheckError>; #[must_use = "the resolved return value to mitigate time-of-check to time-of-use issues"] #[inline(always)] - fn check_read(&mut self, path: &str) -> Result { + fn check_read( + &mut self, + path: &str, + ) -> Result { self.check_read_with_api_name(path, None) } #[must_use = "the 
resolved return value to mitigate time-of-check to time-of-use issues"] @@ -56,20 +60,24 @@ pub trait NodePermissions { &mut self, path: &str, api_name: Option<&str>, - ) -> Result; + ) -> Result; #[must_use = "the resolved return value to mitigate time-of-check to time-of-use issues"] fn check_read_path<'a>( &mut self, path: &'a Path, - ) -> Result, AnyError>; + ) -> Result, PermissionCheckError>; fn query_read_all(&mut self) -> bool; - fn check_sys(&mut self, kind: &str, api_name: &str) -> Result<(), AnyError>; + fn check_sys( + &mut self, + kind: &str, + api_name: &str, + ) -> Result<(), PermissionCheckError>; #[must_use = "the resolved return value to mitigate time-of-check to time-of-use issues"] fn check_write_with_api_name( &mut self, path: &str, api_name: Option<&str>, - ) -> Result; + ) -> Result; } impl NodePermissions for deno_permissions::PermissionsContainer { @@ -78,7 +86,7 @@ impl NodePermissions for deno_permissions::PermissionsContainer { &mut self, url: &Url, api_name: &str, - ) -> Result<(), AnyError> { + ) -> Result<(), PermissionCheckError> { deno_permissions::PermissionsContainer::check_net_url(self, url, api_name) } @@ -87,7 +95,7 @@ impl NodePermissions for deno_permissions::PermissionsContainer { &mut self, path: &str, api_name: Option<&str>, - ) -> Result { + ) -> Result { deno_permissions::PermissionsContainer::check_read_with_api_name( self, path, api_name, ) @@ -96,7 +104,7 @@ impl NodePermissions for deno_permissions::PermissionsContainer { fn check_read_path<'a>( &mut self, path: &'a Path, - ) -> Result, AnyError> { + ) -> Result, PermissionCheckError> { deno_permissions::PermissionsContainer::check_read_path(self, path, None) } @@ -109,13 +117,17 @@ impl NodePermissions for deno_permissions::PermissionsContainer { &mut self, path: &str, api_name: Option<&str>, - ) -> Result { + ) -> Result { deno_permissions::PermissionsContainer::check_write_with_api_name( self, path, api_name, ) } - fn check_sys(&mut self, kind: &str, api_name: 
&str) -> Result<(), AnyError> { + fn check_sys( + &mut self, + kind: &str, + api_name: &str, + ) -> Result<(), PermissionCheckError> { deno_permissions::PermissionsContainer::check_sys(self, kind, api_name) } } diff --git a/ext/node/ops/fs.rs b/ext/node/ops/fs.rs index 98b3c46a1..9c0e4e1cc 100644 --- a/ext/node/ops/fs.rs +++ b/ext/node/ops/fs.rs @@ -13,7 +13,7 @@ use crate::NodePermissions; #[derive(Debug, thiserror::Error)] pub enum FsError { #[error(transparent)] - Permission(deno_core::error::AnyError), + Permission(#[from] deno_permissions::PermissionCheckError), #[error("{0}")] Io(#[from] std::io::Error), #[cfg(windows)] @@ -53,8 +53,7 @@ where let mut state = state.borrow_mut(); let path = state .borrow_mut::

() - .check_read_with_api_name(&path, Some("node:fs.exists()")) - .map_err(FsError::Permission)?; + .check_read_with_api_name(&path, Some("node:fs.exists()"))?; (state.borrow::().clone(), path) }; @@ -72,12 +71,10 @@ where { let path = state .borrow_mut::

() - .check_read_with_api_name(path, Some("node:fs.cpSync")) - .map_err(FsError::Permission)?; + .check_read_with_api_name(path, Some("node:fs.cpSync"))?; let new_path = state .borrow_mut::

() - .check_write_with_api_name(new_path, Some("node:fs.cpSync")) - .map_err(FsError::Permission)?; + .check_write_with_api_name(new_path, Some("node:fs.cpSync"))?; let fs = state.borrow::(); fs.cp_sync(&path, &new_path)?; @@ -97,12 +94,10 @@ where let mut state = state.borrow_mut(); let path = state .borrow_mut::

() - .check_read_with_api_name(&path, Some("node:fs.cpSync")) - .map_err(FsError::Permission)?; + .check_read_with_api_name(&path, Some("node:fs.cpSync"))?; let new_path = state .borrow_mut::

() - .check_write_with_api_name(&new_path, Some("node:fs.cpSync")) - .map_err(FsError::Permission)?; + .check_write_with_api_name(&new_path, Some("node:fs.cpSync"))?; (state.borrow::().clone(), path, new_path) }; @@ -136,12 +131,10 @@ where let mut state = state.borrow_mut(); let path = state .borrow_mut::

() - .check_read_with_api_name(&path, Some("node:fs.statfs")) - .map_err(FsError::Permission)?; + .check_read_with_api_name(&path, Some("node:fs.statfs"))?; state .borrow_mut::

() - .check_sys("statfs", "node:fs.statfs") - .map_err(FsError::Permission)?; + .check_sys("statfs", "node:fs.statfs")?; path }; #[cfg(unix)] @@ -279,8 +272,7 @@ where { let path = state .borrow_mut::

() - .check_write_with_api_name(path, Some("node:fs.lutimes")) - .map_err(FsError::Permission)?; + .check_write_with_api_name(path, Some("node:fs.lutimes"))?; let fs = state.borrow::(); fs.lutime_sync(&path, atime_secs, atime_nanos, mtime_secs, mtime_nanos)?; @@ -303,8 +295,7 @@ where let mut state = state.borrow_mut(); let path = state .borrow_mut::

() - .check_write_with_api_name(&path, Some("node:fs.lutimesSync")) - .map_err(FsError::Permission)?; + .check_write_with_api_name(&path, Some("node:fs.lutimesSync"))?; (state.borrow::().clone(), path) }; @@ -326,8 +317,7 @@ where { let path = state .borrow_mut::

() - .check_write_with_api_name(&path, Some("node:fs.lchownSync")) - .map_err(FsError::Permission)?; + .check_write_with_api_name(&path, Some("node:fs.lchownSync"))?; let fs = state.borrow::(); fs.lchown_sync(&path, uid, gid)?; Ok(()) @@ -347,8 +337,7 @@ where let mut state = state.borrow_mut(); let path = state .borrow_mut::

() - .check_write_with_api_name(&path, Some("node:fs.lchown")) - .map_err(FsError::Permission)?; + .check_write_with_api_name(&path, Some("node:fs.lchown"))?; (state.borrow::().clone(), path) }; fs.lchown_async(path, uid, gid).await?; diff --git a/ext/node/ops/http.rs b/ext/node/ops/http.rs index 730e1e482..69571078f 100644 --- a/ext/node/ops/http.rs +++ b/ext/node/ops/http.rs @@ -78,9 +78,7 @@ where { let permissions = state.borrow_mut::

(); - permissions - .check_net_url(&url, "ClientRequest") - .map_err(FetchError::Permission)?; + permissions.check_net_url(&url, "ClientRequest")?; } let mut header_map = HeaderMap::new(); diff --git a/ext/node/ops/os/mod.rs b/ext/node/ops/os/mod.rs index ea7e6b99f..d291277ad 100644 --- a/ext/node/ops/os/mod.rs +++ b/ext/node/ops/os/mod.rs @@ -14,7 +14,7 @@ pub enum OsError { #[error(transparent)] Priority(priority::PriorityError), #[error(transparent)] - Permission(deno_core::error::AnyError), + Permission(#[from] deno_permissions::PermissionCheckError), #[error("Failed to get cpu info")] FailedToGetCpuInfo, #[error("Failed to get user info")] @@ -31,9 +31,7 @@ where { { let permissions = state.borrow_mut::

(); - permissions - .check_sys("getPriority", "node:os.getPriority()") - .map_err(OsError::Permission)?; + permissions.check_sys("getPriority", "node:os.getPriority()")?; } priority::get_priority(pid).map_err(OsError::Priority) @@ -50,9 +48,7 @@ where { { let permissions = state.borrow_mut::

(); - permissions - .check_sys("setPriority", "node:os.setPriority()") - .map_err(OsError::Permission)?; + permissions.check_sys("setPriority", "node:os.setPriority()")?; } priority::set_priority(pid, priority).map_err(OsError::Priority) @@ -266,9 +262,7 @@ where { { let permissions = state.borrow_mut::

(); - permissions - .check_sys("cpus", "node:os.cpus()") - .map_err(OsError::Permission)?; + permissions.check_sys("cpus", "node:os.cpus()")?; } cpus::cpu_info().ok_or(OsError::FailedToGetCpuInfo) diff --git a/ext/websocket/lib.rs b/ext/websocket/lib.rs index 2a67ac5a1..a5734271c 100644 --- a/ext/websocket/lib.rs +++ b/ext/websocket/lib.rs @@ -50,6 +50,7 @@ use tokio::io::ReadHalf; use tokio::io::WriteHalf; use tokio::net::TcpStream; +use deno_permissions::PermissionCheckError; use fastwebsockets::CloseCode; use fastwebsockets::FragmentCollectorRead; use fastwebsockets::Frame; @@ -75,7 +76,7 @@ pub enum WebsocketError { #[error(transparent)] Url(url::ParseError), #[error(transparent)] - Permission(deno_core::error::AnyError), + Permission(#[from] PermissionCheckError), #[error(transparent)] Resource(deno_core::error::AnyError), #[error(transparent)] @@ -112,7 +113,7 @@ pub trait WebSocketPermissions { &mut self, _url: &url::Url, _api_name: &str, - ) -> Result<(), deno_core::error::AnyError>; + ) -> Result<(), PermissionCheckError>; } impl WebSocketPermissions for deno_permissions::PermissionsContainer { @@ -121,7 +122,7 @@ impl WebSocketPermissions for deno_permissions::PermissionsContainer { &mut self, url: &url::Url, api_name: &str, - ) -> Result<(), deno_core::error::AnyError> { + ) -> Result<(), PermissionCheckError> { deno_permissions::PermissionsContainer::check_net_url(self, url, api_name) } } @@ -158,13 +159,10 @@ pub fn op_ws_check_permission_and_cancel_handle( where WP: WebSocketPermissions + 'static, { - state - .borrow_mut::() - .check_net_url( - &url::Url::parse(&url).map_err(WebsocketError::Url)?, - &api_name, - ) - .map_err(WebsocketError::Permission)?; + state.borrow_mut::().check_net_url( + &url::Url::parse(&url).map_err(WebsocketError::Url)?, + &api_name, + )?; if cancel_handle { let rid = state -- cgit v1.2.3 From 84aee0be9aac53499a757973a8a2054d2a44e479 Mon Sep 17 00:00:00 2001 From: Nathan Whitaker <17734409+nathanwhit@users.noreply.github.com> 
Date: Mon, 4 Nov 2024 13:08:29 -0800 Subject: fix(ext/node): add `findSourceMap` to the default export of `node:module` (#26720) Next.js 15.0.2 tries to use this and errors out --- ext/node/polyfills/01_require.js | 2 ++ 1 file changed, 2 insertions(+) (limited to 'ext') diff --git a/ext/node/polyfills/01_require.js b/ext/node/polyfills/01_require.js index 296d819aa..d818bb572 100644 --- a/ext/node/polyfills/01_require.js +++ b/ext/node/polyfills/01_require.js @@ -1312,6 +1312,8 @@ export function findSourceMap(_path) { return undefined; } +Module.findSourceMap = findSourceMap; + /** * @param {string | URL} _specifier * @param {string | URL} _parentUrl -- cgit v1.2.3 From ef7432c03f83ad9e9ca2812d0ab5653e87fa5259 Mon Sep 17 00:00:00 2001 From: denobot <33910674+denobot@users.noreply.github.com> Date: Tue, 5 Nov 2024 20:27:14 -0500 Subject: chore: forward v2.0.5 release commit to main (#26755) This is the release commit being forwarded back to main for 2.0.5 Co-authored-by: bartlomieju --- ext/broadcast_channel/Cargo.toml | 2 +- ext/cache/Cargo.toml | 2 +- ext/canvas/Cargo.toml | 2 +- ext/console/Cargo.toml | 2 +- ext/cron/Cargo.toml | 2 +- ext/crypto/Cargo.toml | 2 +- ext/fetch/Cargo.toml | 2 +- ext/ffi/Cargo.toml | 2 +- ext/fs/Cargo.toml | 2 +- ext/http/Cargo.toml | 2 +- ext/io/Cargo.toml | 2 +- ext/kv/Cargo.toml | 2 +- ext/napi/Cargo.toml | 2 +- ext/napi/sym/Cargo.toml | 2 +- ext/net/Cargo.toml | 2 +- ext/node/Cargo.toml | 2 +- ext/tls/Cargo.toml | 2 +- ext/url/Cargo.toml | 2 +- ext/web/Cargo.toml | 2 +- ext/webgpu/Cargo.toml | 2 +- ext/webidl/Cargo.toml | 2 +- ext/websocket/Cargo.toml | 2 +- ext/webstorage/Cargo.toml | 2 +- 23 files changed, 23 insertions(+), 23 deletions(-) (limited to 'ext') diff --git a/ext/broadcast_channel/Cargo.toml b/ext/broadcast_channel/Cargo.toml index e6eeeed5e..0951286f7 100644 --- a/ext/broadcast_channel/Cargo.toml +++ b/ext/broadcast_channel/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_broadcast_channel" -version = "0.169.0" 
+version = "0.170.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/cache/Cargo.toml b/ext/cache/Cargo.toml index 7cae8d88a..20dc9ca48 100644 --- a/ext/cache/Cargo.toml +++ b/ext/cache/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_cache" -version = "0.107.0" +version = "0.108.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/canvas/Cargo.toml b/ext/canvas/Cargo.toml index 5d2c37199..5c6e44f95 100644 --- a/ext/canvas/Cargo.toml +++ b/ext/canvas/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_canvas" -version = "0.44.0" +version = "0.45.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/console/Cargo.toml b/ext/console/Cargo.toml index b3543ed85..bf476574e 100644 --- a/ext/console/Cargo.toml +++ b/ext/console/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_console" -version = "0.175.0" +version = "0.176.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/cron/Cargo.toml b/ext/cron/Cargo.toml index 0d52cb6a9..90b39aa0e 100644 --- a/ext/cron/Cargo.toml +++ b/ext/cron/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_cron" -version = "0.55.0" +version = "0.56.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/crypto/Cargo.toml b/ext/crypto/Cargo.toml index 95981bba3..890cd868a 100644 --- a/ext/crypto/Cargo.toml +++ b/ext/crypto/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_crypto" -version = "0.189.0" +version = "0.190.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/fetch/Cargo.toml b/ext/fetch/Cargo.toml index 93fc88ae6..0b15d05b8 100644 --- a/ext/fetch/Cargo.toml +++ b/ext/fetch/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_fetch" -version = "0.199.0" +version = "0.200.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/ffi/Cargo.toml 
b/ext/ffi/Cargo.toml index 58144790b..27ecd1d54 100644 --- a/ext/ffi/Cargo.toml +++ b/ext/ffi/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_ffi" -version = "0.162.0" +version = "0.163.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/fs/Cargo.toml b/ext/fs/Cargo.toml index 0ccde7f7e..d1720b7bc 100644 --- a/ext/fs/Cargo.toml +++ b/ext/fs/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_fs" -version = "0.85.0" +version = "0.86.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/http/Cargo.toml b/ext/http/Cargo.toml index a1a4cb20b..1d7c4d87e 100644 --- a/ext/http/Cargo.toml +++ b/ext/http/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_http" -version = "0.173.0" +version = "0.174.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/io/Cargo.toml b/ext/io/Cargo.toml index 7e858d298..b791f16a3 100644 --- a/ext/io/Cargo.toml +++ b/ext/io/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_io" -version = "0.85.0" +version = "0.86.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/kv/Cargo.toml b/ext/kv/Cargo.toml index bbd234901..59b425d3c 100644 --- a/ext/kv/Cargo.toml +++ b/ext/kv/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_kv" -version = "0.83.0" +version = "0.84.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/napi/Cargo.toml b/ext/napi/Cargo.toml index c31367118..064d33568 100644 --- a/ext/napi/Cargo.toml +++ b/ext/napi/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_napi" -version = "0.106.0" +version = "0.107.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/napi/sym/Cargo.toml b/ext/napi/sym/Cargo.toml index c23054eb4..bf48d2742 100644 --- a/ext/napi/sym/Cargo.toml +++ b/ext/napi/sym/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "napi_sym" -version = "0.105.0" +version = "0.106.0" 
authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/net/Cargo.toml b/ext/net/Cargo.toml index 5ffd1b450..b22fbe7b5 100644 --- a/ext/net/Cargo.toml +++ b/ext/net/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_net" -version = "0.167.0" +version = "0.168.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/node/Cargo.toml b/ext/node/Cargo.toml index ed31dbf17..d830be575 100644 --- a/ext/node/Cargo.toml +++ b/ext/node/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_node" -version = "0.112.0" +version = "0.113.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/tls/Cargo.toml b/ext/tls/Cargo.toml index 6871faa5b..89a78f4b6 100644 --- a/ext/tls/Cargo.toml +++ b/ext/tls/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_tls" -version = "0.162.0" +version = "0.163.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/url/Cargo.toml b/ext/url/Cargo.toml index 24d81abf8..7f3fa9761 100644 --- a/ext/url/Cargo.toml +++ b/ext/url/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_url" -version = "0.175.0" +version = "0.176.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/web/Cargo.toml b/ext/web/Cargo.toml index a02cff98f..faba8e9fb 100644 --- a/ext/web/Cargo.toml +++ b/ext/web/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_web" -version = "0.206.0" +version = "0.207.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/webgpu/Cargo.toml b/ext/webgpu/Cargo.toml index 3bb814c25..c682965d5 100644 --- a/ext/webgpu/Cargo.toml +++ b/ext/webgpu/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_webgpu" -version = "0.142.0" +version = "0.143.0" authors = ["the Deno authors"] edition.workspace = true license = "MIT" diff --git a/ext/webidl/Cargo.toml b/ext/webidl/Cargo.toml index 1685bb1fd..1e8dd77cf 100644 --- 
a/ext/webidl/Cargo.toml +++ b/ext/webidl/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_webidl" -version = "0.175.0" +version = "0.176.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/websocket/Cargo.toml b/ext/websocket/Cargo.toml index 6a575cb41..6724cea2c 100644 --- a/ext/websocket/Cargo.toml +++ b/ext/websocket/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_websocket" -version = "0.180.0" +version = "0.181.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/webstorage/Cargo.toml b/ext/webstorage/Cargo.toml index 079ab2e4e..c1d2092f1 100644 --- a/ext/webstorage/Cargo.toml +++ b/ext/webstorage/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_webstorage" -version = "0.170.0" +version = "0.171.0" authors.workspace = true edition.workspace = true license.workspace = true -- cgit v1.2.3 From 700f54a13cce0fcdcf19d1893e3254579c7347f4 Mon Sep 17 00:00:00 2001 From: snek Date: Wed, 6 Nov 2024 15:08:26 +0100 Subject: fix(ext/node): better inspector support (#26471) implement local inspector future changes: - wire up InspectorServer to enable open/close/url - wire up connectToMainThread Fixes https://github.com/denoland/deno/issues/25004 --- ext/node/lib.rs | 26 +++- ext/node/ops/inspector.rs | 161 ++++++++++++++++++++++++ ext/node/ops/mod.rs | 1 + ext/node/polyfills/inspector.js | 210 +++++++++++++++++++++++++++++++ ext/node/polyfills/inspector.ts | 82 ------------ ext/node/polyfills/inspector/promises.js | 20 +++ 6 files changed, 416 insertions(+), 84 deletions(-) create mode 100644 ext/node/ops/inspector.rs create mode 100644 ext/node/polyfills/inspector.js delete mode 100644 ext/node/polyfills/inspector.ts create mode 100644 ext/node/polyfills/inspector/promises.js (limited to 'ext') diff --git a/ext/node/lib.rs b/ext/node/lib.rs index b08b0493b..9ca21e994 100644 --- a/ext/node/lib.rs +++ b/ext/node/lib.rs @@ -47,6 +47,11 @@ pub trait NodePermissions { url: &Url, api_name: 
&str, ) -> Result<(), PermissionCheckError>; + fn check_net( + &mut self, + host: (&str, Option), + api_name: &str, + ) -> Result<(), PermissionCheckError>; #[must_use = "the resolved return value to mitigate time-of-check to time-of-use issues"] #[inline(always)] fn check_read( @@ -90,6 +95,14 @@ impl NodePermissions for deno_permissions::PermissionsContainer { deno_permissions::PermissionsContainer::check_net_url(self, url, api_name) } + fn check_net( + &mut self, + host: (&str, Option), + api_name: &str, + ) -> Result<(), PermissionCheckError> { + deno_permissions::PermissionsContainer::check_net(self, &host, api_name) + } + #[inline(always)] fn check_read_with_api_name( &mut self, @@ -398,6 +411,15 @@ deno_core::extension!(deno_node, ops::process::op_node_process_kill, ops::process::op_process_abort, ops::tls::op_get_root_certificates, + ops::inspector::op_inspector_open

, + ops::inspector::op_inspector_close, + ops::inspector::op_inspector_url, + ops::inspector::op_inspector_wait, + ops::inspector::op_inspector_connect

, + ops::inspector::op_inspector_dispatch, + ops::inspector::op_inspector_disconnect, + ops::inspector::op_inspector_emit_protocol_event, + ops::inspector::op_inspector_enabled, ], esm_entry_point = "ext:deno_node/02_init.js", esm = [ @@ -606,8 +628,8 @@ deno_core::extension!(deno_node, "node:http" = "http.ts", "node:http2" = "http2.ts", "node:https" = "https.ts", - "node:inspector" = "inspector.ts", - "node:inspector/promises" = "inspector.ts", + "node:inspector" = "inspector.js", + "node:inspector/promises" = "inspector/promises.js", "node:module" = "01_require.js", "node:net" = "net.ts", "node:os" = "os.ts", diff --git a/ext/node/ops/inspector.rs b/ext/node/ops/inspector.rs new file mode 100644 index 000000000..34a7e004c --- /dev/null +++ b/ext/node/ops/inspector.rs @@ -0,0 +1,161 @@ +// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. + +use crate::NodePermissions; +use deno_core::anyhow::Error; +use deno_core::error::generic_error; +use deno_core::futures::channel::mpsc; +use deno_core::op2; +use deno_core::v8; +use deno_core::GarbageCollected; +use deno_core::InspectorSessionKind; +use deno_core::InspectorSessionOptions; +use deno_core::JsRuntimeInspector; +use deno_core::OpState; +use std::cell::RefCell; +use std::rc::Rc; + +#[op2(fast)] +pub fn op_inspector_enabled() -> bool { + // TODO: hook up to InspectorServer + false +} + +#[op2] +pub fn op_inspector_open

( + _state: &mut OpState, + _port: Option, + #[string] _host: Option, +) -> Result<(), Error> +where + P: NodePermissions + 'static, +{ + // TODO: hook up to InspectorServer + /* + let server = state.borrow_mut::(); + if let Some(host) = host { + server.set_host(host); + } + if let Some(port) = port { + server.set_port(port); + } + state + .borrow_mut::

() + .check_net((server.host(), Some(server.port())), "inspector.open")?; + */ + + Ok(()) +} + +#[op2(fast)] +pub fn op_inspector_close() { + // TODO: hook up to InspectorServer +} + +#[op2] +#[string] +pub fn op_inspector_url() -> Option { + // TODO: hook up to InspectorServer + None +} + +#[op2(fast)] +pub fn op_inspector_wait(state: &OpState) -> bool { + match state.try_borrow::>>() { + Some(inspector) => { + inspector + .borrow_mut() + .wait_for_session_and_break_on_next_statement(); + true + } + None => false, + } +} + +#[op2(fast)] +pub fn op_inspector_emit_protocol_event( + #[string] _event_name: String, + #[string] _params: String, +) { + // TODO: inspector channel & protocol notifications +} + +struct JSInspectorSession { + tx: RefCell>>, +} + +impl GarbageCollected for JSInspectorSession {} + +#[op2] +#[cppgc] +pub fn op_inspector_connect<'s, P>( + isolate: *mut v8::Isolate, + scope: &mut v8::HandleScope<'s>, + state: &mut OpState, + connect_to_main_thread: bool, + callback: v8::Local<'s, v8::Function>, +) -> Result +where + P: NodePermissions + 'static, +{ + state + .borrow_mut::

() + .check_sys("inspector", "inspector.Session.connect")?; + + if connect_to_main_thread { + return Err(generic_error("connectToMainThread not supported")); + } + + let context = scope.get_current_context(); + let context = v8::Global::new(scope, context); + let callback = v8::Global::new(scope, callback); + + let inspector = state + .borrow::>>() + .borrow_mut(); + + let tx = inspector.create_raw_session( + InspectorSessionOptions { + kind: InspectorSessionKind::NonBlocking { + wait_for_disconnect: false, + }, + }, + // The inspector connection does not keep the event loop alive but + // when the inspector sends a message to the frontend, the JS that + // that runs may keep the event loop alive so we have to call back + // synchronously, instead of using the usual LocalInspectorSession + // UnboundedReceiver API. + Box::new(move |message| { + // SAFETY: This function is called directly by the inspector, so + // 1) The isolate is still valid + // 2) We are on the same thread as the Isolate + let scope = unsafe { &mut v8::CallbackScope::new(&mut *isolate) }; + let context = v8::Local::new(scope, context.clone()); + let scope = &mut v8::ContextScope::new(scope, context); + let scope = &mut v8::TryCatch::new(scope); + let recv = v8::undefined(scope); + if let Some(message) = v8::String::new(scope, &message.content) { + let callback = v8::Local::new(scope, callback.clone()); + callback.call(scope, recv.into(), &[message.into()]); + } + }), + ); + + Ok(JSInspectorSession { + tx: RefCell::new(Some(tx)), + }) +} + +#[op2(fast)] +pub fn op_inspector_dispatch( + #[cppgc] session: &JSInspectorSession, + #[string] message: String, +) { + if let Some(tx) = &*session.tx.borrow() { + let _ = tx.unbounded_send(message); + } +} + +#[op2(fast)] +pub fn op_inspector_disconnect(#[cppgc] session: &JSInspectorSession) { + drop(session.tx.borrow_mut().take()); +} diff --git a/ext/node/ops/mod.rs b/ext/node/ops/mod.rs index b562261f3..b53f19dc2 100644 --- a/ext/node/ops/mod.rs +++ 
b/ext/node/ops/mod.rs @@ -7,6 +7,7 @@ pub mod fs; pub mod http; pub mod http2; pub mod idna; +pub mod inspector; pub mod ipc; pub mod os; pub mod process; diff --git a/ext/node/polyfills/inspector.js b/ext/node/polyfills/inspector.js new file mode 100644 index 000000000..7eb15ce91 --- /dev/null +++ b/ext/node/polyfills/inspector.js @@ -0,0 +1,210 @@ +// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. +// Copyright Joyent and Node contributors. All rights reserved. MIT license. + +import process from "node:process"; +import { EventEmitter } from "node:events"; +import { primordials } from "ext:core/mod.js"; +import { + op_get_extras_binding_object, + op_inspector_close, + op_inspector_connect, + op_inspector_disconnect, + op_inspector_dispatch, + op_inspector_emit_protocol_event, + op_inspector_enabled, + op_inspector_open, + op_inspector_url, + op_inspector_wait, +} from "ext:core/ops"; +import { + isUint32, + validateFunction, + validateInt32, + validateObject, + validateString, +} from "ext:deno_node/internal/validators.mjs"; +import { + ERR_INSPECTOR_ALREADY_ACTIVATED, + ERR_INSPECTOR_ALREADY_CONNECTED, + ERR_INSPECTOR_CLOSED, + ERR_INSPECTOR_COMMAND, + ERR_INSPECTOR_NOT_ACTIVE, + ERR_INSPECTOR_NOT_CONNECTED, + ERR_INSPECTOR_NOT_WORKER, +} from "ext:deno_node/internal/errors.ts"; + +const { + SymbolDispose, + JSONParse, + JSONStringify, + SafeMap, +} = primordials; + +class Session extends EventEmitter { + #connection = null; + #nextId = 1; + #messageCallbacks = new SafeMap(); + + connect() { + if (this.#connection) { + throw new ERR_INSPECTOR_ALREADY_CONNECTED("The inspector session"); + } + this.#connection = op_inspector_connect(false, (m) => this.#onMessage(m)); + } + + connectToMainThread() { + if (isMainThread) { + throw new ERR_INSPECTOR_NOT_WORKER(); + } + if (this.#connection) { + throw new ERR_INSPECTOR_ALREADY_CONNECTED("The inspector session"); + } + this.#connection = op_inspector_connect(true, (m) => this.#onMessage(m)); + } 
+ + #onMessage(message) { + const parsed = JSONParse(message); + try { + if (parsed.id) { + const callback = this.#messageCallbacks.get(parsed.id); + this.#messageCallbacks.delete(parsed.id); + if (callback) { + if (parsed.error) { + return callback( + new ERR_INSPECTOR_COMMAND( + parsed.error.code, + parsed.error.message, + ), + ); + } + + callback(null, parsed.result); + } + } else { + this.emit(parsed.method, parsed); + this.emit("inspectorNotification", parsed); + } + } catch (error) { + process.emitWarning(error); + } + } + + post(method, params, callback) { + validateString(method, "method"); + if (!callback && typeof params === "function") { + callback = params; + params = null; + } + if (params) { + validateObject(params, "params"); + } + if (callback) { + validateFunction(callback, "callback"); + } + + if (!this.#connection) { + throw new ERR_INSPECTOR_NOT_CONNECTED(); + } + const id = this.#nextId++; + const message = { id, method }; + if (params) { + message.params = params; + } + if (callback) { + this.#messageCallbacks.set(id, callback); + } + op_inspector_dispatch(this.#connection, JSONStringify(message)); + } + + disconnect() { + if (!this.#connection) { + return; + } + op_inspector_disconnect(this.#connection); + this.#connection = null; + // deno-lint-ignore prefer-primordials + for (const callback of this.#messageCallbacks.values()) { + process.nextTick(callback, new ERR_INSPECTOR_CLOSED()); + } + this.#messageCallbacks.clear(); + this.#nextId = 1; + } +} + +function open(port, host, wait) { + if (op_inspector_enabled()) { + throw new ERR_INSPECTOR_ALREADY_ACTIVATED(); + } + // inspectorOpen() currently does not typecheck its arguments and adding + // such checks would be a potentially breaking change. However, the native + // open() function requires the port to fit into a 16-bit unsigned integer, + // causing an integer overflow otherwise, so we at least need to prevent that. 
+ if (isUint32(port)) { + validateInt32(port, "port", 0, 65535); + } else { + // equiv of handling args[0]->IsUint32() + port = undefined; + } + if (typeof host !== "string") { + // equiv of handling args[1]->IsString() + host = undefined; + } + op_inspector_open(port, host); + if (wait) { + op_inspector_wait(); + } + + return { + __proto__: null, + [SymbolDispose]() { + _debugEnd(); + }, + }; +} + +function close() { + op_inspector_close(); +} + +function url() { + return op_inspector_url(); +} + +function waitForDebugger() { + if (!op_inspector_wait()) { + throw new ERR_INSPECTOR_NOT_ACTIVE(); + } +} + +function broadcastToFrontend(eventName, params) { + validateString(eventName, "eventName"); + if (params) { + validateObject(params, "params"); + } + op_inspector_emit_protocol_event(eventName, JSONStringify(params ?? {})); +} + +const Network = { + requestWillBeSent: (params) => + broadcastToFrontend("Network.requestWillBeSent", params), + responseReceived: (params) => + broadcastToFrontend("Network.responseReceived", params), + loadingFinished: (params) => + broadcastToFrontend("Network.loadingFinished", params), + loadingFailed: (params) => + broadcastToFrontend("Network.loadingFailed", params), +}; + +const console = op_get_extras_binding_object().console; + +export { close, console, Network, open, Session, url, waitForDebugger }; + +export default { + open, + close, + url, + waitForDebugger, + console, + Session, + Network, +}; diff --git a/ext/node/polyfills/inspector.ts b/ext/node/polyfills/inspector.ts deleted file mode 100644 index 9de86ab14..000000000 --- a/ext/node/polyfills/inspector.ts +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. -// Copyright Joyent and Node contributors. All rights reserved. MIT license. 
- -import { EventEmitter } from "node:events"; -import { notImplemented } from "ext:deno_node/_utils.ts"; -import { primordials } from "ext:core/mod.js"; - -const { - SafeMap, -} = primordials; - -class Session extends EventEmitter { - #connection = null; - #nextId = 1; - #messageCallbacks = new SafeMap(); - - /** Connects the session to the inspector back-end. */ - connect() { - notImplemented("inspector.Session.prototype.connect"); - } - - /** Connects the session to the main thread - * inspector back-end. */ - connectToMainThread() { - notImplemented("inspector.Session.prototype.connectToMainThread"); - } - - /** Posts a message to the inspector back-end. */ - post( - _method: string, - _params?: Record, - _callback?: (...args: unknown[]) => void, - ) { - notImplemented("inspector.Session.prototype.post"); - } - - /** Immediately closes the session, all pending - * message callbacks will be called with an - * error. - */ - disconnect() { - notImplemented("inspector.Session.prototype.disconnect"); - } -} - -/** Activates inspector on host and port. - * See https://nodejs.org/api/inspector.html#inspectoropenport-host-wait */ -function open(_port?: number, _host?: string, _wait?: boolean) { - notImplemented("inspector.Session.prototype.open"); -} - -/** Deactivate the inspector. Blocks until there are no active connections. - * See https://nodejs.org/api/inspector.html#inspectorclose */ -function close() { - notImplemented("inspector.Session.prototype.close"); -} - -/** Return the URL of the active inspector, or undefined if there is none. - * See https://nodejs.org/api/inspector.html#inspectorurl */ -function url() { - // TODO(kt3k): returns undefined for now, which means the inspector is not activated. - return undefined; -} - -/** Blocks until a client (existing or connected later) has sent Runtime.runIfWaitingForDebugger command. 
- * See https://nodejs.org/api/inspector.html#inspectorwaitfordebugger */ -function waitForDebugger() { - notImplemented("inspector.wairForDebugger"); -} - -const console = globalThis.console; - -export { close, console, open, Session, url, waitForDebugger }; - -export default { - close, - console, - open, - Session, - url, - waitForDebugger, -}; diff --git a/ext/node/polyfills/inspector/promises.js b/ext/node/polyfills/inspector/promises.js new file mode 100644 index 000000000..3483e53f5 --- /dev/null +++ b/ext/node/polyfills/inspector/promises.js @@ -0,0 +1,20 @@ +// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. +// Copyright Joyent and Node contributors. All rights reserved. MIT license. + +import inspector from "node:inspector"; +import { promisify } from "ext:deno_node/internal/util.mjs"; + +class Session extends inspector.Session { + constructor() { + super(); + } +} +Session.prototype.post = promisify(inspector.Session.prototype.post); + +export * from "node:inspector"; +export { Session }; + +export default { + ...inspector, + Session, +}; -- cgit v1.2.3 From b3a3d84ce249ff126f92e7a0849ec0a6ce26e973 Mon Sep 17 00:00:00 2001 From: Satya Rohith Date: Wed, 6 Nov 2024 19:42:24 +0530 Subject: fix(node:zlib): gzip & gzipSync should accept ArrayBuffer (#26762) Closes https://github.com/denoland/deno/issues/26638 --- ext/node/polyfills/_zlib.mjs | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'ext') diff --git a/ext/node/polyfills/_zlib.mjs b/ext/node/polyfills/_zlib.mjs index 851bd602f..07fc440ef 100644 --- a/ext/node/polyfills/_zlib.mjs +++ b/ext/node/polyfills/_zlib.mjs @@ -14,6 +14,7 @@ import { nextTick } from "ext:deno_node/_next_tick.ts"; import { isAnyArrayBuffer, isArrayBufferView, + isUint8Array, } from "ext:deno_node/internal/util/types.ts"; var kRangeErrorMessage = "Cannot create final Buffer. 
It would be larger " + @@ -158,6 +159,12 @@ export const inflateRawSync = function (buffer, opts) { function sanitizeInput(input) { if (typeof input === "string") input = Buffer.from(input); + if (isArrayBufferView(input) && !isUint8Array(input)) { + input = Buffer.from(input.buffer, input.byteOffset, input.byteLength); + } else if (isAnyArrayBuffer(input)) { + input = Buffer.from(input); + } + if ( !Buffer.isBuffer(input) && (input.buffer && !input.buffer.constructor === ArrayBuffer) -- cgit v1.2.3 From db53ec230d2de1b3be50230d4c00e83a03df686f Mon Sep 17 00:00:00 2001 From: Kaveh Date: Thu, 7 Nov 2024 03:19:32 +0330 Subject: refactor(ext/net): Use hickory dns instead of unmaintained trust-dns (#26741) This PR replaces the unmaintained and rebranded `trust-dns` to `hickory` for resolver in `deno_net`. --- ext/net/Cargo.toml | 4 ++-- ext/net/ops.rs | 50 +++++++++++++++++++++++++------------------------- 2 files changed, 27 insertions(+), 27 deletions(-) (limited to 'ext') diff --git a/ext/net/Cargo.toml b/ext/net/Cargo.toml index b22fbe7b5..61bb5701a 100644 --- a/ext/net/Cargo.toml +++ b/ext/net/Cargo.toml @@ -17,11 +17,11 @@ path = "lib.rs" deno_core.workspace = true deno_permissions.workspace = true deno_tls.workspace = true +hickory-proto = "0.24" +hickory-resolver = { version = "0.24", features = ["tokio-runtime", "serde-config"] } pin-project.workspace = true rustls-tokio-stream.workspace = true serde.workspace = true socket2.workspace = true thiserror.workspace = true tokio.workspace = true -trust-dns-proto = "0.23" -trust-dns-resolver = { version = "0.23", features = ["tokio-runtime", "serde-config"] } diff --git a/ext/net/ops.rs b/ext/net/ops.rs index 35bcff8dc..9a8b70f0f 100644 --- a/ext/net/ops.rs +++ b/ext/net/ops.rs @@ -18,6 +18,16 @@ use deno_core::OpState; use deno_core::RcRef; use deno_core::Resource; use deno_core::ResourceId; +use hickory_proto::rr::rdata::caa::Value; +use hickory_proto::rr::record_data::RData; +use 
hickory_proto::rr::record_type::RecordType; +use hickory_resolver::config::NameServerConfigGroup; +use hickory_resolver::config::ResolverConfig; +use hickory_resolver::config::ResolverOpts; +use hickory_resolver::error::ResolveError; +use hickory_resolver::error::ResolveErrorKind; +use hickory_resolver::system_conf; +use hickory_resolver::AsyncResolver; use serde::Deserialize; use serde::Serialize; use socket2::Domain; @@ -33,16 +43,6 @@ use std::rc::Rc; use std::str::FromStr; use tokio::net::TcpStream; use tokio::net::UdpSocket; -use trust_dns_proto::rr::rdata::caa::Value; -use trust_dns_proto::rr::record_data::RData; -use trust_dns_proto::rr::record_type::RecordType; -use trust_dns_resolver::config::NameServerConfigGroup; -use trust_dns_resolver::config::ResolverConfig; -use trust_dns_resolver::config::ResolverOpts; -use trust_dns_resolver::error::ResolveError; -use trust_dns_resolver::error::ResolveErrorKind; -use trust_dns_resolver::system_conf; -use trust_dns_resolver::AsyncResolver; #[derive(Serialize, Clone, Debug)] #[serde(rename_all = "camelCase")] @@ -828,6 +828,21 @@ mod tests { use deno_core::JsRuntime; use deno_core::RuntimeOptions; use deno_permissions::PermissionCheckError; + use hickory_proto::rr::rdata::a::A; + use hickory_proto::rr::rdata::aaaa::AAAA; + use hickory_proto::rr::rdata::caa::KeyValue; + use hickory_proto::rr::rdata::caa::CAA; + use hickory_proto::rr::rdata::mx::MX; + use hickory_proto::rr::rdata::name::ANAME; + use hickory_proto::rr::rdata::name::CNAME; + use hickory_proto::rr::rdata::name::NS; + use hickory_proto::rr::rdata::name::PTR; + use hickory_proto::rr::rdata::naptr::NAPTR; + use hickory_proto::rr::rdata::srv::SRV; + use hickory_proto::rr::rdata::txt::TXT; + use hickory_proto::rr::rdata::SOA; + use hickory_proto::rr::record_data::RData; + use hickory_proto::rr::Name; use socket2::SockRef; use std::net::Ipv4Addr; use std::net::Ipv6Addr; @@ -836,21 +851,6 @@ mod tests { use std::path::PathBuf; use std::sync::Arc; use 
std::sync::Mutex; - use trust_dns_proto::rr::rdata::a::A; - use trust_dns_proto::rr::rdata::aaaa::AAAA; - use trust_dns_proto::rr::rdata::caa::KeyValue; - use trust_dns_proto::rr::rdata::caa::CAA; - use trust_dns_proto::rr::rdata::mx::MX; - use trust_dns_proto::rr::rdata::name::ANAME; - use trust_dns_proto::rr::rdata::name::CNAME; - use trust_dns_proto::rr::rdata::name::NS; - use trust_dns_proto::rr::rdata::name::PTR; - use trust_dns_proto::rr::rdata::naptr::NAPTR; - use trust_dns_proto::rr::rdata::srv::SRV; - use trust_dns_proto::rr::rdata::txt::TXT; - use trust_dns_proto::rr::rdata::SOA; - use trust_dns_proto::rr::record_data::RData; - use trust_dns_proto::rr::Name; #[test] fn rdata_to_return_record_a() { -- cgit v1.2.3 From 1cab4f07a3e6125a089726f022dd6bc9af517536 Mon Sep 17 00:00:00 2001 From: Leo Kettmeir Date: Wed, 6 Nov 2024 16:57:57 -0800 Subject: refactor: use concrete error type for remaining ops (#26746) --- ext/node/ops/crypto/cipher.rs | 145 ++++++--- ext/node/ops/crypto/digest.rs | 30 +- ext/node/ops/crypto/keys.rs | 679 +++++++++++++++++++++++++++++------------- ext/node/ops/crypto/mod.rs | 414 ++++++++++++++----------- ext/node/ops/crypto/sign.rs | 142 +++++---- ext/node/ops/crypto/x509.rs | 66 ++-- 6 files changed, 932 insertions(+), 544 deletions(-) (limited to 'ext') diff --git a/ext/node/ops/crypto/cipher.rs b/ext/node/ops/crypto/cipher.rs index b80aa33fe..ec45146b4 100644 --- a/ext/node/ops/crypto/cipher.rs +++ b/ext/node/ops/crypto/cipher.rs @@ -4,9 +4,6 @@ use aes::cipher::block_padding::Pkcs7; use aes::cipher::BlockDecryptMut; use aes::cipher::BlockEncryptMut; use aes::cipher::KeyIvInit; -use deno_core::error::range_error; -use deno_core::error::type_error; -use deno_core::error::AnyError; use deno_core::Resource; use digest::generic_array::GenericArray; use digest::KeyInit; @@ -50,8 +47,22 @@ pub struct DecipherContext { decipher: Rc>, } +#[derive(Debug, thiserror::Error)] +pub enum CipherContextError { + #[error("Cipher context is already 
in use")] + ContextInUse, + #[error("{0}")] + Resource(deno_core::error::AnyError), + #[error(transparent)] + Cipher(#[from] CipherError), +} + impl CipherContext { - pub fn new(algorithm: &str, key: &[u8], iv: &[u8]) -> Result { + pub fn new( + algorithm: &str, + key: &[u8], + iv: &[u8], + ) -> Result { Ok(Self { cipher: Rc::new(RefCell::new(Cipher::new(algorithm, key, iv)?)), }) @@ -74,16 +85,31 @@ impl CipherContext { auto_pad: bool, input: &[u8], output: &mut [u8], - ) -> Result { + ) -> Result { Rc::try_unwrap(self.cipher) - .map_err(|_| type_error("Cipher context is already in use"))? + .map_err(|_| CipherContextError::ContextInUse)? .into_inner() .r#final(auto_pad, input, output) + .map_err(Into::into) } } +#[derive(Debug, thiserror::Error)] +pub enum DecipherContextError { + #[error("Decipher context is already in use")] + ContextInUse, + #[error("{0}")] + Resource(deno_core::error::AnyError), + #[error(transparent)] + Decipher(#[from] DecipherError), +} + impl DecipherContext { - pub fn new(algorithm: &str, key: &[u8], iv: &[u8]) -> Result { + pub fn new( + algorithm: &str, + key: &[u8], + iv: &[u8], + ) -> Result { Ok(Self { decipher: Rc::new(RefCell::new(Decipher::new(algorithm, key, iv)?)), }) @@ -103,11 +129,12 @@ impl DecipherContext { input: &[u8], output: &mut [u8], auth_tag: &[u8], - ) -> Result<(), AnyError> { + ) -> Result<(), DecipherContextError> { Rc::try_unwrap(self.decipher) - .map_err(|_| type_error("Decipher context is already in use"))? + .map_err(|_| DecipherContextError::ContextInUse)? 
.into_inner() .r#final(auto_pad, input, output, auth_tag) + .map_err(Into::into) } } @@ -123,12 +150,26 @@ impl Resource for DecipherContext { } } +#[derive(Debug, thiserror::Error)] +pub enum CipherError { + #[error("IV length must be 12 bytes")] + InvalidIvLength, + #[error("Invalid key length")] + InvalidKeyLength, + #[error("Invalid initialization vector")] + InvalidInitializationVector, + #[error("Cannot pad the input data")] + CannotPadInputData, + #[error("Unknown cipher {0}")] + UnknownCipher(String), +} + impl Cipher { fn new( algorithm_name: &str, key: &[u8], iv: &[u8], - ) -> Result { + ) -> Result { use Cipher::*; Ok(match algorithm_name { "aes-128-cbc" => { @@ -139,7 +180,7 @@ impl Cipher { "aes-256-ecb" => Aes256Ecb(Box::new(ecb::Encryptor::new(key.into()))), "aes-128-gcm" => { if iv.len() != 12 { - return Err(type_error("IV length must be 12 bytes")); + return Err(CipherError::InvalidIvLength); } let cipher = @@ -149,7 +190,7 @@ impl Cipher { } "aes-256-gcm" => { if iv.len() != 12 { - return Err(type_error("IV length must be 12 bytes")); + return Err(CipherError::InvalidIvLength); } let cipher = @@ -159,15 +200,15 @@ impl Cipher { } "aes256" | "aes-256-cbc" => { if key.len() != 32 { - return Err(range_error("Invalid key length")); + return Err(CipherError::InvalidKeyLength); } if iv.len() != 16 { - return Err(type_error("Invalid initialization vector")); + return Err(CipherError::InvalidInitializationVector); } Aes256Cbc(Box::new(cbc::Encryptor::new(key.into(), iv.into()))) } - _ => return Err(type_error(format!("Unknown cipher {algorithm_name}"))), + _ => return Err(CipherError::UnknownCipher(algorithm_name.to_string())), }) } @@ -235,14 +276,14 @@ impl Cipher { auto_pad: bool, input: &[u8], output: &mut [u8], - ) -> Result { + ) -> Result { assert!(input.len() < 16); use Cipher::*; match (self, auto_pad) { (Aes128Cbc(encryptor), true) => { let _ = (*encryptor) .encrypt_padded_b2b_mut::(input, output) - .map_err(|_| type_error("Cannot pad the input 
data"))?; + .map_err(|_| CipherError::CannotPadInputData)?; Ok(None) } (Aes128Cbc(mut encryptor), false) => { @@ -255,7 +296,7 @@ impl Cipher { (Aes128Ecb(encryptor), true) => { let _ = (*encryptor) .encrypt_padded_b2b_mut::(input, output) - .map_err(|_| type_error("Cannot pad the input data"))?; + .map_err(|_| CipherError::CannotPadInputData)?; Ok(None) } (Aes128Ecb(mut encryptor), false) => { @@ -268,7 +309,7 @@ impl Cipher { (Aes192Ecb(encryptor), true) => { let _ = (*encryptor) .encrypt_padded_b2b_mut::(input, output) - .map_err(|_| type_error("Cannot pad the input data"))?; + .map_err(|_| CipherError::CannotPadInputData)?; Ok(None) } (Aes192Ecb(mut encryptor), false) => { @@ -281,7 +322,7 @@ impl Cipher { (Aes256Ecb(encryptor), true) => { let _ = (*encryptor) .encrypt_padded_b2b_mut::(input, output) - .map_err(|_| type_error("Cannot pad the input data"))?; + .map_err(|_| CipherError::CannotPadInputData)?; Ok(None) } (Aes256Ecb(mut encryptor), false) => { @@ -296,7 +337,7 @@ impl Cipher { (Aes256Cbc(encryptor), true) => { let _ = (*encryptor) .encrypt_padded_b2b_mut::(input, output) - .map_err(|_| type_error("Cannot pad the input data"))?; + .map_err(|_| CipherError::CannotPadInputData)?; Ok(None) } (Aes256Cbc(mut encryptor), false) => { @@ -319,12 +360,32 @@ impl Cipher { } } +#[derive(Debug, thiserror::Error)] +pub enum DecipherError { + #[error("IV length must be 12 bytes")] + InvalidIvLength, + #[error("Invalid key length")] + InvalidKeyLength, + #[error("Invalid initialization vector")] + InvalidInitializationVector, + #[error("Cannot unpad the input data")] + CannotUnpadInputData, + #[error("Failed to authenticate data")] + DataAuthenticationFailed, + #[error("setAutoPadding(false) not supported for Aes128Gcm yet")] + SetAutoPaddingFalseAes128GcmUnsupported, + #[error("setAutoPadding(false) not supported for Aes256Gcm yet")] + SetAutoPaddingFalseAes256GcmUnsupported, + #[error("Unknown cipher {0}")] + UnknownCipher(String), +} + impl Decipher { fn new( 
algorithm_name: &str, key: &[u8], iv: &[u8], - ) -> Result { + ) -> Result { use Decipher::*; Ok(match algorithm_name { "aes-128-cbc" => { @@ -335,7 +396,7 @@ impl Decipher { "aes-256-ecb" => Aes256Ecb(Box::new(ecb::Decryptor::new(key.into()))), "aes-128-gcm" => { if iv.len() != 12 { - return Err(type_error("IV length must be 12 bytes")); + return Err(DecipherError::InvalidIvLength); } let decipher = @@ -345,7 +406,7 @@ impl Decipher { } "aes-256-gcm" => { if iv.len() != 12 { - return Err(type_error("IV length must be 12 bytes")); + return Err(DecipherError::InvalidIvLength); } let decipher = @@ -355,15 +416,17 @@ impl Decipher { } "aes256" | "aes-256-cbc" => { if key.len() != 32 { - return Err(range_error("Invalid key length")); + return Err(DecipherError::InvalidKeyLength); } if iv.len() != 16 { - return Err(type_error("Invalid initialization vector")); + return Err(DecipherError::InvalidInitializationVector); } Aes256Cbc(Box::new(cbc::Decryptor::new(key.into(), iv.into()))) } - _ => return Err(type_error(format!("Unknown cipher {algorithm_name}"))), + _ => { + return Err(DecipherError::UnknownCipher(algorithm_name.to_string())) + } }) } @@ -432,14 +495,14 @@ impl Decipher { input: &[u8], output: &mut [u8], auth_tag: &[u8], - ) -> Result<(), AnyError> { + ) -> Result<(), DecipherError> { use Decipher::*; match (self, auto_pad) { (Aes128Cbc(decryptor), true) => { assert!(input.len() == 16); let _ = (*decryptor) .decrypt_padded_b2b_mut::(input, output) - .map_err(|_| type_error("Cannot unpad the input data"))?; + .map_err(|_| DecipherError::CannotUnpadInputData)?; Ok(()) } (Aes128Cbc(mut decryptor), false) => { @@ -453,7 +516,7 @@ impl Decipher { assert!(input.len() == 16); let _ = (*decryptor) .decrypt_padded_b2b_mut::(input, output) - .map_err(|_| type_error("Cannot unpad the input data"))?; + .map_err(|_| DecipherError::CannotUnpadInputData)?; Ok(()) } (Aes128Ecb(mut decryptor), false) => { @@ -467,7 +530,7 @@ impl Decipher { assert!(input.len() == 16); let _ = 
(*decryptor) .decrypt_padded_b2b_mut::(input, output) - .map_err(|_| type_error("Cannot unpad the input data"))?; + .map_err(|_| DecipherError::CannotUnpadInputData)?; Ok(()) } (Aes192Ecb(mut decryptor), false) => { @@ -481,7 +544,7 @@ impl Decipher { assert!(input.len() == 16); let _ = (*decryptor) .decrypt_padded_b2b_mut::(input, output) - .map_err(|_| type_error("Cannot unpad the input data"))?; + .map_err(|_| DecipherError::CannotUnpadInputData)?; Ok(()) } (Aes256Ecb(mut decryptor), false) => { @@ -496,28 +559,28 @@ impl Decipher { if tag.as_slice() == auth_tag { Ok(()) } else { - Err(type_error("Failed to authenticate data")) + Err(DecipherError::DataAuthenticationFailed) } } - (Aes128Gcm(_), false) => Err(type_error( - "setAutoPadding(false) not supported for Aes256Gcm yet", - )), + (Aes128Gcm(_), false) => { + Err(DecipherError::SetAutoPaddingFalseAes128GcmUnsupported) + } (Aes256Gcm(decipher), true) => { let tag = decipher.finish(); if tag.as_slice() == auth_tag { Ok(()) } else { - Err(type_error("Failed to authenticate data")) + Err(DecipherError::DataAuthenticationFailed) } } - (Aes256Gcm(_), false) => Err(type_error( - "setAutoPadding(false) not supported for Aes256Gcm yet", - )), + (Aes256Gcm(_), false) => { + Err(DecipherError::SetAutoPaddingFalseAes256GcmUnsupported) + } (Aes256Cbc(decryptor), true) => { assert!(input.len() == 16); let _ = (*decryptor) .decrypt_padded_b2b_mut::(input, output) - .map_err(|_| type_error("Cannot unpad the input data"))?; + .map_err(|_| DecipherError::CannotUnpadInputData)?; Ok(()) } (Aes256Cbc(mut decryptor), false) => { diff --git a/ext/node/ops/crypto/digest.rs b/ext/node/ops/crypto/digest.rs index 293e8e063..a7d8fb51f 100644 --- a/ext/node/ops/crypto/digest.rs +++ b/ext/node/ops/crypto/digest.rs @@ -1,6 +1,4 @@ // Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. 
-use deno_core::error::generic_error; -use deno_core::error::AnyError; use deno_core::GarbageCollected; use digest::Digest; use digest::DynDigest; @@ -19,7 +17,7 @@ impl Hasher { pub fn new( algorithm: &str, output_length: Option, - ) -> Result { + ) -> Result { let hash = Hash::new(algorithm, output_length)?; Ok(Self { @@ -44,7 +42,7 @@ impl Hasher { pub fn clone_inner( &self, output_length: Option, - ) -> Result, AnyError> { + ) -> Result, HashError> { let hash = self.hash.borrow(); let Some(hash) = hash.as_ref() else { return Ok(None); @@ -184,11 +182,19 @@ pub enum Hash { use Hash::*; +#[derive(Debug, thiserror::Error)] +pub enum HashError { + #[error("Output length mismatch for non-extendable algorithm")] + OutputLengthMismatch, + #[error("Digest method not supported: {0}")] + DigestMethodUnsupported(String), +} + impl Hash { pub fn new( algorithm_name: &str, output_length: Option, - ) -> Result { + ) -> Result { match algorithm_name { "shake128" => return Ok(Shake128(Default::default(), output_length)), "shake256" => return Ok(Shake256(Default::default(), output_length)), @@ -201,17 +207,13 @@ impl Hash { let digest: D = Digest::new(); if let Some(length) = output_length { if length != digest.output_size() { - return Err(generic_error( - "Output length mismatch for non-extendable algorithm", - )); + return Err(HashError::OutputLengthMismatch); } } FixedSize(Box::new(digest)) }, _ => { - return Err(generic_error(format!( - "Digest method not supported: {algorithm_name}" - ))) + return Err(HashError::DigestMethodUnsupported(algorithm_name.to_string())) } ); @@ -243,14 +245,12 @@ impl Hash { pub fn clone_hash( &self, output_length: Option, - ) -> Result { + ) -> Result { let hash = match self { FixedSize(context) => { if let Some(length) = output_length { if length != context.output_size() { - return Err(generic_error( - "Output length mismatch for non-extendable algorithm", - )); + return Err(HashError::OutputLengthMismatch); } } FixedSize(context.box_clone()) 
diff --git a/ext/node/ops/crypto/keys.rs b/ext/node/ops/crypto/keys.rs index ac62f5cca..f164972d4 100644 --- a/ext/node/ops/crypto/keys.rs +++ b/ext/node/ops/crypto/keys.rs @@ -4,9 +4,7 @@ use std::borrow::Cow; use std::cell::RefCell; use base64::Engine; -use deno_core::error::generic_error; use deno_core::error::type_error; -use deno_core::error::AnyError; use deno_core::op2; use deno_core::serde_v8::BigInt as V8BigInt; use deno_core::unsync::spawn_blocking; @@ -46,6 +44,7 @@ use spki::der::Reader as _; use spki::DecodePublicKey as _; use spki::EncodePublicKey as _; use spki::SubjectPublicKeyInfoRef; +use x509_parser::error::X509Error; use x509_parser::x509; use super::dh; @@ -236,9 +235,11 @@ impl RsaPssPrivateKey { } impl EcPublicKey { - pub fn to_jwk(&self) -> Result { + pub fn to_jwk(&self) -> Result { match self { - EcPublicKey::P224(_) => Err(type_error("Unsupported JWK EC curve: P224")), + EcPublicKey::P224(_) => { + Err(AsymmetricPublicKeyJwkError::UnsupportedJwkEcCurveP224) + } EcPublicKey::P256(key) => Ok(key.to_jwk()), EcPublicKey::P384(key) => Ok(key.to_jwk()), } @@ -363,49 +364,201 @@ impl<'a> TryFrom> for RsaPssParameters<'a> { } } +#[derive(Debug, thiserror::Error)] +pub enum X509PublicKeyError { + #[error(transparent)] + X509(#[from] x509_parser::error::X509Error), + #[error(transparent)] + Rsa(#[from] rsa::Error), + #[error(transparent)] + Asn1(#[from] x509_parser::der_parser::asn1_rs::Error), + #[error(transparent)] + Ec(#[from] elliptic_curve::Error), + #[error("unsupported ec named curve")] + UnsupportedEcNamedCurve, + #[error("missing ec parameters")] + MissingEcParameters, + #[error("malformed DSS public key")] + MalformedDssPublicKey, + #[error("unsupported x509 public key type")] + UnsupportedX509KeyType, +} + +#[derive(Debug, thiserror::Error)] +pub enum RsaJwkError { + #[error(transparent)] + Base64(#[from] base64::DecodeError), + #[error(transparent)] + Rsa(#[from] rsa::Error), + #[error("missing RSA private component")] + 
MissingRsaPrivateComponent, +} + +#[derive(Debug, thiserror::Error)] +pub enum EcJwkError { + #[error(transparent)] + Ec(#[from] elliptic_curve::Error), + #[error("unsupported curve: {0}")] + UnsupportedCurve(String), +} + +#[derive(Debug, thiserror::Error)] +pub enum EdRawError { + #[error(transparent)] + Ed25519Signature(#[from] ed25519_dalek::SignatureError), + #[error("invalid Ed25519 key")] + InvalidEd25519Key, + #[error("unsupported curve")] + UnsupportedCurve, +} + +#[derive(Debug, thiserror::Error)] +pub enum AsymmetricPrivateKeyError { + #[error("invalid PEM private key: not valid utf8 starting at byte {0}")] + InvalidPemPrivateKeyInvalidUtf8(usize), + #[error("invalid encrypted PEM private key")] + InvalidEncryptedPemPrivateKey, + #[error("invalid PEM private key")] + InvalidPemPrivateKey, + #[error("encrypted private key requires a passphrase to decrypt")] + EncryptedPrivateKeyRequiresPassphraseToDecrypt, + #[error("invalid PKCS#1 private key")] + InvalidPkcs1PrivateKey, + #[error("invalid SEC1 private key")] + InvalidSec1PrivateKey, + #[error("unsupported PEM label: {0}")] + UnsupportedPemLabel(String), + #[error(transparent)] + RsaPssParamsParse(#[from] RsaPssParamsParseError), + #[error("invalid encrypted PKCS#8 private key")] + InvalidEncryptedPkcs8PrivateKey, + #[error("invalid PKCS#8 private key")] + InvalidPkcs8PrivateKey, + #[error("PKCS#1 private key does not support encryption with passphrase")] + Pkcs1PrivateKeyDoesNotSupportEncryptionWithPassphrase, + #[error("SEC1 private key does not support encryption with passphrase")] + Sec1PrivateKeyDoesNotSupportEncryptionWithPassphrase, + #[error("unsupported ec named curve")] + UnsupportedEcNamedCurve, + #[error("invalid private key")] + InvalidPrivateKey, + #[error("invalid DSA private key")] + InvalidDsaPrivateKey, + #[error("malformed or missing named curve in ec parameters")] + MalformedOrMissingNamedCurveInEcParameters, + #[error("unsupported key type: {0}")] + UnsupportedKeyType(String), + 
#[error("unsupported key format: {0}")] + UnsupportedKeyFormat(String), + #[error("invalid x25519 private key")] + InvalidX25519PrivateKey, + #[error("x25519 private key is the wrong length")] + X25519PrivateKeyIsWrongLength, + #[error("invalid Ed25519 private key")] + InvalidEd25519PrivateKey, + #[error("missing dh parameters")] + MissingDhParameters, + #[error("unsupported private key oid")] + UnsupportedPrivateKeyOid, +} + +#[derive(Debug, thiserror::Error)] +pub enum AsymmetricPublicKeyError { + #[error("invalid PEM private key: not valid utf8 starting at byte {0}")] + InvalidPemPrivateKeyInvalidUtf8(usize), + #[error("invalid PEM public key")] + InvalidPemPublicKey, + #[error("invalid PKCS#1 public key")] + InvalidPkcs1PublicKey, + #[error(transparent)] + AsymmetricPrivateKey(#[from] AsymmetricPrivateKeyError), + #[error("invalid x509 certificate")] + InvalidX509Certificate, + #[error(transparent)] + X509(#[from] x509_parser::nom::Err), + #[error(transparent)] + X509PublicKey(#[from] X509PublicKeyError), + #[error("unsupported PEM label: {0}")] + UnsupportedPemLabel(String), + #[error("invalid SPKI public key")] + InvalidSpkiPublicKey, + #[error("unsupported key type: {0}")] + UnsupportedKeyType(String), + #[error("unsupported key format: {0}")] + UnsupportedKeyFormat(String), + #[error(transparent)] + Spki(#[from] spki::Error), + #[error(transparent)] + Pkcs1(#[from] rsa::pkcs1::Error), + #[error(transparent)] + RsaPssParamsParse(#[from] RsaPssParamsParseError), + #[error("malformed DSS public key")] + MalformedDssPublicKey, + #[error("malformed or missing named curve in ec parameters")] + MalformedOrMissingNamedCurveInEcParameters, + #[error("malformed or missing public key in ec spki")] + MalformedOrMissingPublicKeyInEcSpki, + #[error(transparent)] + Ec(#[from] elliptic_curve::Error), + #[error("unsupported ec named curve")] + UnsupportedEcNamedCurve, + #[error("malformed or missing public key in x25519 spki")] + MalformedOrMissingPublicKeyInX25519Spki, + 
#[error("x25519 public key is too short")] + X25519PublicKeyIsTooShort, + #[error("invalid Ed25519 public key")] + InvalidEd25519PublicKey, + #[error("missing dh parameters")] + MissingDhParameters, + #[error("malformed dh parameters")] + MalformedDhParameters, + #[error("malformed or missing public key in dh spki")] + MalformedOrMissingPublicKeyInDhSpki, + #[error("unsupported private key oid")] + UnsupportedPrivateKeyOid, +} + impl KeyObjectHandle { pub fn new_asymmetric_private_key_from_js( key: &[u8], format: &str, typ: &str, passphrase: Option<&[u8]>, - ) -> Result { + ) -> Result { let document = match format { "pem" => { let pem = std::str::from_utf8(key).map_err(|err| { - type_error(format!( - "invalid PEM private key: not valid utf8 starting at byte {}", - err.valid_up_to() - )) + AsymmetricPrivateKeyError::InvalidPemPrivateKeyInvalidUtf8( + err.valid_up_to(), + ) })?; if let Some(passphrase) = passphrase { - SecretDocument::from_pkcs8_encrypted_pem(pem, passphrase) - .map_err(|_| type_error("invalid encrypted PEM private key"))? + SecretDocument::from_pkcs8_encrypted_pem(pem, passphrase).map_err( + |_| AsymmetricPrivateKeyError::InvalidEncryptedPemPrivateKey, + )? } else { let (label, doc) = SecretDocument::from_pem(pem) - .map_err(|_| type_error("invalid PEM private key"))?; + .map_err(|_| AsymmetricPrivateKeyError::InvalidPemPrivateKey)?; match label { EncryptedPrivateKeyInfo::PEM_LABEL => { - return Err(type_error( - "encrypted private key requires a passphrase to decrypt", - )) + return Err(AsymmetricPrivateKeyError::EncryptedPrivateKeyRequiresPassphraseToDecrypt); } PrivateKeyInfo::PEM_LABEL => doc, rsa::pkcs1::RsaPrivateKey::PEM_LABEL => { - SecretDocument::from_pkcs1_der(doc.as_bytes()) - .map_err(|_| type_error("invalid PKCS#1 private key"))? + SecretDocument::from_pkcs1_der(doc.as_bytes()).map_err(|_| { + AsymmetricPrivateKeyError::InvalidPkcs1PrivateKey + })? 
} sec1::EcPrivateKey::PEM_LABEL => { SecretDocument::from_sec1_der(doc.as_bytes()) - .map_err(|_| type_error("invalid SEC1 private key"))? + .map_err(|_| AsymmetricPrivateKeyError::InvalidSec1PrivateKey)? } _ => { - return Err(type_error(format!( - "unsupported PEM label: {}", - label - ))) + return Err(AsymmetricPrivateKeyError::UnsupportedPemLabel( + label.to_string(), + )) } } } @@ -413,54 +566,57 @@ impl KeyObjectHandle { "der" => match typ { "pkcs8" => { if let Some(passphrase) = passphrase { - SecretDocument::from_pkcs8_encrypted_der(key, passphrase) - .map_err(|_| type_error("invalid encrypted PKCS#8 private key"))? + SecretDocument::from_pkcs8_encrypted_der(key, passphrase).map_err( + |_| AsymmetricPrivateKeyError::InvalidEncryptedPkcs8PrivateKey, + )? } else { SecretDocument::from_pkcs8_der(key) - .map_err(|_| type_error("invalid PKCS#8 private key"))? + .map_err(|_| AsymmetricPrivateKeyError::InvalidPkcs8PrivateKey)? } } "pkcs1" => { if passphrase.is_some() { - return Err(type_error( - "PKCS#1 private key does not support encryption with passphrase", - )); + return Err(AsymmetricPrivateKeyError::Pkcs1PrivateKeyDoesNotSupportEncryptionWithPassphrase); } SecretDocument::from_pkcs1_der(key) - .map_err(|_| type_error("invalid PKCS#1 private key"))? + .map_err(|_| AsymmetricPrivateKeyError::InvalidPkcs1PrivateKey)? } "sec1" => { if passphrase.is_some() { - return Err(type_error( - "SEC1 private key does not support encryption with passphrase", - )); + return Err(AsymmetricPrivateKeyError::Sec1PrivateKeyDoesNotSupportEncryptionWithPassphrase); } SecretDocument::from_sec1_der(key) - .map_err(|_| type_error("invalid SEC1 private key"))? + .map_err(|_| AsymmetricPrivateKeyError::InvalidSec1PrivateKey)? 
+ } + _ => { + return Err(AsymmetricPrivateKeyError::UnsupportedKeyType( + typ.to_string(), + )) } - _ => return Err(type_error(format!("unsupported key type: {}", typ))), }, _ => { - return Err(type_error(format!("unsupported key format: {}", format))) + return Err(AsymmetricPrivateKeyError::UnsupportedKeyFormat( + format.to_string(), + )) } }; let pk_info = PrivateKeyInfo::try_from(document.as_bytes()) - .map_err(|_| type_error("invalid private key"))?; + .map_err(|_| AsymmetricPrivateKeyError::InvalidPrivateKey)?; let alg = pk_info.algorithm.oid; let private_key = match alg { RSA_ENCRYPTION_OID => { let private_key = rsa::RsaPrivateKey::from_pkcs1_der(pk_info.private_key) - .map_err(|_| type_error("invalid PKCS#1 private key"))?; + .map_err(|_| AsymmetricPrivateKeyError::InvalidPkcs1PrivateKey)?; AsymmetricPrivateKey::Rsa(private_key) } RSASSA_PSS_OID => { let details = parse_rsa_pss_params(pk_info.algorithm.parameters)?; let private_key = rsa::RsaPrivateKey::from_pkcs1_der(pk_info.private_key) - .map_err(|_| type_error("invalid PKCS#1 private key"))?; + .map_err(|_| AsymmetricPrivateKeyError::InvalidPkcs1PrivateKey)?; AsymmetricPrivateKey::RsaPss(RsaPssPrivateKey { key: private_key, details, @@ -468,40 +624,43 @@ impl KeyObjectHandle { } DSA_OID => { let private_key = dsa::SigningKey::try_from(pk_info) - .map_err(|_| type_error("invalid DSA private key"))?; + .map_err(|_| AsymmetricPrivateKeyError::InvalidDsaPrivateKey)?; AsymmetricPrivateKey::Dsa(private_key) } EC_OID => { let named_curve = pk_info.algorithm.parameters_oid().map_err(|_| { - type_error("malformed or missing named curve in ec parameters") + AsymmetricPrivateKeyError::MalformedOrMissingNamedCurveInEcParameters })?; match named_curve { ID_SECP224R1_OID => { - let secret_key = - p224::SecretKey::from_sec1_der(pk_info.private_key) - .map_err(|_| type_error("invalid SEC1 private key"))?; + let secret_key = p224::SecretKey::from_sec1_der( + pk_info.private_key, + ) + .map_err(|_| 
AsymmetricPrivateKeyError::InvalidSec1PrivateKey)?; AsymmetricPrivateKey::Ec(EcPrivateKey::P224(secret_key)) } ID_SECP256R1_OID => { - let secret_key = - p256::SecretKey::from_sec1_der(pk_info.private_key) - .map_err(|_| type_error("invalid SEC1 private key"))?; + let secret_key = p256::SecretKey::from_sec1_der( + pk_info.private_key, + ) + .map_err(|_| AsymmetricPrivateKeyError::InvalidSec1PrivateKey)?; AsymmetricPrivateKey::Ec(EcPrivateKey::P256(secret_key)) } ID_SECP384R1_OID => { - let secret_key = - p384::SecretKey::from_sec1_der(pk_info.private_key) - .map_err(|_| type_error("invalid SEC1 private key"))?; + let secret_key = p384::SecretKey::from_sec1_der( + pk_info.private_key, + ) + .map_err(|_| AsymmetricPrivateKeyError::InvalidSec1PrivateKey)?; AsymmetricPrivateKey::Ec(EcPrivateKey::P384(secret_key)) } - _ => return Err(type_error("unsupported ec named curve")), + _ => return Err(AsymmetricPrivateKeyError::UnsupportedEcNamedCurve), } } X25519_OID => { let string_ref = OctetStringRef::from_der(pk_info.private_key) - .map_err(|_| type_error("invalid x25519 private key"))?; + .map_err(|_| AsymmetricPrivateKeyError::InvalidX25519PrivateKey)?; if string_ref.as_bytes().len() != 32 { - return Err(type_error("x25519 private key is the wrong length")); + return Err(AsymmetricPrivateKeyError::X25519PrivateKeyIsWrongLength); } let mut bytes = [0; 32]; bytes.copy_from_slice(string_ref.as_bytes()); @@ -509,22 +668,22 @@ impl KeyObjectHandle { } ED25519_OID => { let signing_key = ed25519_dalek::SigningKey::try_from(pk_info) - .map_err(|_| type_error("invalid Ed25519 private key"))?; + .map_err(|_| AsymmetricPrivateKeyError::InvalidEd25519PrivateKey)?; AsymmetricPrivateKey::Ed25519(signing_key) } DH_KEY_AGREEMENT_OID => { let params = pk_info .algorithm .parameters - .ok_or_else(|| type_error("missing dh parameters"))?; + .ok_or(AsymmetricPrivateKeyError::MissingDhParameters)?; let params = pkcs3::DhParameter::from_der(¶ms.to_der().unwrap()) - .map_err(|_| 
type_error("malformed dh parameters"))?; + .map_err(|_| AsymmetricPrivateKeyError::MissingDhParameters)?; AsymmetricPrivateKey::Dh(DhPrivateKey { key: dh::PrivateKey::from_bytes(pk_info.private_key), params, }) } - _ => return Err(type_error("unsupported private key oid")), + _ => return Err(AsymmetricPrivateKeyError::UnsupportedPrivateKeyOid), }; Ok(KeyObjectHandle::AsymmetricPrivate(private_key)) @@ -532,7 +691,7 @@ impl KeyObjectHandle { pub fn new_x509_public_key( spki: &x509::SubjectPublicKeyInfo, - ) -> Result { + ) -> Result { use x509_parser::der_parser::asn1_rs::oid; use x509_parser::public_key::PublicKey; @@ -565,18 +724,18 @@ impl KeyObjectHandle { let public_key = p384::PublicKey::from_sec1_bytes(data)?; AsymmetricPublicKey::Ec(EcPublicKey::P384(public_key)) } - _ => return Err(type_error("unsupported ec named curve")), + _ => return Err(X509PublicKeyError::UnsupportedEcNamedCurve), } } else { - return Err(type_error("missing ec parameters")); + return Err(X509PublicKeyError::MissingEcParameters); } } PublicKey::DSA(_) => { let verifying_key = dsa::VerifyingKey::from_public_key_der(spki.raw) - .map_err(|_| type_error("malformed DSS public key"))?; + .map_err(|_| X509PublicKeyError::MalformedDssPublicKey)?; AsymmetricPublicKey::Dsa(verifying_key) } - _ => return Err(type_error("unsupported x509 public key type")), + _ => return Err(X509PublicKeyError::UnsupportedX509KeyType), }; Ok(KeyObjectHandle::AsymmetricPublic(key)) @@ -585,7 +744,7 @@ impl KeyObjectHandle { pub fn new_rsa_jwk( jwk: RsaJwkKey, is_public: bool, - ) -> Result { + ) -> Result { use base64::prelude::BASE64_URL_SAFE_NO_PAD; let n = BASE64_URL_SAFE_NO_PAD.decode(jwk.n.as_bytes())?; @@ -604,19 +763,19 @@ impl KeyObjectHandle { let d = BASE64_URL_SAFE_NO_PAD.decode( jwk .d - .ok_or_else(|| type_error("missing RSA private component"))? + .ok_or(RsaJwkError::MissingRsaPrivateComponent)? 
.as_bytes(), )?; let p = BASE64_URL_SAFE_NO_PAD.decode( jwk .p - .ok_or_else(|| type_error("missing RSA private component"))? + .ok_or(RsaJwkError::MissingRsaPrivateComponent)? .as_bytes(), )?; let q = BASE64_URL_SAFE_NO_PAD.decode( jwk .q - .ok_or_else(|| type_error("missing RSA private component"))? + .ok_or(RsaJwkError::MissingRsaPrivateComponent)? .as_bytes(), )?; @@ -640,7 +799,7 @@ impl KeyObjectHandle { pub fn new_ec_jwk( jwk: &JwkEcKey, is_public: bool, - ) -> Result { + ) -> Result { // https://datatracker.ietf.org/doc/html/rfc7518#section-6.2.1.1 let handle = match jwk.crv() { "P-256" if is_public => { @@ -660,7 +819,7 @@ impl KeyObjectHandle { EcPrivateKey::P384(p384::SecretKey::from_jwk(jwk)?), )), _ => { - return Err(type_error(format!("unsupported curve: {}", jwk.crv()))); + return Err(EcJwkError::UnsupportedCurve(jwk.crv().to_string())); } }; @@ -671,12 +830,11 @@ impl KeyObjectHandle { curve: &str, data: &[u8], is_public: bool, - ) -> Result { + ) -> Result { match curve { "Ed25519" => { - let data = data - .try_into() - .map_err(|_| type_error("invalid Ed25519 key"))?; + let data = + data.try_into().map_err(|_| EdRawError::InvalidEd25519Key)?; if !is_public { Ok(KeyObjectHandle::AsymmetricPrivate( AsymmetricPrivateKey::Ed25519( @@ -692,9 +850,8 @@ impl KeyObjectHandle { } } "X25519" => { - let data: [u8; 32] = data - .try_into() - .map_err(|_| type_error("invalid x25519 key"))?; + let data: [u8; 32] = + data.try_into().map_err(|_| EdRawError::InvalidEd25519Key)?; if !is_public { Ok(KeyObjectHandle::AsymmetricPrivate( AsymmetricPrivateKey::X25519(x25519_dalek::StaticSecret::from( @@ -707,7 +864,7 @@ impl KeyObjectHandle { )) } } - _ => Err(type_error("unsupported curve")), + _ => Err(EdRawError::UnsupportedCurve), } } @@ -716,24 +873,23 @@ impl KeyObjectHandle { format: &str, typ: &str, passphrase: Option<&[u8]>, - ) -> Result { + ) -> Result { let document = match format { "pem" => { let pem = std::str::from_utf8(key).map_err(|err| { - 
type_error(format!( - "invalid PEM public key: not valid utf8 starting at byte {}", - err.valid_up_to() - )) + AsymmetricPublicKeyError::InvalidPemPrivateKeyInvalidUtf8( + err.valid_up_to(), + ) })?; let (label, document) = Document::from_pem(pem) - .map_err(|_| type_error("invalid PEM public key"))?; + .map_err(|_| AsymmetricPublicKeyError::InvalidPemPublicKey)?; match label { SubjectPublicKeyInfoRef::PEM_LABEL => document, rsa::pkcs1::RsaPublicKey::PEM_LABEL => { Document::from_pkcs1_der(document.as_bytes()) - .map_err(|_| type_error("invalid PKCS#1 public key"))? + .map_err(|_| AsymmetricPublicKeyError::InvalidPkcs1PublicKey)? } EncryptedPrivateKeyInfo::PEM_LABEL | PrivateKeyInfo::PEM_LABEL @@ -754,27 +910,36 @@ impl KeyObjectHandle { } "CERTIFICATE" => { let (_, pem) = x509_parser::pem::parse_x509_pem(pem.as_bytes()) - .map_err(|_| type_error("invalid x509 certificate"))?; + .map_err(|_| AsymmetricPublicKeyError::InvalidX509Certificate)?; let cert = pem.parse_x509()?; let public_key = cert.tbs_certificate.subject_pki; - return KeyObjectHandle::new_x509_public_key(&public_key); + return KeyObjectHandle::new_x509_public_key(&public_key) + .map_err(Into::into); } _ => { - return Err(type_error(format!("unsupported PEM label: {}", label))) + return Err(AsymmetricPublicKeyError::UnsupportedPemLabel( + label.to_string(), + )) } } } "der" => match typ { "pkcs1" => Document::from_pkcs1_der(key) - .map_err(|_| type_error("invalid PKCS#1 public key"))?, + .map_err(|_| AsymmetricPublicKeyError::InvalidPkcs1PublicKey)?, "spki" => Document::from_public_key_der(key) - .map_err(|_| type_error("invalid SPKI public key"))?, - _ => return Err(type_error(format!("unsupported key type: {}", typ))), + .map_err(|_| AsymmetricPublicKeyError::InvalidSpkiPublicKey)?, + _ => { + return Err(AsymmetricPublicKeyError::UnsupportedKeyType( + typ.to_string(), + )) + } }, _ => { - return Err(type_error(format!("unsupported key format: {}", format))) + return 
Err(AsymmetricPublicKeyError::UnsupportedKeyType( + format.to_string(), + )) } }; @@ -799,16 +964,16 @@ impl KeyObjectHandle { } DSA_OID => { let verifying_key = dsa::VerifyingKey::try_from(spki) - .map_err(|_| type_error("malformed DSS public key"))?; + .map_err(|_| AsymmetricPublicKeyError::MalformedDssPublicKey)?; AsymmetricPublicKey::Dsa(verifying_key) } EC_OID => { let named_curve = spki.algorithm.parameters_oid().map_err(|_| { - type_error("malformed or missing named curve in ec parameters") - })?; - let data = spki.subject_public_key.as_bytes().ok_or_else(|| { - type_error("malformed or missing public key in ec spki") + AsymmetricPublicKeyError::MalformedOrMissingNamedCurveInEcParameters })?; + let data = spki.subject_public_key.as_bytes().ok_or( + AsymmetricPublicKeyError::MalformedOrMissingPublicKeyInEcSpki, + )?; match named_curve { ID_SECP224R1_OID => { @@ -823,54 +988,68 @@ impl KeyObjectHandle { let public_key = p384::PublicKey::from_sec1_bytes(data)?; AsymmetricPublicKey::Ec(EcPublicKey::P384(public_key)) } - _ => return Err(type_error("unsupported ec named curve")), + _ => return Err(AsymmetricPublicKeyError::UnsupportedEcNamedCurve), } } X25519_OID => { let mut bytes = [0; 32]; - let data = spki.subject_public_key.as_bytes().ok_or_else(|| { - type_error("malformed or missing public key in x25519 spki") - })?; + let data = spki.subject_public_key.as_bytes().ok_or( + AsymmetricPublicKeyError::MalformedOrMissingPublicKeyInX25519Spki, + )?; if data.len() < 32 { - return Err(type_error("x25519 public key is too short")); + return Err(AsymmetricPublicKeyError::X25519PublicKeyIsTooShort); } bytes.copy_from_slice(&data[0..32]); AsymmetricPublicKey::X25519(x25519_dalek::PublicKey::from(bytes)) } ED25519_OID => { let verifying_key = ed25519_dalek::VerifyingKey::try_from(spki) - .map_err(|_| type_error("invalid Ed25519 private key"))?; + .map_err(|_| AsymmetricPublicKeyError::InvalidEd25519PublicKey)?; AsymmetricPublicKey::Ed25519(verifying_key) } 
DH_KEY_AGREEMENT_OID => { let params = spki .algorithm .parameters - .ok_or_else(|| type_error("missing dh parameters"))?; + .ok_or(AsymmetricPublicKeyError::MissingDhParameters)?; let params = pkcs3::DhParameter::from_der(¶ms.to_der().unwrap()) - .map_err(|_| type_error("malformed dh parameters"))?; + .map_err(|_| AsymmetricPublicKeyError::MalformedDhParameters)?; let Some(subject_public_key) = spki.subject_public_key.as_bytes() else { - return Err(type_error("malformed or missing public key in dh spki")); + return Err( + AsymmetricPublicKeyError::MalformedOrMissingPublicKeyInDhSpki, + ); }; AsymmetricPublicKey::Dh(DhPublicKey { key: dh::PublicKey::from_bytes(subject_public_key), params, }) } - _ => return Err(type_error("unsupported public key oid")), + _ => return Err(AsymmetricPublicKeyError::UnsupportedPrivateKeyOid), }; Ok(KeyObjectHandle::AsymmetricPublic(public_key)) } } +#[derive(Debug, thiserror::Error)] +pub enum RsaPssParamsParseError { + #[error("malformed pss private key parameters")] + MalformedPssPrivateKeyParameters, + #[error("unsupported pss hash algorithm")] + UnsupportedPssHashAlgorithm, + #[error("unsupported pss mask gen algorithm")] + UnsupportedPssMaskGenAlgorithm, + #[error("malformed or missing pss mask gen algorithm parameters")] + MalformedOrMissingPssMaskGenAlgorithm, +} + fn parse_rsa_pss_params( parameters: Option>, -) -> Result, deno_core::anyhow::Error> { +) -> Result, RsaPssParamsParseError> { let details = if let Some(parameters) = parameters { let params = RsaPssParameters::try_from(parameters) - .map_err(|_| type_error("malformed pss private key parameters"))?; + .map_err(|_| RsaPssParamsParseError::MalformedPssPrivateKeyParameters)?; let hash_algorithm = match params.hash_algorithm.map(|k| k.oid) { Some(ID_SHA1_OID) => RsaPssHashAlgorithm::Sha1, @@ -881,16 +1060,16 @@ fn parse_rsa_pss_params( Some(ID_SHA512_224_OID) => RsaPssHashAlgorithm::Sha512_224, Some(ID_SHA512_256_OID) => RsaPssHashAlgorithm::Sha512_256, None => 
RsaPssHashAlgorithm::Sha1, - _ => return Err(type_error("unsupported pss hash algorithm")), + _ => return Err(RsaPssParamsParseError::UnsupportedPssHashAlgorithm), }; let mf1_hash_algorithm = match params.mask_gen_algorithm { Some(alg) => { if alg.oid != ID_MFG1 { - return Err(type_error("unsupported pss mask gen algorithm")); + return Err(RsaPssParamsParseError::UnsupportedPssMaskGenAlgorithm); } let params = alg.parameters_oid().map_err(|_| { - type_error("malformed or missing pss mask gen algorithm parameters") + RsaPssParamsParseError::MalformedOrMissingPssMaskGenAlgorithm })?; match params { ID_SHA1_OID => RsaPssHashAlgorithm::Sha1, @@ -900,7 +1079,9 @@ fn parse_rsa_pss_params( ID_SHA512_OID => RsaPssHashAlgorithm::Sha512, ID_SHA512_224_OID => RsaPssHashAlgorithm::Sha512_224, ID_SHA512_256_OID => RsaPssHashAlgorithm::Sha512_256, - _ => return Err(type_error("unsupported pss mask gen algorithm")), + _ => { + return Err(RsaPssParamsParseError::UnsupportedPssMaskGenAlgorithm) + } } } None => hash_algorithm, @@ -921,14 +1102,49 @@ fn parse_rsa_pss_params( Ok(details) } -use base64::prelude::BASE64_URL_SAFE_NO_PAD; - fn bytes_to_b64(bytes: &[u8]) -> String { + use base64::prelude::BASE64_URL_SAFE_NO_PAD; BASE64_URL_SAFE_NO_PAD.encode(bytes) } +#[derive(Debug, thiserror::Error)] +pub enum AsymmetricPublicKeyJwkError { + #[error("key is not an asymmetric public key")] + KeyIsNotAsymmetricPublicKey, + #[error("Unsupported JWK EC curve: P224")] + UnsupportedJwkEcCurveP224, + #[error("jwk export not implemented for this key type")] + JwkExportNotImplementedForKeyType, +} + +#[derive(Debug, thiserror::Error)] +pub enum AsymmetricPublicKeyDerError { + #[error("key is not an asymmetric public key")] + KeyIsNotAsymmetricPublicKey, + #[error("invalid RSA public key")] + InvalidRsaPublicKey, + #[error("exporting non-RSA public key as PKCS#1 is not supported")] + ExportingNonRsaPublicKeyAsPkcs1Unsupported, + #[error("invalid EC public key")] + InvalidEcPublicKey, + 
#[error("exporting RSA-PSS public key as SPKI is not supported yet")] + ExportingNonRsaPssPublicKeyAsSpkiUnsupported, + #[error("invalid DSA public key")] + InvalidDsaPublicKey, + #[error("invalid X25519 public key")] + InvalidX25519PublicKey, + #[error("invalid Ed25519 public key")] + InvalidEd25519PublicKey, + #[error("invalid DH public key")] + InvalidDhPublicKey, + #[error("unsupported key type: {0}")] + UnsupportedKeyType(String), +} + impl AsymmetricPublicKey { - fn export_jwk(&self) -> Result { + fn export_jwk( + &self, + ) -> Result { match self { AsymmetricPublicKey::Ec(key) => { let jwk = key.to_jwk()?; @@ -974,40 +1190,39 @@ impl AsymmetricPublicKey { }); Ok(jwk) } - _ => Err(type_error("jwk export not implemented for this key type")), + _ => Err(AsymmetricPublicKeyJwkError::JwkExportNotImplementedForKeyType), } } - fn export_der(&self, typ: &str) -> Result, AnyError> { + fn export_der( + &self, + typ: &str, + ) -> Result, AsymmetricPublicKeyDerError> { match typ { "pkcs1" => match self { AsymmetricPublicKey::Rsa(key) => { let der = key .to_pkcs1_der() - .map_err(|_| type_error("invalid RSA public key"))? + .map_err(|_| AsymmetricPublicKeyDerError::InvalidRsaPublicKey)? .into_vec() .into_boxed_slice(); Ok(der) } - _ => Err(type_error( - "exporting non-RSA public key as PKCS#1 is not supported", - )), + _ => Err(AsymmetricPublicKeyDerError::ExportingNonRsaPublicKeyAsPkcs1Unsupported), }, "spki" => { let der = match self { AsymmetricPublicKey::Rsa(key) => key .to_public_key_der() - .map_err(|_| type_error("invalid RSA public key"))? + .map_err(|_| AsymmetricPublicKeyDerError::InvalidRsaPublicKey)? 
.into_vec() .into_boxed_slice(), AsymmetricPublicKey::RsaPss(_key) => { - return Err(generic_error( - "exporting RSA-PSS public key as SPKI is not supported yet", - )) + return Err(AsymmetricPublicKeyDerError::ExportingNonRsaPssPublicKeyAsSpkiUnsupported) } AsymmetricPublicKey::Dsa(key) => key .to_public_key_der() - .map_err(|_| type_error("invalid DSA public key"))? + .map_err(|_| AsymmetricPublicKeyDerError::InvalidDsaPublicKey)? .into_vec() .into_boxed_slice(), AsymmetricPublicKey::Ec(key) => { @@ -1023,12 +1238,12 @@ impl AsymmetricPublicKey { parameters: Some(asn1::AnyRef::from(&oid)), }, subject_public_key: BitStringRef::from_bytes(&sec1) - .map_err(|_| type_error("invalid EC public key"))?, + .map_err(|_| AsymmetricPublicKeyDerError::InvalidEcPublicKey)?, }; spki .to_der() - .map_err(|_| type_error("invalid EC public key"))? + .map_err(|_| AsymmetricPublicKeyDerError::InvalidEcPublicKey)? .into_boxed_slice() } AsymmetricPublicKey::X25519(key) => { @@ -1038,12 +1253,12 @@ impl AsymmetricPublicKey { parameters: None, }, subject_public_key: BitStringRef::from_bytes(key.as_bytes()) - .map_err(|_| type_error("invalid X25519 public key"))?, + .map_err(|_| AsymmetricPublicKeyDerError::InvalidX25519PublicKey)?, }; spki .to_der() - .map_err(|_| type_error("invalid X25519 public key"))? + .map_err(|_| AsymmetricPublicKeyDerError::InvalidX25519PublicKey)? .into_boxed_slice() } AsymmetricPublicKey::Ed25519(key) => { @@ -1053,12 +1268,12 @@ impl AsymmetricPublicKey { parameters: None, }, subject_public_key: BitStringRef::from_bytes(key.as_bytes()) - .map_err(|_| type_error("invalid Ed25519 public key"))?, + .map_err(|_| AsymmetricPublicKeyDerError::InvalidEd25519PublicKey)?, }; spki .to_der() - .map_err(|_| type_error("invalid Ed25519 public key"))? + .map_err(|_| AsymmetricPublicKeyDerError::InvalidEd25519PublicKey)? 
.into_boxed_slice() } AsymmetricPublicKey::Dh(key) => { @@ -1071,43 +1286,67 @@ impl AsymmetricPublicKey { }, subject_public_key: BitStringRef::from_bytes(&public_key_bytes) .map_err(|_| { - type_error("invalid DH public key") + AsymmetricPublicKeyDerError::InvalidDhPublicKey })?, }; spki .to_der() - .map_err(|_| type_error("invalid DH public key"))? + .map_err(|_| AsymmetricPublicKeyDerError::InvalidDhPublicKey)? .into_boxed_slice() } }; Ok(der) } - _ => Err(type_error(format!("unsupported key type: {}", typ))), + _ => Err(AsymmetricPublicKeyDerError::UnsupportedKeyType(typ.to_string())), } } } +#[derive(Debug, thiserror::Error)] +pub enum AsymmetricPrivateKeyDerError { + #[error("key is not an asymmetric private key")] + KeyIsNotAsymmetricPrivateKey, + #[error("invalid RSA private key")] + InvalidRsaPrivateKey, + #[error("exporting non-RSA private key as PKCS#1 is not supported")] + ExportingNonRsaPrivateKeyAsPkcs1Unsupported, + #[error("invalid EC private key")] + InvalidEcPrivateKey, + #[error("exporting non-EC private key as SEC1 is not supported")] + ExportingNonEcPrivateKeyAsSec1Unsupported, + #[error("exporting RSA-PSS private key as PKCS#8 is not supported yet")] + ExportingNonRsaPssPrivateKeyAsPkcs8Unsupported, + #[error("invalid DSA private key")] + InvalidDsaPrivateKey, + #[error("invalid X25519 private key")] + InvalidX25519PrivateKey, + #[error("invalid Ed25519 private key")] + InvalidEd25519PrivateKey, + #[error("invalid DH private key")] + InvalidDhPrivateKey, + #[error("unsupported key type: {0}")] + UnsupportedKeyType(String), +} + impl AsymmetricPrivateKey { fn export_der( &self, typ: &str, // cipher: Option<&str>, // passphrase: Option<&str>, - ) -> Result, AnyError> { + ) -> Result, AsymmetricPrivateKeyDerError> { match typ { "pkcs1" => match self { AsymmetricPrivateKey::Rsa(key) => { let der = key .to_pkcs1_der() - .map_err(|_| type_error("invalid RSA private key"))? + .map_err(|_| AsymmetricPrivateKeyDerError::InvalidRsaPrivateKey)? 
.to_bytes() .to_vec() .into_boxed_slice(); Ok(der) } - _ => Err(type_error( - "exporting non-RSA private key as PKCS#1 is not supported", - )), + _ => Err(AsymmetricPrivateKeyDerError::ExportingNonRsaPrivateKeyAsPkcs1Unsupported), }, "sec1" => match self { AsymmetricPrivateKey::Ec(key) => { @@ -1116,30 +1355,26 @@ impl AsymmetricPrivateKey { EcPrivateKey::P256(key) => key.to_sec1_der(), EcPrivateKey::P384(key) => key.to_sec1_der(), } - .map_err(|_| type_error("invalid EC private key"))?; + .map_err(|_| AsymmetricPrivateKeyDerError::InvalidEcPrivateKey)?; Ok(sec1.to_vec().into_boxed_slice()) } - _ => Err(type_error( - "exporting non-EC private key as SEC1 is not supported", - )), + _ => Err(AsymmetricPrivateKeyDerError::ExportingNonEcPrivateKeyAsSec1Unsupported), }, "pkcs8" => { let der = match self { AsymmetricPrivateKey::Rsa(key) => { let document = key .to_pkcs8_der() - .map_err(|_| type_error("invalid RSA private key"))?; + .map_err(|_| AsymmetricPrivateKeyDerError::InvalidRsaPrivateKey)?; document.to_bytes().to_vec().into_boxed_slice() } AsymmetricPrivateKey::RsaPss(_key) => { - return Err(generic_error( - "exporting RSA-PSS private key as PKCS#8 is not supported yet", - )) + return Err(AsymmetricPrivateKeyDerError::ExportingNonRsaPssPrivateKeyAsPkcs8Unsupported) } AsymmetricPrivateKey::Dsa(key) => { let document = key .to_pkcs8_der() - .map_err(|_| type_error("invalid DSA private key"))?; + .map_err(|_| AsymmetricPrivateKeyDerError::InvalidDsaPrivateKey)?; document.to_bytes().to_vec().into_boxed_slice() } AsymmetricPrivateKey::Ec(key) => { @@ -1148,14 +1383,14 @@ impl AsymmetricPrivateKey { EcPrivateKey::P256(key) => key.to_pkcs8_der(), EcPrivateKey::P384(key) => key.to_pkcs8_der(), } - .map_err(|_| type_error("invalid EC private key"))?; + .map_err(|_| AsymmetricPrivateKeyDerError::InvalidEcPrivateKey)?; document.to_bytes().to_vec().into_boxed_slice() } AsymmetricPrivateKey::X25519(key) => { let private_key = OctetStringRef::new(key.as_bytes()) - .map_err(|_| 
type_error("invalid X25519 private key"))? + .map_err(|_| AsymmetricPrivateKeyDerError::InvalidX25519PrivateKey)? .to_der() - .map_err(|_| type_error("invalid X25519 private key"))?; + .map_err(|_| AsymmetricPrivateKeyDerError::InvalidX25519PrivateKey)?; let private_key = PrivateKeyInfo { algorithm: rsa::pkcs8::AlgorithmIdentifierRef { @@ -1168,15 +1403,15 @@ impl AsymmetricPrivateKey { let der = private_key .to_der() - .map_err(|_| type_error("invalid X25519 private key"))? + .map_err(|_| AsymmetricPrivateKeyDerError::InvalidX25519PrivateKey)? .into_boxed_slice(); return Ok(der); } AsymmetricPrivateKey::Ed25519(key) => { let private_key = OctetStringRef::new(key.as_bytes()) - .map_err(|_| type_error("invalid Ed25519 private key"))? + .map_err(|_| AsymmetricPrivateKeyDerError::InvalidEd25519PrivateKey)? .to_der() - .map_err(|_| type_error("invalid Ed25519 private key"))?; + .map_err(|_| AsymmetricPrivateKeyDerError::InvalidEd25519PrivateKey)?; let private_key = PrivateKeyInfo { algorithm: rsa::pkcs8::AlgorithmIdentifierRef { @@ -1189,7 +1424,7 @@ impl AsymmetricPrivateKey { private_key .to_der() - .map_err(|_| type_error("invalid ED25519 private key"))? + .map_err(|_| AsymmetricPrivateKeyDerError::InvalidEd25519PrivateKey)? .into_boxed_slice() } AsymmetricPrivateKey::Dh(key) => { @@ -1206,14 +1441,14 @@ impl AsymmetricPrivateKey { private_key .to_der() - .map_err(|_| type_error("invalid DH private key"))? + .map_err(|_| AsymmetricPrivateKeyDerError::InvalidDhPrivateKey)? 
.into_boxed_slice() } }; Ok(der) } - _ => Err(type_error(format!("unsupported key type: {}", typ))), + _ => Err(AsymmetricPrivateKeyDerError::UnsupportedKeyType(typ.to_string())), } } } @@ -1225,7 +1460,7 @@ pub fn op_node_create_private_key( #[string] format: &str, #[string] typ: &str, #[buffer] passphrase: Option<&[u8]>, -) -> Result { +) -> Result { KeyObjectHandle::new_asymmetric_private_key_from_js( key, format, typ, passphrase, ) @@ -1237,7 +1472,7 @@ pub fn op_node_create_ed_raw( #[string] curve: &str, #[buffer] key: &[u8], is_public: bool, -) -> Result { +) -> Result { KeyObjectHandle::new_ed_raw(curve, key, is_public) } @@ -1255,16 +1490,16 @@ pub struct RsaJwkKey { pub fn op_node_create_rsa_jwk( #[serde] jwk: RsaJwkKey, is_public: bool, -) -> Result { +) -> Result { KeyObjectHandle::new_rsa_jwk(jwk, is_public) } #[op2] #[cppgc] pub fn op_node_create_ec_jwk( - #[serde] jwk: elliptic_curve::JwkEcKey, + #[serde] jwk: JwkEcKey, is_public: bool, -) -> Result { +) -> Result { KeyObjectHandle::new_ec_jwk(&jwk, is_public) } @@ -1275,7 +1510,7 @@ pub fn op_node_create_public_key( #[string] format: &str, #[string] typ: &str, #[buffer] passphrase: Option<&[u8]>, -) -> Result { +) -> Result { KeyObjectHandle::new_asymmetric_public_key_from_js( key, format, typ, passphrase, ) @@ -1293,7 +1528,7 @@ pub fn op_node_create_secret_key( #[string] pub fn op_node_get_asymmetric_key_type( #[cppgc] handle: &KeyObjectHandle, -) -> Result<&'static str, AnyError> { +) -> Result<&'static str, deno_core::error::AnyError> { match handle { KeyObjectHandle::AsymmetricPrivate(AsymmetricPrivateKey::Rsa(_)) | KeyObjectHandle::AsymmetricPublic(AsymmetricPublicKey::Rsa(_)) => { @@ -1364,7 +1599,7 @@ pub enum AsymmetricKeyDetails { #[serde] pub fn op_node_get_asymmetric_key_details( #[cppgc] handle: &KeyObjectHandle, -) -> Result { +) -> Result { match handle { KeyObjectHandle::AsymmetricPrivate(private_key) => match private_key { AsymmetricPrivateKey::Rsa(key) => { @@ -1482,12 +1717,10 @@ 
pub fn op_node_get_asymmetric_key_details( #[smi] pub fn op_node_get_symmetric_key_size( #[cppgc] handle: &KeyObjectHandle, -) -> Result { +) -> Result { match handle { - KeyObjectHandle::AsymmetricPrivate(_) => { - Err(type_error("asymmetric key is not a symmetric key")) - } - KeyObjectHandle::AsymmetricPublic(_) => { + KeyObjectHandle::AsymmetricPrivate(_) + | KeyObjectHandle::AsymmetricPublic(_) => { Err(type_error("asymmetric key is not a symmetric key")) } KeyObjectHandle::Secret(key) => Ok(key.len() * 8), @@ -1592,13 +1825,17 @@ pub async fn op_node_generate_rsa_key_async( .unwrap() } +#[derive(Debug, thiserror::Error)] +#[error("digest not allowed for RSA-PSS keys{}", .0.as_ref().map(|digest| format!(": {digest}")).unwrap_or_default())] +pub struct GenerateRsaPssError(Option); + fn generate_rsa_pss( modulus_length: usize, public_exponent: usize, hash_algorithm: Option<&str>, mf1_hash_algorithm: Option<&str>, salt_length: Option, -) -> Result { +) -> Result { let key = RsaPrivateKey::new_with_exp( &mut thread_rng(), modulus_length, @@ -1617,25 +1854,19 @@ fn generate_rsa_pss( let hash_algorithm = match_fixed_digest_with_oid!( hash_algorithm, fn (algorithm: Option) { - algorithm.ok_or_else(|| type_error("digest not allowed for RSA-PSS keys: {}"))? + algorithm.ok_or(GenerateRsaPssError(None))? }, _ => { - return Err(type_error(format!( - "digest not allowed for RSA-PSS keys: {}", - hash_algorithm - ))) + return Err(GenerateRsaPssError(Some(hash_algorithm.to_string()))) } ); let mf1_hash_algorithm = match_fixed_digest_with_oid!( mf1_hash_algorithm, fn (algorithm: Option) { - algorithm.ok_or_else(|| type_error("digest not allowed for RSA-PSS keys: {}"))? + algorithm.ok_or(GenerateRsaPssError(None))? 
}, _ => { - return Err(type_error(format!( - "digest not allowed for RSA-PSS keys: {}", - mf1_hash_algorithm - ))) + return Err(GenerateRsaPssError(Some(mf1_hash_algorithm.to_string()))) } ); let salt_length = @@ -1663,7 +1894,7 @@ pub fn op_node_generate_rsa_pss_key( #[string] hash_algorithm: Option, // todo: Option<&str> not supproted in ops yet #[string] mf1_hash_algorithm: Option, // todo: Option<&str> not supproted in ops yet #[smi] salt_length: Option, -) -> Result { +) -> Result { generate_rsa_pss( modulus_length, public_exponent, @@ -1681,7 +1912,7 @@ pub async fn op_node_generate_rsa_pss_key_async( #[string] hash_algorithm: Option, // todo: Option<&str> not supproted in ops yet #[string] mf1_hash_algorithm: Option, // todo: Option<&str> not supproted in ops yet #[smi] salt_length: Option, -) -> Result { +) -> Result { spawn_blocking(move || { generate_rsa_pss( modulus_length, @@ -1698,7 +1929,7 @@ pub async fn op_node_generate_rsa_pss_key_async( fn dsa_generate( modulus_length: usize, divisor_length: usize, -) -> Result { +) -> Result { let mut rng = rand::thread_rng(); use dsa::Components; use dsa::KeySize; @@ -1729,7 +1960,7 @@ fn dsa_generate( pub fn op_node_generate_dsa_key( #[smi] modulus_length: usize, #[smi] divisor_length: usize, -) -> Result { +) -> Result { dsa_generate(modulus_length, divisor_length) } @@ -1738,13 +1969,15 @@ pub fn op_node_generate_dsa_key( pub async fn op_node_generate_dsa_key_async( #[smi] modulus_length: usize, #[smi] divisor_length: usize, -) -> Result { +) -> Result { spawn_blocking(move || dsa_generate(modulus_length, divisor_length)) .await .unwrap() } -fn ec_generate(named_curve: &str) -> Result { +fn ec_generate( + named_curve: &str, +) -> Result { let mut rng = rand::thread_rng(); // TODO(@littledivy): Support public key point encoding. // Default is uncompressed. 
@@ -1776,7 +2009,7 @@ fn ec_generate(named_curve: &str) -> Result { #[cppgc] pub fn op_node_generate_ec_key( #[string] named_curve: &str, -) -> Result { +) -> Result { ec_generate(named_curve) } @@ -1784,7 +2017,7 @@ pub fn op_node_generate_ec_key( #[cppgc] pub async fn op_node_generate_ec_key_async( #[string] named_curve: String, -) -> Result { +) -> Result { spawn_blocking(move || ec_generate(&named_curve)) .await .unwrap() @@ -1840,7 +2073,7 @@ fn u32_slice_to_u8_slice(slice: &[u32]) -> &[u8] { fn dh_group_generate( group_name: &str, -) -> Result { +) -> Result { let (dh, prime, generator) = match group_name { "modp5" => ( dh::DiffieHellman::group::(), @@ -1895,7 +2128,7 @@ fn dh_group_generate( #[cppgc] pub fn op_node_generate_dh_group_key( #[string] group_name: &str, -) -> Result { +) -> Result { dh_group_generate(group_name) } @@ -1903,7 +2136,7 @@ pub fn op_node_generate_dh_group_key( #[cppgc] pub async fn op_node_generate_dh_group_key_async( #[string] group_name: String, -) -> Result { +) -> Result { spawn_blocking(move || dh_group_generate(&group_name)) .await .unwrap() @@ -1913,7 +2146,7 @@ fn dh_generate( prime: Option<&[u8]>, prime_len: usize, generator: usize, -) -> Result { +) -> KeyObjectHandlePair { let prime = prime .map(|p| p.into()) .unwrap_or_else(|| Prime::generate(prime_len)); @@ -1923,7 +2156,7 @@ fn dh_generate( base: asn1::Int::new(generator.to_be_bytes().as_slice()).unwrap(), private_value_length: None, }; - Ok(KeyObjectHandlePair::new( + KeyObjectHandlePair::new( AsymmetricPrivateKey::Dh(DhPrivateKey { key: dh.private_key, params: params.clone(), @@ -1932,7 +2165,7 @@ fn dh_generate( key: dh.public_key, params, }), - )) + ) } #[op2] @@ -1941,7 +2174,7 @@ pub fn op_node_generate_dh_key( #[buffer] prime: Option<&[u8]>, #[smi] prime_len: usize, #[smi] generator: usize, -) -> Result { +) -> KeyObjectHandlePair { dh_generate(prime, prime_len, generator) } @@ -1951,7 +2184,7 @@ pub async fn op_node_generate_dh_key_async( #[buffer(copy)] prime: 
Option>, #[smi] prime_len: usize, #[smi] generator: usize, -) -> Result { +) -> KeyObjectHandlePair { spawn_blocking(move || dh_generate(prime.as_deref(), prime_len, generator)) .await .unwrap() @@ -1963,21 +2196,21 @@ pub fn op_node_dh_keys_generate_and_export( #[buffer] prime: Option<&[u8]>, #[smi] prime_len: usize, #[smi] generator: usize, -) -> Result<(ToJsBuffer, ToJsBuffer), AnyError> { +) -> (ToJsBuffer, ToJsBuffer) { let prime = prime .map(|p| p.into()) .unwrap_or_else(|| Prime::generate(prime_len)); let dh = dh::DiffieHellman::new(prime, generator); let private_key = dh.private_key.into_vec().into_boxed_slice(); let public_key = dh.public_key.into_vec().into_boxed_slice(); - Ok((private_key.into(), public_key.into())) + (private_key.into(), public_key.into()) } #[op2] #[buffer] pub fn op_node_export_secret_key( #[cppgc] handle: &KeyObjectHandle, -) -> Result, AnyError> { +) -> Result, deno_core::error::AnyError> { let key = handle .as_secret_key() .ok_or_else(|| type_error("key is not a secret key"))?; @@ -1988,7 +2221,7 @@ pub fn op_node_export_secret_key( #[string] pub fn op_node_export_secret_key_b64url( #[cppgc] handle: &KeyObjectHandle, -) -> Result { +) -> Result { let key = handle .as_secret_key() .ok_or_else(|| type_error("key is not a secret key"))?; @@ -1999,23 +2232,33 @@ pub fn op_node_export_secret_key_b64url( #[serde] pub fn op_node_export_public_key_jwk( #[cppgc] handle: &KeyObjectHandle, -) -> Result { +) -> Result { let public_key = handle .as_public_key() - .ok_or_else(|| type_error("key is not an asymmetric public key"))?; + .ok_or(AsymmetricPublicKeyJwkError::KeyIsNotAsymmetricPublicKey)?; public_key.export_jwk() } +#[derive(Debug, thiserror::Error)] +pub enum ExportPublicKeyPemError { + #[error(transparent)] + AsymmetricPublicKeyDer(#[from] AsymmetricPublicKeyDerError), + #[error("very large data")] + VeryLargeData, + #[error(transparent)] + Der(#[from] der::Error), +} + #[op2] #[string] pub fn op_node_export_public_key_pem( #[cppgc] 
handle: &KeyObjectHandle, #[string] typ: &str, -) -> Result { +) -> Result { let public_key = handle .as_public_key() - .ok_or_else(|| type_error("key is not an asymmetric public key"))?; + .ok_or(AsymmetricPublicKeyDerError::KeyIsNotAsymmetricPublicKey)?; let data = public_key.export_der(typ)?; let label = match typ { @@ -2025,7 +2268,7 @@ pub fn op_node_export_public_key_pem( }; let pem_len = der::pem::encapsulated_len(label, LineEnding::LF, data.len()) - .map_err(|_| type_error("very large data"))?; + .map_err(|_| ExportPublicKeyPemError::VeryLargeData)?; let mut out = vec![0; pem_len]; let mut writer = PemWriter::new(label, LineEnding::LF, &mut out)?; writer.write(&data)?; @@ -2040,22 +2283,32 @@ pub fn op_node_export_public_key_pem( pub fn op_node_export_public_key_der( #[cppgc] handle: &KeyObjectHandle, #[string] typ: &str, -) -> Result, AnyError> { +) -> Result, AsymmetricPublicKeyDerError> { let public_key = handle .as_public_key() - .ok_or_else(|| type_error("key is not an asymmetric public key"))?; + .ok_or(AsymmetricPublicKeyDerError::KeyIsNotAsymmetricPublicKey)?; public_key.export_der(typ) } +#[derive(Debug, thiserror::Error)] +pub enum ExportPrivateKeyPemError { + #[error(transparent)] + AsymmetricPublicKeyDer(#[from] AsymmetricPrivateKeyDerError), + #[error("very large data")] + VeryLargeData, + #[error(transparent)] + Der(#[from] der::Error), +} + #[op2] #[string] pub fn op_node_export_private_key_pem( #[cppgc] handle: &KeyObjectHandle, #[string] typ: &str, -) -> Result { +) -> Result { let private_key = handle .as_private_key() - .ok_or_else(|| type_error("key is not an asymmetric private key"))?; + .ok_or(AsymmetricPrivateKeyDerError::KeyIsNotAsymmetricPrivateKey)?; let data = private_key.export_der(typ)?; let label = match typ { @@ -2066,7 +2319,7 @@ pub fn op_node_export_private_key_pem( }; let pem_len = der::pem::encapsulated_len(label, LineEnding::LF, data.len()) - .map_err(|_| type_error("very large data"))?; + .map_err(|_| 
ExportPrivateKeyPemError::VeryLargeData)?; let mut out = vec![0; pem_len]; let mut writer = PemWriter::new(label, LineEnding::LF, &mut out)?; writer.write(&data)?; @@ -2081,10 +2334,10 @@ pub fn op_node_export_private_key_pem( pub fn op_node_export_private_key_der( #[cppgc] handle: &KeyObjectHandle, #[string] typ: &str, -) -> Result, AnyError> { +) -> Result, AsymmetricPrivateKeyDerError> { let private_key = handle .as_private_key() - .ok_or_else(|| type_error("key is not an asymmetric private key"))?; + .ok_or(AsymmetricPrivateKeyDerError::KeyIsNotAsymmetricPrivateKey)?; private_key.export_der(typ) } @@ -2102,7 +2355,7 @@ pub fn op_node_key_type(#[cppgc] handle: &KeyObjectHandle) -> &'static str { #[cppgc] pub fn op_node_derive_public_key_from_private_key( #[cppgc] handle: &KeyObjectHandle, -) -> Result { +) -> Result { let Some(private_key) = handle.as_private_key() else { return Err(type_error("expected private key")); }; diff --git a/ext/node/ops/crypto/mod.rs b/ext/node/ops/crypto/mod.rs index 0cf34511b..e90e82090 100644 --- a/ext/node/ops/crypto/mod.rs +++ b/ext/node/ops/crypto/mod.rs @@ -1,7 +1,6 @@ // Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. 
use deno_core::error::generic_error; use deno_core::error::type_error; -use deno_core::error::AnyError; use deno_core::op2; use deno_core::unsync::spawn_blocking; use deno_core::JsBuffer; @@ -34,14 +33,14 @@ use rsa::Pkcs1v15Encrypt; use rsa::RsaPrivateKey; use rsa::RsaPublicKey; -mod cipher; +pub mod cipher; mod dh; -mod digest; +pub mod digest; pub mod keys; mod md5_sha1; mod pkcs3; mod primes; -mod sign; +pub mod sign; pub mod x509; use self::digest::match_fixed_digest_with_eager_block_buffer; @@ -58,38 +57,31 @@ pub fn op_node_check_prime( pub fn op_node_check_prime_bytes( #[anybuffer] bytes: &[u8], #[number] checks: usize, -) -> Result { +) -> bool { let candidate = BigInt::from_bytes_be(num_bigint::Sign::Plus, bytes); - Ok(primes::is_probably_prime(&candidate, checks)) + primes::is_probably_prime(&candidate, checks) } #[op2(async)] pub async fn op_node_check_prime_async( #[bigint] num: i64, #[number] checks: usize, -) -> Result { +) -> Result { // TODO(@littledivy): use rayon for CPU-bound tasks - Ok( - spawn_blocking(move || { - primes::is_probably_prime(&BigInt::from(num), checks) - }) - .await?, - ) + spawn_blocking(move || primes::is_probably_prime(&BigInt::from(num), checks)) + .await } #[op2(async)] pub fn op_node_check_prime_bytes_async( #[anybuffer] bytes: &[u8], #[number] checks: usize, -) -> Result>, AnyError> { +) -> impl Future> { let candidate = BigInt::from_bytes_be(num_bigint::Sign::Plus, bytes); // TODO(@littledivy): use rayon for CPU-bound tasks - Ok(async move { - Ok( - spawn_blocking(move || primes::is_probably_prime(&candidate, checks)) - .await?, - ) - }) + async move { + spawn_blocking(move || primes::is_probably_prime(&candidate, checks)).await + } } #[op2] @@ -97,7 +89,7 @@ pub fn op_node_check_prime_bytes_async( pub fn op_node_create_hash( #[string] algorithm: &str, output_length: Option, -) -> Result { +) -> Result { digest::Hasher::new(algorithm, output_length.map(|l| l as usize)) } @@ -145,17 +137,31 @@ pub fn 
op_node_hash_digest_hex( pub fn op_node_hash_clone( #[cppgc] hasher: &digest::Hasher, output_length: Option, -) -> Result, AnyError> { +) -> Result, digest::HashError> { hasher.clone_inner(output_length.map(|l| l as usize)) } +#[derive(Debug, thiserror::Error)] +pub enum PrivateEncryptDecryptError { + #[error(transparent)] + Pkcs8(#[from] pkcs8::Error), + #[error(transparent)] + Spki(#[from] spki::Error), + #[error(transparent)] + Utf8(#[from] std::str::Utf8Error), + #[error(transparent)] + Rsa(#[from] rsa::Error), + #[error("Unknown padding")] + UnknownPadding, +} + #[op2] #[serde] pub fn op_node_private_encrypt( #[serde] key: StringOrBuffer, #[serde] msg: StringOrBuffer, #[smi] padding: u32, -) -> Result { +) -> Result { let key = RsaPrivateKey::from_pkcs8_pem((&key).try_into()?)?; let mut rng = rand::thread_rng(); @@ -172,7 +178,7 @@ pub fn op_node_private_encrypt( .encrypt(&mut rng, Oaep::new::(), &msg)? .into(), ), - _ => Err(type_error("Unknown padding")), + _ => Err(PrivateEncryptDecryptError::UnknownPadding), } } @@ -182,13 +188,13 @@ pub fn op_node_private_decrypt( #[serde] key: StringOrBuffer, #[serde] msg: StringOrBuffer, #[smi] padding: u32, -) -> Result { +) -> Result { let key = RsaPrivateKey::from_pkcs8_pem((&key).try_into()?)?; match padding { 1 => Ok(key.decrypt(Pkcs1v15Encrypt, &msg)?.into()), 4 => Ok(key.decrypt(Oaep::new::(), &msg)?.into()), - _ => Err(type_error("Unknown padding")), + _ => Err(PrivateEncryptDecryptError::UnknownPadding), } } @@ -198,7 +204,7 @@ pub fn op_node_public_encrypt( #[serde] key: StringOrBuffer, #[serde] msg: StringOrBuffer, #[smi] padding: u32, -) -> Result { +) -> Result { let key = RsaPublicKey::from_public_key_pem((&key).try_into()?)?; let mut rng = rand::thread_rng(); @@ -209,7 +215,7 @@ pub fn op_node_public_encrypt( .encrypt(&mut rng, Oaep::new::(), &msg)? 
.into(), ), - _ => Err(type_error("Unknown padding")), + _ => Err(PrivateEncryptDecryptError::UnknownPadding), } } @@ -220,7 +226,7 @@ pub fn op_node_create_cipheriv( #[string] algorithm: &str, #[buffer] key: &[u8], #[buffer] iv: &[u8], -) -> Result { +) -> Result { let context = cipher::CipherContext::new(algorithm, key, iv)?; Ok(state.resource_table.add(context)) } @@ -262,11 +268,14 @@ pub fn op_node_cipheriv_final( auto_pad: bool, #[buffer] input: &[u8], #[anybuffer] output: &mut [u8], -) -> Result>, AnyError> { - let context = state.resource_table.take::(rid)?; +) -> Result>, cipher::CipherContextError> { + let context = state + .resource_table + .take::(rid) + .map_err(cipher::CipherContextError::Resource)?; let context = Rc::try_unwrap(context) - .map_err(|_| type_error("Cipher context is already in use"))?; - context.r#final(auto_pad, input, output) + .map_err(|_| cipher::CipherContextError::ContextInUse)?; + context.r#final(auto_pad, input, output).map_err(Into::into) } #[op2] @@ -274,10 +283,13 @@ pub fn op_node_cipheriv_final( pub fn op_node_cipheriv_take( state: &mut OpState, #[smi] rid: u32, -) -> Result>, AnyError> { - let context = state.resource_table.take::(rid)?; +) -> Result>, cipher::CipherContextError> { + let context = state + .resource_table + .take::(rid) + .map_err(cipher::CipherContextError::Resource)?; let context = Rc::try_unwrap(context) - .map_err(|_| type_error("Cipher context is already in use"))?; + .map_err(|_| cipher::CipherContextError::ContextInUse)?; Ok(context.take_tag()) } @@ -288,7 +300,7 @@ pub fn op_node_create_decipheriv( #[string] algorithm: &str, #[buffer] key: &[u8], #[buffer] iv: &[u8], -) -> Result { +) -> Result { let context = cipher::DecipherContext::new(algorithm, key, iv)?; Ok(state.resource_table.add(context)) } @@ -326,10 +338,13 @@ pub fn op_node_decipheriv_decrypt( pub fn op_node_decipheriv_take( state: &mut OpState, #[smi] rid: u32, -) -> Result<(), AnyError> { - let context = 
state.resource_table.take::(rid)?; +) -> Result<(), cipher::DecipherContextError> { + let context = state + .resource_table + .take::(rid) + .map_err(cipher::DecipherContextError::Resource)?; Rc::try_unwrap(context) - .map_err(|_| type_error("Cipher context is already in use"))?; + .map_err(|_| cipher::DecipherContextError::ContextInUse)?; Ok(()) } @@ -341,11 +356,16 @@ pub fn op_node_decipheriv_final( #[buffer] input: &[u8], #[anybuffer] output: &mut [u8], #[buffer] auth_tag: &[u8], -) -> Result<(), AnyError> { - let context = state.resource_table.take::(rid)?; +) -> Result<(), cipher::DecipherContextError> { + let context = state + .resource_table + .take::(rid) + .map_err(cipher::DecipherContextError::Resource)?; let context = Rc::try_unwrap(context) - .map_err(|_| type_error("Cipher context is already in use"))?; - context.r#final(auto_pad, input, output, auth_tag) + .map_err(|_| cipher::DecipherContextError::ContextInUse)?; + context + .r#final(auto_pad, input, output, auth_tag) + .map_err(Into::into) } #[op2] @@ -356,7 +376,7 @@ pub fn op_node_sign( #[string] digest_type: &str, #[smi] pss_salt_length: Option, #[smi] dsa_signature_encoding: u32, -) -> Result, AnyError> { +) -> Result, sign::KeyObjectHandlePrehashedSignAndVerifyError> { handle.sign_prehashed( digest_type, digest, @@ -373,7 +393,7 @@ pub fn op_node_verify( #[buffer] signature: &[u8], #[smi] pss_salt_length: Option, #[smi] dsa_signature_encoding: u32, -) -> Result { +) -> Result { handle.verify_prehashed( digest_type, digest, @@ -383,13 +403,21 @@ pub fn op_node_verify( ) } +#[derive(Debug, thiserror::Error)] +pub enum Pbkdf2Error { + #[error("unsupported digest: {0}")] + UnsupportedDigest(String), + #[error(transparent)] + Join(#[from] tokio::task::JoinError), +} + fn pbkdf2_sync( password: &[u8], salt: &[u8], iterations: u32, algorithm_name: &str, derived_key: &mut [u8], -) -> Result<(), AnyError> { +) -> Result<(), Pbkdf2Error> { match_fixed_digest_with_eager_block_buffer!( algorithm_name, fn 
() { @@ -397,10 +425,7 @@ fn pbkdf2_sync( Ok(()) }, _ => { - Err(type_error(format!( - "unsupported digest: {}", - algorithm_name - ))) + Err(Pbkdf2Error::UnsupportedDigest(algorithm_name.to_string())) } ) } @@ -424,7 +449,7 @@ pub async fn op_node_pbkdf2_async( #[smi] iterations: u32, #[string] digest: String, #[number] keylen: usize, -) -> Result { +) -> Result { spawn_blocking(move || { let mut derived_key = vec![0; keylen]; pbkdf2_sync(&password, &salt, iterations, &digest, &mut derived_key) @@ -450,15 +475,27 @@ pub async fn op_node_fill_random_async(#[smi] len: i32) -> ToJsBuffer { .unwrap() } +#[derive(Debug, thiserror::Error)] +pub enum HkdfError { + #[error("expected secret key")] + ExpectedSecretKey, + #[error("HKDF-Expand failed")] + HkdfExpandFailed, + #[error("Unsupported digest: {0}")] + UnsupportedDigest(String), + #[error(transparent)] + Join(#[from] tokio::task::JoinError), +} + fn hkdf_sync( digest_algorithm: &str, handle: &KeyObjectHandle, salt: &[u8], info: &[u8], okm: &mut [u8], -) -> Result<(), AnyError> { +) -> Result<(), HkdfError> { let Some(ikm) = handle.as_secret_key() else { - return Err(type_error("expected secret key")); + return Err(HkdfError::ExpectedSecretKey); }; match_fixed_digest_with_eager_block_buffer!( @@ -466,10 +503,10 @@ fn hkdf_sync( fn () { let hk = Hkdf::::new(Some(salt), ikm); hk.expand(info, okm) - .map_err(|_| type_error("HKDF-Expand failed")) + .map_err(|_| HkdfError::HkdfExpandFailed) }, _ => { - Err(type_error(format!("Unsupported digest: {}", digest_algorithm))) + Err(HkdfError::UnsupportedDigest(digest_algorithm.to_string())) } ) } @@ -481,7 +518,7 @@ pub fn op_node_hkdf( #[buffer] salt: &[u8], #[buffer] info: &[u8], #[buffer] okm: &mut [u8], -) -> Result<(), AnyError> { +) -> Result<(), HkdfError> { hkdf_sync(digest_algorithm, handle, salt, info, okm) } @@ -493,7 +530,7 @@ pub async fn op_node_hkdf_async( #[buffer] salt: JsBuffer, #[buffer] info: JsBuffer, #[number] okm_len: usize, -) -> Result { +) -> Result { 
let handle = handle.clone(); spawn_blocking(move || { let mut okm = vec![0u8; okm_len]; @@ -509,27 +546,24 @@ pub fn op_node_dh_compute_secret( #[buffer] prime: JsBuffer, #[buffer] private_key: JsBuffer, #[buffer] their_public_key: JsBuffer, -) -> Result { +) -> ToJsBuffer { let pubkey: BigUint = BigUint::from_bytes_be(their_public_key.as_ref()); let privkey: BigUint = BigUint::from_bytes_be(private_key.as_ref()); let primei: BigUint = BigUint::from_bytes_be(prime.as_ref()); let shared_secret: BigUint = pubkey.modpow(&privkey, &primei); - Ok(shared_secret.to_bytes_be().into()) + shared_secret.to_bytes_be().into() } #[op2(fast)] #[number] -pub fn op_node_random_int( - #[number] min: i64, - #[number] max: i64, -) -> Result { +pub fn op_node_random_int(#[number] min: i64, #[number] max: i64) -> i64 { let mut rng = rand::thread_rng(); // Uniform distribution is required to avoid Modulo Bias // https://en.wikipedia.org/wiki/Fisher–Yates_shuffle#Modulo_bias let dist = Uniform::from(min..max); - Ok(dist.sample(&mut rng)) + dist.sample(&mut rng) } #[allow(clippy::too_many_arguments)] @@ -542,7 +576,7 @@ fn scrypt( parallelization: u32, _maxmem: u32, output_buffer: &mut [u8], -) -> Result<(), AnyError> { +) -> Result<(), deno_core::error::AnyError> { // Construct Params let params = scrypt::Params::new( cost as u8, @@ -573,7 +607,7 @@ pub fn op_node_scrypt_sync( #[smi] parallelization: u32, #[smi] maxmem: u32, #[anybuffer] output_buffer: &mut [u8], -) -> Result<(), AnyError> { +) -> Result<(), deno_core::error::AnyError> { scrypt( password, salt, @@ -586,6 +620,14 @@ pub fn op_node_scrypt_sync( ) } +#[derive(Debug, thiserror::Error)] +pub enum ScryptAsyncError { + #[error(transparent)] + Join(#[from] tokio::task::JoinError), + #[error(transparent)] + Other(deno_core::error::AnyError), +} + #[op2(async)] #[serde] pub async fn op_node_scrypt_async( @@ -596,10 +638,11 @@ pub async fn op_node_scrypt_async( #[smi] block_size: u32, #[smi] parallelization: u32, #[smi] maxmem: u32, 
-) -> Result { +) -> Result { spawn_blocking(move || { let mut output_buffer = vec![0u8; keylen as usize]; - let res = scrypt( + + scrypt( password, salt, keylen, @@ -608,25 +651,30 @@ pub async fn op_node_scrypt_async( parallelization, maxmem, &mut output_buffer, - ); - - if res.is_ok() { - Ok(output_buffer.into()) - } else { - // TODO(lev): rethrow the error? - Err(generic_error("scrypt failure")) - } + ) + .map(|_| output_buffer.into()) + .map_err(ScryptAsyncError::Other) }) .await? } +#[derive(Debug, thiserror::Error)] +pub enum EcdhEncodePubKey { + #[error("Invalid public key")] + InvalidPublicKey, + #[error("Unsupported curve")] + UnsupportedCurve, + #[error(transparent)] + Sec1(#[from] sec1::Error), +} + #[op2] #[buffer] pub fn op_node_ecdh_encode_pubkey( #[string] curve: &str, #[buffer] pubkey: &[u8], compress: bool, -) -> Result, AnyError> { +) -> Result, EcdhEncodePubKey> { use elliptic_curve::sec1::FromEncodedPoint; match curve { @@ -639,7 +687,7 @@ pub fn op_node_ecdh_encode_pubkey( ); // CtOption does not expose its variants. if pubkey.is_none().into() { - return Err(type_error("Invalid public key")); + return Err(EcdhEncodePubKey::InvalidPublicKey); } let pubkey = pubkey.unwrap(); @@ -652,7 +700,7 @@ pub fn op_node_ecdh_encode_pubkey( ); // CtOption does not expose its variants. if pubkey.is_none().into() { - return Err(type_error("Invalid public key")); + return Err(EcdhEncodePubKey::InvalidPublicKey); } let pubkey = pubkey.unwrap(); @@ -665,7 +713,7 @@ pub fn op_node_ecdh_encode_pubkey( ); // CtOption does not expose its variants. if pubkey.is_none().into() { - return Err(type_error("Invalid public key")); + return Err(EcdhEncodePubKey::InvalidPublicKey); } let pubkey = pubkey.unwrap(); @@ -678,14 +726,14 @@ pub fn op_node_ecdh_encode_pubkey( ); // CtOption does not expose its variants. 
if pubkey.is_none().into() { - return Err(type_error("Invalid public key")); + return Err(EcdhEncodePubKey::InvalidPublicKey); } let pubkey = pubkey.unwrap(); Ok(pubkey.to_encoded_point(compress).as_ref().to_vec()) } - &_ => Err(type_error("Unsupported curve")), + &_ => Err(EcdhEncodePubKey::UnsupportedCurve), } } @@ -695,7 +743,7 @@ pub fn op_node_ecdh_generate_keys( #[buffer] pubbuf: &mut [u8], #[buffer] privbuf: &mut [u8], #[string] format: &str, -) -> Result<(), AnyError> { +) -> Result<(), deno_core::error::AnyError> { let mut rng = rand::thread_rng(); let compress = format == "compressed"; match curve { @@ -742,7 +790,7 @@ pub fn op_node_ecdh_compute_secret( #[buffer] this_priv: Option, #[buffer] their_pub: &mut [u8], #[buffer] secret: &mut [u8], -) -> Result<(), AnyError> { +) { match curve { "secp256k1" => { let their_public_key = @@ -760,8 +808,6 @@ pub fn op_node_ecdh_compute_secret( their_public_key.as_affine(), ); secret.copy_from_slice(shared_secret.raw_secret_bytes()); - - Ok(()) } "prime256v1" | "secp256r1" => { let their_public_key = @@ -776,8 +822,6 @@ pub fn op_node_ecdh_compute_secret( their_public_key.as_affine(), ); secret.copy_from_slice(shared_secret.raw_secret_bytes()); - - Ok(()) } "secp384r1" => { let their_public_key = @@ -792,8 +836,6 @@ pub fn op_node_ecdh_compute_secret( their_public_key.as_affine(), ); secret.copy_from_slice(shared_secret.raw_secret_bytes()); - - Ok(()) } "secp224r1" => { let their_public_key = @@ -808,8 +850,6 @@ pub fn op_node_ecdh_compute_secret( their_public_key.as_affine(), ); secret.copy_from_slice(shared_secret.raw_secret_bytes()); - - Ok(()) } &_ => todo!(), } @@ -820,7 +860,7 @@ pub fn op_node_ecdh_compute_public_key( #[string] curve: &str, #[buffer] privkey: &[u8], #[buffer] pubkey: &mut [u8], -) -> Result<(), AnyError> { +) { match curve { "secp256k1" => { let this_private_key = @@ -828,8 +868,6 @@ pub fn op_node_ecdh_compute_public_key( .expect("bad private key"); let public_key = 
this_private_key.public_key(); pubkey.copy_from_slice(public_key.to_sec1_bytes().as_ref()); - - Ok(()) } "prime256v1" | "secp256r1" => { let this_private_key = @@ -837,7 +875,6 @@ pub fn op_node_ecdh_compute_public_key( .expect("bad private key"); let public_key = this_private_key.public_key(); pubkey.copy_from_slice(public_key.to_sec1_bytes().as_ref()); - Ok(()) } "secp384r1" => { let this_private_key = @@ -845,7 +882,6 @@ pub fn op_node_ecdh_compute_public_key( .expect("bad private key"); let public_key = this_private_key.public_key(); pubkey.copy_from_slice(public_key.to_sec1_bytes().as_ref()); - Ok(()) } "secp224r1" => { let this_private_key = @@ -853,7 +889,6 @@ pub fn op_node_ecdh_compute_public_key( .expect("bad private key"); let public_key = this_private_key.public_key(); pubkey.copy_from_slice(public_key.to_sec1_bytes().as_ref()); - Ok(()) } &_ => todo!(), } @@ -874,8 +909,20 @@ pub fn op_node_gen_prime(#[number] size: usize) -> ToJsBuffer { #[serde] pub async fn op_node_gen_prime_async( #[number] size: usize, -) -> Result { - Ok(spawn_blocking(move || gen_prime(size)).await?) 
+) -> Result { + spawn_blocking(move || gen_prime(size)).await +} + +#[derive(Debug, thiserror::Error)] +pub enum DiffieHellmanError { + #[error("Expected private key")] + ExpectedPrivateKey, + #[error("Expected public key")] + ExpectedPublicKey, + #[error("DH parameters mismatch")] + DhParametersMismatch, + #[error("Unsupported key type for diffie hellman, or key type mismatch")] + UnsupportedKeyTypeForDiffieHellmanOrKeyTypeMismatch, } #[op2] @@ -883,117 +930,134 @@ pub async fn op_node_gen_prime_async( pub fn op_node_diffie_hellman( #[cppgc] private: &KeyObjectHandle, #[cppgc] public: &KeyObjectHandle, -) -> Result, AnyError> { +) -> Result, DiffieHellmanError> { let private = private .as_private_key() - .ok_or_else(|| type_error("Expected private key"))?; + .ok_or(DiffieHellmanError::ExpectedPrivateKey)?; let public = public .as_public_key() - .ok_or_else(|| type_error("Expected public key"))?; - - let res = match (private, &*public) { - ( - AsymmetricPrivateKey::Ec(EcPrivateKey::P224(private)), - AsymmetricPublicKey::Ec(EcPublicKey::P224(public)), - ) => p224::ecdh::diffie_hellman( - private.to_nonzero_scalar(), - public.as_affine(), - ) - .raw_secret_bytes() - .to_vec() - .into_boxed_slice(), - ( - AsymmetricPrivateKey::Ec(EcPrivateKey::P256(private)), - AsymmetricPublicKey::Ec(EcPublicKey::P256(public)), - ) => p256::ecdh::diffie_hellman( - private.to_nonzero_scalar(), - public.as_affine(), - ) - .raw_secret_bytes() - .to_vec() - .into_boxed_slice(), - ( - AsymmetricPrivateKey::Ec(EcPrivateKey::P384(private)), - AsymmetricPublicKey::Ec(EcPublicKey::P384(public)), - ) => p384::ecdh::diffie_hellman( - private.to_nonzero_scalar(), - public.as_affine(), - ) - .raw_secret_bytes() - .to_vec() - .into_boxed_slice(), - ( - AsymmetricPrivateKey::X25519(private), - AsymmetricPublicKey::X25519(public), - ) => private - .diffie_hellman(public) - .to_bytes() - .into_iter() - .collect(), - (AsymmetricPrivateKey::Dh(private), AsymmetricPublicKey::Dh(public)) => { - if 
private.params.prime != public.params.prime - || private.params.base != public.params.base - { - return Err(type_error("DH parameters mismatch")); + .ok_or(DiffieHellmanError::ExpectedPublicKey)?; + + let res = + match (private, &*public) { + ( + AsymmetricPrivateKey::Ec(EcPrivateKey::P224(private)), + AsymmetricPublicKey::Ec(EcPublicKey::P224(public)), + ) => p224::ecdh::diffie_hellman( + private.to_nonzero_scalar(), + public.as_affine(), + ) + .raw_secret_bytes() + .to_vec() + .into_boxed_slice(), + ( + AsymmetricPrivateKey::Ec(EcPrivateKey::P256(private)), + AsymmetricPublicKey::Ec(EcPublicKey::P256(public)), + ) => p256::ecdh::diffie_hellman( + private.to_nonzero_scalar(), + public.as_affine(), + ) + .raw_secret_bytes() + .to_vec() + .into_boxed_slice(), + ( + AsymmetricPrivateKey::Ec(EcPrivateKey::P384(private)), + AsymmetricPublicKey::Ec(EcPublicKey::P384(public)), + ) => p384::ecdh::diffie_hellman( + private.to_nonzero_scalar(), + public.as_affine(), + ) + .raw_secret_bytes() + .to_vec() + .into_boxed_slice(), + ( + AsymmetricPrivateKey::X25519(private), + AsymmetricPublicKey::X25519(public), + ) => private + .diffie_hellman(public) + .to_bytes() + .into_iter() + .collect(), + (AsymmetricPrivateKey::Dh(private), AsymmetricPublicKey::Dh(public)) => { + if private.params.prime != public.params.prime + || private.params.base != public.params.base + { + return Err(DiffieHellmanError::DhParametersMismatch); + } + + // OSIP - Octet-String-to-Integer primitive + let public_key = public.key.clone().into_vec(); + let pubkey = BigUint::from_bytes_be(&public_key); + + // Exponentiation (z = y^x mod p) + let prime = BigUint::from_bytes_be(private.params.prime.as_bytes()); + let private_key = private.key.clone().into_vec(); + let private_key = BigUint::from_bytes_be(&private_key); + let shared_secret = pubkey.modpow(&private_key, &prime); + + shared_secret.to_bytes_be().into() } - - // OSIP - Octet-String-to-Integer primitive - let public_key = 
public.key.clone().into_vec(); - let pubkey = BigUint::from_bytes_be(&public_key); - - // Exponentiation (z = y^x mod p) - let prime = BigUint::from_bytes_be(private.params.prime.as_bytes()); - let private_key = private.key.clone().into_vec(); - let private_key = BigUint::from_bytes_be(&private_key); - let shared_secret = pubkey.modpow(&private_key, &prime); - - shared_secret.to_bytes_be().into() - } - _ => { - return Err(type_error( - "Unsupported key type for diffie hellman, or key type mismatch", - )) - } - }; + _ => return Err( + DiffieHellmanError::UnsupportedKeyTypeForDiffieHellmanOrKeyTypeMismatch, + ), + }; Ok(res) } +#[derive(Debug, thiserror::Error)] +pub enum SignEd25519Error { + #[error("Expected private key")] + ExpectedPrivateKey, + #[error("Expected Ed25519 private key")] + ExpectedEd25519PrivateKey, + #[error("Invalid Ed25519 private key")] + InvalidEd25519PrivateKey, +} + #[op2(fast)] pub fn op_node_sign_ed25519( #[cppgc] key: &KeyObjectHandle, #[buffer] data: &[u8], #[buffer] signature: &mut [u8], -) -> Result<(), AnyError> { +) -> Result<(), SignEd25519Error> { let private = key .as_private_key() - .ok_or_else(|| type_error("Expected private key"))?; + .ok_or(SignEd25519Error::ExpectedPrivateKey)?; let ed25519 = match private { AsymmetricPrivateKey::Ed25519(private) => private, - _ => return Err(type_error("Expected Ed25519 private key")), + _ => return Err(SignEd25519Error::ExpectedEd25519PrivateKey), }; let pair = Ed25519KeyPair::from_seed_unchecked(ed25519.as_bytes().as_slice()) - .map_err(|_| type_error("Invalid Ed25519 private key"))?; + .map_err(|_| SignEd25519Error::InvalidEd25519PrivateKey)?; signature.copy_from_slice(pair.sign(data).as_ref()); Ok(()) } +#[derive(Debug, thiserror::Error)] +pub enum VerifyEd25519Error { + #[error("Expected public key")] + ExpectedPublicKey, + #[error("Expected Ed25519 public key")] + ExpectedEd25519PublicKey, +} + #[op2(fast)] pub fn op_node_verify_ed25519( #[cppgc] key: &KeyObjectHandle, #[buffer] data: 
&[u8], #[buffer] signature: &[u8], -) -> Result { +) -> Result { let public = key .as_public_key() - .ok_or_else(|| type_error("Expected public key"))?; + .ok_or(VerifyEd25519Error::ExpectedPublicKey)?; let ed25519 = match &*public { AsymmetricPublicKey::Ed25519(public) => public, - _ => return Err(type_error("Expected Ed25519 public key")), + _ => return Err(VerifyEd25519Error::ExpectedEd25519PublicKey), }; let verified = ring::signature::UnparsedPublicKey::new( diff --git a/ext/node/ops/crypto/sign.rs b/ext/node/ops/crypto/sign.rs index 31744f86b..30094c076 100644 --- a/ext/node/ops/crypto/sign.rs +++ b/ext/node/ops/crypto/sign.rs @@ -1,7 +1,4 @@ // Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. -use deno_core::error::generic_error; -use deno_core::error::type_error; -use deno_core::error::AnyError; use rand::rngs::OsRng; use rsa::signature::hazmat::PrehashSigner as _; use rsa::signature::hazmat::PrehashVerifier as _; @@ -26,7 +23,7 @@ use elliptic_curve::FieldBytesSize; fn dsa_signature( encoding: u32, signature: ecdsa::Signature, -) -> Result, AnyError> +) -> Result, KeyObjectHandlePrehashedSignAndVerifyError> where MaxSize: ArrayLength, as Add>::Output: Add + ArrayLength, @@ -36,10 +33,54 @@ where 0 => Ok(signature.to_der().to_bytes().to_vec().into_boxed_slice()), // IEEE P1363 1 => Ok(signature.to_bytes().to_vec().into_boxed_slice()), - _ => Err(type_error("invalid DSA signature encoding")), + _ => Err( + KeyObjectHandlePrehashedSignAndVerifyError::InvalidDsaSignatureEncoding, + ), } } +#[derive(Debug, thiserror::Error)] +pub enum KeyObjectHandlePrehashedSignAndVerifyError { + #[error("invalid DSA signature encoding")] + InvalidDsaSignatureEncoding, + #[error("key is not a private key")] + KeyIsNotPrivate, + #[error("digest not allowed for RSA signature: {0}")] + DigestNotAllowedForRsaSignature(String), + #[error("failed to sign digest with RSA")] + FailedToSignDigestWithRsa, + #[error("digest not allowed for RSA-PSS signature: {0}")] 
+ DigestNotAllowedForRsaPssSignature(String), + #[error("failed to sign digest with RSA-PSS")] + FailedToSignDigestWithRsaPss, + #[error("failed to sign digest with DSA")] + FailedToSignDigestWithDsa, + #[error("rsa-pss with different mf1 hash algorithm and hash algorithm is not supported")] + RsaPssHashAlgorithmUnsupported, + #[error( + "private key does not allow {actual} to be used, expected {expected}" + )] + PrivateKeyDisallowsUsage { actual: String, expected: String }, + #[error("failed to sign digest")] + FailedToSignDigest, + #[error("x25519 key cannot be used for signing")] + X25519KeyCannotBeUsedForSigning, + #[error("Ed25519 key cannot be used for prehashed signing")] + Ed25519KeyCannotBeUsedForPrehashedSigning, + #[error("DH key cannot be used for signing")] + DhKeyCannotBeUsedForSigning, + #[error("key is not a public or private key")] + KeyIsNotPublicOrPrivate, + #[error("Invalid DSA signature")] + InvalidDsaSignature, + #[error("x25519 key cannot be used for verification")] + X25519KeyCannotBeUsedForVerification, + #[error("Ed25519 key cannot be used for prehashed verification")] + Ed25519KeyCannotBeUsedForPrehashedVerification, + #[error("DH key cannot be used for verification")] + DhKeyCannotBeUsedForVerification, +} + impl KeyObjectHandle { pub fn sign_prehashed( &self, @@ -47,10 +88,10 @@ impl KeyObjectHandle { digest: &[u8], pss_salt_length: Option, dsa_signature_encoding: u32, - ) -> Result, AnyError> { + ) -> Result, KeyObjectHandlePrehashedSignAndVerifyError> { let private_key = self .as_private_key() - .ok_or_else(|| type_error("key is not a private key"))?; + .ok_or(KeyObjectHandlePrehashedSignAndVerifyError::KeyIsNotPrivate)?; match private_key { AsymmetricPrivateKey::Rsa(key) => { @@ -63,17 +104,14 @@ impl KeyObjectHandle { rsa::pkcs1v15::Pkcs1v15Sign::new::() }, _ => { - return Err(type_error(format!( - "digest not allowed for RSA signature: {}", - digest_type - ))) + return 
Err(KeyObjectHandlePrehashedSignAndVerifyError::DigestNotAllowedForRsaSignature(digest_type.to_string())) } ) }; let signature = signer .sign(Some(&mut OsRng), key, digest) - .map_err(|_| generic_error("failed to sign digest with RSA"))?; + .map_err(|_| KeyObjectHandlePrehashedSignAndVerifyError::FailedToSignDigestWithRsa)?; Ok(signature.into()) } AsymmetricPrivateKey::RsaPss(key) => { @@ -81,9 +119,7 @@ impl KeyObjectHandle { let mut salt_length = None; if let Some(details) = &key.details { if details.hash_algorithm != details.mf1_hash_algorithm { - return Err(type_error( - "rsa-pss with different mf1 hash algorithm and hash algorithm is not supported", - )); + return Err(KeyObjectHandlePrehashedSignAndVerifyError::RsaPssHashAlgorithmUnsupported); } hash_algorithm = Some(details.hash_algorithm); salt_length = Some(details.salt_length as usize); @@ -96,10 +132,10 @@ impl KeyObjectHandle { fn (algorithm: Option) { if let Some(hash_algorithm) = hash_algorithm.take() { if Some(hash_algorithm) != algorithm { - return Err(type_error(format!( - "private key does not allow {} to be used, expected {}", - digest_type, hash_algorithm.as_str() - ))); + return Err(KeyObjectHandlePrehashedSignAndVerifyError::PrivateKeyDisallowsUsage { + actual: digest_type.to_string(), + expected: hash_algorithm.as_str().to_string(), + }); } } if let Some(salt_length) = salt_length { @@ -109,15 +145,12 @@ impl KeyObjectHandle { } }, _ => { - return Err(type_error(format!( - "digest not allowed for RSA-PSS signature: {}", - digest_type - ))) + return Err(KeyObjectHandlePrehashedSignAndVerifyError::DigestNotAllowedForRsaPssSignature(digest_type.to_string())); } ); let signature = pss .sign(Some(&mut OsRng), &key.key, digest) - .map_err(|_| generic_error("failed to sign digest with RSA-PSS"))?; + .map_err(|_| KeyObjectHandlePrehashedSignAndVerifyError::FailedToSignDigestWithRsaPss)?; Ok(signature.into()) } AsymmetricPrivateKey::Dsa(key) => { @@ -127,15 +160,12 @@ impl KeyObjectHandle { 
key.sign_prehashed_rfc6979::(digest) }, _ => { - return Err(type_error(format!( - "digest not allowed for RSA signature: {}", - digest_type - ))) + return Err(KeyObjectHandlePrehashedSignAndVerifyError::DigestNotAllowedForRsaSignature(digest_type.to_string())) } ); let signature = - res.map_err(|_| generic_error("failed to sign digest with DSA"))?; + res.map_err(|_| KeyObjectHandlePrehashedSignAndVerifyError::FailedToSignDigestWithDsa)?; Ok(signature.into()) } AsymmetricPrivateKey::Ec(key) => match key { @@ -143,7 +173,7 @@ impl KeyObjectHandle { let signing_key = p224::ecdsa::SigningKey::from(key); let signature: p224::ecdsa::Signature = signing_key .sign_prehash(digest) - .map_err(|_| type_error("failed to sign digest"))?; + .map_err(|_| KeyObjectHandlePrehashedSignAndVerifyError::FailedToSignDigest)?; dsa_signature(dsa_signature_encoding, signature) } @@ -151,7 +181,7 @@ impl KeyObjectHandle { let signing_key = p256::ecdsa::SigningKey::from(key); let signature: p256::ecdsa::Signature = signing_key .sign_prehash(digest) - .map_err(|_| type_error("failed to sign digest"))?; + .map_err(|_| KeyObjectHandlePrehashedSignAndVerifyError::FailedToSignDigest)?; dsa_signature(dsa_signature_encoding, signature) } @@ -159,19 +189,17 @@ impl KeyObjectHandle { let signing_key = p384::ecdsa::SigningKey::from(key); let signature: p384::ecdsa::Signature = signing_key .sign_prehash(digest) - .map_err(|_| type_error("failed to sign digest"))?; + .map_err(|_| KeyObjectHandlePrehashedSignAndVerifyError::FailedToSignDigest)?; dsa_signature(dsa_signature_encoding, signature) } }, AsymmetricPrivateKey::X25519(_) => { - Err(type_error("x25519 key cannot be used for signing")) + Err(KeyObjectHandlePrehashedSignAndVerifyError::X25519KeyCannotBeUsedForSigning) } - AsymmetricPrivateKey::Ed25519(_) => Err(type_error( - "Ed25519 key cannot be used for prehashed signing", - )), + AsymmetricPrivateKey::Ed25519(_) => 
Err(KeyObjectHandlePrehashedSignAndVerifyError::Ed25519KeyCannotBeUsedForPrehashedSigning), AsymmetricPrivateKey::Dh(_) => { - Err(type_error("DH key cannot be used for signing")) + Err(KeyObjectHandlePrehashedSignAndVerifyError::DhKeyCannotBeUsedForSigning) } } } @@ -183,10 +211,10 @@ impl KeyObjectHandle { signature: &[u8], pss_salt_length: Option, dsa_signature_encoding: u32, - ) -> Result { - let public_key = self - .as_public_key() - .ok_or_else(|| type_error("key is not a public or private key"))?; + ) -> Result { + let public_key = self.as_public_key().ok_or( + KeyObjectHandlePrehashedSignAndVerifyError::KeyIsNotPublicOrPrivate, + )?; match &*public_key { AsymmetricPublicKey::Rsa(key) => { @@ -199,10 +227,7 @@ impl KeyObjectHandle { rsa::pkcs1v15::Pkcs1v15Sign::new::() }, _ => { - return Err(type_error(format!( - "digest not allowed for RSA signature: {}", - digest_type - ))) + return Err(KeyObjectHandlePrehashedSignAndVerifyError::DigestNotAllowedForRsaSignature(digest_type.to_string())) } ) }; @@ -214,9 +239,7 @@ impl KeyObjectHandle { let mut salt_length = None; if let Some(details) = &key.details { if details.hash_algorithm != details.mf1_hash_algorithm { - return Err(type_error( - "rsa-pss with different mf1 hash algorithm and hash algorithm is not supported", - )); + return Err(KeyObjectHandlePrehashedSignAndVerifyError::RsaPssHashAlgorithmUnsupported); } hash_algorithm = Some(details.hash_algorithm); salt_length = Some(details.salt_length as usize); @@ -229,10 +252,10 @@ impl KeyObjectHandle { fn (algorithm: Option) { if let Some(hash_algorithm) = hash_algorithm.take() { if Some(hash_algorithm) != algorithm { - return Err(type_error(format!( - "private key does not allow {} to be used, expected {}", - digest_type, hash_algorithm.as_str() - ))); + return Err(KeyObjectHandlePrehashedSignAndVerifyError::PrivateKeyDisallowsUsage { + actual: digest_type.to_string(), + expected: hash_algorithm.as_str().to_string(), + }); } } if let Some(salt_length) = 
salt_length { @@ -242,17 +265,14 @@ impl KeyObjectHandle { } }, _ => { - return Err(type_error(format!( - "digest not allowed for RSA-PSS signature: {}", - digest_type - ))) + return Err(KeyObjectHandlePrehashedSignAndVerifyError::DigestNotAllowedForRsaPssSignature(digest_type.to_string())); } ); Ok(pss.verify(&key.key, digest, signature).is_ok()) } AsymmetricPublicKey::Dsa(key) => { let signature = dsa::Signature::from_der(signature) - .map_err(|_| type_error("Invalid DSA signature"))?; + .map_err(|_| KeyObjectHandlePrehashedSignAndVerifyError::InvalidDsaSignature)?; Ok(key.verify_prehash(digest, &signature).is_ok()) } AsymmetricPublicKey::Ec(key) => match key { @@ -294,13 +314,11 @@ impl KeyObjectHandle { } }, AsymmetricPublicKey::X25519(_) => { - Err(type_error("x25519 key cannot be used for verification")) + Err(KeyObjectHandlePrehashedSignAndVerifyError::X25519KeyCannotBeUsedForVerification) } - AsymmetricPublicKey::Ed25519(_) => Err(type_error( - "Ed25519 key cannot be used for prehashed verification", - )), + AsymmetricPublicKey::Ed25519(_) => Err(KeyObjectHandlePrehashedSignAndVerifyError::Ed25519KeyCannotBeUsedForPrehashedVerification), AsymmetricPublicKey::Dh(_) => { - Err(type_error("DH key cannot be used for verification")) + Err(KeyObjectHandlePrehashedSignAndVerifyError::DhKeyCannotBeUsedForVerification) } } } diff --git a/ext/node/ops/crypto/x509.rs b/ext/node/ops/crypto/x509.rs index b44ff3a4b..ab8e52f70 100644 --- a/ext/node/ops/crypto/x509.rs +++ b/ext/node/ops/crypto/x509.rs @@ -1,11 +1,11 @@ // Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. 
-use deno_core::error::AnyError; use deno_core::op2; use x509_parser::der_parser::asn1_rs::Any; use x509_parser::der_parser::asn1_rs::Tag; use x509_parser::der_parser::oid::Oid; +pub use x509_parser::error::X509Error; use x509_parser::extensions; use x509_parser::pem; use x509_parser::prelude::*; @@ -65,7 +65,7 @@ impl<'a> Deref for CertificateView<'a> { #[cppgc] pub fn op_node_x509_parse( #[buffer] buf: &[u8], -) -> Result { +) -> Result { let source = match pem::parse_x509_pem(buf) { Ok((_, pem)) => CertificateSources::Pem(pem), Err(_) => CertificateSources::Der(buf.to_vec().into_boxed_slice()), @@ -81,7 +81,7 @@ pub fn op_node_x509_parse( X509Certificate::from_der(buf).map(|(_, cert)| cert)? } }; - Ok::<_, AnyError>(CertificateView { cert }) + Ok::<_, X509Error>(CertificateView { cert }) }, )?; @@ -89,23 +89,23 @@ pub fn op_node_x509_parse( } #[op2(fast)] -pub fn op_node_x509_ca(#[cppgc] cert: &Certificate) -> Result { +pub fn op_node_x509_ca(#[cppgc] cert: &Certificate) -> bool { let cert = cert.inner.get().deref(); - Ok(cert.is_ca()) + cert.is_ca() } #[op2(fast)] pub fn op_node_x509_check_email( #[cppgc] cert: &Certificate, #[string] email: &str, -) -> Result { +) -> bool { let cert = cert.inner.get().deref(); let subject = cert.subject(); if subject .iter_email() .any(|e| e.as_str().unwrap_or("") == email) { - return Ok(true); + return true; } let subject_alt = cert @@ -121,62 +121,60 @@ pub fn op_node_x509_check_email( for name in &subject_alt.general_names { if let extensions::GeneralName::RFC822Name(n) = name { if *n == email { - return Ok(true); + return true; } } } } - Ok(false) + false } #[op2] #[string] -pub fn op_node_x509_fingerprint( - #[cppgc] cert: &Certificate, -) -> Result, AnyError> { - Ok(cert.fingerprint::()) +pub fn op_node_x509_fingerprint(#[cppgc] cert: &Certificate) -> Option { + cert.fingerprint::() } #[op2] #[string] pub fn op_node_x509_fingerprint256( #[cppgc] cert: &Certificate, -) -> Result, AnyError> { - Ok(cert.fingerprint::()) +) 
-> Option { + cert.fingerprint::() } #[op2] #[string] pub fn op_node_x509_fingerprint512( #[cppgc] cert: &Certificate, -) -> Result, AnyError> { - Ok(cert.fingerprint::()) +) -> Option { + cert.fingerprint::() } #[op2] #[string] pub fn op_node_x509_get_issuer( #[cppgc] cert: &Certificate, -) -> Result { +) -> Result { let cert = cert.inner.get().deref(); - Ok(x509name_to_string(cert.issuer(), oid_registry())?) + x509name_to_string(cert.issuer(), oid_registry()) } #[op2] #[string] pub fn op_node_x509_get_subject( #[cppgc] cert: &Certificate, -) -> Result { +) -> Result { let cert = cert.inner.get().deref(); - Ok(x509name_to_string(cert.subject(), oid_registry())?) + x509name_to_string(cert.subject(), oid_registry()) } #[op2] #[cppgc] pub fn op_node_x509_public_key( #[cppgc] cert: &Certificate, -) -> Result { +) -> Result { let cert = cert.inner.get().deref(); let public_key = &cert.tbs_certificate.subject_pki; @@ -245,37 +243,29 @@ fn x509name_to_string( #[op2] #[string] -pub fn op_node_x509_get_valid_from( - #[cppgc] cert: &Certificate, -) -> Result { +pub fn op_node_x509_get_valid_from(#[cppgc] cert: &Certificate) -> String { let cert = cert.inner.get().deref(); - Ok(cert.validity().not_before.to_string()) + cert.validity().not_before.to_string() } #[op2] #[string] -pub fn op_node_x509_get_valid_to( - #[cppgc] cert: &Certificate, -) -> Result { +pub fn op_node_x509_get_valid_to(#[cppgc] cert: &Certificate) -> String { let cert = cert.inner.get().deref(); - Ok(cert.validity().not_after.to_string()) + cert.validity().not_after.to_string() } #[op2] #[string] -pub fn op_node_x509_get_serial_number( - #[cppgc] cert: &Certificate, -) -> Result { +pub fn op_node_x509_get_serial_number(#[cppgc] cert: &Certificate) -> String { let cert = cert.inner.get().deref(); let mut s = cert.serial.to_str_radix(16); s.make_ascii_uppercase(); - Ok(s) + s } #[op2(fast)] -pub fn op_node_x509_key_usage( - #[cppgc] cert: &Certificate, -) -> Result { +pub fn op_node_x509_key_usage(#[cppgc] 
cert: &Certificate) -> u16 { let cert = cert.inner.get().deref(); let key_usage = cert .extensions() @@ -286,5 +276,5 @@ pub fn op_node_x509_key_usage( _ => None, }); - Ok(key_usage.map(|k| k.flags).unwrap_or(0)) + key_usage.map(|k| k.flags).unwrap_or(0) } -- cgit v1.2.3 From b9262130fec34137e38c922015c6b671c0fa9396 Mon Sep 17 00:00:00 2001 From: Divy Srivastava Date: Thu, 7 Nov 2024 17:12:13 +0530 Subject: feat(ext/http): abort signal when request is cancelled (#26761) Closes https://github.com/denoland/deno/issues/21653 --- ext/fetch/23_request.js | 16 +++++++++++----- ext/http/00_serve.ts | 8 ++++++++ ext/http/http_next.rs | 8 ++++++++ ext/http/lib.rs | 1 + 4 files changed, 28 insertions(+), 5 deletions(-) (limited to 'ext') diff --git a/ext/fetch/23_request.js b/ext/fetch/23_request.js index 6211e927d..22c17d6d2 100644 --- a/ext/fetch/23_request.js +++ b/ext/fetch/23_request.js @@ -269,19 +269,25 @@ class Request { /** @type {AbortSignal} */ get [_signal]() { const signal = this[_signalCache]; - // This signal not been created yet, and the request is still in progress - if (signal === undefined) { + // This signal has not been created yet, but the request has already completed + if (signal === false) { const signal = newSignal(); this[_signalCache] = signal; + signal[signalAbort](signalAbortError); return signal; } - // This signal has not been created yet, but the request has already completed - if (signal === false) { + + // This signal not been created yet, and the request is still in progress + if (signal === undefined) { const signal = newSignal(); this[_signalCache] = signal; - signal[signalAbort](signalAbortError); return signal; } + + if (!signal.aborted && this[_request].isCancelled) { + signal[signalAbort](signalAbortError); + } + return signal; } get [_mimeType]() { diff --git a/ext/http/00_serve.ts b/ext/http/00_serve.ts index 1b70cf212..8cfd7ad53 100644 --- a/ext/http/00_serve.ts +++ b/ext/http/00_serve.ts @@ -11,6 +11,7 @@ import { op_http_cancel, 
op_http_close, op_http_close_after_finish, + op_http_get_request_cancelled, op_http_get_request_headers, op_http_get_request_method_and_url, op_http_read_request_body, @@ -373,6 +374,13 @@ class InnerRequest { get external() { return this.#external; } + + get isCancelled() { + if (this.#external === null) { + return true; + } + return op_http_get_request_cancelled(this.#external); + } } class CallbackContext { diff --git a/ext/http/http_next.rs b/ext/http/http_next.rs index 56c46de92..326478fe7 100644 --- a/ext/http/http_next.rs +++ b/ext/http/http_next.rs @@ -700,6 +700,14 @@ fn set_response( http.complete(); } +#[op2(fast)] +pub fn op_http_get_request_cancelled(external: *const c_void) -> bool { + let http = + // SAFETY: op is called with external. + unsafe { clone_external!(external, "op_http_get_request_cancelled") }; + http.cancelled() +} + /// Returned promise resolves when body streaming finishes. /// Call [`op_http_close_after_finish`] when done with the external. #[op2(async)] diff --git a/ext/http/lib.rs b/ext/http/lib.rs index 6243804a1..9d71e3ad3 100644 --- a/ext/http/lib.rs +++ b/ext/http/lib.rs @@ -113,6 +113,7 @@ deno_core::extension!( http_next::op_http_get_request_header, http_next::op_http_get_request_headers, http_next::op_http_get_request_method_and_url, + http_next::op_http_get_request_cancelled, http_next::op_http_read_request_body, http_next::op_http_serve_on, http_next::op_http_serve, -- cgit v1.2.3 From 637b1d5508293ed02bef2f317b30bb7c1f0cbc71 Mon Sep 17 00:00:00 2001 From: Divy Srivastava Date: Fri, 8 Nov 2024 12:27:29 +0530 Subject: fix(ext/cache): don't panic when creating cache (#26780) --- ext/cache/lib.rs | 6 ++++-- ext/cache/sqlite.rs | 23 +++++++++-------------- 2 files changed, 13 insertions(+), 16 deletions(-) (limited to 'ext') diff --git a/ext/cache/lib.rs b/ext/cache/lib.rs index b9cc5427c..524d4cea0 100644 --- a/ext/cache/lib.rs +++ b/ext/cache/lib.rs @@ -33,7 +33,9 @@ pub enum CacheError { } #[derive(Clone)] -pub struct 
CreateCache(pub Arc C>); +pub struct CreateCache( + pub Arc Result>, +); deno_core::extension!(deno_cache, deps = [ deno_webidl, deno_web, deno_url, deno_fetch ], @@ -231,7 +233,7 @@ where if let Some(cache) = state.try_borrow::() { Ok(cache.clone()) } else if let Some(create_cache) = state.try_borrow::>() { - let cache = create_cache.0(); + let cache = create_cache.0()?; state.put(cache); Ok(state.borrow::().clone()) } else { diff --git a/ext/cache/sqlite.rs b/ext/cache/sqlite.rs index e4991c32f..6efceda11 100644 --- a/ext/cache/sqlite.rs +++ b/ext/cache/sqlite.rs @@ -42,7 +42,7 @@ pub struct SqliteBackedCache { } impl SqliteBackedCache { - pub fn new(cache_storage_dir: PathBuf) -> Self { + pub fn new(cache_storage_dir: PathBuf) -> Result { { std::fs::create_dir_all(&cache_storage_dir) .expect("failed to create cache dir"); @@ -57,18 +57,14 @@ impl SqliteBackedCache { PRAGMA synchronous=NORMAL; PRAGMA optimize; "; - connection - .execute_batch(initial_pragmas) - .expect("failed to execute pragmas"); - connection - .execute( - "CREATE TABLE IF NOT EXISTS cache_storage ( + connection.execute_batch(initial_pragmas)?; + connection.execute( + "CREATE TABLE IF NOT EXISTS cache_storage ( id INTEGER PRIMARY KEY, cache_name TEXT NOT NULL UNIQUE )", - (), - ) - .expect("failed to create cache_storage table"); + (), + )?; connection .execute( "CREATE TABLE IF NOT EXISTS request_response_list ( @@ -86,12 +82,11 @@ impl SqliteBackedCache { UNIQUE (cache_id, request_url) )", (), - ) - .expect("failed to create request_response_list table"); - SqliteBackedCache { + )?; + Ok(SqliteBackedCache { connection: Arc::new(Mutex::new(connection)), cache_storage_dir, - } + }) } } } -- cgit v1.2.3 From b482a50299ae4f636a186038460e54af65e2b627 Mon Sep 17 00:00:00 2001 From: Divy Srivastava Date: Fri, 8 Nov 2024 18:46:11 +0530 Subject: feat(ext/http): abort event when request is cancelled (#26781) ```js Deno.serve(async (req) => { const { promise, resolve } = Promise.withResolvers(); 
req.signal.addEventListener("abort", () => { resolve(); }); await promise; return new Response("Ok"); }); ``` --- ext/fetch/23_request.js | 8 ++++---- ext/http/00_serve.ts | 13 +++++++++---- ext/http/http_next.rs | 13 +++++++++++++ ext/http/lib.rs | 1 + ext/http/service.rs | 10 ++++++++++ 5 files changed, 37 insertions(+), 8 deletions(-) (limited to 'ext') diff --git a/ext/fetch/23_request.js b/ext/fetch/23_request.js index 22c17d6d2..61cac22d2 100644 --- a/ext/fetch/23_request.js +++ b/ext/fetch/23_request.js @@ -281,11 +281,11 @@ class Request { if (signal === undefined) { const signal = newSignal(); this[_signalCache] = signal; - return signal; - } + this[_request].onCancel?.(() => { + signal[signalAbort](signalAbortError); + }); - if (!signal.aborted && this[_request].isCancelled) { - signal[signalAbort](signalAbortError); + return signal; } return signal; diff --git a/ext/http/00_serve.ts b/ext/http/00_serve.ts index 8cfd7ad53..7bf83e49c 100644 --- a/ext/http/00_serve.ts +++ b/ext/http/00_serve.ts @@ -11,10 +11,10 @@ import { op_http_cancel, op_http_close, op_http_close_after_finish, - op_http_get_request_cancelled, op_http_get_request_headers, op_http_get_request_method_and_url, op_http_read_request_body, + op_http_request_on_cancel, op_http_serve, op_http_serve_on, op_http_set_promise_complete, @@ -375,11 +375,16 @@ class InnerRequest { return this.#external; } - get isCancelled() { + onCancel(callback) { if (this.#external === null) { - return true; + callback(); + return; } - return op_http_get_request_cancelled(this.#external); + + PromisePrototypeThen( + op_http_request_on_cancel(this.#external), + callback, + ); } } diff --git a/ext/http/http_next.rs b/ext/http/http_next.rs index 326478fe7..c55e86835 100644 --- a/ext/http/http_next.rs +++ b/ext/http/http_next.rs @@ -708,6 +708,19 @@ pub fn op_http_get_request_cancelled(external: *const c_void) -> bool { http.cancelled() } +#[op2(async)] +pub async fn op_http_request_on_cancel(external: *const c_void) { 
+ let http = + // SAFETY: op is called with external. + unsafe { clone_external!(external, "op_http_request_on_cancel") }; + let (tx, rx) = tokio::sync::oneshot::channel(); + + http.on_cancel(tx); + drop(http); + + rx.await.ok(); +} + /// Returned promise resolves when body streaming finishes. /// Call [`op_http_close_after_finish`] when done with the external. #[op2(async)] diff --git a/ext/http/lib.rs b/ext/http/lib.rs index 9d71e3ad3..49893b1b9 100644 --- a/ext/http/lib.rs +++ b/ext/http/lib.rs @@ -112,6 +112,7 @@ deno_core::extension!( http_next::op_http_close_after_finish, http_next::op_http_get_request_header, http_next::op_http_get_request_headers, + http_next::op_http_request_on_cancel, http_next::op_http_get_request_method_and_url, http_next::op_http_get_request_cancelled, http_next::op_http_read_request_body, diff --git a/ext/http/service.rs b/ext/http/service.rs index 75f93d77c..ce24dea43 100644 --- a/ext/http/service.rs +++ b/ext/http/service.rs @@ -27,6 +27,7 @@ use std::rc::Rc; use std::task::Context; use std::task::Poll; use std::task::Waker; +use tokio::sync::oneshot; pub type Request = hyper::Request; pub type Response = hyper::Response; @@ -211,6 +212,7 @@ pub struct UpgradeUnavailableError; struct HttpRecordInner { server_state: SignallingRc, + closed_channel: Option>, request_info: HttpConnectionProperties, request_parts: http::request::Parts, request_body: Option, @@ -276,6 +278,7 @@ impl HttpRecord { response_body_finished: false, response_body_waker: None, trailers: None, + closed_channel: None, been_dropped: false, finished: false, needs_close_after_finish: false, @@ -312,6 +315,10 @@ impl HttpRecord { RefMut::map(self.self_mut(), |inner| &mut inner.needs_close_after_finish) } + pub fn on_cancel(&self, sender: oneshot::Sender<()>) { + self.self_mut().closed_channel = Some(sender); + } + fn recycle(self: Rc) { assert!( Rc::strong_count(&self) == 1, @@ -390,6 +397,9 @@ impl HttpRecord { inner.been_dropped = true; // The request body might 
include actual resources. inner.request_body.take(); + if let Some(closed_channel) = inner.closed_channel.take() { + let _ = closed_channel.send(()); + } } /// Complete this record, potentially expunging it if it is fully complete (ie: cancelled as well). -- cgit v1.2.3 From 73fbd61bd016eebbf2776dc94c15a26bf39668d6 Mon Sep 17 00:00:00 2001 From: snek Date: Fri, 8 Nov 2024 23:20:24 +0100 Subject: fix: performance.timeOrigin (#26787) `performance.timeOrigin` was being set from when JS started executing, but `op_now` measures from an `std::time::Instant` stored in `OpState`, which is created at a completely different time. This caused `performance.timeOrigin` to be very incorrect. This PR corrects the origin and also cleans up some of the timer code. Compared to `Date.now()`, `performance`'s time origin is now consistently within 5us (0.005ms) of system time. ![image](https://github.com/user-attachments/assets/0a7be04a-4f6d-4816-bd25-38a2e6136926) --- ext/web/02_timers.js | 13 +--------- ext/web/15_performance.js | 24 ++++++++++++----- ext/web/lib.rs | 6 +++-- ext/web/timers.rs | 65 ++++++++++++++++++++++++++++++++++------------- 4 files changed, 69 insertions(+), 39 deletions(-) (limited to 'ext') diff --git a/ext/web/02_timers.js b/ext/web/02_timers.js index 89acaca42..6058febd5 100644 --- a/ext/web/02_timers.js +++ b/ext/web/02_timers.js @@ -1,12 +1,9 @@ // Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. 
import { core, primordials } from "ext:core/mod.js"; -import { op_defer, op_now } from "ext:core/ops"; +import { op_defer } from "ext:core/ops"; const { - Uint8Array, - Uint32Array, PromisePrototypeThen, - TypedArrayPrototypeGetBuffer, TypeError, indirectEval, ReflectApply, @@ -18,13 +15,6 @@ const { import * as webidl from "ext:deno_webidl/00_webidl.js"; -const hrU8 = new Uint8Array(8); -const hr = new Uint32Array(TypedArrayPrototypeGetBuffer(hrU8)); -function opNow() { - op_now(hrU8); - return (hr[0] * 1000 + hr[1] / 1e6); -} - // --------------------------------------------------------------------------- function checkThis(thisArg) { @@ -151,7 +141,6 @@ export { clearInterval, clearTimeout, defer, - opNow, refTimer, setImmediate, setInterval, diff --git a/ext/web/15_performance.js b/ext/web/15_performance.js index ea5557278..9e0e310a5 100644 --- a/ext/web/15_performance.js +++ b/ext/web/15_performance.js @@ -1,6 +1,7 @@ // Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. 
import { primordials } from "ext:core/mod.js"; +import { op_now, op_time_origin } from "ext:core/ops"; const { ArrayPrototypeFilter, ArrayPrototypePush, @@ -10,19 +11,34 @@ const { Symbol, SymbolFor, TypeError, + TypedArrayPrototypeGetBuffer, + Uint8Array, + Uint32Array, } = primordials; import * as webidl from "ext:deno_webidl/00_webidl.js"; import { structuredClone } from "./02_structured_clone.js"; import { createFilteredInspectProxy } from "ext:deno_console/01_console.js"; import { EventTarget } from "./02_event.js"; -import { opNow } from "./02_timers.js"; import { DOMException } from "./01_dom_exception.js"; const illegalConstructorKey = Symbol("illegalConstructorKey"); let performanceEntries = []; let timeOrigin; +const hrU8 = new Uint8Array(8); +const hr = new Uint32Array(TypedArrayPrototypeGetBuffer(hrU8)); + +function setTimeOrigin() { + op_time_origin(hrU8); + timeOrigin = hr[0] * 1000 + hr[1] / 1e6; +} + +function now() { + op_now(hrU8); + return hr[0] * 1000 + hr[1] / 1e6; +} + webidl.converters["PerformanceMarkOptions"] = webidl .createDictionaryConverter( "PerformanceMarkOptions", @@ -90,10 +106,6 @@ webidl.converters["DOMString or PerformanceMeasureOptions"] = ( return webidl.converters.DOMString(V, prefix, context, opts); }; -function setTimeOrigin(origin) { - timeOrigin = origin; -} - function findMostRecent( name, type, @@ -135,8 +147,6 @@ function filterByNameType( ); } -const now = opNow; - const _name = Symbol("[[name]]"); const _entryType = Symbol("[[entryType]]"); const _startTime = Symbol("[[startTime]]"); diff --git a/ext/web/lib.rs b/ext/web/lib.rs index 4935af5bd..af0fc2c27 100644 --- a/ext/web/lib.rs +++ b/ext/web/lib.rs @@ -52,7 +52,8 @@ pub use crate::message_port::Transferable; use crate::timers::op_defer; use crate::timers::op_now; -use crate::timers::StartTime; +use crate::timers::op_time_origin; +pub use crate::timers::StartTime; pub use crate::timers::TimersPermission; deno_core::extension!(deno_web, @@ -84,6 +85,7 @@ 
deno_core::extension!(deno_web, compression::op_compression_write, compression::op_compression_finish, op_now

, + op_time_origin

, op_defer, stream_resource::op_readable_stream_resource_allocate, stream_resource::op_readable_stream_resource_allocate_sized, @@ -123,7 +125,7 @@ deno_core::extension!(deno_web, if let Some(location) = options.maybe_location { state.put(Location(location)); } - state.put(StartTime::now()); + state.put(StartTime::default()); } ); diff --git a/ext/web/timers.rs b/ext/web/timers.rs index a9ab7c97e..06444ed34 100644 --- a/ext/web/timers.rs +++ b/ext/web/timers.rs @@ -4,7 +4,10 @@ use deno_core::op2; use deno_core::OpState; +use std::time::Duration; use std::time::Instant; +use std::time::SystemTime; +use std::time::UNIX_EPOCH; pub trait TimersPermission { fn allow_hrtime(&mut self) -> bool; @@ -17,21 +20,28 @@ impl TimersPermission for deno_permissions::PermissionsContainer { } } -pub type StartTime = Instant; +pub struct StartTime(Instant); -// Returns a milliseconds and nanoseconds subsec -// since the start time of the deno runtime. -// If the High precision flag is not set, the -// nanoseconds are rounded on 2ms. 
-#[op2(fast)] -pub fn op_now(state: &mut OpState, #[buffer] buf: &mut [u8]) +impl Default for StartTime { + fn default() -> Self { + Self(Instant::now()) + } +} + +impl std::ops::Deref for StartTime { + type Target = Instant; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +fn expose_time(state: &mut OpState, duration: Duration, out: &mut [u8]) where TP: TimersPermission + 'static, { - let start_time = state.borrow::(); - let elapsed = start_time.elapsed(); - let seconds = elapsed.as_secs(); - let mut subsec_nanos = elapsed.subsec_nanos(); + let seconds = duration.as_secs() as u32; + let mut subsec_nanos = duration.subsec_nanos(); // If the permission is not enabled // Round the nano result on 2 milliseconds @@ -40,14 +50,33 @@ where let reduced_time_precision = 2_000_000; // 2ms in nanoseconds subsec_nanos -= subsec_nanos % reduced_time_precision; } - if buf.len() < 8 { - return; + + if out.len() >= 8 { + out[0..4].copy_from_slice(&seconds.to_ne_bytes()); + out[4..8].copy_from_slice(&subsec_nanos.to_ne_bytes()); } - let buf: &mut [u32] = - // SAFETY: buffer is at least 8 bytes long. 
- unsafe { std::slice::from_raw_parts_mut(buf.as_mut_ptr() as _, 2) }; - buf[0] = seconds as u32; - buf[1] = subsec_nanos; +} + +#[op2(fast)] +pub fn op_now(state: &mut OpState, #[buffer] buf: &mut [u8]) +where + TP: TimersPermission + 'static, +{ + let start_time = state.borrow::(); + let elapsed = start_time.elapsed(); + expose_time::(state, elapsed, buf); +} + +#[op2(fast)] +pub fn op_time_origin(state: &mut OpState, #[buffer] buf: &mut [u8]) +where + TP: TimersPermission + 'static, +{ + // https://w3c.github.io/hr-time/#dfn-estimated-monotonic-time-of-the-unix-epoch + let wall_time = SystemTime::now(); + let monotonic_time = state.borrow::().elapsed(); + let epoch = wall_time.duration_since(UNIX_EPOCH).unwrap() - monotonic_time; + expose_time::(state, epoch, buf); } #[allow(clippy::unused_async)] -- cgit v1.2.3 From e1b40a69c0241a9be7249b64118eae8741e24268 Mon Sep 17 00:00:00 2001 From: denobot <33910674+denobot@users.noreply.github.com> Date: Sun, 10 Nov 2024 02:42:18 -0500 Subject: chore: forward v2.0.6 release commit to main (#26804) This is the release commit being forwarded back to main for 2.0.6 Signed-off-by: Divy Srivastava Co-authored-by: Divy Srivastava --- ext/broadcast_channel/Cargo.toml | 2 +- ext/cache/Cargo.toml | 2 +- ext/canvas/Cargo.toml | 2 +- ext/console/Cargo.toml | 2 +- ext/cron/Cargo.toml | 2 +- ext/crypto/Cargo.toml | 2 +- ext/fetch/Cargo.toml | 2 +- ext/ffi/Cargo.toml | 2 +- ext/fs/Cargo.toml | 2 +- ext/http/Cargo.toml | 2 +- ext/io/Cargo.toml | 2 +- ext/kv/Cargo.toml | 2 +- ext/napi/Cargo.toml | 2 +- ext/napi/sym/Cargo.toml | 2 +- ext/net/Cargo.toml | 2 +- ext/node/Cargo.toml | 2 +- ext/tls/Cargo.toml | 2 +- ext/url/Cargo.toml | 2 +- ext/web/Cargo.toml | 2 +- ext/webgpu/Cargo.toml | 2 +- ext/webidl/Cargo.toml | 2 +- ext/websocket/Cargo.toml | 2 +- ext/webstorage/Cargo.toml | 2 +- 23 files changed, 23 insertions(+), 23 deletions(-) (limited to 'ext') diff --git a/ext/broadcast_channel/Cargo.toml b/ext/broadcast_channel/Cargo.toml index 
0951286f7..90ac03835 100644 --- a/ext/broadcast_channel/Cargo.toml +++ b/ext/broadcast_channel/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_broadcast_channel" -version = "0.170.0" +version = "0.171.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/cache/Cargo.toml b/ext/cache/Cargo.toml index 20dc9ca48..56fa0a527 100644 --- a/ext/cache/Cargo.toml +++ b/ext/cache/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_cache" -version = "0.108.0" +version = "0.109.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/canvas/Cargo.toml b/ext/canvas/Cargo.toml index 5c6e44f95..4231d7c84 100644 --- a/ext/canvas/Cargo.toml +++ b/ext/canvas/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_canvas" -version = "0.45.0" +version = "0.46.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/console/Cargo.toml b/ext/console/Cargo.toml index bf476574e..80f1cca84 100644 --- a/ext/console/Cargo.toml +++ b/ext/console/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_console" -version = "0.176.0" +version = "0.177.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/cron/Cargo.toml b/ext/cron/Cargo.toml index 90b39aa0e..966ccdc95 100644 --- a/ext/cron/Cargo.toml +++ b/ext/cron/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_cron" -version = "0.56.0" +version = "0.57.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/crypto/Cargo.toml b/ext/crypto/Cargo.toml index 890cd868a..a5794dc68 100644 --- a/ext/crypto/Cargo.toml +++ b/ext/crypto/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_crypto" -version = "0.190.0" +version = "0.191.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/fetch/Cargo.toml b/ext/fetch/Cargo.toml index 0b15d05b8..56d416bbb 100644 --- a/ext/fetch/Cargo.toml +++ b/ext/fetch/Cargo.toml @@ -2,7 +2,7 @@ 
[package] name = "deno_fetch" -version = "0.200.0" +version = "0.201.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/ffi/Cargo.toml b/ext/ffi/Cargo.toml index 27ecd1d54..295e8be84 100644 --- a/ext/ffi/Cargo.toml +++ b/ext/ffi/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_ffi" -version = "0.163.0" +version = "0.164.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/fs/Cargo.toml b/ext/fs/Cargo.toml index d1720b7bc..e85f349b1 100644 --- a/ext/fs/Cargo.toml +++ b/ext/fs/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_fs" -version = "0.86.0" +version = "0.87.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/http/Cargo.toml b/ext/http/Cargo.toml index 1d7c4d87e..ed98fe349 100644 --- a/ext/http/Cargo.toml +++ b/ext/http/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_http" -version = "0.174.0" +version = "0.175.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/io/Cargo.toml b/ext/io/Cargo.toml index b791f16a3..6ef049ff9 100644 --- a/ext/io/Cargo.toml +++ b/ext/io/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_io" -version = "0.86.0" +version = "0.87.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/kv/Cargo.toml b/ext/kv/Cargo.toml index 59b425d3c..1d7b91770 100644 --- a/ext/kv/Cargo.toml +++ b/ext/kv/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_kv" -version = "0.84.0" +version = "0.85.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/napi/Cargo.toml b/ext/napi/Cargo.toml index 064d33568..df3ec0287 100644 --- a/ext/napi/Cargo.toml +++ b/ext/napi/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_napi" -version = "0.107.0" +version = "0.108.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/napi/sym/Cargo.toml b/ext/napi/sym/Cargo.toml index 
bf48d2742..7c13a9165 100644 --- a/ext/napi/sym/Cargo.toml +++ b/ext/napi/sym/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "napi_sym" -version = "0.106.0" +version = "0.107.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/net/Cargo.toml b/ext/net/Cargo.toml index 61bb5701a..245deedd2 100644 --- a/ext/net/Cargo.toml +++ b/ext/net/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_net" -version = "0.168.0" +version = "0.169.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/node/Cargo.toml b/ext/node/Cargo.toml index d830be575..9e1a3495b 100644 --- a/ext/node/Cargo.toml +++ b/ext/node/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_node" -version = "0.113.0" +version = "0.114.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/tls/Cargo.toml b/ext/tls/Cargo.toml index 89a78f4b6..943fc8413 100644 --- a/ext/tls/Cargo.toml +++ b/ext/tls/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_tls" -version = "0.163.0" +version = "0.164.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/url/Cargo.toml b/ext/url/Cargo.toml index 7f3fa9761..557a4669e 100644 --- a/ext/url/Cargo.toml +++ b/ext/url/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_url" -version = "0.176.0" +version = "0.177.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/web/Cargo.toml b/ext/web/Cargo.toml index faba8e9fb..db28d0e57 100644 --- a/ext/web/Cargo.toml +++ b/ext/web/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_web" -version = "0.207.0" +version = "0.208.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/webgpu/Cargo.toml b/ext/webgpu/Cargo.toml index c682965d5..f23bb8371 100644 --- a/ext/webgpu/Cargo.toml +++ b/ext/webgpu/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_webgpu" -version = "0.143.0" +version = "0.144.0" authors = ["the 
Deno authors"] edition.workspace = true license = "MIT" diff --git a/ext/webidl/Cargo.toml b/ext/webidl/Cargo.toml index 1e8dd77cf..8c3f6f612 100644 --- a/ext/webidl/Cargo.toml +++ b/ext/webidl/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_webidl" -version = "0.176.0" +version = "0.177.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/websocket/Cargo.toml b/ext/websocket/Cargo.toml index 6724cea2c..61f1f5959 100644 --- a/ext/websocket/Cargo.toml +++ b/ext/websocket/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_websocket" -version = "0.181.0" +version = "0.182.0" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/ext/webstorage/Cargo.toml b/ext/webstorage/Cargo.toml index c1d2092f1..01e23ab83 100644 --- a/ext/webstorage/Cargo.toml +++ b/ext/webstorage/Cargo.toml @@ -2,7 +2,7 @@ [package] name = "deno_webstorage" -version = "0.171.0" +version = "0.172.0" authors.workspace = true edition.workspace = true license.workspace = true -- cgit v1.2.3 From 90236d67c591d4344a9ca0e5d23a4906d08308e5 Mon Sep 17 00:00:00 2001 From: Satya Rohith Date: Tue, 12 Nov 2024 12:10:41 +0530 Subject: fix(ext/http): prefer brotli for `accept-encoding: gzip, deflate, br, zstd` (#26814) Closes https://github.com/denoland/deno/issues/26813 --- ext/http/http_next.rs | 1 + 1 file changed, 1 insertion(+) (limited to 'ext') diff --git a/ext/http/http_next.rs b/ext/http/http_next.rs index c55e86835..1251f00cc 100644 --- a/ext/http/http_next.rs +++ b/ext/http/http_next.rs @@ -564,6 +564,7 @@ fn is_request_compressible( match accept_encoding.to_str() { // Firefox and Chrome send this -- no need to parse Ok("gzip, deflate, br") => return Compression::Brotli, + Ok("gzip, deflate, br, zstd") => return Compression::Brotli, Ok("gzip") => return Compression::GZip, Ok("br") => return Compression::Brotli, _ => (), -- cgit v1.2.3 From c3c2b379669b17e5fdcbe5e62662404ca22c71c6 Mon Sep 17 00:00:00 2001 From: Yoshiya Hinosawa 
Date: Tue, 12 Nov 2024 19:54:47 +0900 Subject: fix(ext/node): add autoSelectFamily option to net.createConnection (#26661) --- ext/node/polyfills/internal/errors.ts | 26 +- ext/node/polyfills/internal/net.ts | 1 + ext/node/polyfills/internal_binding/uv.ts | 2 + ext/node/polyfills/net.ts | 490 +++++++++++++++++++++++++++++- 4 files changed, 504 insertions(+), 15 deletions(-) (limited to 'ext') diff --git a/ext/node/polyfills/internal/errors.ts b/ext/node/polyfills/internal/errors.ts index 5a3d4437a..962ca86e9 100644 --- a/ext/node/polyfills/internal/errors.ts +++ b/ext/node/polyfills/internal/errors.ts @@ -18,7 +18,7 @@ */ import { primordials } from "ext:core/mod.js"; -const { JSONStringify, SymbolFor } = primordials; +const { JSONStringify, SafeArrayIterator, SymbolFor } = primordials; import { format, inspect } from "ext:deno_node/internal/util/inspect.mjs"; import { codes } from "ext:deno_node/internal/error_codes.ts"; import { @@ -1874,6 +1874,11 @@ export class ERR_SOCKET_CLOSED extends NodeError { super("ERR_SOCKET_CLOSED", `Socket is closed`); } } +export class ERR_SOCKET_CONNECTION_TIMEOUT extends NodeError { + constructor() { + super("ERR_SOCKET_CONNECTION_TIMEOUT", `Socket connection timeout`); + } +} export class ERR_SOCKET_DGRAM_IS_CONNECTED extends NodeError { constructor() { super("ERR_SOCKET_DGRAM_IS_CONNECTED", `Already connected`); @@ -2633,11 +2638,30 @@ export function aggregateTwoErrors( } return innerError || outerError; } + +export class NodeAggregateError extends AggregateError { + code: string; + constructor(errors, message) { + super(new SafeArrayIterator(errors), message); + this.code = errors[0]?.code; + } + + get [kIsNodeError]() { + return true; + } + + // deno-lint-ignore adjacent-overload-signatures + get ["constructor"]() { + return AggregateError; + } +} + codes.ERR_IPC_CHANNEL_CLOSED = ERR_IPC_CHANNEL_CLOSED; codes.ERR_INVALID_ARG_TYPE = ERR_INVALID_ARG_TYPE; codes.ERR_INVALID_ARG_VALUE = ERR_INVALID_ARG_VALUE; 
codes.ERR_OUT_OF_RANGE = ERR_OUT_OF_RANGE; codes.ERR_SOCKET_BAD_PORT = ERR_SOCKET_BAD_PORT; +codes.ERR_SOCKET_CONNECTION_TIMEOUT = ERR_SOCKET_CONNECTION_TIMEOUT; codes.ERR_BUFFER_OUT_OF_BOUNDS = ERR_BUFFER_OUT_OF_BOUNDS; codes.ERR_UNKNOWN_ENCODING = ERR_UNKNOWN_ENCODING; codes.ERR_PARSE_ARGS_INVALID_OPTION_VALUE = ERR_PARSE_ARGS_INVALID_OPTION_VALUE; diff --git a/ext/node/polyfills/internal/net.ts b/ext/node/polyfills/internal/net.ts index 144612626..a3dcb3ed2 100644 --- a/ext/node/polyfills/internal/net.ts +++ b/ext/node/polyfills/internal/net.ts @@ -95,4 +95,5 @@ export function makeSyncWrite(fd: number) { }; } +export const kReinitializeHandle = Symbol("kReinitializeHandle"); export const normalizedArgsSymbol = Symbol("normalizedArgs"); diff --git a/ext/node/polyfills/internal_binding/uv.ts b/ext/node/polyfills/internal_binding/uv.ts index aa468a0a5..6cd70a7e8 100644 --- a/ext/node/polyfills/internal_binding/uv.ts +++ b/ext/node/polyfills/internal_binding/uv.ts @@ -530,10 +530,12 @@ export function mapSysErrnoToUvErrno(sysErrno: number): number { export const UV_EAI_MEMORY = codeMap.get("EAI_MEMORY")!; export const UV_EBADF = codeMap.get("EBADF")!; +export const UV_ECANCELED = codeMap.get("ECANCELED")!; export const UV_EEXIST = codeMap.get("EEXIST"); export const UV_EINVAL = codeMap.get("EINVAL")!; export const UV_ENOENT = codeMap.get("ENOENT"); export const UV_ENOTSOCK = codeMap.get("ENOTSOCK")!; +export const UV_ETIMEDOUT = codeMap.get("ETIMEDOUT")!; export const UV_UNKNOWN = codeMap.get("UNKNOWN")!; export function errname(errno: number): string { diff --git a/ext/node/polyfills/net.ts b/ext/node/polyfills/net.ts index 48e1d0de8..2b0112519 100644 --- a/ext/node/polyfills/net.ts +++ b/ext/node/polyfills/net.ts @@ -31,6 +31,7 @@ import { isIP, isIPv4, isIPv6, + kReinitializeHandle, normalizedArgsSymbol, } from "ext:deno_node/internal/net.ts"; import { Duplex } from "node:stream"; @@ -50,9 +51,11 @@ import { ERR_SERVER_ALREADY_LISTEN, ERR_SERVER_NOT_RUNNING, 
ERR_SOCKET_CLOSED, + ERR_SOCKET_CONNECTION_TIMEOUT, errnoException, exceptionWithHostPort, genericNodeError, + NodeAggregateError, uvExceptionWithHostPort, } from "ext:deno_node/internal/errors.ts"; import type { ErrnoException } from "ext:deno_node/internal/errors.ts"; @@ -80,6 +83,7 @@ import { Buffer } from "node:buffer"; import type { LookupOneOptions } from "ext:deno_node/internal/dns/utils.ts"; import { validateAbortSignal, + validateBoolean, validateFunction, validateInt32, validateNumber, @@ -100,13 +104,25 @@ import { ShutdownWrap } from "ext:deno_node/internal_binding/stream_wrap.ts"; import { assert } from "ext:deno_node/_util/asserts.ts"; import { isWindows } from "ext:deno_node/_util/os.ts"; import { ADDRCONFIG, lookup as dnsLookup } from "node:dns"; -import { codeMap } from "ext:deno_node/internal_binding/uv.ts"; +import { + codeMap, + UV_ECANCELED, + UV_ETIMEDOUT, +} from "ext:deno_node/internal_binding/uv.ts"; import { guessHandleType } from "ext:deno_node/internal_binding/util.ts"; import { debuglog } from "ext:deno_node/internal/util/debuglog.ts"; import type { DuplexOptions } from "ext:deno_node/_stream.d.ts"; import type { BufferEncoding } from "ext:deno_node/_global.d.ts"; import type { Abortable } from "ext:deno_node/_events.d.ts"; import { channel } from "node:diagnostics_channel"; +import { primordials } from "ext:core/mod.js"; + +const { + ArrayPrototypeIncludes, + ArrayPrototypePush, + FunctionPrototypeBind, + MathMax, +} = primordials; let debug = debuglog("net", (fn) => { debug = fn; @@ -120,6 +136,9 @@ const kBytesWritten = Symbol("kBytesWritten"); const DEFAULT_IPV4_ADDR = "0.0.0.0"; const DEFAULT_IPV6_ADDR = "::"; +let autoSelectFamilyDefault = true; +let autoSelectFamilyAttemptTimeoutDefault = 250; + type Handle = TCP | Pipe; interface HandleOptions { @@ -214,6 +233,8 @@ interface TcpSocketConnectOptions extends ConnectOptions { hints?: number; family?: number; lookup?: LookupFunction; + autoSelectFamily?: boolean | undefined; + 
autoSelectFamilyAttemptTimeout?: number | undefined; } interface IpcSocketConnectOptions extends ConnectOptions { @@ -316,12 +337,6 @@ export function _normalizeArgs(args: unknown[]): NormalizedArgs { return arr; } -function _isTCPConnectWrap( - req: TCPConnectWrap | PipeConnectWrap, -): req is TCPConnectWrap { - return "localAddress" in req && "localPort" in req; -} - function _afterConnect( status: number, // deno-lint-ignore no-explicit-any @@ -372,7 +387,7 @@ function _afterConnect( socket.connecting = false; let details; - if (_isTCPConnectWrap(req)) { + if (req.localAddress && req.localPort) { details = req.localAddress + ":" + req.localPort; } @@ -384,7 +399,7 @@ function _afterConnect( details, ); - if (_isTCPConnectWrap(req)) { + if (details) { ex.localAddress = req.localAddress; ex.localPort = req.localPort; } @@ -393,6 +408,107 @@ function _afterConnect( } } +function _createConnectionError(req, status) { + let details; + + if (req.localAddress && req.localPort) { + details = req.localAddress + ":" + req.localPort; + } + + const ex = exceptionWithHostPort( + status, + "connect", + req.address, + req.port, + details, + ); + if (details) { + ex.localAddress = req.localAddress; + ex.localPort = req.localPort; + } + + return ex; +} + +function _afterConnectMultiple( + context, + current, + status, + handle, + req, + readable, + writable, +) { + debug( + "connect/multiple: connection attempt to %s:%s completed with status %s", + req.address, + req.port, + status, + ); + + // Make sure another connection is not spawned + clearTimeout(context[kTimeout]); + + // One of the connection has completed and correctly dispatched but after timeout, ignore this one + if (status === 0 && current !== context.current - 1) { + debug( + "connect/multiple: ignoring successful but timedout connection to %s:%s", + req.address, + req.port, + ); + handle.close(); + return; + } + + const self = context.socket; + + // Some error occurred, add to the list of exceptions + if (status 
!== 0) { + const ex = _createConnectionError(req, status); + ArrayPrototypePush(context.errors, ex); + + self.emit( + "connectionAttemptFailed", + req.address, + req.port, + req.addressType, + ex, + ); + + // Try the next address, unless we were aborted + if (context.socket.connecting) { + _internalConnectMultiple(context, status === UV_ECANCELED); + } + + return; + } + + _afterConnect(status, self._handle, req, readable, writable); +} + +function _internalConnectMultipleTimeout(context, req, handle) { + debug( + "connect/multiple: connection to %s:%s timed out", + req.address, + req.port, + ); + context.socket.emit( + "connectionAttemptTimeout", + req.address, + req.port, + req.addressType, + ); + + req.oncomplete = undefined; + ArrayPrototypePush(context.errors, _createConnectionError(req, UV_ETIMEDOUT)); + handle.close(); + + // Try the next address, unless we were aborted + if (context.socket.connecting) { + _internalConnectMultiple(context); + } +} + function _checkBindError(err: number, port: number, handle: TCP) { // EADDRINUSE may not be reported until we call `listen()` or `connect()`. // To complicate matters, a failed `bind()` followed by `listen()` or `connect()` @@ -495,6 +611,131 @@ function _internalConnect( } } +function _internalConnectMultiple(context, canceled?: boolean) { + clearTimeout(context[kTimeout]); + const self = context.socket; + + // We were requested to abort. 
Stop all operations + if (self._aborted) { + return; + } + + // All connections have been tried without success, destroy with error + if (canceled || context.current === context.addresses.length) { + if (context.errors.length === 0) { + self.destroy(new ERR_SOCKET_CONNECTION_TIMEOUT()); + return; + } + + self.destroy(new NodeAggregateError(context.errors)); + return; + } + + assert(self.connecting); + + const current = context.current++; + + if (current > 0) { + self[kReinitializeHandle](new TCP(TCPConstants.SOCKET)); + } + + const { localPort, port, flags } = context; + const { address, family: addressType } = context.addresses[current]; + let localAddress; + let err; + + if (localPort) { + if (addressType === 4) { + localAddress = DEFAULT_IPV4_ADDR; + err = self._handle.bind(localAddress, localPort); + } else { // addressType === 6 + localAddress = DEFAULT_IPV6_ADDR; + err = self._handle.bind6(localAddress, localPort, flags); + } + + debug( + "connect/multiple: binding to localAddress: %s and localPort: %d (addressType: %d)", + localAddress, + localPort, + addressType, + ); + + err = _checkBindError(err, localPort, self._handle); + if (err) { + ArrayPrototypePush( + context.errors, + exceptionWithHostPort(err, "bind", localAddress, localPort), + ); + _internalConnectMultiple(context); + return; + } + } + + debug( + "connect/multiple: attempting to connect to %s:%d (addressType: %d)", + address, + port, + addressType, + ); + self.emit("connectionAttempt", address, port, addressType); + + const req = new TCPConnectWrap(); + req.oncomplete = FunctionPrototypeBind( + _afterConnectMultiple, + undefined, + context, + current, + ); + req.address = address; + req.port = port; + req.localAddress = localAddress; + req.localPort = localPort; + req.addressType = addressType; + + ArrayPrototypePush( + self.autoSelectFamilyAttemptedAddresses, + `${address}:${port}`, + ); + + if (addressType === 4) { + err = self._handle.connect(req, address, port); + } else { + err = 
self._handle.connect6(req, address, port); + } + + if (err) { + const sockname = self._getsockname(); + let details; + + if (sockname) { + details = sockname.address + ":" + sockname.port; + } + + const ex = exceptionWithHostPort(err, "connect", address, port, details); + ArrayPrototypePush(context.errors, ex); + + self.emit("connectionAttemptFailed", address, port, addressType, ex); + _internalConnectMultiple(context); + return; + } + + if (current < context.addresses.length - 1) { + debug( + "connect/multiple: setting the attempt timeout to %d ms", + context.timeout, + ); + + // If the attempt has not returned an error, start the connection timer + context[kTimeout] = setTimeout( + _internalConnectMultipleTimeout, + context.timeout, + context, + req, + self._handle, + ); + } +} + // Provide a better error message when we call end() as a result // of the other side sending a FIN. The standard "write after end" // is overly vague, and makes it seem like the user's code is to blame. @@ -597,7 +838,7 @@ function _lookupAndConnect( ) { const { localAddress, localPort } = options; const host = options.host || "localhost"; - let { port } = options; + let { port, autoSelectFamilyAttemptTimeout, autoSelectFamily } = options; if (localAddress && !isIP(localAddress)) { throw new ERR_INVALID_IP_ADDRESS(localAddress); @@ -621,6 +862,22 @@ function _lookupAndConnect( port |= 0; + if (autoSelectFamily != null) { + validateBoolean(autoSelectFamily, "options.autoSelectFamily"); + } else { + autoSelectFamily = autoSelectFamilyDefault; + } + + if (autoSelectFamilyAttemptTimeout !== undefined) { + validateInt32(autoSelectFamilyAttemptTimeout); + + if (autoSelectFamilyAttemptTimeout < 10) { + autoSelectFamilyAttemptTimeout = 10; + } + } else { + autoSelectFamilyAttemptTimeout = autoSelectFamilyAttemptTimeoutDefault; + } + // If host is an IP, skip performing a lookup const addressType = isIP(host); if (addressType) { @@ -649,6 +906,7 @@ function _lookupAndConnect( const dnsOpts = { 
family: options.family, hints: options.hints || 0, + all: false, }; if ( @@ -665,6 +923,31 @@ function _lookupAndConnect( self._host = host; const lookup = options.lookup || dnsLookup; + if ( + dnsOpts.family !== 4 && dnsOpts.family !== 6 && !localAddress && + autoSelectFamily + ) { + debug("connect: autodetecting"); + + dnsOpts.all = true; + defaultTriggerAsyncIdScope(self[asyncIdSymbol], function () { + _lookupAndConnectMultiple( + self, + asyncIdSymbol, + lookup, + host, + options, + dnsOpts, + port, + localAddress, + localPort, + autoSelectFamilyAttemptTimeout, + ); + }); + + return; + } + defaultTriggerAsyncIdScope(self[asyncIdSymbol], function () { lookup( host, @@ -719,6 +1002,143 @@ function _lookupAndConnect( }); } +function _lookupAndConnectMultiple( + self: Socket, + asyncIdSymbol: number, + // deno-lint-ignore no-explicit-any + lookup: any, + host: string, + options: TcpSocketConnectOptions, + dnsopts, + port: number, + localAddress: string, + localPort: number, + timeout: number | undefined, +) { + defaultTriggerAsyncIdScope(self[asyncIdSymbol], function emitLookup() { + lookup(host, dnsopts, function emitLookup(err, addresses) { + // It's possible we were destroyed while looking this up. + // XXX it would be great if we could cancel the promise returned by + // the look up. + if (!self.connecting) { + return; + } else if (err) { + self.emit("lookup", err, undefined, undefined, host); + + // net.createConnection() creates a net.Socket object and immediately + // calls net.Socket.connect() on it (that's us). There are no event + // listeners registered yet so defer the error event to the next tick. + nextTick(_connectErrorNT, self, err); + return; + } + + // Filter addresses by only keeping the one which are either IPv4 or IPV6. + // The first valid address determines which group has preference on the + // alternate family sorting which happens later. 
+ const validAddresses = [[], []]; + const validIps = [[], []]; + let destinations; + for (let i = 0, l = addresses.length; i < l; i++) { + const address = addresses[i]; + const { address: ip, family: addressType } = address; + self.emit("lookup", err, ip, addressType, host); + // It's possible we were destroyed while looking this up. + if (!self.connecting) { + return; + } + if (isIP(ip) && (addressType === 4 || addressType === 6)) { + destinations ||= addressType === 6 ? { 6: 0, 4: 1 } : { 4: 0, 6: 1 }; + + const destination = destinations[addressType]; + + // Only try an address once + if (!ArrayPrototypeIncludes(validIps[destination], ip)) { + ArrayPrototypePush(validAddresses[destination], address); + ArrayPrototypePush(validIps[destination], ip); + } + } + } + + // When no AAAA or A records are available, fail on the first one + if (!validAddresses[0].length && !validAddresses[1].length) { + const { address: firstIp, family: firstAddressType } = addresses[0]; + + if (!isIP(firstIp)) { + err = new ERR_INVALID_IP_ADDRESS(firstIp); + nextTick(_connectErrorNT, self, err); + } else if (firstAddressType !== 4 && firstAddressType !== 6) { + err = new ERR_INVALID_ADDRESS_FAMILY( + firstAddressType, + options.host, + options.port, + ); + nextTick(_connectErrorNT, self, err); + } + + return; + } + + // Sort addresses alternating families + const toAttempt = []; + for ( + let i = 0, + l = MathMax(validAddresses[0].length, validAddresses[1].length); + i < l; + i++ + ) { + if (i in validAddresses[0]) { + ArrayPrototypePush(toAttempt, validAddresses[0][i]); + } + if (i in validAddresses[1]) { + ArrayPrototypePush(toAttempt, validAddresses[1][i]); + } + } + + if (toAttempt.length === 1) { + debug( + "connect/multiple: only one address found, switching back to single connection", + ); + const { address: ip, family: addressType } = toAttempt[0]; + + self._unrefTimer(); + defaultTriggerAsyncIdScope( + self[asyncIdSymbol], + _internalConnect, + self, + ip, + port, + 
addressType, + localAddress, + localPort, + ); + + return; + } + + self.autoSelectFamilyAttemptedAddresses = []; + debug("connect/multiple: will try the following addresses", toAttempt); + + const context = { + socket: self, + addresses: toAttempt, + current: 0, + port, + localPort, + timeout, + [kTimeout]: null, + errors: [], + }; + + self._unrefTimer(); + defaultTriggerAsyncIdScope( + self[asyncIdSymbol], + _internalConnectMultiple, + context, + ); + }); + }); +} + function _afterShutdown(this: ShutdownWrap) { // deno-lint-ignore no-explicit-any const self: any = this.handle[ownerSymbol]; @@ -777,6 +1197,7 @@ export class Socket extends Duplex { _host: string | null = null; // deno-lint-ignore no-explicit-any _parent: any = null; + autoSelectFamilyAttemptedAddresses: AddressInfo[] | undefined = undefined; constructor(options: SocketOptions | number) { if (typeof options === "number") { @@ -1546,6 +1967,16 @@ export class Socket extends Duplex { set _handle(v: Handle | null) { this[kHandle] = v; } + + // deno-lint-ignore no-explicit-any + [kReinitializeHandle](handle: any) { + this._handle?.close(); + + this._handle = handle; + this._handle[ownerSymbol] = this; + + _initSocketHandle(this); + } } export const Stream = Socket; @@ -1593,6 +2024,33 @@ export function connect(...args: unknown[]) { export const createConnection = connect; +/** https://docs.deno.com/api/node/net/#namespace_getdefaultautoselectfamily */ +export function getDefaultAutoSelectFamily() { + return autoSelectFamilyDefault; +} + +/** https://docs.deno.com/api/node/net/#namespace_setdefaultautoselectfamily */ +export function setDefaultAutoSelectFamily(value: boolean) { + validateBoolean(value, "value"); + autoSelectFamilyDefault = value; +} + +/** https://docs.deno.com/api/node/net/#namespace_getdefaultautoselectfamilyattempttimeout */ +export function getDefaultAutoSelectFamilyAttemptTimeout() { + return autoSelectFamilyAttemptTimeoutDefault; +} + +/** 
https://docs.deno.com/api/node/net/#namespace_setdefaultautoselectfamilyattempttimeout */ +export function setDefaultAutoSelectFamilyAttemptTimeout(value: number) { + validateInt32(value, "value", 1); + + if (value < 10) { + value = 10; + } + + autoSelectFamilyAttemptTimeoutDefault = value; +} + export interface ListenOptions extends Abortable { fd?: number; port?: number | undefined; @@ -2478,15 +2936,19 @@ export { BlockList, isIP, isIPv4, isIPv6, SocketAddress }; export default { _createServerHandle, _normalizeArgs, - isIP, - isIPv4, - isIPv6, BlockList, - SocketAddress, connect, createConnection, createServer, + getDefaultAutoSelectFamily, + getDefaultAutoSelectFamilyAttemptTimeout, + isIP, + isIPv4, + isIPv6, Server, + setDefaultAutoSelectFamily, + setDefaultAutoSelectFamilyAttemptTimeout, Socket, + SocketAddress, Stream, }; -- cgit v1.2.3 From 3b99f6833cd3354da20785fdcf01e7409e610175 Mon Sep 17 00:00:00 2001 From: Divy Srivastava Date: Tue, 12 Nov 2024 17:10:07 +0530 Subject: fix(ext/websocket): initialize `error` attribute of WebSocket ErrorEvent (#26796) Fixes https://github.com/denoland/deno/issues/26216 Not required by the spec but Discord.js depends on it, see https://github.com/denoland/deno/issues/26216#issuecomment-2466060306 --- ext/websocket/01_websocket.js | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'ext') diff --git a/ext/websocket/01_websocket.js b/ext/websocket/01_websocket.js index 58f477310..468999b95 100644 --- a/ext/websocket/01_websocket.js +++ b/ext/websocket/01_websocket.js @@ -28,6 +28,7 @@ const { ArrayPrototypePush, ArrayPrototypeShift, ArrayPrototypeSome, + Error, ErrorPrototypeToString, ObjectDefineProperties, ObjectPrototypeIsPrototypeOf, @@ -488,8 +489,11 @@ class WebSocket extends EventTarget { /* error */ this[_readyState] = CLOSED; + const message = op_ws_get_error(rid); + const error = new Error(message); const errorEv = new ErrorEvent("error", { - message: op_ws_get_error(rid), + error, + message, }); 
this.dispatchEvent(errorEv); -- cgit v1.2.3 From 01f3451869f56dc0651d8cd30bf8540aeb462c04 Mon Sep 17 00:00:00 2001 From: Richard Carson Date: Tue, 12 Nov 2024 12:49:49 -0500 Subject: chore: make fields public on `PermissionDeniedError` and `deno_kv::KvConfig` (#26798) A few small changes to avoid needing unsafe mem transmutes to instantiate the extensions --------- Signed-off-by: Richard Carson --- ext/kv/config.rs | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) (limited to 'ext') diff --git a/ext/kv/config.rs b/ext/kv/config.rs index 6e2e2c3a1..7166bcbcc 100644 --- a/ext/kv/config.rs +++ b/ext/kv/config.rs @@ -1,16 +1,17 @@ // Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. +#[derive(Clone, Copy, Debug)] pub struct KvConfig { - pub(crate) max_write_key_size_bytes: usize, - pub(crate) max_read_key_size_bytes: usize, - pub(crate) max_value_size_bytes: usize, - pub(crate) max_read_ranges: usize, - pub(crate) max_read_entries: usize, - pub(crate) max_checks: usize, - pub(crate) max_mutations: usize, - pub(crate) max_watched_keys: usize, - pub(crate) max_total_mutation_size_bytes: usize, - pub(crate) max_total_key_size_bytes: usize, + pub max_write_key_size_bytes: usize, + pub max_read_key_size_bytes: usize, + pub max_value_size_bytes: usize, + pub max_read_ranges: usize, + pub max_read_entries: usize, + pub max_checks: usize, + pub max_mutations: usize, + pub max_watched_keys: usize, + pub max_total_mutation_size_bytes: usize, + pub max_total_key_size_bytes: usize, } impl KvConfig { -- cgit v1.2.3 From 43812ee8ff0eb2584c7beb18639da14d96d06817 Mon Sep 17 00:00:00 2001 From: Divy Srivastava Date: Wed, 13 Nov 2024 08:02:09 +0530 Subject: fix(ext/node): process.getBuiltinModule (#26833) Closes https://github.com/denoland/deno/issues/26832 --- ext/node/polyfills/01_require.js | 20 +++++++++++++++++++- ext/node/polyfills/process.ts | 14 ++++++++++++-- 2 files changed, 31 insertions(+), 3 deletions(-) (limited to 'ext') 
diff --git a/ext/node/polyfills/01_require.js b/ext/node/polyfills/01_require.js index d818bb572..0d267ca44 100644 --- a/ext/node/polyfills/01_require.js +++ b/ext/node/polyfills/01_require.js @@ -1233,6 +1233,24 @@ function isBuiltin(moduleName) { !StringPrototypeStartsWith(moduleName, "internal/"); } +function getBuiltinModule(id) { + if (!isBuiltin(id)) { + return undefined; + } + + if (StringPrototypeStartsWith(id, "node:")) { + // Slice 'node:' prefix + id = StringPrototypeSlice(id, 5); + } + + const mod = loadNativeModule(id, id); + if (mod) { + return mod.exports; + } + + return undefined; +} + Module.isBuiltin = isBuiltin; Module.createRequire = createRequire; @@ -1327,7 +1345,7 @@ export function register(_specifier, _parentUrl, _options) { return undefined; } -export { builtinModules, createRequire, isBuiltin, Module }; +export { builtinModules, createRequire, getBuiltinModule, isBuiltin, Module }; export const _cache = Module._cache; export const _extensions = Module._extensions; export const _findPath = Module._findPath; diff --git a/ext/node/polyfills/process.ts b/ext/node/polyfills/process.ts index 2605fa6d1..bf626e410 100644 --- a/ext/node/polyfills/process.ts +++ b/ext/node/polyfills/process.ts @@ -15,7 +15,7 @@ import { import { warnNotImplemented } from "ext:deno_node/_utils.ts"; import { EventEmitter } from "node:events"; -import Module from "node:module"; +import Module, { getBuiltinModule } from "node:module"; import { report } from "ext:deno_node/internal/process/report.ts"; import { validateString } from "ext:deno_node/internal/validators.mjs"; import { @@ -38,7 +38,15 @@ import { versions, } from "ext:deno_node/_process/process.ts"; import { _exiting } from "ext:deno_node/_process/exiting.ts"; -export { _nextTick as nextTick, chdir, cwd, env, version, versions }; +export { + _nextTick as nextTick, + chdir, + cwd, + env, + getBuiltinModule, + version, + versions, +}; import { createWritableStdioStream, initStdin, @@ -728,6 +736,8 @@ 
Process.prototype.getegid = getegid; /** This method is removed on Windows */ Process.prototype.geteuid = geteuid; +Process.prototype.getBuiltinModule = getBuiltinModule; + // TODO(kt3k): Implement this when we added -e option to node compat mode Process.prototype._eval = undefined; -- cgit v1.2.3 From 7becd83a3828b35331d0fcb82c64146e520f154b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Czerniawski?= <33061335+lczerniawski@users.noreply.github.com> Date: Wed, 13 Nov 2024 05:35:04 +0100 Subject: feat(ext/fs): add ctime to Deno.stats and use it in node compat layer (#24801) This PR fixes #24453, by introducing a ctime (using ctime for UNIX and ChangeTime for Windows) to Deno.stats. Co-authored-by: Yoshiya Hinosawa --- ext/fs/30_fs.js | 6 ++++-- ext/fs/in_memory_fs.rs | 2 ++ ext/fs/ops.rs | 4 ++++ ext/fs/std_fs.rs | 34 ++++++++++++++++++++++++++++++---- ext/io/fs.rs | 12 ++++++++++++ ext/node/polyfills/_fs/_fs_stat.ts | 10 +++++----- 6 files changed, 57 insertions(+), 11 deletions(-) (limited to 'ext') diff --git a/ext/fs/30_fs.js b/ext/fs/30_fs.js index c8e19ac75..40513e7e0 100644 --- a/ext/fs/30_fs.js +++ b/ext/fs/30_fs.js @@ -346,9 +346,10 @@ const { 0: statStruct, 1: statBuf } = createByteStruct({ mtime: "date", atime: "date", birthtime: "date", + ctime: "date", dev: "u64", ino: "?u64", - mode: "?u64", + mode: "u64", nlink: "?u64", uid: "?u64", gid: "?u64", @@ -377,9 +378,10 @@ function parseFileInfo(response) { birthtime: response.birthtimeSet === true ? new Date(response.birthtime) : null, + ctime: response.ctimeSet === true ? new Date(response.ctime) : null, dev: response.dev, + mode: response.mode, ino: unix ? response.ino : null, - mode: unix ? response.mode : null, nlink: unix ? response.nlink : null, uid: unix ? response.uid : null, gid: unix ? 
response.gid : null, diff --git a/ext/fs/in_memory_fs.rs b/ext/fs/in_memory_fs.rs index e29b9d50c..34b77836d 100644 --- a/ext/fs/in_memory_fs.rs +++ b/ext/fs/in_memory_fs.rs @@ -229,6 +229,7 @@ impl FileSystem for InMemoryFs { mtime: None, atime: None, birthtime: None, + ctime: None, dev: 0, ino: 0, mode: 0, @@ -251,6 +252,7 @@ impl FileSystem for InMemoryFs { mtime: None, atime: None, birthtime: None, + ctime: None, dev: 0, ino: 0, mode: 0, diff --git a/ext/fs/ops.rs b/ext/fs/ops.rs index 9b76b49e6..3d0d96ce6 100644 --- a/ext/fs/ops.rs +++ b/ext/fs/ops.rs @@ -1795,6 +1795,8 @@ create_struct_writer! { atime: u64, birthtime_set: bool, birthtime: u64, + ctime_set: bool, + ctime: u64, // Following are only valid under Unix. dev: u64, ino: u64, @@ -1826,6 +1828,8 @@ impl From for SerializableStat { atime: stat.atime.unwrap_or(0), birthtime_set: stat.birthtime.is_some(), birthtime: stat.birthtime.unwrap_or(0), + ctime_set: stat.ctime.is_some(), + ctime: stat.ctime.unwrap_or(0), dev: stat.dev, ino: stat.ino, diff --git a/ext/fs/std_fs.rs b/ext/fs/std_fs.rs index 1a83c97c5..73439d9ba 100644 --- a/ext/fs/std_fs.rs +++ b/ext/fs/std_fs.rs @@ -821,24 +821,46 @@ fn stat_extra( Ok(info.dwVolumeSerialNumber as u64) } + const WINDOWS_TICK: i64 = 10_000; // 100-nanosecond intervals in a millisecond + const SEC_TO_UNIX_EPOCH: i64 = 11_644_473_600; // Seconds between Windows epoch and Unix epoch + + fn windows_time_to_unix_time_msec(windows_time: &i64) -> i64 { + let milliseconds_since_windows_epoch = windows_time / WINDOWS_TICK; + milliseconds_since_windows_epoch - SEC_TO_UNIX_EPOCH * 1000 + } + use windows_sys::Wdk::Storage::FileSystem::FILE_ALL_INFORMATION; + use windows_sys::Win32::Foundation::NTSTATUS; unsafe fn query_file_information( handle: winapi::shared::ntdef::HANDLE, - ) -> std::io::Result { + ) -> Result { use windows_sys::Wdk::Storage::FileSystem::NtQueryInformationFile; + use windows_sys::Win32::Foundation::RtlNtStatusToDosError; + use 
windows_sys::Win32::Foundation::ERROR_MORE_DATA; + use windows_sys::Win32::System::IO::IO_STATUS_BLOCK; let mut info = std::mem::MaybeUninit::::zeroed(); + let mut io_status_block = + std::mem::MaybeUninit::::zeroed(); let status = NtQueryInformationFile( handle as _, - std::ptr::null_mut(), + io_status_block.as_mut_ptr(), info.as_mut_ptr() as *mut _, std::mem::size_of::() as _, 18, /* FileAllInformation */ ); if status < 0 { - return Err(std::io::Error::last_os_error()); + let converted_status = RtlNtStatusToDosError(status); + + // If error more data is returned, then it means that the buffer is too small to get full filename information + // to have that we should retry. However, since we only use BasicInformation and StandardInformation, it is fine to ignore it + // since struct is populated with other data anyway. + // https://learn.microsoft.com/en-us/windows-hardware/drivers/ddi/ntifs/nf-ntifs-ntqueryinformationfile#remarksdd + if converted_status != ERROR_MORE_DATA { + return Err(converted_status as NTSTATUS); + } } Ok(info.assume_init()) @@ -862,10 +884,13 @@ fn stat_extra( } let result = get_dev(file_handle); - CloseHandle(file_handle); fsstat.dev = result?; if let Ok(file_info) = query_file_information(file_handle) { + fsstat.ctime = Some(windows_time_to_unix_time_msec( + &file_info.BasicInformation.ChangeTime, + ) as u64); + if file_info.BasicInformation.FileAttributes & winapi::um::winnt::FILE_ATTRIBUTE_REPARSE_POINT != 0 @@ -898,6 +923,7 @@ fn stat_extra( } } + CloseHandle(file_handle); Ok(()) } } diff --git a/ext/io/fs.rs b/ext/io/fs.rs index 885426520..7ef02315b 100644 --- a/ext/io/fs.rs +++ b/ext/io/fs.rs @@ -94,6 +94,7 @@ pub struct FsStat { pub mtime: Option, pub atime: Option, pub birthtime: Option, + pub ctime: Option, pub dev: u64, pub ino: u64, @@ -153,6 +154,16 @@ impl FsStat { } } + #[inline(always)] + fn get_ctime(ctime_or_0: i64) -> Option { + if ctime_or_0 > 0 { + // ctime return seconds since epoch, but we need milliseconds + return 
Some(ctime_or_0 as u64 * 1000); + } + + None + } + Self { is_file: metadata.is_file(), is_directory: metadata.is_dir(), @@ -162,6 +173,7 @@ impl FsStat { mtime: to_msec(metadata.modified()), atime: to_msec(metadata.accessed()), birthtime: to_msec(metadata.created()), + ctime: get_ctime(unix_or_zero!(ctime)), dev: unix_or_zero!(dev), ino: unix_or_zero!(ino), diff --git a/ext/node/polyfills/_fs/_fs_stat.ts b/ext/node/polyfills/_fs/_fs_stat.ts index d00c81ffb..507cb05ea 100644 --- a/ext/node/polyfills/_fs/_fs_stat.ts +++ b/ext/node/polyfills/_fs/_fs_stat.ts @@ -290,8 +290,8 @@ export function convertFileInfoToStats(origin: Deno.FileInfo): Stats { isFIFO: () => false, isCharacterDevice: () => false, isSocket: () => false, - ctime: origin.mtime, - ctimeMs: origin.mtime?.getTime() || null, + ctime: origin.ctime, + ctimeMs: origin.ctime?.getTime() || null, }); return stats; @@ -336,9 +336,9 @@ export function convertFileInfoToBigIntStats( isFIFO: () => false, isCharacterDevice: () => false, isSocket: () => false, - ctime: origin.mtime, - ctimeMs: origin.mtime ? BigInt(origin.mtime.getTime()) : null, - ctimeNs: origin.mtime ? BigInt(origin.mtime.getTime()) * 1000000n : null, + ctime: origin.ctime, + ctimeMs: origin.ctime ? BigInt(origin.ctime.getTime()) : null, + ctimeNs: origin.ctime ? BigInt(origin.ctime.getTime()) * 1000000n : null, }); return stats; } -- cgit v1.2.3 From aa546189be730163ee5370029e4dfdb3b454ab96 Mon Sep 17 00:00:00 2001 From: snek Date: Wed, 13 Nov 2024 11:38:46 +0100 Subject: feat: OpenTelemetry Tracing API and Exporting (#26710) Initial import of OTEL code supporting tracing. Metrics soon to come. Implements APIs for https://jsr.io/@deno/otel so that code using OpenTelemetry.js just works tm. There is still a lot of work to do with configuration and adding built-in tracing to core APIs, which will come in followup PRs. 
--------- Co-authored-by: Luca Casonato --- ext/http/00_serve.ts | 137 ++++++++++++++++++++++++++++----------------------- 1 file changed, 75 insertions(+), 62 deletions(-) (limited to 'ext') diff --git a/ext/http/00_serve.ts b/ext/http/00_serve.ts index 7bf83e49c..fcdb87d09 100644 --- a/ext/http/00_serve.ts +++ b/ext/http/00_serve.ts @@ -42,6 +42,10 @@ const { Uint8Array, Promise, } = primordials; +const { + getAsyncContext, + setAsyncContext, +} = core; import { InnerBody } from "ext:deno_fetch/22_body.js"; import { Event } from "ext:deno_web/02_event.js"; @@ -397,8 +401,10 @@ class CallbackContext { /** @type {Promise | undefined} */ closing; listener; + asyncContext; constructor(signal, args, listener) { + this.asyncContext = getAsyncContext(); // The abort signal triggers a non-graceful shutdown signal?.addEventListener( "abort", @@ -508,82 +514,89 @@ function fastSyncResponseOrStream( */ function mapToCallback(context, callback, onError) { return async function (req) { - // Get the response from the user-provided callback. If that fails, use onError. If that fails, return a fallback - // 500 error. - let innerRequest; - let response; - try { - innerRequest = new InnerRequest(req, context); - const request = fromInnerRequest(innerRequest, "immutable"); - innerRequest.request = request; - response = await callback( - request, - new ServeHandlerInfo(innerRequest), - ); + const asyncContext = getAsyncContext(); + setAsyncContext(context.asyncContext); - // Throwing Error if the handler return value is not a Response class - if (!ObjectPrototypeIsPrototypeOf(ResponsePrototype, response)) { - throw new TypeError( - "Return value from serve handler must be a response or a promise resolving to a response", - ); - } - - if (response.type === "error") { - throw new TypeError( - "Return value from serve handler must not be an error response (like Response.error())", + try { + // Get the response from the user-provided callback. If that fails, use onError. 
If that fails, return a fallback + // 500 error. + let innerRequest; + let response; + try { + innerRequest = new InnerRequest(req, context); + const request = fromInnerRequest(innerRequest, "immutable"); + innerRequest.request = request; + response = await callback( + request, + new ServeHandlerInfo(innerRequest), ); - } - if (response.bodyUsed) { - throw new TypeError( - "The body of the Response returned from the serve handler has already been consumed", - ); - } - } catch (error) { - try { - response = await onError(error); + // Throwing Error if the handler return value is not a Response class if (!ObjectPrototypeIsPrototypeOf(ResponsePrototype, response)) { throw new TypeError( - "Return value from onError handler must be a response or a promise resolving to a response", + "Return value from serve handler must be a response or a promise resolving to a response", + ); + } + + if (response.type === "error") { + throw new TypeError( + "Return value from serve handler must not be an error response (like Response.error())", + ); + } + + if (response.bodyUsed) { + throw new TypeError( + "The body of the Response returned from the serve handler has already been consumed", ); } } catch (error) { - // deno-lint-ignore no-console - console.error("Exception in onError while handling exception", error); - response = internalServerError(); + try { + response = await onError(error); + if (!ObjectPrototypeIsPrototypeOf(ResponsePrototype, response)) { + throw new TypeError( + "Return value from onError handler must be a response or a promise resolving to a response", + ); + } + } catch (error) { + // deno-lint-ignore no-console + console.error("Exception in onError while handling exception", error); + response = internalServerError(); + } } - } - const inner = toInnerResponse(response); - if (innerRequest?.[_upgraded]) { - // We're done here as the connection has been upgraded during the callback and no longer requires servicing. 
- if (response !== UPGRADE_RESPONSE_SENTINEL) { - // deno-lint-ignore no-console - console.error("Upgrade response was not returned from callback"); - context.close(); + const inner = toInnerResponse(response); + if (innerRequest?.[_upgraded]) { + // We're done here as the connection has been upgraded during the callback and no longer requires servicing. + if (response !== UPGRADE_RESPONSE_SENTINEL) { + // deno-lint-ignore no-console + console.error("Upgrade response was not returned from callback"); + context.close(); + } + innerRequest?.[_upgraded](); + return; } - innerRequest?.[_upgraded](); - return; - } - // Did everything shut down while we were waiting? - if (context.closed) { - // We're shutting down, so this status shouldn't make it back to the client but "Service Unavailable" seems appropriate - innerRequest?.close(); - op_http_set_promise_complete(req, 503); - return; - } + // Did everything shut down while we were waiting? + if (context.closed) { + // We're shutting down, so this status shouldn't make it back to the client but "Service Unavailable" seems appropriate + innerRequest?.close(); + op_http_set_promise_complete(req, 503); + return; + } - const status = inner.status; - const headers = inner.headerList; - if (headers && headers.length > 0) { - if (headers.length == 1) { - op_http_set_response_header(req, headers[0][0], headers[0][1]); - } else { - op_http_set_response_headers(req, headers); + const status = inner.status; + const headers = inner.headerList; + if (headers && headers.length > 0) { + if (headers.length == 1) { + op_http_set_response_header(req, headers[0][0], headers[0][1]); + } else { + op_http_set_response_headers(req, headers); + } } - } - fastSyncResponseOrStream(req, inner.body, status, innerRequest); + fastSyncResponseOrStream(req, inner.body, status, innerRequest); + } finally { + setAsyncContext(asyncContext); + } }; } -- cgit v1.2.3 From 7d9ba09f5a4464072476b8992e43f5e5c30bde3a Mon Sep 17 00:00:00 2001 From: Divy 
Srivastava Date: Wed, 13 Nov 2024 19:47:01 +0530 Subject: fix(ext/node): use ERR_NOT_IMPLEMENTED for notImplemented (#26853) --- ext/node/polyfills/_utils.ts | 4 ++-- ext/node/polyfills/internal/errors.ts | 10 ++++++++++ 2 files changed, 12 insertions(+), 2 deletions(-) (limited to 'ext') diff --git a/ext/node/polyfills/_utils.ts b/ext/node/polyfills/_utils.ts index b50c113e1..79d84e00f 100644 --- a/ext/node/polyfills/_utils.ts +++ b/ext/node/polyfills/_utils.ts @@ -17,6 +17,7 @@ const { import { TextDecoder, TextEncoder } from "ext:deno_web/08_text_encoding.js"; import { errorMap } from "ext:deno_node/internal_binding/uv.ts"; import { codes } from "ext:deno_node/internal/error_codes.ts"; +import { ERR_NOT_IMPLEMENTED } from "ext:deno_node/internal/errors.ts"; export type BinaryEncodings = "binary"; @@ -34,8 +35,7 @@ export type TextEncodings = export type Encodings = BinaryEncodings | TextEncodings; export function notImplemented(msg: string): never { - const message = msg ? `Not implemented: ${msg}` : "Not implemented"; - throw new Error(message); + throw new ERR_NOT_IMPLEMENTED(msg); } export function warnNotImplemented(msg?: string) { diff --git a/ext/node/polyfills/internal/errors.ts b/ext/node/polyfills/internal/errors.ts index 962ca86e9..61b53fa96 100644 --- a/ext/node/polyfills/internal/errors.ts +++ b/ext/node/polyfills/internal/errors.ts @@ -2390,6 +2390,15 @@ export class ERR_INVALID_RETURN_VALUE extends NodeTypeError { } } +export class ERR_NOT_IMPLEMENTED extends NodeError { + constructor(message?: string) { + super( + "ERR_NOT_IMPLEMENTED", + message ? 
`Not implemented: ${message}` : "Not implemented", + ); + } +} + export class ERR_INVALID_URL extends NodeTypeError { input: string; constructor(input: string) { @@ -2862,6 +2871,7 @@ export default { ERR_INVALID_SYNC_FORK_INPUT, ERR_INVALID_THIS, ERR_INVALID_TUPLE, + ERR_NOT_IMPLEMENTED, ERR_INVALID_URI, ERR_INVALID_URL, ERR_INVALID_URL_SCHEME, -- cgit v1.2.3 From 6a4c6d83bacf5f03628a494778a30bce970f7cbc Mon Sep 17 00:00:00 2001 From: Divy Srivastava Date: Wed, 13 Nov 2024 20:07:45 +0530 Subject: fix(ext/node): zlib.crc32() (#26856) Fixes https://github.com/denoland/deno/issues/26845 --- ext/node/lib.rs | 1 + ext/node/ops/zlib/mod.rs | 10 +++++++++ ext/node/polyfills/zlib.ts | 53 ++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 64 insertions(+) (limited to 'ext') diff --git a/ext/node/lib.rs b/ext/node/lib.rs index 9ca21e994..6d320b92c 100644 --- a/ext/node/lib.rs +++ b/ext/node/lib.rs @@ -345,6 +345,7 @@ deno_core::extension!(deno_node, ops::zlib::op_zlib_write, ops::zlib::op_zlib_init, ops::zlib::op_zlib_reset, + ops::zlib::op_zlib_crc32, ops::zlib::brotli::op_brotli_compress, ops::zlib::brotli::op_brotli_compress_async, ops::zlib::brotli::op_create_brotli_compress, diff --git a/ext/node/ops/zlib/mod.rs b/ext/node/ops/zlib/mod.rs index e75ef050d..991c0925d 100644 --- a/ext/node/ops/zlib/mod.rs +++ b/ext/node/ops/zlib/mod.rs @@ -1,6 +1,7 @@ // Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. use deno_core::op2; +use libc::c_ulong; use std::borrow::Cow; use std::cell::RefCell; use zlib::*; @@ -381,6 +382,15 @@ pub fn op_zlib_close_if_pending( Ok(()) } +#[op2(fast)] +#[smi] +pub fn op_zlib_crc32(#[buffer] data: &[u8], #[smi] value: u32) -> u32 { + // SAFETY: `data` is a valid buffer. 
+ unsafe { + zlib::crc32(value as c_ulong, data.as_ptr(), data.len() as u32) as u32 + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/ext/node/polyfills/zlib.ts b/ext/node/polyfills/zlib.ts index 3fe5f8bbd..6e5d02b5b 100644 --- a/ext/node/polyfills/zlib.ts +++ b/ext/node/polyfills/zlib.ts @@ -40,6 +40,58 @@ import { createBrotliCompress, createBrotliDecompress, } from "ext:deno_node/_brotli.js"; +import { ERR_INVALID_ARG_TYPE } from "ext:deno_node/internal/errors.ts"; +import { validateUint32 } from "ext:deno_node/internal/validators.mjs"; +import { op_zlib_crc32 } from "ext:core/ops"; +import { core, primordials } from "ext:core/mod.js"; +import { TextEncoder } from "ext:deno_web/08_text_encoding.js"; +const { + Uint8Array, + TypedArrayPrototypeGetBuffer, + TypedArrayPrototypeGetByteLength, + TypedArrayPrototypeGetByteOffset, + DataViewPrototypeGetBuffer, + DataViewPrototypeGetByteLength, + DataViewPrototypeGetByteOffset, +} = primordials; +const { isTypedArray, isDataView } = core; + +const enc = new TextEncoder(); +const toU8 = (input) => { + if (typeof input === "string") { + return enc.encode(input); + } + + if (isTypedArray(input)) { + return new Uint8Array( + TypedArrayPrototypeGetBuffer(input), + TypedArrayPrototypeGetByteOffset(input), + TypedArrayPrototypeGetByteLength(input), + ); + } else if (isDataView(input)) { + return new Uint8Array( + DataViewPrototypeGetBuffer(input), + DataViewPrototypeGetByteOffset(input), + DataViewPrototypeGetByteLength(input), + ); + } + + return input; +}; + +export function crc32(data, value = 0) { + if (typeof data !== "string" && !isArrayBufferView(data)) { + throw new ERR_INVALID_ARG_TYPE("data", [ + "Buffer", + "TypedArray", + "DataView", + "string", + ], data); + } + validateUint32(value, "value"); + + return op_zlib_crc32(toU8(data), value); +} export class Options { constructor() { @@ -87,6 +139,7 @@ export default { BrotliOptions, codes, constants, + crc32, createBrotliCompress, createBrotliDecompress, 
createDeflate, -- cgit v1.2.3 From f091d1ad69b4e5217ae3272b641171781a372c4f Mon Sep 17 00:00:00 2001 From: David Sherret Date: Wed, 13 Nov 2024 10:10:09 -0500 Subject: feat(node): stabilize detecting if CJS via `"type": "commonjs"` in a package.json (#26439) This will respect `"type": "commonjs"` in a package.json to determine if `.js`/`.jsx`/`.ts`/.tsx` files are CJS or ESM. If the file is found to be ESM it will be loaded as ESM though. --- ext/node/lib.rs | 7 ++++++- ext/node/ops/require.rs | 44 ++++++++++++++++++++-------------------- ext/node/polyfills/01_require.js | 31 ++++------------------------ ext/node/polyfills/process.ts | 2 +- 4 files changed, 33 insertions(+), 51 deletions(-) (limited to 'ext') diff --git a/ext/node/lib.rs b/ext/node/lib.rs index 6d320b92c..702c919f4 100644 --- a/ext/node/lib.rs +++ b/ext/node/lib.rs @@ -14,6 +14,7 @@ use deno_core::url::Url; #[allow(unused_imports)] use deno_core::v8; use deno_core::v8::ExternalReference; +use node_resolver::errors::ClosestPkgJsonError; use node_resolver::NpmResolverRc; use once_cell::sync::Lazy; @@ -157,6 +158,10 @@ pub trait NodeRequireLoader { ) -> Result, AnyError>; fn load_text_file_lossy(&self, path: &Path) -> Result; + + /// Get if the module kind is maybe CJS and loading should determine + /// if its CJS or ESM. + fn is_maybe_cjs(&self, specifier: &Url) -> Result; } pub static NODE_ENV_VAR_ALLOWLIST: Lazy> = Lazy::new(|| { @@ -385,6 +390,7 @@ deno_core::extension!(deno_node, ops::require::op_require_proxy_path, ops::require::op_require_is_deno_dir_package, ops::require::op_require_resolve_deno_dir, + ops::require::op_require_is_maybe_cjs, ops::require::op_require_is_request_relative, ops::require::op_require_resolve_lookup_paths, ops::require::op_require_try_self_parent_path

, @@ -398,7 +404,6 @@ deno_core::extension!(deno_node, ops::require::op_require_read_file

, ops::require::op_require_as_file_path, ops::require::op_require_resolve_exports

, - ops::require::op_require_read_closest_package_json

, ops::require::op_require_read_package_scope

, ops::require::op_require_package_imports_resolve

, ops::require::op_require_break_on_next_statement, diff --git a/ext/node/ops/require.rs b/ext/node/ops/require.rs index 30db8b629..b7fa8feb2 100644 --- a/ext/node/ops/require.rs +++ b/ext/node/ops/require.rs @@ -1,16 +1,18 @@ // Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. +use deno_core::error::AnyError; use deno_core::op2; use deno_core::url::Url; use deno_core::v8; use deno_core::JsRuntimeInspector; -use deno_core::ModuleSpecifier; use deno_core::OpState; use deno_fs::FileSystemRc; +use deno_package_json::NodeModuleKind; use deno_package_json::PackageJsonRc; use deno_path_util::normalize_path; +use deno_path_util::url_from_file_path; use deno_path_util::url_to_file_path; -use node_resolver::NodeModuleKind; +use node_resolver::errors::ClosestPkgJsonError; use node_resolver::NodeResolutionMode; use node_resolver::REQUIRE_CONDITIONS; use std::borrow::Cow; @@ -217,17 +219,17 @@ pub fn op_require_resolve_deno_dir( state: &mut OpState, #[string] request: String, #[string] parent_filename: String, -) -> Option { +) -> Result, AnyError> { let resolver = state.borrow::(); - resolver - .resolve_package_folder_from_package( - &request, - &ModuleSpecifier::from_file_path(&parent_filename).unwrap_or_else(|_| { - panic!("Url::from_file_path: [{:?}]", parent_filename) - }), - ) - .ok() - .map(|p| p.to_string_lossy().into_owned()) + Ok( + resolver + .resolve_package_folder_from_package( + &request, + &url_from_file_path(&PathBuf::from(parent_filename))?, + ) + .ok() + .map(|p| p.to_string_lossy().into_owned()), + ) } #[op2(fast)] @@ -564,19 +566,17 @@ where })) } -#[op2] -#[serde] -pub fn op_require_read_closest_package_json

( +#[op2(fast)] +pub fn op_require_is_maybe_cjs( state: &mut OpState, #[string] filename: String, -) -> Result, node_resolver::errors::ClosestPkgJsonError> -where - P: NodePermissions + 'static, -{ +) -> Result { let filename = PathBuf::from(filename); - // permissions: allow reading the closest package.json files - let pkg_json_resolver = state.borrow::(); - pkg_json_resolver.get_closest_package_json_from_path(&filename) + let Ok(url) = url_from_file_path(&filename) else { + return Ok(false); + }; + let loader = state.borrow::(); + loader.is_maybe_cjs(&url) } #[op2] diff --git a/ext/node/polyfills/01_require.js b/ext/node/polyfills/01_require.js index 0d267ca44..083d4e49b 100644 --- a/ext/node/polyfills/01_require.js +++ b/ext/node/polyfills/01_require.js @@ -11,6 +11,7 @@ import { op_require_can_parse_as_esm, op_require_init_paths, op_require_is_deno_dir_package, + op_require_is_maybe_cjs, op_require_is_request_relative, op_require_node_module_paths, op_require_package_imports_resolve, @@ -19,7 +20,6 @@ import { op_require_path_is_absolute, op_require_path_resolve, op_require_proxy_path, - op_require_read_closest_package_json, op_require_read_file, op_require_read_package_scope, op_require_real_path, @@ -1060,36 +1060,13 @@ Module.prototype._compile = function (content, filename, format) { return result; }; -Module._extensions[".js"] = function (module, filename) { - const content = op_require_read_file(filename); - - let format; - if (StringPrototypeEndsWith(filename, ".js")) { - const pkg = op_require_read_closest_package_json(filename); - if (pkg?.type === "module") { - format = "module"; - } else if (pkg?.type === "commonjs") { - format = "commonjs"; - } - } - - module._compile(content, filename, format); -}; - -Module._extensions[".ts"] = +Module._extensions[".js"] = + Module._extensions[".ts"] = Module._extensions[".jsx"] = Module._extensions[".tsx"] = function (module, filename) { const content = op_require_read_file(filename); - - let format; - const pkg 
= op_require_read_closest_package_json(filename); - if (pkg?.type === "module") { - format = "module"; - } else if (pkg?.type === "commonjs") { - format = "commonjs"; - } - + const format = op_require_is_maybe_cjs(filename) ? undefined : "module"; module._compile(content, filename, format); }; diff --git a/ext/node/polyfills/process.ts b/ext/node/polyfills/process.ts index bf626e410..647376d5c 100644 --- a/ext/node/polyfills/process.ts +++ b/ext/node/polyfills/process.ts @@ -919,7 +919,7 @@ Object.defineProperty(argv, "1", { if (Deno.mainModule?.startsWith("file:")) { return pathFromURL(new URL(Deno.mainModule)); } else { - return join(Deno.cwd(), "$deno$node.js"); + return join(Deno.cwd(), "$deno$node.mjs"); } }, }); -- cgit v1.2.3 From 4e899d48cffa95617266dd8f9aef54603a87ad82 Mon Sep 17 00:00:00 2001 From: snek Date: Thu, 14 Nov 2024 13:16:28 +0100 Subject: fix: otel resiliency (#26857) Improving the breadth of collected data, and ensuring that the collected data is more likely to be successfully reported. - Use `log` crate in more places - Hook up `log` crate to otel - Switch to process-wide otel processors - Handle places that use `process::exit` Also adds a more robust testing framework, with a deterministic tracing setting. 
Refs: https://github.com/denoland/deno/issues/26852 --- ext/napi/node_api.rs | 5 ++--- ext/napi/uv.rs | 3 +-- 2 files changed, 3 insertions(+), 5 deletions(-) (limited to 'ext') diff --git a/ext/napi/node_api.rs b/ext/napi/node_api.rs index 186ae42c4..2ca5c8d0b 100644 --- a/ext/napi/node_api.rs +++ b/ext/napi/node_api.rs @@ -140,7 +140,6 @@ fn napi_fatal_exception(env: &mut Env, err: napi_value) -> napi_status { } #[napi_sym] -#[allow(clippy::print_stderr)] fn napi_fatal_error( location: *const c_char, location_len: usize, @@ -173,9 +172,9 @@ fn napi_fatal_error( }; if let Some(location) = location { - eprintln!("NODE API FATAL ERROR: {} {}", location, message); + log::error!("NODE API FATAL ERROR: {} {}", location, message); } else { - eprintln!("NODE API FATAL ERROR: {}", message); + log::error!("NODE API FATAL ERROR: {}", message); } std::process::abort(); diff --git a/ext/napi/uv.rs b/ext/napi/uv.rs index 6f728a92b..ea6b53966 100644 --- a/ext/napi/uv.rs +++ b/ext/napi/uv.rs @@ -5,10 +5,9 @@ use deno_core::parking_lot::Mutex; use std::mem::MaybeUninit; use std::ptr::addr_of_mut; -#[allow(clippy::print_stderr)] fn assert_ok(res: c_int) -> c_int { if res != 0 { - eprintln!("bad result in uv polyfill: {res}"); + log::error!("bad result in uv polyfill: {res}"); // don't panic because that might unwind into // c/c++ std::process::abort(); -- cgit v1.2.3 From 617350e79c58b6e01984e3d7c7436d243d0e5cff Mon Sep 17 00:00:00 2001 From: David Sherret Date: Thu, 14 Nov 2024 15:24:25 -0500 Subject: refactor(resolver): move more resolution code into deno_resolver (#26873) Follow-up to cjs refactor. This moves most of the resolution code into the deno_resolver crate. Still pending is the npm resolution code. 
--- ext/node/lib.rs | 4 ++-- ext/node/ops/require.rs | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'ext') diff --git a/ext/node/lib.rs b/ext/node/lib.rs index 702c919f4..84f39552c 100644 --- a/ext/node/lib.rs +++ b/ext/node/lib.rs @@ -15,7 +15,7 @@ use deno_core::url::Url; use deno_core::v8; use deno_core::v8::ExternalReference; use node_resolver::errors::ClosestPkgJsonError; -use node_resolver::NpmResolverRc; +use node_resolver::NpmPackageFolderResolverRc; use once_cell::sync::Lazy; extern crate libz_sys as zlib; @@ -183,7 +183,7 @@ fn op_node_build_os() -> String { pub struct NodeExtInitServices { pub node_require_loader: NodeRequireLoaderRc, pub node_resolver: NodeResolverRc, - pub npm_resolver: NpmResolverRc, + pub npm_resolver: NpmPackageFolderResolverRc, pub pkg_json_resolver: PackageJsonResolverRc, } diff --git a/ext/node/ops/require.rs b/ext/node/ops/require.rs index b7fa8feb2..e381ee91d 100644 --- a/ext/node/ops/require.rs +++ b/ext/node/ops/require.rs @@ -24,7 +24,7 @@ use std::rc::Rc; use crate::NodePermissions; use crate::NodeRequireLoaderRc; use crate::NodeResolverRc; -use crate::NpmResolverRc; +use crate::NpmPackageFolderResolverRc; use crate::PackageJsonResolverRc; #[must_use = "the resolved return value to mitigate time-of-check to time-of-use issues"] @@ -220,7 +220,7 @@ pub fn op_require_resolve_deno_dir( #[string] request: String, #[string] parent_filename: String, ) -> Result, AnyError> { - let resolver = state.borrow::(); + let resolver = state.borrow::(); Ok( resolver .resolve_package_folder_from_package( -- cgit v1.2.3 From 032ae7fb19bd01c1de28515facd5c3b2ce821924 Mon Sep 17 00:00:00 2001 From: Sahand Akbarzadeh Date: Fri, 15 Nov 2024 14:14:11 +0330 Subject: feat(ext/fetch): allow embedders to use `hickory_dns_resolver` instead of default `GaiResolver` (#26740) Allows embedders to use `hickory-dns-resolver` instead of threaded "getaddrinfo" resolver in the `fetch()` implementation. 
--- ext/fetch/Cargo.toml | 1 + ext/fetch/dns.rs | 116 +++++++++++++++++++++++++++++++++++++++++++++++++++ ext/fetch/lib.rs | 20 ++++++++- ext/fetch/tests.rs | 79 ++++++++++++++++++++++++++++++++--- ext/kv/remote.rs | 1 + ext/net/Cargo.toml | 2 +- 6 files changed, 211 insertions(+), 8 deletions(-) create mode 100644 ext/fetch/dns.rs (limited to 'ext') diff --git a/ext/fetch/Cargo.toml b/ext/fetch/Cargo.toml index 56d416bbb..00c85f2aa 100644 --- a/ext/fetch/Cargo.toml +++ b/ext/fetch/Cargo.toml @@ -22,6 +22,7 @@ deno_permissions.workspace = true deno_tls.workspace = true dyn-clone = "1" error_reporter = "1" +hickory-resolver.workspace = true http.workspace = true http-body-util.workspace = true hyper.workspace = true diff --git a/ext/fetch/dns.rs b/ext/fetch/dns.rs new file mode 100644 index 000000000..9e21a4c34 --- /dev/null +++ b/ext/fetch/dns.rs @@ -0,0 +1,116 @@ +// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. +use std::future::Future; +use std::io; +use std::net::SocketAddr; +use std::pin::Pin; +use std::task::Poll; +use std::task::{self}; +use std::vec; + +use hickory_resolver::error::ResolveError; +use hickory_resolver::name_server::GenericConnector; +use hickory_resolver::name_server::TokioRuntimeProvider; +use hickory_resolver::AsyncResolver; +use hyper_util::client::legacy::connect::dns::GaiResolver; +use hyper_util::client::legacy::connect::dns::Name; +use tokio::task::JoinHandle; +use tower::Service; + +#[derive(Clone, Debug)] +pub enum Resolver { + /// A resolver using blocking `getaddrinfo` calls in a threadpool. + Gai(GaiResolver), + /// hickory-resolver's userspace resolver. + Hickory(AsyncResolver>), +} + +impl Default for Resolver { + fn default() -> Self { + Self::gai() + } +} + +impl Resolver { + pub fn gai() -> Self { + Self::Gai(GaiResolver::new()) + } + + /// Create a [`AsyncResolver`] from system conf. 
+ pub fn hickory() -> Result { + Ok(Self::Hickory( + hickory_resolver::AsyncResolver::tokio_from_system_conf()?, + )) + } + + pub fn hickory_from_async_resolver( + resolver: AsyncResolver>, + ) -> Self { + Self::Hickory(resolver) + } +} + +type SocketAddrs = vec::IntoIter; + +pub struct ResolveFut { + inner: JoinHandle>, +} + +impl Future for ResolveFut { + type Output = Result; + + fn poll( + mut self: Pin<&mut Self>, + cx: &mut task::Context<'_>, + ) -> Poll { + Pin::new(&mut self.inner).poll(cx).map(|res| match res { + Ok(Ok(addrs)) => Ok(addrs), + Ok(Err(e)) => Err(e), + Err(join_err) => { + if join_err.is_cancelled() { + Err(io::Error::new(io::ErrorKind::Interrupted, join_err)) + } else { + Err(io::Error::new(io::ErrorKind::Other, join_err)) + } + } + }) + } +} + +impl Service for Resolver { + type Response = SocketAddrs; + type Error = io::Error; + type Future = ResolveFut; + + fn poll_ready( + &mut self, + _cx: &mut task::Context<'_>, + ) -> Poll> { + Poll::Ready(Ok(())) + } + + fn call(&mut self, name: Name) -> Self::Future { + let task = match self { + Resolver::Gai(gai_resolver) => { + let mut resolver = gai_resolver.clone(); + tokio::spawn(async move { + let result = resolver.call(name).await?; + let x: Vec<_> = result.into_iter().collect(); + let iter: SocketAddrs = x.into_iter(); + Ok(iter) + }) + } + Resolver::Hickory(async_resolver) => { + let resolver = async_resolver.clone(); + tokio::spawn(async move { + let result = resolver.lookup_ip(name.as_str()).await?; + + let x: Vec<_> = + result.into_iter().map(|x| SocketAddr::new(x, 0)).collect(); + let iter: SocketAddrs = x.into_iter(); + Ok(iter) + }) + } + }; + ResolveFut { inner: task } + } +} diff --git a/ext/fetch/lib.rs b/ext/fetch/lib.rs index 7ef26431c..5949f9f75 100644 --- a/ext/fetch/lib.rs +++ b/ext/fetch/lib.rs @@ -1,5 +1,6 @@ // Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. 
+pub mod dns; mod fs_fetch_handler; mod proxy; #[cfg(test)] @@ -91,6 +92,7 @@ pub struct Options { pub unsafely_ignore_certificate_errors: Option>, pub client_cert_chain_and_key: TlsKeys, pub file_fetch_handler: Rc, + pub resolver: dns::Resolver, } impl Options { @@ -114,6 +116,7 @@ impl Default for Options { unsafely_ignore_certificate_errors: None, client_cert_chain_and_key: TlsKeys::Null, file_fetch_handler: Rc::new(DefaultFileFetchHandler), + resolver: dns::Resolver::default(), } } } @@ -255,6 +258,7 @@ pub fn create_client_from_options( .map_err(HttpClientCreateError::RootCertStore)?, ca_certs: vec![], proxy: options.proxy.clone(), + dns_resolver: options.resolver.clone(), unsafely_ignore_certificate_errors: options .unsafely_ignore_certificate_errors .clone(), @@ -835,6 +839,8 @@ pub struct CreateHttpClientArgs { proxy: Option, pool_max_idle_per_host: Option, pool_idle_timeout: Option, + #[serde(default)] + use_hickory_resolver: bool, #[serde(default = "default_true")] http1: bool, #[serde(default = "default_true")] @@ -878,6 +884,13 @@ where .map_err(HttpClientCreateError::RootCertStore)?, ca_certs, proxy: args.proxy, + dns_resolver: if args.use_hickory_resolver { + dns::Resolver::hickory() + .map_err(deno_core::error::AnyError::new) + .map_err(FetchError::Resource)? 
+ } else { + dns::Resolver::default() + }, unsafely_ignore_certificate_errors: options .unsafely_ignore_certificate_errors .clone(), @@ -909,6 +922,7 @@ pub struct CreateHttpClientOptions { pub root_cert_store: Option, pub ca_certs: Vec>, pub proxy: Option, + pub dns_resolver: dns::Resolver, pub unsafely_ignore_certificate_errors: Option>, pub client_cert_chain_and_key: Option, pub pool_max_idle_per_host: Option, @@ -923,6 +937,7 @@ impl Default for CreateHttpClientOptions { root_cert_store: None, ca_certs: vec![], proxy: None, + dns_resolver: dns::Resolver::default(), unsafely_ignore_certificate_errors: None, client_cert_chain_and_key: None, pool_max_idle_per_host: None, @@ -976,7 +991,8 @@ pub fn create_http_client( tls_config.alpn_protocols = alpn_protocols; let tls_config = Arc::from(tls_config); - let mut http_connector = HttpConnector::new(); + let mut http_connector = + HttpConnector::new_with_resolver(options.dns_resolver.clone()); http_connector.enforce_http(false); let user_agent = user_agent.parse::().map_err(|_| { @@ -1051,7 +1067,7 @@ pub struct Client { user_agent: HeaderValue, } -type Connector = proxy::ProxyConnector; +type Connector = proxy::ProxyConnector>; // clippy is wrong here #[allow(clippy::declare_interior_mutable_const)] diff --git a/ext/fetch/tests.rs b/ext/fetch/tests.rs index dad1b34a9..5cd1a35a5 100644 --- a/ext/fetch/tests.rs +++ b/ext/fetch/tests.rs @@ -1,6 +1,8 @@ // Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. 
use std::net::SocketAddr; +use std::sync::atomic::AtomicUsize; +use std::sync::atomic::Ordering::SeqCst; use std::sync::Arc; use bytes::Bytes; @@ -10,6 +12,8 @@ use http_body_util::BodyExt; use tokio::io::AsyncReadExt; use tokio::io::AsyncWriteExt; +use crate::dns; + use super::create_http_client; use super::CreateHttpClientOptions; @@ -17,6 +21,53 @@ static EXAMPLE_CRT: &[u8] = include_bytes!("../tls/testdata/example1_cert.der"); static EXAMPLE_KEY: &[u8] = include_bytes!("../tls/testdata/example1_prikey.der"); +#[test] +fn test_userspace_resolver() { + let thread_counter = Arc::new(AtomicUsize::new(0)); + + let thread_counter_ref = thread_counter.clone(); + let rt = tokio::runtime::Builder::new_current_thread() + .enable_all() + .on_thread_start(move || { + thread_counter_ref.fetch_add(1, SeqCst); + }) + .build() + .unwrap(); + + rt.block_on(async move { + assert_eq!(thread_counter.load(SeqCst), 0); + let src_addr = create_https_server(true).await; + assert_eq!(src_addr.ip().to_string(), "127.0.0.1"); + // use `localhost` to ensure dns step happens. 
+ let addr = format!("localhost:{}", src_addr.port()); + + let hickory = hickory_resolver::AsyncResolver::tokio( + Default::default(), + Default::default(), + ); + + assert_eq!(thread_counter.load(SeqCst), 0); + rust_test_client_with_resolver( + None, + addr.clone(), + "https", + http::Version::HTTP_2, + dns::Resolver::hickory_from_async_resolver(hickory), + ) + .await; + assert_eq!(thread_counter.load(SeqCst), 0, "userspace resolver shouldn't spawn new threads."); + rust_test_client_with_resolver( + None, + addr.clone(), + "https", + http::Version::HTTP_2, + dns::Resolver::gai(), + ) + .await; + assert_eq!(thread_counter.load(SeqCst), 1, "getaddrinfo is called inside spawn_blocking, so tokio spawn a new worker thread for it."); + }); +} + #[tokio::test] async fn test_https_proxy_http11() { let src_addr = create_https_server(false).await; @@ -52,25 +103,27 @@ async fn test_socks_proxy_h2() { run_test_client(prx_addr, src_addr, "socks5", http::Version::HTTP_2).await; } -async fn run_test_client( - prx_addr: SocketAddr, - src_addr: SocketAddr, +async fn rust_test_client_with_resolver( + prx_addr: Option, + src_addr: String, proto: &str, ver: http::Version, + resolver: dns::Resolver, ) { let client = create_http_client( "fetch/test", CreateHttpClientOptions { root_cert_store: None, ca_certs: vec![], - proxy: Some(deno_tls::Proxy { - url: format!("{}://{}", proto, prx_addr), + proxy: prx_addr.map(|p| deno_tls::Proxy { + url: format!("{}://{}", proto, p), basic_auth: None, }), unsafely_ignore_certificate_errors: Some(vec![]), client_cert_chain_and_key: None, pool_max_idle_per_host: None, pool_idle_timeout: None, + dns_resolver: resolver, http1: true, http2: true, }, @@ -92,6 +145,22 @@ async fn run_test_client( assert_eq!(hello, "hello from server"); } +async fn run_test_client( + prx_addr: SocketAddr, + src_addr: SocketAddr, + proto: &str, + ver: http::Version, +) { + rust_test_client_with_resolver( + Some(prx_addr), + src_addr.to_string(), + proto, + ver, + 
Default::default(), + ) + .await +} + async fn create_https_server(allow_h2: bool) -> SocketAddr { let mut tls_config = deno_tls::rustls::server::ServerConfig::builder() .with_no_client_auth() diff --git a/ext/kv/remote.rs b/ext/kv/remote.rs index 4930aacfe..63146daf7 100644 --- a/ext/kv/remote.rs +++ b/ext/kv/remote.rs @@ -197,6 +197,7 @@ impl DatabaseHandler root_cert_store: options.root_cert_store()?, ca_certs: vec![], proxy: options.proxy.clone(), + dns_resolver: Default::default(), unsafely_ignore_certificate_errors: options .unsafely_ignore_certificate_errors .clone(), diff --git a/ext/net/Cargo.toml b/ext/net/Cargo.toml index 245deedd2..1febbd533 100644 --- a/ext/net/Cargo.toml +++ b/ext/net/Cargo.toml @@ -18,7 +18,7 @@ deno_core.workspace = true deno_permissions.workspace = true deno_tls.workspace = true hickory-proto = "0.24" -hickory-resolver = { version = "0.24", features = ["tokio-runtime", "serde-config"] } +hickory-resolver.workspace = true pin-project.workspace = true rustls-tokio-stream.workspace = true serde.workspace = true -- cgit v1.2.3 From b8cf2599242a9d85d03b57d3649ccdf8bce1530e Mon Sep 17 00:00:00 2001 From: Luca Casonato Date: Fri, 15 Nov 2024 15:54:28 +0100 Subject: feat(fetch): accept async iterables for body (#26882) Reland of #24623, but with a fix for `String` objects. 
Co-authored-by: crowlkats --- ext/fetch/22_body.js | 14 +++++ ext/fetch/lib.deno_fetch.d.ts | 2 + ext/web/06_streams.js | 63 +++++---------------- ext/webidl/00_webidl.js | 126 ++++++++++++++++++++++++++++++++++++++++++ ext/webidl/internal.d.ts | 26 +++++++++ 5 files changed, 183 insertions(+), 48 deletions(-) (limited to 'ext') diff --git a/ext/fetch/22_body.js b/ext/fetch/22_body.js index 61a06b4af..c7e977c0b 100644 --- a/ext/fetch/22_body.js +++ b/ext/fetch/22_body.js @@ -15,6 +15,7 @@ import { core, primordials } from "ext:core/mod.js"; const { isAnyArrayBuffer, isArrayBuffer, + isStringObject, } = core; const { ArrayBufferIsView, @@ -466,6 +467,8 @@ function extractBody(object) { if (object.locked || isReadableStreamDisturbed(object)) { throw new TypeError("ReadableStream is locked or disturbed"); } + } else if (object[webidl.AsyncIterable] === webidl.AsyncIterable) { + stream = ReadableStream.from(object.open()); } if (typeof source === "string") { // WARNING: this deviates from spec (expects length to be set) @@ -483,6 +486,9 @@ function extractBody(object) { return { body, contentType }; } +webidl.converters["async iterable"] = webidl + .createAsyncIterableConverter(webidl.converters.Uint8Array); + webidl.converters["BodyInit_DOMString"] = (V, prefix, context, opts) => { // Union for (ReadableStream or Blob or ArrayBufferView or ArrayBuffer or FormData or URLSearchParams or USVString) if (ObjectPrototypeIsPrototypeOf(ReadableStreamPrototype, V)) { @@ -501,6 +507,14 @@ webidl.converters["BodyInit_DOMString"] = (V, prefix, context, opts) => { if (ArrayBufferIsView(V)) { return webidl.converters["ArrayBufferView"](V, prefix, context, opts); } + if (webidl.isAsyncIterable(V) && !isStringObject(V)) { + return webidl.converters["async iterable"]( + V, + prefix, + context, + opts, + ); + } } // BodyInit conversion is passed to extractBody(), which calls core.encode(). 
// core.encode() will UTF-8 encode strings with replacement, being equivalent to the USV normalization. diff --git a/ext/fetch/lib.deno_fetch.d.ts b/ext/fetch/lib.deno_fetch.d.ts index d219a3859..8614dec89 100644 --- a/ext/fetch/lib.deno_fetch.d.ts +++ b/ext/fetch/lib.deno_fetch.d.ts @@ -163,6 +163,8 @@ type BodyInit = | FormData | URLSearchParams | ReadableStream + | Iterable + | AsyncIterable | string; /** @category Fetch */ type RequestDestination = diff --git a/ext/web/06_streams.js b/ext/web/06_streams.js index a4f2275c5..f29e5f204 100644 --- a/ext/web/06_streams.js +++ b/ext/web/06_streams.js @@ -70,7 +70,6 @@ const { String, Symbol, SymbolAsyncIterator, - SymbolIterator, SymbolFor, TypeError, TypedArrayPrototypeGetBuffer, @@ -5084,34 +5083,6 @@ function initializeCountSizeFunction(globalObject) { WeakMapPrototypeSet(countSizeFunctionWeakMap, globalObject, size); } -// Ref: https://tc39.es/ecma262/#sec-getiterator -function getAsyncOrSyncIterator(obj) { - let iterator; - if (obj[SymbolAsyncIterator] != null) { - iterator = obj[SymbolAsyncIterator](); - if (!isObject(iterator)) { - throw new TypeError( - "[Symbol.asyncIterator] returned a non-object value", - ); - } - } else if (obj[SymbolIterator] != null) { - iterator = obj[SymbolIterator](); - if (!isObject(iterator)) { - throw new TypeError("[Symbol.iterator] returned a non-object value"); - } - } else { - throw new TypeError("No iterator found"); - } - if (typeof iterator.next !== "function") { - throw new TypeError("iterator.next is not a function"); - } - return iterator; -} - -function isObject(x) { - return (typeof x === "object" && x != null) || typeof x === "function"; -} - const _resourceBacking = Symbol("[[resourceBacking]]"); // This distinction exists to prevent unrefable streams being used in // regular fast streams that are unaware of refability @@ -5197,21 +5168,22 @@ class ReadableStream { } static from(asyncIterable) { + const prefix = "Failed to execute 'ReadableStream.from'"; 
webidl.requiredArguments( arguments.length, 1, - "Failed to execute 'ReadableStream.from'", + prefix, ); - asyncIterable = webidl.converters.any(asyncIterable); - - const iterator = getAsyncOrSyncIterator(asyncIterable); + asyncIterable = webidl.converters["async iterable"]( + asyncIterable, + prefix, + "Argument 1", + ); + const iter = asyncIterable.open(); const stream = createReadableStream(noop, async () => { // deno-lint-ignore prefer-primordials - const res = await iterator.next(); - if (!isObject(res)) { - throw new TypeError("iterator.next value is not an object"); - } + const res = await iter.next(); if (res.done) { readableStreamDefaultControllerClose(stream[_controller]); } else { @@ -5221,17 +5193,8 @@ class ReadableStream { ); } }, async (reason) => { - if (iterator.return == null) { - return undefined; - } else { - // deno-lint-ignore prefer-primordials - const res = await iterator.return(reason); - if (!isObject(res)) { - throw new TypeError("iterator.return value is not an object"); - } else { - return undefined; - } - } + // deno-lint-ignore prefer-primordials + await iter.return(reason); }, 0); return stream; } @@ -6892,6 +6855,10 @@ webidl.converters.StreamPipeOptions = webidl { key: "signal", converter: webidl.converters.AbortSignal }, ]); +webidl.converters["async iterable"] = webidl.createAsyncIterableConverter( + webidl.converters.any, +); + internals.resourceForReadableStream = resourceForReadableStream; export { diff --git a/ext/webidl/00_webidl.js b/ext/webidl/00_webidl.js index 1d05aae5f..eb18cbcc3 100644 --- a/ext/webidl/00_webidl.js +++ b/ext/webidl/00_webidl.js @@ -26,6 +26,7 @@ const { Float32Array, Float64Array, FunctionPrototypeBind, + FunctionPrototypeCall, Int16Array, Int32Array, Int8Array, @@ -77,6 +78,7 @@ const { StringPrototypeToWellFormed, Symbol, SymbolIterator, + SymbolAsyncIterator, SymbolToStringTag, TypedArrayPrototypeGetBuffer, TypedArrayPrototypeGetSymbolToStringTag, @@ -920,6 +922,127 @@ function 
createSequenceConverter(converter) { }; } +function isAsyncIterable(obj) { + if (obj[SymbolAsyncIterator] === undefined) { + if (obj[SymbolIterator] === undefined) { + return false; + } + } + + return true; +} + +const AsyncIterable = Symbol("[[asyncIterable]]"); + +function createAsyncIterableConverter(converter) { + return function ( + V, + prefix = undefined, + context = undefined, + opts = { __proto__: null }, + ) { + if (type(V) !== "Object") { + throw makeException( + TypeError, + "can not be converted to async iterable.", + prefix, + context, + ); + } + + let isAsync = true; + let method = V[SymbolAsyncIterator]; + if (method === undefined) { + method = V[SymbolIterator]; + + if (method === undefined) { + throw makeException( + TypeError, + "is not iterable.", + prefix, + context, + ); + } + + isAsync = false; + } + + return { + value: V, + [AsyncIterable]: AsyncIterable, + open(context) { + const iter = FunctionPrototypeCall(method, V); + if (type(iter) !== "Object") { + throw new TypeError( + `${context} could not be iterated because iterator method did not return object, but ${ + type(iter) + }.`, + ); + } + + let asyncIterator = iter; + + if (!isAsync) { + asyncIterator = { + // deno-lint-ignore require-await + async next() { + // deno-lint-ignore prefer-primordials + return iter.next(); + }, + }; + } + + return { + async next() { + // deno-lint-ignore prefer-primordials + const iterResult = await asyncIterator.next(); + if (type(iterResult) !== "Object") { + throw TypeError( + `${context} failed to iterate next value because the next() method did not return an object, but ${ + type(iterResult) + }.`, + ); + } + + if (iterResult.done) { + return { done: true }; + } + + const iterValue = converter( + iterResult.value, + `${context} failed to iterate next value`, + `The value returned from the next() method`, + opts, + ); + + return { done: false, value: iterValue }; + }, + async return(reason) { + if (asyncIterator.return === undefined) { + return 
undefined; + } + + // deno-lint-ignore prefer-primordials + const returnPromiseResult = await asyncIterator.return(reason); + if (type(returnPromiseResult) !== "Object") { + throw TypeError( + `${context} failed to close iterator because the return() method did not return an object, but ${ + type(returnPromiseResult) + }.`, + ); + } + + return undefined; + }, + [SymbolAsyncIterator]() { + return this; + }, + }; + }, + }; + }; +} + function createRecordConverter(keyConverter, valueConverter) { return (V, prefix, context, opts) => { if (type(V) !== "Object") { @@ -1302,9 +1425,11 @@ function setlike(obj, objPrototype, readonly) { export { assertBranded, + AsyncIterable, brand, configureInterface, converters, + createAsyncIterableConverter, createBranded, createDictionaryConverter, createEnumConverter, @@ -1315,6 +1440,7 @@ export { createSequenceConverter, illegalConstructor, invokeCallbackFunction, + isAsyncIterable, makeException, mixinPairIterable, requiredArguments, diff --git a/ext/webidl/internal.d.ts b/ext/webidl/internal.d.ts index 1ce45463e..375d548d3 100644 --- a/ext/webidl/internal.d.ts +++ b/ext/webidl/internal.d.ts @@ -438,6 +438,27 @@ declare module "ext:deno_webidl/00_webidl.js" { opts?: any, ) => T[]; + /** + * Create a converter that converts an async iterable of the inner type. + */ + function createAsyncIterableConverter( + converter: ( + v: V, + prefix?: string, + context?: string, + opts?: any, + ) => T, + ): ( + v: any, + prefix?: string, + context?: string, + opts?: any, + ) => ConvertedAsyncIterable; + + interface ConvertedAsyncIterable extends AsyncIterableIterator { + value: V; + } + /** * Create a converter that converts a Promise of the inner type. */ @@ -559,4 +580,9 @@ declare module "ext:deno_webidl/00_webidl.js" { | "Symbol" | "BigInt" | "Object"; + + /** + * Check whether a value is an async iterable. 
+ */ + function isAsyncIterable(v: any): boolean; } -- cgit v1.2.3 From 48b94c099526eb262287e101a75cb4571b8972b0 Mon Sep 17 00:00:00 2001 From: David Sherret Date: Fri, 15 Nov 2024 23:22:50 -0500 Subject: refactor: use boxed_error in some places (#26887) --- ext/fs/Cargo.toml | 1 + ext/fs/lib.rs | 1 + ext/fs/ops.rs | 65 +++++++++++++++++-------------- ext/kv/Cargo.toml | 1 + ext/kv/lib.rs | 100 +++++++++++++++++++++++++++++------------------- ext/node/Cargo.toml | 1 + ext/node/ops/require.rs | 16 +++++--- 7 files changed, 111 insertions(+), 74 deletions(-) (limited to 'ext') diff --git a/ext/fs/Cargo.toml b/ext/fs/Cargo.toml index e85f349b1..ace1b89f3 100644 --- a/ext/fs/Cargo.toml +++ b/ext/fs/Cargo.toml @@ -19,6 +19,7 @@ sync_fs = [] [dependencies] async-trait.workspace = true base32.workspace = true +boxed_error.workspace = true deno_core.workspace = true deno_io.workspace = true deno_path_util.workspace = true diff --git a/ext/fs/lib.rs b/ext/fs/lib.rs index dd852e6be..aed9a7085 100644 --- a/ext/fs/lib.rs +++ b/ext/fs/lib.rs @@ -15,6 +15,7 @@ pub use crate::interface::FsDirEntry; pub use crate::interface::FsFileType; pub use crate::interface::OpenOptions; pub use crate::ops::FsOpsError; +pub use crate::ops::FsOpsErrorKind; pub use crate::ops::OperationError; pub use crate::std_fs::RealFs; pub use crate::sync::MaybeSend; diff --git a/ext/fs/ops.rs b/ext/fs/ops.rs index 3d0d96ce6..e3a511f8e 100644 --- a/ext/fs/ops.rs +++ b/ext/fs/ops.rs @@ -16,6 +16,7 @@ use crate::interface::FsDirEntry; use crate::interface::FsFileType; use crate::FsPermissions; use crate::OpenOptions; +use boxed_error::Boxed; use deno_core::op2; use deno_core::CancelFuture; use deno_core::CancelHandle; @@ -32,8 +33,11 @@ use rand::thread_rng; use rand::Rng; use serde::Serialize; +#[derive(Debug, Boxed)] +pub struct FsOpsError(pub Box); + #[derive(Debug, thiserror::Error)] -pub enum FsOpsError { +pub enum FsOpsErrorKind { #[error("{0}")] Io(#[source] std::io::Error), #[error("{0}")] @@ -73,15 
+77,16 @@ pub enum FsOpsError { impl From for FsOpsError { fn from(err: FsError) -> Self { match err { - FsError::Io(err) => FsOpsError::Io(err), + FsError::Io(err) => FsOpsErrorKind::Io(err), FsError::FileBusy => { - FsOpsError::Other(deno_core::error::resource_unavailable()) + FsOpsErrorKind::Other(deno_core::error::resource_unavailable()) } FsError::NotSupported => { - FsOpsError::Other(deno_core::error::not_supported()) + FsOpsErrorKind::Other(deno_core::error::not_supported()) } - FsError::NotCapable(err) => FsOpsError::NotCapable(err), + FsError::NotCapable(err) => FsOpsErrorKind::NotCapable(err), } + .into_box() } } @@ -127,11 +132,12 @@ fn map_permission_error( (path.as_str(), "") }; - FsOpsError::NotCapableAccess { + FsOpsErrorKind::NotCapableAccess { standalone: deno_permissions::is_standalone(), err, path: format!("{path}{truncated}"), } + .into_box() } err => Err::<(), _>(err) .context_path(operation, path) @@ -1176,7 +1182,9 @@ fn validate_temporary_filename_component( ) -> Result<(), FsOpsError> { // Ban ASCII and Unicode control characters: these will often fail if let Some(c) = component.matches(|c: char| c.is_control()).next() { - return Err(FsOpsError::InvalidControlCharacter(c.to_string())); + return Err( + FsOpsErrorKind::InvalidControlCharacter(c.to_string()).into_box(), + ); } // Windows has the most restrictive filenames. As temp files aren't normal files, we just // use this set of banned characters for all platforms because wildcard-like files can also @@ -1192,13 +1200,13 @@ fn validate_temporary_filename_component( .matches(|c: char| "<>:\"/\\|?*".contains(c)) .next() { - return Err(FsOpsError::InvalidCharacter(c.to_string())); + return Err(FsOpsErrorKind::InvalidCharacter(c.to_string()).into_box()); } // This check is only for Windows #[cfg(windows)] if suffix && component.ends_with(|c: char| ". 
".contains(c)) { - return Err(FsOpsError::InvalidTrailingCharacter); + return Err(FsOpsErrorKind::InvalidTrailingCharacter.into_box()); } Ok(()) @@ -1440,7 +1448,7 @@ fn to_seek_from(offset: i64, whence: i32) -> Result { 1 => SeekFrom::Current(offset), 2 => SeekFrom::End(offset), _ => { - return Err(FsOpsError::InvalidSeekMode(whence)); + return Err(FsOpsErrorKind::InvalidSeekMode(whence).into_box()); } }; Ok(seek_from) @@ -1456,7 +1464,7 @@ pub fn op_fs_seek_sync( ) -> Result { let pos = to_seek_from(offset, whence)?; let file = - FileResource::get_file(state, rid).map_err(FsOpsError::Resource)?; + FileResource::get_file(state, rid).map_err(FsOpsErrorKind::Resource)?; let cursor = file.seek_sync(pos)?; Ok(cursor) } @@ -1471,7 +1479,7 @@ pub async fn op_fs_seek_async( ) -> Result { let pos = to_seek_from(offset, whence)?; let file = FileResource::get_file(&state.borrow(), rid) - .map_err(FsOpsError::Resource)?; + .map_err(FsOpsErrorKind::Resource)?; let cursor = file.seek_async(pos).await?; Ok(cursor) } @@ -1482,7 +1490,7 @@ pub fn op_fs_file_sync_data_sync( #[smi] rid: ResourceId, ) -> Result<(), FsOpsError> { let file = - FileResource::get_file(state, rid).map_err(FsOpsError::Resource)?; + FileResource::get_file(state, rid).map_err(FsOpsErrorKind::Resource)?; file.datasync_sync()?; Ok(()) } @@ -1493,7 +1501,7 @@ pub async fn op_fs_file_sync_data_async( #[smi] rid: ResourceId, ) -> Result<(), FsOpsError> { let file = FileResource::get_file(&state.borrow(), rid) - .map_err(FsOpsError::Resource)?; + .map_err(FsOpsErrorKind::Resource)?; file.datasync_async().await?; Ok(()) } @@ -1504,7 +1512,7 @@ pub fn op_fs_file_sync_sync( #[smi] rid: ResourceId, ) -> Result<(), FsOpsError> { let file = - FileResource::get_file(state, rid).map_err(FsOpsError::Resource)?; + FileResource::get_file(state, rid).map_err(FsOpsErrorKind::Resource)?; file.sync_sync()?; Ok(()) } @@ -1515,7 +1523,7 @@ pub async fn op_fs_file_sync_async( #[smi] rid: ResourceId, ) -> Result<(), FsOpsError> { 
let file = FileResource::get_file(&state.borrow(), rid) - .map_err(FsOpsError::Resource)?; + .map_err(FsOpsErrorKind::Resource)?; file.sync_async().await?; Ok(()) } @@ -1527,7 +1535,7 @@ pub fn op_fs_file_stat_sync( #[buffer] stat_out_buf: &mut [u32], ) -> Result<(), FsOpsError> { let file = - FileResource::get_file(state, rid).map_err(FsOpsError::Resource)?; + FileResource::get_file(state, rid).map_err(FsOpsErrorKind::Resource)?; let stat = file.stat_sync()?; let serializable_stat = SerializableStat::from(stat); serializable_stat.write(stat_out_buf); @@ -1541,7 +1549,7 @@ pub async fn op_fs_file_stat_async( #[smi] rid: ResourceId, ) -> Result { let file = FileResource::get_file(&state.borrow(), rid) - .map_err(FsOpsError::Resource)?; + .map_err(FsOpsErrorKind::Resource)?; let stat = file.stat_async().await?; Ok(stat.into()) } @@ -1553,7 +1561,7 @@ pub fn op_fs_flock_sync( exclusive: bool, ) -> Result<(), FsOpsError> { let file = - FileResource::get_file(state, rid).map_err(FsOpsError::Resource)?; + FileResource::get_file(state, rid).map_err(FsOpsErrorKind::Resource)?; file.lock_sync(exclusive)?; Ok(()) } @@ -1565,7 +1573,7 @@ pub async fn op_fs_flock_async( exclusive: bool, ) -> Result<(), FsOpsError> { let file = FileResource::get_file(&state.borrow(), rid) - .map_err(FsOpsError::Resource)?; + .map_err(FsOpsErrorKind::Resource)?; file.lock_async(exclusive).await?; Ok(()) } @@ -1576,7 +1584,7 @@ pub fn op_fs_funlock_sync( #[smi] rid: ResourceId, ) -> Result<(), FsOpsError> { let file = - FileResource::get_file(state, rid).map_err(FsOpsError::Resource)?; + FileResource::get_file(state, rid).map_err(FsOpsErrorKind::Resource)?; file.unlock_sync()?; Ok(()) } @@ -1587,7 +1595,7 @@ pub async fn op_fs_funlock_async( #[smi] rid: ResourceId, ) -> Result<(), FsOpsError> { let file = FileResource::get_file(&state.borrow(), rid) - .map_err(FsOpsError::Resource)?; + .map_err(FsOpsErrorKind::Resource)?; file.unlock_async().await?; Ok(()) } @@ -1599,7 +1607,7 @@ pub fn 
op_fs_ftruncate_sync( #[number] len: u64, ) -> Result<(), FsOpsError> { let file = - FileResource::get_file(state, rid).map_err(FsOpsError::Resource)?; + FileResource::get_file(state, rid).map_err(FsOpsErrorKind::Resource)?; file.truncate_sync(len)?; Ok(()) } @@ -1611,7 +1619,7 @@ pub async fn op_fs_file_truncate_async( #[number] len: u64, ) -> Result<(), FsOpsError> { let file = FileResource::get_file(&state.borrow(), rid) - .map_err(FsOpsError::Resource)?; + .map_err(FsOpsErrorKind::Resource)?; file.truncate_async(len).await?; Ok(()) } @@ -1626,7 +1634,7 @@ pub fn op_fs_futime_sync( #[smi] mtime_nanos: u32, ) -> Result<(), FsOpsError> { let file = - FileResource::get_file(state, rid).map_err(FsOpsError::Resource)?; + FileResource::get_file(state, rid).map_err(FsOpsErrorKind::Resource)?; file.utime_sync(atime_secs, atime_nanos, mtime_secs, mtime_nanos)?; Ok(()) } @@ -1641,7 +1649,7 @@ pub async fn op_fs_futime_async( #[smi] mtime_nanos: u32, ) -> Result<(), FsOpsError> { let file = FileResource::get_file(&state.borrow(), rid) - .map_err(FsOpsError::Resource)?; + .map_err(FsOpsErrorKind::Resource)?; file .utime_async(atime_secs, atime_nanos, mtime_secs, mtime_nanos) .await?; @@ -1717,7 +1725,7 @@ impl MapErrContext for Result { where F: FnOnce(FsError) -> OperationError, { - self.map_err(|err| FsOpsError::OperationError(f(err))) + self.map_err(|err| FsOpsErrorKind::OperationError(f(err)).into_box()) } fn context(self, operation: &'static str) -> Self::R { @@ -1754,7 +1762,8 @@ impl MapErrContext for Result { } fn path_into_string(s: std::ffi::OsString) -> Result { - s.into_string().map_err(FsOpsError::InvalidUtf8) + s.into_string() + .map_err(|e| FsOpsErrorKind::InvalidUtf8(e).into_box()) } macro_rules! 
create_struct_writer { diff --git a/ext/kv/Cargo.toml b/ext/kv/Cargo.toml index 1d7b91770..aa7381766 100644 --- a/ext/kv/Cargo.toml +++ b/ext/kv/Cargo.toml @@ -17,6 +17,7 @@ path = "lib.rs" anyhow.workspace = true async-trait.workspace = true base64.workspace = true +boxed_error.workspace = true bytes.workspace = true chrono = { workspace = true, features = ["now"] } deno_core.workspace = true diff --git a/ext/kv/lib.rs b/ext/kv/lib.rs index a4ccfe3d6..5392b9721 100644 --- a/ext/kv/lib.rs +++ b/ext/kv/lib.rs @@ -14,6 +14,7 @@ use std::time::Duration; use base64::prelude::BASE64_URL_SAFE; use base64::Engine; +use boxed_error::Boxed; use chrono::DateTime; use chrono::Utc; use deno_core::error::get_custom_error_class; @@ -114,8 +115,11 @@ impl Resource for DatabaseWatcherResource { } } +#[derive(Debug, Boxed)] +pub struct KvError(pub Box); + #[derive(Debug, thiserror::Error)] -pub enum KvError { +pub enum KvErrorKind { #[error(transparent)] DatabaseHandler(deno_core::error::AnyError), #[error(transparent)] @@ -193,7 +197,7 @@ where let db = handler .open(state.clone(), path) .await - .map_err(KvError::DatabaseHandler)?; + .map_err(KvErrorKind::DatabaseHandler)?; let rid = state.borrow_mut().resource_table.add(DatabaseResource { db, cancel_handle: CancelHandle::new_rc(), @@ -329,7 +333,7 @@ where let resource = state .resource_table .get::>(rid) - .map_err(KvError::Resource)?; + .map_err(KvErrorKind::Resource)?; resource.db.clone() }; @@ -339,7 +343,7 @@ where }; if ranges.len() > config.max_read_ranges { - return Err(KvError::TooManyRanges(config.max_read_ranges)); + return Err(KvErrorKind::TooManyRanges(config.max_read_ranges).into_box()); } let mut total_entries = 0usize; @@ -358,14 +362,16 @@ where Ok(ReadRange { start, end, - limit: NonZeroU32::new(limit).ok_or(KvError::InvalidLimit)?, + limit: NonZeroU32::new(limit).ok_or(KvErrorKind::InvalidLimit)?, reverse, }) }) .collect::, KvError>>()?; if total_entries > config.max_read_entries { - return 
Err(KvError::TooManyEntries(config.max_read_entries)); + return Err( + KvErrorKind::TooManyEntries(config.max_read_entries).into_box(), + ); } let opts = SnapshotReadOptions { @@ -374,7 +380,7 @@ where let output_ranges = db .snapshot_read(read_ranges, opts) .await - .map_err(KvError::Kv)?; + .map_err(KvErrorKind::Kv)?; let output_ranges = output_ranges .into_iter() .map(|x| { @@ -415,7 +421,7 @@ where if get_custom_error_class(&err) == Some("BadResource") { return Ok(None); } else { - return Err(KvError::Resource(err)); + return Err(KvErrorKind::Resource(err).into_box()); } } }; @@ -423,11 +429,11 @@ where }; let Some(mut handle) = - db.dequeue_next_message().await.map_err(KvError::Kv)? + db.dequeue_next_message().await.map_err(KvErrorKind::Kv)? else { return Ok(None); }; - let payload = handle.take_payload().await.map_err(KvError::Kv)?.into(); + let payload = handle.take_payload().await.map_err(KvErrorKind::Kv)?.into(); let handle_rid = { let mut state = state.borrow_mut(); state.resource_table.add(QueueMessageResource { handle }) @@ -448,11 +454,11 @@ where let resource = state .resource_table .get::>(rid) - .map_err(KvError::Resource)?; + .map_err(KvErrorKind::Resource)?; let config = state.borrow::>().clone(); if keys.len() > config.max_watched_keys { - return Err(KvError::TooManyKeys(config.max_watched_keys)); + return Err(KvErrorKind::TooManyKeys(config.max_watched_keys).into_box()); } let keys: Vec> = keys @@ -493,7 +499,7 @@ async fn op_kv_watch_next( let resource = state .resource_table .get::(rid) - .map_err(KvError::Resource)?; + .map_err(KvErrorKind::Resource)?; resource.clone() }; @@ -519,7 +525,7 @@ async fn op_kv_watch_next( return Ok(None); }; - let entries = res.map_err(KvError::Kv)?; + let entries = res.map_err(KvErrorKind::Kv)?; let entries = entries .into_iter() .map(|entry| { @@ -549,9 +555,9 @@ where let handle = state .resource_table .take::::DB as Database>::QMH>>(handle_rid) - .map_err(|_| KvError::QueueMessageNotFound)?; + .map_err(|_| 
KvErrorKind::QueueMessageNotFound)?; Rc::try_unwrap(handle) - .map_err(|_| KvError::QueueMessageNotFound)? + .map_err(|_| KvErrorKind::QueueMessageNotFound)? .handle }; // if we fail to finish the message, there is not much we can do and the @@ -692,7 +698,7 @@ impl RawSelector { }), (Some(prefix), Some(start), None) => { if !start.starts_with(&prefix) || start.len() == prefix.len() { - return Err(KvError::StartKeyNotInKeyspace); + return Err(KvErrorKind::StartKeyNotInKeyspace.into_box()); } Ok(Self::Prefixed { prefix, @@ -702,7 +708,7 @@ impl RawSelector { } (Some(prefix), None, Some(end)) => { if !end.starts_with(&prefix) || end.len() == prefix.len() { - return Err(KvError::EndKeyNotInKeyspace); + return Err(KvErrorKind::EndKeyNotInKeyspace.into_box()); } Ok(Self::Prefixed { prefix, @@ -712,7 +718,7 @@ impl RawSelector { } (None, Some(start), Some(end)) => { if start > end { - return Err(KvError::StartKeyGreaterThanEndKey); + return Err(KvErrorKind::StartKeyGreaterThanEndKey.into_box()); } Ok(Self::Range { start, end }) } @@ -720,7 +726,7 @@ impl RawSelector { let end = start.iter().copied().chain(Some(0)).collect(); Ok(Self::Range { start, end }) } - _ => Err(KvError::InvalidRange), + _ => Err(KvErrorKind::InvalidRange.into_box()), } } @@ -782,7 +788,7 @@ fn encode_cursor( ) -> Result { let common_prefix = selector.common_prefix(); if !boundary_key.starts_with(common_prefix) { - return Err(KvError::InvalidBoundaryKey); + return Err(KvErrorKind::InvalidBoundaryKey.into_box()); } Ok(BASE64_URL_SAFE.encode(&boundary_key[common_prefix.len()..])) } @@ -799,7 +805,7 @@ fn decode_selector_and_cursor( let common_prefix = selector.common_prefix(); let cursor = BASE64_URL_SAFE .decode(cursor) - .map_err(|_| KvError::InvalidCursor)?; + .map_err(|_| KvErrorKind::InvalidCursor)?; let first_key: Vec; let last_key: Vec; @@ -824,13 +830,13 @@ fn decode_selector_and_cursor( // Defend against out-of-bounds reading if let Some(start) = selector.start() { if &first_key[..] 
< start { - return Err(KvError::CursorOutOfBounds); + return Err(KvErrorKind::CursorOutOfBounds.into_box()); } } if let Some(end) = selector.end() { if &last_key[..] > end { - return Err(KvError::CursorOutOfBounds); + return Err(KvErrorKind::CursorOutOfBounds.into_box()); } } @@ -855,7 +861,7 @@ where let resource = state .resource_table .get::>(rid) - .map_err(KvError::Resource)?; + .map_err(KvErrorKind::Resource)?; resource.db.clone() }; @@ -865,28 +871,28 @@ where }; if checks.len() > config.max_checks { - return Err(KvError::TooManyChecks(config.max_checks)); + return Err(KvErrorKind::TooManyChecks(config.max_checks).into_box()); } if mutations.len() + enqueues.len() > config.max_mutations { - return Err(KvError::TooManyMutations(config.max_mutations)); + return Err(KvErrorKind::TooManyMutations(config.max_mutations).into_box()); } let checks = checks .into_iter() .map(check_from_v8) .collect::, KvCheckError>>() - .map_err(KvError::InvalidCheck)?; + .map_err(KvErrorKind::InvalidCheck)?; let mutations = mutations .into_iter() .map(|mutation| mutation_from_v8((mutation, current_timestamp))) .collect::, KvMutationError>>() - .map_err(KvError::InvalidMutation)?; + .map_err(KvErrorKind::InvalidMutation)?; let enqueues = enqueues .into_iter() .map(|e| enqueue_from_v8(e, current_timestamp)) .collect::, std::io::Error>>() - .map_err(KvError::InvalidEnqueue)?; + .map_err(KvErrorKind::InvalidEnqueue)?; let mut total_payload_size = 0usize; let mut total_key_size = 0usize; @@ -897,7 +903,7 @@ where .chain(mutations.iter().map(|m| &m.key)) { if key.is_empty() { - return Err(KvError::EmptyKey); + return Err(KvErrorKind::EmptyKey.into_box()); } total_payload_size += check_write_key_size(key, &config)?; @@ -921,13 +927,16 @@ where } if total_payload_size > config.max_total_mutation_size_bytes { - return Err(KvError::TotalMutationTooLarge( - config.max_total_mutation_size_bytes, - )); + return Err( + KvErrorKind::TotalMutationTooLarge(config.max_total_mutation_size_bytes) + 
.into_box(), + ); } if total_key_size > config.max_total_key_size_bytes { - return Err(KvError::TotalKeyTooLarge(config.max_total_key_size_bytes)); + return Err( + KvErrorKind::TotalKeyTooLarge(config.max_total_key_size_bytes).into_box(), + ); } let atomic_write = AtomicWrite { @@ -936,7 +945,10 @@ where enqueues, }; - let result = db.atomic_write(atomic_write).await.map_err(KvError::Kv)?; + let result = db + .atomic_write(atomic_write) + .await + .map_err(KvErrorKind::Kv)?; Ok(result.map(|res| faster_hex::hex_string(&res.versionstamp))) } @@ -958,7 +970,9 @@ fn op_kv_encode_cursor( fn check_read_key_size(key: &[u8], config: &KvConfig) -> Result<(), KvError> { if key.len() > config.max_read_key_size_bytes { - Err(KvError::KeyTooLargeToRead(config.max_read_key_size_bytes)) + Err( + KvErrorKind::KeyTooLargeToRead(config.max_read_key_size_bytes).into_box(), + ) } else { Ok(()) } @@ -969,7 +983,10 @@ fn check_write_key_size( config: &KvConfig, ) -> Result { if key.len() > config.max_write_key_size_bytes { - Err(KvError::KeyTooLargeToWrite(config.max_write_key_size_bytes)) + Err( + KvErrorKind::KeyTooLargeToWrite(config.max_write_key_size_bytes) + .into_box(), + ) } else { Ok(key.len()) } @@ -986,7 +1003,7 @@ fn check_value_size( }; if payload.len() > config.max_value_size_bytes { - Err(KvError::ValueTooLarge(config.max_value_size_bytes)) + Err(KvErrorKind::ValueTooLarge(config.max_value_size_bytes).into_box()) } else { Ok(payload.len()) } @@ -997,7 +1014,10 @@ fn check_enqueue_payload_size( config: &KvConfig, ) -> Result { if payload.len() > config.max_value_size_bytes { - Err(KvError::EnqueuePayloadTooLarge(config.max_value_size_bytes)) + Err( + KvErrorKind::EnqueuePayloadTooLarge(config.max_value_size_bytes) + .into_box(), + ) } else { Ok(payload.len()) } diff --git a/ext/node/Cargo.toml b/ext/node/Cargo.toml index 9e1a3495b..c4ee86a95 100644 --- a/ext/node/Cargo.toml +++ b/ext/node/Cargo.toml @@ -22,6 +22,7 @@ aes.workspace = true async-trait.workspace = true 
base64.workspace = true blake2 = "0.10.6" +boxed_error.workspace = true brotli.workspace = true bytes.workspace = true cbc.workspace = true diff --git a/ext/node/ops/require.rs b/ext/node/ops/require.rs index e381ee91d..06c034fd5 100644 --- a/ext/node/ops/require.rs +++ b/ext/node/ops/require.rs @@ -1,5 +1,6 @@ // Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. +use boxed_error::Boxed; use deno_core::error::AnyError; use deno_core::op2; use deno_core::url::Url; @@ -40,8 +41,11 @@ where loader.ensure_read_permission(permissions, file_path) } +#[derive(Debug, Boxed)] +pub struct RequireError(pub Box); + #[derive(Debug, thiserror::Error)] -pub enum RequireError { +pub enum RequireErrorKind { #[error(transparent)] UrlParse(#[from] url::ParseError), #[error(transparent)] @@ -135,7 +139,7 @@ where let from = if from.starts_with("file:///") { url_to_file_path(&Url::parse(&from)?)? } else { - let current_dir = &fs.cwd().map_err(RequireError::UnableToGetCwd)?; + let current_dir = &fs.cwd().map_err(RequireErrorKind::UnableToGetCwd)?; normalize_path(current_dir.join(from)) }; @@ -324,7 +328,7 @@ where { let path = PathBuf::from(request); let path = ensure_read_permission::

(state, &path) - .map_err(RequireError::Permission)?; + .map_err(RequireErrorKind::Permission)?; let fs = state.borrow::(); let canonicalized_path = deno_path_util::strip_unc_prefix(fs.realpath_sync(&path)?); @@ -484,11 +488,11 @@ where let file_path = PathBuf::from(file_path); // todo(dsherret): there's multiple borrows to NodeRequireLoaderRc here let file_path = ensure_read_permission::

(state, &file_path) - .map_err(RequireError::Permission)?; + .map_err(RequireErrorKind::Permission)?; let loader = state.borrow::(); loader .load_text_file_lossy(&file_path) - .map_err(RequireError::ReadModule) + .map_err(|e| RequireErrorKind::ReadModule(e).into_box()) } #[op2] @@ -612,7 +616,7 @@ where { let referrer_path = PathBuf::from(&referrer_filename); let referrer_path = ensure_read_permission::

(state, &referrer_path) - .map_err(RequireError::Permission)?; + .map_err(RequireErrorKind::Permission)?; let pkg_json_resolver = state.borrow::(); let Some(pkg) = pkg_json_resolver.get_closest_package_json_from_path(&referrer_path)? -- cgit v1.2.3 From 768c5ea2bb2a927455577a29f0761425593aea98 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bartek=20Iwa=C5=84czuk?= Date: Sat, 16 Nov 2024 14:29:36 +0000 Subject: fix(ext/cache): gracefully error when cache creation failed (#26895) Removes panic reported in https://github.com/denoland/deno/issues/26893 --- ext/cache/sqlite.rs | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) (limited to 'ext') diff --git a/ext/cache/sqlite.rs b/ext/cache/sqlite.rs index 6efceda11..469e3e51d 100644 --- a/ext/cache/sqlite.rs +++ b/ext/cache/sqlite.rs @@ -8,6 +8,7 @@ use std::time::SystemTime; use std::time::UNIX_EPOCH; use async_trait::async_trait; +use deno_core::anyhow::Context; use deno_core::error::AnyError; use deno_core::futures::future::poll_fn; use deno_core::parking_lot::Mutex; @@ -45,7 +46,13 @@ impl SqliteBackedCache { pub fn new(cache_storage_dir: PathBuf) -> Result { { std::fs::create_dir_all(&cache_storage_dir) - .expect("failed to create cache dir"); + .with_context(|| { + format!( + "Failed to create cache storage directory {}", + cache_storage_dir.display() + ) + }) + .map_err(CacheError::Other)?; let path = cache_storage_dir.join("cache_metadata.db"); let connection = rusqlite::Connection::open(&path).unwrap_or_else(|_| { panic!("failed to open cache db at {}", path.display()) -- cgit v1.2.3 From 8d2960d7ccb756de4260a642d960cd01eaaa2478 Mon Sep 17 00:00:00 2001 From: /usr/bin/cat <109351887+cu8code@users.noreply.github.com> Date: Sat, 16 Nov 2024 20:31:19 +0530 Subject: fix(ext/node): New async setInterval function to improve the nodejs compatibility (#26703) Closes #26499 --- ext/node/polyfills/timers.ts | 88 +++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 86 insertions(+), 2 deletions(-) (limited to 
'ext') diff --git a/ext/node/polyfills/timers.ts b/ext/node/polyfills/timers.ts index 02f69466e..e826416ed 100644 --- a/ext/node/polyfills/timers.ts +++ b/ext/node/polyfills/timers.ts @@ -15,10 +15,16 @@ import { setUnrefTimeout, Timeout, } from "ext:deno_node/internal/timers.mjs"; -import { validateFunction } from "ext:deno_node/internal/validators.mjs"; +import { + validateAbortSignal, + validateBoolean, + validateFunction, + validateObject, +} from "ext:deno_node/internal/validators.mjs"; import { promisify } from "ext:deno_node/internal/util.mjs"; export { setUnrefTimeout } from "ext:deno_node/internal/timers.mjs"; import * as timers from "ext:deno_web/02_timers.js"; +import { AbortError } from "ext:deno_node/internal/errors.ts"; const clearTimeout_ = timers.clearTimeout; const clearInterval_ = timers.clearInterval; @@ -89,10 +95,88 @@ export function clearImmediate(immediate: Immediate) { clearTimeout_(immediate._immediateId); } +async function* setIntervalAsync( + after: number, + value: number, + options: { signal?: AbortSignal; ref?: boolean } = { __proto__: null }, +) { + validateObject(options, "options"); + + if (typeof options?.signal !== "undefined") { + validateAbortSignal(options.signal, "options.signal"); + } + + if (typeof options?.ref !== "undefined") { + validateBoolean(options.ref, "options.ref"); + } + + const { signal, ref = true } = options; + + if (signal?.aborted) { + throw new AbortError(undefined, { cause: signal?.reason }); + } + + let onCancel: (() => void) | undefined = undefined; + let interval: Timeout | undefined = undefined; + try { + let notYielded = 0; + let callback: ((value?: object) => void) | undefined = undefined; + let rejectCallback: ((message?: string) => void) | undefined = undefined; + interval = new Timeout( + () => { + notYielded++; + if (callback) { + callback(); + callback = undefined; + rejectCallback = undefined; + } + }, + after, + [], + true, + ref, + ); + if (signal) { + onCancel = () => { + 
clearInterval(interval); + if (rejectCallback) { + rejectCallback(signal.reason); + callback = undefined; + rejectCallback = undefined; + } + }; + signal.addEventListener("abort", onCancel, { once: true }); + } + while (!signal?.aborted) { + if (notYielded === 0) { + await new Promise((resolve: () => void, reject: () => void) => { + callback = resolve; + rejectCallback = reject; + }); + } + for (; notYielded > 0; notYielded--) { + yield value; + } + } + } catch (error) { + if (signal?.aborted) { + throw new AbortError(undefined, { cause: signal?.reason }); + } + throw error; + } finally { + if (interval) { + clearInterval(interval); + } + if (onCancel) { + signal?.removeEventListener("abort", onCancel); + } + } +} + export const promises = { setTimeout: promisify(setTimeout), setImmediate: promisify(setImmediate), - setInterval: promisify(setInterval), + setInterval: setIntervalAsync, }; promises.scheduler = { -- cgit v1.2.3 From 80098bfeabcd2f681f1854c6294ad0fb71c2dd06 Mon Sep 17 00:00:00 2001 From: reczkok <66403540+reczkok@users.noreply.github.com> Date: Sun, 17 Nov 2024 04:14:36 +0100 Subject: fix(ext/webgpu): Create GPUQuerySet converter before usage (#26883) Converter for `GPUComputePassTimestampWrites` uses converter for `GPUQuerySet` before it is defined making it impossible to use. This PR simply reorders converter creation to resolve this issue. Logging the `GPUQuerySet` still fails (as mentioned in #26769) but it is now usable inside pass descriptors and works when used. 
--- ext/webgpu/01_webgpu.js | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'ext') diff --git a/ext/webgpu/01_webgpu.js b/ext/webgpu/01_webgpu.js index 719877750..cab5cbbdb 100644 --- a/ext/webgpu/01_webgpu.js +++ b/ext/webgpu/01_webgpu.js @@ -6982,6 +6982,12 @@ webidl.converters.GPUComputePassEncoder = webidl.createInterfaceConverter( GPUComputePassEncoder.prototype, ); +// INTERFACE: GPUQuerySet +webidl.converters.GPUQuerySet = webidl.createInterfaceConverter( + "GPUQuerySet", + GPUQuerySet.prototype, +); + // DICTIONARY: GPUComputePassTimestampWrites webidl.converters["GPUComputePassTimestampWrites"] = webidl .createDictionaryConverter( @@ -7154,12 +7160,6 @@ webidl.converters["GPURenderPassDepthStencilAttachment"] = webidl dictMembersGPURenderPassDepthStencilAttachment, ); -// INTERFACE: GPUQuerySet -webidl.converters.GPUQuerySet = webidl.createInterfaceConverter( - "GPUQuerySet", - GPUQuerySet.prototype, -); - // DICTIONARY: GPURenderPassTimestampWrites webidl.converters["GPURenderPassTimestampWrites"] = webidl .createDictionaryConverter( -- cgit v1.2.3 From 73411bb98a677727799122ebc397d825bf95b812 Mon Sep 17 00:00:00 2001 From: Benjamin Swerdlow Date: Sun, 17 Nov 2024 04:36:55 -0800 Subject: chore: Make ext/net/tcp.rs public (#26891) --- ext/net/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'ext') diff --git a/ext/net/lib.rs b/ext/net/lib.rs index bf8f58aa2..f482750b3 100644 --- a/ext/net/lib.rs +++ b/ext/net/lib.rs @@ -7,7 +7,7 @@ pub mod ops_tls; pub mod ops_unix; pub mod raw; pub mod resolve_addr; -mod tcp; +pub mod tcp; use deno_core::error::AnyError; use deno_core::OpState; -- cgit v1.2.3 From 594a99817cbe44553b2c288578fbba8e1e9c1907 Mon Sep 17 00:00:00 2001 From: Luca Casonato Date: Tue, 19 Nov 2024 00:55:22 +0100 Subject: feat(runtime): remove public OTEL trace API (#26854) This PR removes the public Deno.tracing.Span API. 
We are not confident we can ship an API that is better than the `@opentelemetry/api` API, because V8 CPED does not support us using `using` to manage span context. If this changes, we can revisit this decision. For now, users wanting custom spans can instrument their code using the `@opentelemetry/api` API and `@deno/otel`. This PR also speeds up the OTEL trace generation by a 30% by using Uint8Array instead of strings for the trace ID and span ID. --- ext/console/internal.d.ts | 3 +++ 1 file changed, 3 insertions(+) (limited to 'ext') diff --git a/ext/console/internal.d.ts b/ext/console/internal.d.ts index 45af616d6..5f9627cf5 100644 --- a/ext/console/internal.d.ts +++ b/ext/console/internal.d.ts @@ -9,4 +9,7 @@ declare module "ext:deno_console/01_console.js" { keys: (keyof TObject)[]; evaluate: boolean; }): Record; + + class Console { + } } -- cgit v1.2.3 From df1d36324ffd4f687c406e412f9255bf7a9d8a61 Mon Sep 17 00:00:00 2001 From: Marvin Hagemeister Date: Tue, 19 Nov 2024 01:39:40 +0100 Subject: fix(node/crypto): support promisify on generateKeyPair (#26913) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Calling `promisify(generateKeyPair)` didn't work as expected. It requires a custom promisify implementation. 
This was easy to fix thanks to the excellent debugging investigation in https://github.com/denoland/deno/issues/26910 Fixes https://github.com/denoland/deno/issues/26910 Co-authored-by: Bartek Iwańczuk --- ext/node/polyfills/internal/crypto/keygen.ts | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) (limited to 'ext') diff --git a/ext/node/polyfills/internal/crypto/keygen.ts b/ext/node/polyfills/internal/crypto/keygen.ts index 44bfd8327..b023ab106 100644 --- a/ext/node/polyfills/internal/crypto/keygen.ts +++ b/ext/node/polyfills/internal/crypto/keygen.ts @@ -30,6 +30,7 @@ import { import { Buffer } from "node:buffer"; import { KeyFormat, KeyType } from "ext:deno_node/internal/crypto/types.ts"; import process from "node:process"; +import { promisify } from "node:util"; import { op_node_generate_dh_group_key, @@ -570,7 +571,15 @@ export function generateKeyPair( privateKey: any, ) => void, ) { - createJob(kAsync, type, options).then((pair) => { + _generateKeyPair(type, options) + .then( + (res) => callback(null, res.publicKey, res.privateKey), + (err) => callback(err, null, null), + ); +} + +function _generateKeyPair(type: string, options: unknown) { + return createJob(kAsync, type, options).then((pair) => { const privateKeyHandle = op_node_get_private_key_from_pair(pair); const publicKeyHandle = op_node_get_public_key_from_pair(pair); @@ -589,12 +598,15 @@ export function generateKeyPair( } } - callback(null, publicKey, privateKey); - }).catch((err) => { - callback(err, null, null); + return { publicKey, privateKey }; }); } +Object.defineProperty(generateKeyPair, promisify.custom, { + enumerable: false, + value: _generateKeyPair, +}); + export interface KeyPairKeyObjectResult { publicKey: KeyObject; privateKey: KeyObject; -- cgit v1.2.3 From 9f26ca450936da66e57dad2206d0d11363e7b35c Mon Sep 17 00:00:00 2001 From: Yusuke Tanaka Date: Tue, 19 Nov 2024 10:46:24 +0900 Subject: feat(ext/http): Make http server parameters configurable (#26785) This 
commit makes http server parameters configurable on the extension initialization via two callbacks users can provide. The main motivation behind this change is to allow `deno_http` users to tune the HTTP/2 server to suit their needs, although Deno CLI users will not benefit from it as no JavaScript interface is exposed to set these parameters currently. It is up to users whether to provide hook functions. If not provided, the default configuration from hyper crate will be used. --- ext/http/http_next.rs | 79 ++++++++++++++++++++++++++++++++++++++------------- ext/http/lib.rs | 29 ++++++++++++++++++- 2 files changed, 88 insertions(+), 20 deletions(-) (limited to 'ext') diff --git a/ext/http/http_next.rs b/ext/http/http_next.rs index 1251f00cc..7dbac6021 100644 --- a/ext/http/http_next.rs +++ b/ext/http/http_next.rs @@ -18,6 +18,7 @@ use crate::service::HttpServerState; use crate::service::SignallingRc; use crate::websocket_upgrade::WebSocketUpgrade; use crate::LocalExecutor; +use crate::Options; use cache_control::CacheControl; use deno_core::external; use deno_core::futures::future::poll_fn; @@ -821,10 +822,16 @@ fn serve_http11_unconditional( io: impl HttpServeStream, svc: impl HttpService + 'static, cancel: Rc, + http1_builder_hook: Option http1::Builder>, ) -> impl Future> + 'static { - let conn = http1::Builder::new() - .keep_alive(true) - .writev(*USE_WRITEV) + let mut builder = http1::Builder::new(); + builder.keep_alive(true).writev(*USE_WRITEV); + + if let Some(http1_builder_hook) = http1_builder_hook { + builder = http1_builder_hook(builder); + } + + let conn = builder .serve_connection(TokioIo::new(io), svc) .with_upgrades(); @@ -843,9 +850,17 @@ fn serve_http2_unconditional( io: impl HttpServeStream, svc: impl HttpService + 'static, cancel: Rc, + http2_builder_hook: Option< + fn(http2::Builder) -> http2::Builder, + >, ) -> impl Future> + 'static { - let conn = - http2::Builder::new(LocalExecutor).serve_connection(TokioIo::new(io), svc); + let mut builder 
= http2::Builder::new(LocalExecutor); + + if let Some(http2_builder_hook) = http2_builder_hook { + builder = http2_builder_hook(builder); + } + + let conn = builder.serve_connection(TokioIo::new(io), svc); async { match conn.or_abort(cancel).await { Err(mut conn) => { @@ -861,15 +876,16 @@ async fn serve_http2_autodetect( io: impl HttpServeStream, svc: impl HttpService + 'static, cancel: Rc, + options: Options, ) -> Result<(), HttpNextError> { let prefix = NetworkStreamPrefixCheck::new(io, HTTP2_PREFIX); let (matches, io) = prefix.match_prefix().await?; if matches { - serve_http2_unconditional(io, svc, cancel) + serve_http2_unconditional(io, svc, cancel, options.http2_builder_hook) .await .map_err(HttpNextError::Hyper) } else { - serve_http11_unconditional(io, svc, cancel) + serve_http11_unconditional(io, svc, cancel, options.http1_builder_hook) .await .map_err(HttpNextError::Hyper) } @@ -880,6 +896,7 @@ fn serve_https( request_info: HttpConnectionProperties, lifetime: HttpLifetime, tx: tokio::sync::mpsc::Sender>, + options: Options, ) -> JoinHandle> { let HttpLifetime { server_state, @@ -891,21 +908,31 @@ fn serve_https( handle_request(req, request_info.clone(), server_state.clone(), tx.clone()) }); spawn( - async { + async move { let handshake = io.handshake().await?; // If the client specifically negotiates a protocol, we will use it. 
If not, we'll auto-detect // based on the prefix bytes let handshake = handshake.alpn; if Some(TLS_ALPN_HTTP_2) == handshake.as_deref() { - serve_http2_unconditional(io, svc, listen_cancel_handle) - .await - .map_err(HttpNextError::Hyper) + serve_http2_unconditional( + io, + svc, + listen_cancel_handle, + options.http2_builder_hook, + ) + .await + .map_err(HttpNextError::Hyper) } else if Some(TLS_ALPN_HTTP_11) == handshake.as_deref() { - serve_http11_unconditional(io, svc, listen_cancel_handle) - .await - .map_err(HttpNextError::Hyper) + serve_http11_unconditional( + io, + svc, + listen_cancel_handle, + options.http1_builder_hook, + ) + .await + .map_err(HttpNextError::Hyper) } else { - serve_http2_autodetect(io, svc, listen_cancel_handle).await + serve_http2_autodetect(io, svc, listen_cancel_handle, options).await } } .try_or_cancel(connection_cancel_handle), @@ -917,6 +944,7 @@ fn serve_http( request_info: HttpConnectionProperties, lifetime: HttpLifetime, tx: tokio::sync::mpsc::Sender>, + options: Options, ) -> JoinHandle> { let HttpLifetime { server_state, @@ -928,7 +956,7 @@ fn serve_http( handle_request(req, request_info.clone(), server_state.clone(), tx.clone()) }); spawn( - serve_http2_autodetect(io, svc, listen_cancel_handle) + serve_http2_autodetect(io, svc, listen_cancel_handle, options) .try_or_cancel(connection_cancel_handle), ) } @@ -938,6 +966,7 @@ fn serve_http_on( listen_properties: &HttpListenProperties, lifetime: HttpLifetime, tx: tokio::sync::mpsc::Sender>, + options: Options, ) -> JoinHandle> where HTTP: HttpPropertyExtractor, @@ -949,14 +978,14 @@ where match network_stream { NetworkStream::Tcp(conn) => { - serve_http(conn, connection_properties, lifetime, tx) + serve_http(conn, connection_properties, lifetime, tx, options) } NetworkStream::Tls(conn) => { - serve_https(conn, connection_properties, lifetime, tx) + serve_https(conn, connection_properties, lifetime, tx, options) } #[cfg(unix)] NetworkStream::Unix(conn) => { - serve_http(conn, 
connection_properties, lifetime, tx) + serve_http(conn, connection_properties, lifetime, tx, options) } } } @@ -1045,6 +1074,11 @@ where let lifetime = resource.lifetime(); + let options = { + let state = state.borrow(); + *state.borrow::() + }; + let listen_properties_clone: HttpListenProperties = listen_properties.clone(); let handle = spawn(async move { loop { @@ -1057,6 +1091,7 @@ where &listen_properties_clone, lifetime.clone(), tx.clone(), + options, ); } #[allow(unreachable_code)] @@ -1093,11 +1128,17 @@ where let (tx, rx) = tokio::sync::mpsc::channel(10); let resource: Rc = Rc::new(HttpJoinHandle::new(rx)); + let options = { + let state = state.borrow(); + *state.borrow::() + }; + let handle = serve_http_on::( connection, &listen_properties, resource.lifetime(), tx, + options, ); // Set the handle after we start the future diff --git a/ext/http/lib.rs b/ext/http/lib.rs index 49893b1b9..39b0bbc2a 100644 --- a/ext/http/lib.rs +++ b/ext/http/lib.rs @@ -39,6 +39,8 @@ use deno_net::raw::NetworkStream; use deno_websocket::ws_create_server_stream; use flate2::write::GzEncoder; use flate2::Compression; +use hyper::server::conn::http1; +use hyper::server::conn::http2; use hyper_util::rt::TokioIo; use hyper_v014::body::Bytes; use hyper_v014::body::HttpBody; @@ -96,6 +98,25 @@ pub use request_properties::HttpRequestProperties; pub use service::UpgradeUnavailableError; pub use websocket_upgrade::WebSocketUpgradeError; +#[derive(Debug, Default, Clone, Copy)] +pub struct Options { + /// By passing a hook function, the caller can customize various configuration + /// options for the HTTP/2 server. + /// See [`http2::Builder`] for what parameters can be customized. + /// + /// If `None`, the default configuration provided by hyper will be used. Note + /// that the default configuration is subject to change in future versions. 
+ pub http2_builder_hook: + Option) -> http2::Builder>, + /// By passing a hook function, the caller can customize various configuration + /// options for the HTTP/1 server. + /// See [`http1::Builder`] for what parameters can be customized. + /// + /// If `None`, the default configuration provided by hyper will be used. Note + /// that the default configuration is subject to change in future versions. + pub http1_builder_hook: Option http1::Builder>, +} + deno_core::extension!( deno_http, deps = [deno_web, deno_net, deno_fetch, deno_websocket], @@ -135,6 +156,12 @@ deno_core::extension!( http_next::op_http_cancel, ], esm = ["00_serve.ts", "01_http.js", "02_websocket.ts"], + options = { + options: Options, + }, + state = |state, options| { + state.put::(options.options); + } ); #[derive(Debug, thiserror::Error)] @@ -1117,7 +1144,7 @@ async fn op_http_upgrade_websocket( // Needed so hyper can use non Send futures #[derive(Clone)] -struct LocalExecutor; +pub struct LocalExecutor; impl hyper_v014::rt::Executor for LocalExecutor where -- cgit v1.2.3 From 0e2f6e38e7b93e42099f546ef2c01629964d095a Mon Sep 17 00:00:00 2001 From: Yusuke Tanaka Date: Tue, 19 Nov 2024 10:48:57 +0900 Subject: feat(ext/fetch): Make fetch client parameters configurable (#26909) This commit makes HTTP client parameters used in `fetch` API configurable on the extension initialization via a callback `client_builder_hook` that users can provide. The main motivation behind this change is to allow `deno_fetch` users to tune the HTTP/2 client to suit their needs, although Deno CLI users will not benefit from it as no JavaScript interface is exposed to set these parameters currently. It is up to users whether to provide a hook function. If not provided, the default configuration from hyper crate will be used. 
Ref #26785 --- ext/fetch/lib.rs | 23 +++++++++++++++++++++-- ext/fetch/tests.rs | 1 + ext/kv/remote.rs | 1 + 3 files changed, 23 insertions(+), 2 deletions(-) (limited to 'ext') diff --git a/ext/fetch/lib.rs b/ext/fetch/lib.rs index 5949f9f75..c8e93b9fe 100644 --- a/ext/fetch/lib.rs +++ b/ext/fetch/lib.rs @@ -67,6 +67,7 @@ use http_body_util::BodyExt; use hyper::body::Frame; use hyper_util::client::legacy::connect::HttpConnector; use hyper_util::client::legacy::connect::HttpInfo; +use hyper_util::client::legacy::Builder as HyperClientBuilder; use hyper_util::rt::TokioExecutor; use hyper_util::rt::TokioTimer; use serde::Deserialize; @@ -85,6 +86,16 @@ pub struct Options { pub user_agent: String, pub root_cert_store_provider: Option>, pub proxy: Option, + /// A callback to customize HTTP client configuration. + /// + /// The settings applied with this hook may be overridden by the options + /// provided through `Deno.createHttpClient()` API. For instance, if the hook + /// calls [`hyper_util::client::legacy::Builder::pool_max_idle_per_host`] with + /// a value of 99, and a user calls `Deno.createHttpClient({ poolMaxIdlePerHost: 42 })`, + /// the value that will take effect is 42. + /// + /// For more info on what can be configured, see [`hyper_util::client::legacy::Builder`]. 
+ pub client_builder_hook: Option HyperClientBuilder>, #[allow(clippy::type_complexity)] pub request_builder_hook: Option< fn(&mut http::Request) -> Result<(), deno_core::error::AnyError>, @@ -112,6 +123,7 @@ impl Default for Options { user_agent: "".to_string(), root_cert_store_provider: None, proxy: None, + client_builder_hook: None, request_builder_hook: None, unsafely_ignore_certificate_errors: None, client_cert_chain_and_key: TlsKeys::Null, @@ -271,6 +283,7 @@ pub fn create_client_from_options( pool_idle_timeout: None, http1: true, http2: true, + client_builder_hook: options.client_builder_hook, }, ) } @@ -908,6 +921,7 @@ where ), http1: args.http1, http2: args.http2, + client_builder_hook: options.client_builder_hook, }, )?; @@ -929,6 +943,7 @@ pub struct CreateHttpClientOptions { pub pool_idle_timeout: Option>, pub http1: bool, pub http2: bool, + pub client_builder_hook: Option HyperClientBuilder>, } impl Default for CreateHttpClientOptions { @@ -944,6 +959,7 @@ impl Default for CreateHttpClientOptions { pool_idle_timeout: None, http1: true, http2: true, + client_builder_hook: None, } } } @@ -999,11 +1015,14 @@ pub fn create_http_client( HttpClientCreateError::InvalidUserAgent(user_agent.to_string()) })?; - let mut builder = - hyper_util::client::legacy::Builder::new(TokioExecutor::new()); + let mut builder = HyperClientBuilder::new(TokioExecutor::new()); builder.timer(TokioTimer::new()); builder.pool_timer(TokioTimer::new()); + if let Some(client_builder_hook) = options.client_builder_hook { + builder = client_builder_hook(builder); + } + let mut proxies = proxy::from_env(); if let Some(proxy) = options.proxy { let mut intercept = proxy::Intercept::all(&proxy.url) diff --git a/ext/fetch/tests.rs b/ext/fetch/tests.rs index 5cd1a35a5..e053c6b1c 100644 --- a/ext/fetch/tests.rs +++ b/ext/fetch/tests.rs @@ -126,6 +126,7 @@ async fn rust_test_client_with_resolver( dns_resolver: resolver, http1: true, http2: true, + client_builder_hook: None, }, ) .unwrap(); diff 
--git a/ext/kv/remote.rs b/ext/kv/remote.rs index 63146daf7..1830aa67e 100644 --- a/ext/kv/remote.rs +++ b/ext/kv/remote.rs @@ -210,6 +210,7 @@ impl DatabaseHandler pool_idle_timeout: None, http1: false, http2: true, + client_builder_hook: None, }, )?; let fetch_client = FetchClient(client); -- cgit v1.2.3 From 069bc15030225393f7d05521505316066464bdbd Mon Sep 17 00:00:00 2001 From: Divy Srivastava Date: Tue, 19 Nov 2024 16:49:25 +0530 Subject: feat(ext/node): perf_hooks.monitorEventLoopDelay() (#26905) Fixes https://github.com/denoland/deno/issues/20961 Depends on https://github.com/denoland/deno_core/pull/965 and https://github.com/denoland/deno_core/pull/966 --- ext/ffi/dlfcn.rs | 11 +++- ext/node/Cargo.toml | 1 + ext/node/lib.rs | 3 + ext/node/ops/mod.rs | 1 + ext/node/ops/perf_hooks.rs | 135 +++++++++++++++++++++++++++++++++++++++ ext/node/polyfills/perf_hooks.ts | 10 +-- 6 files changed, 154 insertions(+), 7 deletions(-) create mode 100644 ext/node/ops/perf_hooks.rs (limited to 'ext') diff --git a/ext/ffi/dlfcn.rs b/ext/ffi/dlfcn.rs index 55909468f..26d1b71e9 100644 --- a/ext/ffi/dlfcn.rs +++ b/ext/ffi/dlfcn.rs @@ -15,6 +15,7 @@ use dlopen2::raw::Library; use serde::Deserialize; use serde_value::ValueDeserializer; use std::borrow::Cow; +use std::cell::RefCell; use std::collections::HashMap; use std::ffi::c_void; use std::rc::Rc; @@ -126,14 +127,17 @@ pub struct FfiLoadArgs { #[op2] pub fn op_ffi_load<'scope, FP>( scope: &mut v8::HandleScope<'scope>, - state: &mut OpState, + state: Rc>, #[serde] args: FfiLoadArgs, ) -> Result, DlfcnError> where FP: FfiPermissions + 'static, { - let permissions = state.borrow_mut::(); - let path = permissions.check_partial_with_path(&args.path)?; + let path = { + let mut state = state.borrow_mut(); + let permissions = state.borrow_mut::(); + permissions.check_partial_with_path(&args.path)? 
+ }; let lib = Library::open(&path).map_err(|e| { dlopen2::Error::OpeningLibraryError(std::io::Error::new( @@ -215,6 +219,7 @@ where } } + let mut state = state.borrow_mut(); let out = v8::Array::new(scope, 2); let rid = state.resource_table.add(resource); let rid_v8 = v8::Integer::new_from_unsigned(scope, rid); diff --git a/ext/node/Cargo.toml b/ext/node/Cargo.toml index c4ee86a95..36910a844 100644 --- a/ext/node/Cargo.toml +++ b/ext/node/Cargo.toml @@ -95,6 +95,7 @@ spki.workspace = true stable_deref_trait = "1.2.0" thiserror.workspace = true tokio.workspace = true +tokio-eld = "0.2" url.workspace = true webpki-root-certs.workspace = true winapi.workspace = true diff --git a/ext/node/lib.rs b/ext/node/lib.rs index 84f39552c..63f5794b7 100644 --- a/ext/node/lib.rs +++ b/ext/node/lib.rs @@ -427,6 +427,9 @@ deno_core::extension!(deno_node, ops::inspector::op_inspector_emit_protocol_event, ops::inspector::op_inspector_enabled, ], + objects = [ + ops::perf_hooks::EldHistogram + ], esm_entry_point = "ext:deno_node/02_init.js", esm = [ dir "polyfills", diff --git a/ext/node/ops/mod.rs b/ext/node/ops/mod.rs index b53f19dc2..e5ea8b417 100644 --- a/ext/node/ops/mod.rs +++ b/ext/node/ops/mod.rs @@ -10,6 +10,7 @@ pub mod idna; pub mod inspector; pub mod ipc; pub mod os; +pub mod perf_hooks; pub mod process; pub mod require; pub mod tls; diff --git a/ext/node/ops/perf_hooks.rs b/ext/node/ops/perf_hooks.rs new file mode 100644 index 000000000..636d0b2ad --- /dev/null +++ b/ext/node/ops/perf_hooks.rs @@ -0,0 +1,135 @@ +// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license. 
+ +use deno_core::op2; +use deno_core::GarbageCollected; + +use std::cell::Cell; + +#[derive(Debug, thiserror::Error)] +pub enum PerfHooksError { + #[error(transparent)] + TokioEld(#[from] tokio_eld::Error), +} + +pub struct EldHistogram { + eld: tokio_eld::EldHistogram, + started: Cell, +} + +impl GarbageCollected for EldHistogram {} + +#[op2] +impl EldHistogram { + // Creates an interval EldHistogram object that samples and reports the event + // loop delay over time. + // + // The delays will be reported in nanoseconds. + #[constructor] + #[cppgc] + pub fn new(#[smi] resolution: u32) -> Result { + Ok(EldHistogram { + eld: tokio_eld::EldHistogram::new(resolution as usize)?, + started: Cell::new(false), + }) + } + + // Enables the update interval timer. + // + // Returns true if the timer was started, false if it was already started. + #[fast] + fn enable(&self) -> bool { + if self.started.get() { + return false; + } + + self.eld.start(); + self.started.set(true); + + true + } + + // Disables the update interval timer. + // + // Returns true if the timer was stopped, false if it was already stopped. + #[fast] + fn disable(&self) -> bool { + if !self.started.get() { + return false; + } + + self.eld.stop(); + self.started.set(false); + + true + } + + // Returns the value at the given percentile. + // + // `percentile` ∈ (0, 100] + #[fast] + #[number] + fn percentile(&self, percentile: f64) -> u64 { + self.eld.value_at_percentile(percentile) + } + + // Returns the value at the given percentile as a bigint. + #[fast] + #[bigint] + fn percentile_big_int(&self, percentile: f64) -> u64 { + self.eld.value_at_percentile(percentile) + } + + // The number of samples recorded by the histogram. + #[getter] + #[number] + fn count(&self) -> u64 { + self.eld.len() + } + + // The number of samples recorded by the histogram as a bigint. + #[getter] + #[bigint] + fn count_big_int(&self) -> u64 { + self.eld.len() + } + + // The maximum recorded event loop delay. 
+ #[getter] + #[number] + fn max(&self) -> u64 { + self.eld.max() + } + + // The maximum recorded event loop delay as a bigint. + #[getter] + #[bigint] + fn max_big_int(&self) -> u64 { + self.eld.max() + } + + // The mean of the recorded event loop delays. + #[getter] + fn mean(&self) -> f64 { + self.eld.mean() + } + + // The minimum recorded event loop delay. + #[getter] + #[number] + fn min(&self) -> u64 { + self.eld.min() + } + + // The minimum recorded event loop delay as a bigint. + #[getter] + #[bigint] + fn min_big_int(&self) -> u64 { + self.eld.min() + } + + // The standard deviation of the recorded event loop delays. + #[getter] + fn stddev(&self) -> f64 { + self.eld.stdev() + } +} diff --git a/ext/node/polyfills/perf_hooks.ts b/ext/node/polyfills/perf_hooks.ts index d92b925b5..ec76b3ce2 100644 --- a/ext/node/polyfills/perf_hooks.ts +++ b/ext/node/polyfills/perf_hooks.ts @@ -8,6 +8,7 @@ import { performance as shimPerformance, PerformanceEntry, } from "ext:deno_web/15_performance.js"; +import { EldHistogram } from "ext:core/ops"; class PerformanceObserver { static supportedEntryTypes: string[] = []; @@ -89,10 +90,11 @@ const performance: ) => shimPerformance.dispatchEvent(...args), }; -const monitorEventLoopDelay = () => - notImplemented( - "monitorEventLoopDelay from performance", - ); +function monitorEventLoopDelay(options = {}) { + const { resolution = 10 } = options; + + return new EldHistogram(resolution); +} export default { performance, -- cgit v1.2.3