summaryrefslogtreecommitdiff
path: root/ext
diff options
context:
space:
mode:
Diffstat (limited to 'ext')
-rw-r--r--ext/broadcast_channel/Cargo.toml2
-rw-r--r--ext/cache/Cargo.toml2
-rw-r--r--ext/cache/lib.rs8
-rw-r--r--ext/cache/sqlite.rs32
-rw-r--r--ext/canvas/Cargo.toml2
-rw-r--r--ext/console/01_console.js18
-rw-r--r--ext/console/Cargo.toml2
-rw-r--r--ext/console/internal.d.ts3
-rw-r--r--ext/cron/Cargo.toml2
-rw-r--r--ext/crypto/Cargo.toml4
-rw-r--r--ext/crypto/decrypt.rs124
-rw-r--r--ext/crypto/ed25519.rs22
-rw-r--r--ext/crypto/encrypt.rs74
-rw-r--r--ext/crypto/export_key.rs79
-rw-r--r--ext/crypto/generate_key.rs51
-rw-r--r--ext/crypto/import_key.rs288
-rw-r--r--ext/crypto/lib.rs275
-rw-r--r--ext/crypto/shared.rs87
-rw-r--r--ext/crypto/x25519.rs18
-rw-r--r--ext/crypto/x448.rs19
-rw-r--r--ext/fetch/22_body.js14
-rw-r--r--ext/fetch/23_request.js16
-rw-r--r--ext/fetch/23_response.js105
-rw-r--r--ext/fetch/Cargo.toml4
-rw-r--r--ext/fetch/dns.rs116
-rw-r--r--ext/fetch/fs_fetch_handler.rs5
-rw-r--r--ext/fetch/lib.deno_fetch.d.ts2
-rw-r--r--ext/fetch/lib.rs276
-rw-r--r--ext/fetch/tests.rs80
-rw-r--r--ext/ffi/Cargo.toml5
-rw-r--r--ext/ffi/call.rs75
-rw-r--r--ext/ffi/callback.rs47
-rw-r--r--ext/ffi/dlfcn.rs55
-rw-r--r--ext/ffi/ir.rs142
-rw-r--r--ext/ffi/lib.rs18
-rw-r--r--ext/ffi/repr.rs127
-rw-r--r--ext/ffi/static.rs31
-rw-r--r--ext/fs/30_fs.js6
-rw-r--r--ext/fs/Cargo.toml6
-rw-r--r--ext/fs/in_memory_fs.rs2
-rw-r--r--ext/fs/lib.rs53
-rw-r--r--ext/fs/ops.rs397
-rw-r--r--ext/fs/std_fs.rs36
-rw-r--r--ext/http/00_serve.ts158
-rw-r--r--ext/http/Cargo.toml2
-rw-r--r--ext/http/fly_accept_encoding.rs2
-rw-r--r--ext/http/http_next.rs261
-rw-r--r--ext/http/lib.rs184
-rw-r--r--ext/http/request_body.rs12
-rw-r--r--ext/http/request_properties.rs27
-rw-r--r--ext/http/service.rs23
-rw-r--r--ext/http/websocket_upgrade.rs60
-rw-r--r--ext/io/Cargo.toml2
-rw-r--r--ext/io/bi_pipe.rs43
-rw-r--r--ext/io/fs.rs71
-rw-r--r--ext/io/lib.rs12
-rw-r--r--ext/kv/Cargo.toml4
-rw-r--r--ext/kv/config.rs21
-rw-r--r--ext/kv/lib.rs308
-rw-r--r--ext/kv/remote.rs15
-rw-r--r--ext/kv/sqlite.rs12
-rw-r--r--ext/napi/Cargo.toml12
-rw-r--r--ext/napi/README.md114
-rw-r--r--ext/napi/build.rs22
-rw-r--r--ext/napi/generated_symbol_exports_list_linux.def1
-rw-r--r--ext/napi/generated_symbol_exports_list_macos.def160
-rw-r--r--ext/napi/generated_symbol_exports_list_windows.def162
-rw-r--r--ext/napi/js_native_api.rs3616
-rw-r--r--ext/napi/lib.rs85
-rw-r--r--ext/napi/node_api.rs1009
-rw-r--r--ext/napi/sym/Cargo.toml21
-rw-r--r--ext/napi/sym/README.md38
-rw-r--r--ext/napi/sym/lib.rs31
-rw-r--r--ext/napi/sym/symbol_exports.json164
-rw-r--r--ext/napi/util.rs287
-rw-r--r--ext/napi/uv.rs230
-rw-r--r--ext/net/01_net.js15
-rw-r--r--ext/net/Cargo.toml7
-rw-r--r--ext/net/io.rs45
-rw-r--r--ext/net/lib.rs19
-rw-r--r--ext/net/ops.rs244
-rw-r--r--ext/net/ops_tls.rs107
-rw-r--r--ext/net/ops_unix.rs67
-rw-r--r--ext/net/resolve_addr.rs5
-rw-r--r--ext/node/Cargo.toml4
-rw-r--r--ext/node/lib.rs127
-rw-r--r--ext/node/ops/blocklist.rs53
-rw-r--r--ext/node/ops/crypto/cipher.rs145
-rw-r--r--ext/node/ops/crypto/digest.rs30
-rw-r--r--ext/node/ops/crypto/keys.rs683
-rw-r--r--ext/node/ops/crypto/mod.rs416
-rw-r--r--ext/node/ops/crypto/sign.rs168
-rw-r--r--ext/node/ops/crypto/x509.rs66
-rw-r--r--ext/node/ops/fs.rs61
-rw-r--r--ext/node/ops/http.rs91
-rw-r--r--ext/node/ops/http2.rs83
-rw-r--r--ext/node/ops/idna.rs47
-rw-r--r--ext/node/ops/inspector.rs161
-rw-r--r--ext/node/ops/ipc.rs59
-rw-r--r--ext/node/ops/mod.rs2
-rw-r--r--ext/node/ops/os/mod.rs194
-rw-r--r--ext/node/ops/os/priority.rs30
-rw-r--r--ext/node/ops/perf_hooks.rs135
-rw-r--r--ext/node/ops/process.rs3
-rw-r--r--ext/node/ops/require.rs200
-rw-r--r--ext/node/ops/util.rs3
-rw-r--r--ext/node/ops/v8.rs25
-rw-r--r--ext/node/ops/winerror.rs3
-rw-r--r--ext/node/ops/worker_threads.rs77
-rw-r--r--ext/node/ops/zlib/brotli.rs77
-rw-r--r--ext/node/ops/zlib/mod.rs86
-rw-r--r--ext/node/ops/zlib/mode.rs21
-rw-r--r--ext/node/polyfills/01_require.js68
-rw-r--r--ext/node/polyfills/_fs/_fs_common.ts1
-rw-r--r--ext/node/polyfills/_fs/_fs_copy.ts6
-rw-r--r--ext/node/polyfills/_fs/_fs_open.ts4
-rw-r--r--ext/node/polyfills/_fs/_fs_readFile.ts10
-rw-r--r--ext/node/polyfills/_fs/_fs_readlink.ts33
-rw-r--r--ext/node/polyfills/_fs/_fs_readv.ts1
-rw-r--r--ext/node/polyfills/_fs/_fs_stat.ts24
-rw-r--r--ext/node/polyfills/_next_tick.ts5
-rw-r--r--ext/node/polyfills/_process/streams.mjs9
-rw-r--r--ext/node/polyfills/_tls_wrap.ts16
-rw-r--r--ext/node/polyfills/_utils.ts4
-rw-r--r--ext/node/polyfills/_zlib.mjs7
-rw-r--r--ext/node/polyfills/child_process.ts2
-rw-r--r--ext/node/polyfills/http.ts533
-rw-r--r--ext/node/polyfills/http2.ts12
-rw-r--r--ext/node/polyfills/inspector.js210
-rw-r--r--ext/node/polyfills/inspector.ts82
-rw-r--r--ext/node/polyfills/inspector/promises.js20
-rw-r--r--ext/node/polyfills/internal/buffer.mjs575
-rw-r--r--ext/node/polyfills/internal/child_process.ts27
-rw-r--r--ext/node/polyfills/internal/crypto/_randomInt.ts26
-rw-r--r--ext/node/polyfills/internal/crypto/keygen.ts21
-rw-r--r--ext/node/polyfills/internal/crypto/random.ts1
-rw-r--r--ext/node/polyfills/internal/errors.ts49
-rw-r--r--ext/node/polyfills/internal/net.ts1
-rw-r--r--ext/node/polyfills/internal/util/inspect.mjs13
-rw-r--r--ext/node/polyfills/internal_binding/_timingSafeEqual.ts21
-rw-r--r--ext/node/polyfills/internal_binding/http_parser.ts160
-rw-r--r--ext/node/polyfills/internal_binding/mod.ts3
-rw-r--r--ext/node/polyfills/internal_binding/tcp_wrap.ts6
-rw-r--r--ext/node/polyfills/internal_binding/uv.ts2
-rw-r--r--ext/node/polyfills/net.ts490
-rw-r--r--ext/node/polyfills/os.ts53
-rw-r--r--ext/node/polyfills/perf_hooks.ts10
-rw-r--r--ext/node/polyfills/process.ts20
-rw-r--r--ext/node/polyfills/timers.ts88
-rw-r--r--ext/node/polyfills/vm.js1
-rw-r--r--ext/node/polyfills/zlib.ts53
-rw-r--r--ext/tls/Cargo.toml2
-rw-r--r--ext/url/Cargo.toml3
-rw-r--r--ext/url/lib.rs2
-rw-r--r--ext/url/urlpattern.rs28
-rw-r--r--ext/web/02_timers.js13
-rw-r--r--ext/web/06_streams.js63
-rw-r--r--ext/web/15_performance.js24
-rw-r--r--ext/web/Cargo.toml3
-rw-r--r--ext/web/blob.rs60
-rw-r--r--ext/web/compression.rs64
-rw-r--r--ext/web/lib.rs152
-rw-r--r--ext/web/message_port.rs46
-rw-r--r--ext/web/stream_resource.rs36
-rw-r--r--ext/web/timers.rs65
-rw-r--r--ext/webgpu/01_webgpu.js12
-rw-r--r--ext/webgpu/Cargo.toml3
-rw-r--r--ext/webgpu/buffer.rs54
-rw-r--r--ext/webgpu/bundle.rs67
-rw-r--r--ext/webgpu/byow.rs84
-rw-r--r--ext/webgpu/error.rs28
-rw-r--r--ext/webgpu/lib.rs61
-rw-r--r--ext/webgpu/render_pass.rs72
-rw-r--r--ext/webgpu/sampler.rs3
-rw-r--r--ext/webgpu/shader.rs3
-rw-r--r--ext/webgpu/surface.rs43
-rw-r--r--ext/webgpu/texture.rs5
-rw-r--r--ext/webidl/00_webidl.js126
-rw-r--r--ext/webidl/Cargo.toml2
-rw-r--r--ext/webidl/internal.d.ts26
-rw-r--r--ext/websocket/01_websocket.js6
-rw-r--r--ext/websocket/Cargo.toml3
-rw-r--r--ext/websocket/lib.rs191
-rw-r--r--ext/webstorage/Cargo.toml3
-rw-r--r--ext/webstorage/lib.rs77
185 files changed, 13596 insertions, 3959 deletions
diff --git a/ext/broadcast_channel/Cargo.toml b/ext/broadcast_channel/Cargo.toml
index b19c4ce15..90ac03835 100644
--- a/ext/broadcast_channel/Cargo.toml
+++ b/ext/broadcast_channel/Cargo.toml
@@ -2,7 +2,7 @@
[package]
name = "deno_broadcast_channel"
-version = "0.165.0"
+version = "0.171.0"
authors.workspace = true
edition.workspace = true
license.workspace = true
diff --git a/ext/cache/Cargo.toml b/ext/cache/Cargo.toml
index 9d876fcb7..56fa0a527 100644
--- a/ext/cache/Cargo.toml
+++ b/ext/cache/Cargo.toml
@@ -2,7 +2,7 @@
[package]
name = "deno_cache"
-version = "0.103.0"
+version = "0.109.0"
authors.workspace = true
edition.workspace = true
license.workspace = true
diff --git a/ext/cache/lib.rs b/ext/cache/lib.rs
index 08661c349..524d4cea0 100644
--- a/ext/cache/lib.rs
+++ b/ext/cache/lib.rs
@@ -28,12 +28,14 @@ pub enum CacheError {
Resource(deno_core::error::AnyError),
#[error(transparent)]
Other(deno_core::error::AnyError),
- #[error(transparent)]
+ #[error("{0}")]
Io(#[from] std::io::Error),
}
#[derive(Clone)]
-pub struct CreateCache<C: Cache + 'static>(pub Arc<dyn Fn() -> C>);
+pub struct CreateCache<C: Cache + 'static>(
+ pub Arc<dyn Fn() -> Result<C, CacheError>>,
+);
deno_core::extension!(deno_cache,
deps = [ deno_webidl, deno_web, deno_url, deno_fetch ],
@@ -231,7 +233,7 @@ where
if let Some(cache) = state.try_borrow::<CA>() {
Ok(cache.clone())
} else if let Some(create_cache) = state.try_borrow::<CreateCache<CA>>() {
- let cache = create_cache.0();
+ let cache = create_cache.0()?;
state.put(cache);
Ok(state.borrow::<CA>().clone())
} else {
diff --git a/ext/cache/sqlite.rs b/ext/cache/sqlite.rs
index e4991c32f..469e3e51d 100644
--- a/ext/cache/sqlite.rs
+++ b/ext/cache/sqlite.rs
@@ -8,6 +8,7 @@ use std::time::SystemTime;
use std::time::UNIX_EPOCH;
use async_trait::async_trait;
+use deno_core::anyhow::Context;
use deno_core::error::AnyError;
use deno_core::futures::future::poll_fn;
use deno_core::parking_lot::Mutex;
@@ -42,10 +43,16 @@ pub struct SqliteBackedCache {
}
impl SqliteBackedCache {
- pub fn new(cache_storage_dir: PathBuf) -> Self {
+ pub fn new(cache_storage_dir: PathBuf) -> Result<Self, CacheError> {
{
std::fs::create_dir_all(&cache_storage_dir)
- .expect("failed to create cache dir");
+ .with_context(|| {
+ format!(
+ "Failed to create cache storage directory {}",
+ cache_storage_dir.display()
+ )
+ })
+ .map_err(CacheError::Other)?;
let path = cache_storage_dir.join("cache_metadata.db");
let connection = rusqlite::Connection::open(&path).unwrap_or_else(|_| {
panic!("failed to open cache db at {}", path.display())
@@ -57,18 +64,14 @@ impl SqliteBackedCache {
PRAGMA synchronous=NORMAL;
PRAGMA optimize;
";
- connection
- .execute_batch(initial_pragmas)
- .expect("failed to execute pragmas");
- connection
- .execute(
- "CREATE TABLE IF NOT EXISTS cache_storage (
+ connection.execute_batch(initial_pragmas)?;
+ connection.execute(
+ "CREATE TABLE IF NOT EXISTS cache_storage (
id INTEGER PRIMARY KEY,
cache_name TEXT NOT NULL UNIQUE
)",
- (),
- )
- .expect("failed to create cache_storage table");
+ (),
+ )?;
connection
.execute(
"CREATE TABLE IF NOT EXISTS request_response_list (
@@ -86,12 +89,11 @@ impl SqliteBackedCache {
UNIQUE (cache_id, request_url)
)",
(),
- )
- .expect("failed to create request_response_list table");
- SqliteBackedCache {
+ )?;
+ Ok(SqliteBackedCache {
connection: Arc::new(Mutex::new(connection)),
cache_storage_dir,
- }
+ })
}
}
}
diff --git a/ext/canvas/Cargo.toml b/ext/canvas/Cargo.toml
index 78c674348..4231d7c84 100644
--- a/ext/canvas/Cargo.toml
+++ b/ext/canvas/Cargo.toml
@@ -2,7 +2,7 @@
[package]
name = "deno_canvas"
-version = "0.40.0"
+version = "0.46.0"
authors.workspace = true
edition.workspace = true
license.workspace = true
diff --git a/ext/console/01_console.js b/ext/console/01_console.js
index d9acc958a..3803492b9 100644
--- a/ext/console/01_console.js
+++ b/ext/console/01_console.js
@@ -84,6 +84,7 @@ const {
NumberIsInteger,
NumberIsNaN,
NumberParseInt,
+ NumberParseFloat,
NumberPrototypeToFixed,
NumberPrototypeToString,
NumberPrototypeValueOf,
@@ -2652,6 +2653,7 @@ const HSL_PATTERN = new SafeRegExp(
);
function parseCssColor(colorString) {
+ colorString = StringPrototypeToLowerCase(colorString);
if (colorKeywords.has(colorString)) {
colorString = colorKeywords.get(colorString);
}
@@ -3010,20 +3012,18 @@ function inspectArgs(args, inspectOptions = { __proto__: null }) {
} else if (ArrayPrototypeIncludes(["d", "i"], char)) {
// Format as an integer.
const value = args[a++];
- if (typeof value == "bigint") {
- formattedArg = `${value}n`;
- } else if (typeof value == "number") {
- formattedArg = `${NumberParseInt(String(value))}`;
- } else {
+ if (typeof value === "symbol") {
formattedArg = "NaN";
+ } else {
+ formattedArg = `${NumberParseInt(value)}`;
}
} else if (char == "f") {
// Format as a floating point value.
const value = args[a++];
- if (typeof value == "number") {
- formattedArg = `${value}`;
- } else {
+ if (typeof value === "symbol") {
formattedArg = "NaN";
+ } else {
+ formattedArg = `${NumberParseFloat(value)}`;
}
} else if (ArrayPrototypeIncludes(["O", "o"], char)) {
// Format as an object.
@@ -3257,7 +3257,7 @@ class Console {
const stringifyValue = (value) =>
inspectValueWithQuotes(value, {
- ...getDefaultInspectOptions(),
+ ...getConsoleInspectOptions(noColorStdout()),
depth: 1,
compact: true,
});
diff --git a/ext/console/Cargo.toml b/ext/console/Cargo.toml
index 5c143ca18..80f1cca84 100644
--- a/ext/console/Cargo.toml
+++ b/ext/console/Cargo.toml
@@ -2,7 +2,7 @@
[package]
name = "deno_console"
-version = "0.171.0"
+version = "0.177.0"
authors.workspace = true
edition.workspace = true
license.workspace = true
diff --git a/ext/console/internal.d.ts b/ext/console/internal.d.ts
index 45af616d6..5f9627cf5 100644
--- a/ext/console/internal.d.ts
+++ b/ext/console/internal.d.ts
@@ -9,4 +9,7 @@ declare module "ext:deno_console/01_console.js" {
keys: (keyof TObject)[];
evaluate: boolean;
}): Record<string, unknown>;
+
+ class Console {
+ }
}
diff --git a/ext/cron/Cargo.toml b/ext/cron/Cargo.toml
index 10f09b57c..966ccdc95 100644
--- a/ext/cron/Cargo.toml
+++ b/ext/cron/Cargo.toml
@@ -2,7 +2,7 @@
[package]
name = "deno_cron"
-version = "0.51.0"
+version = "0.57.0"
authors.workspace = true
edition.workspace = true
license.workspace = true
diff --git a/ext/crypto/Cargo.toml b/ext/crypto/Cargo.toml
index c81c8f6a7..a5794dc68 100644
--- a/ext/crypto/Cargo.toml
+++ b/ext/crypto/Cargo.toml
@@ -2,7 +2,7 @@
[package]
name = "deno_crypto"
-version = "0.185.0"
+version = "0.191.0"
authors.workspace = true
edition.workspace = true
license.workspace = true
@@ -41,5 +41,7 @@ sha1.workspace = true
sha2.workspace = true
signature.workspace = true
spki.workspace = true
+thiserror.workspace = true
+tokio.workspace = true
uuid.workspace = true
x25519-dalek = "2.0.0"
diff --git a/ext/crypto/decrypt.rs b/ext/crypto/decrypt.rs
index 9b104e178..114047518 100644
--- a/ext/crypto/decrypt.rs
+++ b/ext/crypto/decrypt.rs
@@ -16,9 +16,6 @@ use ctr::cipher::StreamCipher;
use ctr::Ctr128BE;
use ctr::Ctr32BE;
use ctr::Ctr64BE;
-use deno_core::error::custom_error;
-use deno_core::error::type_error;
-use deno_core::error::AnyError;
use deno_core::op2;
use deno_core::unsync::spawn_blocking;
use deno_core::JsBuffer;
@@ -73,12 +70,36 @@ pub enum DecryptAlgorithm {
},
}
+#[derive(Debug, thiserror::Error)]
+pub enum DecryptError {
+ #[error(transparent)]
+ General(#[from] SharedError),
+ #[error(transparent)]
+ Pkcs1(#[from] rsa::pkcs1::Error),
+ #[error("Decryption failed")]
+ Failed,
+ #[error("invalid length")]
+ InvalidLength,
+ #[error("invalid counter length. Currently supported 32/64/128 bits")]
+ InvalidCounterLength,
+ #[error("tag length not equal to 128")]
+ InvalidTagLength,
+ #[error("invalid key or iv")]
+ InvalidKeyOrIv,
+ #[error("tried to decrypt too much data")]
+ TooMuchData,
+ #[error("iv length not equal to 12 or 16")]
+ InvalidIvLength,
+ #[error("{0}")]
+ Rsa(rsa::Error),
+}
+
#[op2(async)]
#[serde]
pub async fn op_crypto_decrypt(
#[serde] opts: DecryptOptions,
#[buffer] data: JsBuffer,
-) -> Result<ToJsBuffer, AnyError> {
+) -> Result<ToJsBuffer, DecryptError> {
let key = opts.key;
let fun = move || match opts.algorithm {
DecryptAlgorithm::RsaOaep { hash, label } => {
@@ -108,7 +129,7 @@ fn decrypt_rsa_oaep(
hash: ShaHash,
label: Vec<u8>,
data: &[u8],
-) -> Result<Vec<u8>, deno_core::anyhow::Error> {
+) -> Result<Vec<u8>, DecryptError> {
let key = key.as_rsa_private_key()?;
let private_key = rsa::RsaPrivateKey::from_pkcs1_der(key)?;
@@ -139,7 +160,7 @@ fn decrypt_rsa_oaep(
private_key
.decrypt(padding, data)
- .map_err(|e| custom_error("DOMExceptionOperationError", e.to_string()))
+ .map_err(DecryptError::Rsa)
}
fn decrypt_aes_cbc(
@@ -147,7 +168,7 @@ fn decrypt_aes_cbc(
length: usize,
iv: Vec<u8>,
data: &[u8],
-) -> Result<Vec<u8>, deno_core::anyhow::Error> {
+) -> Result<Vec<u8>, DecryptError> {
let key = key.as_secret_key()?;
// 2.
@@ -155,53 +176,32 @@ fn decrypt_aes_cbc(
128 => {
// Section 10.3 Step 2 of RFC 2315 https://www.rfc-editor.org/rfc/rfc2315
type Aes128CbcDec = cbc::Decryptor<aes::Aes128>;
- let cipher = Aes128CbcDec::new_from_slices(key, &iv).map_err(|_| {
- custom_error(
- "DOMExceptionOperationError",
- "Invalid key or iv".to_string(),
- )
- })?;
+ let cipher = Aes128CbcDec::new_from_slices(key, &iv)
+ .map_err(|_| DecryptError::InvalidKeyOrIv)?;
- cipher.decrypt_padded_vec_mut::<Pkcs7>(data).map_err(|_| {
- custom_error(
- "DOMExceptionOperationError",
- "Decryption failed".to_string(),
- )
- })?
+ cipher
+ .decrypt_padded_vec_mut::<Pkcs7>(data)
+ .map_err(|_| DecryptError::Failed)?
}
192 => {
// Section 10.3 Step 2 of RFC 2315 https://www.rfc-editor.org/rfc/rfc2315
type Aes192CbcDec = cbc::Decryptor<aes::Aes192>;
- let cipher = Aes192CbcDec::new_from_slices(key, &iv).map_err(|_| {
- custom_error(
- "DOMExceptionOperationError",
- "Invalid key or iv".to_string(),
- )
- })?;
+ let cipher = Aes192CbcDec::new_from_slices(key, &iv)
+ .map_err(|_| DecryptError::InvalidKeyOrIv)?;
- cipher.decrypt_padded_vec_mut::<Pkcs7>(data).map_err(|_| {
- custom_error(
- "DOMExceptionOperationError",
- "Decryption failed".to_string(),
- )
- })?
+ cipher
+ .decrypt_padded_vec_mut::<Pkcs7>(data)
+ .map_err(|_| DecryptError::Failed)?
}
256 => {
// Section 10.3 Step 2 of RFC 2315 https://www.rfc-editor.org/rfc/rfc2315
type Aes256CbcDec = cbc::Decryptor<aes::Aes256>;
- let cipher = Aes256CbcDec::new_from_slices(key, &iv).map_err(|_| {
- custom_error(
- "DOMExceptionOperationError",
- "Invalid key or iv".to_string(),
- )
- })?;
+ let cipher = Aes256CbcDec::new_from_slices(key, &iv)
+ .map_err(|_| DecryptError::InvalidKeyOrIv)?;
- cipher.decrypt_padded_vec_mut::<Pkcs7>(data).map_err(|_| {
- custom_error(
- "DOMExceptionOperationError",
- "Decryption failed".to_string(),
- )
- })?
+ cipher
+ .decrypt_padded_vec_mut::<Pkcs7>(data)
+ .map_err(|_| DecryptError::Failed)?
}
_ => unreachable!(),
};
@@ -214,7 +214,7 @@ fn decrypt_aes_ctr_gen<B>(
key: &[u8],
counter: &[u8],
data: &[u8],
-) -> Result<Vec<u8>, AnyError>
+) -> Result<Vec<u8>, DecryptError>
where
B: KeyIvInit + StreamCipher,
{
@@ -223,7 +223,7 @@ where
let mut plaintext = data.to_vec();
cipher
.try_apply_keystream(&mut plaintext)
- .map_err(|_| operation_error("tried to decrypt too much data"))?;
+ .map_err(|_| DecryptError::TooMuchData)?;
Ok(plaintext)
}
@@ -235,12 +235,12 @@ fn decrypt_aes_gcm_gen<N: ArrayLength<u8>>(
length: usize,
additional_data: Vec<u8>,
plaintext: &mut [u8],
-) -> Result<(), AnyError> {
+) -> Result<(), DecryptError> {
let nonce = Nonce::from_slice(nonce);
match length {
128 => {
let cipher = aes_gcm::AesGcm::<Aes128, N>::new_from_slice(key)
- .map_err(|_| operation_error("Decryption failed"))?;
+ .map_err(|_| DecryptError::Failed)?;
cipher
.decrypt_in_place_detached(
nonce,
@@ -248,11 +248,11 @@ fn decrypt_aes_gcm_gen<N: ArrayLength<u8>>(
plaintext,
tag,
)
- .map_err(|_| operation_error("Decryption failed"))?
+ .map_err(|_| DecryptError::Failed)?
}
192 => {
let cipher = aes_gcm::AesGcm::<Aes192, N>::new_from_slice(key)
- .map_err(|_| operation_error("Decryption failed"))?;
+ .map_err(|_| DecryptError::Failed)?;
cipher
.decrypt_in_place_detached(
nonce,
@@ -260,11 +260,11 @@ fn decrypt_aes_gcm_gen<N: ArrayLength<u8>>(
plaintext,
tag,
)
- .map_err(|_| operation_error("Decryption failed"))?
+ .map_err(|_| DecryptError::Failed)?
}
256 => {
let cipher = aes_gcm::AesGcm::<Aes256, N>::new_from_slice(key)
- .map_err(|_| operation_error("Decryption failed"))?;
+ .map_err(|_| DecryptError::Failed)?;
cipher
.decrypt_in_place_detached(
nonce,
@@ -272,9 +272,9 @@ fn decrypt_aes_gcm_gen<N: ArrayLength<u8>>(
plaintext,
tag,
)
- .map_err(|_| operation_error("Decryption failed"))?
+ .map_err(|_| DecryptError::Failed)?
}
- _ => return Err(type_error("invalid length")),
+ _ => return Err(DecryptError::InvalidLength),
};
Ok(())
@@ -286,7 +286,7 @@ fn decrypt_aes_ctr(
counter: &[u8],
ctr_length: usize,
data: &[u8],
-) -> Result<Vec<u8>, deno_core::anyhow::Error> {
+) -> Result<Vec<u8>, DecryptError> {
let key = key.as_secret_key()?;
match ctr_length {
@@ -294,23 +294,21 @@ fn decrypt_aes_ctr(
128 => decrypt_aes_ctr_gen::<Ctr32BE<aes::Aes128>>(key, counter, data),
192 => decrypt_aes_ctr_gen::<Ctr32BE<aes::Aes192>>(key, counter, data),
256 => decrypt_aes_ctr_gen::<Ctr32BE<aes::Aes256>>(key, counter, data),
- _ => Err(type_error("invalid length")),
+ _ => Err(DecryptError::InvalidLength),
},
64 => match key_length {
128 => decrypt_aes_ctr_gen::<Ctr64BE<aes::Aes128>>(key, counter, data),
192 => decrypt_aes_ctr_gen::<Ctr64BE<aes::Aes192>>(key, counter, data),
256 => decrypt_aes_ctr_gen::<Ctr64BE<aes::Aes256>>(key, counter, data),
- _ => Err(type_error("invalid length")),
+ _ => Err(DecryptError::InvalidLength),
},
128 => match key_length {
128 => decrypt_aes_ctr_gen::<Ctr128BE<aes::Aes128>>(key, counter, data),
192 => decrypt_aes_ctr_gen::<Ctr128BE<aes::Aes192>>(key, counter, data),
256 => decrypt_aes_ctr_gen::<Ctr128BE<aes::Aes256>>(key, counter, data),
- _ => Err(type_error("invalid length")),
+ _ => Err(DecryptError::InvalidLength),
},
- _ => Err(type_error(
- "invalid counter length. Currently supported 32/64/128 bits",
- )),
+ _ => Err(DecryptError::InvalidCounterLength),
}
}
@@ -321,7 +319,7 @@ fn decrypt_aes_gcm(
iv: Vec<u8>,
additional_data: Option<Vec<u8>>,
data: &[u8],
-) -> Result<Vec<u8>, AnyError> {
+) -> Result<Vec<u8>, DecryptError> {
let key = key.as_secret_key()?;
let additional_data = additional_data.unwrap_or_default();
@@ -330,7 +328,7 @@ fn decrypt_aes_gcm(
// Note that encryption won't fail, it instead truncates the tag
// to the specified tag length as specified in the spec.
if tag_length != 128 {
- return Err(type_error("tag length not equal to 128"));
+ return Err(DecryptError::InvalidTagLength);
}
let sep = data.len() - (tag_length / 8);
@@ -357,7 +355,7 @@ fn decrypt_aes_gcm(
additional_data,
&mut plaintext,
)?,
- _ => return Err(type_error("iv length not equal to 12 or 16")),
+ _ => return Err(DecryptError::InvalidIvLength),
}
Ok(plaintext)
diff --git a/ext/crypto/ed25519.rs b/ext/crypto/ed25519.rs
index 4f604fe51..da34b7d25 100644
--- a/ext/crypto/ed25519.rs
+++ b/ext/crypto/ed25519.rs
@@ -2,8 +2,6 @@
use base64::prelude::BASE64_URL_SAFE_NO_PAD;
use base64::Engine;
-use deno_core::error::custom_error;
-use deno_core::error::AnyError;
use deno_core::op2;
use deno_core::ToJsBuffer;
use elliptic_curve::pkcs8::PrivateKeyInfo;
@@ -15,6 +13,16 @@ use spki::der::asn1::BitString;
use spki::der::Decode;
use spki::der::Encode;
+#[derive(Debug, thiserror::Error)]
+pub enum Ed25519Error {
+ #[error("Failed to export key")]
+ FailedExport,
+ #[error(transparent)]
+ Der(#[from] rsa::pkcs1::der::Error),
+ #[error(transparent)]
+ KeyRejected(#[from] ring::error::KeyRejected),
+}
+
#[op2(fast)]
pub fn op_crypto_generate_ed25519_keypair(
#[buffer] pkey: &mut [u8],
@@ -116,7 +124,7 @@ pub fn op_crypto_import_pkcs8_ed25519(
#[serde]
pub fn op_crypto_export_spki_ed25519(
#[buffer] pubkey: &[u8],
-) -> Result<ToJsBuffer, AnyError> {
+) -> Result<ToJsBuffer, Ed25519Error> {
let key_info = spki::SubjectPublicKeyInfo {
algorithm: spki::AlgorithmIdentifierOwned {
// id-Ed25519
@@ -128,9 +136,7 @@ pub fn op_crypto_export_spki_ed25519(
Ok(
key_info
.to_der()
- .map_err(|_| {
- custom_error("DOMExceptionOperationError", "Failed to export key")
- })?
+ .map_err(|_| Ed25519Error::FailedExport)?
.into(),
)
}
@@ -139,7 +145,7 @@ pub fn op_crypto_export_spki_ed25519(
#[serde]
pub fn op_crypto_export_pkcs8_ed25519(
#[buffer] pkey: &[u8],
-) -> Result<ToJsBuffer, AnyError> {
+) -> Result<ToJsBuffer, Ed25519Error> {
use rsa::pkcs1::der::Encode;
// This should probably use OneAsymmetricKey instead
@@ -164,7 +170,7 @@ pub fn op_crypto_export_pkcs8_ed25519(
#[string]
pub fn op_crypto_jwk_x_ed25519(
#[buffer] pkey: &[u8],
-) -> Result<String, AnyError> {
+) -> Result<String, Ed25519Error> {
let pair = Ed25519KeyPair::from_seed_unchecked(pkey)?;
Ok(BASE64_URL_SAFE_NO_PAD.encode(pair.public_key().as_ref()))
}
diff --git a/ext/crypto/encrypt.rs b/ext/crypto/encrypt.rs
index 204648e89..66b27657f 100644
--- a/ext/crypto/encrypt.rs
+++ b/ext/crypto/encrypt.rs
@@ -16,8 +16,6 @@ use aes_gcm::Nonce;
use ctr::Ctr128BE;
use ctr::Ctr32BE;
use ctr::Ctr64BE;
-use deno_core::error::type_error;
-use deno_core::error::AnyError;
use deno_core::op2;
use deno_core::unsync::spawn_blocking;
use deno_core::JsBuffer;
@@ -73,12 +71,30 @@ pub enum EncryptAlgorithm {
},
}
+#[derive(Debug, thiserror::Error)]
+pub enum EncryptError {
+ #[error(transparent)]
+ General(#[from] SharedError),
+ #[error("invalid length")]
+ InvalidLength,
+ #[error("invalid key or iv")]
+ InvalidKeyOrIv,
+ #[error("iv length not equal to 12 or 16")]
+ InvalidIvLength,
+ #[error("invalid counter length. Currently supported 32/64/128 bits")]
+ InvalidCounterLength,
+ #[error("tried to encrypt too much data")]
+ TooMuchData,
+ #[error("Encryption failed")]
+ Failed,
+}
+
#[op2(async)]
#[serde]
pub async fn op_crypto_encrypt(
#[serde] opts: EncryptOptions,
#[buffer] data: JsBuffer,
-) -> Result<ToJsBuffer, AnyError> {
+) -> Result<ToJsBuffer, EncryptError> {
let key = opts.key;
let fun = move || match opts.algorithm {
EncryptAlgorithm::RsaOaep { hash, label } => {
@@ -108,12 +124,12 @@ fn encrypt_rsa_oaep(
hash: ShaHash,
label: Vec<u8>,
data: &[u8],
-) -> Result<Vec<u8>, AnyError> {
+) -> Result<Vec<u8>, EncryptError> {
let label = String::from_utf8_lossy(&label).to_string();
let public_key = key.as_rsa_public_key()?;
let public_key = rsa::RsaPublicKey::from_pkcs1_der(&public_key)
- .map_err(|_| operation_error("failed to decode public key"))?;
+ .map_err(|_| SharedError::FailedDecodePublicKey)?;
let mut rng = OsRng;
let padding = match hash {
ShaHash::Sha1 => rsa::Oaep {
@@ -139,7 +155,7 @@ fn encrypt_rsa_oaep(
};
let encrypted = public_key
.encrypt(&mut rng, padding, data)
- .map_err(|_| operation_error("Encryption failed"))?;
+ .map_err(|_| EncryptError::Failed)?;
Ok(encrypted)
}
@@ -148,7 +164,7 @@ fn encrypt_aes_cbc(
length: usize,
iv: Vec<u8>,
data: &[u8],
-) -> Result<Vec<u8>, AnyError> {
+) -> Result<Vec<u8>, EncryptError> {
let key = key.as_secret_key()?;
let ciphertext = match length {
128 => {
@@ -156,7 +172,7 @@ fn encrypt_aes_cbc(
type Aes128CbcEnc = cbc::Encryptor<aes::Aes128>;
let cipher = Aes128CbcEnc::new_from_slices(key, &iv)
- .map_err(|_| operation_error("invalid key or iv".to_string()))?;
+ .map_err(|_| EncryptError::InvalidKeyOrIv)?;
cipher.encrypt_padded_vec_mut::<Pkcs7>(data)
}
192 => {
@@ -164,7 +180,7 @@ fn encrypt_aes_cbc(
type Aes192CbcEnc = cbc::Encryptor<aes::Aes192>;
let cipher = Aes192CbcEnc::new_from_slices(key, &iv)
- .map_err(|_| operation_error("invalid key or iv".to_string()))?;
+ .map_err(|_| EncryptError::InvalidKeyOrIv)?;
cipher.encrypt_padded_vec_mut::<Pkcs7>(data)
}
256 => {
@@ -172,10 +188,10 @@ fn encrypt_aes_cbc(
type Aes256CbcEnc = cbc::Encryptor<aes::Aes256>;
let cipher = Aes256CbcEnc::new_from_slices(key, &iv)
- .map_err(|_| operation_error("invalid key or iv".to_string()))?;
+ .map_err(|_| EncryptError::InvalidKeyOrIv)?;
cipher.encrypt_padded_vec_mut::<Pkcs7>(data)
}
- _ => return Err(type_error("invalid length")),
+ _ => return Err(EncryptError::InvalidLength),
};
Ok(ciphertext)
}
@@ -186,31 +202,31 @@ fn encrypt_aes_gcm_general<N: ArrayLength<u8>>(
length: usize,
ciphertext: &mut [u8],
additional_data: Vec<u8>,
-) -> Result<aes_gcm::Tag, AnyError> {
+) -> Result<aes_gcm::Tag, EncryptError> {
let nonce = Nonce::<N>::from_slice(&iv);
let tag = match length {
128 => {
let cipher = aes_gcm::AesGcm::<Aes128, N>::new_from_slice(key)
- .map_err(|_| operation_error("Encryption failed"))?;
+ .map_err(|_| EncryptError::Failed)?;
cipher
.encrypt_in_place_detached(nonce, &additional_data, ciphertext)
- .map_err(|_| operation_error("Encryption failed"))?
+ .map_err(|_| EncryptError::Failed)?
}
192 => {
let cipher = aes_gcm::AesGcm::<Aes192, N>::new_from_slice(key)
- .map_err(|_| operation_error("Encryption failed"))?;
+ .map_err(|_| EncryptError::Failed)?;
cipher
.encrypt_in_place_detached(nonce, &additional_data, ciphertext)
- .map_err(|_| operation_error("Encryption failed"))?
+ .map_err(|_| EncryptError::Failed)?
}
256 => {
let cipher = aes_gcm::AesGcm::<Aes256, N>::new_from_slice(key)
- .map_err(|_| operation_error("Encryption failed"))?;
+ .map_err(|_| EncryptError::Failed)?;
cipher
.encrypt_in_place_detached(nonce, &additional_data, ciphertext)
- .map_err(|_| operation_error("Encryption failed"))?
+ .map_err(|_| EncryptError::Failed)?
}
- _ => return Err(type_error("invalid length")),
+ _ => return Err(EncryptError::InvalidLength),
};
Ok(tag)
@@ -223,7 +239,7 @@ fn encrypt_aes_gcm(
iv: Vec<u8>,
additional_data: Option<Vec<u8>>,
data: &[u8],
-) -> Result<Vec<u8>, AnyError> {
+) -> Result<Vec<u8>, EncryptError> {
let key = key.as_secret_key()?;
let additional_data = additional_data.unwrap_or_default();
@@ -244,7 +260,7 @@ fn encrypt_aes_gcm(
&mut ciphertext,
additional_data,
)?,
- _ => return Err(type_error("iv length not equal to 12 or 16")),
+ _ => return Err(EncryptError::InvalidIvLength),
};
// Truncated tag to the specified tag length.
@@ -261,7 +277,7 @@ fn encrypt_aes_ctr_gen<B>(
key: &[u8],
counter: &[u8],
data: &[u8],
-) -> Result<Vec<u8>, AnyError>
+) -> Result<Vec<u8>, EncryptError>
where
B: KeyIvInit + StreamCipher,
{
@@ -270,7 +286,7 @@ where
let mut ciphertext = data.to_vec();
cipher
.try_apply_keystream(&mut ciphertext)
- .map_err(|_| operation_error("tried to encrypt too much data"))?;
+ .map_err(|_| EncryptError::TooMuchData)?;
Ok(ciphertext)
}
@@ -281,7 +297,7 @@ fn encrypt_aes_ctr(
counter: &[u8],
ctr_length: usize,
data: &[u8],
-) -> Result<Vec<u8>, AnyError> {
+) -> Result<Vec<u8>, EncryptError> {
let key = key.as_secret_key()?;
match ctr_length {
@@ -289,22 +305,20 @@ fn encrypt_aes_ctr(
128 => encrypt_aes_ctr_gen::<Ctr32BE<aes::Aes128>>(key, counter, data),
192 => encrypt_aes_ctr_gen::<Ctr32BE<aes::Aes192>>(key, counter, data),
256 => encrypt_aes_ctr_gen::<Ctr32BE<aes::Aes256>>(key, counter, data),
- _ => Err(type_error("invalid length")),
+ _ => Err(EncryptError::InvalidLength),
},
64 => match key_length {
128 => encrypt_aes_ctr_gen::<Ctr64BE<aes::Aes128>>(key, counter, data),
192 => encrypt_aes_ctr_gen::<Ctr64BE<aes::Aes192>>(key, counter, data),
256 => encrypt_aes_ctr_gen::<Ctr64BE<aes::Aes256>>(key, counter, data),
- _ => Err(type_error("invalid length")),
+ _ => Err(EncryptError::InvalidLength),
},
128 => match key_length {
128 => encrypt_aes_ctr_gen::<Ctr128BE<aes::Aes128>>(key, counter, data),
192 => encrypt_aes_ctr_gen::<Ctr128BE<aes::Aes192>>(key, counter, data),
256 => encrypt_aes_ctr_gen::<Ctr128BE<aes::Aes256>>(key, counter, data),
- _ => Err(type_error("invalid length")),
+ _ => Err(EncryptError::InvalidLength),
},
- _ => Err(type_error(
- "invalid counter length. Currently supported 32/64/128 bits",
- )),
+ _ => Err(EncryptError::InvalidCounterLength),
}
}
diff --git a/ext/crypto/export_key.rs b/ext/crypto/export_key.rs
index 00ce7e11c..edf0d7239 100644
--- a/ext/crypto/export_key.rs
+++ b/ext/crypto/export_key.rs
@@ -4,8 +4,6 @@ use base64::prelude::BASE64_URL_SAFE_NO_PAD;
use base64::Engine;
use const_oid::AssociatedOid;
use const_oid::ObjectIdentifier;
-use deno_core::error::custom_error;
-use deno_core::error::AnyError;
use deno_core::op2;
use deno_core::ToJsBuffer;
use elliptic_curve::sec1::ToEncodedPoint;
@@ -22,6 +20,16 @@ use spki::AlgorithmIdentifierOwned;
use crate::shared::*;
+#[derive(Debug, thiserror::Error)]
+pub enum ExportKeyError {
+ #[error(transparent)]
+ General(#[from] SharedError),
+ #[error(transparent)]
+ Der(#[from] spki::der::Error),
+ #[error("Unsupported named curve")]
+ UnsupportedNamedCurve,
+}
+
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ExportKeyOptions {
@@ -99,7 +107,7 @@ pub enum ExportKeyResult {
pub fn op_crypto_export_key(
#[serde] opts: ExportKeyOptions,
#[serde] key_data: V8RawKeyData,
-) -> Result<ExportKeyResult, AnyError> {
+) -> Result<ExportKeyResult, ExportKeyError> {
match opts.algorithm {
ExportKeyAlgorithm::RsassaPkcs1v15 {}
| ExportKeyAlgorithm::RsaPss {}
@@ -125,7 +133,7 @@ fn bytes_to_b64(bytes: &[u8]) -> String {
fn export_key_rsa(
format: ExportKeyFormat,
key_data: V8RawKeyData,
-) -> Result<ExportKeyResult, deno_core::anyhow::Error> {
+) -> Result<ExportKeyResult, ExportKeyError> {
match format {
ExportKeyFormat::Spki => {
let subject_public_key = &key_data.as_rsa_public_key()?;
@@ -181,12 +189,7 @@ fn export_key_rsa(
ExportKeyFormat::JwkPublic => {
let public_key = key_data.as_rsa_public_key()?;
let public_key = rsa::pkcs1::RsaPublicKey::from_der(&public_key)
- .map_err(|_| {
- custom_error(
- "DOMExceptionOperationError",
- "failed to decode public key",
- )
- })?;
+ .map_err(|_| SharedError::FailedDecodePublicKey)?;
Ok(ExportKeyResult::JwkPublicRsa {
n: uint_to_b64(public_key.modulus),
@@ -196,12 +199,7 @@ fn export_key_rsa(
ExportKeyFormat::JwkPrivate => {
let private_key = key_data.as_rsa_private_key()?;
let private_key = rsa::pkcs1::RsaPrivateKey::from_der(private_key)
- .map_err(|_| {
- custom_error(
- "DOMExceptionOperationError",
- "failed to decode private key",
- )
- })?;
+ .map_err(|_| SharedError::FailedDecodePrivateKey)?;
Ok(ExportKeyResult::JwkPrivateRsa {
n: uint_to_b64(private_key.modulus),
@@ -214,14 +212,14 @@ fn export_key_rsa(
qi: uint_to_b64(private_key.coefficient),
})
}
- _ => Err(unsupported_format()),
+ _ => Err(SharedError::UnsupportedFormat.into()),
}
}
fn export_key_symmetric(
format: ExportKeyFormat,
key_data: V8RawKeyData,
-) -> Result<ExportKeyResult, deno_core::anyhow::Error> {
+) -> Result<ExportKeyResult, ExportKeyError> {
match format {
ExportKeyFormat::JwkSecret => {
let bytes = key_data.as_secret_key()?;
@@ -230,7 +228,7 @@ fn export_key_symmetric(
k: bytes_to_b64(bytes),
})
}
- _ => Err(unsupported_format()),
+ _ => Err(SharedError::UnsupportedFormat.into()),
}
}
@@ -239,7 +237,7 @@ fn export_key_ec(
key_data: V8RawKeyData,
algorithm: ExportKeyAlgorithm,
named_curve: EcNamedCurve,
-) -> Result<ExportKeyResult, deno_core::anyhow::Error> {
+) -> Result<ExportKeyResult, ExportKeyError> {
match format {
ExportKeyFormat::Raw => {
let subject_public_key = match named_curve {
@@ -332,10 +330,7 @@ fn export_key_ec(
y: bytes_to_b64(y),
})
} else {
- Err(custom_error(
- "DOMExceptionOperationError",
- "failed to decode public key",
- ))
+ Err(SharedError::FailedDecodePublicKey.into())
}
}
EcNamedCurve::P384 => {
@@ -350,10 +345,7 @@ fn export_key_ec(
y: bytes_to_b64(y),
})
} else {
- Err(custom_error(
- "DOMExceptionOperationError",
- "failed to decode public key",
- ))
+ Err(SharedError::FailedDecodePublicKey.into())
}
}
EcNamedCurve::P521 => {
@@ -368,10 +360,7 @@ fn export_key_ec(
y: bytes_to_b64(y),
})
} else {
- Err(custom_error(
- "DOMExceptionOperationError",
- "failed to decode public key",
- ))
+ Err(SharedError::FailedDecodePublicKey.into())
}
}
},
@@ -380,13 +369,8 @@ fn export_key_ec(
match named_curve {
EcNamedCurve::P256 => {
- let ec_key =
- p256::SecretKey::from_pkcs8_der(private_key).map_err(|_| {
- custom_error(
- "DOMExceptionOperationError",
- "failed to decode private key",
- )
- })?;
+ let ec_key = p256::SecretKey::from_pkcs8_der(private_key)
+ .map_err(|_| SharedError::FailedDecodePrivateKey)?;
let point = ec_key.public_key().to_encoded_point(false);
if let elliptic_curve::sec1::Coordinates::Uncompressed { x, y } =
@@ -398,18 +382,13 @@ fn export_key_ec(
d: bytes_to_b64(&ec_key.to_bytes()),
})
} else {
- Err(data_error("expected valid public EC key"))
+ Err(SharedError::ExpectedValidPublicECKey.into())
}
}
EcNamedCurve::P384 => {
- let ec_key =
- p384::SecretKey::from_pkcs8_der(private_key).map_err(|_| {
- custom_error(
- "DOMExceptionOperationError",
- "failed to decode private key",
- )
- })?;
+ let ec_key = p384::SecretKey::from_pkcs8_der(private_key)
+ .map_err(|_| SharedError::FailedDecodePrivateKey)?;
let point = ec_key.public_key().to_encoded_point(false);
if let elliptic_curve::sec1::Coordinates::Uncompressed { x, y } =
@@ -421,12 +400,12 @@ fn export_key_ec(
d: bytes_to_b64(&ec_key.to_bytes()),
})
} else {
- Err(data_error("expected valid public EC key"))
+ Err(SharedError::ExpectedValidPublicECKey.into())
}
}
- _ => Err(not_supported_error("Unsupported namedCurve")),
+ _ => Err(ExportKeyError::UnsupportedNamedCurve),
}
}
- ExportKeyFormat::JwkSecret => Err(unsupported_format()),
+ ExportKeyFormat::JwkSecret => Err(SharedError::UnsupportedFormat.into()),
}
}
diff --git a/ext/crypto/generate_key.rs b/ext/crypto/generate_key.rs
index 43aea2c70..3c0bd77c2 100644
--- a/ext/crypto/generate_key.rs
+++ b/ext/crypto/generate_key.rs
@@ -1,6 +1,5 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
-use deno_core::error::AnyError;
use deno_core::op2;
use deno_core::unsync::spawn_blocking;
use deno_core::ToJsBuffer;
@@ -16,6 +15,26 @@ use serde::Deserialize;
use crate::shared::*;
+#[derive(Debug, thiserror::Error)]
+pub enum GenerateKeyError {
+ #[error(transparent)]
+ General(#[from] SharedError),
+ #[error("Bad public exponent")]
+ BadPublicExponent,
+ #[error("Invalid HMAC key length")]
+ InvalidHMACKeyLength,
+ #[error("Failed to serialize RSA key")]
+ FailedRSAKeySerialization,
+ #[error("Invalid AES key length")]
+ InvalidAESKeyLength,
+ #[error("Failed to generate RSA key")]
+ FailedRSAKeyGeneration,
+ #[error("Failed to generate EC key")]
+ FailedECKeyGeneration,
+ #[error("Failed to generate key")]
+ FailedKeyGeneration,
+}
+
// Allowlist for RSA public exponents.
static PUB_EXPONENT_1: Lazy<BigUint> =
Lazy::new(|| BigUint::from_u64(3).unwrap());
@@ -46,7 +65,7 @@ pub enum GenerateKeyOptions {
#[serde]
pub async fn op_crypto_generate_key(
#[serde] opts: GenerateKeyOptions,
-) -> Result<ToJsBuffer, AnyError> {
+) -> Result<ToJsBuffer, GenerateKeyError> {
let fun = || match opts {
GenerateKeyOptions::Rsa {
modulus_length,
@@ -65,21 +84,21 @@ pub async fn op_crypto_generate_key(
fn generate_key_rsa(
modulus_length: u32,
public_exponent: &[u8],
-) -> Result<Vec<u8>, AnyError> {
+) -> Result<Vec<u8>, GenerateKeyError> {
let exponent = BigUint::from_bytes_be(public_exponent);
if exponent != *PUB_EXPONENT_1 && exponent != *PUB_EXPONENT_2 {
- return Err(operation_error("Bad public exponent"));
+ return Err(GenerateKeyError::BadPublicExponent);
}
let mut rng = OsRng;
let private_key =
RsaPrivateKey::new_with_exp(&mut rng, modulus_length as usize, &exponent)
- .map_err(|_| operation_error("Failed to generate RSA key"))?;
+ .map_err(|_| GenerateKeyError::FailedRSAKeyGeneration)?;
let private_key = private_key
.to_pkcs1_der()
- .map_err(|_| operation_error("Failed to serialize RSA key"))?;
+ .map_err(|_| GenerateKeyError::FailedRSAKeySerialization)?;
Ok(private_key.as_bytes().to_vec())
}
@@ -90,7 +109,9 @@ fn generate_key_ec_p521() -> Vec<u8> {
key.to_nonzero_scalar().to_bytes().to_vec()
}
-fn generate_key_ec(named_curve: EcNamedCurve) -> Result<Vec<u8>, AnyError> {
+fn generate_key_ec(
+ named_curve: EcNamedCurve,
+) -> Result<Vec<u8>, GenerateKeyError> {
let curve = match named_curve {
EcNamedCurve::P256 => &ring::signature::ECDSA_P256_SHA256_FIXED_SIGNING,
EcNamedCurve::P384 => &ring::signature::ECDSA_P384_SHA384_FIXED_SIGNING,
@@ -100,21 +121,21 @@ fn generate_key_ec(named_curve: EcNamedCurve) -> Result<Vec<u8>, AnyError> {
let rng = ring::rand::SystemRandom::new();
let pkcs8 = EcdsaKeyPair::generate_pkcs8(curve, &rng)
- .map_err(|_| operation_error("Failed to generate EC key"))?;
+ .map_err(|_| GenerateKeyError::FailedECKeyGeneration)?;
Ok(pkcs8.as_ref().to_vec())
}
-fn generate_key_aes(length: usize) -> Result<Vec<u8>, AnyError> {
+fn generate_key_aes(length: usize) -> Result<Vec<u8>, GenerateKeyError> {
if length % 8 != 0 || length > 256 {
- return Err(operation_error("Invalid AES key length"));
+ return Err(GenerateKeyError::InvalidAESKeyLength);
}
let mut key = vec![0u8; length / 8];
let rng = ring::rand::SystemRandom::new();
rng
.fill(&mut key)
- .map_err(|_| operation_error("Failed to generate key"))?;
+ .map_err(|_| GenerateKeyError::FailedKeyGeneration)?;
Ok(key)
}
@@ -122,7 +143,7 @@ fn generate_key_aes(length: usize) -> Result<Vec<u8>, AnyError> {
fn generate_key_hmac(
hash: ShaHash,
length: Option<usize>,
-) -> Result<Vec<u8>, AnyError> {
+) -> Result<Vec<u8>, GenerateKeyError> {
let hash = match hash {
ShaHash::Sha1 => &ring::hmac::HMAC_SHA1_FOR_LEGACY_USE_ONLY,
ShaHash::Sha256 => &ring::hmac::HMAC_SHA256,
@@ -132,12 +153,12 @@ fn generate_key_hmac(
let length = if let Some(length) = length {
if length % 8 != 0 {
- return Err(operation_error("Invalid HMAC key length"));
+ return Err(GenerateKeyError::InvalidHMACKeyLength);
}
let length = length / 8;
if length > ring::digest::MAX_BLOCK_LEN {
- return Err(operation_error("Invalid HMAC key length"));
+ return Err(GenerateKeyError::InvalidHMACKeyLength);
}
length
@@ -149,7 +170,7 @@ fn generate_key_hmac(
let mut key = vec![0u8; length];
rng
.fill(&mut key)
- .map_err(|_| operation_error("Failed to generate key"))?;
+ .map_err(|_| GenerateKeyError::FailedKeyGeneration)?;
Ok(key)
}
diff --git a/ext/crypto/import_key.rs b/ext/crypto/import_key.rs
index e30baea03..3463ca2be 100644
--- a/ext/crypto/import_key.rs
+++ b/ext/crypto/import_key.rs
@@ -1,7 +1,6 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
use base64::Engine;
-use deno_core::error::AnyError;
use deno_core::op2;
use deno_core::JsBuffer;
use deno_core::ToJsBuffer;
@@ -15,6 +14,70 @@ use spki::der::Decode;
use crate::shared::*;
+#[derive(Debug, thiserror::Error)]
+pub enum ImportKeyError {
+ #[error(transparent)]
+ General(#[from] SharedError),
+ #[error("invalid modulus")]
+ InvalidModulus,
+ #[error("invalid public exponent")]
+ InvalidPublicExponent,
+ #[error("invalid private exponent")]
+ InvalidPrivateExponent,
+ #[error("invalid first prime factor")]
+ InvalidFirstPrimeFactor,
+ #[error("invalid second prime factor")]
+ InvalidSecondPrimeFactor,
+ #[error("invalid first CRT exponent")]
+ InvalidFirstCRTExponent,
+ #[error("invalid second CRT exponent")]
+ InvalidSecondCRTExponent,
+ #[error("invalid CRT coefficient")]
+ InvalidCRTCoefficient,
+ #[error("invalid b64 coordinate")]
+ InvalidB64Coordinate,
+ #[error("invalid RSA public key")]
+ InvalidRSAPublicKey,
+ #[error("invalid RSA private key")]
+ InvalidRSAPrivateKey,
+ #[error("unsupported algorithm")]
+ UnsupportedAlgorithm,
+ #[error("public key is invalid (too long)")]
+ PublicKeyTooLong,
+ #[error("private key is invalid (too long)")]
+ PrivateKeyTooLong,
+ #[error("invalid P-256 elliptic curve point")]
+ InvalidP256ECPoint,
+ #[error("invalid P-384 elliptic curve point")]
+ InvalidP384ECPoint,
+ #[error("invalid P-521 elliptic curve point")]
+ InvalidP521ECPoint,
+ #[error("invalid P-256 elliptic curve SPKI data")]
+ InvalidP256ECSPKIData,
+ #[error("invalid P-384 elliptic curve SPKI data")]
+ InvalidP384ECSPKIData,
+ #[error("invalid P-521 elliptic curve SPKI data")]
+ InvalidP521ECSPKIData,
+ #[error("curve mismatch")]
+ CurveMismatch,
+ #[error("Unsupported named curve")]
+ UnsupportedNamedCurve,
+ #[error("invalid key data")]
+ InvalidKeyData,
+ #[error("invalid JWK private key")]
+ InvalidJWKPrivateKey,
+ #[error(transparent)]
+ EllipticCurve(#[from] elliptic_curve::Error),
+ #[error("expected valid PKCS#8 data")]
+ ExpectedValidPkcs8Data,
+ #[error("malformed parameters")]
+ MalformedParameters,
+ #[error(transparent)]
+ Spki(#[from] spki::Error),
+ #[error(transparent)]
+ Der(#[from] rsa::pkcs1::der::Error),
+}
+
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
pub enum KeyData {
@@ -93,7 +156,7 @@ pub enum ImportKeyResult {
pub fn op_crypto_import_key(
#[serde] opts: ImportKeyOptions,
#[serde] key_data: KeyData,
-) -> Result<ImportKeyResult, AnyError> {
+) -> Result<ImportKeyResult, ImportKeyError> {
match opts {
ImportKeyOptions::RsassaPkcs1v15 {} => import_key_rsassa(key_data),
ImportKeyOptions::RsaPss {} => import_key_rsapss(key_data),
@@ -117,21 +180,21 @@ const BASE64_URL_SAFE_FORGIVING:
);
macro_rules! jwt_b64_int_or_err {
- ($name:ident, $b64:expr, $err:expr) => {
+ ($name:ident, $b64:expr, $err:tt) => {
let bytes = BASE64_URL_SAFE_FORGIVING
.decode($b64)
- .map_err(|_| data_error($err))?;
- let $name = UintRef::new(&bytes).map_err(|_| data_error($err))?;
+ .map_err(|_| ImportKeyError::$err)?;
+ let $name = UintRef::new(&bytes).map_err(|_| ImportKeyError::$err)?;
};
}
fn import_key_rsa_jwk(
key_data: KeyData,
-) -> Result<ImportKeyResult, deno_core::anyhow::Error> {
+) -> Result<ImportKeyResult, ImportKeyError> {
match key_data {
KeyData::JwkPublicRsa { n, e } => {
- jwt_b64_int_or_err!(modulus, &n, "invalid modulus");
- jwt_b64_int_or_err!(public_exponent, &e, "invalid public exponent");
+ jwt_b64_int_or_err!(modulus, &n, InvalidModulus);
+ jwt_b64_int_or_err!(public_exponent, &e, InvalidPublicExponent);
let public_key = rsa::pkcs1::RsaPublicKey {
modulus,
@@ -141,7 +204,7 @@ fn import_key_rsa_jwk(
let mut data = Vec::new();
public_key
.encode_to_vec(&mut data)
- .map_err(|_| data_error("invalid rsa public key"))?;
+ .map_err(|_| ImportKeyError::InvalidRSAPublicKey)?;
let public_exponent =
public_key.public_exponent.as_bytes().to_vec().into();
@@ -163,14 +226,14 @@ fn import_key_rsa_jwk(
dq,
qi,
} => {
- jwt_b64_int_or_err!(modulus, &n, "invalid modulus");
- jwt_b64_int_or_err!(public_exponent, &e, "invalid public exponent");
- jwt_b64_int_or_err!(private_exponent, &d, "invalid private exponent");
- jwt_b64_int_or_err!(prime1, &p, "invalid first prime factor");
- jwt_b64_int_or_err!(prime2, &q, "invalid second prime factor");
- jwt_b64_int_or_err!(exponent1, &dp, "invalid first CRT exponent");
- jwt_b64_int_or_err!(exponent2, &dq, "invalid second CRT exponent");
- jwt_b64_int_or_err!(coefficient, &qi, "invalid CRT coefficient");
+ jwt_b64_int_or_err!(modulus, &n, InvalidModulus);
+ jwt_b64_int_or_err!(public_exponent, &e, InvalidPublicExponent);
+ jwt_b64_int_or_err!(private_exponent, &d, InvalidPrivateExponent);
+ jwt_b64_int_or_err!(prime1, &p, InvalidFirstPrimeFactor);
+ jwt_b64_int_or_err!(prime2, &q, InvalidSecondPrimeFactor);
+ jwt_b64_int_or_err!(exponent1, &dp, InvalidFirstCRTExponent);
+ jwt_b64_int_or_err!(exponent2, &dq, InvalidSecondCRTExponent);
+ jwt_b64_int_or_err!(coefficient, &qi, InvalidCRTCoefficient);
let private_key = rsa::pkcs1::RsaPrivateKey {
modulus,
@@ -187,7 +250,7 @@ fn import_key_rsa_jwk(
let mut data = Vec::new();
private_key
.encode_to_vec(&mut data)
- .map_err(|_| data_error("invalid rsa private key"))?;
+ .map_err(|_| ImportKeyError::InvalidRSAPrivateKey)?;
let public_exponent =
private_key.public_exponent.as_bytes().to_vec().into();
@@ -205,37 +268,33 @@ fn import_key_rsa_jwk(
fn import_key_rsassa(
key_data: KeyData,
-) -> Result<ImportKeyResult, deno_core::anyhow::Error> {
+) -> Result<ImportKeyResult, ImportKeyError> {
match key_data {
KeyData::Spki(data) => {
// 2-3.
- let pk_info = spki::SubjectPublicKeyInfoRef::try_from(&*data)
- .map_err(|e| data_error(e.to_string()))?;
+ let pk_info = spki::SubjectPublicKeyInfoRef::try_from(&*data)?;
// 4-5.
let alg = pk_info.algorithm.oid;
// 6-7. (skipped, only support rsaEncryption for interoperability)
if alg != RSA_ENCRYPTION_OID {
- return Err(data_error("unsupported algorithm"));
+ return Err(ImportKeyError::UnsupportedAlgorithm);
}
// 8-9.
let public_key = rsa::pkcs1::RsaPublicKey::from_der(
pk_info.subject_public_key.raw_bytes(),
- )
- .map_err(|e| data_error(e.to_string()))?;
+ )?;
- let bytes_consumed = public_key
- .encoded_len()
- .map_err(|e| data_error(e.to_string()))?;
+ let bytes_consumed = public_key.encoded_len()?;
if bytes_consumed
!= rsa::pkcs1::der::Length::new(
pk_info.subject_public_key.raw_bytes().len() as u16,
)
{
- return Err(data_error("public key is invalid (too long)"));
+ return Err(ImportKeyError::PublicKeyTooLong);
}
let data = pk_info.subject_public_key.raw_bytes().to_vec().into();
@@ -251,30 +310,26 @@ fn import_key_rsassa(
}
KeyData::Pkcs8(data) => {
// 2-3.
- let pk_info = PrivateKeyInfo::from_der(&data)
- .map_err(|e| data_error(e.to_string()))?;
+ let pk_info = PrivateKeyInfo::from_der(&data)?;
// 4-5.
let alg = pk_info.algorithm.oid;
// 6-7. (skipped, only support rsaEncryption for interoperability)
if alg != RSA_ENCRYPTION_OID {
- return Err(data_error("unsupported algorithm"));
+ return Err(ImportKeyError::UnsupportedAlgorithm);
}
// 8-9.
let private_key =
- rsa::pkcs1::RsaPrivateKey::from_der(pk_info.private_key)
- .map_err(|e| data_error(e.to_string()))?;
+ rsa::pkcs1::RsaPrivateKey::from_der(pk_info.private_key)?;
- let bytes_consumed = private_key
- .encoded_len()
- .map_err(|e| data_error(e.to_string()))?;
+ let bytes_consumed = private_key.encoded_len()?;
if bytes_consumed
!= rsa::pkcs1::der::Length::new(pk_info.private_key.len() as u16)
{
- return Err(data_error("private key is invalid (too long)"));
+ return Err(ImportKeyError::PrivateKeyTooLong);
}
let data = pk_info.private_key.to_vec().into();
@@ -291,43 +346,39 @@ fn import_key_rsassa(
KeyData::JwkPublicRsa { .. } | KeyData::JwkPrivateRsa { .. } => {
import_key_rsa_jwk(key_data)
}
- _ => Err(unsupported_format()),
+ _ => Err(SharedError::UnsupportedFormat.into()),
}
}
fn import_key_rsapss(
key_data: KeyData,
-) -> Result<ImportKeyResult, deno_core::anyhow::Error> {
+) -> Result<ImportKeyResult, ImportKeyError> {
match key_data {
KeyData::Spki(data) => {
// 2-3.
- let pk_info = spki::SubjectPublicKeyInfoRef::try_from(&*data)
- .map_err(|e| data_error(e.to_string()))?;
+ let pk_info = spki::SubjectPublicKeyInfoRef::try_from(&*data)?;
// 4-5.
let alg = pk_info.algorithm.oid;
// 6-7. (skipped, only support rsaEncryption for interoperability)
if alg != RSA_ENCRYPTION_OID {
- return Err(data_error("unsupported algorithm"));
+ return Err(ImportKeyError::UnsupportedAlgorithm);
}
// 8-9.
let public_key = rsa::pkcs1::RsaPublicKey::from_der(
pk_info.subject_public_key.raw_bytes(),
- )
- .map_err(|e| data_error(e.to_string()))?;
+ )?;
- let bytes_consumed = public_key
- .encoded_len()
- .map_err(|e| data_error(e.to_string()))?;
+ let bytes_consumed = public_key.encoded_len()?;
if bytes_consumed
!= rsa::pkcs1::der::Length::new(
pk_info.subject_public_key.raw_bytes().len() as u16,
)
{
- return Err(data_error("public key is invalid (too long)"));
+ return Err(ImportKeyError::PublicKeyTooLong);
}
let data = pk_info.subject_public_key.raw_bytes().to_vec().into();
@@ -343,30 +394,26 @@ fn import_key_rsapss(
}
KeyData::Pkcs8(data) => {
// 2-3.
- let pk_info = PrivateKeyInfo::from_der(&data)
- .map_err(|e| data_error(e.to_string()))?;
+ let pk_info = PrivateKeyInfo::from_der(&data)?;
// 4-5.
let alg = pk_info.algorithm.oid;
// 6-7. (skipped, only support rsaEncryption for interoperability)
if alg != RSA_ENCRYPTION_OID {
- return Err(data_error("unsupported algorithm"));
+ return Err(ImportKeyError::UnsupportedAlgorithm);
}
// 8-9.
let private_key =
- rsa::pkcs1::RsaPrivateKey::from_der(pk_info.private_key)
- .map_err(|e| data_error(e.to_string()))?;
+ rsa::pkcs1::RsaPrivateKey::from_der(pk_info.private_key)?;
- let bytes_consumed = private_key
- .encoded_len()
- .map_err(|e| data_error(e.to_string()))?;
+ let bytes_consumed = private_key.encoded_len()?;
if bytes_consumed
!= rsa::pkcs1::der::Length::new(pk_info.private_key.len() as u16)
{
- return Err(data_error("private key is invalid (too long)"));
+ return Err(ImportKeyError::PrivateKeyTooLong);
}
let data = pk_info.private_key.to_vec().into();
@@ -383,43 +430,39 @@ fn import_key_rsapss(
KeyData::JwkPublicRsa { .. } | KeyData::JwkPrivateRsa { .. } => {
import_key_rsa_jwk(key_data)
}
- _ => Err(unsupported_format()),
+ _ => Err(SharedError::UnsupportedFormat.into()),
}
}
fn import_key_rsaoaep(
key_data: KeyData,
-) -> Result<ImportKeyResult, deno_core::anyhow::Error> {
+) -> Result<ImportKeyResult, ImportKeyError> {
match key_data {
KeyData::Spki(data) => {
// 2-3.
- let pk_info = spki::SubjectPublicKeyInfoRef::try_from(&*data)
- .map_err(|e| data_error(e.to_string()))?;
+ let pk_info = spki::SubjectPublicKeyInfoRef::try_from(&*data)?;
// 4-5.
let alg = pk_info.algorithm.oid;
// 6-7. (skipped, only support rsaEncryption for interoperability)
if alg != RSA_ENCRYPTION_OID {
- return Err(data_error("unsupported algorithm"));
+ return Err(ImportKeyError::UnsupportedAlgorithm);
}
// 8-9.
let public_key = rsa::pkcs1::RsaPublicKey::from_der(
pk_info.subject_public_key.raw_bytes(),
- )
- .map_err(|e| data_error(e.to_string()))?;
+ )?;
- let bytes_consumed = public_key
- .encoded_len()
- .map_err(|e| data_error(e.to_string()))?;
+ let bytes_consumed = public_key.encoded_len()?;
if bytes_consumed
!= rsa::pkcs1::der::Length::new(
pk_info.subject_public_key.raw_bytes().len() as u16,
)
{
- return Err(data_error("public key is invalid (too long)"));
+ return Err(ImportKeyError::PublicKeyTooLong);
}
let data = pk_info.subject_public_key.raw_bytes().to_vec().into();
@@ -435,30 +478,26 @@ fn import_key_rsaoaep(
}
KeyData::Pkcs8(data) => {
// 2-3.
- let pk_info = PrivateKeyInfo::from_der(&data)
- .map_err(|e| data_error(e.to_string()))?;
+ let pk_info = PrivateKeyInfo::from_der(&data)?;
// 4-5.
let alg = pk_info.algorithm.oid;
// 6-7. (skipped, only support rsaEncryption for interoperability)
if alg != RSA_ENCRYPTION_OID {
- return Err(data_error("unsupported algorithm"));
+ return Err(ImportKeyError::UnsupportedAlgorithm);
}
// 8-9.
let private_key =
- rsa::pkcs1::RsaPrivateKey::from_der(pk_info.private_key)
- .map_err(|e| data_error(e.to_string()))?;
+ rsa::pkcs1::RsaPrivateKey::from_der(pk_info.private_key)?;
- let bytes_consumed = private_key
- .encoded_len()
- .map_err(|e| data_error(e.to_string()))?;
+ let bytes_consumed = private_key.encoded_len()?;
if bytes_consumed
!= rsa::pkcs1::der::Length::new(pk_info.private_key.len() as u16)
{
- return Err(data_error("private key is invalid (too long)"));
+ return Err(ImportKeyError::PrivateKeyTooLong);
}
let data = pk_info.private_key.to_vec().into();
@@ -475,14 +514,14 @@ fn import_key_rsaoaep(
KeyData::JwkPublicRsa { .. } | KeyData::JwkPrivateRsa { .. } => {
import_key_rsa_jwk(key_data)
}
- _ => Err(unsupported_format()),
+ _ => Err(SharedError::UnsupportedFormat.into()),
}
}
fn decode_b64url_to_field_bytes<C: elliptic_curve::Curve>(
b64: &str,
-) -> Result<elliptic_curve::FieldBytes<C>, deno_core::anyhow::Error> {
- jwt_b64_int_or_err!(val, b64, "invalid b64 coordinate");
+) -> Result<elliptic_curve::FieldBytes<C>, ImportKeyError> {
+ jwt_b64_int_or_err!(val, b64, InvalidB64Coordinate);
let mut bytes = elliptic_curve::FieldBytes::<C>::default();
let original_bytes = val.as_bytes();
@@ -495,7 +534,7 @@ fn decode_b64url_to_field_bytes<C: elliptic_curve::Curve>(
let val = new_bytes.as_slice();
if val.len() != bytes.len() {
- return Err(data_error("invalid b64 coordinate"));
+ return Err(ImportKeyError::InvalidB64Coordinate);
}
bytes.copy_from_slice(val);
@@ -506,7 +545,7 @@ fn import_key_ec_jwk_to_point(
x: String,
y: String,
named_curve: EcNamedCurve,
-) -> Result<Vec<u8>, deno_core::anyhow::Error> {
+) -> Result<Vec<u8>, ImportKeyError> {
let point_bytes = match named_curve {
EcNamedCurve::P256 => {
let x = decode_b64url_to_field_bytes::<p256::NistP256>(&x)?;
@@ -534,7 +573,7 @@ fn import_key_ec_jwk_to_point(
fn import_key_ec_jwk(
key_data: KeyData,
named_curve: EcNamedCurve,
-) -> Result<ImportKeyResult, deno_core::anyhow::Error> {
+) -> Result<ImportKeyResult, ImportKeyError> {
match key_data {
KeyData::JwkPublicEc { x, y } => {
let point_bytes = import_key_ec_jwk_to_point(x, y, named_curve)?;
@@ -550,21 +589,21 @@ fn import_key_ec_jwk(
let pk = p256::SecretKey::from_bytes(&d)?;
pk.to_pkcs8_der()
- .map_err(|_| data_error("invalid JWK private key"))?
+ .map_err(|_| ImportKeyError::InvalidJWKPrivateKey)?
}
EcNamedCurve::P384 => {
let d = decode_b64url_to_field_bytes::<p384::NistP384>(&d)?;
let pk = p384::SecretKey::from_bytes(&d)?;
pk.to_pkcs8_der()
- .map_err(|_| data_error("invalid JWK private key"))?
+ .map_err(|_| ImportKeyError::InvalidJWKPrivateKey)?
}
EcNamedCurve::P521 => {
let d = decode_b64url_to_field_bytes::<p521::NistP521>(&d)?;
let pk = p521::SecretKey::from_bytes(&d)?;
pk.to_pkcs8_der()
- .map_err(|_| data_error("invalid JWK private key"))?
+ .map_err(|_| ImportKeyError::InvalidJWKPrivateKey)?
}
};
@@ -595,7 +634,7 @@ impl<'a> TryFrom<spki::der::asn1::AnyRef<'a>> for ECParametersSpki {
fn import_key_ec(
key_data: KeyData,
named_curve: EcNamedCurve,
-) -> Result<ImportKeyResult, AnyError> {
+) -> Result<ImportKeyResult, ImportKeyError> {
match key_data {
KeyData::Raw(data) => {
// The point is parsed and validated, ultimately the original data is
@@ -604,28 +643,28 @@ fn import_key_ec(
EcNamedCurve::P256 => {
// 1-2.
let point = p256::EncodedPoint::from_bytes(&data)
- .map_err(|_| data_error("invalid P-256 elliptic curve point"))?;
+ .map_err(|_| ImportKeyError::InvalidP256ECPoint)?;
// 3.
if point.is_identity() {
- return Err(data_error("invalid P-256 elliptic curve point"));
+ return Err(ImportKeyError::InvalidP256ECPoint);
}
}
EcNamedCurve::P384 => {
// 1-2.
let point = p384::EncodedPoint::from_bytes(&data)
- .map_err(|_| data_error("invalid P-384 elliptic curve point"))?;
+ .map_err(|_| ImportKeyError::InvalidP384ECPoint)?;
// 3.
if point.is_identity() {
- return Err(data_error("invalid P-384 elliptic curve point"));
+ return Err(ImportKeyError::InvalidP384ECPoint);
}
}
EcNamedCurve::P521 => {
// 1-2.
let point = p521::EncodedPoint::from_bytes(&data)
- .map_err(|_| data_error("invalid P-521 elliptic curve point"))?;
+ .map_err(|_| ImportKeyError::InvalidP521ECPoint)?;
// 3.
if point.is_identity() {
- return Err(data_error("invalid P-521 elliptic curve point"));
+ return Err(ImportKeyError::InvalidP521ECPoint);
}
}
};
@@ -635,11 +674,11 @@ fn import_key_ec(
}
KeyData::Pkcs8(data) => {
let pk = PrivateKeyInfo::from_der(data.as_ref())
- .map_err(|_| data_error("expected valid PKCS#8 data"))?;
+ .map_err(|_| ImportKeyError::ExpectedValidPkcs8Data)?;
let named_curve_alg = pk
.algorithm
.parameters
- .ok_or_else(|| data_error("malformed parameters"))?
+ .ok_or(ImportKeyError::MalformedParameters)?
.try_into()
.unwrap();
@@ -654,7 +693,7 @@ fn import_key_ec(
};
if pk_named_curve != Some(named_curve) {
- return Err(data_error("curve mismatch"));
+ return Err(ImportKeyError::CurveMismatch);
}
Ok(ImportKeyResult::Ec {
@@ -663,14 +702,13 @@ fn import_key_ec(
}
KeyData::Spki(data) => {
// 2-3.
- let pk_info = spki::SubjectPublicKeyInfoRef::try_from(&*data)
- .map_err(|e| data_error(e.to_string()))?;
+ let pk_info = spki::SubjectPublicKeyInfoRef::try_from(&*data)?;
// 4.
let alg = pk_info.algorithm.oid;
// id-ecPublicKey
if alg != elliptic_curve::ALGORITHM_OID {
- return Err(data_error("unsupported algorithm"));
+ return Err(ImportKeyError::UnsupportedAlgorithm);
}
// 5-7.
@@ -678,9 +716,9 @@ fn import_key_ec(
pk_info
.algorithm
.parameters
- .ok_or_else(|| data_error("malformed parameters"))?,
+ .ok_or(ImportKeyError::MalformedParameters)?,
)
- .map_err(|_| data_error("malformed parameters"))?;
+ .map_err(|_| ImportKeyError::MalformedParameters)?;
// 8-9.
let named_curve_alg = params.named_curve_alg;
@@ -704,36 +742,30 @@ fn import_key_ec(
let bytes_consumed = match named_curve {
EcNamedCurve::P256 => {
- let point =
- p256::EncodedPoint::from_bytes(&*encoded_key).map_err(|_| {
- data_error("invalid P-256 elliptic curve SPKI data")
- })?;
+ let point = p256::EncodedPoint::from_bytes(&*encoded_key)
+ .map_err(|_| ImportKeyError::InvalidP256ECSPKIData)?;
if point.is_identity() {
- return Err(data_error("invalid P-256 elliptic curve point"));
+ return Err(ImportKeyError::InvalidP256ECPoint);
}
point.as_bytes().len()
}
EcNamedCurve::P384 => {
- let point =
- p384::EncodedPoint::from_bytes(&*encoded_key).map_err(|_| {
- data_error("invalid P-384 elliptic curve SPKI data")
- })?;
+ let point = p384::EncodedPoint::from_bytes(&*encoded_key)
+ .map_err(|_| ImportKeyError::InvalidP384ECSPKIData)?;
if point.is_identity() {
- return Err(data_error("invalid P-384 elliptic curve point"));
+ return Err(ImportKeyError::InvalidP384ECPoint);
}
point.as_bytes().len()
}
EcNamedCurve::P521 => {
- let point =
- p521::EncodedPoint::from_bytes(&*encoded_key).map_err(|_| {
- data_error("invalid P-521 elliptic curve SPKI data")
- })?;
+ let point = p521::EncodedPoint::from_bytes(&*encoded_key)
+ .map_err(|_| ImportKeyError::InvalidP521ECSPKIData)?;
if point.is_identity() {
- return Err(data_error("invalid P-521 elliptic curve point"));
+ return Err(ImportKeyError::InvalidP521ECPoint);
}
point.as_bytes().len()
@@ -741,15 +773,15 @@ fn import_key_ec(
};
if bytes_consumed != pk_info.subject_public_key.raw_bytes().len() {
- return Err(data_error("public key is invalid (too long)"));
+ return Err(ImportKeyError::PublicKeyTooLong);
}
// 11.
if named_curve != pk_named_curve {
- return Err(data_error("curve mismatch"));
+ return Err(ImportKeyError::CurveMismatch);
}
} else {
- return Err(data_error("Unsupported named curve"));
+ return Err(ImportKeyError::UnsupportedNamedCurve);
}
Ok(ImportKeyResult::Ec {
@@ -759,34 +791,38 @@ fn import_key_ec(
KeyData::JwkPublicEc { .. } | KeyData::JwkPrivateEc { .. } => {
import_key_ec_jwk(key_data, named_curve)
}
- _ => Err(unsupported_format()),
+ _ => Err(SharedError::UnsupportedFormat.into()),
}
}
-fn import_key_aes(key_data: KeyData) -> Result<ImportKeyResult, AnyError> {
+fn import_key_aes(
+ key_data: KeyData,
+) -> Result<ImportKeyResult, ImportKeyError> {
Ok(match key_data {
KeyData::JwkSecret { k } => {
let data = BASE64_URL_SAFE_FORGIVING
.decode(k)
- .map_err(|_| data_error("invalid key data"))?;
+ .map_err(|_| ImportKeyError::InvalidKeyData)?;
ImportKeyResult::Hmac {
raw_data: RustRawKeyData::Secret(data.into()),
}
}
- _ => return Err(unsupported_format()),
+ _ => return Err(SharedError::UnsupportedFormat.into()),
})
}
-fn import_key_hmac(key_data: KeyData) -> Result<ImportKeyResult, AnyError> {
+fn import_key_hmac(
+ key_data: KeyData,
+) -> Result<ImportKeyResult, ImportKeyError> {
Ok(match key_data {
KeyData::JwkSecret { k } => {
let data = BASE64_URL_SAFE_FORGIVING
.decode(k)
- .map_err(|_| data_error("invalid key data"))?;
+ .map_err(|_| ImportKeyError::InvalidKeyData)?;
ImportKeyResult::Hmac {
raw_data: RustRawKeyData::Secret(data.into()),
}
}
- _ => return Err(unsupported_format()),
+ _ => return Err(SharedError::UnsupportedFormat.into()),
})
}
diff --git a/ext/crypto/lib.rs b/ext/crypto/lib.rs
index c96029bf4..69dcd1413 100644
--- a/ext/crypto/lib.rs
+++ b/ext/crypto/lib.rs
@@ -6,10 +6,7 @@ use aes_kw::KekAes256;
use base64::prelude::BASE64_URL_SAFE_NO_PAD;
use base64::Engine;
-use deno_core::error::custom_error;
use deno_core::error::not_supported;
-use deno_core::error::type_error;
-use deno_core::error::AnyError;
use deno_core::op2;
use deno_core::ToJsBuffer;
@@ -17,7 +14,6 @@ use deno_core::unsync::spawn_blocking;
use deno_core::JsBuffer;
use deno_core::OpState;
use serde::Deserialize;
-use shared::operation_error;
use p256::elliptic_curve::sec1::FromEncodedPoint;
use p256::pkcs8::DecodePrivateKey;
@@ -67,15 +63,24 @@ mod x25519;
mod x448;
pub use crate::decrypt::op_crypto_decrypt;
+pub use crate::decrypt::DecryptError;
+pub use crate::ed25519::Ed25519Error;
pub use crate::encrypt::op_crypto_encrypt;
+pub use crate::encrypt::EncryptError;
pub use crate::export_key::op_crypto_export_key;
+pub use crate::export_key::ExportKeyError;
pub use crate::generate_key::op_crypto_generate_key;
+pub use crate::generate_key::GenerateKeyError;
pub use crate::import_key::op_crypto_import_key;
+pub use crate::import_key::ImportKeyError;
use crate::key::Algorithm;
use crate::key::CryptoHash;
use crate::key::CryptoNamedCurve;
use crate::key::HkdfOutput;
+pub use crate::shared::SharedError;
use crate::shared::V8RawKeyData;
+pub use crate::x25519::X25519Error;
+pub use crate::x448::X448Error;
deno_core::extension!(deno_crypto,
deps = [ deno_webidl, deno_web ],
@@ -127,11 +132,63 @@ deno_core::extension!(deno_crypto,
},
);
+#[derive(Debug, thiserror::Error)]
+pub enum Error {
+ #[error(transparent)]
+ General(#[from] SharedError),
+ #[error(transparent)]
+ JoinError(#[from] tokio::task::JoinError),
+ #[error(transparent)]
+ Der(#[from] rsa::pkcs1::der::Error),
+ #[error("Missing argument hash")]
+ MissingArgumentHash,
+ #[error("Missing argument saltLength")]
+ MissingArgumentSaltLength,
+ #[error("unsupported algorithm")]
+ UnsupportedAlgorithm,
+ #[error(transparent)]
+ KeyRejected(#[from] ring::error::KeyRejected),
+ #[error(transparent)]
+ RSA(#[from] rsa::Error),
+ #[error(transparent)]
+ Pkcs1(#[from] rsa::pkcs1::Error),
+ #[error(transparent)]
+ Unspecified(#[from] ring::error::Unspecified),
+ #[error("Invalid key format")]
+ InvalidKeyFormat,
+ #[error(transparent)]
+ P256Ecdsa(#[from] p256::ecdsa::Error),
+ #[error("Unexpected error decoding private key")]
+ DecodePrivateKey,
+ #[error("Missing argument publicKey")]
+ MissingArgumentPublicKey,
+ #[error("Missing argument namedCurve")]
+ MissingArgumentNamedCurve,
+ #[error("Missing argument info")]
+ MissingArgumentInfo,
+ #[error("The length provided for HKDF is too large")]
+ HKDFLengthTooLarge,
+ #[error(transparent)]
+ Base64Decode(#[from] base64::DecodeError),
+ #[error("Data must be multiple of 8 bytes")]
+ DataInvalidSize,
+ #[error("Invalid key length")]
+ InvalidKeyLength,
+ #[error("encryption error")]
+ EncryptionError,
+ #[error("decryption error - integrity check failed")]
+ DecryptionError,
+ #[error("The ArrayBufferView's byte length ({0}) exceeds the number of bytes of entropy available via this API (65536)")]
+ ArrayBufferViewLengthExceeded(usize),
+ #[error(transparent)]
+ Other(deno_core::error::AnyError),
+}
+
#[op2]
#[serde]
pub fn op_crypto_base64url_decode(
#[string] data: String,
-) -> Result<ToJsBuffer, AnyError> {
+) -> Result<ToJsBuffer, Error> {
let data: Vec<u8> = BASE64_URL_SAFE_NO_PAD.decode(data)?;
Ok(data.into())
}
@@ -147,12 +204,9 @@ pub fn op_crypto_base64url_encode(#[buffer] data: JsBuffer) -> String {
pub fn op_crypto_get_random_values(
state: &mut OpState,
#[buffer] out: &mut [u8],
-) -> Result<(), AnyError> {
+) -> Result<(), Error> {
if out.len() > 65536 {
- return Err(
- deno_web::DomExceptionQuotaExceededError::new(&format!("The ArrayBufferView's byte length ({}) exceeds the number of bytes of entropy available via this API (65536)", out.len()))
- .into(),
- );
+ return Err(Error::ArrayBufferViewLengthExceeded(out.len()));
}
let maybe_seeded_rng = state.try_borrow_mut::<StdRng>();
@@ -204,7 +258,7 @@ pub struct SignArg {
pub async fn op_crypto_sign_key(
#[serde] args: SignArg,
#[buffer] zero_copy: JsBuffer,
-) -> Result<ToJsBuffer, AnyError> {
+) -> Result<ToJsBuffer, Error> {
deno_core::unsync::spawn_blocking(move || {
let data = &*zero_copy;
let algorithm = args.algorithm;
@@ -213,10 +267,7 @@ pub async fn op_crypto_sign_key(
Algorithm::RsassaPkcs1v15 => {
use rsa::pkcs1v15::SigningKey;
let private_key = RsaPrivateKey::from_pkcs1_der(&args.key.data)?;
- match args
- .hash
- .ok_or_else(|| type_error("Missing argument hash".to_string()))?
- {
+ match args.hash.ok_or_else(|| Error::MissingArgumentHash)? {
CryptoHash::Sha1 => {
let signing_key = SigningKey::<Sha1>::new(private_key);
signing_key.sign(data)
@@ -239,15 +290,13 @@ pub async fn op_crypto_sign_key(
Algorithm::RsaPss => {
let private_key = RsaPrivateKey::from_pkcs1_der(&args.key.data)?;
- let salt_len = args.salt_length.ok_or_else(|| {
- type_error("Missing argument saltLength".to_string())
- })? as usize;
+ let salt_len = args
+ .salt_length
+ .ok_or_else(|| Error::MissingArgumentSaltLength)?
+ as usize;
let mut rng = OsRng;
- match args
- .hash
- .ok_or_else(|| type_error("Missing argument hash".to_string()))?
- {
+ match args.hash.ok_or_else(|| Error::MissingArgumentHash)? {
CryptoHash::Sha1 => {
let signing_key = Pss::new_with_salt::<Sha1>(salt_len);
let hashed = Sha1::digest(data);
@@ -272,8 +321,10 @@ pub async fn op_crypto_sign_key(
.to_vec()
}
Algorithm::Ecdsa => {
- let curve: &EcdsaSigningAlgorithm =
- args.named_curve.ok_or_else(not_supported)?.into();
+ let curve: &EcdsaSigningAlgorithm = args
+ .named_curve
+ .ok_or_else(|| Error::Other(not_supported()))?
+ .into();
let rng = RingRand::SystemRandom::new();
let key_pair = EcdsaKeyPair::from_pkcs8(curve, &args.key.data, &rng)?;
@@ -282,7 +333,7 @@ pub async fn op_crypto_sign_key(
if let Some(hash) = args.hash {
match hash {
CryptoHash::Sha256 | CryptoHash::Sha384 => (),
- _ => return Err(type_error("Unsupported algorithm")),
+ _ => return Err(Error::UnsupportedAlgorithm),
}
};
@@ -292,14 +343,17 @@ pub async fn op_crypto_sign_key(
signature.as_ref().to_vec()
}
Algorithm::Hmac => {
- let hash: HmacAlgorithm = args.hash.ok_or_else(not_supported)?.into();
+ let hash: HmacAlgorithm = args
+ .hash
+ .ok_or_else(|| Error::Other(not_supported()))?
+ .into();
let key = HmacKey::new(hash, &args.key.data);
let signature = ring::hmac::sign(&key, data);
signature.as_ref().to_vec()
}
- _ => return Err(type_error("Unsupported algorithm".to_string())),
+ _ => return Err(Error::UnsupportedAlgorithm),
};
Ok(signature.into())
@@ -322,7 +376,7 @@ pub struct VerifyArg {
pub async fn op_crypto_verify_key(
#[serde] args: VerifyArg,
#[buffer] zero_copy: JsBuffer,
-) -> Result<bool, AnyError> {
+) -> Result<bool, Error> {
deno_core::unsync::spawn_blocking(move || {
let data = &*zero_copy;
let algorithm = args.algorithm;
@@ -333,10 +387,7 @@ pub async fn op_crypto_verify_key(
use rsa::pkcs1v15::VerifyingKey;
let public_key = read_rsa_public_key(args.key)?;
let signature: Signature = args.signature.as_ref().try_into()?;
- match args
- .hash
- .ok_or_else(|| type_error("Missing argument hash".to_string()))?
- {
+ match args.hash.ok_or_else(|| Error::MissingArgumentHash)? {
CryptoHash::Sha1 => {
let verifying_key = VerifyingKey::<Sha1>::new(public_key);
verifying_key.verify(data, &signature).is_ok()
@@ -359,14 +410,12 @@ pub async fn op_crypto_verify_key(
let public_key = read_rsa_public_key(args.key)?;
let signature = args.signature.as_ref();
- let salt_len = args.salt_length.ok_or_else(|| {
- type_error("Missing argument saltLength".to_string())
- })? as usize;
+ let salt_len = args
+ .salt_length
+ .ok_or_else(|| Error::MissingArgumentSaltLength)?
+ as usize;
- match args
- .hash
- .ok_or_else(|| type_error("Missing argument hash".to_string()))?
- {
+ match args.hash.ok_or_else(|| Error::MissingArgumentHash)? {
CryptoHash::Sha1 => {
let pss = Pss::new_with_salt::<Sha1>(salt_len);
let hashed = Sha1::digest(data);
@@ -390,15 +439,22 @@ pub async fn op_crypto_verify_key(
}
}
Algorithm::Hmac => {
- let hash: HmacAlgorithm = args.hash.ok_or_else(not_supported)?.into();
+ let hash: HmacAlgorithm = args
+ .hash
+ .ok_or_else(|| Error::Other(not_supported()))?
+ .into();
let key = HmacKey::new(hash, &args.key.data);
ring::hmac::verify(&key, data, &args.signature).is_ok()
}
Algorithm::Ecdsa => {
- let signing_alg: &EcdsaSigningAlgorithm =
- args.named_curve.ok_or_else(not_supported)?.into();
- let verify_alg: &EcdsaVerificationAlgorithm =
- args.named_curve.ok_or_else(not_supported)?.into();
+ let signing_alg: &EcdsaSigningAlgorithm = args
+ .named_curve
+ .ok_or_else(|| Error::Other(not_supported()))?
+ .into();
+ let verify_alg: &EcdsaVerificationAlgorithm = args
+ .named_curve
+ .ok_or_else(|| Error::Other(not_supported()))?
+ .into();
let private_key;
@@ -411,7 +467,7 @@ pub async fn op_crypto_verify_key(
private_key.public_key().as_ref()
}
KeyType::Public => &*args.key.data,
- _ => return Err(type_error("Invalid Key format".to_string())),
+ _ => return Err(Error::InvalidKeyFormat),
};
let public_key =
@@ -419,7 +475,7 @@ pub async fn op_crypto_verify_key(
public_key.verify(data, &args.signature).is_ok()
}
- _ => return Err(type_error("Unsupported algorithm".to_string())),
+ _ => return Err(Error::UnsupportedAlgorithm),
};
Ok(verification)
@@ -447,70 +503,68 @@ pub struct DeriveKeyArg {
pub async fn op_crypto_derive_bits(
#[serde] args: DeriveKeyArg,
#[buffer] zero_copy: Option<JsBuffer>,
-) -> Result<ToJsBuffer, AnyError> {
+) -> Result<ToJsBuffer, Error> {
deno_core::unsync::spawn_blocking(move || {
let algorithm = args.algorithm;
match algorithm {
Algorithm::Pbkdf2 => {
- let zero_copy = zero_copy.ok_or_else(not_supported)?;
+ let zero_copy =
+ zero_copy.ok_or_else(|| Error::Other(not_supported()))?;
let salt = &*zero_copy;
// The caller must validate these cases.
assert!(args.length > 0);
assert!(args.length % 8 == 0);
- let algorithm = match args.hash.ok_or_else(not_supported)? {
- CryptoHash::Sha1 => pbkdf2::PBKDF2_HMAC_SHA1,
- CryptoHash::Sha256 => pbkdf2::PBKDF2_HMAC_SHA256,
- CryptoHash::Sha384 => pbkdf2::PBKDF2_HMAC_SHA384,
- CryptoHash::Sha512 => pbkdf2::PBKDF2_HMAC_SHA512,
- };
+ let algorithm =
+ match args.hash.ok_or_else(|| Error::Other(not_supported()))? {
+ CryptoHash::Sha1 => pbkdf2::PBKDF2_HMAC_SHA1,
+ CryptoHash::Sha256 => pbkdf2::PBKDF2_HMAC_SHA256,
+ CryptoHash::Sha384 => pbkdf2::PBKDF2_HMAC_SHA384,
+ CryptoHash::Sha512 => pbkdf2::PBKDF2_HMAC_SHA512,
+ };
// This will never panic. We have already checked length earlier.
- let iterations =
- NonZeroU32::new(args.iterations.ok_or_else(not_supported)?).unwrap();
+ let iterations = NonZeroU32::new(
+ args
+ .iterations
+ .ok_or_else(|| Error::Other(not_supported()))?,
+ )
+ .unwrap();
let secret = args.key.data;
let mut out = vec![0; args.length / 8];
pbkdf2::derive(algorithm, iterations, salt, &secret, &mut out);
Ok(out.into())
}
Algorithm::Ecdh => {
- let named_curve = args.named_curve.ok_or_else(|| {
- type_error("Missing argument namedCurve".to_string())
- })?;
+ let named_curve = args
+ .named_curve
+ .ok_or_else(|| Error::MissingArgumentNamedCurve)?;
let public_key = args
.public_key
- .ok_or_else(|| type_error("Missing argument publicKey"))?;
+ .ok_or_else(|| Error::MissingArgumentPublicKey)?;
match named_curve {
CryptoNamedCurve::P256 => {
let secret_key = p256::SecretKey::from_pkcs8_der(&args.key.data)
- .map_err(|_| {
- type_error("Unexpected error decoding private key")
- })?;
+ .map_err(|_| Error::DecodePrivateKey)?;
let public_key = match public_key.r#type {
KeyType::Private => {
p256::SecretKey::from_pkcs8_der(&public_key.data)
- .map_err(|_| {
- type_error("Unexpected error decoding private key")
- })?
+ .map_err(|_| Error::DecodePrivateKey)?
.public_key()
}
KeyType::Public => {
let point = p256::EncodedPoint::from_bytes(public_key.data)
- .map_err(|_| {
- type_error("Unexpected error decoding private key")
- })?;
+ .map_err(|_| Error::DecodePrivateKey)?;
let pk = p256::PublicKey::from_encoded_point(&point);
// pk is a constant time Option.
if pk.is_some().into() {
pk.unwrap()
} else {
- return Err(type_error(
- "Unexpected error decoding private key",
- ));
+ return Err(Error::DecodePrivateKey);
}
}
_ => unreachable!(),
@@ -526,32 +580,24 @@ pub async fn op_crypto_derive_bits(
}
CryptoNamedCurve::P384 => {
let secret_key = p384::SecretKey::from_pkcs8_der(&args.key.data)
- .map_err(|_| {
- type_error("Unexpected error decoding private key")
- })?;
+ .map_err(|_| Error::DecodePrivateKey)?;
let public_key = match public_key.r#type {
KeyType::Private => {
p384::SecretKey::from_pkcs8_der(&public_key.data)
- .map_err(|_| {
- type_error("Unexpected error decoding private key")
- })?
+ .map_err(|_| Error::DecodePrivateKey)?
.public_key()
}
KeyType::Public => {
let point = p384::EncodedPoint::from_bytes(public_key.data)
- .map_err(|_| {
- type_error("Unexpected error decoding private key")
- })?;
+ .map_err(|_| Error::DecodePrivateKey)?;
let pk = p384::PublicKey::from_encoded_point(&point);
// pk is a constant time Option.
if pk.is_some().into() {
pk.unwrap()
} else {
- return Err(type_error(
- "Unexpected error decoding private key",
- ));
+ return Err(Error::DecodePrivateKey);
}
}
_ => unreachable!(),
@@ -568,18 +614,18 @@ pub async fn op_crypto_derive_bits(
}
}
Algorithm::Hkdf => {
- let zero_copy = zero_copy.ok_or_else(not_supported)?;
+ let zero_copy =
+ zero_copy.ok_or_else(|| Error::Other(not_supported()))?;
let salt = &*zero_copy;
- let algorithm = match args.hash.ok_or_else(not_supported)? {
- CryptoHash::Sha1 => hkdf::HKDF_SHA1_FOR_LEGACY_USE_ONLY,
- CryptoHash::Sha256 => hkdf::HKDF_SHA256,
- CryptoHash::Sha384 => hkdf::HKDF_SHA384,
- CryptoHash::Sha512 => hkdf::HKDF_SHA512,
- };
-
- let info = args
- .info
- .ok_or_else(|| type_error("Missing argument info".to_string()))?;
+ let algorithm =
+ match args.hash.ok_or_else(|| Error::Other(not_supported()))? {
+ CryptoHash::Sha1 => hkdf::HKDF_SHA1_FOR_LEGACY_USE_ONLY,
+ CryptoHash::Sha256 => hkdf::HKDF_SHA256,
+ CryptoHash::Sha384 => hkdf::HKDF_SHA384,
+ CryptoHash::Sha512 => hkdf::HKDF_SHA512,
+ };
+
+ let info = args.info.ok_or_else(|| Error::MissingArgumentInfo)?;
// IKM
let secret = args.key.data;
// L
@@ -588,23 +634,20 @@ pub async fn op_crypto_derive_bits(
let salt = hkdf::Salt::new(algorithm, salt);
let prk = salt.extract(&secret);
let info = &[&*info];
- let okm = prk.expand(info, HkdfOutput(length)).map_err(|_e| {
- custom_error(
- "DOMExceptionOperationError",
- "The length provided for HKDF is too large",
- )
- })?;
+ let okm = prk
+ .expand(info, HkdfOutput(length))
+ .map_err(|_e| Error::HKDFLengthTooLarge)?;
let mut r = vec![0u8; length];
okm.fill(&mut r)?;
Ok(r.into())
}
- _ => Err(type_error("Unsupported algorithm".to_string())),
+ _ => Err(Error::UnsupportedAlgorithm),
}
})
.await?
}
-fn read_rsa_public_key(key_data: KeyData) -> Result<RsaPublicKey, AnyError> {
+fn read_rsa_public_key(key_data: KeyData) -> Result<RsaPublicKey, Error> {
let public_key = match key_data.r#type {
KeyType::Private => {
RsaPrivateKey::from_pkcs1_der(&key_data.data)?.to_public_key()
@@ -617,7 +660,7 @@ fn read_rsa_public_key(key_data: KeyData) -> Result<RsaPublicKey, AnyError> {
#[op2]
#[string]
-pub fn op_crypto_random_uuid(state: &mut OpState) -> Result<String, AnyError> {
+pub fn op_crypto_random_uuid(state: &mut OpState) -> Result<String, Error> {
let maybe_seeded_rng = state.try_borrow_mut::<StdRng>();
let uuid = if let Some(seeded_rng) = maybe_seeded_rng {
let mut bytes = [0u8; 16];
@@ -638,7 +681,7 @@ pub fn op_crypto_random_uuid(state: &mut OpState) -> Result<String, AnyError> {
pub async fn op_crypto_subtle_digest(
#[serde] algorithm: CryptoHash,
#[buffer] data: JsBuffer,
-) -> Result<ToJsBuffer, AnyError> {
+) -> Result<ToJsBuffer, Error> {
let output = spawn_blocking(move || {
digest::digest(algorithm.into(), &data)
.as_ref()
@@ -662,7 +705,7 @@ pub struct WrapUnwrapKeyArg {
pub fn op_crypto_wrap_key(
#[serde] args: WrapUnwrapKeyArg,
#[buffer] data: JsBuffer,
-) -> Result<ToJsBuffer, AnyError> {
+) -> Result<ToJsBuffer, Error> {
let algorithm = args.algorithm;
match algorithm {
@@ -670,20 +713,20 @@ pub fn op_crypto_wrap_key(
let key = args.key.as_secret_key()?;
if data.len() % 8 != 0 {
- return Err(type_error("Data must be multiple of 8 bytes"));
+ return Err(Error::DataInvalidSize);
}
let wrapped_key = match key.len() {
16 => KekAes128::new(key.into()).wrap_vec(&data),
24 => KekAes192::new(key.into()).wrap_vec(&data),
32 => KekAes256::new(key.into()).wrap_vec(&data),
- _ => return Err(type_error("Invalid key length")),
+ _ => return Err(Error::InvalidKeyLength),
}
- .map_err(|_| operation_error("encryption error"))?;
+ .map_err(|_| Error::EncryptionError)?;
Ok(wrapped_key.into())
}
- _ => Err(type_error("Unsupported algorithm")),
+ _ => Err(Error::UnsupportedAlgorithm),
}
}
@@ -692,29 +735,27 @@ pub fn op_crypto_wrap_key(
pub fn op_crypto_unwrap_key(
#[serde] args: WrapUnwrapKeyArg,
#[buffer] data: JsBuffer,
-) -> Result<ToJsBuffer, AnyError> {
+) -> Result<ToJsBuffer, Error> {
let algorithm = args.algorithm;
match algorithm {
Algorithm::AesKw => {
let key = args.key.as_secret_key()?;
if data.len() % 8 != 0 {
- return Err(type_error("Data must be multiple of 8 bytes"));
+ return Err(Error::DataInvalidSize);
}
let unwrapped_key = match key.len() {
16 => KekAes128::new(key.into()).unwrap_vec(&data),
24 => KekAes192::new(key.into()).unwrap_vec(&data),
32 => KekAes256::new(key.into()).unwrap_vec(&data),
- _ => return Err(type_error("Invalid key length")),
+ _ => return Err(Error::InvalidKeyLength),
}
- .map_err(|_| {
- operation_error("decryption error - integrity check failed")
- })?;
+ .map_err(|_| Error::DecryptionError)?;
Ok(unwrapped_key.into())
}
- _ => Err(type_error("Unsupported algorithm")),
+ _ => Err(Error::UnsupportedAlgorithm),
}
}
diff --git a/ext/crypto/shared.rs b/ext/crypto/shared.rs
index d06a268cd..f70d32856 100644
--- a/ext/crypto/shared.rs
+++ b/ext/crypto/shared.rs
@@ -2,9 +2,6 @@
use std::borrow::Cow;
-use deno_core::error::custom_error;
-use deno_core::error::type_error;
-use deno_core::error::AnyError;
use deno_core::JsBuffer;
use deno_core::ToJsBuffer;
use elliptic_curve::sec1::ToEncodedPoint;
@@ -63,47 +60,73 @@ pub enum RustRawKeyData {
Public(ToJsBuffer),
}
+#[derive(Debug, thiserror::Error)]
+pub enum SharedError {
+ #[error("expected valid private key")]
+ ExpectedValidPrivateKey,
+ #[error("expected valid public key")]
+ ExpectedValidPublicKey,
+ #[error("expected valid private EC key")]
+ ExpectedValidPrivateECKey,
+ #[error("expected valid public EC key")]
+ ExpectedValidPublicECKey,
+ #[error("expected private key")]
+ ExpectedPrivateKey,
+ #[error("expected public key")]
+ ExpectedPublicKey,
+ #[error("expected secret key")]
+ ExpectedSecretKey,
+ #[error("failed to decode private key")]
+ FailedDecodePrivateKey,
+ #[error("failed to decode public key")]
+ FailedDecodePublicKey,
+ #[error("unsupported format")]
+ UnsupportedFormat,
+}
+
impl V8RawKeyData {
- pub fn as_rsa_public_key(&self) -> Result<Cow<'_, [u8]>, AnyError> {
+ pub fn as_rsa_public_key(&self) -> Result<Cow<'_, [u8]>, SharedError> {
match self {
V8RawKeyData::Public(data) => Ok(Cow::Borrowed(data)),
V8RawKeyData::Private(data) => {
let private_key = RsaPrivateKey::from_pkcs1_der(data)
- .map_err(|_| type_error("expected valid private key"))?;
+ .map_err(|_| SharedError::ExpectedValidPrivateKey)?;
let public_key_doc = private_key
.to_public_key()
.to_pkcs1_der()
- .map_err(|_| type_error("expected valid public key"))?;
+ .map_err(|_| SharedError::ExpectedValidPublicKey)?;
Ok(Cow::Owned(public_key_doc.as_bytes().into()))
}
- _ => Err(type_error("expected public key")),
+ _ => Err(SharedError::ExpectedPublicKey),
}
}
- pub fn as_rsa_private_key(&self) -> Result<&[u8], AnyError> {
+ pub fn as_rsa_private_key(&self) -> Result<&[u8], SharedError> {
match self {
V8RawKeyData::Private(data) => Ok(data),
- _ => Err(type_error("expected private key")),
+ _ => Err(SharedError::ExpectedPrivateKey),
}
}
- pub fn as_secret_key(&self) -> Result<&[u8], AnyError> {
+ pub fn as_secret_key(&self) -> Result<&[u8], SharedError> {
match self {
V8RawKeyData::Secret(data) => Ok(data),
- _ => Err(type_error("expected secret key")),
+ _ => Err(SharedError::ExpectedSecretKey),
}
}
- pub fn as_ec_public_key_p256(&self) -> Result<p256::EncodedPoint, AnyError> {
+ pub fn as_ec_public_key_p256(
+ &self,
+ ) -> Result<p256::EncodedPoint, SharedError> {
match self {
V8RawKeyData::Public(data) => p256::PublicKey::from_sec1_bytes(data)
.map(|p| p.to_encoded_point(false))
- .map_err(|_| type_error("expected valid public EC key")),
+ .map_err(|_| SharedError::ExpectedValidPublicECKey),
V8RawKeyData::Private(data) => {
let signing_key = p256::SecretKey::from_pkcs8_der(data)
- .map_err(|_| type_error("expected valid private EC key"))?;
+ .map_err(|_| SharedError::ExpectedValidPrivateECKey)?;
Ok(signing_key.public_key().to_encoded_point(false))
}
// Should never reach here.
@@ -111,14 +134,16 @@ impl V8RawKeyData {
}
}
- pub fn as_ec_public_key_p384(&self) -> Result<p384::EncodedPoint, AnyError> {
+ pub fn as_ec_public_key_p384(
+ &self,
+ ) -> Result<p384::EncodedPoint, SharedError> {
match self {
V8RawKeyData::Public(data) => p384::PublicKey::from_sec1_bytes(data)
.map(|p| p.to_encoded_point(false))
- .map_err(|_| type_error("expected valid public EC key")),
+ .map_err(|_| SharedError::ExpectedValidPublicECKey),
V8RawKeyData::Private(data) => {
let signing_key = p384::SecretKey::from_pkcs8_der(data)
- .map_err(|_| type_error("expected valid private EC key"))?;
+ .map_err(|_| SharedError::ExpectedValidPrivateECKey)?;
Ok(signing_key.public_key().to_encoded_point(false))
}
// Should never reach here.
@@ -126,16 +151,18 @@ impl V8RawKeyData {
}
}
- pub fn as_ec_public_key_p521(&self) -> Result<p521::EncodedPoint, AnyError> {
+ pub fn as_ec_public_key_p521(
+ &self,
+ ) -> Result<p521::EncodedPoint, SharedError> {
match self {
V8RawKeyData::Public(data) => {
// public_key is a serialized EncodedPoint
p521::EncodedPoint::from_bytes(data)
- .map_err(|_| type_error("expected valid public EC key"))
+ .map_err(|_| SharedError::ExpectedValidPublicECKey)
}
V8RawKeyData::Private(data) => {
let signing_key = p521::SecretKey::from_pkcs8_der(data)
- .map_err(|_| type_error("expected valid private EC key"))?;
+ .map_err(|_| SharedError::ExpectedValidPrivateECKey)?;
Ok(signing_key.public_key().to_encoded_point(false))
}
// Should never reach here.
@@ -143,26 +170,10 @@ impl V8RawKeyData {
}
}
- pub fn as_ec_private_key(&self) -> Result<&[u8], AnyError> {
+ pub fn as_ec_private_key(&self) -> Result<&[u8], SharedError> {
match self {
V8RawKeyData::Private(data) => Ok(data),
- _ => Err(type_error("expected private key")),
+ _ => Err(SharedError::ExpectedPrivateKey),
}
}
}
-
-pub fn data_error(msg: impl Into<Cow<'static, str>>) -> AnyError {
- custom_error("DOMExceptionDataError", msg)
-}
-
-pub fn not_supported_error(msg: impl Into<Cow<'static, str>>) -> AnyError {
- custom_error("DOMExceptionNotSupportedError", msg)
-}
-
-pub fn operation_error(msg: impl Into<Cow<'static, str>>) -> AnyError {
- custom_error("DOMExceptionOperationError", msg)
-}
-
-pub fn unsupported_format() -> AnyError {
- not_supported_error("unsupported format")
-}
diff --git a/ext/crypto/x25519.rs b/ext/crypto/x25519.rs
index cdbd1d7c8..d2c4d986b 100644
--- a/ext/crypto/x25519.rs
+++ b/ext/crypto/x25519.rs
@@ -1,8 +1,6 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
use curve25519_dalek::montgomery::MontgomeryPoint;
-use deno_core::error::custom_error;
-use deno_core::error::AnyError;
use deno_core::op2;
use deno_core::ToJsBuffer;
use elliptic_curve::pkcs8::PrivateKeyInfo;
@@ -13,6 +11,14 @@ use spki::der::asn1::BitString;
use spki::der::Decode;
use spki::der::Encode;
+#[derive(Debug, thiserror::Error)]
+pub enum X25519Error {
+ #[error("Failed to export key")]
+ FailedExport,
+ #[error(transparent)]
+ Der(#[from] spki::der::Error),
+}
+
#[op2(fast)]
pub fn op_crypto_generate_x25519_keypair(
#[buffer] pkey: &mut [u8],
@@ -113,7 +119,7 @@ pub fn op_crypto_import_pkcs8_x25519(
#[serde]
pub fn op_crypto_export_spki_x25519(
#[buffer] pubkey: &[u8],
-) -> Result<ToJsBuffer, AnyError> {
+) -> Result<ToJsBuffer, X25519Error> {
let key_info = spki::SubjectPublicKeyInfo {
algorithm: spki::AlgorithmIdentifierRef {
// id-X25519
@@ -125,9 +131,7 @@ pub fn op_crypto_export_spki_x25519(
Ok(
key_info
.to_der()
- .map_err(|_| {
- custom_error("DOMExceptionOperationError", "Failed to export key")
- })?
+ .map_err(|_| X25519Error::FailedExport)?
.into(),
)
}
@@ -136,7 +140,7 @@ pub fn op_crypto_export_spki_x25519(
#[serde]
pub fn op_crypto_export_pkcs8_x25519(
#[buffer] pkey: &[u8],
-) -> Result<ToJsBuffer, AnyError> {
+) -> Result<ToJsBuffer, X25519Error> {
use rsa::pkcs1::der::Encode;
// This should probably use OneAsymmetricKey instead
diff --git a/ext/crypto/x448.rs b/ext/crypto/x448.rs
index 3c8f24c31..89bf48e28 100644
--- a/ext/crypto/x448.rs
+++ b/ext/crypto/x448.rs
@@ -1,6 +1,5 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
-use deno_core::error::custom_error;
-use deno_core::error::AnyError;
+
use deno_core::op2;
use deno_core::ToJsBuffer;
use ed448_goldilocks::curve::MontgomeryPoint;
@@ -13,6 +12,14 @@ use spki::der::asn1::BitString;
use spki::der::Decode;
use spki::der::Encode;
+#[derive(Debug, thiserror::Error)]
+pub enum X448Error {
+ #[error("Failed to export key")]
+ FailedExport,
+ #[error(transparent)]
+ Der(#[from] spki::der::Error),
+}
+
#[op2(fast)]
pub fn op_crypto_generate_x448_keypair(
#[buffer] pkey: &mut [u8],
@@ -56,7 +63,7 @@ const X448_OID: const_oid::ObjectIdentifier =
#[serde]
pub fn op_crypto_export_spki_x448(
#[buffer] pubkey: &[u8],
-) -> Result<ToJsBuffer, AnyError> {
+) -> Result<ToJsBuffer, X448Error> {
let key_info = spki::SubjectPublicKeyInfo {
algorithm: spki::AlgorithmIdentifierRef {
oid: X448_OID,
@@ -67,9 +74,7 @@ pub fn op_crypto_export_spki_x448(
Ok(
key_info
.to_der()
- .map_err(|_| {
- custom_error("DOMExceptionOperationError", "Failed to export key")
- })?
+ .map_err(|_| X448Error::FailedExport)?
.into(),
)
}
@@ -78,7 +83,7 @@ pub fn op_crypto_export_spki_x448(
#[serde]
pub fn op_crypto_export_pkcs8_x448(
#[buffer] pkey: &[u8],
-) -> Result<ToJsBuffer, AnyError> {
+) -> Result<ToJsBuffer, X448Error> {
use rsa::pkcs1::der::Encode;
let pk_info = rsa::pkcs8::PrivateKeyInfo {
diff --git a/ext/fetch/22_body.js b/ext/fetch/22_body.js
index 61a06b4af..c7e977c0b 100644
--- a/ext/fetch/22_body.js
+++ b/ext/fetch/22_body.js
@@ -15,6 +15,7 @@ import { core, primordials } from "ext:core/mod.js";
const {
isAnyArrayBuffer,
isArrayBuffer,
+ isStringObject,
} = core;
const {
ArrayBufferIsView,
@@ -466,6 +467,8 @@ function extractBody(object) {
if (object.locked || isReadableStreamDisturbed(object)) {
throw new TypeError("ReadableStream is locked or disturbed");
}
+ } else if (object[webidl.AsyncIterable] === webidl.AsyncIterable) {
+ stream = ReadableStream.from(object.open());
}
if (typeof source === "string") {
// WARNING: this deviates from spec (expects length to be set)
@@ -483,6 +486,9 @@ function extractBody(object) {
return { body, contentType };
}
+webidl.converters["async iterable<Uint8Array>"] = webidl
+ .createAsyncIterableConverter(webidl.converters.Uint8Array);
+
webidl.converters["BodyInit_DOMString"] = (V, prefix, context, opts) => {
// Union for (ReadableStream or Blob or ArrayBufferView or ArrayBuffer or FormData or URLSearchParams or USVString)
if (ObjectPrototypeIsPrototypeOf(ReadableStreamPrototype, V)) {
@@ -501,6 +507,14 @@ webidl.converters["BodyInit_DOMString"] = (V, prefix, context, opts) => {
if (ArrayBufferIsView(V)) {
return webidl.converters["ArrayBufferView"](V, prefix, context, opts);
}
+ if (webidl.isAsyncIterable(V) && !isStringObject(V)) {
+ return webidl.converters["async iterable<Uint8Array>"](
+ V,
+ prefix,
+ context,
+ opts,
+ );
+ }
}
// BodyInit conversion is passed to extractBody(), which calls core.encode().
// core.encode() will UTF-8 encode strings with replacement, being equivalent to the USV normalization.
diff --git a/ext/fetch/23_request.js b/ext/fetch/23_request.js
index 6211e927d..61cac22d2 100644
--- a/ext/fetch/23_request.js
+++ b/ext/fetch/23_request.js
@@ -269,19 +269,25 @@ class Request {
/** @type {AbortSignal} */
get [_signal]() {
const signal = this[_signalCache];
- // This signal not been created yet, and the request is still in progress
- if (signal === undefined) {
+ // This signal has not been created yet, but the request has already completed
+ if (signal === false) {
const signal = newSignal();
this[_signalCache] = signal;
+ signal[signalAbort](signalAbortError);
return signal;
}
- // This signal has not been created yet, but the request has already completed
- if (signal === false) {
+
+ // This signal not been created yet, and the request is still in progress
+ if (signal === undefined) {
const signal = newSignal();
this[_signalCache] = signal;
- signal[signalAbort](signalAbortError);
+ this[_request].onCancel?.(() => {
+ signal[signalAbort](signalAbortError);
+ });
+
return signal;
}
+
return signal;
}
get [_mimeType]() {
diff --git a/ext/fetch/23_response.js b/ext/fetch/23_response.js
index ff4ad5fac..278dcb7de 100644
--- a/ext/fetch/23_response.js
+++ b/ext/fetch/23_response.js
@@ -61,6 +61,15 @@ const _mimeType = Symbol("mime type");
const _body = Symbol("body");
const _brand = webidl.brand;
+// it's slightly faster to cache these
+const webidlConvertersBodyInitDomString =
+ webidl.converters["BodyInit_DOMString?"];
+const webidlConvertersUSVString = webidl.converters["USVString"];
+const webidlConvertersUnsignedShort = webidl.converters["unsigned short"];
+const webidlConvertersAny = webidl.converters["any"];
+const webidlConvertersByteString = webidl.converters["ByteString"];
+const webidlConvertersHeadersInit = webidl.converters["HeadersInit"];
+
/**
* @typedef InnerResponse
* @property {"basic" | "cors" | "default" | "error" | "opaque" | "opaqueredirect"} type
@@ -259,8 +268,8 @@ class Response {
*/
static redirect(url, status = 302) {
const prefix = "Failed to execute 'Response.redirect'";
- url = webidl.converters["USVString"](url, prefix, "Argument 1");
- status = webidl.converters["unsigned short"](status, prefix, "Argument 2");
+ url = webidlConvertersUSVString(url, prefix, "Argument 1");
+ status = webidlConvertersUnsignedShort(status, prefix, "Argument 2");
const baseURL = getLocationHref();
const parsedURL = new URL(url, baseURL);
@@ -286,8 +295,8 @@ class Response {
*/
static json(data = undefined, init = { __proto__: null }) {
const prefix = "Failed to execute 'Response.json'";
- data = webidl.converters.any(data);
- init = webidl.converters["ResponseInit_fast"](init, prefix, "Argument 2");
+ data = webidlConvertersAny(data);
+ init = webidlConvertersResponseInitFast(init, prefix, "Argument 2");
const str = serializeJSValueToJSONString(data);
const res = extractBody(str);
@@ -313,8 +322,8 @@ class Response {
}
const prefix = "Failed to construct 'Response'";
- body = webidl.converters["BodyInit_DOMString?"](body, prefix, "Argument 1");
- init = webidl.converters["ResponseInit_fast"](init, prefix, "Argument 2");
+ body = webidlConvertersBodyInitDomString(body, prefix, "Argument 1");
+ init = webidlConvertersResponseInitFast(init, prefix, "Argument 2");
this[_response] = newInnerResponse();
this[_headers] = headersFromHeaderList(
@@ -443,47 +452,49 @@ webidl.converters["Response"] = webidl.createInterfaceConverter(
"Response",
ResponsePrototype,
);
-webidl.converters["ResponseInit"] = webidl.createDictionaryConverter(
- "ResponseInit",
- [{
- key: "status",
- defaultValue: 200,
- converter: webidl.converters["unsigned short"],
- }, {
- key: "statusText",
- defaultValue: "",
- converter: webidl.converters["ByteString"],
- }, {
- key: "headers",
- converter: webidl.converters["HeadersInit"],
- }],
-);
-webidl.converters["ResponseInit_fast"] = function (
- init,
- prefix,
- context,
- opts,
-) {
- if (init === undefined || init === null) {
- return { status: 200, statusText: "", headers: undefined };
- }
- // Fast path, if not a proxy
- if (typeof init === "object" && !core.isProxy(init)) {
- // Not a proxy fast path
- const status = init.status !== undefined
- ? webidl.converters["unsigned short"](init.status)
- : 200;
- const statusText = init.statusText !== undefined
- ? webidl.converters["ByteString"](init.statusText)
- : "";
- const headers = init.headers !== undefined
- ? webidl.converters["HeadersInit"](init.headers)
- : undefined;
- return { status, statusText, headers };
- }
- // Slow default path
- return webidl.converters["ResponseInit"](init, prefix, context, opts);
-};
+const webidlConvertersResponseInit = webidl.converters["ResponseInit"] = webidl
+ .createDictionaryConverter(
+ "ResponseInit",
+ [{
+ key: "status",
+ defaultValue: 200,
+ converter: webidlConvertersUnsignedShort,
+ }, {
+ key: "statusText",
+ defaultValue: "",
+ converter: webidlConvertersByteString,
+ }, {
+ key: "headers",
+ converter: webidlConvertersHeadersInit,
+ }],
+ );
+const webidlConvertersResponseInitFast = webidl
+ .converters["ResponseInit_fast"] = function (
+ init,
+ prefix,
+ context,
+ opts,
+ ) {
+ if (init === undefined || init === null) {
+ return { status: 200, statusText: "", headers: undefined };
+ }
+ // Fast path, if not a proxy
+ if (typeof init === "object" && !core.isProxy(init)) {
+ // Not a proxy fast path
+ const status = init.status !== undefined
+ ? webidlConvertersUnsignedShort(init.status)
+ : 200;
+ const statusText = init.statusText !== undefined
+ ? webidlConvertersByteString(init.statusText)
+ : "";
+ const headers = init.headers !== undefined
+ ? webidlConvertersHeadersInit(init.headers)
+ : undefined;
+ return { status, statusText, headers };
+ }
+ // Slow default path
+ return webidlConvertersResponseInit(init, prefix, context, opts);
+ };
/**
* @param {Response} response
diff --git a/ext/fetch/Cargo.toml b/ext/fetch/Cargo.toml
index cc9e4f03d..00c85f2aa 100644
--- a/ext/fetch/Cargo.toml
+++ b/ext/fetch/Cargo.toml
@@ -2,7 +2,7 @@
[package]
name = "deno_fetch"
-version = "0.195.0"
+version = "0.201.0"
authors.workspace = true
edition.workspace = true
license.workspace = true
@@ -22,6 +22,7 @@ deno_permissions.workspace = true
deno_tls.workspace = true
dyn-clone = "1"
error_reporter = "1"
+hickory-resolver.workspace = true
http.workspace = true
http-body-util.workspace = true
hyper.workspace = true
@@ -32,6 +33,7 @@ percent-encoding.workspace = true
rustls-webpki.workspace = true
serde.workspace = true
serde_json.workspace = true
+thiserror.workspace = true
tokio.workspace = true
tokio-rustls.workspace = true
tokio-socks.workspace = true
diff --git a/ext/fetch/dns.rs b/ext/fetch/dns.rs
new file mode 100644
index 000000000..9e21a4c34
--- /dev/null
+++ b/ext/fetch/dns.rs
@@ -0,0 +1,116 @@
+// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
+use std::future::Future;
+use std::io;
+use std::net::SocketAddr;
+use std::pin::Pin;
+use std::task::Poll;
+use std::task::{self};
+use std::vec;
+
+use hickory_resolver::error::ResolveError;
+use hickory_resolver::name_server::GenericConnector;
+use hickory_resolver::name_server::TokioRuntimeProvider;
+use hickory_resolver::AsyncResolver;
+use hyper_util::client::legacy::connect::dns::GaiResolver;
+use hyper_util::client::legacy::connect::dns::Name;
+use tokio::task::JoinHandle;
+use tower::Service;
+
+#[derive(Clone, Debug)]
+pub enum Resolver {
+ /// A resolver using blocking `getaddrinfo` calls in a threadpool.
+ Gai(GaiResolver),
+ /// hickory-resolver's userspace resolver.
+ Hickory(AsyncResolver<GenericConnector<TokioRuntimeProvider>>),
+}
+
+impl Default for Resolver {
+ fn default() -> Self {
+ Self::gai()
+ }
+}
+
+impl Resolver {
+ pub fn gai() -> Self {
+ Self::Gai(GaiResolver::new())
+ }
+
+ /// Create an [`AsyncResolver`] from system conf.
+ pub fn hickory() -> Result<Self, ResolveError> {
+ Ok(Self::Hickory(
+ hickory_resolver::AsyncResolver::tokio_from_system_conf()?,
+ ))
+ }
+
+ pub fn hickory_from_async_resolver(
+ resolver: AsyncResolver<GenericConnector<TokioRuntimeProvider>>,
+ ) -> Self {
+ Self::Hickory(resolver)
+ }
+}
+
+type SocketAddrs = vec::IntoIter<SocketAddr>;
+
+pub struct ResolveFut {
+ inner: JoinHandle<Result<SocketAddrs, io::Error>>,
+}
+
+impl Future for ResolveFut {
+ type Output = Result<SocketAddrs, io::Error>;
+
+ fn poll(
+ mut self: Pin<&mut Self>,
+ cx: &mut task::Context<'_>,
+ ) -> Poll<Self::Output> {
+ Pin::new(&mut self.inner).poll(cx).map(|res| match res {
+ Ok(Ok(addrs)) => Ok(addrs),
+ Ok(Err(e)) => Err(e),
+ Err(join_err) => {
+ if join_err.is_cancelled() {
+ Err(io::Error::new(io::ErrorKind::Interrupted, join_err))
+ } else {
+ Err(io::Error::new(io::ErrorKind::Other, join_err))
+ }
+ }
+ })
+ }
+}
+
+impl Service<Name> for Resolver {
+ type Response = SocketAddrs;
+ type Error = io::Error;
+ type Future = ResolveFut;
+
+ fn poll_ready(
+ &mut self,
+ _cx: &mut task::Context<'_>,
+ ) -> Poll<Result<(), io::Error>> {
+ Poll::Ready(Ok(()))
+ }
+
+ fn call(&mut self, name: Name) -> Self::Future {
+ let task = match self {
+ Resolver::Gai(gai_resolver) => {
+ let mut resolver = gai_resolver.clone();
+ tokio::spawn(async move {
+ let result = resolver.call(name).await?;
+ let x: Vec<_> = result.into_iter().collect();
+ let iter: SocketAddrs = x.into_iter();
+ Ok(iter)
+ })
+ }
+ Resolver::Hickory(async_resolver) => {
+ let resolver = async_resolver.clone();
+ tokio::spawn(async move {
+ let result = resolver.lookup_ip(name.as_str()).await?;
+
+ let x: Vec<_> =
+ result.into_iter().map(|x| SocketAddr::new(x, 0)).collect();
+ let iter: SocketAddrs = x.into_iter();
+ Ok(iter)
+ })
+ }
+ };
+ ResolveFut { inner: task }
+ }
+}
diff --git a/ext/fetch/fs_fetch_handler.rs b/ext/fetch/fs_fetch_handler.rs
index 4c2b81f35..c236dd9c6 100644
--- a/ext/fetch/fs_fetch_handler.rs
+++ b/ext/fetch/fs_fetch_handler.rs
@@ -4,7 +4,6 @@ use crate::CancelHandle;
use crate::CancelableResponseFuture;
use crate::FetchHandler;
-use deno_core::error::type_error;
use deno_core::futures::FutureExt;
use deno_core::futures::TryFutureExt;
use deno_core::futures::TryStreamExt;
@@ -42,9 +41,7 @@ impl FetchHandler for FsFetchHandler {
.map_err(|_| ())?;
Ok::<_, ()>(response)
}
- .map_err(move |_| {
- type_error("NetworkError when attempting to fetch resource")
- })
+ .map_err(move |_| super::FetchError::NetworkError)
.or_cancel(&cancel_handle)
.boxed_local();
diff --git a/ext/fetch/lib.deno_fetch.d.ts b/ext/fetch/lib.deno_fetch.d.ts
index d219a3859..8614dec89 100644
--- a/ext/fetch/lib.deno_fetch.d.ts
+++ b/ext/fetch/lib.deno_fetch.d.ts
@@ -163,6 +163,8 @@ type BodyInit =
| FormData
| URLSearchParams
| ReadableStream<Uint8Array>
+ | Iterable<Uint8Array>
+ | AsyncIterable<Uint8Array>
| string;
/** @category Fetch */
type RequestDestination =
diff --git a/ext/fetch/lib.rs b/ext/fetch/lib.rs
index 88f303852..c8e93b9fe 100644
--- a/ext/fetch/lib.rs
+++ b/ext/fetch/lib.rs
@@ -1,5 +1,6 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
+pub mod dns;
mod fs_fetch_handler;
mod proxy;
#[cfg(test)]
@@ -17,10 +18,6 @@ use std::sync::Arc;
use std::task::Context;
use std::task::Poll;
-use deno_core::anyhow::anyhow;
-use deno_core::anyhow::Error;
-use deno_core::error::type_error;
-use deno_core::error::AnyError;
use deno_core::futures::stream::Peekable;
use deno_core::futures::Future;
use deno_core::futures::FutureExt;
@@ -28,6 +25,7 @@ use deno_core::futures::Stream;
use deno_core::futures::StreamExt;
use deno_core::futures::TryFutureExt;
use deno_core::op2;
+use deno_core::url;
use deno_core::url::Url;
use deno_core::AsyncRefCell;
use deno_core::AsyncResult;
@@ -42,6 +40,7 @@ use deno_core::OpState;
use deno_core::RcRef;
use deno_core::Resource;
use deno_core::ResourceId;
+use deno_permissions::PermissionCheckError;
use deno_tls::rustls::RootCertStore;
use deno_tls::Proxy;
use deno_tls::RootCertStoreProvider;
@@ -68,6 +67,7 @@ use http_body_util::BodyExt;
use hyper::body::Frame;
use hyper_util::client::legacy::connect::HttpConnector;
use hyper_util::client::legacy::connect::HttpInfo;
+use hyper_util::client::legacy::Builder as HyperClientBuilder;
use hyper_util::rt::TokioExecutor;
use hyper_util::rt::TokioTimer;
use serde::Deserialize;
@@ -86,16 +86,30 @@ pub struct Options {
pub user_agent: String,
pub root_cert_store_provider: Option<Arc<dyn RootCertStoreProvider>>,
pub proxy: Option<Proxy>,
+ /// A callback to customize HTTP client configuration.
+ ///
+ /// The settings applied with this hook may be overridden by the options
+ /// provided through `Deno.createHttpClient()` API. For instance, if the hook
+ /// calls [`hyper_util::client::legacy::Builder::pool_max_idle_per_host`] with
+ /// a value of 99, and a user calls `Deno.createHttpClient({ poolMaxIdlePerHost: 42 })`,
+ /// the value that will take effect is 42.
+ ///
+ /// For more info on what can be configured, see [`hyper_util::client::legacy::Builder`].
+ pub client_builder_hook: Option<fn(HyperClientBuilder) -> HyperClientBuilder>,
#[allow(clippy::type_complexity)]
- pub request_builder_hook:
- Option<fn(&mut http::Request<ReqBody>) -> Result<(), AnyError>>,
+ pub request_builder_hook: Option<
+ fn(&mut http::Request<ReqBody>) -> Result<(), deno_core::error::AnyError>,
+ >,
pub unsafely_ignore_certificate_errors: Option<Vec<String>>,
pub client_cert_chain_and_key: TlsKeys,
pub file_fetch_handler: Rc<dyn FetchHandler>,
+ pub resolver: dns::Resolver,
}
impl Options {
- pub fn root_cert_store(&self) -> Result<Option<RootCertStore>, AnyError> {
+ pub fn root_cert_store(
+ &self,
+ ) -> Result<Option<RootCertStore>, deno_core::error::AnyError> {
Ok(match &self.root_cert_store_provider {
Some(provider) => Some(provider.get_or_try_init()?.clone()),
None => None,
@@ -109,10 +123,12 @@ impl Default for Options {
user_agent: "".to_string(),
root_cert_store_provider: None,
proxy: None,
+ client_builder_hook: None,
request_builder_hook: None,
unsafely_ignore_certificate_errors: None,
client_cert_chain_and_key: TlsKeys::Null,
file_fetch_handler: Rc::new(DefaultFileFetchHandler),
+ resolver: dns::Resolver::default(),
}
}
}
@@ -144,6 +160,51 @@ deno_core::extension!(deno_fetch,
},
);
+#[derive(Debug, thiserror::Error)]
+pub enum FetchError {
+ #[error(transparent)]
+ Resource(deno_core::error::AnyError),
+ #[error(transparent)]
+ Permission(#[from] PermissionCheckError),
+ #[error("NetworkError when attempting to fetch resource")]
+ NetworkError,
+ #[error("Fetching files only supports the GET method: received {0}")]
+ FsNotGet(Method),
+ #[error("Invalid URL {0}")]
+ InvalidUrl(Url),
+ #[error(transparent)]
+ InvalidHeaderName(#[from] http::header::InvalidHeaderName),
+ #[error(transparent)]
+ InvalidHeaderValue(#[from] http::header::InvalidHeaderValue),
+ #[error("{0:?}")]
+ DataUrl(data_url::DataUrlError),
+ #[error("{0:?}")]
+ Base64(data_url::forgiving_base64::InvalidBase64),
+ #[error("Blob for the given URL not found.")]
+ BlobNotFound,
+ #[error("Url scheme '{0}' not supported")]
+ SchemeNotSupported(String),
+ #[error("Request was cancelled")]
+ RequestCanceled,
+ #[error(transparent)]
+ Http(#[from] http::Error),
+ #[error(transparent)]
+ ClientCreate(#[from] HttpClientCreateError),
+ #[error(transparent)]
+ Url(#[from] url::ParseError),
+ #[error(transparent)]
+ Method(#[from] http::method::InvalidMethod),
+ #[error(transparent)]
+ ClientSend(#[from] ClientSendError),
+ #[error(transparent)]
+ RequestBuilderHook(deno_core::error::AnyError),
+ #[error(transparent)]
+ Io(#[from] std::io::Error),
+ // Only used for node upgrade
+ #[error(transparent)]
+ Hyper(#[from] hyper::Error),
+}
+
pub type CancelableResponseFuture =
Pin<Box<dyn Future<Output = CancelableResponseResult>>>;
@@ -170,11 +231,7 @@ impl FetchHandler for DefaultFileFetchHandler {
_state: &mut OpState,
_url: &Url,
) -> (CancelableResponseFuture, Option<Rc<CancelHandle>>) {
- let fut = async move {
- Ok(Err(type_error(
- "NetworkError when attempting to fetch resource",
- )))
- };
+ let fut = async move { Ok(Err(FetchError::NetworkError)) };
(Box::pin(fut), None)
}
}
@@ -191,7 +248,7 @@ pub struct FetchReturn {
pub fn get_or_create_client_from_state(
state: &mut OpState,
-) -> Result<Client, AnyError> {
+) -> Result<Client, HttpClientCreateError> {
if let Some(client) = state.try_borrow::<Client>() {
Ok(client.clone())
} else {
@@ -204,13 +261,16 @@ pub fn get_or_create_client_from_state(
pub fn create_client_from_options(
options: &Options,
-) -> Result<Client, AnyError> {
+) -> Result<Client, HttpClientCreateError> {
create_http_client(
&options.user_agent,
CreateHttpClientOptions {
- root_cert_store: options.root_cert_store()?,
+ root_cert_store: options
+ .root_cert_store()
+ .map_err(HttpClientCreateError::RootCertStore)?,
ca_certs: vec![],
proxy: options.proxy.clone(),
+ dns_resolver: options.resolver.clone(),
unsafely_ignore_certificate_errors: options
.unsafely_ignore_certificate_errors
.clone(),
@@ -223,6 +283,7 @@ pub fn create_client_from_options(
pool_idle_timeout: None,
http1: true,
http2: true,
+ client_builder_hook: options.client_builder_hook,
},
)
}
@@ -230,7 +291,9 @@ pub fn create_client_from_options(
#[allow(clippy::type_complexity)]
pub struct ResourceToBodyAdapter(
Rc<dyn Resource>,
- Option<Pin<Box<dyn Future<Output = Result<BufView, Error>>>>>,
+ Option<
+ Pin<Box<dyn Future<Output = Result<BufView, deno_core::error::AnyError>>>>,
+ >,
);
impl ResourceToBodyAdapter {
@@ -246,7 +309,7 @@ unsafe impl Send for ResourceToBodyAdapter {}
unsafe impl Sync for ResourceToBodyAdapter {}
impl Stream for ResourceToBodyAdapter {
- type Item = Result<Bytes, Error>;
+ type Item = Result<Bytes, deno_core::error::AnyError>;
fn poll_next(
self: Pin<&mut Self>,
@@ -276,7 +339,7 @@ impl Stream for ResourceToBodyAdapter {
impl hyper::body::Body for ResourceToBodyAdapter {
type Data = Bytes;
- type Error = Error;
+ type Error = deno_core::error::AnyError;
fn poll_frame(
self: Pin<&mut Self>,
@@ -301,13 +364,13 @@ pub trait FetchPermissions {
&mut self,
url: &Url,
api_name: &str,
- ) -> Result<(), AnyError>;
+ ) -> Result<(), PermissionCheckError>;
#[must_use = "the resolved return value to mitigate time-of-check to time-of-use issues"]
fn check_read<'a>(
&mut self,
p: &'a Path,
api_name: &str,
- ) -> Result<Cow<'a, Path>, AnyError>;
+ ) -> Result<Cow<'a, Path>, PermissionCheckError>;
}
impl FetchPermissions for deno_permissions::PermissionsContainer {
@@ -316,7 +379,7 @@ impl FetchPermissions for deno_permissions::PermissionsContainer {
&mut self,
url: &Url,
api_name: &str,
- ) -> Result<(), AnyError> {
+ ) -> Result<(), PermissionCheckError> {
deno_permissions::PermissionsContainer::check_net_url(self, url, api_name)
}
@@ -325,7 +388,7 @@ impl FetchPermissions for deno_permissions::PermissionsContainer {
&mut self,
path: &'a Path,
api_name: &str,
- ) -> Result<Cow<'a, Path>, AnyError> {
+ ) -> Result<Cow<'a, Path>, PermissionCheckError> {
deno_permissions::PermissionsContainer::check_read_path(
self,
path,
@@ -346,12 +409,15 @@ pub fn op_fetch<FP>(
has_body: bool,
#[buffer] data: Option<JsBuffer>,
#[smi] resource: Option<ResourceId>,
-) -> Result<FetchReturn, AnyError>
+) -> Result<FetchReturn, FetchError>
where
FP: FetchPermissions + 'static,
{
let (client, allow_host) = if let Some(rid) = client_rid {
- let r = state.resource_table.get::<HttpClientResource>(rid)?;
+ let r = state
+ .resource_table
+ .get::<HttpClientResource>(rid)
+ .map_err(FetchError::Resource)?;
(r.client.clone(), r.allow_host)
} else {
(get_or_create_client_from_state(state)?, false)
@@ -364,9 +430,7 @@ where
let scheme = url.scheme();
let (request_rid, cancel_handle_rid) = match scheme {
"file" => {
- let path = url.to_file_path().map_err(|_| {
- type_error("NetworkError when attempting to fetch resource")
- })?;
+ let path = url.to_file_path().map_err(|_| FetchError::NetworkError)?;
let permissions = state.borrow_mut::<FP>();
let path = permissions.check_read(&path, "fetch()")?;
let url = match path {
@@ -375,9 +439,7 @@ where
};
if method != Method::GET {
- return Err(type_error(format!(
- "Fetching files only supports the GET method: received {method}"
- )));
+ return Err(FetchError::FsNotGet(method));
}
let Options {
@@ -402,7 +464,7 @@ where
let uri = url
.as_str()
.parse::<Uri>()
- .map_err(|_| type_error(format!("Invalid URL {url}")))?;
+ .map_err(|_| FetchError::InvalidUrl(url.clone()))?;
let mut con_len = None;
let body = if has_body {
@@ -416,7 +478,10 @@ where
.boxed()
}
(_, Some(resource)) => {
- let resource = state.resource_table.take_any(resource)?;
+ let resource = state
+ .resource_table
+ .take_any(resource)
+ .map_err(FetchError::Resource)?;
match resource.size_hint() {
(body_size, Some(n)) if body_size == n && body_size > 0 => {
con_len = Some(body_size);
@@ -453,10 +518,8 @@ where
}
for (key, value) in headers {
- let name = HeaderName::from_bytes(&key)
- .map_err(|err| type_error(err.to_string()))?;
- let v = HeaderValue::from_bytes(&value)
- .map_err(|err| type_error(err.to_string()))?;
+ let name = HeaderName::from_bytes(&key)?;
+ let v = HeaderValue::from_bytes(&value)?;
if (name != HOST || allow_host) && name != CONTENT_LENGTH {
request.headers_mut().append(name, v);
@@ -474,20 +537,18 @@ where
let options = state.borrow::<Options>();
if let Some(request_builder_hook) = options.request_builder_hook {
request_builder_hook(&mut request)
- .map_err(|err| type_error(err.to_string()))?;
+ .map_err(FetchError::RequestBuilderHook)?;
}
let cancel_handle = CancelHandle::new_rc();
let cancel_handle_ = cancel_handle.clone();
- let fut = {
- async move {
- client
- .send(request)
- .map_err(Into::into)
- .or_cancel(cancel_handle_)
- .await
- }
+ let fut = async move {
+ client
+ .send(request)
+ .map_err(Into::into)
+ .or_cancel(cancel_handle_)
+ .await
};
let request_rid = state.resource_table.add(FetchRequestResource {
@@ -501,12 +562,10 @@ where
(request_rid, Some(cancel_handle_rid))
}
"data" => {
- let data_url = DataUrl::process(url.as_str())
- .map_err(|e| type_error(format!("{e:?}")))?;
+ let data_url =
+ DataUrl::process(url.as_str()).map_err(FetchError::DataUrl)?;
- let (body, _) = data_url
- .decode_to_vec()
- .map_err(|e| type_error(format!("{e:?}")))?;
+ let (body, _) = data_url.decode_to_vec().map_err(FetchError::Base64)?;
let body = http_body_util::Full::new(body.into())
.map_err(|never| match never {})
.boxed();
@@ -528,11 +587,9 @@ where
"blob" => {
// Blob URL resolution happens in the JS side of fetch. If we got here is
// because the URL isn't an object URL.
- return Err(type_error("Blob for the given URL not found."));
- }
- _ => {
- return Err(type_error(format!("Url scheme '{scheme}' not supported")))
+ return Err(FetchError::BlobNotFound);
}
+ _ => return Err(FetchError::SchemeNotSupported(scheme.to_string())),
};
Ok(FetchReturn {
@@ -564,11 +621,12 @@ pub struct FetchResponse {
pub async fn op_fetch_send(
state: Rc<RefCell<OpState>>,
#[smi] rid: ResourceId,
-) -> Result<FetchResponse, AnyError> {
+) -> Result<FetchResponse, FetchError> {
let request = state
.borrow_mut()
.resource_table
- .take::<FetchRequestResource>(rid)?;
+ .take::<FetchRequestResource>(rid)
+ .map_err(FetchError::Resource)?;
let request = Rc::try_unwrap(request)
.ok()
@@ -581,22 +639,23 @@ pub async fn op_fetch_send(
// If any error in the chain is a hyper body error, return that as a special result we can use to
// reconstruct an error chain (eg: `new TypeError(..., { cause: new Error(...) })`).
// TODO(mmastrac): it would be a lot easier if we just passed a v8::Global through here instead
- let mut err_ref: &dyn std::error::Error = err.as_ref();
- while let Some(err_src) = std::error::Error::source(err_ref) {
- if let Some(err_src) = err_src.downcast_ref::<hyper::Error>() {
- if let Some(err_src) = std::error::Error::source(err_src) {
- return Ok(FetchResponse {
- error: Some((err.to_string(), err_src.to_string())),
- ..Default::default()
- });
+
+ if let FetchError::ClientSend(err_src) = &err {
+ if let Some(client_err) = std::error::Error::source(&err_src.source) {
+ if let Some(err_src) = client_err.downcast_ref::<hyper::Error>() {
+ if let Some(err_src) = std::error::Error::source(err_src) {
+ return Ok(FetchResponse {
+ error: Some((err.to_string(), err_src.to_string())),
+ ..Default::default()
+ });
+ }
}
}
- err_ref = err_src;
}
- return Err(type_error(err.to_string()));
+ return Err(err);
}
- Err(_) => return Err(type_error("Request was cancelled")),
+ Err(_) => return Err(FetchError::RequestCanceled),
};
let status = res.status();
@@ -636,7 +695,7 @@ pub async fn op_fetch_send(
}
type CancelableResponseResult =
- Result<Result<http::Response<ResBody>, AnyError>, Canceled>;
+ Result<Result<http::Response<ResBody>, FetchError>, Canceled>;
pub struct FetchRequestResource {
pub future: Pin<Box<dyn Future<Output = CancelableResponseResult>>>,
@@ -691,7 +750,7 @@ impl FetchResponseResource {
}
}
- pub async fn upgrade(self) -> Result<hyper::upgrade::Upgraded, AnyError> {
+ pub async fn upgrade(self) -> Result<hyper::upgrade::Upgraded, hyper::Error> {
let reader = self.response_reader.into_inner();
match reader {
FetchResponseReader::Start(resp) => Ok(hyper::upgrade::on(resp).await?),
@@ -746,7 +805,9 @@ impl Resource for FetchResponseResource {
// safely call `await` on it without creating a race condition.
Some(_) => match reader.as_mut().next().await.unwrap() {
Ok(chunk) => assert!(chunk.is_empty()),
- Err(err) => break Err(type_error(err.to_string())),
+ Err(err) => {
+ break Err(deno_core::error::type_error(err.to_string()))
+ }
},
None => break Ok(BufView::empty()),
}
@@ -791,6 +852,8 @@ pub struct CreateHttpClientArgs {
proxy: Option<Proxy>,
pool_max_idle_per_host: Option<usize>,
pool_idle_timeout: Option<serde_json::Value>,
+ #[serde(default)]
+ use_hickory_resolver: bool,
#[serde(default = "default_true")]
http1: bool,
#[serde(default = "default_true")]
@@ -809,7 +872,7 @@ pub fn op_fetch_custom_client<FP>(
state: &mut OpState,
#[serde] args: CreateHttpClientArgs,
#[cppgc] tls_keys: &TlsKeysHolder,
-) -> Result<ResourceId, AnyError>
+) -> Result<ResourceId, FetchError>
where
FP: FetchPermissions + 'static,
{
@@ -829,9 +892,18 @@ where
let client = create_http_client(
&options.user_agent,
CreateHttpClientOptions {
- root_cert_store: options.root_cert_store()?,
+ root_cert_store: options
+ .root_cert_store()
+ .map_err(HttpClientCreateError::RootCertStore)?,
ca_certs,
proxy: args.proxy,
+ dns_resolver: if args.use_hickory_resolver {
+ dns::Resolver::hickory()
+ .map_err(deno_core::error::AnyError::new)
+ .map_err(FetchError::Resource)?
+ } else {
+ dns::Resolver::default()
+ },
unsafely_ignore_certificate_errors: options
.unsafely_ignore_certificate_errors
.clone(),
@@ -849,6 +921,7 @@ where
),
http1: args.http1,
http2: args.http2,
+ client_builder_hook: options.client_builder_hook,
},
)?;
@@ -863,12 +936,14 @@ pub struct CreateHttpClientOptions {
pub root_cert_store: Option<RootCertStore>,
pub ca_certs: Vec<Vec<u8>>,
pub proxy: Option<Proxy>,
+ pub dns_resolver: dns::Resolver,
pub unsafely_ignore_certificate_errors: Option<Vec<String>>,
pub client_cert_chain_and_key: Option<TlsKey>,
pub pool_max_idle_per_host: Option<usize>,
pub pool_idle_timeout: Option<Option<u64>>,
pub http1: bool,
pub http2: bool,
+ pub client_builder_hook: Option<fn(HyperClientBuilder) -> HyperClientBuilder>,
}
impl Default for CreateHttpClientOptions {
@@ -877,29 +952,46 @@ impl Default for CreateHttpClientOptions {
root_cert_store: None,
ca_certs: vec![],
proxy: None,
+ dns_resolver: dns::Resolver::default(),
unsafely_ignore_certificate_errors: None,
client_cert_chain_and_key: None,
pool_max_idle_per_host: None,
pool_idle_timeout: None,
http1: true,
http2: true,
+ client_builder_hook: None,
}
}
}
+#[derive(Debug, thiserror::Error)]
+pub enum HttpClientCreateError {
+ #[error(transparent)]
+ Tls(deno_tls::TlsError),
+ #[error("Illegal characters in User-Agent: received {0}")]
+ InvalidUserAgent(String),
+ #[error("invalid proxy url")]
+ InvalidProxyUrl,
+ #[error("Cannot create Http Client: either `http1` or `http2` needs to be set to true")]
+ HttpVersionSelectionInvalid,
+ #[error(transparent)]
+ RootCertStore(deno_core::error::AnyError),
+}
+
/// Create new instance of async Client. This client supports
/// proxies and doesn't follow redirects.
pub fn create_http_client(
user_agent: &str,
options: CreateHttpClientOptions,
-) -> Result<Client, AnyError> {
+) -> Result<Client, HttpClientCreateError> {
let mut tls_config = deno_tls::create_client_config(
options.root_cert_store,
options.ca_certs,
options.unsafely_ignore_certificate_errors,
options.client_cert_chain_and_key.into(),
deno_tls::SocketUse::Http,
- )?;
+ )
+ .map_err(HttpClientCreateError::Tls)?;
// Proxy TLS should not send ALPN
tls_config.alpn_protocols.clear();
@@ -915,24 +1007,26 @@ pub fn create_http_client(
tls_config.alpn_protocols = alpn_protocols;
let tls_config = Arc::from(tls_config);
- let mut http_connector = HttpConnector::new();
+ let mut http_connector =
+ HttpConnector::new_with_resolver(options.dns_resolver.clone());
http_connector.enforce_http(false);
let user_agent = user_agent.parse::<HeaderValue>().map_err(|_| {
- type_error(format!(
- "Illegal characters in User-Agent: received {user_agent}"
- ))
+ HttpClientCreateError::InvalidUserAgent(user_agent.to_string())
})?;
- let mut builder =
- hyper_util::client::legacy::Builder::new(TokioExecutor::new());
+ let mut builder = HyperClientBuilder::new(TokioExecutor::new());
builder.timer(TokioTimer::new());
builder.pool_timer(TokioTimer::new());
+ if let Some(client_builder_hook) = options.client_builder_hook {
+ builder = client_builder_hook(builder);
+ }
+
let mut proxies = proxy::from_env();
if let Some(proxy) = options.proxy {
let mut intercept = proxy::Intercept::all(&proxy.url)
- .ok_or_else(|| type_error("invalid proxy url"))?;
+ .ok_or_else(|| HttpClientCreateError::InvalidProxyUrl)?;
if let Some(basic_auth) = &proxy.basic_auth {
intercept.set_auth(&basic_auth.username, &basic_auth.password);
}
@@ -964,7 +1058,7 @@ pub fn create_http_client(
}
(true, true) => {}
(false, false) => {
- return Err(type_error("Cannot create Http Client: either `http1` or `http2` needs to be set to true"))
+ return Err(HttpClientCreateError::HttpVersionSelectionInvalid)
}
}
@@ -980,10 +1074,8 @@ pub fn create_http_client(
#[op2]
#[serde]
-pub fn op_utf8_to_byte_string(
- #[string] input: String,
-) -> Result<ByteString, AnyError> {
- Ok(input.into())
+pub fn op_utf8_to_byte_string(#[string] input: String) -> ByteString {
+ input.into()
}
#[derive(Clone, Debug)]
@@ -994,7 +1086,7 @@ pub struct Client {
user_agent: HeaderValue,
}
-type Connector = proxy::ProxyConnector<HttpConnector>;
+type Connector = proxy::ProxyConnector<HttpConnector<dns::Resolver>>;
// clippy is wrong here
#[allow(clippy::declare_interior_mutable_const)]
@@ -1003,7 +1095,7 @@ const STAR_STAR: HeaderValue = HeaderValue::from_static("*/*");
#[derive(Debug)]
pub struct ClientSendError {
uri: Uri,
- source: hyper_util::client::legacy::Error,
+ pub source: hyper_util::client::legacy::Error,
}
impl ClientSendError {
@@ -1075,12 +1167,14 @@ impl Client {
.oneshot(req)
.await
.map_err(|e| ClientSendError { uri, source: e })?;
- Ok(resp.map(|b| b.map_err(|e| anyhow!(e)).boxed()))
+ Ok(resp.map(|b| b.map_err(|e| deno_core::anyhow::anyhow!(e)).boxed()))
}
}
-pub type ReqBody = http_body_util::combinators::BoxBody<Bytes, Error>;
-pub type ResBody = http_body_util::combinators::BoxBody<Bytes, Error>;
+pub type ReqBody =
+ http_body_util::combinators::BoxBody<Bytes, deno_core::error::AnyError>;
+pub type ResBody =
+ http_body_util::combinators::BoxBody<Bytes, deno_core::error::AnyError>;
/// Copied from https://github.com/seanmonstar/reqwest/blob/b9d62a0323d96f11672a61a17bf8849baec00275/src/async_impl/request.rs#L572
/// Check the request URL for a "username:password" type authority, and if
diff --git a/ext/fetch/tests.rs b/ext/fetch/tests.rs
index dad1b34a9..e053c6b1c 100644
--- a/ext/fetch/tests.rs
+++ b/ext/fetch/tests.rs
@@ -1,6 +1,8 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
use std::net::SocketAddr;
+use std::sync::atomic::AtomicUsize;
+use std::sync::atomic::Ordering::SeqCst;
use std::sync::Arc;
use bytes::Bytes;
@@ -10,6 +12,8 @@ use http_body_util::BodyExt;
use tokio::io::AsyncReadExt;
use tokio::io::AsyncWriteExt;
+use crate::dns;
+
use super::create_http_client;
use super::CreateHttpClientOptions;
@@ -17,6 +21,53 @@ static EXAMPLE_CRT: &[u8] = include_bytes!("../tls/testdata/example1_cert.der");
static EXAMPLE_KEY: &[u8] =
include_bytes!("../tls/testdata/example1_prikey.der");
+#[test]
+fn test_userspace_resolver() {
+ let thread_counter = Arc::new(AtomicUsize::new(0));
+
+ let thread_counter_ref = thread_counter.clone();
+ let rt = tokio::runtime::Builder::new_current_thread()
+ .enable_all()
+ .on_thread_start(move || {
+ thread_counter_ref.fetch_add(1, SeqCst);
+ })
+ .build()
+ .unwrap();
+
+ rt.block_on(async move {
+ assert_eq!(thread_counter.load(SeqCst), 0);
+ let src_addr = create_https_server(true).await;
+ assert_eq!(src_addr.ip().to_string(), "127.0.0.1");
+ // use `localhost` to ensure dns step happens.
+ let addr = format!("localhost:{}", src_addr.port());
+
+ let hickory = hickory_resolver::AsyncResolver::tokio(
+ Default::default(),
+ Default::default(),
+ );
+
+ assert_eq!(thread_counter.load(SeqCst), 0);
+ rust_test_client_with_resolver(
+ None,
+ addr.clone(),
+ "https",
+ http::Version::HTTP_2,
+ dns::Resolver::hickory_from_async_resolver(hickory),
+ )
+ .await;
+ assert_eq!(thread_counter.load(SeqCst), 0, "userspace resolver shouldn't spawn new threads.");
+ rust_test_client_with_resolver(
+ None,
+ addr.clone(),
+ "https",
+ http::Version::HTTP_2,
+ dns::Resolver::gai(),
+ )
+ .await;
+ assert_eq!(thread_counter.load(SeqCst), 1, "getaddrinfo is called inside spawn_blocking, so tokio spawn a new worker thread for it.");
+ });
+}
+
#[tokio::test]
async fn test_https_proxy_http11() {
let src_addr = create_https_server(false).await;
@@ -52,27 +103,30 @@ async fn test_socks_proxy_h2() {
run_test_client(prx_addr, src_addr, "socks5", http::Version::HTTP_2).await;
}
-async fn run_test_client(
- prx_addr: SocketAddr,
- src_addr: SocketAddr,
+async fn rust_test_client_with_resolver(
+ prx_addr: Option<SocketAddr>,
+ src_addr: String,
proto: &str,
ver: http::Version,
+ resolver: dns::Resolver,
) {
let client = create_http_client(
"fetch/test",
CreateHttpClientOptions {
root_cert_store: None,
ca_certs: vec![],
- proxy: Some(deno_tls::Proxy {
- url: format!("{}://{}", proto, prx_addr),
+ proxy: prx_addr.map(|p| deno_tls::Proxy {
+ url: format!("{}://{}", proto, p),
basic_auth: None,
}),
unsafely_ignore_certificate_errors: Some(vec![]),
client_cert_chain_and_key: None,
pool_max_idle_per_host: None,
pool_idle_timeout: None,
+ dns_resolver: resolver,
http1: true,
http2: true,
+ client_builder_hook: None,
},
)
.unwrap();
@@ -92,6 +146,22 @@ async fn run_test_client(
assert_eq!(hello, "hello from server");
}
+async fn run_test_client(
+ prx_addr: SocketAddr,
+ src_addr: SocketAddr,
+ proto: &str,
+ ver: http::Version,
+) {
+ rust_test_client_with_resolver(
+ Some(prx_addr),
+ src_addr.to_string(),
+ proto,
+ ver,
+ Default::default(),
+ )
+ .await
+}
+
async fn create_https_server(allow_h2: bool) -> SocketAddr {
let mut tls_config = deno_tls::rustls::server::ServerConfig::builder()
.with_no_client_auth()
diff --git a/ext/ffi/Cargo.toml b/ext/ffi/Cargo.toml
index 23a2aee1c..295e8be84 100644
--- a/ext/ffi/Cargo.toml
+++ b/ext/ffi/Cargo.toml
@@ -2,7 +2,7 @@
[package]
name = "deno_ffi"
-version = "0.158.0"
+version = "0.164.0"
authors.workspace = true
edition.workspace = true
license.workspace = true
@@ -21,9 +21,12 @@ dynasmrt = "1.2.3"
libffi = "=3.2.0"
libffi-sys = "=2.3.0"
log.workspace = true
+num-bigint.workspace = true
serde.workspace = true
serde-value = "0.7"
serde_json = "1.0"
+thiserror.workspace = true
+tokio.workspace = true
[target.'cfg(windows)'.dependencies]
winapi = { workspace = true, features = ["errhandlingapi", "minwindef", "ntdef", "winbase", "winnt"] }
diff --git a/ext/ffi/call.rs b/ext/ffi/call.rs
index 3572b9e81..bbff0ee48 100644
--- a/ext/ffi/call.rs
+++ b/ext/ffi/call.rs
@@ -7,23 +7,38 @@ use crate::symbol::NativeType;
use crate::symbol::Symbol;
use crate::FfiPermissions;
use crate::ForeignFunction;
-use deno_core::anyhow::anyhow;
-use deno_core::error::type_error;
-use deno_core::error::AnyError;
use deno_core::op2;
use deno_core::serde_json::Value;
+use deno_core::serde_v8::BigInt as V8BigInt;
use deno_core::serde_v8::ExternalPointer;
use deno_core::unsync::spawn_blocking;
use deno_core::v8;
use deno_core::OpState;
use deno_core::ResourceId;
use libffi::middle::Arg;
+use num_bigint::BigInt;
use serde::Serialize;
use std::cell::RefCell;
use std::ffi::c_void;
use std::future::Future;
use std::rc::Rc;
+#[derive(Debug, thiserror::Error)]
+pub enum CallError {
+ #[error(transparent)]
+ IR(#[from] IRError),
+ #[error("Nonblocking FFI call failed: {0}")]
+ NonblockingCallFailure(#[source] tokio::task::JoinError),
+ #[error("Invalid FFI symbol name: '{0}'")]
+ InvalidSymbol(String),
+ #[error(transparent)]
+ Permission(#[from] deno_permissions::PermissionCheckError),
+ #[error(transparent)]
+ Resource(deno_core::error::AnyError),
+ #[error(transparent)]
+ Callback(#[from] super::CallbackError),
+}
+
// SAFETY: Makes an FFI call
unsafe fn ffi_call_rtype_struct(
cif: &libffi::middle::Cif,
@@ -45,7 +60,7 @@ pub(crate) fn ffi_call_sync<'scope>(
args: v8::FunctionCallbackArguments,
symbol: &Symbol,
out_buffer: Option<OutBuffer>,
-) -> Result<NativeValue, AnyError>
+) -> Result<NativeValue, CallError>
where
'scope: 'scope,
{
@@ -191,6 +206,7 @@ where
#[serde(untagged)]
pub enum FfiValue {
Value(Value),
+ BigInt(V8BigInt),
External(ExternalPointer),
}
@@ -201,7 +217,7 @@ fn ffi_call(
parameter_types: &[NativeType],
result_type: NativeType,
out_buffer: Option<OutBuffer>,
-) -> Result<FfiValue, AnyError> {
+) -> FfiValue {
let call_args: Vec<Arg> = call_args
.iter()
.enumerate()
@@ -214,7 +230,7 @@ fn ffi_call(
// SAFETY: types in the `Cif` match the actual calling convention and
// types of symbol.
unsafe {
- Ok(match result_type {
+ match result_type {
NativeType::Void => {
cif.call::<()>(fun_ptr, &call_args);
FfiValue::Value(Value::from(()))
@@ -240,18 +256,18 @@ fn ffi_call(
NativeType::I32 => {
FfiValue::Value(Value::from(cif.call::<i32>(fun_ptr, &call_args)))
}
- NativeType::U64 => {
- FfiValue::Value(Value::from(cif.call::<u64>(fun_ptr, &call_args)))
- }
- NativeType::I64 => {
- FfiValue::Value(Value::from(cif.call::<i64>(fun_ptr, &call_args)))
- }
- NativeType::USize => {
- FfiValue::Value(Value::from(cif.call::<usize>(fun_ptr, &call_args)))
- }
- NativeType::ISize => {
- FfiValue::Value(Value::from(cif.call::<isize>(fun_ptr, &call_args)))
- }
+ NativeType::U64 => FfiValue::BigInt(V8BigInt::from(BigInt::from(
+ cif.call::<u64>(fun_ptr, &call_args),
+ ))),
+ NativeType::I64 => FfiValue::BigInt(V8BigInt::from(BigInt::from(
+ cif.call::<i64>(fun_ptr, &call_args),
+ ))),
+ NativeType::USize => FfiValue::BigInt(V8BigInt::from(BigInt::from(
+ cif.call::<usize>(fun_ptr, &call_args),
+ ))),
+ NativeType::ISize => FfiValue::BigInt(V8BigInt::from(BigInt::from(
+ cif.call::<isize>(fun_ptr, &call_args),
+ ))),
NativeType::F32 => {
FfiValue::Value(Value::from(cif.call::<f32>(fun_ptr, &call_args)))
}
@@ -267,7 +283,7 @@ fn ffi_call(
ffi_call_rtype_struct(cif, &fun_ptr, call_args, out_buffer.unwrap().0);
FfiValue::Value(Value::Null)
}
- })
+ }
}
}
@@ -280,7 +296,7 @@ pub fn op_ffi_call_ptr_nonblocking<FP>(
#[serde] def: ForeignFunction,
parameters: v8::Local<v8::Array>,
out_buffer: Option<v8::Local<v8::TypedArray>>,
-) -> Result<impl Future<Output = Result<FfiValue, AnyError>>, AnyError>
+) -> Result<impl Future<Output = Result<FfiValue, CallError>>, CallError>
where
FP: FfiPermissions + 'static,
{
@@ -309,7 +325,7 @@ where
Ok(async move {
let result = join_handle
.await
- .map_err(|err| anyhow!("Nonblocking FFI call failed: {}", err))??;
+ .map_err(CallError::NonblockingCallFailure)?;
// SAFETY: Same return type declared to libffi; trust user to have it right beyond that.
Ok(result)
})
@@ -325,16 +341,17 @@ pub fn op_ffi_call_nonblocking(
#[string] symbol: String,
parameters: v8::Local<v8::Array>,
out_buffer: Option<v8::Local<v8::TypedArray>>,
-) -> Result<impl Future<Output = Result<FfiValue, AnyError>>, AnyError> {
+) -> Result<impl Future<Output = Result<FfiValue, CallError>>, CallError> {
let symbol = {
let state = state.borrow();
- let resource = state.resource_table.get::<DynamicLibraryResource>(rid)?;
+ let resource = state
+ .resource_table
+ .get::<DynamicLibraryResource>(rid)
+ .map_err(CallError::Resource)?;
let symbols = &resource.symbols;
*symbols
.get(&symbol)
- .ok_or_else(|| {
- type_error(format!("Invalid FFI symbol name: '{symbol}'"))
- })?
+ .ok_or_else(|| CallError::InvalidSymbol(symbol))?
.clone()
};
@@ -362,7 +379,7 @@ pub fn op_ffi_call_nonblocking(
Ok(async move {
let result = join_handle
.await
- .map_err(|err| anyhow!("Nonblocking FFI call failed: {}", err))??;
+ .map_err(CallError::NonblockingCallFailure)?;
// SAFETY: Same return type declared to libffi; trust user to have it right beyond that.
Ok(result)
})
@@ -377,7 +394,7 @@ pub fn op_ffi_call_ptr<FP>(
#[serde] def: ForeignFunction,
parameters: v8::Local<v8::Array>,
out_buffer: Option<v8::Local<v8::TypedArray>>,
-) -> Result<FfiValue, AnyError>
+) -> Result<FfiValue, CallError>
where
FP: FfiPermissions + 'static,
{
@@ -399,7 +416,7 @@ where
&def.parameters,
def.result.clone(),
out_buffer_ptr,
- )?;
+ );
// SAFETY: Same return type declared to libffi; trust user to have it right beyond that.
Ok(result)
}
diff --git a/ext/ffi/callback.rs b/ext/ffi/callback.rs
index 6fa166f52..29583c800 100644
--- a/ext/ffi/callback.rs
+++ b/ext/ffi/callback.rs
@@ -3,7 +3,6 @@
use crate::symbol::NativeType;
use crate::FfiPermissions;
use crate::ForeignFunction;
-use deno_core::error::AnyError;
use deno_core::op2;
use deno_core::v8;
use deno_core::v8::TryCatch;
@@ -34,6 +33,16 @@ thread_local! {
static LOCAL_THREAD_ID: RefCell<u32> = const { RefCell::new(0) };
}
+#[derive(Debug, thiserror::Error)]
+pub enum CallbackError {
+ #[error(transparent)]
+ Resource(deno_core::error::AnyError),
+ #[error(transparent)]
+ Permission(#[from] deno_permissions::PermissionCheckError),
+ #[error(transparent)]
+ Other(deno_core::error::AnyError),
+}
+
#[derive(Clone)]
pub struct PtrSymbol {
pub cif: libffi::middle::Cif,
@@ -44,7 +53,7 @@ impl PtrSymbol {
pub fn new(
fn_ptr: *mut c_void,
def: &ForeignFunction,
- ) -> Result<Self, AnyError> {
+ ) -> Result<Self, CallbackError> {
let ptr = libffi::middle::CodePtr::from_ptr(fn_ptr as _);
let cif = libffi::middle::Cif::new(
def
@@ -52,8 +61,13 @@ impl PtrSymbol {
.clone()
.into_iter()
.map(libffi::middle::Type::try_from)
- .collect::<Result<Vec<_>, _>>()?,
- def.result.clone().try_into()?,
+ .collect::<Result<Vec<_>, _>>()
+ .map_err(CallbackError::Other)?,
+ def
+ .result
+ .clone()
+ .try_into()
+ .map_err(CallbackError::Other)?,
);
Ok(Self { cif, ptr })
@@ -522,10 +536,12 @@ unsafe fn do_ffi_callback(
pub fn op_ffi_unsafe_callback_ref(
state: Rc<RefCell<OpState>>,
#[smi] rid: ResourceId,
-) -> Result<impl Future<Output = Result<(), AnyError>>, AnyError> {
+) -> Result<impl Future<Output = ()>, CallbackError> {
let state = state.borrow();
- let callback_resource =
- state.resource_table.get::<UnsafeCallbackResource>(rid)?;
+ let callback_resource = state
+ .resource_table
+ .get::<UnsafeCallbackResource>(rid)
+ .map_err(CallbackError::Resource)?;
Ok(async move {
let info: &mut CallbackInfo =
@@ -536,7 +552,6 @@ pub fn op_ffi_unsafe_callback_ref(
.into_future()
.or_cancel(callback_resource.cancel.clone())
.await;
- Ok(())
})
}
@@ -552,7 +567,7 @@ pub fn op_ffi_unsafe_callback_create<FP, 'scope>(
scope: &mut v8::HandleScope<'scope>,
#[serde] args: RegisterCallbackArgs,
cb: v8::Local<v8::Function>,
-) -> Result<v8::Local<'scope, v8::Value>, AnyError>
+) -> Result<v8::Local<'scope, v8::Value>, CallbackError>
where
FP: FfiPermissions + 'static,
{
@@ -593,8 +608,10 @@ where
.parameters
.into_iter()
.map(libffi::middle::Type::try_from)
- .collect::<Result<Vec<_>, _>>()?,
- libffi::middle::Type::try_from(args.result)?,
+ .collect::<Result<Vec<_>, _>>()
+ .map_err(CallbackError::Other)?,
+ libffi::middle::Type::try_from(args.result)
+ .map_err(CallbackError::Other)?,
);
// SAFETY: CallbackInfo is leaked, is not null and stays valid as long as the callback exists.
@@ -624,14 +641,16 @@ pub fn op_ffi_unsafe_callback_close(
state: &mut OpState,
scope: &mut v8::HandleScope,
#[smi] rid: ResourceId,
-) -> Result<(), AnyError> {
+) -> Result<(), CallbackError> {
// SAFETY: This drops the closure and the callback info associated with it.
// Any retained function pointers to the closure become dangling pointers.
// It is up to the user to know that it is safe to call the `close()` on the
// UnsafeCallback instance.
unsafe {
- let callback_resource =
- state.resource_table.take::<UnsafeCallbackResource>(rid)?;
+ let callback_resource = state
+ .resource_table
+ .take::<UnsafeCallbackResource>(rid)
+ .map_err(CallbackError::Resource)?;
let info = Box::from_raw(callback_resource.info);
let _ = v8::Global::from_raw(scope, info.callback);
let _ = v8::Global::from_raw(scope, info.context);
diff --git a/ext/ffi/dlfcn.rs b/ext/ffi/dlfcn.rs
index 10199bf85..26d1b71e9 100644
--- a/ext/ffi/dlfcn.rs
+++ b/ext/ffi/dlfcn.rs
@@ -6,8 +6,6 @@ use crate::symbol::Symbol;
use crate::turbocall;
use crate::turbocall::Turbocall;
use crate::FfiPermissions;
-use deno_core::error::generic_error;
-use deno_core::error::AnyError;
use deno_core::op2;
use deno_core::v8;
use deno_core::GarbageCollected;
@@ -17,10 +15,27 @@ use dlopen2::raw::Library;
use serde::Deserialize;
use serde_value::ValueDeserializer;
use std::borrow::Cow;
+use std::cell::RefCell;
use std::collections::HashMap;
use std::ffi::c_void;
use std::rc::Rc;
+#[derive(Debug, thiserror::Error)]
+pub enum DlfcnError {
+ #[error("Failed to register symbol {symbol}: {error}")]
+ RegisterSymbol {
+ symbol: String,
+ #[source]
+ error: dlopen2::Error,
+ },
+ #[error(transparent)]
+ Dlopen(#[from] dlopen2::Error),
+ #[error(transparent)]
+ Permission(#[from] deno_permissions::PermissionCheckError),
+ #[error(transparent)]
+ Other(deno_core::error::AnyError),
+}
+
pub struct DynamicLibraryResource {
lib: Library,
pub symbols: HashMap<String, Box<Symbol>>,
@@ -37,7 +52,7 @@ impl Resource for DynamicLibraryResource {
}
impl DynamicLibraryResource {
- pub fn get_static(&self, symbol: String) -> Result<*mut c_void, AnyError> {
+ pub fn get_static(&self, symbol: String) -> Result<*mut c_void, DlfcnError> {
// By default, Err returned by this function does not tell
// which symbol wasn't exported. So we'll modify the error
// message to include the name of symbol.
@@ -45,9 +60,7 @@ impl DynamicLibraryResource {
// SAFETY: The obtained T symbol is the size of a pointer.
match unsafe { self.lib.symbol::<*mut c_void>(&symbol) } {
Ok(value) => Ok(Ok(value)),
- Err(err) => Err(generic_error(format!(
- "Failed to register symbol {symbol}: {err}"
- ))),
+ Err(error) => Err(DlfcnError::RegisterSymbol { symbol, error }),
}?
}
}
@@ -114,14 +127,17 @@ pub struct FfiLoadArgs {
#[op2]
pub fn op_ffi_load<'scope, FP>(
scope: &mut v8::HandleScope<'scope>,
- state: &mut OpState,
+ state: Rc<RefCell<OpState>>,
#[serde] args: FfiLoadArgs,
-) -> Result<v8::Local<'scope, v8::Value>, AnyError>
+) -> Result<v8::Local<'scope, v8::Value>, DlfcnError>
where
FP: FfiPermissions + 'static,
{
- let permissions = state.borrow_mut::<FP>();
- let path = permissions.check_partial_with_path(&args.path)?;
+ let path = {
+ let mut state = state.borrow_mut();
+ let permissions = state.borrow_mut::<FP>();
+ permissions.check_partial_with_path(&args.path)?
+ };
let lib = Library::open(&path).map_err(|e| {
dlopen2::Error::OpeningLibraryError(std::io::Error::new(
@@ -152,15 +168,16 @@ where
// SAFETY: The obtained T symbol is the size of a pointer.
match unsafe { resource.lib.symbol::<*const c_void>(symbol) } {
Ok(value) => Ok(value),
- Err(err) => if foreign_fn.optional {
+ Err(error) => if foreign_fn.optional {
let null: v8::Local<v8::Value> = v8::null(scope).into();
let func_key = v8::String::new(scope, &symbol_key).unwrap();
obj.set(scope, func_key.into(), null);
break 'register_symbol;
} else {
- Err(generic_error(format!(
- "Failed to register symbol {symbol}: {err}"
- )))
+ Err(DlfcnError::RegisterSymbol {
+ symbol: symbol.to_owned(),
+ error,
+ })
},
}?;
@@ -171,8 +188,13 @@ where
.clone()
.into_iter()
.map(libffi::middle::Type::try_from)
- .collect::<Result<Vec<_>, _>>()?,
- foreign_fn.result.clone().try_into()?,
+ .collect::<Result<Vec<_>, _>>()
+ .map_err(DlfcnError::Other)?,
+ foreign_fn
+ .result
+ .clone()
+ .try_into()
+ .map_err(DlfcnError::Other)?,
);
let func_key = v8::String::new(scope, &symbol_key).unwrap();
@@ -197,6 +219,7 @@ where
}
}
+ let mut state = state.borrow_mut();
let out = v8::Array::new(scope, 2);
let rid = state.resource_table.add(resource);
let rid_v8 = v8::Integer::new_from_unsigned(scope, rid);
diff --git a/ext/ffi/ir.rs b/ext/ffi/ir.rs
index ebf64945b..2e8084216 100644
--- a/ext/ffi/ir.rs
+++ b/ext/ffi/ir.rs
@@ -1,13 +1,55 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
use crate::symbol::NativeType;
-use deno_core::error::type_error;
-use deno_core::error::AnyError;
use deno_core::v8;
use libffi::middle::Arg;
use std::ffi::c_void;
use std::ptr;
+#[derive(Debug, thiserror::Error)]
+pub enum IRError {
+ #[error("Invalid FFI u8 type, expected boolean")]
+ InvalidU8ExpectedBoolean,
+ #[error("Invalid FFI u8 type, expected unsigned integer")]
+ InvalidU8ExpectedUnsignedInteger,
+ #[error("Invalid FFI i8 type, expected integer")]
+ InvalidI8,
+ #[error("Invalid FFI u16 type, expected unsigned integer")]
+ InvalidU16,
+ #[error("Invalid FFI i16 type, expected integer")]
+ InvalidI16,
+ #[error("Invalid FFI u32 type, expected unsigned integer")]
+ InvalidU32,
+ #[error("Invalid FFI i32 type, expected integer")]
+ InvalidI32,
+ #[error("Invalid FFI u64 type, expected unsigned integer")]
+ InvalidU64,
+ #[error("Invalid FFI i64 type, expected integer")]
+ InvalidI64,
+ #[error("Invalid FFI usize type, expected unsigned integer")]
+ InvalidUsize,
+ #[error("Invalid FFI isize type, expected integer")]
+ InvalidIsize,
+ #[error("Invalid FFI f32 type, expected number")]
+ InvalidF32,
+ #[error("Invalid FFI f64 type, expected number")]
+ InvalidF64,
+ #[error("Invalid FFI pointer type, expected null, or External")]
+ InvalidPointerType,
+ #[error(
+ "Invalid FFI buffer type, expected null, ArrayBuffer, or ArrayBufferView"
+ )]
+ InvalidBufferType,
+ #[error("Invalid FFI ArrayBufferView, expected data in the buffer")]
+ InvalidArrayBufferView,
+ #[error("Invalid FFI ArrayBuffer, expected data in buffer")]
+ InvalidArrayBuffer,
+ #[error("Invalid FFI struct type, expected ArrayBuffer, or ArrayBufferView")]
+ InvalidStructType,
+ #[error("Invalid FFI function type, expected null, or External")]
+ InvalidFunctionType,
+}
+
pub struct OutBuffer(pub *mut u8);
// SAFETY: OutBuffer is allocated by us in 00_ffi.js and is guaranteed to be
@@ -126,9 +168,9 @@ unsafe impl Send for NativeValue {}
#[inline]
pub fn ffi_parse_bool_arg(
arg: v8::Local<v8::Value>,
-) -> Result<NativeValue, AnyError> {
+) -> Result<NativeValue, IRError> {
let bool_value = v8::Local::<v8::Boolean>::try_from(arg)
- .map_err(|_| type_error("Invalid FFI u8 type, expected boolean"))?
+ .map_err(|_| IRError::InvalidU8ExpectedBoolean)?
.is_true();
Ok(NativeValue { bool_value })
}
@@ -136,9 +178,9 @@ pub fn ffi_parse_bool_arg(
#[inline]
pub fn ffi_parse_u8_arg(
arg: v8::Local<v8::Value>,
-) -> Result<NativeValue, AnyError> {
+) -> Result<NativeValue, IRError> {
let u8_value = v8::Local::<v8::Uint32>::try_from(arg)
- .map_err(|_| type_error("Invalid FFI u8 type, expected unsigned integer"))?
+ .map_err(|_| IRError::InvalidU8ExpectedUnsignedInteger)?
.value() as u8;
Ok(NativeValue { u8_value })
}
@@ -146,9 +188,9 @@ pub fn ffi_parse_u8_arg(
#[inline]
pub fn ffi_parse_i8_arg(
arg: v8::Local<v8::Value>,
-) -> Result<NativeValue, AnyError> {
+) -> Result<NativeValue, IRError> {
let i8_value = v8::Local::<v8::Int32>::try_from(arg)
- .map_err(|_| type_error("Invalid FFI i8 type, expected integer"))?
+ .map_err(|_| IRError::InvalidI8)?
.value() as i8;
Ok(NativeValue { i8_value })
}
@@ -156,9 +198,9 @@ pub fn ffi_parse_i8_arg(
#[inline]
pub fn ffi_parse_u16_arg(
arg: v8::Local<v8::Value>,
-) -> Result<NativeValue, AnyError> {
+) -> Result<NativeValue, IRError> {
let u16_value = v8::Local::<v8::Uint32>::try_from(arg)
- .map_err(|_| type_error("Invalid FFI u16 type, expected unsigned integer"))?
+ .map_err(|_| IRError::InvalidU16)?
.value() as u16;
Ok(NativeValue { u16_value })
}
@@ -166,9 +208,9 @@ pub fn ffi_parse_u16_arg(
#[inline]
pub fn ffi_parse_i16_arg(
arg: v8::Local<v8::Value>,
-) -> Result<NativeValue, AnyError> {
+) -> Result<NativeValue, IRError> {
let i16_value = v8::Local::<v8::Int32>::try_from(arg)
- .map_err(|_| type_error("Invalid FFI i16 type, expected integer"))?
+ .map_err(|_| IRError::InvalidI16)?
.value() as i16;
Ok(NativeValue { i16_value })
}
@@ -176,9 +218,9 @@ pub fn ffi_parse_i16_arg(
#[inline]
pub fn ffi_parse_u32_arg(
arg: v8::Local<v8::Value>,
-) -> Result<NativeValue, AnyError> {
+) -> Result<NativeValue, IRError> {
let u32_value = v8::Local::<v8::Uint32>::try_from(arg)
- .map_err(|_| type_error("Invalid FFI u32 type, expected unsigned integer"))?
+ .map_err(|_| IRError::InvalidU32)?
.value();
Ok(NativeValue { u32_value })
}
@@ -186,9 +228,9 @@ pub fn ffi_parse_u32_arg(
#[inline]
pub fn ffi_parse_i32_arg(
arg: v8::Local<v8::Value>,
-) -> Result<NativeValue, AnyError> {
+) -> Result<NativeValue, IRError> {
let i32_value = v8::Local::<v8::Int32>::try_from(arg)
- .map_err(|_| type_error("Invalid FFI i32 type, expected integer"))?
+ .map_err(|_| IRError::InvalidI32)?
.value();
Ok(NativeValue { i32_value })
}
@@ -197,7 +239,7 @@ pub fn ffi_parse_i32_arg(
pub fn ffi_parse_u64_arg(
scope: &mut v8::HandleScope,
arg: v8::Local<v8::Value>,
-) -> Result<NativeValue, AnyError> {
+) -> Result<NativeValue, IRError> {
// Order of checking:
// 1. BigInt: Uncommon and not supported by Fast API, so optimise slow call for this case.
// 2. Number: Common, supported by Fast API, so let that be the optimal case.
@@ -207,9 +249,7 @@ pub fn ffi_parse_u64_arg(
} else if let Ok(value) = v8::Local::<v8::Number>::try_from(arg) {
value.integer_value(scope).unwrap() as u64
} else {
- return Err(type_error(
- "Invalid FFI u64 type, expected unsigned integer",
- ));
+ return Err(IRError::InvalidU64);
};
Ok(NativeValue { u64_value })
}
@@ -218,7 +258,7 @@ pub fn ffi_parse_u64_arg(
pub fn ffi_parse_i64_arg(
scope: &mut v8::HandleScope,
arg: v8::Local<v8::Value>,
-) -> Result<NativeValue, AnyError> {
+) -> Result<NativeValue, IRError> {
// Order of checking:
// 1. BigInt: Uncommon and not supported by Fast API, so optimise slow call for this case.
// 2. Number: Common, supported by Fast API, so let that be the optimal case.
@@ -228,7 +268,7 @@ pub fn ffi_parse_i64_arg(
} else if let Ok(value) = v8::Local::<v8::Number>::try_from(arg) {
value.integer_value(scope).unwrap()
} else {
- return Err(type_error("Invalid FFI i64 type, expected integer"));
+ return Err(IRError::InvalidI64);
};
Ok(NativeValue { i64_value })
}
@@ -237,7 +277,7 @@ pub fn ffi_parse_i64_arg(
pub fn ffi_parse_usize_arg(
scope: &mut v8::HandleScope,
arg: v8::Local<v8::Value>,
-) -> Result<NativeValue, AnyError> {
+) -> Result<NativeValue, IRError> {
// Order of checking:
// 1. BigInt: Uncommon and not supported by Fast API, so optimise slow call for this case.
// 2. Number: Common, supported by Fast API, so let that be the optimal case.
@@ -247,7 +287,7 @@ pub fn ffi_parse_usize_arg(
} else if let Ok(value) = v8::Local::<v8::Number>::try_from(arg) {
value.integer_value(scope).unwrap() as usize
} else {
- return Err(type_error("Invalid FFI usize type, expected integer"));
+ return Err(IRError::InvalidUsize);
};
Ok(NativeValue { usize_value })
}
@@ -256,7 +296,7 @@ pub fn ffi_parse_usize_arg(
pub fn ffi_parse_isize_arg(
scope: &mut v8::HandleScope,
arg: v8::Local<v8::Value>,
-) -> Result<NativeValue, AnyError> {
+) -> Result<NativeValue, IRError> {
// Order of checking:
// 1. BigInt: Uncommon and not supported by Fast API, so optimise slow call for this case.
// 2. Number: Common, supported by Fast API, so let that be the optimal case.
@@ -266,7 +306,7 @@ pub fn ffi_parse_isize_arg(
} else if let Ok(value) = v8::Local::<v8::Number>::try_from(arg) {
value.integer_value(scope).unwrap() as isize
} else {
- return Err(type_error("Invalid FFI isize type, expected integer"));
+ return Err(IRError::InvalidIsize);
};
Ok(NativeValue { isize_value })
}
@@ -274,9 +314,9 @@ pub fn ffi_parse_isize_arg(
#[inline]
pub fn ffi_parse_f32_arg(
arg: v8::Local<v8::Value>,
-) -> Result<NativeValue, AnyError> {
+) -> Result<NativeValue, IRError> {
let f32_value = v8::Local::<v8::Number>::try_from(arg)
- .map_err(|_| type_error("Invalid FFI f32 type, expected number"))?
+ .map_err(|_| IRError::InvalidF32)?
.value() as f32;
Ok(NativeValue { f32_value })
}
@@ -284,9 +324,9 @@ pub fn ffi_parse_f32_arg(
#[inline]
pub fn ffi_parse_f64_arg(
arg: v8::Local<v8::Value>,
-) -> Result<NativeValue, AnyError> {
+) -> Result<NativeValue, IRError> {
let f64_value = v8::Local::<v8::Number>::try_from(arg)
- .map_err(|_| type_error("Invalid FFI f64 type, expected number"))?
+ .map_err(|_| IRError::InvalidF64)?
.value();
Ok(NativeValue { f64_value })
}
@@ -295,15 +335,13 @@ pub fn ffi_parse_f64_arg(
pub fn ffi_parse_pointer_arg(
_scope: &mut v8::HandleScope,
arg: v8::Local<v8::Value>,
-) -> Result<NativeValue, AnyError> {
+) -> Result<NativeValue, IRError> {
let pointer = if let Ok(value) = v8::Local::<v8::External>::try_from(arg) {
value.value()
} else if arg.is_null() {
ptr::null_mut()
} else {
- return Err(type_error(
- "Invalid FFI pointer type, expected null, or External",
- ));
+ return Err(IRError::InvalidPointerType);
};
Ok(NativeValue { pointer })
}
@@ -312,7 +350,7 @@ pub fn ffi_parse_pointer_arg(
pub fn ffi_parse_buffer_arg(
scope: &mut v8::HandleScope,
arg: v8::Local<v8::Value>,
-) -> Result<NativeValue, AnyError> {
+) -> Result<NativeValue, IRError> {
// Order of checking:
// 1. ArrayBuffer: Fairly common and not supported by Fast API, optimise this case.
// 2. ArrayBufferView: Common and supported by Fast API
@@ -328,9 +366,7 @@ pub fn ffi_parse_buffer_arg(
let byte_offset = value.byte_offset();
let pointer = value
.buffer(scope)
- .ok_or_else(|| {
- type_error("Invalid FFI ArrayBufferView, expected data in the buffer")
- })?
+ .ok_or(IRError::InvalidArrayBufferView)?
.data();
if let Some(non_null) = pointer {
// SAFETY: Pointer is non-null, and V8 guarantees that the byte_offset
@@ -342,9 +378,7 @@ pub fn ffi_parse_buffer_arg(
} else if arg.is_null() {
ptr::null_mut()
} else {
- return Err(type_error(
- "Invalid FFI buffer type, expected null, ArrayBuffer, or ArrayBufferView",
- ));
+ return Err(IRError::InvalidBufferType);
};
Ok(NativeValue { pointer })
}
@@ -353,7 +387,7 @@ pub fn ffi_parse_buffer_arg(
pub fn ffi_parse_struct_arg(
scope: &mut v8::HandleScope,
arg: v8::Local<v8::Value>,
-) -> Result<NativeValue, AnyError> {
+) -> Result<NativeValue, IRError> {
// Order of checking:
// 1. ArrayBuffer: Fairly common and not supported by Fast API, optimise this case.
// 2. ArrayBufferView: Common and supported by Fast API
@@ -362,31 +396,23 @@ pub fn ffi_parse_struct_arg(
if let Some(non_null) = value.data() {
non_null.as_ptr()
} else {
- return Err(type_error(
- "Invalid FFI ArrayBuffer, expected data in buffer",
- ));
+ return Err(IRError::InvalidArrayBuffer);
}
} else if let Ok(value) = v8::Local::<v8::ArrayBufferView>::try_from(arg) {
let byte_offset = value.byte_offset();
let pointer = value
.buffer(scope)
- .ok_or_else(|| {
- type_error("Invalid FFI ArrayBufferView, expected data in the buffer")
- })?
+ .ok_or(IRError::InvalidArrayBufferView)?
.data();
if let Some(non_null) = pointer {
// SAFETY: Pointer is non-null, and V8 guarantees that the byte_offset
// is within the buffer backing store.
unsafe { non_null.as_ptr().add(byte_offset) }
} else {
- return Err(type_error(
- "Invalid FFI ArrayBufferView, expected data in buffer",
- ));
+ return Err(IRError::InvalidArrayBufferView);
}
} else {
- return Err(type_error(
- "Invalid FFI struct type, expected ArrayBuffer, or ArrayBufferView",
- ));
+ return Err(IRError::InvalidStructType);
};
Ok(NativeValue { pointer })
}
@@ -395,15 +421,13 @@ pub fn ffi_parse_struct_arg(
pub fn ffi_parse_function_arg(
_scope: &mut v8::HandleScope,
arg: v8::Local<v8::Value>,
-) -> Result<NativeValue, AnyError> {
+) -> Result<NativeValue, IRError> {
let pointer = if let Ok(value) = v8::Local::<v8::External>::try_from(arg) {
value.value()
} else if arg.is_null() {
ptr::null_mut()
} else {
- return Err(type_error(
- "Invalid FFI function type, expected null, or External",
- ));
+ return Err(IRError::InvalidFunctionType);
};
Ok(NativeValue { pointer })
}
@@ -412,7 +436,7 @@ pub fn ffi_parse_args<'scope>(
scope: &mut v8::HandleScope<'scope>,
args: v8::Local<v8::Array>,
parameter_types: &[NativeType],
-) -> Result<Vec<NativeValue>, AnyError>
+) -> Result<Vec<NativeValue>, IRError>
where
'scope: 'scope,
{
diff --git a/ext/ffi/lib.rs b/ext/ffi/lib.rs
index 77ec3c85e..73ec7757a 100644
--- a/ext/ffi/lib.rs
+++ b/ext/ffi/lib.rs
@@ -1,7 +1,5 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
-use deno_core::error::AnyError;
-
use std::mem::size_of;
use std::os::raw::c_char;
use std::os::raw::c_short;
@@ -29,6 +27,14 @@ use repr::*;
use symbol::NativeType;
use symbol::Symbol;
+pub use call::CallError;
+pub use callback::CallbackError;
+use deno_permissions::PermissionCheckError;
+pub use dlfcn::DlfcnError;
+pub use ir::IRError;
+pub use r#static::StaticError;
+pub use repr::ReprError;
+
#[cfg(not(target_pointer_width = "64"))]
compile_error!("platform not supported");
@@ -41,17 +47,17 @@ const _: () = {
pub const UNSTABLE_FEATURE_NAME: &str = "ffi";
pub trait FfiPermissions {
- fn check_partial_no_path(&mut self) -> Result<(), AnyError>;
+ fn check_partial_no_path(&mut self) -> Result<(), PermissionCheckError>;
#[must_use = "the resolved return value to mitigate time-of-check to time-of-use issues"]
fn check_partial_with_path(
&mut self,
path: &str,
- ) -> Result<PathBuf, AnyError>;
+ ) -> Result<PathBuf, PermissionCheckError>;
}
impl FfiPermissions for deno_permissions::PermissionsContainer {
#[inline(always)]
- fn check_partial_no_path(&mut self) -> Result<(), AnyError> {
+ fn check_partial_no_path(&mut self) -> Result<(), PermissionCheckError> {
deno_permissions::PermissionsContainer::check_ffi_partial_no_path(self)
}
@@ -59,7 +65,7 @@ impl FfiPermissions for deno_permissions::PermissionsContainer {
fn check_partial_with_path(
&mut self,
path: &str,
- ) -> Result<PathBuf, AnyError> {
+ ) -> Result<PathBuf, PermissionCheckError> {
deno_permissions::PermissionsContainer::check_ffi_partial_with_path(
self, path,
)
diff --git a/ext/ffi/repr.rs b/ext/ffi/repr.rs
index 315e6d53b..fd8a2c8e7 100644
--- a/ext/ffi/repr.rs
+++ b/ext/ffi/repr.rs
@@ -1,9 +1,6 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
use crate::FfiPermissions;
-use deno_core::error::range_error;
-use deno_core::error::type_error;
-use deno_core::error::AnyError;
use deno_core::op2;
use deno_core::v8;
use deno_core::OpState;
@@ -12,11 +9,51 @@ use std::ffi::c_void;
use std::ffi::CStr;
use std::ptr;
+#[derive(Debug, thiserror::Error)]
+pub enum ReprError {
+ #[error("Invalid pointer to offset, pointer is null")]
+ InvalidOffset,
+ #[error("Invalid ArrayBuffer pointer, pointer is null")]
+ InvalidArrayBuffer,
+ #[error("Destination length is smaller than source length")]
+ DestinationLengthTooShort,
+ #[error("Invalid CString pointer, pointer is null")]
+ InvalidCString,
+ #[error("Invalid CString pointer, string exceeds max length")]
+ CStringTooLong,
+ #[error("Invalid bool pointer, pointer is null")]
+ InvalidBool,
+ #[error("Invalid u8 pointer, pointer is null")]
+ InvalidU8,
+ #[error("Invalid i8 pointer, pointer is null")]
+ InvalidI8,
+ #[error("Invalid u16 pointer, pointer is null")]
+ InvalidU16,
+ #[error("Invalid i16 pointer, pointer is null")]
+ InvalidI16,
+ #[error("Invalid u32 pointer, pointer is null")]
+ InvalidU32,
+ #[error("Invalid i32 pointer, pointer is null")]
+ InvalidI32,
+ #[error("Invalid u64 pointer, pointer is null")]
+ InvalidU64,
+ #[error("Invalid i64 pointer, pointer is null")]
+ InvalidI64,
+ #[error("Invalid f32 pointer, pointer is null")]
+ InvalidF32,
+ #[error("Invalid f64 pointer, pointer is null")]
+ InvalidF64,
+ #[error("Invalid pointer pointer, pointer is null")]
+ InvalidPointer,
+ #[error(transparent)]
+ Permission(#[from] deno_permissions::PermissionCheckError),
+}
+
#[op2(fast)]
pub fn op_ffi_ptr_create<FP>(
state: &mut OpState,
#[bigint] ptr_number: usize,
-) -> Result<*mut c_void, AnyError>
+) -> Result<*mut c_void, ReprError>
where
FP: FfiPermissions + 'static,
{
@@ -31,7 +68,7 @@ pub fn op_ffi_ptr_equals<FP>(
state: &mut OpState,
a: *const c_void,
b: *const c_void,
-) -> Result<bool, AnyError>
+) -> Result<bool, ReprError>
where
FP: FfiPermissions + 'static,
{
@@ -45,7 +82,7 @@ where
pub fn op_ffi_ptr_of<FP>(
state: &mut OpState,
#[anybuffer] buf: *const u8,
-) -> Result<*mut c_void, AnyError>
+) -> Result<*mut c_void, ReprError>
where
FP: FfiPermissions + 'static,
{
@@ -59,7 +96,7 @@ where
pub fn op_ffi_ptr_of_exact<FP>(
state: &mut OpState,
buf: v8::Local<v8::ArrayBufferView>,
-) -> Result<*mut c_void, AnyError>
+) -> Result<*mut c_void, ReprError>
where
FP: FfiPermissions + 'static,
{
@@ -80,7 +117,7 @@ pub fn op_ffi_ptr_offset<FP>(
state: &mut OpState,
ptr: *mut c_void,
#[number] offset: isize,
-) -> Result<*mut c_void, AnyError>
+) -> Result<*mut c_void, ReprError>
where
FP: FfiPermissions + 'static,
{
@@ -88,7 +125,7 @@ where
permissions.check_partial_no_path()?;
if ptr.is_null() {
- return Err(type_error("Invalid pointer to offset, pointer is null"));
+ return Err(ReprError::InvalidOffset);
}
// TODO(mmastrac): Create a RawPointer that can safely do pointer math.
@@ -110,7 +147,7 @@ unsafe extern "C" fn noop_deleter_callback(
pub fn op_ffi_ptr_value<FP>(
state: &mut OpState,
ptr: *mut c_void,
-) -> Result<usize, AnyError>
+) -> Result<usize, ReprError>
where
FP: FfiPermissions + 'static,
{
@@ -127,7 +164,7 @@ pub fn op_ffi_get_buf<FP, 'scope>(
ptr: *mut c_void,
#[number] offset: isize,
#[number] len: usize,
-) -> Result<v8::Local<'scope, v8::ArrayBuffer>, AnyError>
+) -> Result<v8::Local<'scope, v8::ArrayBuffer>, ReprError>
where
FP: FfiPermissions + 'static,
{
@@ -135,7 +172,7 @@ where
permissions.check_partial_no_path()?;
if ptr.is_null() {
- return Err(type_error("Invalid ArrayBuffer pointer, pointer is null"));
+ return Err(ReprError::InvalidArrayBuffer);
}
// SAFETY: Trust the user to have provided a real pointer, offset, and a valid matching size to it. Since this is a foreign pointer, we should not do any deletion.
@@ -144,7 +181,7 @@ where
ptr.offset(offset),
len,
noop_deleter_callback,
- std::ptr::null_mut(),
+ ptr::null_mut(),
)
}
.make_shared();
@@ -159,7 +196,7 @@ pub fn op_ffi_buf_copy_into<FP>(
#[number] offset: isize,
#[anybuffer] dst: &mut [u8],
#[number] len: usize,
-) -> Result<(), AnyError>
+) -> Result<(), ReprError>
where
FP: FfiPermissions + 'static,
{
@@ -167,11 +204,9 @@ where
permissions.check_partial_no_path()?;
if src.is_null() {
- Err(type_error("Invalid ArrayBuffer pointer, pointer is null"))
+ Err(ReprError::InvalidArrayBuffer)
} else if dst.len() < len {
- Err(range_error(
- "Destination length is smaller than source length",
- ))
+ Err(ReprError::DestinationLengthTooShort)
} else {
let src = src as *const c_void;
@@ -190,7 +225,7 @@ pub fn op_ffi_cstr_read<FP, 'scope>(
state: &mut OpState,
ptr: *mut c_void,
#[number] offset: isize,
-) -> Result<v8::Local<'scope, v8::String>, AnyError>
+) -> Result<v8::Local<'scope, v8::String>, ReprError>
where
FP: FfiPermissions + 'static,
{
@@ -198,16 +233,14 @@ where
permissions.check_partial_no_path()?;
if ptr.is_null() {
- return Err(type_error("Invalid CString pointer, pointer is null"));
+ return Err(ReprError::InvalidCString);
}
let cstr =
// SAFETY: Pointer and offset are user provided.
unsafe { CStr::from_ptr(ptr.offset(offset) as *const c_char) }.to_bytes();
let value = v8::String::new_from_utf8(scope, cstr, v8::NewStringType::Normal)
- .ok_or_else(|| {
- type_error("Invalid CString pointer, string exceeds max length")
- })?;
+ .ok_or_else(|| ReprError::CStringTooLong)?;
Ok(value)
}
@@ -216,7 +249,7 @@ pub fn op_ffi_read_bool<FP>(
state: &mut OpState,
ptr: *mut c_void,
#[number] offset: isize,
-) -> Result<bool, AnyError>
+) -> Result<bool, ReprError>
where
FP: FfiPermissions + 'static,
{
@@ -224,7 +257,7 @@ where
permissions.check_partial_no_path()?;
if ptr.is_null() {
- return Err(type_error("Invalid bool pointer, pointer is null"));
+ return Err(ReprError::InvalidBool);
}
// SAFETY: ptr and offset are user provided.
@@ -236,7 +269,7 @@ pub fn op_ffi_read_u8<FP>(
state: &mut OpState,
ptr: *mut c_void,
#[number] offset: isize,
-) -> Result<u32, AnyError>
+) -> Result<u32, ReprError>
where
FP: FfiPermissions + 'static,
{
@@ -244,7 +277,7 @@ where
permissions.check_partial_no_path()?;
if ptr.is_null() {
- return Err(type_error("Invalid u8 pointer, pointer is null"));
+ return Err(ReprError::InvalidU8);
}
// SAFETY: ptr and offset are user provided.
@@ -258,7 +291,7 @@ pub fn op_ffi_read_i8<FP>(
state: &mut OpState,
ptr: *mut c_void,
#[number] offset: isize,
-) -> Result<i32, AnyError>
+) -> Result<i32, ReprError>
where
FP: FfiPermissions + 'static,
{
@@ -266,7 +299,7 @@ where
permissions.check_partial_no_path()?;
if ptr.is_null() {
- return Err(type_error("Invalid i8 pointer, pointer is null"));
+ return Err(ReprError::InvalidI8);
}
// SAFETY: ptr and offset are user provided.
@@ -280,7 +313,7 @@ pub fn op_ffi_read_u16<FP>(
state: &mut OpState,
ptr: *mut c_void,
#[number] offset: isize,
-) -> Result<u32, AnyError>
+) -> Result<u32, ReprError>
where
FP: FfiPermissions + 'static,
{
@@ -288,7 +321,7 @@ where
permissions.check_partial_no_path()?;
if ptr.is_null() {
- return Err(type_error("Invalid u16 pointer, pointer is null"));
+ return Err(ReprError::InvalidU16);
}
// SAFETY: ptr and offset are user provided.
@@ -302,7 +335,7 @@ pub fn op_ffi_read_i16<FP>(
state: &mut OpState,
ptr: *mut c_void,
#[number] offset: isize,
-) -> Result<i32, AnyError>
+) -> Result<i32, ReprError>
where
FP: FfiPermissions + 'static,
{
@@ -310,7 +343,7 @@ where
permissions.check_partial_no_path()?;
if ptr.is_null() {
- return Err(type_error("Invalid i16 pointer, pointer is null"));
+ return Err(ReprError::InvalidI16);
}
// SAFETY: ptr and offset are user provided.
@@ -324,7 +357,7 @@ pub fn op_ffi_read_u32<FP>(
state: &mut OpState,
ptr: *mut c_void,
#[number] offset: isize,
-) -> Result<u32, AnyError>
+) -> Result<u32, ReprError>
where
FP: FfiPermissions + 'static,
{
@@ -332,7 +365,7 @@ where
permissions.check_partial_no_path()?;
if ptr.is_null() {
- return Err(type_error("Invalid u32 pointer, pointer is null"));
+ return Err(ReprError::InvalidU32);
}
// SAFETY: ptr and offset are user provided.
@@ -344,7 +377,7 @@ pub fn op_ffi_read_i32<FP>(
state: &mut OpState,
ptr: *mut c_void,
#[number] offset: isize,
-) -> Result<i32, AnyError>
+) -> Result<i32, ReprError>
where
FP: FfiPermissions + 'static,
{
@@ -352,7 +385,7 @@ where
permissions.check_partial_no_path()?;
if ptr.is_null() {
- return Err(type_error("Invalid i32 pointer, pointer is null"));
+ return Err(ReprError::InvalidI32);
}
// SAFETY: ptr and offset are user provided.
@@ -367,7 +400,7 @@ pub fn op_ffi_read_u64<FP>(
// Note: The representation of 64-bit integers is function-wide. We cannot
// choose to take this parameter as a number while returning a bigint.
#[bigint] offset: isize,
-) -> Result<u64, AnyError>
+) -> Result<u64, ReprError>
where
FP: FfiPermissions + 'static,
{
@@ -375,7 +408,7 @@ where
permissions.check_partial_no_path()?;
if ptr.is_null() {
- return Err(type_error("Invalid u64 pointer, pointer is null"));
+ return Err(ReprError::InvalidU64);
}
let value =
@@ -393,7 +426,7 @@ pub fn op_ffi_read_i64<FP>(
// Note: The representation of 64-bit integers is function-wide. We cannot
// choose to take this parameter as a number while returning a bigint.
#[bigint] offset: isize,
-) -> Result<i64, AnyError>
+) -> Result<i64, ReprError>
where
FP: FfiPermissions + 'static,
{
@@ -401,7 +434,7 @@ where
permissions.check_partial_no_path()?;
if ptr.is_null() {
- return Err(type_error("Invalid i64 pointer, pointer is null"));
+ return Err(ReprError::InvalidI64);
}
let value =
@@ -416,7 +449,7 @@ pub fn op_ffi_read_f32<FP>(
state: &mut OpState,
ptr: *mut c_void,
#[number] offset: isize,
-) -> Result<f32, AnyError>
+) -> Result<f32, ReprError>
where
FP: FfiPermissions + 'static,
{
@@ -424,7 +457,7 @@ where
permissions.check_partial_no_path()?;
if ptr.is_null() {
- return Err(type_error("Invalid f32 pointer, pointer is null"));
+ return Err(ReprError::InvalidF32);
}
// SAFETY: ptr and offset are user provided.
@@ -436,7 +469,7 @@ pub fn op_ffi_read_f64<FP>(
state: &mut OpState,
ptr: *mut c_void,
#[number] offset: isize,
-) -> Result<f64, AnyError>
+) -> Result<f64, ReprError>
where
FP: FfiPermissions + 'static,
{
@@ -444,7 +477,7 @@ where
permissions.check_partial_no_path()?;
if ptr.is_null() {
- return Err(type_error("Invalid f64 pointer, pointer is null"));
+ return Err(ReprError::InvalidF64);
}
// SAFETY: ptr and offset are user provided.
@@ -456,7 +489,7 @@ pub fn op_ffi_read_ptr<FP>(
state: &mut OpState,
ptr: *mut c_void,
#[number] offset: isize,
-) -> Result<*mut c_void, AnyError>
+) -> Result<*mut c_void, ReprError>
where
FP: FfiPermissions + 'static,
{
@@ -464,7 +497,7 @@ where
permissions.check_partial_no_path()?;
if ptr.is_null() {
- return Err(type_error("Invalid pointer pointer, pointer is null"));
+ return Err(ReprError::InvalidPointer);
}
// SAFETY: ptr and offset are user provided.
diff --git a/ext/ffi/static.rs b/ext/ffi/static.rs
index f08605754..61b405933 100644
--- a/ext/ffi/static.rs
+++ b/ext/ffi/static.rs
@@ -2,14 +2,24 @@
use crate::dlfcn::DynamicLibraryResource;
use crate::symbol::NativeType;
-use deno_core::error::type_error;
-use deno_core::error::AnyError;
use deno_core::op2;
use deno_core::v8;
use deno_core::OpState;
use deno_core::ResourceId;
use std::ptr;
+#[derive(Debug, thiserror::Error)]
+pub enum StaticError {
+ #[error(transparent)]
+ Dlfcn(super::DlfcnError),
+ #[error("Invalid FFI static type 'void'")]
+ InvalidTypeVoid,
+ #[error("Invalid FFI static type 'struct'")]
+ InvalidTypeStruct,
+ #[error(transparent)]
+ Resource(deno_core::error::AnyError),
+}
+
#[op2]
pub fn op_ffi_get_static<'scope>(
scope: &mut v8::HandleScope<'scope>,
@@ -18,24 +28,27 @@ pub fn op_ffi_get_static<'scope>(
#[string] name: String,
#[serde] static_type: NativeType,
optional: bool,
-) -> Result<v8::Local<'scope, v8::Value>, AnyError> {
- let resource = state.resource_table.get::<DynamicLibraryResource>(rid)?;
+) -> Result<v8::Local<'scope, v8::Value>, StaticError> {
+ let resource = state
+ .resource_table
+ .get::<DynamicLibraryResource>(rid)
+ .map_err(StaticError::Resource)?;
let data_ptr = match resource.get_static(name) {
- Ok(data_ptr) => Ok(data_ptr),
+ Ok(data_ptr) => data_ptr,
Err(err) => {
if optional {
let null: v8::Local<v8::Value> = v8::null(scope).into();
return Ok(null);
} else {
- Err(err)
+ return Err(StaticError::Dlfcn(err));
}
}
- }?;
+ };
Ok(match static_type {
NativeType::Void => {
- return Err(type_error("Invalid FFI static type 'void'"));
+ return Err(StaticError::InvalidTypeVoid);
}
NativeType::Bool => {
// SAFETY: ptr is user provided
@@ -132,7 +145,7 @@ pub fn op_ffi_get_static<'scope>(
external
}
NativeType::Struct(_) => {
- return Err(type_error("Invalid FFI static type 'struct'"));
+ return Err(StaticError::InvalidTypeStruct);
}
})
}
diff --git a/ext/fs/30_fs.js b/ext/fs/30_fs.js
index c8e19ac75..40513e7e0 100644
--- a/ext/fs/30_fs.js
+++ b/ext/fs/30_fs.js
@@ -346,9 +346,10 @@ const { 0: statStruct, 1: statBuf } = createByteStruct({
mtime: "date",
atime: "date",
birthtime: "date",
+ ctime: "date",
dev: "u64",
ino: "?u64",
- mode: "?u64",
+ mode: "u64",
nlink: "?u64",
uid: "?u64",
gid: "?u64",
@@ -377,9 +378,10 @@ function parseFileInfo(response) {
birthtime: response.birthtimeSet === true
? new Date(response.birthtime)
: null,
+ ctime: response.ctimeSet === true ? new Date(response.ctime) : null,
dev: response.dev,
+ mode: response.mode,
ino: unix ? response.ino : null,
- mode: unix ? response.mode : null,
nlink: unix ? response.nlink : null,
uid: unix ? response.uid : null,
gid: unix ? response.gid : null,
diff --git a/ext/fs/Cargo.toml b/ext/fs/Cargo.toml
index 606c00ad8..ace1b89f3 100644
--- a/ext/fs/Cargo.toml
+++ b/ext/fs/Cargo.toml
@@ -2,7 +2,7 @@
[package]
name = "deno_fs"
-version = "0.81.0"
+version = "0.87.0"
authors.workspace = true
edition.workspace = true
license.workspace = true
@@ -19,6 +19,7 @@ sync_fs = []
[dependencies]
async-trait.workspace = true
base32.workspace = true
+boxed_error.workspace = true
deno_core.workspace = true
deno_io.workspace = true
deno_path_util.workspace = true
@@ -28,9 +29,10 @@ libc.workspace = true
rand.workspace = true
rayon = "1.8.0"
serde.workspace = true
+thiserror.workspace = true
[target.'cfg(unix)'.dependencies]
-nix.workspace = true
+nix = { workspace = true, features = ["fs", "user"] }
[target.'cfg(windows)'.dependencies]
winapi = { workspace = true, features = ["winbase"] }
diff --git a/ext/fs/in_memory_fs.rs b/ext/fs/in_memory_fs.rs
index e29b9d50c..34b77836d 100644
--- a/ext/fs/in_memory_fs.rs
+++ b/ext/fs/in_memory_fs.rs
@@ -229,6 +229,7 @@ impl FileSystem for InMemoryFs {
mtime: None,
atime: None,
birthtime: None,
+ ctime: None,
dev: 0,
ino: 0,
mode: 0,
@@ -251,6 +252,7 @@ impl FileSystem for InMemoryFs {
mtime: None,
atime: None,
birthtime: None,
+ ctime: None,
dev: 0,
ino: 0,
mode: 0,
diff --git a/ext/fs/lib.rs b/ext/fs/lib.rs
index bd49078b2..aed9a7085 100644
--- a/ext/fs/lib.rs
+++ b/ext/fs/lib.rs
@@ -14,14 +14,17 @@ pub use crate::interface::FileSystemRc;
pub use crate::interface::FsDirEntry;
pub use crate::interface::FsFileType;
pub use crate::interface::OpenOptions;
+pub use crate::ops::FsOpsError;
+pub use crate::ops::FsOpsErrorKind;
+pub use crate::ops::OperationError;
pub use crate::std_fs::RealFs;
pub use crate::sync::MaybeSend;
pub use crate::sync::MaybeSync;
use crate::ops::*;
-use deno_core::error::AnyError;
use deno_io::fs::FsError;
+use deno_permissions::PermissionCheckError;
use std::borrow::Cow;
use std::path::Path;
use std::path::PathBuf;
@@ -40,45 +43,51 @@ pub trait FsPermissions {
&mut self,
path: &str,
api_name: &str,
- ) -> Result<PathBuf, AnyError>;
+ ) -> Result<PathBuf, PermissionCheckError>;
#[must_use = "the resolved return value to mitigate time-of-check to time-of-use issues"]
fn check_read_path<'a>(
&mut self,
path: &'a Path,
api_name: &str,
- ) -> Result<Cow<'a, Path>, AnyError>;
- fn check_read_all(&mut self, api_name: &str) -> Result<(), AnyError>;
+ ) -> Result<Cow<'a, Path>, PermissionCheckError>;
+ fn check_read_all(
+ &mut self,
+ api_name: &str,
+ ) -> Result<(), PermissionCheckError>;
fn check_read_blind(
&mut self,
p: &Path,
display: &str,
api_name: &str,
- ) -> Result<(), AnyError>;
+ ) -> Result<(), PermissionCheckError>;
#[must_use = "the resolved return value to mitigate time-of-check to time-of-use issues"]
fn check_write(
&mut self,
path: &str,
api_name: &str,
- ) -> Result<PathBuf, AnyError>;
+ ) -> Result<PathBuf, PermissionCheckError>;
#[must_use = "the resolved return value to mitigate time-of-check to time-of-use issues"]
fn check_write_path<'a>(
&mut self,
path: &'a Path,
api_name: &str,
- ) -> Result<Cow<'a, Path>, AnyError>;
+ ) -> Result<Cow<'a, Path>, PermissionCheckError>;
#[must_use = "the resolved return value to mitigate time-of-check to time-of-use issues"]
fn check_write_partial(
&mut self,
path: &str,
api_name: &str,
- ) -> Result<PathBuf, AnyError>;
- fn check_write_all(&mut self, api_name: &str) -> Result<(), AnyError>;
+ ) -> Result<PathBuf, PermissionCheckError>;
+ fn check_write_all(
+ &mut self,
+ api_name: &str,
+ ) -> Result<(), PermissionCheckError>;
fn check_write_blind(
&mut self,
p: &Path,
display: &str,
api_name: &str,
- ) -> Result<(), AnyError>;
+ ) -> Result<(), PermissionCheckError>;
fn check<'a>(
&mut self,
@@ -138,7 +147,7 @@ impl FsPermissions for deno_permissions::PermissionsContainer {
&mut self,
path: &str,
api_name: &str,
- ) -> Result<PathBuf, AnyError> {
+ ) -> Result<PathBuf, PermissionCheckError> {
deno_permissions::PermissionsContainer::check_read(self, path, api_name)
}
@@ -146,7 +155,7 @@ impl FsPermissions for deno_permissions::PermissionsContainer {
&mut self,
path: &'a Path,
api_name: &str,
- ) -> Result<Cow<'a, Path>, AnyError> {
+ ) -> Result<Cow<'a, Path>, PermissionCheckError> {
deno_permissions::PermissionsContainer::check_read_path(
self,
path,
@@ -158,7 +167,7 @@ impl FsPermissions for deno_permissions::PermissionsContainer {
path: &Path,
display: &str,
api_name: &str,
- ) -> Result<(), AnyError> {
+ ) -> Result<(), PermissionCheckError> {
deno_permissions::PermissionsContainer::check_read_blind(
self, path, display, api_name,
)
@@ -168,7 +177,7 @@ impl FsPermissions for deno_permissions::PermissionsContainer {
&mut self,
path: &str,
api_name: &str,
- ) -> Result<PathBuf, AnyError> {
+ ) -> Result<PathBuf, PermissionCheckError> {
deno_permissions::PermissionsContainer::check_write(self, path, api_name)
}
@@ -176,7 +185,7 @@ impl FsPermissions for deno_permissions::PermissionsContainer {
&mut self,
path: &'a Path,
api_name: &str,
- ) -> Result<Cow<'a, Path>, AnyError> {
+ ) -> Result<Cow<'a, Path>, PermissionCheckError> {
deno_permissions::PermissionsContainer::check_write_path(
self, path, api_name,
)
@@ -186,7 +195,7 @@ impl FsPermissions for deno_permissions::PermissionsContainer {
&mut self,
path: &str,
api_name: &str,
- ) -> Result<PathBuf, AnyError> {
+ ) -> Result<PathBuf, PermissionCheckError> {
deno_permissions::PermissionsContainer::check_write_partial(
self, path, api_name,
)
@@ -197,17 +206,23 @@ impl FsPermissions for deno_permissions::PermissionsContainer {
p: &Path,
display: &str,
api_name: &str,
- ) -> Result<(), AnyError> {
+ ) -> Result<(), PermissionCheckError> {
deno_permissions::PermissionsContainer::check_write_blind(
self, p, display, api_name,
)
}
- fn check_read_all(&mut self, api_name: &str) -> Result<(), AnyError> {
+ fn check_read_all(
+ &mut self,
+ api_name: &str,
+ ) -> Result<(), PermissionCheckError> {
deno_permissions::PermissionsContainer::check_read_all(self, api_name)
}
- fn check_write_all(&mut self, api_name: &str) -> Result<(), AnyError> {
+ fn check_write_all(
+ &mut self,
+ api_name: &str,
+ ) -> Result<(), PermissionCheckError> {
deno_permissions::PermissionsContainer::check_write_all(self, api_name)
}
}
diff --git a/ext/fs/ops.rs b/ext/fs/ops.rs
index b13d3a7d1..e3a511f8e 100644
--- a/ext/fs/ops.rs
+++ b/ext/fs/ops.rs
@@ -1,6 +1,8 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
use std::cell::RefCell;
+use std::error::Error;
+use std::fmt::Formatter;
use std::io;
use std::io::SeekFrom;
use std::path::Path;
@@ -8,10 +10,13 @@ use std::path::PathBuf;
use std::path::StripPrefixError;
use std::rc::Rc;
-use deno_core::anyhow::bail;
-use deno_core::error::custom_error;
-use deno_core::error::type_error;
-use deno_core::error::AnyError;
+use crate::interface::AccessCheckFn;
+use crate::interface::FileSystemRc;
+use crate::interface::FsDirEntry;
+use crate::interface::FsFileType;
+use crate::FsPermissions;
+use crate::OpenOptions;
+use boxed_error::Boxed;
use deno_core::op2;
use deno_core::CancelFuture;
use deno_core::CancelHandle;
@@ -22,17 +27,76 @@ use deno_core::ToJsBuffer;
use deno_io::fs::FileResource;
use deno_io::fs::FsError;
use deno_io::fs::FsStat;
+use deno_permissions::PermissionCheckError;
use rand::rngs::ThreadRng;
use rand::thread_rng;
use rand::Rng;
use serde::Serialize;
-use crate::interface::AccessCheckFn;
-use crate::interface::FileSystemRc;
-use crate::interface::FsDirEntry;
-use crate::interface::FsFileType;
-use crate::FsPermissions;
-use crate::OpenOptions;
+#[derive(Debug, Boxed)]
+pub struct FsOpsError(pub Box<FsOpsErrorKind>);
+
+#[derive(Debug, thiserror::Error)]
+pub enum FsOpsErrorKind {
+ #[error("{0}")]
+ Io(#[source] std::io::Error),
+ #[error("{0}")]
+ OperationError(#[source] OperationError),
+ #[error(transparent)]
+ Permission(#[from] PermissionCheckError),
+ #[error(transparent)]
+ Resource(deno_core::error::AnyError),
+ #[error("File name or path {0:?} is not valid UTF-8")]
+ InvalidUtf8(std::ffi::OsString),
+ #[error("{0}")]
+ StripPrefix(#[from] StripPrefixError),
+ #[error("{0}")]
+ Canceled(#[from] deno_core::Canceled),
+ #[error("Invalid seek mode: {0}")]
+ InvalidSeekMode(i32),
+ #[error("Invalid control character in prefix or suffix: {0:?}")]
+ InvalidControlCharacter(String),
+ #[error("Invalid character in prefix or suffix: {0:?}")]
+ InvalidCharacter(String),
+ #[cfg(windows)]
+ #[error("Invalid trailing character in suffix")]
+ InvalidTrailingCharacter,
+ #[error("Requires {err} access to {path}, {}", print_not_capable_info(*.standalone, .err))]
+ NotCapableAccess {
+ // NotCapable
+ standalone: bool,
+ err: &'static str,
+ path: String,
+ },
+ #[error("permission denied: {0}")]
+ NotCapable(&'static str), // NotCapable
+ #[error(transparent)]
+ Other(deno_core::error::AnyError),
+}
+
+impl From<FsError> for FsOpsError {
+ fn from(err: FsError) -> Self {
+ match err {
+ FsError::Io(err) => FsOpsErrorKind::Io(err),
+ FsError::FileBusy => {
+ FsOpsErrorKind::Other(deno_core::error::resource_unavailable())
+ }
+ FsError::NotSupported => {
+ FsOpsErrorKind::Other(deno_core::error::not_supported())
+ }
+ FsError::NotCapable(err) => FsOpsErrorKind::NotCapable(err),
+ }
+ .into_box()
+ }
+}
+
+fn print_not_capable_info(standalone: bool, err: &'static str) -> String {
+ if standalone {
+ format!("specify the required permissions during compilation using `deno compile --allow-{err}`")
+ } else {
+ format!("run again with the --allow-{err} flag")
+ }
+}
fn sync_permission_check<'a, P: FsPermissions + 'static>(
permissions: &'a mut P,
@@ -58,7 +122,7 @@ fn map_permission_error(
operation: &'static str,
error: FsError,
path: &Path,
-) -> AnyError {
+) -> FsOpsError {
match error {
FsError::NotCapable(err) => {
let path = format!("{path:?}");
@@ -67,14 +131,13 @@ fn map_permission_error(
} else {
(path.as_str(), "")
};
- let msg = if deno_permissions::is_standalone() {
- format!(
- "Requires {err} access to {path}{truncated}, specify the required permissions during compilation using `deno compile --allow-{err}`")
- } else {
- format!(
- "Requires {err} access to {path}{truncated}, run again with the --allow-{err} flag")
- };
- custom_error("NotCapable", msg)
+
+ FsOpsErrorKind::NotCapableAccess {
+ standalone: deno_permissions::is_standalone(),
+ err,
+ path: format!("{path}{truncated}"),
+ }
+ .into_box()
}
err => Err::<(), _>(err)
.context_path(operation, path)
@@ -85,7 +148,7 @@ fn map_permission_error(
#[op2]
#[string]
-pub fn op_fs_cwd<P>(state: &mut OpState) -> Result<String, AnyError>
+pub fn op_fs_cwd<P>(state: &mut OpState) -> Result<String, FsOpsError>
where
P: FsPermissions + 'static,
{
@@ -102,7 +165,7 @@ where
pub fn op_fs_chdir<P>(
state: &mut OpState,
#[string] directory: &str,
-) -> Result<(), AnyError>
+) -> Result<(), FsOpsError>
where
P: FsPermissions + 'static,
{
@@ -119,7 +182,7 @@ where
pub fn op_fs_umask(
state: &mut OpState,
mask: Option<u32>,
-) -> Result<u32, AnyError>
+) -> Result<u32, FsOpsError>
where
{
state.borrow::<FileSystemRc>().umask(mask).context("umask")
@@ -131,7 +194,7 @@ pub fn op_fs_open_sync<P>(
state: &mut OpState,
#[string] path: String,
#[serde] options: Option<OpenOptions>,
-) -> Result<ResourceId, AnyError>
+) -> Result<ResourceId, FsOpsError>
where
P: FsPermissions + 'static,
{
@@ -158,7 +221,7 @@ pub async fn op_fs_open_async<P>(
state: Rc<RefCell<OpState>>,
#[string] path: String,
#[serde] options: Option<OpenOptions>,
-) -> Result<ResourceId, AnyError>
+) -> Result<ResourceId, FsOpsError>
where
P: FsPermissions + 'static,
{
@@ -186,7 +249,7 @@ pub fn op_fs_mkdir_sync<P>(
#[string] path: String,
recursive: bool,
mode: Option<u32>,
-) -> Result<(), AnyError>
+) -> Result<(), FsOpsError>
where
P: FsPermissions + 'static,
{
@@ -209,7 +272,7 @@ pub async fn op_fs_mkdir_async<P>(
#[string] path: String,
recursive: bool,
mode: Option<u32>,
-) -> Result<(), AnyError>
+) -> Result<(), FsOpsError>
where
P: FsPermissions + 'static,
{
@@ -233,7 +296,7 @@ pub fn op_fs_chmod_sync<P>(
state: &mut OpState,
#[string] path: String,
mode: u32,
-) -> Result<(), AnyError>
+) -> Result<(), FsOpsError>
where
P: FsPermissions + 'static,
{
@@ -250,7 +313,7 @@ pub async fn op_fs_chmod_async<P>(
state: Rc<RefCell<OpState>>,
#[string] path: String,
mode: u32,
-) -> Result<(), AnyError>
+) -> Result<(), FsOpsError>
where
P: FsPermissions + 'static,
{
@@ -271,7 +334,7 @@ pub fn op_fs_chown_sync<P>(
#[string] path: String,
uid: Option<u32>,
gid: Option<u32>,
-) -> Result<(), AnyError>
+) -> Result<(), FsOpsError>
where
P: FsPermissions + 'static,
{
@@ -290,7 +353,7 @@ pub async fn op_fs_chown_async<P>(
#[string] path: String,
uid: Option<u32>,
gid: Option<u32>,
-) -> Result<(), AnyError>
+) -> Result<(), FsOpsError>
where
P: FsPermissions + 'static,
{
@@ -310,7 +373,7 @@ pub fn op_fs_remove_sync<P>(
state: &mut OpState,
#[string] path: &str,
recursive: bool,
-) -> Result<(), AnyError>
+) -> Result<(), FsOpsError>
where
P: FsPermissions + 'static,
{
@@ -330,7 +393,7 @@ pub async fn op_fs_remove_async<P>(
state: Rc<RefCell<OpState>>,
#[string] path: String,
recursive: bool,
-) -> Result<(), AnyError>
+) -> Result<(), FsOpsError>
where
P: FsPermissions + 'static,
{
@@ -361,7 +424,7 @@ pub fn op_fs_copy_file_sync<P>(
state: &mut OpState,
#[string] from: &str,
#[string] to: &str,
-) -> Result<(), AnyError>
+) -> Result<(), FsOpsError>
where
P: FsPermissions + 'static,
{
@@ -381,7 +444,7 @@ pub async fn op_fs_copy_file_async<P>(
state: Rc<RefCell<OpState>>,
#[string] from: String,
#[string] to: String,
-) -> Result<(), AnyError>
+) -> Result<(), FsOpsError>
where
P: FsPermissions + 'static,
{
@@ -405,7 +468,7 @@ pub fn op_fs_stat_sync<P>(
state: &mut OpState,
#[string] path: String,
#[buffer] stat_out_buf: &mut [u32],
-) -> Result<(), AnyError>
+) -> Result<(), FsOpsError>
where
P: FsPermissions + 'static,
{
@@ -424,7 +487,7 @@ where
pub async fn op_fs_stat_async<P>(
state: Rc<RefCell<OpState>>,
#[string] path: String,
-) -> Result<SerializableStat, AnyError>
+) -> Result<SerializableStat, FsOpsError>
where
P: FsPermissions + 'static,
{
@@ -446,7 +509,7 @@ pub fn op_fs_lstat_sync<P>(
state: &mut OpState,
#[string] path: String,
#[buffer] stat_out_buf: &mut [u32],
-) -> Result<(), AnyError>
+) -> Result<(), FsOpsError>
where
P: FsPermissions + 'static,
{
@@ -465,7 +528,7 @@ where
pub async fn op_fs_lstat_async<P>(
state: Rc<RefCell<OpState>>,
#[string] path: String,
-) -> Result<SerializableStat, AnyError>
+) -> Result<SerializableStat, FsOpsError>
where
P: FsPermissions + 'static,
{
@@ -487,7 +550,7 @@ where
pub fn op_fs_realpath_sync<P>(
state: &mut OpState,
#[string] path: String,
-) -> Result<String, AnyError>
+) -> Result<String, FsOpsError>
where
P: FsPermissions + 'static,
{
@@ -510,7 +573,7 @@ where
pub async fn op_fs_realpath_async<P>(
state: Rc<RefCell<OpState>>,
#[string] path: String,
-) -> Result<String, AnyError>
+) -> Result<String, FsOpsError>
where
P: FsPermissions + 'static,
{
@@ -538,7 +601,7 @@ where
pub fn op_fs_read_dir_sync<P>(
state: &mut OpState,
#[string] path: String,
-) -> Result<Vec<FsDirEntry>, AnyError>
+) -> Result<Vec<FsDirEntry>, FsOpsError>
where
P: FsPermissions + 'static,
{
@@ -557,7 +620,7 @@ where
pub async fn op_fs_read_dir_async<P>(
state: Rc<RefCell<OpState>>,
#[string] path: String,
-) -> Result<Vec<FsDirEntry>, AnyError>
+) -> Result<Vec<FsDirEntry>, FsOpsError>
where
P: FsPermissions + 'static,
{
@@ -582,7 +645,7 @@ pub fn op_fs_rename_sync<P>(
state: &mut OpState,
#[string] oldpath: String,
#[string] newpath: String,
-) -> Result<(), AnyError>
+) -> Result<(), FsOpsError>
where
P: FsPermissions + 'static,
{
@@ -603,7 +666,7 @@ pub async fn op_fs_rename_async<P>(
state: Rc<RefCell<OpState>>,
#[string] oldpath: String,
#[string] newpath: String,
-) -> Result<(), AnyError>
+) -> Result<(), FsOpsError>
where
P: FsPermissions + 'static,
{
@@ -628,7 +691,7 @@ pub fn op_fs_link_sync<P>(
state: &mut OpState,
#[string] oldpath: &str,
#[string] newpath: &str,
-) -> Result<(), AnyError>
+) -> Result<(), FsOpsError>
where
P: FsPermissions + 'static,
{
@@ -650,7 +713,7 @@ pub async fn op_fs_link_async<P>(
state: Rc<RefCell<OpState>>,
#[string] oldpath: String,
#[string] newpath: String,
-) -> Result<(), AnyError>
+) -> Result<(), FsOpsError>
where
P: FsPermissions + 'static,
{
@@ -677,7 +740,7 @@ pub fn op_fs_symlink_sync<P>(
#[string] oldpath: &str,
#[string] newpath: &str,
#[serde] file_type: Option<FsFileType>,
-) -> Result<(), AnyError>
+) -> Result<(), FsOpsError>
where
P: FsPermissions + 'static,
{
@@ -701,7 +764,7 @@ pub async fn op_fs_symlink_async<P>(
#[string] oldpath: String,
#[string] newpath: String,
#[serde] file_type: Option<FsFileType>,
-) -> Result<(), AnyError>
+) -> Result<(), FsOpsError>
where
P: FsPermissions + 'static,
{
@@ -728,7 +791,7 @@ where
pub fn op_fs_read_link_sync<P>(
state: &mut OpState,
#[string] path: String,
-) -> Result<String, AnyError>
+) -> Result<String, FsOpsError>
where
P: FsPermissions + 'static,
{
@@ -748,7 +811,7 @@ where
pub async fn op_fs_read_link_async<P>(
state: Rc<RefCell<OpState>>,
#[string] path: String,
-) -> Result<String, AnyError>
+) -> Result<String, FsOpsError>
where
P: FsPermissions + 'static,
{
@@ -773,7 +836,7 @@ pub fn op_fs_truncate_sync<P>(
state: &mut OpState,
#[string] path: &str,
#[number] len: u64,
-) -> Result<(), AnyError>
+) -> Result<(), FsOpsError>
where
P: FsPermissions + 'static,
{
@@ -793,7 +856,7 @@ pub async fn op_fs_truncate_async<P>(
state: Rc<RefCell<OpState>>,
#[string] path: String,
#[number] len: u64,
-) -> Result<(), AnyError>
+) -> Result<(), FsOpsError>
where
P: FsPermissions + 'static,
{
@@ -820,7 +883,7 @@ pub fn op_fs_utime_sync<P>(
#[smi] atime_nanos: u32,
#[number] mtime_secs: i64,
#[smi] mtime_nanos: u32,
-) -> Result<(), AnyError>
+) -> Result<(), FsOpsError>
where
P: FsPermissions + 'static,
{
@@ -841,7 +904,7 @@ pub async fn op_fs_utime_async<P>(
#[smi] atime_nanos: u32,
#[number] mtime_secs: i64,
#[smi] mtime_nanos: u32,
-) -> Result<(), AnyError>
+) -> Result<(), FsOpsError>
where
P: FsPermissions + 'static,
{
@@ -871,7 +934,7 @@ pub fn op_fs_make_temp_dir_sync<P>(
#[string] dir_arg: Option<String>,
#[string] prefix: Option<String>,
#[string] suffix: Option<String>,
-) -> Result<String, AnyError>
+) -> Result<String, FsOpsError>
where
P: FsPermissions + 'static,
{
@@ -913,7 +976,7 @@ pub async fn op_fs_make_temp_dir_async<P>(
#[string] dir_arg: Option<String>,
#[string] prefix: Option<String>,
#[string] suffix: Option<String>,
-) -> Result<String, AnyError>
+) -> Result<String, FsOpsError>
where
P: FsPermissions + 'static,
{
@@ -959,7 +1022,7 @@ pub fn op_fs_make_temp_file_sync<P>(
#[string] dir_arg: Option<String>,
#[string] prefix: Option<String>,
#[string] suffix: Option<String>,
-) -> Result<String, AnyError>
+) -> Result<String, FsOpsError>
where
P: FsPermissions + 'static,
{
@@ -1007,7 +1070,7 @@ pub async fn op_fs_make_temp_file_async<P>(
#[string] dir_arg: Option<String>,
#[string] prefix: Option<String>,
#[string] suffix: Option<String>,
-) -> Result<String, AnyError>
+) -> Result<String, FsOpsError>
where
P: FsPermissions + 'static,
{
@@ -1069,7 +1132,7 @@ fn make_temp_check_sync<P>(
state: &mut OpState,
dir: Option<&str>,
api_name: &str,
-) -> Result<(PathBuf, FileSystemRc), AnyError>
+) -> Result<(PathBuf, FileSystemRc), FsOpsError>
where
P: FsPermissions + 'static,
{
@@ -1091,7 +1154,7 @@ fn make_temp_check_async<P>(
state: Rc<RefCell<OpState>>,
dir: Option<&str>,
api_name: &str,
-) -> Result<(PathBuf, FileSystemRc), AnyError>
+) -> Result<(PathBuf, FileSystemRc), FsOpsError>
where
P: FsPermissions + 'static,
{
@@ -1116,10 +1179,12 @@ where
fn validate_temporary_filename_component(
component: &str,
#[allow(unused_variables)] suffix: bool,
-) -> Result<(), AnyError> {
+) -> Result<(), FsOpsError> {
// Ban ASCII and Unicode control characters: these will often fail
if let Some(c) = component.matches(|c: char| c.is_control()).next() {
- bail!("Invalid control character in prefix or suffix: {:?}", c);
+ return Err(
+ FsOpsErrorKind::InvalidControlCharacter(c.to_string()).into_box(),
+ );
}
// Windows has the most restrictive filenames. As temp files aren't normal files, we just
// use this set of banned characters for all platforms because wildcard-like files can also
@@ -1135,13 +1200,13 @@ fn validate_temporary_filename_component(
.matches(|c: char| "<>:\"/\\|?*".contains(c))
.next()
{
- bail!("Invalid character in prefix or suffix: {:?}", c);
+ return Err(FsOpsErrorKind::InvalidCharacter(c.to_string()).into_box());
}
// This check is only for Windows
#[cfg(windows)]
if suffix && component.ends_with(|c: char| ". ".contains(c)) {
- bail!("Invalid trailing character in suffix");
+ return Err(FsOpsErrorKind::InvalidTrailingCharacter.into_box());
}
Ok(())
@@ -1152,7 +1217,7 @@ fn tmp_name(
dir: &Path,
prefix: Option<&str>,
suffix: Option<&str>,
-) -> Result<PathBuf, AnyError> {
+) -> Result<PathBuf, FsOpsError> {
let prefix = prefix.unwrap_or("");
validate_temporary_filename_component(prefix, false)?;
let suffix = suffix.unwrap_or("");
@@ -1179,7 +1244,7 @@ pub fn op_fs_write_file_sync<P>(
create: bool,
create_new: bool,
#[buffer] data: JsBuffer,
-) -> Result<(), AnyError>
+) -> Result<(), FsOpsError>
where
P: FsPermissions + 'static,
{
@@ -1207,7 +1272,7 @@ pub async fn op_fs_write_file_async<P>(
create_new: bool,
#[buffer] data: JsBuffer,
#[smi] cancel_rid: Option<ResourceId>,
-) -> Result<(), AnyError>
+) -> Result<(), FsOpsError>
where
P: FsPermissions + 'static,
{
@@ -1255,7 +1320,7 @@ where
pub fn op_fs_read_file_sync<P>(
state: &mut OpState,
#[string] path: String,
-) -> Result<ToJsBuffer, AnyError>
+) -> Result<ToJsBuffer, FsOpsError>
where
P: FsPermissions + 'static,
{
@@ -1277,7 +1342,7 @@ pub async fn op_fs_read_file_async<P>(
state: Rc<RefCell<OpState>>,
#[string] path: String,
#[smi] cancel_rid: Option<ResourceId>,
-) -> Result<ToJsBuffer, AnyError>
+) -> Result<ToJsBuffer, FsOpsError>
where
P: FsPermissions + 'static,
{
@@ -1318,7 +1383,7 @@ where
pub fn op_fs_read_file_text_sync<P>(
state: &mut OpState,
#[string] path: String,
-) -> Result<String, AnyError>
+) -> Result<String, FsOpsError>
where
P: FsPermissions + 'static,
{
@@ -1340,7 +1405,7 @@ pub async fn op_fs_read_file_text_async<P>(
state: Rc<RefCell<OpState>>,
#[string] path: String,
#[smi] cancel_rid: Option<ResourceId>,
-) -> Result<String, AnyError>
+) -> Result<String, FsOpsError>
where
P: FsPermissions + 'static,
{
@@ -1377,13 +1442,13 @@ where
Ok(str)
}
-fn to_seek_from(offset: i64, whence: i32) -> Result<SeekFrom, AnyError> {
+fn to_seek_from(offset: i64, whence: i32) -> Result<SeekFrom, FsOpsError> {
let seek_from = match whence {
0 => SeekFrom::Start(offset as u64),
1 => SeekFrom::Current(offset),
2 => SeekFrom::End(offset),
_ => {
- return Err(type_error(format!("Invalid seek mode: {whence}")));
+ return Err(FsOpsErrorKind::InvalidSeekMode(whence).into_box());
}
};
Ok(seek_from)
@@ -1396,9 +1461,10 @@ pub fn op_fs_seek_sync(
#[smi] rid: ResourceId,
#[number] offset: i64,
#[smi] whence: i32,
-) -> Result<u64, AnyError> {
+) -> Result<u64, FsOpsError> {
let pos = to_seek_from(offset, whence)?;
- let file = FileResource::get_file(state, rid)?;
+ let file =
+ FileResource::get_file(state, rid).map_err(FsOpsErrorKind::Resource)?;
let cursor = file.seek_sync(pos)?;
Ok(cursor)
}
@@ -1410,9 +1476,10 @@ pub async fn op_fs_seek_async(
#[smi] rid: ResourceId,
#[number] offset: i64,
#[smi] whence: i32,
-) -> Result<u64, AnyError> {
+) -> Result<u64, FsOpsError> {
let pos = to_seek_from(offset, whence)?;
- let file = FileResource::get_file(&state.borrow(), rid)?;
+ let file = FileResource::get_file(&state.borrow(), rid)
+ .map_err(FsOpsErrorKind::Resource)?;
let cursor = file.seek_async(pos).await?;
Ok(cursor)
}
@@ -1421,8 +1488,9 @@ pub async fn op_fs_seek_async(
pub fn op_fs_file_sync_data_sync(
state: &mut OpState,
#[smi] rid: ResourceId,
-) -> Result<(), AnyError> {
- let file = FileResource::get_file(state, rid)?;
+) -> Result<(), FsOpsError> {
+ let file =
+ FileResource::get_file(state, rid).map_err(FsOpsErrorKind::Resource)?;
file.datasync_sync()?;
Ok(())
}
@@ -1431,8 +1499,9 @@ pub fn op_fs_file_sync_data_sync(
pub async fn op_fs_file_sync_data_async(
state: Rc<RefCell<OpState>>,
#[smi] rid: ResourceId,
-) -> Result<(), AnyError> {
- let file = FileResource::get_file(&state.borrow(), rid)?;
+) -> Result<(), FsOpsError> {
+ let file = FileResource::get_file(&state.borrow(), rid)
+ .map_err(FsOpsErrorKind::Resource)?;
file.datasync_async().await?;
Ok(())
}
@@ -1441,8 +1510,9 @@ pub async fn op_fs_file_sync_data_async(
pub fn op_fs_file_sync_sync(
state: &mut OpState,
#[smi] rid: ResourceId,
-) -> Result<(), AnyError> {
- let file = FileResource::get_file(state, rid)?;
+) -> Result<(), FsOpsError> {
+ let file =
+ FileResource::get_file(state, rid).map_err(FsOpsErrorKind::Resource)?;
file.sync_sync()?;
Ok(())
}
@@ -1451,8 +1521,9 @@ pub fn op_fs_file_sync_sync(
pub async fn op_fs_file_sync_async(
state: Rc<RefCell<OpState>>,
#[smi] rid: ResourceId,
-) -> Result<(), AnyError> {
- let file = FileResource::get_file(&state.borrow(), rid)?;
+) -> Result<(), FsOpsError> {
+ let file = FileResource::get_file(&state.borrow(), rid)
+ .map_err(FsOpsErrorKind::Resource)?;
file.sync_async().await?;
Ok(())
}
@@ -1462,8 +1533,9 @@ pub fn op_fs_file_stat_sync(
state: &mut OpState,
#[smi] rid: ResourceId,
#[buffer] stat_out_buf: &mut [u32],
-) -> Result<(), AnyError> {
- let file = FileResource::get_file(state, rid)?;
+) -> Result<(), FsOpsError> {
+ let file =
+ FileResource::get_file(state, rid).map_err(FsOpsErrorKind::Resource)?;
let stat = file.stat_sync()?;
let serializable_stat = SerializableStat::from(stat);
serializable_stat.write(stat_out_buf);
@@ -1475,8 +1547,9 @@ pub fn op_fs_file_stat_sync(
pub async fn op_fs_file_stat_async(
state: Rc<RefCell<OpState>>,
#[smi] rid: ResourceId,
-) -> Result<SerializableStat, AnyError> {
- let file = FileResource::get_file(&state.borrow(), rid)?;
+) -> Result<SerializableStat, FsOpsError> {
+ let file = FileResource::get_file(&state.borrow(), rid)
+ .map_err(FsOpsErrorKind::Resource)?;
let stat = file.stat_async().await?;
Ok(stat.into())
}
@@ -1486,8 +1559,9 @@ pub fn op_fs_flock_sync(
state: &mut OpState,
#[smi] rid: ResourceId,
exclusive: bool,
-) -> Result<(), AnyError> {
- let file = FileResource::get_file(state, rid)?;
+) -> Result<(), FsOpsError> {
+ let file =
+ FileResource::get_file(state, rid).map_err(FsOpsErrorKind::Resource)?;
file.lock_sync(exclusive)?;
Ok(())
}
@@ -1497,8 +1571,9 @@ pub async fn op_fs_flock_async(
state: Rc<RefCell<OpState>>,
#[smi] rid: ResourceId,
exclusive: bool,
-) -> Result<(), AnyError> {
- let file = FileResource::get_file(&state.borrow(), rid)?;
+) -> Result<(), FsOpsError> {
+ let file = FileResource::get_file(&state.borrow(), rid)
+ .map_err(FsOpsErrorKind::Resource)?;
file.lock_async(exclusive).await?;
Ok(())
}
@@ -1507,8 +1582,9 @@ pub async fn op_fs_flock_async(
pub fn op_fs_funlock_sync(
state: &mut OpState,
#[smi] rid: ResourceId,
-) -> Result<(), AnyError> {
- let file = FileResource::get_file(state, rid)?;
+) -> Result<(), FsOpsError> {
+ let file =
+ FileResource::get_file(state, rid).map_err(FsOpsErrorKind::Resource)?;
file.unlock_sync()?;
Ok(())
}
@@ -1517,8 +1593,9 @@ pub fn op_fs_funlock_sync(
pub async fn op_fs_funlock_async(
state: Rc<RefCell<OpState>>,
#[smi] rid: ResourceId,
-) -> Result<(), AnyError> {
- let file = FileResource::get_file(&state.borrow(), rid)?;
+) -> Result<(), FsOpsError> {
+ let file = FileResource::get_file(&state.borrow(), rid)
+ .map_err(FsOpsErrorKind::Resource)?;
file.unlock_async().await?;
Ok(())
}
@@ -1528,8 +1605,9 @@ pub fn op_fs_ftruncate_sync(
state: &mut OpState,
#[smi] rid: ResourceId,
#[number] len: u64,
-) -> Result<(), AnyError> {
- let file = FileResource::get_file(state, rid)?;
+) -> Result<(), FsOpsError> {
+ let file =
+ FileResource::get_file(state, rid).map_err(FsOpsErrorKind::Resource)?;
file.truncate_sync(len)?;
Ok(())
}
@@ -1539,8 +1617,9 @@ pub async fn op_fs_file_truncate_async(
state: Rc<RefCell<OpState>>,
#[smi] rid: ResourceId,
#[number] len: u64,
-) -> Result<(), AnyError> {
- let file = FileResource::get_file(&state.borrow(), rid)?;
+) -> Result<(), FsOpsError> {
+ let file = FileResource::get_file(&state.borrow(), rid)
+ .map_err(FsOpsErrorKind::Resource)?;
file.truncate_async(len).await?;
Ok(())
}
@@ -1553,8 +1632,9 @@ pub fn op_fs_futime_sync(
#[smi] atime_nanos: u32,
#[number] mtime_secs: i64,
#[smi] mtime_nanos: u32,
-) -> Result<(), AnyError> {
- let file = FileResource::get_file(state, rid)?;
+) -> Result<(), FsOpsError> {
+ let file =
+ FileResource::get_file(state, rid).map_err(FsOpsErrorKind::Resource)?;
file.utime_sync(atime_secs, atime_nanos, mtime_secs, mtime_nanos)?;
Ok(())
}
@@ -1567,42 +1647,64 @@ pub async fn op_fs_futime_async(
#[smi] atime_nanos: u32,
#[number] mtime_secs: i64,
#[smi] mtime_nanos: u32,
-) -> Result<(), AnyError> {
- let file = FileResource::get_file(&state.borrow(), rid)?;
+) -> Result<(), FsOpsError> {
+ let file = FileResource::get_file(&state.borrow(), rid)
+ .map_err(FsOpsErrorKind::Resource)?;
file
.utime_async(atime_secs, atime_nanos, mtime_secs, mtime_nanos)
.await?;
Ok(())
}
-trait WithContext {
- fn context<E: Into<Box<dyn std::error::Error + Send + Sync>>>(
- self,
- desc: E,
- ) -> AnyError;
+#[derive(Debug)]
+pub struct OperationError {
+ operation: &'static str,
+ kind: OperationErrorKind,
+ pub err: FsError,
}
-impl WithContext for FsError {
- fn context<E: Into<Box<dyn std::error::Error + Send + Sync>>>(
- self,
- desc: E,
- ) -> AnyError {
- match self {
- FsError::Io(io) => {
- AnyError::new(io::Error::new(io.kind(), desc)).context(io)
+impl std::fmt::Display for OperationError {
+ fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+ if let FsError::Io(e) = &self.err {
+ std::fmt::Display::fmt(&e, f)?;
+ f.write_str(": ")?;
+ }
+
+ f.write_str(self.operation)?;
+
+ match &self.kind {
+ OperationErrorKind::Bare => Ok(()),
+ OperationErrorKind::WithPath(path) => write!(f, " '{}'", path.display()),
+ OperationErrorKind::WithTwoPaths(from, to) => {
+ write!(f, " '{}' -> '{}'", from.display(), to.display())
}
- _ => self.into(),
}
}
}
+impl std::error::Error for OperationError {
+ fn source(&self) -> Option<&(dyn Error + 'static)> {
+ if let FsError::Io(err) = &self.err {
+ Some(err)
+ } else {
+ None
+ }
+ }
+}
+
+#[derive(Debug)]
+pub enum OperationErrorKind {
+ Bare,
+ WithPath(PathBuf),
+ WithTwoPaths(PathBuf, PathBuf),
+}
+
trait MapErrContext {
type R;
- fn context_fn<F, E>(self, f: F) -> Self::R
+ fn context_fn<F>(self, f: F) -> Self::R
where
- F: FnOnce() -> E,
- E: Into<Box<dyn std::error::Error + Send + Sync>>;
+ F: FnOnce(FsError) -> OperationError;
fn context(self, desc: &'static str) -> Self::R;
@@ -1617,25 +1719,29 @@ trait MapErrContext {
}
impl<T> MapErrContext for Result<T, FsError> {
- type R = Result<T, AnyError>;
+ type R = Result<T, FsOpsError>;
- fn context_fn<F, E>(self, f: F) -> Self::R
+ fn context_fn<F>(self, f: F) -> Self::R
where
- F: FnOnce() -> E,
- E: Into<Box<dyn std::error::Error + Send + Sync>>,
+ F: FnOnce(FsError) -> OperationError,
{
- self.map_err(|err| {
- let message = f();
- err.context(message)
- })
+ self.map_err(|err| FsOpsErrorKind::OperationError(f(err)).into_box())
}
- fn context(self, desc: &'static str) -> Self::R {
- self.context_fn(move || desc)
+ fn context(self, operation: &'static str) -> Self::R {
+ self.context_fn(move |err| OperationError {
+ operation,
+ kind: OperationErrorKind::Bare,
+ err,
+ })
}
fn context_path(self, operation: &'static str, path: &Path) -> Self::R {
- self.context_fn(|| format!("{operation} '{}'", path.display()))
+ self.context_fn(|err| OperationError {
+ operation,
+ kind: OperationErrorKind::WithPath(path.to_path_buf()),
+ err,
+ })
}
fn context_two_path(
@@ -1644,21 +1750,20 @@ impl<T> MapErrContext for Result<T, FsError> {
oldpath: &Path,
newpath: &Path,
) -> Self::R {
- self.context_fn(|| {
- format!(
- "{operation} '{}' -> '{}'",
- oldpath.display(),
- newpath.display()
- )
+ self.context_fn(|err| OperationError {
+ operation,
+ kind: OperationErrorKind::WithTwoPaths(
+ oldpath.to_path_buf(),
+ newpath.to_path_buf(),
+ ),
+ err,
})
}
}
-fn path_into_string(s: std::ffi::OsString) -> Result<String, AnyError> {
- s.into_string().map_err(|s| {
- let message = format!("File name or path {s:?} is not valid UTF-8");
- custom_error("InvalidData", message)
- })
+fn path_into_string(s: std::ffi::OsString) -> Result<String, FsOpsError> {
+ s.into_string()
+ .map_err(|e| FsOpsErrorKind::InvalidUtf8(e).into_box())
}
macro_rules! create_struct_writer {
@@ -1699,6 +1804,8 @@ create_struct_writer! {
atime: u64,
birthtime_set: bool,
birthtime: u64,
+ ctime_set: bool,
+ ctime: u64,
// Following are only valid under Unix.
dev: u64,
ino: u64,
@@ -1730,6 +1837,8 @@ impl From<FsStat> for SerializableStat {
atime: stat.atime.unwrap_or(0),
birthtime_set: stat.birthtime.is_some(),
birthtime: stat.birthtime.unwrap_or(0),
+ ctime_set: stat.ctime.is_some(),
+ ctime: stat.ctime.unwrap_or(0),
dev: stat.dev,
ino: stat.ino,
diff --git a/ext/fs/std_fs.rs b/ext/fs/std_fs.rs
index 41a8569ba..73439d9ba 100644
--- a/ext/fs/std_fs.rs
+++ b/ext/fs/std_fs.rs
@@ -821,24 +821,46 @@ fn stat_extra(
Ok(info.dwVolumeSerialNumber as u64)
}
+ const WINDOWS_TICK: i64 = 10_000; // 100-nanosecond intervals in a millisecond
+ const SEC_TO_UNIX_EPOCH: i64 = 11_644_473_600; // Seconds between Windows epoch and Unix epoch
+
+ fn windows_time_to_unix_time_msec(windows_time: &i64) -> i64 {
+ let milliseconds_since_windows_epoch = windows_time / WINDOWS_TICK;
+ milliseconds_since_windows_epoch - SEC_TO_UNIX_EPOCH * 1000
+ }
+
use windows_sys::Wdk::Storage::FileSystem::FILE_ALL_INFORMATION;
+ use windows_sys::Win32::Foundation::NTSTATUS;
unsafe fn query_file_information(
handle: winapi::shared::ntdef::HANDLE,
- ) -> std::io::Result<FILE_ALL_INFORMATION> {
+ ) -> Result<FILE_ALL_INFORMATION, NTSTATUS> {
use windows_sys::Wdk::Storage::FileSystem::NtQueryInformationFile;
+ use windows_sys::Win32::Foundation::RtlNtStatusToDosError;
+ use windows_sys::Win32::Foundation::ERROR_MORE_DATA;
+ use windows_sys::Win32::System::IO::IO_STATUS_BLOCK;
let mut info = std::mem::MaybeUninit::<FILE_ALL_INFORMATION>::zeroed();
+ let mut io_status_block =
+ std::mem::MaybeUninit::<IO_STATUS_BLOCK>::zeroed();
let status = NtQueryInformationFile(
handle as _,
- std::ptr::null_mut(),
+ io_status_block.as_mut_ptr(),
info.as_mut_ptr() as *mut _,
std::mem::size_of::<FILE_ALL_INFORMATION>() as _,
18, /* FileAllInformation */
);
if status < 0 {
- return Err(std::io::Error::last_os_error());
+ let converted_status = RtlNtStatusToDosError(status);
+
+      // If ERROR_MORE_DATA is returned, it means the buffer was too small to hold the full filename information;
+      // normally we would retry with a larger buffer. However, since we only use BasicInformation and
+      // StandardInformation, it is fine to ignore it because the struct is populated with that data anyway.
+      // https://learn.microsoft.com/en-us/windows-hardware/drivers/ddi/ntifs/nf-ntifs-ntqueryinformationfile#remarks
+ if converted_status != ERROR_MORE_DATA {
+ return Err(converted_status as NTSTATUS);
+ }
}
Ok(info.assume_init())
@@ -862,10 +884,13 @@ fn stat_extra(
}
let result = get_dev(file_handle);
- CloseHandle(file_handle);
fsstat.dev = result?;
if let Ok(file_info) = query_file_information(file_handle) {
+ fsstat.ctime = Some(windows_time_to_unix_time_msec(
+ &file_info.BasicInformation.ChangeTime,
+ ) as u64);
+
if file_info.BasicInformation.FileAttributes
& winapi::um::winnt::FILE_ATTRIBUTE_REPARSE_POINT
!= 0
@@ -898,6 +923,7 @@ fn stat_extra(
}
}
+ CloseHandle(file_handle);
Ok(())
}
}
@@ -929,7 +955,7 @@ fn exists(path: &Path) -> bool {
}
fn realpath(path: &Path) -> FsResult<PathBuf> {
- Ok(deno_core::strip_unc_prefix(path.canonicalize()?))
+ Ok(deno_path_util::strip_unc_prefix(path.canonicalize()?))
}
fn read_dir(path: &Path) -> FsResult<Vec<FsDirEntry>> {
diff --git a/ext/http/00_serve.ts b/ext/http/00_serve.ts
index 3b9b085a2..fcdb87d09 100644
--- a/ext/http/00_serve.ts
+++ b/ext/http/00_serve.ts
@@ -14,6 +14,7 @@ import {
op_http_get_request_headers,
op_http_get_request_method_and_url,
op_http_read_request_body,
+ op_http_request_on_cancel,
op_http_serve,
op_http_serve_on,
op_http_set_promise_complete,
@@ -41,6 +42,10 @@ const {
Uint8Array,
Promise,
} = primordials;
+const {
+ getAsyncContext,
+ setAsyncContext,
+} = core;
import { InnerBody } from "ext:deno_fetch/22_body.js";
import { Event } from "ext:deno_web/02_event.js";
@@ -76,7 +81,11 @@ import {
ReadableStreamPrototype,
resourceForReadableStream,
} from "ext:deno_web/06_streams.js";
-import { listen, listenOptionApiName, TcpConn } from "ext:deno_net/01_net.js";
+import {
+ listen,
+ listenOptionApiName,
+ UpgradedConn,
+} from "ext:deno_net/01_net.js";
import { hasTlsKeyPairOptions, listenTls } from "ext:deno_net/02_tls.js";
import { SymbolAsyncDispose } from "ext:deno_web/00_infra.js";
@@ -189,7 +198,7 @@ class InnerRequest {
const upgradeRid = op_http_upgrade_raw(external);
- const conn = new TcpConn(
+ const conn = new UpgradedConn(
upgradeRid,
underlyingConn?.remoteAddr,
underlyingConn?.localAddr,
@@ -369,6 +378,18 @@ class InnerRequest {
get external() {
return this.#external;
}
+
+ onCancel(callback) {
+ if (this.#external === null) {
+ callback();
+ return;
+ }
+
+ PromisePrototypeThen(
+ op_http_request_on_cancel(this.#external),
+ callback,
+ );
+ }
}
class CallbackContext {
@@ -380,8 +401,10 @@ class CallbackContext {
/** @type {Promise<void> | undefined} */
closing;
listener;
+ asyncContext;
constructor(signal, args, listener) {
+ this.asyncContext = getAsyncContext();
// The abort signal triggers a non-graceful shutdown
signal?.addEventListener(
"abort",
@@ -491,82 +514,89 @@ function fastSyncResponseOrStream(
*/
function mapToCallback(context, callback, onError) {
return async function (req) {
- // Get the response from the user-provided callback. If that fails, use onError. If that fails, return a fallback
- // 500 error.
- let innerRequest;
- let response;
- try {
- innerRequest = new InnerRequest(req, context);
- const request = fromInnerRequest(innerRequest, "immutable");
- innerRequest.request = request;
- response = await callback(
- request,
- new ServeHandlerInfo(innerRequest),
- );
-
- // Throwing Error if the handler return value is not a Response class
- if (!ObjectPrototypeIsPrototypeOf(ResponsePrototype, response)) {
- throw new TypeError(
- "Return value from serve handler must be a response or a promise resolving to a response",
- );
- }
+ const asyncContext = getAsyncContext();
+ setAsyncContext(context.asyncContext);
- if (response.type === "error") {
- throw new TypeError(
- "Return value from serve handler must not be an error response (like Response.error())",
+ try {
+ // Get the response from the user-provided callback. If that fails, use onError. If that fails, return a fallback
+ // 500 error.
+ let innerRequest;
+ let response;
+ try {
+ innerRequest = new InnerRequest(req, context);
+ const request = fromInnerRequest(innerRequest, "immutable");
+ innerRequest.request = request;
+ response = await callback(
+ request,
+ new ServeHandlerInfo(innerRequest),
);
- }
- if (response.bodyUsed) {
- throw new TypeError(
- "The body of the Response returned from the serve handler has already been consumed",
- );
- }
- } catch (error) {
- try {
- response = await onError(error);
+ // Throwing Error if the handler return value is not a Response class
if (!ObjectPrototypeIsPrototypeOf(ResponsePrototype, response)) {
throw new TypeError(
- "Return value from onError handler must be a response or a promise resolving to a response",
+ "Return value from serve handler must be a response or a promise resolving to a response",
+ );
+ }
+
+ if (response.type === "error") {
+ throw new TypeError(
+ "Return value from serve handler must not be an error response (like Response.error())",
+ );
+ }
+
+ if (response.bodyUsed) {
+ throw new TypeError(
+ "The body of the Response returned from the serve handler has already been consumed",
);
}
} catch (error) {
- // deno-lint-ignore no-console
- console.error("Exception in onError while handling exception", error);
- response = internalServerError();
+ try {
+ response = await onError(error);
+ if (!ObjectPrototypeIsPrototypeOf(ResponsePrototype, response)) {
+ throw new TypeError(
+ "Return value from onError handler must be a response or a promise resolving to a response",
+ );
+ }
+ } catch (error) {
+ // deno-lint-ignore no-console
+ console.error("Exception in onError while handling exception", error);
+ response = internalServerError();
+ }
}
- }
- const inner = toInnerResponse(response);
- if (innerRequest?.[_upgraded]) {
- // We're done here as the connection has been upgraded during the callback and no longer requires servicing.
- if (response !== UPGRADE_RESPONSE_SENTINEL) {
- // deno-lint-ignore no-console
- console.error("Upgrade response was not returned from callback");
- context.close();
+ const inner = toInnerResponse(response);
+ if (innerRequest?.[_upgraded]) {
+ // We're done here as the connection has been upgraded during the callback and no longer requires servicing.
+ if (response !== UPGRADE_RESPONSE_SENTINEL) {
+ // deno-lint-ignore no-console
+ console.error("Upgrade response was not returned from callback");
+ context.close();
+ }
+ innerRequest?.[_upgraded]();
+ return;
}
- innerRequest?.[_upgraded]();
- return;
- }
- // Did everything shut down while we were waiting?
- if (context.closed) {
- // We're shutting down, so this status shouldn't make it back to the client but "Service Unavailable" seems appropriate
- innerRequest?.close();
- op_http_set_promise_complete(req, 503);
- return;
- }
+ // Did everything shut down while we were waiting?
+ if (context.closed) {
+ // We're shutting down, so this status shouldn't make it back to the client but "Service Unavailable" seems appropriate
+ innerRequest?.close();
+ op_http_set_promise_complete(req, 503);
+ return;
+ }
- const status = inner.status;
- const headers = inner.headerList;
- if (headers && headers.length > 0) {
- if (headers.length == 1) {
- op_http_set_response_header(req, headers[0][0], headers[0][1]);
- } else {
- op_http_set_response_headers(req, headers);
+ const status = inner.status;
+ const headers = inner.headerList;
+ if (headers && headers.length > 0) {
+ if (headers.length == 1) {
+ op_http_set_response_header(req, headers[0][0], headers[0][1]);
+ } else {
+ op_http_set_response_headers(req, headers);
+ }
}
- }
- fastSyncResponseOrStream(req, inner.body, status, innerRequest);
+ fastSyncResponseOrStream(req, inner.body, status, innerRequest);
+ } finally {
+ setAsyncContext(asyncContext);
+ }
};
}
diff --git a/ext/http/Cargo.toml b/ext/http/Cargo.toml
index b7637bec3..ed98fe349 100644
--- a/ext/http/Cargo.toml
+++ b/ext/http/Cargo.toml
@@ -2,7 +2,7 @@
[package]
name = "deno_http"
-version = "0.169.0"
+version = "0.175.0"
authors.workspace = true
edition.workspace = true
license.workspace = true
diff --git a/ext/http/fly_accept_encoding.rs b/ext/http/fly_accept_encoding.rs
index 94e336876..4d6fd2231 100644
--- a/ext/http/fly_accept_encoding.rs
+++ b/ext/http/fly_accept_encoding.rs
@@ -119,7 +119,7 @@ fn encodings_iter_inner<'s>(
};
Some(Ok((encoding, qval)))
})
- .map(|r| r?) // flatten Result<Result<...
+ .flatten()
}
#[cfg(test)]
diff --git a/ext/http/http_next.rs b/ext/http/http_next.rs
index efe1b88c9..7dbac6021 100644
--- a/ext/http/http_next.rs
+++ b/ext/http/http_next.rs
@@ -18,8 +18,8 @@ use crate::service::HttpServerState;
use crate::service::SignallingRc;
use crate::websocket_upgrade::WebSocketUpgrade;
use crate::LocalExecutor;
+use crate::Options;
use cache_control::CacheControl;
-use deno_core::error::AnyError;
use deno_core::external;
use deno_core::futures::future::poll_fn;
use deno_core::futures::TryFutureExt;
@@ -146,12 +146,32 @@ macro_rules! clone_external {
}};
}
+#[derive(Debug, thiserror::Error)]
+pub enum HttpNextError {
+ #[error(transparent)]
+ Resource(deno_core::error::AnyError),
+ #[error("{0}")]
+ Io(#[from] io::Error),
+ #[error(transparent)]
+ WebSocketUpgrade(crate::websocket_upgrade::WebSocketUpgradeError),
+ #[error("{0}")]
+ Hyper(#[from] hyper::Error),
+ #[error(transparent)]
+ JoinError(#[from] tokio::task::JoinError),
+ #[error(transparent)]
+ Canceled(#[from] deno_core::Canceled),
+ #[error(transparent)]
+ HttpPropertyExtractor(deno_core::error::AnyError),
+ #[error(transparent)]
+ UpgradeUnavailable(#[from] crate::service::UpgradeUnavailableError),
+}
+
#[op2(fast)]
#[smi]
pub fn op_http_upgrade_raw(
state: &mut OpState,
external: *const c_void,
-) -> Result<ResourceId, AnyError> {
+) -> Result<ResourceId, HttpNextError> {
// SAFETY: external is deleted before calling this op.
let http = unsafe { take_external!(external, "op_http_upgrade_raw") };
@@ -177,7 +197,7 @@ pub fn op_http_upgrade_raw(
upgraded.write_all(&bytes).await?;
break upgraded;
}
- Err(err) => return Err(err),
+ Err(err) => return Err(HttpNextError::WebSocketUpgrade(err)),
}
};
@@ -193,7 +213,7 @@ pub fn op_http_upgrade_raw(
}
read_tx.write_all(&buf[..read]).await?;
}
- Ok::<_, AnyError>(())
+ Ok::<_, HttpNextError>(())
});
spawn(async move {
let mut buf = [0; 1024];
@@ -204,7 +224,7 @@ pub fn op_http_upgrade_raw(
}
upgraded_tx.write_all(&buf[..read]).await?;
}
- Ok::<_, AnyError>(())
+ Ok::<_, HttpNextError>(())
});
Ok(())
@@ -223,7 +243,7 @@ pub async fn op_http_upgrade_websocket_next(
state: Rc<RefCell<OpState>>,
external: *const c_void,
#[serde] headers: Vec<(ByteString, ByteString)>,
-) -> Result<ResourceId, AnyError> {
+) -> Result<ResourceId, HttpNextError> {
let http =
// SAFETY: external is deleted before calling this op.
unsafe { take_external!(external, "op_http_upgrade_websocket_next") };
@@ -246,7 +266,11 @@ pub async fn op_http_upgrade_websocket_next(
// Stage 3: take the extracted raw network stream and upgrade it to a websocket, then return it
let (stream, bytes) = extract_network_stream(upgraded);
- ws_create_server_stream(&mut state.borrow_mut(), stream, bytes)
+ Ok(ws_create_server_stream(
+ &mut state.borrow_mut(),
+ stream,
+ bytes,
+ ))
}
#[op2(fast)]
@@ -296,7 +320,7 @@ where
let authority: v8::Local<v8::Value> = match request_properties.authority {
Some(authority) => v8::String::new_from_utf8(
scope,
- authority.as_ref(),
+ authority.as_bytes(),
v8::NewStringType::Normal,
)
.unwrap()
@@ -305,15 +329,25 @@ where
};
// Only extract the path part - we handle authority elsewhere
- let path = match &request_parts.uri.path_and_query() {
- Some(path_and_query) => path_and_query.to_string(),
- None => "".to_owned(),
+ let path = match request_parts.uri.path_and_query() {
+ Some(path_and_query) => {
+ let path = path_and_query.as_str();
+ if matches!(path.as_bytes().first(), Some(b'/' | b'*')) {
+ Cow::Borrowed(path)
+ } else {
+ Cow::Owned(format!("/{}", path))
+ }
+ }
+ None => Cow::Borrowed(""),
};
- let path: v8::Local<v8::Value> =
- v8::String::new_from_utf8(scope, path.as_ref(), v8::NewStringType::Normal)
- .unwrap()
- .into();
+ let path: v8::Local<v8::Value> = v8::String::new_from_utf8(
+ scope,
+ path.as_bytes(),
+ v8::NewStringType::Normal,
+ )
+ .unwrap()
+ .into();
let peer_address: v8::Local<v8::Value> = v8::String::new_from_utf8(
scope,
@@ -531,6 +565,7 @@ fn is_request_compressible(
match accept_encoding.to_str() {
// Firefox and Chrome send this -- no need to parse
Ok("gzip, deflate, br") => return Compression::Brotli,
+ Ok("gzip, deflate, br, zstd") => return Compression::Brotli,
Ok("gzip") => return Compression::GZip,
Ok("br") => return Compression::Brotli,
_ => (),
@@ -667,6 +702,27 @@ fn set_response(
http.complete();
}
+#[op2(fast)]
+pub fn op_http_get_request_cancelled(external: *const c_void) -> bool {
+ let http =
+ // SAFETY: op is called with external.
+ unsafe { clone_external!(external, "op_http_get_request_cancelled") };
+ http.cancelled()
+}
+
+#[op2(async)]
+pub async fn op_http_request_on_cancel(external: *const c_void) {
+ let http =
+ // SAFETY: op is called with external.
+ unsafe { clone_external!(external, "op_http_request_on_cancel") };
+ let (tx, rx) = tokio::sync::oneshot::channel();
+
+ http.on_cancel(tx);
+ drop(http);
+
+ rx.await.ok();
+}
+
/// Returned promise resolves when body streaming finishes.
/// Call [`op_http_close_after_finish`] when done with the external.
#[op2(async)]
@@ -676,7 +732,7 @@ pub async fn op_http_set_response_body_resource(
#[smi] stream_rid: ResourceId,
auto_close: bool,
status: u16,
-) -> Result<bool, AnyError> {
+) -> Result<bool, HttpNextError> {
let http =
// SAFETY: op is called with external.
unsafe { clone_external!(external, "op_http_set_response_body_resource") };
@@ -691,9 +747,15 @@ pub async fn op_http_set_response_body_resource(
let resource = {
let mut state = state.borrow_mut();
if auto_close {
- state.resource_table.take_any(stream_rid)?
+ state
+ .resource_table
+ .take_any(stream_rid)
+ .map_err(HttpNextError::Resource)?
} else {
- state.resource_table.get_any(stream_rid)?
+ state
+ .resource_table
+ .get_any(stream_rid)
+ .map_err(HttpNextError::Resource)?
}
};
@@ -760,10 +822,16 @@ fn serve_http11_unconditional(
io: impl HttpServeStream,
svc: impl HttpService<Incoming, ResBody = HttpRecordResponse> + 'static,
cancel: Rc<CancelHandle>,
+ http1_builder_hook: Option<fn(http1::Builder) -> http1::Builder>,
) -> impl Future<Output = Result<(), hyper::Error>> + 'static {
- let conn = http1::Builder::new()
- .keep_alive(true)
- .writev(*USE_WRITEV)
+ let mut builder = http1::Builder::new();
+ builder.keep_alive(true).writev(*USE_WRITEV);
+
+ if let Some(http1_builder_hook) = http1_builder_hook {
+ builder = http1_builder_hook(builder);
+ }
+
+ let conn = builder
.serve_connection(TokioIo::new(io), svc)
.with_upgrades();
@@ -782,9 +850,17 @@ fn serve_http2_unconditional(
io: impl HttpServeStream,
svc: impl HttpService<Incoming, ResBody = HttpRecordResponse> + 'static,
cancel: Rc<CancelHandle>,
+ http2_builder_hook: Option<
+ fn(http2::Builder<LocalExecutor>) -> http2::Builder<LocalExecutor>,
+ >,
) -> impl Future<Output = Result<(), hyper::Error>> + 'static {
- let conn =
- http2::Builder::new(LocalExecutor).serve_connection(TokioIo::new(io), svc);
+ let mut builder = http2::Builder::new(LocalExecutor);
+
+ if let Some(http2_builder_hook) = http2_builder_hook {
+ builder = http2_builder_hook(builder);
+ }
+
+ let conn = builder.serve_connection(TokioIo::new(io), svc);
async {
match conn.or_abort(cancel).await {
Err(mut conn) => {
@@ -800,17 +876,18 @@ async fn serve_http2_autodetect(
io: impl HttpServeStream,
svc: impl HttpService<Incoming, ResBody = HttpRecordResponse> + 'static,
cancel: Rc<CancelHandle>,
-) -> Result<(), AnyError> {
+ options: Options,
+) -> Result<(), HttpNextError> {
let prefix = NetworkStreamPrefixCheck::new(io, HTTP2_PREFIX);
let (matches, io) = prefix.match_prefix().await?;
if matches {
- serve_http2_unconditional(io, svc, cancel)
+ serve_http2_unconditional(io, svc, cancel, options.http2_builder_hook)
.await
- .map_err(|e| e.into())
+ .map_err(HttpNextError::Hyper)
} else {
- serve_http11_unconditional(io, svc, cancel)
+ serve_http11_unconditional(io, svc, cancel, options.http1_builder_hook)
.await
- .map_err(|e| e.into())
+ .map_err(HttpNextError::Hyper)
}
}
@@ -819,7 +896,8 @@ fn serve_https(
request_info: HttpConnectionProperties,
lifetime: HttpLifetime,
tx: tokio::sync::mpsc::Sender<Rc<HttpRecord>>,
-) -> JoinHandle<Result<(), AnyError>> {
+ options: Options,
+) -> JoinHandle<Result<(), HttpNextError>> {
let HttpLifetime {
server_state,
connection_cancel_handle,
@@ -830,21 +908,31 @@ fn serve_https(
handle_request(req, request_info.clone(), server_state.clone(), tx.clone())
});
spawn(
- async {
+ async move {
let handshake = io.handshake().await?;
// If the client specifically negotiates a protocol, we will use it. If not, we'll auto-detect
// based on the prefix bytes
let handshake = handshake.alpn;
if Some(TLS_ALPN_HTTP_2) == handshake.as_deref() {
- serve_http2_unconditional(io, svc, listen_cancel_handle)
- .await
- .map_err(|e| e.into())
+ serve_http2_unconditional(
+ io,
+ svc,
+ listen_cancel_handle,
+ options.http2_builder_hook,
+ )
+ .await
+ .map_err(HttpNextError::Hyper)
} else if Some(TLS_ALPN_HTTP_11) == handshake.as_deref() {
- serve_http11_unconditional(io, svc, listen_cancel_handle)
- .await
- .map_err(|e| e.into())
+ serve_http11_unconditional(
+ io,
+ svc,
+ listen_cancel_handle,
+ options.http1_builder_hook,
+ )
+ .await
+ .map_err(HttpNextError::Hyper)
} else {
- serve_http2_autodetect(io, svc, listen_cancel_handle).await
+ serve_http2_autodetect(io, svc, listen_cancel_handle, options).await
}
}
.try_or_cancel(connection_cancel_handle),
@@ -856,7 +944,8 @@ fn serve_http(
request_info: HttpConnectionProperties,
lifetime: HttpLifetime,
tx: tokio::sync::mpsc::Sender<Rc<HttpRecord>>,
-) -> JoinHandle<Result<(), AnyError>> {
+ options: Options,
+) -> JoinHandle<Result<(), HttpNextError>> {
let HttpLifetime {
server_state,
connection_cancel_handle,
@@ -867,7 +956,7 @@ fn serve_http(
handle_request(req, request_info.clone(), server_state.clone(), tx.clone())
});
spawn(
- serve_http2_autodetect(io, svc, listen_cancel_handle)
+ serve_http2_autodetect(io, svc, listen_cancel_handle, options)
.try_or_cancel(connection_cancel_handle),
)
}
@@ -877,7 +966,8 @@ fn serve_http_on<HTTP>(
listen_properties: &HttpListenProperties,
lifetime: HttpLifetime,
tx: tokio::sync::mpsc::Sender<Rc<HttpRecord>>,
-) -> JoinHandle<Result<(), AnyError>>
+ options: Options,
+) -> JoinHandle<Result<(), HttpNextError>>
where
HTTP: HttpPropertyExtractor,
{
@@ -888,14 +978,14 @@ where
match network_stream {
NetworkStream::Tcp(conn) => {
- serve_http(conn, connection_properties, lifetime, tx)
+ serve_http(conn, connection_properties, lifetime, tx, options)
}
NetworkStream::Tls(conn) => {
- serve_https(conn, connection_properties, lifetime, tx)
+ serve_https(conn, connection_properties, lifetime, tx, options)
}
#[cfg(unix)]
NetworkStream::Unix(conn) => {
- serve_http(conn, connection_properties, lifetime, tx)
+ serve_http(conn, connection_properties, lifetime, tx, options)
}
}
}
@@ -908,7 +998,7 @@ struct HttpLifetime {
}
struct HttpJoinHandle {
- join_handle: AsyncRefCell<Option<JoinHandle<Result<(), AnyError>>>>,
+ join_handle: AsyncRefCell<Option<JoinHandle<Result<(), HttpNextError>>>>,
connection_cancel_handle: Rc<CancelHandle>,
listen_cancel_handle: Rc<CancelHandle>,
rx: AsyncRefCell<tokio::sync::mpsc::Receiver<Rc<HttpRecord>>>,
@@ -968,12 +1058,13 @@ impl Drop for HttpJoinHandle {
pub fn op_http_serve<HTTP>(
state: Rc<RefCell<OpState>>,
#[smi] listener_rid: ResourceId,
-) -> Result<(ResourceId, &'static str, String), AnyError>
+) -> Result<(ResourceId, &'static str, String), HttpNextError>
where
HTTP: HttpPropertyExtractor,
{
let listener =
- HTTP::get_listener_for_rid(&mut state.borrow_mut(), listener_rid)?;
+ HTTP::get_listener_for_rid(&mut state.borrow_mut(), listener_rid)
+ .map_err(HttpNextError::Resource)?;
let listen_properties = HTTP::listen_properties_from_listener(&listener)?;
@@ -983,21 +1074,28 @@ where
let lifetime = resource.lifetime();
+ let options = {
+ let state = state.borrow();
+ *state.borrow::<Options>()
+ };
+
let listen_properties_clone: HttpListenProperties = listen_properties.clone();
let handle = spawn(async move {
loop {
let conn = HTTP::accept_connection_from_listener(&listener)
.try_or_cancel(listen_cancel_clone.clone())
- .await?;
+ .await
+ .map_err(HttpNextError::HttpPropertyExtractor)?;
serve_http_on::<HTTP>(
conn,
&listen_properties_clone,
lifetime.clone(),
tx.clone(),
+ options,
);
}
#[allow(unreachable_code)]
- Ok::<_, AnyError>(())
+ Ok::<_, HttpNextError>(())
});
// Set the handle after we start the future
@@ -1017,25 +1115,31 @@ where
pub fn op_http_serve_on<HTTP>(
state: Rc<RefCell<OpState>>,
#[smi] connection_rid: ResourceId,
-) -> Result<(ResourceId, &'static str, String), AnyError>
+) -> Result<(ResourceId, &'static str, String), HttpNextError>
where
HTTP: HttpPropertyExtractor,
{
let connection =
- HTTP::get_connection_for_rid(&mut state.borrow_mut(), connection_rid)?;
+ HTTP::get_connection_for_rid(&mut state.borrow_mut(), connection_rid)
+ .map_err(HttpNextError::Resource)?;
let listen_properties = HTTP::listen_properties_from_connection(&connection)?;
let (tx, rx) = tokio::sync::mpsc::channel(10);
let resource: Rc<HttpJoinHandle> = Rc::new(HttpJoinHandle::new(rx));
- let handle: JoinHandle<Result<(), deno_core::anyhow::Error>> =
- serve_http_on::<HTTP>(
- connection,
- &listen_properties,
- resource.lifetime(),
- tx,
- );
+ let options = {
+ let state = state.borrow();
+ *state.borrow::<Options>()
+ };
+
+ let handle = serve_http_on::<HTTP>(
+ connection,
+ &listen_properties,
+ resource.lifetime(),
+ tx,
+ options,
+ );
// Set the handle after we start the future
*RcRef::map(&resource, |this| &this.join_handle)
@@ -1081,12 +1185,13 @@ pub fn op_http_try_wait(
pub async fn op_http_wait(
state: Rc<RefCell<OpState>>,
#[smi] rid: ResourceId,
-) -> Result<*const c_void, AnyError> {
+) -> Result<*const c_void, HttpNextError> {
// We will get the join handle initially, as we might be consuming requests still
let join_handle = state
.borrow_mut()
.resource_table
- .get::<HttpJoinHandle>(rid)?;
+ .get::<HttpJoinHandle>(rid)
+ .map_err(HttpNextError::Resource)?;
let cancel = join_handle.listen_cancel_handle();
let next = async {
@@ -1113,13 +1218,12 @@ pub async fn op_http_wait(
// Filter out shutdown (ENOTCONN) errors
if let Err(err) = res {
- if let Some(err) = err.source() {
- if let Some(err) = err.downcast_ref::<io::Error>() {
- if err.kind() == io::ErrorKind::NotConnected {
- return Ok(null());
- }
+ if let HttpNextError::Io(err) = &err {
+ if err.kind() == io::ErrorKind::NotConnected {
+ return Ok(null());
}
}
+
return Err(err);
}
@@ -1132,7 +1236,7 @@ pub fn op_http_cancel(
state: &mut OpState,
#[smi] rid: ResourceId,
graceful: bool,
-) -> Result<(), AnyError> {
+) -> Result<(), deno_core::error::AnyError> {
let join_handle = state.resource_table.get::<HttpJoinHandle>(rid)?;
if graceful {
@@ -1152,11 +1256,12 @@ pub async fn op_http_close(
state: Rc<RefCell<OpState>>,
#[smi] rid: ResourceId,
graceful: bool,
-) -> Result<(), AnyError> {
+) -> Result<(), HttpNextError> {
let join_handle = state
.borrow_mut()
.resource_table
- .take::<HttpJoinHandle>(rid)?;
+ .take::<HttpJoinHandle>(rid)
+ .map_err(HttpNextError::Resource)?;
if graceful {
http_general_trace!("graceful shutdown");
@@ -1202,23 +1307,26 @@ impl UpgradeStream {
}
}
- async fn read(self: Rc<Self>, buf: &mut [u8]) -> Result<usize, AnyError> {
+ async fn read(
+ self: Rc<Self>,
+ buf: &mut [u8],
+ ) -> Result<usize, std::io::Error> {
let cancel_handle = RcRef::map(self.clone(), |this| &this.cancel_handle);
async {
let read = RcRef::map(self, |this| &this.read);
let mut read = read.borrow_mut().await;
- Ok(Pin::new(&mut *read).read(buf).await?)
+ Pin::new(&mut *read).read(buf).await
}
.try_or_cancel(cancel_handle)
.await
}
- async fn write(self: Rc<Self>, buf: &[u8]) -> Result<usize, AnyError> {
+ async fn write(self: Rc<Self>, buf: &[u8]) -> Result<usize, std::io::Error> {
let cancel_handle = RcRef::map(self.clone(), |this| &this.cancel_handle);
async {
let write = RcRef::map(self, |this| &this.write);
let mut write = write.borrow_mut().await;
- Ok(Pin::new(&mut *write).write(buf).await?)
+ Pin::new(&mut *write).write(buf).await
}
.try_or_cancel(cancel_handle)
.await
@@ -1228,7 +1336,7 @@ impl UpgradeStream {
self: Rc<Self>,
buf1: &[u8],
buf2: &[u8],
- ) -> Result<usize, AnyError> {
+ ) -> Result<usize, std::io::Error> {
let mut wr = RcRef::map(self, |r| &r.write).borrow_mut().await;
let total = buf1.len() + buf2.len();
@@ -1281,9 +1389,12 @@ pub async fn op_raw_write_vectored(
#[smi] rid: ResourceId,
#[buffer] buf1: JsBuffer,
#[buffer] buf2: JsBuffer,
-) -> Result<usize, AnyError> {
- let resource: Rc<UpgradeStream> =
- state.borrow().resource_table.get::<UpgradeStream>(rid)?;
+) -> Result<usize, HttpNextError> {
+ let resource: Rc<UpgradeStream> = state
+ .borrow()
+ .resource_table
+ .get::<UpgradeStream>(rid)
+ .map_err(HttpNextError::Resource)?;
let nwritten = resource.write_vectored(&buf1, &buf2).await?;
Ok(nwritten)
}
diff --git a/ext/http/lib.rs b/ext/http/lib.rs
index 934f8a002..39b0bbc2a 100644
--- a/ext/http/lib.rs
+++ b/ext/http/lib.rs
@@ -6,8 +6,6 @@ use async_compression::Level;
use base64::prelude::BASE64_STANDARD;
use base64::Engine;
use cache_control::CacheControl;
-use deno_core::error::custom_error;
-use deno_core::error::AnyError;
use deno_core::futures::channel::mpsc;
use deno_core::futures::channel::oneshot;
use deno_core::futures::future::pending;
@@ -41,6 +39,8 @@ use deno_net::raw::NetworkStream;
use deno_websocket::ws_create_server_stream;
use flate2::write::GzEncoder;
use flate2::Compression;
+use hyper::server::conn::http1;
+use hyper::server::conn::http2;
use hyper_util::rt::TokioIo;
use hyper_v014::body::Bytes;
use hyper_v014::body::HttpBody;
@@ -89,11 +89,33 @@ mod service;
mod websocket_upgrade;
use fly_accept_encoding::Encoding;
+pub use http_next::HttpNextError;
pub use request_properties::DefaultHttpPropertyExtractor;
pub use request_properties::HttpConnectionProperties;
pub use request_properties::HttpListenProperties;
pub use request_properties::HttpPropertyExtractor;
pub use request_properties::HttpRequestProperties;
+pub use service::UpgradeUnavailableError;
+pub use websocket_upgrade::WebSocketUpgradeError;
+
+#[derive(Debug, Default, Clone, Copy)]
+pub struct Options {
+ /// By passing a hook function, the caller can customize various configuration
+ /// options for the HTTP/2 server.
+ /// See [`http2::Builder`] for what parameters can be customized.
+ ///
+ /// If `None`, the default configuration provided by hyper will be used. Note
+ /// that the default configuration is subject to change in future versions.
+ pub http2_builder_hook:
+ Option<fn(http2::Builder<LocalExecutor>) -> http2::Builder<LocalExecutor>>,
+ /// By passing a hook function, the caller can customize various configuration
+ /// options for the HTTP/1 server.
+ /// See [`http1::Builder`] for what parameters can be customized.
+ ///
+ /// If `None`, the default configuration provided by hyper will be used. Note
+ /// that the default configuration is subject to change in future versions.
+ pub http1_builder_hook: Option<fn(http1::Builder) -> http1::Builder>,
+}
deno_core::extension!(
deno_http,
@@ -111,7 +133,9 @@ deno_core::extension!(
http_next::op_http_close_after_finish,
http_next::op_http_get_request_header,
http_next::op_http_get_request_headers,
+ http_next::op_http_request_on_cancel,
http_next::op_http_get_request_method_and_url<HTTP>,
+ http_next::op_http_get_request_cancelled,
http_next::op_http_read_request_body,
http_next::op_http_serve_on<HTTP>,
http_next::op_http_serve<HTTP>,
@@ -132,8 +156,46 @@ deno_core::extension!(
http_next::op_http_cancel,
],
esm = ["00_serve.ts", "01_http.js", "02_websocket.ts"],
+ options = {
+ options: Options,
+ },
+ state = |state, options| {
+ state.put::<Options>(options.options);
+ }
);
+#[derive(Debug, thiserror::Error)]
+pub enum HttpError {
+ #[error(transparent)]
+ Resource(deno_core::error::AnyError),
+ #[error(transparent)]
+ Canceled(#[from] deno_core::Canceled),
+ #[error("{0}")]
+ HyperV014(#[source] Arc<hyper_v014::Error>),
+ #[error("{0}")]
+ InvalidHeaderName(#[from] hyper_v014::header::InvalidHeaderName),
+ #[error("{0}")]
+ InvalidHeaderValue(#[from] hyper_v014::header::InvalidHeaderValue),
+ #[error("{0}")]
+ Http(#[from] hyper_v014::http::Error),
+ #[error("response headers already sent")]
+ ResponseHeadersAlreadySent,
+ #[error("connection closed while sending response")]
+ ConnectionClosedWhileSendingResponse,
+ #[error("already in use")]
+ AlreadyInUse,
+ #[error("{0}")]
+ Io(#[from] std::io::Error),
+ #[error("no response headers")]
+ NoResponseHeaders,
+ #[error("response already completed")]
+ ResponseAlreadyCompleted,
+ #[error("cannot upgrade because request body was used")]
+ UpgradeBodyUsed,
+ #[error(transparent)]
+ Other(deno_core::error::AnyError),
+}
+
pub enum HttpSocketAddr {
IpSocket(std::net::SocketAddr),
#[cfg(unix)]
@@ -216,7 +278,7 @@ impl HttpConnResource {
String,
String,
)>,
- AnyError,
+ HttpError,
> {
let fut = async {
let (request_tx, request_rx) = oneshot::channel();
@@ -259,8 +321,8 @@ impl HttpConnResource {
}
/// A future that completes when this HTTP connection is closed or errors.
- async fn closed(&self) -> Result<(), AnyError> {
- self.closed_fut.clone().map_err(AnyError::from).await
+ async fn closed(&self) -> Result<(), HttpError> {
+ self.closed_fut.clone().map_err(HttpError::HyperV014).await
}
}
@@ -280,14 +342,13 @@ pub fn http_create_conn_resource<S, A>(
io: S,
addr: A,
scheme: &'static str,
-) -> Result<ResourceId, AnyError>
+) -> ResourceId
where
S: AsyncRead + AsyncWrite + Unpin + Send + 'static,
A: Into<HttpSocketAddr>,
{
let conn = HttpConnResource::new(io, scheme, addr.into());
- let rid = state.resource_table.add(conn);
- Ok(rid)
+ state.resource_table.add(conn)
}
/// An object that implements the `hyper::Service` trait, through which Hyper
@@ -423,7 +484,9 @@ impl Resource for HttpStreamReadResource {
// safely call `await` on it without creating a race condition.
Some(_) => match body.as_mut().next().await.unwrap() {
Ok(chunk) => assert!(chunk.is_empty()),
- Err(err) => break Err(AnyError::from(err)),
+ Err(err) => {
+ break Err(HttpError::HyperV014(Arc::new(err)).into())
+ }
},
None => break Ok(BufView::empty()),
}
@@ -545,8 +608,12 @@ struct NextRequestResponse(
async fn op_http_accept(
state: Rc<RefCell<OpState>>,
#[smi] rid: ResourceId,
-) -> Result<Option<NextRequestResponse>, AnyError> {
- let conn = state.borrow().resource_table.get::<HttpConnResource>(rid)?;
+) -> Result<Option<NextRequestResponse>, HttpError> {
+ let conn = state
+ .borrow()
+ .resource_table
+ .get::<HttpConnResource>(rid)
+ .map_err(HttpError::Resource)?;
match conn.accept().await {
Ok(Some((read_stream, write_stream, method, url))) => {
@@ -657,11 +724,12 @@ async fn op_http_write_headers(
#[smi] status: u16,
#[serde] headers: Vec<(ByteString, ByteString)>,
#[serde] data: Option<StringOrBuffer>,
-) -> Result<(), AnyError> {
+) -> Result<(), HttpError> {
let stream = state
.borrow_mut()
.resource_table
- .get::<HttpStreamWriteResource>(rid)?;
+ .get::<HttpStreamWriteResource>(rid)
+ .map_err(HttpError::Resource)?;
// Track supported encoding
let encoding = stream.accept_encoding;
@@ -708,14 +776,14 @@ async fn op_http_write_headers(
let mut old_wr = RcRef::map(&stream, |r| &r.wr).borrow_mut().await;
let response_tx = match replace(&mut *old_wr, new_wr) {
HttpResponseWriter::Headers(response_tx) => response_tx,
- _ => return Err(http_error("response headers already sent")),
+ _ => return Err(HttpError::ResponseHeadersAlreadySent),
};
match response_tx.send(body) {
Ok(_) => Ok(()),
Err(_) => {
stream.conn.closed().await?;
- Err(http_error("connection closed while sending response"))
+ Err(HttpError::ConnectionClosedWhileSendingResponse)
}
}
}
@@ -725,11 +793,14 @@ async fn op_http_write_headers(
fn op_http_headers(
state: &mut OpState,
#[smi] rid: u32,
-) -> Result<Vec<(ByteString, ByteString)>, AnyError> {
- let stream = state.resource_table.get::<HttpStreamReadResource>(rid)?;
+) -> Result<Vec<(ByteString, ByteString)>, HttpError> {
+ let stream = state
+ .resource_table
+ .get::<HttpStreamReadResource>(rid)
+ .map_err(HttpError::Resource)?;
let rd = RcRef::map(&stream, |r| &r.rd)
.try_borrow()
- .ok_or_else(|| http_error("already in use"))?;
+ .ok_or(HttpError::AlreadyInUse)?;
match &*rd {
HttpRequestReader::Headers(request) => Ok(req_headers(request.headers())),
HttpRequestReader::Body(headers, _) => Ok(req_headers(headers)),
@@ -741,7 +812,7 @@ fn http_response(
data: Option<StringOrBuffer>,
compressing: bool,
encoding: Encoding,
-) -> Result<(HttpResponseWriter, hyper_v014::Body), AnyError> {
+) -> Result<(HttpResponseWriter, hyper_v014::Body), HttpError> {
// Gzip, after level 1, doesn't produce significant size difference.
// This default matches nginx default gzip compression level (1):
// https://nginx.org/en/docs/http/ngx_http_gzip_module.html#gzip_comp_level
@@ -878,25 +949,34 @@ async fn op_http_write_resource(
state: Rc<RefCell<OpState>>,
#[smi] rid: ResourceId,
#[smi] stream: ResourceId,
-) -> Result<(), AnyError> {
+) -> Result<(), HttpError> {
let http_stream = state
.borrow()
.resource_table
- .get::<HttpStreamWriteResource>(rid)?;
+ .get::<HttpStreamWriteResource>(rid)
+ .map_err(HttpError::Resource)?;
let mut wr = RcRef::map(&http_stream, |r| &r.wr).borrow_mut().await;
- let resource = state.borrow().resource_table.get_any(stream)?;
+ let resource = state
+ .borrow()
+ .resource_table
+ .get_any(stream)
+ .map_err(HttpError::Resource)?;
loop {
match *wr {
HttpResponseWriter::Headers(_) => {
- return Err(http_error("no response headers"))
+ return Err(HttpError::NoResponseHeaders)
}
HttpResponseWriter::Closed => {
- return Err(http_error("response already completed"))
+ return Err(HttpError::ResponseAlreadyCompleted)
}
_ => {}
};
- let view = resource.clone().read(64 * 1024).await?; // 64KB
+ let view = resource
+ .clone()
+ .read(64 * 1024)
+ .await
+ .map_err(HttpError::Other)?; // 64KB
if view.is_empty() {
break;
}
@@ -937,16 +1017,17 @@ async fn op_http_write(
state: Rc<RefCell<OpState>>,
#[smi] rid: ResourceId,
#[buffer] buf: JsBuffer,
-) -> Result<(), AnyError> {
+) -> Result<(), HttpError> {
let stream = state
.borrow()
.resource_table
- .get::<HttpStreamWriteResource>(rid)?;
+ .get::<HttpStreamWriteResource>(rid)
+ .map_err(HttpError::Resource)?;
let mut wr = RcRef::map(&stream, |r| &r.wr).borrow_mut().await;
match &mut *wr {
- HttpResponseWriter::Headers(_) => Err(http_error("no response headers")),
- HttpResponseWriter::Closed => Err(http_error("response already completed")),
+ HttpResponseWriter::Headers(_) => Err(HttpError::NoResponseHeaders),
+ HttpResponseWriter::Closed => Err(HttpError::ResponseAlreadyCompleted),
HttpResponseWriter::Body { writer, .. } => {
let mut result = writer.write_all(&buf).await;
if result.is_ok() {
@@ -961,7 +1042,7 @@ async fn op_http_write(
stream.conn.closed().await?;
// If there was no connection error, drop body_tx.
*wr = HttpResponseWriter::Closed;
- Err(http_error("response already completed"))
+ Err(HttpError::ResponseAlreadyCompleted)
}
}
}
@@ -975,7 +1056,7 @@ async fn op_http_write(
stream.conn.closed().await?;
// If there was no connection error, drop body_tx.
*wr = HttpResponseWriter::Closed;
- Err(http_error("response already completed"))
+ Err(HttpError::ResponseAlreadyCompleted)
}
}
}
@@ -989,11 +1070,12 @@ async fn op_http_write(
async fn op_http_shutdown(
state: Rc<RefCell<OpState>>,
#[smi] rid: ResourceId,
-) -> Result<(), AnyError> {
+) -> Result<(), HttpError> {
let stream = state
.borrow()
.resource_table
- .get::<HttpStreamWriteResource>(rid)?;
+ .get::<HttpStreamWriteResource>(rid)
+ .map_err(HttpError::Resource)?;
let mut wr = RcRef::map(&stream, |r| &r.wr).borrow_mut().await;
let wr = take(&mut *wr);
match wr {
@@ -1022,14 +1104,12 @@ async fn op_http_shutdown(
#[op2]
#[string]
-fn op_http_websocket_accept_header(
- #[string] key: String,
-) -> Result<String, AnyError> {
+fn op_http_websocket_accept_header(#[string] key: String) -> String {
let digest = ring::digest::digest(
&ring::digest::SHA1_FOR_LEGACY_USE_ONLY,
format!("{key}258EAFA5-E914-47DA-95CA-C5AB0DC85B11").as_bytes(),
);
- Ok(BASE64_STANDARD.encode(digest))
+ BASE64_STANDARD.encode(digest)
}
#[op2(async)]
@@ -1037,30 +1117,34 @@ fn op_http_websocket_accept_header(
async fn op_http_upgrade_websocket(
state: Rc<RefCell<OpState>>,
#[smi] rid: ResourceId,
-) -> Result<ResourceId, AnyError> {
+) -> Result<ResourceId, HttpError> {
let stream = state
.borrow_mut()
.resource_table
- .get::<HttpStreamReadResource>(rid)?;
+ .get::<HttpStreamReadResource>(rid)
+ .map_err(HttpError::Resource)?;
let mut rd = RcRef::map(&stream, |r| &r.rd).borrow_mut().await;
let request = match &mut *rd {
HttpRequestReader::Headers(request) => request,
- _ => {
- return Err(http_error("cannot upgrade because request body was used"))
- }
+ _ => return Err(HttpError::UpgradeBodyUsed),
};
- let (transport, bytes) =
- extract_network_stream(hyper_v014::upgrade::on(request).await?);
- let ws_rid =
- ws_create_server_stream(&mut state.borrow_mut(), transport, bytes)?;
- Ok(ws_rid)
+ let (transport, bytes) = extract_network_stream(
+ hyper_v014::upgrade::on(request)
+ .await
+ .map_err(|err| HttpError::HyperV014(Arc::new(err)))?,
+ );
+ Ok(ws_create_server_stream(
+ &mut state.borrow_mut(),
+ transport,
+ bytes,
+ ))
}
// Needed so hyper can use non Send futures
#[derive(Clone)]
-struct LocalExecutor;
+pub struct LocalExecutor;
impl<Fut> hyper_v014::rt::Executor<Fut> for LocalExecutor
where
@@ -1082,10 +1166,6 @@ where
}
}
-fn http_error(message: &'static str) -> AnyError {
- custom_error("Http", message)
-}
-
/// Filters out the ever-surprising 'shutdown ENOTCONN' errors.
fn filter_enotconn(
result: Result<(), hyper_v014::Error>,
diff --git a/ext/http/request_body.rs b/ext/http/request_body.rs
index 45df12457..f1c3f358e 100644
--- a/ext/http/request_body.rs
+++ b/ext/http/request_body.rs
@@ -1,9 +1,9 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
use bytes::Bytes;
-use deno_core::error::AnyError;
use deno_core::futures::stream::Peekable;
use deno_core::futures::Stream;
use deno_core::futures::StreamExt;
+use deno_core::futures::TryFutureExt;
use deno_core::AsyncRefCell;
use deno_core::AsyncResult;
use deno_core::BufView;
@@ -22,7 +22,7 @@ use std::task::Poll;
struct ReadFuture(Incoming);
impl Stream for ReadFuture {
- type Item = Result<Bytes, AnyError>;
+ type Item = Result<Bytes, hyper::Error>;
fn poll_next(
self: Pin<&mut Self>,
@@ -37,13 +37,13 @@ impl Stream for ReadFuture {
if let Ok(data) = frame.into_data() {
// Ensure that we never yield an empty frame
if !data.is_empty() {
- break Poll::Ready(Some(Ok::<_, AnyError>(data)));
+ break Poll::Ready(Some(Ok(data)));
}
}
// Loop again so we don't lose the waker
continue;
}
- Some(Err(e)) => Poll::Ready(Some(Err(e.into()))),
+ Some(Err(e)) => Poll::Ready(Some(Err(e))),
None => Poll::Ready(None),
};
}
@@ -58,7 +58,7 @@ impl HttpRequestBody {
Self(AsyncRefCell::new(ReadFuture(body).peekable()), size_hint)
}
- async fn read(self: Rc<Self>, limit: usize) -> Result<BufView, AnyError> {
+ async fn read(self: Rc<Self>, limit: usize) -> Result<BufView, hyper::Error> {
let peekable = RcRef::map(self, |this| &this.0);
let mut peekable = peekable.borrow_mut().await;
match Pin::new(&mut *peekable).peek_mut().await {
@@ -82,7 +82,7 @@ impl Resource for HttpRequestBody {
}
fn read(self: Rc<Self>, limit: usize) -> AsyncResult<BufView> {
- Box::pin(HttpRequestBody::read(self, limit))
+ Box::pin(HttpRequestBody::read(self, limit).map_err(Into::into))
}
fn size_hint(&self) -> (u64, Option<u64>) {
diff --git a/ext/http/request_properties.rs b/ext/http/request_properties.rs
index 1422c7417..39d35a79f 100644
--- a/ext/http/request_properties.rs
+++ b/ext/http/request_properties.rs
@@ -34,8 +34,8 @@ pub struct HttpConnectionProperties {
pub stream_type: NetworkStreamType,
}
-pub struct HttpRequestProperties {
- pub authority: Option<String>,
+pub struct HttpRequestProperties<'a> {
+ pub authority: Option<Cow<'a, str>>,
}
/// Pluggable trait to determine listen, connection and request properties
@@ -84,11 +84,11 @@ pub trait HttpPropertyExtractor {
) -> NetworkStream;
/// Determines the request properties.
- fn request_properties(
- connection_properties: &HttpConnectionProperties,
- uri: &Uri,
- headers: &HeaderMap,
- ) -> HttpRequestProperties;
+ fn request_properties<'a>(
+ connection_properties: &'a HttpConnectionProperties,
+ uri: &'a Uri,
+ headers: &'a HeaderMap,
+ ) -> HttpRequestProperties<'a>;
}
pub struct DefaultHttpPropertyExtractor {}
@@ -180,18 +180,17 @@ impl HttpPropertyExtractor for DefaultHttpPropertyExtractor {
}
}
- fn request_properties(
- connection_properties: &HttpConnectionProperties,
- uri: &Uri,
- headers: &HeaderMap,
- ) -> HttpRequestProperties {
+ fn request_properties<'a>(
+ connection_properties: &'a HttpConnectionProperties,
+ uri: &'a Uri,
+ headers: &'a HeaderMap,
+ ) -> HttpRequestProperties<'a> {
let authority = req_host(
uri,
headers,
connection_properties.stream_type,
connection_properties.local_port.unwrap_or_default(),
- )
- .map(|s| s.into_owned());
+ );
HttpRequestProperties { authority }
}
diff --git a/ext/http/service.rs b/ext/http/service.rs
index 787e9babf..ce24dea43 100644
--- a/ext/http/service.rs
+++ b/ext/http/service.rs
@@ -2,7 +2,6 @@
use crate::request_properties::HttpConnectionProperties;
use crate::response_body::ResponseBytesInner;
use crate::response_body::ResponseStreamResult;
-use deno_core::error::AnyError;
use deno_core::futures::ready;
use deno_core::BufView;
use deno_core::OpState;
@@ -28,6 +27,7 @@ use std::rc::Rc;
use std::task::Context;
use std::task::Poll;
use std::task::Waker;
+use tokio::sync::oneshot;
pub type Request = hyper::Request<Incoming>;
pub type Response = hyper::Response<HttpRecordResponse>;
@@ -206,8 +206,13 @@ pub(crate) async fn handle_request(
Ok(response)
}
+#[derive(Debug, thiserror::Error)]
+#[error("upgrade unavailable")]
+pub struct UpgradeUnavailableError;
+
struct HttpRecordInner {
server_state: SignallingRc<HttpServerState>,
+ closed_channel: Option<oneshot::Sender<()>>,
request_info: HttpConnectionProperties,
request_parts: http::request::Parts,
request_body: Option<RequestBodyState>,
@@ -273,6 +278,7 @@ impl HttpRecord {
response_body_finished: false,
response_body_waker: None,
trailers: None,
+ closed_channel: None,
been_dropped: false,
finished: false,
needs_close_after_finish: false,
@@ -309,6 +315,10 @@ impl HttpRecord {
RefMut::map(self.self_mut(), |inner| &mut inner.needs_close_after_finish)
}
+ pub fn on_cancel(&self, sender: oneshot::Sender<()>) {
+ self.self_mut().closed_channel = Some(sender);
+ }
+
fn recycle(self: Rc<Self>) {
assert!(
Rc::strong_count(&self) == 1,
@@ -344,14 +354,14 @@ impl HttpRecord {
}
/// Perform the Hyper upgrade on this record.
- pub fn upgrade(&self) -> Result<OnUpgrade, AnyError> {
+ pub fn upgrade(&self) -> Result<OnUpgrade, UpgradeUnavailableError> {
// Manually perform the upgrade. We're peeking into hyper's underlying machinery here a bit
self
.self_mut()
.request_parts
.extensions
.remove::<OnUpgrade>()
- .ok_or_else(|| AnyError::msg("upgrade unavailable"))
+ .ok_or(UpgradeUnavailableError)
}
/// Take the Hyper body from this record.
@@ -387,6 +397,9 @@ impl HttpRecord {
inner.been_dropped = true;
// The request body might include actual resources.
inner.request_body.take();
+ if let Some(closed_channel) = inner.closed_channel.take() {
+ let _ = closed_channel.send(());
+ }
}
/// Complete this record, potentially expunging it if it is fully complete (ie: cancelled as well).
@@ -515,7 +528,7 @@ pub struct HttpRecordResponse(ManuallyDrop<Rc<HttpRecord>>);
impl Body for HttpRecordResponse {
type Data = BufView;
- type Error = AnyError;
+ type Error = deno_core::error::AnyError;
fn poll_frame(
self: Pin<&mut Self>,
@@ -640,7 +653,7 @@ mod tests {
}
#[tokio::test]
- async fn test_handle_request() -> Result<(), AnyError> {
+ async fn test_handle_request() -> Result<(), deno_core::error::AnyError> {
let (tx, mut rx) = tokio::sync::mpsc::channel(10);
let server_state = HttpServerState::new();
let server_state_check = server_state.clone();
diff --git a/ext/http/websocket_upgrade.rs b/ext/http/websocket_upgrade.rs
index 4dead767a..af9504717 100644
--- a/ext/http/websocket_upgrade.rs
+++ b/ext/http/websocket_upgrade.rs
@@ -4,7 +4,6 @@ use std::marker::PhantomData;
use bytes::Bytes;
use bytes::BytesMut;
-use deno_core::error::AnyError;
use httparse::Status;
use hyper::header::HeaderName;
use hyper::header::HeaderValue;
@@ -13,12 +12,30 @@ use memmem::Searcher;
use memmem::TwoWaySearcher;
use once_cell::sync::OnceCell;
-use crate::http_error;
+#[derive(Debug, thiserror::Error)]
+pub enum WebSocketUpgradeError {
+ #[error("invalid headers")]
+ InvalidHeaders,
+ #[error("{0}")]
+ HttpParse(#[from] httparse::Error),
+ #[error("{0}")]
+ Http(#[from] http::Error),
+ #[error("{0}")]
+ Utf8(#[from] std::str::Utf8Error),
+ #[error("{0}")]
+ InvalidHeaderName(#[from] http::header::InvalidHeaderName),
+ #[error("{0}")]
+ InvalidHeaderValue(#[from] http::header::InvalidHeaderValue),
+ #[error("invalid HTTP status line")]
+ InvalidHttpStatusLine,
+ #[error("attempted to write to completed upgrade buffer")]
+ UpgradeBufferAlreadyCompleted,
+}
/// Given a buffer that ends in `\n\n` or `\r\n\r\n`, returns a parsed [`Request<Body>`].
fn parse_response<T: Default>(
header_bytes: &[u8],
-) -> Result<(usize, Response<T>), AnyError> {
+) -> Result<(usize, Response<T>), WebSocketUpgradeError> {
let mut headers = [httparse::EMPTY_HEADER; 16];
let status = httparse::parse_headers(header_bytes, &mut headers)?;
match status {
@@ -32,7 +49,7 @@ fn parse_response<T: Default>(
}
Ok((index, resp))
}
- _ => Err(http_error("invalid headers")),
+ _ => Err(WebSocketUpgradeError::InvalidHeaders),
}
}
@@ -69,11 +86,14 @@ pub struct WebSocketUpgrade<T: Default> {
impl<T: Default> WebSocketUpgrade<T> {
/// Ensures that the status line starts with "HTTP/1.1 101 " which matches all of the node.js
/// WebSocket libraries that are known. We don't care about the trailing status text.
- fn validate_status(&self, status: &[u8]) -> Result<(), AnyError> {
+ fn validate_status(
+ &self,
+ status: &[u8],
+ ) -> Result<(), WebSocketUpgradeError> {
if status.starts_with(b"HTTP/1.1 101 ") {
Ok(())
} else {
- Err(http_error("invalid HTTP status line"))
+ Err(WebSocketUpgradeError::InvalidHttpStatusLine)
}
}
@@ -82,7 +102,7 @@ impl<T: Default> WebSocketUpgrade<T> {
pub fn write(
&mut self,
bytes: &[u8],
- ) -> Result<Option<(Response<T>, Bytes)>, AnyError> {
+ ) -> Result<Option<(Response<T>, Bytes)>, WebSocketUpgradeError> {
use WebSocketUpgradeState::*;
match self.state {
@@ -142,9 +162,7 @@ impl<T: Default> WebSocketUpgrade<T> {
Ok(None)
}
}
- Complete => {
- Err(http_error("attempted to write to completed upgrade buffer"))
- }
+ Complete => Err(WebSocketUpgradeError::UpgradeBufferAlreadyCompleted),
}
}
}
@@ -157,8 +175,8 @@ mod tests {
type ExpectedResponseAndHead = Option<(Response<Body>, &'static [u8])>;
fn assert_response(
- result: Result<Option<(Response<Body>, Bytes)>, AnyError>,
- expected: Result<ExpectedResponseAndHead, &'static str>,
+ result: Result<Option<(Response<Body>, Bytes)>, WebSocketUpgradeError>,
+ expected: Result<ExpectedResponseAndHead, WebSocketUpgradeError>,
chunk_info: Option<(usize, usize)>,
) {
let formatted = format!("{result:?}");
@@ -189,8 +207,8 @@ mod tests {
"Expected Ok(None), was {formatted}",
),
Err(e) => assert_eq!(
- e,
- result.err().map(|e| format!("{e:?}")).unwrap_or_default(),
+ format!("{e:?}"),
+ format!("{:?}", result.unwrap_err()),
"Expected error, was {formatted}",
),
}
@@ -198,7 +216,7 @@ mod tests {
fn validate_upgrade_all_at_once(
s: &str,
- expected: Result<ExpectedResponseAndHead, &'static str>,
+ expected: Result<ExpectedResponseAndHead, WebSocketUpgradeError>,
) {
let mut upgrade = WebSocketUpgrade::default();
let res = upgrade.write(s.as_bytes());
@@ -209,7 +227,7 @@ mod tests {
fn validate_upgrade_chunks(
s: &str,
size: usize,
- expected: Result<ExpectedResponseAndHead, &'static str>,
+ expected: Result<ExpectedResponseAndHead, WebSocketUpgradeError>,
) {
let chunk_info = Some((s.as_bytes().len(), size));
let mut upgrade = WebSocketUpgrade::default();
@@ -226,7 +244,7 @@ mod tests {
fn validate_upgrade(
s: &str,
- expected: fn() -> Result<ExpectedResponseAndHead, &'static str>,
+ expected: fn() -> Result<ExpectedResponseAndHead, WebSocketUpgradeError>,
) {
validate_upgrade_all_at_once(s, expected());
validate_upgrade_chunks(s, 1, expected());
@@ -315,7 +333,7 @@ mod tests {
#[test]
fn upgrade_invalid_status() {
validate_upgrade("HTTP/1.1 200 OK\nConnection: Upgrade\n\n", || {
- Err("invalid HTTP status line")
+ Err(WebSocketUpgradeError::InvalidHttpStatusLine)
});
}
@@ -327,7 +345,11 @@ mod tests {
.join("\n");
validate_upgrade(
&format!("HTTP/1.1 101 Switching Protocols\n{headers}\n\n"),
- || Err("too many headers"),
+ || {
+ Err(WebSocketUpgradeError::HttpParse(
+ httparse::Error::TooManyHeaders,
+ ))
+ },
);
}
}
diff --git a/ext/io/Cargo.toml b/ext/io/Cargo.toml
index d45834a8f..6ef049ff9 100644
--- a/ext/io/Cargo.toml
+++ b/ext/io/Cargo.toml
@@ -2,7 +2,7 @@
[package]
name = "deno_io"
-version = "0.81.0"
+version = "0.87.0"
authors.workspace = true
edition.workspace = true
license.workspace = true
diff --git a/ext/io/bi_pipe.rs b/ext/io/bi_pipe.rs
index 402e383ac..3492e2f44 100644
--- a/ext/io/bi_pipe.rs
+++ b/ext/io/bi_pipe.rs
@@ -2,7 +2,6 @@
use std::rc::Rc;
-use deno_core::error::AnyError;
use deno_core::AsyncRefCell;
use deno_core::AsyncResult;
use deno_core::CancelHandle;
@@ -71,13 +70,16 @@ impl BiPipeResource {
pub async fn read(
self: Rc<Self>,
data: &mut [u8],
- ) -> Result<usize, AnyError> {
+ ) -> Result<usize, std::io::Error> {
let mut rd = RcRef::map(&self, |r| &r.read_half).borrow_mut().await;
let cancel_handle = RcRef::map(&self, |r| &r.cancel);
- Ok(rd.read(data).try_or_cancel(cancel_handle).await?)
+ rd.read(data).try_or_cancel(cancel_handle).await
}
- pub async fn write(self: Rc<Self>, data: &[u8]) -> Result<usize, AnyError> {
+ pub async fn write(
+ self: Rc<Self>,
+ data: &[u8],
+ ) -> Result<usize, std::io::Error> {
let mut wr = RcRef::map(self, |r| &r.write_half).borrow_mut().await;
let nwritten = wr.write(data).await?;
wr.flush().await?;
@@ -181,9 +183,10 @@ fn from_raw(
) -> Result<(BiPipeRead, BiPipeWrite), std::io::Error> {
use std::os::fd::FromRawFd;
// Safety: The fd is part of a pair of connected sockets
- let unix_stream = tokio::net::UnixStream::from_std(unsafe {
- std::os::unix::net::UnixStream::from_raw_fd(stream)
- })?;
+ let unix_stream =
+ unsafe { std::os::unix::net::UnixStream::from_raw_fd(stream) };
+ unix_stream.set_nonblocking(true)?;
+ let unix_stream = tokio::net::UnixStream::from_std(unix_stream)?;
let (read, write) = unix_stream.into_split();
Ok((BiPipeRead { inner: read }, BiPipeWrite { inner: write }))
}
@@ -270,15 +273,15 @@ impl_async_write!(for BiPipe -> self.write_end);
/// Creates both sides of a bidirectional pipe, returning the raw
/// handles to the underlying OS resources.
-pub fn bi_pipe_pair_raw() -> Result<(RawBiPipeHandle, RawBiPipeHandle), AnyError>
-{
+pub fn bi_pipe_pair_raw(
+) -> Result<(RawBiPipeHandle, RawBiPipeHandle), std::io::Error> {
#[cfg(unix)]
{
// SockFlag is broken on macOS
// https://github.com/nix-rust/nix/issues/861
let mut fds = [-1, -1];
#[cfg(not(target_os = "macos"))]
- let flags = libc::SOCK_CLOEXEC | libc::SOCK_NONBLOCK;
+ let flags = libc::SOCK_CLOEXEC;
#[cfg(target_os = "macos")]
let flags = 0;
@@ -293,19 +296,19 @@ pub fn bi_pipe_pair_raw() -> Result<(RawBiPipeHandle, RawBiPipeHandle), AnyError
)
};
if ret != 0 {
- return Err(std::io::Error::last_os_error().into());
+ return Err(std::io::Error::last_os_error());
}
if cfg!(target_os = "macos") {
let fcntl = |fd: i32, flag: libc::c_int| -> Result<(), std::io::Error> {
// SAFETY: libc call, fd is valid
- let flags = unsafe { libc::fcntl(fd, libc::F_GETFL) };
+ let flags = unsafe { libc::fcntl(fd, libc::F_GETFD) };
if flags == -1 {
return Err(fail(fds));
}
// SAFETY: libc call, fd is valid
- let ret = unsafe { libc::fcntl(fd, libc::F_SETFL, flags | flag) };
+ let ret = unsafe { libc::fcntl(fd, libc::F_SETFD, flags | flag) };
if ret == -1 {
return Err(fail(fds));
}
@@ -321,13 +324,9 @@ pub fn bi_pipe_pair_raw() -> Result<(RawBiPipeHandle, RawBiPipeHandle), AnyError
std::io::Error::last_os_error()
}
- // SOCK_NONBLOCK is not supported on macOS.
- (fcntl)(fds[0], libc::O_NONBLOCK)?;
- (fcntl)(fds[1], libc::O_NONBLOCK)?;
-
// SOCK_CLOEXEC is not supported on macOS.
- (fcntl)(fds[0], libc::FD_CLOEXEC)?;
- (fcntl)(fds[1], libc::FD_CLOEXEC)?;
+ fcntl(fds[0], libc::FD_CLOEXEC)?;
+ fcntl(fds[1], libc::FD_CLOEXEC)?;
}
let fd1 = fds[0];
@@ -389,7 +388,7 @@ pub fn bi_pipe_pair_raw() -> Result<(RawBiPipeHandle, RawBiPipeHandle), AnyError
continue;
}
- return Err(err.into());
+ return Err(err);
}
break (path, hd1);
@@ -411,7 +410,7 @@ pub fn bi_pipe_pair_raw() -> Result<(RawBiPipeHandle, RawBiPipeHandle), AnyError
0,
);
if hd2 == INVALID_HANDLE_VALUE {
- return Err(io::Error::last_os_error().into());
+ return Err(io::Error::last_os_error());
}
// Will not block because we have create the pair.
@@ -419,7 +418,7 @@ pub fn bi_pipe_pair_raw() -> Result<(RawBiPipeHandle, RawBiPipeHandle), AnyError
let err = std::io::Error::last_os_error();
if err.raw_os_error() != Some(ERROR_PIPE_CONNECTED as i32) {
CloseHandle(hd2);
- return Err(err.into());
+ return Err(err);
}
}
diff --git a/ext/io/fs.rs b/ext/io/fs.rs
index 3798c1429..7ef02315b 100644
--- a/ext/io/fs.rs
+++ b/ext/io/fs.rs
@@ -1,15 +1,12 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
use std::borrow::Cow;
+use std::fmt::Formatter;
use std::io;
use std::rc::Rc;
use std::time::SystemTime;
use std::time::UNIX_EPOCH;
-use deno_core::error::custom_error;
-use deno_core::error::not_supported;
-use deno_core::error::resource_unavailable;
-use deno_core::error::AnyError;
use deno_core::BufMutView;
use deno_core::BufView;
use deno_core::OpState;
@@ -25,6 +22,21 @@ pub enum FsError {
NotCapable(&'static str),
}
+impl std::fmt::Display for FsError {
+ fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+ match self {
+ FsError::Io(err) => std::fmt::Display::fmt(err, f),
+ FsError::FileBusy => f.write_str("file busy"),
+ FsError::NotSupported => f.write_str("not supported"),
+ FsError::NotCapable(err) => {
+ f.write_str(&format!("requires {err} access"))
+ }
+ }
+ }
+}
+
+impl std::error::Error for FsError {}
+
impl FsError {
pub fn kind(&self) -> io::ErrorKind {
match self {
@@ -59,19 +71,6 @@ impl From<io::ErrorKind> for FsError {
}
}
-impl From<FsError> for AnyError {
- fn from(err: FsError) -> Self {
- match err {
- FsError::Io(err) => AnyError::from(err),
- FsError::FileBusy => resource_unavailable(),
- FsError::NotSupported => not_supported(),
- FsError::NotCapable(err) => {
- custom_error("NotCapable", format!("permission denied: {err}"))
- }
- }
- }
-}
-
impl From<JoinError> for FsError {
fn from(err: JoinError) -> Self {
if err.is_cancelled() {
@@ -95,6 +94,7 @@ pub struct FsStat {
pub mtime: Option<u64>,
pub atime: Option<u64>,
pub birthtime: Option<u64>,
+ pub ctime: Option<u64>,
pub dev: u64,
pub ino: u64,
@@ -154,6 +154,16 @@ impl FsStat {
}
}
+ #[inline(always)]
+ fn get_ctime(ctime_or_0: i64) -> Option<u64> {
+ if ctime_or_0 > 0 {
+      // ctime returns seconds since the epoch, but we need milliseconds
+ return Some(ctime_or_0 as u64 * 1000);
+ }
+
+ None
+ }
+
Self {
is_file: metadata.is_file(),
is_directory: metadata.is_dir(),
@@ -163,6 +173,7 @@ impl FsStat {
mtime: to_msec(metadata.modified()),
atime: to_msec(metadata.accessed()),
birthtime: to_msec(metadata.created()),
+ ctime: get_ctime(unix_or_zero!(ctime)),
dev: unix_or_zero!(dev),
ino: unix_or_zero!(ino),
@@ -266,9 +277,9 @@ impl FileResource {
state: &OpState,
rid: ResourceId,
f: F,
- ) -> Result<R, AnyError>
+ ) -> Result<R, deno_core::error::AnyError>
where
- F: FnOnce(Rc<FileResource>) -> Result<R, AnyError>,
+ F: FnOnce(Rc<FileResource>) -> Result<R, deno_core::error::AnyError>,
{
let resource = state.resource_table.get::<FileResource>(rid)?;
f(resource)
@@ -277,7 +288,7 @@ impl FileResource {
pub fn get_file(
state: &OpState,
rid: ResourceId,
- ) -> Result<Rc<dyn File>, AnyError> {
+ ) -> Result<Rc<dyn File>, deno_core::error::AnyError> {
let resource = state.resource_table.get::<FileResource>(rid)?;
Ok(resource.file())
}
@@ -286,9 +297,9 @@ impl FileResource {
state: &OpState,
rid: ResourceId,
f: F,
- ) -> Result<R, AnyError>
+ ) -> Result<R, deno_core::error::AnyError>
where
- F: FnOnce(Rc<dyn File>) -> Result<R, AnyError>,
+ F: FnOnce(Rc<dyn File>) -> Result<R, deno_core::error::AnyError>,
{
Self::with_resource(state, rid, |r| f(r.file.clone()))
}
@@ -303,10 +314,7 @@ impl deno_core::Resource for FileResource {
Cow::Borrowed(&self.name)
}
- fn read(
- self: Rc<Self>,
- limit: usize,
- ) -> deno_core::AsyncResult<deno_core::BufView> {
+ fn read(self: Rc<Self>, limit: usize) -> deno_core::AsyncResult<BufView> {
Box::pin(async move {
self
.file
@@ -319,8 +327,8 @@ impl deno_core::Resource for FileResource {
fn read_byob(
self: Rc<Self>,
- buf: deno_core::BufMutView,
- ) -> deno_core::AsyncResult<(usize, deno_core::BufMutView)> {
+ buf: BufMutView,
+ ) -> deno_core::AsyncResult<(usize, BufMutView)> {
Box::pin(async move {
self
.file
@@ -333,17 +341,14 @@ impl deno_core::Resource for FileResource {
fn write(
self: Rc<Self>,
- buf: deno_core::BufView,
+ buf: BufView,
) -> deno_core::AsyncResult<deno_core::WriteOutcome> {
Box::pin(async move {
self.file.clone().write(buf).await.map_err(|err| err.into())
})
}
- fn write_all(
- self: Rc<Self>,
- buf: deno_core::BufView,
- ) -> deno_core::AsyncResult<()> {
+ fn write_all(self: Rc<Self>, buf: BufView) -> deno_core::AsyncResult<()> {
Box::pin(async move {
self
.file
diff --git a/ext/io/lib.rs b/ext/io/lib.rs
index a07d64ae3..5d183aa46 100644
--- a/ext/io/lib.rs
+++ b/ext/io/lib.rs
@@ -1,6 +1,5 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
-use deno_core::error::AnyError;
use deno_core::op2;
use deno_core::unsync::spawn_blocking;
use deno_core::unsync::TaskQueue;
@@ -48,6 +47,7 @@ use winapi::um::processenv::GetStdHandle;
#[cfg(windows)]
use winapi::um::winbase;
+use deno_core::futures::TryFutureExt;
#[cfg(windows)]
use parking_lot::Condvar;
#[cfg(windows)]
@@ -348,13 +348,13 @@ where
RcRef::map(self, |r| &r.stream).borrow_mut()
}
- async fn write(self: Rc<Self>, data: &[u8]) -> Result<usize, AnyError> {
+ async fn write(self: Rc<Self>, data: &[u8]) -> Result<usize, io::Error> {
let mut stream = self.borrow_mut().await;
let nwritten = stream.write(data).await?;
Ok(nwritten)
}
- async fn shutdown(self: Rc<Self>) -> Result<(), AnyError> {
+ async fn shutdown(self: Rc<Self>) -> Result<(), io::Error> {
let mut stream = self.borrow_mut().await;
stream.shutdown().await?;
Ok(())
@@ -396,7 +396,7 @@ where
self.cancel_handle.cancel()
}
- async fn read(self: Rc<Self>, data: &mut [u8]) -> Result<usize, AnyError> {
+ async fn read(self: Rc<Self>, data: &mut [u8]) -> Result<usize, io::Error> {
let mut rd = self.borrow_mut().await;
let nread = rd.read(data).try_or_cancel(self.cancel_handle()).await?;
Ok(nread)
@@ -417,7 +417,7 @@ impl Resource for ChildStdinResource {
deno_core::impl_writable!();
fn shutdown(self: Rc<Self>) -> AsyncResult<()> {
- Box::pin(self.shutdown())
+ Box::pin(self.shutdown().map_err(|e| e.into()))
}
}
@@ -1010,7 +1010,7 @@ pub fn op_print(
state: &mut OpState,
#[string] msg: &str,
is_err: bool,
-) -> Result<(), AnyError> {
+) -> Result<(), deno_core::error::AnyError> {
let rid = if is_err { 2 } else { 1 };
FileResource::with_file(state, rid, move |file| {
Ok(file.write_all_sync(msg.as_bytes())?)
diff --git a/ext/kv/Cargo.toml b/ext/kv/Cargo.toml
index 14c3514ff..aa7381766 100644
--- a/ext/kv/Cargo.toml
+++ b/ext/kv/Cargo.toml
@@ -2,7 +2,7 @@
[package]
name = "deno_kv"
-version = "0.79.0"
+version = "0.85.0"
authors.workspace = true
edition.workspace = true
license.workspace = true
@@ -17,6 +17,7 @@ path = "lib.rs"
anyhow.workspace = true
async-trait.workspace = true
base64.workspace = true
+boxed_error.workspace = true
bytes.workspace = true
chrono = { workspace = true, features = ["now"] }
deno_core.workspace = true
@@ -36,6 +37,7 @@ prost.workspace = true
rand.workspace = true
rusqlite.workspace = true
serde.workspace = true
+thiserror.workspace = true
url.workspace = true
[build-dependencies]
diff --git a/ext/kv/config.rs b/ext/kv/config.rs
index 6e2e2c3a1..7166bcbcc 100644
--- a/ext/kv/config.rs
+++ b/ext/kv/config.rs
@@ -1,16 +1,17 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
+#[derive(Clone, Copy, Debug)]
pub struct KvConfig {
- pub(crate) max_write_key_size_bytes: usize,
- pub(crate) max_read_key_size_bytes: usize,
- pub(crate) max_value_size_bytes: usize,
- pub(crate) max_read_ranges: usize,
- pub(crate) max_read_entries: usize,
- pub(crate) max_checks: usize,
- pub(crate) max_mutations: usize,
- pub(crate) max_watched_keys: usize,
- pub(crate) max_total_mutation_size_bytes: usize,
- pub(crate) max_total_key_size_bytes: usize,
+ pub max_write_key_size_bytes: usize,
+ pub max_read_key_size_bytes: usize,
+ pub max_value_size_bytes: usize,
+ pub max_read_ranges: usize,
+ pub max_read_entries: usize,
+ pub max_checks: usize,
+ pub max_mutations: usize,
+ pub max_watched_keys: usize,
+ pub max_total_mutation_size_bytes: usize,
+ pub max_total_key_size_bytes: usize,
}
impl KvConfig {
diff --git a/ext/kv/lib.rs b/ext/kv/lib.rs
index 13e4f1662..5392b9721 100644
--- a/ext/kv/lib.rs
+++ b/ext/kv/lib.rs
@@ -12,15 +12,12 @@ use std::num::NonZeroU32;
use std::rc::Rc;
use std::time::Duration;
-use anyhow::bail;
use base64::prelude::BASE64_URL_SAFE;
use base64::Engine;
+use boxed_error::Boxed;
use chrono::DateTime;
use chrono::Utc;
-use deno_core::anyhow::Context;
use deno_core::error::get_custom_error_class;
-use deno_core::error::type_error;
-use deno_core::error::AnyError;
use deno_core::futures::StreamExt;
use deno_core::op2;
use deno_core::serde_v8::AnyValue;
@@ -118,12 +115,75 @@ impl Resource for DatabaseWatcherResource {
}
}
+#[derive(Debug, Boxed)]
+pub struct KvError(pub Box<KvErrorKind>);
+
+#[derive(Debug, thiserror::Error)]
+pub enum KvErrorKind {
+ #[error(transparent)]
+ DatabaseHandler(deno_core::error::AnyError),
+ #[error(transparent)]
+ Resource(deno_core::error::AnyError),
+ #[error("Too many ranges (max {0})")]
+ TooManyRanges(usize),
+ #[error("Too many entries (max {0})")]
+ TooManyEntries(usize),
+ #[error("Too many checks (max {0})")]
+ TooManyChecks(usize),
+ #[error("Too many mutations (max {0})")]
+ TooManyMutations(usize),
+ #[error("Too many keys (max {0})")]
+ TooManyKeys(usize),
+ #[error("limit must be greater than 0")]
+ InvalidLimit,
+ #[error("Invalid boundary key")]
+ InvalidBoundaryKey,
+ #[error("Key too large for read (max {0} bytes)")]
+ KeyTooLargeToRead(usize),
+ #[error("Key too large for write (max {0} bytes)")]
+ KeyTooLargeToWrite(usize),
+ #[error("Total mutation size too large (max {0} bytes)")]
+ TotalMutationTooLarge(usize),
+ #[error("Total key size too large (max {0} bytes)")]
+ TotalKeyTooLarge(usize),
+ #[error(transparent)]
+ Kv(deno_core::error::AnyError),
+ #[error(transparent)]
+ Io(#[from] std::io::Error),
+ #[error("Queue message not found")]
+ QueueMessageNotFound,
+ #[error("Start key is not in the keyspace defined by prefix")]
+ StartKeyNotInKeyspace,
+ #[error("End key is not in the keyspace defined by prefix")]
+ EndKeyNotInKeyspace,
+ #[error("Start key is greater than end key")]
+ StartKeyGreaterThanEndKey,
+ #[error("Invalid check")]
+ InvalidCheck(#[source] KvCheckError),
+ #[error("Invalid mutation")]
+ InvalidMutation(#[source] KvMutationError),
+ #[error("Invalid enqueue")]
+ InvalidEnqueue(#[source] std::io::Error),
+ #[error("key cannot be empty")]
+ EmptyKey, // TypeError
+ #[error("Value too large (max {0} bytes)")]
+ ValueTooLarge(usize), // TypeError
+ #[error("enqueue payload too large (max {0} bytes)")]
+ EnqueuePayloadTooLarge(usize), // TypeError
+ #[error("invalid cursor")]
+ InvalidCursor,
+ #[error("cursor out of bounds")]
+ CursorOutOfBounds,
+ #[error("Invalid range")]
+ InvalidRange,
+}
+
#[op2(async)]
#[smi]
async fn op_kv_database_open<DBH>(
state: Rc<RefCell<OpState>>,
#[string] path: Option<String>,
-) -> Result<ResourceId, AnyError>
+) -> Result<ResourceId, KvError>
where
DBH: DatabaseHandler + 'static,
{
@@ -134,7 +194,10 @@ where
.check_or_exit(UNSTABLE_FEATURE_NAME, "Deno.openKv");
state.borrow::<Rc<DBH>>().clone()
};
- let db = handler.open(state.clone(), path).await?;
+ let db = handler
+ .open(state.clone(), path)
+ .await
+ .map_err(KvErrorKind::DatabaseHandler)?;
let rid = state.borrow_mut().resource_table.add(DatabaseResource {
db,
cancel_handle: CancelHandle::new_rc(),
@@ -184,8 +247,8 @@ enum ToV8Value {
}
impl TryFrom<FromV8Value> for KvValue {
- type Error = AnyError;
- fn try_from(value: FromV8Value) -> Result<Self, AnyError> {
+ type Error = num_bigint::TryFromBigIntError<num_bigint::BigInt>;
+ fn try_from(value: FromV8Value) -> Result<Self, Self::Error> {
Ok(match value {
FromV8Value::V8(buf) => KvValue::V8(buf.to_vec()),
FromV8Value::Bytes(buf) => KvValue::Bytes(buf.to_vec()),
@@ -214,8 +277,8 @@ struct ToV8KvEntry {
}
impl TryFrom<KvEntry> for ToV8KvEntry {
- type Error = AnyError;
- fn try_from(entry: KvEntry) -> Result<Self, AnyError> {
+ type Error = std::io::Error;
+ fn try_from(entry: KvEntry) -> Result<Self, Self::Error> {
Ok(ToV8KvEntry {
key: decode_key(&entry.key)?
.0
@@ -261,14 +324,16 @@ async fn op_kv_snapshot_read<DBH>(
#[smi] rid: ResourceId,
#[serde] ranges: Vec<SnapshotReadRange>,
#[serde] consistency: V8Consistency,
-) -> Result<Vec<Vec<ToV8KvEntry>>, AnyError>
+) -> Result<Vec<Vec<ToV8KvEntry>>, KvError>
where
DBH: DatabaseHandler + 'static,
{
let db = {
let state = state.borrow();
- let resource =
- state.resource_table.get::<DatabaseResource<DBH::DB>>(rid)?;
+ let resource = state
+ .resource_table
+ .get::<DatabaseResource<DBH::DB>>(rid)
+ .map_err(KvErrorKind::Resource)?;
resource.db.clone()
};
@@ -278,10 +343,7 @@ where
};
if ranges.len() > config.max_read_ranges {
- return Err(type_error(format!(
- "Too many ranges (max {})",
- config.max_read_ranges
- )));
+ return Err(KvErrorKind::TooManyRanges(config.max_read_ranges).into_box());
}
let mut total_entries = 0usize;
@@ -300,33 +362,34 @@ where
Ok(ReadRange {
start,
end,
- limit: NonZeroU32::new(limit)
- .with_context(|| "limit must be greater than 0")?,
+ limit: NonZeroU32::new(limit).ok_or(KvErrorKind::InvalidLimit)?,
reverse,
})
})
- .collect::<Result<Vec<_>, AnyError>>()?;
+ .collect::<Result<Vec<_>, KvError>>()?;
if total_entries > config.max_read_entries {
- return Err(type_error(format!(
- "Too many entries (max {})",
- config.max_read_entries
- )));
+ return Err(
+ KvErrorKind::TooManyEntries(config.max_read_entries).into_box(),
+ );
}
let opts = SnapshotReadOptions {
consistency: consistency.into(),
};
- let output_ranges = db.snapshot_read(read_ranges, opts).await?;
+ let output_ranges = db
+ .snapshot_read(read_ranges, opts)
+ .await
+ .map_err(KvErrorKind::Kv)?;
let output_ranges = output_ranges
.into_iter()
.map(|x| {
x.entries
.into_iter()
.map(TryInto::try_into)
- .collect::<Result<Vec<_>, AnyError>>()
+ .collect::<Result<Vec<_>, std::io::Error>>()
})
- .collect::<Result<Vec<_>, AnyError>>()?;
+ .collect::<Result<Vec<_>, std::io::Error>>()?;
Ok(output_ranges)
}
@@ -345,7 +408,7 @@ impl<QMH: QueueMessageHandle + 'static> Resource for QueueMessageResource<QMH> {
async fn op_kv_dequeue_next_message<DBH>(
state: Rc<RefCell<OpState>>,
#[smi] rid: ResourceId,
-) -> Result<Option<(ToJsBuffer, ResourceId)>, AnyError>
+) -> Result<Option<(ToJsBuffer, ResourceId)>, KvError>
where
DBH: DatabaseHandler + 'static,
{
@@ -358,17 +421,19 @@ where
if get_custom_error_class(&err) == Some("BadResource") {
return Ok(None);
} else {
- return Err(err);
+ return Err(KvErrorKind::Resource(err).into_box());
}
}
};
resource.db.clone()
};
- let Some(mut handle) = db.dequeue_next_message().await? else {
+ let Some(mut handle) =
+ db.dequeue_next_message().await.map_err(KvErrorKind::Kv)?
+ else {
return Ok(None);
};
- let payload = handle.take_payload().await?.into();
+ let payload = handle.take_payload().await.map_err(KvErrorKind::Kv)?.into();
let handle_rid = {
let mut state = state.borrow_mut();
state.resource_table.add(QueueMessageResource { handle })
@@ -382,18 +447,18 @@ fn op_kv_watch<DBH>(
state: &mut OpState,
#[smi] rid: ResourceId,
#[serde] keys: Vec<KvKey>,
-) -> Result<ResourceId, AnyError>
+) -> Result<ResourceId, KvError>
where
DBH: DatabaseHandler + 'static,
{
- let resource = state.resource_table.get::<DatabaseResource<DBH::DB>>(rid)?;
+ let resource = state
+ .resource_table
+ .get::<DatabaseResource<DBH::DB>>(rid)
+ .map_err(KvErrorKind::Resource)?;
let config = state.borrow::<Rc<KvConfig>>().clone();
if keys.len() > config.max_watched_keys {
- return Err(type_error(format!(
- "Too many keys (max {})",
- config.max_watched_keys
- )));
+ return Err(KvErrorKind::TooManyKeys(config.max_watched_keys).into_box());
}
let keys: Vec<Vec<u8>> = keys
@@ -428,10 +493,13 @@ enum WatchEntry {
async fn op_kv_watch_next(
state: Rc<RefCell<OpState>>,
#[smi] rid: ResourceId,
-) -> Result<Option<Vec<WatchEntry>>, AnyError> {
+) -> Result<Option<Vec<WatchEntry>>, KvError> {
let resource = {
let state = state.borrow();
- let resource = state.resource_table.get::<DatabaseWatcherResource>(rid)?;
+ let resource = state
+ .resource_table
+ .get::<DatabaseWatcherResource>(rid)
+ .map_err(KvErrorKind::Resource)?;
resource.clone()
};
@@ -457,7 +525,7 @@ async fn op_kv_watch_next(
return Ok(None);
};
- let entries = res?;
+ let entries = res.map_err(KvErrorKind::Kv)?;
let entries = entries
.into_iter()
.map(|entry| {
@@ -468,7 +536,7 @@ async fn op_kv_watch_next(
WatchKeyOutput::Unchanged => WatchEntry::Unchanged,
})
})
- .collect::<Result<_, anyhow::Error>>()?;
+ .collect::<Result<_, KvError>>()?;
Ok(Some(entries))
}
@@ -478,7 +546,7 @@ async fn op_kv_finish_dequeued_message<DBH>(
state: Rc<RefCell<OpState>>,
#[smi] handle_rid: ResourceId,
success: bool,
-) -> Result<(), AnyError>
+) -> Result<(), KvError>
where
DBH: DatabaseHandler + 'static,
{
@@ -487,9 +555,9 @@ where
let handle = state
.resource_table
.take::<QueueMessageResource<<<DBH>::DB as Database>::QMH>>(handle_rid)
- .map_err(|_| type_error("Queue message not found"))?;
+ .map_err(|_| KvErrorKind::QueueMessageNotFound)?;
Rc::try_unwrap(handle)
- .map_err(|_| type_error("Queue message not found"))?
+ .map_err(|_| KvErrorKind::QueueMessageNotFound)?
.handle
};
// if we fail to finish the message, there is not much we can do and the
@@ -500,32 +568,52 @@ where
Ok(())
}
+#[derive(Debug, thiserror::Error)]
+pub enum KvCheckError {
+ #[error("invalid versionstamp")]
+ InvalidVersionstamp,
+ #[error(transparent)]
+ Io(std::io::Error),
+}
+
type V8KvCheck = (KvKey, Option<ByteString>);
-fn check_from_v8(value: V8KvCheck) -> Result<Check, AnyError> {
+fn check_from_v8(value: V8KvCheck) -> Result<Check, KvCheckError> {
let versionstamp = match value.1 {
Some(data) => {
let mut out = [0u8; 10];
if data.len() != out.len() * 2 {
- bail!(type_error("invalid versionstamp"));
+ return Err(KvCheckError::InvalidVersionstamp);
}
faster_hex::hex_decode(&data, &mut out)
- .map_err(|_| type_error("invalid versionstamp"))?;
+ .map_err(|_| KvCheckError::InvalidVersionstamp)?;
Some(out)
}
None => None,
};
Ok(Check {
- key: encode_v8_key(value.0)?,
+ key: encode_v8_key(value.0).map_err(KvCheckError::Io)?,
versionstamp,
})
}
+#[derive(Debug, thiserror::Error)]
+pub enum KvMutationError {
+ #[error(transparent)]
+ BigInt(#[from] num_bigint::TryFromBigIntError<num_bigint::BigInt>),
+ #[error(transparent)]
+ Io(#[from] std::io::Error),
+ #[error("Invalid mutation '{0}' with value")]
+ InvalidMutationWithValue(String),
+ #[error("Invalid mutation '{0}' without value")]
+ InvalidMutationWithoutValue(String),
+}
+
type V8KvMutation = (KvKey, String, Option<FromV8Value>, Option<u64>);
fn mutation_from_v8(
(value, current_timstamp): (V8KvMutation, DateTime<Utc>),
-) -> Result<Mutation, AnyError> {
+) -> Result<Mutation, KvMutationError> {
let key = encode_v8_key(value.0)?;
let kind = match (value.1.as_str(), value.2) {
("set", Some(value)) => MutationKind::Set(value.try_into()?),
@@ -542,10 +630,10 @@ fn mutation_from_v8(
MutationKind::SetSuffixVersionstampedKey(value.try_into()?)
}
(op, Some(_)) => {
- return Err(type_error(format!("Invalid mutation '{op}' with value")))
+ return Err(KvMutationError::InvalidMutationWithValue(op.to_string()))
}
(op, None) => {
- return Err(type_error(format!("Invalid mutation '{op}' without value")))
+ return Err(KvMutationError::InvalidMutationWithoutValue(op.to_string()))
}
};
Ok(Mutation {
@@ -562,7 +650,7 @@ type V8Enqueue = (JsBuffer, u64, Vec<KvKey>, Option<Vec<u32>>);
fn enqueue_from_v8(
value: V8Enqueue,
current_timestamp: DateTime<Utc>,
-) -> Result<Enqueue, AnyError> {
+) -> Result<Enqueue, std::io::Error> {
Ok(Enqueue {
payload: value.0.to_vec(),
deadline: current_timestamp
@@ -597,7 +685,7 @@ impl RawSelector {
prefix: Option<KvKey>,
start: Option<KvKey>,
end: Option<KvKey>,
- ) -> Result<Self, AnyError> {
+ ) -> Result<Self, KvError> {
let prefix = prefix.map(encode_v8_key).transpose()?;
let start = start.map(encode_v8_key).transpose()?;
let end = end.map(encode_v8_key).transpose()?;
@@ -610,9 +698,7 @@ impl RawSelector {
}),
(Some(prefix), Some(start), None) => {
if !start.starts_with(&prefix) || start.len() == prefix.len() {
- return Err(type_error(
- "Start key is not in the keyspace defined by prefix",
- ));
+ return Err(KvErrorKind::StartKeyNotInKeyspace.into_box());
}
Ok(Self::Prefixed {
prefix,
@@ -622,9 +708,7 @@ impl RawSelector {
}
(Some(prefix), None, Some(end)) => {
if !end.starts_with(&prefix) || end.len() == prefix.len() {
- return Err(type_error(
- "End key is not in the keyspace defined by prefix",
- ));
+ return Err(KvErrorKind::EndKeyNotInKeyspace.into_box());
}
Ok(Self::Prefixed {
prefix,
@@ -634,7 +718,7 @@ impl RawSelector {
}
(None, Some(start), Some(end)) => {
if start > end {
- return Err(type_error("Start key is greater than end key"));
+ return Err(KvErrorKind::StartKeyGreaterThanEndKey.into_box());
}
Ok(Self::Range { start, end })
}
@@ -642,7 +726,7 @@ impl RawSelector {
let end = start.iter().copied().chain(Some(0)).collect();
Ok(Self::Range { start, end })
}
- _ => Err(type_error("Invalid range")),
+ _ => Err(KvErrorKind::InvalidRange.into_box()),
}
}
@@ -701,10 +785,10 @@ fn common_prefix_for_bytes<'a>(a: &'a [u8], b: &'a [u8]) -> &'a [u8] {
fn encode_cursor(
selector: &RawSelector,
boundary_key: &[u8],
-) -> Result<String, AnyError> {
+) -> Result<String, KvError> {
let common_prefix = selector.common_prefix();
if !boundary_key.starts_with(common_prefix) {
- return Err(type_error("Invalid boundary key"));
+ return Err(KvErrorKind::InvalidBoundaryKey.into_box());
}
Ok(BASE64_URL_SAFE.encode(&boundary_key[common_prefix.len()..]))
}
@@ -713,7 +797,7 @@ fn decode_selector_and_cursor(
selector: &RawSelector,
reverse: bool,
cursor: Option<&ByteString>,
-) -> Result<(Vec<u8>, Vec<u8>), AnyError> {
+) -> Result<(Vec<u8>, Vec<u8>), KvError> {
let Some(cursor) = cursor else {
return Ok((selector.range_start_key(), selector.range_end_key()));
};
@@ -721,7 +805,7 @@ fn decode_selector_and_cursor(
let common_prefix = selector.common_prefix();
let cursor = BASE64_URL_SAFE
.decode(cursor)
- .map_err(|_| type_error("invalid cursor"))?;
+ .map_err(|_| KvErrorKind::InvalidCursor)?;
let first_key: Vec<u8>;
let last_key: Vec<u8>;
@@ -746,13 +830,13 @@ fn decode_selector_and_cursor(
// Defend against out-of-bounds reading
if let Some(start) = selector.start() {
if &first_key[..] < start {
- return Err(type_error("cursor out of bounds"));
+ return Err(KvErrorKind::CursorOutOfBounds.into_box());
}
}
if let Some(end) = selector.end() {
if &last_key[..] > end {
- return Err(type_error("cursor out of bounds"));
+ return Err(KvErrorKind::CursorOutOfBounds.into_box());
}
}
@@ -767,15 +851,17 @@ async fn op_kv_atomic_write<DBH>(
#[serde] checks: Vec<V8KvCheck>,
#[serde] mutations: Vec<V8KvMutation>,
#[serde] enqueues: Vec<V8Enqueue>,
-) -> Result<Option<String>, AnyError>
+) -> Result<Option<String>, KvError>
where
DBH: DatabaseHandler + 'static,
{
let current_timestamp = chrono::Utc::now();
let db = {
let state = state.borrow();
- let resource =
- state.resource_table.get::<DatabaseResource<DBH::DB>>(rid)?;
+ let resource = state
+ .resource_table
+ .get::<DatabaseResource<DBH::DB>>(rid)
+ .map_err(KvErrorKind::Resource)?;
resource.db.clone()
};
@@ -785,34 +871,28 @@ where
};
if checks.len() > config.max_checks {
- return Err(type_error(format!(
- "Too many checks (max {})",
- config.max_checks
- )));
+ return Err(KvErrorKind::TooManyChecks(config.max_checks).into_box());
}
if mutations.len() + enqueues.len() > config.max_mutations {
- return Err(type_error(format!(
- "Too many mutations (max {})",
- config.max_mutations
- )));
+ return Err(KvErrorKind::TooManyMutations(config.max_mutations).into_box());
}
let checks = checks
.into_iter()
.map(check_from_v8)
- .collect::<Result<Vec<Check>, AnyError>>()
- .with_context(|| "invalid check")?;
+ .collect::<Result<Vec<Check>, KvCheckError>>()
+ .map_err(KvErrorKind::InvalidCheck)?;
let mutations = mutations
.into_iter()
.map(|mutation| mutation_from_v8((mutation, current_timestamp)))
- .collect::<Result<Vec<Mutation>, AnyError>>()
- .with_context(|| "Invalid mutation")?;
+ .collect::<Result<Vec<Mutation>, KvMutationError>>()
+ .map_err(KvErrorKind::InvalidMutation)?;
let enqueues = enqueues
.into_iter()
.map(|e| enqueue_from_v8(e, current_timestamp))
- .collect::<Result<Vec<Enqueue>, AnyError>>()
- .with_context(|| "invalid enqueue")?;
+ .collect::<Result<Vec<Enqueue>, std::io::Error>>()
+ .map_err(KvErrorKind::InvalidEnqueue)?;
let mut total_payload_size = 0usize;
let mut total_key_size = 0usize;
@@ -823,7 +903,7 @@ where
.chain(mutations.iter().map(|m| &m.key))
{
if key.is_empty() {
- return Err(type_error("key cannot be empty"));
+ return Err(KvErrorKind::EmptyKey.into_box());
}
total_payload_size += check_write_key_size(key, &config)?;
@@ -847,17 +927,16 @@ where
}
if total_payload_size > config.max_total_mutation_size_bytes {
- return Err(type_error(format!(
- "Total mutation size too large (max {} bytes)",
- config.max_total_mutation_size_bytes
- )));
+ return Err(
+ KvErrorKind::TotalMutationTooLarge(config.max_total_mutation_size_bytes)
+ .into_box(),
+ );
}
if total_key_size > config.max_total_key_size_bytes {
- return Err(type_error(format!(
- "Total key size too large (max {} bytes)",
- config.max_total_key_size_bytes
- )));
+ return Err(
+ KvErrorKind::TotalKeyTooLarge(config.max_total_key_size_bytes).into_box(),
+ );
}
let atomic_write = AtomicWrite {
@@ -866,7 +945,10 @@ where
enqueues,
};
- let result = db.atomic_write(atomic_write).await?;
+ let result = db
+ .atomic_write(atomic_write)
+ .await
+ .map_err(KvErrorKind::Kv)?;
Ok(result.map(|res| faster_hex::hex_string(&res.versionstamp)))
}
@@ -879,19 +961,18 @@ type EncodeCursorRangeSelector = (Option<KvKey>, Option<KvKey>, Option<KvKey>);
fn op_kv_encode_cursor(
#[serde] (prefix, start, end): EncodeCursorRangeSelector,
#[serde] boundary_key: KvKey,
-) -> Result<String, AnyError> {
+) -> Result<String, KvError> {
let selector = RawSelector::from_tuple(prefix, start, end)?;
let boundary_key = encode_v8_key(boundary_key)?;
let cursor = encode_cursor(&selector, &boundary_key)?;
Ok(cursor)
}
-fn check_read_key_size(key: &[u8], config: &KvConfig) -> Result<(), AnyError> {
+fn check_read_key_size(key: &[u8], config: &KvConfig) -> Result<(), KvError> {
if key.len() > config.max_read_key_size_bytes {
- Err(type_error(format!(
- "Key too large for read (max {} bytes)",
- config.max_read_key_size_bytes
- )))
+ Err(
+ KvErrorKind::KeyTooLargeToRead(config.max_read_key_size_bytes).into_box(),
+ )
} else {
Ok(())
}
@@ -900,12 +981,12 @@ fn check_read_key_size(key: &[u8], config: &KvConfig) -> Result<(), AnyError> {
fn check_write_key_size(
key: &[u8],
config: &KvConfig,
-) -> Result<usize, AnyError> {
+) -> Result<usize, KvError> {
if key.len() > config.max_write_key_size_bytes {
- Err(type_error(format!(
- "Key too large for write (max {} bytes)",
- config.max_write_key_size_bytes
- )))
+ Err(
+ KvErrorKind::KeyTooLargeToWrite(config.max_write_key_size_bytes)
+ .into_box(),
+ )
} else {
Ok(key.len())
}
@@ -914,7 +995,7 @@ fn check_write_key_size(
fn check_value_size(
value: &KvValue,
config: &KvConfig,
-) -> Result<usize, AnyError> {
+) -> Result<usize, KvError> {
let payload = match value {
KvValue::Bytes(x) => x,
KvValue::V8(x) => x,
@@ -922,10 +1003,7 @@ fn check_value_size(
};
if payload.len() > config.max_value_size_bytes {
- Err(type_error(format!(
- "Value too large (max {} bytes)",
- config.max_value_size_bytes
- )))
+ Err(KvErrorKind::ValueTooLarge(config.max_value_size_bytes).into_box())
} else {
Ok(payload.len())
}
@@ -934,12 +1012,12 @@ fn check_value_size(
fn check_enqueue_payload_size(
payload: &[u8],
config: &KvConfig,
-) -> Result<usize, AnyError> {
+) -> Result<usize, KvError> {
if payload.len() > config.max_value_size_bytes {
- Err(type_error(format!(
- "enqueue payload too large (max {} bytes)",
- config.max_value_size_bytes
- )))
+ Err(
+ KvErrorKind::EnqueuePayloadTooLarge(config.max_value_size_bytes)
+ .into_box(),
+ )
} else {
Ok(payload.len())
}
diff --git a/ext/kv/remote.rs b/ext/kv/remote.rs
index 922853588..1830aa67e 100644
--- a/ext/kv/remote.rs
+++ b/ext/kv/remote.rs
@@ -15,6 +15,7 @@ use deno_core::futures::Stream;
use deno_core::OpState;
use deno_fetch::create_http_client;
use deno_fetch::CreateHttpClientOptions;
+use deno_permissions::PermissionCheckError;
use deno_tls::rustls::RootCertStore;
use deno_tls::Proxy;
use deno_tls::RootCertStoreProvider;
@@ -45,17 +46,17 @@ impl HttpOptions {
}
pub trait RemoteDbHandlerPermissions {
- fn check_env(&mut self, var: &str) -> Result<(), AnyError>;
+ fn check_env(&mut self, var: &str) -> Result<(), PermissionCheckError>;
fn check_net_url(
&mut self,
url: &Url,
api_name: &str,
- ) -> Result<(), AnyError>;
+ ) -> Result<(), PermissionCheckError>;
}
impl RemoteDbHandlerPermissions for deno_permissions::PermissionsContainer {
#[inline(always)]
- fn check_env(&mut self, var: &str) -> Result<(), AnyError> {
+ fn check_env(&mut self, var: &str) -> Result<(), PermissionCheckError> {
deno_permissions::PermissionsContainer::check_env(self, var)
}
@@ -64,7 +65,7 @@ impl RemoteDbHandlerPermissions for deno_permissions::PermissionsContainer {
&mut self,
url: &Url,
api_name: &str,
- ) -> Result<(), AnyError> {
+ ) -> Result<(), PermissionCheckError> {
deno_permissions::PermissionsContainer::check_net_url(self, url, api_name)
}
}
@@ -103,7 +104,9 @@ impl<P: RemoteDbHandlerPermissions + 'static> denokv_remote::RemotePermissions
fn check_net_url(&self, url: &Url) -> Result<(), anyhow::Error> {
let mut state = self.state.borrow_mut();
let permissions = state.borrow_mut::<P>();
- permissions.check_net_url(url, "Deno.openKv")
+ permissions
+ .check_net_url(url, "Deno.openKv")
+ .map_err(Into::into)
}
}
@@ -194,6 +197,7 @@ impl<P: RemoteDbHandlerPermissions + 'static> DatabaseHandler
root_cert_store: options.root_cert_store()?,
ca_certs: vec![],
proxy: options.proxy.clone(),
+ dns_resolver: Default::default(),
unsafely_ignore_certificate_errors: options
.unsafely_ignore_certificate_errors
.clone(),
@@ -206,6 +210,7 @@ impl<P: RemoteDbHandlerPermissions + 'static> DatabaseHandler
pool_idle_timeout: None,
http1: false,
http2: true,
+ client_builder_hook: None,
},
)?;
let fetch_client = FetchClient(client);
diff --git a/ext/kv/sqlite.rs b/ext/kv/sqlite.rs
index 0b4a3693c..9de520927 100644
--- a/ext/kv/sqlite.rs
+++ b/ext/kv/sqlite.rs
@@ -13,20 +13,20 @@ use std::sync::Arc;
use std::sync::Mutex;
use std::sync::OnceLock;
+use crate::DatabaseHandler;
use async_trait::async_trait;
use deno_core::error::type_error;
use deno_core::error::AnyError;
use deno_core::unsync::spawn_blocking;
use deno_core::OpState;
use deno_path_util::normalize_path;
+use deno_permissions::PermissionCheckError;
pub use denokv_sqlite::SqliteBackendError;
use denokv_sqlite::SqliteConfig;
use denokv_sqlite::SqliteNotifier;
use rand::SeedableRng;
use rusqlite::OpenFlags;
-use crate::DatabaseHandler;
-
static SQLITE_NOTIFIERS_MAP: OnceLock<Mutex<HashMap<PathBuf, SqliteNotifier>>> =
OnceLock::new();
@@ -42,13 +42,13 @@ pub trait SqliteDbHandlerPermissions {
&mut self,
p: &str,
api_name: &str,
- ) -> Result<PathBuf, AnyError>;
+ ) -> Result<PathBuf, PermissionCheckError>;
#[must_use = "the resolved return value to mitigate time-of-check to time-of-use issues"]
fn check_write<'a>(
&mut self,
p: &'a Path,
api_name: &str,
- ) -> Result<Cow<'a, Path>, AnyError>;
+ ) -> Result<Cow<'a, Path>, PermissionCheckError>;
}
impl SqliteDbHandlerPermissions for deno_permissions::PermissionsContainer {
@@ -57,7 +57,7 @@ impl SqliteDbHandlerPermissions for deno_permissions::PermissionsContainer {
&mut self,
p: &str,
api_name: &str,
- ) -> Result<PathBuf, AnyError> {
+ ) -> Result<PathBuf, PermissionCheckError> {
deno_permissions::PermissionsContainer::check_read(self, p, api_name)
}
@@ -66,7 +66,7 @@ impl SqliteDbHandlerPermissions for deno_permissions::PermissionsContainer {
&mut self,
p: &'a Path,
api_name: &str,
- ) -> Result<Cow<'a, Path>, AnyError> {
+ ) -> Result<Cow<'a, Path>, PermissionCheckError> {
deno_permissions::PermissionsContainer::check_write_path(self, p, api_name)
}
}
diff --git a/ext/napi/Cargo.toml b/ext/napi/Cargo.toml
index ade789ff8..df3ec0287 100644
--- a/ext/napi/Cargo.toml
+++ b/ext/napi/Cargo.toml
@@ -2,7 +2,7 @@
[package]
name = "deno_napi"
-version = "0.102.0"
+version = "0.108.0"
authors.workspace = true
edition.workspace = true
license.workspace = true
@@ -16,4 +16,14 @@ path = "lib.rs"
[dependencies]
deno_core.workspace = true
deno_permissions.workspace = true
+libc.workspace = true
libloading = { version = "0.7" }
+log.workspace = true
+napi_sym.workspace = true
+thiserror.workspace = true
+
+[target.'cfg(windows)'.dependencies]
+windows-sys.workspace = true
+
+[dev-dependencies]
+libuv-sys-lite = "=1.48.2"
diff --git a/ext/napi/README.md b/ext/napi/README.md
index e69de29bb..b47929524 100644
--- a/ext/napi/README.md
+++ b/ext/napi/README.md
@@ -0,0 +1,114 @@
+# napi
+
+This directory contains source for Deno's Node-API implementation. It depends on
+`napi_sym` and `deno_napi`.
+
+Files are generally organized the same as in Node.js's implementation to ease in
+ensuring compatibility.
+
+## Adding a new function
+
+Add the symbol name to
+[`ext/napi_sym/symbol_exports.json`](../napi_sym/symbol_exports.json).
+
+```diff
+{
+ "symbols": [
+ ...
+ "napi_get_undefined",
+- "napi_get_null"
++ "napi_get_null",
++ "napi_get_boolean"
+ ]
+}
+```
+
+Determine where to place the implementation. `napi_get_boolean` is related to JS
+values so we will place it in `js_native_api.rs`. If something is not clear,
+just create a new file module.
+
+See [`napi_sym`](../napi_sym/) for writing the implementation:
+
+```rust
+#[napi_sym::napi_sym]
+fn napi_get_boolean(
+ env: *mut Env,
+ value: bool,
+ result: *mut napi_value,
+) -> Result {
+ // ...
+ Ok(())
+}
+```
+
+Update the generated symbol lists using the script:
+
+```
+deno run --allow-write tools/napi/generate_symbols_lists.js
+```
+
+Add a test in [`/tests/napi`](../../tests/napi/). You can also refer to Node.js
+test suite for Node-API.
+
+```js
+// tests/napi/boolean_test.js
+import { assertEquals, loadTestLibrary } from "./common.js";
+const lib = loadTestLibrary();
+Deno.test("napi get boolean", function () {
+ assertEquals(lib.test_get_boolean(true), true);
+ assertEquals(lib.test_get_boolean(false), false);
+});
+```
+
+```rust
+// tests/napi/src/boolean.rs
+
+use napi_sys::Status::napi_ok;
+use napi_sys::ValueType::napi_boolean;
+use napi_sys::*;
+
+extern "C" fn test_boolean(
+ env: napi_env,
+ info: napi_callback_info,
+) -> napi_value {
+ let (args, argc, _) = crate::get_callback_info!(env, info, 1);
+ assert_eq!(argc, 1);
+
+ let mut ty = -1;
+ assert!(unsafe { napi_typeof(env, args[0], &mut ty) } == napi_ok);
+ assert_eq!(ty, napi_boolean);
+
+ // Use napi_get_boolean here...
+
+ value
+}
+
+pub fn init(env: napi_env, exports: napi_value) {
+ let properties = &[crate::new_property!(env, "test_boolean\0", test_boolean)];
+
+ unsafe {
+ napi_define_properties(env, exports, properties.len(), properties.as_ptr())
+ };
+}
+```
+
+```diff
+// tests/napi/src/lib.rs
+
++ mod boolean;
+
+...
+
+#[no_mangle]
+unsafe extern "C" fn napi_register_module_v1(
+ env: napi_env,
+ exports: napi_value,
+) -> napi_value {
+ ...
++ boolean::init(env, exports);
+
+ exports
+}
+```
+
+Run the test using `cargo test -p test_napi`.
diff --git a/ext/napi/build.rs b/ext/napi/build.rs
new file mode 100644
index 000000000..8705830a9
--- /dev/null
+++ b/ext/napi/build.rs
@@ -0,0 +1,22 @@
+// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
+
+fn main() {
+ let symbols_file_name = match std::env::consts::OS {
+ "android" | "freebsd" | "openbsd" => {
+ "generated_symbol_exports_list_linux.def".to_string()
+ }
+ os => format!("generated_symbol_exports_list_{}.def", os),
+ };
+ let symbols_path = std::path::Path::new(".")
+ .join(symbols_file_name)
+ .canonicalize()
+ .expect(
+ "Missing symbols list! Generate using tools/napi/generate_symbols_lists.js",
+ );
+
+  println!("cargo:rerun-if-changed={}", symbols_path.display());
+
+ let path = std::path::PathBuf::from(std::env::var("OUT_DIR").unwrap())
+ .join("napi_symbol_path.txt");
+ std::fs::write(path, symbols_path.as_os_str().as_encoded_bytes()).unwrap();
+}
diff --git a/ext/napi/generated_symbol_exports_list_linux.def b/ext/napi/generated_symbol_exports_list_linux.def
new file mode 100644
index 000000000..614880ebf
--- /dev/null
+++ b/ext/napi/generated_symbol_exports_list_linux.def
@@ -0,0 +1 @@
+{ "node_api_create_syntax_error"; "napi_make_callback"; "napi_has_named_property"; "napi_async_destroy"; "napi_coerce_to_object"; "napi_get_arraybuffer_info"; "napi_detach_arraybuffer"; "napi_get_undefined"; "napi_reference_unref"; "napi_fatal_error"; "napi_open_callback_scope"; "napi_close_callback_scope"; "napi_get_value_uint32"; "napi_create_function"; "napi_create_arraybuffer"; "napi_get_value_int64"; "napi_get_all_property_names"; "napi_resolve_deferred"; "napi_is_detached_arraybuffer"; "napi_create_string_utf8"; "napi_create_threadsafe_function"; "node_api_throw_syntax_error"; "napi_create_bigint_int64"; "napi_wrap"; "napi_set_property"; "napi_get_value_bigint_int64"; "napi_open_handle_scope"; "napi_create_error"; "napi_create_buffer"; "napi_cancel_async_work"; "napi_is_exception_pending"; "napi_acquire_threadsafe_function"; "napi_create_external"; "napi_get_threadsafe_function_context"; "napi_get_null"; "napi_create_string_utf16"; "node_api_create_external_string_utf16"; "napi_get_value_bigint_uint64"; "napi_module_register"; "napi_is_typedarray"; "napi_create_external_buffer"; "napi_get_new_target"; "napi_get_instance_data"; "napi_close_handle_scope"; "napi_get_value_string_utf16"; "napi_get_property_names"; "napi_is_arraybuffer"; "napi_get_cb_info"; "napi_define_properties"; "napi_add_env_cleanup_hook"; "node_api_get_module_file_name"; "napi_get_node_version"; "napi_create_int64"; "napi_create_double"; "napi_get_and_clear_last_exception"; "napi_create_reference"; "napi_get_typedarray_info"; "napi_call_threadsafe_function"; "napi_get_last_error_info"; "napi_create_array_with_length"; "napi_coerce_to_number"; "napi_get_global"; "napi_is_error"; "napi_set_instance_data"; "napi_create_typedarray"; "napi_throw_type_error"; "napi_has_property"; "napi_get_value_external"; "napi_create_range_error"; "napi_typeof"; "napi_ref_threadsafe_function"; "napi_create_bigint_uint64"; "napi_get_prototype"; "napi_adjust_external_memory"; "napi_release_threadsafe_function"; 
"napi_delete_async_work"; "napi_create_string_latin1"; "node_api_create_external_string_latin1"; "napi_is_array"; "napi_unref_threadsafe_function"; "napi_throw_error"; "napi_has_own_property"; "napi_get_reference_value"; "napi_remove_env_cleanup_hook"; "napi_get_value_string_utf8"; "napi_is_promise"; "napi_get_boolean"; "napi_run_script"; "napi_get_element"; "napi_get_named_property"; "napi_get_buffer_info"; "napi_get_value_bool"; "napi_reference_ref"; "napi_create_object"; "napi_create_promise"; "napi_create_int32"; "napi_escape_handle"; "napi_open_escapable_handle_scope"; "napi_throw"; "napi_get_value_double"; "napi_set_named_property"; "napi_call_function"; "napi_create_date"; "napi_object_freeze"; "napi_get_uv_event_loop"; "napi_get_value_string_latin1"; "napi_reject_deferred"; "napi_add_finalizer"; "napi_create_array"; "napi_delete_reference"; "napi_get_date_value"; "napi_create_dataview"; "napi_get_version"; "napi_define_class"; "napi_is_date"; "napi_remove_wrap"; "napi_delete_property"; "napi_instanceof"; "napi_create_buffer_copy"; "napi_delete_element"; "napi_object_seal"; "napi_queue_async_work"; "napi_get_value_bigint_words"; "napi_is_buffer"; "napi_get_array_length"; "napi_get_property"; "napi_new_instance"; "napi_set_element"; "napi_create_bigint_words"; "napi_strict_equals"; "napi_is_dataview"; "napi_close_escapable_handle_scope"; "napi_get_dataview_info"; "napi_get_value_int32"; "napi_unwrap"; "napi_throw_range_error"; "napi_coerce_to_bool"; "napi_create_uint32"; "napi_has_element"; "napi_create_external_arraybuffer"; "napi_create_symbol"; "node_api_symbol_for"; "napi_coerce_to_string"; "napi_create_type_error"; "napi_fatal_exception"; "napi_create_async_work"; "napi_async_init"; "node_api_create_property_key_utf16"; "napi_type_tag_object"; "napi_check_object_type_tag"; "node_api_post_finalizer"; "napi_add_async_cleanup_hook"; "napi_remove_async_cleanup_hook"; "uv_mutex_init"; "uv_mutex_lock"; "uv_mutex_unlock"; "uv_mutex_destroy"; "uv_async_init"; 
"uv_async_send"; "uv_close"; }; \ No newline at end of file
diff --git a/ext/napi/generated_symbol_exports_list_macos.def b/ext/napi/generated_symbol_exports_list_macos.def
new file mode 100644
index 000000000..36b2f37fa
--- /dev/null
+++ b/ext/napi/generated_symbol_exports_list_macos.def
@@ -0,0 +1,160 @@
+_node_api_create_syntax_error
+_napi_make_callback
+_napi_has_named_property
+_napi_async_destroy
+_napi_coerce_to_object
+_napi_get_arraybuffer_info
+_napi_detach_arraybuffer
+_napi_get_undefined
+_napi_reference_unref
+_napi_fatal_error
+_napi_open_callback_scope
+_napi_close_callback_scope
+_napi_get_value_uint32
+_napi_create_function
+_napi_create_arraybuffer
+_napi_get_value_int64
+_napi_get_all_property_names
+_napi_resolve_deferred
+_napi_is_detached_arraybuffer
+_napi_create_string_utf8
+_napi_create_threadsafe_function
+_node_api_throw_syntax_error
+_napi_create_bigint_int64
+_napi_wrap
+_napi_set_property
+_napi_get_value_bigint_int64
+_napi_open_handle_scope
+_napi_create_error
+_napi_create_buffer
+_napi_cancel_async_work
+_napi_is_exception_pending
+_napi_acquire_threadsafe_function
+_napi_create_external
+_napi_get_threadsafe_function_context
+_napi_get_null
+_napi_create_string_utf16
+_node_api_create_external_string_utf16
+_napi_get_value_bigint_uint64
+_napi_module_register
+_napi_is_typedarray
+_napi_create_external_buffer
+_napi_get_new_target
+_napi_get_instance_data
+_napi_close_handle_scope
+_napi_get_value_string_utf16
+_napi_get_property_names
+_napi_is_arraybuffer
+_napi_get_cb_info
+_napi_define_properties
+_napi_add_env_cleanup_hook
+_node_api_get_module_file_name
+_napi_get_node_version
+_napi_create_int64
+_napi_create_double
+_napi_get_and_clear_last_exception
+_napi_create_reference
+_napi_get_typedarray_info
+_napi_call_threadsafe_function
+_napi_get_last_error_info
+_napi_create_array_with_length
+_napi_coerce_to_number
+_napi_get_global
+_napi_is_error
+_napi_set_instance_data
+_napi_create_typedarray
+_napi_throw_type_error
+_napi_has_property
+_napi_get_value_external
+_napi_create_range_error
+_napi_typeof
+_napi_ref_threadsafe_function
+_napi_create_bigint_uint64
+_napi_get_prototype
+_napi_adjust_external_memory
+_napi_release_threadsafe_function
+_napi_delete_async_work
+_napi_create_string_latin1
+_node_api_create_external_string_latin1
+_napi_is_array
+_napi_unref_threadsafe_function
+_napi_throw_error
+_napi_has_own_property
+_napi_get_reference_value
+_napi_remove_env_cleanup_hook
+_napi_get_value_string_utf8
+_napi_is_promise
+_napi_get_boolean
+_napi_run_script
+_napi_get_element
+_napi_get_named_property
+_napi_get_buffer_info
+_napi_get_value_bool
+_napi_reference_ref
+_napi_create_object
+_napi_create_promise
+_napi_create_int32
+_napi_escape_handle
+_napi_open_escapable_handle_scope
+_napi_throw
+_napi_get_value_double
+_napi_set_named_property
+_napi_call_function
+_napi_create_date
+_napi_object_freeze
+_napi_get_uv_event_loop
+_napi_get_value_string_latin1
+_napi_reject_deferred
+_napi_add_finalizer
+_napi_create_array
+_napi_delete_reference
+_napi_get_date_value
+_napi_create_dataview
+_napi_get_version
+_napi_define_class
+_napi_is_date
+_napi_remove_wrap
+_napi_delete_property
+_napi_instanceof
+_napi_create_buffer_copy
+_napi_delete_element
+_napi_object_seal
+_napi_queue_async_work
+_napi_get_value_bigint_words
+_napi_is_buffer
+_napi_get_array_length
+_napi_get_property
+_napi_new_instance
+_napi_set_element
+_napi_create_bigint_words
+_napi_strict_equals
+_napi_is_dataview
+_napi_close_escapable_handle_scope
+_napi_get_dataview_info
+_napi_get_value_int32
+_napi_unwrap
+_napi_throw_range_error
+_napi_coerce_to_bool
+_napi_create_uint32
+_napi_has_element
+_napi_create_external_arraybuffer
+_napi_create_symbol
+_node_api_symbol_for
+_napi_coerce_to_string
+_napi_create_type_error
+_napi_fatal_exception
+_napi_create_async_work
+_napi_async_init
+_node_api_create_property_key_utf16
+_napi_type_tag_object
+_napi_check_object_type_tag
+_node_api_post_finalizer
+_napi_add_async_cleanup_hook
+_napi_remove_async_cleanup_hook
+_uv_mutex_init
+_uv_mutex_lock
+_uv_mutex_unlock
+_uv_mutex_destroy
+_uv_async_init
+_uv_async_send
+_uv_close \ No newline at end of file
diff --git a/ext/napi/generated_symbol_exports_list_windows.def b/ext/napi/generated_symbol_exports_list_windows.def
new file mode 100644
index 000000000..b7355112e
--- /dev/null
+++ b/ext/napi/generated_symbol_exports_list_windows.def
@@ -0,0 +1,162 @@
+LIBRARY
+EXPORTS
+ node_api_create_syntax_error
+ napi_make_callback
+ napi_has_named_property
+ napi_async_destroy
+ napi_coerce_to_object
+ napi_get_arraybuffer_info
+ napi_detach_arraybuffer
+ napi_get_undefined
+ napi_reference_unref
+ napi_fatal_error
+ napi_open_callback_scope
+ napi_close_callback_scope
+ napi_get_value_uint32
+ napi_create_function
+ napi_create_arraybuffer
+ napi_get_value_int64
+ napi_get_all_property_names
+ napi_resolve_deferred
+ napi_is_detached_arraybuffer
+ napi_create_string_utf8
+ napi_create_threadsafe_function
+ node_api_throw_syntax_error
+ napi_create_bigint_int64
+ napi_wrap
+ napi_set_property
+ napi_get_value_bigint_int64
+ napi_open_handle_scope
+ napi_create_error
+ napi_create_buffer
+ napi_cancel_async_work
+ napi_is_exception_pending
+ napi_acquire_threadsafe_function
+ napi_create_external
+ napi_get_threadsafe_function_context
+ napi_get_null
+ napi_create_string_utf16
+ node_api_create_external_string_utf16
+ napi_get_value_bigint_uint64
+ napi_module_register
+ napi_is_typedarray
+ napi_create_external_buffer
+ napi_get_new_target
+ napi_get_instance_data
+ napi_close_handle_scope
+ napi_get_value_string_utf16
+ napi_get_property_names
+ napi_is_arraybuffer
+ napi_get_cb_info
+ napi_define_properties
+ napi_add_env_cleanup_hook
+ node_api_get_module_file_name
+ napi_get_node_version
+ napi_create_int64
+ napi_create_double
+ napi_get_and_clear_last_exception
+ napi_create_reference
+ napi_get_typedarray_info
+ napi_call_threadsafe_function
+ napi_get_last_error_info
+ napi_create_array_with_length
+ napi_coerce_to_number
+ napi_get_global
+ napi_is_error
+ napi_set_instance_data
+ napi_create_typedarray
+ napi_throw_type_error
+ napi_has_property
+ napi_get_value_external
+ napi_create_range_error
+ napi_typeof
+ napi_ref_threadsafe_function
+ napi_create_bigint_uint64
+ napi_get_prototype
+ napi_adjust_external_memory
+ napi_release_threadsafe_function
+ napi_delete_async_work
+ napi_create_string_latin1
+ node_api_create_external_string_latin1
+ napi_is_array
+ napi_unref_threadsafe_function
+ napi_throw_error
+ napi_has_own_property
+ napi_get_reference_value
+ napi_remove_env_cleanup_hook
+ napi_get_value_string_utf8
+ napi_is_promise
+ napi_get_boolean
+ napi_run_script
+ napi_get_element
+ napi_get_named_property
+ napi_get_buffer_info
+ napi_get_value_bool
+ napi_reference_ref
+ napi_create_object
+ napi_create_promise
+ napi_create_int32
+ napi_escape_handle
+ napi_open_escapable_handle_scope
+ napi_throw
+ napi_get_value_double
+ napi_set_named_property
+ napi_call_function
+ napi_create_date
+ napi_object_freeze
+ napi_get_uv_event_loop
+ napi_get_value_string_latin1
+ napi_reject_deferred
+ napi_add_finalizer
+ napi_create_array
+ napi_delete_reference
+ napi_get_date_value
+ napi_create_dataview
+ napi_get_version
+ napi_define_class
+ napi_is_date
+ napi_remove_wrap
+ napi_delete_property
+ napi_instanceof
+ napi_create_buffer_copy
+ napi_delete_element
+ napi_object_seal
+ napi_queue_async_work
+ napi_get_value_bigint_words
+ napi_is_buffer
+ napi_get_array_length
+ napi_get_property
+ napi_new_instance
+ napi_set_element
+ napi_create_bigint_words
+ napi_strict_equals
+ napi_is_dataview
+ napi_close_escapable_handle_scope
+ napi_get_dataview_info
+ napi_get_value_int32
+ napi_unwrap
+ napi_throw_range_error
+ napi_coerce_to_bool
+ napi_create_uint32
+ napi_has_element
+ napi_create_external_arraybuffer
+ napi_create_symbol
+ node_api_symbol_for
+ napi_coerce_to_string
+ napi_create_type_error
+ napi_fatal_exception
+ napi_create_async_work
+ napi_async_init
+ node_api_create_property_key_utf16
+ napi_type_tag_object
+ napi_check_object_type_tag
+ node_api_post_finalizer
+ napi_add_async_cleanup_hook
+ napi_remove_async_cleanup_hook
+ uv_mutex_init
+ uv_mutex_lock
+ uv_mutex_unlock
+ uv_mutex_destroy
+ uv_async_init
+ uv_async_send
+ uv_close \ No newline at end of file
diff --git a/ext/napi/js_native_api.rs b/ext/napi/js_native_api.rs
new file mode 100644
index 000000000..53a12d6eb
--- /dev/null
+++ b/ext/napi/js_native_api.rs
@@ -0,0 +1,3616 @@
+// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
+
+#![allow(non_upper_case_globals)]
+#![deny(unsafe_op_in_unsafe_fn)]
+
+const NAPI_VERSION: u32 = 9;
+
+use crate::*;
+use libc::INT_MAX;
+
+use super::util::check_new_from_utf8;
+use super::util::check_new_from_utf8_len;
+use super::util::get_array_buffer_ptr;
+use super::util::make_external_backing_store;
+use super::util::napi_clear_last_error;
+use super::util::napi_set_last_error;
+use super::util::v8_name_from_property_descriptor;
+use crate::check_arg;
+use crate::check_env;
+use crate::function::create_function;
+use crate::function::create_function_template;
+use crate::function::CallbackInfo;
+use napi_sym::napi_sym;
+use std::ptr::NonNull;
+
+#[derive(Debug, Clone, Copy, PartialEq)]
+enum ReferenceOwnership {
+ Runtime,
+ Userland,
+}
+
+enum ReferenceState {
+ Strong(v8::Global<v8::Value>),
+ Weak(v8::Weak<v8::Value>),
+}
+
+struct Reference {
+ env: *mut Env,
+ state: ReferenceState,
+ ref_count: u32,
+ ownership: ReferenceOwnership,
+ finalize_cb: Option<napi_finalize>,
+ finalize_data: *mut c_void,
+ finalize_hint: *mut c_void,
+}
+
+impl Reference {
+ fn new(
+ env: *mut Env,
+ value: v8::Local<v8::Value>,
+ initial_ref_count: u32,
+ ownership: ReferenceOwnership,
+ finalize_cb: Option<napi_finalize>,
+ finalize_data: *mut c_void,
+ finalize_hint: *mut c_void,
+ ) -> Box<Self> {
+ let isolate = unsafe { (*env).isolate() };
+
+ let mut reference = Box::new(Reference {
+ env,
+ state: ReferenceState::Strong(v8::Global::new(isolate, value)),
+ ref_count: initial_ref_count,
+ ownership,
+ finalize_cb,
+ finalize_data,
+ finalize_hint,
+ });
+
+ if initial_ref_count == 0 {
+ reference.set_weak();
+ }
+
+ reference
+ }
+
+ fn ref_(&mut self) -> u32 {
+ self.ref_count += 1;
+ if self.ref_count == 1 {
+ self.set_strong();
+ }
+ self.ref_count
+ }
+
+ fn unref(&mut self) -> u32 {
+ let old_ref_count = self.ref_count;
+ if self.ref_count > 0 {
+ self.ref_count -= 1;
+ }
+ if old_ref_count == 1 && self.ref_count == 0 {
+ self.set_weak();
+ }
+ self.ref_count
+ }
+
+ fn reset(&mut self) {
+ self.finalize_cb = None;
+ self.finalize_data = std::ptr::null_mut();
+ self.finalize_hint = std::ptr::null_mut();
+ }
+
+ fn set_strong(&mut self) {
+ if let ReferenceState::Weak(w) = &self.state {
+ let isolate = unsafe { (*self.env).isolate() };
+ if let Some(g) = w.to_global(isolate) {
+ self.state = ReferenceState::Strong(g);
+ }
+ }
+ }
+
+ fn set_weak(&mut self) {
+ let reference = self as *mut Reference;
+ if let ReferenceState::Strong(g) = &self.state {
+ let cb = Box::new(move |_: &mut v8::Isolate| {
+ Reference::weak_callback(reference)
+ });
+ let isolate = unsafe { (*self.env).isolate() };
+ self.state =
+ ReferenceState::Weak(v8::Weak::with_finalizer(isolate, g, cb));
+ }
+ }
+
+ fn weak_callback(reference: *mut Reference) {
+ let reference = unsafe { &mut *reference };
+
+ let finalize_cb = reference.finalize_cb;
+ let finalize_data = reference.finalize_data;
+ let finalize_hint = reference.finalize_hint;
+ reference.reset();
+
+ // copy this value before the finalize callback, since
+ // it might free the reference (which would be a UAF)
+ let ownership = reference.ownership;
+ if let Some(finalize_cb) = finalize_cb {
+ unsafe {
+ finalize_cb(reference.env as _, finalize_data, finalize_hint);
+ }
+ }
+
+ if ownership == ReferenceOwnership::Runtime {
+ unsafe { drop(Reference::from_raw(reference)) }
+ }
+ }
+
+ fn into_raw(r: Box<Reference>) -> *mut Reference {
+ Box::into_raw(r)
+ }
+
+ unsafe fn from_raw(r: *mut Reference) -> Box<Reference> {
+ unsafe { Box::from_raw(r) }
+ }
+
+ unsafe fn remove(r: *mut Reference) {
+ let r = unsafe { &mut *r };
+ if r.ownership == ReferenceOwnership::Userland {
+ r.reset();
+ } else {
+ unsafe { drop(Reference::from_raw(r)) }
+ }
+ }
+}
+
+#[napi_sym]
+fn napi_get_last_error_info(
+ env: *mut Env,
+ result: *mut *const napi_extended_error_info,
+) -> napi_status {
+ let env = check_env!(env);
+ check_arg!(env, result);
+
+ if env.last_error.error_code == napi_ok {
+ napi_clear_last_error(env);
+ } else {
+ env.last_error.error_message =
+ ERROR_MESSAGES[env.last_error.error_code as usize].as_ptr();
+ }
+
+ unsafe {
+ *result = &env.last_error;
+ }
+
+ napi_ok
+}
+
+#[napi_sym]
+fn napi_create_function<'s>(
+ env: &'s mut Env,
+ name: *const c_char,
+ length: usize,
+ cb: Option<napi_callback>,
+ cb_info: napi_callback_info,
+ result: *mut napi_value<'s>,
+) -> napi_status {
+ let env_ptr = env as *mut Env;
+ check_arg!(env, result);
+ check_arg!(env, cb);
+
+ let name = if !name.is_null() {
+ match unsafe { check_new_from_utf8_len(env, name, length) } {
+ Ok(s) => Some(s),
+ Err(status) => return status,
+ }
+ } else {
+ None
+ };
+
+ unsafe {
+ *result =
+ create_function(&mut env.scope(), env_ptr, name, cb.unwrap(), cb_info)
+ .into();
+ }
+
+ napi_ok
+}
+
+#[napi_sym]
+#[allow(clippy::too_many_arguments)]
+fn napi_define_class<'s>(
+ env: &'s mut Env,
+ utf8name: *const c_char,
+ length: usize,
+ constructor: Option<napi_callback>,
+ callback_data: *mut c_void,
+ property_count: usize,
+ properties: *const napi_property_descriptor,
+ result: *mut napi_value<'s>,
+) -> napi_status {
+ let env_ptr = env as *mut Env;
+ check_arg!(env, result);
+ check_arg!(env, constructor);
+
+ if property_count > 0 {
+ check_arg!(env, properties);
+ }
+
+ let name = match unsafe { check_new_from_utf8_len(env, utf8name, length) } {
+ Ok(string) => string,
+ Err(status) => return status,
+ };
+
+ let tpl = create_function_template(
+ &mut env.scope(),
+ env_ptr,
+ Some(name),
+ constructor.unwrap(),
+ callback_data,
+ );
+
+ let napi_properties: &[napi_property_descriptor] = if property_count > 0 {
+ unsafe { std::slice::from_raw_parts(properties, property_count) }
+ } else {
+ &[]
+ };
+ let mut static_property_count = 0;
+
+ for p in napi_properties {
+ if p.attributes & napi_static != 0 {
+ // Will be handled below
+ static_property_count += 1;
+ continue;
+ }
+
+ let name = match unsafe { v8_name_from_property_descriptor(env_ptr, p) } {
+ Ok(name) => name,
+ Err(status) => return status,
+ };
+
+ let mut accessor_property = v8::PropertyAttribute::NONE;
+
+ if p.attributes & napi_enumerable == 0 {
+ accessor_property = accessor_property | v8::PropertyAttribute::DONT_ENUM;
+ }
+ if p.attributes & napi_configurable == 0 {
+ accessor_property =
+ accessor_property | v8::PropertyAttribute::DONT_DELETE;
+ }
+
+ if p.getter.is_some() || p.setter.is_some() {
+ let getter = p.getter.map(|g| {
+ create_function_template(&mut env.scope(), env_ptr, None, g, p.data)
+ });
+ let setter = p.setter.map(|s| {
+ create_function_template(&mut env.scope(), env_ptr, None, s, p.data)
+ });
+ if getter.is_some()
+ && setter.is_some()
+ && (p.attributes & napi_writable) == 0
+ {
+ accessor_property =
+ accessor_property | v8::PropertyAttribute::READ_ONLY;
+ }
+ let proto = tpl.prototype_template(&mut env.scope());
+ proto.set_accessor_property(name, getter, setter, accessor_property);
+ } else if let Some(method) = p.method {
+ let function = create_function_template(
+ &mut env.scope(),
+ env_ptr,
+ None,
+ method,
+ p.data,
+ );
+ let proto = tpl.prototype_template(&mut env.scope());
+ proto.set_with_attr(name, function.into(), accessor_property);
+ } else {
+ let proto = tpl.prototype_template(&mut env.scope());
+ if (p.attributes & napi_writable) == 0 {
+ accessor_property =
+ accessor_property | v8::PropertyAttribute::READ_ONLY;
+ }
+ proto.set_with_attr(name, p.value.unwrap().into(), accessor_property);
+ }
+ }
+
+ let value: v8::Local<v8::Value> =
+ tpl.get_function(&mut env.scope()).unwrap().into();
+
+ unsafe {
+ *result = value.into();
+ }
+
+ if static_property_count > 0 {
+ let mut static_descriptors = Vec::with_capacity(static_property_count);
+
+ for p in napi_properties {
+ if p.attributes & napi_static != 0 {
+ static_descriptors.push(*p);
+ }
+ }
+
+ crate::status_call!(unsafe {
+ napi_define_properties(
+ env_ptr,
+ *result,
+ static_descriptors.len(),
+ static_descriptors.as_ptr(),
+ )
+ });
+ }
+
+ napi_ok
+}
+
+#[napi_sym]
+fn napi_get_property_names(
+ env: *mut Env,
+ object: napi_value,
+ result: *mut napi_value,
+) -> napi_status {
+ unsafe {
+ napi_get_all_property_names(
+ env,
+ object,
+ napi_key_include_prototypes,
+ napi_key_enumerable | napi_key_skip_symbols,
+ napi_key_numbers_to_strings,
+ result,
+ )
+ }
+}
+
+#[napi_sym]
+fn napi_get_all_property_names<'s>(
+ env: &'s mut Env,
+ object: napi_value,
+ key_mode: napi_key_collection_mode,
+ key_filter: napi_key_filter,
+ key_conversion: napi_key_conversion,
+ result: *mut napi_value<'s>,
+) -> napi_status {
+ check_arg!(env, result);
+
+ let scope = &mut env.scope();
+
+ let Some(obj) = object.and_then(|o| o.to_object(scope)) else {
+ return napi_object_expected;
+ };
+
+ let mut filter = v8::PropertyFilter::ALL_PROPERTIES;
+
+ if key_filter & napi_key_writable != 0 {
+ filter = filter | v8::PropertyFilter::ONLY_WRITABLE;
+ }
+ if key_filter & napi_key_enumerable != 0 {
+ filter = filter | v8::PropertyFilter::ONLY_ENUMERABLE;
+ }
+ if key_filter & napi_key_configurable != 0 {
+ filter = filter | v8::PropertyFilter::ONLY_CONFIGURABLE;
+ }
+ if key_filter & napi_key_skip_strings != 0 {
+ filter = filter | v8::PropertyFilter::SKIP_STRINGS;
+ }
+ if key_filter & napi_key_skip_symbols != 0 {
+ filter = filter | v8::PropertyFilter::SKIP_SYMBOLS;
+ }
+
+ let key_mode = match key_mode {
+ napi_key_include_prototypes => v8::KeyCollectionMode::IncludePrototypes,
+ napi_key_own_only => v8::KeyCollectionMode::OwnOnly,
+ _ => return napi_invalid_arg,
+ };
+
+ let key_conversion = match key_conversion {
+ napi_key_keep_numbers => v8::KeyConversionMode::KeepNumbers,
+ napi_key_numbers_to_strings => v8::KeyConversionMode::ConvertToString,
+ _ => return napi_invalid_arg,
+ };
+
+ let filter = v8::GetPropertyNamesArgsBuilder::new()
+ .mode(key_mode)
+ .property_filter(filter)
+ .index_filter(v8::IndexFilter::IncludeIndices)
+ .key_conversion(key_conversion)
+ .build();
+
+ let property_names = match obj.get_property_names(scope, filter) {
+ Some(n) => n,
+ None => return napi_generic_failure,
+ };
+
+ unsafe {
+ *result = property_names.into();
+ }
+
+ napi_ok
+}
+
+#[napi_sym]
+fn napi_set_property(
+ env: &mut Env,
+ object: napi_value,
+ key: napi_value,
+ value: napi_value,
+) -> napi_status {
+ check_arg!(env, key);
+ check_arg!(env, value);
+
+ let scope = &mut env.scope();
+
+ let Some(object) = object.and_then(|o| o.to_object(scope)) else {
+ return napi_object_expected;
+ };
+
+ if object.set(scope, key.unwrap(), value.unwrap()).is_none() {
+ return napi_generic_failure;
+ };
+
+ napi_ok
+}
+
+#[napi_sym]
+fn napi_has_property(
+ env: &mut Env,
+ object: napi_value,
+ key: napi_value,
+ result: *mut bool,
+) -> napi_status {
+ check_arg!(env, key);
+ check_arg!(env, result);
+
+ let scope = &mut env.scope();
+
+ let Some(object) = object.and_then(|o| o.to_object(scope)) else {
+ return napi_object_expected;
+ };
+
+ let Some(has) = object.has(scope, key.unwrap()) else {
+ return napi_generic_failure;
+ };
+
+ unsafe {
+ *result = has;
+ }
+
+ napi_ok
+}
+
+#[napi_sym]
+fn napi_get_property<'s>(
+ env: &'s mut Env,
+ object: napi_value,
+ key: napi_value,
+ result: *mut napi_value<'s>,
+) -> napi_status {
+ check_arg!(env, key);
+ check_arg!(env, result);
+
+ let scope = &mut env.scope();
+
+ let Some(object) = object.and_then(|o| o.to_object(scope)) else {
+ return napi_object_expected;
+ };
+
+ let Some(value) = object.get(scope, key.unwrap()) else {
+ return napi_generic_failure;
+ };
+
+ unsafe {
+ *result = value.into();
+ }
+
+ napi_ok
+}
+
+#[napi_sym]
+fn napi_delete_property(
+ env: &mut Env,
+ object: napi_value,
+ key: napi_value,
+ result: *mut bool,
+) -> napi_status {
+ check_arg!(env, key);
+
+ let scope = &mut env.scope();
+
+ let Some(object) = object.and_then(|o| o.to_object(scope)) else {
+ return napi_object_expected;
+ };
+
+ let Some(deleted) = object.delete(scope, key.unwrap()) else {
+ return napi_generic_failure;
+ };
+
+ if !result.is_null() {
+ unsafe {
+ *result = deleted;
+ }
+ }
+
+ napi_ok
+}
+
+#[napi_sym]
+fn napi_has_own_property(
+ env: &mut Env,
+ object: napi_value,
+ key: napi_value,
+ result: *mut bool,
+) -> napi_status {
+ check_arg!(env, key);
+ check_arg!(env, result);
+
+ let scope = &mut env.scope();
+
+ let Some(object) = object.and_then(|o| o.to_object(scope)) else {
+ return napi_object_expected;
+ };
+
+ let Ok(key) = v8::Local::<v8::Name>::try_from(key.unwrap()) else {
+ return napi_name_expected;
+ };
+
+ let Some(has_own) = object.has_own_property(scope, key) else {
+ return napi_generic_failure;
+ };
+
+ unsafe {
+ *result = has_own;
+ }
+
+ napi_ok
+}
+
+#[napi_sym]
+fn napi_has_named_property<'s>(
+ env: &'s mut Env,
+ object: napi_value<'s>,
+ utf8name: *const c_char,
+ result: *mut bool,
+) -> napi_status {
+ let env_ptr = env as *mut Env;
+ check_arg!(env, result);
+
+ let Some(object) = object.and_then(|o| o.to_object(&mut env.scope())) else {
+ return napi_object_expected;
+ };
+
+ let key = match unsafe { check_new_from_utf8(env_ptr, utf8name) } {
+ Ok(key) => key,
+ Err(status) => return status,
+ };
+
+ let Some(has_property) = object.has(&mut env.scope(), key.into()) else {
+ return napi_generic_failure;
+ };
+
+ unsafe {
+ *result = has_property;
+ }
+
+ napi_ok
+}
+
+#[napi_sym]
+fn napi_set_named_property<'s>(
+ env: &'s mut Env,
+ object: napi_value<'s>,
+ utf8name: *const c_char,
+ value: napi_value<'s>,
+) -> napi_status {
+ check_arg!(env, value);
+ let env_ptr = env as *mut Env;
+
+ let Some(object) = object.and_then(|o| o.to_object(&mut env.scope())) else {
+ return napi_object_expected;
+ };
+
+ let key = match unsafe { check_new_from_utf8(env_ptr, utf8name) } {
+ Ok(key) => key,
+ Err(status) => return status,
+ };
+
+ let value = value.unwrap();
+
+ if !object
+ .set(&mut env.scope(), key.into(), value)
+ .unwrap_or(false)
+ {
+ return napi_generic_failure;
+ }
+
+ napi_ok
+}
+
+#[napi_sym]
+fn napi_get_named_property<'s>(
+ env: &'s mut Env,
+ object: napi_value<'s>,
+ utf8name: *const c_char,
+ result: *mut napi_value<'s>,
+) -> napi_status {
+ check_arg!(env, result);
+ let env_ptr = env as *mut Env;
+
+ let Some(object) = object.and_then(|o| o.to_object(&mut env.scope())) else {
+ return napi_object_expected;
+ };
+
+ let key = match unsafe { check_new_from_utf8(env_ptr, utf8name) } {
+ Ok(key) => key,
+ Err(status) => return status,
+ };
+
+ let Some(value) = object.get(&mut env.scope(), key.into()) else {
+ return napi_generic_failure;
+ };
+
+ unsafe {
+ *result = value.into();
+ }
+
+ napi_ok
+}
+
+#[napi_sym]
+fn napi_set_element<'s>(
+ env: &'s mut Env,
+ object: napi_value<'s>,
+ index: u32,
+ value: napi_value<'s>,
+) -> napi_status {
+ check_arg!(env, value);
+
+ let scope = &mut env.scope();
+
+ let Some(object) = object.and_then(|o| o.to_object(scope)) else {
+ return napi_object_expected;
+ };
+
+ if !object
+ .set_index(scope, index, value.unwrap())
+ .unwrap_or(false)
+ {
+ return napi_generic_failure;
+ }
+
+ napi_ok
+}
+
// Node-API `napi_has_element`: stores whether `object` has property `index`
// into `*result`.
#[napi_sym]
fn napi_has_element(
  env: &mut Env,
  object: napi_value,
  index: u32,
  result: *mut bool,
) -> napi_status {
  check_arg!(env, result);

  let scope = &mut env.scope();

  let Some(object) = object.and_then(|o| o.to_object(scope)) else {
    return napi_object_expected;
  };

  // None means the `has` check itself threw.
  let Some(has) = object.has_index(scope, index) else {
    return napi_generic_failure;
  };

  unsafe {
    *result = has;
  }

  napi_ok
}
+
// Node-API `napi_get_element`: reads `object[index]` into `*result`.
#[napi_sym]
fn napi_get_element<'s>(
  env: &'s mut Env,
  object: napi_value,
  index: u32,
  result: *mut napi_value<'s>,
) -> napi_status {
  check_arg!(env, result);

  let scope = &mut env.scope();

  let Some(object) = object.and_then(|o| o.to_object(scope)) else {
    return napi_object_expected;
  };

  // None means the indexed lookup threw.
  let Some(value) = object.get_index(scope, index) else {
    return napi_generic_failure;
  };

  unsafe {
    *result = value.into();
  }

  napi_ok
}
+
// Node-API `napi_delete_element`: deletes `object[index]`; the optional
// `result` out-param receives whether the deletion succeeded.
#[napi_sym]
fn napi_delete_element(
  env: &mut Env,
  object: napi_value,
  index: u32,
  result: *mut bool,
) -> napi_status {
  let scope = &mut env.scope();

  let Some(object) = object.and_then(|o| o.to_object(scope)) else {
    return napi_object_expected;
  };

  let Some(deleted) = object.delete_index(scope, index) else {
    return napi_generic_failure;
  };

  // `result` is optional in the Node-API contract — only written if given.
  if !result.is_null() {
    unsafe {
      *result = deleted;
    }
  }

  napi_ok
}
+
+#[napi_sym]
+fn napi_define_properties(
+ env: &mut Env,
+ object: napi_value,
+ property_count: usize,
+ properties: *const napi_property_descriptor,
+) -> napi_status {
+ let env_ptr = env as *mut Env;
+
+ if property_count > 0 {
+ check_arg!(env, properties);
+ }
+
+ let scope = &mut env.scope();
+
+ let Some(object) = object.and_then(|o| o.to_object(scope)) else {
+ return napi_object_expected;
+ };
+
+ let properties = if property_count == 0 {
+ &[]
+ } else {
+ unsafe { std::slice::from_raw_parts(properties, property_count) }
+ };
+ for property in properties {
+ let property_name =
+ match unsafe { v8_name_from_property_descriptor(env_ptr, property) } {
+ Ok(name) => name,
+ Err(status) => return status,
+ };
+
+ let writable = property.attributes & napi_writable != 0;
+ let enumerable = property.attributes & napi_enumerable != 0;
+ let configurable = property.attributes & napi_configurable != 0;
+
+ if property.getter.is_some() || property.setter.is_some() {
+ let local_getter: v8::Local<v8::Value> = if let Some(getter) =
+ property.getter
+ {
+ create_function(&mut env.scope(), env_ptr, None, getter, property.data)
+ .into()
+ } else {
+ v8::undefined(scope).into()
+ };
+ let local_setter: v8::Local<v8::Value> = if let Some(setter) =
+ property.setter
+ {
+ create_function(&mut env.scope(), env_ptr, None, setter, property.data)
+ .into()
+ } else {
+ v8::undefined(scope).into()
+ };
+
+ let mut desc =
+ v8::PropertyDescriptor::new_from_get_set(local_getter, local_setter);
+ desc.set_enumerable(enumerable);
+ desc.set_configurable(configurable);
+
+ if !object
+ .define_property(scope, property_name, &desc)
+ .unwrap_or(false)
+ {
+ return napi_invalid_arg;
+ }
+ } else if let Some(method) = property.method {
+ let method: v8::Local<v8::Value> = {
+ let function = create_function(
+ &mut env.scope(),
+ env_ptr,
+ None,
+ method,
+ property.data,
+ );
+ function.into()
+ };
+
+ let mut desc =
+ v8::PropertyDescriptor::new_from_value_writable(method, writable);
+ desc.set_enumerable(enumerable);
+ desc.set_configurable(configurable);
+
+ if !object
+ .define_property(scope, property_name, &desc)
+ .unwrap_or(false)
+ {
+ return napi_generic_failure;
+ }
+ } else {
+ let value = property.value.unwrap();
+
+ if enumerable & writable & configurable {
+ if !object
+ .create_data_property(scope, property_name, value)
+ .unwrap_or(false)
+ {
+ return napi_invalid_arg;
+ }
+ } else {
+ let mut desc =
+ v8::PropertyDescriptor::new_from_value_writable(value, writable);
+ desc.set_enumerable(enumerable);
+ desc.set_configurable(configurable);
+
+ if !object
+ .define_property(scope, property_name, &desc)
+ .unwrap_or(false)
+ {
+ return napi_invalid_arg;
+ }
+ }
+ }
+ }
+
+ napi_ok
+}
+
// Node-API `napi_object_freeze`: Object.freeze equivalent via v8's
// `SetIntegrityLevel(Frozen)`.
#[napi_sym]
fn napi_object_freeze(env: &mut Env, object: napi_value) -> napi_status {
  let scope = &mut env.scope();

  let Some(object) = object.and_then(|o| o.to_object(scope)) else {
    return napi_object_expected;
  };

  if !object
    .set_integrity_level(scope, v8::IntegrityLevel::Frozen)
    .unwrap_or(false)
  {
    return napi_generic_failure;
  }

  napi_ok
}
+
// Node-API `napi_object_seal`: Object.seal equivalent via v8's
// `SetIntegrityLevel(Sealed)`.
#[napi_sym]
fn napi_object_seal(env: &mut Env, object: napi_value) -> napi_status {
  let scope = &mut env.scope();

  let Some(object) = object.and_then(|o| o.to_object(scope)) else {
    return napi_object_expected;
  };

  if !object
    .set_integrity_level(scope, v8::IntegrityLevel::Sealed)
    .unwrap_or(false)
  {
    return napi_generic_failure;
  }

  napi_ok
}
+
// Node-API `napi_is_array`: stores whether `value` is a JS Array into
// `*result`. Takes a raw env and manages the last-error slot explicitly.
#[napi_sym]
fn napi_is_array(
  env: *mut Env,
  value: napi_value,
  result: *mut bool,
) -> napi_status {
  let env = check_env!(env);
  check_arg!(env, value);
  check_arg!(env, result);

  let value = value.unwrap();

  unsafe {
    *result = value.is_array();
  }

  napi_clear_last_error(env)
}
+
// Node-API `napi_get_array_length`: stores the `.length` of a JS Array into
// `*result`; `napi_array_expected` if `value` is not an Array.
#[napi_sym]
fn napi_get_array_length(
  env: &mut Env,
  value: napi_value,
  result: *mut u32,
) -> napi_status {
  check_arg!(env, value);
  check_arg!(env, result);

  let value = value.unwrap();

  match v8::Local::<v8::Array>::try_from(value) {
    Ok(array) => {
      unsafe {
        *result = array.length();
      }
      napi_ok
    }
    Err(_) => napi_array_expected,
  }
}
+
// Node-API `napi_strict_equals`: JS `===` comparison of two values.
#[napi_sym]
fn napi_strict_equals(
  env: &mut Env,
  lhs: napi_value,
  rhs: napi_value,
  result: *mut bool,
) -> napi_status {
  check_arg!(env, lhs);
  check_arg!(env, rhs);
  check_arg!(env, result);

  unsafe {
    *result = lhs.unwrap().strict_equals(rhs.unwrap());
  }

  napi_ok
}
+
// Node-API `napi_get_prototype`: stores the [[Prototype]] of `object` into
// `*result`.
#[napi_sym]
fn napi_get_prototype<'s>(
  env: &'s mut Env,
  object: napi_value,
  result: *mut napi_value<'s>,
) -> napi_status {
  check_arg!(env, result);

  let scope = &mut env.scope();

  let Some(object) = object.and_then(|o| o.to_object(scope)) else {
    return napi_object_expected;
  };

  let Some(proto) = object.get_prototype(scope) else {
    return napi_generic_failure;
  };

  unsafe {
    *result = proto.into();
  }

  napi_ok
}
+
// Node-API `napi_create_object`: stores a fresh empty JS object in `*result`.
#[napi_sym]
fn napi_create_object(
  env_ptr: *mut Env,
  result: *mut napi_value,
) -> napi_status {
  let env = check_env!(env_ptr);
  check_arg!(env, result);

  unsafe {
    *result = v8::Object::new(&mut env.scope()).into();
  }

  return napi_clear_last_error(env_ptr);
}
+
// Node-API `napi_create_array`: stores a fresh empty JS array in `*result`.
#[napi_sym]
fn napi_create_array(
  env_ptr: *mut Env,
  result: *mut napi_value,
) -> napi_status {
  let env = check_env!(env_ptr);
  check_arg!(env, result);

  unsafe {
    *result = v8::Array::new(&mut env.scope(), 0).into();
  }

  return napi_clear_last_error(env_ptr);
}
+
// Node-API `napi_create_array_with_length`: stores a JS array pre-sized to
// `length` in `*result`. The `as _` cast narrows usize to v8's i32 length.
#[napi_sym]
fn napi_create_array_with_length(
  env_ptr: *mut Env,
  length: usize,
  result: *mut napi_value,
) -> napi_status {
  let env = check_env!(env_ptr);
  check_arg!(env, result);

  unsafe {
    *result = v8::Array::new(&mut env.scope(), length as _).into();
  }

  return napi_clear_last_error(env_ptr);
}
+
// Node-API `napi_create_string_latin1`: builds a v8 string from Latin-1
// bytes. `length == NAPI_AUTO_LENGTH` means "NUL-terminated, measure it";
// otherwise `length` is an explicit byte count (must fit in INT_MAX).
#[napi_sym]
fn napi_create_string_latin1(
  env_ptr: *mut Env,
  string: *const c_char,
  length: usize,
  result: *mut napi_value,
) -> napi_status {
  let env = check_env!(env_ptr);
  if length > 0 {
    check_arg!(env, string);
  }
  crate::return_status_if_false!(
    env,
    (length == NAPI_AUTO_LENGTH) || length <= INT_MAX as _,
    napi_invalid_arg
  );

  // NAPI_AUTO_LENGTH is nonzero (presumably usize::MAX — confirm), so it
  // takes this branch and the actual length comes from strlen.
  let buffer = if length > 0 {
    unsafe {
      std::slice::from_raw_parts(
        string as _,
        if length == NAPI_AUTO_LENGTH {
          std::ffi::CStr::from_ptr(string).to_bytes().len()
        } else {
          length
        },
      )
    }
  } else {
    &[]
  };

  let Some(string) = v8::String::new_from_one_byte(
    &mut env.scope(),
    buffer,
    v8::NewStringType::Normal,
  ) else {
    return napi_set_last_error(env_ptr, napi_generic_failure);
  };

  unsafe {
    *result = string.into();
  }

  return napi_clear_last_error(env_ptr);
}
+
// Node-API `napi_create_string_utf8`: builds a v8 string from UTF-8 bytes.
// `length == NAPI_AUTO_LENGTH` means "NUL-terminated, measure it"; otherwise
// `length` is an explicit byte count (must fit in INT_MAX). `pub(crate)`
// because the external-string shims below reuse it.
#[napi_sym]
pub(crate) fn napi_create_string_utf8(
  env_ptr: *mut Env,
  string: *const c_char,
  length: usize,
  result: *mut napi_value,
) -> napi_status {
  let env = check_env!(env_ptr);
  if length > 0 {
    check_arg!(env, string);
  }
  crate::return_status_if_false!(
    env,
    (length == NAPI_AUTO_LENGTH) || length <= INT_MAX as _,
    napi_invalid_arg
  );

  let buffer = if length > 0 {
    unsafe {
      std::slice::from_raw_parts(
        string as _,
        if length == NAPI_AUTO_LENGTH {
          std::ffi::CStr::from_ptr(string).to_bytes().len()
        } else {
          length
        },
      )
    }
  } else {
    &[]
  };

  let Some(string) = v8::String::new_from_utf8(
    &mut env.scope(),
    buffer,
    v8::NewStringType::Normal,
  ) else {
    return napi_set_last_error(env_ptr, napi_generic_failure);
  };

  unsafe {
    *result = string.into();
  }

  return napi_clear_last_error(env_ptr);
}
+
// Node-API `napi_create_string_utf16`: builds a v8 string from UTF-16 code
// units. For NAPI_AUTO_LENGTH the length is found by scanning for a 0 unit.
#[napi_sym]
fn napi_create_string_utf16(
  env_ptr: *mut Env,
  string: *const u16,
  length: usize,
  result: *mut napi_value,
) -> napi_status {
  let env = check_env!(env_ptr);
  if length > 0 {
    check_arg!(env, string);
  }
  crate::return_status_if_false!(
    env,
    (length == NAPI_AUTO_LENGTH) || length <= INT_MAX as _,
    napi_invalid_arg
  );

  let buffer = if length > 0 {
    unsafe {
      std::slice::from_raw_parts(
        string,
        if length == NAPI_AUTO_LENGTH {
          // Manual "wcslen": count code units up to the 0 terminator.
          let mut length = 0;
          while *(string.add(length)) != 0 {
            length += 1;
          }
          length
        } else {
          length
        },
      )
    }
  } else {
    &[]
  };

  let Some(string) = v8::String::new_from_two_byte(
    &mut env.scope(),
    buffer,
    v8::NewStringType::Normal,
  ) else {
    return napi_set_last_error(env_ptr, napi_generic_failure);
  };

  unsafe {
    *result = string.into();
  }

  return napi_clear_last_error(env_ptr);
}
+
// Node-API `node_api_create_external_string_latin1`. True external strings
// are not supported here: the data is copied via `napi_create_string_latin1`,
// `*copied` is set true, and the caller's finalizer (if any) is invoked
// immediately since the buffer is no longer needed.
// NOTE(review): `copied` is dereferenced without a null check — presumably
// callers always pass it; confirm against upstream contract.
#[napi_sym]
fn node_api_create_external_string_latin1(
  env_ptr: *mut Env,
  string: *const c_char,
  length: usize,
  nogc_finalize_callback: Option<napi_finalize>,
  finalize_hint: *mut c_void,
  result: *mut napi_value,
  copied: *mut bool,
) -> napi_status {
  let status =
    unsafe { napi_create_string_latin1(env_ptr, string, length, result) };

  if status == napi_ok {
    unsafe {
      *copied = true;
    }

    if let Some(finalize) = nogc_finalize_callback {
      unsafe {
        finalize(env_ptr as napi_env, string as *mut c_void, finalize_hint);
      }
    }
  }

  status
}
+
// Node-API `node_api_create_external_string_utf16`. Same strategy as the
// Latin-1 variant above: copy the data, report `*copied = true`, and run the
// caller's finalizer right away since the source buffer is not retained.
#[napi_sym]
fn node_api_create_external_string_utf16(
  env_ptr: *mut Env,
  string: *const u16,
  length: usize,
  nogc_finalize_callback: Option<napi_finalize>,
  finalize_hint: *mut c_void,
  result: *mut napi_value,
  copied: *mut bool,
) -> napi_status {
  let status =
    unsafe { napi_create_string_utf16(env_ptr, string, length, result) };

  if status == napi_ok {
    unsafe {
      *copied = true;
    }

    if let Some(finalize) = nogc_finalize_callback {
      unsafe {
        finalize(env_ptr as napi_env, string as *mut c_void, finalize_hint);
      }
    }
  }

  status
}
+
// Node-API `node_api_create_property_key_utf16`: like
// `napi_create_string_utf16` but creates an *internalized* v8 string, which
// is optimized for use as a property key.
#[napi_sym]
fn node_api_create_property_key_utf16(
  env_ptr: *mut Env,
  string: *const u16,
  length: usize,
  result: *mut napi_value,
) -> napi_status {
  let env = check_env!(env_ptr);
  if length > 0 {
    check_arg!(env, string);
  }
  crate::return_status_if_false!(
    env,
    (length == NAPI_AUTO_LENGTH) || length <= INT_MAX as _,
    napi_invalid_arg
  );

  let buffer = if length > 0 {
    unsafe {
      std::slice::from_raw_parts(
        string,
        if length == NAPI_AUTO_LENGTH {
          // Manual "wcslen": count code units up to the 0 terminator.
          let mut length = 0;
          while *(string.add(length)) != 0 {
            length += 1;
          }
          length
        } else {
          length
        },
      )
    }
  } else {
    &[]
  };

  let Some(string) = v8::String::new_from_two_byte(
    &mut env.scope(),
    buffer,
    v8::NewStringType::Internalized,
  ) else {
    return napi_set_last_error(env_ptr, napi_generic_failure);
  };

  unsafe {
    *result = string.into();
  }

  return napi_clear_last_error(env_ptr);
}
+
// Node-API `napi_create_double`: wraps an f64 as a JS Number.
#[napi_sym]
fn napi_create_double(
  env_ptr: *mut Env,
  value: f64,
  result: *mut napi_value,
) -> napi_status {
  let env = check_env!(env_ptr);
  check_arg!(env, result);

  unsafe {
    *result = v8::Number::new(&mut env.scope(), value).into();
  }

  napi_clear_last_error(env_ptr)
}
+
// Node-API `napi_create_int32`: wraps an i32 as a JS integer Number.
#[napi_sym]
fn napi_create_int32(
  env_ptr: *mut Env,
  value: i32,
  result: *mut napi_value,
) -> napi_status {
  let env = check_env!(env_ptr);
  check_arg!(env, result);

  unsafe {
    *result = v8::Integer::new(&mut env.scope(), value).into();
  }

  napi_clear_last_error(env_ptr)
}
+
// Node-API `napi_create_uint32`: wraps a u32 as a JS integer Number.
#[napi_sym]
fn napi_create_uint32(
  env_ptr: *mut Env,
  value: u32,
  result: *mut napi_value,
) -> napi_status {
  let env = check_env!(env_ptr);
  check_arg!(env, result);

  unsafe {
    *result = v8::Integer::new_from_unsigned(&mut env.scope(), value).into();
  }

  napi_clear_last_error(env_ptr)
}
+
// Node-API `napi_create_int64`: wraps an i64 as a JS Number. The `as _` cast
// to f64 can lose precision beyond 2^53 — inherent to the Node-API contract.
#[napi_sym]
fn napi_create_int64(
  env_ptr: *mut Env,
  value: i64,
  result: *mut napi_value,
) -> napi_status {
  let env = check_env!(env_ptr);
  check_arg!(env, result);

  unsafe {
    *result = v8::Number::new(&mut env.scope(), value as _).into();
  }

  napi_clear_last_error(env_ptr)
}
+
// Node-API `napi_create_bigint_int64`: wraps an i64 as a JS BigInt (exact).
#[napi_sym]
fn napi_create_bigint_int64(
  env_ptr: *mut Env,
  value: i64,
  result: *mut napi_value,
) -> napi_status {
  let env = check_env!(env_ptr);
  check_arg!(env, result);

  unsafe {
    *result = v8::BigInt::new_from_i64(&mut env.scope(), value).into();
  }

  napi_clear_last_error(env_ptr)
}
+
// Node-API `napi_create_bigint_uint64`: wraps a u64 as a JS BigInt (exact).
#[napi_sym]
fn napi_create_bigint_uint64(
  env_ptr: *mut Env,
  value: u64,
  result: *mut napi_value,
) -> napi_status {
  let env = check_env!(env_ptr);
  check_arg!(env, result);

  unsafe {
    *result = v8::BigInt::new_from_u64(&mut env.scope(), value).into();
  }

  napi_clear_last_error(env_ptr)
}
+
// Node-API `napi_create_bigint_words`: builds a BigInt from a sign bit and an
// array of 64-bit little-endian words.
#[napi_sym]
fn napi_create_bigint_words<'s>(
  env: &'s mut Env,
  sign_bit: bool,
  word_count: usize,
  words: *const u64,
  result: *mut napi_value<'s>,
) -> napi_status {
  check_arg!(env, words);
  check_arg!(env, result);

  if word_count > INT_MAX as _ {
    return napi_invalid_arg;
  }

  // SAFETY: `words` was null-checked and the caller guarantees it points at
  // `word_count` u64s.
  match v8::BigInt::new_from_words(&mut env.scope(), sign_bit, unsafe {
    std::slice::from_raw_parts(words, word_count)
  }) {
    Some(value) => unsafe {
      *result = value.into();
    },
    None => {
      return napi_generic_failure;
    }
  }

  napi_ok
}
+
// Node-API `napi_get_boolean`: stores the canonical JS `true`/`false` value
// for `value` into `*result`. Only needs the isolate, not a handle scope.
#[napi_sym]
fn napi_get_boolean(
  env: *mut Env,
  value: bool,
  result: *mut napi_value,
) -> napi_status {
  let env = check_env!(env);
  check_arg!(env, result);

  unsafe {
    *result = v8::Boolean::new(env.isolate(), value).into();
  }

  return napi_clear_last_error(env);
}
+
// Node-API `napi_create_symbol`: creates a unique JS Symbol, optionally with
// a description. A null `description` handle yields an undescribed symbol.
#[napi_sym]
fn napi_create_symbol(
  env_ptr: *mut Env,
  description: napi_value,
  result: *mut napi_value,
) -> napi_status {
  let env = check_env!(env_ptr);
  check_arg!(env, result);

  // If a description handle was passed it must coerce to a string.
  let description = if let Some(d) = *description {
    let Some(d) = d.to_string(&mut env.scope()) else {
      return napi_set_last_error(env, napi_string_expected);
    };
    Some(d)
  } else {
    None
  };

  unsafe {
    *result = v8::Symbol::new(&mut env.scope(), description).into();
  }

  return napi_clear_last_error(env_ptr);
}
+
// Node-API `node_api_symbol_for`: `Symbol.for(description)` — returns the
// symbol from the global registry, creating it on first use. The inner block
// scopes the `&mut Env` borrow so the trailing call can reuse the raw `env`.
#[napi_sym]
fn node_api_symbol_for(
  env: *mut Env,
  utf8description: *const c_char,
  length: usize,
  result: *mut napi_value,
) -> napi_status {
  {
    let env = check_env!(env);
    check_arg!(env, result);

    let description_string =
      match unsafe { check_new_from_utf8_len(env, utf8description, length) } {
        Ok(s) => s,
        Err(status) => return napi_set_last_error(env, status),
      };

    unsafe {
      *result =
        v8::Symbol::for_key(&mut env.scope(), description_string).into();
    }
  }

  napi_clear_last_error(env)
}
+
// Shared body for the four error-constructor entry points below. `$error` is
// the `v8::Exception` constructor to invoke (`error`, `type_error`, …).
// Builds the exception from `msg` (which must already be a JS string), then —
// if a non-null `code` handle was passed — assigns it to the error object's
// `code` property, mirroring Node's error-code convention.
macro_rules! napi_create_error_impl {
  ($env_ptr:ident, $code:ident, $msg:ident, $result:ident, $error:ident) => {{
    let env_ptr = $env_ptr;
    let code = $code;
    let msg = $msg;
    let result = $result;

    let env = check_env!(env_ptr);
    check_arg!(env, msg);
    check_arg!(env, result);

    // `msg` must be an actual JS string, not merely string-coercible.
    let Some(message) =
      msg.and_then(|v| v8::Local::<v8::String>::try_from(v).ok())
    else {
      return napi_set_last_error(env_ptr, napi_string_expected);
    };

    let error = v8::Exception::$error(&mut env.scope(), message);

    if let Some(code) = *code {
      // Exceptions from these constructors are objects, hence the unwrap.
      let error_obj: v8::Local<v8::Object> = error.try_into().unwrap();
      let code_key = v8::String::new(&mut env.scope(), "code").unwrap();
      if !error_obj
        .set(&mut env.scope(), code_key.into(), code)
        .unwrap_or(false)
      {
        return napi_set_last_error(env_ptr, napi_generic_failure);
      }
    }

    unsafe {
      *result = error.into();
    }

    return napi_clear_last_error(env_ptr);
  }};
}
+
// Node-API `napi_create_error`: plain `Error` with optional `code` property.
#[napi_sym]
fn napi_create_error(
  env_ptr: *mut Env,
  code: napi_value,
  msg: napi_value,
  result: *mut napi_value,
) -> napi_status {
  napi_create_error_impl!(env_ptr, code, msg, result, error)
}
+
// Node-API `napi_create_type_error`: `TypeError` with optional `code`.
#[napi_sym]
fn napi_create_type_error(
  env_ptr: *mut Env,
  code: napi_value,
  msg: napi_value,
  result: *mut napi_value,
) -> napi_status {
  napi_create_error_impl!(env_ptr, code, msg, result, type_error)
}
+
// Node-API `napi_create_range_error`: `RangeError` with optional `code`.
#[napi_sym]
fn napi_create_range_error(
  env_ptr: *mut Env,
  code: napi_value,
  msg: napi_value,
  result: *mut napi_value,
) -> napi_status {
  napi_create_error_impl!(env_ptr, code, msg, result, range_error)
}
+
// Node-API `node_api_create_syntax_error`: `SyntaxError` with optional `code`.
#[napi_sym]
fn node_api_create_syntax_error(
  env_ptr: *mut Env,
  code: napi_value,
  msg: napi_value,
  result: *mut napi_value,
) -> napi_status {
  napi_create_error_impl!(env_ptr, code, msg, result, syntax_error)
}
+
// Maps a v8 value onto its Node-API `napi_valuetype`. The branch ORDER is
// significant: e.g. externals and functions are themselves objects, so those
// checks must precede `is_object`. Returns None for values with no Node-API
// classification.
pub fn get_value_type(value: v8::Local<v8::Value>) -> Option<napi_valuetype> {
  if value.is_undefined() {
    Some(napi_undefined)
  } else if value.is_null() {
    Some(napi_null)
  } else if value.is_external() {
    Some(napi_external)
  } else if value.is_boolean() {
    Some(napi_boolean)
  } else if value.is_number() {
    Some(napi_number)
  } else if value.is_big_int() {
    Some(napi_bigint)
  } else if value.is_string() {
    Some(napi_string)
  } else if value.is_symbol() {
    Some(napi_symbol)
  } else if value.is_function() {
    Some(napi_function)
  } else if value.is_object() {
    Some(napi_object)
  } else {
    None
  }
}
+
// Node-API `napi_typeof`: classifies `value` via `get_value_type` and stores
// the `napi_valuetype` in `*result`; `napi_invalid_arg` if unclassifiable.
#[napi_sym]
fn napi_typeof(
  env: *mut Env,
  value: napi_value,
  result: *mut napi_valuetype,
) -> napi_status {
  let env = check_env!(env);
  check_arg!(env, value);
  check_arg!(env, result);

  let Some(ty) = get_value_type(value.unwrap()) else {
    return napi_set_last_error(env, napi_invalid_arg);
  };

  unsafe {
    *result = ty;
  }

  napi_clear_last_error(env)
}
+
// Node-API `napi_get_undefined`: stores the JS `undefined` value in `*result`.
#[napi_sym]
fn napi_get_undefined(env: *mut Env, result: *mut napi_value) -> napi_status {
  let env = check_env!(env);
  check_arg!(env, result);

  unsafe {
    *result = v8::undefined(&mut env.scope()).into();
  }

  return napi_clear_last_error(env);
}
+
// Node-API `napi_get_null`: stores the JS `null` value in `*result`.
#[napi_sym]
fn napi_get_null(env: *mut Env, result: *mut napi_value) -> napi_status {
  let env = check_env!(env);
  check_arg!(env, result);

  unsafe {
    *result = v8::null(&mut env.scope()).into();
  }

  return napi_clear_last_error(env);
}
+
// Node-API `napi_get_cb_info`: extracts call details from a callback-info
// handle. All out-params are optional. `argc` is in/out: on input the
// capacity of `argv`, on output the actual argument count.
#[napi_sym]
fn napi_get_cb_info(
  env: *mut Env,
  cbinfo: napi_callback_info,
  argc: *mut i32,
  argv: *mut napi_value,
  this_arg: *mut napi_value,
  data: *mut *mut c_void,
) -> napi_status {
  let env = check_env!(env);
  check_arg!(env, cbinfo);

  // SAFETY: `cbinfo` is the CallbackInfo pointer handed to the native
  // callback by this runtime's trampoline.
  let cbinfo: &CallbackInfo = unsafe { &*(cbinfo as *const CallbackInfo) };
  let args = unsafe { &*(cbinfo.args as *const v8::FunctionCallbackArguments) };

  if !argv.is_null() {
    // Copy up to `*argc` arguments; out-of-range `args.get(i)` yields
    // `undefined`, which pads `argv` when fewer args were passed.
    check_arg!(env, argc);
    let argc = unsafe { *argc as usize };
    for i in 0..argc {
      let arg = args.get(i as _);
      unsafe {
        *argv.add(i) = arg.into();
      }
    }
  }

  if !argc.is_null() {
    unsafe {
      *argc = args.length();
    }
  }

  if !this_arg.is_null() {
    unsafe {
      *this_arg = args.this().into();
    }
  }

  if !data.is_null() {
    unsafe {
      // `cb_info` is the user data registered with the callback.
      *data = cbinfo.cb_info;
    }
  }

  napi_clear_last_error(env);
  napi_ok
}
+
// Node-API `napi_get_new_target`: stores the callback's `new.target` (the
// constructor for `new` invocations, `undefined` otherwise) in `*result`.
#[napi_sym]
fn napi_get_new_target(
  env: *mut Env,
  cbinfo: napi_callback_info,
  result: *mut napi_value,
) -> napi_status {
  let env = check_env!(env);
  check_arg!(env, cbinfo);
  check_arg!(env, result);

  // SAFETY: `cbinfo` comes from this runtime's callback trampoline.
  let cbinfo: &CallbackInfo = unsafe { &*(cbinfo as *const CallbackInfo) };
  let args = unsafe { &*(cbinfo.args as *const v8::FunctionCallbackArguments) };

  unsafe {
    *result = args.new_target().into();
  }

  return napi_clear_last_error(env);
}
+
// Node-API `napi_call_function`: invokes `func` with receiver `recv` and
// `argc` arguments from `argv`; the return value is stored in `*result` when
// that pointer is non-null.
#[napi_sym]
fn napi_call_function<'s>(
  env: &'s mut Env,
  recv: napi_value<'s>,
  func: napi_value<'s>,
  argc: usize,
  argv: *const napi_value<'s>,
  result: *mut napi_value<'s>,
) -> napi_status {
  check_arg!(env, recv);
  // Reinterprets the argv array as v8 locals; `napi_value` wraps a
  // `v8::Local<v8::Value>`, so the cast is layout-compatible here.
  let args = if argc > 0 {
    check_arg!(env, argv);
    unsafe {
      std::slice::from_raw_parts(argv as *mut v8::Local<v8::Value>, argc)
    }
  } else {
    &[]
  };

  let Some(func) =
    func.and_then(|f| v8::Local::<v8::Function>::try_from(f).ok())
  else {
    return napi_function_expected;
  };

  // None means the call threw (exception left pending in the isolate).
  let Some(v) = func.call(&mut env.scope(), recv.unwrap(), args) else {
    return napi_generic_failure;
  };

  if !result.is_null() {
    unsafe {
      *result = v.into();
    }
  }

  napi_ok
}
+
// Node-API `napi_get_global`: stores a local handle to the env's cached
// global object in `*result`.
#[napi_sym]
fn napi_get_global(env_ptr: *mut Env, result: *mut napi_value) -> napi_status {
  let env = check_env!(env_ptr);
  check_arg!(env, result);

  // `env.global` is a v8::Global; re-open it as a Local in this scope.
  let global = v8::Local::new(&mut env.scope(), &env.global);
  unsafe {
    *result = global.into();
  }

  return napi_clear_last_error(env_ptr);
}
+
// Node-API `napi_throw`: throws `error` in the isolate and records it in
// `env.last_exception`. Refuses with `napi_pending_exception` if an earlier
// exception is still pending.
#[napi_sym]
fn napi_throw(env: *mut Env, error: napi_value) -> napi_status {
  let env = check_env!(env);
  check_arg!(env, error);

  if env.last_exception.is_some() {
    return napi_pending_exception;
  }

  let error = error.unwrap();
  env.scope().throw_exception(error);
  // Keep a Global so the pending exception outlives this handle scope.
  let error = v8::Global::new(&mut env.scope(), error);
  env.last_exception = Some(error);

  napi_clear_last_error(env)
}
+
// Shared body for the four `napi_throw_*` entry points below. Unlike
// `napi_create_error_impl`, `code` and `msg` here are C strings; the built
// exception is thrown immediately and recorded in `env.last_exception`.
macro_rules! napi_throw_error_impl {
  ($env:ident, $code:ident, $msg:ident, $error:ident) => {{
    let env = check_env!($env);
    // Raw alias for helpers that take `*mut Env` while `env` is borrowed.
    let env_ptr = env as *mut Env;
    let code = $code;
    let msg = $msg;

    // Only one exception may be pending at a time.
    if env.last_exception.is_some() {
      return napi_pending_exception;
    }

    let str_ = match unsafe { check_new_from_utf8(env, msg) } {
      Ok(s) => s,
      Err(status) => return status,
    };

    let error = v8::Exception::$error(&mut env.scope(), str_);

    // Optional `code` C string becomes the error object's `code` property.
    if !code.is_null() {
      let error_obj: v8::Local<v8::Object> = error.try_into().unwrap();
      let code = match unsafe { check_new_from_utf8(env_ptr, code) } {
        Ok(s) => s,
        Err(status) => return napi_set_last_error(env, status),
      };
      let code_key = v8::String::new(&mut env.scope(), "code").unwrap();
      if !error_obj
        .set(&mut env.scope(), code_key.into(), code.into())
        .unwrap_or(false)
      {
        return napi_set_last_error(env, napi_generic_failure);
      }
    }

    env.scope().throw_exception(error);
    // Keep a Global so the pending exception outlives this handle scope.
    let error = v8::Global::new(&mut env.scope(), error);
    env.last_exception = Some(error);

    napi_clear_last_error(env)
  }};
}
+
// Node-API `napi_throw_error`: throw a plain `Error` built from C strings.
#[napi_sym]
fn napi_throw_error(
  env: *mut Env,
  code: *const c_char,
  msg: *const c_char,
) -> napi_status {
  napi_throw_error_impl!(env, code, msg, error)
}
+
// Node-API `napi_throw_type_error`: throw a `TypeError` from C strings.
#[napi_sym]
fn napi_throw_type_error(
  env: *mut Env,
  code: *const c_char,
  msg: *const c_char,
) -> napi_status {
  napi_throw_error_impl!(env, code, msg, type_error)
}
+
// Node-API `napi_throw_range_error`: throw a `RangeError` from C strings.
#[napi_sym]
fn napi_throw_range_error(
  env: *mut Env,
  code: *const c_char,
  msg: *const c_char,
) -> napi_status {
  napi_throw_error_impl!(env, code, msg, range_error)
}
+
// Node-API `node_api_throw_syntax_error`: throw a `SyntaxError` from C strings.
#[napi_sym]
fn node_api_throw_syntax_error(
  env: *mut Env,
  code: *const c_char,
  msg: *const c_char,
) -> napi_status {
  napi_throw_error_impl!(env, code, msg, syntax_error)
}
+
// Node-API `napi_is_error`: stores whether `value` is a native error object
// (Error or subclass) into `*result`.
#[napi_sym]
fn napi_is_error(
  env: *mut Env,
  value: napi_value,
  result: *mut bool,
) -> napi_status {
  let env = check_env!(env);
  check_arg!(env, value);
  check_arg!(env, result);

  unsafe {
    *result = value.unwrap().is_native_error();
  }

  return napi_clear_last_error(env);
}
+
// Node-API `napi_get_value_double`: extracts an f64 from a JS Number.
// No coercion — non-Number values yield `napi_number_expected`.
#[napi_sym]
fn napi_get_value_double(
  env_ptr: *mut Env,
  value: napi_value,
  result: *mut f64,
) -> napi_status {
  let env = check_env!(env_ptr);
  check_arg!(env, value);
  check_arg!(env, result);

  let Some(number) =
    value.and_then(|v| v8::Local::<v8::Number>::try_from(v).ok())
  else {
    return napi_set_last_error(env_ptr, napi_number_expected);
  };

  unsafe {
    *result = number.value();
  }

  return napi_clear_last_error(env_ptr);
}
+
// Node-API `napi_get_value_int32`: extracts an i32 using v8's Int32Value
// coercion; None (failed conversion) maps to `napi_number_expected`.
#[napi_sym]
fn napi_get_value_int32(
  env_ptr: *mut Env,
  value: napi_value,
  result: *mut i32,
) -> napi_status {
  let env = check_env!(env_ptr);
  check_arg!(env, value);
  check_arg!(env, result);

  let Some(value) = value.unwrap().int32_value(&mut env.scope()) else {
    return napi_set_last_error(env, napi_number_expected);
  };

  unsafe {
    *result = value;
  }

  return napi_clear_last_error(env_ptr);
}
+
// Node-API `napi_get_value_uint32`: extracts a u32 using v8's Uint32Value
// coercion; None (failed conversion) maps to `napi_number_expected`.
#[napi_sym]
fn napi_get_value_uint32(
  env_ptr: *mut Env,
  value: napi_value,
  result: *mut u32,
) -> napi_status {
  let env = check_env!(env_ptr);
  check_arg!(env, value);
  check_arg!(env, result);

  let Some(value) = value.unwrap().uint32_value(&mut env.scope()) else {
    return napi_set_last_error(env, napi_number_expected);
  };

  unsafe {
    *result = value;
  }

  return napi_clear_last_error(env_ptr);
}
+
// Node-API `napi_get_value_int64`: extracts an i64 from a JS Number.
// Non-finite values (NaN/±Infinity) become 0; finite values are truncated by
// the `as` cast.
#[napi_sym]
fn napi_get_value_int64(
  env_ptr: *mut Env,
  value: napi_value,
  result: *mut i64,
) -> napi_status {
  let env = check_env!(env_ptr);
  check_arg!(env, value);
  check_arg!(env, result);

  let Some(number) =
    value.and_then(|v| v8::Local::<v8::Number>::try_from(v).ok())
  else {
    return napi_set_last_error(env_ptr, napi_number_expected);
  };

  let value = number.value();

  unsafe {
    if value.is_finite() {
      *result = value as _;
    } else {
      *result = 0;
    }
  }

  return napi_clear_last_error(env_ptr);
}
+
// Node-API `napi_get_value_bigint_int64`: extracts an i64 from a BigInt.
// `*lossless` reports whether the BigInt fit without truncation.
#[napi_sym]
fn napi_get_value_bigint_int64(
  env_ptr: *mut Env,
  value: napi_value,
  result: *mut i64,
  lossless: *mut bool,
) -> napi_status {
  let env = check_env!(env_ptr);
  check_arg!(env, value);
  check_arg!(env, result);
  check_arg!(env, lossless);

  let Some(bigint) =
    value.and_then(|v| v8::Local::<v8::BigInt>::try_from(v).ok())
  else {
    return napi_set_last_error(env_ptr, napi_bigint_expected);
  };

  let (result_, lossless_) = bigint.i64_value();

  unsafe {
    *result = result_;
    *lossless = lossless_;
  }

  return napi_clear_last_error(env_ptr);
}
+
// Node-API `napi_get_value_bigint_uint64`: extracts a u64 from a BigInt.
// `*lossless` reports whether the BigInt fit without truncation.
#[napi_sym]
fn napi_get_value_bigint_uint64(
  env_ptr: *mut Env,
  value: napi_value,
  result: *mut u64,
  lossless: *mut bool,
) -> napi_status {
  let env = check_env!(env_ptr);
  check_arg!(env, value);
  check_arg!(env, result);
  check_arg!(env, lossless);

  let Some(bigint) =
    value.and_then(|v| v8::Local::<v8::BigInt>::try_from(v).ok())
  else {
    return napi_set_last_error(env_ptr, napi_bigint_expected);
  };

  let (result_, lossless_) = bigint.u64_value();

  unsafe {
    *result = result_;
    *lossless = lossless_;
  }

  return napi_clear_last_error(env_ptr);
}
+
// Node-API `napi_get_value_bigint_words`: two-mode accessor. With both
// `sign_bit` and `words` null it only reports the word count (size query);
// otherwise both must be non-null and up to `*word_count` 64-bit words plus
// the sign are copied out. `*word_count` is updated in either mode.
#[napi_sym]
fn napi_get_value_bigint_words(
  env_ptr: *mut Env,
  value: napi_value,
  sign_bit: *mut i32,
  word_count: *mut usize,
  words: *mut u64,
) -> napi_status {
  let env = check_env!(env_ptr);
  check_arg!(env, value);
  check_arg!(env, word_count);

  let Some(bigint) =
    value.and_then(|v| v8::Local::<v8::BigInt>::try_from(v).ok())
  else {
    return napi_set_last_error(env_ptr, napi_bigint_expected);
  };

  let word_count_int;

  if sign_bit.is_null() && words.is_null() {
    // Size-query mode.
    word_count_int = bigint.word_count();
  } else {
    check_arg!(env, sign_bit);
    check_arg!(env, words);
    // SAFETY: caller guarantees `words` points at `*word_count` u64 slots.
    let out_words =
      unsafe { std::slice::from_raw_parts_mut(words, *word_count) };
    let (sign, slice_) = bigint.to_words_array(out_words);
    word_count_int = slice_.len();
    unsafe {
      *sign_bit = sign as i32;
    }
  }

  unsafe {
    *word_count = word_count_int;
  }

  return napi_clear_last_error(env_ptr);
}
+
// Node-API `napi_get_value_bool`: extracts a bool from a JS Boolean.
// No coercion — non-Boolean values yield `napi_boolean_expected`.
#[napi_sym]
fn napi_get_value_bool(
  env_ptr: *mut Env,
  value: napi_value,
  result: *mut bool,
) -> napi_status {
  let env = check_env!(env_ptr);
  check_arg!(env, value);
  check_arg!(env, result);

  let Some(boolean) =
    value.and_then(|v| v8::Local::<v8::Boolean>::try_from(v).ok())
  else {
    return napi_set_last_error(env_ptr, napi_boolean_expected);
  };

  unsafe {
    *result = boolean.is_true();
  }

  return napi_clear_last_error(env_ptr);
}
+
// Node-API `napi_get_value_string_latin1`: copies a JS string into a
// caller-supplied Latin-1 buffer. With a null `buf`, only the required
// length (excluding NUL) is reported via `result`. Otherwise up to
// `bufsize - 1` bytes are written and NUL-terminated; `*result` (optional)
// gets the number of bytes copied.
#[napi_sym]
fn napi_get_value_string_latin1(
  env_ptr: *mut Env,
  value: napi_value,
  buf: *mut c_char,
  bufsize: usize,
  result: *mut usize,
) -> napi_status {
  let env = check_env!(env_ptr);
  check_arg!(env, value);

  let Some(value) =
    value.and_then(|v| v8::Local::<v8::String>::try_from(v).ok())
  else {
    return napi_set_last_error(env_ptr, napi_string_expected);
  };

  if buf.is_null() {
    // Length-query mode.
    check_arg!(env, result);
    unsafe {
      *result = value.length();
    }
  } else if bufsize != 0 {
    // Reserve one byte for the NUL terminator.
    let buffer =
      unsafe { std::slice::from_raw_parts_mut(buf as _, bufsize - 1) };
    let copied = value.write_one_byte(
      &mut env.scope(),
      buffer,
      0,
      v8::WriteOptions::NO_NULL_TERMINATION,
    );
    unsafe {
      buf.add(copied).write(0);
    }
    if !result.is_null() {
      unsafe {
        *result = copied;
      }
    }
  } else if !result.is_null() {
    // Zero-capacity buffer: nothing copied.
    unsafe {
      *result = 0;
    }
  }

  napi_clear_last_error(env_ptr)
}
+
// Node-API `napi_get_value_string_utf8`: copies a JS string into a
// caller-supplied UTF-8 buffer. Null `buf` => length query (UTF-8 bytes,
// excluding NUL). Otherwise up to `bufsize - 1` bytes are written (invalid
// sequences replaced) and NUL-terminated; `*result` (optional) gets the
// number of bytes copied.
#[napi_sym]
fn napi_get_value_string_utf8(
  env_ptr: *mut Env,
  value: napi_value,
  buf: *mut u8,
  bufsize: usize,
  result: *mut usize,
) -> napi_status {
  let env = check_env!(env_ptr);
  check_arg!(env, value);

  let Some(value) =
    value.and_then(|v| v8::Local::<v8::String>::try_from(v).ok())
  else {
    return napi_set_last_error(env_ptr, napi_string_expected);
  };

  if buf.is_null() {
    // Length-query mode (byte length, not UTF-16 unit count).
    check_arg!(env, result);
    unsafe {
      *result = value.utf8_length(env.isolate());
    }
  } else if bufsize != 0 {
    // Reserve one byte for the NUL terminator.
    let buffer =
      unsafe { std::slice::from_raw_parts_mut(buf as _, bufsize - 1) };
    let copied = value.write_utf8(
      &mut env.scope(),
      buffer,
      None,
      v8::WriteOptions::REPLACE_INVALID_UTF8
        | v8::WriteOptions::NO_NULL_TERMINATION,
    );
    unsafe {
      buf.add(copied).write(0);
    }
    if !result.is_null() {
      unsafe {
        *result = copied;
      }
    }
  } else if !result.is_null() {
    // Zero-capacity buffer: nothing copied.
    unsafe {
      *result = 0;
    }
  }

  napi_clear_last_error(env_ptr)
}
+
// Node-API `napi_get_value_string_utf16`: copies a JS string into a
// caller-supplied UTF-16 buffer. Null `buf` => length query (code units,
// excluding NUL). Otherwise up to `bufsize - 1` units are written and
// NUL-terminated; `*result` (optional) gets the number of units copied.
#[napi_sym]
fn napi_get_value_string_utf16(
  env_ptr: *mut Env,
  value: napi_value,
  buf: *mut u16,
  bufsize: usize,
  result: *mut usize,
) -> napi_status {
  let env = check_env!(env_ptr);
  check_arg!(env, value);

  let Some(value) =
    value.and_then(|v| v8::Local::<v8::String>::try_from(v).ok())
  else {
    return napi_set_last_error(env_ptr, napi_string_expected);
  };

  if buf.is_null() {
    // Length-query mode (UTF-16 code units).
    check_arg!(env, result);
    unsafe {
      *result = value.length();
    }
  } else if bufsize != 0 {
    // Reserve one unit for the NUL terminator.
    let buffer =
      unsafe { std::slice::from_raw_parts_mut(buf as _, bufsize - 1) };
    let copied = value.write(
      &mut env.scope(),
      buffer,
      0,
      v8::WriteOptions::NO_NULL_TERMINATION,
    );
    unsafe {
      buf.add(copied).write(0);
    }
    if !result.is_null() {
      unsafe {
        *result = copied;
      }
    }
  } else if !result.is_null() {
    // Zero-capacity buffer: nothing copied.
    unsafe {
      *result = 0;
    }
  }

  napi_clear_last_error(env_ptr)
}
+
// Node-API `napi_coerce_to_bool`: JS ToBoolean — never throws, hence no
// failure branch.
#[napi_sym]
fn napi_coerce_to_bool<'s>(
  env: &'s mut Env,
  value: napi_value,
  result: *mut napi_value<'s>,
) -> napi_status {
  check_arg!(env, value);
  check_arg!(env, result);

  let coerced = value.unwrap().to_boolean(&mut env.scope());

  unsafe {
    *result = coerced.into();
  }

  napi_ok
}
+
// Node-API `napi_coerce_to_number`: JS ToNumber; a throwing coercion (e.g.
// a Symbol) yields `napi_number_expected`.
#[napi_sym]
fn napi_coerce_to_number<'s>(
  env: &'s mut Env,
  value: napi_value,
  result: *mut napi_value<'s>,
) -> napi_status {
  check_arg!(env, value);
  check_arg!(env, result);

  let Some(coerced) = value.unwrap().to_number(&mut env.scope()) else {
    return napi_number_expected;
  };

  unsafe {
    *result = coerced.into();
  }

  napi_ok
}
+
// Node-API `napi_coerce_to_object`: JS ToObject; failure (e.g. null or
// undefined input) yields `napi_object_expected`.
#[napi_sym]
fn napi_coerce_to_object<'s>(
  env: &'s mut Env,
  value: napi_value,
  result: *mut napi_value<'s>,
) -> napi_status {
  check_arg!(env, value);
  check_arg!(env, result);

  let Some(coerced) = value.unwrap().to_object(&mut env.scope()) else {
    return napi_object_expected;
  };

  unsafe {
    *result = coerced.into();
  }

  napi_ok
}
+
// Node-API `napi_coerce_to_string`: JS ToString; a throwing coercion (e.g. a
// Symbol) yields `napi_string_expected`.
#[napi_sym]
fn napi_coerce_to_string<'s>(
  env: &'s mut Env,
  value: napi_value,
  result: *mut napi_value<'s>,
) -> napi_status {
  check_arg!(env, value);
  check_arg!(env, result);

  let Some(coerced) = value.unwrap().to_string(&mut env.scope()) else {
    return napi_string_expected;
  };

  unsafe {
    *result = coerced.into();
  }

  napi_ok
}
+
+#[napi_sym]
+fn napi_wrap(
+ env: &mut Env,
+ js_object: napi_value,
+ native_object: *mut c_void,
+ finalize_cb: Option<napi_finalize>,
+ finalize_hint: *mut c_void,
+ result: *mut napi_ref,
+) -> napi_status {
+ check_arg!(env, js_object);
+ let env_ptr = env as *mut Env;
+
+ let Some(obj) =
+ js_object.and_then(|v| v8::Local::<v8::Object>::try_from(v).ok())
+ else {
+ return napi_invalid_arg;
+ };
+
+ let napi_wrap = v8::Local::new(&mut env.scope(), &env.shared().napi_wrap);
+
+ if obj
+ .has_private(&mut env.scope(), napi_wrap)
+ .unwrap_or(false)
+ {
+ return napi_invalid_arg;
+ }
+
+ if !result.is_null() {
+ check_arg!(env, finalize_cb);
+ }
+
+ let ownership = if result.is_null() {
+ ReferenceOwnership::Runtime
+ } else {
+ ReferenceOwnership::Userland
+ };
+ let reference = Reference::new(
+ env_ptr,
+ obj.into(),
+ 0,
+ ownership,
+ finalize_cb,
+ native_object,
+ finalize_hint,
+ );
+
+ let reference = Reference::into_raw(reference) as *mut c_void;
+
+ if !result.is_null() {
+ check_arg!(env, finalize_cb);
+ unsafe {
+ *result = reference;
+ }
+ }
+
+ let external = v8::External::new(&mut env.scope(), reference);
+ assert!(obj
+ .set_private(&mut env.scope(), napi_wrap, external.into())
+ .unwrap());
+
+ napi_ok
+}
+
/// Shared implementation for `napi_unwrap` / `napi_remove_wrap`.
///
/// Reads the `Reference` stashed in the object's private `napi_wrap` slot
/// and writes its `finalize_data` (the native pointer) through `result`.
/// When `keep` is false the private slot is deleted and the reference is
/// removed, i.e. the wrap is torn down.
fn unwrap(
  env: &mut Env,
  obj: napi_value,
  result: *mut *mut c_void,
  keep: bool,
) -> napi_status {
  check_arg!(env, obj);
  // For napi_unwrap the out-pointer is mandatory; napi_remove_wrap may pass
  // null to discard the native pointer.
  if keep {
    check_arg!(env, result);
  }

  let Some(obj) = obj.and_then(|v| v8::Local::<v8::Object>::try_from(v).ok())
  else {
    return napi_invalid_arg;
  };

  let napi_wrap = v8::Local::new(&mut env.scope(), &env.shared().napi_wrap);
  let Some(val) = obj.get_private(&mut env.scope(), napi_wrap) else {
    return napi_invalid_arg;
  };

  // The slot holds a v8::External pointing at the Reference; anything else
  // means the object was never wrapped.
  let Ok(external) = v8::Local::<v8::External>::try_from(val) else {
    return napi_invalid_arg;
  };

  let reference = external.value() as *mut Reference;
  let reference = unsafe { &mut *reference };

  if !result.is_null() {
    unsafe {
      *result = reference.finalize_data;
    }
  }

  if !keep {
    assert!(obj
      .delete_private(&mut env.scope(), napi_wrap)
      .unwrap_or(false));
    unsafe { Reference::remove(reference) };
  }

  napi_ok
}
+
/// Retrieves the native pointer previously attached with `napi_wrap`,
/// leaving the wrap in place.
#[napi_sym]
fn napi_unwrap(
  env: &mut Env,
  obj: napi_value,
  result: *mut *mut c_void,
) -> napi_status {
  unwrap(env, obj, result, true)
}
+
/// Retrieves the native pointer previously attached with `napi_wrap` and
/// removes the wrap (the finalizer will no longer run via GC).
#[napi_sym]
fn napi_remove_wrap(
  env: &mut Env,
  obj: napi_value,
  result: *mut *mut c_void,
) -> napi_status {
  unwrap(env, obj, result, false)
}
+
/// Heap payload behind a `v8::External` created by `napi_create_external`.
/// Carries the user's opaque pointer plus an optional type tag set by
/// `napi_type_tag_object` and checked by `napi_check_object_type_tag`.
struct ExternalWrapper {
  // Caller-supplied opaque pointer; returned by napi_get_value_external.
  data: *mut c_void,
  // At most one tag may ever be applied to an external.
  type_tag: Option<napi_type_tag>,
}
+
/// Creates a JS external value carrying an opaque native pointer.
///
/// The pointer is boxed inside an `ExternalWrapper` so a type tag can be
/// attached later. If a finalizer is supplied, a runtime-owned `Reference`
/// is created so the callback fires when the external is collected.
/// Note: the `ExternalWrapper` box itself is reclaimed via that reference
/// path; without a finalizer it lives for the lifetime of the external.
#[napi_sym]
fn napi_create_external<'s>(
  env: &'s mut Env,
  data: *mut c_void,
  finalize_cb: Option<napi_finalize>,
  finalize_hint: *mut c_void,
  result: *mut napi_value<'s>,
) -> napi_status {
  let env_ptr = env as *mut Env;
  check_arg!(env, result);

  let wrapper = Box::new(ExternalWrapper {
    data,
    type_tag: None,
  });

  let wrapper = Box::into_raw(wrapper);
  let external = v8::External::new(&mut env.scope(), wrapper as _);

  if let Some(finalize_cb) = finalize_cb {
    // Runtime-owned weak reference: fires finalize_cb(data, hint) on GC.
    Reference::into_raw(Reference::new(
      env_ptr,
      external.into(),
      0,
      ReferenceOwnership::Runtime,
      Some(finalize_cb),
      data,
      finalize_hint,
    ));
  }

  unsafe {
    *result = external.into();
  }

  napi_ok
}
+
/// Applies a 128-bit type tag to an object or external value.
///
/// Externals store the tag inside their `ExternalWrapper`; plain objects
/// store it as an unsigned BigInt in a private slot. Tagging twice is an
/// error (`napi_invalid_arg`).
#[napi_sym]
fn napi_type_tag_object(
  env: &mut Env,
  object_or_external: napi_value,
  type_tag: *const napi_type_tag,
) -> napi_status {
  check_arg!(env, object_or_external);
  check_arg!(env, type_tag);

  let val = object_or_external.unwrap();

  // External path: tag lives in the wrapper, not a private property.
  if let Ok(external) = v8::Local::<v8::External>::try_from(val) {
    let wrapper_ptr = external.value() as *mut ExternalWrapper;
    let wrapper = unsafe { &mut *wrapper_ptr };
    if wrapper.type_tag.is_some() {
      return napi_invalid_arg;
    }
    wrapper.type_tag = Some(unsafe { *type_tag });
    return napi_ok;
  }

  let Some(object) = val.to_object(&mut env.scope()) else {
    return napi_object_expected;
  };

  let key = v8::Local::new(&mut env.scope(), &env.shared().type_tag);

  // Already tagged -> error, matching the external path above.
  if object.has_private(&mut env.scope(), key).unwrap_or(false) {
    return napi_invalid_arg;
  }

  // Reinterpret the 128-bit tag as two little-endian u64 words and encode it
  // as a non-negative BigInt.
  let slice = unsafe { std::slice::from_raw_parts(type_tag as *const u64, 2) };
  let Some(tag) = v8::BigInt::new_from_words(&mut env.scope(), false, slice)
  else {
    return napi_generic_failure;
  };

  if !object
    .set_private(&mut env.scope(), key, tag.into())
    .unwrap_or(false)
  {
    return napi_generic_failure;
  }

  napi_ok
}
+
/// Checks whether an object/external carries exactly the given type tag.
///
/// `*result` is set to true only on an exact 128-bit match; untagged values
/// yield false (for plain objects with no tag slot the status is
/// `napi_generic_failure`, mirroring the write path's failure mode).
#[napi_sym]
fn napi_check_object_type_tag(
  env: &mut Env,
  object_or_external: napi_value,
  type_tag: *const napi_type_tag,
  result: *mut bool,
) -> napi_status {
  check_arg!(env, object_or_external);
  check_arg!(env, type_tag);
  check_arg!(env, result);

  let type_tag = unsafe { *type_tag };

  let val = object_or_external.unwrap();

  // External path: compare against the tag stored in the wrapper.
  if let Ok(external) = v8::Local::<v8::External>::try_from(val) {
    let wrapper_ptr = external.value() as *mut ExternalWrapper;
    let wrapper = unsafe { &mut *wrapper_ptr };
    unsafe {
      *result = match wrapper.type_tag {
        Some(t) => t == type_tag,
        None => false,
      };
    };
    return napi_ok;
  }

  let Some(object) = val.to_object(&mut env.scope()) else {
    return napi_object_expected;
  };

  let key = v8::Local::new(&mut env.scope(), &env.shared().type_tag);

  let Some(val) = object.get_private(&mut env.scope(), key) else {
    return napi_generic_failure;
  };

  unsafe {
    *result = false;
  }

  // The tag was stored as a non-negative BigInt of up to two u64 words;
  // BigInt normalization may drop high zero words, so compare all widths.
  if let Ok(bigint) = v8::Local::<v8::BigInt>::try_from(val) {
    let mut words = [0u64; 2];
    let (sign, words) = bigint.to_words_array(&mut words);
    if !sign {
      let pass = if words.len() == 2 {
        type_tag.lower == words[0] && type_tag.upper == words[1]
      } else if words.len() == 1 {
        type_tag.lower == words[0] && type_tag.upper == 0
      } else if words.is_empty() {
        type_tag.lower == 0 && type_tag.upper == 0
      } else {
        false
      };
      unsafe {
        *result = pass;
      }
    }
  }

  napi_ok
}
+
/// Retrieves the opaque native pointer stored in an external value created
/// by `napi_create_external`. Returns `napi_invalid_arg` for non-externals.
#[napi_sym]
fn napi_get_value_external(
  env: *mut Env,
  value: napi_value,
  result: *mut *mut c_void,
) -> napi_status {
  let env = check_env!(env);
  check_arg!(env, value);
  check_arg!(env, result);

  let Some(external) =
    value.and_then(|v| v8::Local::<v8::External>::try_from(v).ok())
  else {
    return napi_set_last_error(env, napi_invalid_arg);
  };

  // The external's payload is always an ExternalWrapper allocated by
  // napi_create_external; hand back the user's data pointer.
  let wrapper_ptr = external.value() as *const ExternalWrapper;
  let wrapper = unsafe { &*wrapper_ptr };

  unsafe {
    *result = wrapper.data;
  }

  napi_clear_last_error(env)
}
+
/// Creates a userland-owned reference to a JS value with the given initial
/// refcount. The caller must eventually free it with `napi_delete_reference`.
#[napi_sym]
fn napi_create_reference(
  env: *mut Env,
  value: napi_value,
  initial_refcount: u32,
  result: *mut napi_ref,
) -> napi_status {
  let env = check_env!(env);
  check_arg!(env, value);
  check_arg!(env, result);

  let value = value.unwrap();

  // No finalizer/data: this is a pure handle-lifetime reference.
  let reference = Reference::new(
    env,
    value,
    initial_refcount,
    ReferenceOwnership::Userland,
    None,
    std::ptr::null_mut(),
    std::ptr::null_mut(),
  );

  let ptr = Reference::into_raw(reference);

  unsafe {
    *result = ptr as _;
  }

  napi_clear_last_error(env)
}
+
/// Destroys a reference created by `napi_create_reference` (or returned by
/// `napi_wrap`/`napi_add_finalizer`), reclaiming its allocation.
#[napi_sym]
fn napi_delete_reference(env: *mut Env, ref_: napi_ref) -> napi_status {
  let env = check_env!(env);
  check_arg!(env, ref_);

  // Re-box the raw pointer so Drop runs and frees the Reference.
  let reference = unsafe { Reference::from_raw(ref_ as _) };

  drop(reference);

  napi_clear_last_error(env)
}
+
/// Increments a reference's refcount; optionally reports the new count
/// through `result` (which may be null).
#[napi_sym]
fn napi_reference_ref(
  env: *mut Env,
  ref_: napi_ref,
  result: *mut u32,
) -> napi_status {
  let env = check_env!(env);
  check_arg!(env, ref_);

  let reference = unsafe { &mut *(ref_ as *mut Reference) };

  let count = reference.ref_();

  if !result.is_null() {
    unsafe {
      *result = count;
    }
  }

  napi_clear_last_error(env)
}
+
/// Decrements a reference's refcount; optionally reports the new count.
/// Unreffing a reference whose count is already zero is an error
/// (`napi_generic_failure`), never an underflow.
#[napi_sym]
fn napi_reference_unref(
  env: *mut Env,
  ref_: napi_ref,
  result: *mut u32,
) -> napi_status {
  let env = check_env!(env);
  check_arg!(env, ref_);

  let reference = unsafe { &mut *(ref_ as *mut Reference) };

  if reference.ref_count == 0 {
    return napi_set_last_error(env, napi_generic_failure);
  }

  let count = reference.unref();

  if !result.is_null() {
    unsafe {
      *result = count;
    }
  }

  napi_clear_last_error(env)
}
+
/// Resolves a reference back to a JS value.
///
/// Strong references always resolve; weak references resolve to None (which
/// converts to a null napi_value) once the target has been collected.
#[napi_sym]
fn napi_get_reference_value(
  env_ptr: *mut Env,
  ref_: napi_ref,
  result: *mut napi_value,
) -> napi_status {
  let env = check_env!(env_ptr);
  check_arg!(env, ref_);
  check_arg!(env, result);

  let reference = unsafe { &mut *(ref_ as *mut Reference) };

  let value = match &reference.state {
    ReferenceState::Strong(g) => Some(v8::Local::new(&mut env.scope(), g)),
    ReferenceState::Weak(w) => w.to_local(&mut env.scope()),
  };

  unsafe {
    *result = value.into();
  }

  napi_clear_last_error(env_ptr)
}
+
/// No-op in this implementation: handle-scope lifetime is managed by the
/// embedder's own v8 scopes, so the out-handle is never populated.
#[napi_sym]
fn napi_open_handle_scope(
  env: *mut Env,
  _result: *mut napi_handle_scope,
) -> napi_status {
  let env = check_env!(env);
  napi_clear_last_error(env)
}
+
/// No-op counterpart to `napi_open_handle_scope` (see above).
#[napi_sym]
fn napi_close_handle_scope(
  env: *mut Env,
  _scope: napi_handle_scope,
) -> napi_status {
  let env = check_env!(env);
  napi_clear_last_error(env)
}
+
/// No-op in this implementation: escapable scopes are not tracked, so the
/// out-handle is never populated.
#[napi_sym]
fn napi_open_escapable_handle_scope(
  env: *mut Env,
  _result: *mut napi_escapable_handle_scope,
) -> napi_status {
  let env = check_env!(env);
  napi_clear_last_error(env)
}
+
/// No-op counterpart to `napi_open_escapable_handle_scope`.
#[napi_sym]
fn napi_close_escapable_handle_scope(
  env: *mut Env,
  _scope: napi_escapable_handle_scope,
) -> napi_status {
  let env = check_env!(env);
  napi_clear_last_error(env)
}
+
/// Since handle scopes are no-ops in this implementation, "escaping" a
/// handle is a plain copy of `escapee` into `result`.
///
/// NOTE(review): `result` is dereferenced without a `check_arg!` null check,
/// unlike the sibling functions — confirm callers always pass non-null.
#[napi_sym]
fn napi_escape_handle<'s>(
  env: *mut Env,
  _scope: napi_escapable_handle_scope,
  escapee: napi_value<'s>,
  result: *mut napi_value<'s>,
) -> napi_status {
  let env = check_env!(env);

  unsafe {
    *result = escapee;
  }

  napi_clear_last_error(env)
}
+
/// Invokes `constructor` with `new`, passing `argc` arguments from `argv`,
/// and writes the new instance through `result`.
///
/// Returns `napi_pending_exception` when construction throws.
#[napi_sym]
fn napi_new_instance<'s>(
  env: &'s mut Env,
  constructor: napi_value,
  argc: usize,
  argv: *const napi_value,
  result: *mut napi_value<'s>,
) -> napi_status {
  check_arg!(env, constructor);
  // argv may be null when there are no arguments.
  if argc > 0 {
    check_arg!(env, argv);
  }
  check_arg!(env, result);

  let Some(func) =
    constructor.and_then(|v| v8::Local::<v8::Function>::try_from(v).ok())
  else {
    return napi_invalid_arg;
  };

  // napi_value is ABI-compatible with v8::Local<v8::Value>, so the caller's
  // argument array can be viewed directly as a Local slice.
  let args = if argc > 0 {
    unsafe {
      std::slice::from_raw_parts(argv as *mut v8::Local<v8::Value>, argc)
    }
  } else {
    &[]
  };

  let Some(value) = func.new_instance(&mut env.scope(), args) else {
    return napi_pending_exception;
  };

  unsafe {
    *result = value.into();
  }

  napi_ok
}
+
/// Implements the JS `instanceof` operator: `*result = object instanceof
/// constructor`.
///
/// A non-function constructor throws `ERR_NAPI_CONS_FUNCTION` into the JS
/// environment and returns `napi_function_expected`.
#[napi_sym]
fn napi_instanceof(
  env: &mut Env,
  object: napi_value,
  constructor: napi_value,
  result: *mut bool,
) -> napi_status {
  check_arg!(env, object);
  check_arg!(env, result);

  let Some(ctor) = constructor.and_then(|v| v.to_object(&mut env.scope()))
  else {
    return napi_object_expected;
  };

  if !ctor.is_function() {
    unsafe {
      napi_throw_type_error(
        env,
        c"ERR_NAPI_CONS_FUNCTION".as_ptr(),
        c"Constructor must be a function".as_ptr(),
      );
    }
    return napi_function_expected;
  }

  // instance_of returns None when the check itself throws.
  let Some(res) = object.unwrap().instance_of(&mut env.scope(), ctor) else {
    return napi_generic_failure;
  };

  unsafe {
    *result = res;
  }

  napi_ok
}
+
/// Reports whether an exception captured by the runtime is waiting to be
/// retrieved (see `napi_get_and_clear_last_exception`).
#[napi_sym]
fn napi_is_exception_pending(
  env_ptr: *mut Env,
  result: *mut bool,
) -> napi_status {
  let env = check_env!(env_ptr);
  check_arg!(env, result);

  unsafe {
    *result = env.last_exception.is_some();
  }

  napi_clear_last_error(env_ptr)
}
+
/// Takes the pending exception out of the environment (clearing it) and
/// returns it; returns `undefined` when no exception is pending.
#[napi_sym]
fn napi_get_and_clear_last_exception(
  env_ptr: *mut Env,
  result: *mut napi_value,
) -> napi_status {
  let env = check_env!(env_ptr);
  check_arg!(env, result);

  // take() both reads and clears env.last_exception in one step.
  let ex: v8::Local<v8::Value> =
    if let Some(last_exception) = env.last_exception.take() {
      v8::Local::new(&mut env.scope(), last_exception)
    } else {
      v8::undefined(&mut env.scope()).into()
    };

  unsafe {
    *result = ex.into();
  }

  napi_clear_last_error(env_ptr)
}
+
/// Sets `*result` to whether `value` is an ArrayBuffer.
#[napi_sym]
fn napi_is_arraybuffer(
  env: *mut Env,
  value: napi_value,
  result: *mut bool,
) -> napi_status {
  let env = check_env!(env);
  check_arg!(env, value);
  check_arg!(env, result);

  unsafe {
    *result = value.unwrap().is_array_buffer();
  }

  napi_clear_last_error(env)
}
+
/// Allocates a new ArrayBuffer of `len` bytes; optionally exposes its
/// backing-store pointer through `data` (may be null if not wanted).
#[napi_sym]
fn napi_create_arraybuffer<'s>(
  env: &'s mut Env,
  len: usize,
  data: *mut *mut c_void,
  result: *mut napi_value<'s>,
) -> napi_status {
  check_arg!(env, result);

  let buffer = v8::ArrayBuffer::new(&mut env.scope(), len);

  if !data.is_null() {
    unsafe {
      *data = get_array_buffer_ptr(buffer);
    }
  }

  unsafe {
    *result = buffer.into();
  }

  napi_ok
}
+
/// Wraps caller-owned memory (`data`, `byte_length`) in an ArrayBuffer
/// without copying. `finalize_cb` is invoked (with `finalize_hint`) when
/// the backing store is released, letting the caller free the memory.
#[napi_sym]
fn napi_create_external_arraybuffer<'s>(
  env: &'s mut Env,
  data: *mut c_void,
  byte_length: usize,
  finalize_cb: napi_finalize,
  finalize_hint: *mut c_void,
  result: *mut napi_value<'s>,
) -> napi_status {
  check_arg!(env, result);

  let store = make_external_backing_store(
    env,
    data,
    byte_length,
    std::ptr::null_mut(),
    finalize_cb,
    finalize_hint,
  );

  let ab =
    v8::ArrayBuffer::with_backing_store(&mut env.scope(), &store.make_shared());
  let value: v8::Local<v8::Value> = ab.into();

  unsafe {
    *result = value.into();
  }

  napi_ok
}
+
+#[napi_sym]
+fn napi_get_arraybuffer_info(
+ env: *mut Env,
+ value: napi_value,
+ data: *mut *mut c_void,
+ length: *mut usize,
+) -> napi_status {
+ let env = check_env!(env);
+ check_arg!(env, value);
+
+ let Some(buf) =
+ value.and_then(|v| v8::Local::<v8::ArrayBuffer>::try_from(v).ok())
+ else {
+ return napi_set_last_error(env, napi_invalid_arg);
+ };
+
+ if !data.is_null() {
+ unsafe {
+ *data = get_array_buffer_ptr(buf);
+ }
+ }
+
+ if !length.is_null() {
+ unsafe {
+ *length = buf.byte_length();
+ }
+ }
+
+ napi_ok
+}
+
+#[napi_sym]
+fn napi_is_typedarray(
+ env: *mut Env,
+ value: napi_value,
+ result: *mut bool,
+) -> napi_status {
+ let env = check_env!(env);
+ check_arg!(env, value);
+ check_arg!(env, result);
+
+ unsafe {
+ *result = value.unwrap().is_typed_array();
+ }
+
+ napi_ok
+}
+
/// Creates a TypedArray view of `length` elements over `arraybuffer`,
/// starting at `byte_offset`.
///
/// Throws a JS RangeError (and returns `napi_pending_exception`) when the
/// offset is misaligned for the element size or the view would exceed the
/// buffer. Unknown `ty` values yield `napi_invalid_arg`.
#[napi_sym]
fn napi_create_typedarray<'s>(
  env: &'s mut Env,
  ty: napi_typedarray_type,
  length: usize,
  arraybuffer: napi_value,
  byte_offset: usize,
  result: *mut napi_value<'s>,
) -> napi_status {
  check_arg!(env, arraybuffer);
  check_arg!(env, result);

  let Some(ab) =
    arraybuffer.and_then(|v| v8::Local::<v8::ArrayBuffer>::try_from(v).ok())
  else {
    return napi_arraybuffer_expected;
  };

  // Shared validation + construction, instantiated per element type below.
  macro_rules! create {
    ($TypedArray:ident, $size_of_element:expr) => {{
      let soe = $size_of_element;
      // Element alignment: offset must be a multiple of the element size.
      if soe > 1 && byte_offset % soe != 0 {
        let message = v8::String::new(
          &mut env.scope(),
          format!(
            "start offset of {} should be multiple of {}",
            stringify!($TypedArray),
            soe
          )
          .as_str(),
        )
        .unwrap();
        let exc = v8::Exception::range_error(&mut env.scope(), message);
        env.scope().throw_exception(exc);
        return napi_pending_exception;
      }

      // Bounds: the view must fit inside the buffer.
      if length * soe + byte_offset > ab.byte_length() {
        let message =
          v8::String::new(&mut env.scope(), "Invalid typed array length")
            .unwrap();
        let exc = v8::Exception::range_error(&mut env.scope(), message);
        env.scope().throw_exception(exc);
        return napi_pending_exception;
      }

      let Some(ta) =
        v8::$TypedArray::new(&mut env.scope(), ab, byte_offset, length)
      else {
        return napi_generic_failure;
      };
      ta.into()
    }};
  }

  let typedarray: v8::Local<v8::Value> = match ty {
    napi_uint8_array => create!(Uint8Array, 1),
    napi_uint8_clamped_array => create!(Uint8ClampedArray, 1),
    napi_int8_array => create!(Int8Array, 1),
    napi_uint16_array => create!(Uint16Array, 2),
    napi_int16_array => create!(Int16Array, 2),
    napi_uint32_array => create!(Uint32Array, 4),
    napi_int32_array => create!(Int32Array, 4),
    napi_float32_array => create!(Float32Array, 4),
    napi_float64_array => create!(Float64Array, 8),
    napi_bigint64_array => create!(BigInt64Array, 8),
    napi_biguint64_array => create!(BigUint64Array, 8),
    _ => {
      return napi_invalid_arg;
    }
  };

  unsafe {
    *result = typedarray.into();
  }

  napi_ok
}
+
/// Reports a TypedArray's element type, length, data pointer, backing
/// ArrayBuffer, and byte offset. Every out-parameter is optional (null
/// pointers are skipped).
#[napi_sym]
fn napi_get_typedarray_info(
  env_ptr: *mut Env,
  typedarray: napi_value,
  type_: *mut napi_typedarray_type,
  length: *mut usize,
  data: *mut *mut c_void,
  arraybuffer: *mut napi_value,
  byte_offset: *mut usize,
) -> napi_status {
  let env = check_env!(env_ptr);
  check_arg!(env, typedarray);

  let Some(array) =
    typedarray.and_then(|v| v8::Local::<v8::TypedArray>::try_from(v).ok())
  else {
    return napi_set_last_error(env_ptr, napi_invalid_arg);
  };

  if !type_.is_null() {
    // Every TypedArray is exactly one of the concrete element types, so the
    // final branch is unreachable by construction.
    let tatype = if array.is_int8_array() {
      napi_int8_array
    } else if array.is_uint8_array() {
      napi_uint8_array
    } else if array.is_uint8_clamped_array() {
      napi_uint8_clamped_array
    } else if array.is_int16_array() {
      napi_int16_array
    } else if array.is_uint16_array() {
      napi_uint16_array
    } else if array.is_int32_array() {
      napi_int32_array
    } else if array.is_uint32_array() {
      napi_uint32_array
    } else if array.is_float32_array() {
      napi_float32_array
    } else if array.is_float64_array() {
      napi_float64_array
    } else if array.is_big_int64_array() {
      napi_bigint64_array
    } else if array.is_big_uint64_array() {
      napi_biguint64_array
    } else {
      unreachable!();
    };

    unsafe {
      *type_ = tatype;
    }
  }

  if !length.is_null() {
    unsafe {
      *length = array.length();
    }
  }

  if !data.is_null() {
    unsafe {
      *data = array.data();
    }
  }

  if !arraybuffer.is_null() {
    let buf = array.buffer(&mut env.scope()).unwrap();
    unsafe {
      *arraybuffer = buf.into();
    }
  }

  if !byte_offset.is_null() {
    unsafe {
      *byte_offset = array.byte_offset();
    }
  }

  napi_clear_last_error(env_ptr)
}
+
+#[napi_sym]
+fn napi_create_dataview<'s>(
+ env: &'s mut Env,
+ byte_length: usize,
+ arraybuffer: napi_value<'s>,
+ byte_offset: usize,
+ result: *mut napi_value<'s>,
+) -> napi_status {
+ check_arg!(env, arraybuffer);
+ check_arg!(env, result);
+
+ let Some(buffer) =
+ arraybuffer.and_then(|v| v8::Local::<v8::ArrayBuffer>::try_from(v).ok())
+ else {
+ return napi_invalid_arg;
+ };
+
+ if byte_length + byte_offset > buffer.byte_length() {
+ unsafe {
+ return napi_throw_range_error(
+ env,
+ c"ERR_NAPI_INVALID_DATAVIEW_ARGS".as_ptr(),
+ c"byte_offset + byte_length should be less than or equal to the size in bytes of the array passed in".as_ptr(),
+ );
+ }
+ }
+
+ let dataview =
+ v8::DataView::new(&mut env.scope(), buffer, byte_offset, byte_length);
+
+ unsafe {
+ *result = dataview.into();
+ }
+
+ napi_ok
+}
+
/// Sets `*result` to whether `value` is a DataView.
#[napi_sym]
fn napi_is_dataview(
  env: *mut Env,
  value: napi_value,
  result: *mut bool,
) -> napi_status {
  let env = check_env!(env);
  check_arg!(env, value);
  check_arg!(env, result);

  unsafe {
    *result = value.unwrap().is_data_view();
  }

  napi_clear_last_error(env)
}
+
/// Reports a DataView's byte length, data pointer, backing ArrayBuffer, and
/// byte offset. Every out-parameter is optional (null pointers are skipped).
#[napi_sym]
fn napi_get_dataview_info(
  env_ptr: *mut Env,
  dataview: napi_value,
  byte_length: *mut usize,
  data: *mut *mut c_void,
  arraybuffer: *mut napi_value,
  byte_offset: *mut usize,
) -> napi_status {
  let env = check_env!(env_ptr);
  check_arg!(env, dataview);

  let Some(array) =
    dataview.and_then(|v| v8::Local::<v8::DataView>::try_from(v).ok())
  else {
    return napi_invalid_arg;
  };

  if !byte_length.is_null() {
    unsafe {
      *byte_length = array.byte_length();
    }
  }

  if !arraybuffer.is_null() {
    let Some(buffer) = array.buffer(&mut env.scope()) else {
      return napi_generic_failure;
    };

    unsafe {
      *arraybuffer = buffer.into();
    }
  }

  if !data.is_null() {
    unsafe {
      *data = array.data();
    }
  }

  if !byte_offset.is_null() {
    unsafe {
      *byte_offset = array.byte_offset();
    }
  }

  napi_clear_last_error(env_ptr)
}
+
/// Reports the highest Node-API version supported by this runtime.
#[napi_sym]
fn napi_get_version(env: *mut Env, result: *mut u32) -> napi_status {
  let env = check_env!(env);
  check_arg!(env, result);

  unsafe {
    *result = NAPI_VERSION;
  }

  napi_clear_last_error(env)
}
+
/// Creates a Promise plus its deferred resolver.
///
/// The resolver is leaked into a raw `v8::Global` pointer handed back as
/// `napi_deferred`; ownership transfers back to the runtime when the caller
/// resolves or rejects it (see napi_resolve_deferred / napi_reject_deferred,
/// which reconstruct the Global with `from_raw`).
#[napi_sym]
fn napi_create_promise<'s>(
  env: &'s mut Env,
  deferred: *mut napi_deferred,
  promise: *mut napi_value<'s>,
) -> napi_status {
  check_arg!(env, deferred);
  check_arg!(env, promise);

  let resolver = v8::PromiseResolver::new(&mut env.scope()).unwrap();

  let global = v8::Global::new(&mut env.scope(), resolver);
  let global_ptr = global.into_raw().as_ptr() as napi_deferred;

  let p = resolver.get_promise(&mut env.scope());

  unsafe {
    *deferred = global_ptr;
  }

  unsafe {
    *promise = p.into();
  }

  napi_ok
}
+
/// Resolves the promise behind a deferred created by `napi_create_promise`,
/// consuming the deferred (the raw Global is reclaimed via `from_raw`).
///
/// Microtasks are forced to Explicit around the resolve so JS continuations
/// do not run re-entrantly inside this native call, then restored to Auto.
#[napi_sym]
fn napi_resolve_deferred(
  env: &mut Env,
  deferred: napi_deferred,
  result: napi_value,
) -> napi_status {
  check_arg!(env, result);
  check_arg!(env, deferred);

  // Make sure microtasks don't run and call back into JS
  env
    .scope()
    .set_microtasks_policy(v8::MicrotasksPolicy::Explicit);

  // Reclaim ownership of the Global leaked by napi_create_promise.
  let deferred_ptr =
    unsafe { NonNull::new_unchecked(deferred as *mut v8::PromiseResolver) };
  let global = unsafe { v8::Global::from_raw(env.isolate(), deferred_ptr) };
  let resolver = v8::Local::new(&mut env.scope(), global);

  let success = resolver
    .resolve(&mut env.scope(), result.unwrap())
    .unwrap_or(false);

  // Restore policy
  env
    .scope()
    .set_microtasks_policy(v8::MicrotasksPolicy::Auto);

  if success {
    napi_ok
  } else {
    napi_generic_failure
  }
}
+
/// Rejects the promise behind a deferred created by `napi_create_promise`,
/// consuming the deferred (the raw Global is reclaimed via `from_raw`).
///
/// NOTE(review): unlike napi_resolve_deferred, this does not switch the
/// microtasks policy to Explicit around the reject — confirm the asymmetry
/// is intentional.
#[napi_sym]
fn napi_reject_deferred(
  env: &mut Env,
  deferred: napi_deferred,
  result: napi_value,
) -> napi_status {
  check_arg!(env, result);
  check_arg!(env, deferred);

  let deferred_ptr =
    unsafe { NonNull::new_unchecked(deferred as *mut v8::PromiseResolver) };
  let global = unsafe { v8::Global::from_raw(env.isolate(), deferred_ptr) };
  let resolver = v8::Local::new(&mut env.scope(), global);

  if !resolver
    .reject(&mut env.scope(), result.unwrap())
    .unwrap_or(false)
  {
    return napi_generic_failure;
  }

  napi_ok
}
+
/// Sets `*is_promise` to whether `value` is a Promise.
#[napi_sym]
fn napi_is_promise(
  env: *mut Env,
  value: napi_value,
  is_promise: *mut bool,
) -> napi_status {
  let env = check_env!(env);
  check_arg!(env, value);
  check_arg!(env, is_promise);

  unsafe {
    *is_promise = value.unwrap().is_promise();
  }

  napi_clear_last_error(env)
}
+
/// Creates a JS Date from `time` (milliseconds since the Unix epoch).
#[napi_sym]
fn napi_create_date<'s>(
  env: &'s mut Env,
  time: f64,
  result: *mut napi_value<'s>,
) -> napi_status {
  check_arg!(env, result);

  let Some(date) = v8::Date::new(&mut env.scope(), time) else {
    return napi_generic_failure;
  };

  unsafe {
    *result = date.into();
  }

  napi_ok
}
+
/// Sets `*is_date` to whether `value` is a JS Date.
#[napi_sym]
fn napi_is_date(
  env: *mut Env,
  value: napi_value,
  is_date: *mut bool,
) -> napi_status {
  let env = check_env!(env);
  check_arg!(env, value);
  check_arg!(env, is_date);

  unsafe {
    *is_date = value.unwrap().is_date();
  }

  napi_clear_last_error(env)
}
+
/// Extracts a Date's time value (milliseconds since the Unix epoch).
/// Returns `napi_date_expected` for non-Date values.
#[napi_sym]
fn napi_get_date_value(
  env: &mut Env,
  value: napi_value,
  result: *mut f64,
) -> napi_status {
  check_arg!(env, result);

  let Some(date) = value.and_then(|v| v8::Local::<v8::Date>::try_from(v).ok())
  else {
    return napi_date_expected;
  };

  unsafe {
    *result = date.value_of();
  }

  napi_ok
}
+
/// Compiles and runs a JS source string, writing the completion value
/// through `result`. Compile or run failures (including thrown exceptions)
/// surface as `napi_generic_failure`.
#[napi_sym]
fn napi_run_script<'s>(
  env: &'s mut Env,
  script: napi_value,
  result: *mut napi_value<'s>,
) -> napi_status {
  check_arg!(env, script);
  check_arg!(env, result);

  let Some(script) =
    script.and_then(|v| v8::Local::<v8::String>::try_from(v).ok())
  else {
    return napi_string_expected;
  };

  let Some(script) = v8::Script::compile(&mut env.scope(), script, None) else {
    return napi_generic_failure;
  };

  let Some(rv) = script.run(&mut env.scope()) else {
    return napi_generic_failure;
  };

  unsafe {
    *result = rv.into();
  }

  napi_ok
}
+
/// Registers `finalize_cb` to run when `value` (an object) is garbage
/// collected, without wrapping it.
///
/// Ownership mirrors `napi_wrap`: if the caller asks for a `napi_ref` back
/// (`result` non-null) they own it (Userland) and must delete it; otherwise
/// the runtime owns and reclaims the reference after finalization.
#[napi_sym]
fn napi_add_finalizer(
  env_ptr: *mut Env,
  value: napi_value,
  finalize_data: *mut c_void,
  finalize_cb: Option<napi_finalize>,
  finalize_hint: *mut c_void,
  result: *mut napi_ref,
) -> napi_status {
  let env = check_env!(env_ptr);
  check_arg!(env, value);
  check_arg!(env, finalize_cb);

  let Some(value) =
    value.and_then(|v| v8::Local::<v8::Object>::try_from(v).ok())
  else {
    return napi_set_last_error(env, napi_invalid_arg);
  };

  let ownership = if result.is_null() {
    ReferenceOwnership::Runtime
  } else {
    ReferenceOwnership::Userland
  };
  let reference = Reference::new(
    env,
    value.into(),
    0,
    ownership,
    finalize_cb,
    finalize_data,
    finalize_hint,
  );

  if !result.is_null() {
    unsafe {
      *result = Reference::into_raw(reference) as _;
    }
  }

  napi_clear_last_error(env_ptr)
}
+
/// Currently a no-op stub: the finalizer is accepted but never scheduled.
///
/// NOTE(review): Node's node_api_post_finalizer queues the callback to run
/// outside GC — confirm silently dropping it here is intentional and not a
/// missing implementation.
#[napi_sym]
fn node_api_post_finalizer(
  env: *mut Env,
  _finalize_cb: napi_finalize,
  _finalize_data: *mut c_void,
  _finalize_hint: *mut c_void,
) -> napi_status {
  napi_clear_last_error(env)
}
+
/// Tells V8 how much external (non-heap) memory is attributed to JS objects,
/// influencing GC scheduling. Returns the new adjusted total through
/// `adjusted_value`. `change_in_bytes` may be negative.
#[napi_sym]
fn napi_adjust_external_memory(
  env: *mut Env,
  change_in_bytes: i64,
  adjusted_value: *mut i64,
) -> napi_status {
  let env = check_env!(env);
  check_arg!(env, adjusted_value);

  unsafe {
    *adjusted_value = env
      .isolate()
      .adjust_amount_of_external_allocated_memory(change_in_bytes);
  }

  napi_clear_last_error(env)
}
+
/// Stores addon-wide instance data on the env's shared state, replacing any
/// previous value. `finalize_cb` (with `finalize_hint`) is recorded to run
/// when the data is released.
///
/// NOTE(review): a previously stored entry is overwritten without invoking
/// its finalizer here — confirm the old finalizer runs elsewhere.
#[napi_sym]
fn napi_set_instance_data(
  env: *mut Env,
  data: *mut c_void,
  finalize_cb: Option<napi_finalize>,
  finalize_hint: *mut c_void,
) -> napi_status {
  let env = check_env!(env);

  env.shared_mut().instance_data = Some(InstanceData {
    data,
    finalize_cb,
    finalize_hint,
  });

  napi_clear_last_error(env)
}
+
/// Retrieves the addon-wide instance data set by `napi_set_instance_data`;
/// yields a null pointer when none was set.
#[napi_sym]
fn napi_get_instance_data(
  env: *mut Env,
  data: *mut *mut c_void,
) -> napi_status {
  let env = check_env!(env);
  check_arg!(env, data);

  let instance_data = match &env.shared().instance_data {
    Some(v) => v.data,
    None => std::ptr::null_mut(),
  };

  unsafe { *data = instance_data };

  napi_clear_last_error(env)
}
+
+#[napi_sym]
+fn napi_detach_arraybuffer(env: *mut Env, value: napi_value) -> napi_status {
+ let env = check_env!(env);
+ check_arg!(env, value);
+
+ let Some(ab) =
+ value.and_then(|v| v8::Local::<v8::ArrayBuffer>::try_from(v).ok())
+ else {
+ return napi_set_last_error(env, napi_arraybuffer_expected);
+ };
+
+ if !ab.is_detachable() {
+ return napi_set_last_error(env, napi_detachable_arraybuffer_expected);
+ }
+
+ // Expected to crash for None.
+ ab.detach(None).unwrap();
+
+ napi_clear_last_error(env);
+ napi_ok
+}
+
/// Sets `*result` to whether `arraybuffer` is a detached ArrayBuffer.
/// Non-ArrayBuffer values report false rather than an error status.
#[napi_sym]
fn napi_is_detached_arraybuffer(
  env_ptr: *mut Env,
  arraybuffer: napi_value,
  result: *mut bool,
) -> napi_status {
  let env = check_env!(env_ptr);
  check_arg!(env, arraybuffer);
  check_arg!(env, result);

  let is_detached = match arraybuffer
    .and_then(|v| v8::Local::<v8::ArrayBuffer>::try_from(v).ok())
  {
    Some(ab) => ab.was_detached(),
    None => false,
  };

  unsafe {
    *result = is_detached;
  }

  napi_clear_last_error(env)
}
diff --git a/ext/napi/lib.rs b/ext/napi/lib.rs
index 4500c66fd..88b8c238d 100644
--- a/ext/napi/lib.rs
+++ b/ext/napi/lib.rs
@@ -5,9 +5,23 @@
#![allow(clippy::undocumented_unsafe_blocks)]
#![deny(clippy::missing_safety_doc)]
+//! Symbols to be exported are now defined in this JSON file.
+//! The `#[napi_sym]` macro checks for missing entries and panics.
+//!
+//! `./tools/napi/generate_symbols_list.js` is used to generate the LINK `cli/exports.def` on Windows,
+//! which is also checked into git.
+//!
+//! To add a new napi function:
+//! 1. Place `#[napi_sym]` on top of your implementation.
+//! 2. Add the function's identifier to this JSON list.
+//! 3. Finally, run `tools/napi/generate_symbols_list.js` to update `ext/napi/generated_symbol_exports_list_*.def`.
+
+pub mod js_native_api;
+pub mod node_api;
+pub mod util;
+pub mod uv;
+
use core::ptr::NonNull;
-use deno_core::error::type_error;
-use deno_core::error::AnyError;
use deno_core::op2;
use deno_core::parking_lot::RwLock;
use deno_core::url::Url;
@@ -20,6 +34,18 @@ use std::path::PathBuf;
use std::rc::Rc;
use std::thread_local;
/// Errors surfaced by the N-API extension, chiefly from `op_napi_open`
/// (loading and registering a native addon).
#[derive(Debug, thiserror::Error)]
pub enum NApiError {
  // The addon path could not be converted to a file:// URL.
  #[error("Invalid path")]
  InvalidPath,
  // dlopen / LoadLibrary failure, forwarded from libloading.
  #[error(transparent)]
  LibLoading(#[from] libloading::Error),
  // The library loaded but never registered a Node-API module.
  #[error("Unable to find register Node-API module at {}", .0.display())]
  ModuleNotFound(PathBuf),
  // Permission check (--allow-ffi) denied access to the addon path.
  #[error(transparent)]
  Permission(#[from] PermissionCheckError),
}
+
#[cfg(unix)]
use libloading::os::unix::*;
@@ -29,6 +55,7 @@ use libloading::os::windows::*;
// Expose common stuff for ease of use.
// `use deno_napi::*`
pub use deno_core::v8;
+use deno_permissions::PermissionCheckError;
pub use std::ffi::CStr;
pub use std::os::raw::c_char;
pub use std::os::raw::c_void;
@@ -482,14 +509,14 @@ deno_core::extension!(deno_napi,
pub trait NapiPermissions {
#[must_use = "the resolved return value to mitigate time-of-check to time-of-use issues"]
- fn check(&mut self, path: &str) -> std::result::Result<PathBuf, AnyError>;
+ fn check(&mut self, path: &str) -> Result<PathBuf, PermissionCheckError>;
}
// NOTE(bartlomieju): for now, NAPI uses `--allow-ffi` flag, but that might
// change in the future.
impl NapiPermissions for deno_permissions::PermissionsContainer {
#[inline(always)]
- fn check(&mut self, path: &str) -> Result<PathBuf, AnyError> {
+ fn check(&mut self, path: &str) -> Result<PathBuf, PermissionCheckError> {
deno_permissions::PermissionsContainer::check_ffi(self, path)
}
}
@@ -512,7 +539,7 @@ fn op_napi_open<NP, 'scope>(
global: v8::Local<'scope, v8::Object>,
buffer_constructor: v8::Local<'scope, v8::Function>,
report_error: v8::Local<'scope, v8::Function>,
-) -> std::result::Result<v8::Local<'scope, v8::Value>, AnyError>
+) -> Result<v8::Local<'scope, v8::Value>, NApiError>
where
NP: NapiPermissions + 'static,
{
@@ -540,7 +567,7 @@ where
let type_tag = v8::Global::new(scope, type_tag);
let url_filename =
- Url::from_file_path(&path).map_err(|_| type_error("Invalid path"))?;
+ Url::from_file_path(&path).map_err(|_| NApiError::InvalidPath)?;
let env_shared =
EnvShared::new(napi_wrap, type_tag, format!("{url_filename}\0"));
@@ -565,17 +592,11 @@ where
// SAFETY: opening a DLL calls dlopen
#[cfg(unix)]
- let library = match unsafe { Library::open(Some(&path), flags) } {
- Ok(lib) => lib,
- Err(e) => return Err(type_error(e.to_string())),
- };
+ let library = unsafe { Library::open(Some(&path), flags) }?;
// SAFETY: opening a DLL calls dlopen
#[cfg(not(unix))]
- let library = match unsafe { Library::load_with_flags(&path, flags) } {
- Ok(lib) => lib,
- Err(e) => return Err(type_error(e.to_string())),
- };
+ let library = unsafe { Library::load_with_flags(&path, flags) }?;
let maybe_module = MODULE_TO_REGISTER.with(|cell| {
let mut slot = cell.borrow_mut();
@@ -610,10 +631,7 @@ where
// SAFETY: we are going blind, calling the register function on the other side.
unsafe { init(env_ptr, exports.into()) }
} else {
- return Err(type_error(format!(
- "Unable to find register Node-API module at {}",
- path.display()
- )));
+ return Err(NApiError::ModuleNotFound(path));
};
let exports = maybe_exports.unwrap_or(exports.into());
@@ -624,3 +642,34 @@ where
Ok(exports)
}
+
/// Emits cargo linker directives (from a build script) so the Node-API
/// symbols are exported from the final `name` binary.
///
/// The symbol-list file path is baked in at compile time by this crate's
/// build script (written to `$OUT_DIR/napi_symbol_path.txt`); each target
/// OS needs a different linker flag syntax for export lists.
#[allow(clippy::print_stdout)]
pub fn print_linker_flags(name: &str) {
  let symbols_path =
    include_str!(concat!(env!("OUT_DIR"), "/napi_symbol_path.txt"));

  // MSVC: module-definition file listing the exports.
  #[cfg(target_os = "windows")]
  println!("cargo:rustc-link-arg-bin={name}=/DEF:{}", symbols_path);

  // Apple ld: exported-symbols list.
  #[cfg(target_os = "macos")]
  println!(
    "cargo:rustc-link-arg-bin={name}=-Wl,-exported_symbols_list,{}",
    symbols_path,
  );

  // GNU/LLD: dynamic symbol export list.
  #[cfg(any(
    target_os = "linux",
    target_os = "freebsd",
    target_os = "openbsd"
  ))]
  println!(
    "cargo:rustc-link-arg-bin={name}=-Wl,--export-dynamic-symbol-list={}",
    symbols_path,
  );

  #[cfg(target_os = "android")]
  println!(
    "cargo:rustc-link-arg-bin={name}=-Wl,--export-dynamic-symbol-list={}",
    symbols_path,
  );
}
diff --git a/ext/napi/node_api.rs b/ext/napi/node_api.rs
new file mode 100644
index 000000000..2ca5c8d0b
--- /dev/null
+++ b/ext/napi/node_api.rs
@@ -0,0 +1,1009 @@
+// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
+
+#![deny(unsafe_op_in_unsafe_fn)]
+
+use super::util::get_array_buffer_ptr;
+use super::util::make_external_backing_store;
+use super::util::napi_clear_last_error;
+use super::util::napi_set_last_error;
+use super::util::SendPtr;
+use crate::check_arg;
+use crate::check_env;
+use crate::*;
+use deno_core::parking_lot::Condvar;
+use deno_core::parking_lot::Mutex;
+use deno_core::V8CrossThreadTaskSpawner;
+use napi_sym::napi_sym;
+use std::sync::atomic::AtomicBool;
+use std::sync::atomic::AtomicU8;
+use std::sync::atomic::AtomicUsize;
+use std::sync::atomic::Ordering;
+use std::sync::Arc;
+
+#[napi_sym]
+fn napi_module_register(module: *const NapiModule) -> napi_status {
+ MODULE_TO_REGISTER.with(|cell| {
+ let mut slot = cell.borrow_mut();
+ let prev = slot.replace(module);
+ assert!(prev.is_none());
+ });
+ napi_ok
+}
+
+#[napi_sym]
+fn napi_add_env_cleanup_hook(
+ env: *mut Env,
+ fun: Option<napi_cleanup_hook>,
+ arg: *mut c_void,
+) -> napi_status {
+ let env = check_env!(env);
+ check_arg!(env, fun);
+
+ let fun = fun.unwrap();
+
+ env.add_cleanup_hook(fun, arg);
+
+ napi_ok
+}
+
+#[napi_sym]
+fn napi_remove_env_cleanup_hook(
+ env: *mut Env,
+ fun: Option<napi_cleanup_hook>,
+ arg: *mut c_void,
+) -> napi_status {
+ let env = check_env!(env);
+ check_arg!(env, fun);
+
+ let fun = fun.unwrap();
+
+ env.remove_cleanup_hook(fun, arg);
+
+ napi_ok
+}
+
+struct AsyncCleanupHandle {
+ env: *mut Env,
+ hook: napi_async_cleanup_hook,
+ data: *mut c_void,
+}
+
+unsafe extern "C" fn async_cleanup_handler(arg: *mut c_void) {
+ unsafe {
+ let handle = Box::<AsyncCleanupHandle>::from_raw(arg as _);
+ (handle.hook)(arg, handle.data);
+ }
+}
+
+#[napi_sym]
+fn napi_add_async_cleanup_hook(
+ env: *mut Env,
+ hook: Option<napi_async_cleanup_hook>,
+ arg: *mut c_void,
+ remove_handle: *mut napi_async_cleanup_hook_handle,
+) -> napi_status {
+ let env = check_env!(env);
+ check_arg!(env, hook);
+
+ let hook = hook.unwrap();
+
+ let handle = Box::into_raw(Box::new(AsyncCleanupHandle {
+ env,
+ hook,
+ data: arg,
+ })) as *mut c_void;
+
+ env.add_cleanup_hook(async_cleanup_handler, handle);
+
+ if !remove_handle.is_null() {
+ unsafe {
+ *remove_handle = handle;
+ }
+ }
+
+ napi_clear_last_error(env)
+}
+
+#[napi_sym]
+fn napi_remove_async_cleanup_hook(
+ remove_handle: napi_async_cleanup_hook_handle,
+) -> napi_status {
+ if remove_handle.is_null() {
+ return napi_invalid_arg;
+ }
+
+ let handle =
+ unsafe { Box::<AsyncCleanupHandle>::from_raw(remove_handle as _) };
+
+ let env = unsafe { &mut *handle.env };
+
+ env.remove_cleanup_hook(async_cleanup_handler, remove_handle);
+
+ napi_ok
+}
+
+#[napi_sym]
+fn napi_fatal_exception(env: &mut Env, err: napi_value) -> napi_status {
+ check_arg!(env, err);
+
+ let report_error = v8::Local::new(&mut env.scope(), &env.report_error);
+
+ let this = v8::undefined(&mut env.scope());
+ if report_error
+ .call(&mut env.scope(), this.into(), &[err.unwrap()])
+ .is_none()
+ {
+ return napi_generic_failure;
+ }
+
+ napi_ok
+}
+
+#[napi_sym]
+fn napi_fatal_error(
+ location: *const c_char,
+ location_len: usize,
+ message: *const c_char,
+ message_len: usize,
+) -> napi_status {
+ let location = if location.is_null() {
+ None
+ } else {
+ unsafe {
+ Some(if location_len == NAPI_AUTO_LENGTH {
+ std::ffi::CStr::from_ptr(location).to_str().unwrap()
+ } else {
+ let slice = std::slice::from_raw_parts(
+ location as *const u8,
+ location_len as usize,
+ );
+ std::str::from_utf8(slice).unwrap()
+ })
+ }
+ };
+
+ let message = if message_len == NAPI_AUTO_LENGTH {
+ unsafe { std::ffi::CStr::from_ptr(message).to_str().unwrap() }
+ } else {
+ let slice = unsafe {
+ std::slice::from_raw_parts(message as *const u8, message_len as usize)
+ };
+ std::str::from_utf8(slice).unwrap()
+ };
+
+ if let Some(location) = location {
+ log::error!("NODE API FATAL ERROR: {} {}", location, message);
+ } else {
+ log::error!("NODE API FATAL ERROR: {}", message);
+ }
+
+ std::process::abort();
+}
+
+#[napi_sym]
+fn napi_open_callback_scope(
+ env: *mut Env,
+ _resource_object: napi_value,
+ _context: napi_value,
+ result: *mut napi_callback_scope,
+) -> napi_status {
+ let env = check_env!(env);
+ check_arg!(env, result);
+
+ // we open scope automatically when it's needed
+ unsafe {
+ *result = std::ptr::null_mut();
+ }
+
+ napi_clear_last_error(env)
+}
+
+#[napi_sym]
+fn napi_close_callback_scope(
+ env: *mut Env,
+ scope: napi_callback_scope,
+) -> napi_status {
+ let env = check_env!(env);
+ // we close scope automatically when it's needed
+ assert!(scope.is_null());
+ napi_clear_last_error(env)
+}
+
+// NOTE: we don't support "async_hooks::AsyncContext" so these APIs are noops.
+#[napi_sym]
+fn napi_async_init(
+ env: *mut Env,
+ _async_resource: napi_value,
+ _async_resource_name: napi_value,
+ result: *mut napi_async_context,
+) -> napi_status {
+ let env = check_env!(env);
+ unsafe {
+ *result = ptr::null_mut();
+ }
+ napi_clear_last_error(env)
+}
+
+#[napi_sym]
+fn napi_async_destroy(
+ env: *mut Env,
+ async_context: napi_async_context,
+) -> napi_status {
+ let env = check_env!(env);
+ assert!(async_context.is_null());
+ napi_clear_last_error(env)
+}
+
+#[napi_sym]
+fn napi_make_callback<'s>(
+ env: &'s mut Env,
+ _async_context: napi_async_context,
+ recv: napi_value,
+ func: napi_value,
+ argc: usize,
+ argv: *const napi_value<'s>,
+ result: *mut napi_value<'s>,
+) -> napi_status {
+ check_arg!(env, recv);
+ if argc > 0 {
+ check_arg!(env, argv);
+ }
+
+ let Some(recv) = recv.and_then(|v| v.to_object(&mut env.scope())) else {
+ return napi_object_expected;
+ };
+
+ let Some(func) =
+ func.and_then(|v| v8::Local::<v8::Function>::try_from(v).ok())
+ else {
+ return napi_function_expected;
+ };
+
+ let args = if argc > 0 {
+ unsafe {
+ std::slice::from_raw_parts(argv as *mut v8::Local<v8::Value>, argc)
+ }
+ } else {
+ &[]
+ };
+
+ // TODO: async_context
+
+ let Some(v) = func.call(&mut env.scope(), recv.into(), args) else {
+ return napi_generic_failure;
+ };
+
+ unsafe {
+ *result = v.into();
+ }
+
+ napi_ok
+}
+
+#[napi_sym]
+fn napi_create_buffer<'s>(
+ env: &'s mut Env,
+ length: usize,
+ data: *mut *mut c_void,
+ result: *mut napi_value<'s>,
+) -> napi_status {
+ check_arg!(env, result);
+
+ let ab = v8::ArrayBuffer::new(&mut env.scope(), length);
+
+ let buffer_constructor =
+ v8::Local::new(&mut env.scope(), &env.buffer_constructor);
+ let Some(buffer) =
+ buffer_constructor.new_instance(&mut env.scope(), &[ab.into()])
+ else {
+ return napi_generic_failure;
+ };
+
+ if !data.is_null() {
+ unsafe {
+ *data = get_array_buffer_ptr(ab);
+ }
+ }
+
+ unsafe {
+ *result = buffer.into();
+ }
+
+ napi_ok
+}
+
+#[napi_sym]
+fn napi_create_external_buffer<'s>(
+ env: &'s mut Env,
+ length: usize,
+ data: *mut c_void,
+ finalize_cb: napi_finalize,
+ finalize_hint: *mut c_void,
+ result: *mut napi_value<'s>,
+) -> napi_status {
+ check_arg!(env, result);
+
+ let store = make_external_backing_store(
+ env,
+ data,
+ length,
+ ptr::null_mut(),
+ finalize_cb,
+ finalize_hint,
+ );
+
+ let ab =
+ v8::ArrayBuffer::with_backing_store(&mut env.scope(), &store.make_shared());
+
+ let buffer_constructor =
+ v8::Local::new(&mut env.scope(), &env.buffer_constructor);
+ let Some(buffer) =
+ buffer_constructor.new_instance(&mut env.scope(), &[ab.into()])
+ else {
+ return napi_generic_failure;
+ };
+
+ unsafe {
+ *result = buffer.into();
+ }
+
+ napi_ok
+}
+
+#[napi_sym]
+fn napi_create_buffer_copy<'s>(
+ env: &'s mut Env,
+ length: usize,
+ data: *mut c_void,
+ result_data: *mut *mut c_void,
+ result: *mut napi_value<'s>,
+) -> napi_status {
+ check_arg!(env, result);
+
+ let ab = v8::ArrayBuffer::new(&mut env.scope(), length);
+
+ let buffer_constructor =
+ v8::Local::new(&mut env.scope(), &env.buffer_constructor);
+ let Some(buffer) =
+ buffer_constructor.new_instance(&mut env.scope(), &[ab.into()])
+ else {
+ return napi_generic_failure;
+ };
+
+ let ptr = get_array_buffer_ptr(ab);
+ unsafe {
+ std::ptr::copy(data, ptr, length);
+ }
+
+ if !result_data.is_null() {
+ unsafe {
+ *result_data = ptr;
+ }
+ }
+
+ unsafe {
+ *result = buffer.into();
+ }
+
+ napi_ok
+}
+
+#[napi_sym]
+fn napi_is_buffer(
+ env: *mut Env,
+ value: napi_value,
+ result: *mut bool,
+) -> napi_status {
+ let env = check_env!(env);
+ check_arg!(env, value);
+ check_arg!(env, result);
+
+ let buffer_constructor =
+ v8::Local::new(&mut env.scope(), &env.buffer_constructor);
+
+ let Some(is_buffer) = value
+ .unwrap()
+ .instance_of(&mut env.scope(), buffer_constructor.into())
+ else {
+ return napi_set_last_error(env, napi_generic_failure);
+ };
+
+ unsafe {
+ *result = is_buffer;
+ }
+
+ napi_clear_last_error(env)
+}
+
+#[napi_sym]
+fn napi_get_buffer_info(
+ env: *mut Env,
+ value: napi_value,
+ data: *mut *mut c_void,
+ length: *mut usize,
+) -> napi_status {
+ let env = check_env!(env);
+ check_arg!(env, value);
+
+ // NB: Any TypedArray instance seems to be accepted by this function
+ // in Node.js.
+ let Some(ta) =
+ value.and_then(|v| v8::Local::<v8::TypedArray>::try_from(v).ok())
+ else {
+ return napi_set_last_error(env, napi_invalid_arg);
+ };
+
+ if !data.is_null() {
+ unsafe {
+ *data = ta.data();
+ }
+ }
+
+ if !length.is_null() {
+ unsafe {
+ *length = ta.byte_length();
+ }
+ }
+
+ napi_clear_last_error(env)
+}
+
+#[napi_sym]
+fn napi_get_node_version(
+ env: *mut Env,
+ result: *mut *const napi_node_version,
+) -> napi_status {
+ let env = check_env!(env);
+ check_arg!(env, result);
+
+ const NODE_VERSION: napi_node_version = napi_node_version {
+ major: 20,
+ minor: 11,
+ patch: 1,
+ release: c"Deno".as_ptr(),
+ };
+
+ unsafe {
+ *result = &NODE_VERSION as *const napi_node_version;
+ }
+
+ napi_clear_last_error(env)
+}
+
+struct AsyncWork {
+ state: AtomicU8,
+ env: *mut Env,
+ _async_resource: v8::Global<v8::Object>,
+ _async_resource_name: String,
+ execute: napi_async_execute_callback,
+ complete: Option<napi_async_complete_callback>,
+ data: *mut c_void,
+}
+
+impl AsyncWork {
+ const IDLE: u8 = 0;
+ const QUEUED: u8 = 1;
+ const RUNNING: u8 = 2;
+}
+
+#[napi_sym]
+pub(crate) fn napi_create_async_work(
+ env: *mut Env,
+ async_resource: napi_value,
+ async_resource_name: napi_value,
+ execute: Option<napi_async_execute_callback>,
+ complete: Option<napi_async_complete_callback>,
+ data: *mut c_void,
+ result: *mut napi_async_work,
+) -> napi_status {
+ let env_ptr = env;
+ let env = check_env!(env);
+ check_arg!(env, execute);
+ check_arg!(env, result);
+
+ let resource = if let Some(v) = *async_resource {
+ let Some(resource) = v.to_object(&mut env.scope()) else {
+ return napi_set_last_error(env, napi_object_expected);
+ };
+ resource
+ } else {
+ v8::Object::new(&mut env.scope())
+ };
+
+ let Some(resource_name) =
+ async_resource_name.and_then(|v| v.to_string(&mut env.scope()))
+ else {
+ return napi_set_last_error(env, napi_string_expected);
+ };
+
+ let resource_name = resource_name.to_rust_string_lossy(&mut env.scope());
+
+ let work = Box::new(AsyncWork {
+ state: AtomicU8::new(AsyncWork::IDLE),
+ env: env_ptr,
+ _async_resource: v8::Global::new(&mut env.scope(), resource),
+ _async_resource_name: resource_name,
+ execute: execute.unwrap(),
+ complete,
+ data,
+ });
+
+ unsafe {
+ *result = Box::into_raw(work) as _;
+ }
+
+ napi_clear_last_error(env)
+}
+
+#[napi_sym]
+pub(crate) fn napi_delete_async_work(
+ env: *mut Env,
+ work: napi_async_work,
+) -> napi_status {
+ let env = check_env!(env);
+ check_arg!(env, work);
+
+ drop(unsafe { Box::<AsyncWork>::from_raw(work as _) });
+
+ napi_clear_last_error(env)
+}
+
+#[napi_sym]
+fn napi_get_uv_event_loop(
+ env_ptr: *mut Env,
+ uv_loop: *mut *mut (),
+) -> napi_status {
+ let env = check_env!(env_ptr);
+ check_arg!(env, uv_loop);
+ unsafe {
+ *uv_loop = env_ptr.cast();
+ }
+ 0
+}
+
+#[napi_sym]
+pub(crate) fn napi_queue_async_work(
+ env: *mut Env,
+ work: napi_async_work,
+) -> napi_status {
+ let env = check_env!(env);
+ check_arg!(env, work);
+
+ let work = unsafe { &*(work as *mut AsyncWork) };
+
+ let result =
+ work
+ .state
+ .fetch_update(Ordering::SeqCst, Ordering::SeqCst, |state| {
+ // allow queue if idle or if running, but not if already queued.
+ if state == AsyncWork::IDLE || state == AsyncWork::RUNNING {
+ Some(AsyncWork::QUEUED)
+ } else {
+ None
+ }
+ });
+
+ if result.is_err() {
+ return napi_clear_last_error(env);
+ }
+
+ let work = SendPtr(work);
+
+ env.add_async_work(move || {
+ let work = work.take();
+ let work = unsafe { &*work };
+
+ let state = work.state.compare_exchange(
+ AsyncWork::QUEUED,
+ AsyncWork::RUNNING,
+ Ordering::SeqCst,
+ Ordering::SeqCst,
+ );
+
+ if state.is_ok() {
+ unsafe {
+ (work.execute)(work.env as _, work.data);
+ }
+
+ // reset back to idle if its still marked as running
+ let _ = work.state.compare_exchange(
+ AsyncWork::RUNNING,
+ AsyncWork::IDLE,
+ Ordering::SeqCst,
+ Ordering::Relaxed,
+ );
+ }
+
+ if let Some(complete) = work.complete {
+ let status = if state.is_ok() {
+ napi_ok
+ } else if state == Err(AsyncWork::IDLE) {
+ napi_cancelled
+ } else {
+ napi_generic_failure
+ };
+
+ unsafe {
+ complete(work.env as _, status, work.data);
+ }
+ }
+
+ // `complete` probably deletes this `work`, so don't use it here.
+ });
+
+ napi_clear_last_error(env)
+}
+
+#[napi_sym]
+fn napi_cancel_async_work(env: *mut Env, work: napi_async_work) -> napi_status {
+ let env = check_env!(env);
+ check_arg!(env, work);
+
+ let work = unsafe { &*(work as *mut AsyncWork) };
+
+ let _ = work.state.compare_exchange(
+ AsyncWork::QUEUED,
+ AsyncWork::IDLE,
+ Ordering::SeqCst,
+ Ordering::Relaxed,
+ );
+
+ napi_clear_last_error(env)
+}
+
+extern "C" fn default_call_js_cb(
+ env: napi_env,
+ js_callback: napi_value,
+ _context: *mut c_void,
+ _data: *mut c_void,
+) {
+ if let Some(js_callback) = *js_callback {
+ if let Ok(js_callback) = v8::Local::<v8::Function>::try_from(js_callback) {
+ let env = unsafe { &mut *(env as *mut Env) };
+ let scope = &mut env.scope();
+ let recv = v8::undefined(scope);
+ js_callback.call(scope, recv.into(), &[]);
+ }
+ }
+}
+
+struct TsFn {
+ env: *mut Env,
+ func: Option<v8::Global<v8::Function>>,
+ max_queue_size: usize,
+ queue_size: Mutex<usize>,
+ queue_cond: Condvar,
+ thread_count: AtomicUsize,
+ thread_finalize_data: *mut c_void,
+ thread_finalize_cb: Option<napi_finalize>,
+ context: *mut c_void,
+ call_js_cb: napi_threadsafe_function_call_js,
+ _resource: v8::Global<v8::Object>,
+ _resource_name: String,
+ is_closing: AtomicBool,
+ is_closed: Arc<AtomicBool>,
+ sender: V8CrossThreadTaskSpawner,
+ is_ref: AtomicBool,
+}
+
+impl Drop for TsFn {
+ fn drop(&mut self) {
+ assert!(self
+ .is_closed
+ .compare_exchange(false, true, Ordering::Relaxed, Ordering::Relaxed)
+ .is_ok());
+
+ self.unref();
+
+ if let Some(finalizer) = self.thread_finalize_cb {
+ unsafe {
+ (finalizer)(self.env as _, self.thread_finalize_data, self.context);
+ }
+ }
+ }
+}
+
+impl TsFn {
+ pub fn acquire(&self) -> napi_status {
+ if self.is_closing.load(Ordering::SeqCst) {
+ return napi_closing;
+ }
+ self.thread_count.fetch_add(1, Ordering::Relaxed);
+ napi_ok
+ }
+
+ pub fn release(
+ tsfn: *mut TsFn,
+ mode: napi_threadsafe_function_release_mode,
+ ) -> napi_status {
+ let tsfn = unsafe { &mut *tsfn };
+
+ let result = tsfn.thread_count.fetch_update(
+ Ordering::Relaxed,
+ Ordering::Relaxed,
+ |x| {
+ if x == 0 {
+ None
+ } else {
+ Some(x - 1)
+ }
+ },
+ );
+
+ if result.is_err() {
+ return napi_invalid_arg;
+ }
+
+ if (result == Ok(1) || mode == napi_tsfn_abort)
+ && tsfn
+ .is_closing
+ .compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst)
+ .is_ok()
+ {
+ tsfn.queue_cond.notify_all();
+ let tsfnptr = SendPtr(tsfn);
+ // drop must be queued in order to preserve ordering consistent
+ // with Node.js and so that the finalizer runs on the main thread.
+ tsfn.sender.spawn(move |_| {
+ let tsfn = unsafe { Box::from_raw(tsfnptr.take() as *mut TsFn) };
+ drop(tsfn);
+ });
+ }
+
+ napi_ok
+ }
+
+ pub fn ref_(&self) -> napi_status {
+ if self
+ .is_ref
+ .compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst)
+ .is_ok()
+ {
+ let env = unsafe { &mut *self.env };
+ env.threadsafe_function_ref();
+ }
+ napi_ok
+ }
+
+ pub fn unref(&self) -> napi_status {
+ if self
+ .is_ref
+ .compare_exchange(true, false, Ordering::SeqCst, Ordering::SeqCst)
+ .is_ok()
+ {
+ let env = unsafe { &mut *self.env };
+ env.threadsafe_function_unref();
+ }
+
+ napi_ok
+ }
+
+ pub fn call(
+ &self,
+ data: *mut c_void,
+ mode: napi_threadsafe_function_call_mode,
+ ) -> napi_status {
+ if self.is_closing.load(Ordering::SeqCst) {
+ return napi_closing;
+ }
+
+ if self.max_queue_size > 0 {
+ let mut queue_size = self.queue_size.lock();
+ while *queue_size >= self.max_queue_size {
+ if mode == napi_tsfn_blocking {
+ self.queue_cond.wait(&mut queue_size);
+
+ if self.is_closing.load(Ordering::SeqCst) {
+ return napi_closing;
+ }
+ } else {
+ return napi_queue_full;
+ }
+ }
+ *queue_size += 1;
+ }
+
+ let is_closed = self.is_closed.clone();
+ let tsfn = SendPtr(self);
+ let data = SendPtr(data);
+ let context = SendPtr(self.context);
+ let call_js_cb = self.call_js_cb;
+
+ self.sender.spawn(move |scope: &mut v8::HandleScope| {
+ let data = data.take();
+
+ // if is_closed then tsfn is freed, don't read from it.
+ if is_closed.load(Ordering::Relaxed) {
+ unsafe {
+ call_js_cb(
+ std::ptr::null_mut(),
+ None::<v8::Local<v8::Value>>.into(),
+ context.take() as _,
+ data as _,
+ );
+ }
+ } else {
+ let tsfn = tsfn.take();
+
+ let tsfn = unsafe { &*tsfn };
+
+ if tsfn.max_queue_size > 0 {
+ let mut queue_size = tsfn.queue_size.lock();
+ let size = *queue_size;
+ *queue_size -= 1;
+ if size == tsfn.max_queue_size {
+ tsfn.queue_cond.notify_one();
+ }
+ }
+
+ let func = tsfn.func.as_ref().map(|f| v8::Local::new(scope, f));
+
+ unsafe {
+ (tsfn.call_js_cb)(
+ tsfn.env as _,
+ func.into(),
+ tsfn.context,
+ data as _,
+ );
+ }
+ }
+ });
+
+ napi_ok
+ }
+}
+
+#[napi_sym]
+#[allow(clippy::too_many_arguments)]
+fn napi_create_threadsafe_function(
+ env: *mut Env,
+ func: napi_value,
+ async_resource: napi_value,
+ async_resource_name: napi_value,
+ max_queue_size: usize,
+ initial_thread_count: usize,
+ thread_finalize_data: *mut c_void,
+ thread_finalize_cb: Option<napi_finalize>,
+ context: *mut c_void,
+ call_js_cb: Option<napi_threadsafe_function_call_js>,
+ result: *mut napi_threadsafe_function,
+) -> napi_status {
+ let env = check_env!(env);
+ check_arg!(env, async_resource_name);
+ if initial_thread_count == 0 {
+ return napi_set_last_error(env, napi_invalid_arg);
+ }
+ check_arg!(env, result);
+
+ let func = if let Some(value) = *func {
+ let Ok(func) = v8::Local::<v8::Function>::try_from(value) else {
+ return napi_set_last_error(env, napi_function_expected);
+ };
+ Some(v8::Global::new(&mut env.scope(), func))
+ } else {
+ check_arg!(env, call_js_cb);
+ None
+ };
+
+ let resource = if let Some(v) = *async_resource {
+ let Some(resource) = v.to_object(&mut env.scope()) else {
+ return napi_set_last_error(env, napi_object_expected);
+ };
+ resource
+ } else {
+ v8::Object::new(&mut env.scope())
+ };
+ let resource = v8::Global::new(&mut env.scope(), resource);
+
+ let Some(resource_name) =
+ async_resource_name.and_then(|v| v.to_string(&mut env.scope()))
+ else {
+ return napi_set_last_error(env, napi_string_expected);
+ };
+ let resource_name = resource_name.to_rust_string_lossy(&mut env.scope());
+
+ let tsfn = Box::new(TsFn {
+ env,
+ func,
+ max_queue_size,
+ queue_size: Mutex::new(0),
+ queue_cond: Condvar::new(),
+ thread_count: AtomicUsize::new(initial_thread_count),
+ thread_finalize_data,
+ thread_finalize_cb,
+ context,
+ call_js_cb: call_js_cb.unwrap_or(default_call_js_cb),
+ _resource: resource,
+ _resource_name: resource_name,
+ is_closing: AtomicBool::new(false),
+ is_closed: Arc::new(AtomicBool::new(false)),
+ is_ref: AtomicBool::new(false),
+ sender: env.async_work_sender.clone(),
+ });
+
+ tsfn.ref_();
+
+ unsafe {
+ *result = Box::into_raw(tsfn) as _;
+ }
+
+ napi_clear_last_error(env)
+}
+
+/// May be called from any thread.
+#[napi_sym]
+fn napi_get_threadsafe_function_context(
+ func: napi_threadsafe_function,
+ result: *mut *const c_void,
+) -> napi_status {
+ assert!(!func.is_null());
+ let tsfn = unsafe { &*(func as *const TsFn) };
+ unsafe {
+ *result = tsfn.context;
+ }
+ napi_ok
+}
+
+#[napi_sym]
+fn napi_call_threadsafe_function(
+ func: napi_threadsafe_function,
+ data: *mut c_void,
+ is_blocking: napi_threadsafe_function_call_mode,
+) -> napi_status {
+ assert!(!func.is_null());
+ let tsfn = unsafe { &*(func as *mut TsFn) };
+ tsfn.call(data, is_blocking)
+}
+
+#[napi_sym]
+fn napi_acquire_threadsafe_function(
+ tsfn: napi_threadsafe_function,
+) -> napi_status {
+ assert!(!tsfn.is_null());
+ let tsfn = unsafe { &*(tsfn as *mut TsFn) };
+ tsfn.acquire()
+}
+
+#[napi_sym]
+fn napi_release_threadsafe_function(
+ tsfn: napi_threadsafe_function,
+ mode: napi_threadsafe_function_release_mode,
+) -> napi_status {
+ assert!(!tsfn.is_null());
+ TsFn::release(tsfn as _, mode)
+}
+
+#[napi_sym]
+fn napi_unref_threadsafe_function(
+ _env: &mut Env,
+ func: napi_threadsafe_function,
+) -> napi_status {
+ assert!(!func.is_null());
+ let tsfn = unsafe { &*(func as *mut TsFn) };
+ tsfn.unref()
+}
+
+#[napi_sym]
+fn napi_ref_threadsafe_function(
+ _env: &mut Env,
+ func: napi_threadsafe_function,
+) -> napi_status {
+ assert!(!func.is_null());
+ let tsfn = unsafe { &*(func as *mut TsFn) };
+ tsfn.ref_()
+}
+
+#[napi_sym]
+fn node_api_get_module_file_name(
+ env: *mut Env,
+ result: *mut *const c_char,
+) -> napi_status {
+ let env = check_env!(env);
+ check_arg!(env, result);
+
+ unsafe {
+ *result = env.shared().filename.as_ptr() as _;
+ }
+
+ napi_clear_last_error(env)
+}
diff --git a/ext/napi/sym/Cargo.toml b/ext/napi/sym/Cargo.toml
new file mode 100644
index 000000000..7c13a9165
--- /dev/null
+++ b/ext/napi/sym/Cargo.toml
@@ -0,0 +1,21 @@
+# Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
+
+[package]
+name = "napi_sym"
+version = "0.107.0"
+authors.workspace = true
+edition.workspace = true
+license.workspace = true
+readme = "README.md"
+repository.workspace = true
+description = "proc macro for writing N-API symbols"
+
+[lib]
+path = "./lib.rs"
+proc-macro = true
+
+[dependencies]
+quote.workspace = true
+serde.workspace = true
+serde_json.workspace = true
+syn.workspace = true
diff --git a/ext/napi/sym/README.md b/ext/napi/sym/README.md
new file mode 100644
index 000000000..66eb4bff2
--- /dev/null
+++ b/ext/napi/sym/README.md
@@ -0,0 +1,38 @@
+# napi_sym
+
+A proc_macro for Deno's Node-API implementation. It does the following things:
+
+- Marks the symbol as `#[no_mangle]` and rewrites it as
+ `unsafe extern "C" $name`.
+- Asserts that the function symbol is present in
+ [`symbol_exports.json`](./symbol_exports.json).
+- Maps `deno_napi::Result` to raw `napi_result`.
+
+```rust
+use deno_napi::napi_value;
+use deno_napi::Env;
+use deno_napi::Error;
+use deno_napi::Result;
+
+#[napi_sym::napi_sym]
+fn napi_get_boolean(
+ env: *mut Env,
+ value: bool,
+ result: *mut napi_value,
+) -> Result {
+ let _env: &mut Env = env.as_mut().ok_or(Error::InvalidArg)?;
+ // *result = ...
+ Ok(())
+}
+```
+
+### `symbol_exports.json`
+
+A file containing the symbols that need to be put into the executable's dynamic
+symbol table at link-time.
+
+This is done using `/DEF:` on Windows, `-exported_symbol,_` on macOS and
+`--export-dynamic-symbol=` on Linux. See [`ext/napi/build.rs`](../build.rs).
+
+On Windows, you need to generate the `.def` file by running
+[`tools/napi/generate_symbols_lists.js`](../../../tools/napi/generate_symbols_lists.js).
diff --git a/ext/napi/sym/lib.rs b/ext/napi/sym/lib.rs
new file mode 100644
index 000000000..e2826306b
--- /dev/null
+++ b/ext/napi/sym/lib.rs
@@ -0,0 +1,31 @@
+// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
+
+use proc_macro::TokenStream;
+use quote::quote;
+use serde::Deserialize;
+
+static NAPI_EXPORTS: &str = include_str!("./symbol_exports.json");
+
+#[derive(Deserialize)]
+struct SymbolExports {
+ pub symbols: Vec<String>,
+}
+
+#[proc_macro_attribute]
+pub fn napi_sym(_attr: TokenStream, item: TokenStream) -> TokenStream {
+ let func = syn::parse::<syn::ItemFn>(item).expect("expected a function");
+
+ let exports: SymbolExports =
+ serde_json::from_str(NAPI_EXPORTS).expect("failed to parse exports");
+ let name = &func.sig.ident;
+ assert!(
+ exports.symbols.contains(&name.to_string()),
+ "cli/napi/sym/symbol_exports.json is out of sync!"
+ );
+
+ TokenStream::from(quote! {
+ crate::napi_wrap! {
+ #func
+ }
+ })
+}
diff --git a/ext/napi/sym/symbol_exports.json b/ext/napi/sym/symbol_exports.json
new file mode 100644
index 000000000..00946b8ed
--- /dev/null
+++ b/ext/napi/sym/symbol_exports.json
@@ -0,0 +1,164 @@
+{
+ "symbols": [
+ "node_api_create_syntax_error",
+ "napi_make_callback",
+ "napi_has_named_property",
+ "napi_async_destroy",
+ "napi_coerce_to_object",
+ "napi_get_arraybuffer_info",
+ "napi_detach_arraybuffer",
+ "napi_get_undefined",
+ "napi_reference_unref",
+ "napi_fatal_error",
+ "napi_open_callback_scope",
+ "napi_close_callback_scope",
+ "napi_get_value_uint32",
+ "napi_create_function",
+ "napi_create_arraybuffer",
+ "napi_get_value_int64",
+ "napi_get_all_property_names",
+ "napi_resolve_deferred",
+ "napi_is_detached_arraybuffer",
+ "napi_create_string_utf8",
+ "napi_create_threadsafe_function",
+ "node_api_throw_syntax_error",
+ "napi_create_bigint_int64",
+ "napi_wrap",
+ "napi_set_property",
+ "napi_get_value_bigint_int64",
+ "napi_open_handle_scope",
+ "napi_create_error",
+ "napi_create_buffer",
+ "napi_cancel_async_work",
+ "napi_is_exception_pending",
+ "napi_acquire_threadsafe_function",
+ "napi_create_external",
+ "napi_get_threadsafe_function_context",
+ "napi_get_null",
+ "napi_create_string_utf16",
+ "node_api_create_external_string_utf16",
+ "napi_get_value_bigint_uint64",
+ "napi_module_register",
+ "napi_is_typedarray",
+ "napi_create_external_buffer",
+ "napi_get_new_target",
+ "napi_get_instance_data",
+ "napi_close_handle_scope",
+ "napi_get_value_string_utf16",
+ "napi_get_property_names",
+ "napi_is_arraybuffer",
+ "napi_get_cb_info",
+ "napi_define_properties",
+ "napi_add_env_cleanup_hook",
+ "node_api_get_module_file_name",
+ "napi_get_node_version",
+ "napi_create_int64",
+ "napi_create_double",
+ "napi_get_and_clear_last_exception",
+ "napi_create_reference",
+ "napi_get_typedarray_info",
+ "napi_call_threadsafe_function",
+ "napi_get_last_error_info",
+ "napi_create_array_with_length",
+ "napi_coerce_to_number",
+ "napi_get_global",
+ "napi_is_error",
+ "napi_set_instance_data",
+ "napi_create_typedarray",
+ "napi_throw_type_error",
+ "napi_has_property",
+ "napi_get_value_external",
+ "napi_create_range_error",
+ "napi_typeof",
+ "napi_ref_threadsafe_function",
+ "napi_create_bigint_uint64",
+ "napi_get_prototype",
+ "napi_adjust_external_memory",
+ "napi_release_threadsafe_function",
+ "napi_delete_async_work",
+ "napi_create_string_latin1",
+ "node_api_create_external_string_latin1",
+ "napi_is_array",
+ "napi_unref_threadsafe_function",
+ "napi_throw_error",
+ "napi_has_own_property",
+ "napi_get_reference_value",
+ "napi_remove_env_cleanup_hook",
+ "napi_get_value_string_utf8",
+ "napi_is_promise",
+ "napi_get_boolean",
+ "napi_run_script",
+ "napi_get_element",
+ "napi_get_named_property",
+ "napi_get_buffer_info",
+ "napi_get_value_bool",
+ "napi_reference_ref",
+ "napi_create_object",
+ "napi_create_promise",
+ "napi_create_int32",
+ "napi_escape_handle",
+ "napi_open_escapable_handle_scope",
+ "napi_throw",
+ "napi_get_value_double",
+ "napi_set_named_property",
+ "napi_call_function",
+ "napi_create_date",
+ "napi_object_freeze",
+ "napi_get_uv_event_loop",
+ "napi_get_value_string_latin1",
+ "napi_reject_deferred",
+ "napi_add_finalizer",
+ "napi_create_array",
+ "napi_delete_reference",
+ "napi_get_date_value",
+ "napi_create_dataview",
+ "napi_get_version",
+ "napi_define_class",
+ "napi_is_date",
+ "napi_remove_wrap",
+ "napi_delete_property",
+ "napi_instanceof",
+ "napi_create_buffer_copy",
+ "napi_delete_element",
+ "napi_object_seal",
+ "napi_queue_async_work",
+ "napi_get_value_bigint_words",
+ "napi_is_buffer",
+ "napi_get_array_length",
+ "napi_get_property",
+ "napi_new_instance",
+ "napi_set_element",
+ "napi_create_bigint_words",
+ "napi_strict_equals",
+ "napi_is_dataview",
+ "napi_close_escapable_handle_scope",
+ "napi_get_dataview_info",
+ "napi_get_value_int32",
+ "napi_unwrap",
+ "napi_throw_range_error",
+ "napi_coerce_to_bool",
+ "napi_create_uint32",
+ "napi_has_element",
+ "napi_create_external_arraybuffer",
+ "napi_create_symbol",
+ "node_api_symbol_for",
+ "napi_coerce_to_string",
+ "napi_create_type_error",
+ "napi_fatal_exception",
+ "napi_create_async_work",
+ "napi_async_init",
+ "node_api_create_property_key_utf16",
+ "napi_type_tag_object",
+ "napi_check_object_type_tag",
+ "node_api_post_finalizer",
+ "napi_add_async_cleanup_hook",
+ "napi_remove_async_cleanup_hook",
+ "uv_mutex_init",
+ "uv_mutex_lock",
+ "uv_mutex_unlock",
+ "uv_mutex_destroy",
+ "uv_async_init",
+ "uv_async_send",
+ "uv_close"
+ ]
+}
diff --git a/ext/napi/util.rs b/ext/napi/util.rs
new file mode 100644
index 000000000..21e9d433a
--- /dev/null
+++ b/ext/napi/util.rs
@@ -0,0 +1,287 @@
+// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
+use crate::*;
+use libc::INT_MAX;
+
+#[repr(transparent)]
+pub(crate) struct SendPtr<T>(pub *const T);
+
+impl<T> SendPtr<T> {
+ // silly function to get around `clippy::redundant_locals`
+ pub fn take(self) -> *const T {
+ self.0
+ }
+}
+
+unsafe impl<T> Send for SendPtr<T> {}
+unsafe impl<T> Sync for SendPtr<T> {}
+
+pub fn get_array_buffer_ptr(ab: v8::Local<v8::ArrayBuffer>) -> *mut c_void {
+ match ab.data() {
+ Some(p) => p.as_ptr(),
+ None => std::ptr::null_mut(),
+ }
+}
+
+struct BufferFinalizer {
+ env: *mut Env,
+ finalize_cb: napi_finalize,
+ finalize_data: *mut c_void,
+ finalize_hint: *mut c_void,
+}
+
+impl Drop for BufferFinalizer {
+ fn drop(&mut self) {
+ unsafe {
+ (self.finalize_cb)(self.env as _, self.finalize_data, self.finalize_hint);
+ }
+ }
+}
+
+pub(crate) extern "C" fn backing_store_deleter_callback(
+ data: *mut c_void,
+ _byte_length: usize,
+ deleter_data: *mut c_void,
+) {
+ let mut finalizer =
+ unsafe { Box::<BufferFinalizer>::from_raw(deleter_data as _) };
+
+ finalizer.finalize_data = data;
+
+ drop(finalizer);
+}
+
+pub(crate) fn make_external_backing_store(
+ env: *mut Env,
+ data: *mut c_void,
+ byte_length: usize,
+ finalize_data: *mut c_void,
+ finalize_cb: napi_finalize,
+ finalize_hint: *mut c_void,
+) -> v8::UniqueRef<v8::BackingStore> {
+ let finalizer = Box::new(BufferFinalizer {
+ env,
+ finalize_data,
+ finalize_cb,
+ finalize_hint,
+ });
+
+ unsafe {
+ v8::ArrayBuffer::new_backing_store_from_ptr(
+ data,
+ byte_length,
+ backing_store_deleter_callback,
+ Box::into_raw(finalizer) as _,
+ )
+ }
+}
+
+#[macro_export]
+macro_rules! check_env {
+ ($env: expr) => {{
+ let env = $env;
+ if env.is_null() {
+ return napi_invalid_arg;
+ }
+ unsafe { &mut *env }
+ }};
+}
+
+#[macro_export]
+macro_rules! return_error_status_if_false {
+ ($env: expr, $condition: expr, $status: ident) => {
+ if !$condition {
+ return Err($crate::util::napi_set_last_error($env, $status).into());
+ }
+ };
+}
+
+#[macro_export]
+macro_rules! return_status_if_false {
+ ($env: expr, $condition: expr, $status: ident) => {
+ if !$condition {
+ return $crate::util::napi_set_last_error($env, $status);
+ }
+ };
+}
+
+pub(crate) unsafe fn check_new_from_utf8_len<'s>(
+ env: *mut Env,
+ str_: *const c_char,
+ len: usize,
+) -> Result<v8::Local<'s, v8::String>, napi_status> {
+ let env = unsafe { &mut *env };
+ return_error_status_if_false!(
+ env,
+ (len == NAPI_AUTO_LENGTH) || len <= INT_MAX as _,
+ napi_invalid_arg
+ );
+ return_error_status_if_false!(env, !str_.is_null(), napi_invalid_arg);
+ let string = if len == NAPI_AUTO_LENGTH {
+ let result = unsafe { std::ffi::CStr::from_ptr(str_ as *const _) }.to_str();
+ return_error_status_if_false!(env, result.is_ok(), napi_generic_failure);
+ result.unwrap()
+ } else {
+ let string = unsafe { std::slice::from_raw_parts(str_ as *const u8, len) };
+ let result = std::str::from_utf8(string);
+ return_error_status_if_false!(env, result.is_ok(), napi_generic_failure);
+ result.unwrap()
+ };
+ let result = {
+ let env = unsafe { &mut *(env as *mut Env) };
+ v8::String::new(&mut env.scope(), string)
+ };
+ return_error_status_if_false!(env, result.is_some(), napi_generic_failure);
+ Ok(result.unwrap())
+}
+
+#[inline]
+pub(crate) unsafe fn check_new_from_utf8<'s>(
+ env: *mut Env,
+ str_: *const c_char,
+) -> Result<v8::Local<'s, v8::String>, napi_status> {
+ unsafe { check_new_from_utf8_len(env, str_, NAPI_AUTO_LENGTH) }
+}
+
+pub(crate) unsafe fn v8_name_from_property_descriptor<'s>(
+ env: *mut Env,
+ p: &'s napi_property_descriptor,
+) -> Result<v8::Local<'s, v8::Name>, napi_status> {
+ if !p.utf8name.is_null() {
+ unsafe { check_new_from_utf8(env, p.utf8name).map(|v| v.into()) }
+ } else {
+ match *p.name {
+ Some(v) => match v.try_into() {
+ Ok(name) => Ok(name),
+ Err(_) => Err(napi_name_expected),
+ },
+ None => Err(napi_name_expected),
+ }
+ }
+}
+
+pub(crate) fn napi_clear_last_error(env: *mut Env) -> napi_status {
+ let env = unsafe { &mut *env };
+ env.last_error.error_code = napi_ok;
+ env.last_error.engine_error_code = 0;
+ env.last_error.engine_reserved = std::ptr::null_mut();
+ env.last_error.error_message = std::ptr::null_mut();
+ napi_ok
+}
+
+pub(crate) fn napi_set_last_error(
+ env: *mut Env,
+ error_code: napi_status,
+) -> napi_status {
+ let env = unsafe { &mut *env };
+ env.last_error.error_code = error_code;
+ error_code
+}
+
+#[macro_export]
+macro_rules! status_call {
+ ($call: expr) => {
+ let status = $call;
+ if status != napi_ok {
+ return status;
+ }
+ };
+}
+
+pub trait Nullable {
+ fn is_null(&self) -> bool;
+}
+
+impl<T> Nullable for *mut T {
+ fn is_null(&self) -> bool {
+ (*self).is_null()
+ }
+}
+
+impl<T> Nullable for *const T {
+ fn is_null(&self) -> bool {
+ (*self).is_null()
+ }
+}
+
+impl<T> Nullable for Option<T> {
+ fn is_null(&self) -> bool {
+ self.is_none()
+ }
+}
+
+impl<'s> Nullable for napi_value<'s> {
+ fn is_null(&self) -> bool {
+ self.is_none()
+ }
+}
+
+#[macro_export]
+macro_rules! check_arg {
+ ($env: expr, $ptr: expr) => {
+ $crate::return_status_if_false!(
+ $env,
+ !$crate::util::Nullable::is_null(&$ptr),
+ napi_invalid_arg
+ );
+ };
+}
+
+#[macro_export]
+macro_rules! napi_wrap {
+ ( $( # [ $attr:meta ] )* $vis:vis fn $name:ident $( < $( $x:lifetime ),* > )? ( $env:ident : & $( $lt:lifetime )? mut Env $( , $ident:ident : $ty:ty )* $(,)? ) -> napi_status $body:block ) => {
+ $( # [ $attr ] )*
+ #[no_mangle]
+ $vis unsafe extern "C" fn $name $( < $( $x ),* > )? ( env_ptr : *mut Env , $( $ident : $ty ),* ) -> napi_status {
+ let env: & $( $lt )? mut Env = $crate::check_env!(env_ptr);
+
+ if env.last_exception.is_some() {
+ return napi_pending_exception;
+ }
+
+ $crate::util::napi_clear_last_error(env);
+
+ let scope_env = unsafe { &mut *env_ptr };
+ let scope = &mut scope_env.scope();
+ let try_catch = &mut v8::TryCatch::new(scope);
+
+ #[inline(always)]
+ fn inner $( < $( $x ),* > )? ( $env: & $( $lt )? mut Env , $( $ident : $ty ),* ) -> napi_status $body
+
+ log::trace!("NAPI ENTER: {}", stringify!($name));
+
+ let result = inner( env, $( $ident ),* );
+
+ log::trace!("NAPI EXIT: {} {}", stringify!($name), result);
+
+ if let Some(exception) = try_catch.exception() {
+ let env = unsafe { &mut *env_ptr };
+ let global = v8::Global::new(env.isolate(), exception);
+ env.last_exception = Some(global);
+ return $crate::util::napi_set_last_error(env_ptr, napi_pending_exception);
+ }
+
+ if result != napi_ok {
+ return $crate::util::napi_set_last_error(env_ptr, result);
+ }
+
+ return result;
+ }
+ };
+
+ ( $( # [ $attr:meta ] )* $vis:vis fn $name:ident $( < $( $x:lifetime ),* > )? ( $( $ident:ident : $ty:ty ),* $(,)? ) -> napi_status $body:block ) => {
+ $( # [ $attr ] )*
+ #[no_mangle]
+ $vis unsafe extern "C" fn $name $( < $( $x ),* > )? ( $( $ident : $ty ),* ) -> napi_status {
+ #[inline(always)]
+ fn inner $( < $( $x ),* > )? ( $( $ident : $ty ),* ) -> napi_status $body
+
+ log::trace!("NAPI ENTER: {}", stringify!($name));
+
+ let result = inner( $( $ident ),* );
+
+ log::trace!("NAPI EXIT: {} {}", stringify!($name), result);
+
+ result
+ }
+ };
+}
diff --git a/ext/napi/uv.rs b/ext/napi/uv.rs
new file mode 100644
index 000000000..ea6b53966
--- /dev/null
+++ b/ext/napi/uv.rs
@@ -0,0 +1,230 @@
+// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
+
+use crate::*;
+use deno_core::parking_lot::Mutex;
+use std::mem::MaybeUninit;
+use std::ptr::addr_of_mut;
+
+fn assert_ok(res: c_int) -> c_int {
+ if res != 0 {
+ log::error!("bad result in uv polyfill: {res}");
+ // don't panic because that might unwind into
+ // c/c++
+ std::process::abort();
+ }
+ res
+}
+
+use js_native_api::napi_create_string_utf8;
+use node_api::napi_create_async_work;
+use node_api::napi_delete_async_work;
+use node_api::napi_queue_async_work;
+use std::ffi::c_int;
+
+const UV_MUTEX_SIZE: usize = {
+ #[cfg(unix)]
+ {
+ std::mem::size_of::<libc::pthread_mutex_t>()
+ }
+ #[cfg(windows)]
+ {
+ std::mem::size_of::<windows_sys::Win32::System::Threading::CRITICAL_SECTION>(
+ )
+ }
+};
+
+#[repr(C)]
+struct uv_mutex_t {
+ mutex: Mutex<()>,
+ _padding: [MaybeUninit<usize>; const {
+ (UV_MUTEX_SIZE - size_of::<Mutex<()>>()) / size_of::<usize>()
+ }],
+}
+
+#[no_mangle]
+unsafe extern "C" fn uv_mutex_init(lock: *mut uv_mutex_t) -> c_int {
+ unsafe {
+ addr_of_mut!((*lock).mutex).write(Mutex::new(()));
+ 0
+ }
+}
+
+#[no_mangle]
+unsafe extern "C" fn uv_mutex_lock(lock: *mut uv_mutex_t) {
+ unsafe {
+ let guard = (*lock).mutex.lock();
+ // forget the guard so it doesn't unlock when it goes out of scope.
+ // we're going to unlock it manually
+ std::mem::forget(guard);
+ }
+}
+
+#[no_mangle]
+unsafe extern "C" fn uv_mutex_unlock(lock: *mut uv_mutex_t) {
+ unsafe {
+ (*lock).mutex.force_unlock();
+ }
+}
+
+#[no_mangle]
+unsafe extern "C" fn uv_mutex_destroy(_lock: *mut uv_mutex_t) {
+ // no cleanup required
+}
+
+#[repr(C)]
+#[derive(Clone, Copy, Debug)]
+#[allow(dead_code)]
+enum uv_handle_type {
+ UV_UNKNOWN_HANDLE = 0,
+ UV_ASYNC,
+ UV_CHECK,
+ UV_FS_EVENT,
+ UV_FS_POLL,
+ UV_HANDLE,
+ UV_IDLE,
+ UV_NAMED_PIPE,
+ UV_POLL,
+ UV_PREPARE,
+ UV_PROCESS,
+ UV_STREAM,
+ UV_TCP,
+ UV_TIMER,
+ UV_TTY,
+ UV_UDP,
+ UV_SIGNAL,
+ UV_FILE,
+ UV_HANDLE_TYPE_MAX,
+}
+
+const UV_HANDLE_SIZE: usize = 96;
+
+#[repr(C)]
+struct uv_handle_t {
+ // public members
+ pub data: *mut c_void,
+ pub r#loop: *mut uv_loop_t,
+ pub r#type: uv_handle_type,
+
+ _padding: [MaybeUninit<usize>; const {
+ (UV_HANDLE_SIZE
+ - size_of::<*mut c_void>()
+ - size_of::<*mut uv_loop_t>()
+ - size_of::<uv_handle_type>())
+ / size_of::<usize>()
+ }],
+}
+
+#[cfg(unix)]
+const UV_ASYNC_SIZE: usize = 128;
+
+#[cfg(windows)]
+const UV_ASYNC_SIZE: usize = 224;
+
+#[repr(C)]
+struct uv_async_t {
+ // public members
+ pub data: *mut c_void,
+ pub r#loop: *mut uv_loop_t,
+ pub r#type: uv_handle_type,
+ // private
+ async_cb: uv_async_cb,
+ work: napi_async_work,
+ _padding: [MaybeUninit<usize>; const {
+ (UV_ASYNC_SIZE
+ - size_of::<*mut c_void>()
+ - size_of::<*mut uv_loop_t>()
+ - size_of::<uv_handle_type>()
+ - size_of::<uv_async_cb>()
+ - size_of::<napi_async_work>())
+ / size_of::<usize>()
+ }],
+}
+
+type uv_loop_t = Env;
+type uv_async_cb = extern "C" fn(handle: *mut uv_async_t);
+#[no_mangle]
+unsafe extern "C" fn uv_async_init(
+ r#loop: *mut uv_loop_t,
+ // probably uninitialized
+ r#async: *mut uv_async_t,
+ async_cb: uv_async_cb,
+) -> c_int {
+ unsafe {
+ addr_of_mut!((*r#async).r#loop).write(r#loop);
+ addr_of_mut!((*r#async).r#type).write(uv_handle_type::UV_ASYNC);
+ addr_of_mut!((*r#async).async_cb).write(async_cb);
+
+ let mut resource_name: MaybeUninit<napi_value> = MaybeUninit::uninit();
+ assert_ok(napi_create_string_utf8(
+ r#loop,
+ c"uv_async".as_ptr(),
+ usize::MAX,
+ resource_name.as_mut_ptr(),
+ ));
+ let resource_name = resource_name.assume_init();
+
+ let res = napi_create_async_work(
+ r#loop,
+ None::<v8::Local<'static, v8::Value>>.into(),
+ resource_name,
+ Some(async_exec_wrap),
+ None,
+ r#async.cast(),
+ addr_of_mut!((*r#async).work),
+ );
+ -res
+ }
+}
+
+#[no_mangle]
+unsafe extern "C" fn uv_async_send(handle: *mut uv_async_t) -> c_int {
+ unsafe { -napi_queue_async_work((*handle).r#loop, (*handle).work) }
+}
+
+type uv_close_cb = unsafe extern "C" fn(*mut uv_handle_t);
+
+#[no_mangle]
+unsafe extern "C" fn uv_close(handle: *mut uv_handle_t, close: uv_close_cb) {
+ unsafe {
+ if handle.is_null() {
+ close(handle);
+ return;
+ }
+ if let uv_handle_type::UV_ASYNC = (*handle).r#type {
+ let handle: *mut uv_async_t = handle.cast();
+ napi_delete_async_work((*handle).r#loop, (*handle).work);
+ }
+ close(handle);
+ }
+}
+
+unsafe extern "C" fn async_exec_wrap(_env: napi_env, data: *mut c_void) {
+ let data: *mut uv_async_t = data.cast();
+ unsafe {
+ ((*data).async_cb)(data);
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn sizes() {
+ assert_eq!(
+ std::mem::size_of::<libuv_sys_lite::uv_mutex_t>(),
+ UV_MUTEX_SIZE
+ );
+ assert_eq!(
+ std::mem::size_of::<libuv_sys_lite::uv_handle_t>(),
+ UV_HANDLE_SIZE
+ );
+ assert_eq!(
+ std::mem::size_of::<libuv_sys_lite::uv_async_t>(),
+ UV_ASYNC_SIZE
+ );
+ assert_eq!(std::mem::size_of::<uv_mutex_t>(), UV_MUTEX_SIZE);
+ assert_eq!(std::mem::size_of::<uv_handle_t>(), UV_HANDLE_SIZE);
+ assert_eq!(std::mem::size_of::<uv_async_t>(), UV_ASYNC_SIZE);
+ }
+}
diff --git a/ext/net/01_net.js b/ext/net/01_net.js
index 5b894947e..c3e5f9e5c 100644
--- a/ext/net/01_net.js
+++ b/ext/net/01_net.js
@@ -194,6 +194,20 @@ class Conn {
}
}
+class UpgradedConn extends Conn {
+ #rid = 0;
+
+ constructor(rid, remoteAddr, localAddr) {
+ super(rid, remoteAddr, localAddr);
+ ObjectDefineProperty(this, internalRidSymbol, {
+ __proto__: null,
+ enumerable: false,
+ value: rid,
+ });
+ this.#rid = rid;
+ }
+}
+
class TcpConn extends Conn {
#rid = 0;
@@ -601,5 +615,6 @@ export {
resolveDns,
TcpConn,
UnixConn,
+ UpgradedConn,
validatePort,
};
diff --git a/ext/net/Cargo.toml b/ext/net/Cargo.toml
index 9f72456b9..1febbd533 100644
--- a/ext/net/Cargo.toml
+++ b/ext/net/Cargo.toml
@@ -2,7 +2,7 @@
[package]
name = "deno_net"
-version = "0.163.0"
+version = "0.169.0"
authors.workspace = true
edition.workspace = true
license.workspace = true
@@ -17,10 +17,11 @@ path = "lib.rs"
deno_core.workspace = true
deno_permissions.workspace = true
deno_tls.workspace = true
+hickory-proto = "0.24"
+hickory-resolver.workspace = true
pin-project.workspace = true
rustls-tokio-stream.workspace = true
serde.workspace = true
socket2.workspace = true
+thiserror.workspace = true
tokio.workspace = true
-trust-dns-proto = "0.23"
-trust-dns-resolver = { version = "0.23", features = ["tokio-runtime", "serde-config"] }
diff --git a/ext/net/io.rs b/ext/net/io.rs
index f3aed3fcb..2907fa398 100644
--- a/ext/net/io.rs
+++ b/ext/net/io.rs
@@ -1,7 +1,6 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
-use deno_core::error::generic_error;
-use deno_core::error::AnyError;
+use deno_core::futures::TryFutureExt;
use deno_core::AsyncMutFuture;
use deno_core::AsyncRefCell;
use deno_core::AsyncResult;
@@ -69,25 +68,36 @@ where
pub async fn read(
self: Rc<Self>,
data: &mut [u8],
- ) -> Result<usize, AnyError> {
+ ) -> Result<usize, std::io::Error> {
let mut rd = self.rd_borrow_mut().await;
let nread = rd.read(data).try_or_cancel(self.cancel_handle()).await?;
Ok(nread)
}
- pub async fn write(self: Rc<Self>, data: &[u8]) -> Result<usize, AnyError> {
+ pub async fn write(
+ self: Rc<Self>,
+ data: &[u8],
+ ) -> Result<usize, std::io::Error> {
let mut wr = self.wr_borrow_mut().await;
let nwritten = wr.write(data).await?;
Ok(nwritten)
}
- pub async fn shutdown(self: Rc<Self>) -> Result<(), AnyError> {
+ pub async fn shutdown(self: Rc<Self>) -> Result<(), std::io::Error> {
let mut wr = self.wr_borrow_mut().await;
wr.shutdown().await?;
Ok(())
}
}
+#[derive(Debug, thiserror::Error)]
+pub enum MapError {
+ #[error("{0}")]
+ Io(std::io::Error),
+ #[error("Unable to get resources")]
+ NoResources,
+}
+
pub type TcpStreamResource =
FullDuplexResource<tcp::OwnedReadHalf, tcp::OwnedWriteHalf>;
@@ -100,7 +110,7 @@ impl Resource for TcpStreamResource {
}
fn shutdown(self: Rc<Self>) -> AsyncResult<()> {
- Box::pin(self.shutdown())
+ Box::pin(self.shutdown().map_err(Into::into))
}
fn close(self: Rc<Self>) {
@@ -109,31 +119,30 @@ impl Resource for TcpStreamResource {
}
impl TcpStreamResource {
- pub fn set_nodelay(self: Rc<Self>, nodelay: bool) -> Result<(), AnyError> {
- self.map_socket(Box::new(move |socket| Ok(socket.set_nodelay(nodelay)?)))
+ pub fn set_nodelay(self: Rc<Self>, nodelay: bool) -> Result<(), MapError> {
+ self.map_socket(Box::new(move |socket| socket.set_nodelay(nodelay)))
}
pub fn set_keepalive(
self: Rc<Self>,
keepalive: bool,
- ) -> Result<(), AnyError> {
- self
- .map_socket(Box::new(move |socket| Ok(socket.set_keepalive(keepalive)?)))
+ ) -> Result<(), MapError> {
+ self.map_socket(Box::new(move |socket| socket.set_keepalive(keepalive)))
}
#[allow(clippy::type_complexity)]
fn map_socket(
self: Rc<Self>,
- map: Box<dyn FnOnce(SockRef) -> Result<(), AnyError>>,
- ) -> Result<(), AnyError> {
+ map: Box<dyn FnOnce(SockRef) -> Result<(), std::io::Error>>,
+ ) -> Result<(), MapError> {
if let Some(wr) = RcRef::map(self, |r| &r.wr).try_borrow() {
let stream = wr.as_ref().as_ref();
let socket = socket2::SockRef::from(stream);
- return map(socket);
+ return map(socket).map_err(MapError::Io);
}
- Err(generic_error("Unable to get resources"))
+ Err(MapError::NoResources)
}
}
@@ -153,7 +162,9 @@ impl UnixStreamResource {
unreachable!()
}
#[allow(clippy::unused_async)]
- pub async fn shutdown(self: Rc<Self>) -> Result<(), AnyError> {
+ pub async fn shutdown(
+ self: Rc<Self>,
+ ) -> Result<(), deno_core::error::AnyError> {
unreachable!()
}
pub fn cancel_read_ops(&self) {
@@ -170,7 +181,7 @@ impl Resource for UnixStreamResource {
}
fn shutdown(self: Rc<Self>) -> AsyncResult<()> {
- Box::pin(self.shutdown())
+ Box::pin(self.shutdown().map_err(Into::into))
}
fn close(self: Rc<Self>) {
diff --git a/ext/net/lib.rs b/ext/net/lib.rs
index b039965d4..f482750b3 100644
--- a/ext/net/lib.rs
+++ b/ext/net/lib.rs
@@ -7,10 +7,11 @@ pub mod ops_tls;
pub mod ops_unix;
pub mod raw;
pub mod resolve_addr;
-mod tcp;
+pub mod tcp;
use deno_core::error::AnyError;
use deno_core::OpState;
+use deno_permissions::PermissionCheckError;
use deno_tls::rustls::RootCertStore;
use deno_tls::RootCertStoreProvider;
use std::borrow::Cow;
@@ -25,25 +26,25 @@ pub trait NetPermissions {
&mut self,
host: &(T, Option<u16>),
api_name: &str,
- ) -> Result<(), AnyError>;
+ ) -> Result<(), PermissionCheckError>;
#[must_use = "the resolved return value to mitigate time-of-check to time-of-use issues"]
fn check_read(
&mut self,
p: &str,
api_name: &str,
- ) -> Result<PathBuf, AnyError>;
+ ) -> Result<PathBuf, PermissionCheckError>;
#[must_use = "the resolved return value to mitigate time-of-check to time-of-use issues"]
fn check_write(
&mut self,
p: &str,
api_name: &str,
- ) -> Result<PathBuf, AnyError>;
+ ) -> Result<PathBuf, PermissionCheckError>;
#[must_use = "the resolved return value to mitigate time-of-check to time-of-use issues"]
fn check_write_path<'a>(
&mut self,
p: &'a Path,
api_name: &str,
- ) -> Result<Cow<'a, Path>, AnyError>;
+ ) -> Result<Cow<'a, Path>, PermissionCheckError>;
}
impl NetPermissions for deno_permissions::PermissionsContainer {
@@ -52,7 +53,7 @@ impl NetPermissions for deno_permissions::PermissionsContainer {
&mut self,
host: &(T, Option<u16>),
api_name: &str,
- ) -> Result<(), AnyError> {
+ ) -> Result<(), PermissionCheckError> {
deno_permissions::PermissionsContainer::check_net(self, host, api_name)
}
@@ -61,7 +62,7 @@ impl NetPermissions for deno_permissions::PermissionsContainer {
&mut self,
path: &str,
api_name: &str,
- ) -> Result<PathBuf, AnyError> {
+ ) -> Result<PathBuf, PermissionCheckError> {
deno_permissions::PermissionsContainer::check_read(self, path, api_name)
}
@@ -70,7 +71,7 @@ impl NetPermissions for deno_permissions::PermissionsContainer {
&mut self,
path: &str,
api_name: &str,
- ) -> Result<PathBuf, AnyError> {
+ ) -> Result<PathBuf, PermissionCheckError> {
deno_permissions::PermissionsContainer::check_write(self, path, api_name)
}
@@ -79,7 +80,7 @@ impl NetPermissions for deno_permissions::PermissionsContainer {
&mut self,
path: &'a Path,
api_name: &str,
- ) -> Result<Cow<'a, Path>, AnyError> {
+ ) -> Result<Cow<'a, Path>, PermissionCheckError> {
deno_permissions::PermissionsContainer::check_write_path(
self, path, api_name,
)
diff --git a/ext/net/ops.rs b/ext/net/ops.rs
index 5248493f4..9a8b70f0f 100644
--- a/ext/net/ops.rs
+++ b/ext/net/ops.rs
@@ -6,10 +6,6 @@ use crate::resolve_addr::resolve_addr;
use crate::resolve_addr::resolve_addr_sync;
use crate::tcp::TcpListener;
use crate::NetPermissions;
-use deno_core::error::bad_resource;
-use deno_core::error::custom_error;
-use deno_core::error::generic_error;
-use deno_core::error::AnyError;
use deno_core::op2;
use deno_core::CancelFuture;
@@ -22,6 +18,16 @@ use deno_core::OpState;
use deno_core::RcRef;
use deno_core::Resource;
use deno_core::ResourceId;
+use hickory_proto::rr::rdata::caa::Value;
+use hickory_proto::rr::record_data::RData;
+use hickory_proto::rr::record_type::RecordType;
+use hickory_resolver::config::NameServerConfigGroup;
+use hickory_resolver::config::ResolverConfig;
+use hickory_resolver::config::ResolverOpts;
+use hickory_resolver::error::ResolveError;
+use hickory_resolver::error::ResolveErrorKind;
+use hickory_resolver::system_conf;
+use hickory_resolver::AsyncResolver;
use serde::Deserialize;
use serde::Serialize;
use socket2::Domain;
@@ -37,15 +43,6 @@ use std::rc::Rc;
use std::str::FromStr;
use tokio::net::TcpStream;
use tokio::net::UdpSocket;
-use trust_dns_proto::rr::rdata::caa::Value;
-use trust_dns_proto::rr::record_data::RData;
-use trust_dns_proto::rr::record_type::RecordType;
-use trust_dns_resolver::config::NameServerConfigGroup;
-use trust_dns_resolver::config::ResolverConfig;
-use trust_dns_resolver::config::ResolverOpts;
-use trust_dns_resolver::error::ResolveErrorKind;
-use trust_dns_resolver::system_conf;
-use trust_dns_resolver::AsyncResolver;
#[derive(Serialize, Clone, Debug)]
#[serde(rename_all = "camelCase")]
@@ -68,11 +65,69 @@ impl From<SocketAddr> for IpAddr {
}
}
-pub(crate) fn accept_err(e: std::io::Error) -> AnyError {
+#[derive(Debug, thiserror::Error)]
+pub enum NetError {
+ #[error("Listener has been closed")]
+ ListenerClosed,
+ #[error("Listener already in use")]
+ ListenerBusy,
+ #[error("Socket has been closed")]
+ SocketClosed,
+ #[error("Socket has been closed")]
+ SocketClosedNotConnected,
+ #[error("Socket already in use")]
+ SocketBusy,
+ #[error("{0}")]
+ Io(#[from] std::io::Error),
+ #[error("Another accept task is ongoing")]
+ AcceptTaskOngoing,
+ #[error(transparent)]
+ Permission(#[from] deno_permissions::PermissionCheckError),
+ #[error("{0}")]
+ Resource(deno_core::error::AnyError),
+ #[error("No resolved address found")]
+ NoResolvedAddress,
+ #[error("{0}")]
+ AddrParse(#[from] std::net::AddrParseError),
+ #[error("{0}")]
+ Map(crate::io::MapError),
+ #[error("{0}")]
+ Canceled(#[from] deno_core::Canceled),
+ #[error("{0}")]
+ DnsNotFound(ResolveError),
+ #[error("{0}")]
+ DnsNotConnected(ResolveError),
+ #[error("{0}")]
+ DnsTimedOut(ResolveError),
+ #[error("{0}")]
+ Dns(#[from] ResolveError),
+ #[error("Provided record type is not supported")]
+ UnsupportedRecordType,
+ #[error("File name or path {0:?} is not valid UTF-8")]
+ InvalidUtf8(std::ffi::OsString),
+ #[error("unexpected key type")]
+ UnexpectedKeyType,
+ #[error("Invalid hostname: '{0}'")]
+ InvalidHostname(String), // TypeError
+ #[error("TCP stream is currently in use")]
+ TcpStreamBusy,
+ #[error("{0}")]
+ Rustls(#[from] deno_tls::rustls::Error),
+ #[error("{0}")]
+ Tls(#[from] deno_tls::TlsError),
+ #[error("Error creating TLS certificate: Deno.listenTls requires a key")]
+ ListenTlsRequiresKey, // InvalidData
+ #[error("{0}")]
+ RootCertStore(deno_core::anyhow::Error),
+ #[error("{0}")]
+ Reunite(tokio::net::tcp::ReuniteError),
+}
+
+pub(crate) fn accept_err(e: std::io::Error) -> NetError {
if let std::io::ErrorKind::Interrupted = e.kind() {
- bad_resource("Listener has been closed")
+ NetError::ListenerClosed
} else {
- e.into()
+ NetError::Io(e)
}
}
@@ -81,15 +136,15 @@ pub(crate) fn accept_err(e: std::io::Error) -> AnyError {
pub async fn op_net_accept_tcp(
state: Rc<RefCell<OpState>>,
#[smi] rid: ResourceId,
-) -> Result<(ResourceId, IpAddr, IpAddr), AnyError> {
+) -> Result<(ResourceId, IpAddr, IpAddr), NetError> {
let resource = state
.borrow()
.resource_table
.get::<NetworkListenerResource<TcpListener>>(rid)
- .map_err(|_| bad_resource("Listener has been closed"))?;
+ .map_err(|_| NetError::ListenerClosed)?;
let listener = RcRef::map(&resource, |r| &r.listener)
.try_borrow_mut()
- .ok_or_else(|| custom_error("Busy", "Another accept task is ongoing"))?;
+ .ok_or_else(|| NetError::AcceptTaskOngoing)?;
let cancel = RcRef::map(resource, |r| &r.cancel);
let (tcp_stream, _socket_addr) = listener
.accept()
@@ -112,12 +167,12 @@ pub async fn op_net_recv_udp(
state: Rc<RefCell<OpState>>,
#[smi] rid: ResourceId,
#[buffer] mut buf: JsBuffer,
-) -> Result<(usize, IpAddr), AnyError> {
+) -> Result<(usize, IpAddr), NetError> {
let resource = state
.borrow_mut()
.resource_table
.get::<UdpSocketResource>(rid)
- .map_err(|_| bad_resource("Socket has been closed"))?;
+ .map_err(|_| NetError::SocketClosed)?;
let socket = RcRef::map(&resource, |r| &r.socket).borrow().await;
let cancel_handle = RcRef::map(&resource, |r| &r.cancel);
let (nread, remote_addr) = socket
@@ -134,7 +189,7 @@ pub async fn op_net_send_udp<NP>(
#[smi] rid: ResourceId,
#[serde] addr: IpAddr,
#[buffer] zero_copy: JsBuffer,
-) -> Result<usize, AnyError>
+) -> Result<usize, NetError>
where
NP: NetPermissions + 'static,
{
@@ -148,13 +203,13 @@ where
let addr = resolve_addr(&addr.hostname, addr.port)
.await?
.next()
- .ok_or_else(|| generic_error("No resolved address found"))?;
+ .ok_or(NetError::NoResolvedAddress)?;
let resource = state
.borrow_mut()
.resource_table
.get::<UdpSocketResource>(rid)
- .map_err(|_| bad_resource("Socket has been closed"))?;
+ .map_err(|_| NetError::SocketClosed)?;
let socket = RcRef::map(&resource, |r| &r.socket).borrow().await;
let nwritten = socket.send_to(&zero_copy, &addr).await?;
@@ -167,12 +222,12 @@ pub async fn op_net_join_multi_v4_udp(
#[smi] rid: ResourceId,
#[string] address: String,
#[string] multi_interface: String,
-) -> Result<(), AnyError> {
+) -> Result<(), NetError> {
let resource = state
.borrow_mut()
.resource_table
.get::<UdpSocketResource>(rid)
- .map_err(|_| bad_resource("Socket has been closed"))?;
+ .map_err(|_| NetError::SocketClosed)?;
let socket = RcRef::map(&resource, |r| &r.socket).borrow().await;
let addr = Ipv4Addr::from_str(address.as_str())?;
@@ -189,12 +244,12 @@ pub async fn op_net_join_multi_v6_udp(
#[smi] rid: ResourceId,
#[string] address: String,
#[smi] multi_interface: u32,
-) -> Result<(), AnyError> {
+) -> Result<(), NetError> {
let resource = state
.borrow_mut()
.resource_table
.get::<UdpSocketResource>(rid)
- .map_err(|_| bad_resource("Socket has been closed"))?;
+ .map_err(|_| NetError::SocketClosed)?;
let socket = RcRef::map(&resource, |r| &r.socket).borrow().await;
let addr = Ipv6Addr::from_str(address.as_str())?;
@@ -210,12 +265,12 @@ pub async fn op_net_leave_multi_v4_udp(
#[smi] rid: ResourceId,
#[string] address: String,
#[string] multi_interface: String,
-) -> Result<(), AnyError> {
+) -> Result<(), NetError> {
let resource = state
.borrow_mut()
.resource_table
.get::<UdpSocketResource>(rid)
- .map_err(|_| bad_resource("Socket has been closed"))?;
+ .map_err(|_| NetError::SocketClosed)?;
let socket = RcRef::map(&resource, |r| &r.socket).borrow().await;
let addr = Ipv4Addr::from_str(address.as_str())?;
@@ -232,12 +287,12 @@ pub async fn op_net_leave_multi_v6_udp(
#[smi] rid: ResourceId,
#[string] address: String,
#[smi] multi_interface: u32,
-) -> Result<(), AnyError> {
+) -> Result<(), NetError> {
let resource = state
.borrow_mut()
.resource_table
.get::<UdpSocketResource>(rid)
- .map_err(|_| bad_resource("Socket has been closed"))?;
+ .map_err(|_| NetError::SocketClosed)?;
let socket = RcRef::map(&resource, |r| &r.socket).borrow().await;
let addr = Ipv6Addr::from_str(address.as_str())?;
@@ -253,16 +308,16 @@ pub async fn op_net_set_multi_loopback_udp(
#[smi] rid: ResourceId,
is_v4_membership: bool,
loopback: bool,
-) -> Result<(), AnyError> {
+) -> Result<(), NetError> {
let resource = state
.borrow_mut()
.resource_table
.get::<UdpSocketResource>(rid)
- .map_err(|_| bad_resource("Socket has been closed"))?;
+ .map_err(|_| NetError::SocketClosed)?;
let socket = RcRef::map(&resource, |r| &r.socket).borrow().await;
if is_v4_membership {
- socket.set_multicast_loop_v4(loopback)?
+ socket.set_multicast_loop_v4(loopback)?;
} else {
socket.set_multicast_loop_v6(loopback)?;
}
@@ -275,12 +330,12 @@ pub async fn op_net_set_multi_ttl_udp(
state: Rc<RefCell<OpState>>,
#[smi] rid: ResourceId,
#[smi] ttl: u32,
-) -> Result<(), AnyError> {
+) -> Result<(), NetError> {
let resource = state
.borrow_mut()
.resource_table
.get::<UdpSocketResource>(rid)
- .map_err(|_| bad_resource("Socket has been closed"))?;
+ .map_err(|_| NetError::SocketClosed)?;
let socket = RcRef::map(&resource, |r| &r.socket).borrow().await;
socket.set_multicast_ttl_v4(ttl)?;
@@ -293,7 +348,7 @@ pub async fn op_net_set_multi_ttl_udp(
pub async fn op_net_connect_tcp<NP>(
state: Rc<RefCell<OpState>>,
#[serde] addr: IpAddr,
-) -> Result<(ResourceId, IpAddr, IpAddr), AnyError>
+) -> Result<(ResourceId, IpAddr, IpAddr), NetError>
where
NP: NetPermissions + 'static,
{
@@ -304,7 +359,7 @@ where
pub async fn op_net_connect_tcp_inner<NP>(
state: Rc<RefCell<OpState>>,
addr: IpAddr,
-) -> Result<(ResourceId, IpAddr, IpAddr), AnyError>
+) -> Result<(ResourceId, IpAddr, IpAddr), NetError>
where
NP: NetPermissions + 'static,
{
@@ -318,7 +373,7 @@ where
let addr = resolve_addr(&addr.hostname, addr.port)
.await?
.next()
- .ok_or_else(|| generic_error("No resolved address found"))?;
+ .ok_or_else(|| NetError::NoResolvedAddress)?;
let tcp_stream = TcpStream::connect(&addr).await?;
let local_addr = tcp_stream.local_addr()?;
let remote_addr = tcp_stream.peer_addr()?;
@@ -353,7 +408,7 @@ pub fn op_net_listen_tcp<NP>(
#[serde] addr: IpAddr,
reuse_port: bool,
load_balanced: bool,
-) -> Result<(ResourceId, IpAddr), AnyError>
+) -> Result<(ResourceId, IpAddr), NetError>
where
NP: NetPermissions + 'static,
{
@@ -365,7 +420,7 @@ where
.check_net(&(&addr.hostname, Some(addr.port)), "Deno.listen()")?;
let addr = resolve_addr_sync(&addr.hostname, addr.port)?
.next()
- .ok_or_else(|| generic_error("No resolved address found"))?;
+ .ok_or_else(|| NetError::NoResolvedAddress)?;
let listener = if load_balanced {
TcpListener::bind_load_balanced(addr)
@@ -384,7 +439,7 @@ fn net_listen_udp<NP>(
addr: IpAddr,
reuse_address: bool,
loopback: bool,
-) -> Result<(ResourceId, IpAddr), AnyError>
+) -> Result<(ResourceId, IpAddr), NetError>
where
NP: NetPermissions + 'static,
{
@@ -393,7 +448,7 @@ where
.check_net(&(&addr.hostname, Some(addr.port)), "Deno.listenDatagram()")?;
let addr = resolve_addr_sync(&addr.hostname, addr.port)?
.next()
- .ok_or_else(|| generic_error("No resolved address found"))?;
+ .ok_or_else(|| NetError::NoResolvedAddress)?;
let domain = if addr.is_ipv4() {
Domain::IPV4
@@ -453,7 +508,7 @@ pub fn op_net_listen_udp<NP>(
#[serde] addr: IpAddr,
reuse_address: bool,
loopback: bool,
-) -> Result<(ResourceId, IpAddr), AnyError>
+) -> Result<(ResourceId, IpAddr), NetError>
where
NP: NetPermissions + 'static,
{
@@ -468,7 +523,7 @@ pub fn op_node_unstable_net_listen_udp<NP>(
#[serde] addr: IpAddr,
reuse_address: bool,
loopback: bool,
-) -> Result<(ResourceId, IpAddr), AnyError>
+) -> Result<(ResourceId, IpAddr), NetError>
where
NP: NetPermissions + 'static,
{
@@ -551,7 +606,7 @@ pub struct NameServer {
pub async fn op_dns_resolve<NP>(
state: Rc<RefCell<OpState>>,
#[serde] args: ResolveAddrArgs,
-) -> Result<Vec<DnsReturnRecord>, AnyError>
+) -> Result<Vec<DnsReturnRecord>, NetError>
where
NP: NetPermissions + 'static,
{
@@ -618,22 +673,17 @@ where
};
lookup
- .map_err(|e| {
- let message = format!("{e}");
- match e.kind() {
- ResolveErrorKind::NoRecordsFound { .. } => {
- custom_error("NotFound", message)
- }
- ResolveErrorKind::Message("No connections available") => {
- custom_error("NotConnected", message)
- }
- ResolveErrorKind::Timeout => custom_error("TimedOut", message),
- _ => generic_error(message),
+ .map_err(|e| match e.kind() {
+ ResolveErrorKind::NoRecordsFound { .. } => NetError::DnsNotFound(e),
+ ResolveErrorKind::Message("No connections available") => {
+ NetError::DnsNotConnected(e)
}
+ ResolveErrorKind::Timeout => NetError::DnsTimedOut(e),
+ _ => NetError::Dns(e),
})?
.iter()
.filter_map(|rdata| rdata_to_return_record(record_type)(rdata).transpose())
- .collect::<Result<Vec<DnsReturnRecord>, AnyError>>()
+ .collect::<Result<Vec<DnsReturnRecord>, NetError>>()
}
#[op2(fast)]
@@ -641,7 +691,7 @@ pub fn op_set_nodelay(
state: &mut OpState,
#[smi] rid: ResourceId,
nodelay: bool,
-) -> Result<(), AnyError> {
+) -> Result<(), NetError> {
op_set_nodelay_inner(state, rid, nodelay)
}
@@ -650,10 +700,12 @@ pub fn op_set_nodelay_inner(
state: &mut OpState,
rid: ResourceId,
nodelay: bool,
-) -> Result<(), AnyError> {
- let resource: Rc<TcpStreamResource> =
- state.resource_table.get::<TcpStreamResource>(rid)?;
- resource.set_nodelay(nodelay)
+) -> Result<(), NetError> {
+ let resource: Rc<TcpStreamResource> = state
+ .resource_table
+ .get::<TcpStreamResource>(rid)
+ .map_err(NetError::Resource)?;
+ resource.set_nodelay(nodelay).map_err(NetError::Map)
}
#[op2(fast)]
@@ -661,7 +713,7 @@ pub fn op_set_keepalive(
state: &mut OpState,
#[smi] rid: ResourceId,
keepalive: bool,
-) -> Result<(), AnyError> {
+) -> Result<(), NetError> {
op_set_keepalive_inner(state, rid, keepalive)
}
@@ -670,17 +722,19 @@ pub fn op_set_keepalive_inner(
state: &mut OpState,
rid: ResourceId,
keepalive: bool,
-) -> Result<(), AnyError> {
- let resource: Rc<TcpStreamResource> =
- state.resource_table.get::<TcpStreamResource>(rid)?;
- resource.set_keepalive(keepalive)
+) -> Result<(), NetError> {
+ let resource: Rc<TcpStreamResource> = state
+ .resource_table
+ .get::<TcpStreamResource>(rid)
+ .map_err(NetError::Resource)?;
+ resource.set_keepalive(keepalive).map_err(NetError::Map)
}
fn rdata_to_return_record(
ty: RecordType,
-) -> impl Fn(&RData) -> Result<Option<DnsReturnRecord>, AnyError> {
+) -> impl Fn(&RData) -> Result<Option<DnsReturnRecord>, NetError> {
use RecordType::*;
- move |r: &RData| -> Result<Option<DnsReturnRecord>, AnyError> {
+ move |r: &RData| -> Result<Option<DnsReturnRecord>, NetError> {
let record = match ty {
A => r.as_a().map(ToString::to_string).map(DnsReturnRecord::A),
AAAA => r
@@ -761,12 +815,7 @@ fn rdata_to_return_record(
.collect();
DnsReturnRecord::Txt(texts)
}),
- _ => {
- return Err(custom_error(
- "NotSupported",
- "Provided record type is not supported",
- ))
- }
+ _ => return Err(NetError::UnsupportedRecordType),
};
Ok(record)
}
@@ -778,6 +827,22 @@ mod tests {
use deno_core::futures::FutureExt;
use deno_core::JsRuntime;
use deno_core::RuntimeOptions;
+ use deno_permissions::PermissionCheckError;
+ use hickory_proto::rr::rdata::a::A;
+ use hickory_proto::rr::rdata::aaaa::AAAA;
+ use hickory_proto::rr::rdata::caa::KeyValue;
+ use hickory_proto::rr::rdata::caa::CAA;
+ use hickory_proto::rr::rdata::mx::MX;
+ use hickory_proto::rr::rdata::name::ANAME;
+ use hickory_proto::rr::rdata::name::CNAME;
+ use hickory_proto::rr::rdata::name::NS;
+ use hickory_proto::rr::rdata::name::PTR;
+ use hickory_proto::rr::rdata::naptr::NAPTR;
+ use hickory_proto::rr::rdata::srv::SRV;
+ use hickory_proto::rr::rdata::txt::TXT;
+ use hickory_proto::rr::rdata::SOA;
+ use hickory_proto::rr::record_data::RData;
+ use hickory_proto::rr::Name;
use socket2::SockRef;
use std::net::Ipv4Addr;
use std::net::Ipv6Addr;
@@ -786,21 +851,6 @@ mod tests {
use std::path::PathBuf;
use std::sync::Arc;
use std::sync::Mutex;
- use trust_dns_proto::rr::rdata::a::A;
- use trust_dns_proto::rr::rdata::aaaa::AAAA;
- use trust_dns_proto::rr::rdata::caa::KeyValue;
- use trust_dns_proto::rr::rdata::caa::CAA;
- use trust_dns_proto::rr::rdata::mx::MX;
- use trust_dns_proto::rr::rdata::name::ANAME;
- use trust_dns_proto::rr::rdata::name::CNAME;
- use trust_dns_proto::rr::rdata::name::NS;
- use trust_dns_proto::rr::rdata::name::PTR;
- use trust_dns_proto::rr::rdata::naptr::NAPTR;
- use trust_dns_proto::rr::rdata::srv::SRV;
- use trust_dns_proto::rr::rdata::txt::TXT;
- use trust_dns_proto::rr::rdata::SOA;
- use trust_dns_proto::rr::record_data::RData;
- use trust_dns_proto::rr::Name;
#[test]
fn rdata_to_return_record_a() {
@@ -985,7 +1035,7 @@ mod tests {
&mut self,
_host: &(T, Option<u16>),
_api_name: &str,
- ) -> Result<(), AnyError> {
+ ) -> Result<(), PermissionCheckError> {
Ok(())
}
@@ -993,7 +1043,7 @@ mod tests {
&mut self,
p: &str,
_api_name: &str,
- ) -> Result<PathBuf, AnyError> {
+ ) -> Result<PathBuf, PermissionCheckError> {
Ok(PathBuf::from(p))
}
@@ -1001,7 +1051,7 @@ mod tests {
&mut self,
p: &str,
_api_name: &str,
- ) -> Result<PathBuf, AnyError> {
+ ) -> Result<PathBuf, PermissionCheckError> {
Ok(PathBuf::from(p))
}
@@ -1009,7 +1059,7 @@ mod tests {
&mut self,
p: &'a Path,
_api_name: &str,
- ) -> Result<Cow<'a, Path>, AnyError> {
+ ) -> Result<Cow<'a, Path>, PermissionCheckError> {
Ok(Cow::Borrowed(p))
}
}
@@ -1091,7 +1141,7 @@ mod tests {
let vals = result.unwrap();
rid = rid.or(Some(vals.0));
}
- };
+ }
let rid = rid.unwrap();
let state = runtime.op_state();
diff --git a/ext/net/ops_tls.rs b/ext/net/ops_tls.rs
index a68d144b5..c7d65dd85 100644
--- a/ext/net/ops_tls.rs
+++ b/ext/net/ops_tls.rs
@@ -2,6 +2,7 @@
use crate::io::TcpStreamResource;
use crate::ops::IpAddr;
+use crate::ops::NetError;
use crate::ops::TlsHandshakeInfo;
use crate::raw::NetworkListenerResource;
use crate::resolve_addr::resolve_addr;
@@ -10,13 +11,7 @@ use crate::tcp::TcpListener;
use crate::DefaultTlsOptions;
use crate::NetPermissions;
use crate::UnsafelyIgnoreCertificateErrors;
-use deno_core::anyhow::anyhow;
-use deno_core::anyhow::bail;
-use deno_core::error::bad_resource;
-use deno_core::error::custom_error;
-use deno_core::error::generic_error;
-use deno_core::error::invalid_hostname;
-use deno_core::error::AnyError;
+use deno_core::futures::TryFutureExt;
use deno_core::op2;
use deno_core::v8;
use deno_core::AsyncRefCell;
@@ -118,20 +113,23 @@ impl TlsStreamResource {
pub async fn read(
self: Rc<Self>,
data: &mut [u8],
- ) -> Result<usize, AnyError> {
+ ) -> Result<usize, std::io::Error> {
let mut rd = RcRef::map(&self, |r| &r.rd).borrow_mut().await;
let cancel_handle = RcRef::map(&self, |r| &r.cancel_handle);
- Ok(rd.read(data).try_or_cancel(cancel_handle).await?)
+ rd.read(data).try_or_cancel(cancel_handle).await
}
- pub async fn write(self: Rc<Self>, data: &[u8]) -> Result<usize, AnyError> {
+ pub async fn write(
+ self: Rc<Self>,
+ data: &[u8],
+ ) -> Result<usize, std::io::Error> {
let mut wr = RcRef::map(self, |r| &r.wr).borrow_mut().await;
let nwritten = wr.write(data).await?;
wr.flush().await?;
Ok(nwritten)
}
- pub async fn shutdown(self: Rc<Self>) -> Result<(), AnyError> {
+ pub async fn shutdown(self: Rc<Self>) -> Result<(), std::io::Error> {
let mut wr = RcRef::map(self, |r| &r.wr).borrow_mut().await;
wr.shutdown().await?;
Ok(())
@@ -139,7 +137,7 @@ impl TlsStreamResource {
pub async fn handshake(
self: &Rc<Self>,
- ) -> Result<TlsHandshakeInfo, AnyError> {
+ ) -> Result<TlsHandshakeInfo, std::io::Error> {
if let Some(tls_info) = &*self.handshake_info.borrow() {
return Ok(tls_info.clone());
}
@@ -164,7 +162,7 @@ impl Resource for TlsStreamResource {
}
fn shutdown(self: Rc<Self>) -> AsyncResult<()> {
- Box::pin(self.shutdown())
+ Box::pin(self.shutdown().map_err(Into::into))
}
fn close(self: Rc<Self>) {
@@ -201,7 +199,7 @@ pub fn op_tls_key_null() -> TlsKeysHolder {
pub fn op_tls_key_static(
#[string] cert: &str,
#[string] key: &str,
-) -> Result<TlsKeysHolder, AnyError> {
+) -> Result<TlsKeysHolder, deno_tls::TlsError> {
let cert = load_certs(&mut BufReader::new(cert.as_bytes()))?;
let key = load_private_keys(key.as_bytes())?
.into_iter()
@@ -236,9 +234,9 @@ pub fn op_tls_cert_resolver_resolve(
#[cppgc] lookup: &TlsKeyLookup,
#[string] sni: String,
#[cppgc] key: &TlsKeysHolder,
-) -> Result<(), AnyError> {
+) -> Result<(), NetError> {
let TlsKeys::Static(key) = key.take() else {
- bail!("unexpected key type");
+ return Err(NetError::UnexpectedKeyType);
};
lookup.resolve(sni, Ok(key));
Ok(())
@@ -258,7 +256,7 @@ pub fn op_tls_cert_resolver_resolve_error(
pub fn op_tls_start<NP>(
state: Rc<RefCell<OpState>>,
#[serde] args: StartTlsArgs,
-) -> Result<(ResourceId, IpAddr, IpAddr), AnyError>
+) -> Result<(ResourceId, IpAddr, IpAddr), NetError>
where
NP: NetPermissions + 'static,
{
@@ -271,7 +269,9 @@ where
{
let mut s = state.borrow_mut();
let permissions = s.borrow_mut::<NP>();
- permissions.check_net(&(&hostname, Some(0)), "Deno.startTls()")?;
+ permissions
+ .check_net(&(&hostname, Some(0)), "Deno.startTls()")
+ .map_err(NetError::Permission)?;
}
let ca_certs = args
@@ -281,7 +281,7 @@ where
.collect::<Vec<_>>();
let hostname_dns = ServerName::try_from(hostname.to_string())
- .map_err(|_| invalid_hostname(&hostname))?;
+ .map_err(|_| NetError::InvalidHostname(hostname))?;
let unsafely_ignore_certificate_errors = state
.borrow()
@@ -291,19 +291,21 @@ where
let root_cert_store = state
.borrow()
.borrow::<DefaultTlsOptions>()
- .root_cert_store()?;
+ .root_cert_store()
+ .map_err(NetError::RootCertStore)?;
let resource_rc = state
.borrow_mut()
.resource_table
- .take::<TcpStreamResource>(rid)?;
+ .take::<TcpStreamResource>(rid)
+ .map_err(NetError::Resource)?;
// This TCP connection might be used somewhere else. If it's the case, we cannot proceed with the
// process of starting a TLS connection on top of this TCP connection, so we just return a Busy error.
// See also: https://github.com/denoland/deno/pull/16242
- let resource = Rc::try_unwrap(resource_rc)
- .map_err(|_| custom_error("Busy", "TCP stream is currently in use"))?;
+ let resource =
+ Rc::try_unwrap(resource_rc).map_err(|_| NetError::TcpStreamBusy)?;
let (read_half, write_half) = resource.into_inner();
- let tcp_stream = read_half.reunite(write_half)?;
+ let tcp_stream = read_half.reunite(write_half).map_err(NetError::Reunite)?;
let local_addr = tcp_stream.local_addr()?;
let remote_addr = tcp_stream.peer_addr()?;
@@ -345,7 +347,7 @@ pub async fn op_net_connect_tls<NP>(
#[serde] addr: IpAddr,
#[serde] args: ConnectTlsArgs,
#[cppgc] key_pair: &TlsKeysHolder,
-) -> Result<(ResourceId, IpAddr, IpAddr), AnyError>
+) -> Result<(ResourceId, IpAddr, IpAddr), NetError>
where
NP: NetPermissions + 'static,
{
@@ -359,9 +361,14 @@ where
let mut s = state.borrow_mut();
let permissions = s.borrow_mut::<NP>();
permissions
- .check_net(&(&addr.hostname, Some(addr.port)), "Deno.connectTls()")?;
+ .check_net(&(&addr.hostname, Some(addr.port)), "Deno.connectTls()")
+ .map_err(NetError::Permission)?;
if let Some(path) = cert_file {
- Some(permissions.check_read(path, "Deno.connectTls()")?)
+ Some(
+ permissions
+ .check_read(path, "Deno.connectTls()")
+ .map_err(NetError::Permission)?,
+ )
} else {
None
}
@@ -382,17 +389,18 @@ where
let root_cert_store = state
.borrow()
.borrow::<DefaultTlsOptions>()
- .root_cert_store()?;
+ .root_cert_store()
+ .map_err(NetError::RootCertStore)?;
let hostname_dns = if let Some(server_name) = args.server_name {
ServerName::try_from(server_name)
} else {
ServerName::try_from(addr.hostname.clone())
}
- .map_err(|_| invalid_hostname(&addr.hostname))?;
+ .map_err(|_| NetError::InvalidHostname(addr.hostname.clone()))?;
let connect_addr = resolve_addr(&addr.hostname, addr.port)
.await?
.next()
- .ok_or_else(|| generic_error("No resolved address found"))?;
+ .ok_or_else(|| NetError::NoResolvedAddress)?;
let tcp_stream = TcpStream::connect(connect_addr).await?;
let local_addr = tcp_stream.local_addr()?;
let remote_addr = tcp_stream.peer_addr()?;
@@ -444,7 +452,7 @@ pub fn op_net_listen_tls<NP>(
#[serde] addr: IpAddr,
#[serde] args: ListenTlsArgs,
#[cppgc] keys: &TlsKeysHolder,
-) -> Result<(ResourceId, IpAddr), AnyError>
+) -> Result<(ResourceId, IpAddr), NetError>
where
NP: NetPermissions + 'static,
{
@@ -455,12 +463,13 @@ where
{
let permissions = state.borrow_mut::<NP>();
permissions
- .check_net(&(&addr.hostname, Some(addr.port)), "Deno.listenTls()")?;
+ .check_net(&(&addr.hostname, Some(addr.port)), "Deno.listenTls()")
+ .map_err(NetError::Permission)?;
}
let bind_addr = resolve_addr_sync(&addr.hostname, addr.port)?
.next()
- .ok_or_else(|| generic_error("No resolved address found"))?;
+ .ok_or(NetError::NoResolvedAddress)?;
let tcp_listener = if args.load_balanced {
TcpListener::bind_load_balanced(bind_addr)
@@ -475,28 +484,24 @@ where
.map(|s| s.into_bytes())
.collect();
let listener = match keys.take() {
- TlsKeys::Null => Err(anyhow!("Deno.listenTls requires a key")),
+ TlsKeys::Null => return Err(NetError::ListenTlsRequiresKey),
TlsKeys::Static(TlsKey(cert, key)) => {
let mut tls_config = ServerConfig::builder()
.with_no_client_auth()
- .with_single_cert(cert, key)
- .map_err(|e| anyhow!(e))?;
+ .with_single_cert(cert, key)?;
tls_config.alpn_protocols = alpn;
- Ok(TlsListener {
+ TlsListener {
tcp_listener,
tls_config: Some(tls_config.into()),
server_config_provider: None,
- })
+ }
}
- TlsKeys::Resolver(resolver) => Ok(TlsListener {
+ TlsKeys::Resolver(resolver) => TlsListener {
tcp_listener,
tls_config: None,
server_config_provider: Some(resolver.into_server_config_provider(alpn)),
- }),
- }
- .map_err(|e| {
- custom_error("InvalidData", "Error creating TLS certificate").context(e)
- })?;
+ },
+ };
let tls_listener_resource = NetworkListenerResource::new(listener);
@@ -510,23 +515,23 @@ where
pub async fn op_net_accept_tls(
state: Rc<RefCell<OpState>>,
#[smi] rid: ResourceId,
-) -> Result<(ResourceId, IpAddr, IpAddr), AnyError> {
+) -> Result<(ResourceId, IpAddr, IpAddr), NetError> {
let resource = state
.borrow()
.resource_table
.get::<NetworkListenerResource<TlsListener>>(rid)
- .map_err(|_| bad_resource("Listener has been closed"))?;
+ .map_err(|_| NetError::ListenerClosed)?;
let cancel_handle = RcRef::map(&resource, |r| &r.cancel);
let listener = RcRef::map(&resource, |r| &r.listener)
.try_borrow_mut()
- .ok_or_else(|| custom_error("Busy", "Another accept task is ongoing"))?;
+ .ok_or_else(|| NetError::AcceptTaskOngoing)?;
let (tls_stream, remote_addr) =
match listener.accept().try_or_cancel(&cancel_handle).await {
Ok(tuple) => tuple,
Err(err) if err.kind() == ErrorKind::Interrupted => {
- return Err(bad_resource("Listener has been closed"));
+ return Err(NetError::ListenerClosed);
}
Err(err) => return Err(err.into()),
};
@@ -547,11 +552,11 @@ pub async fn op_net_accept_tls(
pub async fn op_tls_handshake(
state: Rc<RefCell<OpState>>,
#[smi] rid: ResourceId,
-) -> Result<TlsHandshakeInfo, AnyError> {
+) -> Result<TlsHandshakeInfo, NetError> {
let resource = state
.borrow()
.resource_table
.get::<TlsStreamResource>(rid)
- .map_err(|_| bad_resource("Listener has been closed"))?;
- resource.handshake().await
+ .map_err(|_| NetError::ListenerClosed)?;
+ resource.handshake().await.map_err(Into::into)
}
diff --git a/ext/net/ops_unix.rs b/ext/net/ops_unix.rs
index 95293284f..04ae84906 100644
--- a/ext/net/ops_unix.rs
+++ b/ext/net/ops_unix.rs
@@ -1,11 +1,9 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
use crate::io::UnixStreamResource;
+use crate::ops::NetError;
use crate::raw::NetworkListenerResource;
use crate::NetPermissions;
-use deno_core::error::bad_resource;
-use deno_core::error::custom_error;
-use deno_core::error::AnyError;
use deno_core::op2;
use deno_core::AsyncRefCell;
use deno_core::CancelHandle;
@@ -26,11 +24,8 @@ use tokio::net::UnixListener;
pub use tokio::net::UnixStream;
/// A utility function to map OsStrings to Strings
-pub fn into_string(s: std::ffi::OsString) -> Result<String, AnyError> {
- s.into_string().map_err(|s| {
- let message = format!("File name or path {s:?} is not valid UTF-8");
- custom_error("InvalidData", message)
- })
+pub fn into_string(s: std::ffi::OsString) -> Result<String, NetError> {
+ s.into_string().map_err(NetError::InvalidUtf8)
}
pub struct UnixDatagramResource {
@@ -63,15 +58,15 @@ pub struct UnixListenArgs {
pub async fn op_net_accept_unix(
state: Rc<RefCell<OpState>>,
#[smi] rid: ResourceId,
-) -> Result<(ResourceId, Option<String>, Option<String>), AnyError> {
+) -> Result<(ResourceId, Option<String>, Option<String>), NetError> {
let resource = state
.borrow()
.resource_table
.get::<NetworkListenerResource<UnixListener>>(rid)
- .map_err(|_| bad_resource("Listener has been closed"))?;
+ .map_err(|_| NetError::ListenerClosed)?;
let listener = RcRef::map(&resource, |r| &r.listener)
.try_borrow_mut()
- .ok_or_else(|| custom_error("Busy", "Listener already in use"))?;
+ .ok_or(NetError::ListenerBusy)?;
let cancel = RcRef::map(resource, |r| &r.cancel);
let (unix_stream, _socket_addr) = listener
.accept()
@@ -95,7 +90,7 @@ pub async fn op_net_accept_unix(
pub async fn op_net_connect_unix<NP>(
state: Rc<RefCell<OpState>>,
#[string] address_path: String,
-) -> Result<(ResourceId, Option<String>, Option<String>), AnyError>
+) -> Result<(ResourceId, Option<String>, Option<String>), NetError>
where
NP: NetPermissions + 'static,
{
@@ -103,10 +98,12 @@ where
let mut state_ = state.borrow_mut();
let address_path = state_
.borrow_mut::<NP>()
- .check_read(&address_path, "Deno.connect()")?;
+ .check_read(&address_path, "Deno.connect()")
+ .map_err(NetError::Permission)?;
_ = state_
.borrow_mut::<NP>()
- .check_write_path(&address_path, "Deno.connect()")?;
+ .check_write_path(&address_path, "Deno.connect()")
+ .map_err(NetError::Permission)?;
address_path
};
let unix_stream = UnixStream::connect(&address_path).await?;
@@ -127,15 +124,15 @@ pub async fn op_net_recv_unixpacket(
state: Rc<RefCell<OpState>>,
#[smi] rid: ResourceId,
#[buffer] mut buf: JsBuffer,
-) -> Result<(usize, Option<String>), AnyError> {
+) -> Result<(usize, Option<String>), NetError> {
let resource = state
.borrow()
.resource_table
.get::<UnixDatagramResource>(rid)
- .map_err(|_| bad_resource("Socket has been closed"))?;
+ .map_err(|_| NetError::SocketClosed)?;
let socket = RcRef::map(&resource, |r| &r.socket)
.try_borrow_mut()
- .ok_or_else(|| custom_error("Busy", "Socket already in use"))?;
+ .ok_or(NetError::SocketBusy)?;
let cancel = RcRef::map(resource, |r| &r.cancel);
let (nread, remote_addr) =
socket.recv_from(&mut buf).try_or_cancel(cancel).await?;
@@ -150,24 +147,25 @@ pub async fn op_net_send_unixpacket<NP>(
#[smi] rid: ResourceId,
#[string] address_path: String,
#[buffer] zero_copy: JsBuffer,
-) -> Result<usize, AnyError>
+) -> Result<usize, NetError>
where
NP: NetPermissions + 'static,
{
let address_path = {
let mut s = state.borrow_mut();
s.borrow_mut::<NP>()
- .check_write(&address_path, "Deno.DatagramConn.send()")?
+ .check_write(&address_path, "Deno.DatagramConn.send()")
+ .map_err(NetError::Permission)?
};
let resource = state
.borrow()
.resource_table
.get::<UnixDatagramResource>(rid)
- .map_err(|_| custom_error("NotConnected", "Socket has been closed"))?;
+ .map_err(|_| NetError::SocketClosedNotConnected)?;
let socket = RcRef::map(&resource, |r| &r.socket)
.try_borrow_mut()
- .ok_or_else(|| custom_error("Busy", "Socket already in use"))?;
+ .ok_or(NetError::SocketBusy)?;
let nwritten = socket.send_to(&zero_copy, address_path).await?;
Ok(nwritten)
@@ -179,14 +177,18 @@ pub fn op_net_listen_unix<NP>(
state: &mut OpState,
#[string] address_path: String,
#[string] api_name: String,
-) -> Result<(ResourceId, Option<String>), AnyError>
+) -> Result<(ResourceId, Option<String>), NetError>
where
NP: NetPermissions + 'static,
{
let permissions = state.borrow_mut::<NP>();
let api_call_expr = format!("{}()", api_name);
- let address_path = permissions.check_read(&address_path, &api_call_expr)?;
- _ = permissions.check_write_path(&address_path, &api_call_expr)?;
+ let address_path = permissions
+ .check_read(&address_path, &api_call_expr)
+ .map_err(NetError::Permission)?;
+ _ = permissions
+ .check_write_path(&address_path, &api_call_expr)
+ .map_err(NetError::Permission)?;
let listener = UnixListener::bind(address_path)?;
let local_addr = listener.local_addr()?;
let pathname = local_addr.as_pathname().map(pathstring).transpose()?;
@@ -198,14 +200,17 @@ where
pub fn net_listen_unixpacket<NP>(
state: &mut OpState,
address_path: String,
-) -> Result<(ResourceId, Option<String>), AnyError>
+) -> Result<(ResourceId, Option<String>), NetError>
where
NP: NetPermissions + 'static,
{
let permissions = state.borrow_mut::<NP>();
- let address_path =
- permissions.check_read(&address_path, "Deno.listenDatagram()")?;
- _ = permissions.check_write_path(&address_path, "Deno.listenDatagram()")?;
+ let address_path = permissions
+ .check_read(&address_path, "Deno.listenDatagram()")
+ .map_err(NetError::Permission)?;
+ _ = permissions
+ .check_write_path(&address_path, "Deno.listenDatagram()")
+ .map_err(NetError::Permission)?;
let socket = UnixDatagram::bind(address_path)?;
let local_addr = socket.local_addr()?;
let pathname = local_addr.as_pathname().map(pathstring).transpose()?;
@@ -222,7 +227,7 @@ where
pub fn op_net_listen_unixpacket<NP>(
state: &mut OpState,
#[string] path: String,
-) -> Result<(ResourceId, Option<String>), AnyError>
+) -> Result<(ResourceId, Option<String>), NetError>
where
NP: NetPermissions + 'static,
{
@@ -235,13 +240,13 @@ where
pub fn op_node_unstable_net_listen_unixpacket<NP>(
state: &mut OpState,
#[string] path: String,
-) -> Result<(ResourceId, Option<String>), AnyError>
+) -> Result<(ResourceId, Option<String>), NetError>
where
NP: NetPermissions + 'static,
{
net_listen_unixpacket::<NP>(state, path)
}
-pub fn pathstring(pathname: &Path) -> Result<String, AnyError> {
+pub fn pathstring(pathname: &Path) -> Result<String, NetError> {
into_string(pathname.into())
}
diff --git a/ext/net/resolve_addr.rs b/ext/net/resolve_addr.rs
index 8bbdd5192..3a97081ea 100644
--- a/ext/net/resolve_addr.rs
+++ b/ext/net/resolve_addr.rs
@@ -1,6 +1,5 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
-use deno_core::error::AnyError;
use std::net::SocketAddr;
use std::net::ToSocketAddrs;
use tokio::net::lookup_host;
@@ -9,7 +8,7 @@ use tokio::net::lookup_host;
pub async fn resolve_addr(
hostname: &str,
port: u16,
-) -> Result<impl Iterator<Item = SocketAddr> + '_, AnyError> {
+) -> Result<impl Iterator<Item = SocketAddr> + '_, std::io::Error> {
let addr_port_pair = make_addr_port_pair(hostname, port);
let result = lookup_host(addr_port_pair).await?;
Ok(result)
@@ -19,7 +18,7 @@ pub async fn resolve_addr(
pub fn resolve_addr_sync(
hostname: &str,
port: u16,
-) -> Result<impl Iterator<Item = SocketAddr>, AnyError> {
+) -> Result<impl Iterator<Item = SocketAddr>, std::io::Error> {
let addr_port_pair = make_addr_port_pair(hostname, port);
let result = addr_port_pair.to_socket_addrs()?;
Ok(result)
diff --git a/ext/node/Cargo.toml b/ext/node/Cargo.toml
index c5f07210b..36910a844 100644
--- a/ext/node/Cargo.toml
+++ b/ext/node/Cargo.toml
@@ -2,7 +2,7 @@
[package]
name = "deno_node"
-version = "0.108.0"
+version = "0.114.0"
authors.workspace = true
edition.workspace = true
license.workspace = true
@@ -22,6 +22,7 @@ aes.workspace = true
async-trait.workspace = true
base64.workspace = true
blake2 = "0.10.6"
+boxed_error.workspace = true
brotli.workspace = true
bytes.workspace = true
cbc.workspace = true
@@ -94,6 +95,7 @@ spki.workspace = true
stable_deref_trait = "1.2.0"
thiserror.workspace = true
tokio.workspace = true
+tokio-eld = "0.2"
url.workspace = true
webpki-root-certs.workspace = true
winapi.workspace = true
diff --git a/ext/node/lib.rs b/ext/node/lib.rs
index 03462f36f..63f5794b7 100644
--- a/ext/node/lib.rs
+++ b/ext/node/lib.rs
@@ -9,25 +9,23 @@ use std::path::Path;
use std::path::PathBuf;
use deno_core::error::AnyError;
-use deno_core::located_script_name;
use deno_core::op2;
use deno_core::url::Url;
#[allow(unused_imports)]
use deno_core::v8;
use deno_core::v8::ExternalReference;
-use deno_core::JsRuntime;
-use deno_fs::sync::MaybeSend;
-use deno_fs::sync::MaybeSync;
-use node_resolver::NpmResolverRc;
+use node_resolver::errors::ClosestPkgJsonError;
+use node_resolver::NpmPackageFolderResolverRc;
use once_cell::sync::Lazy;
extern crate libz_sys as zlib;
mod global;
-mod ops;
+pub mod ops;
mod polyfill;
pub use deno_package_json::PackageJson;
+use deno_permissions::PermissionCheckError;
pub use node_resolver::PathClean;
pub use ops::ipc::ChildPipeFd;
pub use ops::ipc::IpcJsonStreamResource;
@@ -49,10 +47,18 @@ pub trait NodePermissions {
&mut self,
url: &Url,
api_name: &str,
- ) -> Result<(), AnyError>;
+ ) -> Result<(), PermissionCheckError>;
+ fn check_net(
+ &mut self,
+ host: (&str, Option<u16>),
+ api_name: &str,
+ ) -> Result<(), PermissionCheckError>;
#[must_use = "the resolved return value to mitigate time-of-check to time-of-use issues"]
#[inline(always)]
- fn check_read(&mut self, path: &str) -> Result<PathBuf, AnyError> {
+ fn check_read(
+ &mut self,
+ path: &str,
+ ) -> Result<PathBuf, PermissionCheckError> {
self.check_read_with_api_name(path, None)
}
#[must_use = "the resolved return value to mitigate time-of-check to time-of-use issues"]
@@ -60,20 +66,24 @@ pub trait NodePermissions {
&mut self,
path: &str,
api_name: Option<&str>,
- ) -> Result<PathBuf, AnyError>;
+ ) -> Result<PathBuf, PermissionCheckError>;
#[must_use = "the resolved return value to mitigate time-of-check to time-of-use issues"]
fn check_read_path<'a>(
&mut self,
path: &'a Path,
- ) -> Result<Cow<'a, Path>, AnyError>;
+ ) -> Result<Cow<'a, Path>, PermissionCheckError>;
fn query_read_all(&mut self) -> bool;
- fn check_sys(&mut self, kind: &str, api_name: &str) -> Result<(), AnyError>;
+ fn check_sys(
+ &mut self,
+ kind: &str,
+ api_name: &str,
+ ) -> Result<(), PermissionCheckError>;
#[must_use = "the resolved return value to mitigate time-of-check to time-of-use issues"]
fn check_write_with_api_name(
&mut self,
path: &str,
api_name: Option<&str>,
- ) -> Result<PathBuf, AnyError>;
+ ) -> Result<PathBuf, PermissionCheckError>;
}
impl NodePermissions for deno_permissions::PermissionsContainer {
@@ -82,16 +92,24 @@ impl NodePermissions for deno_permissions::PermissionsContainer {
&mut self,
url: &Url,
api_name: &str,
- ) -> Result<(), AnyError> {
+ ) -> Result<(), PermissionCheckError> {
deno_permissions::PermissionsContainer::check_net_url(self, url, api_name)
}
+ fn check_net(
+ &mut self,
+ host: (&str, Option<u16>),
+ api_name: &str,
+ ) -> Result<(), PermissionCheckError> {
+ deno_permissions::PermissionsContainer::check_net(self, &host, api_name)
+ }
+
#[inline(always)]
fn check_read_with_api_name(
&mut self,
path: &str,
api_name: Option<&str>,
- ) -> Result<PathBuf, AnyError> {
+ ) -> Result<PathBuf, PermissionCheckError> {
deno_permissions::PermissionsContainer::check_read_with_api_name(
self, path, api_name,
)
@@ -100,7 +118,7 @@ impl NodePermissions for deno_permissions::PermissionsContainer {
fn check_read_path<'a>(
&mut self,
path: &'a Path,
- ) -> Result<Cow<'a, Path>, AnyError> {
+ ) -> Result<Cow<'a, Path>, PermissionCheckError> {
deno_permissions::PermissionsContainer::check_read_path(self, path, None)
}
@@ -113,28 +131,37 @@ impl NodePermissions for deno_permissions::PermissionsContainer {
&mut self,
path: &str,
api_name: Option<&str>,
- ) -> Result<PathBuf, AnyError> {
+ ) -> Result<PathBuf, PermissionCheckError> {
deno_permissions::PermissionsContainer::check_write_with_api_name(
self, path, api_name,
)
}
- fn check_sys(&mut self, kind: &str, api_name: &str) -> Result<(), AnyError> {
+ fn check_sys(
+ &mut self,
+ kind: &str,
+ api_name: &str,
+ ) -> Result<(), PermissionCheckError> {
deno_permissions::PermissionsContainer::check_sys(self, kind, api_name)
}
}
#[allow(clippy::disallowed_types)]
-pub type NodeRequireResolverRc =
- deno_fs::sync::MaybeArc<dyn NodeRequireResolver>;
+pub type NodeRequireLoaderRc = std::rc::Rc<dyn NodeRequireLoader>;
-pub trait NodeRequireResolver: std::fmt::Debug + MaybeSend + MaybeSync {
+pub trait NodeRequireLoader {
#[must_use = "the resolved return value to mitigate time-of-check to time-of-use issues"]
fn ensure_read_permission<'a>(
&self,
permissions: &mut dyn NodePermissions,
path: &'a Path,
) -> Result<Cow<'a, Path>, AnyError>;
+
+ fn load_text_file_lossy(&self, path: &Path) -> Result<String, AnyError>;
+
+ /// Get if the module kind is maybe CJS and loading should determine
+ /// if its CJS or ESM.
+ fn is_maybe_cjs(&self, specifier: &Url) -> Result<bool, ClosestPkgJsonError>;
}
pub static NODE_ENV_VAR_ALLOWLIST: Lazy<HashSet<String>> = Lazy::new(|| {
@@ -152,10 +179,12 @@ fn op_node_build_os() -> String {
env!("TARGET").split('-').nth(2).unwrap().to_string()
}
+#[derive(Clone)]
pub struct NodeExtInitServices {
- pub node_require_resolver: NodeRequireResolverRc,
+ pub node_require_loader: NodeRequireLoaderRc,
pub node_resolver: NodeResolverRc,
- pub npm_resolver: NpmResolverRc,
+ pub npm_resolver: NpmPackageFolderResolverRc,
+ pub pkg_json_resolver: PackageJsonResolverRc,
}
deno_core::extension!(deno_node,
@@ -321,6 +350,7 @@ deno_core::extension!(deno_node,
ops::zlib::op_zlib_write,
ops::zlib::op_zlib_init,
ops::zlib::op_zlib_reset,
+ ops::zlib::op_zlib_crc32,
ops::zlib::brotli::op_brotli_compress,
ops::zlib::brotli::op_brotli_compress_async,
ops::zlib::brotli::op_create_brotli_compress,
@@ -348,7 +378,7 @@ deno_core::extension!(deno_node,
ops::http2::op_http2_send_response,
ops::os::op_node_os_get_priority<P>,
ops::os::op_node_os_set_priority<P>,
- ops::os::op_node_os_username<P>,
+ ops::os::op_node_os_user_info<P>,
ops::os::op_geteuid<P>,
ops::os::op_getegid<P>,
ops::os::op_cpus<P>,
@@ -360,6 +390,7 @@ deno_core::extension!(deno_node,
ops::require::op_require_proxy_path,
ops::require::op_require_is_deno_dir_package,
ops::require::op_require_resolve_deno_dir,
+ ops::require::op_require_is_maybe_cjs,
ops::require::op_require_is_request_relative,
ops::require::op_require_resolve_lookup_paths,
ops::require::op_require_try_self_parent_path<P>,
@@ -373,7 +404,6 @@ deno_core::extension!(deno_node,
ops::require::op_require_read_file<P>,
ops::require::op_require_as_file_path,
ops::require::op_require_resolve_exports<P>,
- ops::require::op_require_read_closest_package_json<P>,
ops::require::op_require_read_package_scope<P>,
ops::require::op_require_package_imports_resolve<P>,
ops::require::op_require_break_on_next_statement,
@@ -387,6 +417,18 @@ deno_core::extension!(deno_node,
ops::process::op_node_process_kill,
ops::process::op_process_abort,
ops::tls::op_get_root_certificates,
+ ops::inspector::op_inspector_open<P>,
+ ops::inspector::op_inspector_close,
+ ops::inspector::op_inspector_url,
+ ops::inspector::op_inspector_wait,
+ ops::inspector::op_inspector_connect<P>,
+ ops::inspector::op_inspector_dispatch,
+ ops::inspector::op_inspector_disconnect,
+ ops::inspector::op_inspector_emit_protocol_event,
+ ops::inspector::op_inspector_enabled,
+ ],
+ objects = [
+ ops::perf_hooks::EldHistogram
],
esm_entry_point = "ext:deno_node/02_init.js",
esm = [
@@ -469,6 +511,7 @@ deno_core::extension!(deno_node,
"internal_binding/constants.ts",
"internal_binding/crypto.ts",
"internal_binding/handle_wrap.ts",
+ "internal_binding/http_parser.ts",
"internal_binding/mod.ts",
"internal_binding/node_file.ts",
"internal_binding/node_options.ts",
@@ -594,8 +637,8 @@ deno_core::extension!(deno_node,
"node:http" = "http.ts",
"node:http2" = "http2.ts",
"node:https" = "https.ts",
- "node:inspector" = "inspector.ts",
- "node:inspector/promises" = "inspector.ts",
+ "node:inspector" = "inspector.js",
+ "node:inspector/promises" = "inspector/promises.js",
"node:module" = "01_require.js",
"node:net" = "net.ts",
"node:os" = "os.ts",
@@ -638,9 +681,10 @@ deno_core::extension!(deno_node,
state.put(options.fs.clone());
if let Some(init) = &options.maybe_init {
- state.put(init.node_require_resolver.clone());
+ state.put(init.node_require_loader.clone());
state.put(init.node_resolver.clone());
state.put(init.npm_resolver.clone());
+ state.put(init.pkg_json_resolver.clone());
}
},
global_template_middleware = global_template_middleware,
@@ -760,33 +804,16 @@ deno_core::extension!(deno_node,
},
);
-pub fn load_cjs_module(
- js_runtime: &mut JsRuntime,
- module: &str,
- main: bool,
- inspect_brk: bool,
-) -> Result<(), AnyError> {
- fn escape_for_single_quote_string(text: &str) -> String {
- text.replace('\\', r"\\").replace('\'', r"\'")
- }
-
- let source_code = format!(
- r#"(function loadCjsModule(moduleName, isMain, inspectBrk) {{
- Deno[Deno.internal].node.loadCjsModule(moduleName, isMain, inspectBrk);
- }})('{module}', {main}, {inspect_brk});"#,
- main = main,
- module = escape_for_single_quote_string(module),
- inspect_brk = inspect_brk,
- );
-
- js_runtime.execute_script(located_script_name!(), source_code)?;
- Ok(())
-}
-
pub type NodeResolver = node_resolver::NodeResolver<DenoFsNodeResolverEnv>;
#[allow(clippy::disallowed_types)]
pub type NodeResolverRc =
deno_fs::sync::MaybeArc<node_resolver::NodeResolver<DenoFsNodeResolverEnv>>;
+pub type PackageJsonResolver =
+ node_resolver::PackageJsonResolver<DenoFsNodeResolverEnv>;
+#[allow(clippy::disallowed_types)]
+pub type PackageJsonResolverRc = deno_fs::sync::MaybeArc<
+ node_resolver::PackageJsonResolver<DenoFsNodeResolverEnv>,
+>;
#[derive(Debug)]
pub struct DenoFsNodeResolverEnv {
diff --git a/ext/node/ops/blocklist.rs b/ext/node/ops/blocklist.rs
index 332cdda8f..6c64d68ec 100644
--- a/ext/node/ops/blocklist.rs
+++ b/ext/node/ops/blocklist.rs
@@ -7,9 +7,6 @@ use std::net::Ipv4Addr;
use std::net::Ipv6Addr;
use std::net::SocketAddr;
-use deno_core::anyhow::anyhow;
-use deno_core::anyhow::bail;
-use deno_core::error::AnyError;
use deno_core::op2;
use deno_core::OpState;
@@ -27,13 +24,25 @@ impl deno_core::GarbageCollected for BlockListResource {}
#[derive(Serialize)]
struct SocketAddressSerialization(String, String);
+#[derive(Debug, thiserror::Error)]
+pub enum BlocklistError {
+ #[error("{0}")]
+ AddrParse(#[from] std::net::AddrParseError),
+ #[error("{0}")]
+ IpNetwork(#[from] ipnetwork::IpNetworkError),
+ #[error("Invalid address")]
+ InvalidAddress,
+ #[error("IP version mismatch between start and end addresses")]
+ IpVersionMismatch,
+}
+
#[op2(fast)]
pub fn op_socket_address_parse(
state: &mut OpState,
#[string] addr: &str,
#[smi] port: u16,
#[string] family: &str,
-) -> Result<bool, AnyError> {
+) -> Result<bool, BlocklistError> {
let ip = addr.parse::<IpAddr>()?;
let parsed: SocketAddr = SocketAddr::new(ip, port);
let parsed_ip_str = parsed.ip().to_string();
@@ -52,7 +61,7 @@ pub fn op_socket_address_parse(
Ok(false)
}
} else {
- Err(anyhow!("Invalid address"))
+ Err(BlocklistError::InvalidAddress)
}
}
@@ -60,8 +69,8 @@ pub fn op_socket_address_parse(
#[serde]
pub fn op_socket_address_get_serialization(
state: &mut OpState,
-) -> Result<SocketAddressSerialization, AnyError> {
- Ok(state.take::<SocketAddressSerialization>())
+) -> SocketAddressSerialization {
+ state.take::<SocketAddressSerialization>()
}
#[op2]
@@ -77,7 +86,7 @@ pub fn op_blocklist_new() -> BlockListResource {
pub fn op_blocklist_add_address(
#[cppgc] wrap: &BlockListResource,
#[string] addr: &str,
-) -> Result<(), AnyError> {
+) -> Result<(), BlocklistError> {
wrap.blocklist.borrow_mut().add_address(addr)
}
@@ -86,7 +95,7 @@ pub fn op_blocklist_add_range(
#[cppgc] wrap: &BlockListResource,
#[string] start: &str,
#[string] end: &str,
-) -> Result<bool, AnyError> {
+) -> Result<bool, BlocklistError> {
wrap.blocklist.borrow_mut().add_range(start, end)
}
@@ -95,7 +104,7 @@ pub fn op_blocklist_add_subnet(
#[cppgc] wrap: &BlockListResource,
#[string] addr: &str,
#[smi] prefix: u8,
-) -> Result<(), AnyError> {
+) -> Result<(), BlocklistError> {
wrap.blocklist.borrow_mut().add_subnet(addr, prefix)
}
@@ -104,7 +113,7 @@ pub fn op_blocklist_check(
#[cppgc] wrap: &BlockListResource,
#[string] addr: &str,
#[string] r#type: &str,
-) -> Result<bool, AnyError> {
+) -> Result<bool, BlocklistError> {
wrap.blocklist.borrow().check(addr, r#type)
}
@@ -123,7 +132,7 @@ impl BlockList {
&mut self,
addr: IpAddr,
prefix: Option<u8>,
- ) -> Result<(), AnyError> {
+ ) -> Result<(), BlocklistError> {
match addr {
IpAddr::V4(addr) => {
let ipv4_prefix = prefix.unwrap_or(32);
@@ -154,7 +163,7 @@ impl BlockList {
Ok(())
}
- pub fn add_address(&mut self, address: &str) -> Result<(), AnyError> {
+ pub fn add_address(&mut self, address: &str) -> Result<(), BlocklistError> {
let ip: IpAddr = address.parse()?;
self.map_addr_add_network(ip, None)?;
Ok(())
@@ -164,7 +173,7 @@ impl BlockList {
&mut self,
start: &str,
end: &str,
- ) -> Result<bool, AnyError> {
+ ) -> Result<bool, BlocklistError> {
let start_ip: IpAddr = start.parse()?;
let end_ip: IpAddr = end.parse()?;
@@ -193,25 +202,33 @@ impl BlockList {
self.map_addr_add_network(IpAddr::V6(addr), None)?;
}
}
- _ => bail!("IP version mismatch between start and end addresses"),
+ _ => return Err(BlocklistError::IpVersionMismatch),
}
Ok(true)
}
- pub fn add_subnet(&mut self, addr: &str, prefix: u8) -> Result<(), AnyError> {
+ pub fn add_subnet(
+ &mut self,
+ addr: &str,
+ prefix: u8,
+ ) -> Result<(), BlocklistError> {
let ip: IpAddr = addr.parse()?;
self.map_addr_add_network(ip, Some(prefix))?;
Ok(())
}
- pub fn check(&self, addr: &str, r#type: &str) -> Result<bool, AnyError> {
+ pub fn check(
+ &self,
+ addr: &str,
+ r#type: &str,
+ ) -> Result<bool, BlocklistError> {
let addr: IpAddr = addr.parse()?;
let family = r#type.to_lowercase();
if family == "ipv4" && addr.is_ipv4() || family == "ipv6" && addr.is_ipv6()
{
Ok(self.rules.iter().any(|net| net.contains(addr)))
} else {
- Err(anyhow!("Invalid address"))
+ Err(BlocklistError::InvalidAddress)
}
}
}
diff --git a/ext/node/ops/crypto/cipher.rs b/ext/node/ops/crypto/cipher.rs
index b80aa33fe..ec45146b4 100644
--- a/ext/node/ops/crypto/cipher.rs
+++ b/ext/node/ops/crypto/cipher.rs
@@ -4,9 +4,6 @@ use aes::cipher::block_padding::Pkcs7;
use aes::cipher::BlockDecryptMut;
use aes::cipher::BlockEncryptMut;
use aes::cipher::KeyIvInit;
-use deno_core::error::range_error;
-use deno_core::error::type_error;
-use deno_core::error::AnyError;
use deno_core::Resource;
use digest::generic_array::GenericArray;
use digest::KeyInit;
@@ -50,8 +47,22 @@ pub struct DecipherContext {
decipher: Rc<RefCell<Decipher>>,
}
+#[derive(Debug, thiserror::Error)]
+pub enum CipherContextError {
+ #[error("Cipher context is already in use")]
+ ContextInUse,
+ #[error("{0}")]
+ Resource(deno_core::error::AnyError),
+ #[error(transparent)]
+ Cipher(#[from] CipherError),
+}
+
impl CipherContext {
- pub fn new(algorithm: &str, key: &[u8], iv: &[u8]) -> Result<Self, AnyError> {
+ pub fn new(
+ algorithm: &str,
+ key: &[u8],
+ iv: &[u8],
+ ) -> Result<Self, CipherContextError> {
Ok(Self {
cipher: Rc::new(RefCell::new(Cipher::new(algorithm, key, iv)?)),
})
@@ -74,16 +85,31 @@ impl CipherContext {
auto_pad: bool,
input: &[u8],
output: &mut [u8],
- ) -> Result<Tag, AnyError> {
+ ) -> Result<Tag, CipherContextError> {
Rc::try_unwrap(self.cipher)
- .map_err(|_| type_error("Cipher context is already in use"))?
+ .map_err(|_| CipherContextError::ContextInUse)?
.into_inner()
.r#final(auto_pad, input, output)
+ .map_err(Into::into)
}
}
+#[derive(Debug, thiserror::Error)]
+pub enum DecipherContextError {
+ #[error("Decipher context is already in use")]
+ ContextInUse,
+ #[error("{0}")]
+ Resource(deno_core::error::AnyError),
+ #[error(transparent)]
+ Decipher(#[from] DecipherError),
+}
+
impl DecipherContext {
- pub fn new(algorithm: &str, key: &[u8], iv: &[u8]) -> Result<Self, AnyError> {
+ pub fn new(
+ algorithm: &str,
+ key: &[u8],
+ iv: &[u8],
+ ) -> Result<Self, DecipherContextError> {
Ok(Self {
decipher: Rc::new(RefCell::new(Decipher::new(algorithm, key, iv)?)),
})
@@ -103,11 +129,12 @@ impl DecipherContext {
input: &[u8],
output: &mut [u8],
auth_tag: &[u8],
- ) -> Result<(), AnyError> {
+ ) -> Result<(), DecipherContextError> {
Rc::try_unwrap(self.decipher)
- .map_err(|_| type_error("Decipher context is already in use"))?
+ .map_err(|_| DecipherContextError::ContextInUse)?
.into_inner()
.r#final(auto_pad, input, output, auth_tag)
+ .map_err(Into::into)
}
}
@@ -123,12 +150,26 @@ impl Resource for DecipherContext {
}
}
+#[derive(Debug, thiserror::Error)]
+pub enum CipherError {
+ #[error("IV length must be 12 bytes")]
+ InvalidIvLength,
+ #[error("Invalid key length")]
+ InvalidKeyLength,
+ #[error("Invalid initialization vector")]
+ InvalidInitializationVector,
+ #[error("Cannot pad the input data")]
+ CannotPadInputData,
+ #[error("Unknown cipher {0}")]
+ UnknownCipher(String),
+}
+
impl Cipher {
fn new(
algorithm_name: &str,
key: &[u8],
iv: &[u8],
- ) -> Result<Self, AnyError> {
+ ) -> Result<Self, CipherError> {
use Cipher::*;
Ok(match algorithm_name {
"aes-128-cbc" => {
@@ -139,7 +180,7 @@ impl Cipher {
"aes-256-ecb" => Aes256Ecb(Box::new(ecb::Encryptor::new(key.into()))),
"aes-128-gcm" => {
if iv.len() != 12 {
- return Err(type_error("IV length must be 12 bytes"));
+ return Err(CipherError::InvalidIvLength);
}
let cipher =
@@ -149,7 +190,7 @@ impl Cipher {
}
"aes-256-gcm" => {
if iv.len() != 12 {
- return Err(type_error("IV length must be 12 bytes"));
+ return Err(CipherError::InvalidIvLength);
}
let cipher =
@@ -159,15 +200,15 @@ impl Cipher {
}
"aes256" | "aes-256-cbc" => {
if key.len() != 32 {
- return Err(range_error("Invalid key length"));
+ return Err(CipherError::InvalidKeyLength);
}
if iv.len() != 16 {
- return Err(type_error("Invalid initialization vector"));
+ return Err(CipherError::InvalidInitializationVector);
}
Aes256Cbc(Box::new(cbc::Encryptor::new(key.into(), iv.into())))
}
- _ => return Err(type_error(format!("Unknown cipher {algorithm_name}"))),
+ _ => return Err(CipherError::UnknownCipher(algorithm_name.to_string())),
})
}
@@ -235,14 +276,14 @@ impl Cipher {
auto_pad: bool,
input: &[u8],
output: &mut [u8],
- ) -> Result<Tag, AnyError> {
+ ) -> Result<Tag, CipherError> {
assert!(input.len() < 16);
use Cipher::*;
match (self, auto_pad) {
(Aes128Cbc(encryptor), true) => {
let _ = (*encryptor)
.encrypt_padded_b2b_mut::<Pkcs7>(input, output)
- .map_err(|_| type_error("Cannot pad the input data"))?;
+ .map_err(|_| CipherError::CannotPadInputData)?;
Ok(None)
}
(Aes128Cbc(mut encryptor), false) => {
@@ -255,7 +296,7 @@ impl Cipher {
(Aes128Ecb(encryptor), true) => {
let _ = (*encryptor)
.encrypt_padded_b2b_mut::<Pkcs7>(input, output)
- .map_err(|_| type_error("Cannot pad the input data"))?;
+ .map_err(|_| CipherError::CannotPadInputData)?;
Ok(None)
}
(Aes128Ecb(mut encryptor), false) => {
@@ -268,7 +309,7 @@ impl Cipher {
(Aes192Ecb(encryptor), true) => {
let _ = (*encryptor)
.encrypt_padded_b2b_mut::<Pkcs7>(input, output)
- .map_err(|_| type_error("Cannot pad the input data"))?;
+ .map_err(|_| CipherError::CannotPadInputData)?;
Ok(None)
}
(Aes192Ecb(mut encryptor), false) => {
@@ -281,7 +322,7 @@ impl Cipher {
(Aes256Ecb(encryptor), true) => {
let _ = (*encryptor)
.encrypt_padded_b2b_mut::<Pkcs7>(input, output)
- .map_err(|_| type_error("Cannot pad the input data"))?;
+ .map_err(|_| CipherError::CannotPadInputData)?;
Ok(None)
}
(Aes256Ecb(mut encryptor), false) => {
@@ -296,7 +337,7 @@ impl Cipher {
(Aes256Cbc(encryptor), true) => {
let _ = (*encryptor)
.encrypt_padded_b2b_mut::<Pkcs7>(input, output)
- .map_err(|_| type_error("Cannot pad the input data"))?;
+ .map_err(|_| CipherError::CannotPadInputData)?;
Ok(None)
}
(Aes256Cbc(mut encryptor), false) => {
@@ -319,12 +360,32 @@ impl Cipher {
}
}
+#[derive(Debug, thiserror::Error)]
+pub enum DecipherError {
+ #[error("IV length must be 12 bytes")]
+ InvalidIvLength,
+ #[error("Invalid key length")]
+ InvalidKeyLength,
+ #[error("Invalid initialization vector")]
+ InvalidInitializationVector,
+ #[error("Cannot unpad the input data")]
+ CannotUnpadInputData,
+ #[error("Failed to authenticate data")]
+ DataAuthenticationFailed,
+ #[error("setAutoPadding(false) not supported for Aes128Gcm yet")]
+ SetAutoPaddingFalseAes128GcmUnsupported,
+ #[error("setAutoPadding(false) not supported for Aes256Gcm yet")]
+ SetAutoPaddingFalseAes256GcmUnsupported,
+ #[error("Unknown cipher {0}")]
+ UnknownCipher(String),
+}
+
impl Decipher {
fn new(
algorithm_name: &str,
key: &[u8],
iv: &[u8],
- ) -> Result<Self, AnyError> {
+ ) -> Result<Self, DecipherError> {
use Decipher::*;
Ok(match algorithm_name {
"aes-128-cbc" => {
@@ -335,7 +396,7 @@ impl Decipher {
"aes-256-ecb" => Aes256Ecb(Box::new(ecb::Decryptor::new(key.into()))),
"aes-128-gcm" => {
if iv.len() != 12 {
- return Err(type_error("IV length must be 12 bytes"));
+ return Err(DecipherError::InvalidIvLength);
}
let decipher =
@@ -345,7 +406,7 @@ impl Decipher {
}
"aes-256-gcm" => {
if iv.len() != 12 {
- return Err(type_error("IV length must be 12 bytes"));
+ return Err(DecipherError::InvalidIvLength);
}
let decipher =
@@ -355,15 +416,17 @@ impl Decipher {
}
"aes256" | "aes-256-cbc" => {
if key.len() != 32 {
- return Err(range_error("Invalid key length"));
+ return Err(DecipherError::InvalidKeyLength);
}
if iv.len() != 16 {
- return Err(type_error("Invalid initialization vector"));
+ return Err(DecipherError::InvalidInitializationVector);
}
Aes256Cbc(Box::new(cbc::Decryptor::new(key.into(), iv.into())))
}
- _ => return Err(type_error(format!("Unknown cipher {algorithm_name}"))),
+ _ => {
+ return Err(DecipherError::UnknownCipher(algorithm_name.to_string()))
+ }
})
}
@@ -432,14 +495,14 @@ impl Decipher {
input: &[u8],
output: &mut [u8],
auth_tag: &[u8],
- ) -> Result<(), AnyError> {
+ ) -> Result<(), DecipherError> {
use Decipher::*;
match (self, auto_pad) {
(Aes128Cbc(decryptor), true) => {
assert!(input.len() == 16);
let _ = (*decryptor)
.decrypt_padded_b2b_mut::<Pkcs7>(input, output)
- .map_err(|_| type_error("Cannot unpad the input data"))?;
+ .map_err(|_| DecipherError::CannotUnpadInputData)?;
Ok(())
}
(Aes128Cbc(mut decryptor), false) => {
@@ -453,7 +516,7 @@ impl Decipher {
assert!(input.len() == 16);
let _ = (*decryptor)
.decrypt_padded_b2b_mut::<Pkcs7>(input, output)
- .map_err(|_| type_error("Cannot unpad the input data"))?;
+ .map_err(|_| DecipherError::CannotUnpadInputData)?;
Ok(())
}
(Aes128Ecb(mut decryptor), false) => {
@@ -467,7 +530,7 @@ impl Decipher {
assert!(input.len() == 16);
let _ = (*decryptor)
.decrypt_padded_b2b_mut::<Pkcs7>(input, output)
- .map_err(|_| type_error("Cannot unpad the input data"))?;
+ .map_err(|_| DecipherError::CannotUnpadInputData)?;
Ok(())
}
(Aes192Ecb(mut decryptor), false) => {
@@ -481,7 +544,7 @@ impl Decipher {
assert!(input.len() == 16);
let _ = (*decryptor)
.decrypt_padded_b2b_mut::<Pkcs7>(input, output)
- .map_err(|_| type_error("Cannot unpad the input data"))?;
+ .map_err(|_| DecipherError::CannotUnpadInputData)?;
Ok(())
}
(Aes256Ecb(mut decryptor), false) => {
@@ -496,28 +559,28 @@ impl Decipher {
if tag.as_slice() == auth_tag {
Ok(())
} else {
- Err(type_error("Failed to authenticate data"))
+ Err(DecipherError::DataAuthenticationFailed)
}
}
- (Aes128Gcm(_), false) => Err(type_error(
- "setAutoPadding(false) not supported for Aes256Gcm yet",
- )),
+ (Aes128Gcm(_), false) => {
+ Err(DecipherError::SetAutoPaddingFalseAes128GcmUnsupported)
+ }
(Aes256Gcm(decipher), true) => {
let tag = decipher.finish();
if tag.as_slice() == auth_tag {
Ok(())
} else {
- Err(type_error("Failed to authenticate data"))
+ Err(DecipherError::DataAuthenticationFailed)
}
}
- (Aes256Gcm(_), false) => Err(type_error(
- "setAutoPadding(false) not supported for Aes256Gcm yet",
- )),
+ (Aes256Gcm(_), false) => {
+ Err(DecipherError::SetAutoPaddingFalseAes256GcmUnsupported)
+ }
(Aes256Cbc(decryptor), true) => {
assert!(input.len() == 16);
let _ = (*decryptor)
.decrypt_padded_b2b_mut::<Pkcs7>(input, output)
- .map_err(|_| type_error("Cannot unpad the input data"))?;
+ .map_err(|_| DecipherError::CannotUnpadInputData)?;
Ok(())
}
(Aes256Cbc(mut decryptor), false) => {
diff --git a/ext/node/ops/crypto/digest.rs b/ext/node/ops/crypto/digest.rs
index 293e8e063..a7d8fb51f 100644
--- a/ext/node/ops/crypto/digest.rs
+++ b/ext/node/ops/crypto/digest.rs
@@ -1,6 +1,4 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
-use deno_core::error::generic_error;
-use deno_core::error::AnyError;
use deno_core::GarbageCollected;
use digest::Digest;
use digest::DynDigest;
@@ -19,7 +17,7 @@ impl Hasher {
pub fn new(
algorithm: &str,
output_length: Option<usize>,
- ) -> Result<Self, AnyError> {
+ ) -> Result<Self, HashError> {
let hash = Hash::new(algorithm, output_length)?;
Ok(Self {
@@ -44,7 +42,7 @@ impl Hasher {
pub fn clone_inner(
&self,
output_length: Option<usize>,
- ) -> Result<Option<Self>, AnyError> {
+ ) -> Result<Option<Self>, HashError> {
let hash = self.hash.borrow();
let Some(hash) = hash.as_ref() else {
return Ok(None);
@@ -184,11 +182,19 @@ pub enum Hash {
use Hash::*;
+#[derive(Debug, thiserror::Error)]
+pub enum HashError {
+ #[error("Output length mismatch for non-extendable algorithm")]
+ OutputLengthMismatch,
+ #[error("Digest method not supported: {0}")]
+ DigestMethodUnsupported(String),
+}
+
impl Hash {
pub fn new(
algorithm_name: &str,
output_length: Option<usize>,
- ) -> Result<Self, AnyError> {
+ ) -> Result<Self, HashError> {
match algorithm_name {
"shake128" => return Ok(Shake128(Default::default(), output_length)),
"shake256" => return Ok(Shake256(Default::default(), output_length)),
@@ -201,17 +207,13 @@ impl Hash {
let digest: D = Digest::new();
if let Some(length) = output_length {
if length != digest.output_size() {
- return Err(generic_error(
- "Output length mismatch for non-extendable algorithm",
- ));
+ return Err(HashError::OutputLengthMismatch);
}
}
FixedSize(Box::new(digest))
},
_ => {
- return Err(generic_error(format!(
- "Digest method not supported: {algorithm_name}"
- )))
+ return Err(HashError::DigestMethodUnsupported(algorithm_name.to_string()))
}
);
@@ -243,14 +245,12 @@ impl Hash {
pub fn clone_hash(
&self,
output_length: Option<usize>,
- ) -> Result<Self, AnyError> {
+ ) -> Result<Self, HashError> {
let hash = match self {
FixedSize(context) => {
if let Some(length) = output_length {
if length != context.output_size() {
- return Err(generic_error(
- "Output length mismatch for non-extendable algorithm",
- ));
+ return Err(HashError::OutputLengthMismatch);
}
}
FixedSize(context.box_clone())
diff --git a/ext/node/ops/crypto/keys.rs b/ext/node/ops/crypto/keys.rs
index 867b34e04..f164972d4 100644
--- a/ext/node/ops/crypto/keys.rs
+++ b/ext/node/ops/crypto/keys.rs
@@ -4,9 +4,7 @@ use std::borrow::Cow;
use std::cell::RefCell;
use base64::Engine;
-use deno_core::error::generic_error;
use deno_core::error::type_error;
-use deno_core::error::AnyError;
use deno_core::op2;
use deno_core::serde_v8::BigInt as V8BigInt;
use deno_core::unsync::spawn_blocking;
@@ -46,6 +44,7 @@ use spki::der::Reader as _;
use spki::DecodePublicKey as _;
use spki::EncodePublicKey as _;
use spki::SubjectPublicKeyInfoRef;
+use x509_parser::error::X509Error;
use x509_parser::x509;
use super::dh;
@@ -236,9 +235,11 @@ impl RsaPssPrivateKey {
}
impl EcPublicKey {
- pub fn to_jwk(&self) -> Result<elliptic_curve::JwkEcKey, AnyError> {
+ pub fn to_jwk(&self) -> Result<JwkEcKey, AsymmetricPublicKeyJwkError> {
match self {
- EcPublicKey::P224(_) => Err(type_error("Unsupported JWK EC curve: P224")),
+ EcPublicKey::P224(_) => {
+ Err(AsymmetricPublicKeyJwkError::UnsupportedJwkEcCurveP224)
+ }
EcPublicKey::P256(key) => Ok(key.to_jwk()),
EcPublicKey::P384(key) => Ok(key.to_jwk()),
}
@@ -363,49 +364,201 @@ impl<'a> TryFrom<rsa::pkcs8::der::asn1::AnyRef<'a>> for RsaPssParameters<'a> {
}
}
+#[derive(Debug, thiserror::Error)]
+pub enum X509PublicKeyError {
+ #[error(transparent)]
+ X509(#[from] x509_parser::error::X509Error),
+ #[error(transparent)]
+ Rsa(#[from] rsa::Error),
+ #[error(transparent)]
+ Asn1(#[from] x509_parser::der_parser::asn1_rs::Error),
+ #[error(transparent)]
+ Ec(#[from] elliptic_curve::Error),
+ #[error("unsupported ec named curve")]
+ UnsupportedEcNamedCurve,
+ #[error("missing ec parameters")]
+ MissingEcParameters,
+ #[error("malformed DSS public key")]
+ MalformedDssPublicKey,
+ #[error("unsupported x509 public key type")]
+ UnsupportedX509KeyType,
+}
+
+#[derive(Debug, thiserror::Error)]
+pub enum RsaJwkError {
+ #[error(transparent)]
+ Base64(#[from] base64::DecodeError),
+ #[error(transparent)]
+ Rsa(#[from] rsa::Error),
+ #[error("missing RSA private component")]
+ MissingRsaPrivateComponent,
+}
+
+#[derive(Debug, thiserror::Error)]
+pub enum EcJwkError {
+ #[error(transparent)]
+ Ec(#[from] elliptic_curve::Error),
+ #[error("unsupported curve: {0}")]
+ UnsupportedCurve(String),
+}
+
+#[derive(Debug, thiserror::Error)]
+pub enum EdRawError {
+ #[error(transparent)]
+ Ed25519Signature(#[from] ed25519_dalek::SignatureError),
+ #[error("invalid Ed25519 key")]
+ InvalidEd25519Key,
+ #[error("unsupported curve")]
+ UnsupportedCurve,
+}
+
+#[derive(Debug, thiserror::Error)]
+pub enum AsymmetricPrivateKeyError {
+ #[error("invalid PEM private key: not valid utf8 starting at byte {0}")]
+ InvalidPemPrivateKeyInvalidUtf8(usize),
+ #[error("invalid encrypted PEM private key")]
+ InvalidEncryptedPemPrivateKey,
+ #[error("invalid PEM private key")]
+ InvalidPemPrivateKey,
+ #[error("encrypted private key requires a passphrase to decrypt")]
+ EncryptedPrivateKeyRequiresPassphraseToDecrypt,
+ #[error("invalid PKCS#1 private key")]
+ InvalidPkcs1PrivateKey,
+ #[error("invalid SEC1 private key")]
+ InvalidSec1PrivateKey,
+ #[error("unsupported PEM label: {0}")]
+ UnsupportedPemLabel(String),
+ #[error(transparent)]
+ RsaPssParamsParse(#[from] RsaPssParamsParseError),
+ #[error("invalid encrypted PKCS#8 private key")]
+ InvalidEncryptedPkcs8PrivateKey,
+ #[error("invalid PKCS#8 private key")]
+ InvalidPkcs8PrivateKey,
+ #[error("PKCS#1 private key does not support encryption with passphrase")]
+ Pkcs1PrivateKeyDoesNotSupportEncryptionWithPassphrase,
+ #[error("SEC1 private key does not support encryption with passphrase")]
+ Sec1PrivateKeyDoesNotSupportEncryptionWithPassphrase,
+ #[error("unsupported ec named curve")]
+ UnsupportedEcNamedCurve,
+ #[error("invalid private key")]
+ InvalidPrivateKey,
+ #[error("invalid DSA private key")]
+ InvalidDsaPrivateKey,
+ #[error("malformed or missing named curve in ec parameters")]
+ MalformedOrMissingNamedCurveInEcParameters,
+ #[error("unsupported key type: {0}")]
+ UnsupportedKeyType(String),
+ #[error("unsupported key format: {0}")]
+ UnsupportedKeyFormat(String),
+ #[error("invalid x25519 private key")]
+ InvalidX25519PrivateKey,
+ #[error("x25519 private key is the wrong length")]
+ X25519PrivateKeyIsWrongLength,
+ #[error("invalid Ed25519 private key")]
+ InvalidEd25519PrivateKey,
+ #[error("missing dh parameters")]
+ MissingDhParameters,
+ #[error("unsupported private key oid")]
+ UnsupportedPrivateKeyOid,
+}
+
+#[derive(Debug, thiserror::Error)]
+pub enum AsymmetricPublicKeyError {
+ #[error("invalid PEM private key: not valid utf8 starting at byte {0}")]
+ InvalidPemPrivateKeyInvalidUtf8(usize),
+ #[error("invalid PEM public key")]
+ InvalidPemPublicKey,
+ #[error("invalid PKCS#1 public key")]
+ InvalidPkcs1PublicKey,
+ #[error(transparent)]
+ AsymmetricPrivateKey(#[from] AsymmetricPrivateKeyError),
+ #[error("invalid x509 certificate")]
+ InvalidX509Certificate,
+ #[error(transparent)]
+ X509(#[from] x509_parser::nom::Err<X509Error>),
+ #[error(transparent)]
+ X509PublicKey(#[from] X509PublicKeyError),
+ #[error("unsupported PEM label: {0}")]
+ UnsupportedPemLabel(String),
+ #[error("invalid SPKI public key")]
+ InvalidSpkiPublicKey,
+ #[error("unsupported key type: {0}")]
+ UnsupportedKeyType(String),
+ #[error("unsupported key format: {0}")]
+ UnsupportedKeyFormat(String),
+ #[error(transparent)]
+ Spki(#[from] spki::Error),
+ #[error(transparent)]
+ Pkcs1(#[from] rsa::pkcs1::Error),
+ #[error(transparent)]
+ RsaPssParamsParse(#[from] RsaPssParamsParseError),
+ #[error("malformed DSS public key")]
+ MalformedDssPublicKey,
+ #[error("malformed or missing named curve in ec parameters")]
+ MalformedOrMissingNamedCurveInEcParameters,
+ #[error("malformed or missing public key in ec spki")]
+ MalformedOrMissingPublicKeyInEcSpki,
+ #[error(transparent)]
+ Ec(#[from] elliptic_curve::Error),
+ #[error("unsupported ec named curve")]
+ UnsupportedEcNamedCurve,
+ #[error("malformed or missing public key in x25519 spki")]
+ MalformedOrMissingPublicKeyInX25519Spki,
+ #[error("x25519 public key is too short")]
+ X25519PublicKeyIsTooShort,
+ #[error("invalid Ed25519 public key")]
+ InvalidEd25519PublicKey,
+ #[error("missing dh parameters")]
+ MissingDhParameters,
+ #[error("malformed dh parameters")]
+ MalformedDhParameters,
+ #[error("malformed or missing public key in dh spki")]
+ MalformedOrMissingPublicKeyInDhSpki,
+ #[error("unsupported private key oid")]
+ UnsupportedPrivateKeyOid,
+}
+
impl KeyObjectHandle {
pub fn new_asymmetric_private_key_from_js(
key: &[u8],
format: &str,
typ: &str,
passphrase: Option<&[u8]>,
- ) -> Result<KeyObjectHandle, AnyError> {
+ ) -> Result<KeyObjectHandle, AsymmetricPrivateKeyError> {
let document = match format {
"pem" => {
let pem = std::str::from_utf8(key).map_err(|err| {
- type_error(format!(
- "invalid PEM private key: not valid utf8 starting at byte {}",
- err.valid_up_to()
- ))
+ AsymmetricPrivateKeyError::InvalidPemPrivateKeyInvalidUtf8(
+ err.valid_up_to(),
+ )
})?;
if let Some(passphrase) = passphrase {
- SecretDocument::from_pkcs8_encrypted_pem(pem, passphrase)
- .map_err(|_| type_error("invalid encrypted PEM private key"))?
+ SecretDocument::from_pkcs8_encrypted_pem(pem, passphrase).map_err(
+ |_| AsymmetricPrivateKeyError::InvalidEncryptedPemPrivateKey,
+ )?
} else {
let (label, doc) = SecretDocument::from_pem(pem)
- .map_err(|_| type_error("invalid PEM private key"))?;
+ .map_err(|_| AsymmetricPrivateKeyError::InvalidPemPrivateKey)?;
match label {
EncryptedPrivateKeyInfo::PEM_LABEL => {
- return Err(type_error(
- "encrypted private key requires a passphrase to decrypt",
- ))
+ return Err(AsymmetricPrivateKeyError::EncryptedPrivateKeyRequiresPassphraseToDecrypt);
}
PrivateKeyInfo::PEM_LABEL => doc,
rsa::pkcs1::RsaPrivateKey::PEM_LABEL => {
- SecretDocument::from_pkcs1_der(doc.as_bytes())
- .map_err(|_| type_error("invalid PKCS#1 private key"))?
+ SecretDocument::from_pkcs1_der(doc.as_bytes()).map_err(|_| {
+ AsymmetricPrivateKeyError::InvalidPkcs1PrivateKey
+ })?
}
sec1::EcPrivateKey::PEM_LABEL => {
SecretDocument::from_sec1_der(doc.as_bytes())
- .map_err(|_| type_error("invalid SEC1 private key"))?
+ .map_err(|_| AsymmetricPrivateKeyError::InvalidSec1PrivateKey)?
}
_ => {
- return Err(type_error(format!(
- "unsupported PEM label: {}",
- label
- )))
+ return Err(AsymmetricPrivateKeyError::UnsupportedPemLabel(
+ label.to_string(),
+ ))
}
}
}
@@ -413,54 +566,57 @@ impl KeyObjectHandle {
"der" => match typ {
"pkcs8" => {
if let Some(passphrase) = passphrase {
- SecretDocument::from_pkcs8_encrypted_der(key, passphrase)
- .map_err(|_| type_error("invalid encrypted PKCS#8 private key"))?
+ SecretDocument::from_pkcs8_encrypted_der(key, passphrase).map_err(
+ |_| AsymmetricPrivateKeyError::InvalidEncryptedPkcs8PrivateKey,
+ )?
} else {
SecretDocument::from_pkcs8_der(key)
- .map_err(|_| type_error("invalid PKCS#8 private key"))?
+ .map_err(|_| AsymmetricPrivateKeyError::InvalidPkcs8PrivateKey)?
}
}
"pkcs1" => {
if passphrase.is_some() {
- return Err(type_error(
- "PKCS#1 private key does not support encryption with passphrase",
- ));
+ return Err(AsymmetricPrivateKeyError::Pkcs1PrivateKeyDoesNotSupportEncryptionWithPassphrase);
}
SecretDocument::from_pkcs1_der(key)
- .map_err(|_| type_error("invalid PKCS#1 private key"))?
+ .map_err(|_| AsymmetricPrivateKeyError::InvalidPkcs1PrivateKey)?
}
"sec1" => {
if passphrase.is_some() {
- return Err(type_error(
- "SEC1 private key does not support encryption with passphrase",
- ));
+ return Err(AsymmetricPrivateKeyError::Sec1PrivateKeyDoesNotSupportEncryptionWithPassphrase);
}
SecretDocument::from_sec1_der(key)
- .map_err(|_| type_error("invalid SEC1 private key"))?
+ .map_err(|_| AsymmetricPrivateKeyError::InvalidSec1PrivateKey)?
+ }
+ _ => {
+ return Err(AsymmetricPrivateKeyError::UnsupportedKeyType(
+ typ.to_string(),
+ ))
}
- _ => return Err(type_error(format!("unsupported key type: {}", typ))),
},
_ => {
- return Err(type_error(format!("unsupported key format: {}", format)))
+ return Err(AsymmetricPrivateKeyError::UnsupportedKeyFormat(
+ format.to_string(),
+ ))
}
};
let pk_info = PrivateKeyInfo::try_from(document.as_bytes())
- .map_err(|_| type_error("invalid private key"))?;
+ .map_err(|_| AsymmetricPrivateKeyError::InvalidPrivateKey)?;
let alg = pk_info.algorithm.oid;
let private_key = match alg {
RSA_ENCRYPTION_OID => {
let private_key =
rsa::RsaPrivateKey::from_pkcs1_der(pk_info.private_key)
- .map_err(|_| type_error("invalid PKCS#1 private key"))?;
+ .map_err(|_| AsymmetricPrivateKeyError::InvalidPkcs1PrivateKey)?;
AsymmetricPrivateKey::Rsa(private_key)
}
RSASSA_PSS_OID => {
let details = parse_rsa_pss_params(pk_info.algorithm.parameters)?;
let private_key =
rsa::RsaPrivateKey::from_pkcs1_der(pk_info.private_key)
- .map_err(|_| type_error("invalid PKCS#1 private key"))?;
+ .map_err(|_| AsymmetricPrivateKeyError::InvalidPkcs1PrivateKey)?;
AsymmetricPrivateKey::RsaPss(RsaPssPrivateKey {
key: private_key,
details,
@@ -468,40 +624,43 @@ impl KeyObjectHandle {
}
DSA_OID => {
let private_key = dsa::SigningKey::try_from(pk_info)
- .map_err(|_| type_error("invalid DSA private key"))?;
+ .map_err(|_| AsymmetricPrivateKeyError::InvalidDsaPrivateKey)?;
AsymmetricPrivateKey::Dsa(private_key)
}
EC_OID => {
let named_curve = pk_info.algorithm.parameters_oid().map_err(|_| {
- type_error("malformed or missing named curve in ec parameters")
+ AsymmetricPrivateKeyError::MalformedOrMissingNamedCurveInEcParameters
})?;
match named_curve {
ID_SECP224R1_OID => {
- let secret_key =
- p224::SecretKey::from_sec1_der(pk_info.private_key)
- .map_err(|_| type_error("invalid SEC1 private key"))?;
+ let secret_key = p224::SecretKey::from_sec1_der(
+ pk_info.private_key,
+ )
+ .map_err(|_| AsymmetricPrivateKeyError::InvalidSec1PrivateKey)?;
AsymmetricPrivateKey::Ec(EcPrivateKey::P224(secret_key))
}
ID_SECP256R1_OID => {
- let secret_key =
- p256::SecretKey::from_sec1_der(pk_info.private_key)
- .map_err(|_| type_error("invalid SEC1 private key"))?;
+ let secret_key = p256::SecretKey::from_sec1_der(
+ pk_info.private_key,
+ )
+ .map_err(|_| AsymmetricPrivateKeyError::InvalidSec1PrivateKey)?;
AsymmetricPrivateKey::Ec(EcPrivateKey::P256(secret_key))
}
ID_SECP384R1_OID => {
- let secret_key =
- p384::SecretKey::from_sec1_der(pk_info.private_key)
- .map_err(|_| type_error("invalid SEC1 private key"))?;
+ let secret_key = p384::SecretKey::from_sec1_der(
+ pk_info.private_key,
+ )
+ .map_err(|_| AsymmetricPrivateKeyError::InvalidSec1PrivateKey)?;
AsymmetricPrivateKey::Ec(EcPrivateKey::P384(secret_key))
}
- _ => return Err(type_error("unsupported ec named curve")),
+ _ => return Err(AsymmetricPrivateKeyError::UnsupportedEcNamedCurve),
}
}
X25519_OID => {
let string_ref = OctetStringRef::from_der(pk_info.private_key)
- .map_err(|_| type_error("invalid x25519 private key"))?;
+ .map_err(|_| AsymmetricPrivateKeyError::InvalidX25519PrivateKey)?;
if string_ref.as_bytes().len() != 32 {
- return Err(type_error("x25519 private key is the wrong length"));
+ return Err(AsymmetricPrivateKeyError::X25519PrivateKeyIsWrongLength);
}
let mut bytes = [0; 32];
bytes.copy_from_slice(string_ref.as_bytes());
@@ -509,22 +668,22 @@ impl KeyObjectHandle {
}
ED25519_OID => {
let signing_key = ed25519_dalek::SigningKey::try_from(pk_info)
- .map_err(|_| type_error("invalid Ed25519 private key"))?;
+ .map_err(|_| AsymmetricPrivateKeyError::InvalidEd25519PrivateKey)?;
AsymmetricPrivateKey::Ed25519(signing_key)
}
DH_KEY_AGREEMENT_OID => {
let params = pk_info
.algorithm
.parameters
- .ok_or_else(|| type_error("missing dh parameters"))?;
+ .ok_or(AsymmetricPrivateKeyError::MissingDhParameters)?;
let params = pkcs3::DhParameter::from_der(&params.to_der().unwrap())
- .map_err(|_| type_error("malformed dh parameters"))?;
+ .map_err(|_| AsymmetricPrivateKeyError::MissingDhParameters)?;
AsymmetricPrivateKey::Dh(DhPrivateKey {
key: dh::PrivateKey::from_bytes(pk_info.private_key),
params,
})
}
- _ => return Err(type_error("unsupported private key oid")),
+ _ => return Err(AsymmetricPrivateKeyError::UnsupportedPrivateKeyOid),
};
Ok(KeyObjectHandle::AsymmetricPrivate(private_key))
@@ -532,7 +691,7 @@ impl KeyObjectHandle {
pub fn new_x509_public_key(
spki: &x509::SubjectPublicKeyInfo,
- ) -> Result<KeyObjectHandle, AnyError> {
+ ) -> Result<KeyObjectHandle, X509PublicKeyError> {
use x509_parser::der_parser::asn1_rs::oid;
use x509_parser::public_key::PublicKey;
@@ -565,18 +724,18 @@ impl KeyObjectHandle {
let public_key = p384::PublicKey::from_sec1_bytes(data)?;
AsymmetricPublicKey::Ec(EcPublicKey::P384(public_key))
}
- _ => return Err(type_error("unsupported ec named curve")),
+ _ => return Err(X509PublicKeyError::UnsupportedEcNamedCurve),
}
} else {
- return Err(type_error("missing ec parameters"));
+ return Err(X509PublicKeyError::MissingEcParameters);
}
}
PublicKey::DSA(_) => {
let verifying_key = dsa::VerifyingKey::from_public_key_der(spki.raw)
- .map_err(|_| type_error("malformed DSS public key"))?;
+ .map_err(|_| X509PublicKeyError::MalformedDssPublicKey)?;
AsymmetricPublicKey::Dsa(verifying_key)
}
- _ => return Err(type_error("unsupported x509 public key type")),
+ _ => return Err(X509PublicKeyError::UnsupportedX509KeyType),
};
Ok(KeyObjectHandle::AsymmetricPublic(key))
@@ -585,7 +744,7 @@ impl KeyObjectHandle {
pub fn new_rsa_jwk(
jwk: RsaJwkKey,
is_public: bool,
- ) -> Result<KeyObjectHandle, AnyError> {
+ ) -> Result<KeyObjectHandle, RsaJwkError> {
use base64::prelude::BASE64_URL_SAFE_NO_PAD;
let n = BASE64_URL_SAFE_NO_PAD.decode(jwk.n.as_bytes())?;
@@ -604,19 +763,19 @@ impl KeyObjectHandle {
let d = BASE64_URL_SAFE_NO_PAD.decode(
jwk
.d
- .ok_or_else(|| type_error("missing RSA private component"))?
+ .ok_or(RsaJwkError::MissingRsaPrivateComponent)?
.as_bytes(),
)?;
let p = BASE64_URL_SAFE_NO_PAD.decode(
jwk
.p
- .ok_or_else(|| type_error("missing RSA private component"))?
+ .ok_or(RsaJwkError::MissingRsaPrivateComponent)?
.as_bytes(),
)?;
let q = BASE64_URL_SAFE_NO_PAD.decode(
jwk
.q
- .ok_or_else(|| type_error("missing RSA private component"))?
+ .ok_or(RsaJwkError::MissingRsaPrivateComponent)?
.as_bytes(),
)?;
@@ -640,7 +799,7 @@ impl KeyObjectHandle {
pub fn new_ec_jwk(
jwk: &JwkEcKey,
is_public: bool,
- ) -> Result<KeyObjectHandle, AnyError> {
+ ) -> Result<KeyObjectHandle, EcJwkError> {
// https://datatracker.ietf.org/doc/html/rfc7518#section-6.2.1.1
let handle = match jwk.crv() {
"P-256" if is_public => {
@@ -660,7 +819,7 @@ impl KeyObjectHandle {
EcPrivateKey::P384(p384::SecretKey::from_jwk(jwk)?),
)),
_ => {
- return Err(type_error(format!("unsupported curve: {}", jwk.crv())));
+ return Err(EcJwkError::UnsupportedCurve(jwk.crv().to_string()));
}
};
@@ -671,12 +830,11 @@ impl KeyObjectHandle {
curve: &str,
data: &[u8],
is_public: bool,
- ) -> Result<KeyObjectHandle, AnyError> {
+ ) -> Result<KeyObjectHandle, EdRawError> {
match curve {
"Ed25519" => {
- let data = data
- .try_into()
- .map_err(|_| type_error("invalid Ed25519 key"))?;
+ let data =
+ data.try_into().map_err(|_| EdRawError::InvalidEd25519Key)?;
if !is_public {
Ok(KeyObjectHandle::AsymmetricPrivate(
AsymmetricPrivateKey::Ed25519(
@@ -692,9 +850,8 @@ impl KeyObjectHandle {
}
}
"X25519" => {
- let data: [u8; 32] = data
- .try_into()
- .map_err(|_| type_error("invalid x25519 key"))?;
+ let data: [u8; 32] =
+ data.try_into().map_err(|_| EdRawError::InvalidEd25519Key)?;
if !is_public {
Ok(KeyObjectHandle::AsymmetricPrivate(
AsymmetricPrivateKey::X25519(x25519_dalek::StaticSecret::from(
@@ -707,7 +864,7 @@ impl KeyObjectHandle {
))
}
}
- _ => Err(type_error("unsupported curve")),
+ _ => Err(EdRawError::UnsupportedCurve),
}
}
@@ -716,24 +873,23 @@ impl KeyObjectHandle {
format: &str,
typ: &str,
passphrase: Option<&[u8]>,
- ) -> Result<KeyObjectHandle, AnyError> {
+ ) -> Result<KeyObjectHandle, AsymmetricPublicKeyError> {
let document = match format {
"pem" => {
let pem = std::str::from_utf8(key).map_err(|err| {
- type_error(format!(
- "invalid PEM public key: not valid utf8 starting at byte {}",
- err.valid_up_to()
- ))
+ AsymmetricPublicKeyError::InvalidPemPrivateKeyInvalidUtf8(
+ err.valid_up_to(),
+ )
})?;
let (label, document) = Document::from_pem(pem)
- .map_err(|_| type_error("invalid PEM public key"))?;
+ .map_err(|_| AsymmetricPublicKeyError::InvalidPemPublicKey)?;
match label {
SubjectPublicKeyInfoRef::PEM_LABEL => document,
rsa::pkcs1::RsaPublicKey::PEM_LABEL => {
Document::from_pkcs1_der(document.as_bytes())
- .map_err(|_| type_error("invalid PKCS#1 public key"))?
+ .map_err(|_| AsymmetricPublicKeyError::InvalidPkcs1PublicKey)?
}
EncryptedPrivateKeyInfo::PEM_LABEL
| PrivateKeyInfo::PEM_LABEL
@@ -754,27 +910,36 @@ impl KeyObjectHandle {
}
"CERTIFICATE" => {
let (_, pem) = x509_parser::pem::parse_x509_pem(pem.as_bytes())
- .map_err(|_| type_error("invalid x509 certificate"))?;
+ .map_err(|_| AsymmetricPublicKeyError::InvalidX509Certificate)?;
let cert = pem.parse_x509()?;
let public_key = cert.tbs_certificate.subject_pki;
- return KeyObjectHandle::new_x509_public_key(&public_key);
+ return KeyObjectHandle::new_x509_public_key(&public_key)
+ .map_err(Into::into);
}
_ => {
- return Err(type_error(format!("unsupported PEM label: {}", label)))
+ return Err(AsymmetricPublicKeyError::UnsupportedPemLabel(
+ label.to_string(),
+ ))
}
}
}
"der" => match typ {
"pkcs1" => Document::from_pkcs1_der(key)
- .map_err(|_| type_error("invalid PKCS#1 public key"))?,
+ .map_err(|_| AsymmetricPublicKeyError::InvalidPkcs1PublicKey)?,
"spki" => Document::from_public_key_der(key)
- .map_err(|_| type_error("invalid SPKI public key"))?,
- _ => return Err(type_error(format!("unsupported key type: {}", typ))),
+ .map_err(|_| AsymmetricPublicKeyError::InvalidSpkiPublicKey)?,
+ _ => {
+ return Err(AsymmetricPublicKeyError::UnsupportedKeyType(
+ typ.to_string(),
+ ))
+ }
},
_ => {
- return Err(type_error(format!("unsupported key format: {}", format)))
+ return Err(AsymmetricPublicKeyError::UnsupportedKeyType(
+ format.to_string(),
+ ))
}
};
@@ -799,16 +964,16 @@ impl KeyObjectHandle {
}
DSA_OID => {
let verifying_key = dsa::VerifyingKey::try_from(spki)
- .map_err(|_| type_error("malformed DSS public key"))?;
+ .map_err(|_| AsymmetricPublicKeyError::MalformedDssPublicKey)?;
AsymmetricPublicKey::Dsa(verifying_key)
}
EC_OID => {
let named_curve = spki.algorithm.parameters_oid().map_err(|_| {
- type_error("malformed or missing named curve in ec parameters")
- })?;
- let data = spki.subject_public_key.as_bytes().ok_or_else(|| {
- type_error("malformed or missing public key in ec spki")
+ AsymmetricPublicKeyError::MalformedOrMissingNamedCurveInEcParameters
})?;
+ let data = spki.subject_public_key.as_bytes().ok_or(
+ AsymmetricPublicKeyError::MalformedOrMissingPublicKeyInEcSpki,
+ )?;
match named_curve {
ID_SECP224R1_OID => {
@@ -823,54 +988,68 @@ impl KeyObjectHandle {
let public_key = p384::PublicKey::from_sec1_bytes(data)?;
AsymmetricPublicKey::Ec(EcPublicKey::P384(public_key))
}
- _ => return Err(type_error("unsupported ec named curve")),
+ _ => return Err(AsymmetricPublicKeyError::UnsupportedEcNamedCurve),
}
}
X25519_OID => {
let mut bytes = [0; 32];
- let data = spki.subject_public_key.as_bytes().ok_or_else(|| {
- type_error("malformed or missing public key in x25519 spki")
- })?;
+ let data = spki.subject_public_key.as_bytes().ok_or(
+ AsymmetricPublicKeyError::MalformedOrMissingPublicKeyInX25519Spki,
+ )?;
if data.len() < 32 {
- return Err(type_error("x25519 public key is too short"));
+ return Err(AsymmetricPublicKeyError::X25519PublicKeyIsTooShort);
}
bytes.copy_from_slice(&data[0..32]);
AsymmetricPublicKey::X25519(x25519_dalek::PublicKey::from(bytes))
}
ED25519_OID => {
let verifying_key = ed25519_dalek::VerifyingKey::try_from(spki)
- .map_err(|_| type_error("invalid Ed25519 private key"))?;
+ .map_err(|_| AsymmetricPublicKeyError::InvalidEd25519PublicKey)?;
AsymmetricPublicKey::Ed25519(verifying_key)
}
DH_KEY_AGREEMENT_OID => {
let params = spki
.algorithm
.parameters
- .ok_or_else(|| type_error("missing dh parameters"))?;
+ .ok_or(AsymmetricPublicKeyError::MissingDhParameters)?;
let params = pkcs3::DhParameter::from_der(&params.to_der().unwrap())
- .map_err(|_| type_error("malformed dh parameters"))?;
+ .map_err(|_| AsymmetricPublicKeyError::MalformedDhParameters)?;
let Some(subject_public_key) = spki.subject_public_key.as_bytes()
else {
- return Err(type_error("malformed or missing public key in dh spki"));
+ return Err(
+ AsymmetricPublicKeyError::MalformedOrMissingPublicKeyInDhSpki,
+ );
};
AsymmetricPublicKey::Dh(DhPublicKey {
key: dh::PublicKey::from_bytes(subject_public_key),
params,
})
}
- _ => return Err(type_error("unsupported public key oid")),
+ _ => return Err(AsymmetricPublicKeyError::UnsupportedPrivateKeyOid),
};
Ok(KeyObjectHandle::AsymmetricPublic(public_key))
}
}
+#[derive(Debug, thiserror::Error)]
+pub enum RsaPssParamsParseError {
+ #[error("malformed pss private key parameters")]
+ MalformedPssPrivateKeyParameters,
+ #[error("unsupported pss hash algorithm")]
+ UnsupportedPssHashAlgorithm,
+ #[error("unsupported pss mask gen algorithm")]
+ UnsupportedPssMaskGenAlgorithm,
+ #[error("malformed or missing pss mask gen algorithm parameters")]
+ MalformedOrMissingPssMaskGenAlgorithm,
+}
+
fn parse_rsa_pss_params(
parameters: Option<AnyRef<'_>>,
-) -> Result<Option<RsaPssDetails>, deno_core::anyhow::Error> {
+) -> Result<Option<RsaPssDetails>, RsaPssParamsParseError> {
let details = if let Some(parameters) = parameters {
let params = RsaPssParameters::try_from(parameters)
- .map_err(|_| type_error("malformed pss private key parameters"))?;
+ .map_err(|_| RsaPssParamsParseError::MalformedPssPrivateKeyParameters)?;
let hash_algorithm = match params.hash_algorithm.map(|k| k.oid) {
Some(ID_SHA1_OID) => RsaPssHashAlgorithm::Sha1,
@@ -881,16 +1060,16 @@ fn parse_rsa_pss_params(
Some(ID_SHA512_224_OID) => RsaPssHashAlgorithm::Sha512_224,
Some(ID_SHA512_256_OID) => RsaPssHashAlgorithm::Sha512_256,
None => RsaPssHashAlgorithm::Sha1,
- _ => return Err(type_error("unsupported pss hash algorithm")),
+ _ => return Err(RsaPssParamsParseError::UnsupportedPssHashAlgorithm),
};
let mf1_hash_algorithm = match params.mask_gen_algorithm {
Some(alg) => {
if alg.oid != ID_MFG1 {
- return Err(type_error("unsupported pss mask gen algorithm"));
+ return Err(RsaPssParamsParseError::UnsupportedPssMaskGenAlgorithm);
}
let params = alg.parameters_oid().map_err(|_| {
- type_error("malformed or missing pss mask gen algorithm parameters")
+ RsaPssParamsParseError::MalformedOrMissingPssMaskGenAlgorithm
})?;
match params {
ID_SHA1_OID => RsaPssHashAlgorithm::Sha1,
@@ -900,7 +1079,9 @@ fn parse_rsa_pss_params(
ID_SHA512_OID => RsaPssHashAlgorithm::Sha512,
ID_SHA512_224_OID => RsaPssHashAlgorithm::Sha512_224,
ID_SHA512_256_OID => RsaPssHashAlgorithm::Sha512_256,
- _ => return Err(type_error("unsupported pss mask gen algorithm")),
+ _ => {
+ return Err(RsaPssParamsParseError::UnsupportedPssMaskGenAlgorithm)
+ }
}
}
None => hash_algorithm,
@@ -921,14 +1102,49 @@ fn parse_rsa_pss_params(
Ok(details)
}
-use base64::prelude::BASE64_URL_SAFE_NO_PAD;
-
fn bytes_to_b64(bytes: &[u8]) -> String {
+ use base64::prelude::BASE64_URL_SAFE_NO_PAD;
BASE64_URL_SAFE_NO_PAD.encode(bytes)
}
+#[derive(Debug, thiserror::Error)]
+pub enum AsymmetricPublicKeyJwkError {
+ #[error("key is not an asymmetric public key")]
+ KeyIsNotAsymmetricPublicKey,
+ #[error("Unsupported JWK EC curve: P224")]
+ UnsupportedJwkEcCurveP224,
+ #[error("jwk export not implemented for this key type")]
+ JwkExportNotImplementedForKeyType,
+}
+
+#[derive(Debug, thiserror::Error)]
+pub enum AsymmetricPublicKeyDerError {
+ #[error("key is not an asymmetric public key")]
+ KeyIsNotAsymmetricPublicKey,
+ #[error("invalid RSA public key")]
+ InvalidRsaPublicKey,
+ #[error("exporting non-RSA public key as PKCS#1 is not supported")]
+ ExportingNonRsaPublicKeyAsPkcs1Unsupported,
+ #[error("invalid EC public key")]
+ InvalidEcPublicKey,
+ #[error("exporting RSA-PSS public key as SPKI is not supported yet")]
+ ExportingNonRsaPssPublicKeyAsSpkiUnsupported,
+ #[error("invalid DSA public key")]
+ InvalidDsaPublicKey,
+ #[error("invalid X25519 public key")]
+ InvalidX25519PublicKey,
+ #[error("invalid Ed25519 public key")]
+ InvalidEd25519PublicKey,
+ #[error("invalid DH public key")]
+ InvalidDhPublicKey,
+ #[error("unsupported key type: {0}")]
+ UnsupportedKeyType(String),
+}
+
impl AsymmetricPublicKey {
- fn export_jwk(&self) -> Result<deno_core::serde_json::Value, AnyError> {
+ fn export_jwk(
+ &self,
+ ) -> Result<deno_core::serde_json::Value, AsymmetricPublicKeyJwkError> {
match self {
AsymmetricPublicKey::Ec(key) => {
let jwk = key.to_jwk()?;
@@ -974,40 +1190,39 @@ impl AsymmetricPublicKey {
});
Ok(jwk)
}
- _ => Err(type_error("jwk export not implemented for this key type")),
+ _ => Err(AsymmetricPublicKeyJwkError::JwkExportNotImplementedForKeyType),
}
}
- fn export_der(&self, typ: &str) -> Result<Box<[u8]>, AnyError> {
+ fn export_der(
+ &self,
+ typ: &str,
+ ) -> Result<Box<[u8]>, AsymmetricPublicKeyDerError> {
match typ {
"pkcs1" => match self {
AsymmetricPublicKey::Rsa(key) => {
let der = key
.to_pkcs1_der()
- .map_err(|_| type_error("invalid RSA public key"))?
+ .map_err(|_| AsymmetricPublicKeyDerError::InvalidRsaPublicKey)?
.into_vec()
.into_boxed_slice();
Ok(der)
}
- _ => Err(type_error(
- "exporting non-RSA public key as PKCS#1 is not supported",
- )),
+ _ => Err(AsymmetricPublicKeyDerError::ExportingNonRsaPublicKeyAsPkcs1Unsupported),
},
"spki" => {
let der = match self {
AsymmetricPublicKey::Rsa(key) => key
.to_public_key_der()
- .map_err(|_| type_error("invalid RSA public key"))?
+ .map_err(|_| AsymmetricPublicKeyDerError::InvalidRsaPublicKey)?
.into_vec()
.into_boxed_slice(),
AsymmetricPublicKey::RsaPss(_key) => {
- return Err(generic_error(
- "exporting RSA-PSS public key as SPKI is not supported yet",
- ))
+ return Err(AsymmetricPublicKeyDerError::ExportingNonRsaPssPublicKeyAsSpkiUnsupported)
}
AsymmetricPublicKey::Dsa(key) => key
.to_public_key_der()
- .map_err(|_| type_error("invalid DSA public key"))?
+ .map_err(|_| AsymmetricPublicKeyDerError::InvalidDsaPublicKey)?
.into_vec()
.into_boxed_slice(),
AsymmetricPublicKey::Ec(key) => {
@@ -1023,12 +1238,12 @@ impl AsymmetricPublicKey {
parameters: Some(asn1::AnyRef::from(&oid)),
},
subject_public_key: BitStringRef::from_bytes(&sec1)
- .map_err(|_| type_error("invalid EC public key"))?,
+ .map_err(|_| AsymmetricPublicKeyDerError::InvalidEcPublicKey)?,
};
spki
.to_der()
- .map_err(|_| type_error("invalid EC public key"))?
+ .map_err(|_| AsymmetricPublicKeyDerError::InvalidEcPublicKey)?
.into_boxed_slice()
}
AsymmetricPublicKey::X25519(key) => {
@@ -1038,12 +1253,12 @@ impl AsymmetricPublicKey {
parameters: None,
},
subject_public_key: BitStringRef::from_bytes(key.as_bytes())
- .map_err(|_| type_error("invalid X25519 public key"))?,
+ .map_err(|_| AsymmetricPublicKeyDerError::InvalidX25519PublicKey)?,
};
spki
.to_der()
- .map_err(|_| type_error("invalid X25519 public key"))?
+ .map_err(|_| AsymmetricPublicKeyDerError::InvalidX25519PublicKey)?
.into_boxed_slice()
}
AsymmetricPublicKey::Ed25519(key) => {
@@ -1053,12 +1268,12 @@ impl AsymmetricPublicKey {
parameters: None,
},
subject_public_key: BitStringRef::from_bytes(key.as_bytes())
- .map_err(|_| type_error("invalid Ed25519 public key"))?,
+ .map_err(|_| AsymmetricPublicKeyDerError::InvalidEd25519PublicKey)?,
};
spki
.to_der()
- .map_err(|_| type_error("invalid Ed25519 public key"))?
+ .map_err(|_| AsymmetricPublicKeyDerError::InvalidEd25519PublicKey)?
.into_boxed_slice()
}
AsymmetricPublicKey::Dh(key) => {
@@ -1071,43 +1286,67 @@ impl AsymmetricPublicKey {
},
subject_public_key: BitStringRef::from_bytes(&public_key_bytes)
.map_err(|_| {
- type_error("invalid DH public key")
+ AsymmetricPublicKeyDerError::InvalidDhPublicKey
})?,
};
spki
.to_der()
- .map_err(|_| type_error("invalid DH public key"))?
+ .map_err(|_| AsymmetricPublicKeyDerError::InvalidDhPublicKey)?
.into_boxed_slice()
}
};
Ok(der)
}
- _ => Err(type_error(format!("unsupported key type: {}", typ))),
+ _ => Err(AsymmetricPublicKeyDerError::UnsupportedKeyType(typ.to_string())),
}
}
}
+#[derive(Debug, thiserror::Error)]
+pub enum AsymmetricPrivateKeyDerError {
+ #[error("key is not an asymmetric private key")]
+ KeyIsNotAsymmetricPrivateKey,
+ #[error("invalid RSA private key")]
+ InvalidRsaPrivateKey,
+ #[error("exporting non-RSA private key as PKCS#1 is not supported")]
+ ExportingNonRsaPrivateKeyAsPkcs1Unsupported,
+ #[error("invalid EC private key")]
+ InvalidEcPrivateKey,
+ #[error("exporting non-EC private key as SEC1 is not supported")]
+ ExportingNonEcPrivateKeyAsSec1Unsupported,
+ #[error("exporting RSA-PSS private key as PKCS#8 is not supported yet")]
+ ExportingNonRsaPssPrivateKeyAsPkcs8Unsupported,
+ #[error("invalid DSA private key")]
+ InvalidDsaPrivateKey,
+ #[error("invalid X25519 private key")]
+ InvalidX25519PrivateKey,
+ #[error("invalid Ed25519 private key")]
+ InvalidEd25519PrivateKey,
+ #[error("invalid DH private key")]
+ InvalidDhPrivateKey,
+ #[error("unsupported key type: {0}")]
+ UnsupportedKeyType(String),
+}
+
impl AsymmetricPrivateKey {
fn export_der(
&self,
typ: &str,
// cipher: Option<&str>,
// passphrase: Option<&str>,
- ) -> Result<Box<[u8]>, AnyError> {
+ ) -> Result<Box<[u8]>, AsymmetricPrivateKeyDerError> {
match typ {
"pkcs1" => match self {
AsymmetricPrivateKey::Rsa(key) => {
let der = key
.to_pkcs1_der()
- .map_err(|_| type_error("invalid RSA private key"))?
+ .map_err(|_| AsymmetricPrivateKeyDerError::InvalidRsaPrivateKey)?
.to_bytes()
.to_vec()
.into_boxed_slice();
Ok(der)
}
- _ => Err(type_error(
- "exporting non-RSA private key as PKCS#1 is not supported",
- )),
+ _ => Err(AsymmetricPrivateKeyDerError::ExportingNonRsaPrivateKeyAsPkcs1Unsupported),
},
"sec1" => match self {
AsymmetricPrivateKey::Ec(key) => {
@@ -1116,30 +1355,26 @@ impl AsymmetricPrivateKey {
EcPrivateKey::P256(key) => key.to_sec1_der(),
EcPrivateKey::P384(key) => key.to_sec1_der(),
}
- .map_err(|_| type_error("invalid EC private key"))?;
+ .map_err(|_| AsymmetricPrivateKeyDerError::InvalidEcPrivateKey)?;
Ok(sec1.to_vec().into_boxed_slice())
}
- _ => Err(type_error(
- "exporting non-EC private key as SEC1 is not supported",
- )),
+ _ => Err(AsymmetricPrivateKeyDerError::ExportingNonEcPrivateKeyAsSec1Unsupported),
},
"pkcs8" => {
let der = match self {
AsymmetricPrivateKey::Rsa(key) => {
let document = key
.to_pkcs8_der()
- .map_err(|_| type_error("invalid RSA private key"))?;
+ .map_err(|_| AsymmetricPrivateKeyDerError::InvalidRsaPrivateKey)?;
document.to_bytes().to_vec().into_boxed_slice()
}
AsymmetricPrivateKey::RsaPss(_key) => {
- return Err(generic_error(
- "exporting RSA-PSS private key as PKCS#8 is not supported yet",
- ))
+ return Err(AsymmetricPrivateKeyDerError::ExportingNonRsaPssPrivateKeyAsPkcs8Unsupported)
}
AsymmetricPrivateKey::Dsa(key) => {
let document = key
.to_pkcs8_der()
- .map_err(|_| type_error("invalid DSA private key"))?;
+ .map_err(|_| AsymmetricPrivateKeyDerError::InvalidDsaPrivateKey)?;
document.to_bytes().to_vec().into_boxed_slice()
}
AsymmetricPrivateKey::Ec(key) => {
@@ -1148,14 +1383,14 @@ impl AsymmetricPrivateKey {
EcPrivateKey::P256(key) => key.to_pkcs8_der(),
EcPrivateKey::P384(key) => key.to_pkcs8_der(),
}
- .map_err(|_| type_error("invalid EC private key"))?;
+ .map_err(|_| AsymmetricPrivateKeyDerError::InvalidEcPrivateKey)?;
document.to_bytes().to_vec().into_boxed_slice()
}
AsymmetricPrivateKey::X25519(key) => {
let private_key = OctetStringRef::new(key.as_bytes())
- .map_err(|_| type_error("invalid X25519 private key"))?
+ .map_err(|_| AsymmetricPrivateKeyDerError::InvalidX25519PrivateKey)?
.to_der()
- .map_err(|_| type_error("invalid X25519 private key"))?;
+ .map_err(|_| AsymmetricPrivateKeyDerError::InvalidX25519PrivateKey)?;
let private_key = PrivateKeyInfo {
algorithm: rsa::pkcs8::AlgorithmIdentifierRef {
@@ -1168,15 +1403,15 @@ impl AsymmetricPrivateKey {
let der = private_key
.to_der()
- .map_err(|_| type_error("invalid X25519 private key"))?
+ .map_err(|_| AsymmetricPrivateKeyDerError::InvalidX25519PrivateKey)?
.into_boxed_slice();
return Ok(der);
}
AsymmetricPrivateKey::Ed25519(key) => {
let private_key = OctetStringRef::new(key.as_bytes())
- .map_err(|_| type_error("invalid Ed25519 private key"))?
+ .map_err(|_| AsymmetricPrivateKeyDerError::InvalidEd25519PrivateKey)?
.to_der()
- .map_err(|_| type_error("invalid Ed25519 private key"))?;
+ .map_err(|_| AsymmetricPrivateKeyDerError::InvalidEd25519PrivateKey)?;
let private_key = PrivateKeyInfo {
algorithm: rsa::pkcs8::AlgorithmIdentifierRef {
@@ -1189,7 +1424,7 @@ impl AsymmetricPrivateKey {
private_key
.to_der()
- .map_err(|_| type_error("invalid ED25519 private key"))?
+ .map_err(|_| AsymmetricPrivateKeyDerError::InvalidEd25519PrivateKey)?
.into_boxed_slice()
}
AsymmetricPrivateKey::Dh(key) => {
@@ -1206,14 +1441,14 @@ impl AsymmetricPrivateKey {
private_key
.to_der()
- .map_err(|_| type_error("invalid DH private key"))?
+ .map_err(|_| AsymmetricPrivateKeyDerError::InvalidDhPrivateKey)?
.into_boxed_slice()
}
};
Ok(der)
}
- _ => Err(type_error(format!("unsupported key type: {}", typ))),
+ _ => Err(AsymmetricPrivateKeyDerError::UnsupportedKeyType(typ.to_string())),
}
}
}
@@ -1225,7 +1460,7 @@ pub fn op_node_create_private_key(
#[string] format: &str,
#[string] typ: &str,
#[buffer] passphrase: Option<&[u8]>,
-) -> Result<KeyObjectHandle, AnyError> {
+) -> Result<KeyObjectHandle, AsymmetricPrivateKeyError> {
KeyObjectHandle::new_asymmetric_private_key_from_js(
key, format, typ, passphrase,
)
@@ -1237,7 +1472,7 @@ pub fn op_node_create_ed_raw(
#[string] curve: &str,
#[buffer] key: &[u8],
is_public: bool,
-) -> Result<KeyObjectHandle, AnyError> {
+) -> Result<KeyObjectHandle, EdRawError> {
KeyObjectHandle::new_ed_raw(curve, key, is_public)
}
@@ -1255,16 +1490,16 @@ pub struct RsaJwkKey {
pub fn op_node_create_rsa_jwk(
#[serde] jwk: RsaJwkKey,
is_public: bool,
-) -> Result<KeyObjectHandle, AnyError> {
+) -> Result<KeyObjectHandle, RsaJwkError> {
KeyObjectHandle::new_rsa_jwk(jwk, is_public)
}
#[op2]
#[cppgc]
pub fn op_node_create_ec_jwk(
- #[serde] jwk: elliptic_curve::JwkEcKey,
+ #[serde] jwk: JwkEcKey,
is_public: bool,
-) -> Result<KeyObjectHandle, AnyError> {
+) -> Result<KeyObjectHandle, EcJwkError> {
KeyObjectHandle::new_ec_jwk(&jwk, is_public)
}
@@ -1275,7 +1510,7 @@ pub fn op_node_create_public_key(
#[string] format: &str,
#[string] typ: &str,
#[buffer] passphrase: Option<&[u8]>,
-) -> Result<KeyObjectHandle, AnyError> {
+) -> Result<KeyObjectHandle, AsymmetricPublicKeyError> {
KeyObjectHandle::new_asymmetric_public_key_from_js(
key, format, typ, passphrase,
)
@@ -1293,7 +1528,7 @@ pub fn op_node_create_secret_key(
#[string]
pub fn op_node_get_asymmetric_key_type(
#[cppgc] handle: &KeyObjectHandle,
-) -> Result<&'static str, AnyError> {
+) -> Result<&'static str, deno_core::error::AnyError> {
match handle {
KeyObjectHandle::AsymmetricPrivate(AsymmetricPrivateKey::Rsa(_))
| KeyObjectHandle::AsymmetricPublic(AsymmetricPublicKey::Rsa(_)) => {
@@ -1364,7 +1599,7 @@ pub enum AsymmetricKeyDetails {
#[serde]
pub fn op_node_get_asymmetric_key_details(
#[cppgc] handle: &KeyObjectHandle,
-) -> Result<AsymmetricKeyDetails, AnyError> {
+) -> Result<AsymmetricKeyDetails, deno_core::error::AnyError> {
match handle {
KeyObjectHandle::AsymmetricPrivate(private_key) => match private_key {
AsymmetricPrivateKey::Rsa(key) => {
@@ -1482,12 +1717,10 @@ pub fn op_node_get_asymmetric_key_details(
#[smi]
pub fn op_node_get_symmetric_key_size(
#[cppgc] handle: &KeyObjectHandle,
-) -> Result<usize, AnyError> {
+) -> Result<usize, deno_core::error::AnyError> {
match handle {
- KeyObjectHandle::AsymmetricPrivate(_) => {
- Err(type_error("asymmetric key is not a symmetric key"))
- }
- KeyObjectHandle::AsymmetricPublic(_) => {
+ KeyObjectHandle::AsymmetricPrivate(_)
+ | KeyObjectHandle::AsymmetricPublic(_) => {
Err(type_error("asymmetric key is not a symmetric key"))
}
KeyObjectHandle::Secret(key) => Ok(key.len() * 8),
@@ -1592,13 +1825,17 @@ pub async fn op_node_generate_rsa_key_async(
.unwrap()
}
+#[derive(Debug, thiserror::Error)]
+#[error("digest not allowed for RSA-PSS keys{}", .0.as_ref().map(|digest| format!(": {digest}")).unwrap_or_default())]
+pub struct GenerateRsaPssError(Option<String>);
+
fn generate_rsa_pss(
modulus_length: usize,
public_exponent: usize,
hash_algorithm: Option<&str>,
mf1_hash_algorithm: Option<&str>,
salt_length: Option<u32>,
-) -> Result<KeyObjectHandlePair, AnyError> {
+) -> Result<KeyObjectHandlePair, GenerateRsaPssError> {
let key = RsaPrivateKey::new_with_exp(
&mut thread_rng(),
modulus_length,
@@ -1617,25 +1854,19 @@ fn generate_rsa_pss(
let hash_algorithm = match_fixed_digest_with_oid!(
hash_algorithm,
fn (algorithm: Option<RsaPssHashAlgorithm>) {
- algorithm.ok_or_else(|| type_error("digest not allowed for RSA-PSS keys: {}"))?
+ algorithm.ok_or(GenerateRsaPssError(None))?
},
_ => {
- return Err(type_error(format!(
- "digest not allowed for RSA-PSS keys: {}",
- hash_algorithm
- )))
+ return Err(GenerateRsaPssError(Some(hash_algorithm.to_string())))
}
);
let mf1_hash_algorithm = match_fixed_digest_with_oid!(
mf1_hash_algorithm,
fn (algorithm: Option<RsaPssHashAlgorithm>) {
- algorithm.ok_or_else(|| type_error("digest not allowed for RSA-PSS keys: {}"))?
+ algorithm.ok_or(GenerateRsaPssError(None))?
},
_ => {
- return Err(type_error(format!(
- "digest not allowed for RSA-PSS keys: {}",
- mf1_hash_algorithm
- )))
+ return Err(GenerateRsaPssError(Some(mf1_hash_algorithm.to_string())))
}
);
let salt_length =
@@ -1663,7 +1894,7 @@ pub fn op_node_generate_rsa_pss_key(
#[string] hash_algorithm: Option<String>, // todo: Option<&str> not supproted in ops yet
#[string] mf1_hash_algorithm: Option<String>, // todo: Option<&str> not supproted in ops yet
#[smi] salt_length: Option<u32>,
-) -> Result<KeyObjectHandlePair, AnyError> {
+) -> Result<KeyObjectHandlePair, GenerateRsaPssError> {
generate_rsa_pss(
modulus_length,
public_exponent,
@@ -1681,7 +1912,7 @@ pub async fn op_node_generate_rsa_pss_key_async(
#[string] hash_algorithm: Option<String>, // todo: Option<&str> not supproted in ops yet
#[string] mf1_hash_algorithm: Option<String>, // todo: Option<&str> not supproted in ops yet
#[smi] salt_length: Option<u32>,
-) -> Result<KeyObjectHandlePair, AnyError> {
+) -> Result<KeyObjectHandlePair, GenerateRsaPssError> {
spawn_blocking(move || {
generate_rsa_pss(
modulus_length,
@@ -1698,7 +1929,7 @@ pub async fn op_node_generate_rsa_pss_key_async(
fn dsa_generate(
modulus_length: usize,
divisor_length: usize,
-) -> Result<KeyObjectHandlePair, AnyError> {
+) -> Result<KeyObjectHandlePair, deno_core::error::AnyError> {
let mut rng = rand::thread_rng();
use dsa::Components;
use dsa::KeySize;
@@ -1729,7 +1960,7 @@ fn dsa_generate(
pub fn op_node_generate_dsa_key(
#[smi] modulus_length: usize,
#[smi] divisor_length: usize,
-) -> Result<KeyObjectHandlePair, AnyError> {
+) -> Result<KeyObjectHandlePair, deno_core::error::AnyError> {
dsa_generate(modulus_length, divisor_length)
}
@@ -1738,13 +1969,15 @@ pub fn op_node_generate_dsa_key(
pub async fn op_node_generate_dsa_key_async(
#[smi] modulus_length: usize,
#[smi] divisor_length: usize,
-) -> Result<KeyObjectHandlePair, AnyError> {
+) -> Result<KeyObjectHandlePair, deno_core::error::AnyError> {
spawn_blocking(move || dsa_generate(modulus_length, divisor_length))
.await
.unwrap()
}
-fn ec_generate(named_curve: &str) -> Result<KeyObjectHandlePair, AnyError> {
+fn ec_generate(
+ named_curve: &str,
+) -> Result<KeyObjectHandlePair, deno_core::error::AnyError> {
let mut rng = rand::thread_rng();
// TODO(@littledivy): Support public key point encoding.
// Default is uncompressed.
@@ -1776,7 +2009,7 @@ fn ec_generate(named_curve: &str) -> Result<KeyObjectHandlePair, AnyError> {
#[cppgc]
pub fn op_node_generate_ec_key(
#[string] named_curve: &str,
-) -> Result<KeyObjectHandlePair, AnyError> {
+) -> Result<KeyObjectHandlePair, deno_core::error::AnyError> {
ec_generate(named_curve)
}
@@ -1784,7 +2017,7 @@ pub fn op_node_generate_ec_key(
#[cppgc]
pub async fn op_node_generate_ec_key_async(
#[string] named_curve: String,
-) -> Result<KeyObjectHandlePair, AnyError> {
+) -> Result<KeyObjectHandlePair, deno_core::error::AnyError> {
spawn_blocking(move || ec_generate(&named_curve))
.await
.unwrap()
@@ -1840,7 +2073,7 @@ fn u32_slice_to_u8_slice(slice: &[u32]) -> &[u8] {
fn dh_group_generate(
group_name: &str,
-) -> Result<KeyObjectHandlePair, AnyError> {
+) -> Result<KeyObjectHandlePair, deno_core::error::AnyError> {
let (dh, prime, generator) = match group_name {
"modp5" => (
dh::DiffieHellman::group::<dh::Modp1536>(),
@@ -1895,7 +2128,7 @@ fn dh_group_generate(
#[cppgc]
pub fn op_node_generate_dh_group_key(
#[string] group_name: &str,
-) -> Result<KeyObjectHandlePair, AnyError> {
+) -> Result<KeyObjectHandlePair, deno_core::error::AnyError> {
dh_group_generate(group_name)
}
@@ -1903,7 +2136,7 @@ pub fn op_node_generate_dh_group_key(
#[cppgc]
pub async fn op_node_generate_dh_group_key_async(
#[string] group_name: String,
-) -> Result<KeyObjectHandlePair, AnyError> {
+) -> Result<KeyObjectHandlePair, deno_core::error::AnyError> {
spawn_blocking(move || dh_group_generate(&group_name))
.await
.unwrap()
@@ -1913,7 +2146,7 @@ fn dh_generate(
prime: Option<&[u8]>,
prime_len: usize,
generator: usize,
-) -> Result<KeyObjectHandlePair, AnyError> {
+) -> KeyObjectHandlePair {
let prime = prime
.map(|p| p.into())
.unwrap_or_else(|| Prime::generate(prime_len));
@@ -1923,7 +2156,7 @@ fn dh_generate(
base: asn1::Int::new(generator.to_be_bytes().as_slice()).unwrap(),
private_value_length: None,
};
- Ok(KeyObjectHandlePair::new(
+ KeyObjectHandlePair::new(
AsymmetricPrivateKey::Dh(DhPrivateKey {
key: dh.private_key,
params: params.clone(),
@@ -1932,7 +2165,7 @@ fn dh_generate(
key: dh.public_key,
params,
}),
- ))
+ )
}
#[op2]
@@ -1941,7 +2174,7 @@ pub fn op_node_generate_dh_key(
#[buffer] prime: Option<&[u8]>,
#[smi] prime_len: usize,
#[smi] generator: usize,
-) -> Result<KeyObjectHandlePair, AnyError> {
+) -> KeyObjectHandlePair {
dh_generate(prime, prime_len, generator)
}
@@ -1951,7 +2184,7 @@ pub async fn op_node_generate_dh_key_async(
#[buffer(copy)] prime: Option<Box<[u8]>>,
#[smi] prime_len: usize,
#[smi] generator: usize,
-) -> Result<KeyObjectHandlePair, AnyError> {
+) -> KeyObjectHandlePair {
spawn_blocking(move || dh_generate(prime.as_deref(), prime_len, generator))
.await
.unwrap()
@@ -1963,21 +2196,21 @@ pub fn op_node_dh_keys_generate_and_export(
#[buffer] prime: Option<&[u8]>,
#[smi] prime_len: usize,
#[smi] generator: usize,
-) -> Result<(ToJsBuffer, ToJsBuffer), AnyError> {
+) -> (ToJsBuffer, ToJsBuffer) {
let prime = prime
.map(|p| p.into())
.unwrap_or_else(|| Prime::generate(prime_len));
let dh = dh::DiffieHellman::new(prime, generator);
let private_key = dh.private_key.into_vec().into_boxed_slice();
let public_key = dh.public_key.into_vec().into_boxed_slice();
- Ok((private_key.into(), public_key.into()))
+ (private_key.into(), public_key.into())
}
#[op2]
#[buffer]
pub fn op_node_export_secret_key(
#[cppgc] handle: &KeyObjectHandle,
-) -> Result<Box<[u8]>, AnyError> {
+) -> Result<Box<[u8]>, deno_core::error::AnyError> {
let key = handle
.as_secret_key()
.ok_or_else(|| type_error("key is not a secret key"))?;
@@ -1988,7 +2221,7 @@ pub fn op_node_export_secret_key(
#[string]
pub fn op_node_export_secret_key_b64url(
#[cppgc] handle: &KeyObjectHandle,
-) -> Result<String, AnyError> {
+) -> Result<String, deno_core::error::AnyError> {
let key = handle
.as_secret_key()
.ok_or_else(|| type_error("key is not a secret key"))?;
@@ -1999,23 +2232,33 @@ pub fn op_node_export_secret_key_b64url(
#[serde]
pub fn op_node_export_public_key_jwk(
#[cppgc] handle: &KeyObjectHandle,
-) -> Result<deno_core::serde_json::Value, AnyError> {
+) -> Result<deno_core::serde_json::Value, AsymmetricPublicKeyJwkError> {
let public_key = handle
.as_public_key()
- .ok_or_else(|| type_error("key is not an asymmetric public key"))?;
+ .ok_or(AsymmetricPublicKeyJwkError::KeyIsNotAsymmetricPublicKey)?;
public_key.export_jwk()
}
+#[derive(Debug, thiserror::Error)]
+pub enum ExportPublicKeyPemError {
+ #[error(transparent)]
+ AsymmetricPublicKeyDer(#[from] AsymmetricPublicKeyDerError),
+ #[error("very large data")]
+ VeryLargeData,
+ #[error(transparent)]
+ Der(#[from] der::Error),
+}
+
#[op2]
#[string]
pub fn op_node_export_public_key_pem(
#[cppgc] handle: &KeyObjectHandle,
#[string] typ: &str,
-) -> Result<String, AnyError> {
+) -> Result<String, ExportPublicKeyPemError> {
let public_key = handle
.as_public_key()
- .ok_or_else(|| type_error("key is not an asymmetric public key"))?;
+ .ok_or(AsymmetricPublicKeyDerError::KeyIsNotAsymmetricPublicKey)?;
let data = public_key.export_der(typ)?;
let label = match typ {
@@ -2024,7 +2267,9 @@ pub fn op_node_export_public_key_pem(
_ => unreachable!("export_der would have errored"),
};
- let mut out = vec![0; 2048];
+ let pem_len = der::pem::encapsulated_len(label, LineEnding::LF, data.len())
+ .map_err(|_| ExportPublicKeyPemError::VeryLargeData)?;
+ let mut out = vec![0; pem_len];
let mut writer = PemWriter::new(label, LineEnding::LF, &mut out)?;
writer.write(&data)?;
let len = writer.finish()?;
@@ -2038,22 +2283,32 @@ pub fn op_node_export_public_key_pem(
pub fn op_node_export_public_key_der(
#[cppgc] handle: &KeyObjectHandle,
#[string] typ: &str,
-) -> Result<Box<[u8]>, AnyError> {
+) -> Result<Box<[u8]>, AsymmetricPublicKeyDerError> {
let public_key = handle
.as_public_key()
- .ok_or_else(|| type_error("key is not an asymmetric public key"))?;
+ .ok_or(AsymmetricPublicKeyDerError::KeyIsNotAsymmetricPublicKey)?;
public_key.export_der(typ)
}
+#[derive(Debug, thiserror::Error)]
+pub enum ExportPrivateKeyPemError {
+ #[error(transparent)]
+ AsymmetricPublicKeyDer(#[from] AsymmetricPrivateKeyDerError),
+ #[error("very large data")]
+ VeryLargeData,
+ #[error(transparent)]
+ Der(#[from] der::Error),
+}
+
#[op2]
#[string]
pub fn op_node_export_private_key_pem(
#[cppgc] handle: &KeyObjectHandle,
#[string] typ: &str,
-) -> Result<String, AnyError> {
+) -> Result<String, ExportPrivateKeyPemError> {
let private_key = handle
.as_private_key()
- .ok_or_else(|| type_error("key is not an asymmetric private key"))?;
+ .ok_or(AsymmetricPrivateKeyDerError::KeyIsNotAsymmetricPrivateKey)?;
let data = private_key.export_der(typ)?;
let label = match typ {
@@ -2063,7 +2318,9 @@ pub fn op_node_export_private_key_pem(
_ => unreachable!("export_der would have errored"),
};
- let mut out = vec![0; 2048];
+ let pem_len = der::pem::encapsulated_len(label, LineEnding::LF, data.len())
+ .map_err(|_| ExportPrivateKeyPemError::VeryLargeData)?;
+ let mut out = vec![0; pem_len];
let mut writer = PemWriter::new(label, LineEnding::LF, &mut out)?;
writer.write(&data)?;
let len = writer.finish()?;
@@ -2077,10 +2334,10 @@ pub fn op_node_export_private_key_pem(
pub fn op_node_export_private_key_der(
#[cppgc] handle: &KeyObjectHandle,
#[string] typ: &str,
-) -> Result<Box<[u8]>, AnyError> {
+) -> Result<Box<[u8]>, AsymmetricPrivateKeyDerError> {
let private_key = handle
.as_private_key()
- .ok_or_else(|| type_error("key is not an asymmetric private key"))?;
+ .ok_or(AsymmetricPrivateKeyDerError::KeyIsNotAsymmetricPrivateKey)?;
private_key.export_der(typ)
}
@@ -2098,7 +2355,7 @@ pub fn op_node_key_type(#[cppgc] handle: &KeyObjectHandle) -> &'static str {
#[cppgc]
pub fn op_node_derive_public_key_from_private_key(
#[cppgc] handle: &KeyObjectHandle,
-) -> Result<KeyObjectHandle, AnyError> {
+) -> Result<KeyObjectHandle, deno_core::error::AnyError> {
let Some(private_key) = handle.as_private_key() else {
return Err(type_error("expected private key"));
};
diff --git a/ext/node/ops/crypto/mod.rs b/ext/node/ops/crypto/mod.rs
index 600d31558..e90e82090 100644
--- a/ext/node/ops/crypto/mod.rs
+++ b/ext/node/ops/crypto/mod.rs
@@ -1,7 +1,6 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
use deno_core::error::generic_error;
use deno_core::error::type_error;
-use deno_core::error::AnyError;
use deno_core::op2;
use deno_core::unsync::spawn_blocking;
use deno_core::JsBuffer;
@@ -34,14 +33,14 @@ use rsa::Pkcs1v15Encrypt;
use rsa::RsaPrivateKey;
use rsa::RsaPublicKey;
-mod cipher;
+pub mod cipher;
mod dh;
-mod digest;
+pub mod digest;
pub mod keys;
mod md5_sha1;
mod pkcs3;
mod primes;
-mod sign;
+pub mod sign;
pub mod x509;
use self::digest::match_fixed_digest_with_eager_block_buffer;
@@ -58,38 +57,31 @@ pub fn op_node_check_prime(
pub fn op_node_check_prime_bytes(
#[anybuffer] bytes: &[u8],
#[number] checks: usize,
-) -> Result<bool, AnyError> {
+) -> bool {
let candidate = BigInt::from_bytes_be(num_bigint::Sign::Plus, bytes);
- Ok(primes::is_probably_prime(&candidate, checks))
+ primes::is_probably_prime(&candidate, checks)
}
#[op2(async)]
pub async fn op_node_check_prime_async(
#[bigint] num: i64,
#[number] checks: usize,
-) -> Result<bool, AnyError> {
+) -> Result<bool, tokio::task::JoinError> {
// TODO(@littledivy): use rayon for CPU-bound tasks
- Ok(
- spawn_blocking(move || {
- primes::is_probably_prime(&BigInt::from(num), checks)
- })
- .await?,
- )
+ spawn_blocking(move || primes::is_probably_prime(&BigInt::from(num), checks))
+ .await
}
#[op2(async)]
pub fn op_node_check_prime_bytes_async(
#[anybuffer] bytes: &[u8],
#[number] checks: usize,
-) -> Result<impl Future<Output = Result<bool, AnyError>>, AnyError> {
+) -> impl Future<Output = Result<bool, tokio::task::JoinError>> {
let candidate = BigInt::from_bytes_be(num_bigint::Sign::Plus, bytes);
// TODO(@littledivy): use rayon for CPU-bound tasks
- Ok(async move {
- Ok(
- spawn_blocking(move || primes::is_probably_prime(&candidate, checks))
- .await?,
- )
- })
+ async move {
+ spawn_blocking(move || primes::is_probably_prime(&candidate, checks)).await
+ }
}
#[op2]
@@ -97,7 +89,7 @@ pub fn op_node_check_prime_bytes_async(
pub fn op_node_create_hash(
#[string] algorithm: &str,
output_length: Option<u32>,
-) -> Result<digest::Hasher, AnyError> {
+) -> Result<digest::Hasher, digest::HashError> {
digest::Hasher::new(algorithm, output_length.map(|l| l as usize))
}
@@ -145,17 +137,31 @@ pub fn op_node_hash_digest_hex(
pub fn op_node_hash_clone(
#[cppgc] hasher: &digest::Hasher,
output_length: Option<u32>,
-) -> Result<Option<digest::Hasher>, AnyError> {
+) -> Result<Option<digest::Hasher>, digest::HashError> {
hasher.clone_inner(output_length.map(|l| l as usize))
}
+#[derive(Debug, thiserror::Error)]
+pub enum PrivateEncryptDecryptError {
+ #[error(transparent)]
+ Pkcs8(#[from] pkcs8::Error),
+ #[error(transparent)]
+ Spki(#[from] spki::Error),
+ #[error(transparent)]
+ Utf8(#[from] std::str::Utf8Error),
+ #[error(transparent)]
+ Rsa(#[from] rsa::Error),
+ #[error("Unknown padding")]
+ UnknownPadding,
+}
+
#[op2]
#[serde]
pub fn op_node_private_encrypt(
#[serde] key: StringOrBuffer,
#[serde] msg: StringOrBuffer,
#[smi] padding: u32,
-) -> Result<ToJsBuffer, AnyError> {
+) -> Result<ToJsBuffer, PrivateEncryptDecryptError> {
let key = RsaPrivateKey::from_pkcs8_pem((&key).try_into()?)?;
let mut rng = rand::thread_rng();
@@ -172,7 +178,7 @@ pub fn op_node_private_encrypt(
.encrypt(&mut rng, Oaep::new::<sha1::Sha1>(), &msg)?
.into(),
),
- _ => Err(type_error("Unknown padding")),
+ _ => Err(PrivateEncryptDecryptError::UnknownPadding),
}
}
@@ -182,13 +188,13 @@ pub fn op_node_private_decrypt(
#[serde] key: StringOrBuffer,
#[serde] msg: StringOrBuffer,
#[smi] padding: u32,
-) -> Result<ToJsBuffer, AnyError> {
+) -> Result<ToJsBuffer, PrivateEncryptDecryptError> {
let key = RsaPrivateKey::from_pkcs8_pem((&key).try_into()?)?;
match padding {
1 => Ok(key.decrypt(Pkcs1v15Encrypt, &msg)?.into()),
4 => Ok(key.decrypt(Oaep::new::<sha1::Sha1>(), &msg)?.into()),
- _ => Err(type_error("Unknown padding")),
+ _ => Err(PrivateEncryptDecryptError::UnknownPadding),
}
}
@@ -198,7 +204,7 @@ pub fn op_node_public_encrypt(
#[serde] key: StringOrBuffer,
#[serde] msg: StringOrBuffer,
#[smi] padding: u32,
-) -> Result<ToJsBuffer, AnyError> {
+) -> Result<ToJsBuffer, PrivateEncryptDecryptError> {
let key = RsaPublicKey::from_public_key_pem((&key).try_into()?)?;
let mut rng = rand::thread_rng();
@@ -209,7 +215,7 @@ pub fn op_node_public_encrypt(
.encrypt(&mut rng, Oaep::new::<sha1::Sha1>(), &msg)?
.into(),
),
- _ => Err(type_error("Unknown padding")),
+ _ => Err(PrivateEncryptDecryptError::UnknownPadding),
}
}
@@ -220,7 +226,7 @@ pub fn op_node_create_cipheriv(
#[string] algorithm: &str,
#[buffer] key: &[u8],
#[buffer] iv: &[u8],
-) -> Result<u32, AnyError> {
+) -> Result<u32, cipher::CipherContextError> {
let context = cipher::CipherContext::new(algorithm, key, iv)?;
Ok(state.resource_table.add(context))
}
@@ -262,11 +268,14 @@ pub fn op_node_cipheriv_final(
auto_pad: bool,
#[buffer] input: &[u8],
#[anybuffer] output: &mut [u8],
-) -> Result<Option<Vec<u8>>, AnyError> {
- let context = state.resource_table.take::<cipher::CipherContext>(rid)?;
+) -> Result<Option<Vec<u8>>, cipher::CipherContextError> {
+ let context = state
+ .resource_table
+ .take::<cipher::CipherContext>(rid)
+ .map_err(cipher::CipherContextError::Resource)?;
let context = Rc::try_unwrap(context)
- .map_err(|_| type_error("Cipher context is already in use"))?;
- context.r#final(auto_pad, input, output)
+ .map_err(|_| cipher::CipherContextError::ContextInUse)?;
+ context.r#final(auto_pad, input, output).map_err(Into::into)
}
#[op2]
@@ -274,10 +283,13 @@ pub fn op_node_cipheriv_final(
pub fn op_node_cipheriv_take(
state: &mut OpState,
#[smi] rid: u32,
-) -> Result<Option<Vec<u8>>, AnyError> {
- let context = state.resource_table.take::<cipher::CipherContext>(rid)?;
+) -> Result<Option<Vec<u8>>, cipher::CipherContextError> {
+ let context = state
+ .resource_table
+ .take::<cipher::CipherContext>(rid)
+ .map_err(cipher::CipherContextError::Resource)?;
let context = Rc::try_unwrap(context)
- .map_err(|_| type_error("Cipher context is already in use"))?;
+ .map_err(|_| cipher::CipherContextError::ContextInUse)?;
Ok(context.take_tag())
}
@@ -288,7 +300,7 @@ pub fn op_node_create_decipheriv(
#[string] algorithm: &str,
#[buffer] key: &[u8],
#[buffer] iv: &[u8],
-) -> Result<u32, AnyError> {
+) -> Result<u32, cipher::DecipherContextError> {
let context = cipher::DecipherContext::new(algorithm, key, iv)?;
Ok(state.resource_table.add(context))
}
@@ -326,10 +338,13 @@ pub fn op_node_decipheriv_decrypt(
pub fn op_node_decipheriv_take(
state: &mut OpState,
#[smi] rid: u32,
-) -> Result<(), AnyError> {
- let context = state.resource_table.take::<cipher::DecipherContext>(rid)?;
+) -> Result<(), cipher::DecipherContextError> {
+ let context = state
+ .resource_table
+ .take::<cipher::DecipherContext>(rid)
+ .map_err(cipher::DecipherContextError::Resource)?;
Rc::try_unwrap(context)
- .map_err(|_| type_error("Cipher context is already in use"))?;
+ .map_err(|_| cipher::DecipherContextError::ContextInUse)?;
Ok(())
}
@@ -341,11 +356,16 @@ pub fn op_node_decipheriv_final(
#[buffer] input: &[u8],
#[anybuffer] output: &mut [u8],
#[buffer] auth_tag: &[u8],
-) -> Result<(), AnyError> {
- let context = state.resource_table.take::<cipher::DecipherContext>(rid)?;
+) -> Result<(), cipher::DecipherContextError> {
+ let context = state
+ .resource_table
+ .take::<cipher::DecipherContext>(rid)
+ .map_err(cipher::DecipherContextError::Resource)?;
let context = Rc::try_unwrap(context)
- .map_err(|_| type_error("Cipher context is already in use"))?;
- context.r#final(auto_pad, input, output, auth_tag)
+ .map_err(|_| cipher::DecipherContextError::ContextInUse)?;
+ context
+ .r#final(auto_pad, input, output, auth_tag)
+ .map_err(Into::into)
}
#[op2]
@@ -356,7 +376,7 @@ pub fn op_node_sign(
#[string] digest_type: &str,
#[smi] pss_salt_length: Option<u32>,
#[smi] dsa_signature_encoding: u32,
-) -> Result<Box<[u8]>, AnyError> {
+) -> Result<Box<[u8]>, sign::KeyObjectHandlePrehashedSignAndVerifyError> {
handle.sign_prehashed(
digest_type,
digest,
@@ -373,7 +393,7 @@ pub fn op_node_verify(
#[buffer] signature: &[u8],
#[smi] pss_salt_length: Option<u32>,
#[smi] dsa_signature_encoding: u32,
-) -> Result<bool, AnyError> {
+) -> Result<bool, sign::KeyObjectHandlePrehashedSignAndVerifyError> {
handle.verify_prehashed(
digest_type,
digest,
@@ -383,13 +403,21 @@ pub fn op_node_verify(
)
}
+#[derive(Debug, thiserror::Error)]
+pub enum Pbkdf2Error {
+ #[error("unsupported digest: {0}")]
+ UnsupportedDigest(String),
+ #[error(transparent)]
+ Join(#[from] tokio::task::JoinError),
+}
+
fn pbkdf2_sync(
password: &[u8],
salt: &[u8],
iterations: u32,
algorithm_name: &str,
derived_key: &mut [u8],
-) -> Result<(), AnyError> {
+) -> Result<(), Pbkdf2Error> {
match_fixed_digest_with_eager_block_buffer!(
algorithm_name,
fn <D>() {
@@ -397,10 +425,7 @@ fn pbkdf2_sync(
Ok(())
},
_ => {
- Err(type_error(format!(
- "unsupported digest: {}",
- algorithm_name
- )))
+ Err(Pbkdf2Error::UnsupportedDigest(algorithm_name.to_string()))
}
)
}
@@ -424,7 +449,7 @@ pub async fn op_node_pbkdf2_async(
#[smi] iterations: u32,
#[string] digest: String,
#[number] keylen: usize,
-) -> Result<ToJsBuffer, AnyError> {
+) -> Result<ToJsBuffer, Pbkdf2Error> {
spawn_blocking(move || {
let mut derived_key = vec![0; keylen];
pbkdf2_sync(&password, &salt, iterations, &digest, &mut derived_key)
@@ -450,15 +475,27 @@ pub async fn op_node_fill_random_async(#[smi] len: i32) -> ToJsBuffer {
.unwrap()
}
+#[derive(Debug, thiserror::Error)]
+pub enum HkdfError {
+ #[error("expected secret key")]
+ ExpectedSecretKey,
+ #[error("HKDF-Expand failed")]
+ HkdfExpandFailed,
+ #[error("Unsupported digest: {0}")]
+ UnsupportedDigest(String),
+ #[error(transparent)]
+ Join(#[from] tokio::task::JoinError),
+}
+
fn hkdf_sync(
digest_algorithm: &str,
handle: &KeyObjectHandle,
salt: &[u8],
info: &[u8],
okm: &mut [u8],
-) -> Result<(), AnyError> {
+) -> Result<(), HkdfError> {
let Some(ikm) = handle.as_secret_key() else {
- return Err(type_error("expected secret key"));
+ return Err(HkdfError::ExpectedSecretKey);
};
match_fixed_digest_with_eager_block_buffer!(
@@ -466,10 +503,10 @@ fn hkdf_sync(
fn <D>() {
let hk = Hkdf::<D>::new(Some(salt), ikm);
hk.expand(info, okm)
- .map_err(|_| type_error("HKDF-Expand failed"))
+ .map_err(|_| HkdfError::HkdfExpandFailed)
},
_ => {
- Err(type_error(format!("Unsupported digest: {}", digest_algorithm)))
+ Err(HkdfError::UnsupportedDigest(digest_algorithm.to_string()))
}
)
}
@@ -481,7 +518,7 @@ pub fn op_node_hkdf(
#[buffer] salt: &[u8],
#[buffer] info: &[u8],
#[buffer] okm: &mut [u8],
-) -> Result<(), AnyError> {
+) -> Result<(), HkdfError> {
hkdf_sync(digest_algorithm, handle, salt, info, okm)
}
@@ -493,7 +530,7 @@ pub async fn op_node_hkdf_async(
#[buffer] salt: JsBuffer,
#[buffer] info: JsBuffer,
#[number] okm_len: usize,
-) -> Result<ToJsBuffer, AnyError> {
+) -> Result<ToJsBuffer, HkdfError> {
let handle = handle.clone();
spawn_blocking(move || {
let mut okm = vec![0u8; okm_len];
@@ -509,27 +546,24 @@ pub fn op_node_dh_compute_secret(
#[buffer] prime: JsBuffer,
#[buffer] private_key: JsBuffer,
#[buffer] their_public_key: JsBuffer,
-) -> Result<ToJsBuffer, AnyError> {
+) -> ToJsBuffer {
let pubkey: BigUint = BigUint::from_bytes_be(their_public_key.as_ref());
let privkey: BigUint = BigUint::from_bytes_be(private_key.as_ref());
let primei: BigUint = BigUint::from_bytes_be(prime.as_ref());
let shared_secret: BigUint = pubkey.modpow(&privkey, &primei);
- Ok(shared_secret.to_bytes_be().into())
+ shared_secret.to_bytes_be().into()
}
#[op2(fast)]
-#[smi]
-pub fn op_node_random_int(
- #[smi] min: i32,
- #[smi] max: i32,
-) -> Result<i32, AnyError> {
+#[number]
+pub fn op_node_random_int(#[number] min: i64, #[number] max: i64) -> i64 {
let mut rng = rand::thread_rng();
// Uniform distribution is required to avoid Modulo Bias
// https://en.wikipedia.org/wiki/Fisher–Yates_shuffle#Modulo_bias
let dist = Uniform::from(min..max);
- Ok(dist.sample(&mut rng))
+ dist.sample(&mut rng)
}
#[allow(clippy::too_many_arguments)]
@@ -542,7 +576,7 @@ fn scrypt(
parallelization: u32,
_maxmem: u32,
output_buffer: &mut [u8],
-) -> Result<(), AnyError> {
+) -> Result<(), deno_core::error::AnyError> {
// Construct Params
let params = scrypt::Params::new(
cost as u8,
@@ -573,7 +607,7 @@ pub fn op_node_scrypt_sync(
#[smi] parallelization: u32,
#[smi] maxmem: u32,
#[anybuffer] output_buffer: &mut [u8],
-) -> Result<(), AnyError> {
+) -> Result<(), deno_core::error::AnyError> {
scrypt(
password,
salt,
@@ -586,6 +620,14 @@ pub fn op_node_scrypt_sync(
)
}
+#[derive(Debug, thiserror::Error)]
+pub enum ScryptAsyncError {
+ #[error(transparent)]
+ Join(#[from] tokio::task::JoinError),
+ #[error(transparent)]
+ Other(deno_core::error::AnyError),
+}
+
#[op2(async)]
#[serde]
pub async fn op_node_scrypt_async(
@@ -596,10 +638,11 @@ pub async fn op_node_scrypt_async(
#[smi] block_size: u32,
#[smi] parallelization: u32,
#[smi] maxmem: u32,
-) -> Result<ToJsBuffer, AnyError> {
+) -> Result<ToJsBuffer, ScryptAsyncError> {
spawn_blocking(move || {
let mut output_buffer = vec![0u8; keylen as usize];
- let res = scrypt(
+
+ scrypt(
password,
salt,
keylen,
@@ -608,25 +651,30 @@ pub async fn op_node_scrypt_async(
parallelization,
maxmem,
&mut output_buffer,
- );
-
- if res.is_ok() {
- Ok(output_buffer.into())
- } else {
- // TODO(lev): rethrow the error?
- Err(generic_error("scrypt failure"))
- }
+ )
+ .map(|_| output_buffer.into())
+ .map_err(ScryptAsyncError::Other)
})
.await?
}
+#[derive(Debug, thiserror::Error)]
+pub enum EcdhEncodePubKey {
+ #[error("Invalid public key")]
+ InvalidPublicKey,
+ #[error("Unsupported curve")]
+ UnsupportedCurve,
+ #[error(transparent)]
+ Sec1(#[from] sec1::Error),
+}
+
#[op2]
#[buffer]
pub fn op_node_ecdh_encode_pubkey(
#[string] curve: &str,
#[buffer] pubkey: &[u8],
compress: bool,
-) -> Result<Vec<u8>, AnyError> {
+) -> Result<Vec<u8>, EcdhEncodePubKey> {
use elliptic_curve::sec1::FromEncodedPoint;
match curve {
@@ -639,7 +687,7 @@ pub fn op_node_ecdh_encode_pubkey(
);
// CtOption does not expose its variants.
if pubkey.is_none().into() {
- return Err(type_error("Invalid public key"));
+ return Err(EcdhEncodePubKey::InvalidPublicKey);
}
let pubkey = pubkey.unwrap();
@@ -652,7 +700,7 @@ pub fn op_node_ecdh_encode_pubkey(
);
// CtOption does not expose its variants.
if pubkey.is_none().into() {
- return Err(type_error("Invalid public key"));
+ return Err(EcdhEncodePubKey::InvalidPublicKey);
}
let pubkey = pubkey.unwrap();
@@ -665,7 +713,7 @@ pub fn op_node_ecdh_encode_pubkey(
);
// CtOption does not expose its variants.
if pubkey.is_none().into() {
- return Err(type_error("Invalid public key"));
+ return Err(EcdhEncodePubKey::InvalidPublicKey);
}
let pubkey = pubkey.unwrap();
@@ -678,14 +726,14 @@ pub fn op_node_ecdh_encode_pubkey(
);
// CtOption does not expose its variants.
if pubkey.is_none().into() {
- return Err(type_error("Invalid public key"));
+ return Err(EcdhEncodePubKey::InvalidPublicKey);
}
let pubkey = pubkey.unwrap();
Ok(pubkey.to_encoded_point(compress).as_ref().to_vec())
}
- &_ => Err(type_error("Unsupported curve")),
+ &_ => Err(EcdhEncodePubKey::UnsupportedCurve),
}
}
@@ -695,7 +743,7 @@ pub fn op_node_ecdh_generate_keys(
#[buffer] pubbuf: &mut [u8],
#[buffer] privbuf: &mut [u8],
#[string] format: &str,
-) -> Result<(), AnyError> {
+) -> Result<(), deno_core::error::AnyError> {
let mut rng = rand::thread_rng();
let compress = format == "compressed";
match curve {
@@ -742,7 +790,7 @@ pub fn op_node_ecdh_compute_secret(
#[buffer] this_priv: Option<JsBuffer>,
#[buffer] their_pub: &mut [u8],
#[buffer] secret: &mut [u8],
-) -> Result<(), AnyError> {
+) {
match curve {
"secp256k1" => {
let their_public_key =
@@ -760,8 +808,6 @@ pub fn op_node_ecdh_compute_secret(
their_public_key.as_affine(),
);
secret.copy_from_slice(shared_secret.raw_secret_bytes());
-
- Ok(())
}
"prime256v1" | "secp256r1" => {
let their_public_key =
@@ -776,8 +822,6 @@ pub fn op_node_ecdh_compute_secret(
their_public_key.as_affine(),
);
secret.copy_from_slice(shared_secret.raw_secret_bytes());
-
- Ok(())
}
"secp384r1" => {
let their_public_key =
@@ -792,8 +836,6 @@ pub fn op_node_ecdh_compute_secret(
their_public_key.as_affine(),
);
secret.copy_from_slice(shared_secret.raw_secret_bytes());
-
- Ok(())
}
"secp224r1" => {
let their_public_key =
@@ -808,8 +850,6 @@ pub fn op_node_ecdh_compute_secret(
their_public_key.as_affine(),
);
secret.copy_from_slice(shared_secret.raw_secret_bytes());
-
- Ok(())
}
&_ => todo!(),
}
@@ -820,7 +860,7 @@ pub fn op_node_ecdh_compute_public_key(
#[string] curve: &str,
#[buffer] privkey: &[u8],
#[buffer] pubkey: &mut [u8],
-) -> Result<(), AnyError> {
+) {
match curve {
"secp256k1" => {
let this_private_key =
@@ -828,8 +868,6 @@ pub fn op_node_ecdh_compute_public_key(
.expect("bad private key");
let public_key = this_private_key.public_key();
pubkey.copy_from_slice(public_key.to_sec1_bytes().as_ref());
-
- Ok(())
}
"prime256v1" | "secp256r1" => {
let this_private_key =
@@ -837,7 +875,6 @@ pub fn op_node_ecdh_compute_public_key(
.expect("bad private key");
let public_key = this_private_key.public_key();
pubkey.copy_from_slice(public_key.to_sec1_bytes().as_ref());
- Ok(())
}
"secp384r1" => {
let this_private_key =
@@ -845,7 +882,6 @@ pub fn op_node_ecdh_compute_public_key(
.expect("bad private key");
let public_key = this_private_key.public_key();
pubkey.copy_from_slice(public_key.to_sec1_bytes().as_ref());
- Ok(())
}
"secp224r1" => {
let this_private_key =
@@ -853,7 +889,6 @@ pub fn op_node_ecdh_compute_public_key(
.expect("bad private key");
let public_key = this_private_key.public_key();
pubkey.copy_from_slice(public_key.to_sec1_bytes().as_ref());
- Ok(())
}
&_ => todo!(),
}
@@ -874,8 +909,20 @@ pub fn op_node_gen_prime(#[number] size: usize) -> ToJsBuffer {
#[serde]
pub async fn op_node_gen_prime_async(
#[number] size: usize,
-) -> Result<ToJsBuffer, AnyError> {
- Ok(spawn_blocking(move || gen_prime(size)).await?)
+) -> Result<ToJsBuffer, tokio::task::JoinError> {
+ spawn_blocking(move || gen_prime(size)).await
+}
+
+#[derive(Debug, thiserror::Error)]
+pub enum DiffieHellmanError {
+ #[error("Expected private key")]
+ ExpectedPrivateKey,
+ #[error("Expected public key")]
+ ExpectedPublicKey,
+ #[error("DH parameters mismatch")]
+ DhParametersMismatch,
+ #[error("Unsupported key type for diffie hellman, or key type mismatch")]
+ UnsupportedKeyTypeForDiffieHellmanOrKeyTypeMismatch,
}
#[op2]
@@ -883,117 +930,134 @@ pub async fn op_node_gen_prime_async(
pub fn op_node_diffie_hellman(
#[cppgc] private: &KeyObjectHandle,
#[cppgc] public: &KeyObjectHandle,
-) -> Result<Box<[u8]>, AnyError> {
+) -> Result<Box<[u8]>, DiffieHellmanError> {
let private = private
.as_private_key()
- .ok_or_else(|| type_error("Expected private key"))?;
+ .ok_or(DiffieHellmanError::ExpectedPrivateKey)?;
let public = public
.as_public_key()
- .ok_or_else(|| type_error("Expected public key"))?;
-
- let res = match (private, &*public) {
- (
- AsymmetricPrivateKey::Ec(EcPrivateKey::P224(private)),
- AsymmetricPublicKey::Ec(EcPublicKey::P224(public)),
- ) => p224::ecdh::diffie_hellman(
- private.to_nonzero_scalar(),
- public.as_affine(),
- )
- .raw_secret_bytes()
- .to_vec()
- .into_boxed_slice(),
- (
- AsymmetricPrivateKey::Ec(EcPrivateKey::P256(private)),
- AsymmetricPublicKey::Ec(EcPublicKey::P256(public)),
- ) => p256::ecdh::diffie_hellman(
- private.to_nonzero_scalar(),
- public.as_affine(),
- )
- .raw_secret_bytes()
- .to_vec()
- .into_boxed_slice(),
- (
- AsymmetricPrivateKey::Ec(EcPrivateKey::P384(private)),
- AsymmetricPublicKey::Ec(EcPublicKey::P384(public)),
- ) => p384::ecdh::diffie_hellman(
- private.to_nonzero_scalar(),
- public.as_affine(),
- )
- .raw_secret_bytes()
- .to_vec()
- .into_boxed_slice(),
- (
- AsymmetricPrivateKey::X25519(private),
- AsymmetricPublicKey::X25519(public),
- ) => private
- .diffie_hellman(public)
- .to_bytes()
- .into_iter()
- .collect(),
- (AsymmetricPrivateKey::Dh(private), AsymmetricPublicKey::Dh(public)) => {
- if private.params.prime != public.params.prime
- || private.params.base != public.params.base
- {
- return Err(type_error("DH parameters mismatch"));
+ .ok_or(DiffieHellmanError::ExpectedPublicKey)?;
+
+ let res =
+ match (private, &*public) {
+ (
+ AsymmetricPrivateKey::Ec(EcPrivateKey::P224(private)),
+ AsymmetricPublicKey::Ec(EcPublicKey::P224(public)),
+ ) => p224::ecdh::diffie_hellman(
+ private.to_nonzero_scalar(),
+ public.as_affine(),
+ )
+ .raw_secret_bytes()
+ .to_vec()
+ .into_boxed_slice(),
+ (
+ AsymmetricPrivateKey::Ec(EcPrivateKey::P256(private)),
+ AsymmetricPublicKey::Ec(EcPublicKey::P256(public)),
+ ) => p256::ecdh::diffie_hellman(
+ private.to_nonzero_scalar(),
+ public.as_affine(),
+ )
+ .raw_secret_bytes()
+ .to_vec()
+ .into_boxed_slice(),
+ (
+ AsymmetricPrivateKey::Ec(EcPrivateKey::P384(private)),
+ AsymmetricPublicKey::Ec(EcPublicKey::P384(public)),
+ ) => p384::ecdh::diffie_hellman(
+ private.to_nonzero_scalar(),
+ public.as_affine(),
+ )
+ .raw_secret_bytes()
+ .to_vec()
+ .into_boxed_slice(),
+ (
+ AsymmetricPrivateKey::X25519(private),
+ AsymmetricPublicKey::X25519(public),
+ ) => private
+ .diffie_hellman(public)
+ .to_bytes()
+ .into_iter()
+ .collect(),
+ (AsymmetricPrivateKey::Dh(private), AsymmetricPublicKey::Dh(public)) => {
+ if private.params.prime != public.params.prime
+ || private.params.base != public.params.base
+ {
+ return Err(DiffieHellmanError::DhParametersMismatch);
+ }
+
+ // OSIP - Octet-String-to-Integer primitive
+ let public_key = public.key.clone().into_vec();
+ let pubkey = BigUint::from_bytes_be(&public_key);
+
+ // Exponentiation (z = y^x mod p)
+ let prime = BigUint::from_bytes_be(private.params.prime.as_bytes());
+ let private_key = private.key.clone().into_vec();
+ let private_key = BigUint::from_bytes_be(&private_key);
+ let shared_secret = pubkey.modpow(&private_key, &prime);
+
+ shared_secret.to_bytes_be().into()
}
-
- // OSIP - Octet-String-to-Integer primitive
- let public_key = public.key.clone().into_vec();
- let pubkey = BigUint::from_bytes_be(&public_key);
-
- // Exponentiation (z = y^x mod p)
- let prime = BigUint::from_bytes_be(private.params.prime.as_bytes());
- let private_key = private.key.clone().into_vec();
- let private_key = BigUint::from_bytes_be(&private_key);
- let shared_secret = pubkey.modpow(&private_key, &prime);
-
- shared_secret.to_bytes_be().into()
- }
- _ => {
- return Err(type_error(
- "Unsupported key type for diffie hellman, or key type mismatch",
- ))
- }
- };
+ _ => return Err(
+ DiffieHellmanError::UnsupportedKeyTypeForDiffieHellmanOrKeyTypeMismatch,
+ ),
+ };
Ok(res)
}
+#[derive(Debug, thiserror::Error)]
+pub enum SignEd25519Error {
+ #[error("Expected private key")]
+ ExpectedPrivateKey,
+ #[error("Expected Ed25519 private key")]
+ ExpectedEd25519PrivateKey,
+ #[error("Invalid Ed25519 private key")]
+ InvalidEd25519PrivateKey,
+}
+
#[op2(fast)]
pub fn op_node_sign_ed25519(
#[cppgc] key: &KeyObjectHandle,
#[buffer] data: &[u8],
#[buffer] signature: &mut [u8],
-) -> Result<(), AnyError> {
+) -> Result<(), SignEd25519Error> {
let private = key
.as_private_key()
- .ok_or_else(|| type_error("Expected private key"))?;
+ .ok_or(SignEd25519Error::ExpectedPrivateKey)?;
let ed25519 = match private {
AsymmetricPrivateKey::Ed25519(private) => private,
- _ => return Err(type_error("Expected Ed25519 private key")),
+ _ => return Err(SignEd25519Error::ExpectedEd25519PrivateKey),
};
let pair = Ed25519KeyPair::from_seed_unchecked(ed25519.as_bytes().as_slice())
- .map_err(|_| type_error("Invalid Ed25519 private key"))?;
+ .map_err(|_| SignEd25519Error::InvalidEd25519PrivateKey)?;
signature.copy_from_slice(pair.sign(data).as_ref());
Ok(())
}
+#[derive(Debug, thiserror::Error)]
+pub enum VerifyEd25519Error {
+ #[error("Expected public key")]
+ ExpectedPublicKey,
+ #[error("Expected Ed25519 public key")]
+ ExpectedEd25519PublicKey,
+}
+
#[op2(fast)]
pub fn op_node_verify_ed25519(
#[cppgc] key: &KeyObjectHandle,
#[buffer] data: &[u8],
#[buffer] signature: &[u8],
-) -> Result<bool, AnyError> {
+) -> Result<bool, VerifyEd25519Error> {
let public = key
.as_public_key()
- .ok_or_else(|| type_error("Expected public key"))?;
+ .ok_or(VerifyEd25519Error::ExpectedPublicKey)?;
let ed25519 = match &*public {
AsymmetricPublicKey::Ed25519(public) => public,
- _ => return Err(type_error("Expected Ed25519 public key")),
+ _ => return Err(VerifyEd25519Error::ExpectedEd25519PublicKey),
};
let verified = ring::signature::UnparsedPublicKey::new(
diff --git a/ext/node/ops/crypto/sign.rs b/ext/node/ops/crypto/sign.rs
index b7779a5d8..30094c076 100644
--- a/ext/node/ops/crypto/sign.rs
+++ b/ext/node/ops/crypto/sign.rs
@@ -1,7 +1,4 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
-use deno_core::error::generic_error;
-use deno_core::error::type_error;
-use deno_core::error::AnyError;
use rand::rngs::OsRng;
use rsa::signature::hazmat::PrehashSigner as _;
use rsa::signature::hazmat::PrehashVerifier as _;
@@ -26,7 +23,7 @@ use elliptic_curve::FieldBytesSize;
fn dsa_signature<C: elliptic_curve::PrimeCurve>(
encoding: u32,
signature: ecdsa::Signature<C>,
-) -> Result<Box<[u8]>, AnyError>
+) -> Result<Box<[u8]>, KeyObjectHandlePrehashedSignAndVerifyError>
where
MaxSize<C>: ArrayLength<u8>,
<FieldBytesSize<C> as Add>::Output: Add<MaxOverhead> + ArrayLength<u8>,
@@ -36,10 +33,54 @@ where
0 => Ok(signature.to_der().to_bytes().to_vec().into_boxed_slice()),
// IEEE P1363
1 => Ok(signature.to_bytes().to_vec().into_boxed_slice()),
- _ => Err(type_error("invalid DSA signature encoding")),
+ _ => Err(
+ KeyObjectHandlePrehashedSignAndVerifyError::InvalidDsaSignatureEncoding,
+ ),
}
}
+#[derive(Debug, thiserror::Error)]
+pub enum KeyObjectHandlePrehashedSignAndVerifyError {
+ #[error("invalid DSA signature encoding")]
+ InvalidDsaSignatureEncoding,
+ #[error("key is not a private key")]
+ KeyIsNotPrivate,
+ #[error("digest not allowed for RSA signature: {0}")]
+ DigestNotAllowedForRsaSignature(String),
+ #[error("failed to sign digest with RSA")]
+ FailedToSignDigestWithRsa,
+ #[error("digest not allowed for RSA-PSS signature: {0}")]
+ DigestNotAllowedForRsaPssSignature(String),
+ #[error("failed to sign digest with RSA-PSS")]
+ FailedToSignDigestWithRsaPss,
+ #[error("failed to sign digest with DSA")]
+ FailedToSignDigestWithDsa,
+ #[error("rsa-pss with different mf1 hash algorithm and hash algorithm is not supported")]
+ RsaPssHashAlgorithmUnsupported,
+ #[error(
+ "private key does not allow {actual} to be used, expected {expected}"
+ )]
+ PrivateKeyDisallowsUsage { actual: String, expected: String },
+ #[error("failed to sign digest")]
+ FailedToSignDigest,
+ #[error("x25519 key cannot be used for signing")]
+ X25519KeyCannotBeUsedForSigning,
+ #[error("Ed25519 key cannot be used for prehashed signing")]
+ Ed25519KeyCannotBeUsedForPrehashedSigning,
+ #[error("DH key cannot be used for signing")]
+ DhKeyCannotBeUsedForSigning,
+ #[error("key is not a public or private key")]
+ KeyIsNotPublicOrPrivate,
+ #[error("Invalid DSA signature")]
+ InvalidDsaSignature,
+ #[error("x25519 key cannot be used for verification")]
+ X25519KeyCannotBeUsedForVerification,
+ #[error("Ed25519 key cannot be used for prehashed verification")]
+ Ed25519KeyCannotBeUsedForPrehashedVerification,
+ #[error("DH key cannot be used for verification")]
+ DhKeyCannotBeUsedForVerification,
+}
+
impl KeyObjectHandle {
pub fn sign_prehashed(
&self,
@@ -47,10 +88,10 @@ impl KeyObjectHandle {
digest: &[u8],
pss_salt_length: Option<u32>,
dsa_signature_encoding: u32,
- ) -> Result<Box<[u8]>, AnyError> {
+ ) -> Result<Box<[u8]>, KeyObjectHandlePrehashedSignAndVerifyError> {
let private_key = self
.as_private_key()
- .ok_or_else(|| type_error("key is not a private key"))?;
+ .ok_or(KeyObjectHandlePrehashedSignAndVerifyError::KeyIsNotPrivate)?;
match private_key {
AsymmetricPrivateKey::Rsa(key) => {
@@ -63,34 +104,26 @@ impl KeyObjectHandle {
rsa::pkcs1v15::Pkcs1v15Sign::new::<D>()
},
_ => {
- return Err(type_error(format!(
- "digest not allowed for RSA signature: {}",
- digest_type
- )))
+ return Err(KeyObjectHandlePrehashedSignAndVerifyError::DigestNotAllowedForRsaSignature(digest_type.to_string()))
}
)
};
let signature = signer
.sign(Some(&mut OsRng), key, digest)
- .map_err(|_| generic_error("failed to sign digest with RSA"))?;
+ .map_err(|_| KeyObjectHandlePrehashedSignAndVerifyError::FailedToSignDigestWithRsa)?;
Ok(signature.into())
}
AsymmetricPrivateKey::RsaPss(key) => {
let mut hash_algorithm = None;
let mut salt_length = None;
- match &key.details {
- Some(details) => {
- if details.hash_algorithm != details.mf1_hash_algorithm {
- return Err(type_error(
- "rsa-pss with different mf1 hash algorithm and hash algorithm is not supported",
- ));
- }
- hash_algorithm = Some(details.hash_algorithm);
- salt_length = Some(details.salt_length as usize);
+ if let Some(details) = &key.details {
+ if details.hash_algorithm != details.mf1_hash_algorithm {
+ return Err(KeyObjectHandlePrehashedSignAndVerifyError::RsaPssHashAlgorithmUnsupported);
}
- None => {}
- };
+ hash_algorithm = Some(details.hash_algorithm);
+ salt_length = Some(details.salt_length as usize);
+ }
if let Some(s) = pss_salt_length {
salt_length = Some(s as usize);
}
@@ -99,10 +132,10 @@ impl KeyObjectHandle {
fn <D>(algorithm: Option<RsaPssHashAlgorithm>) {
if let Some(hash_algorithm) = hash_algorithm.take() {
if Some(hash_algorithm) != algorithm {
- return Err(type_error(format!(
- "private key does not allow {} to be used, expected {}",
- digest_type, hash_algorithm.as_str()
- )));
+ return Err(KeyObjectHandlePrehashedSignAndVerifyError::PrivateKeyDisallowsUsage {
+ actual: digest_type.to_string(),
+ expected: hash_algorithm.as_str().to_string(),
+ });
}
}
if let Some(salt_length) = salt_length {
@@ -112,15 +145,12 @@ impl KeyObjectHandle {
}
},
_ => {
- return Err(type_error(format!(
- "digest not allowed for RSA-PSS signature: {}",
- digest_type
- )))
+ return Err(KeyObjectHandlePrehashedSignAndVerifyError::DigestNotAllowedForRsaPssSignature(digest_type.to_string()));
}
);
let signature = pss
.sign(Some(&mut OsRng), &key.key, digest)
- .map_err(|_| generic_error("failed to sign digest with RSA-PSS"))?;
+ .map_err(|_| KeyObjectHandlePrehashedSignAndVerifyError::FailedToSignDigestWithRsaPss)?;
Ok(signature.into())
}
AsymmetricPrivateKey::Dsa(key) => {
@@ -130,15 +160,12 @@ impl KeyObjectHandle {
key.sign_prehashed_rfc6979::<D>(digest)
},
_ => {
- return Err(type_error(format!(
- "digest not allowed for RSA signature: {}",
- digest_type
- )))
+ return Err(KeyObjectHandlePrehashedSignAndVerifyError::DigestNotAllowedForRsaSignature(digest_type.to_string()))
}
);
let signature =
- res.map_err(|_| generic_error("failed to sign digest with DSA"))?;
+ res.map_err(|_| KeyObjectHandlePrehashedSignAndVerifyError::FailedToSignDigestWithDsa)?;
Ok(signature.into())
}
AsymmetricPrivateKey::Ec(key) => match key {
@@ -146,7 +173,7 @@ impl KeyObjectHandle {
let signing_key = p224::ecdsa::SigningKey::from(key);
let signature: p224::ecdsa::Signature = signing_key
.sign_prehash(digest)
- .map_err(|_| type_error("failed to sign digest"))?;
+ .map_err(|_| KeyObjectHandlePrehashedSignAndVerifyError::FailedToSignDigest)?;
dsa_signature(dsa_signature_encoding, signature)
}
@@ -154,7 +181,7 @@ impl KeyObjectHandle {
let signing_key = p256::ecdsa::SigningKey::from(key);
let signature: p256::ecdsa::Signature = signing_key
.sign_prehash(digest)
- .map_err(|_| type_error("failed to sign digest"))?;
+ .map_err(|_| KeyObjectHandlePrehashedSignAndVerifyError::FailedToSignDigest)?;
dsa_signature(dsa_signature_encoding, signature)
}
@@ -162,19 +189,17 @@ impl KeyObjectHandle {
let signing_key = p384::ecdsa::SigningKey::from(key);
let signature: p384::ecdsa::Signature = signing_key
.sign_prehash(digest)
- .map_err(|_| type_error("failed to sign digest"))?;
+ .map_err(|_| KeyObjectHandlePrehashedSignAndVerifyError::FailedToSignDigest)?;
dsa_signature(dsa_signature_encoding, signature)
}
},
AsymmetricPrivateKey::X25519(_) => {
- Err(type_error("x25519 key cannot be used for signing"))
+ Err(KeyObjectHandlePrehashedSignAndVerifyError::X25519KeyCannotBeUsedForSigning)
}
- AsymmetricPrivateKey::Ed25519(_) => Err(type_error(
- "Ed25519 key cannot be used for prehashed signing",
- )),
+ AsymmetricPrivateKey::Ed25519(_) => Err(KeyObjectHandlePrehashedSignAndVerifyError::Ed25519KeyCannotBeUsedForPrehashedSigning),
AsymmetricPrivateKey::Dh(_) => {
- Err(type_error("DH key cannot be used for signing"))
+ Err(KeyObjectHandlePrehashedSignAndVerifyError::DhKeyCannotBeUsedForSigning)
}
}
}
@@ -186,10 +211,10 @@ impl KeyObjectHandle {
signature: &[u8],
pss_salt_length: Option<u32>,
dsa_signature_encoding: u32,
- ) -> Result<bool, AnyError> {
- let public_key = self
- .as_public_key()
- .ok_or_else(|| type_error("key is not a public or private key"))?;
+ ) -> Result<bool, KeyObjectHandlePrehashedSignAndVerifyError> {
+ let public_key = self.as_public_key().ok_or(
+ KeyObjectHandlePrehashedSignAndVerifyError::KeyIsNotPublicOrPrivate,
+ )?;
match &*public_key {
AsymmetricPublicKey::Rsa(key) => {
@@ -202,10 +227,7 @@ impl KeyObjectHandle {
rsa::pkcs1v15::Pkcs1v15Sign::new::<D>()
},
_ => {
- return Err(type_error(format!(
- "digest not allowed for RSA signature: {}",
- digest_type
- )))
+ return Err(KeyObjectHandlePrehashedSignAndVerifyError::DigestNotAllowedForRsaSignature(digest_type.to_string()))
}
)
};
@@ -215,18 +237,13 @@ impl KeyObjectHandle {
AsymmetricPublicKey::RsaPss(key) => {
let mut hash_algorithm = None;
let mut salt_length = None;
- match &key.details {
- Some(details) => {
- if details.hash_algorithm != details.mf1_hash_algorithm {
- return Err(type_error(
- "rsa-pss with different mf1 hash algorithm and hash algorithm is not supported",
- ));
- }
- hash_algorithm = Some(details.hash_algorithm);
- salt_length = Some(details.salt_length as usize);
+ if let Some(details) = &key.details {
+ if details.hash_algorithm != details.mf1_hash_algorithm {
+ return Err(KeyObjectHandlePrehashedSignAndVerifyError::RsaPssHashAlgorithmUnsupported);
}
- None => {}
- };
+ hash_algorithm = Some(details.hash_algorithm);
+ salt_length = Some(details.salt_length as usize);
+ }
if let Some(s) = pss_salt_length {
salt_length = Some(s as usize);
}
@@ -235,10 +252,10 @@ impl KeyObjectHandle {
fn <D>(algorithm: Option<RsaPssHashAlgorithm>) {
if let Some(hash_algorithm) = hash_algorithm.take() {
if Some(hash_algorithm) != algorithm {
- return Err(type_error(format!(
- "private key does not allow {} to be used, expected {}",
- digest_type, hash_algorithm.as_str()
- )));
+ return Err(KeyObjectHandlePrehashedSignAndVerifyError::PrivateKeyDisallowsUsage {
+ actual: digest_type.to_string(),
+ expected: hash_algorithm.as_str().to_string(),
+ });
}
}
if let Some(salt_length) = salt_length {
@@ -248,17 +265,14 @@ impl KeyObjectHandle {
}
},
_ => {
- return Err(type_error(format!(
- "digest not allowed for RSA-PSS signature: {}",
- digest_type
- )))
+ return Err(KeyObjectHandlePrehashedSignAndVerifyError::DigestNotAllowedForRsaPssSignature(digest_type.to_string()));
}
);
Ok(pss.verify(&key.key, digest, signature).is_ok())
}
AsymmetricPublicKey::Dsa(key) => {
let signature = dsa::Signature::from_der(signature)
- .map_err(|_| type_error("Invalid DSA signature"))?;
+ .map_err(|_| KeyObjectHandlePrehashedSignAndVerifyError::InvalidDsaSignature)?;
Ok(key.verify_prehash(digest, &signature).is_ok())
}
AsymmetricPublicKey::Ec(key) => match key {
@@ -300,13 +314,11 @@ impl KeyObjectHandle {
}
},
AsymmetricPublicKey::X25519(_) => {
- Err(type_error("x25519 key cannot be used for verification"))
+ Err(KeyObjectHandlePrehashedSignAndVerifyError::X25519KeyCannotBeUsedForVerification)
}
- AsymmetricPublicKey::Ed25519(_) => Err(type_error(
- "Ed25519 key cannot be used for prehashed verification",
- )),
+ AsymmetricPublicKey::Ed25519(_) => Err(KeyObjectHandlePrehashedSignAndVerifyError::Ed25519KeyCannotBeUsedForPrehashedVerification),
AsymmetricPublicKey::Dh(_) => {
- Err(type_error("DH key cannot be used for verification"))
+ Err(KeyObjectHandlePrehashedSignAndVerifyError::DhKeyCannotBeUsedForVerification)
}
}
}
diff --git a/ext/node/ops/crypto/x509.rs b/ext/node/ops/crypto/x509.rs
index b44ff3a4b..ab8e52f70 100644
--- a/ext/node/ops/crypto/x509.rs
+++ b/ext/node/ops/crypto/x509.rs
@@ -1,11 +1,11 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
-use deno_core::error::AnyError;
use deno_core::op2;
use x509_parser::der_parser::asn1_rs::Any;
use x509_parser::der_parser::asn1_rs::Tag;
use x509_parser::der_parser::oid::Oid;
+pub use x509_parser::error::X509Error;
use x509_parser::extensions;
use x509_parser::pem;
use x509_parser::prelude::*;
@@ -65,7 +65,7 @@ impl<'a> Deref for CertificateView<'a> {
#[cppgc]
pub fn op_node_x509_parse(
#[buffer] buf: &[u8],
-) -> Result<Certificate, AnyError> {
+) -> Result<Certificate, X509Error> {
let source = match pem::parse_x509_pem(buf) {
Ok((_, pem)) => CertificateSources::Pem(pem),
Err(_) => CertificateSources::Der(buf.to_vec().into_boxed_slice()),
@@ -81,7 +81,7 @@ pub fn op_node_x509_parse(
X509Certificate::from_der(buf).map(|(_, cert)| cert)?
}
};
- Ok::<_, AnyError>(CertificateView { cert })
+ Ok::<_, X509Error>(CertificateView { cert })
},
)?;
@@ -89,23 +89,23 @@ pub fn op_node_x509_parse(
}
#[op2(fast)]
-pub fn op_node_x509_ca(#[cppgc] cert: &Certificate) -> Result<bool, AnyError> {
+pub fn op_node_x509_ca(#[cppgc] cert: &Certificate) -> bool {
let cert = cert.inner.get().deref();
- Ok(cert.is_ca())
+ cert.is_ca()
}
#[op2(fast)]
pub fn op_node_x509_check_email(
#[cppgc] cert: &Certificate,
#[string] email: &str,
-) -> Result<bool, AnyError> {
+) -> bool {
let cert = cert.inner.get().deref();
let subject = cert.subject();
if subject
.iter_email()
.any(|e| e.as_str().unwrap_or("") == email)
{
- return Ok(true);
+ return true;
}
let subject_alt = cert
@@ -121,62 +121,60 @@ pub fn op_node_x509_check_email(
for name in &subject_alt.general_names {
if let extensions::GeneralName::RFC822Name(n) = name {
if *n == email {
- return Ok(true);
+ return true;
}
}
}
}
- Ok(false)
+ false
}
#[op2]
#[string]
-pub fn op_node_x509_fingerprint(
- #[cppgc] cert: &Certificate,
-) -> Result<Option<String>, AnyError> {
- Ok(cert.fingerprint::<sha1::Sha1>())
+pub fn op_node_x509_fingerprint(#[cppgc] cert: &Certificate) -> Option<String> {
+ cert.fingerprint::<sha1::Sha1>()
}
#[op2]
#[string]
pub fn op_node_x509_fingerprint256(
#[cppgc] cert: &Certificate,
-) -> Result<Option<String>, AnyError> {
- Ok(cert.fingerprint::<sha2::Sha256>())
+) -> Option<String> {
+ cert.fingerprint::<sha2::Sha256>()
}
#[op2]
#[string]
pub fn op_node_x509_fingerprint512(
#[cppgc] cert: &Certificate,
-) -> Result<Option<String>, AnyError> {
- Ok(cert.fingerprint::<sha2::Sha512>())
+) -> Option<String> {
+ cert.fingerprint::<sha2::Sha512>()
}
#[op2]
#[string]
pub fn op_node_x509_get_issuer(
#[cppgc] cert: &Certificate,
-) -> Result<String, AnyError> {
+) -> Result<String, X509Error> {
let cert = cert.inner.get().deref();
- Ok(x509name_to_string(cert.issuer(), oid_registry())?)
+ x509name_to_string(cert.issuer(), oid_registry())
}
#[op2]
#[string]
pub fn op_node_x509_get_subject(
#[cppgc] cert: &Certificate,
-) -> Result<String, AnyError> {
+) -> Result<String, X509Error> {
let cert = cert.inner.get().deref();
- Ok(x509name_to_string(cert.subject(), oid_registry())?)
+ x509name_to_string(cert.subject(), oid_registry())
}
#[op2]
#[cppgc]
pub fn op_node_x509_public_key(
#[cppgc] cert: &Certificate,
-) -> Result<KeyObjectHandle, AnyError> {
+) -> Result<KeyObjectHandle, super::keys::X509PublicKeyError> {
let cert = cert.inner.get().deref();
let public_key = &cert.tbs_certificate.subject_pki;
@@ -245,37 +243,29 @@ fn x509name_to_string(
#[op2]
#[string]
-pub fn op_node_x509_get_valid_from(
- #[cppgc] cert: &Certificate,
-) -> Result<String, AnyError> {
+pub fn op_node_x509_get_valid_from(#[cppgc] cert: &Certificate) -> String {
let cert = cert.inner.get().deref();
- Ok(cert.validity().not_before.to_string())
+ cert.validity().not_before.to_string()
}
#[op2]
#[string]
-pub fn op_node_x509_get_valid_to(
- #[cppgc] cert: &Certificate,
-) -> Result<String, AnyError> {
+pub fn op_node_x509_get_valid_to(#[cppgc] cert: &Certificate) -> String {
let cert = cert.inner.get().deref();
- Ok(cert.validity().not_after.to_string())
+ cert.validity().not_after.to_string()
}
#[op2]
#[string]
-pub fn op_node_x509_get_serial_number(
- #[cppgc] cert: &Certificate,
-) -> Result<String, AnyError> {
+pub fn op_node_x509_get_serial_number(#[cppgc] cert: &Certificate) -> String {
let cert = cert.inner.get().deref();
let mut s = cert.serial.to_str_radix(16);
s.make_ascii_uppercase();
- Ok(s)
+ s
}
#[op2(fast)]
-pub fn op_node_x509_key_usage(
- #[cppgc] cert: &Certificate,
-) -> Result<u16, AnyError> {
+pub fn op_node_x509_key_usage(#[cppgc] cert: &Certificate) -> u16 {
let cert = cert.inner.get().deref();
let key_usage = cert
.extensions()
@@ -286,5 +276,5 @@ pub fn op_node_x509_key_usage(
_ => None,
});
- Ok(key_usage.map(|k| k.flags).unwrap_or(0))
+ key_usage.map(|k| k.flags).unwrap_or(0)
}
diff --git a/ext/node/ops/fs.rs b/ext/node/ops/fs.rs
index 6253f32d0..9c0e4e1cc 100644
--- a/ext/node/ops/fs.rs
+++ b/ext/node/ops/fs.rs
@@ -3,7 +3,6 @@
use std::cell::RefCell;
use std::rc::Rc;
-use deno_core::error::AnyError;
use deno_core::op2;
use deno_core::OpState;
use deno_fs::FileSystemRc;
@@ -11,11 +10,27 @@ use serde::Serialize;
use crate::NodePermissions;
+#[derive(Debug, thiserror::Error)]
+pub enum FsError {
+ #[error(transparent)]
+ Permission(#[from] deno_permissions::PermissionCheckError),
+ #[error("{0}")]
+ Io(#[from] std::io::Error),
+ #[cfg(windows)]
+ #[error("Path has no root.")]
+ PathHasNoRoot,
+ #[cfg(not(any(unix, windows)))]
+ #[error("Unsupported platform.")]
+ UnsupportedPlatform,
+ #[error(transparent)]
+ Fs(#[from] deno_io::fs::FsError),
+}
+
#[op2(fast)]
pub fn op_node_fs_exists_sync<P>(
state: &mut OpState,
#[string] path: String,
-) -> Result<bool, AnyError>
+) -> Result<bool, deno_core::error::AnyError>
where
P: NodePermissions + 'static,
{
@@ -30,7 +45,7 @@ where
pub async fn op_node_fs_exists<P>(
state: Rc<RefCell<OpState>>,
#[string] path: String,
-) -> Result<bool, AnyError>
+) -> Result<bool, FsError>
where
P: NodePermissions + 'static,
{
@@ -50,7 +65,7 @@ pub fn op_node_cp_sync<P>(
state: &mut OpState,
#[string] path: &str,
#[string] new_path: &str,
-) -> Result<(), AnyError>
+) -> Result<(), FsError>
where
P: NodePermissions + 'static,
{
@@ -71,7 +86,7 @@ pub async fn op_node_cp<P>(
state: Rc<RefCell<OpState>>,
#[string] path: String,
#[string] new_path: String,
-) -> Result<(), AnyError>
+) -> Result<(), FsError>
where
P: NodePermissions + 'static,
{
@@ -108,7 +123,7 @@ pub fn op_node_statfs<P>(
state: Rc<RefCell<OpState>>,
#[string] path: String,
bigint: bool,
-) -> Result<StatFs, AnyError>
+) -> Result<StatFs, FsError>
where
P: NodePermissions + 'static,
{
@@ -130,13 +145,21 @@ where
let mut cpath = path.as_bytes().to_vec();
cpath.push(0);
if bigint {
- #[cfg(not(target_os = "macos"))]
+ #[cfg(not(any(
+ target_os = "macos",
+ target_os = "freebsd",
+ target_os = "openbsd"
+ )))]
// SAFETY: `cpath` is NUL-terminated and result is pointer to valid statfs memory.
let (code, result) = unsafe {
let mut result: libc::statfs64 = std::mem::zeroed();
(libc::statfs64(cpath.as_ptr() as _, &mut result), result)
};
- #[cfg(target_os = "macos")]
+ #[cfg(any(
+ target_os = "macos",
+ target_os = "freebsd",
+ target_os = "openbsd"
+ ))]
// SAFETY: `cpath` is NUL-terminated and result is pointer to valid statfs memory.
let (code, result) = unsafe {
let mut result: libc::statfs = std::mem::zeroed();
@@ -146,7 +169,10 @@ where
return Err(std::io::Error::last_os_error().into());
}
Ok(StatFs {
+ #[cfg(not(target_os = "openbsd"))]
typ: result.f_type as _,
+ #[cfg(target_os = "openbsd")]
+ typ: 0 as _,
bsize: result.f_bsize as _,
blocks: result.f_blocks as _,
bfree: result.f_bfree as _,
@@ -164,7 +190,10 @@ where
return Err(std::io::Error::last_os_error().into());
}
Ok(StatFs {
+ #[cfg(not(target_os = "openbsd"))]
typ: result.f_type as _,
+ #[cfg(target_os = "openbsd")]
+ typ: 0 as _,
bsize: result.f_bsize as _,
blocks: result.f_blocks as _,
bfree: result.f_bfree as _,
@@ -176,7 +205,6 @@ where
}
#[cfg(windows)]
{
- use deno_core::anyhow::anyhow;
use std::ffi::OsStr;
use std::os::windows::ffi::OsStrExt;
use windows_sys::Win32::Storage::FileSystem::GetDiskFreeSpaceW;
@@ -186,10 +214,7 @@ where
// call below.
#[allow(clippy::disallowed_methods)]
let path = path.canonicalize()?;
- let root = path
- .ancestors()
- .last()
- .ok_or(anyhow!("Path has no root."))?;
+ let root = path.ancestors().last().ok_or(FsError::PathHasNoRoot)?;
let mut root = OsStr::new(root).encode_wide().collect::<Vec<_>>();
root.push(0);
let mut sectors_per_cluster = 0;
@@ -229,7 +254,7 @@ where
{
let _ = path;
let _ = bigint;
- Err(anyhow!("Unsupported platform."))
+ Err(FsError::UnsupportedPlatform)
}
}
@@ -241,7 +266,7 @@ pub fn op_node_lutimes_sync<P>(
#[smi] atime_nanos: u32,
#[number] mtime_secs: i64,
#[smi] mtime_nanos: u32,
-) -> Result<(), AnyError>
+) -> Result<(), FsError>
where
P: NodePermissions + 'static,
{
@@ -262,7 +287,7 @@ pub async fn op_node_lutimes<P>(
#[smi] atime_nanos: u32,
#[number] mtime_secs: i64,
#[smi] mtime_nanos: u32,
-) -> Result<(), AnyError>
+) -> Result<(), FsError>
where
P: NodePermissions + 'static,
{
@@ -286,7 +311,7 @@ pub fn op_node_lchown_sync<P>(
#[string] path: String,
uid: Option<u32>,
gid: Option<u32>,
-) -> Result<(), AnyError>
+) -> Result<(), FsError>
where
P: NodePermissions + 'static,
{
@@ -304,7 +329,7 @@ pub async fn op_node_lchown<P>(
#[string] path: String,
uid: Option<u32>,
gid: Option<u32>,
-) -> Result<(), AnyError>
+) -> Result<(), FsError>
where
P: NodePermissions + 'static,
{
diff --git a/ext/node/ops/http.rs b/ext/node/ops/http.rs
index 773902ded..69571078f 100644
--- a/ext/node/ops/http.rs
+++ b/ext/node/ops/http.rs
@@ -8,14 +8,12 @@ use std::task::Context;
use std::task::Poll;
use bytes::Bytes;
-use deno_core::anyhow;
-use deno_core::error::type_error;
-use deno_core::error::AnyError;
use deno_core::futures::stream::Peekable;
use deno_core::futures::Future;
use deno_core::futures::FutureExt;
use deno_core::futures::Stream;
use deno_core::futures::StreamExt;
+use deno_core::futures::TryFutureExt;
use deno_core::op2;
use deno_core::serde::Serialize;
use deno_core::unsync::spawn;
@@ -33,6 +31,7 @@ use deno_core::Resource;
use deno_core::ResourceId;
use deno_fetch::get_or_create_client_from_state;
use deno_fetch::FetchCancelHandle;
+use deno_fetch::FetchError;
use deno_fetch::FetchRequestResource;
use deno_fetch::FetchReturn;
use deno_fetch::HttpClientResource;
@@ -59,12 +58,15 @@ pub fn op_node_http_request<P>(
#[serde] headers: Vec<(ByteString, ByteString)>,
#[smi] client_rid: Option<u32>,
#[smi] body: Option<ResourceId>,
-) -> Result<FetchReturn, AnyError>
+) -> Result<FetchReturn, FetchError>
where
P: crate::NodePermissions + 'static,
{
let client = if let Some(rid) = client_rid {
- let r = state.resource_table.get::<HttpClientResource>(rid)?;
+ let r = state
+ .resource_table
+ .get::<HttpClientResource>(rid)
+ .map_err(FetchError::Resource)?;
r.client.clone()
} else {
get_or_create_client_from_state(state)?
@@ -81,10 +83,8 @@ where
let mut header_map = HeaderMap::new();
for (key, value) in headers {
- let name = HeaderName::from_bytes(&key)
- .map_err(|err| type_error(err.to_string()))?;
- let v = HeaderValue::from_bytes(&value)
- .map_err(|err| type_error(err.to_string()))?;
+ let name = HeaderName::from_bytes(&key)?;
+ let v = HeaderValue::from_bytes(&value)?;
header_map.append(name, v);
}
@@ -92,7 +92,10 @@ where
let (body, con_len) = if let Some(body) = body {
(
BodyExt::boxed(NodeHttpResourceToBodyAdapter::new(
- state.resource_table.take_any(body)?,
+ state
+ .resource_table
+ .take_any(body)
+ .map_err(FetchError::Resource)?,
)),
None,
)
@@ -117,7 +120,7 @@ where
*request.uri_mut() = url
.as_str()
.parse()
- .map_err(|_| type_error("Invalid URL"))?;
+ .map_err(|_| FetchError::InvalidUrl(url.clone()))?;
*request.headers_mut() = header_map;
if let Some((username, password)) = maybe_authority {
@@ -136,9 +139,9 @@ where
let fut = async move {
client
.send(request)
+ .map_err(Into::into)
.or_cancel(cancel_handle_)
.await
- .map(|res| res.map_err(|err| type_error(err.to_string())))
};
let request_rid = state.resource_table.add(FetchRequestResource {
@@ -174,11 +177,12 @@ pub struct NodeHttpFetchResponse {
pub async fn op_node_http_fetch_send(
state: Rc<RefCell<OpState>>,
#[smi] rid: ResourceId,
-) -> Result<NodeHttpFetchResponse, AnyError> {
+) -> Result<NodeHttpFetchResponse, FetchError> {
let request = state
.borrow_mut()
.resource_table
- .take::<FetchRequestResource>(rid)?;
+ .take::<FetchRequestResource>(rid)
+ .map_err(FetchError::Resource)?;
let request = Rc::try_unwrap(request)
.ok()
@@ -191,22 +195,23 @@ pub async fn op_node_http_fetch_send(
// If any error in the chain is a hyper body error, return that as a special result we can use to
// reconstruct an error chain (eg: `new TypeError(..., { cause: new Error(...) })`).
// TODO(mmastrac): it would be a lot easier if we just passed a v8::Global through here instead
- let mut err_ref: &dyn std::error::Error = err.as_ref();
- while let Some(err) = std::error::Error::source(err_ref) {
- if let Some(err) = err.downcast_ref::<hyper::Error>() {
- if let Some(err) = std::error::Error::source(err) {
- return Ok(NodeHttpFetchResponse {
- error: Some(err.to_string()),
- ..Default::default()
- });
+
+ if let FetchError::ClientSend(err_src) = &err {
+ if let Some(client_err) = std::error::Error::source(&err_src.source) {
+ if let Some(err_src) = client_err.downcast_ref::<hyper::Error>() {
+ if let Some(err_src) = std::error::Error::source(err_src) {
+ return Ok(NodeHttpFetchResponse {
+ error: Some(err_src.to_string()),
+ ..Default::default()
+ });
+ }
}
}
- err_ref = err;
}
- return Err(type_error(err.to_string()));
+ return Err(err);
}
- Err(_) => return Err(type_error("request was cancelled")),
+ Err(_) => return Err(FetchError::RequestCanceled),
};
let status = res.status();
@@ -250,11 +255,12 @@ pub async fn op_node_http_fetch_send(
pub async fn op_node_http_fetch_response_upgrade(
state: Rc<RefCell<OpState>>,
#[smi] rid: ResourceId,
-) -> Result<ResourceId, AnyError> {
+) -> Result<ResourceId, FetchError> {
let raw_response = state
.borrow_mut()
.resource_table
- .take::<NodeHttpFetchResponseResource>(rid)?;
+ .take::<NodeHttpFetchResponseResource>(rid)
+ .map_err(FetchError::Resource)?;
let raw_response = Rc::try_unwrap(raw_response)
.expect("Someone is holding onto NodeHttpFetchResponseResource");
@@ -277,7 +283,7 @@ pub async fn op_node_http_fetch_response_upgrade(
}
read_tx.write_all(&buf[..read]).await?;
}
- Ok::<_, AnyError>(())
+ Ok::<_, FetchError>(())
});
spawn(async move {
let mut buf = [0; 1024];
@@ -288,7 +294,7 @@ pub async fn op_node_http_fetch_response_upgrade(
}
upgraded_tx.write_all(&buf[..read]).await?;
}
- Ok::<_, AnyError>(())
+ Ok::<_, FetchError>(())
});
}
@@ -318,23 +324,26 @@ impl UpgradeStream {
}
}
- async fn read(self: Rc<Self>, buf: &mut [u8]) -> Result<usize, AnyError> {
+ async fn read(
+ self: Rc<Self>,
+ buf: &mut [u8],
+ ) -> Result<usize, std::io::Error> {
let cancel_handle = RcRef::map(self.clone(), |this| &this.cancel_handle);
async {
let read = RcRef::map(self, |this| &this.read);
let mut read = read.borrow_mut().await;
- Ok(Pin::new(&mut *read).read(buf).await?)
+ Pin::new(&mut *read).read(buf).await
}
.try_or_cancel(cancel_handle)
.await
}
- async fn write(self: Rc<Self>, buf: &[u8]) -> Result<usize, AnyError> {
+ async fn write(self: Rc<Self>, buf: &[u8]) -> Result<usize, std::io::Error> {
let cancel_handle = RcRef::map(self.clone(), |this| &this.cancel_handle);
async {
let write = RcRef::map(self, |this| &this.write);
let mut write = write.borrow_mut().await;
- Ok(Pin::new(&mut *write).write(buf).await?)
+ Pin::new(&mut *write).write(buf).await
}
.try_or_cancel(cancel_handle)
.await
@@ -387,7 +396,7 @@ impl NodeHttpFetchResponseResource {
}
}
- pub async fn upgrade(self) -> Result<hyper::upgrade::Upgraded, AnyError> {
+ pub async fn upgrade(self) -> Result<hyper::upgrade::Upgraded, hyper::Error> {
let reader = self.response_reader.into_inner();
match reader {
NodeHttpFetchResponseReader::Start(resp) => {
@@ -445,7 +454,9 @@ impl Resource for NodeHttpFetchResponseResource {
// safely call `await` on it without creating a race condition.
Some(_) => match reader.as_mut().next().await.unwrap() {
Ok(chunk) => assert!(chunk.is_empty()),
- Err(err) => break Err(type_error(err.to_string())),
+ Err(err) => {
+ break Err(deno_core::error::type_error(err.to_string()))
+ }
},
None => break Ok(BufView::empty()),
}
@@ -453,7 +464,7 @@ impl Resource for NodeHttpFetchResponseResource {
};
let cancel_handle = RcRef::map(self, |r| &r.cancel);
- fut.try_or_cancel(cancel_handle).await
+ fut.try_or_cancel(cancel_handle).await.map_err(Into::into)
})
}
@@ -469,7 +480,9 @@ impl Resource for NodeHttpFetchResponseResource {
#[allow(clippy::type_complexity)]
pub struct NodeHttpResourceToBodyAdapter(
Rc<dyn Resource>,
- Option<Pin<Box<dyn Future<Output = Result<BufView, anyhow::Error>>>>>,
+ Option<
+ Pin<Box<dyn Future<Output = Result<BufView, deno_core::anyhow::Error>>>>,
+ >,
);
impl NodeHttpResourceToBodyAdapter {
@@ -485,7 +498,7 @@ unsafe impl Send for NodeHttpResourceToBodyAdapter {}
unsafe impl Sync for NodeHttpResourceToBodyAdapter {}
impl Stream for NodeHttpResourceToBodyAdapter {
- type Item = Result<Bytes, anyhow::Error>;
+ type Item = Result<Bytes, deno_core::anyhow::Error>;
fn poll_next(
self: Pin<&mut Self>,
@@ -515,7 +528,7 @@ impl Stream for NodeHttpResourceToBodyAdapter {
impl hyper::body::Body for NodeHttpResourceToBodyAdapter {
type Data = Bytes;
- type Error = anyhow::Error;
+ type Error = deno_core::anyhow::Error;
fn poll_frame(
self: Pin<&mut Self>,
diff --git a/ext/node/ops/http2.rs b/ext/node/ops/http2.rs
index 9595cb33d..53dada9f4 100644
--- a/ext/node/ops/http2.rs
+++ b/ext/node/ops/http2.rs
@@ -7,7 +7,6 @@ use std::rc::Rc;
use std::task::Poll;
use bytes::Bytes;
-use deno_core::error::AnyError;
use deno_core::futures::future::poll_fn;
use deno_core::op2;
use deno_core::serde::Serialize;
@@ -110,17 +109,28 @@ impl Resource for Http2ServerSendResponse {
}
}
+#[derive(Debug, thiserror::Error)]
+pub enum Http2Error {
+ #[error(transparent)]
+ Resource(deno_core::error::AnyError),
+ #[error(transparent)]
+ UrlParse(#[from] url::ParseError),
+ #[error(transparent)]
+ H2(#[from] h2::Error),
+}
+
#[op2(async)]
#[serde]
pub async fn op_http2_connect(
state: Rc<RefCell<OpState>>,
#[smi] rid: ResourceId,
#[string] url: String,
-) -> Result<(ResourceId, ResourceId), AnyError> {
+) -> Result<(ResourceId, ResourceId), Http2Error> {
// No permission check necessary because we're using an existing connection
let network_stream = {
let mut state = state.borrow_mut();
- take_network_stream_resource(&mut state.resource_table, rid)?
+ take_network_stream_resource(&mut state.resource_table, rid)
+ .map_err(Http2Error::Resource)?
};
let url = Url::parse(&url)?;
@@ -144,9 +154,10 @@ pub async fn op_http2_connect(
pub async fn op_http2_listen(
state: Rc<RefCell<OpState>>,
#[smi] rid: ResourceId,
-) -> Result<ResourceId, AnyError> {
+) -> Result<ResourceId, Http2Error> {
let stream =
- take_network_stream_resource(&mut state.borrow_mut().resource_table, rid)?;
+ take_network_stream_resource(&mut state.borrow_mut().resource_table, rid)
+ .map_err(Http2Error::Resource)?;
let conn = h2::server::Builder::new().handshake(stream).await?;
Ok(
@@ -166,12 +177,13 @@ pub async fn op_http2_accept(
#[smi] rid: ResourceId,
) -> Result<
Option<(Vec<(ByteString, ByteString)>, ResourceId, ResourceId)>,
- AnyError,
+ Http2Error,
> {
let resource = state
.borrow()
.resource_table
- .get::<Http2ServerConnection>(rid)?;
+ .get::<Http2ServerConnection>(rid)
+ .map_err(Http2Error::Resource)?;
let mut conn = RcRef::map(&resource, |r| &r.conn).borrow_mut().await;
if let Some(res) = conn.accept().await {
let (req, resp) = res?;
@@ -233,11 +245,12 @@ pub async fn op_http2_send_response(
#[smi] rid: ResourceId,
#[smi] status: u16,
#[serde] headers: Vec<(ByteString, ByteString)>,
-) -> Result<(ResourceId, u32), AnyError> {
+) -> Result<(ResourceId, u32), Http2Error> {
let resource = state
.borrow()
.resource_table
- .get::<Http2ServerSendResponse>(rid)?;
+ .get::<Http2ServerSendResponse>(rid)
+ .map_err(Http2Error::Resource)?;
let mut send_response = RcRef::map(resource, |r| &r.send_response)
.borrow_mut()
.await;
@@ -262,8 +275,12 @@ pub async fn op_http2_send_response(
pub async fn op_http2_poll_client_connection(
state: Rc<RefCell<OpState>>,
#[smi] rid: ResourceId,
-) -> Result<(), AnyError> {
- let resource = state.borrow().resource_table.get::<Http2ClientConn>(rid)?;
+) -> Result<(), Http2Error> {
+ let resource = state
+ .borrow()
+ .resource_table
+ .get::<Http2ClientConn>(rid)
+ .map_err(Http2Error::Resource)?;
let cancel_handle = RcRef::map(resource.clone(), |this| &this.cancel_handle);
let mut conn = RcRef::map(resource, |this| &this.conn).borrow_mut().await;
@@ -289,11 +306,12 @@ pub async fn op_http2_client_request(
// 4 strings of keys?
#[serde] mut pseudo_headers: HashMap<String, String>,
#[serde] headers: Vec<(ByteString, ByteString)>,
-) -> Result<(ResourceId, u32), AnyError> {
+) -> Result<(ResourceId, u32), Http2Error> {
let resource = state
.borrow()
.resource_table
- .get::<Http2Client>(client_rid)?;
+ .get::<Http2Client>(client_rid)
+ .map_err(Http2Error::Resource)?;
let url = resource.url.clone();
@@ -326,7 +344,10 @@ pub async fn op_http2_client_request(
let resource = {
let state = state.borrow();
- state.resource_table.get::<Http2Client>(client_rid)?
+ state
+ .resource_table
+ .get::<Http2Client>(client_rid)
+ .map_err(Http2Error::Resource)?
};
let mut client = RcRef::map(&resource, |r| &r.client).borrow_mut().await;
poll_fn(|cx| client.poll_ready(cx)).await?;
@@ -345,11 +366,12 @@ pub async fn op_http2_client_send_data(
#[smi] stream_rid: ResourceId,
#[buffer] data: JsBuffer,
end_of_stream: bool,
-) -> Result<(), AnyError> {
+) -> Result<(), Http2Error> {
let resource = state
.borrow()
.resource_table
- .get::<Http2ClientStream>(stream_rid)?;
+ .get::<Http2ClientStream>(stream_rid)
+ .map_err(Http2Error::Resource)?;
let mut stream = RcRef::map(&resource, |r| &r.stream).borrow_mut().await;
stream.send_data(data.to_vec().into(), end_of_stream)?;
@@ -361,7 +383,7 @@ pub async fn op_http2_client_reset_stream(
state: Rc<RefCell<OpState>>,
#[smi] stream_rid: ResourceId,
#[smi] code: u32,
-) -> Result<(), AnyError> {
+) -> Result<(), deno_core::error::AnyError> {
let resource = state
.borrow()
.resource_table
@@ -376,11 +398,12 @@ pub async fn op_http2_client_send_trailers(
state: Rc<RefCell<OpState>>,
#[smi] stream_rid: ResourceId,
#[serde] trailers: Vec<(ByteString, ByteString)>,
-) -> Result<(), AnyError> {
+) -> Result<(), Http2Error> {
let resource = state
.borrow()
.resource_table
- .get::<Http2ClientStream>(stream_rid)?;
+ .get::<Http2ClientStream>(stream_rid)
+ .map_err(Http2Error::Resource)?;
let mut stream = RcRef::map(&resource, |r| &r.stream).borrow_mut().await;
let mut trailers_map = http::HeaderMap::new();
@@ -408,11 +431,12 @@ pub struct Http2ClientResponse {
pub async fn op_http2_client_get_response(
state: Rc<RefCell<OpState>>,
#[smi] stream_rid: ResourceId,
-) -> Result<(Http2ClientResponse, bool), AnyError> {
+) -> Result<(Http2ClientResponse, bool), Http2Error> {
let resource = state
.borrow()
.resource_table
- .get::<Http2ClientStream>(stream_rid)?;
+ .get::<Http2ClientStream>(stream_rid)
+ .map_err(Http2Error::Resource)?;
let mut response_future =
RcRef::map(&resource, |r| &r.response).borrow_mut().await;
@@ -478,23 +502,22 @@ fn poll_data_or_trailers(
pub async fn op_http2_client_get_response_body_chunk(
state: Rc<RefCell<OpState>>,
#[smi] body_rid: ResourceId,
-) -> Result<(Option<Vec<u8>>, bool, bool), AnyError> {
+) -> Result<(Option<Vec<u8>>, bool, bool), Http2Error> {
let resource = state
.borrow()
.resource_table
- .get::<Http2ClientResponseBody>(body_rid)?;
+ .get::<Http2ClientResponseBody>(body_rid)
+ .map_err(Http2Error::Resource)?;
let mut body = RcRef::map(&resource, |r| &r.body).borrow_mut().await;
loop {
let result = poll_fn(|cx| poll_data_or_trailers(cx, &mut body)).await;
if let Err(err) = result {
- let reason = err.reason();
- if let Some(reason) = reason {
- if reason == Reason::CANCEL {
- return Ok((None, false, true));
- }
+ match err.reason() {
+ Some(Reason::NO_ERROR) => return Ok((None, true, false)),
+ Some(Reason::CANCEL) => return Ok((None, false, true)),
+ _ => return Err(err.into()),
}
- return Err(err.into());
}
match result.unwrap() {
DataOrTrailers::Data(data) => {
@@ -527,7 +550,7 @@ pub async fn op_http2_client_get_response_body_chunk(
pub async fn op_http2_client_get_response_trailers(
state: Rc<RefCell<OpState>>,
#[smi] body_rid: ResourceId,
-) -> Result<Option<Vec<(ByteString, ByteString)>>, AnyError> {
+) -> Result<Option<Vec<(ByteString, ByteString)>>, deno_core::error::AnyError> {
let resource = state
.borrow()
.resource_table
diff --git a/ext/node/ops/idna.rs b/ext/node/ops/idna.rs
index 9c9450c70..a3d85e77c 100644
--- a/ext/node/ops/idna.rs
+++ b/ext/node/ops/idna.rs
@@ -1,7 +1,5 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
-use deno_core::anyhow::Error;
-use deno_core::error::range_error;
use deno_core::op2;
use std::borrow::Cow;
@@ -11,19 +9,21 @@ use std::borrow::Cow;
const PUNY_PREFIX: &str = "xn--";
-fn invalid_input_err() -> Error {
- range_error("Invalid input")
-}
-
-fn not_basic_err() -> Error {
- range_error("Illegal input >= 0x80 (not a basic code point)")
+#[derive(Debug, thiserror::Error)]
+pub enum IdnaError {
+ #[error("Invalid input")]
+ InvalidInput,
+ #[error("Input would take more than 63 characters to encode")]
+ InputTooLong,
+ #[error("Illegal input >= 0x80 (not a basic code point)")]
+ IllegalInput,
}
/// map a domain by mapping each label with the given function
-fn map_domain<E>(
+fn map_domain(
domain: &str,
- f: impl Fn(&str) -> Result<Cow<'_, str>, E>,
-) -> Result<String, E> {
+ f: impl Fn(&str) -> Result<Cow<'_, str>, IdnaError>,
+) -> Result<String, IdnaError> {
let mut result = String::with_capacity(domain.len());
let mut domain = domain;
@@ -48,7 +48,7 @@ fn map_domain<E>(
/// Maps a unicode domain to ascii by punycode encoding each label
///
/// Note this is not IDNA2003 or IDNA2008 compliant, rather it matches node.js's punycode implementation
-fn to_ascii(input: &str) -> Result<String, Error> {
+fn to_ascii(input: &str) -> Result<String, IdnaError> {
if input.is_ascii() {
return Ok(input.into());
}
@@ -61,9 +61,7 @@ fn to_ascii(input: &str) -> Result<String, Error> {
} else {
idna::punycode::encode_str(label)
.map(|encoded| [PUNY_PREFIX, &encoded].join("").into()) // add the prefix
- .ok_or_else(|| {
- Error::msg("Input would take more than 63 characters to encode") // only error possible per the docs
- })
+ .ok_or(IdnaError::InputTooLong) // only error possible per the docs
}
})?;
@@ -74,13 +72,13 @@ fn to_ascii(input: &str) -> Result<String, Error> {
/// Maps an ascii domain to unicode by punycode decoding each label
///
/// Note this is not IDNA2003 or IDNA2008 compliant, rather it matches node.js's punycode implementation
-fn to_unicode(input: &str) -> Result<String, Error> {
+fn to_unicode(input: &str) -> Result<String, IdnaError> {
map_domain(input, |s| {
if let Some(puny) = s.strip_prefix(PUNY_PREFIX) {
// it's a punycode encoded label
Ok(
idna::punycode::decode_to_string(&puny.to_lowercase())
- .ok_or_else(invalid_input_err)?
+ .ok_or(IdnaError::InvalidInput)?
.into(),
)
} else {
@@ -95,7 +93,7 @@ fn to_unicode(input: &str) -> Result<String, Error> {
#[string]
pub fn op_node_idna_punycode_to_ascii(
#[string] domain: String,
-) -> Result<String, Error> {
+) -> Result<String, IdnaError> {
to_ascii(&domain)
}
@@ -105,7 +103,7 @@ pub fn op_node_idna_punycode_to_ascii(
#[string]
pub fn op_node_idna_punycode_to_unicode(
#[string] domain: String,
-) -> Result<String, Error> {
+) -> Result<String, IdnaError> {
to_unicode(&domain)
}
@@ -115,8 +113,8 @@ pub fn op_node_idna_punycode_to_unicode(
#[string]
pub fn op_node_idna_domain_to_ascii(
#[string] domain: String,
-) -> Result<String, Error> {
- idna::domain_to_ascii(&domain).map_err(|e| e.into())
+) -> Result<String, idna::Errors> {
+ idna::domain_to_ascii(&domain)
}
/// Converts a domain to Unicode as per the IDNA spec
@@ -131,7 +129,7 @@ pub fn op_node_idna_domain_to_unicode(#[string] domain: String) -> String {
#[string]
pub fn op_node_idna_punycode_decode(
#[string] domain: String,
-) -> Result<String, Error> {
+) -> Result<String, IdnaError> {
if domain.is_empty() {
return Ok(domain);
}
@@ -147,11 +145,10 @@ pub fn op_node_idna_punycode_decode(
.unwrap_or(domain.len() - 1);
if !domain[..last_dash].is_ascii() {
- return Err(not_basic_err());
+ return Err(IdnaError::IllegalInput);
}
- idna::punycode::decode_to_string(&domain)
- .ok_or_else(|| deno_core::error::range_error("Invalid input"))
+ idna::punycode::decode_to_string(&domain).ok_or(IdnaError::InvalidInput)
}
#[op2]
diff --git a/ext/node/ops/inspector.rs b/ext/node/ops/inspector.rs
new file mode 100644
index 000000000..34a7e004c
--- /dev/null
+++ b/ext/node/ops/inspector.rs
@@ -0,0 +1,161 @@
+// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
+
+use crate::NodePermissions;
+use deno_core::anyhow::Error;
+use deno_core::error::generic_error;
+use deno_core::futures::channel::mpsc;
+use deno_core::op2;
+use deno_core::v8;
+use deno_core::GarbageCollected;
+use deno_core::InspectorSessionKind;
+use deno_core::InspectorSessionOptions;
+use deno_core::JsRuntimeInspector;
+use deno_core::OpState;
+use std::cell::RefCell;
+use std::rc::Rc;
+
+#[op2(fast)]
+pub fn op_inspector_enabled() -> bool {
+ // TODO: hook up to InspectorServer
+ false
+}
+
+#[op2]
+pub fn op_inspector_open<P>(
+ _state: &mut OpState,
+ _port: Option<u16>,
+ #[string] _host: Option<String>,
+) -> Result<(), Error>
+where
+ P: NodePermissions + 'static,
+{
+ // TODO: hook up to InspectorServer
+ /*
+ let server = state.borrow_mut::<InspectorServer>();
+ if let Some(host) = host {
+ server.set_host(host);
+ }
+ if let Some(port) = port {
+ server.set_port(port);
+ }
+ state
+ .borrow_mut::<P>()
+ .check_net((server.host(), Some(server.port())), "inspector.open")?;
+ */
+
+ Ok(())
+}
+
+#[op2(fast)]
+pub fn op_inspector_close() {
+ // TODO: hook up to InspectorServer
+}
+
+#[op2]
+#[string]
+pub fn op_inspector_url() -> Option<String> {
+ // TODO: hook up to InspectorServer
+ None
+}
+
+#[op2(fast)]
+pub fn op_inspector_wait(state: &OpState) -> bool {
+ match state.try_borrow::<Rc<RefCell<JsRuntimeInspector>>>() {
+ Some(inspector) => {
+ inspector
+ .borrow_mut()
+ .wait_for_session_and_break_on_next_statement();
+ true
+ }
+ None => false,
+ }
+}
+
+#[op2(fast)]
+pub fn op_inspector_emit_protocol_event(
+ #[string] _event_name: String,
+ #[string] _params: String,
+) {
+ // TODO: inspector channel & protocol notifications
+}
+
+struct JSInspectorSession {
+ tx: RefCell<Option<mpsc::UnboundedSender<String>>>,
+}
+
+impl GarbageCollected for JSInspectorSession {}
+
+#[op2]
+#[cppgc]
+pub fn op_inspector_connect<'s, P>(
+ isolate: *mut v8::Isolate,
+ scope: &mut v8::HandleScope<'s>,
+ state: &mut OpState,
+ connect_to_main_thread: bool,
+ callback: v8::Local<'s, v8::Function>,
+) -> Result<JSInspectorSession, Error>
+where
+ P: NodePermissions + 'static,
+{
+ state
+ .borrow_mut::<P>()
+ .check_sys("inspector", "inspector.Session.connect")?;
+
+ if connect_to_main_thread {
+ return Err(generic_error("connectToMainThread not supported"));
+ }
+
+ let context = scope.get_current_context();
+ let context = v8::Global::new(scope, context);
+ let callback = v8::Global::new(scope, callback);
+
+ let inspector = state
+ .borrow::<Rc<RefCell<JsRuntimeInspector>>>()
+ .borrow_mut();
+
+ let tx = inspector.create_raw_session(
+ InspectorSessionOptions {
+ kind: InspectorSessionKind::NonBlocking {
+ wait_for_disconnect: false,
+ },
+ },
+ // The inspector connection does not keep the event loop alive but
+      // when the inspector sends a message to the frontend, the JS
+      // that runs may keep the event loop alive so we have to call back
+ // synchronously, instead of using the usual LocalInspectorSession
+ // UnboundedReceiver<InspectorMsg> API.
+ Box::new(move |message| {
+ // SAFETY: This function is called directly by the inspector, so
+ // 1) The isolate is still valid
+ // 2) We are on the same thread as the Isolate
+ let scope = unsafe { &mut v8::CallbackScope::new(&mut *isolate) };
+ let context = v8::Local::new(scope, context.clone());
+ let scope = &mut v8::ContextScope::new(scope, context);
+ let scope = &mut v8::TryCatch::new(scope);
+ let recv = v8::undefined(scope);
+ if let Some(message) = v8::String::new(scope, &message.content) {
+ let callback = v8::Local::new(scope, callback.clone());
+ callback.call(scope, recv.into(), &[message.into()]);
+ }
+ }),
+ );
+
+ Ok(JSInspectorSession {
+ tx: RefCell::new(Some(tx)),
+ })
+}
+
+#[op2(fast)]
+pub fn op_inspector_dispatch(
+ #[cppgc] session: &JSInspectorSession,
+ #[string] message: String,
+) {
+ if let Some(tx) = &*session.tx.borrow() {
+ let _ = tx.unbounded_send(message);
+ }
+}
+
+#[op2(fast)]
+pub fn op_inspector_disconnect(#[cppgc] session: &JSInspectorSession) {
+ drop(session.tx.borrow_mut().take());
+}
diff --git a/ext/node/ops/ipc.rs b/ext/node/ops/ipc.rs
index 59b6fece1..672cf0d70 100644
--- a/ext/node/ops/ipc.rs
+++ b/ext/node/ops/ipc.rs
@@ -17,8 +17,6 @@ mod impl_ {
use std::task::Context;
use std::task::Poll;
- use deno_core::error::bad_resource_id;
- use deno_core::error::AnyError;
use deno_core::op2;
use deno_core::serde;
use deno_core::serde::Serializer;
@@ -167,7 +165,7 @@ mod impl_ {
#[smi]
pub fn op_node_child_ipc_pipe(
state: &mut OpState,
- ) -> Result<Option<ResourceId>, AnyError> {
+ ) -> Result<Option<ResourceId>, io::Error> {
let fd = match state.try_borrow_mut::<crate::ChildPipeFd>() {
Some(child_pipe_fd) => child_pipe_fd.0,
None => return Ok(None),
@@ -180,6 +178,18 @@ mod impl_ {
))
}
+ #[derive(Debug, thiserror::Error)]
+ pub enum IpcError {
+ #[error(transparent)]
+ Resource(deno_core::error::AnyError),
+ #[error(transparent)]
+ IpcJsonStream(#[from] IpcJsonStreamError),
+ #[error(transparent)]
+ Canceled(#[from] deno_core::Canceled),
+ #[error("failed to serialize json value: {0}")]
+ SerdeJson(serde_json::Error),
+ }
+
#[op2(async)]
pub fn op_node_ipc_write<'a>(
scope: &mut v8::HandleScope<'a>,
@@ -192,34 +202,37 @@ mod impl_ {
// ideally we would just return `Result<(impl Future, bool), ..>`, but that's not
// supported by `op2` currently.
queue_ok: v8::Local<'a, v8::Array>,
- ) -> Result<impl Future<Output = Result<(), AnyError>>, AnyError> {
+ ) -> Result<impl Future<Output = Result<(), io::Error>>, IpcError> {
let mut serialized = Vec::with_capacity(64);
let mut ser = serde_json::Serializer::new(&mut serialized);
- serialize_v8_value(scope, value, &mut ser).map_err(|e| {
- deno_core::error::type_error(format!(
- "failed to serialize json value: {e}"
- ))
- })?;
+ serialize_v8_value(scope, value, &mut ser).map_err(IpcError::SerdeJson)?;
serialized.push(b'\n');
let stream = state
.borrow()
.resource_table
.get::<IpcJsonStreamResource>(rid)
- .map_err(|_| bad_resource_id())?;
+ .map_err(IpcError::Resource)?;
let old = stream
.queued_bytes
.fetch_add(serialized.len(), std::sync::atomic::Ordering::Relaxed);
if old + serialized.len() > 2 * INITIAL_CAPACITY {
// sending messages too fast
- let v = false.to_v8(scope)?;
+ let v = false.to_v8(scope).unwrap(); // Infallible
queue_ok.set_index(scope, 0, v);
}
Ok(async move {
- stream.clone().write_msg_bytes(&serialized).await?;
+ let cancel = stream.cancel.clone();
+ let result = stream
+ .clone()
+ .write_msg_bytes(&serialized)
+ .or_cancel(cancel)
+ .await;
+ // adjust count even on error
stream
.queued_bytes
.fetch_sub(serialized.len(), std::sync::atomic::Ordering::Relaxed);
+ result??;
Ok(())
})
}
@@ -239,12 +252,12 @@ mod impl_ {
pub async fn op_node_ipc_read(
state: Rc<RefCell<OpState>>,
#[smi] rid: ResourceId,
- ) -> Result<serde_json::Value, AnyError> {
+ ) -> Result<serde_json::Value, IpcError> {
let stream = state
.borrow()
.resource_table
.get::<IpcJsonStreamResource>(rid)
- .map_err(|_| bad_resource_id())?;
+ .map_err(IpcError::Resource)?;
let cancel = stream.cancel.clone();
let mut stream = RcRef::map(stream, |r| &r.read_half).borrow_mut().await;
@@ -400,7 +413,7 @@ mod impl_ {
async fn write_msg_bytes(
self: Rc<Self>,
msg: &[u8],
- ) -> Result<(), AnyError> {
+ ) -> Result<(), io::Error> {
let mut write_half =
RcRef::map(self, |r| &r.write_half).borrow_mut().await;
write_half.write_all(msg).await?;
@@ -455,6 +468,14 @@ mod impl_ {
}
}
+ #[derive(Debug, thiserror::Error)]
+ pub enum IpcJsonStreamError {
+ #[error("{0}")]
+ Io(#[source] std::io::Error),
+ #[error("{0}")]
+ SimdJson(#[source] simd_json::Error),
+ }
+
// JSON serialization stream over IPC pipe.
//
// `\n` is used as a delimiter between messages.
@@ -475,7 +496,7 @@ mod impl_ {
async fn read_msg(
&mut self,
- ) -> Result<Option<serde_json::Value>, AnyError> {
+ ) -> Result<Option<serde_json::Value>, IpcJsonStreamError> {
let mut json = None;
let nread = read_msg_inner(
&mut self.pipe,
@@ -483,7 +504,8 @@ mod impl_ {
&mut json,
&mut self.read_buffer,
)
- .await?;
+ .await
+ .map_err(IpcJsonStreamError::Io)?;
if nread == 0 {
// EOF.
return Ok(None);
@@ -493,7 +515,8 @@ mod impl_ {
Some(v) => v,
None => {
// Took more than a single read and some buffering.
- simd_json::from_slice(&mut self.buffer[..nread])?
+ simd_json::from_slice(&mut self.buffer[..nread])
+ .map_err(IpcJsonStreamError::SimdJson)?
}
};
diff --git a/ext/node/ops/mod.rs b/ext/node/ops/mod.rs
index b562261f3..e5ea8b417 100644
--- a/ext/node/ops/mod.rs
+++ b/ext/node/ops/mod.rs
@@ -7,8 +7,10 @@ pub mod fs;
pub mod http;
pub mod http2;
pub mod idna;
+pub mod inspector;
pub mod ipc;
pub mod os;
+pub mod perf_hooks;
pub mod process;
pub mod require;
pub mod tls;
diff --git a/ext/node/ops/os/mod.rs b/ext/node/ops/os/mod.rs
index ca91895f2..d291277ad 100644
--- a/ext/node/ops/os/mod.rs
+++ b/ext/node/ops/os/mod.rs
@@ -1,19 +1,31 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
+use std::mem::MaybeUninit;
+
use crate::NodePermissions;
-use deno_core::error::type_error;
-use deno_core::error::AnyError;
use deno_core::op2;
use deno_core::OpState;
mod cpus;
-mod priority;
+pub mod priority;
+
+#[derive(Debug, thiserror::Error)]
+pub enum OsError {
+ #[error(transparent)]
+ Priority(priority::PriorityError),
+ #[error(transparent)]
+ Permission(#[from] deno_permissions::PermissionCheckError),
+ #[error("Failed to get cpu info")]
+ FailedToGetCpuInfo,
+ #[error("Failed to get user info")]
+ FailedToGetUserInfo(#[source] std::io::Error),
+}
#[op2(fast)]
pub fn op_node_os_get_priority<P>(
state: &mut OpState,
pid: u32,
-) -> Result<i32, AnyError>
+) -> Result<i32, OsError>
where
P: NodePermissions + 'static,
{
@@ -22,7 +34,7 @@ where
permissions.check_sys("getPriority", "node:os.getPriority()")?;
}
- priority::get_priority(pid)
+ priority::get_priority(pid).map_err(OsError::Priority)
}
#[op2(fast)]
@@ -30,7 +42,7 @@ pub fn op_node_os_set_priority<P>(
state: &mut OpState,
pid: u32,
priority: i32,
-) -> Result<(), AnyError>
+) -> Result<(), OsError>
where
P: NodePermissions + 'static,
{
@@ -39,25 +51,171 @@ where
permissions.check_sys("setPriority", "node:os.setPriority()")?;
}
- priority::set_priority(pid, priority)
+ priority::set_priority(pid, priority).map_err(OsError::Priority)
+}
+
+#[derive(serde::Serialize)]
+pub struct UserInfo {
+ username: String,
+ homedir: String,
+ shell: Option<String>,
+}
+
+#[cfg(unix)]
+fn get_user_info(uid: u32) -> Result<UserInfo, OsError> {
+ use std::ffi::CStr;
+ let mut pw: MaybeUninit<libc::passwd> = MaybeUninit::uninit();
+ let mut result: *mut libc::passwd = std::ptr::null_mut();
+ // SAFETY: libc call, no invariants
+ let max_buf_size = unsafe { libc::sysconf(libc::_SC_GETPW_R_SIZE_MAX) };
+ let buf_size = if max_buf_size < 0 {
+ // from the man page
+ 16_384
+ } else {
+ max_buf_size as usize
+ };
+ let mut buf = {
+ let mut b = Vec::<MaybeUninit<libc::c_char>>::with_capacity(buf_size);
+ // SAFETY: MaybeUninit has no initialization invariants, and len == cap
+ unsafe {
+ b.set_len(buf_size);
+ }
+ b
+ };
+ // SAFETY: libc call, args are correct
+ let s = unsafe {
+ libc::getpwuid_r(
+ uid,
+ pw.as_mut_ptr(),
+ buf.as_mut_ptr().cast(),
+ buf_size,
+ std::ptr::addr_of_mut!(result),
+ )
+ };
+ if result.is_null() {
+ if s != 0 {
+ return Err(
+ OsError::FailedToGetUserInfo(std::io::Error::last_os_error()),
+ );
+ } else {
+ return Err(OsError::FailedToGetUserInfo(std::io::Error::from(
+ std::io::ErrorKind::NotFound,
+ )));
+ }
+ }
+ // SAFETY: pw was initialized by the call to `getpwuid_r` above
+ let pw = unsafe { pw.assume_init() };
+ // SAFETY: initialized above, pw alive until end of function, nul terminated
+ let username = unsafe { CStr::from_ptr(pw.pw_name) };
+ // SAFETY: initialized above, pw alive until end of function, nul terminated
+ let homedir = unsafe { CStr::from_ptr(pw.pw_dir) };
+ // SAFETY: initialized above, pw alive until end of function, nul terminated
+ let shell = unsafe { CStr::from_ptr(pw.pw_shell) };
+ Ok(UserInfo {
+ username: username.to_string_lossy().into_owned(),
+ homedir: homedir.to_string_lossy().into_owned(),
+ shell: Some(shell.to_string_lossy().into_owned()),
+ })
+}
+
+#[cfg(windows)]
+fn get_user_info(_uid: u32) -> Result<UserInfo, OsError> {
+ use std::ffi::OsString;
+ use std::os::windows::ffi::OsStringExt;
+
+ use windows_sys::Win32::Foundation::CloseHandle;
+ use windows_sys::Win32::Foundation::GetLastError;
+ use windows_sys::Win32::Foundation::ERROR_INSUFFICIENT_BUFFER;
+ use windows_sys::Win32::Foundation::HANDLE;
+ use windows_sys::Win32::System::Threading::GetCurrentProcess;
+ use windows_sys::Win32::System::Threading::OpenProcessToken;
+ use windows_sys::Win32::UI::Shell::GetUserProfileDirectoryW;
+ struct Handle(HANDLE);
+ impl Drop for Handle {
+ fn drop(&mut self) {
+ // SAFETY: win32 call
+ unsafe {
+ CloseHandle(self.0);
+ }
+ }
+ }
+ let mut token: MaybeUninit<HANDLE> = MaybeUninit::uninit();
+
+ // Get a handle to the current process
+ // SAFETY: win32 call
+ unsafe {
+ if OpenProcessToken(
+ GetCurrentProcess(),
+ windows_sys::Win32::Security::TOKEN_READ,
+ token.as_mut_ptr(),
+ ) == 0
+ {
+ return Err(
+ OsError::FailedToGetUserInfo(std::io::Error::last_os_error()),
+ );
+ }
+ }
+
+ // SAFETY: initialized by call above
+ let token = Handle(unsafe { token.assume_init() });
+
+ let mut bufsize = 0;
+ // get the size for the homedir buf (it'll end up in `bufsize`)
+ // SAFETY: win32 call
+ unsafe {
+ GetUserProfileDirectoryW(token.0, std::ptr::null_mut(), &mut bufsize);
+ let err = GetLastError();
+ if err != ERROR_INSUFFICIENT_BUFFER {
+ return Err(OsError::FailedToGetUserInfo(
+ std::io::Error::from_raw_os_error(err as i32),
+ ));
+ }
+ }
+ let mut path = vec![0; bufsize as usize];
+ // Actually get the homedir
+ // SAFETY: path is `bufsize` elements
+ unsafe {
+ if GetUserProfileDirectoryW(token.0, path.as_mut_ptr(), &mut bufsize) == 0 {
+ return Err(
+ OsError::FailedToGetUserInfo(std::io::Error::last_os_error()),
+ );
+ }
+ }
+ // remove trailing nul
+ path.pop();
+ let homedir_wide = OsString::from_wide(&path);
+ let homedir = homedir_wide.to_string_lossy().into_owned();
+
+ Ok(UserInfo {
+ username: deno_whoami::username(),
+ homedir,
+ shell: None,
+ })
}
#[op2]
-#[string]
-pub fn op_node_os_username<P>(state: &mut OpState) -> Result<String, AnyError>
+#[serde]
+pub fn op_node_os_user_info<P>(
+ state: &mut OpState,
+ #[smi] uid: u32,
+) -> Result<UserInfo, OsError>
where
P: NodePermissions + 'static,
{
{
let permissions = state.borrow_mut::<P>();
- permissions.check_sys("username", "node:os.userInfo()")?;
+ permissions
+ .check_sys("userInfo", "node:os.userInfo()")
+ .map_err(OsError::Permission)?;
}
- Ok(deno_whoami::username())
+ get_user_info(uid)
}
#[op2(fast)]
-pub fn op_geteuid<P>(state: &mut OpState) -> Result<u32, AnyError>
+pub fn op_geteuid<P>(
+ state: &mut OpState,
+) -> Result<u32, deno_core::error::AnyError>
where
P: NodePermissions + 'static,
{
@@ -76,7 +234,9 @@ where
}
#[op2(fast)]
-pub fn op_getegid<P>(state: &mut OpState) -> Result<u32, AnyError>
+pub fn op_getegid<P>(
+ state: &mut OpState,
+) -> Result<u32, deno_core::error::AnyError>
where
P: NodePermissions + 'static,
{
@@ -96,7 +256,7 @@ where
#[op2]
#[serde]
-pub fn op_cpus<P>(state: &mut OpState) -> Result<Vec<cpus::CpuInfo>, AnyError>
+pub fn op_cpus<P>(state: &mut OpState) -> Result<Vec<cpus::CpuInfo>, OsError>
where
P: NodePermissions + 'static,
{
@@ -105,12 +265,14 @@ where
permissions.check_sys("cpus", "node:os.cpus()")?;
}
- cpus::cpu_info().ok_or_else(|| type_error("Failed to get cpu info"))
+ cpus::cpu_info().ok_or(OsError::FailedToGetCpuInfo)
}
#[op2]
#[string]
-pub fn op_homedir<P>(state: &mut OpState) -> Result<Option<String>, AnyError>
+pub fn op_homedir<P>(
+ state: &mut OpState,
+) -> Result<Option<String>, deno_core::error::AnyError>
where
P: NodePermissions + 'static,
{
diff --git a/ext/node/ops/os/priority.rs b/ext/node/ops/os/priority.rs
index 043928e2a..9a1ebcca7 100644
--- a/ext/node/ops/os/priority.rs
+++ b/ext/node/ops/os/priority.rs
@@ -1,12 +1,18 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
-use deno_core::error::AnyError;
-
pub use impl_::*;
+#[derive(Debug, thiserror::Error)]
+pub enum PriorityError {
+ #[error("{0}")]
+ Io(#[from] std::io::Error),
+ #[cfg(windows)]
+ #[error("Invalid priority")]
+ InvalidPriority,
+}
+
#[cfg(unix)]
mod impl_ {
- use super::*;
use errno::errno;
use errno::set_errno;
use errno::Errno;
@@ -16,7 +22,7 @@ mod impl_ {
const PRIORITY_HIGH: i32 = -14;
// Ref: https://github.com/libuv/libuv/blob/55376b044b74db40772e8a6e24d67a8673998e02/src/unix/core.c#L1533-L1547
- pub fn get_priority(pid: u32) -> Result<i32, AnyError> {
+ pub fn get_priority(pid: u32) -> Result<i32, super::PriorityError> {
set_errno(Errno(0));
match (
// SAFETY: libc::getpriority is unsafe
@@ -29,7 +35,10 @@ mod impl_ {
}
}
- pub fn set_priority(pid: u32, priority: i32) -> Result<(), AnyError> {
+ pub fn set_priority(
+ pid: u32,
+ priority: i32,
+ ) -> Result<(), super::PriorityError> {
// SAFETY: libc::setpriority is unsafe
match unsafe { libc::setpriority(PRIO_PROCESS, pid as id_t, priority) } {
-1 => Err(std::io::Error::last_os_error().into()),
@@ -40,8 +49,6 @@ mod impl_ {
#[cfg(windows)]
mod impl_ {
- use super::*;
- use deno_core::error::type_error;
use winapi::shared::minwindef::DWORD;
use winapi::shared::minwindef::FALSE;
use winapi::shared::ntdef::NULL;
@@ -67,7 +74,7 @@ mod impl_ {
const PRIORITY_HIGHEST: i32 = -20;
// Ported from: https://github.com/libuv/libuv/blob/a877ca2435134ef86315326ef4ef0c16bdbabf17/src/win/util.c#L1649-L1685
- pub fn get_priority(pid: u32) -> Result<i32, AnyError> {
+ pub fn get_priority(pid: u32) -> Result<i32, super::PriorityError> {
// SAFETY: Windows API calls
unsafe {
let handle = if pid == 0 {
@@ -95,7 +102,10 @@ mod impl_ {
}
// Ported from: https://github.com/libuv/libuv/blob/a877ca2435134ef86315326ef4ef0c16bdbabf17/src/win/util.c#L1688-L1719
- pub fn set_priority(pid: u32, priority: i32) -> Result<(), AnyError> {
+ pub fn set_priority(
+ pid: u32,
+ priority: i32,
+ ) -> Result<(), super::PriorityError> {
// SAFETY: Windows API calls
unsafe {
let handle = if pid == 0 {
@@ -109,7 +119,7 @@ mod impl_ {
#[allow(clippy::manual_range_contains)]
let priority_class =
if priority < PRIORITY_HIGHEST || priority > PRIORITY_LOW {
- return Err(type_error("Invalid priority"));
+ return Err(super::PriorityError::InvalidPriority);
} else if priority < PRIORITY_HIGH {
REALTIME_PRIORITY_CLASS
} else if priority < PRIORITY_ABOVE_NORMAL {
diff --git a/ext/node/ops/perf_hooks.rs b/ext/node/ops/perf_hooks.rs
new file mode 100644
index 000000000..636d0b2ad
--- /dev/null
+++ b/ext/node/ops/perf_hooks.rs
@@ -0,0 +1,135 @@
+// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
+
+use deno_core::op2;
+use deno_core::GarbageCollected;
+
+use std::cell::Cell;
+
+#[derive(Debug, thiserror::Error)]
+pub enum PerfHooksError {
+ #[error(transparent)]
+ TokioEld(#[from] tokio_eld::Error),
+}
+
+pub struct EldHistogram {
+ eld: tokio_eld::EldHistogram<u64>,
+ started: Cell<bool>,
+}
+
+impl GarbageCollected for EldHistogram {}
+
+#[op2]
+impl EldHistogram {
+ // Creates an interval EldHistogram object that samples and reports the event
+ // loop delay over time.
+ //
+ // The delays will be reported in nanoseconds.
+ #[constructor]
+ #[cppgc]
+ pub fn new(#[smi] resolution: u32) -> Result<EldHistogram, PerfHooksError> {
+ Ok(EldHistogram {
+ eld: tokio_eld::EldHistogram::new(resolution as usize)?,
+ started: Cell::new(false),
+ })
+ }
+
+  // Enables the update interval timer.
+  //
+  // Returns true if the timer was started, false if it was already started.
+ #[fast]
+ fn enable(&self) -> bool {
+ if self.started.get() {
+ return false;
+ }
+
+ self.eld.start();
+ self.started.set(true);
+
+ true
+ }
+
+  // Disables the update interval timer.
+  //
+  // Returns true if the timer was stopped, false if it was already stopped.
+ #[fast]
+ fn disable(&self) -> bool {
+ if !self.started.get() {
+ return false;
+ }
+
+ self.eld.stop();
+ self.started.set(false);
+
+ true
+ }
+
+ // Returns the value at the given percentile.
+ //
+ // `percentile` ∈ (0, 100]
+ #[fast]
+ #[number]
+ fn percentile(&self, percentile: f64) -> u64 {
+ self.eld.value_at_percentile(percentile)
+ }
+
+ // Returns the value at the given percentile as a bigint.
+ #[fast]
+ #[bigint]
+ fn percentile_big_int(&self, percentile: f64) -> u64 {
+ self.eld.value_at_percentile(percentile)
+ }
+
+ // The number of samples recorded by the histogram.
+ #[getter]
+ #[number]
+ fn count(&self) -> u64 {
+ self.eld.len()
+ }
+
+ // The number of samples recorded by the histogram as a bigint.
+ #[getter]
+ #[bigint]
+ fn count_big_int(&self) -> u64 {
+ self.eld.len()
+ }
+
+ // The maximum recorded event loop delay.
+ #[getter]
+ #[number]
+ fn max(&self) -> u64 {
+ self.eld.max()
+ }
+
+ // The maximum recorded event loop delay as a bigint.
+ #[getter]
+ #[bigint]
+ fn max_big_int(&self) -> u64 {
+ self.eld.max()
+ }
+
+ // The mean of the recorded event loop delays.
+ #[getter]
+ fn mean(&self) -> f64 {
+ self.eld.mean()
+ }
+
+ // The minimum recorded event loop delay.
+ #[getter]
+ #[number]
+ fn min(&self) -> u64 {
+ self.eld.min()
+ }
+
+ // The minimum recorded event loop delay as a bigint.
+ #[getter]
+ #[bigint]
+ fn min_big_int(&self) -> u64 {
+ self.eld.min()
+ }
+
+ // The standard deviation of the recorded event loop delays.
+ #[getter]
+ fn stddev(&self) -> f64 {
+ self.eld.stdev()
+ }
+}
diff --git a/ext/node/ops/process.rs b/ext/node/ops/process.rs
index 0992c46c6..282567226 100644
--- a/ext/node/ops/process.rs
+++ b/ext/node/ops/process.rs
@@ -1,6 +1,5 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
-use deno_core::error::AnyError;
use deno_core::op2;
use deno_core::OpState;
use deno_permissions::PermissionsContainer;
@@ -51,7 +50,7 @@ pub fn op_node_process_kill(
state: &mut OpState,
#[smi] pid: i32,
#[smi] sig: i32,
-) -> Result<i32, AnyError> {
+) -> Result<i32, deno_core::error::AnyError> {
state
.borrow_mut::<PermissionsContainer>()
.check_run_all("process.kill")?;
diff --git a/ext/node/ops/require.rs b/ext/node/ops/require.rs
index 547336981..06c034fd5 100644
--- a/ext/node/ops/require.rs
+++ b/ext/node/ops/require.rs
@@ -1,18 +1,19 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
-use deno_core::anyhow::Context;
-use deno_core::error::generic_error;
+use boxed_error::Boxed;
use deno_core::error::AnyError;
use deno_core::op2;
use deno_core::url::Url;
use deno_core::v8;
use deno_core::JsRuntimeInspector;
-use deno_core::ModuleSpecifier;
use deno_core::OpState;
use deno_fs::FileSystemRc;
+use deno_package_json::NodeModuleKind;
use deno_package_json::PackageJsonRc;
use deno_path_util::normalize_path;
-use node_resolver::NodeModuleKind;
+use deno_path_util::url_from_file_path;
+use deno_path_util::url_to_file_path;
+use node_resolver::errors::ClosestPkgJsonError;
use node_resolver::NodeResolutionMode;
use node_resolver::REQUIRE_CONDITIONS;
use std::borrow::Cow;
@@ -22,21 +23,55 @@ use std::path::PathBuf;
use std::rc::Rc;
use crate::NodePermissions;
-use crate::NodeRequireResolverRc;
+use crate::NodeRequireLoaderRc;
use crate::NodeResolverRc;
-use crate::NpmResolverRc;
+use crate::NpmPackageFolderResolverRc;
+use crate::PackageJsonResolverRc;
#[must_use = "the resolved return value to mitigate time-of-check to time-of-use issues"]
fn ensure_read_permission<'a, P>(
state: &mut OpState,
file_path: &'a Path,
-) -> Result<Cow<'a, Path>, AnyError>
+) -> Result<Cow<'a, Path>, deno_core::error::AnyError>
where
P: NodePermissions + 'static,
{
- let resolver = state.borrow::<NodeRequireResolverRc>().clone();
+ let loader = state.borrow::<NodeRequireLoaderRc>().clone();
let permissions = state.borrow_mut::<P>();
- resolver.ensure_read_permission(permissions, file_path)
+ loader.ensure_read_permission(permissions, file_path)
+}
+
+#[derive(Debug, Boxed)]
+pub struct RequireError(pub Box<RequireErrorKind>);
+
+#[derive(Debug, thiserror::Error)]
+pub enum RequireErrorKind {
+ #[error(transparent)]
+ UrlParse(#[from] url::ParseError),
+ #[error(transparent)]
+ Permission(deno_core::error::AnyError),
+ #[error(transparent)]
+ PackageExportsResolve(
+ #[from] node_resolver::errors::PackageExportsResolveError,
+ ),
+ #[error(transparent)]
+ PackageJsonLoad(#[from] node_resolver::errors::PackageJsonLoadError),
+ #[error(transparent)]
+ ClosestPkgJson(#[from] node_resolver::errors::ClosestPkgJsonError),
+ #[error(transparent)]
+ PackageImportsResolve(
+ #[from] node_resolver::errors::PackageImportsResolveError,
+ ),
+ #[error(transparent)]
+ FilePathConversion(#[from] deno_path_util::UrlToFilePathError),
+ #[error(transparent)]
+ UrlConversion(#[from] deno_path_util::PathToUrlError),
+ #[error(transparent)]
+ Fs(#[from] deno_io::fs::FsError),
+ #[error(transparent)]
+ ReadModule(deno_core::error::AnyError),
+ #[error("Unable to get CWD: {0}")]
+ UnableToGetCwd(deno_io::fs::FsError),
}
#[op2]
@@ -95,7 +130,7 @@ pub fn op_require_init_paths() -> Vec<String> {
pub fn op_require_node_module_paths<P>(
state: &mut OpState,
#[string] from: String,
-) -> Result<Vec<String>, AnyError>
+) -> Result<Vec<String>, RequireError>
where
P: NodePermissions + 'static,
{
@@ -104,13 +139,10 @@ where
let from = if from.starts_with("file:///") {
url_to_file_path(&Url::parse(&from)?)?
} else {
- let current_dir =
- &(fs.cwd().map_err(AnyError::from)).context("Unable to get CWD")?;
- deno_path_util::normalize_path(current_dir.join(from))
+ let current_dir = &fs.cwd().map_err(RequireErrorKind::UnableToGetCwd)?;
+ normalize_path(current_dir.join(from))
};
- let from = ensure_read_permission::<P>(state, &from)?;
-
if cfg!(windows) {
// return root node_modules when path is 'D:\\'.
let from_str = from.to_str().unwrap();
@@ -131,7 +163,7 @@ where
}
let mut paths = Vec::with_capacity(from.components().count());
- let mut current_path = from.as_ref();
+ let mut current_path = from.as_path();
let mut maybe_parent = Some(current_path);
while let Some(parent) = maybe_parent {
if !parent.ends_with("node_modules") {
@@ -191,17 +223,17 @@ pub fn op_require_resolve_deno_dir(
state: &mut OpState,
#[string] request: String,
#[string] parent_filename: String,
-) -> Option<String> {
- let resolver = state.borrow::<NpmResolverRc>();
- resolver
- .resolve_package_folder_from_package(
- &request,
- &ModuleSpecifier::from_file_path(&parent_filename).unwrap_or_else(|_| {
- panic!("Url::from_file_path: [{:?}]", parent_filename)
- }),
- )
- .ok()
- .map(|p| p.to_string_lossy().into_owned())
+) -> Result<Option<String>, AnyError> {
+ let resolver = state.borrow::<NpmPackageFolderResolverRc>();
+ Ok(
+ resolver
+ .resolve_package_folder_from_package(
+ &request,
+ &url_from_file_path(&PathBuf::from(parent_filename))?,
+ )
+ .ok()
+ .map(|p| p.to_string_lossy().into_owned()),
+ )
}
#[op2(fast)]
@@ -209,8 +241,11 @@ pub fn op_require_is_deno_dir_package(
state: &mut OpState,
#[string] path: String,
) -> bool {
- let resolver = state.borrow::<NpmResolverRc>();
- resolver.in_npm_package_at_file_path(&PathBuf::from(path))
+ let resolver = state.borrow::<NodeResolverRc>();
+ match deno_path_util::url_from_file_path(&PathBuf::from(path)) {
+ Ok(specifier) => resolver.in_npm_package(&specifier),
+ Err(_) => false,
+ }
}
#[op2]
@@ -264,7 +299,7 @@ pub fn op_require_path_is_absolute(#[string] p: String) -> bool {
pub fn op_require_stat<P>(
state: &mut OpState,
#[string] path: String,
-) -> Result<i32, AnyError>
+) -> Result<i32, deno_core::error::AnyError>
where
P: NodePermissions + 'static,
{
@@ -287,15 +322,16 @@ where
pub fn op_require_real_path<P>(
state: &mut OpState,
#[string] request: String,
-) -> Result<String, AnyError>
+) -> Result<String, RequireError>
where
P: NodePermissions + 'static,
{
let path = PathBuf::from(request);
- let path = ensure_read_permission::<P>(state, &path)?;
+ let path = ensure_read_permission::<P>(state, &path)
+ .map_err(RequireErrorKind::Permission)?;
let fs = state.borrow::<FileSystemRc>();
let canonicalized_path =
- deno_core::strip_unc_prefix(fs.realpath_sync(&path)?);
+ deno_path_util::strip_unc_prefix(fs.realpath_sync(&path)?);
Ok(canonicalized_path.to_string_lossy().into_owned())
}
@@ -319,12 +355,14 @@ pub fn op_require_path_resolve(#[serde] parts: Vec<String>) -> String {
#[string]
pub fn op_require_path_dirname(
#[string] request: String,
-) -> Result<String, AnyError> {
+) -> Result<String, deno_core::error::AnyError> {
let p = PathBuf::from(request);
if let Some(parent) = p.parent() {
Ok(parent.to_string_lossy().into_owned())
} else {
- Err(generic_error("Path doesn't have a parent"))
+ Err(deno_core::error::generic_error(
+ "Path doesn't have a parent",
+ ))
}
}
@@ -332,12 +370,14 @@ pub fn op_require_path_dirname(
#[string]
pub fn op_require_path_basename(
#[string] request: String,
-) -> Result<String, AnyError> {
+) -> Result<String, deno_core::error::AnyError> {
let p = PathBuf::from(request);
if let Some(path) = p.file_name() {
Ok(path.to_string_lossy().into_owned())
} else {
- Err(generic_error("Path doesn't have a file name"))
+ Err(deno_core::error::generic_error(
+ "Path doesn't have a file name",
+ ))
}
}
@@ -348,7 +388,7 @@ pub fn op_require_try_self_parent_path<P>(
has_parent: bool,
#[string] maybe_parent_filename: Option<String>,
#[string] maybe_parent_id: Option<String>,
-) -> Result<Option<String>, AnyError>
+) -> Result<Option<String>, deno_core::error::AnyError>
where
P: NodePermissions + 'static,
{
@@ -378,7 +418,7 @@ pub fn op_require_try_self<P>(
state: &mut OpState,
#[string] parent_path: Option<String>,
#[string] request: String,
-) -> Result<Option<String>, AnyError>
+) -> Result<Option<String>, RequireError>
where
P: NodePermissions + 'static,
{
@@ -386,8 +426,8 @@ where
return Ok(None);
}
- let node_resolver = state.borrow::<NodeResolverRc>();
- let pkg = node_resolver
+ let pkg_json_resolver = state.borrow::<PackageJsonResolverRc>();
+ let pkg = pkg_json_resolver
.get_closest_package_json_from_path(&PathBuf::from(parent_path.unwrap()))
.ok()
.flatten();
@@ -416,6 +456,7 @@ where
let referrer = deno_core::url::Url::from_file_path(&pkg.path).unwrap();
if let Some(exports) = &pkg.exports {
+ let node_resolver = state.borrow::<NodeResolverRc>();
let r = node_resolver.package_exports_resolve(
&pkg.path,
&expansion,
@@ -440,14 +481,18 @@ where
pub fn op_require_read_file<P>(
state: &mut OpState,
#[string] file_path: String,
-) -> Result<String, AnyError>
+) -> Result<String, RequireError>
where
P: NodePermissions + 'static,
{
let file_path = PathBuf::from(file_path);
- let file_path = ensure_read_permission::<P>(state, &file_path)?;
- let fs = state.borrow::<FileSystemRc>();
- Ok(fs.read_text_file_lossy_sync(&file_path, None)?)
+ // todo(dsherret): there's multiple borrows to NodeRequireLoaderRc here
+ let file_path = ensure_read_permission::<P>(state, &file_path)
+ .map_err(RequireErrorKind::Permission)?;
+ let loader = state.borrow::<NodeRequireLoaderRc>();
+ loader
+ .load_text_file_lossy(&file_path)
+ .map_err(|e| RequireErrorKind::ReadModule(e).into_box())
}
#[op2]
@@ -472,16 +517,17 @@ pub fn op_require_resolve_exports<P>(
#[string] name: String,
#[string] expansion: String,
#[string] parent_path: String,
-) -> Result<Option<String>, AnyError>
+) -> Result<Option<String>, RequireError>
where
P: NodePermissions + 'static,
{
let fs = state.borrow::<FileSystemRc>();
- let npm_resolver = state.borrow::<NpmResolverRc>();
let node_resolver = state.borrow::<NodeResolverRc>();
+ let pkg_json_resolver = state.borrow::<PackageJsonResolverRc>();
let modules_path = PathBuf::from(&modules_path_str);
- let pkg_path = if npm_resolver.in_npm_package_at_file_path(&modules_path)
+ let modules_specifier = deno_path_util::url_from_file_path(&modules_path)?;
+ let pkg_path = if node_resolver.in_npm_package(&modules_specifier)
&& !uses_local_node_modules_dir
{
modules_path
@@ -495,7 +541,7 @@ where
}
};
let Some(pkg) =
- node_resolver.load_package_json(&pkg_path.join("package.json"))?
+ pkg_json_resolver.load_package_json(&pkg_path.join("package.json"))?
else {
return Ok(None);
};
@@ -503,12 +549,16 @@ where
return Ok(None);
};
- let referrer = Url::from_file_path(parent_path).unwrap();
+ let referrer = if parent_path.is_empty() {
+ None
+ } else {
+ Some(Url::from_file_path(parent_path).unwrap())
+ };
let r = node_resolver.package_exports_resolve(
&pkg.path,
&format!(".{expansion}"),
exports,
- Some(&referrer),
+ referrer.as_ref(),
NodeModuleKind::Cjs,
REQUIRE_CONDITIONS,
NodeResolutionMode::Execution,
@@ -520,21 +570,17 @@ where
}))
}
-#[op2]
-#[serde]
-pub fn op_require_read_closest_package_json<P>(
+#[op2(fast)]
+pub fn op_require_is_maybe_cjs(
state: &mut OpState,
#[string] filename: String,
-) -> Result<Option<PackageJsonRc>, AnyError>
-where
- P: NodePermissions + 'static,
-{
+) -> Result<bool, ClosestPkgJsonError> {
let filename = PathBuf::from(filename);
- // permissions: allow reading the closest package.json files
- let node_resolver = state.borrow::<NodeResolverRc>().clone();
- node_resolver
- .get_closest_package_json_from_path(&filename)
- .map_err(AnyError::from)
+ let Ok(url) = url_from_file_path(&filename) else {
+ return Ok(false);
+ };
+ let loader = state.borrow::<NodeRequireLoaderRc>();
+ loader.is_maybe_cjs(&url)
}
#[op2]
@@ -546,13 +592,13 @@ pub fn op_require_read_package_scope<P>(
where
P: NodePermissions + 'static,
{
- let node_resolver = state.borrow::<NodeResolverRc>().clone();
+ let pkg_json_resolver = state.borrow::<PackageJsonResolverRc>();
let package_json_path = PathBuf::from(package_json_path);
if package_json_path.file_name() != Some("package.json".as_ref()) {
// permissions: do not allow reading a non-package.json file
return None;
}
- node_resolver
+ pkg_json_resolver
.load_package_json(&package_json_path)
.ok()
.flatten()
@@ -564,22 +610,23 @@ pub fn op_require_package_imports_resolve<P>(
state: &mut OpState,
#[string] referrer_filename: String,
#[string] request: String,
-) -> Result<Option<String>, AnyError>
+) -> Result<Option<String>, RequireError>
where
P: NodePermissions + 'static,
{
let referrer_path = PathBuf::from(&referrer_filename);
- let referrer_path = ensure_read_permission::<P>(state, &referrer_path)?;
- let node_resolver = state.borrow::<NodeResolverRc>();
+ let referrer_path = ensure_read_permission::<P>(state, &referrer_path)
+ .map_err(RequireErrorKind::Permission)?;
+ let pkg_json_resolver = state.borrow::<PackageJsonResolverRc>();
let Some(pkg) =
- node_resolver.get_closest_package_json_from_path(&referrer_path)?
+ pkg_json_resolver.get_closest_package_json_from_path(&referrer_path)?
else {
return Ok(None);
};
if pkg.imports.is_some() {
- let referrer_url =
- deno_core::url::Url::from_file_path(&referrer_filename).unwrap();
+ let node_resolver = state.borrow::<NodeResolverRc>();
+ let referrer_url = Url::from_file_path(&referrer_filename).unwrap();
let url = node_resolver.package_imports_resolve(
&request,
Some(&referrer_url),
@@ -604,20 +651,11 @@ pub fn op_require_break_on_next_statement(state: Rc<RefCell<OpState>>) {
inspector.wait_for_session_and_break_on_next_statement()
}
-fn url_to_file_path_string(url: &Url) -> Result<String, AnyError> {
+fn url_to_file_path_string(url: &Url) -> Result<String, RequireError> {
let file_path = url_to_file_path(url)?;
Ok(file_path.to_string_lossy().into_owned())
}
-fn url_to_file_path(url: &Url) -> Result<PathBuf, AnyError> {
- match url.to_file_path() {
- Ok(file_path) => Ok(file_path),
- Err(()) => {
- deno_core::anyhow::bail!("failed to convert '{}' to file path", url)
- }
- }
-}
-
#[op2(fast)]
pub fn op_require_can_parse_as_esm(
scope: &mut v8::HandleScope,
diff --git a/ext/node/ops/util.rs b/ext/node/ops/util.rs
index 533d51c92..1c177ac04 100644
--- a/ext/node/ops/util.rs
+++ b/ext/node/ops/util.rs
@@ -1,6 +1,5 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
-use deno_core::error::AnyError;
use deno_core::op2;
use deno_core::OpState;
use deno_core::ResourceHandle;
@@ -22,7 +21,7 @@ enum HandleType {
pub fn op_node_guess_handle_type(
state: &mut OpState,
rid: u32,
-) -> Result<u32, AnyError> {
+) -> Result<u32, deno_core::error::AnyError> {
let handle = state.resource_table.get_handle(rid)?;
let handle_type = match handle {
diff --git a/ext/node/ops/v8.rs b/ext/node/ops/v8.rs
index 8813d2e18..61f67f11f 100644
--- a/ext/node/ops/v8.rs
+++ b/ext/node/ops/v8.rs
@@ -1,7 +1,5 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
-use deno_core::error::generic_error;
-use deno_core::error::type_error;
-use deno_core::error::AnyError;
+
use deno_core::op2;
use deno_core::v8;
use deno_core::FastString;
@@ -206,10 +204,9 @@ pub fn op_v8_write_value(
scope: &mut v8::HandleScope,
#[cppgc] ser: &Serializer,
value: v8::Local<v8::Value>,
-) -> Result<(), AnyError> {
+) {
let context = scope.get_current_context();
ser.inner.write_value(context, value);
- Ok(())
}
struct DeserBuffer {
@@ -271,11 +268,13 @@ pub fn op_v8_new_deserializer(
scope: &mut v8::HandleScope,
obj: v8::Local<v8::Object>,
buffer: v8::Local<v8::ArrayBufferView>,
-) -> Result<Deserializer<'static>, AnyError> {
+) -> Result<Deserializer<'static>, deno_core::error::AnyError> {
let offset = buffer.byte_offset();
let len = buffer.byte_length();
let backing_store = buffer.get_backing_store().ok_or_else(|| {
- generic_error("deserialization buffer has no backing store")
+ deno_core::error::generic_error(
+ "deserialization buffer has no backing store",
+ )
})?;
let (buf_slice, buf_ptr) = if let Some(data) = backing_store.data() {
// SAFETY: the offset is valid for the underlying buffer because we're getting it directly from v8
@@ -317,10 +316,10 @@ pub fn op_v8_transfer_array_buffer_de(
#[op2(fast)]
pub fn op_v8_read_double(
#[cppgc] deser: &Deserializer,
-) -> Result<f64, AnyError> {
+) -> Result<f64, deno_core::error::AnyError> {
let mut double = 0f64;
if !deser.inner.read_double(&mut double) {
- return Err(type_error("ReadDouble() failed"));
+ return Err(deno_core::error::type_error("ReadDouble() failed"));
}
Ok(double)
}
@@ -355,10 +354,10 @@ pub fn op_v8_read_raw_bytes(
#[op2(fast)]
pub fn op_v8_read_uint32(
#[cppgc] deser: &Deserializer,
-) -> Result<u32, AnyError> {
+) -> Result<u32, deno_core::error::AnyError> {
let mut value = 0;
if !deser.inner.read_uint32(&mut value) {
- return Err(type_error("ReadUint32() failed"));
+ return Err(deno_core::error::type_error("ReadUint32() failed"));
}
Ok(value)
@@ -368,10 +367,10 @@ pub fn op_v8_read_uint32(
#[serde]
pub fn op_v8_read_uint64(
#[cppgc] deser: &Deserializer,
-) -> Result<(u32, u32), AnyError> {
+) -> Result<(u32, u32), deno_core::error::AnyError> {
let mut val = 0;
if !deser.inner.read_uint64(&mut val) {
- return Err(type_error("ReadUint64() failed"));
+ return Err(deno_core::error::type_error("ReadUint64() failed"));
}
Ok(((val >> 32) as u32, val as u32))
diff --git a/ext/node/ops/winerror.rs b/ext/node/ops/winerror.rs
index c0d66f7d0..cb053774e 100644
--- a/ext/node/ops/winerror.rs
+++ b/ext/node/ops/winerror.rs
@@ -62,10 +62,11 @@ pub fn op_node_sys_to_uv_error(err: i32) -> String {
WSAEHOSTUNREACH => "EHOSTUNREACH",
ERROR_INSUFFICIENT_BUFFER => "EINVAL",
ERROR_INVALID_DATA => "EINVAL",
- ERROR_INVALID_NAME => "EINVAL",
+ ERROR_INVALID_NAME => "ENOENT",
ERROR_INVALID_PARAMETER => "EINVAL",
WSAEINVAL => "EINVAL",
WSAEPFNOSUPPORT => "EINVAL",
+ ERROR_NOT_A_REPARSE_POINT => "EINVAL",
ERROR_BEGINNING_OF_MEDIA => "EIO",
ERROR_BUS_RESET => "EIO",
ERROR_CRC => "EIO",
diff --git a/ext/node/ops/worker_threads.rs b/ext/node/ops/worker_threads.rs
index 4c50092f2..d2e575882 100644
--- a/ext/node/ops/worker_threads.rs
+++ b/ext/node/ops/worker_threads.rs
@@ -1,39 +1,56 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
-use deno_core::error::generic_error;
-use deno_core::error::AnyError;
use deno_core::op2;
use deno_core::url::Url;
use deno_core::OpState;
use deno_fs::FileSystemRc;
-use node_resolver::NodeResolution;
use std::borrow::Cow;
use std::path::Path;
use std::path::PathBuf;
use crate::NodePermissions;
-use crate::NodeRequireResolverRc;
-use crate::NodeResolverRc;
+use crate::NodeRequireLoaderRc;
#[must_use = "the resolved return value to mitigate time-of-check to time-of-use issues"]
fn ensure_read_permission<'a, P>(
state: &mut OpState,
file_path: &'a Path,
-) -> Result<Cow<'a, Path>, AnyError>
+) -> Result<Cow<'a, Path>, deno_core::error::AnyError>
where
P: NodePermissions + 'static,
{
- let resolver = state.borrow::<NodeRequireResolverRc>().clone();
+ let loader = state.borrow::<NodeRequireLoaderRc>().clone();
let permissions = state.borrow_mut::<P>();
- resolver.ensure_read_permission(permissions, file_path)
+ loader.ensure_read_permission(permissions, file_path)
}
+#[derive(Debug, thiserror::Error)]
+pub enum WorkerThreadsFilenameError {
+ #[error(transparent)]
+ Permission(deno_core::error::AnyError),
+ #[error("{0}")]
+ UrlParse(#[from] url::ParseError),
+ #[error("Relative path entries must start with '.' or '..'")]
+ InvalidRelativeUrl,
+ #[error("URL from Path-String")]
+ UrlFromPathString,
+ #[error("URL to Path-String")]
+ UrlToPathString,
+ #[error("URL to Path")]
+ UrlToPath,
+ #[error("File not found [{0:?}]")]
+ FileNotFound(PathBuf),
+ #[error(transparent)]
+ Fs(#[from] deno_io::fs::FsError),
+}
+
+// todo(dsherret): we should remove this and do all this work inside op_create_worker
#[op2]
#[string]
pub fn op_worker_threads_filename<P>(
state: &mut OpState,
#[string] specifier: String,
-) -> Result<String, AnyError>
+) -> Result<String, WorkerThreadsFilenameError>
where
P: NodePermissions + 'static,
{
@@ -45,44 +62,26 @@ where
} else {
let path = PathBuf::from(&specifier);
if path.is_relative() && !specifier.starts_with('.') {
- return Err(generic_error(
- "Relative path entries must start with '.' or '..'",
- ));
+ return Err(WorkerThreadsFilenameError::InvalidRelativeUrl);
}
- let path = ensure_read_permission::<P>(state, &path)?;
+ let path = ensure_read_permission::<P>(state, &path)
+ .map_err(WorkerThreadsFilenameError::Permission)?;
let fs = state.borrow::<FileSystemRc>();
let canonicalized_path =
- deno_core::strip_unc_prefix(fs.realpath_sync(&path)?);
+ deno_path_util::strip_unc_prefix(fs.realpath_sync(&path)?);
Url::from_file_path(canonicalized_path)
- .map_err(|e| generic_error(format!("URL from Path-String: {:#?}", e)))?
+ .map_err(|_| WorkerThreadsFilenameError::UrlFromPathString)?
};
let url_path = url
.to_file_path()
- .map_err(|e| generic_error(format!("URL to Path-String: {:#?}", e)))?;
- let url_path = ensure_read_permission::<P>(state, &url_path)?;
+ .map_err(|_| WorkerThreadsFilenameError::UrlToPathString)?;
+ let url_path = ensure_read_permission::<P>(state, &url_path)
+ .map_err(WorkerThreadsFilenameError::Permission)?;
let fs = state.borrow::<FileSystemRc>();
if !fs.exists_sync(&url_path) {
- return Err(generic_error(format!("File not found [{:?}]", url_path)));
- }
- let node_resolver = state.borrow::<NodeResolverRc>();
- match node_resolver.url_to_node_resolution(url)? {
- NodeResolution::Esm(u) => Ok(u.to_string()),
- NodeResolution::CommonJs(u) => wrap_cjs(u),
- NodeResolution::BuiltIn(_) => Err(generic_error("Neither ESM nor CJS")),
+ return Err(WorkerThreadsFilenameError::FileNotFound(
+ url_path.to_path_buf(),
+ ));
}
-}
-
-///
-/// Wrap a CJS file-URL and the required setup in a stringified `data:`-URL
-///
-fn wrap_cjs(url: Url) -> Result<String, AnyError> {
- let path = url
- .to_file_path()
- .map_err(|e| generic_error(format!("URL to Path: {:#?}", e)))?;
- let filename = path.file_name().unwrap().to_string_lossy();
- Ok(format!(
- "data:text/javascript,import {{ createRequire }} from \"node:module\";\
- const require = createRequire(\"{}\"); require(\"./{}\");",
- url, filename,
- ))
+ Ok(url.to_string())
}
diff --git a/ext/node/ops/zlib/brotli.rs b/ext/node/ops/zlib/brotli.rs
index 3e3905fc3..1a681ff7f 100644
--- a/ext/node/ops/zlib/brotli.rs
+++ b/ext/node/ops/zlib/brotli.rs
@@ -9,8 +9,6 @@ use brotli::BrotliDecompressStream;
use brotli::BrotliResult;
use brotli::BrotliState;
use brotli::Decompressor;
-use deno_core::error::type_error;
-use deno_core::error::AnyError;
use deno_core::op2;
use deno_core::JsBuffer;
use deno_core::OpState;
@@ -19,7 +17,23 @@ use deno_core::ToJsBuffer;
use std::cell::RefCell;
use std::io::Read;
-fn encoder_mode(mode: u32) -> Result<BrotliEncoderMode, AnyError> {
+#[derive(Debug, thiserror::Error)]
+pub enum BrotliError {
+ #[error("Invalid encoder mode")]
+ InvalidEncoderMode,
+ #[error("Failed to compress")]
+ CompressFailed,
+ #[error("Failed to decompress")]
+ DecompressFailed,
+ #[error(transparent)]
+ Join(#[from] tokio::task::JoinError),
+ #[error(transparent)]
+ Resource(deno_core::error::AnyError),
+ #[error("{0}")]
+ Io(std::io::Error),
+}
+
+fn encoder_mode(mode: u32) -> Result<BrotliEncoderMode, BrotliError> {
Ok(match mode {
0 => BrotliEncoderMode::BROTLI_MODE_GENERIC,
1 => BrotliEncoderMode::BROTLI_MODE_TEXT,
@@ -28,7 +42,7 @@ fn encoder_mode(mode: u32) -> Result<BrotliEncoderMode, AnyError> {
4 => BrotliEncoderMode::BROTLI_FORCE_MSB_PRIOR,
5 => BrotliEncoderMode::BROTLI_FORCE_UTF8_PRIOR,
6 => BrotliEncoderMode::BROTLI_FORCE_SIGNED_PRIOR,
- _ => return Err(type_error("Invalid encoder mode")),
+ _ => return Err(BrotliError::InvalidEncoderMode),
})
}
@@ -40,7 +54,7 @@ pub fn op_brotli_compress(
#[smi] quality: i32,
#[smi] lgwin: i32,
#[smi] mode: u32,
-) -> Result<usize, AnyError> {
+) -> Result<usize, BrotliError> {
let mode = encoder_mode(mode)?;
let mut out_size = out.len();
@@ -57,7 +71,7 @@ pub fn op_brotli_compress(
&mut |_, _, _, _| (),
);
if result != 1 {
- return Err(type_error("Failed to compress"));
+ return Err(BrotliError::CompressFailed);
}
Ok(out_size)
@@ -87,7 +101,7 @@ pub async fn op_brotli_compress_async(
#[smi] quality: i32,
#[smi] lgwin: i32,
#[smi] mode: u32,
-) -> Result<ToJsBuffer, AnyError> {
+) -> Result<ToJsBuffer, BrotliError> {
let mode = encoder_mode(mode)?;
tokio::task::spawn_blocking(move || {
let input = &*input;
@@ -107,7 +121,7 @@ pub async fn op_brotli_compress_async(
&mut |_, _, _, _| (),
);
if result != 1 {
- return Err(type_error("Failed to compress"));
+ return Err(BrotliError::CompressFailed);
}
out.truncate(out_size);
@@ -151,8 +165,11 @@ pub fn op_brotli_compress_stream(
#[smi] rid: u32,
#[buffer] input: &[u8],
#[buffer] output: &mut [u8],
-) -> Result<usize, AnyError> {
- let ctx = state.resource_table.get::<BrotliCompressCtx>(rid)?;
+) -> Result<usize, BrotliError> {
+ let ctx = state
+ .resource_table
+ .get::<BrotliCompressCtx>(rid)
+ .map_err(BrotliError::Resource)?;
let mut inst = ctx.inst.borrow_mut();
let mut output_offset = 0;
@@ -168,7 +185,7 @@ pub fn op_brotli_compress_stream(
&mut |_, _, _, _| (),
);
if !result {
- return Err(type_error("Failed to compress"));
+ return Err(BrotliError::CompressFailed);
}
Ok(output_offset)
@@ -180,8 +197,11 @@ pub fn op_brotli_compress_stream_end(
state: &mut OpState,
#[smi] rid: u32,
#[buffer] output: &mut [u8],
-) -> Result<usize, AnyError> {
- let ctx = state.resource_table.get::<BrotliCompressCtx>(rid)?;
+) -> Result<usize, BrotliError> {
+ let ctx = state
+ .resource_table
+ .get::<BrotliCompressCtx>(rid)
+ .map_err(BrotliError::Resource)?;
let mut inst = ctx.inst.borrow_mut();
let mut output_offset = 0;
@@ -197,13 +217,13 @@ pub fn op_brotli_compress_stream_end(
&mut |_, _, _, _| (),
);
if !result {
- return Err(type_error("Failed to compress"));
+ return Err(BrotliError::CompressFailed);
}
Ok(output_offset)
}
-fn brotli_decompress(buffer: &[u8]) -> Result<ToJsBuffer, AnyError> {
+fn brotli_decompress(buffer: &[u8]) -> Result<ToJsBuffer, std::io::Error> {
let mut output = Vec::with_capacity(4096);
let mut decompressor = Decompressor::new(buffer, buffer.len());
decompressor.read_to_end(&mut output)?;
@@ -214,7 +234,7 @@ fn brotli_decompress(buffer: &[u8]) -> Result<ToJsBuffer, AnyError> {
#[serde]
pub fn op_brotli_decompress(
#[buffer] buffer: &[u8],
-) -> Result<ToJsBuffer, AnyError> {
+) -> Result<ToJsBuffer, std::io::Error> {
brotli_decompress(buffer)
}
@@ -222,8 +242,11 @@ pub fn op_brotli_decompress(
#[serde]
pub async fn op_brotli_decompress_async(
#[buffer] buffer: JsBuffer,
-) -> Result<ToJsBuffer, AnyError> {
- tokio::task::spawn_blocking(move || brotli_decompress(&buffer)).await?
+) -> Result<ToJsBuffer, BrotliError> {
+ tokio::task::spawn_blocking(move || {
+ brotli_decompress(&buffer).map_err(BrotliError::Io)
+ })
+ .await?
}
struct BrotliDecompressCtx {
@@ -252,8 +275,11 @@ pub fn op_brotli_decompress_stream(
#[smi] rid: u32,
#[buffer] input: &[u8],
#[buffer] output: &mut [u8],
-) -> Result<usize, AnyError> {
- let ctx = state.resource_table.get::<BrotliDecompressCtx>(rid)?;
+) -> Result<usize, BrotliError> {
+ let ctx = state
+ .resource_table
+ .get::<BrotliDecompressCtx>(rid)
+ .map_err(BrotliError::Resource)?;
let mut inst = ctx.inst.borrow_mut();
let mut output_offset = 0;
@@ -268,7 +294,7 @@ pub fn op_brotli_decompress_stream(
&mut inst,
);
if matches!(result, BrotliResult::ResultFailure) {
- return Err(type_error("Failed to decompress"));
+ return Err(BrotliError::DecompressFailed);
}
Ok(output_offset)
@@ -280,8 +306,11 @@ pub fn op_brotli_decompress_stream_end(
state: &mut OpState,
#[smi] rid: u32,
#[buffer] output: &mut [u8],
-) -> Result<usize, AnyError> {
- let ctx = state.resource_table.get::<BrotliDecompressCtx>(rid)?;
+) -> Result<usize, BrotliError> {
+ let ctx = state
+ .resource_table
+ .get::<BrotliDecompressCtx>(rid)
+ .map_err(BrotliError::Resource)?;
let mut inst = ctx.inst.borrow_mut();
let mut output_offset = 0;
@@ -296,7 +325,7 @@ pub fn op_brotli_decompress_stream_end(
&mut inst,
);
if matches!(result, BrotliResult::ResultFailure) {
- return Err(type_error("Failed to decompress"));
+ return Err(BrotliError::DecompressFailed);
}
Ok(output_offset)
diff --git a/ext/node/ops/zlib/mod.rs b/ext/node/ops/zlib/mod.rs
index b1d6d21d2..991c0925d 100644
--- a/ext/node/ops/zlib/mod.rs
+++ b/ext/node/ops/zlib/mod.rs
@@ -1,14 +1,14 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
-use deno_core::error::type_error;
-use deno_core::error::AnyError;
+
use deno_core::op2;
+use libc::c_ulong;
use std::borrow::Cow;
use std::cell::RefCell;
use zlib::*;
mod alloc;
pub mod brotli;
-mod mode;
+pub mod mode;
mod stream;
use mode::Flush;
@@ -17,11 +17,11 @@ use mode::Mode;
use self::stream::StreamWrapper;
#[inline]
-fn check(condition: bool, msg: &str) -> Result<(), AnyError> {
+fn check(condition: bool, msg: &str) -> Result<(), deno_core::error::AnyError> {
if condition {
Ok(())
} else {
- Err(type_error(msg.to_string()))
+ Err(deno_core::error::type_error(msg.to_string()))
}
}
@@ -56,7 +56,7 @@ impl ZlibInner {
out_off: u32,
out_len: u32,
flush: Flush,
- ) -> Result<(), AnyError> {
+ ) -> Result<(), deno_core::error::AnyError> {
check(self.init_done, "write before init")?;
check(!self.write_in_progress, "write already in progress")?;
check(!self.pending_close, "close already in progress")?;
@@ -65,11 +65,11 @@ impl ZlibInner {
let next_in = input
.get(in_off as usize..in_off as usize + in_len as usize)
- .ok_or_else(|| type_error("invalid input range"))?
+ .ok_or_else(|| deno_core::error::type_error("invalid input range"))?
.as_ptr() as *mut _;
let next_out = out
.get_mut(out_off as usize..out_off as usize + out_len as usize)
- .ok_or_else(|| type_error("invalid output range"))?
+ .ok_or_else(|| deno_core::error::type_error("invalid output range"))?
.as_mut_ptr();
self.strm.avail_in = in_len;
@@ -81,7 +81,10 @@ impl ZlibInner {
Ok(())
}
- fn do_write(&mut self, flush: Flush) -> Result<(), AnyError> {
+ fn do_write(
+ &mut self,
+ flush: Flush,
+ ) -> Result<(), deno_core::error::AnyError> {
self.flush = flush;
match self.mode {
Mode::Deflate | Mode::Gzip | Mode::DeflateRaw => {
@@ -127,7 +130,7 @@ impl ZlibInner {
self.mode = Mode::Inflate;
}
} else if next_expected_header_byte.is_some() {
- return Err(type_error(
+ return Err(deno_core::error::type_error(
"invalid number of gzip magic number bytes read",
));
}
@@ -181,7 +184,7 @@ impl ZlibInner {
Ok(())
}
- fn init_stream(&mut self) -> Result<(), AnyError> {
+ fn init_stream(&mut self) -> Result<(), deno_core::error::AnyError> {
match self.mode {
Mode::Gzip | Mode::Gunzip => self.window_bits += 16,
Mode::Unzip => self.window_bits += 32,
@@ -199,7 +202,7 @@ impl ZlibInner {
Mode::Inflate | Mode::Gunzip | Mode::InflateRaw | Mode::Unzip => {
self.strm.inflate_init(self.window_bits)
}
- Mode::None => return Err(type_error("Unknown mode")),
+ Mode::None => return Err(deno_core::error::type_error("Unknown mode")),
};
self.write_in_progress = false;
@@ -208,7 +211,7 @@ impl ZlibInner {
Ok(())
}
- fn close(&mut self) -> Result<bool, AnyError> {
+ fn close(&mut self) -> Result<bool, deno_core::error::AnyError> {
if self.write_in_progress {
self.pending_close = true;
return Ok(false);
@@ -222,10 +225,8 @@ impl ZlibInner {
Ok(true)
}
- fn reset_stream(&mut self) -> Result<(), AnyError> {
+ fn reset_stream(&mut self) {
self.err = self.strm.reset(self.mode);
-
- Ok(())
}
}
@@ -243,7 +244,7 @@ impl deno_core::Resource for Zlib {
#[op2]
#[cppgc]
-pub fn op_zlib_new(#[smi] mode: i32) -> Result<Zlib, AnyError> {
+pub fn op_zlib_new(#[smi] mode: i32) -> Result<Zlib, mode::ModeError> {
let mode = Mode::try_from(mode)?;
let inner = ZlibInner {
@@ -256,12 +257,20 @@ pub fn op_zlib_new(#[smi] mode: i32) -> Result<Zlib, AnyError> {
})
}
+#[derive(Debug, thiserror::Error)]
+pub enum ZlibError {
+ #[error("zlib not initialized")]
+ NotInitialized,
+ #[error(transparent)]
+ Mode(#[from] mode::ModeError),
+ #[error(transparent)]
+ Other(#[from] deno_core::error::AnyError),
+}
+
#[op2(fast)]
-pub fn op_zlib_close(#[cppgc] resource: &Zlib) -> Result<(), AnyError> {
+pub fn op_zlib_close(#[cppgc] resource: &Zlib) -> Result<(), ZlibError> {
let mut resource = resource.inner.borrow_mut();
- let zlib = resource
- .as_mut()
- .ok_or_else(|| type_error("zlib not initialized"))?;
+ let zlib = resource.as_mut().ok_or(ZlibError::NotInitialized)?;
// If there is a pending write, defer the close until the write is done.
zlib.close()?;
@@ -282,11 +291,9 @@ pub fn op_zlib_write(
#[smi] out_off: u32,
#[smi] out_len: u32,
#[buffer] result: &mut [u32],
-) -> Result<i32, AnyError> {
+) -> Result<i32, ZlibError> {
let mut zlib = resource.inner.borrow_mut();
- let zlib = zlib
- .as_mut()
- .ok_or_else(|| type_error("zlib not initialized"))?;
+ let zlib = zlib.as_mut().ok_or(ZlibError::NotInitialized)?;
let flush = Flush::try_from(flush)?;
zlib.start_write(input, in_off, in_len, out, out_off, out_len, flush)?;
@@ -307,11 +314,9 @@ pub fn op_zlib_init(
#[smi] mem_level: i32,
#[smi] strategy: i32,
#[buffer] dictionary: &[u8],
-) -> Result<i32, AnyError> {
+) -> Result<i32, ZlibError> {
let mut zlib = resource.inner.borrow_mut();
- let zlib = zlib
- .as_mut()
- .ok_or_else(|| type_error("zlib not initialized"))?;
+ let zlib = zlib.as_mut().ok_or(ZlibError::NotInitialized)?;
check((8..=15).contains(&window_bits), "invalid windowBits")?;
check((-1..=9).contains(&level), "invalid level")?;
@@ -348,13 +353,11 @@ pub fn op_zlib_init(
#[op2(fast)]
#[smi]
-pub fn op_zlib_reset(#[cppgc] resource: &Zlib) -> Result<i32, AnyError> {
+pub fn op_zlib_reset(#[cppgc] resource: &Zlib) -> Result<i32, ZlibError> {
let mut zlib = resource.inner.borrow_mut();
- let zlib = zlib
- .as_mut()
- .ok_or_else(|| type_error("zlib not initialized"))?;
+ let zlib = zlib.as_mut().ok_or(ZlibError::NotInitialized)?;
- zlib.reset_stream()?;
+ zlib.reset_stream();
Ok(zlib.err)
}
@@ -362,12 +365,10 @@ pub fn op_zlib_reset(#[cppgc] resource: &Zlib) -> Result<i32, AnyError> {
#[op2(fast)]
pub fn op_zlib_close_if_pending(
#[cppgc] resource: &Zlib,
-) -> Result<(), AnyError> {
+) -> Result<(), ZlibError> {
let pending_close = {
let mut zlib = resource.inner.borrow_mut();
- let zlib = zlib
- .as_mut()
- .ok_or_else(|| type_error("zlib not initialized"))?;
+ let zlib = zlib.as_mut().ok_or(ZlibError::NotInitialized)?;
zlib.write_in_progress = false;
zlib.pending_close
@@ -381,6 +382,15 @@ pub fn op_zlib_close_if_pending(
Ok(())
}
+#[op2(fast)]
+#[smi]
+pub fn op_zlib_crc32(#[buffer] data: &[u8], #[smi] value: u32) -> u32 {
+ // SAFETY: `data` is a valid buffer.
+ unsafe {
+ zlib::crc32(value as c_ulong, data.as_ptr(), data.len() as u32) as u32
+ }
+}
+
#[cfg(test)]
mod tests {
use super::*;
diff --git a/ext/node/ops/zlib/mode.rs b/ext/node/ops/zlib/mode.rs
index 753300cc4..41565f9b1 100644
--- a/ext/node/ops/zlib/mode.rs
+++ b/ext/node/ops/zlib/mode.rs
@@ -1,19 +1,8 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
-#[derive(Debug)]
-pub enum Error {
- BadArgument,
-}
-
-impl std::fmt::Display for Error {
- fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
- match self {
- Error::BadArgument => write!(f, "bad argument"),
- }
- }
-}
-
-impl std::error::Error for Error {}
+#[derive(Debug, thiserror::Error)]
+#[error("bad argument")]
+pub struct ModeError;
macro_rules! repr_i32 {
($(#[$meta:meta])* $vis:vis enum $name:ident {
@@ -25,12 +14,12 @@ macro_rules! repr_i32 {
}
impl core::convert::TryFrom<i32> for $name {
- type Error = Error;
+ type Error = ModeError;
fn try_from(v: i32) -> Result<Self, Self::Error> {
match v {
$(x if x == $name::$vname as i32 => Ok($name::$vname),)*
- _ => Err(Error::BadArgument),
+ _ => Err(ModeError),
}
}
}
diff --git a/ext/node/polyfills/01_require.js b/ext/node/polyfills/01_require.js
index 5b0980c31..083d4e49b 100644
--- a/ext/node/polyfills/01_require.js
+++ b/ext/node/polyfills/01_require.js
@@ -11,6 +11,7 @@ import {
op_require_can_parse_as_esm,
op_require_init_paths,
op_require_is_deno_dir_package,
+ op_require_is_maybe_cjs,
op_require_is_request_relative,
op_require_node_module_paths,
op_require_package_imports_resolve,
@@ -19,7 +20,6 @@ import {
op_require_path_is_absolute,
op_require_path_resolve,
op_require_proxy_path,
- op_require_read_closest_package_json,
op_require_read_file,
op_require_read_package_scope,
op_require_real_path,
@@ -523,17 +523,13 @@ function resolveExports(
return;
}
- if (!parentPath) {
- return false;
- }
-
return op_require_resolve_exports(
usesLocalNodeModulesDir,
modulesPath,
request,
name,
expansion,
- parentPath,
+ parentPath ?? "",
) ?? false;
}
@@ -1064,23 +1060,22 @@ Module.prototype._compile = function (content, filename, format) {
return result;
};
-Module._extensions[".js"] = function (module, filename) {
- const content = op_require_read_file(filename);
-
- let format;
- if (StringPrototypeEndsWith(filename, ".js")) {
- const pkg = op_require_read_closest_package_json(filename);
- if (pkg?.type === "module") {
- format = "module";
- } else if (pkg?.type === "commonjs") {
- format = "commonjs";
- }
- } else if (StringPrototypeEndsWith(filename, ".cjs")) {
- format = "commonjs";
- }
-
- module._compile(content, filename, format);
-};
+Module._extensions[".js"] =
+ Module._extensions[".ts"] =
+ Module._extensions[".jsx"] =
+ Module._extensions[".tsx"] =
+ function (module, filename) {
+ const content = op_require_read_file(filename);
+ const format = op_require_is_maybe_cjs(filename) ? undefined : "module";
+ module._compile(content, filename, format);
+ };
+
+Module._extensions[".cjs"] =
+ Module._extensions[".cts"] =
+ function (module, filename) {
+ const content = op_require_read_file(filename);
+ module._compile(content, filename, "commonjs");
+ };
function loadESMFromCJS(module, filename, code) {
const namespace = op_import_sync(
@@ -1091,7 +1086,10 @@ function loadESMFromCJS(module, filename, code) {
module.exports = namespace;
}
-Module._extensions[".mjs"] = function (module, filename) {
+Module._extensions[".mjs"] = Module._extensions[".mts"] = function (
+ module,
+ filename,
+) {
loadESMFromCJS(module, filename);
};
@@ -1212,6 +1210,24 @@ function isBuiltin(moduleName) {
!StringPrototypeStartsWith(moduleName, "internal/");
}
+function getBuiltinModule(id) {
+ if (!isBuiltin(id)) {
+ return undefined;
+ }
+
+ if (StringPrototypeStartsWith(id, "node:")) {
+ // Slice 'node:' prefix
+ id = StringPrototypeSlice(id, 5);
+ }
+
+ const mod = loadNativeModule(id, id);
+ if (mod) {
+ return mod.exports;
+ }
+
+ return undefined;
+}
+
Module.isBuiltin = isBuiltin;
Module.createRequire = createRequire;
@@ -1291,6 +1307,8 @@ export function findSourceMap(_path) {
return undefined;
}
+Module.findSourceMap = findSourceMap;
+
/**
* @param {string | URL} _specifier
* @param {string | URL} _parentUrl
@@ -1304,7 +1322,7 @@ export function register(_specifier, _parentUrl, _options) {
return undefined;
}
-export { builtinModules, createRequire, isBuiltin, Module };
+export { builtinModules, createRequire, getBuiltinModule, isBuiltin, Module };
export const _cache = Module._cache;
export const _extensions = Module._extensions;
export const _findPath = Module._findPath;
diff --git a/ext/node/polyfills/_fs/_fs_common.ts b/ext/node/polyfills/_fs/_fs_common.ts
index ac0bf5a55..a29548bb3 100644
--- a/ext/node/polyfills/_fs/_fs_common.ts
+++ b/ext/node/polyfills/_fs/_fs_common.ts
@@ -20,6 +20,7 @@ import {
notImplemented,
TextEncodings,
} from "ext:deno_node/_utils.ts";
+import { type Buffer } from "node:buffer";
export type CallbackWithError = (err: ErrnoException | null) => void;
diff --git a/ext/node/polyfills/_fs/_fs_copy.ts b/ext/node/polyfills/_fs/_fs_copy.ts
index 2f8ddf4fc..0434bff4d 100644
--- a/ext/node/polyfills/_fs/_fs_copy.ts
+++ b/ext/node/polyfills/_fs/_fs_copy.ts
@@ -53,8 +53,9 @@ export function copyFile(
}, (e) => {
if (e instanceof Deno.errors.NotFound) {
Deno.copyFile(srcStr, destStr).then(() => cb(null), cb);
+ } else {
+ cb(e);
}
- cb(e);
});
} else {
Deno.copyFile(srcStr, destStr).then(() => cb(null), cb);
@@ -83,8 +84,9 @@ export function copyFileSync(
} catch (e) {
if (e instanceof Deno.errors.NotFound) {
Deno.copyFileSync(srcStr, destStr);
+ } else {
+ throw e;
}
- throw e;
}
} else {
Deno.copyFileSync(srcStr, destStr);
diff --git a/ext/node/polyfills/_fs/_fs_open.ts b/ext/node/polyfills/_fs/_fs_open.ts
index 8bd989790..31ca4bb61 100644
--- a/ext/node/polyfills/_fs/_fs_open.ts
+++ b/ext/node/polyfills/_fs/_fs_open.ts
@@ -147,8 +147,8 @@ export function open(
export function openPromise(
path: string | Buffer | URL,
- flags?: openFlags = "r",
- mode? = 0o666,
+ flags: openFlags = "r",
+ mode = 0o666,
): Promise<FileHandle> {
return new Promise((resolve, reject) => {
open(path, flags, mode, (err, fd) => {
diff --git a/ext/node/polyfills/_fs/_fs_readFile.ts b/ext/node/polyfills/_fs/_fs_readFile.ts
index 0f05ee167..cf7e0305d 100644
--- a/ext/node/polyfills/_fs/_fs_readFile.ts
+++ b/ext/node/polyfills/_fs/_fs_readFile.ts
@@ -19,6 +19,7 @@ import {
TextEncodings,
} from "ext:deno_node/_utils.ts";
import { FsFile } from "ext:deno_fs/30_fs.js";
+import { denoErrorToNodeError } from "ext:deno_node/internal/errors.ts";
function maybeDecode(data: Uint8Array, encoding: TextEncodings): string;
function maybeDecode(
@@ -87,7 +88,7 @@ export function readFile(
}
const buffer = maybeDecode(data, encoding);
(cb as BinaryCallback)(null, buffer);
- }, (err) => cb && cb(err));
+ }, (err) => cb && cb(denoErrorToNodeError(err)));
}
}
@@ -117,7 +118,12 @@ export function readFileSync(
opt?: FileOptionsArgument,
): string | Buffer {
path = path instanceof URL ? pathFromURL(path) : path;
- const data = Deno.readFileSync(path);
+ let data;
+ try {
+ data = Deno.readFileSync(path);
+ } catch (err) {
+ throw denoErrorToNodeError(err);
+ }
const encoding = getEncoding(opt);
if (encoding && encoding !== "binary") {
const text = maybeDecode(data, encoding);
diff --git a/ext/node/polyfills/_fs/_fs_readlink.ts b/ext/node/polyfills/_fs/_fs_readlink.ts
index 5f2312798..08bea843f 100644
--- a/ext/node/polyfills/_fs/_fs_readlink.ts
+++ b/ext/node/polyfills/_fs/_fs_readlink.ts
@@ -4,13 +4,10 @@
// deno-lint-ignore-file prefer-primordials
import { TextEncoder } from "ext:deno_web/08_text_encoding.js";
-import {
- intoCallbackAPIWithIntercept,
- MaybeEmpty,
- notImplemented,
-} from "ext:deno_node/_utils.ts";
+import { MaybeEmpty, notImplemented } from "ext:deno_node/_utils.ts";
import { pathFromURL } from "ext:deno_web/00_infra.js";
import { promisify } from "ext:deno_node/internal/util.mjs";
+import { denoErrorToNodeError } from "ext:deno_node/internal/errors.ts";
type ReadlinkCallback = (
err: MaybeEmpty<Error>,
@@ -69,12 +66,17 @@ export function readlink(
const encoding = getEncoding(optOrCallback);
- intoCallbackAPIWithIntercept<string, Uint8Array | string>(
- Deno.readLink,
- (data: string): string | Uint8Array => maybeEncode(data, encoding),
- cb,
- path,
- );
+ Deno.readLink(path).then((data: string) => {
+ const res = maybeEncode(data, encoding);
+ if (cb) cb(null, res);
+ }, (err: Error) => {
+ if (cb) {
+ (cb as (e: Error) => void)(denoErrorToNodeError(err, {
+ syscall: "readlink",
+ path,
+ }));
+ }
+ });
}
export const readlinkPromise = promisify(readlink) as (
@@ -88,5 +90,12 @@ export function readlinkSync(
): string | Uint8Array {
path = path instanceof URL ? pathFromURL(path) : path;
- return maybeEncode(Deno.readLinkSync(path), getEncoding(opt));
+ try {
+ return maybeEncode(Deno.readLinkSync(path), getEncoding(opt));
+ } catch (error) {
+ throw denoErrorToNodeError(error, {
+ syscall: "readlink",
+ path,
+ });
+ }
}
diff --git a/ext/node/polyfills/_fs/_fs_readv.ts b/ext/node/polyfills/_fs/_fs_readv.ts
index 384f5e319..2259f029a 100644
--- a/ext/node/polyfills/_fs/_fs_readv.ts
+++ b/ext/node/polyfills/_fs/_fs_readv.ts
@@ -15,6 +15,7 @@ import { maybeCallback } from "ext:deno_node/_fs/_fs_common.ts";
import { validateInteger } from "ext:deno_node/internal/validators.mjs";
import * as io from "ext:deno_io/12_io.js";
import { op_fs_seek_async, op_fs_seek_sync } from "ext:core/ops";
+import process from "node:process";
type Callback = (
err: ErrnoException | null,
diff --git a/ext/node/polyfills/_fs/_fs_stat.ts b/ext/node/polyfills/_fs/_fs_stat.ts
index c4ed82d57..507cb05ea 100644
--- a/ext/node/polyfills/_fs/_fs_stat.ts
+++ b/ext/node/polyfills/_fs/_fs_stat.ts
@@ -290,8 +290,8 @@ export function convertFileInfoToStats(origin: Deno.FileInfo): Stats {
isFIFO: () => false,
isCharacterDevice: () => false,
isSocket: () => false,
- ctime: origin.mtime,
- ctimeMs: origin.mtime?.getTime() || null,
+ ctime: origin.ctime,
+ ctimeMs: origin.ctime?.getTime() || null,
});
return stats;
@@ -336,9 +336,9 @@ export function convertFileInfoToBigIntStats(
isFIFO: () => false,
isCharacterDevice: () => false,
isSocket: () => false,
- ctime: origin.mtime,
- ctimeMs: origin.mtime ? BigInt(origin.mtime.getTime()) : null,
- ctimeNs: origin.mtime ? BigInt(origin.mtime.getTime()) * 1000000n : null,
+ ctime: origin.ctime,
+ ctimeMs: origin.ctime ? BigInt(origin.ctime.getTime()) : null,
+ ctimeNs: origin.ctime ? BigInt(origin.ctime.getTime()) * 1000000n : null,
});
return stats;
}
@@ -383,7 +383,10 @@ export function stat(
Deno.stat(path).then(
(stat) => callback(null, CFISBIS(stat, options.bigint)),
- (err) => callback(denoErrorToNodeError(err, { syscall: "stat" })),
+ (err) =>
+ callback(
+ denoErrorToNodeError(err, { syscall: "stat", path: getPathname(path) }),
+ ),
);
}
@@ -417,9 +420,16 @@ export function statSync(
return;
}
if (err instanceof Error) {
- throw denoErrorToNodeError(err, { syscall: "stat" });
+ throw denoErrorToNodeError(err, {
+ syscall: "stat",
+ path: getPathname(path),
+ });
} else {
throw err;
}
}
}
+
+function getPathname(path: string | URL) {
+ return typeof path === "string" ? path : path.pathname;
+}
diff --git a/ext/node/polyfills/_next_tick.ts b/ext/node/polyfills/_next_tick.ts
index 5ee27728d..af306a29c 100644
--- a/ext/node/polyfills/_next_tick.ts
+++ b/ext/node/polyfills/_next_tick.ts
@@ -62,6 +62,8 @@ export function processTicksAndRejections() {
callback(...args);
}
}
+ } catch (e) {
+ reportError(e);
} finally {
// FIXME(bartlomieju): Deno currently doesn't support async hooks
// if (destroyHooksExist())
@@ -87,8 +89,7 @@ export function runNextTicks() {
// runMicrotasks();
// if (!hasTickScheduled() && !hasRejectionToWarn())
// return;
- if (!core.hasTickScheduled()) {
- core.runMicrotasks();
+ if (queue.isEmpty() || !core.hasTickScheduled()) {
return true;
}
diff --git a/ext/node/polyfills/_process/streams.mjs b/ext/node/polyfills/_process/streams.mjs
index 7936e82aa..3573956c9 100644
--- a/ext/node/polyfills/_process/streams.mjs
+++ b/ext/node/polyfills/_process/streams.mjs
@@ -66,14 +66,19 @@ export function createWritableStdioStream(writer, name, warmup = false) {
// We cannot call `writer?.isTerminal()` eagerly here
let getIsTTY = () => writer?.isTerminal();
+ const getColumns = () =>
+ stream._columns ||
+ (writer?.isTerminal() ? Deno.consoleSize?.().columns : undefined);
ObjectDefineProperties(stream, {
columns: {
__proto__: null,
enumerable: true,
configurable: true,
- get: () =>
- writer?.isTerminal() ? Deno.consoleSize?.().columns : undefined,
+ get: () => getColumns(),
+ set: (value) => {
+ stream._columns = value;
+ },
},
rows: {
__proto__: null,
diff --git a/ext/node/polyfills/_tls_wrap.ts b/ext/node/polyfills/_tls_wrap.ts
index a614b45df..e36fc637e 100644
--- a/ext/node/polyfills/_tls_wrap.ts
+++ b/ext/node/polyfills/_tls_wrap.ts
@@ -68,6 +68,7 @@ export class TLSSocket extends net.Socket {
secureConnecting: boolean;
_SNICallback: any;
servername: string | null;
+ alpnProtocol: string | boolean | null;
alpnProtocols: string[] | null;
authorized: boolean;
authorizationError: any;
@@ -114,6 +115,7 @@ export class TLSSocket extends net.Socket {
this.secureConnecting = true;
this._SNICallback = null;
this.servername = null;
+ this.alpnProtocol = null;
this.alpnProtocols = tlsOptions.ALPNProtocols;
this.authorized = false;
this.authorizationError = null;
@@ -151,10 +153,21 @@ export class TLSSocket extends net.Socket {
handle.afterConnect = async (req: any, status: number) => {
try {
const conn = await Deno.startTls(handle[kStreamBaseField], options);
+ try {
+ const hs = await conn.handshake();
+ if (hs.alpnProtocol) {
+ tlssock.alpnProtocol = hs.alpnProtocol;
+ } else {
+ tlssock.alpnProtocol = false;
+ }
+ } catch {
+ // Don't interrupt "secure" event to let the first read/write
+ // operation emit the error.
+ }
handle[kStreamBaseField] = conn;
tlssock.emit("secure");
tlssock.removeListener("end", onConnectEnd);
- } catch {
+ } catch (_) {
// TODO(kt3k): Handle this
}
return afterConnect.call(handle, req, status);
@@ -269,6 +282,7 @@ export class ServerImpl extends EventEmitter {
// Creates TCP handle and socket directly from Deno.TlsConn.
// This works as TLS socket. We don't use TLSSocket class for doing
// this because Deno.startTls only supports client side tcp connection.
+ // TODO(@satyarohith): set TLSSocket.alpnProtocol when we use TLSSocket class.
const handle = new TCP(TCPConstants.SOCKET, await listener.accept());
const socket = new net.Socket({ handle });
this.emit("secureConnection", socket);
diff --git a/ext/node/polyfills/_utils.ts b/ext/node/polyfills/_utils.ts
index b50c113e1..79d84e00f 100644
--- a/ext/node/polyfills/_utils.ts
+++ b/ext/node/polyfills/_utils.ts
@@ -17,6 +17,7 @@ const {
import { TextDecoder, TextEncoder } from "ext:deno_web/08_text_encoding.js";
import { errorMap } from "ext:deno_node/internal_binding/uv.ts";
import { codes } from "ext:deno_node/internal/error_codes.ts";
+import { ERR_NOT_IMPLEMENTED } from "ext:deno_node/internal/errors.ts";
export type BinaryEncodings = "binary";
@@ -34,8 +35,7 @@ export type TextEncodings =
export type Encodings = BinaryEncodings | TextEncodings;
export function notImplemented(msg: string): never {
- const message = msg ? `Not implemented: ${msg}` : "Not implemented";
- throw new Error(message);
+ throw new ERR_NOT_IMPLEMENTED(msg);
}
export function warnNotImplemented(msg?: string) {
diff --git a/ext/node/polyfills/_zlib.mjs b/ext/node/polyfills/_zlib.mjs
index 851bd602f..07fc440ef 100644
--- a/ext/node/polyfills/_zlib.mjs
+++ b/ext/node/polyfills/_zlib.mjs
@@ -14,6 +14,7 @@ import { nextTick } from "ext:deno_node/_next_tick.ts";
import {
isAnyArrayBuffer,
isArrayBufferView,
+ isUint8Array,
} from "ext:deno_node/internal/util/types.ts";
var kRangeErrorMessage = "Cannot create final Buffer. It would be larger " +
@@ -158,6 +159,12 @@ export const inflateRawSync = function (buffer, opts) {
function sanitizeInput(input) {
if (typeof input === "string") input = Buffer.from(input);
+ if (isArrayBufferView(input) && !isUint8Array(input)) {
+ input = Buffer.from(input.buffer, input.byteOffset, input.byteLength);
+ } else if (isAnyArrayBuffer(input)) {
+ input = Buffer.from(input);
+ }
+
if (
!Buffer.isBuffer(input) &&
(input.buffer && !input.buffer.constructor === ArrayBuffer)
diff --git a/ext/node/polyfills/child_process.ts b/ext/node/polyfills/child_process.ts
index c37dfc410..eda718ff3 100644
--- a/ext/node/polyfills/child_process.ts
+++ b/ext/node/polyfills/child_process.ts
@@ -132,6 +132,8 @@ export function fork(
rm = 2;
}
execArgv.splice(index, rm);
+ } else if (flag.startsWith("--no-warnings")) {
+ execArgv[index] = "--quiet";
} else {
index++;
}
diff --git a/ext/node/polyfills/http.ts b/ext/node/polyfills/http.ts
index f3f6f86ed..9a920adee 100644
--- a/ext/node/polyfills/http.ts
+++ b/ext/node/polyfills/http.ts
@@ -34,6 +34,7 @@ import {
finished,
Readable as NodeReadable,
Writable as NodeWritable,
+ WritableOptions as NodeWritableOptions,
} from "node:stream";
import {
kUniqueHeaders,
@@ -66,12 +67,13 @@ import { headersEntries } from "ext:deno_fetch/20_headers.js";
import { timerId } from "ext:deno_web/03_abort_signal.js";
import { clearTimeout as webClearTimeout } from "ext:deno_web/02_timers.js";
import { resourceForReadableStream } from "ext:deno_web/06_streams.js";
-import { TcpConn } from "ext:deno_net/01_net.js";
+import { UpgradedConn } from "ext:deno_net/01_net.js";
import { STATUS_CODES } from "node:_http_server";
import { methods as METHODS } from "node:_http_common";
+import { deprecate } from "node:util";
const { internalRidSymbol } = core;
-const { ArrayIsArray } = primordials;
+const { ArrayIsArray, StringPrototypeToLowerCase } = primordials;
type Chunk = string | Buffer | Uint8Array;
@@ -516,7 +518,7 @@ class ClientRequest extends OutgoingMessage {
);
assert(typeof res.remoteAddrIp !== "undefined");
assert(typeof res.remoteAddrIp !== "undefined");
- const conn = new TcpConn(
+ const conn = new UpgradedConn(
upgradeRid,
{
transport: "tcp",
@@ -1183,49 +1185,95 @@ function onError(self, error, cb) {
}
}
-export class ServerResponse extends NodeWritable {
- statusCode = 200;
- statusMessage?: string = undefined;
- #headers: Record<string, string | string[]> = { __proto__: null };
- #hasNonStringHeaders: boolean = false;
- #readable: ReadableStream;
- override writable = true;
- // used by `npm:on-finished`
- finished = false;
- headersSent = false;
- #resolve: (value: Response | PromiseLike<Response>) => void;
+export type ServerResponse = {
+ statusCode: number;
+ statusMessage?: string;
+
+ _headers: Record<string, string | string[]>;
+ _hasNonStringHeaders: boolean;
+
+ _readable: ReadableStream;
+ finished: boolean;
+ headersSent: boolean;
+ _resolve: (value: Response | PromiseLike<Response>) => void;
+ // deno-lint-ignore no-explicit-any
+ _socketOverride: any | null;
// deno-lint-ignore no-explicit-any
- #socketOverride: any | null = null;
+ socket: any | null;
- static #enqueue(controller: ReadableStreamDefaultController, chunk: Chunk) {
- try {
- if (typeof chunk === "string") {
- controller.enqueue(ENCODER.encode(chunk));
- } else {
- controller.enqueue(chunk);
- }
- } catch (_) {
- // The stream might have been closed. Ignore the error.
- }
- }
+ setHeader(name: string, value: string | string[]): void;
+ appendHeader(name: string, value: string | string[]): void;
+ getHeader(name: string): string | string[];
+ removeHeader(name: string): void;
+ getHeaderNames(): string[];
+ getHeaders(): Record<string, string | number | string[]>;
+ hasHeader(name: string): boolean;
- /** Returns true if the response body should be null with the given
- * http status code */
- static #bodyShouldBeNull(status: number) {
- return status === 101 || status === 204 || status === 205 || status === 304;
- }
+ writeHead(
+ status: number,
+ statusMessage?: string,
+ headers?:
+ | Record<string, string | number | string[]>
+ | Array<[string, string]>,
+ ): void;
+ writeHead(
+ status: number,
+ headers?:
+ | Record<string, string | number | string[]>
+ | Array<[string, string]>,
+ ): void;
- constructor(
+ _ensureHeaders(singleChunk?: Chunk): void;
+
+ respond(final: boolean, singleChunk?: Chunk): void;
+ // deno-lint-ignore no-explicit-any
+ end(chunk?: any, encoding?: any, cb?: any): void;
+
+ flushHeaders(): void;
+ _implicitHeader(): void;
+
+ // Undocumented field used by `npm:light-my-request`.
+ _header: string;
+
+ assignSocket(socket): void;
+ detachSocket(socket): void;
+} & { -readonly [K in keyof NodeWritable]: NodeWritable[K] };
+
+type ServerResponseStatic = {
+ new (
resolve: (value: Response | PromiseLike<Response>) => void,
socket: FakeSocket,
- ) {
- let controller: ReadableByteStreamController;
- const readable = new ReadableStream({
- start(c) {
- controller = c as ReadableByteStreamController;
- },
- });
- super({
+ ): ServerResponse;
+ _enqueue(controller: ReadableStreamDefaultController, chunk: Chunk): void;
+ _bodyShouldBeNull(statusCode: number): boolean;
+};
+
+export const ServerResponse = function (
+ this: ServerResponse,
+ resolve: (value: Response | PromiseLike<Response>) => void,
+ socket: FakeSocket,
+) {
+ this.statusCode = 200;
+ this.statusMessage = undefined;
+ this._headers = { __proto__: null };
+ this._hasNonStringHeaders = false;
+ this.writable = true;
+
+ // used by `npm:on-finished`
+ this.finished = false;
+ this.headersSent = false;
+ this._socketOverride = null;
+
+ let controller: ReadableByteStreamController;
+ const readable = new ReadableStream({
+ start(c) {
+ controller = c as ReadableByteStreamController;
+ },
+ });
+
+ NodeWritable.call(
+ this,
+ {
autoDestroy: true,
defaultEncoding: "utf-8",
emitClose: true,
@@ -1234,16 +1282,16 @@ export class ServerResponse extends NodeWritable {
write: (chunk, encoding, cb) => {
// Writes chunks are directly written to the socket if
// one is assigned via assignSocket()
- if (this.#socketOverride && this.#socketOverride.writable) {
- this.#socketOverride.write(chunk, encoding);
+ if (this._socketOverride && this._socketOverride.writable) {
+ this._socketOverride.write(chunk, encoding);
return cb();
}
if (!this.headersSent) {
- ServerResponse.#enqueue(controller, chunk);
+ ServerResponse._enqueue(controller, chunk);
this.respond(false);
return cb();
}
- ServerResponse.#enqueue(controller, chunk);
+ ServerResponse._enqueue(controller, chunk);
return cb();
},
final: (cb) => {
@@ -1259,192 +1307,269 @@ export class ServerResponse extends NodeWritable {
}
return cb(null);
},
- });
- this.#readable = readable;
- this.#resolve = resolve;
- this.socket = socket;
+ } satisfies NodeWritableOptions,
+ );
+
+ this._readable = readable;
+ this._resolve = resolve;
+ this.socket = socket;
+
+ this._header = "";
+} as unknown as ServerResponseStatic;
+
+Object.setPrototypeOf(ServerResponse.prototype, NodeWritable.prototype);
+Object.setPrototypeOf(ServerResponse, NodeWritable);
+
+ServerResponse._enqueue = function (
+ this: ServerResponse,
+ controller: ReadableStreamDefaultController,
+ chunk: Chunk,
+) {
+ try {
+ if (typeof chunk === "string") {
+ controller.enqueue(ENCODER.encode(chunk));
+ } else {
+ controller.enqueue(chunk);
+ }
+ } catch (_) {
+ // The stream might have been closed. Ignore the error.
}
+};
- setHeader(name: string, value: string | string[]) {
- if (Array.isArray(value)) {
- this.#hasNonStringHeaders = true;
- }
- this.#headers[name] = value;
- return this;
+/** Returns true if the response body should be null with the given
+ * http status code */
+ServerResponse._bodyShouldBeNull = function (
+ this: ServerResponse,
+ status: number,
+) {
+ return status === 101 || status === 204 || status === 205 || status === 304;
+};
+
+ServerResponse.prototype.setHeader = function (
+ this: ServerResponse,
+ name: string,
+ value: string | string[],
+) {
+ if (Array.isArray(value)) {
+ this._hasNonStringHeaders = true;
}
+ this._headers[StringPrototypeToLowerCase(name)] = value;
+ return this;
+};
- appendHeader(name: string, value: string | string[]) {
- if (this.#headers[name] === undefined) {
- if (Array.isArray(value)) this.#hasNonStringHeaders = true;
- this.#headers[name] = value;
+ServerResponse.prototype.appendHeader = function (
+ this: ServerResponse,
+ name: string,
+ value: string | string[],
+) {
+ const key = StringPrototypeToLowerCase(name);
+ if (this._headers[key] === undefined) {
+ if (Array.isArray(value)) this._hasNonStringHeaders = true;
+ this._headers[key] = value;
+ } else {
+ this._hasNonStringHeaders = true;
+ if (!Array.isArray(this._headers[key])) {
+ this._headers[key] = [this._headers[key]];
+ }
+ const header = this._headers[key];
+ if (Array.isArray(value)) {
+ header.push(...value);
} else {
- this.#hasNonStringHeaders = true;
- if (!Array.isArray(this.#headers[name])) {
- this.#headers[name] = [this.#headers[name]];
- }
- const header = this.#headers[name];
- if (Array.isArray(value)) {
- header.push(...value);
- } else {
- header.push(value);
- }
+ header.push(value);
}
- return this;
}
+ return this;
+};
- getHeader(name: string) {
- return this.#headers[name];
- }
- removeHeader(name: string) {
- delete this.#headers[name];
- }
- getHeaderNames() {
- return Object.keys(this.#headers);
- }
- getHeaders(): Record<string, string | number | string[]> {
- // @ts-ignore Ignore null __proto__
- return { __proto__: null, ...this.#headers };
- }
- hasHeader(name: string) {
- return Object.hasOwn(this.#headers, name);
- }
+ServerResponse.prototype.getHeader = function (
+ this: ServerResponse,
+ name: string,
+) {
+ return this._headers[StringPrototypeToLowerCase(name)];
+};
- writeHead(
- status: number,
- statusMessage?: string,
- headers?:
- | Record<string, string | number | string[]>
- | Array<[string, string]>,
- ): this;
- writeHead(
- status: number,
- headers?:
- | Record<string, string | number | string[]>
- | Array<[string, string]>,
- ): this;
- writeHead(
- status: number,
- statusMessageOrHeaders?:
- | string
- | Record<string, string | number | string[]>
- | Array<[string, string]>,
- maybeHeaders?:
- | Record<string, string | number | string[]>
- | Array<[string, string]>,
- ): this {
- this.statusCode = status;
-
- let headers = null;
- if (typeof statusMessageOrHeaders === "string") {
- this.statusMessage = statusMessageOrHeaders;
- if (maybeHeaders !== undefined) {
- headers = maybeHeaders;
- }
- } else if (statusMessageOrHeaders !== undefined) {
- headers = statusMessageOrHeaders;
- }
+ServerResponse.prototype.removeHeader = function (
+ this: ServerResponse,
+ name: string,
+) {
+ delete this._headers[StringPrototypeToLowerCase(name)];
+};
- if (headers !== null) {
- if (ArrayIsArray(headers)) {
- headers = headers as Array<[string, string]>;
- for (let i = 0; i < headers.length; i++) {
- this.appendHeader(headers[i][0], headers[i][1]);
- }
- } else {
- headers = headers as Record<string, string>;
- for (const k in headers) {
- if (Object.hasOwn(headers, k)) {
- this.setHeader(k, headers[k]);
- }
+ServerResponse.prototype.getHeaderNames = function (this: ServerResponse) {
+ return Object.keys(this._headers);
+};
+
+ServerResponse.prototype.getHeaders = function (
+ this: ServerResponse,
+): Record<string, string | number | string[]> {
+ return { __proto__: null, ...this._headers };
+};
+
+ServerResponse.prototype.hasHeader = function (
+ this: ServerResponse,
+ name: string,
+) {
+ return Object.hasOwn(this._headers, name);
+};
+
+ServerResponse.prototype.writeHead = function (
+ this: ServerResponse,
+ status: number,
+ statusMessageOrHeaders?:
+ | string
+ | Record<string, string | number | string[]>
+ | Array<[string, string]>,
+ maybeHeaders?:
+ | Record<string, string | number | string[]>
+ | Array<[string, string]>,
+) {
+ this.statusCode = status;
+
+ let headers = null;
+ if (typeof statusMessageOrHeaders === "string") {
+ this.statusMessage = statusMessageOrHeaders;
+ if (maybeHeaders !== undefined) {
+ headers = maybeHeaders;
+ }
+ } else if (statusMessageOrHeaders !== undefined) {
+ headers = statusMessageOrHeaders;
+ }
+
+ if (headers !== null) {
+ if (ArrayIsArray(headers)) {
+ headers = headers as Array<[string, string]>;
+ for (let i = 0; i < headers.length; i++) {
+ this.appendHeader(headers[i][0], headers[i][1]);
+ }
+ } else {
+ headers = headers as Record<string, string>;
+ for (const k in headers) {
+ if (Object.hasOwn(headers, k)) {
+ this.setHeader(k, headers[k]);
}
}
}
+ }
- return this;
+ return this;
+};
+
+ServerResponse.prototype._ensureHeaders = function (
+ this: ServerResponse,
+ singleChunk?: Chunk,
+) {
+ if (this.statusCode === 200 && this.statusMessage === undefined) {
+ this.statusMessage = "OK";
}
+ if (typeof singleChunk === "string" && !this.hasHeader("content-type")) {
+ this.setHeader("content-type", "text/plain;charset=UTF-8");
+ }
+};
- #ensureHeaders(singleChunk?: Chunk) {
- if (this.statusCode === 200 && this.statusMessage === undefined) {
- this.statusMessage = "OK";
- }
- if (
- typeof singleChunk === "string" &&
- !this.hasHeader("content-type")
- ) {
- this.setHeader("content-type", "text/plain;charset=UTF-8");
- }
- }
-
- respond(final: boolean, singleChunk?: Chunk) {
- this.headersSent = true;
- this.#ensureHeaders(singleChunk);
- let body = singleChunk ?? (final ? null : this.#readable);
- if (ServerResponse.#bodyShouldBeNull(this.statusCode)) {
- body = null;
- }
- let headers: Record<string, string> | [string, string][] = this
- .#headers as Record<string, string>;
- if (this.#hasNonStringHeaders) {
- headers = [];
- // Guard is not needed as this is a null prototype object.
- // deno-lint-ignore guard-for-in
- for (const key in this.#headers) {
- const entry = this.#headers[key];
- if (Array.isArray(entry)) {
- for (const value of entry) {
- headers.push([key, value]);
- }
- } else {
- headers.push([key, entry]);
+ServerResponse.prototype.respond = function (
+ this: ServerResponse,
+ final: boolean,
+ singleChunk?: Chunk,
+) {
+ this.headersSent = true;
+ this._ensureHeaders(singleChunk);
+ let body = singleChunk ?? (final ? null : this._readable);
+ if (ServerResponse._bodyShouldBeNull(this.statusCode)) {
+ body = null;
+ }
+ let headers: Record<string, string> | [string, string][] = this
+ ._headers as Record<string, string>;
+ if (this._hasNonStringHeaders) {
+ headers = [];
+ // Guard is not needed as this is a null prototype object.
+ // deno-lint-ignore guard-for-in
+ for (const key in this._headers) {
+ const entry = this._headers[key];
+ if (Array.isArray(entry)) {
+ for (const value of entry) {
+ headers.push([key, value]);
}
+ } else {
+ headers.push([key, entry]);
}
}
- this.#resolve(
- new Response(body, {
- headers,
- status: this.statusCode,
- statusText: this.statusMessage,
- }),
- );
}
+ this._resolve(
+ new Response(body, {
+ headers,
+ status: this.statusCode,
+ statusText: this.statusMessage,
+ }),
+ );
+};
+ServerResponse.prototype.end = function (
+ this: ServerResponse,
// deno-lint-ignore no-explicit-any
- override end(chunk?: any, encoding?: any, cb?: any): this {
- this.finished = true;
- if (!chunk && "transfer-encoding" in this.#headers) {
- // FIXME(bnoordhuis) Node sends a zero length chunked body instead, i.e.,
- // the trailing "0\r\n", but respondWith() just hangs when I try that.
- this.#headers["content-length"] = "0";
- delete this.#headers["transfer-encoding"];
- }
+ chunk?: any,
+ // deno-lint-ignore no-explicit-any
+ encoding?: any,
+ // deno-lint-ignore no-explicit-any
+ cb?: any,
+) {
+ this.finished = true;
+ if (!chunk && "transfer-encoding" in this._headers) {
+ // FIXME(bnoordhuis) Node sends a zero length chunked body instead, i.e.,
+ // the trailing "0\r\n", but respondWith() just hangs when I try that.
+ this._headers["content-length"] = "0";
+ delete this._headers["transfer-encoding"];
+ }
+
+ // @ts-expect-error The signature for cb is stricter than the one implemented here
+ NodeWritable.prototype.end.call(this, chunk, encoding, cb);
+};
- // @ts-expect-error The signature for cb is stricter than the one implemented here
- return super.end(chunk, encoding, cb);
- }
+ServerResponse.prototype.flushHeaders = function (this: ServerResponse) {
+ // no-op
+};
- flushHeaders() {
- // no-op
- }
+// Undocumented API used by `npm:compression`.
+ServerResponse.prototype._implicitHeader = function (this: ServerResponse) {
+ this.writeHead(this.statusCode);
+};
- // Undocumented API used by `npm:compression`.
- _implicitHeader() {
- this.writeHead(this.statusCode);
+ServerResponse.prototype.assignSocket = function (
+ this: ServerResponse,
+ socket,
+) {
+ if (socket._httpMessage) {
+ throw new ERR_HTTP_SOCKET_ASSIGNED();
}
+ socket._httpMessage = this;
+ this._socketOverride = socket;
+};
- assignSocket(socket) {
- if (socket._httpMessage) {
- throw new ERR_HTTP_SOCKET_ASSIGNED();
- }
- socket._httpMessage = this;
- this.#socketOverride = socket;
- }
+ServerResponse.prototype.detachSocket = function (
+ this: ServerResponse,
+ socket,
+) {
+ assert(socket._httpMessage === this);
+ socket._httpMessage = null;
+ this._socketOverride = null;
+};
- detachSocket(socket) {
- assert(socket._httpMessage === this);
- socket._httpMessage = null;
- this.#socketOverride = null;
- }
-}
+Object.defineProperty(ServerResponse.prototype, "connection", {
+ get: deprecate(
+ function (this: ServerResponse) {
+ return this._socketOverride;
+ },
+ "ServerResponse.prototype.connection is deprecated",
+ "DEP0066",
+ ),
+ set: deprecate(
+ // deno-lint-ignore no-explicit-any
+ function (this: ServerResponse, socket: any) {
+ this._socketOverride = socket;
+ },
+ "ServerResponse.prototype.connection is deprecated",
+ "DEP0066",
+ ),
+});
// TODO(@AaronO): optimize
export class IncomingMessageForServer extends NodeReadable {
@@ -1677,6 +1802,8 @@ export class ServerImpl extends EventEmitter {
this.#server.ref();
}
this.#unref = false;
+
+ return this;
}
unref() {
@@ -1684,6 +1811,8 @@ export class ServerImpl extends EventEmitter {
this.#server.unref();
}
this.#unref = true;
+
+ return this;
}
close(cb?: (err?: Error) => void): this {
diff --git a/ext/node/polyfills/http2.ts b/ext/node/polyfills/http2.ts
index a9ced2bd9..dc2379aeb 100644
--- a/ext/node/polyfills/http2.ts
+++ b/ext/node/polyfills/http2.ts
@@ -882,6 +882,7 @@ export class ClientHttp2Stream extends Duplex {
trailersReady: false,
endAfterHeaders: false,
shutdownWritableCalled: false,
+ serverEndedCall: false,
};
this[kDenoResponse] = undefined;
this[kDenoRid] = undefined;
@@ -1109,7 +1110,9 @@ export class ClientHttp2Stream extends Duplex {
}
debugHttp2(">>> chunk", chunk, finished, this[kDenoResponse].bodyRid);
- if (chunk === null) {
+ if (finished || chunk === null) {
+ this[kState].serverEndedCall = true;
+
const trailerList = await op_http2_client_get_response_trailers(
this[kDenoResponse].bodyRid,
);
@@ -1237,7 +1240,9 @@ export class ClientHttp2Stream extends Duplex {
this[kSession] = undefined;
session[kMaybeDestroy]();
- callback(err);
+ if (callback) {
+ callback(err);
+ }
}
[kMaybeDestroy](code = constants.NGHTTP2_NO_ERROR) {
@@ -1280,6 +1285,9 @@ function shutdownWritable(stream, callback, streamRid) {
if (state.flags & STREAM_FLAGS_HAS_TRAILERS) {
onStreamTrailers(stream);
callback();
+ } else if (state.serverEndedCall) {
+ debugHttp2(">>> stream finished");
+ callback();
} else {
op_http2_client_send_data(streamRid, new Uint8Array(), true)
.then(() => {
diff --git a/ext/node/polyfills/inspector.js b/ext/node/polyfills/inspector.js
new file mode 100644
index 000000000..7eb15ce91
--- /dev/null
+++ b/ext/node/polyfills/inspector.js
@@ -0,0 +1,210 @@
+// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
+// Copyright Joyent and Node contributors. All rights reserved. MIT license.
+
+import process from "node:process";
+import { EventEmitter } from "node:events";
+import { primordials } from "ext:core/mod.js";
+import {
+ op_get_extras_binding_object,
+ op_inspector_close,
+ op_inspector_connect,
+ op_inspector_disconnect,
+ op_inspector_dispatch,
+ op_inspector_emit_protocol_event,
+ op_inspector_enabled,
+ op_inspector_open,
+ op_inspector_url,
+ op_inspector_wait,
+} from "ext:core/ops";
+import {
+ isUint32,
+ validateFunction,
+ validateInt32,
+ validateObject,
+ validateString,
+} from "ext:deno_node/internal/validators.mjs";
+import {
+ ERR_INSPECTOR_ALREADY_ACTIVATED,
+ ERR_INSPECTOR_ALREADY_CONNECTED,
+ ERR_INSPECTOR_CLOSED,
+ ERR_INSPECTOR_COMMAND,
+ ERR_INSPECTOR_NOT_ACTIVE,
+ ERR_INSPECTOR_NOT_CONNECTED,
+ ERR_INSPECTOR_NOT_WORKER,
+} from "ext:deno_node/internal/errors.ts";
+
+const {
+ SymbolDispose,
+ JSONParse,
+ JSONStringify,
+ SafeMap,
+} = primordials;
+
+class Session extends EventEmitter {
+ #connection = null;
+ #nextId = 1;
+ #messageCallbacks = new SafeMap();
+
+ connect() {
+ if (this.#connection) {
+ throw new ERR_INSPECTOR_ALREADY_CONNECTED("The inspector session");
+ }
+ this.#connection = op_inspector_connect(false, (m) => this.#onMessage(m));
+ }
+
+ connectToMainThread() {
+ if (isMainThread) {
+ throw new ERR_INSPECTOR_NOT_WORKER();
+ }
+ if (this.#connection) {
+ throw new ERR_INSPECTOR_ALREADY_CONNECTED("The inspector session");
+ }
+ this.#connection = op_inspector_connect(true, (m) => this.#onMessage(m));
+ }
+
+ #onMessage(message) {
+ const parsed = JSONParse(message);
+ try {
+ if (parsed.id) {
+ const callback = this.#messageCallbacks.get(parsed.id);
+ this.#messageCallbacks.delete(parsed.id);
+ if (callback) {
+ if (parsed.error) {
+ return callback(
+ new ERR_INSPECTOR_COMMAND(
+ parsed.error.code,
+ parsed.error.message,
+ ),
+ );
+ }
+
+ callback(null, parsed.result);
+ }
+ } else {
+ this.emit(parsed.method, parsed);
+ this.emit("inspectorNotification", parsed);
+ }
+ } catch (error) {
+ process.emitWarning(error);
+ }
+ }
+
+ post(method, params, callback) {
+ validateString(method, "method");
+ if (!callback && typeof params === "function") {
+ callback = params;
+ params = null;
+ }
+ if (params) {
+ validateObject(params, "params");
+ }
+ if (callback) {
+ validateFunction(callback, "callback");
+ }
+
+ if (!this.#connection) {
+ throw new ERR_INSPECTOR_NOT_CONNECTED();
+ }
+ const id = this.#nextId++;
+ const message = { id, method };
+ if (params) {
+ message.params = params;
+ }
+ if (callback) {
+ this.#messageCallbacks.set(id, callback);
+ }
+ op_inspector_dispatch(this.#connection, JSONStringify(message));
+ }
+
+ disconnect() {
+ if (!this.#connection) {
+ return;
+ }
+ op_inspector_disconnect(this.#connection);
+ this.#connection = null;
+ // deno-lint-ignore prefer-primordials
+ for (const callback of this.#messageCallbacks.values()) {
+ process.nextTick(callback, new ERR_INSPECTOR_CLOSED());
+ }
+ this.#messageCallbacks.clear();
+ this.#nextId = 1;
+ }
+}
+
+function open(port, host, wait) {
+ if (op_inspector_enabled()) {
+ throw new ERR_INSPECTOR_ALREADY_ACTIVATED();
+ }
+ // inspectorOpen() currently does not typecheck its arguments and adding
+ // such checks would be a potentially breaking change. However, the native
+ // open() function requires the port to fit into a 16-bit unsigned integer,
+ // causing an integer overflow otherwise, so we at least need to prevent that.
+ if (isUint32(port)) {
+ validateInt32(port, "port", 0, 65535);
+ } else {
+ // equiv of handling args[0]->IsUint32()
+ port = undefined;
+ }
+ if (typeof host !== "string") {
+ // equiv of handling args[1]->IsString()
+ host = undefined;
+ }
+ op_inspector_open(port, host);
+ if (wait) {
+ op_inspector_wait();
+ }
+
+ return {
+ __proto__: null,
+ [SymbolDispose]() {
+ _debugEnd();
+ },
+ };
+}
+
+function close() {
+ op_inspector_close();
+}
+
+function url() {
+ return op_inspector_url();
+}
+
+function waitForDebugger() {
+ if (!op_inspector_wait()) {
+ throw new ERR_INSPECTOR_NOT_ACTIVE();
+ }
+}
+
+function broadcastToFrontend(eventName, params) {
+ validateString(eventName, "eventName");
+ if (params) {
+ validateObject(params, "params");
+ }
+ op_inspector_emit_protocol_event(eventName, JSONStringify(params ?? {}));
+}
+
+const Network = {
+ requestWillBeSent: (params) =>
+ broadcastToFrontend("Network.requestWillBeSent", params),
+ responseReceived: (params) =>
+ broadcastToFrontend("Network.responseReceived", params),
+ loadingFinished: (params) =>
+ broadcastToFrontend("Network.loadingFinished", params),
+ loadingFailed: (params) =>
+ broadcastToFrontend("Network.loadingFailed", params),
+};
+
+const console = op_get_extras_binding_object().console;
+
+export { close, console, Network, open, Session, url, waitForDebugger };
+
+export default {
+ open,
+ close,
+ url,
+ waitForDebugger,
+ console,
+ Session,
+ Network,
+};
diff --git a/ext/node/polyfills/inspector.ts b/ext/node/polyfills/inspector.ts
deleted file mode 100644
index 9de86ab14..000000000
--- a/ext/node/polyfills/inspector.ts
+++ /dev/null
@@ -1,82 +0,0 @@
-// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
-// Copyright Joyent and Node contributors. All rights reserved. MIT license.
-
-import { EventEmitter } from "node:events";
-import { notImplemented } from "ext:deno_node/_utils.ts";
-import { primordials } from "ext:core/mod.js";
-
-const {
- SafeMap,
-} = primordials;
-
-class Session extends EventEmitter {
- #connection = null;
- #nextId = 1;
- #messageCallbacks = new SafeMap();
-
- /** Connects the session to the inspector back-end. */
- connect() {
- notImplemented("inspector.Session.prototype.connect");
- }
-
- /** Connects the session to the main thread
- * inspector back-end. */
- connectToMainThread() {
- notImplemented("inspector.Session.prototype.connectToMainThread");
- }
-
- /** Posts a message to the inspector back-end. */
- post(
- _method: string,
- _params?: Record<string, unknown>,
- _callback?: (...args: unknown[]) => void,
- ) {
- notImplemented("inspector.Session.prototype.post");
- }
-
- /** Immediately closes the session, all pending
- * message callbacks will be called with an
- * error.
- */
- disconnect() {
- notImplemented("inspector.Session.prototype.disconnect");
- }
-}
-
-/** Activates inspector on host and port.
- * See https://nodejs.org/api/inspector.html#inspectoropenport-host-wait */
-function open(_port?: number, _host?: string, _wait?: boolean) {
- notImplemented("inspector.Session.prototype.open");
-}
-
-/** Deactivate the inspector. Blocks until there are no active connections.
- * See https://nodejs.org/api/inspector.html#inspectorclose */
-function close() {
- notImplemented("inspector.Session.prototype.close");
-}
-
-/** Return the URL of the active inspector, or undefined if there is none.
- * See https://nodejs.org/api/inspector.html#inspectorurl */
-function url() {
- // TODO(kt3k): returns undefined for now, which means the inspector is not activated.
- return undefined;
-}
-
-/** Blocks until a client (existing or connected later) has sent Runtime.runIfWaitingForDebugger command.
- * See https://nodejs.org/api/inspector.html#inspectorwaitfordebugger */
-function waitForDebugger() {
- notImplemented("inspector.wairForDebugger");
-}
-
-const console = globalThis.console;
-
-export { close, console, open, Session, url, waitForDebugger };
-
-export default {
- close,
- console,
- open,
- Session,
- url,
- waitForDebugger,
-};
diff --git a/ext/node/polyfills/inspector/promises.js b/ext/node/polyfills/inspector/promises.js
new file mode 100644
index 000000000..3483e53f5
--- /dev/null
+++ b/ext/node/polyfills/inspector/promises.js
@@ -0,0 +1,20 @@
+// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
+// Copyright Joyent and Node contributors. All rights reserved. MIT license.
+
+import inspector from "node:inspector";
+import { promisify } from "ext:deno_node/internal/util.mjs";
+
+class Session extends inspector.Session {
+ constructor() {
+ super();
+ }
+}
+Session.prototype.post = promisify(inspector.Session.prototype.post);
+
+export * from "node:inspector";
+export { Session };
+
+export default {
+ ...inspector,
+ Session,
+};
diff --git a/ext/node/polyfills/internal/buffer.mjs b/ext/node/polyfills/internal/buffer.mjs
index 6687f7394..dd549221f 100644
--- a/ext/node/polyfills/internal/buffer.mjs
+++ b/ext/node/polyfills/internal/buffer.mjs
@@ -2,10 +2,59 @@
// Copyright Joyent and Node contributors. All rights reserved. MIT license.
// Copyright Feross Aboukhadijeh, and other contributors. All rights reserved. MIT license.
-// TODO(petamoriken): enable prefer-primordials for node polyfills
-// deno-lint-ignore-file prefer-primordials
-
-import { core } from "ext:core/mod.js";
+import { core, primordials } from "ext:core/mod.js";
+const {
+ isAnyArrayBuffer,
+ isArrayBuffer,
+ isDataView,
+ isSharedArrayBuffer,
+ isTypedArray,
+} = core;
+const {
+ ArrayBufferPrototypeGetByteLength,
+ ArrayBufferPrototypeGetDetached,
+ ArrayIsArray,
+ ArrayPrototypeSlice,
+ BigInt,
+ DataViewPrototypeGetByteLength,
+ Float32Array,
+ Float64Array,
+ MathFloor,
+ MathMin,
+ Number,
+ NumberIsInteger,
+ NumberIsNaN,
+ NumberMAX_SAFE_INTEGER,
+ NumberMIN_SAFE_INTEGER,
+ NumberPrototypeToString,
+ ObjectCreate,
+ ObjectDefineProperty,
+ ObjectPrototypeIsPrototypeOf,
+ ObjectSetPrototypeOf,
+ RangeError,
+ SafeRegExp,
+ String,
+ StringFromCharCode,
+ StringPrototypeCharCodeAt,
+ StringPrototypeIncludes,
+ StringPrototypeReplace,
+ StringPrototypeToLowerCase,
+ StringPrototypeTrim,
+ SymbolFor,
+ SymbolToPrimitive,
+ TypeError,
+ TypeErrorPrototype,
+ TypedArrayPrototypeCopyWithin,
+ TypedArrayPrototypeFill,
+ TypedArrayPrototypeGetBuffer,
+ TypedArrayPrototypeGetByteLength,
+ TypedArrayPrototypeGetByteOffset,
+ TypedArrayPrototypeSet,
+ TypedArrayPrototypeSlice,
+ TypedArrayPrototypeSubarray,
+ Uint8Array,
+ Uint8ArrayPrototype,
+} = primordials;
import { op_is_ascii, op_is_utf8, op_transcode } from "ext:core/ops";
import { TextDecoder, TextEncoder } from "ext:deno_web/08_text_encoding.js";
@@ -24,11 +73,6 @@ import {
hexToBytes,
utf16leToBytes,
} from "ext:deno_node/internal_binding/_utils.ts";
-import {
- isAnyArrayBuffer,
- isArrayBufferView,
- isTypedArray,
-} from "ext:deno_node/internal/util/types.ts";
import { normalizeEncoding } from "ext:deno_node/internal/util.mjs";
import { validateBuffer } from "ext:deno_node/internal/validators.mjs";
import { isUint8Array } from "ext:deno_node/internal/util/types.ts";
@@ -50,9 +94,13 @@ const utf8Encoder = new TextEncoder();
// Temporary buffers to convert numbers.
const float32Array = new Float32Array(1);
-const uInt8Float32Array = new Uint8Array(float32Array.buffer);
+const uInt8Float32Array = new Uint8Array(
+ TypedArrayPrototypeGetBuffer(float32Array),
+);
const float64Array = new Float64Array(1);
-const uInt8Float64Array = new Uint8Array(float64Array.buffer);
+const uInt8Float64Array = new Uint8Array(
+ TypedArrayPrototypeGetBuffer(float64Array),
+);
// Check endianness.
float32Array[0] = -1; // 0xBF800000
@@ -64,10 +112,7 @@ export const kMaxLength = 2147483647;
export const kStringMaxLength = 536870888;
const MAX_UINT32 = 2 ** 32;
-const customInspectSymbol =
- typeof Symbol === "function" && typeof Symbol["for"] === "function"
- ? Symbol["for"]("nodejs.util.inspect.custom")
- : null;
+const customInspectSymbol = SymbolFor("nodejs.util.inspect.custom");
export const INSPECT_MAX_BYTES = 50;
@@ -76,23 +121,25 @@ export const constants = {
MAX_STRING_LENGTH: kStringMaxLength,
};
-Object.defineProperty(Buffer.prototype, "parent", {
+ObjectDefineProperty(Buffer.prototype, "parent", {
+ __proto__: null,
enumerable: true,
get: function () {
- if (!Buffer.isBuffer(this)) {
+ if (!BufferIsBuffer(this)) {
return void 0;
}
- return this.buffer;
+ return TypedArrayPrototypeGetBuffer(this);
},
});
-Object.defineProperty(Buffer.prototype, "offset", {
+ObjectDefineProperty(Buffer.prototype, "offset", {
+ __proto__: null,
enumerable: true,
get: function () {
- if (!Buffer.isBuffer(this)) {
+ if (!BufferIsBuffer(this)) {
return void 0;
}
- return this.byteOffset;
+ return TypedArrayPrototypeGetByteOffset(this);
},
});
@@ -103,10 +150,21 @@ function createBuffer(length) {
);
}
const buf = new Uint8Array(length);
- Object.setPrototypeOf(buf, Buffer.prototype);
+ ObjectSetPrototypeOf(buf, BufferPrototype);
return buf;
}
+/**
+ * @param {ArrayBufferLike} O
+ * @returns {boolean}
+ */
+function isDetachedBuffer(O) {
+ if (isSharedArrayBuffer(O)) {
+ return false;
+ }
+ return ArrayBufferPrototypeGetDetached(O);
+}
+
export function Buffer(arg, encodingOrOffset, length) {
if (typeof arg === "number") {
if (typeof encodingOrOffset === "string") {
@@ -133,6 +191,7 @@ function _from(value, encodingOrOffset, length) {
return fromArrayBuffer(value, encodingOrOffset, length);
}
+ // deno-lint-ignore prefer-primordials
const valueOf = value.valueOf && value.valueOf();
if (
valueOf != null &&
@@ -147,8 +206,8 @@ function _from(value, encodingOrOffset, length) {
return b;
}
- if (typeof value[Symbol.toPrimitive] === "function") {
- const primitive = value[Symbol.toPrimitive]("string");
+ if (typeof value[SymbolToPrimitive] === "function") {
+ const primitive = value[SymbolToPrimitive]("string");
if (typeof primitive === "string") {
return fromString(primitive, encodingOrOffset);
}
@@ -162,13 +221,19 @@ function _from(value, encodingOrOffset, length) {
);
}
-Buffer.from = function from(value, encodingOrOffset, length) {
+const BufferFrom = Buffer.from = function from(
+ value,
+ encodingOrOffset,
+ length,
+) {
return _from(value, encodingOrOffset, length);
};
-Object.setPrototypeOf(Buffer.prototype, Uint8Array.prototype);
+const BufferPrototype = Buffer.prototype;
+
+ObjectSetPrototypeOf(Buffer.prototype, Uint8ArrayPrototype);
-Object.setPrototypeOf(Buffer, Uint8Array);
+ObjectSetPrototypeOf(Buffer, Uint8Array);
function assertSize(size) {
validateNumber(size, "size", 0, kMaxLength);
@@ -186,6 +251,7 @@ function _alloc(size, fill, encoding) {
encoding,
);
}
+ // deno-lint-ignore prefer-primordials
return buffer.fill(fill, encoding);
}
return buffer;
@@ -212,13 +278,14 @@ function fromString(string, encoding) {
if (typeof encoding !== "string" || encoding === "") {
encoding = "utf8";
}
- if (!Buffer.isEncoding(encoding)) {
+ if (!BufferIsEncoding(encoding)) {
throw new codes.ERR_UNKNOWN_ENCODING(encoding);
}
const length = byteLength(string, encoding) | 0;
let buf = createBuffer(length);
const actual = buf.write(string, encoding);
if (actual !== length) {
+ // deno-lint-ignore prefer-primordials
buf = buf.slice(0, actual);
}
return buf;
@@ -226,11 +293,12 @@ function fromString(string, encoding) {
function fromArrayLike(obj) {
const buf = new Uint8Array(obj);
- Object.setPrototypeOf(buf, Buffer.prototype);
+ ObjectSetPrototypeOf(buf, BufferPrototype);
return buf;
}
function fromObject(obj) {
+ // deno-lint-ignore prefer-primordials
if (obj.length !== undefined || isAnyArrayBuffer(obj.buffer)) {
if (typeof obj.length !== "number") {
return createBuffer(0);
@@ -239,7 +307,7 @@ function fromObject(obj) {
return fromArrayLike(obj);
}
- if (obj.type === "Buffer" && Array.isArray(obj.data)) {
+ if (obj.type === "Buffer" && ArrayIsArray(obj.data)) {
return fromArrayLike(obj.data);
}
}
@@ -248,7 +316,7 @@ function checked(length) {
if (length >= kMaxLength) {
throw new RangeError(
"Attempt to allocate Buffer larger than maximum size: 0x" +
- kMaxLength.toString(16) + " bytes",
+ NumberPrototypeToString(kMaxLength, 16) + " bytes",
);
}
return length | 0;
@@ -256,25 +324,33 @@ function checked(length) {
export function SlowBuffer(length) {
assertSize(length);
- return Buffer.alloc(+length);
+ return _alloc(+length);
}
-Object.setPrototypeOf(SlowBuffer.prototype, Uint8Array.prototype);
+ObjectSetPrototypeOf(SlowBuffer.prototype, Uint8ArrayPrototype);
-Object.setPrototypeOf(SlowBuffer, Uint8Array);
+ObjectSetPrototypeOf(SlowBuffer, Uint8Array);
-Buffer.isBuffer = function isBuffer(b) {
- return b != null && b._isBuffer === true && b !== Buffer.prototype;
+const BufferIsBuffer = Buffer.isBuffer = function isBuffer(b) {
+ return b != null && b._isBuffer === true && b !== BufferPrototype;
};
-Buffer.compare = function compare(a, b) {
- if (isInstance(a, Uint8Array)) {
- a = Buffer.from(a, a.offset, a.byteLength);
+const BufferCompare = Buffer.compare = function compare(a, b) {
+ if (isUint8Array(a)) {
+ a = BufferFrom(
+ a,
+ TypedArrayPrototypeGetByteOffset(a),
+ TypedArrayPrototypeGetByteLength(a),
+ );
}
- if (isInstance(b, Uint8Array)) {
- b = Buffer.from(b, b.offset, b.byteLength);
+ if (isUint8Array(b)) {
+ b = BufferFrom(
+ b,
+ TypedArrayPrototypeGetByteOffset(b),
+ TypedArrayPrototypeGetByteLength(b),
+ );
}
- if (!Buffer.isBuffer(a) || !Buffer.isBuffer(b)) {
+ if (!BufferIsBuffer(a) || !BufferIsBuffer(b)) {
throw new TypeError(
'The "buf1", "buf2" arguments must be one of type Buffer or Uint8Array',
);
@@ -284,7 +360,7 @@ Buffer.compare = function compare(a, b) {
}
let x = a.length;
let y = b.length;
- for (let i = 0, len = Math.min(x, y); i < len; ++i) {
+ for (let i = 0, len = MathMin(x, y); i < len; ++i) {
if (a[i] !== b[i]) {
x = a[i];
y = b[i];
@@ -300,18 +376,18 @@ Buffer.compare = function compare(a, b) {
return 0;
};
-Buffer.isEncoding = function isEncoding(encoding) {
+const BufferIsEncoding = Buffer.isEncoding = function isEncoding(encoding) {
return typeof encoding === "string" && encoding.length !== 0 &&
normalizeEncoding(encoding) !== undefined;
};
Buffer.concat = function concat(list, length) {
- if (!Array.isArray(list)) {
+ if (!ArrayIsArray(list)) {
throw new codes.ERR_INVALID_ARG_TYPE("list", "Array", list);
}
if (list.length === 0) {
- return Buffer.alloc(0);
+ return _alloc(0);
}
if (length === undefined) {
@@ -325,7 +401,7 @@ Buffer.concat = function concat(list, length) {
validateOffset(length, "length");
}
- const buffer = Buffer.allocUnsafe(length);
+ const buffer = _allocUnsafe(length);
let pos = 0;
for (let i = 0; i < list.length; i++) {
const buf = list[i];
@@ -346,7 +422,7 @@ Buffer.concat = function concat(list, length) {
// Zero-fill the remaining bytes if the specified `length` was more than
// the actual total length, i.e. if we have some remaining allocated bytes
// there were not initialized.
- buffer.fill(0, pos, length);
+ TypedArrayPrototypeFill(buffer, 0, pos, length);
}
return buffer;
@@ -354,7 +430,18 @@ Buffer.concat = function concat(list, length) {
function byteLength(string, encoding) {
if (typeof string !== "string") {
- if (isArrayBufferView(string) || isAnyArrayBuffer(string)) {
+ if (isTypedArray(string)) {
+ return TypedArrayPrototypeGetByteLength(string);
+ }
+ if (isDataView(string)) {
+ return DataViewPrototypeGetByteLength(string);
+ }
+ if (isArrayBuffer(string)) {
+ return ArrayBufferPrototypeGetByteLength(string);
+ }
+ if (isSharedArrayBuffer(string)) {
+    // TODO(petamoriken): add SharedArrayBuffer to primordials
+ // deno-lint-ignore prefer-primordials
return string.byteLength;
}
@@ -463,6 +550,7 @@ Buffer.prototype.toString = function toString(encoding, start, end) {
throw new codes.ERR_UNKNOWN_ENCODING(encoding);
}
+ // deno-lint-ignore prefer-primordials
return ops.slice(this, start, end);
};
@@ -479,22 +567,29 @@ Buffer.prototype.equals = function equals(b) {
if (this === b) {
return true;
}
- return Buffer.compare(this, b) === 0;
+ return BufferCompare(this, b) === 0;
};
-Buffer.prototype.inspect = function inspect() {
- let str = "";
- const max = INSPECT_MAX_BYTES;
- str = this.toString("hex", 0, max).replace(/(.{2})/g, "$1 ").trim();
- if (this.length > max) {
- str += " ... ";
- }
- return "<Buffer " + str + ">";
-};
+const SPACER_PATTERN = new SafeRegExp(/(.{2})/g);
-if (customInspectSymbol) {
- Buffer.prototype[customInspectSymbol] = Buffer.prototype.inspect;
-}
+Buffer.prototype[customInspectSymbol] =
+ Buffer.prototype.inspect =
+ function inspect() {
+ let str = "";
+ const max = INSPECT_MAX_BYTES;
+ str = StringPrototypeTrim(
+ StringPrototypeReplace(
+ // deno-lint-ignore prefer-primordials
+ this.toString("hex", 0, max),
+ SPACER_PATTERN,
+ "$1 ",
+ ),
+ );
+ if (this.length > max) {
+ str += " ... ";
+ }
+ return "<Buffer " + str + ">";
+ };
Buffer.prototype.compare = function compare(
target,
@@ -503,10 +598,14 @@ Buffer.prototype.compare = function compare(
thisStart,
thisEnd,
) {
- if (isInstance(target, Uint8Array)) {
- target = Buffer.from(target, target.offset, target.byteLength);
+ if (isUint8Array(target)) {
+ target = BufferFrom(
+ target,
+ TypedArrayPrototypeGetByteOffset(target),
+ TypedArrayPrototypeGetByteLength(target),
+ );
}
- if (!Buffer.isBuffer(target)) {
+ if (!BufferIsBuffer(target)) {
throw new codes.ERR_INVALID_ARG_TYPE(
"target",
["Buffer", "Uint8Array"],
@@ -563,8 +662,9 @@ Buffer.prototype.compare = function compare(
}
let x = thisEnd - thisStart;
let y = end - start;
- const len = Math.min(x, y);
- const thisCopy = this.slice(thisStart, thisEnd);
+ const len = MathMin(x, y);
+ const thisCopy = TypedArrayPrototypeSlice(this, thisStart, thisEnd);
+ // deno-lint-ignore prefer-primordials
const targetCopy = target.slice(start, end);
for (let i = 0; i < len; ++i) {
if (thisCopy[i] !== targetCopy[i]) {
@@ -594,7 +694,8 @@ function bidirectionalIndexOf(buffer, val, byteOffset, encoding, dir) {
byteOffset = -0x80000000;
}
byteOffset = +byteOffset;
- if (Number.isNaN(byteOffset)) {
+ if (NumberIsNaN(byteOffset)) {
+ // deno-lint-ignore prefer-primordials
byteOffset = dir ? 0 : (buffer.length || buffer.byteLength);
}
dir = !!dir;
@@ -614,6 +715,7 @@ function bidirectionalIndexOf(buffer, val, byteOffset, encoding, dir) {
if (ops === undefined) {
throw new codes.ERR_UNKNOWN_ENCODING(encoding);
}
+ // deno-lint-ignore prefer-primordials
return ops.indexOf(buffer, val, byteOffset, dir);
}
@@ -630,6 +732,7 @@ function bidirectionalIndexOf(buffer, val, byteOffset, encoding, dir) {
}
Buffer.prototype.includes = function includes(val, byteOffset, encoding) {
+ // deno-lint-ignore prefer-primordials
return this.indexOf(val, byteOffset, encoding) !== -1;
};
@@ -649,7 +752,7 @@ Buffer.prototype.asciiSlice = function asciiSlice(offset, length) {
if (offset === 0 && length === this.length) {
return bytesToAscii(this);
} else {
- return bytesToAscii(this.slice(offset, length));
+ return bytesToAscii(TypedArrayPrototypeSlice(this, offset, length));
}
};
@@ -664,7 +767,9 @@ Buffer.prototype.base64Slice = function base64Slice(
if (offset === 0 && length === this.length) {
return forgivingBase64Encode(this);
} else {
- return forgivingBase64Encode(this.slice(offset, length));
+ return forgivingBase64Encode(
+ TypedArrayPrototypeSlice(this, offset, length),
+ );
}
};
@@ -683,7 +788,9 @@ Buffer.prototype.base64urlSlice = function base64urlSlice(
if (offset === 0 && length === this.length) {
return forgivingBase64UrlEncode(this);
} else {
- return forgivingBase64UrlEncode(this.slice(offset, length));
+ return forgivingBase64UrlEncode(
+ TypedArrayPrototypeSlice(this, offset, length),
+ );
}
};
@@ -728,7 +835,7 @@ Buffer.prototype.ucs2Slice = function ucs2Slice(offset, length) {
if (offset === 0 && length === this.length) {
return bytesToUtf16le(this);
} else {
- return bytesToUtf16le(this.slice(offset, length));
+ return bytesToUtf16le(TypedArrayPrototypeSlice(this, offset, length));
}
};
@@ -747,9 +854,9 @@ Buffer.prototype.utf8Slice = function utf8Slice(string, offset, length) {
Buffer.prototype.utf8Write = function utf8Write(string, offset, length) {
offset = offset || 0;
- const maxLength = Math.min(length || Infinity, this.length - offset);
+ const maxLength = MathMin(length || Infinity, this.length - offset);
const buf = offset || maxLength < this.length
- ? this.subarray(offset, maxLength + offset)
+ ? TypedArrayPrototypeSubarray(this, offset, maxLength + offset)
: this;
return utf8Encoder.encodeInto(string, buf).written;
};
@@ -801,7 +908,7 @@ Buffer.prototype.write = function write(string, offset, length, encoding) {
Buffer.prototype.toJSON = function toJSON() {
return {
type: "Buffer",
- data: Array.prototype.slice.call(this._arr || this, 0),
+ data: ArrayPrototypeSlice(this._arr || this, 0),
};
};
function fromArrayBuffer(obj, byteOffset, length) {
@@ -810,11 +917,12 @@ function fromArrayBuffer(obj, byteOffset, length) {
byteOffset = 0;
} else {
byteOffset = +byteOffset;
- if (Number.isNaN(byteOffset)) {
+ if (NumberIsNaN(byteOffset)) {
byteOffset = 0;
}
}
+ // deno-lint-ignore prefer-primordials
const maxLength = obj.byteLength - byteOffset;
if (maxLength < 0) {
@@ -836,7 +944,7 @@ function fromArrayBuffer(obj, byteOffset, length) {
}
const buffer = new Uint8Array(obj, byteOffset, length);
- Object.setPrototypeOf(buffer, Buffer.prototype);
+ ObjectSetPrototypeOf(buffer, BufferPrototype);
return buffer;
}
@@ -844,6 +952,7 @@ function _base64Slice(buf, start, end) {
if (start === 0 && end === buf.length) {
return forgivingBase64Encode(buf);
} else {
+ // deno-lint-ignore prefer-primordials
return forgivingBase64Encode(buf.slice(start, end));
}
}
@@ -852,9 +961,10 @@ const decoder = new TextDecoder();
function _utf8Slice(buf, start, end) {
try {
+ // deno-lint-ignore prefer-primordials
return decoder.decode(buf.slice(start, end));
} catch (err) {
- if (err instanceof TypeError) {
+ if (ObjectPrototypeIsPrototypeOf(TypeErrorPrototype, err)) {
throw new NodeError("ERR_STRING_TOO_LONG", "String too long");
}
throw err;
@@ -863,9 +973,9 @@ function _utf8Slice(buf, start, end) {
function _latin1Slice(buf, start, end) {
let ret = "";
- end = Math.min(buf.length, end);
+ end = MathMin(buf.length, end);
for (let i = start; i < end; ++i) {
- ret += String.fromCharCode(buf[i]);
+ ret += StringFromCharCode(buf[i]);
}
return ret;
}
@@ -994,42 +1104,38 @@ Buffer.prototype.readUint32BE = Buffer.prototype.readUInt32BE = readUInt32BE;
Buffer.prototype.readBigUint64LE =
Buffer.prototype.readBigUInt64LE =
- defineBigIntMethod(
- function readBigUInt64LE(offset) {
- offset = offset >>> 0;
- validateNumber(offset, "offset");
- const first = this[offset];
- const last = this[offset + 7];
- if (first === void 0 || last === void 0) {
- boundsError(offset, this.length - 8);
- }
- const lo = first + this[++offset] * 2 ** 8 +
- this[++offset] * 2 ** 16 +
- this[++offset] * 2 ** 24;
- const hi = this[++offset] + this[++offset] * 2 ** 8 +
- this[++offset] * 2 ** 16 + last * 2 ** 24;
- return BigInt(lo) + (BigInt(hi) << BigInt(32));
- },
- );
+ function readBigUInt64LE(offset) {
+ offset = offset >>> 0;
+ validateNumber(offset, "offset");
+ const first = this[offset];
+ const last = this[offset + 7];
+ if (first === void 0 || last === void 0) {
+ boundsError(offset, this.length - 8);
+ }
+ const lo = first + this[++offset] * 2 ** 8 +
+ this[++offset] * 2 ** 16 +
+ this[++offset] * 2 ** 24;
+ const hi = this[++offset] + this[++offset] * 2 ** 8 +
+ this[++offset] * 2 ** 16 + last * 2 ** 24;
+ return BigInt(lo) + (BigInt(hi) << 32n);
+ };
Buffer.prototype.readBigUint64BE =
Buffer.prototype.readBigUInt64BE =
- defineBigIntMethod(
- function readBigUInt64BE(offset) {
- offset = offset >>> 0;
- validateNumber(offset, "offset");
- const first = this[offset];
- const last = this[offset + 7];
- if (first === void 0 || last === void 0) {
- boundsError(offset, this.length - 8);
- }
- const hi = first * 2 ** 24 + this[++offset] * 2 ** 16 +
- this[++offset] * 2 ** 8 + this[++offset];
- const lo = this[++offset] * 2 ** 24 + this[++offset] * 2 ** 16 +
- this[++offset] * 2 ** 8 + last;
- return (BigInt(hi) << BigInt(32)) + BigInt(lo);
- },
- );
+ function readBigUInt64BE(offset) {
+ offset = offset >>> 0;
+ validateNumber(offset, "offset");
+ const first = this[offset];
+ const last = this[offset + 7];
+ if (first === void 0 || last === void 0) {
+ boundsError(offset, this.length - 8);
+ }
+ const hi = first * 2 ** 24 + this[++offset] * 2 ** 16 +
+ this[++offset] * 2 ** 8 + this[++offset];
+ const lo = this[++offset] * 2 ** 24 + this[++offset] * 2 ** 16 +
+ this[++offset] * 2 ** 8 + last;
+ return (BigInt(hi) << 32n) + BigInt(lo);
+ };
Buffer.prototype.readIntLE = function readIntLE(
offset,
@@ -1148,43 +1254,39 @@ Buffer.prototype.readInt32BE = function readInt32BE(offset = 0) {
last;
};
-Buffer.prototype.readBigInt64LE = defineBigIntMethod(
- function readBigInt64LE(offset) {
- offset = offset >>> 0;
- validateNumber(offset, "offset");
- const first = this[offset];
- const last = this[offset + 7];
- if (first === void 0 || last === void 0) {
- boundsError(offset, this.length - 8);
- }
- const val = this[offset + 4] + this[offset + 5] * 2 ** 8 +
- this[offset + 6] * 2 ** 16 + (last << 24);
- return (BigInt(val) << BigInt(32)) +
- BigInt(
- first + this[++offset] * 2 ** 8 + this[++offset] * 2 ** 16 +
- this[++offset] * 2 ** 24,
- );
- },
-);
+Buffer.prototype.readBigInt64LE = function readBigInt64LE(offset) {
+ offset = offset >>> 0;
+ validateNumber(offset, "offset");
+ const first = this[offset];
+ const last = this[offset + 7];
+ if (first === void 0 || last === void 0) {
+ boundsError(offset, this.length - 8);
+ }
+ const val = this[offset + 4] + this[offset + 5] * 2 ** 8 +
+ this[offset + 6] * 2 ** 16 + (last << 24);
+ return (BigInt(val) << 32n) +
+ BigInt(
+ first + this[++offset] * 2 ** 8 + this[++offset] * 2 ** 16 +
+ this[++offset] * 2 ** 24,
+ );
+};
-Buffer.prototype.readBigInt64BE = defineBigIntMethod(
- function readBigInt64BE(offset) {
- offset = offset >>> 0;
- validateNumber(offset, "offset");
- const first = this[offset];
- const last = this[offset + 7];
- if (first === void 0 || last === void 0) {
- boundsError(offset, this.length - 8);
- }
- const val = (first << 24) + this[++offset] * 2 ** 16 +
- this[++offset] * 2 ** 8 + this[++offset];
- return (BigInt(val) << BigInt(32)) +
- BigInt(
- this[++offset] * 2 ** 24 + this[++offset] * 2 ** 16 +
- this[++offset] * 2 ** 8 + last,
- );
- },
-);
+Buffer.prototype.readBigInt64BE = function readBigInt64BE(offset) {
+ offset = offset >>> 0;
+ validateNumber(offset, "offset");
+ const first = this[offset];
+ const last = this[offset + 7];
+ if (first === void 0 || last === void 0) {
+ boundsError(offset, this.length - 8);
+ }
+ const val = (first << 24) + this[++offset] * 2 ** 16 +
+ this[++offset] * 2 ** 8 + this[++offset];
+ return (BigInt(val) << 32n) +
+ BigInt(
+ this[++offset] * 2 ** 24 + this[++offset] * 2 ** 16 +
+ this[++offset] * 2 ** 8 + last,
+ );
+};
Buffer.prototype.readFloatLE = function readFloatLE(offset) {
return bigEndian
@@ -1293,7 +1395,7 @@ Buffer.prototype.writeUint32BE =
function wrtBigUInt64LE(buf, value, offset, min, max) {
checkIntBI(value, min, max, buf, offset, 7);
- let lo = Number(value & BigInt(4294967295));
+ let lo = Number(value & 4294967295n);
buf[offset++] = lo;
lo = lo >> 8;
buf[offset++] = lo;
@@ -1301,7 +1403,7 @@ function wrtBigUInt64LE(buf, value, offset, min, max) {
buf[offset++] = lo;
lo = lo >> 8;
buf[offset++] = lo;
- let hi = Number(value >> BigInt(32) & BigInt(4294967295));
+ let hi = Number(value >> 32n & 4294967295n);
buf[offset++] = hi;
hi = hi >> 8;
buf[offset++] = hi;
@@ -1314,7 +1416,7 @@ function wrtBigUInt64LE(buf, value, offset, min, max) {
function wrtBigUInt64BE(buf, value, offset, min, max) {
checkIntBI(value, min, max, buf, offset, 7);
- let lo = Number(value & BigInt(4294967295));
+ let lo = Number(value & 4294967295n);
buf[offset + 7] = lo;
lo = lo >> 8;
buf[offset + 6] = lo;
@@ -1322,7 +1424,7 @@ function wrtBigUInt64BE(buf, value, offset, min, max) {
buf[offset + 5] = lo;
lo = lo >> 8;
buf[offset + 4] = lo;
- let hi = Number(value >> BigInt(32) & BigInt(4294967295));
+ let hi = Number(value >> 32n & 4294967295n);
buf[offset + 3] = hi;
hi = hi >> 8;
buf[offset + 2] = hi;
@@ -1335,31 +1437,27 @@ function wrtBigUInt64BE(buf, value, offset, min, max) {
Buffer.prototype.writeBigUint64LE =
Buffer.prototype.writeBigUInt64LE =
- defineBigIntMethod(
- function writeBigUInt64LE(value, offset = 0) {
- return wrtBigUInt64LE(
- this,
- value,
- offset,
- BigInt(0),
- BigInt("0xffffffffffffffff"),
- );
- },
- );
+ function writeBigUInt64LE(value, offset = 0) {
+ return wrtBigUInt64LE(
+ this,
+ value,
+ offset,
+ 0n,
+ 0xffffffffffffffffn,
+ );
+ };
Buffer.prototype.writeBigUint64BE =
Buffer.prototype.writeBigUInt64BE =
- defineBigIntMethod(
- function writeBigUInt64BE(value, offset = 0) {
- return wrtBigUInt64BE(
- this,
- value,
- offset,
- BigInt(0),
- BigInt("0xffffffffffffffff"),
- );
- },
- );
+ function writeBigUInt64BE(value, offset = 0) {
+ return wrtBigUInt64BE(
+ this,
+ value,
+ offset,
+ 0n,
+ 0xffffffffffffffffn,
+ );
+ };
Buffer.prototype.writeIntLE = function writeIntLE(
value,
@@ -1450,29 +1548,25 @@ Buffer.prototype.writeInt32BE = function writeInt32BE(value, offset = 0) {
return writeU_Int32BE(this, value, offset, -0x80000000, 0x7fffffff);
};
-Buffer.prototype.writeBigInt64LE = defineBigIntMethod(
- function writeBigInt64LE(value, offset = 0) {
- return wrtBigUInt64LE(
- this,
- value,
- offset,
- -BigInt("0x8000000000000000"),
- BigInt("0x7fffffffffffffff"),
- );
- },
-);
+Buffer.prototype.writeBigInt64LE = function writeBigInt64LE(value, offset = 0) {
+ return wrtBigUInt64LE(
+ this,
+ value,
+ offset,
+ -0x8000000000000000n,
+ 0x7fffffffffffffffn,
+ );
+};
-Buffer.prototype.writeBigInt64BE = defineBigIntMethod(
- function writeBigInt64BE(value, offset = 0) {
- return wrtBigUInt64BE(
- this,
- value,
- offset,
- -BigInt("0x8000000000000000"),
- BigInt("0x7fffffffffffffff"),
- );
- },
-);
+Buffer.prototype.writeBigInt64BE = function writeBigInt64BE(value, offset = 0) {
+ return wrtBigUInt64BE(
+ this,
+ value,
+ offset,
+ -0x8000000000000000n,
+ 0x7fffffffffffffffn,
+ );
+};
Buffer.prototype.writeFloatLE = function writeFloatLE(
value,
@@ -1600,14 +1694,12 @@ Buffer.prototype.copy = function copy(
}
const len = sourceEnd - sourceStart;
- if (
- this === target && typeof Uint8Array.prototype.copyWithin === "function"
- ) {
- this.copyWithin(targetStart, sourceStart, sourceEnd);
+ if (this === target) {
+ TypedArrayPrototypeCopyWithin(this, targetStart, sourceStart, sourceEnd);
} else {
- Uint8Array.prototype.set.call(
+ TypedArrayPrototypeSet(
target,
- this.subarray(sourceStart, sourceEnd),
+ TypedArrayPrototypeSubarray(this, sourceStart, sourceEnd),
targetStart,
);
}
@@ -1627,11 +1719,11 @@ Buffer.prototype.fill = function fill(val, start, end, encoding) {
if (encoding !== void 0 && typeof encoding !== "string") {
throw new TypeError("encoding must be a string");
}
- if (typeof encoding === "string" && !Buffer.isEncoding(encoding)) {
+ if (typeof encoding === "string" && !BufferIsEncoding(encoding)) {
throw new TypeError("Unknown encoding: " + encoding);
}
if (val.length === 1) {
- const code = val.charCodeAt(0);
+ const code = StringPrototypeCharCodeAt(val, 0);
if (encoding === "utf8" && code < 128 || encoding === "latin1") {
val = code;
}
@@ -1658,7 +1750,7 @@ Buffer.prototype.fill = function fill(val, start, end, encoding) {
this[i] = val;
}
} else {
- const bytes = Buffer.isBuffer(val) ? val : Buffer.from(val, encoding);
+ const bytes = BufferIsBuffer(val) ? val : BufferFrom(val, encoding);
const len = bytes.length;
if (len === 0) {
throw new codes.ERR_INVALID_ARG_VALUE(
@@ -1685,7 +1777,7 @@ function checkIntBI(value, min, max, buf, offset, byteLength2) {
const n = typeof min === "bigint" ? "n" : "";
let range;
if (byteLength2 > 3) {
- if (min === 0 || min === BigInt(0)) {
+ if (min === 0 || min === 0n) {
range = `>= 0${n} and < 2${n} ** ${(byteLength2 + 1) * 8}${n}`;
} else {
range = `>= -(2${n} ** ${(byteLength2 + 1) * 8 - 1}${n}) and < 2 ** ${
@@ -1710,7 +1802,7 @@ function checkIntBI(value, min, max, buf, offset, byteLength2) {
function blitBuffer(src, dst, offset, byteLength = Infinity) {
const srcLength = src.length;
// Establish the number of bytes to be written
- const bytesToWrite = Math.min(
+ const bytesToWrite = MathMin(
// If byte length is defined in the call, then it sets an upper bound,
// otherwise it is Infinity and is never chosen.
byteLength,
@@ -1730,15 +1822,9 @@ function blitBuffer(src, dst, offset, byteLength = Infinity) {
return bytesToWrite;
}
-function isInstance(obj, type) {
- return obj instanceof type ||
- obj != null && obj.constructor != null &&
- obj.constructor.name != null && obj.constructor.name === type.name;
-}
-
const hexSliceLookupTable = function () {
const alphabet = "0123456789abcdef";
- const table = new Array(256);
+ const table = [];
for (let i = 0; i < 16; ++i) {
const i16 = i * 16;
for (let j = 0; j < 16; ++j) {
@@ -1748,14 +1834,6 @@ const hexSliceLookupTable = function () {
return table;
}();
-function defineBigIntMethod(fn) {
- return typeof BigInt === "undefined" ? BufferBigIntNotDefined : fn;
-}
-
-function BufferBigIntNotDefined() {
- throw new Error("BigInt not supported");
-}
-
export function readUInt48LE(buf, offset = 0) {
validateNumber(offset, "offset");
const first = buf[offset];
@@ -2079,10 +2157,10 @@ export function byteLengthUtf8(str) {
function base64ByteLength(str, bytes) {
// Handle padding
- if (str.charCodeAt(bytes - 1) === 0x3D) {
+ if (StringPrototypeCharCodeAt(str, bytes - 1) === 0x3D) {
bytes--;
}
- if (bytes > 1 && str.charCodeAt(bytes - 1) === 0x3D) {
+ if (bytes > 1 && StringPrototypeCharCodeAt(str, bytes - 1) === 0x3D) {
bytes--;
}
@@ -2090,7 +2168,7 @@ function base64ByteLength(str, bytes) {
return (bytes * 3) >>> 2;
}
-export const encodingsMap = Object.create(null);
+export const encodingsMap = ObjectCreate(null);
for (let i = 0; i < encodings.length; ++i) {
encodingsMap[encodings[i]] = i;
}
@@ -2220,7 +2298,7 @@ export const encodingOps = {
};
export function getEncodingOps(encoding) {
- encoding = String(encoding).toLowerCase();
+ encoding = StringPrototypeToLowerCase(String(encoding));
switch (encoding.length) {
case 4:
if (encoding === "utf8") return encodingOps.utf8;
@@ -2260,6 +2338,14 @@ export function getEncodingOps(encoding) {
}
}
+/**
+ * @param {Buffer} source
+ * @param {Buffer} target
+ * @param {number} targetStart
+ * @param {number} sourceStart
+ * @param {number} sourceEnd
+ * @returns {number}
+ */
export function _copyActual(
source,
target,
@@ -2278,6 +2364,7 @@ export function _copyActual(
}
if (sourceStart !== 0 || sourceEnd < source.length) {
+ // deno-lint-ignore prefer-primordials
source = new Uint8Array(source.buffer, source.byteOffset + sourceStart, nb);
}
@@ -2287,7 +2374,7 @@ export function _copyActual(
}
export function boundsError(value, length, type) {
- if (Math.floor(value) !== value) {
+ if (MathFloor(value) !== value) {
validateNumber(value, type);
throw new codes.ERR_OUT_OF_RANGE(type || "offset", "an integer", value);
}
@@ -2310,7 +2397,7 @@ export function validateNumber(value, name, min = undefined, max) {
if (
(min != null && value < min) || (max != null && value > max) ||
- ((min != null || max != null) && Number.isNaN(value))
+ ((min != null || max != null) && NumberIsNaN(value))
) {
throw new codes.ERR_OUT_OF_RANGE(
name,
@@ -2344,11 +2431,11 @@ function checkInt(value, min, max, buf, offset, byteLength) {
export function toInteger(n, defaultVal) {
n = +n;
if (
- !Number.isNaN(n) &&
- n >= Number.MIN_SAFE_INTEGER &&
- n <= Number.MAX_SAFE_INTEGER
+ !NumberIsNaN(n) &&
+ n >= NumberMIN_SAFE_INTEGER &&
+ n <= NumberMAX_SAFE_INTEGER
) {
- return ((n % 1) === 0 ? n : Math.floor(n));
+ return ((n % 1) === 0 ? n : MathFloor(n));
}
return defaultVal;
}
@@ -2421,7 +2508,7 @@ export function writeU_Int48BE(buf, value, offset, min, max) {
value = +value;
checkInt(value, min, max, buf, offset, 5);
- const newVal = Math.floor(value * 2 ** -32);
+ const newVal = MathFloor(value * 2 ** -32);
buf[offset++] = newVal >>> 8;
buf[offset++] = newVal;
buf[offset + 3] = value;
@@ -2439,7 +2526,7 @@ export function writeU_Int40BE(buf, value, offset, min, max) {
value = +value;
checkInt(value, min, max, buf, offset, 4);
- buf[offset++] = Math.floor(value * 2 ** -32);
+ buf[offset++] = MathFloor(value * 2 ** -32);
buf[offset + 3] = value;
value = value >>> 8;
buf[offset + 2] = value;
@@ -2482,12 +2569,12 @@ export function validateOffset(
value,
name,
min = 0,
- max = Number.MAX_SAFE_INTEGER,
+ max = NumberMAX_SAFE_INTEGER,
) {
if (typeof value !== "number") {
throw new codes.ERR_INVALID_ARG_TYPE(name, "number", value);
}
- if (!Number.isInteger(value)) {
+ if (!NumberIsInteger(value)) {
throw new codes.ERR_OUT_OF_RANGE(name, "an integer", value);
}
if (value < min || value > max) {
@@ -2500,7 +2587,7 @@ export function writeU_Int48LE(buf, value, offset, min, max) {
value = +value;
checkInt(value, min, max, buf, offset, 5);
- const newVal = Math.floor(value * 2 ** -32);
+ const newVal = MathFloor(value * 2 ** -32);
buf[offset++] = value;
value = value >>> 8;
buf[offset++] = value;
@@ -2526,7 +2613,7 @@ export function writeU_Int40LE(buf, value, offset, min, max) {
buf[offset++] = value;
value = value >>> 8;
buf[offset++] = value;
- buf[offset++] = Math.floor(newVal * 2 ** -32);
+ buf[offset++] = MathFloor(newVal * 2 ** -32);
return offset;
}
@@ -2560,14 +2647,14 @@ export function writeU_Int24LE(buf, value, offset, min, max) {
export function isUtf8(input) {
if (isTypedArray(input)) {
- if (input.buffer.detached) {
+ if (isDetachedBuffer(TypedArrayPrototypeGetBuffer(input))) {
throw new ERR_INVALID_STATE("Cannot validate on a detached buffer");
}
return op_is_utf8(input);
}
if (isAnyArrayBuffer(input)) {
- if (input.detached) {
+ if (isDetachedBuffer(input)) {
throw new ERR_INVALID_STATE("Cannot validate on a detached buffer");
}
return op_is_utf8(new Uint8Array(input));
@@ -2582,14 +2669,14 @@ export function isUtf8(input) {
export function isAscii(input) {
if (isTypedArray(input)) {
- if (input.buffer.detached) {
+ if (isDetachedBuffer(TypedArrayPrototypeGetBuffer(input))) {
throw new ERR_INVALID_STATE("Cannot validate on a detached buffer");
}
return op_is_ascii(input);
}
if (isAnyArrayBuffer(input)) {
- if (input.detached) {
+ if (isDetachedBuffer(input)) {
throw new ERR_INVALID_STATE("Cannot validate on a detached buffer");
}
return op_is_ascii(new Uint8Array(input));
@@ -2636,7 +2723,7 @@ export function transcode(source, fromEnco, toEnco) {
const result = op_transcode(new Uint8Array(source), fromEnco, toEnco);
return Buffer.from(result, toEnco);
} catch (err) {
- if (err.message.includes("Unable to transcode Buffer")) {
+ if (StringPrototypeIncludes(err.message, "Unable to transcode Buffer")) {
throw illegalArgumentError;
} else {
throw err;
diff --git a/ext/node/polyfills/internal/child_process.ts b/ext/node/polyfills/internal/child_process.ts
index 6f209b719..cfff1079f 100644
--- a/ext/node/polyfills/internal/child_process.ts
+++ b/ext/node/polyfills/internal/child_process.ts
@@ -1191,8 +1191,12 @@ function toDenoArgs(args: string[]): string[] {
}
if (flagInfo === undefined) {
- // Not a known flag that expects a value. Just copy it to the output.
- denoArgs.push(arg);
+ if (arg === "--no-warnings") {
+ denoArgs.push("--quiet");
+ } else {
+ // Not a known flag that expects a value. Just copy it to the output.
+ denoArgs.push(arg);
+ }
continue;
}
@@ -1335,7 +1339,7 @@ export function setupChannel(target: any, ipc: number) {
}
}
- process.nextTick(handleMessage, msg);
+ nextTick(handleMessage, msg);
}
} catch (err) {
if (
@@ -1396,7 +1400,7 @@ export function setupChannel(target: any, ipc: number) {
if (!target.connected) {
const err = new ERR_IPC_CHANNEL_CLOSED();
if (typeof callback === "function") {
- process.nextTick(callback, err);
+ nextTick(callback, err);
} else {
nextTick(() => target.emit("error", err));
}
@@ -1412,7 +1416,18 @@ export function setupChannel(target: any, ipc: number) {
.then(() => {
control.unrefCounted();
if (callback) {
- process.nextTick(callback, null);
+ nextTick(callback, null);
+ }
+ }, (err: Error) => {
+ control.unrefCounted();
+ if (err instanceof Deno.errors.Interrupted) {
+ // Channel closed on us mid-write.
+ } else {
+ if (typeof callback === "function") {
+ nextTick(callback, err);
+ } else {
+ nextTick(() => target.emit("error", err));
+ }
}
});
return queueOk[0];
@@ -1429,7 +1444,7 @@ export function setupChannel(target: any, ipc: number) {
target.connected = false;
target[kCanDisconnect] = false;
control[kControlDisconnect]();
- process.nextTick(() => {
+ nextTick(() => {
target.channel = null;
core.close(ipc);
target.emit("disconnect");
diff --git a/ext/node/polyfills/internal/crypto/_randomInt.ts b/ext/node/polyfills/internal/crypto/_randomInt.ts
index 7f4d703ad..e08b3e963 100644
--- a/ext/node/polyfills/internal/crypto/_randomInt.ts
+++ b/ext/node/polyfills/internal/crypto/_randomInt.ts
@@ -1,9 +1,15 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
-// TODO(petamoriken): enable prefer-primordials for node polyfills
-// deno-lint-ignore-file prefer-primordials
-
import { op_node_random_int } from "ext:core/ops";
+import { primordials } from "ext:core/mod.js";
+const {
+ Error,
+ MathCeil,
+ MathFloor,
+ MathPow,
+ NumberIsSafeInteger,
+ RangeError,
+} = primordials;
export default function randomInt(max: number): number;
export default function randomInt(min: number, max: number): number;
@@ -23,7 +29,9 @@ export default function randomInt(
cb?: (err: Error | null, n?: number) => void,
): number | void {
if (typeof max === "number" && typeof min === "number") {
- [max, min] = [min, max];
+ const temp = max;
+ max = min;
+ min = temp;
}
if (min === undefined) min = 0;
else if (typeof min === "function") {
@@ -32,13 +40,13 @@ export default function randomInt(
}
if (
- !Number.isSafeInteger(min) ||
- typeof max === "number" && !Number.isSafeInteger(max)
+ !NumberIsSafeInteger(min) ||
+ typeof max === "number" && !NumberIsSafeInteger(max)
) {
throw new Error("max or min is not a Safe Number");
}
- if (max - min > Math.pow(2, 48)) {
+ if (max - min > MathPow(2, 48)) {
throw new RangeError("max - min should be less than 2^48!");
}
@@ -46,8 +54,8 @@ export default function randomInt(
throw new Error("Min is bigger than Max!");
}
- min = Math.ceil(min);
- max = Math.floor(max);
+ min = MathCeil(min);
+ max = MathFloor(max);
const result = op_node_random_int(min, max);
if (cb) {
diff --git a/ext/node/polyfills/internal/crypto/keygen.ts b/ext/node/polyfills/internal/crypto/keygen.ts
index a40c76c0d..b023ab106 100644
--- a/ext/node/polyfills/internal/crypto/keygen.ts
+++ b/ext/node/polyfills/internal/crypto/keygen.ts
@@ -29,6 +29,8 @@ import {
} from "ext:deno_node/internal/validators.mjs";
import { Buffer } from "node:buffer";
import { KeyFormat, KeyType } from "ext:deno_node/internal/crypto/types.ts";
+import process from "node:process";
+import { promisify } from "node:util";
import {
op_node_generate_dh_group_key,
@@ -569,7 +571,15 @@ export function generateKeyPair(
privateKey: any,
) => void,
) {
- createJob(kAsync, type, options).then((pair) => {
+ _generateKeyPair(type, options)
+ .then(
+ (res) => callback(null, res.publicKey, res.privateKey),
+ (err) => callback(err, null, null),
+ );
+}
+
+function _generateKeyPair(type: string, options: unknown) {
+ return createJob(kAsync, type, options).then((pair) => {
const privateKeyHandle = op_node_get_private_key_from_pair(pair);
const publicKeyHandle = op_node_get_public_key_from_pair(pair);
@@ -588,12 +598,15 @@ export function generateKeyPair(
}
}
- callback(null, publicKey, privateKey);
- }).catch((err) => {
- callback(err, null, null);
+ return { publicKey, privateKey };
});
}
+Object.defineProperty(generateKeyPair, promisify.custom, {
+ enumerable: false,
+ value: _generateKeyPair,
+});
+
export interface KeyPairKeyObjectResult {
publicKey: KeyObject;
privateKey: KeyObject;
diff --git a/ext/node/polyfills/internal/crypto/random.ts b/ext/node/polyfills/internal/crypto/random.ts
index 4219414dc..a41b86819 100644
--- a/ext/node/polyfills/internal/crypto/random.ts
+++ b/ext/node/polyfills/internal/crypto/random.ts
@@ -38,6 +38,7 @@ import {
ERR_INVALID_ARG_TYPE,
ERR_OUT_OF_RANGE,
} from "ext:deno_node/internal/errors.ts";
+import { Buffer } from "node:buffer";
export { default as randomBytes } from "ext:deno_node/internal/crypto/_randomBytes.ts";
export {
diff --git a/ext/node/polyfills/internal/errors.ts b/ext/node/polyfills/internal/errors.ts
index 51bd7a025..61b53fa96 100644
--- a/ext/node/polyfills/internal/errors.ts
+++ b/ext/node/polyfills/internal/errors.ts
@@ -18,7 +18,7 @@
*/
import { primordials } from "ext:core/mod.js";
-const { JSONStringify, SymbolFor } = primordials;
+const { JSONStringify, SafeArrayIterator, SymbolFor } = primordials;
import { format, inspect } from "ext:deno_node/internal/util/inspect.mjs";
import { codes } from "ext:deno_node/internal/error_codes.ts";
import {
@@ -1874,6 +1874,11 @@ export class ERR_SOCKET_CLOSED extends NodeError {
super("ERR_SOCKET_CLOSED", `Socket is closed`);
}
}
+export class ERR_SOCKET_CONNECTION_TIMEOUT extends NodeError {
+ constructor() {
+ super("ERR_SOCKET_CONNECTION_TIMEOUT", `Socket connection timeout`);
+ }
+}
export class ERR_SOCKET_DGRAM_IS_CONNECTED extends NodeError {
constructor() {
super("ERR_SOCKET_DGRAM_IS_CONNECTED", `Already connected`);
@@ -2385,6 +2390,15 @@ export class ERR_INVALID_RETURN_VALUE extends NodeTypeError {
}
}
+export class ERR_NOT_IMPLEMENTED extends NodeError {
+ constructor(message?: string) {
+ super(
+ "ERR_NOT_IMPLEMENTED",
+ message ? `Not implemented: ${message}` : "Not implemented",
+ );
+ }
+}
+
export class ERR_INVALID_URL extends NodeTypeError {
input: string;
constructor(input: string) {
@@ -2558,19 +2572,6 @@ export class ERR_FS_RMDIR_ENOTDIR extends NodeSystemError {
}
}
-export class ERR_OS_NO_HOMEDIR extends NodeSystemError {
- constructor() {
- const code = isWindows ? "ENOENT" : "ENOTDIR";
- const ctx: NodeSystemErrorCtx = {
- message: "not a directory",
- syscall: "home",
- code,
- errno: isWindows ? osConstants.errno.ENOENT : osConstants.errno.ENOTDIR,
- };
- super(code, ctx, "Path is not a directory");
- }
-}
-
export class ERR_HTTP_SOCKET_ASSIGNED extends NodeError {
constructor() {
super(
@@ -2646,11 +2647,30 @@ export function aggregateTwoErrors(
}
return innerError || outerError;
}
+
+export class NodeAggregateError extends AggregateError {
+ code: string;
+ constructor(errors, message) {
+ super(new SafeArrayIterator(errors), message);
+ this.code = errors[0]?.code;
+ }
+
+ get [kIsNodeError]() {
+ return true;
+ }
+
+ // deno-lint-ignore adjacent-overload-signatures
+ get ["constructor"]() {
+ return AggregateError;
+ }
+}
+
codes.ERR_IPC_CHANNEL_CLOSED = ERR_IPC_CHANNEL_CLOSED;
codes.ERR_INVALID_ARG_TYPE = ERR_INVALID_ARG_TYPE;
codes.ERR_INVALID_ARG_VALUE = ERR_INVALID_ARG_VALUE;
codes.ERR_OUT_OF_RANGE = ERR_OUT_OF_RANGE;
codes.ERR_SOCKET_BAD_PORT = ERR_SOCKET_BAD_PORT;
+codes.ERR_SOCKET_CONNECTION_TIMEOUT = ERR_SOCKET_CONNECTION_TIMEOUT;
codes.ERR_BUFFER_OUT_OF_BOUNDS = ERR_BUFFER_OUT_OF_BOUNDS;
codes.ERR_UNKNOWN_ENCODING = ERR_UNKNOWN_ENCODING;
codes.ERR_PARSE_ARGS_INVALID_OPTION_VALUE = ERR_PARSE_ARGS_INVALID_OPTION_VALUE;
@@ -2851,6 +2871,7 @@ export default {
ERR_INVALID_SYNC_FORK_INPUT,
ERR_INVALID_THIS,
ERR_INVALID_TUPLE,
+ ERR_NOT_IMPLEMENTED,
ERR_INVALID_URI,
ERR_INVALID_URL,
ERR_INVALID_URL_SCHEME,
diff --git a/ext/node/polyfills/internal/net.ts b/ext/node/polyfills/internal/net.ts
index 144612626..a3dcb3ed2 100644
--- a/ext/node/polyfills/internal/net.ts
+++ b/ext/node/polyfills/internal/net.ts
@@ -95,4 +95,5 @@ export function makeSyncWrite(fd: number) {
};
}
+export const kReinitializeHandle = Symbol("kReinitializeHandle");
export const normalizedArgsSymbol = Symbol("normalizedArgs");
diff --git a/ext/node/polyfills/internal/util/inspect.mjs b/ext/node/polyfills/internal/util/inspect.mjs
index 3a61c387c..ae797449b 100644
--- a/ext/node/polyfills/internal/util/inspect.mjs
+++ b/ext/node/polyfills/internal/util/inspect.mjs
@@ -565,6 +565,19 @@ export function stripVTControlCharacters(str) {
export function styleText(format, text) {
validateString(text, "text");
+
+ if (Array.isArray(format)) {
+ for (let i = 0; i < format.length; i++) {
+ const item = format[i];
+ const formatCodes = inspect.colors[item];
+ if (formatCodes == null) {
+ validateOneOf(item, "format", Object.keys(inspect.colors));
+ }
+ text = `\u001b[${formatCodes[0]}m${text}\u001b[${formatCodes[1]}m`;
+ }
+ return text;
+ }
+
const formatCodes = inspect.colors[format];
if (formatCodes == null) {
validateOneOf(format, "format", Object.keys(inspect.colors));
diff --git a/ext/node/polyfills/internal_binding/_timingSafeEqual.ts b/ext/node/polyfills/internal_binding/_timingSafeEqual.ts
index ff141fdbf..559b7685b 100644
--- a/ext/node/polyfills/internal_binding/_timingSafeEqual.ts
+++ b/ext/node/polyfills/internal_binding/_timingSafeEqual.ts
@@ -5,10 +5,11 @@
import { Buffer } from "node:buffer";
-function assert(cond) {
- if (!cond) {
- throw new Error("assertion failed");
+function toDataView(ab: ArrayBufferLike | ArrayBufferView): DataView {
+ if (ArrayBuffer.isView(ab)) {
+ return new DataView(ab.buffer, ab.byteOffset, ab.byteLength);
}
+ return new DataView(ab);
}
/** Compare to array buffers or data views in a way that timing based attacks
@@ -21,13 +22,11 @@ function stdTimingSafeEqual(
return false;
}
if (!(a instanceof DataView)) {
- a = new DataView(ArrayBuffer.isView(a) ? a.buffer : a);
+ a = toDataView(a);
}
if (!(b instanceof DataView)) {
- b = new DataView(ArrayBuffer.isView(b) ? b.buffer : b);
+ b = toDataView(b);
}
- assert(a instanceof DataView);
- assert(b instanceof DataView);
const length = a.byteLength;
let out = 0;
let i = -1;
@@ -41,7 +40,11 @@ export const timingSafeEqual = (
a: Buffer | DataView | ArrayBuffer,
b: Buffer | DataView | ArrayBuffer,
): boolean => {
- if (a instanceof Buffer) a = new DataView(a.buffer);
- if (a instanceof Buffer) b = new DataView(a.buffer);
+ if (a instanceof Buffer) {
+ a = new DataView(a.buffer, a.byteOffset, a.byteLength);
+ }
+ if (b instanceof Buffer) {
+ b = new DataView(b.buffer, b.byteOffset, b.byteLength);
+ }
return stdTimingSafeEqual(a, b);
};
diff --git a/ext/node/polyfills/internal_binding/http_parser.ts b/ext/node/polyfills/internal_binding/http_parser.ts
new file mode 100644
index 000000000..bad10d985
--- /dev/null
+++ b/ext/node/polyfills/internal_binding/http_parser.ts
@@ -0,0 +1,160 @@
+// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
+// Copyright Joyent, Inc. and other Node contributors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a
+// copy of this software and associated documentation files (the
+// "Software"), to deal in the Software without restriction, including
+// without limitation the rights to use, copy, modify, merge, publish,
+// distribute, sublicense, and/or sell copies of the Software, and to permit
+// persons to whom the Software is furnished to do so, subject to the
+// following conditions:
+//
+// The above copyright notice and this permission notice shall be included
+// in all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
+// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
+// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+// USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+import { primordials } from "ext:core/mod.js";
+import { AsyncWrap } from "ext:deno_node/internal_binding/async_wrap.ts";
+
+const {
+ ObjectDefineProperty,
+ ObjectEntries,
+ ObjectSetPrototypeOf,
+ SafeArrayIterator,
+} = primordials;
+
+export const methods = [
+ "DELETE",
+ "GET",
+ "HEAD",
+ "POST",
+ "PUT",
+ "CONNECT",
+ "OPTIONS",
+ "TRACE",
+ "COPY",
+ "LOCK",
+ "MKCOL",
+ "MOVE",
+ "PROPFIND",
+ "PROPPATCH",
+ "SEARCH",
+ "UNLOCK",
+ "BIND",
+ "REBIND",
+ "UNBIND",
+ "ACL",
+ "REPORT",
+ "MKACTIVITY",
+ "CHECKOUT",
+ "MERGE",
+ "M-SEARCH",
+ "NOTIFY",
+ "SUBSCRIBE",
+ "UNSUBSCRIBE",
+ "PATCH",
+ "PURGE",
+ "MKCALENDAR",
+ "LINK",
+ "UNLINK",
+ "SOURCE",
+ "QUERY",
+];
+
+export const allMethods = [
+ "DELETE",
+ "GET",
+ "HEAD",
+ "POST",
+ "PUT",
+ "CONNECT",
+ "OPTIONS",
+ "TRACE",
+ "COPY",
+ "LOCK",
+ "MKCOL",
+ "MOVE",
+ "PROPFIND",
+ "PROPPATCH",
+ "SEARCH",
+ "UNLOCK",
+ "BIND",
+ "REBIND",
+ "UNBIND",
+ "ACL",
+ "REPORT",
+ "MKACTIVITY",
+ "CHECKOUT",
+ "MERGE",
+ "M-SEARCH",
+ "NOTIFY",
+ "SUBSCRIBE",
+ "UNSUBSCRIBE",
+ "PATCH",
+ "PURGE",
+ "MKCALENDAR",
+ "LINK",
+ "UNLINK",
+ "SOURCE",
+ "PRI",
+ "DESCRIBE",
+ "ANNOUNCE",
+ "SETUP",
+ "PLAY",
+ "PAUSE",
+ "TEARDOWN",
+ "GET_PARAMETER",
+ "SET_PARAMETER",
+ "REDIRECT",
+ "RECORD",
+ "FLUSH",
+ "QUERY",
+];
+
+export function HTTPParser() {
+}
+
+ObjectSetPrototypeOf(HTTPParser.prototype, AsyncWrap.prototype);
+
+function defineProps(obj: object, props: Record<string, unknown>) {
+ for (const entry of new SafeArrayIterator(ObjectEntries(props))) {
+ ObjectDefineProperty(obj, entry[0], {
+ __proto__: null,
+ value: entry[1],
+ enumerable: true,
+ writable: true,
+ configurable: true,
+ });
+ }
+}
+
+defineProps(HTTPParser, {
+ REQUEST: 1,
+ RESPONSE: 2,
+ kOnMessageBegin: 0,
+ kOnHeaders: 1,
+ kOnHeadersComplete: 2,
+ kOnBody: 3,
+ kOnMessageComplete: 4,
+ kOnExecute: 5,
+ kOnTimeout: 6,
+ kLenientNone: 0,
+ kLenientHeaders: 1,
+ kLenientChunkedLength: 2,
+ kLenientKeepAlive: 4,
+ kLenientTransferEncoding: 8,
+ kLenientVersion: 16,
+ kLenientDataAfterClose: 32,
+ kLenientOptionalLFAfterCR: 64,
+ kLenientOptionalCRLFAfterChunk: 128,
+ kLenientOptionalCRBeforeLF: 256,
+ kLenientSpacesAfterChunkSize: 512,
+ kLenientAll: 1023,
+});
diff --git a/ext/node/polyfills/internal_binding/mod.ts b/ext/node/polyfills/internal_binding/mod.ts
index f2d7f55bc..ebbfc629f 100644
--- a/ext/node/polyfills/internal_binding/mod.ts
+++ b/ext/node/polyfills/internal_binding/mod.ts
@@ -17,6 +17,7 @@ import * as types from "ext:deno_node/internal_binding/types.ts";
import * as udpWrap from "ext:deno_node/internal_binding/udp_wrap.ts";
import * as util from "ext:deno_node/internal_binding/util.ts";
import * as uv from "ext:deno_node/internal_binding/uv.ts";
+import * as httpParser from "ext:deno_node/internal_binding/http_parser.ts";
const modules = {
"async_wrap": asyncWrap,
@@ -32,7 +33,7 @@ const modules = {
"fs_dir": {},
"fs_event_wrap": {},
"heap_utils": {},
- "http_parser": {},
+ "http_parser": httpParser,
icu: {},
inspector: {},
"js_stream": {},
diff --git a/ext/node/polyfills/internal_binding/tcp_wrap.ts b/ext/node/polyfills/internal_binding/tcp_wrap.ts
index 973a1d1c0..d9f1c5356 100644
--- a/ext/node/polyfills/internal_binding/tcp_wrap.ts
+++ b/ext/node/polyfills/internal_binding/tcp_wrap.ts
@@ -299,8 +299,10 @@ export class TCP extends ConnectionWrap {
* @param noDelay
* @return An error status code.
*/
- setNoDelay(_noDelay: boolean): number {
- // TODO(bnoordhuis) https://github.com/denoland/deno/pull/13103
+ setNoDelay(noDelay: boolean): number {
+ if (this[kStreamBaseField] && "setNoDelay" in this[kStreamBaseField]) {
+ this[kStreamBaseField].setNoDelay(noDelay);
+ }
return 0;
}
diff --git a/ext/node/polyfills/internal_binding/uv.ts b/ext/node/polyfills/internal_binding/uv.ts
index aa468a0a5..6cd70a7e8 100644
--- a/ext/node/polyfills/internal_binding/uv.ts
+++ b/ext/node/polyfills/internal_binding/uv.ts
@@ -530,10 +530,12 @@ export function mapSysErrnoToUvErrno(sysErrno: number): number {
export const UV_EAI_MEMORY = codeMap.get("EAI_MEMORY")!;
export const UV_EBADF = codeMap.get("EBADF")!;
+export const UV_ECANCELED = codeMap.get("ECANCELED")!;
export const UV_EEXIST = codeMap.get("EEXIST");
export const UV_EINVAL = codeMap.get("EINVAL")!;
export const UV_ENOENT = codeMap.get("ENOENT");
export const UV_ENOTSOCK = codeMap.get("ENOTSOCK")!;
+export const UV_ETIMEDOUT = codeMap.get("ETIMEDOUT")!;
export const UV_UNKNOWN = codeMap.get("UNKNOWN")!;
export function errname(errno: number): string {
diff --git a/ext/node/polyfills/net.ts b/ext/node/polyfills/net.ts
index 48e1d0de8..2b0112519 100644
--- a/ext/node/polyfills/net.ts
+++ b/ext/node/polyfills/net.ts
@@ -31,6 +31,7 @@ import {
isIP,
isIPv4,
isIPv6,
+ kReinitializeHandle,
normalizedArgsSymbol,
} from "ext:deno_node/internal/net.ts";
import { Duplex } from "node:stream";
@@ -50,9 +51,11 @@ import {
ERR_SERVER_ALREADY_LISTEN,
ERR_SERVER_NOT_RUNNING,
ERR_SOCKET_CLOSED,
+ ERR_SOCKET_CONNECTION_TIMEOUT,
errnoException,
exceptionWithHostPort,
genericNodeError,
+ NodeAggregateError,
uvExceptionWithHostPort,
} from "ext:deno_node/internal/errors.ts";
import type { ErrnoException } from "ext:deno_node/internal/errors.ts";
@@ -80,6 +83,7 @@ import { Buffer } from "node:buffer";
import type { LookupOneOptions } from "ext:deno_node/internal/dns/utils.ts";
import {
validateAbortSignal,
+ validateBoolean,
validateFunction,
validateInt32,
validateNumber,
@@ -100,13 +104,25 @@ import { ShutdownWrap } from "ext:deno_node/internal_binding/stream_wrap.ts";
import { assert } from "ext:deno_node/_util/asserts.ts";
import { isWindows } from "ext:deno_node/_util/os.ts";
import { ADDRCONFIG, lookup as dnsLookup } from "node:dns";
-import { codeMap } from "ext:deno_node/internal_binding/uv.ts";
+import {
+ codeMap,
+ UV_ECANCELED,
+ UV_ETIMEDOUT,
+} from "ext:deno_node/internal_binding/uv.ts";
import { guessHandleType } from "ext:deno_node/internal_binding/util.ts";
import { debuglog } from "ext:deno_node/internal/util/debuglog.ts";
import type { DuplexOptions } from "ext:deno_node/_stream.d.ts";
import type { BufferEncoding } from "ext:deno_node/_global.d.ts";
import type { Abortable } from "ext:deno_node/_events.d.ts";
import { channel } from "node:diagnostics_channel";
+import { primordials } from "ext:core/mod.js";
+
+const {
+ ArrayPrototypeIncludes,
+ ArrayPrototypePush,
+ FunctionPrototypeBind,
+ MathMax,
+} = primordials;
let debug = debuglog("net", (fn) => {
debug = fn;
@@ -120,6 +136,9 @@ const kBytesWritten = Symbol("kBytesWritten");
const DEFAULT_IPV4_ADDR = "0.0.0.0";
const DEFAULT_IPV6_ADDR = "::";
+let autoSelectFamilyDefault = true;
+let autoSelectFamilyAttemptTimeoutDefault = 250;
+
type Handle = TCP | Pipe;
interface HandleOptions {
@@ -214,6 +233,8 @@ interface TcpSocketConnectOptions extends ConnectOptions {
hints?: number;
family?: number;
lookup?: LookupFunction;
+ autoSelectFamily?: boolean | undefined;
+ autoSelectFamilyAttemptTimeout?: number | undefined;
}
interface IpcSocketConnectOptions extends ConnectOptions {
@@ -316,12 +337,6 @@ export function _normalizeArgs(args: unknown[]): NormalizedArgs {
return arr;
}
-function _isTCPConnectWrap(
- req: TCPConnectWrap | PipeConnectWrap,
-): req is TCPConnectWrap {
- return "localAddress" in req && "localPort" in req;
-}
-
function _afterConnect(
status: number,
// deno-lint-ignore no-explicit-any
@@ -372,7 +387,7 @@ function _afterConnect(
socket.connecting = false;
let details;
- if (_isTCPConnectWrap(req)) {
+ if (req.localAddress && req.localPort) {
details = req.localAddress + ":" + req.localPort;
}
@@ -384,7 +399,7 @@ function _afterConnect(
details,
);
- if (_isTCPConnectWrap(req)) {
+ if (details) {
ex.localAddress = req.localAddress;
ex.localPort = req.localPort;
}
@@ -393,6 +408,107 @@ function _afterConnect(
}
}
+function _createConnectionError(req, status) {
+ let details;
+
+ if (req.localAddress && req.localPort) {
+ details = req.localAddress + ":" + req.localPort;
+ }
+
+ const ex = exceptionWithHostPort(
+ status,
+ "connect",
+ req.address,
+ req.port,
+ details,
+ );
+ if (details) {
+ ex.localAddress = req.localAddress;
+ ex.localPort = req.localPort;
+ }
+
+ return ex;
+}
+
+function _afterConnectMultiple(
+ context,
+ current,
+ status,
+ handle,
+ req,
+ readable,
+ writable,
+) {
+ debug(
+ "connect/multiple: connection attempt to %s:%s completed with status %s",
+ req.address,
+ req.port,
+ status,
+ );
+
+ // Make sure another connection is not spawned
+ clearTimeout(context[kTimeout]);
+
+ // One of the connection has completed and correctly dispatched but after timeout, ignore this one
+ if (status === 0 && current !== context.current - 1) {
+ debug(
+ "connect/multiple: ignoring successful but timedout connection to %s:%s",
+ req.address,
+ req.port,
+ );
+ handle.close();
+ return;
+ }
+
+ const self = context.socket;
+
+ // Some error occurred, add to the list of exceptions
+ if (status !== 0) {
+ const ex = _createConnectionError(req, status);
+ ArrayPrototypePush(context.errors, ex);
+
+ self.emit(
+ "connectionAttemptFailed",
+ req.address,
+ req.port,
+ req.addressType,
+ ex,
+ );
+
+ // Try the next address, unless we were aborted
+ if (context.socket.connecting) {
+ _internalConnectMultiple(context, status === UV_ECANCELED);
+ }
+
+ return;
+ }
+
+ _afterConnect(status, self._handle, req, readable, writable);
+}
+
+function _internalConnectMultipleTimeout(context, req, handle) {
+ debug(
+ "connect/multiple: connection to %s:%s timed out",
+ req.address,
+ req.port,
+ );
+ context.socket.emit(
+ "connectionAttemptTimeout",
+ req.address,
+ req.port,
+ req.addressType,
+ );
+
+ req.oncomplete = undefined;
+ ArrayPrototypePush(context.errors, _createConnectionError(req, UV_ETIMEDOUT));
+ handle.close();
+
+ // Try the next address, unless we were aborted
+ if (context.socket.connecting) {
+ _internalConnectMultiple(context);
+ }
+}
+
function _checkBindError(err: number, port: number, handle: TCP) {
// EADDRINUSE may not be reported until we call `listen()` or `connect()`.
// To complicate matters, a failed `bind()` followed by `listen()` or `connect()`
@@ -495,6 +611,131 @@ function _internalConnect(
}
}
+function _internalConnectMultiple(context, canceled?: boolean) {
+ clearTimeout(context[kTimeout]);
+ const self = context.socket;
+
+ // We were requested to abort. Stop all operations
+ if (self._aborted) {
+ return;
+ }
+
+ // All connections have been tried without success, destroy with error
+ if (canceled || context.current === context.addresses.length) {
+ if (context.errors.length === 0) {
+ self.destroy(new ERR_SOCKET_CONNECTION_TIMEOUT());
+ return;
+ }
+
+ self.destroy(new NodeAggregateError(context.errors));
+ return;
+ }
+
+ assert(self.connecting);
+
+ const current = context.current++;
+
+ if (current > 0) {
+ self[kReinitializeHandle](new TCP(TCPConstants.SOCKET));
+ }
+
+ const { localPort, port, flags } = context;
+ const { address, family: addressType } = context.addresses[current];
+ let localAddress;
+ let err;
+
+ if (localPort) {
+ if (addressType === 4) {
+ localAddress = DEFAULT_IPV4_ADDR;
+ err = self._handle.bind(localAddress, localPort);
+ } else { // addressType === 6
+ localAddress = DEFAULT_IPV6_ADDR;
+ err = self._handle.bind6(localAddress, localPort, flags);
+ }
+
+ debug(
+ "connect/multiple: binding to localAddress: %s and localPort: %d (addressType: %d)",
+ localAddress,
+ localPort,
+ addressType,
+ );
+
+ err = _checkBindError(err, localPort, self._handle);
+ if (err) {
+ ArrayPrototypePush(
+ context.errors,
+ exceptionWithHostPort(err, "bind", localAddress, localPort),
+ );
+ _internalConnectMultiple(context);
+ return;
+ }
+ }
+
+ debug(
+ "connect/multiple: attempting to connect to %s:%d (addressType: %d)",
+ address,
+ port,
+ addressType,
+ );
+ self.emit("connectionAttempt", address, port, addressType);
+
+ const req = new TCPConnectWrap();
+ req.oncomplete = FunctionPrototypeBind(
+ _afterConnectMultiple,
+ undefined,
+ context,
+ current,
+ );
+ req.address = address;
+ req.port = port;
+ req.localAddress = localAddress;
+ req.localPort = localPort;
+ req.addressType = addressType;
+
+ ArrayPrototypePush(
+ self.autoSelectFamilyAttemptedAddresses,
+ `${address}:${port}`,
+ );
+
+ if (addressType === 4) {
+ err = self._handle.connect(req, address, port);
+ } else {
+ err = self._handle.connect6(req, address, port);
+ }
+
+ if (err) {
+ const sockname = self._getsockname();
+ let details;
+
+ if (sockname) {
+ details = sockname.address + ":" + sockname.port;
+ }
+
+ const ex = exceptionWithHostPort(err, "connect", address, port, details);
+ ArrayPrototypePush(context.errors, ex);
+
+ self.emit("connectionAttemptFailed", address, port, addressType, ex);
+ _internalConnectMultiple(context);
+ return;
+ }
+
+ if (current < context.addresses.length - 1) {
+ debug(
+ "connect/multiple: setting the attempt timeout to %d ms",
+ context.timeout,
+ );
+
+ // If the attempt has not returned an error, start the connection timer
+ context[kTimeout] = setTimeout(
+ _internalConnectMultipleTimeout,
+ context.timeout,
+ context,
+ req,
+ self._handle,
+ );
+ }
+}
+
// Provide a better error message when we call end() as a result
// of the other side sending a FIN. The standard "write after end"
// is overly vague, and makes it seem like the user's code is to blame.
@@ -597,7 +838,7 @@ function _lookupAndConnect(
) {
const { localAddress, localPort } = options;
const host = options.host || "localhost";
- let { port } = options;
+ let { port, autoSelectFamilyAttemptTimeout, autoSelectFamily } = options;
if (localAddress && !isIP(localAddress)) {
throw new ERR_INVALID_IP_ADDRESS(localAddress);
@@ -621,6 +862,22 @@ function _lookupAndConnect(
port |= 0;
+ if (autoSelectFamily != null) {
+ validateBoolean(autoSelectFamily, "options.autoSelectFamily");
+ } else {
+ autoSelectFamily = autoSelectFamilyDefault;
+ }
+
+ if (autoSelectFamilyAttemptTimeout !== undefined) {
+ validateInt32(autoSelectFamilyAttemptTimeout);
+
+ if (autoSelectFamilyAttemptTimeout < 10) {
+ autoSelectFamilyAttemptTimeout = 10;
+ }
+ } else {
+ autoSelectFamilyAttemptTimeout = autoSelectFamilyAttemptTimeoutDefault;
+ }
+
// If host is an IP, skip performing a lookup
const addressType = isIP(host);
if (addressType) {
@@ -649,6 +906,7 @@ function _lookupAndConnect(
const dnsOpts = {
family: options.family,
hints: options.hints || 0,
+ all: false,
};
if (
@@ -665,6 +923,31 @@ function _lookupAndConnect(
self._host = host;
const lookup = options.lookup || dnsLookup;
+ if (
+ dnsOpts.family !== 4 && dnsOpts.family !== 6 && !localAddress &&
+ autoSelectFamily
+ ) {
+ debug("connect: autodetecting");
+
+ dnsOpts.all = true;
+ defaultTriggerAsyncIdScope(self[asyncIdSymbol], function () {
+ _lookupAndConnectMultiple(
+ self,
+ asyncIdSymbol,
+ lookup,
+ host,
+ options,
+ dnsOpts,
+ port,
+ localAddress,
+ localPort,
+ autoSelectFamilyAttemptTimeout,
+ );
+ });
+
+ return;
+ }
+
defaultTriggerAsyncIdScope(self[asyncIdSymbol], function () {
lookup(
host,
@@ -719,6 +1002,143 @@ function _lookupAndConnect(
});
}
+function _lookupAndConnectMultiple(
+ self: Socket,
+ asyncIdSymbol: number,
+ // deno-lint-ignore no-explicit-any
+ lookup: any,
+ host: string,
+ options: TcpSocketConnectOptions,
+ dnsopts,
+ port: number,
+ localAddress: string,
+ localPort: number,
+ timeout: number | undefined,
+) {
+ defaultTriggerAsyncIdScope(self[asyncIdSymbol], function emitLookup() {
+ lookup(host, dnsopts, function emitLookup(err, addresses) {
+ // It's possible we were destroyed while looking this up.
+ // XXX it would be great if we could cancel the promise returned by
+ // the look up.
+ if (!self.connecting) {
+ return;
+ } else if (err) {
+ self.emit("lookup", err, undefined, undefined, host);
+
+ // net.createConnection() creates a net.Socket object and immediately
+ // calls net.Socket.connect() on it (that's us). There are no event
+ // listeners registered yet so defer the error event to the next tick.
+ nextTick(_connectErrorNT, self, err);
+ return;
+ }
+
+ // Filter addresses by only keeping the one which are either IPv4 or IPV6.
+ // The first valid address determines which group has preference on the
+ // alternate family sorting which happens later.
+ const validAddresses = [[], []];
+ const validIps = [[], []];
+ let destinations;
+ for (let i = 0, l = addresses.length; i < l; i++) {
+ const address = addresses[i];
+ const { address: ip, family: addressType } = address;
+ self.emit("lookup", err, ip, addressType, host);
+ // It's possible we were destroyed while looking this up.
+ if (!self.connecting) {
+ return;
+ }
+ if (isIP(ip) && (addressType === 4 || addressType === 6)) {
+ destinations ||= addressType === 6 ? { 6: 0, 4: 1 } : { 4: 0, 6: 1 };
+
+ const destination = destinations[addressType];
+
+ // Only try an address once
+ if (!ArrayPrototypeIncludes(validIps[destination], ip)) {
+ ArrayPrototypePush(validAddresses[destination], address);
+ ArrayPrototypePush(validIps[destination], ip);
+ }
+ }
+ }
+
+ // When no AAAA or A records are available, fail on the first one
+ if (!validAddresses[0].length && !validAddresses[1].length) {
+ const { address: firstIp, family: firstAddressType } = addresses[0];
+
+ if (!isIP(firstIp)) {
+ err = new ERR_INVALID_IP_ADDRESS(firstIp);
+ nextTick(_connectErrorNT, self, err);
+ } else if (firstAddressType !== 4 && firstAddressType !== 6) {
+ err = new ERR_INVALID_ADDRESS_FAMILY(
+ firstAddressType,
+ options.host,
+ options.port,
+ );
+ nextTick(_connectErrorNT, self, err);
+ }
+
+ return;
+ }
+
+ // Sort addresses alternating families
+ const toAttempt = [];
+ for (
+ let i = 0,
+ l = MathMax(validAddresses[0].length, validAddresses[1].length);
+ i < l;
+ i++
+ ) {
+ if (i in validAddresses[0]) {
+ ArrayPrototypePush(toAttempt, validAddresses[0][i]);
+ }
+ if (i in validAddresses[1]) {
+ ArrayPrototypePush(toAttempt, validAddresses[1][i]);
+ }
+ }
+
+ if (toAttempt.length === 1) {
+ debug(
+ "connect/multiple: only one address found, switching back to single connection",
+ );
+ const { address: ip, family: addressType } = toAttempt[0];
+
+ self._unrefTimer();
+ defaultTriggerAsyncIdScope(
+ self[asyncIdSymbol],
+ _internalConnect,
+ self,
+ ip,
+ port,
+ addressType,
+ localAddress,
+ localPort,
+ );
+
+ return;
+ }
+
+ self.autoSelectFamilyAttemptedAddresses = [];
+ debug("connect/multiple: will try the following addresses", toAttempt);
+
+ const context = {
+ socket: self,
+ addresses: toAttempt,
+ current: 0,
+ port,
+ localPort,
+ timeout,
+ [kTimeout]: null,
+ errors: [],
+ };
+
+ self._unrefTimer();
+ defaultTriggerAsyncIdScope(
+ self[asyncIdSymbol],
+ _internalConnectMultiple,
+ context,
+ );
+ });
+ });
+}
+
function _afterShutdown(this: ShutdownWrap<TCP>) {
// deno-lint-ignore no-explicit-any
const self: any = this.handle[ownerSymbol];
@@ -777,6 +1197,7 @@ export class Socket extends Duplex {
_host: string | null = null;
// deno-lint-ignore no-explicit-any
_parent: any = null;
+ autoSelectFamilyAttemptedAddresses: AddressInfo[] | undefined = undefined;
constructor(options: SocketOptions | number) {
if (typeof options === "number") {
@@ -1546,6 +1967,16 @@ export class Socket extends Duplex {
set _handle(v: Handle | null) {
this[kHandle] = v;
}
+
+ // deno-lint-ignore no-explicit-any
+ [kReinitializeHandle](handle: any) {
+ this._handle?.close();
+
+ this._handle = handle;
+ this._handle[ownerSymbol] = this;
+
+ _initSocketHandle(this);
+ }
}
export const Stream = Socket;
@@ -1593,6 +2024,33 @@ export function connect(...args: unknown[]) {
export const createConnection = connect;
+/** https://docs.deno.com/api/node/net/#namespace_getdefaultautoselectfamily */
+export function getDefaultAutoSelectFamily() {
+ return autoSelectFamilyDefault;
+}
+
+/** https://docs.deno.com/api/node/net/#namespace_setdefaultautoselectfamily */
+export function setDefaultAutoSelectFamily(value: boolean) {
+ validateBoolean(value, "value");
+ autoSelectFamilyDefault = value;
+}
+
+/** https://docs.deno.com/api/node/net/#namespace_getdefaultautoselectfamilyattempttimeout */
+export function getDefaultAutoSelectFamilyAttemptTimeout() {
+ return autoSelectFamilyAttemptTimeoutDefault;
+}
+
+/** https://docs.deno.com/api/node/net/#namespace_setdefaultautoselectfamilyattempttimeout */
+export function setDefaultAutoSelectFamilyAttemptTimeout(value: number) {
+ validateInt32(value, "value", 1);
+
+ if (value < 10) {
+ value = 10;
+ }
+
+ autoSelectFamilyAttemptTimeoutDefault = value;
+}
+
export interface ListenOptions extends Abortable {
fd?: number;
port?: number | undefined;
@@ -2478,15 +2936,19 @@ export { BlockList, isIP, isIPv4, isIPv6, SocketAddress };
export default {
_createServerHandle,
_normalizeArgs,
- isIP,
- isIPv4,
- isIPv6,
BlockList,
- SocketAddress,
connect,
createConnection,
createServer,
+ getDefaultAutoSelectFamily,
+ getDefaultAutoSelectFamilyAttemptTimeout,
+ isIP,
+ isIPv4,
+ isIPv6,
Server,
+ setDefaultAutoSelectFamily,
+ setDefaultAutoSelectFamilyAttemptTimeout,
Socket,
+ SocketAddress,
Stream,
};
diff --git a/ext/node/polyfills/os.ts b/ext/node/polyfills/os.ts
index e47e8679e..edc89ed2c 100644
--- a/ext/node/polyfills/os.ts
+++ b/ext/node/polyfills/os.ts
@@ -28,16 +28,17 @@ import {
op_homedir,
op_node_os_get_priority,
op_node_os_set_priority,
- op_node_os_username,
+ op_node_os_user_info,
} from "ext:core/ops";
import { validateIntegerRange } from "ext:deno_node/_utils.ts";
import process from "node:process";
import { isWindows } from "ext:deno_node/_util/os.ts";
-import { ERR_OS_NO_HOMEDIR } from "ext:deno_node/internal/errors.ts";
import { os } from "ext:deno_node/internal_binding/constants.ts";
import { osUptime } from "ext:runtime/30_os.js";
import { Buffer } from "ext:deno_node/internal/buffer.mjs";
+import { primordials } from "ext:core/mod.js";
+const { StringPrototypeEndsWith, StringPrototypeSlice } = primordials;
export const constants = os;
@@ -136,6 +137,8 @@ export function arch(): string {
(uptime as any)[Symbol.toPrimitive] = (): number => uptime();
// deno-lint-ignore no-explicit-any
(machine as any)[Symbol.toPrimitive] = (): string => machine();
+// deno-lint-ignore no-explicit-any
+(tmpdir as any)[Symbol.toPrimitive] = (): string | null => tmpdir();
export function cpus(): CPUCoreInfo[] {
return op_cpus();
@@ -268,26 +271,27 @@ export function setPriority(pid: number, priority?: number) {
export function tmpdir(): string | null {
/* This follows the node js implementation, but has a few
differences:
- * On windows, if none of the environment variables are defined,
- we return null.
- * On unix we use a plain Deno.env.get, instead of safeGetenv,
+ * We use a plain Deno.env.get, instead of safeGetenv,
which special cases setuid binaries.
- * Node removes a single trailing / or \, we remove all.
*/
if (isWindows) {
- const temp = Deno.env.get("TEMP") || Deno.env.get("TMP");
- if (temp) {
- return temp.replace(/(?<!:)[/\\]*$/, "");
- }
- const base = Deno.env.get("SYSTEMROOT") || Deno.env.get("WINDIR");
- if (base) {
- return base + "\\temp";
+ let temp = Deno.env.get("TEMP") || Deno.env.get("TMP") ||
+ (Deno.env.get("SystemRoot") || Deno.env.get("windir")) + "\\temp";
+ if (
+ temp.length > 1 && StringPrototypeEndsWith(temp, "\\") &&
+ !StringPrototypeEndsWith(temp, ":\\")
+ ) {
+ temp = StringPrototypeSlice(temp, 0, -1);
}
- return null;
+
+ return temp;
} else { // !isWindows
- const temp = Deno.env.get("TMPDIR") || Deno.env.get("TMP") ||
+ let temp = Deno.env.get("TMPDIR") || Deno.env.get("TMP") ||
Deno.env.get("TEMP") || "/tmp";
- return temp.replace(/(?<!^)\/*$/, "");
+ if (temp.length > 1 && StringPrototypeEndsWith(temp, "/")) {
+ temp = StringPrototypeSlice(temp, 0, -1);
+ }
+ return temp;
}
}
@@ -320,7 +324,6 @@ export function uptime(): number {
return osUptime();
}
-/** Not yet implemented */
export function userInfo(
options: UserInfoOptions = { encoding: "utf-8" },
): UserInfo {
@@ -331,20 +334,10 @@ export function userInfo(
uid = -1;
gid = -1;
}
-
- // TODO(@crowlKats): figure out how to do this correctly:
- // The value of homedir returned by os.userInfo() is provided by the operating system.
- // This differs from the result of os.homedir(), which queries environment
- // variables for the home directory before falling back to the operating system response.
- let _homedir = homedir();
- if (!_homedir) {
- throw new ERR_OS_NO_HOMEDIR();
- }
- let shell = isWindows ? null : (Deno.env.get("SHELL") || null);
- let username = op_node_os_username();
+ let { username, homedir, shell } = op_node_os_user_info(uid);
if (options?.encoding === "buffer") {
- _homedir = _homedir ? Buffer.from(_homedir) : _homedir;
+ homedir = homedir ? Buffer.from(homedir) : homedir;
shell = shell ? Buffer.from(shell) : shell;
username = Buffer.from(username);
}
@@ -352,7 +345,7 @@ export function userInfo(
return {
uid,
gid,
- homedir: _homedir,
+ homedir,
shell,
username,
};
diff --git a/ext/node/polyfills/perf_hooks.ts b/ext/node/polyfills/perf_hooks.ts
index d92b925b5..ec76b3ce2 100644
--- a/ext/node/polyfills/perf_hooks.ts
+++ b/ext/node/polyfills/perf_hooks.ts
@@ -8,6 +8,7 @@ import {
performance as shimPerformance,
PerformanceEntry,
} from "ext:deno_web/15_performance.js";
+import { EldHistogram } from "ext:core/ops";
class PerformanceObserver {
static supportedEntryTypes: string[] = [];
@@ -89,10 +90,11 @@ const performance:
) => shimPerformance.dispatchEvent(...args),
};
-const monitorEventLoopDelay = () =>
- notImplemented(
- "monitorEventLoopDelay from performance",
- );
+function monitorEventLoopDelay(options = {}) {
+ const { resolution = 10 } = options;
+
+ return new EldHistogram(resolution);
+}
export default {
performance,
diff --git a/ext/node/polyfills/process.ts b/ext/node/polyfills/process.ts
index 3dc6ce61a..647376d5c 100644
--- a/ext/node/polyfills/process.ts
+++ b/ext/node/polyfills/process.ts
@@ -15,7 +15,7 @@ import {
import { warnNotImplemented } from "ext:deno_node/_utils.ts";
import { EventEmitter } from "node:events";
-import Module from "node:module";
+import Module, { getBuiltinModule } from "node:module";
import { report } from "ext:deno_node/internal/process/report.ts";
import { validateString } from "ext:deno_node/internal/validators.mjs";
import {
@@ -38,7 +38,15 @@ import {
versions,
} from "ext:deno_node/_process/process.ts";
import { _exiting } from "ext:deno_node/_process/exiting.ts";
-export { _nextTick as nextTick, chdir, cwd, env, version, versions };
+export {
+ _nextTick as nextTick,
+ chdir,
+ cwd,
+ env,
+ getBuiltinModule,
+ version,
+ versions,
+};
import {
createWritableStdioStream,
initStdin,
@@ -520,9 +528,7 @@ Process.prototype.on = function (
} else if (
event !== "SIGBREAK" && event !== "SIGINT" && Deno.build.os === "windows"
) {
- // Ignores all signals except SIGBREAK and SIGINT on windows.
- // deno-lint-ignore no-console
- console.warn(`Ignoring signal "${event}" on Windows`);
+ // TODO(#26331): Ignores all signals except SIGBREAK and SIGINT on windows.
} else {
EventEmitter.prototype.on.call(this, event, listener);
Deno.addSignalListener(event as Deno.Signal, listener);
@@ -730,6 +736,8 @@ Process.prototype.getegid = getegid;
/** This method is removed on Windows */
Process.prototype.geteuid = geteuid;
+Process.prototype.getBuiltinModule = getBuiltinModule;
+
// TODO(kt3k): Implement this when we added -e option to node compat mode
Process.prototype._eval = undefined;
@@ -911,7 +919,7 @@ Object.defineProperty(argv, "1", {
if (Deno.mainModule?.startsWith("file:")) {
return pathFromURL(new URL(Deno.mainModule));
} else {
- return join(Deno.cwd(), "$deno$node.js");
+ return join(Deno.cwd(), "$deno$node.mjs");
}
},
});
diff --git a/ext/node/polyfills/timers.ts b/ext/node/polyfills/timers.ts
index 02f69466e..e826416ed 100644
--- a/ext/node/polyfills/timers.ts
+++ b/ext/node/polyfills/timers.ts
@@ -15,10 +15,16 @@ import {
setUnrefTimeout,
Timeout,
} from "ext:deno_node/internal/timers.mjs";
-import { validateFunction } from "ext:deno_node/internal/validators.mjs";
+import {
+ validateAbortSignal,
+ validateBoolean,
+ validateFunction,
+ validateObject,
+} from "ext:deno_node/internal/validators.mjs";
import { promisify } from "ext:deno_node/internal/util.mjs";
export { setUnrefTimeout } from "ext:deno_node/internal/timers.mjs";
import * as timers from "ext:deno_web/02_timers.js";
+import { AbortError } from "ext:deno_node/internal/errors.ts";
const clearTimeout_ = timers.clearTimeout;
const clearInterval_ = timers.clearInterval;
@@ -89,10 +95,88 @@ export function clearImmediate(immediate: Immediate) {
clearTimeout_(immediate._immediateId);
}
+async function* setIntervalAsync(
+ after: number,
+ value: number,
+ options: { signal?: AbortSignal; ref?: boolean } = { __proto__: null },
+) {
+ validateObject(options, "options");
+
+ if (typeof options?.signal !== "undefined") {
+ validateAbortSignal(options.signal, "options.signal");
+ }
+
+ if (typeof options?.ref !== "undefined") {
+ validateBoolean(options.ref, "options.ref");
+ }
+
+ const { signal, ref = true } = options;
+
+ if (signal?.aborted) {
+ throw new AbortError(undefined, { cause: signal?.reason });
+ }
+
+ let onCancel: (() => void) | undefined = undefined;
+ let interval: Timeout | undefined = undefined;
+ try {
+ let notYielded = 0;
+ let callback: ((value?: object) => void) | undefined = undefined;
+ let rejectCallback: ((message?: string) => void) | undefined = undefined;
+ interval = new Timeout(
+ () => {
+ notYielded++;
+ if (callback) {
+ callback();
+ callback = undefined;
+ rejectCallback = undefined;
+ }
+ },
+ after,
+ [],
+ true,
+ ref,
+ );
+ if (signal) {
+ onCancel = () => {
+ clearInterval(interval);
+ if (rejectCallback) {
+ rejectCallback(signal.reason);
+ callback = undefined;
+ rejectCallback = undefined;
+ }
+ };
+ signal.addEventListener("abort", onCancel, { once: true });
+ }
+ while (!signal?.aborted) {
+ if (notYielded === 0) {
+ await new Promise((resolve: () => void, reject: () => void) => {
+ callback = resolve;
+ rejectCallback = reject;
+ });
+ }
+ for (; notYielded > 0; notYielded--) {
+ yield value;
+ }
+ }
+ } catch (error) {
+ if (signal?.aborted) {
+ throw new AbortError(undefined, { cause: signal?.reason });
+ }
+ throw error;
+ } finally {
+ if (interval) {
+ clearInterval(interval);
+ }
+ if (onCancel) {
+ signal?.removeEventListener("abort", onCancel);
+ }
+ }
+}
+
export const promises = {
setTimeout: promisify(setTimeout),
setImmediate: promisify(setImmediate),
- setInterval: promisify(setInterval),
+ setInterval: setIntervalAsync,
};
promises.scheduler = {
diff --git a/ext/node/polyfills/vm.js b/ext/node/polyfills/vm.js
index 183ddad2f..b64c847c5 100644
--- a/ext/node/polyfills/vm.js
+++ b/ext/node/polyfills/vm.js
@@ -182,6 +182,7 @@ function getContextOptions(options) {
let defaultContextNameIndex = 1;
export function createContext(
+ // deno-lint-ignore prefer-primordials
contextObject = {},
options = { __proto__: null },
) {
diff --git a/ext/node/polyfills/zlib.ts b/ext/node/polyfills/zlib.ts
index 3fe5f8bbd..6e5d02b5b 100644
--- a/ext/node/polyfills/zlib.ts
+++ b/ext/node/polyfills/zlib.ts
@@ -40,6 +40,58 @@ import {
createBrotliCompress,
createBrotliDecompress,
} from "ext:deno_node/_brotli.js";
+import { ERR_INVALID_ARG_TYPE } from "ext:deno_node/internal/errors.ts";
+import { validateUint32 } from "ext:deno_node/internal/validators.mjs";
+import { op_zlib_crc32 } from "ext:core/ops";
+import { core, primordials } from "ext:core/mod.js";
+import { TextEncoder } from "ext:deno_web/08_text_encoding.js";
+const {
+ Uint8Array,
+ TypedArrayPrototypeGetBuffer,
+ TypedArrayPrototypeGetByteLength,
+ TypedArrayPrototypeGetByteOffset,
+ DataViewPrototypeGetBuffer,
+ DataViewPrototypeGetByteLength,
+ DataViewPrototypeGetByteOffset,
+} = primordials;
+const { isTypedArray, isDataView } = core;
+
+const enc = new TextEncoder();
+const toU8 = (input) => {
+ if (typeof input === "string") {
+ return enc.encode(input);
+ }
+
+ if (isTypedArray(input)) {
+ return new Uint8Array(
+ TypedArrayPrototypeGetBuffer(input),
+ TypedArrayPrototypeGetByteOffset(input),
+ TypedArrayPrototypeGetByteLength(input),
+ );
+ } else if (isDataView(input)) {
+ return new Uint8Array(
+ DataViewPrototypeGetBuffer(input),
+ DataViewPrototypeGetByteOffset(input),
+ DataViewPrototypeGetByteLength(input),
+ );
+ }
+
+ return input;
+};
+
+export function crc32(data, value = 0) {
+ if (typeof data !== "string" && !isArrayBufferView(data)) {
+ throw new ERR_INVALID_ARG_TYPE("data", [
+ "Buffer",
+ "TypedArray",
+ "DataView",
+ "string",
+ ], data);
+ }
+ validateUint32(value, "value");
+
+ return op_zlib_crc32(toU8(data), value);
+}
export class Options {
constructor() {
@@ -87,6 +139,7 @@ export default {
BrotliOptions,
codes,
constants,
+ crc32,
createBrotliCompress,
createBrotliDecompress,
createDeflate,
diff --git a/ext/tls/Cargo.toml b/ext/tls/Cargo.toml
index 9f7bffe67..943fc8413 100644
--- a/ext/tls/Cargo.toml
+++ b/ext/tls/Cargo.toml
@@ -2,7 +2,7 @@
[package]
name = "deno_tls"
-version = "0.158.0"
+version = "0.164.0"
authors.workspace = true
edition.workspace = true
license.workspace = true
diff --git a/ext/url/Cargo.toml b/ext/url/Cargo.toml
index 13aca9953..557a4669e 100644
--- a/ext/url/Cargo.toml
+++ b/ext/url/Cargo.toml
@@ -2,7 +2,7 @@
[package]
name = "deno_url"
-version = "0.171.0"
+version = "0.177.0"
authors.workspace = true
edition.workspace = true
license.workspace = true
@@ -15,6 +15,7 @@ path = "lib.rs"
[dependencies]
deno_core.workspace = true
+thiserror.workspace = true
urlpattern = "0.3.0"
[dev-dependencies]
diff --git a/ext/url/lib.rs b/ext/url/lib.rs
index 6869d656b..f8946532a 100644
--- a/ext/url/lib.rs
+++ b/ext/url/lib.rs
@@ -15,6 +15,8 @@ use std::path::PathBuf;
use crate::urlpattern::op_urlpattern_parse;
use crate::urlpattern::op_urlpattern_process_match_input;
+pub use urlpattern::UrlPatternError;
+
deno_core::extension!(
deno_url,
deps = [deno_webidl],
diff --git a/ext/url/urlpattern.rs b/ext/url/urlpattern.rs
index b6d9a1382..7d4e8ee71 100644
--- a/ext/url/urlpattern.rs
+++ b/ext/url/urlpattern.rs
@@ -1,7 +1,5 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
-use deno_core::error::type_error;
-use deno_core::error::AnyError;
use deno_core::op2;
use urlpattern::quirks;
@@ -9,21 +7,23 @@ use urlpattern::quirks::MatchInput;
use urlpattern::quirks::StringOrInit;
use urlpattern::quirks::UrlPattern;
+#[derive(Debug, thiserror::Error)]
+#[error(transparent)]
+pub struct UrlPatternError(urlpattern::Error);
+
#[op2]
#[serde]
pub fn op_urlpattern_parse(
#[serde] input: StringOrInit,
#[string] base_url: Option<String>,
#[serde] options: urlpattern::UrlPatternOptions,
-) -> Result<UrlPattern, AnyError> {
- let init = urlpattern::quirks::process_construct_pattern_input(
- input,
- base_url.as_deref(),
- )
- .map_err(|e| type_error(e.to_string()))?;
+) -> Result<UrlPattern, UrlPatternError> {
+ let init =
+ quirks::process_construct_pattern_input(input, base_url.as_deref())
+ .map_err(UrlPatternError)?;
- let pattern = urlpattern::quirks::parse_pattern(init, options)
- .map_err(|e| type_error(e.to_string()))?;
+ let pattern =
+ quirks::parse_pattern(init, options).map_err(UrlPatternError)?;
Ok(pattern)
}
@@ -33,14 +33,14 @@ pub fn op_urlpattern_parse(
pub fn op_urlpattern_process_match_input(
#[serde] input: StringOrInit,
#[string] base_url: Option<String>,
-) -> Result<Option<(MatchInput, quirks::Inputs)>, AnyError> {
- let res = urlpattern::quirks::process_match_input(input, base_url.as_deref())
- .map_err(|e| type_error(e.to_string()))?;
+) -> Result<Option<(MatchInput, quirks::Inputs)>, UrlPatternError> {
+ let res = quirks::process_match_input(input, base_url.as_deref())
+ .map_err(UrlPatternError)?;
let (input, inputs) = match res {
Some((input, inputs)) => (input, inputs),
None => return Ok(None),
};
- Ok(urlpattern::quirks::parse_match_input(input).map(|input| (input, inputs)))
+ Ok(quirks::parse_match_input(input).map(|input| (input, inputs)))
}
diff --git a/ext/web/02_timers.js b/ext/web/02_timers.js
index 89acaca42..6058febd5 100644
--- a/ext/web/02_timers.js
+++ b/ext/web/02_timers.js
@@ -1,12 +1,9 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
import { core, primordials } from "ext:core/mod.js";
-import { op_defer, op_now } from "ext:core/ops";
+import { op_defer } from "ext:core/ops";
const {
- Uint8Array,
- Uint32Array,
PromisePrototypeThen,
- TypedArrayPrototypeGetBuffer,
TypeError,
indirectEval,
ReflectApply,
@@ -18,13 +15,6 @@ const {
import * as webidl from "ext:deno_webidl/00_webidl.js";
-const hrU8 = new Uint8Array(8);
-const hr = new Uint32Array(TypedArrayPrototypeGetBuffer(hrU8));
-function opNow() {
- op_now(hrU8);
- return (hr[0] * 1000 + hr[1] / 1e6);
-}
-
// ---------------------------------------------------------------------------
function checkThis(thisArg) {
@@ -151,7 +141,6 @@ export {
clearInterval,
clearTimeout,
defer,
- opNow,
refTimer,
setImmediate,
setInterval,
diff --git a/ext/web/06_streams.js b/ext/web/06_streams.js
index a4f2275c5..f29e5f204 100644
--- a/ext/web/06_streams.js
+++ b/ext/web/06_streams.js
@@ -70,7 +70,6 @@ const {
String,
Symbol,
SymbolAsyncIterator,
- SymbolIterator,
SymbolFor,
TypeError,
TypedArrayPrototypeGetBuffer,
@@ -5084,34 +5083,6 @@ function initializeCountSizeFunction(globalObject) {
WeakMapPrototypeSet(countSizeFunctionWeakMap, globalObject, size);
}
-// Ref: https://tc39.es/ecma262/#sec-getiterator
-function getAsyncOrSyncIterator(obj) {
- let iterator;
- if (obj[SymbolAsyncIterator] != null) {
- iterator = obj[SymbolAsyncIterator]();
- if (!isObject(iterator)) {
- throw new TypeError(
- "[Symbol.asyncIterator] returned a non-object value",
- );
- }
- } else if (obj[SymbolIterator] != null) {
- iterator = obj[SymbolIterator]();
- if (!isObject(iterator)) {
- throw new TypeError("[Symbol.iterator] returned a non-object value");
- }
- } else {
- throw new TypeError("No iterator found");
- }
- if (typeof iterator.next !== "function") {
- throw new TypeError("iterator.next is not a function");
- }
- return iterator;
-}
-
-function isObject(x) {
- return (typeof x === "object" && x != null) || typeof x === "function";
-}
-
const _resourceBacking = Symbol("[[resourceBacking]]");
// This distinction exists to prevent unrefable streams being used in
// regular fast streams that are unaware of refability
@@ -5197,21 +5168,22 @@ class ReadableStream {
}
static from(asyncIterable) {
+ const prefix = "Failed to execute 'ReadableStream.from'";
webidl.requiredArguments(
arguments.length,
1,
- "Failed to execute 'ReadableStream.from'",
+ prefix,
);
- asyncIterable = webidl.converters.any(asyncIterable);
-
- const iterator = getAsyncOrSyncIterator(asyncIterable);
+ asyncIterable = webidl.converters["async iterable<any>"](
+ asyncIterable,
+ prefix,
+ "Argument 1",
+ );
+ const iter = asyncIterable.open();
const stream = createReadableStream(noop, async () => {
// deno-lint-ignore prefer-primordials
- const res = await iterator.next();
- if (!isObject(res)) {
- throw new TypeError("iterator.next value is not an object");
- }
+ const res = await iter.next();
if (res.done) {
readableStreamDefaultControllerClose(stream[_controller]);
} else {
@@ -5221,17 +5193,8 @@ class ReadableStream {
);
}
}, async (reason) => {
- if (iterator.return == null) {
- return undefined;
- } else {
- // deno-lint-ignore prefer-primordials
- const res = await iterator.return(reason);
- if (!isObject(res)) {
- throw new TypeError("iterator.return value is not an object");
- } else {
- return undefined;
- }
- }
+ // deno-lint-ignore prefer-primordials
+ await iter.return(reason);
}, 0);
return stream;
}
@@ -6892,6 +6855,10 @@ webidl.converters.StreamPipeOptions = webidl
{ key: "signal", converter: webidl.converters.AbortSignal },
]);
+webidl.converters["async iterable<any>"] = webidl.createAsyncIterableConverter(
+ webidl.converters.any,
+);
+
internals.resourceForReadableStream = resourceForReadableStream;
export {
diff --git a/ext/web/15_performance.js b/ext/web/15_performance.js
index ea5557278..9e0e310a5 100644
--- a/ext/web/15_performance.js
+++ b/ext/web/15_performance.js
@@ -1,6 +1,7 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
import { primordials } from "ext:core/mod.js";
+import { op_now, op_time_origin } from "ext:core/ops";
const {
ArrayPrototypeFilter,
ArrayPrototypePush,
@@ -10,19 +11,34 @@ const {
Symbol,
SymbolFor,
TypeError,
+ TypedArrayPrototypeGetBuffer,
+ Uint8Array,
+ Uint32Array,
} = primordials;
import * as webidl from "ext:deno_webidl/00_webidl.js";
import { structuredClone } from "./02_structured_clone.js";
import { createFilteredInspectProxy } from "ext:deno_console/01_console.js";
import { EventTarget } from "./02_event.js";
-import { opNow } from "./02_timers.js";
import { DOMException } from "./01_dom_exception.js";
const illegalConstructorKey = Symbol("illegalConstructorKey");
let performanceEntries = [];
let timeOrigin;
+const hrU8 = new Uint8Array(8);
+const hr = new Uint32Array(TypedArrayPrototypeGetBuffer(hrU8));
+
+function setTimeOrigin() {
+ op_time_origin(hrU8);
+ timeOrigin = hr[0] * 1000 + hr[1] / 1e6;
+}
+
+function now() {
+ op_now(hrU8);
+ return hr[0] * 1000 + hr[1] / 1e6;
+}
+
webidl.converters["PerformanceMarkOptions"] = webidl
.createDictionaryConverter(
"PerformanceMarkOptions",
@@ -90,10 +106,6 @@ webidl.converters["DOMString or PerformanceMeasureOptions"] = (
return webidl.converters.DOMString(V, prefix, context, opts);
};
-function setTimeOrigin(origin) {
- timeOrigin = origin;
-}
-
function findMostRecent(
name,
type,
@@ -135,8 +147,6 @@ function filterByNameType(
);
}
-const now = opNow;
-
const _name = Symbol("[[name]]");
const _entryType = Symbol("[[entryType]]");
const _startTime = Symbol("[[startTime]]");
diff --git a/ext/web/Cargo.toml b/ext/web/Cargo.toml
index 4120e978e..db28d0e57 100644
--- a/ext/web/Cargo.toml
+++ b/ext/web/Cargo.toml
@@ -2,7 +2,7 @@
[package]
name = "deno_web"
-version = "0.202.0"
+version = "0.208.0"
authors.workspace = true
edition.workspace = true
license.workspace = true
@@ -23,6 +23,7 @@ encoding_rs.workspace = true
flate2 = { workspace = true, features = ["default"] }
futures.workspace = true
serde = "1.0.149"
+thiserror.workspace = true
tokio.workspace = true
uuid = { workspace = true, features = ["serde"] }
diff --git a/ext/web/blob.rs b/ext/web/blob.rs
index 392f36acb..bc64a0f27 100644
--- a/ext/web/blob.rs
+++ b/ext/web/blob.rs
@@ -7,8 +7,6 @@ use std::rc::Rc;
use std::sync::Arc;
use async_trait::async_trait;
-use deno_core::error::type_error;
-use deno_core::error::AnyError;
use deno_core::op2;
use deno_core::parking_lot::Mutex;
use deno_core::url::Url;
@@ -19,6 +17,18 @@ use serde::Deserialize;
use serde::Serialize;
use uuid::Uuid;
+#[derive(Debug, thiserror::Error)]
+pub enum BlobError {
+ #[error("Blob part not found")]
+ BlobPartNotFound,
+ #[error("start + len can not be larger than blob part size")]
+ SizeLargerThanBlobPart,
+ #[error("Blob URLs are not supported in this context")]
+ BlobURLsNotSupported,
+ #[error(transparent)]
+ Url(#[from] deno_core::url::ParseError),
+}
+
use crate::Location;
pub type PartMap = HashMap<Uuid, Arc<dyn BlobPart + Send + Sync>>;
@@ -96,18 +106,18 @@ pub struct Blob {
impl Blob {
// TODO(lucacsonato): this should be a stream!
- pub async fn read_all(&self) -> Result<Vec<u8>, AnyError> {
+ pub async fn read_all(&self) -> Vec<u8> {
let size = self.size();
let mut bytes = Vec::with_capacity(size);
for part in &self.parts {
- let chunk = part.read().await?;
+ let chunk = part.read().await;
bytes.extend_from_slice(chunk);
}
assert_eq!(bytes.len(), size);
- Ok(bytes)
+ bytes
}
fn size(&self) -> usize {
@@ -122,7 +132,7 @@ impl Blob {
#[async_trait]
pub trait BlobPart: Debug {
// TODO(lucacsonato): this should be a stream!
- async fn read(&self) -> Result<&[u8], AnyError>;
+ async fn read(&self) -> &[u8];
fn size(&self) -> usize;
}
@@ -137,8 +147,8 @@ impl From<Vec<u8>> for InMemoryBlobPart {
#[async_trait]
impl BlobPart for InMemoryBlobPart {
- async fn read(&self) -> Result<&[u8], AnyError> {
- Ok(&self.0)
+ async fn read(&self) -> &[u8] {
+ &self.0
}
fn size(&self) -> usize {
@@ -155,9 +165,9 @@ pub struct SlicedBlobPart {
#[async_trait]
impl BlobPart for SlicedBlobPart {
- async fn read(&self) -> Result<&[u8], AnyError> {
- let original = self.part.read().await?;
- Ok(&original[self.start..self.start + self.len])
+ async fn read(&self) -> &[u8] {
+ let original = self.part.read().await;
+ &original[self.start..self.start + self.len]
}
fn size(&self) -> usize {
@@ -189,19 +199,17 @@ pub fn op_blob_slice_part(
state: &mut OpState,
#[serde] id: Uuid,
#[serde] options: SliceOptions,
-) -> Result<Uuid, AnyError> {
+) -> Result<Uuid, BlobError> {
let blob_store = state.borrow::<Arc<BlobStore>>();
let part = blob_store
.get_part(&id)
- .ok_or_else(|| type_error("Blob part not found"))?;
+ .ok_or(BlobError::BlobPartNotFound)?;
let SliceOptions { start, len } = options;
let size = part.size();
if start + len > size {
- return Err(type_error(
- "start + len can not be larger than blob part size",
- ));
+ return Err(BlobError::SizeLargerThanBlobPart);
}
let sliced_part = SlicedBlobPart { part, start, len };
@@ -215,14 +223,14 @@ pub fn op_blob_slice_part(
pub async fn op_blob_read_part(
state: Rc<RefCell<OpState>>,
#[serde] id: Uuid,
-) -> Result<ToJsBuffer, AnyError> {
+) -> Result<ToJsBuffer, BlobError> {
let part = {
let state = state.borrow();
let blob_store = state.borrow::<Arc<BlobStore>>();
blob_store.get_part(&id)
}
- .ok_or_else(|| type_error("Blob part not found"))?;
- let buf = part.read().await?;
+ .ok_or(BlobError::BlobPartNotFound)?;
+ let buf = part.read().await;
Ok(ToJsBuffer::from(buf.to_vec()))
}
@@ -238,13 +246,13 @@ pub fn op_blob_create_object_url(
state: &mut OpState,
#[string] media_type: String,
#[serde] part_ids: Vec<Uuid>,
-) -> Result<String, AnyError> {
+) -> Result<String, BlobError> {
let mut parts = Vec::with_capacity(part_ids.len());
let blob_store = state.borrow::<Arc<BlobStore>>();
for part_id in part_ids {
let part = blob_store
.get_part(&part_id)
- .ok_or_else(|| type_error("Blob part not found"))?;
+ .ok_or(BlobError::BlobPartNotFound)?;
parts.push(part);
}
@@ -263,7 +271,7 @@ pub fn op_blob_create_object_url(
pub fn op_blob_revoke_object_url(
state: &mut OpState,
#[string] url: &str,
-) -> Result<(), AnyError> {
+) -> Result<(), BlobError> {
let url = Url::parse(url)?;
let blob_store = state.borrow::<Arc<BlobStore>>();
blob_store.remove_object_url(&url);
@@ -287,15 +295,15 @@ pub struct ReturnBlobPart {
pub fn op_blob_from_object_url(
state: &mut OpState,
#[string] url: String,
-) -> Result<Option<ReturnBlob>, AnyError> {
+) -> Result<Option<ReturnBlob>, BlobError> {
let url = Url::parse(&url)?;
if url.scheme() != "blob" {
return Ok(None);
}
- let blob_store = state.try_borrow::<Arc<BlobStore>>().ok_or_else(|| {
- type_error("Blob URLs are not supported in this context.")
- })?;
+ let blob_store = state
+ .try_borrow::<Arc<BlobStore>>()
+ .ok_or(BlobError::BlobURLsNotSupported)?;
if let Some(blob) = blob_store.get_object_url(url) {
let parts = blob
.parts
diff --git a/ext/web/compression.rs b/ext/web/compression.rs
index b9ae12ef1..696700991 100644
--- a/ext/web/compression.rs
+++ b/ext/web/compression.rs
@@ -1,7 +1,5 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
-use deno_core::error::type_error;
-use deno_core::error::AnyError;
use deno_core::op2;
use flate2::write::DeflateDecoder;
use flate2::write::DeflateEncoder;
@@ -13,6 +11,18 @@ use flate2::Compression;
use std::cell::RefCell;
use std::io::Write;
+#[derive(Debug, thiserror::Error)]
+pub enum CompressionError {
+ #[error("Unsupported format")]
+ UnsupportedFormat,
+ #[error("resource is closed")]
+ ResourceClosed,
+ #[error(transparent)]
+ IoTypeError(std::io::Error),
+ #[error(transparent)]
+ Io(std::io::Error),
+}
+
#[derive(Debug)]
struct CompressionResource(RefCell<Option<Inner>>);
@@ -34,7 +44,7 @@ enum Inner {
pub fn op_compression_new(
#[string] format: &str,
is_decoder: bool,
-) -> Result<CompressionResource, AnyError> {
+) -> Result<CompressionResource, CompressionError> {
let w = Vec::new();
let inner = match (format, is_decoder) {
("deflate", true) => Inner::DeflateDecoder(ZlibDecoder::new(w)),
@@ -49,7 +59,7 @@ pub fn op_compression_new(
("gzip", false) => {
Inner::GzEncoder(GzEncoder::new(w, Compression::default()))
}
- _ => return Err(type_error("Unsupported format")),
+ _ => return Err(CompressionError::UnsupportedFormat),
};
Ok(CompressionResource(RefCell::new(Some(inner))))
}
@@ -59,40 +69,38 @@ pub fn op_compression_new(
pub fn op_compression_write(
#[cppgc] resource: &CompressionResource,
#[anybuffer] input: &[u8],
-) -> Result<Vec<u8>, AnyError> {
+) -> Result<Vec<u8>, CompressionError> {
let mut inner = resource.0.borrow_mut();
- let inner = inner
- .as_mut()
- .ok_or_else(|| type_error("resource is closed"))?;
+ let inner = inner.as_mut().ok_or(CompressionError::ResourceClosed)?;
let out: Vec<u8> = match &mut *inner {
Inner::DeflateDecoder(d) => {
- d.write_all(input).map_err(|e| type_error(e.to_string()))?;
- d.flush()?;
+ d.write_all(input).map_err(CompressionError::IoTypeError)?;
+ d.flush().map_err(CompressionError::Io)?;
d.get_mut().drain(..)
}
Inner::DeflateEncoder(d) => {
- d.write_all(input).map_err(|e| type_error(e.to_string()))?;
- d.flush()?;
+ d.write_all(input).map_err(CompressionError::IoTypeError)?;
+ d.flush().map_err(CompressionError::Io)?;
d.get_mut().drain(..)
}
Inner::DeflateRawDecoder(d) => {
- d.write_all(input).map_err(|e| type_error(e.to_string()))?;
- d.flush()?;
+ d.write_all(input).map_err(CompressionError::IoTypeError)?;
+ d.flush().map_err(CompressionError::Io)?;
d.get_mut().drain(..)
}
Inner::DeflateRawEncoder(d) => {
- d.write_all(input).map_err(|e| type_error(e.to_string()))?;
- d.flush()?;
+ d.write_all(input).map_err(CompressionError::IoTypeError)?;
+ d.flush().map_err(CompressionError::Io)?;
d.get_mut().drain(..)
}
Inner::GzDecoder(d) => {
- d.write_all(input).map_err(|e| type_error(e.to_string()))?;
- d.flush()?;
+ d.write_all(input).map_err(CompressionError::IoTypeError)?;
+ d.flush().map_err(CompressionError::Io)?;
d.get_mut().drain(..)
}
Inner::GzEncoder(d) => {
- d.write_all(input).map_err(|e| type_error(e.to_string()))?;
- d.flush()?;
+ d.write_all(input).map_err(CompressionError::IoTypeError)?;
+ d.flush().map_err(CompressionError::Io)?;
d.get_mut().drain(..)
}
}
@@ -105,27 +113,27 @@ pub fn op_compression_write(
pub fn op_compression_finish(
#[cppgc] resource: &CompressionResource,
report_errors: bool,
-) -> Result<Vec<u8>, AnyError> {
+) -> Result<Vec<u8>, CompressionError> {
let inner = resource
.0
.borrow_mut()
.take()
- .ok_or_else(|| type_error("resource is closed"))?;
+ .ok_or(CompressionError::ResourceClosed)?;
let out = match inner {
Inner::DeflateDecoder(d) => {
- d.finish().map_err(|e| type_error(e.to_string()))
+ d.finish().map_err(CompressionError::IoTypeError)
}
Inner::DeflateEncoder(d) => {
- d.finish().map_err(|e| type_error(e.to_string()))
+ d.finish().map_err(CompressionError::IoTypeError)
}
Inner::DeflateRawDecoder(d) => {
- d.finish().map_err(|e| type_error(e.to_string()))
+ d.finish().map_err(CompressionError::IoTypeError)
}
Inner::DeflateRawEncoder(d) => {
- d.finish().map_err(|e| type_error(e.to_string()))
+ d.finish().map_err(CompressionError::IoTypeError)
}
- Inner::GzDecoder(d) => d.finish().map_err(|e| type_error(e.to_string())),
- Inner::GzEncoder(d) => d.finish().map_err(|e| type_error(e.to_string())),
+ Inner::GzDecoder(d) => d.finish().map_err(CompressionError::IoTypeError),
+ Inner::GzEncoder(d) => d.finish().map_err(CompressionError::IoTypeError),
};
match out {
Err(err) => {
diff --git a/ext/web/lib.rs b/ext/web/lib.rs
index 3977379a5..af0fc2c27 100644
--- a/ext/web/lib.rs
+++ b/ext/web/lib.rs
@@ -6,9 +6,6 @@ mod message_port;
mod stream_resource;
mod timers;
-use deno_core::error::range_error;
-use deno_core::error::type_error;
-use deno_core::error::AnyError;
use deno_core::op2;
use deno_core::url::Url;
use deno_core::v8;
@@ -22,10 +19,14 @@ use encoding_rs::DecoderResult;
use encoding_rs::Encoding;
use std::borrow::Cow;
use std::cell::RefCell;
-use std::fmt;
use std::path::PathBuf;
use std::sync::Arc;
+pub use blob::BlobError;
+pub use compression::CompressionError;
+pub use message_port::MessagePortError;
+pub use stream_resource::StreamResourceError;
+
use crate::blob::op_blob_create_object_url;
use crate::blob::op_blob_create_part;
use crate::blob::op_blob_from_object_url;
@@ -51,7 +52,8 @@ pub use crate::message_port::Transferable;
use crate::timers::op_defer;
use crate::timers::op_now;
-use crate::timers::StartTime;
+use crate::timers::op_time_origin;
+pub use crate::timers::StartTime;
pub use crate::timers::TimersPermission;
deno_core::extension!(deno_web,
@@ -83,6 +85,7 @@ deno_core::extension!(deno_web,
compression::op_compression_write,
compression::op_compression_finish,
op_now<P>,
+ op_time_origin<P>,
op_defer,
stream_resource::op_readable_stream_resource_allocate,
stream_resource::op_readable_stream_resource_allocate_sized,
@@ -122,13 +125,31 @@ deno_core::extension!(deno_web,
if let Some(location) = options.maybe_location {
state.put(Location(location));
}
- state.put(StartTime::now());
+ state.put(StartTime::default());
}
);
+#[derive(Debug, thiserror::Error)]
+pub enum WebError {
+ #[error("Failed to decode base64")]
+ Base64Decode,
+ #[error("The encoding label provided ('{0}') is invalid.")]
+ InvalidEncodingLabel(String),
+ #[error("buffer exceeds maximum length")]
+ BufferTooLong,
+ #[error("Value too large to decode")]
+ ValueTooLarge,
+ #[error("Provided buffer too small")]
+ BufferTooSmall,
+ #[error("The encoded data is not valid")]
+ DataInvalid,
+ #[error(transparent)]
+ DataError(#[from] v8::DataError),
+}
+
#[op2]
#[serde]
-fn op_base64_decode(#[string] input: String) -> Result<ToJsBuffer, AnyError> {
+fn op_base64_decode(#[string] input: String) -> Result<ToJsBuffer, WebError> {
let mut s = input.into_bytes();
let decoded_len = forgiving_base64_decode_inplace(&mut s)?;
s.truncate(decoded_len);
@@ -137,7 +158,7 @@ fn op_base64_decode(#[string] input: String) -> Result<ToJsBuffer, AnyError> {
#[op2]
#[serde]
-fn op_base64_atob(#[serde] mut s: ByteString) -> Result<ByteString, AnyError> {
+fn op_base64_atob(#[serde] mut s: ByteString) -> Result<ByteString, WebError> {
let decoded_len = forgiving_base64_decode_inplace(&mut s)?;
s.truncate(decoded_len);
Ok(s)
@@ -147,11 +168,9 @@ fn op_base64_atob(#[serde] mut s: ByteString) -> Result<ByteString, AnyError> {
#[inline]
fn forgiving_base64_decode_inplace(
input: &mut [u8],
-) -> Result<usize, AnyError> {
- let error =
- || DomExceptionInvalidCharacterError::new("Failed to decode base64");
- let decoded =
- base64_simd::forgiving_decode_inplace(input).map_err(|_| error())?;
+) -> Result<usize, WebError> {
+ let decoded = base64_simd::forgiving_decode_inplace(input)
+ .map_err(|_| WebError::Base64Decode)?;
Ok(decoded.len())
}
@@ -177,13 +196,9 @@ fn forgiving_base64_encode(s: &[u8]) -> String {
#[string]
fn op_encoding_normalize_label(
#[string] label: String,
-) -> Result<String, AnyError> {
+) -> Result<String, WebError> {
let encoding = Encoding::for_label_no_replacement(label.as_bytes())
- .ok_or_else(|| {
- range_error(format!(
- "The encoding label provided ('{label}') is invalid."
- ))
- })?;
+ .ok_or(WebError::InvalidEncodingLabel(label))?;
Ok(encoding.name().to_lowercase())
}
@@ -192,7 +207,7 @@ fn op_encoding_decode_utf8<'a>(
scope: &mut v8::HandleScope<'a>,
#[anybuffer] zero_copy: &[u8],
ignore_bom: bool,
-) -> Result<v8::Local<'a, v8::String>, AnyError> {
+) -> Result<v8::Local<'a, v8::String>, WebError> {
let buf = &zero_copy;
let buf = if !ignore_bom
@@ -216,7 +231,7 @@ fn op_encoding_decode_utf8<'a>(
// - https://github.com/v8/v8/blob/d68fb4733e39525f9ff0a9222107c02c28096e2a/include/v8.h#L3277-L3278
match v8::String::new_from_utf8(scope, buf, v8::NewStringType::Normal) {
Some(text) => Ok(text),
- None => Err(type_error("buffer exceeds maximum length")),
+ None => Err(WebError::BufferTooLong),
}
}
@@ -227,12 +242,9 @@ fn op_encoding_decode_single(
#[string] label: String,
fatal: bool,
ignore_bom: bool,
-) -> Result<U16String, AnyError> {
- let encoding = Encoding::for_label(label.as_bytes()).ok_or_else(|| {
- range_error(format!(
- "The encoding label provided ('{label}') is invalid."
- ))
- })?;
+) -> Result<U16String, WebError> {
+ let encoding = Encoding::for_label(label.as_bytes())
+ .ok_or(WebError::InvalidEncodingLabel(label))?;
let mut decoder = if ignore_bom {
encoding.new_decoder_without_bom_handling()
@@ -242,7 +254,7 @@ fn op_encoding_decode_single(
let max_buffer_length = decoder
.max_utf16_buffer_length(data.len())
- .ok_or_else(|| range_error("Value too large to decode."))?;
+ .ok_or(WebError::ValueTooLarge)?;
let mut output = vec![0; max_buffer_length];
@@ -254,12 +266,8 @@ fn op_encoding_decode_single(
output.truncate(written);
Ok(output.into())
}
- DecoderResult::OutputFull => {
- Err(range_error("Provided buffer too small."))
- }
- DecoderResult::Malformed(_, _) => {
- Err(type_error("The encoded data is not valid."))
- }
+ DecoderResult::OutputFull => Err(WebError::BufferTooSmall),
+ DecoderResult::Malformed(_, _) => Err(WebError::DataInvalid),
}
} else {
let (result, _, written, _) =
@@ -269,7 +277,7 @@ fn op_encoding_decode_single(
output.truncate(written);
Ok(output.into())
}
- CoderResult::OutputFull => Err(range_error("Provided buffer too small.")),
+ CoderResult::OutputFull => Err(WebError::BufferTooSmall),
}
}
}
@@ -280,12 +288,9 @@ fn op_encoding_new_decoder(
#[string] label: &str,
fatal: bool,
ignore_bom: bool,
-) -> Result<TextDecoderResource, AnyError> {
- let encoding = Encoding::for_label(label.as_bytes()).ok_or_else(|| {
- range_error(format!(
- "The encoding label provided ('{label}') is invalid."
- ))
- })?;
+) -> Result<TextDecoderResource, WebError> {
+ let encoding = Encoding::for_label(label.as_bytes())
+ .ok_or_else(|| WebError::InvalidEncodingLabel(label.to_string()))?;
let decoder = if ignore_bom {
encoding.new_decoder_without_bom_handling()
@@ -305,13 +310,13 @@ fn op_encoding_decode(
#[anybuffer] data: &[u8],
#[cppgc] resource: &TextDecoderResource,
stream: bool,
-) -> Result<U16String, AnyError> {
+) -> Result<U16String, WebError> {
let mut decoder = resource.decoder.borrow_mut();
let fatal = resource.fatal;
let max_buffer_length = decoder
.max_utf16_buffer_length(data.len())
- .ok_or_else(|| range_error("Value too large to decode."))?;
+ .ok_or(WebError::ValueTooLarge)?;
let mut output = vec![0; max_buffer_length];
@@ -323,12 +328,8 @@ fn op_encoding_decode(
output.truncate(written);
Ok(output.into())
}
- DecoderResult::OutputFull => {
- Err(range_error("Provided buffer too small."))
- }
- DecoderResult::Malformed(_, _) => {
- Err(type_error("The encoded data is not valid."))
- }
+ DecoderResult::OutputFull => Err(WebError::BufferTooSmall),
+ DecoderResult::Malformed(_, _) => Err(WebError::DataInvalid),
}
} else {
let (result, _, written, _) =
@@ -338,7 +339,7 @@ fn op_encoding_decode(
output.truncate(written);
Ok(output.into())
}
- CoderResult::OutputFull => Err(range_error("Provided buffer too small.")),
+ CoderResult::OutputFull => Err(WebError::BufferTooSmall),
}
}
}
@@ -356,7 +357,7 @@ fn op_encoding_encode_into(
input: v8::Local<v8::Value>,
#[buffer] buffer: &mut [u8],
#[buffer] out_buf: &mut [u32],
-) -> Result<(), AnyError> {
+) -> Result<(), WebError> {
let s = v8::Local::<v8::String>::try_from(input)?;
let mut nchars = 0;
@@ -414,53 +415,4 @@ pub fn get_declaration() -> PathBuf {
PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("lib.deno_web.d.ts")
}
-#[derive(Debug)]
-pub struct DomExceptionQuotaExceededError {
- pub msg: String,
-}
-
-impl DomExceptionQuotaExceededError {
- pub fn new(msg: &str) -> Self {
- DomExceptionQuotaExceededError {
- msg: msg.to_string(),
- }
- }
-}
-
-#[derive(Debug)]
-pub struct DomExceptionInvalidCharacterError {
- pub msg: String,
-}
-
-impl DomExceptionInvalidCharacterError {
- pub fn new(msg: &str) -> Self {
- DomExceptionInvalidCharacterError {
- msg: msg.to_string(),
- }
- }
-}
-
-impl fmt::Display for DomExceptionQuotaExceededError {
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- f.pad(&self.msg)
- }
-}
-impl fmt::Display for DomExceptionInvalidCharacterError {
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- f.pad(&self.msg)
- }
-}
-
-impl std::error::Error for DomExceptionQuotaExceededError {}
-
-impl std::error::Error for DomExceptionInvalidCharacterError {}
-
-pub fn get_error_class_name(e: &AnyError) -> Option<&'static str> {
- e.downcast_ref::<DomExceptionQuotaExceededError>()
- .map(|_| "DOMExceptionQuotaExceededError")
- .or_else(|| {
- e.downcast_ref::<DomExceptionInvalidCharacterError>()
- .map(|_| "DOMExceptionInvalidCharacterError")
- })
-}
pub struct Location(pub Url);
diff --git a/ext/web/message_port.rs b/ext/web/message_port.rs
index fa299475d..1a4a09073 100644
--- a/ext/web/message_port.rs
+++ b/ext/web/message_port.rs
@@ -4,8 +4,6 @@ use std::borrow::Cow;
use std::cell::RefCell;
use std::rc::Rc;
-use deno_core::error::type_error;
-use deno_core::error::AnyError;
use deno_core::op2;
use deno_core::CancelFuture;
@@ -23,6 +21,20 @@ use tokio::sync::mpsc::unbounded_channel;
use tokio::sync::mpsc::UnboundedReceiver;
use tokio::sync::mpsc::UnboundedSender;
+#[derive(Debug, thiserror::Error)]
+pub enum MessagePortError {
+ #[error("Invalid message port transfer")]
+ InvalidTransfer,
+ #[error("Message port is not ready for transfer")]
+ NotReady,
+ #[error("Can not transfer self message port")]
+ TransferSelf,
+ #[error(transparent)]
+ Canceled(#[from] deno_core::Canceled),
+ #[error(transparent)]
+ Resource(deno_core::error::AnyError),
+}
+
pub enum Transferable {
MessagePort(MessagePort),
ArrayBuffer(u32),
@@ -40,7 +52,7 @@ impl MessagePort {
&self,
state: &mut OpState,
data: JsMessageData,
- ) -> Result<(), AnyError> {
+ ) -> Result<(), MessagePortError> {
let transferables =
deserialize_js_transferables(state, data.transferables)?;
@@ -56,7 +68,7 @@ impl MessagePort {
pub async fn recv(
&self,
state: Rc<RefCell<OpState>>,
- ) -> Result<Option<JsMessageData>, AnyError> {
+ ) -> Result<Option<JsMessageData>, MessagePortError> {
let rx = &self.rx;
let maybe_data = poll_fn(|cx| {
@@ -147,7 +159,7 @@ pub enum JsTransferable {
pub fn deserialize_js_transferables(
state: &mut OpState,
js_transferables: Vec<JsTransferable>,
-) -> Result<Vec<Transferable>, AnyError> {
+) -> Result<Vec<Transferable>, MessagePortError> {
let mut transferables = Vec::with_capacity(js_transferables.len());
for js_transferable in js_transferables {
match js_transferable {
@@ -155,10 +167,10 @@ pub fn deserialize_js_transferables(
let resource = state
.resource_table
.take::<MessagePortResource>(id)
- .map_err(|_| type_error("Invalid message port transfer"))?;
+ .map_err(|_| MessagePortError::InvalidTransfer)?;
resource.cancel.cancel();
- let resource = Rc::try_unwrap(resource)
- .map_err(|_| type_error("Message port is not ready for transfer"))?;
+ let resource =
+ Rc::try_unwrap(resource).map_err(|_| MessagePortError::NotReady)?;
transferables.push(Transferable::MessagePort(resource.port));
}
JsTransferable::ArrayBuffer(id) => {
@@ -202,16 +214,19 @@ pub fn op_message_port_post_message(
state: &mut OpState,
#[smi] rid: ResourceId,
#[serde] data: JsMessageData,
-) -> Result<(), AnyError> {
+) -> Result<(), MessagePortError> {
for js_transferable in &data.transferables {
if let JsTransferable::MessagePort(id) = js_transferable {
if *id == rid {
- return Err(type_error("Can not transfer self message port"));
+ return Err(MessagePortError::TransferSelf);
}
}
}
- let resource = state.resource_table.get::<MessagePortResource>(rid)?;
+ let resource = state
+ .resource_table
+ .get::<MessagePortResource>(rid)
+ .map_err(MessagePortError::Resource)?;
resource.port.send(state, data)
}
@@ -220,7 +235,7 @@ pub fn op_message_port_post_message(
pub async fn op_message_port_recv_message(
state: Rc<RefCell<OpState>>,
#[smi] rid: ResourceId,
-) -> Result<Option<JsMessageData>, AnyError> {
+) -> Result<Option<JsMessageData>, MessagePortError> {
let resource = {
let state = state.borrow();
match state.resource_table.get::<MessagePortResource>(rid) {
@@ -237,8 +252,11 @@ pub async fn op_message_port_recv_message(
pub fn op_message_port_recv_message_sync(
state: &mut OpState, // Rc<RefCell<OpState>>,
#[smi] rid: ResourceId,
-) -> Result<Option<JsMessageData>, AnyError> {
- let resource = state.resource_table.get::<MessagePortResource>(rid)?;
+) -> Result<Option<JsMessageData>, MessagePortError> {
+ let resource = state
+ .resource_table
+ .get::<MessagePortResource>(rid)
+ .map_err(MessagePortError::Resource)?;
let mut rx = resource.port.rx.borrow_mut();
match rx.try_recv() {
diff --git a/ext/web/stream_resource.rs b/ext/web/stream_resource.rs
index 78487883b..c44a385ea 100644
--- a/ext/web/stream_resource.rs
+++ b/ext/web/stream_resource.rs
@@ -1,7 +1,5 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
use bytes::BytesMut;
-use deno_core::error::type_error;
-use deno_core::error::AnyError;
use deno_core::external;
use deno_core::op2;
use deno_core::serde_v8::V8Slice;
@@ -18,6 +16,7 @@ use deno_core::RcRef;
use deno_core::Resource;
use deno_core::ResourceId;
use futures::future::poll_fn;
+use futures::TryFutureExt;
use std::borrow::Cow;
use std::cell::RefCell;
use std::cell::RefMut;
@@ -31,6 +30,14 @@ use std::task::Context;
use std::task::Poll;
use std::task::Waker;
+#[derive(Debug, thiserror::Error)]
+pub enum StreamResourceError {
+ #[error(transparent)]
+ Canceled(#[from] deno_core::Canceled),
+ #[error("{0}")]
+ Js(String),
+}
+
// How many buffers we'll allow in the channel before we stop allowing writes.
const BUFFER_CHANNEL_SIZE: u16 = 1024;
@@ -48,7 +55,7 @@ struct BoundedBufferChannelInner {
buffers: [MaybeUninit<V8Slice<u8>>; BUFFER_CHANNEL_SIZE as _],
ring_producer: u16,
ring_consumer: u16,
- error: Option<AnyError>,
+ error: Option<StreamResourceError>,
current_size: usize,
// TODO(mmastrac): we can math this field instead of accounting for it
len: usize,
@@ -141,7 +148,10 @@ impl BoundedBufferChannelInner {
self.len = 0;
}
- pub fn read(&mut self, limit: usize) -> Result<Option<BufView>, AnyError> {
+ pub fn read(
+ &mut self,
+ limit: usize,
+ ) -> Result<Option<BufView>, StreamResourceError> {
// Empty buffers will return the error, if one exists, or None
if self.len == 0 {
if let Some(error) = self.error.take() {
@@ -230,7 +240,7 @@ impl BoundedBufferChannelInner {
Ok(())
}
- pub fn write_error(&mut self, error: AnyError) {
+ pub fn write_error(&mut self, error: StreamResourceError) {
self.error = Some(error);
if let Some(waker) = self.read_waker.take() {
waker.wake();
@@ -306,7 +316,10 @@ impl BoundedBufferChannel {
self.inner.borrow_mut()
}
- pub fn read(&self, limit: usize) -> Result<Option<BufView>, AnyError> {
+ pub fn read(
+ &self,
+ limit: usize,
+ ) -> Result<Option<BufView>, StreamResourceError> {
self.inner().read(limit)
}
@@ -314,7 +327,7 @@ impl BoundedBufferChannel {
self.inner().write(buffer)
}
- pub fn write_error(&self, error: AnyError) {
+ pub fn write_error(&self, error: StreamResourceError) {
self.inner().write_error(error)
}
@@ -358,7 +371,10 @@ impl ReadableStreamResource {
RcRef::map(self, |s| &s.cancel_handle).clone()
}
- async fn read(self: Rc<Self>, limit: usize) -> Result<BufView, AnyError> {
+ async fn read(
+ self: Rc<Self>,
+ limit: usize,
+ ) -> Result<BufView, StreamResourceError> {
let cancel_handle = self.cancel_handle();
// Serialize all the reads using a task queue.
let _read_permit = self.read_queue.acquire().await;
@@ -387,7 +403,7 @@ impl Resource for ReadableStreamResource {
}
fn read(self: Rc<Self>, limit: usize) -> AsyncResult<BufView> {
- Box::pin(ReadableStreamResource::read(self, limit))
+ Box::pin(ReadableStreamResource::read(self, limit).map_err(|e| e.into()))
}
fn close(self: Rc<Self>) {
@@ -550,7 +566,7 @@ pub fn op_readable_stream_resource_write_error(
) -> bool {
let sender = get_sender(sender);
// We can always write an error, no polling required
- sender.write_error(type_error(Cow::Owned(error)));
+ sender.write_error(StreamResourceError::Js(error));
!sender.closed()
}
diff --git a/ext/web/timers.rs b/ext/web/timers.rs
index a9ab7c97e..06444ed34 100644
--- a/ext/web/timers.rs
+++ b/ext/web/timers.rs
@@ -4,7 +4,10 @@
use deno_core::op2;
use deno_core::OpState;
+use std::time::Duration;
use std::time::Instant;
+use std::time::SystemTime;
+use std::time::UNIX_EPOCH;
pub trait TimersPermission {
fn allow_hrtime(&mut self) -> bool;
@@ -17,21 +20,28 @@ impl TimersPermission for deno_permissions::PermissionsContainer {
}
}
-pub type StartTime = Instant;
+pub struct StartTime(Instant);
-// Returns a milliseconds and nanoseconds subsec
-// since the start time of the deno runtime.
-// If the High precision flag is not set, the
-// nanoseconds are rounded on 2ms.
-#[op2(fast)]
-pub fn op_now<TP>(state: &mut OpState, #[buffer] buf: &mut [u8])
+impl Default for StartTime {
+ fn default() -> Self {
+ Self(Instant::now())
+ }
+}
+
+impl std::ops::Deref for StartTime {
+ type Target = Instant;
+
+ fn deref(&self) -> &Self::Target {
+ &self.0
+ }
+}
+
+fn expose_time<TP>(state: &mut OpState, duration: Duration, out: &mut [u8])
where
TP: TimersPermission + 'static,
{
- let start_time = state.borrow::<StartTime>();
- let elapsed = start_time.elapsed();
- let seconds = elapsed.as_secs();
- let mut subsec_nanos = elapsed.subsec_nanos();
+ let seconds = duration.as_secs() as u32;
+ let mut subsec_nanos = duration.subsec_nanos();
// If the permission is not enabled
// Round the nano result on 2 milliseconds
@@ -40,14 +50,33 @@ where
let reduced_time_precision = 2_000_000; // 2ms in nanoseconds
subsec_nanos -= subsec_nanos % reduced_time_precision;
}
- if buf.len() < 8 {
- return;
+
+ if out.len() >= 8 {
+ out[0..4].copy_from_slice(&seconds.to_ne_bytes());
+ out[4..8].copy_from_slice(&subsec_nanos.to_ne_bytes());
}
- let buf: &mut [u32] =
- // SAFETY: buffer is at least 8 bytes long.
- unsafe { std::slice::from_raw_parts_mut(buf.as_mut_ptr() as _, 2) };
- buf[0] = seconds as u32;
- buf[1] = subsec_nanos;
+}
+
+#[op2(fast)]
+pub fn op_now<TP>(state: &mut OpState, #[buffer] buf: &mut [u8])
+where
+ TP: TimersPermission + 'static,
+{
+ let start_time = state.borrow::<StartTime>();
+ let elapsed = start_time.elapsed();
+ expose_time::<TP>(state, elapsed, buf);
+}
+
+#[op2(fast)]
+pub fn op_time_origin<TP>(state: &mut OpState, #[buffer] buf: &mut [u8])
+where
+ TP: TimersPermission + 'static,
+{
+ // https://w3c.github.io/hr-time/#dfn-estimated-monotonic-time-of-the-unix-epoch
+ let wall_time = SystemTime::now();
+ let monotonic_time = state.borrow::<StartTime>().elapsed();
+ let epoch = wall_time.duration_since(UNIX_EPOCH).unwrap() - monotonic_time;
+ expose_time::<TP>(state, epoch, buf);
}
#[allow(clippy::unused_async)]
diff --git a/ext/webgpu/01_webgpu.js b/ext/webgpu/01_webgpu.js
index 719877750..cab5cbbdb 100644
--- a/ext/webgpu/01_webgpu.js
+++ b/ext/webgpu/01_webgpu.js
@@ -6982,6 +6982,12 @@ webidl.converters.GPUComputePassEncoder = webidl.createInterfaceConverter(
GPUComputePassEncoder.prototype,
);
+// INTERFACE: GPUQuerySet
+webidl.converters.GPUQuerySet = webidl.createInterfaceConverter(
+ "GPUQuerySet",
+ GPUQuerySet.prototype,
+);
+
// DICTIONARY: GPUComputePassTimestampWrites
webidl.converters["GPUComputePassTimestampWrites"] = webidl
.createDictionaryConverter(
@@ -7154,12 +7160,6 @@ webidl.converters["GPURenderPassDepthStencilAttachment"] = webidl
dictMembersGPURenderPassDepthStencilAttachment,
);
-// INTERFACE: GPUQuerySet
-webidl.converters.GPUQuerySet = webidl.createInterfaceConverter(
- "GPUQuerySet",
- GPUQuerySet.prototype,
-);
-
// DICTIONARY: GPURenderPassTimestampWrites
webidl.converters["GPURenderPassTimestampWrites"] = webidl
.createDictionaryConverter(
diff --git a/ext/webgpu/Cargo.toml b/ext/webgpu/Cargo.toml
index 4c709b9c3..f23bb8371 100644
--- a/ext/webgpu/Cargo.toml
+++ b/ext/webgpu/Cargo.toml
@@ -2,7 +2,7 @@
[package]
name = "deno_webgpu"
-version = "0.138.0"
+version = "0.144.0"
authors = ["the Deno authors"]
edition.workspace = true
license = "MIT"
@@ -25,6 +25,7 @@ serde = { workspace = true, features = ["derive"] }
tokio = { workspace = true, features = ["full"] }
wgpu-types = { workspace = true, features = ["serde"] }
raw-window-handle = { workspace = true }
+thiserror.workspace = true
[target.'cfg(not(target_arch = "wasm32"))'.dependencies.wgpu-core]
workspace = true
diff --git a/ext/webgpu/buffer.rs b/ext/webgpu/buffer.rs
index c6cd6f0a7..c2b53890e 100644
--- a/ext/webgpu/buffer.rs
+++ b/ext/webgpu/buffer.rs
@@ -1,7 +1,5 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
-use deno_core::error::type_error;
-use deno_core::error::AnyError;
use deno_core::op2;
use deno_core::OpState;
use deno_core::Resource;
@@ -13,9 +11,18 @@ use std::sync::Arc;
use std::sync::Mutex;
use std::time::Duration;
-use super::error::DomExceptionOperationError;
use super::error::WebGpuResult;
+#[derive(Debug, thiserror::Error)]
+pub enum BufferError {
+ #[error(transparent)]
+ Resource(deno_core::error::AnyError),
+ #[error("usage is not valid")]
+ InvalidUsage,
+ #[error(transparent)]
+ Access(wgpu_core::resource::BufferAccessError),
+}
+
pub(crate) struct WebGpuBuffer(
pub(crate) super::Instance,
pub(crate) wgpu_core::id::BufferId,
@@ -46,18 +53,19 @@ pub fn op_webgpu_create_buffer(
#[number] size: u64,
usage: u32,
mapped_at_creation: bool,
-) -> Result<WebGpuResult, AnyError> {
+) -> Result<WebGpuResult, BufferError> {
let instance = state.borrow::<super::Instance>();
let device_resource = state
.resource_table
- .get::<super::WebGpuDevice>(device_rid)?;
+ .get::<super::WebGpuDevice>(device_rid)
+ .map_err(BufferError::Resource)?;
let device = device_resource.1;
let descriptor = wgpu_core::resource::BufferDescriptor {
label: Some(label),
size,
usage: wgpu_types::BufferUsages::from_bits(usage)
- .ok_or_else(|| type_error("usage is not valid"))?,
+ .ok_or(BufferError::InvalidUsage)?,
mapped_at_creation,
};
@@ -77,18 +85,21 @@ pub async fn op_webgpu_buffer_get_map_async(
mode: u32,
#[number] offset: u64,
#[number] size: u64,
-) -> Result<WebGpuResult, AnyError> {
+) -> Result<WebGpuResult, BufferError> {
let device;
let done = Arc::new(Mutex::new(None));
{
let state_ = state.borrow();
let instance = state_.borrow::<super::Instance>();
- let buffer_resource =
- state_.resource_table.get::<WebGpuBuffer>(buffer_rid)?;
+ let buffer_resource = state_
+ .resource_table
+ .get::<WebGpuBuffer>(buffer_rid)
+ .map_err(BufferError::Resource)?;
let buffer = buffer_resource.1;
let device_resource = state_
.resource_table
- .get::<super::WebGpuDevice>(device_rid)?;
+ .get::<super::WebGpuDevice>(device_rid)
+ .map_err(BufferError::Resource)?;
device = device_resource.1;
let done_ = done.clone();
@@ -120,9 +131,7 @@ pub async fn op_webgpu_buffer_get_map_async(
let result = done.lock().unwrap().take();
match result {
Some(Ok(())) => return Ok(WebGpuResult::empty()),
- Some(Err(e)) => {
- return Err(DomExceptionOperationError::new(&e.to_string()).into())
- }
+ Some(Err(e)) => return Err(BufferError::Access(e)),
None => {
{
let state = state.borrow();
@@ -143,9 +152,12 @@ pub fn op_webgpu_buffer_get_mapped_range(
#[number] offset: u64,
#[number] size: Option<u64>,
#[buffer] buf: &mut [u8],
-) -> Result<WebGpuResult, AnyError> {
+) -> Result<WebGpuResult, BufferError> {
let instance = state.borrow::<super::Instance>();
- let buffer_resource = state.resource_table.get::<WebGpuBuffer>(buffer_rid)?;
+ let buffer_resource = state
+ .resource_table
+ .get::<WebGpuBuffer>(buffer_rid)
+ .map_err(BufferError::Resource)?;
let buffer = buffer_resource.1;
let (slice_pointer, range_size) =
@@ -154,7 +166,7 @@ pub fn op_webgpu_buffer_get_mapped_range(
offset,
size
))
- .map_err(|e| DomExceptionOperationError::new(&e.to_string()))?;
+ .map_err(BufferError::Access)?;
// SAFETY: guarantee to be safe from wgpu
let slice = unsafe {
@@ -176,12 +188,16 @@ pub fn op_webgpu_buffer_unmap(
#[smi] buffer_rid: ResourceId,
#[smi] mapped_rid: ResourceId,
#[buffer] buf: Option<&[u8]>,
-) -> Result<WebGpuResult, AnyError> {
+) -> Result<WebGpuResult, BufferError> {
let mapped_resource = state
.resource_table
- .take::<WebGpuBufferMapped>(mapped_rid)?;
+ .take::<WebGpuBufferMapped>(mapped_rid)
+ .map_err(BufferError::Resource)?;
let instance = state.borrow::<super::Instance>();
- let buffer_resource = state.resource_table.get::<WebGpuBuffer>(buffer_rid)?;
+ let buffer_resource = state
+ .resource_table
+ .get::<WebGpuBuffer>(buffer_rid)
+ .map_err(BufferError::Resource)?;
let buffer = buffer_resource.1;
if let Some(buf) = buf {
diff --git a/ext/webgpu/bundle.rs b/ext/webgpu/bundle.rs
index 57158271c..d9a5b2953 100644
--- a/ext/webgpu/bundle.rs
+++ b/ext/webgpu/bundle.rs
@@ -1,7 +1,5 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
-use deno_core::error::type_error;
-use deno_core::error::AnyError;
use deno_core::op2;
use deno_core::OpState;
use deno_core::Resource;
@@ -13,6 +11,14 @@ use std::rc::Rc;
use super::error::WebGpuResult;
+#[derive(Debug, thiserror::Error)]
+pub enum BundleError {
+ #[error(transparent)]
+ Resource(deno_core::error::AnyError),
+ #[error("size must be larger than 0")]
+ InvalidSize,
+}
+
struct WebGpuRenderBundleEncoder(
RefCell<wgpu_core::command::RenderBundleEncoder>,
);
@@ -53,7 +59,7 @@ pub struct CreateRenderBundleEncoderArgs {
pub fn op_webgpu_create_render_bundle_encoder(
state: &mut OpState,
#[serde] args: CreateRenderBundleEncoderArgs,
-) -> Result<WebGpuResult, AnyError> {
+) -> Result<WebGpuResult, deno_core::error::AnyError> {
let device_resource = state
.resource_table
.get::<super::WebGpuDevice>(args.device_rid)?;
@@ -100,7 +106,7 @@ pub fn op_webgpu_render_bundle_encoder_finish(
state: &mut OpState,
#[smi] render_bundle_encoder_rid: ResourceId,
#[string] label: Cow<str>,
-) -> Result<WebGpuResult, AnyError> {
+) -> Result<WebGpuResult, deno_core::error::AnyError> {
let render_bundle_encoder_resource =
state
.resource_table
@@ -131,7 +137,7 @@ pub fn op_webgpu_render_bundle_encoder_set_bind_group(
#[buffer] dynamic_offsets_data: &[u32],
#[number] dynamic_offsets_data_start: usize,
#[number] dynamic_offsets_data_length: usize,
-) -> Result<WebGpuResult, AnyError> {
+) -> Result<WebGpuResult, deno_core::error::AnyError> {
let bind_group_resource =
state
.resource_table
@@ -171,7 +177,7 @@ pub fn op_webgpu_render_bundle_encoder_push_debug_group(
state: &mut OpState,
#[smi] render_bundle_encoder_rid: ResourceId,
#[string] group_label: &str,
-) -> Result<WebGpuResult, AnyError> {
+) -> Result<WebGpuResult, deno_core::error::AnyError> {
let render_bundle_encoder_resource =
state
.resource_table
@@ -195,7 +201,7 @@ pub fn op_webgpu_render_bundle_encoder_push_debug_group(
pub fn op_webgpu_render_bundle_encoder_pop_debug_group(
state: &mut OpState,
#[smi] render_bundle_encoder_rid: ResourceId,
-) -> Result<WebGpuResult, AnyError> {
+) -> Result<WebGpuResult, deno_core::error::AnyError> {
let render_bundle_encoder_resource =
state
.resource_table
@@ -214,7 +220,7 @@ pub fn op_webgpu_render_bundle_encoder_insert_debug_marker(
state: &mut OpState,
#[smi] render_bundle_encoder_rid: ResourceId,
#[string] marker_label: &str,
-) -> Result<WebGpuResult, AnyError> {
+) -> Result<WebGpuResult, deno_core::error::AnyError> {
let render_bundle_encoder_resource =
state
.resource_table
@@ -239,7 +245,7 @@ pub fn op_webgpu_render_bundle_encoder_set_pipeline(
state: &mut OpState,
#[smi] render_bundle_encoder_rid: ResourceId,
#[smi] pipeline: ResourceId,
-) -> Result<WebGpuResult, AnyError> {
+) -> Result<WebGpuResult, deno_core::error::AnyError> {
let render_pipeline_resource =
state
.resource_table
@@ -266,18 +272,17 @@ pub fn op_webgpu_render_bundle_encoder_set_index_buffer(
#[serde] index_format: wgpu_types::IndexFormat,
#[number] offset: u64,
#[number] size: u64,
-) -> Result<WebGpuResult, AnyError> {
+) -> Result<WebGpuResult, BundleError> {
let buffer_resource = state
.resource_table
- .get::<super::buffer::WebGpuBuffer>(buffer)?;
- let render_bundle_encoder_resource =
- state
- .resource_table
- .get::<WebGpuRenderBundleEncoder>(render_bundle_encoder_rid)?;
- let size = Some(
- std::num::NonZeroU64::new(size)
- .ok_or_else(|| type_error("size must be larger than 0"))?,
- );
+ .get::<super::buffer::WebGpuBuffer>(buffer)
+ .map_err(BundleError::Resource)?;
+ let render_bundle_encoder_resource = state
+ .resource_table
+ .get::<WebGpuRenderBundleEncoder>(render_bundle_encoder_rid)
+ .map_err(BundleError::Resource)?;
+ let size =
+ Some(std::num::NonZeroU64::new(size).ok_or(BundleError::InvalidSize)?);
render_bundle_encoder_resource
.0
@@ -296,19 +301,17 @@ pub fn op_webgpu_render_bundle_encoder_set_vertex_buffer(
#[smi] buffer: ResourceId,
#[number] offset: u64,
#[number] size: Option<u64>,
-) -> Result<WebGpuResult, AnyError> {
+) -> Result<WebGpuResult, BundleError> {
let buffer_resource = state
.resource_table
- .get::<super::buffer::WebGpuBuffer>(buffer)?;
- let render_bundle_encoder_resource =
- state
- .resource_table
- .get::<WebGpuRenderBundleEncoder>(render_bundle_encoder_rid)?;
+ .get::<super::buffer::WebGpuBuffer>(buffer)
+ .map_err(BundleError::Resource)?;
+ let render_bundle_encoder_resource = state
+ .resource_table
+ .get::<WebGpuRenderBundleEncoder>(render_bundle_encoder_rid)
+ .map_err(BundleError::Resource)?;
let size = if let Some(size) = size {
- Some(
- std::num::NonZeroU64::new(size)
- .ok_or_else(|| type_error("size must be larger than 0"))?,
- )
+ Some(std::num::NonZeroU64::new(size).ok_or(BundleError::InvalidSize)?)
} else {
None
};
@@ -333,7 +336,7 @@ pub fn op_webgpu_render_bundle_encoder_draw(
instance_count: u32,
first_vertex: u32,
first_instance: u32,
-) -> Result<WebGpuResult, AnyError> {
+) -> Result<WebGpuResult, deno_core::error::AnyError> {
let render_bundle_encoder_resource =
state
.resource_table
@@ -360,7 +363,7 @@ pub fn op_webgpu_render_bundle_encoder_draw_indexed(
first_index: u32,
base_vertex: i32,
first_instance: u32,
-) -> Result<WebGpuResult, AnyError> {
+) -> Result<WebGpuResult, deno_core::error::AnyError> {
let render_bundle_encoder_resource =
state
.resource_table
@@ -385,7 +388,7 @@ pub fn op_webgpu_render_bundle_encoder_draw_indirect(
#[smi] render_bundle_encoder_rid: ResourceId,
#[smi] indirect_buffer: ResourceId,
#[number] indirect_offset: u64,
-) -> Result<WebGpuResult, AnyError> {
+) -> Result<WebGpuResult, deno_core::error::AnyError> {
let buffer_resource = state
.resource_table
.get::<super::buffer::WebGpuBuffer>(indirect_buffer)?;
diff --git a/ext/webgpu/byow.rs b/ext/webgpu/byow.rs
index 3a43f416e..c9e1177b1 100644
--- a/ext/webgpu/byow.rs
+++ b/ext/webgpu/byow.rs
@@ -1,7 +1,5 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
-use deno_core::error::type_error;
-use deno_core::error::AnyError;
use deno_core::op2;
use deno_core::OpState;
use deno_core::ResourceId;
@@ -16,6 +14,47 @@ use std::ptr::NonNull;
use crate::surface::WebGpuSurface;
+#[derive(Debug, thiserror::Error)]
+pub enum ByowError {
+ #[error("Cannot create surface outside of WebGPU context. Did you forget to call `navigator.gpu.requestAdapter()`?")]
+ WebGPUNotInitiated,
+ #[error("Invalid parameters")]
+ InvalidParameters,
+ #[error(transparent)]
+ CreateSurface(wgpu_core::instance::CreateSurfaceError),
+ #[cfg(target_os = "windows")]
+ #[error("Invalid system on Windows")]
+ InvalidSystem,
+ #[cfg(target_os = "macos")]
+ #[error("Invalid system on macOS")]
+ InvalidSystem,
+ #[cfg(any(
+ target_os = "linux",
+ target_os = "freebsd",
+ target_os = "openbsd"
+ ))]
+ #[error("Invalid system on Linux/BSD")]
+ InvalidSystem,
+ #[cfg(any(
+ target_os = "windows",
+ target_os = "linux",
+ target_os = "freebsd",
+ target_os = "openbsd"
+ ))]
+ #[error("window is null")]
+ NullWindow,
+ #[cfg(any(
+ target_os = "linux",
+ target_os = "freebsd",
+ target_os = "openbsd"
+ ))]
+ #[error("display is null")]
+ NullDisplay,
+ #[cfg(target_os = "macos")]
+ #[error("ns_view is null")]
+ NSViewDisplay,
+}
+
#[op2(fast)]
#[smi]
pub fn op_webgpu_surface_create(
@@ -23,10 +62,10 @@ pub fn op_webgpu_surface_create(
#[string] system: &str,
p1: *const c_void,
p2: *const c_void,
-) -> Result<ResourceId, AnyError> {
- let instance = state.try_borrow::<super::Instance>().ok_or_else(|| {
- type_error("Cannot create surface outside of WebGPU context. Did you forget to call `navigator.gpu.requestAdapter()`?")
- })?;
+) -> Result<ResourceId, ByowError> {
+ let instance = state
+ .try_borrow::<super::Instance>()
+ .ok_or(ByowError::WebGPUNotInitiated)?;
// Security note:
//
// The `p1` and `p2` parameters are pointers to platform-specific window
@@ -41,13 +80,15 @@ pub fn op_webgpu_surface_create(
//
// - Only FFI can export v8::External to user code.
if p1.is_null() {
- return Err(type_error("Invalid parameters"));
+ return Err(ByowError::InvalidParameters);
}
let (win_handle, display_handle) = raw_window(system, p1, p2)?;
// SAFETY: see above comment
let surface = unsafe {
- instance.instance_create_surface(display_handle, win_handle, None)?
+ instance
+ .instance_create_surface(display_handle, win_handle, None)
+ .map_err(ByowError::CreateSurface)?
};
let rid = state
@@ -66,15 +107,14 @@ fn raw_window(
system: &str,
_ns_window: *const c_void,
ns_view: *const c_void,
-) -> Result<RawHandles, AnyError> {
+) -> Result<RawHandles, ByowError> {
if system != "cocoa" {
- return Err(type_error("Invalid system on macOS"));
+ return Err(ByowError::InvalidSystem);
}
let win_handle = raw_window_handle::RawWindowHandle::AppKit(
raw_window_handle::AppKitWindowHandle::new(
- NonNull::new(ns_view as *mut c_void)
- .ok_or(type_error("ns_view is null"))?,
+ NonNull::new(ns_view as *mut c_void).ok_or(ByowError::NSViewDisplay)?,
),
);
@@ -89,16 +129,16 @@ fn raw_window(
system: &str,
window: *const c_void,
hinstance: *const c_void,
-) -> Result<RawHandles, AnyError> {
+) -> Result<RawHandles, ByowError> {
use raw_window_handle::WindowsDisplayHandle;
if system != "win32" {
- return Err(type_error("Invalid system on Windows"));
+ return Err(ByowError::InvalidSystem);
}
let win_handle = {
let mut handle = raw_window_handle::Win32WindowHandle::new(
std::num::NonZeroIsize::new(window as isize)
- .ok_or(type_error("window is null"))?,
+ .ok_or(ByowError::NullWindow)?,
);
handle.hinstance = std::num::NonZeroIsize::new(hinstance as isize);
@@ -115,7 +155,7 @@ fn raw_window(
system: &str,
window: *const c_void,
display: *const c_void,
-) -> Result<RawHandles, AnyError> {
+) -> Result<RawHandles, ByowError> {
let (win_handle, display_handle);
if system == "x11" {
win_handle = raw_window_handle::RawWindowHandle::Xlib(
@@ -131,19 +171,17 @@ fn raw_window(
} else if system == "wayland" {
win_handle = raw_window_handle::RawWindowHandle::Wayland(
raw_window_handle::WaylandWindowHandle::new(
- NonNull::new(window as *mut c_void)
- .ok_or(type_error("window is null"))?,
+ NonNull::new(window as *mut c_void).ok_or(ByowError::NullWindow)?,
),
);
display_handle = raw_window_handle::RawDisplayHandle::Wayland(
raw_window_handle::WaylandDisplayHandle::new(
- NonNull::new(display as *mut c_void)
- .ok_or(type_error("display is null"))?,
+ NonNull::new(display as *mut c_void).ok_or(ByowError::NullDisplay)?,
),
);
} else {
- return Err(type_error("Invalid system on Linux/BSD"));
+ return Err(ByowError::InvalidSystem);
}
Ok((win_handle, display_handle))
@@ -160,6 +198,6 @@ fn raw_window(
_system: &str,
_window: *const c_void,
_display: *const c_void,
-) -> Result<RawHandles, AnyError> {
- Err(type_error("Unsupported platform"))
+) -> Result<RawHandles, deno_core::error::AnyError> {
+ Err(deno_core::error::type_error("Unsupported platform"))
}
diff --git a/ext/webgpu/error.rs b/ext/webgpu/error.rs
index 5b55d506a..f08f76538 100644
--- a/ext/webgpu/error.rs
+++ b/ext/webgpu/error.rs
@@ -1,11 +1,9 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
-use deno_core::error::AnyError;
use deno_core::ResourceId;
use serde::Serialize;
use std::convert::From;
use std::error::Error;
-use std::fmt;
use wgpu_core::binding_model::CreateBindGroupError;
use wgpu_core::binding_model::CreateBindGroupLayoutError;
use wgpu_core::binding_model::CreatePipelineLayoutError;
@@ -286,29 +284,3 @@ impl From<ConfigureSurfaceError> for WebGpuError {
WebGpuError::Validation(fmt_err(&err))
}
}
-
-#[derive(Debug)]
-pub struct DomExceptionOperationError {
- pub msg: String,
-}
-
-impl DomExceptionOperationError {
- pub fn new(msg: &str) -> Self {
- DomExceptionOperationError {
- msg: msg.to_string(),
- }
- }
-}
-
-impl fmt::Display for DomExceptionOperationError {
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- f.pad(&self.msg)
- }
-}
-
-impl std::error::Error for DomExceptionOperationError {}
-
-pub fn get_error_class_name(e: &AnyError) -> Option<&'static str> {
- e.downcast_ref::<DomExceptionOperationError>()
- .map(|_| "DOMExceptionOperationError")
-}
diff --git a/ext/webgpu/lib.rs b/ext/webgpu/lib.rs
index df2ab323a..5dc8278e4 100644
--- a/ext/webgpu/lib.rs
+++ b/ext/webgpu/lib.rs
@@ -2,7 +2,6 @@
#![cfg(not(target_arch = "wasm32"))]
#![warn(unsafe_op_in_unsafe_fn)]
-use deno_core::error::AnyError;
use deno_core::op2;
use deno_core::OpState;
use deno_core::Resource;
@@ -16,7 +15,6 @@ use std::rc::Rc;
pub use wgpu_core;
pub use wgpu_types;
-use error::DomExceptionOperationError;
use error::WebGpuResult;
pub const UNSTABLE_FEATURE_NAME: &str = "webgpu";
@@ -44,7 +42,7 @@ mod macros {
#[cfg(all(not(target_arch = "wasm32"), windows))]
wgpu_types::Backend::Dx12 => $($c)*.$method::<wgpu_core::api::Dx12> $params,
#[cfg(any(
- all(unix, not(target_os = "macos"), not(target_os = "ios")),
+ all(not(target_os = "macos"), not(target_os = "ios")),
feature = "angle",
target_arch = "wasm32"
))]
@@ -85,6 +83,18 @@ pub mod shader;
pub mod surface;
pub mod texture;
+#[derive(Debug, thiserror::Error)]
+pub enum InitError {
+ #[error(transparent)]
+ Resource(deno_core::error::AnyError),
+ #[error(transparent)]
+ InvalidAdapter(wgpu_core::instance::InvalidAdapter),
+ #[error(transparent)]
+ RequestDevice(wgpu_core::instance::RequestDeviceError),
+ #[error(transparent)]
+ InvalidDevice(wgpu_core::device::InvalidDevice),
+}
+
pub type Instance = std::sync::Arc<wgpu_core::global::Global>;
struct WebGpuAdapter(Instance, wgpu_core::id::AdapterId);
@@ -400,7 +410,7 @@ pub fn op_webgpu_request_adapter(
state: Rc<RefCell<OpState>>,
#[serde] power_preference: Option<wgpu_types::PowerPreference>,
force_fallback_adapter: bool,
-) -> Result<GpuAdapterResOrErr, AnyError> {
+) -> Result<GpuAdapterResOrErr, InitError> {
let mut state = state.borrow_mut();
let backends = std::env::var("DENO_WEBGPU_BACKEND").map_or_else(
@@ -441,10 +451,11 @@ pub fn op_webgpu_request_adapter(
}
};
let adapter_features =
- gfx_select!(adapter => instance.adapter_features(adapter))?;
+ gfx_select!(adapter => instance.adapter_features(adapter))
+ .map_err(InitError::InvalidAdapter)?;
let features = deserialize_features(&adapter_features);
- let adapter_limits =
- gfx_select!(adapter => instance.adapter_limits(adapter))?;
+ let adapter_limits = gfx_select!(adapter => instance.adapter_limits(adapter))
+ .map_err(InitError::InvalidAdapter)?;
let instance = instance.clone();
@@ -663,10 +674,12 @@ pub fn op_webgpu_request_device(
#[string] label: String,
#[serde] required_features: GpuRequiredFeatures,
#[serde] required_limits: Option<wgpu_types::Limits>,
-) -> Result<GpuDeviceRes, AnyError> {
+) -> Result<GpuDeviceRes, InitError> {
let mut state = state.borrow_mut();
- let adapter_resource =
- state.resource_table.take::<WebGpuAdapter>(adapter_rid)?;
+ let adapter_resource = state
+ .resource_table
+ .take::<WebGpuAdapter>(adapter_rid)
+ .map_err(InitError::Resource)?;
let adapter = adapter_resource.1;
let instance = state.borrow::<Instance>();
@@ -685,13 +698,14 @@ pub fn op_webgpu_request_device(
));
adapter_resource.close();
if let Some(err) = maybe_err {
- return Err(DomExceptionOperationError::new(&err.to_string()).into());
+ return Err(InitError::RequestDevice(err));
}
- let device_features =
- gfx_select!(device => instance.device_features(device))?;
+ let device_features = gfx_select!(device => instance.device_features(device))
+ .map_err(InitError::InvalidDevice)?;
let features = deserialize_features(&device_features);
- let limits = gfx_select!(device => instance.device_limits(device))?;
+ let limits = gfx_select!(device => instance.device_limits(device))
+ .map_err(InitError::InvalidDevice)?;
let instance = instance.clone();
let instance2 = instance.clone();
@@ -722,14 +736,17 @@ pub struct GPUAdapterInfo {
pub fn op_webgpu_request_adapter_info(
state: Rc<RefCell<OpState>>,
#[smi] adapter_rid: ResourceId,
-) -> Result<GPUAdapterInfo, AnyError> {
+) -> Result<GPUAdapterInfo, InitError> {
let state = state.borrow_mut();
- let adapter_resource =
- state.resource_table.get::<WebGpuAdapter>(adapter_rid)?;
+ let adapter_resource = state
+ .resource_table
+ .get::<WebGpuAdapter>(adapter_rid)
+ .map_err(InitError::Resource)?;
let adapter = adapter_resource.1;
let instance = state.borrow::<Instance>();
- let info = gfx_select!(adapter => instance.adapter_get_info(adapter))?;
+ let info = gfx_select!(adapter => instance.adapter_get_info(adapter))
+ .map_err(InitError::InvalidAdapter)?;
Ok(GPUAdapterInfo {
vendor: info.vendor.to_string(),
@@ -770,9 +787,11 @@ impl From<GpuQueryType> for wgpu_types::QueryType {
pub fn op_webgpu_create_query_set(
state: &mut OpState,
#[serde] args: CreateQuerySetArgs,
-) -> Result<WebGpuResult, AnyError> {
- let device_resource =
- state.resource_table.get::<WebGpuDevice>(args.device_rid)?;
+) -> Result<WebGpuResult, InitError> {
+ let device_resource = state
+ .resource_table
+ .get::<WebGpuDevice>(args.device_rid)
+ .map_err(InitError::Resource)?;
let device = device_resource.1;
let instance = state.borrow::<Instance>();
diff --git a/ext/webgpu/render_pass.rs b/ext/webgpu/render_pass.rs
index c68be3d99..9b9d87d9f 100644
--- a/ext/webgpu/render_pass.rs
+++ b/ext/webgpu/render_pass.rs
@@ -1,7 +1,5 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
-use deno_core::error::type_error;
-use deno_core::error::AnyError;
use deno_core::op2;
use deno_core::OpState;
use deno_core::Resource;
@@ -12,6 +10,14 @@ use std::cell::RefCell;
use super::error::WebGpuResult;
+#[derive(Debug, thiserror::Error)]
+pub enum RenderPassError {
+ #[error(transparent)]
+ Resource(deno_core::error::AnyError),
+ #[error("size must be larger than 0")]
+ InvalidSize,
+}
+
pub(crate) struct WebGpuRenderPass(
pub(crate) RefCell<wgpu_core::command::RenderPass>,
);
@@ -38,7 +44,7 @@ pub struct RenderPassSetViewportArgs {
pub fn op_webgpu_render_pass_set_viewport(
state: &mut OpState,
#[serde] args: RenderPassSetViewportArgs,
-) -> Result<WebGpuResult, AnyError> {
+) -> Result<WebGpuResult, deno_core::error::AnyError> {
let render_pass_resource = state
.resource_table
.get::<WebGpuRenderPass>(args.render_pass_rid)?;
@@ -65,7 +71,7 @@ pub fn op_webgpu_render_pass_set_scissor_rect(
y: u32,
width: u32,
height: u32,
-) -> Result<WebGpuResult, AnyError> {
+) -> Result<WebGpuResult, deno_core::error::AnyError> {
let render_pass_resource = state
.resource_table
.get::<WebGpuRenderPass>(render_pass_rid)?;
@@ -87,7 +93,7 @@ pub fn op_webgpu_render_pass_set_blend_constant(
state: &mut OpState,
#[smi] render_pass_rid: ResourceId,
#[serde] color: wgpu_types::Color,
-) -> Result<WebGpuResult, AnyError> {
+) -> Result<WebGpuResult, deno_core::error::AnyError> {
let render_pass_resource = state
.resource_table
.get::<WebGpuRenderPass>(render_pass_rid)?;
@@ -106,7 +112,7 @@ pub fn op_webgpu_render_pass_set_stencil_reference(
state: &mut OpState,
#[smi] render_pass_rid: ResourceId,
reference: u32,
-) -> Result<WebGpuResult, AnyError> {
+) -> Result<WebGpuResult, deno_core::error::AnyError> {
let render_pass_resource = state
.resource_table
.get::<WebGpuRenderPass>(render_pass_rid)?;
@@ -125,7 +131,7 @@ pub fn op_webgpu_render_pass_begin_occlusion_query(
state: &mut OpState,
#[smi] render_pass_rid: ResourceId,
query_index: u32,
-) -> Result<WebGpuResult, AnyError> {
+) -> Result<WebGpuResult, deno_core::error::AnyError> {
let render_pass_resource = state
.resource_table
.get::<WebGpuRenderPass>(render_pass_rid)?;
@@ -143,7 +149,7 @@ pub fn op_webgpu_render_pass_begin_occlusion_query(
pub fn op_webgpu_render_pass_end_occlusion_query(
state: &mut OpState,
#[smi] render_pass_rid: ResourceId,
-) -> Result<WebGpuResult, AnyError> {
+) -> Result<WebGpuResult, deno_core::error::AnyError> {
let render_pass_resource = state
.resource_table
.get::<WebGpuRenderPass>(render_pass_rid)?;
@@ -161,7 +167,7 @@ pub fn op_webgpu_render_pass_execute_bundles(
state: &mut OpState,
#[smi] render_pass_rid: ResourceId,
#[serde] bundles: Vec<u32>,
-) -> Result<WebGpuResult, AnyError> {
+) -> Result<WebGpuResult, deno_core::error::AnyError> {
let bundles = bundles
.iter()
.map(|rid| {
@@ -171,7 +177,7 @@ pub fn op_webgpu_render_pass_execute_bundles(
.get::<super::bundle::WebGpuRenderBundle>(*rid)?;
Ok(render_bundle_resource.1)
})
- .collect::<Result<Vec<_>, AnyError>>()?;
+ .collect::<Result<Vec<_>, deno_core::error::AnyError>>()?;
let render_pass_resource = state
.resource_table
@@ -191,7 +197,7 @@ pub fn op_webgpu_render_pass_end(
state: &mut OpState,
#[smi] command_encoder_rid: ResourceId,
#[smi] render_pass_rid: ResourceId,
-) -> Result<WebGpuResult, AnyError> {
+) -> Result<WebGpuResult, deno_core::error::AnyError> {
let command_encoder_resource = state
.resource_table
.get::<super::command_encoder::WebGpuCommandEncoder>(
@@ -217,7 +223,7 @@ pub fn op_webgpu_render_pass_set_bind_group(
#[buffer] dynamic_offsets_data: &[u32],
#[number] dynamic_offsets_data_start: usize,
#[number] dynamic_offsets_data_length: usize,
-) -> Result<WebGpuResult, AnyError> {
+) -> Result<WebGpuResult, deno_core::error::AnyError> {
let bind_group_resource =
state
.resource_table
@@ -251,7 +257,7 @@ pub fn op_webgpu_render_pass_push_debug_group(
state: &mut OpState,
#[smi] render_pass_rid: ResourceId,
#[string] group_label: &str,
-) -> Result<WebGpuResult, AnyError> {
+) -> Result<WebGpuResult, deno_core::error::AnyError> {
let render_pass_resource = state
.resource_table
.get::<WebGpuRenderPass>(render_pass_rid)?;
@@ -270,7 +276,7 @@ pub fn op_webgpu_render_pass_push_debug_group(
pub fn op_webgpu_render_pass_pop_debug_group(
state: &mut OpState,
#[smi] render_pass_rid: ResourceId,
-) -> Result<WebGpuResult, AnyError> {
+) -> Result<WebGpuResult, deno_core::error::AnyError> {
let render_pass_resource = state
.resource_table
.get::<WebGpuRenderPass>(render_pass_rid)?;
@@ -288,7 +294,7 @@ pub fn op_webgpu_render_pass_insert_debug_marker(
state: &mut OpState,
#[smi] render_pass_rid: ResourceId,
#[string] marker_label: &str,
-) -> Result<WebGpuResult, AnyError> {
+) -> Result<WebGpuResult, deno_core::error::AnyError> {
let render_pass_resource = state
.resource_table
.get::<WebGpuRenderPass>(render_pass_rid)?;
@@ -308,7 +314,7 @@ pub fn op_webgpu_render_pass_set_pipeline(
state: &mut OpState,
#[smi] render_pass_rid: ResourceId,
pipeline: u32,
-) -> Result<WebGpuResult, AnyError> {
+) -> Result<WebGpuResult, deno_core::error::AnyError> {
let render_pipeline_resource =
state
.resource_table
@@ -334,19 +340,18 @@ pub fn op_webgpu_render_pass_set_index_buffer(
#[serde] index_format: wgpu_types::IndexFormat,
#[number] offset: u64,
#[number] size: Option<u64>,
-) -> Result<WebGpuResult, AnyError> {
+) -> Result<WebGpuResult, RenderPassError> {
let buffer_resource = state
.resource_table
- .get::<super::buffer::WebGpuBuffer>(buffer)?;
+ .get::<super::buffer::WebGpuBuffer>(buffer)
+ .map_err(RenderPassError::Resource)?;
let render_pass_resource = state
.resource_table
- .get::<WebGpuRenderPass>(render_pass_rid)?;
+ .get::<WebGpuRenderPass>(render_pass_rid)
+ .map_err(RenderPassError::Resource)?;
let size = if let Some(size) = size {
- Some(
- std::num::NonZeroU64::new(size)
- .ok_or_else(|| type_error("size must be larger than 0"))?,
- )
+ Some(std::num::NonZeroU64::new(size).ok_or(RenderPassError::InvalidSize)?)
} else {
None
};
@@ -370,19 +375,18 @@ pub fn op_webgpu_render_pass_set_vertex_buffer(
buffer: u32,
#[number] offset: u64,
#[number] size: Option<u64>,
-) -> Result<WebGpuResult, AnyError> {
+) -> Result<WebGpuResult, RenderPassError> {
let buffer_resource = state
.resource_table
- .get::<super::buffer::WebGpuBuffer>(buffer)?;
+ .get::<super::buffer::WebGpuBuffer>(buffer)
+ .map_err(RenderPassError::Resource)?;
let render_pass_resource = state
.resource_table
- .get::<WebGpuRenderPass>(render_pass_rid)?;
+ .get::<WebGpuRenderPass>(render_pass_rid)
+ .map_err(RenderPassError::Resource)?;
let size = if let Some(size) = size {
- Some(
- std::num::NonZeroU64::new(size)
- .ok_or_else(|| type_error("size must be larger than 0"))?,
- )
+ Some(std::num::NonZeroU64::new(size).ok_or(RenderPassError::InvalidSize)?)
} else {
None
};
@@ -407,7 +411,7 @@ pub fn op_webgpu_render_pass_draw(
instance_count: u32,
first_vertex: u32,
first_instance: u32,
-) -> Result<WebGpuResult, AnyError> {
+) -> Result<WebGpuResult, deno_core::error::AnyError> {
let render_pass_resource = state
.resource_table
.get::<WebGpuRenderPass>(render_pass_rid)?;
@@ -433,7 +437,7 @@ pub fn op_webgpu_render_pass_draw_indexed(
first_index: u32,
base_vertex: i32,
first_instance: u32,
-) -> Result<WebGpuResult, AnyError> {
+) -> Result<WebGpuResult, deno_core::error::AnyError> {
let render_pass_resource = state
.resource_table
.get::<WebGpuRenderPass>(render_pass_rid)?;
@@ -457,7 +461,7 @@ pub fn op_webgpu_render_pass_draw_indirect(
#[smi] render_pass_rid: ResourceId,
indirect_buffer: u32,
#[number] indirect_offset: u64,
-) -> Result<WebGpuResult, AnyError> {
+) -> Result<WebGpuResult, deno_core::error::AnyError> {
let buffer_resource = state
.resource_table
.get::<super::buffer::WebGpuBuffer>(indirect_buffer)?;
@@ -481,7 +485,7 @@ pub fn op_webgpu_render_pass_draw_indexed_indirect(
#[smi] render_pass_rid: ResourceId,
indirect_buffer: u32,
#[number] indirect_offset: u64,
-) -> Result<WebGpuResult, AnyError> {
+) -> Result<WebGpuResult, deno_core::error::AnyError> {
let buffer_resource = state
.resource_table
.get::<super::buffer::WebGpuBuffer>(indirect_buffer)?;
diff --git a/ext/webgpu/sampler.rs b/ext/webgpu/sampler.rs
index 27c36802e..9fc1269ea 100644
--- a/ext/webgpu/sampler.rs
+++ b/ext/webgpu/sampler.rs
@@ -1,6 +1,5 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
-use deno_core::error::AnyError;
use deno_core::op2;
use deno_core::OpState;
use deno_core::Resource;
@@ -47,7 +46,7 @@ pub struct CreateSamplerArgs {
pub fn op_webgpu_create_sampler(
state: &mut OpState,
#[serde] args: CreateSamplerArgs,
-) -> Result<WebGpuResult, AnyError> {
+) -> Result<WebGpuResult, deno_core::error::AnyError> {
let instance = state.borrow::<super::Instance>();
let device_resource = state
.resource_table
diff --git a/ext/webgpu/shader.rs b/ext/webgpu/shader.rs
index 0b3991c5d..4653bd85b 100644
--- a/ext/webgpu/shader.rs
+++ b/ext/webgpu/shader.rs
@@ -1,6 +1,5 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
-use deno_core::error::AnyError;
use deno_core::op2;
use deno_core::OpState;
use deno_core::Resource;
@@ -31,7 +30,7 @@ pub fn op_webgpu_create_shader_module(
#[smi] device_rid: ResourceId,
#[string] label: Cow<str>,
#[string] code: Cow<str>,
-) -> Result<WebGpuResult, AnyError> {
+) -> Result<WebGpuResult, deno_core::error::AnyError> {
let instance = state.borrow::<super::Instance>();
let device_resource = state
.resource_table
diff --git a/ext/webgpu/surface.rs b/ext/webgpu/surface.rs
index 1f6d2c87d..297eaeb00 100644
--- a/ext/webgpu/surface.rs
+++ b/ext/webgpu/surface.rs
@@ -1,7 +1,6 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
use super::WebGpuResult;
-use deno_core::error::AnyError;
use deno_core::op2;
use deno_core::OpState;
use deno_core::Resource;
@@ -11,6 +10,16 @@ use std::borrow::Cow;
use std::rc::Rc;
use wgpu_types::SurfaceStatus;
+#[derive(Debug, thiserror::Error)]
+pub enum SurfaceError {
+ #[error(transparent)]
+ Resource(deno_core::error::AnyError),
+ #[error("Invalid Surface Status")]
+ InvalidStatus,
+ #[error(transparent)]
+ Surface(wgpu_core::present::SurfaceError),
+}
+
pub struct WebGpuSurface(pub crate::Instance, pub wgpu_core::id::SurfaceId);
impl Resource for WebGpuSurface {
fn name(&self) -> Cow<str> {
@@ -41,7 +50,7 @@ pub struct SurfaceConfigureArgs {
pub fn op_webgpu_surface_configure(
state: &mut OpState,
#[serde] args: SurfaceConfigureArgs,
-) -> Result<WebGpuResult, AnyError> {
+) -> Result<WebGpuResult, deno_core::error::AnyError> {
let instance = state.borrow::<super::Instance>();
let device_resource = state
.resource_table
@@ -75,18 +84,22 @@ pub fn op_webgpu_surface_get_current_texture(
state: &mut OpState,
#[smi] device_rid: ResourceId,
#[smi] surface_rid: ResourceId,
-) -> Result<WebGpuResult, AnyError> {
+) -> Result<WebGpuResult, SurfaceError> {
let instance = state.borrow::<super::Instance>();
let device_resource = state
.resource_table
- .get::<super::WebGpuDevice>(device_rid)?;
+ .get::<super::WebGpuDevice>(device_rid)
+ .map_err(SurfaceError::Resource)?;
let device = device_resource.1;
- let surface_resource =
- state.resource_table.get::<WebGpuSurface>(surface_rid)?;
+ let surface_resource = state
+ .resource_table
+ .get::<WebGpuSurface>(surface_rid)
+ .map_err(SurfaceError::Resource)?;
let surface = surface_resource.1;
let output =
- gfx_select!(device => instance.surface_get_current_texture(surface, None))?;
+ gfx_select!(device => instance.surface_get_current_texture(surface, None))
+ .map_err(SurfaceError::Surface)?;
match output.status {
SurfaceStatus::Good | SurfaceStatus::Suboptimal => {
@@ -98,7 +111,7 @@ pub fn op_webgpu_surface_get_current_texture(
});
Ok(WebGpuResult::rid(rid))
}
- _ => Err(AnyError::msg("Invalid Surface Status")),
+ _ => Err(SurfaceError::InvalidStatus),
}
}
@@ -107,17 +120,21 @@ pub fn op_webgpu_surface_present(
state: &mut OpState,
#[smi] device_rid: ResourceId,
#[smi] surface_rid: ResourceId,
-) -> Result<(), AnyError> {
+) -> Result<(), SurfaceError> {
let instance = state.borrow::<super::Instance>();
let device_resource = state
.resource_table
- .get::<super::WebGpuDevice>(device_rid)?;
+ .get::<super::WebGpuDevice>(device_rid)
+ .map_err(SurfaceError::Resource)?;
let device = device_resource.1;
- let surface_resource =
- state.resource_table.get::<WebGpuSurface>(surface_rid)?;
+ let surface_resource = state
+ .resource_table
+ .get::<WebGpuSurface>(surface_rid)
+ .map_err(SurfaceError::Resource)?;
let surface = surface_resource.1;
- let _ = gfx_select!(device => instance.surface_present(surface))?;
+ let _ = gfx_select!(device => instance.surface_present(surface))
+ .map_err(SurfaceError::Surface)?;
Ok(())
}
diff --git a/ext/webgpu/texture.rs b/ext/webgpu/texture.rs
index 44edd1a88..f8a5e05a3 100644
--- a/ext/webgpu/texture.rs
+++ b/ext/webgpu/texture.rs
@@ -1,6 +1,5 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
-use deno_core::error::AnyError;
use deno_core::op2;
use deno_core::OpState;
use deno_core::Resource;
@@ -62,7 +61,7 @@ pub struct CreateTextureArgs {
pub fn op_webgpu_create_texture(
state: &mut OpState,
#[serde] args: CreateTextureArgs,
-) -> Result<WebGpuResult, AnyError> {
+) -> Result<WebGpuResult, deno_core::error::AnyError> {
let instance = state.borrow::<super::Instance>();
let device_resource = state
.resource_table
@@ -111,7 +110,7 @@ pub struct CreateTextureViewArgs {
pub fn op_webgpu_create_texture_view(
state: &mut OpState,
#[serde] args: CreateTextureViewArgs,
-) -> Result<WebGpuResult, AnyError> {
+) -> Result<WebGpuResult, deno_core::error::AnyError> {
let instance = state.borrow::<super::Instance>();
let texture_resource = state
.resource_table
diff --git a/ext/webidl/00_webidl.js b/ext/webidl/00_webidl.js
index 1d05aae5f..eb18cbcc3 100644
--- a/ext/webidl/00_webidl.js
+++ b/ext/webidl/00_webidl.js
@@ -26,6 +26,7 @@ const {
Float32Array,
Float64Array,
FunctionPrototypeBind,
+ FunctionPrototypeCall,
Int16Array,
Int32Array,
Int8Array,
@@ -77,6 +78,7 @@ const {
StringPrototypeToWellFormed,
Symbol,
SymbolIterator,
+ SymbolAsyncIterator,
SymbolToStringTag,
TypedArrayPrototypeGetBuffer,
TypedArrayPrototypeGetSymbolToStringTag,
@@ -920,6 +922,127 @@ function createSequenceConverter(converter) {
};
}
+function isAsyncIterable(obj) {
+ if (obj[SymbolAsyncIterator] === undefined) {
+ if (obj[SymbolIterator] === undefined) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+const AsyncIterable = Symbol("[[asyncIterable]]");
+
+function createAsyncIterableConverter(converter) {
+ return function (
+ V,
+ prefix = undefined,
+ context = undefined,
+ opts = { __proto__: null },
+ ) {
+ if (type(V) !== "Object") {
+ throw makeException(
+ TypeError,
+ "can not be converted to async iterable.",
+ prefix,
+ context,
+ );
+ }
+
+ let isAsync = true;
+ let method = V[SymbolAsyncIterator];
+ if (method === undefined) {
+ method = V[SymbolIterator];
+
+ if (method === undefined) {
+ throw makeException(
+ TypeError,
+ "is not iterable.",
+ prefix,
+ context,
+ );
+ }
+
+ isAsync = false;
+ }
+
+ return {
+ value: V,
+ [AsyncIterable]: AsyncIterable,
+ open(context) {
+ const iter = FunctionPrototypeCall(method, V);
+ if (type(iter) !== "Object") {
+ throw new TypeError(
+ `${context} could not be iterated because iterator method did not return object, but ${
+ type(iter)
+ }.`,
+ );
+ }
+
+ let asyncIterator = iter;
+
+ if (!isAsync) {
+ asyncIterator = {
+ // deno-lint-ignore require-await
+ async next() {
+ // deno-lint-ignore prefer-primordials
+ return iter.next();
+ },
+ };
+ }
+
+ return {
+ async next() {
+ // deno-lint-ignore prefer-primordials
+ const iterResult = await asyncIterator.next();
+ if (type(iterResult) !== "Object") {
+ throw TypeError(
+ `${context} failed to iterate next value because the next() method did not return an object, but ${
+ type(iterResult)
+ }.`,
+ );
+ }
+
+ if (iterResult.done) {
+ return { done: true };
+ }
+
+ const iterValue = converter(
+ iterResult.value,
+ `${context} failed to iterate next value`,
+ `The value returned from the next() method`,
+ opts,
+ );
+
+ return { done: false, value: iterValue };
+ },
+ async return(reason) {
+ if (asyncIterator.return === undefined) {
+ return undefined;
+ }
+
+ // deno-lint-ignore prefer-primordials
+ const returnPromiseResult = await asyncIterator.return(reason);
+ if (type(returnPromiseResult) !== "Object") {
+ throw TypeError(
+ `${context} failed to close iterator because the return() method did not return an object, but ${
+ type(returnPromiseResult)
+ }.`,
+ );
+ }
+
+ return undefined;
+ },
+ [SymbolAsyncIterator]() {
+ return this;
+ },
+ };
+ },
+ };
+ };
+}
+
function createRecordConverter(keyConverter, valueConverter) {
return (V, prefix, context, opts) => {
if (type(V) !== "Object") {
@@ -1302,9 +1425,11 @@ function setlike(obj, objPrototype, readonly) {
export {
assertBranded,
+ AsyncIterable,
brand,
configureInterface,
converters,
+ createAsyncIterableConverter,
createBranded,
createDictionaryConverter,
createEnumConverter,
@@ -1315,6 +1440,7 @@ export {
createSequenceConverter,
illegalConstructor,
invokeCallbackFunction,
+ isAsyncIterable,
makeException,
mixinPairIterable,
requiredArguments,
diff --git a/ext/webidl/Cargo.toml b/ext/webidl/Cargo.toml
index 8a25be366..8c3f6f612 100644
--- a/ext/webidl/Cargo.toml
+++ b/ext/webidl/Cargo.toml
@@ -2,7 +2,7 @@
[package]
name = "deno_webidl"
-version = "0.171.0"
+version = "0.177.0"
authors.workspace = true
edition.workspace = true
license.workspace = true
diff --git a/ext/webidl/internal.d.ts b/ext/webidl/internal.d.ts
index 1ce45463e..375d548d3 100644
--- a/ext/webidl/internal.d.ts
+++ b/ext/webidl/internal.d.ts
@@ -439,6 +439,27 @@ declare module "ext:deno_webidl/00_webidl.js" {
) => T[];
/**
+ * Create a converter that converts an async iterable of the inner type.
+ */
+ function createAsyncIterableConverter<V, T>(
+ converter: (
+ v: V,
+ prefix?: string,
+ context?: string,
+ opts?: any,
+ ) => T,
+ ): (
+ v: any,
+ prefix?: string,
+ context?: string,
+ opts?: any,
+ ) => ConvertedAsyncIterable<V, T>;
+
+ interface ConvertedAsyncIterable<V, T> extends AsyncIterableIterator<T> {
+ value: V;
+ }
+
+ /**
* Create a converter that converts a Promise of the inner type.
*/
function createPromiseConverter<T>(
@@ -559,4 +580,9 @@ declare module "ext:deno_webidl/00_webidl.js" {
| "Symbol"
| "BigInt"
| "Object";
+
+ /**
+ * Check whether a value is an async iterable.
+ */
+ function isAsyncIterable(v: any): boolean;
}
diff --git a/ext/websocket/01_websocket.js b/ext/websocket/01_websocket.js
index 58f477310..468999b95 100644
--- a/ext/websocket/01_websocket.js
+++ b/ext/websocket/01_websocket.js
@@ -28,6 +28,7 @@ const {
ArrayPrototypePush,
ArrayPrototypeShift,
ArrayPrototypeSome,
+ Error,
ErrorPrototypeToString,
ObjectDefineProperties,
ObjectPrototypeIsPrototypeOf,
@@ -488,8 +489,11 @@ class WebSocket extends EventTarget {
/* error */
this[_readyState] = CLOSED;
+ const message = op_ws_get_error(rid);
+ const error = new Error(message);
const errorEv = new ErrorEvent("error", {
- message: op_ws_get_error(rid),
+ error,
+ message,
});
this.dispatchEvent(errorEv);
diff --git a/ext/websocket/Cargo.toml b/ext/websocket/Cargo.toml
index 6bef387b4..61f1f5959 100644
--- a/ext/websocket/Cargo.toml
+++ b/ext/websocket/Cargo.toml
@@ -2,7 +2,7 @@
[package]
name = "deno_websocket"
-version = "0.176.0"
+version = "0.182.0"
authors.workspace = true
edition.workspace = true
license.workspace = true
@@ -28,4 +28,5 @@ hyper-util.workspace = true
once_cell.workspace = true
rustls-tokio-stream.workspace = true
serde.workspace = true
+thiserror.workspace = true
tokio.workspace = true
diff --git a/ext/websocket/lib.rs b/ext/websocket/lib.rs
index b8043516b..a5734271c 100644
--- a/ext/websocket/lib.rs
+++ b/ext/websocket/lib.rs
@@ -1,10 +1,6 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
use crate::stream::WebSocketStream;
use bytes::Bytes;
-use deno_core::anyhow::bail;
-use deno_core::error::invalid_hostname;
-use deno_core::error::type_error;
-use deno_core::error::AnyError;
use deno_core::futures::TryFutureExt;
use deno_core::op2;
use deno_core::unsync::spawn;
@@ -43,7 +39,6 @@ use serde::Serialize;
use std::borrow::Cow;
use std::cell::Cell;
use std::cell::RefCell;
-use std::fmt;
use std::future::Future;
use std::num::NonZeroUsize;
use std::path::PathBuf;
@@ -55,6 +50,7 @@ use tokio::io::ReadHalf;
use tokio::io::WriteHalf;
use tokio::net::TcpStream;
+use deno_permissions::PermissionCheckError;
use fastwebsockets::CloseCode;
use fastwebsockets::FragmentCollectorRead;
use fastwebsockets::Frame;
@@ -75,11 +71,33 @@ static USE_WRITEV: Lazy<bool> = Lazy::new(|| {
false
});
+#[derive(Debug, thiserror::Error)]
+pub enum WebsocketError {
+ #[error(transparent)]
+ Url(url::ParseError),
+ #[error(transparent)]
+ Permission(#[from] PermissionCheckError),
+ #[error(transparent)]
+ Resource(deno_core::error::AnyError),
+ #[error(transparent)]
+ Uri(#[from] http::uri::InvalidUri),
+ #[error("{0}")]
+ Io(#[from] std::io::Error),
+ #[error(transparent)]
+ WebSocket(#[from] fastwebsockets::WebSocketError),
+ #[error("failed to connect to WebSocket: {0}")]
+ ConnectionFailed(#[from] HandshakeError),
+ #[error(transparent)]
+ Canceled(#[from] deno_core::Canceled),
+}
+
#[derive(Clone)]
pub struct WsRootStoreProvider(Option<Arc<dyn RootCertStoreProvider>>);
impl WsRootStoreProvider {
- pub fn get_or_try_init(&self) -> Result<Option<RootCertStore>, AnyError> {
+ pub fn get_or_try_init(
+ &self,
+ ) -> Result<Option<RootCertStore>, deno_core::error::AnyError> {
Ok(match &self.0 {
Some(provider) => Some(provider.get_or_try_init()?.clone()),
None => None,
@@ -95,7 +113,7 @@ pub trait WebSocketPermissions {
&mut self,
_url: &url::Url,
_api_name: &str,
- ) -> Result<(), AnyError>;
+ ) -> Result<(), PermissionCheckError>;
}
impl WebSocketPermissions for deno_permissions::PermissionsContainer {
@@ -104,7 +122,7 @@ impl WebSocketPermissions for deno_permissions::PermissionsContainer {
&mut self,
url: &url::Url,
api_name: &str,
- ) -> Result<(), AnyError> {
+ ) -> Result<(), PermissionCheckError> {
deno_permissions::PermissionsContainer::check_net_url(self, url, api_name)
}
}
@@ -137,13 +155,14 @@ pub fn op_ws_check_permission_and_cancel_handle<WP>(
#[string] api_name: String,
#[string] url: String,
cancel_handle: bool,
-) -> Result<Option<ResourceId>, AnyError>
+) -> Result<Option<ResourceId>, WebsocketError>
where
WP: WebSocketPermissions + 'static,
{
- state
- .borrow_mut::<WP>()
- .check_net_url(&url::Url::parse(&url)?, &api_name)?;
+ state.borrow_mut::<WP>().check_net_url(
+ &url::Url::parse(&url).map_err(WebsocketError::Url)?,
+ &api_name,
+ )?;
if cancel_handle {
let rid = state
@@ -163,16 +182,46 @@ pub struct CreateResponse {
extensions: String,
}
+#[derive(Debug, thiserror::Error)]
+pub enum HandshakeError {
+ #[error("Missing path in url")]
+ MissingPath,
+ #[error("Invalid status code {0}")]
+ InvalidStatusCode(StatusCode),
+ #[error(transparent)]
+ Http(#[from] http::Error),
+ #[error(transparent)]
+ WebSocket(#[from] fastwebsockets::WebSocketError),
+ #[error("Didn't receive h2 alpn, aborting connection")]
+ NoH2Alpn,
+ #[error(transparent)]
+ Rustls(#[from] deno_tls::rustls::Error),
+ #[error(transparent)]
+ Io(#[from] std::io::Error),
+ #[error(transparent)]
+ H2(#[from] h2::Error),
+ #[error("Invalid hostname: '{0}'")]
+ InvalidHostname(String),
+ #[error(transparent)]
+ RootStoreError(deno_core::error::AnyError),
+ #[error(transparent)]
+ Tls(deno_tls::TlsError),
+ #[error(transparent)]
+ HeaderName(#[from] http::header::InvalidHeaderName),
+ #[error(transparent)]
+ HeaderValue(#[from] http::header::InvalidHeaderValue),
+}
+
async fn handshake_websocket(
state: &Rc<RefCell<OpState>>,
uri: &Uri,
protocols: &str,
headers: Option<Vec<(ByteString, ByteString)>>,
-) -> Result<(WebSocket<WebSocketStream>, http::HeaderMap), AnyError> {
+) -> Result<(WebSocket<WebSocketStream>, http::HeaderMap), HandshakeError> {
let mut request = Request::builder().method(Method::GET).uri(
uri
.path_and_query()
- .ok_or(type_error("Missing path in url".to_string()))?
+ .ok_or(HandshakeError::MissingPath)?
.as_str(),
);
@@ -194,7 +243,9 @@ async fn handshake_websocket(
request =
populate_common_request_headers(request, &user_agent, protocols, &headers)?;
- let request = request.body(http_body_util::Empty::new())?;
+ let request = request
+ .body(http_body_util::Empty::new())
+ .map_err(HandshakeError::Http)?;
let domain = &uri.host().unwrap().to_string();
let port = &uri.port_u16().unwrap_or(match uri.scheme_str() {
Some("wss") => 443,
@@ -231,7 +282,7 @@ async fn handshake_websocket(
async fn handshake_http1_ws(
request: Request<http_body_util::Empty<Bytes>>,
addr: &String,
-) -> Result<(WebSocket<WebSocketStream>, http::HeaderMap), AnyError> {
+) -> Result<(WebSocket<WebSocketStream>, http::HeaderMap), HandshakeError> {
let tcp_socket = TcpStream::connect(addr).await?;
handshake_connection(request, tcp_socket).await
}
@@ -241,11 +292,11 @@ async fn handshake_http1_wss(
request: Request<http_body_util::Empty<Bytes>>,
domain: &str,
addr: &str,
-) -> Result<(WebSocket<WebSocketStream>, http::HeaderMap), AnyError> {
+) -> Result<(WebSocket<WebSocketStream>, http::HeaderMap), HandshakeError> {
let tcp_socket = TcpStream::connect(addr).await?;
let tls_config = create_ws_client_config(state, SocketUse::Http1Only)?;
let dnsname = ServerName::try_from(domain.to_string())
- .map_err(|_| invalid_hostname(domain))?;
+ .map_err(|_| HandshakeError::InvalidHostname(domain.to_string()))?;
let mut tls_connector = TlsStream::new_client_side(
tcp_socket,
ClientConnection::new(tls_config.into(), dnsname)?,
@@ -266,11 +317,11 @@ async fn handshake_http2_wss(
domain: &str,
headers: &Option<Vec<(ByteString, ByteString)>>,
addr: &str,
-) -> Result<(WebSocket<WebSocketStream>, http::HeaderMap), AnyError> {
+) -> Result<(WebSocket<WebSocketStream>, http::HeaderMap), HandshakeError> {
let tcp_socket = TcpStream::connect(addr).await?;
let tls_config = create_ws_client_config(state, SocketUse::Http2Only)?;
let dnsname = ServerName::try_from(domain.to_string())
- .map_err(|_| invalid_hostname(domain))?;
+ .map_err(|_| HandshakeError::InvalidHostname(domain.to_string()))?;
// We need to better expose the underlying errors here
let mut tls_connector = TlsStream::new_client_side(
tcp_socket,
@@ -279,7 +330,7 @@ async fn handshake_http2_wss(
);
let handshake = tls_connector.handshake().await?;
if handshake.alpn.is_none() {
- bail!("Didn't receive h2 alpn, aborting connection");
+ return Err(HandshakeError::NoH2Alpn);
}
let h2 = h2::client::Builder::new();
let (mut send, conn) = h2.handshake::<_, Bytes>(tls_connector).await?;
@@ -298,7 +349,7 @@ async fn handshake_http2_wss(
let (resp, send) = send.send_request(request.body(())?, false)?;
let resp = resp.await?;
if resp.status() != StatusCode::OK {
- bail!("Invalid status code: {}", resp.status());
+ return Err(HandshakeError::InvalidStatusCode(resp.status()));
}
let (http::response::Parts { headers, .. }, recv) = resp.into_parts();
let mut stream = WebSocket::after_handshake(
@@ -317,7 +368,7 @@ async fn handshake_connection<
>(
request: Request<http_body_util::Empty<Bytes>>,
socket: S,
-) -> Result<(WebSocket<WebSocketStream>, http::HeaderMap), AnyError> {
+) -> Result<(WebSocket<WebSocketStream>, http::HeaderMap), HandshakeError> {
let (upgraded, response) =
fastwebsockets::handshake::client(&LocalExecutor, request, socket).await?;
@@ -332,7 +383,7 @@ async fn handshake_connection<
pub fn create_ws_client_config(
state: &Rc<RefCell<OpState>>,
socket_use: SocketUse,
-) -> Result<ClientConfig, AnyError> {
+) -> Result<ClientConfig, HandshakeError> {
let unsafely_ignore_certificate_errors: Option<Vec<String>> = state
.borrow()
.try_borrow::<UnsafelyIgnoreCertificateErrors>()
@@ -340,7 +391,8 @@ pub fn create_ws_client_config(
let root_cert_store = state
.borrow()
.borrow::<WsRootStoreProvider>()
- .get_or_try_init()?;
+ .get_or_try_init()
+ .map_err(HandshakeError::RootStoreError)?;
create_client_config(
root_cert_store,
@@ -349,7 +401,7 @@ pub fn create_ws_client_config(
TlsKeys::Null,
socket_use,
)
- .map_err(|e| e.into())
+ .map_err(HandshakeError::Tls)
}
/// Headers common to both http/1.1 and h2 requests.
@@ -358,7 +410,7 @@ fn populate_common_request_headers(
user_agent: &str,
protocols: &str,
headers: &Option<Vec<(ByteString, ByteString)>>,
-) -> Result<http::request::Builder, AnyError> {
+) -> Result<http::request::Builder, HandshakeError> {
request = request
.header("User-Agent", user_agent)
.header("Sec-WebSocket-Version", "13");
@@ -369,10 +421,8 @@ fn populate_common_request_headers(
if let Some(headers) = headers {
for (key, value) in headers {
- let name = HeaderName::from_bytes(key)
- .map_err(|err| type_error(err.to_string()))?;
- let v = HeaderValue::from_bytes(value)
- .map_err(|err| type_error(err.to_string()))?;
+ let name = HeaderName::from_bytes(key)?;
+ let v = HeaderValue::from_bytes(value)?;
let is_disallowed_header = matches!(
name,
@@ -402,14 +452,17 @@ pub async fn op_ws_create<WP>(
#[string] protocols: String,
#[smi] cancel_handle: Option<ResourceId>,
#[serde] headers: Option<Vec<(ByteString, ByteString)>>,
-) -> Result<CreateResponse, AnyError>
+) -> Result<CreateResponse, WebsocketError>
where
WP: WebSocketPermissions + 'static,
{
{
let mut s = state.borrow_mut();
s.borrow_mut::<WP>()
- .check_net_url(&url::Url::parse(&url)?, &api_name)
+ .check_net_url(
+ &url::Url::parse(&url).map_err(WebsocketError::Url)?,
+ &api_name,
+ )
.expect(
"Permission check should have been done in op_ws_check_permission",
);
@@ -419,7 +472,8 @@ where
let r = state
.borrow_mut()
.resource_table
- .get::<WsCancelResource>(cancel_rid)?;
+ .get::<WsCancelResource>(cancel_rid)
+ .map_err(WebsocketError::Resource)?;
Some(r.0.clone())
} else {
None
@@ -428,15 +482,11 @@ where
let uri: Uri = url.parse()?;
let handshake = handshake_websocket(&state, &uri, &protocols, headers)
- .map_err(|err| {
- AnyError::from(DomExceptionNetworkError::new(&format!(
- "failed to connect to WebSocket: {err}"
- )))
- });
+ .map_err(WebsocketError::ConnectionFailed);
let (stream, response) = match cancel_resource {
- Some(rc) => handshake.try_or_cancel(rc).await,
- None => handshake.await,
- }?;
+ Some(rc) => handshake.try_or_cancel(rc).await?,
+ None => handshake.await?,
+ };
if let Some(cancel_rid) = cancel_handle {
if let Ok(res) = state.borrow_mut().resource_table.take_any(cancel_rid) {
@@ -521,14 +571,12 @@ impl ServerWebSocket {
self: &Rc<Self>,
lock: AsyncMutFuture<WebSocketWrite<WriteHalf<WebSocketStream>>>,
frame: Frame<'_>,
- ) -> Result<(), AnyError> {
+ ) -> Result<(), WebsocketError> {
let mut ws = lock.await;
if ws.is_closed() {
return Ok(());
}
- ws.write_frame(frame)
- .await
- .map_err(|err| type_error(err.to_string()))?;
+ ws.write_frame(frame).await?;
Ok(())
}
}
@@ -543,7 +591,7 @@ pub fn ws_create_server_stream(
state: &mut OpState,
transport: NetworkStream,
read_buf: Bytes,
-) -> Result<ResourceId, AnyError> {
+) -> ResourceId {
let mut ws = WebSocket::after_handshake(
WebSocketStream::new(
stream::WsStreamKind::Network(transport),
@@ -555,8 +603,7 @@ pub fn ws_create_server_stream(
ws.set_auto_close(true);
ws.set_auto_pong(true);
- let rid = state.resource_table.add(ServerWebSocket::new(ws));
- Ok(rid)
+ state.resource_table.add(ServerWebSocket::new(ws))
}
fn send_binary(state: &mut OpState, rid: ResourceId, data: &[u8]) {
@@ -626,11 +673,12 @@ pub async fn op_ws_send_binary_async(
state: Rc<RefCell<OpState>>,
#[smi] rid: ResourceId,
#[buffer] data: JsBuffer,
-) -> Result<(), AnyError> {
+) -> Result<(), WebsocketError> {
let resource = state
.borrow_mut()
.resource_table
- .get::<ServerWebSocket>(rid)?;
+ .get::<ServerWebSocket>(rid)
+ .map_err(WebsocketError::Resource)?;
let data = data.to_vec();
let lock = resource.reserve_lock();
resource
@@ -644,11 +692,12 @@ pub async fn op_ws_send_text_async(
state: Rc<RefCell<OpState>>,
#[smi] rid: ResourceId,
#[string] data: String,
-) -> Result<(), AnyError> {
+) -> Result<(), WebsocketError> {
let resource = state
.borrow_mut()
.resource_table
- .get::<ServerWebSocket>(rid)?;
+ .get::<ServerWebSocket>(rid)
+ .map_err(WebsocketError::Resource)?;
let lock = resource.reserve_lock();
resource
.write_frame(
@@ -678,11 +727,12 @@ pub fn op_ws_get_buffered_amount(
pub async fn op_ws_send_ping(
state: Rc<RefCell<OpState>>,
#[smi] rid: ResourceId,
-) -> Result<(), AnyError> {
+) -> Result<(), WebsocketError> {
let resource = state
.borrow_mut()
.resource_table
- .get::<ServerWebSocket>(rid)?;
+ .get::<ServerWebSocket>(rid)
+ .map_err(WebsocketError::Resource)?;
let lock = resource.reserve_lock();
resource
.write_frame(
@@ -698,7 +748,7 @@ pub async fn op_ws_close(
#[smi] rid: ResourceId,
#[smi] code: Option<u16>,
#[string] reason: Option<String>,
-) -> Result<(), AnyError> {
+) -> Result<(), WebsocketError> {
let Ok(resource) = state
.borrow_mut()
.resource_table
@@ -713,8 +763,7 @@ pub async fn op_ws_close(
resource.closed.set(true);
let lock = resource.reserve_lock();
- resource.write_frame(lock, frame).await?;
- Ok(())
+ resource.write_frame(lock, frame).await
}
#[op2]
@@ -868,32 +917,6 @@ pub fn get_declaration() -> PathBuf {
PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("lib.deno_websocket.d.ts")
}
-#[derive(Debug)]
-pub struct DomExceptionNetworkError {
- pub msg: String,
-}
-
-impl DomExceptionNetworkError {
- pub fn new(msg: &str) -> Self {
- DomExceptionNetworkError {
- msg: msg.to_string(),
- }
- }
-}
-
-impl fmt::Display for DomExceptionNetworkError {
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- f.pad(&self.msg)
- }
-}
-
-impl std::error::Error for DomExceptionNetworkError {}
-
-pub fn get_network_error_class_name(e: &AnyError) -> Option<&'static str> {
- e.downcast_ref::<DomExceptionNetworkError>()
- .map(|_| "DOMExceptionNetworkError")
-}
-
// Needed so hyper can use non Send futures
#[derive(Clone)]
struct LocalExecutor;
diff --git a/ext/webstorage/Cargo.toml b/ext/webstorage/Cargo.toml
index 08ed8b0f2..01e23ab83 100644
--- a/ext/webstorage/Cargo.toml
+++ b/ext/webstorage/Cargo.toml
@@ -2,7 +2,7 @@
[package]
name = "deno_webstorage"
-version = "0.166.0"
+version = "0.172.0"
authors.workspace = true
edition.workspace = true
license.workspace = true
@@ -17,3 +17,4 @@ path = "lib.rs"
deno_core.workspace = true
deno_web.workspace = true
rusqlite.workspace = true
+thiserror.workspace = true
diff --git a/ext/webstorage/lib.rs b/ext/webstorage/lib.rs
index 99e61a180..40946f05a 100644
--- a/ext/webstorage/lib.rs
+++ b/ext/webstorage/lib.rs
@@ -2,10 +2,8 @@
// NOTE to all: use **cached** prepared statements when interfacing with SQLite.
-use std::fmt;
use std::path::PathBuf;
-use deno_core::error::AnyError;
use deno_core::op2;
use deno_core::OpState;
use rusqlite::params;
@@ -14,6 +12,18 @@ use rusqlite::OptionalExtension;
pub use rusqlite;
+#[derive(Debug, thiserror::Error)]
+pub enum WebStorageError {
+ #[error("LocalStorage is not supported in this context.")]
+ ContextNotSupported,
+ #[error(transparent)]
+ Sqlite(#[from] rusqlite::Error),
+ #[error(transparent)]
+ Io(std::io::Error),
+ #[error("Exceeded maximum storage size")]
+ StorageExceeded,
+}
+
#[derive(Clone)]
struct OriginStorageDir(PathBuf);
@@ -51,15 +61,13 @@ struct SessionStorage(Connection);
fn get_webstorage(
state: &mut OpState,
persistent: bool,
-) -> Result<&Connection, AnyError> {
+) -> Result<&Connection, WebStorageError> {
let conn = if persistent {
if state.try_borrow::<LocalStorage>().is_none() {
- let path = state.try_borrow::<OriginStorageDir>().ok_or_else(|| {
- DomExceptionNotSupportedError::new(
- "LocalStorage is not supported in this context.",
- )
- })?;
- std::fs::create_dir_all(&path.0)?;
+ let path = state
+ .try_borrow::<OriginStorageDir>()
+ .ok_or(WebStorageError::ContextNotSupported)?;
+ std::fs::create_dir_all(&path.0).map_err(WebStorageError::Io)?;
let conn = Connection::open(path.0.join("local_storage"))?;
// Enable write-ahead-logging and tweak some other stuff.
let initial_pragmas = "
@@ -106,7 +114,7 @@ fn get_webstorage(
pub fn op_webstorage_length(
state: &mut OpState,
persistent: bool,
-) -> Result<u32, AnyError> {
+) -> Result<u32, WebStorageError> {
let conn = get_webstorage(state, persistent)?;
let mut stmt = conn.prepare_cached("SELECT COUNT(*) FROM data")?;
@@ -121,7 +129,7 @@ pub fn op_webstorage_key(
state: &mut OpState,
#[smi] index: u32,
persistent: bool,
-) -> Result<Option<String>, AnyError> {
+) -> Result<Option<String>, WebStorageError> {
let conn = get_webstorage(state, persistent)?;
let mut stmt =
@@ -135,14 +143,9 @@ pub fn op_webstorage_key(
}
#[inline]
-fn size_check(input: usize) -> Result<(), AnyError> {
+fn size_check(input: usize) -> Result<(), WebStorageError> {
if input >= MAX_STORAGE_BYTES {
- return Err(
- deno_web::DomExceptionQuotaExceededError::new(
- "Exceeded maximum storage size",
- )
- .into(),
- );
+ return Err(WebStorageError::StorageExceeded);
}
Ok(())
@@ -154,7 +157,7 @@ pub fn op_webstorage_set(
#[string] key: &str,
#[string] value: &str,
persistent: bool,
-) -> Result<(), AnyError> {
+) -> Result<(), WebStorageError> {
let conn = get_webstorage(state, persistent)?;
size_check(key.len() + value.len())?;
@@ -178,7 +181,7 @@ pub fn op_webstorage_get(
state: &mut OpState,
#[string] key_name: String,
persistent: bool,
-) -> Result<Option<String>, AnyError> {
+) -> Result<Option<String>, WebStorageError> {
let conn = get_webstorage(state, persistent)?;
let mut stmt = conn.prepare_cached("SELECT value FROM data WHERE key = ?")?;
@@ -194,7 +197,7 @@ pub fn op_webstorage_remove(
state: &mut OpState,
#[string] key_name: &str,
persistent: bool,
-) -> Result<(), AnyError> {
+) -> Result<(), WebStorageError> {
let conn = get_webstorage(state, persistent)?;
let mut stmt = conn.prepare_cached("DELETE FROM data WHERE key = ?")?;
@@ -207,7 +210,7 @@ pub fn op_webstorage_remove(
pub fn op_webstorage_clear(
state: &mut OpState,
persistent: bool,
-) -> Result<(), AnyError> {
+) -> Result<(), WebStorageError> {
let conn = get_webstorage(state, persistent)?;
let mut stmt = conn.prepare_cached("DELETE FROM data")?;
@@ -221,7 +224,7 @@ pub fn op_webstorage_clear(
pub fn op_webstorage_iterate_keys(
state: &mut OpState,
persistent: bool,
-) -> Result<Vec<String>, AnyError> {
+) -> Result<Vec<String>, WebStorageError> {
let conn = get_webstorage(state, persistent)?;
let mut stmt = conn.prepare_cached("SELECT key FROM data")?;
@@ -232,31 +235,3 @@ pub fn op_webstorage_iterate_keys(
Ok(keys)
}
-
-#[derive(Debug)]
-pub struct DomExceptionNotSupportedError {
- pub msg: String,
-}
-
-impl DomExceptionNotSupportedError {
- pub fn new(msg: &str) -> Self {
- DomExceptionNotSupportedError {
- msg: msg.to_string(),
- }
- }
-}
-
-impl fmt::Display for DomExceptionNotSupportedError {
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- f.pad(&self.msg)
- }
-}
-
-impl std::error::Error for DomExceptionNotSupportedError {}
-
-pub fn get_not_supported_error_class_name(
- e: &AnyError,
-) -> Option<&'static str> {
- e.downcast_ref::<DomExceptionNotSupportedError>()
- .map(|_| "DOMExceptionNotSupportedError")
-}