summaryrefslogtreecommitdiff
path: root/ext/node/ops
diff options
context:
space:
mode:
authorhaturau <135221985+haturatu@users.noreply.github.com>2024-11-20 01:20:47 +0900
committerGitHub <noreply@github.com>2024-11-20 01:20:47 +0900
commit85719a67e59c7aa45bead26e4942d7df8b1b42d4 (patch)
treeface0aecaac53e93ce2f23b53c48859bcf1a36ec /ext/node/ops
parent67697bc2e4a62a9670699fd18ad0dd8efc5bd955 (diff)
parent186b52731c6bb326c4d32905c5e732d082e83465 (diff)
Merge branch 'denoland:main' into main
Diffstat (limited to 'ext/node/ops')
-rw-r--r--ext/node/ops/blocklist.rs53
-rw-r--r--ext/node/ops/crypto/cipher.rs145
-rw-r--r--ext/node/ops/crypto/digest.rs30
-rw-r--r--ext/node/ops/crypto/keys.rs683
-rw-r--r--ext/node/ops/crypto/mod.rs416
-rw-r--r--ext/node/ops/crypto/sign.rs168
-rw-r--r--ext/node/ops/crypto/x509.rs66
-rw-r--r--ext/node/ops/fs.rs61
-rw-r--r--ext/node/ops/http.rs91
-rw-r--r--ext/node/ops/http2.rs83
-rw-r--r--ext/node/ops/idna.rs47
-rw-r--r--ext/node/ops/inspector.rs161
-rw-r--r--ext/node/ops/ipc.rs59
-rw-r--r--ext/node/ops/mod.rs2
-rw-r--r--ext/node/ops/os/mod.rs194
-rw-r--r--ext/node/ops/os/priority.rs30
-rw-r--r--ext/node/ops/perf_hooks.rs135
-rw-r--r--ext/node/ops/process.rs3
-rw-r--r--ext/node/ops/require.rs200
-rw-r--r--ext/node/ops/util.rs3
-rw-r--r--ext/node/ops/v8.rs25
-rw-r--r--ext/node/ops/winerror.rs3
-rw-r--r--ext/node/ops/worker_threads.rs77
-rw-r--r--ext/node/ops/zlib/brotli.rs77
-rw-r--r--ext/node/ops/zlib/mod.rs86
-rw-r--r--ext/node/ops/zlib/mode.rs21
26 files changed, 1968 insertions, 951 deletions
diff --git a/ext/node/ops/blocklist.rs b/ext/node/ops/blocklist.rs
index 332cdda8f..6c64d68ec 100644
--- a/ext/node/ops/blocklist.rs
+++ b/ext/node/ops/blocklist.rs
@@ -7,9 +7,6 @@ use std::net::Ipv4Addr;
use std::net::Ipv6Addr;
use std::net::SocketAddr;
-use deno_core::anyhow::anyhow;
-use deno_core::anyhow::bail;
-use deno_core::error::AnyError;
use deno_core::op2;
use deno_core::OpState;
@@ -27,13 +24,25 @@ impl deno_core::GarbageCollected for BlockListResource {}
#[derive(Serialize)]
struct SocketAddressSerialization(String, String);
+#[derive(Debug, thiserror::Error)]
+pub enum BlocklistError {
+ #[error("{0}")]
+ AddrParse(#[from] std::net::AddrParseError),
+ #[error("{0}")]
+ IpNetwork(#[from] ipnetwork::IpNetworkError),
+ #[error("Invalid address")]
+ InvalidAddress,
+ #[error("IP version mismatch between start and end addresses")]
+ IpVersionMismatch,
+}
+
#[op2(fast)]
pub fn op_socket_address_parse(
state: &mut OpState,
#[string] addr: &str,
#[smi] port: u16,
#[string] family: &str,
-) -> Result<bool, AnyError> {
+) -> Result<bool, BlocklistError> {
let ip = addr.parse::<IpAddr>()?;
let parsed: SocketAddr = SocketAddr::new(ip, port);
let parsed_ip_str = parsed.ip().to_string();
@@ -52,7 +61,7 @@ pub fn op_socket_address_parse(
Ok(false)
}
} else {
- Err(anyhow!("Invalid address"))
+ Err(BlocklistError::InvalidAddress)
}
}
@@ -60,8 +69,8 @@ pub fn op_socket_address_parse(
#[serde]
pub fn op_socket_address_get_serialization(
state: &mut OpState,
-) -> Result<SocketAddressSerialization, AnyError> {
- Ok(state.take::<SocketAddressSerialization>())
+) -> SocketAddressSerialization {
+ state.take::<SocketAddressSerialization>()
}
#[op2]
@@ -77,7 +86,7 @@ pub fn op_blocklist_new() -> BlockListResource {
pub fn op_blocklist_add_address(
#[cppgc] wrap: &BlockListResource,
#[string] addr: &str,
-) -> Result<(), AnyError> {
+) -> Result<(), BlocklistError> {
wrap.blocklist.borrow_mut().add_address(addr)
}
@@ -86,7 +95,7 @@ pub fn op_blocklist_add_range(
#[cppgc] wrap: &BlockListResource,
#[string] start: &str,
#[string] end: &str,
-) -> Result<bool, AnyError> {
+) -> Result<bool, BlocklistError> {
wrap.blocklist.borrow_mut().add_range(start, end)
}
@@ -95,7 +104,7 @@ pub fn op_blocklist_add_subnet(
#[cppgc] wrap: &BlockListResource,
#[string] addr: &str,
#[smi] prefix: u8,
-) -> Result<(), AnyError> {
+) -> Result<(), BlocklistError> {
wrap.blocklist.borrow_mut().add_subnet(addr, prefix)
}
@@ -104,7 +113,7 @@ pub fn op_blocklist_check(
#[cppgc] wrap: &BlockListResource,
#[string] addr: &str,
#[string] r#type: &str,
-) -> Result<bool, AnyError> {
+) -> Result<bool, BlocklistError> {
wrap.blocklist.borrow().check(addr, r#type)
}
@@ -123,7 +132,7 @@ impl BlockList {
&mut self,
addr: IpAddr,
prefix: Option<u8>,
- ) -> Result<(), AnyError> {
+ ) -> Result<(), BlocklistError> {
match addr {
IpAddr::V4(addr) => {
let ipv4_prefix = prefix.unwrap_or(32);
@@ -154,7 +163,7 @@ impl BlockList {
Ok(())
}
- pub fn add_address(&mut self, address: &str) -> Result<(), AnyError> {
+ pub fn add_address(&mut self, address: &str) -> Result<(), BlocklistError> {
let ip: IpAddr = address.parse()?;
self.map_addr_add_network(ip, None)?;
Ok(())
@@ -164,7 +173,7 @@ impl BlockList {
&mut self,
start: &str,
end: &str,
- ) -> Result<bool, AnyError> {
+ ) -> Result<bool, BlocklistError> {
let start_ip: IpAddr = start.parse()?;
let end_ip: IpAddr = end.parse()?;
@@ -193,25 +202,33 @@ impl BlockList {
self.map_addr_add_network(IpAddr::V6(addr), None)?;
}
}
- _ => bail!("IP version mismatch between start and end addresses"),
+ _ => return Err(BlocklistError::IpVersionMismatch),
}
Ok(true)
}
- pub fn add_subnet(&mut self, addr: &str, prefix: u8) -> Result<(), AnyError> {
+ pub fn add_subnet(
+ &mut self,
+ addr: &str,
+ prefix: u8,
+ ) -> Result<(), BlocklistError> {
let ip: IpAddr = addr.parse()?;
self.map_addr_add_network(ip, Some(prefix))?;
Ok(())
}
- pub fn check(&self, addr: &str, r#type: &str) -> Result<bool, AnyError> {
+ pub fn check(
+ &self,
+ addr: &str,
+ r#type: &str,
+ ) -> Result<bool, BlocklistError> {
let addr: IpAddr = addr.parse()?;
let family = r#type.to_lowercase();
if family == "ipv4" && addr.is_ipv4() || family == "ipv6" && addr.is_ipv6()
{
Ok(self.rules.iter().any(|net| net.contains(addr)))
} else {
- Err(anyhow!("Invalid address"))
+ Err(BlocklistError::InvalidAddress)
}
}
}
diff --git a/ext/node/ops/crypto/cipher.rs b/ext/node/ops/crypto/cipher.rs
index b80aa33fe..ec45146b4 100644
--- a/ext/node/ops/crypto/cipher.rs
+++ b/ext/node/ops/crypto/cipher.rs
@@ -4,9 +4,6 @@ use aes::cipher::block_padding::Pkcs7;
use aes::cipher::BlockDecryptMut;
use aes::cipher::BlockEncryptMut;
use aes::cipher::KeyIvInit;
-use deno_core::error::range_error;
-use deno_core::error::type_error;
-use deno_core::error::AnyError;
use deno_core::Resource;
use digest::generic_array::GenericArray;
use digest::KeyInit;
@@ -50,8 +47,22 @@ pub struct DecipherContext {
decipher: Rc<RefCell<Decipher>>,
}
+#[derive(Debug, thiserror::Error)]
+pub enum CipherContextError {
+ #[error("Cipher context is already in use")]
+ ContextInUse,
+ #[error("{0}")]
+ Resource(deno_core::error::AnyError),
+ #[error(transparent)]
+ Cipher(#[from] CipherError),
+}
+
impl CipherContext {
- pub fn new(algorithm: &str, key: &[u8], iv: &[u8]) -> Result<Self, AnyError> {
+ pub fn new(
+ algorithm: &str,
+ key: &[u8],
+ iv: &[u8],
+ ) -> Result<Self, CipherContextError> {
Ok(Self {
cipher: Rc::new(RefCell::new(Cipher::new(algorithm, key, iv)?)),
})
@@ -74,16 +85,31 @@ impl CipherContext {
auto_pad: bool,
input: &[u8],
output: &mut [u8],
- ) -> Result<Tag, AnyError> {
+ ) -> Result<Tag, CipherContextError> {
Rc::try_unwrap(self.cipher)
- .map_err(|_| type_error("Cipher context is already in use"))?
+ .map_err(|_| CipherContextError::ContextInUse)?
.into_inner()
.r#final(auto_pad, input, output)
+ .map_err(Into::into)
}
}
+#[derive(Debug, thiserror::Error)]
+pub enum DecipherContextError {
+ #[error("Decipher context is already in use")]
+ ContextInUse,
+ #[error("{0}")]
+ Resource(deno_core::error::AnyError),
+ #[error(transparent)]
+ Decipher(#[from] DecipherError),
+}
+
impl DecipherContext {
- pub fn new(algorithm: &str, key: &[u8], iv: &[u8]) -> Result<Self, AnyError> {
+ pub fn new(
+ algorithm: &str,
+ key: &[u8],
+ iv: &[u8],
+ ) -> Result<Self, DecipherContextError> {
Ok(Self {
decipher: Rc::new(RefCell::new(Decipher::new(algorithm, key, iv)?)),
})
@@ -103,11 +129,12 @@ impl DecipherContext {
input: &[u8],
output: &mut [u8],
auth_tag: &[u8],
- ) -> Result<(), AnyError> {
+ ) -> Result<(), DecipherContextError> {
Rc::try_unwrap(self.decipher)
- .map_err(|_| type_error("Decipher context is already in use"))?
+ .map_err(|_| DecipherContextError::ContextInUse)?
.into_inner()
.r#final(auto_pad, input, output, auth_tag)
+ .map_err(Into::into)
}
}
@@ -123,12 +150,26 @@ impl Resource for DecipherContext {
}
}
+#[derive(Debug, thiserror::Error)]
+pub enum CipherError {
+ #[error("IV length must be 12 bytes")]
+ InvalidIvLength,
+ #[error("Invalid key length")]
+ InvalidKeyLength,
+ #[error("Invalid initialization vector")]
+ InvalidInitializationVector,
+ #[error("Cannot pad the input data")]
+ CannotPadInputData,
+ #[error("Unknown cipher {0}")]
+ UnknownCipher(String),
+}
+
impl Cipher {
fn new(
algorithm_name: &str,
key: &[u8],
iv: &[u8],
- ) -> Result<Self, AnyError> {
+ ) -> Result<Self, CipherError> {
use Cipher::*;
Ok(match algorithm_name {
"aes-128-cbc" => {
@@ -139,7 +180,7 @@ impl Cipher {
"aes-256-ecb" => Aes256Ecb(Box::new(ecb::Encryptor::new(key.into()))),
"aes-128-gcm" => {
if iv.len() != 12 {
- return Err(type_error("IV length must be 12 bytes"));
+ return Err(CipherError::InvalidIvLength);
}
let cipher =
@@ -149,7 +190,7 @@ impl Cipher {
}
"aes-256-gcm" => {
if iv.len() != 12 {
- return Err(type_error("IV length must be 12 bytes"));
+ return Err(CipherError::InvalidIvLength);
}
let cipher =
@@ -159,15 +200,15 @@ impl Cipher {
}
"aes256" | "aes-256-cbc" => {
if key.len() != 32 {
- return Err(range_error("Invalid key length"));
+ return Err(CipherError::InvalidKeyLength);
}
if iv.len() != 16 {
- return Err(type_error("Invalid initialization vector"));
+ return Err(CipherError::InvalidInitializationVector);
}
Aes256Cbc(Box::new(cbc::Encryptor::new(key.into(), iv.into())))
}
- _ => return Err(type_error(format!("Unknown cipher {algorithm_name}"))),
+ _ => return Err(CipherError::UnknownCipher(algorithm_name.to_string())),
})
}
@@ -235,14 +276,14 @@ impl Cipher {
auto_pad: bool,
input: &[u8],
output: &mut [u8],
- ) -> Result<Tag, AnyError> {
+ ) -> Result<Tag, CipherError> {
assert!(input.len() < 16);
use Cipher::*;
match (self, auto_pad) {
(Aes128Cbc(encryptor), true) => {
let _ = (*encryptor)
.encrypt_padded_b2b_mut::<Pkcs7>(input, output)
- .map_err(|_| type_error("Cannot pad the input data"))?;
+ .map_err(|_| CipherError::CannotPadInputData)?;
Ok(None)
}
(Aes128Cbc(mut encryptor), false) => {
@@ -255,7 +296,7 @@ impl Cipher {
(Aes128Ecb(encryptor), true) => {
let _ = (*encryptor)
.encrypt_padded_b2b_mut::<Pkcs7>(input, output)
- .map_err(|_| type_error("Cannot pad the input data"))?;
+ .map_err(|_| CipherError::CannotPadInputData)?;
Ok(None)
}
(Aes128Ecb(mut encryptor), false) => {
@@ -268,7 +309,7 @@ impl Cipher {
(Aes192Ecb(encryptor), true) => {
let _ = (*encryptor)
.encrypt_padded_b2b_mut::<Pkcs7>(input, output)
- .map_err(|_| type_error("Cannot pad the input data"))?;
+ .map_err(|_| CipherError::CannotPadInputData)?;
Ok(None)
}
(Aes192Ecb(mut encryptor), false) => {
@@ -281,7 +322,7 @@ impl Cipher {
(Aes256Ecb(encryptor), true) => {
let _ = (*encryptor)
.encrypt_padded_b2b_mut::<Pkcs7>(input, output)
- .map_err(|_| type_error("Cannot pad the input data"))?;
+ .map_err(|_| CipherError::CannotPadInputData)?;
Ok(None)
}
(Aes256Ecb(mut encryptor), false) => {
@@ -296,7 +337,7 @@ impl Cipher {
(Aes256Cbc(encryptor), true) => {
let _ = (*encryptor)
.encrypt_padded_b2b_mut::<Pkcs7>(input, output)
- .map_err(|_| type_error("Cannot pad the input data"))?;
+ .map_err(|_| CipherError::CannotPadInputData)?;
Ok(None)
}
(Aes256Cbc(mut encryptor), false) => {
@@ -319,12 +360,32 @@ impl Cipher {
}
}
+#[derive(Debug, thiserror::Error)]
+pub enum DecipherError {
+ #[error("IV length must be 12 bytes")]
+ InvalidIvLength,
+ #[error("Invalid key length")]
+ InvalidKeyLength,
+ #[error("Invalid initialization vector")]
+ InvalidInitializationVector,
+ #[error("Cannot unpad the input data")]
+ CannotUnpadInputData,
+ #[error("Failed to authenticate data")]
+ DataAuthenticationFailed,
+ #[error("setAutoPadding(false) not supported for Aes128Gcm yet")]
+ SetAutoPaddingFalseAes128GcmUnsupported,
+ #[error("setAutoPadding(false) not supported for Aes256Gcm yet")]
+ SetAutoPaddingFalseAes256GcmUnsupported,
+ #[error("Unknown cipher {0}")]
+ UnknownCipher(String),
+}
+
impl Decipher {
fn new(
algorithm_name: &str,
key: &[u8],
iv: &[u8],
- ) -> Result<Self, AnyError> {
+ ) -> Result<Self, DecipherError> {
use Decipher::*;
Ok(match algorithm_name {
"aes-128-cbc" => {
@@ -335,7 +396,7 @@ impl Decipher {
"aes-256-ecb" => Aes256Ecb(Box::new(ecb::Decryptor::new(key.into()))),
"aes-128-gcm" => {
if iv.len() != 12 {
- return Err(type_error("IV length must be 12 bytes"));
+ return Err(DecipherError::InvalidIvLength);
}
let decipher =
@@ -345,7 +406,7 @@ impl Decipher {
}
"aes-256-gcm" => {
if iv.len() != 12 {
- return Err(type_error("IV length must be 12 bytes"));
+ return Err(DecipherError::InvalidIvLength);
}
let decipher =
@@ -355,15 +416,17 @@ impl Decipher {
}
"aes256" | "aes-256-cbc" => {
if key.len() != 32 {
- return Err(range_error("Invalid key length"));
+ return Err(DecipherError::InvalidKeyLength);
}
if iv.len() != 16 {
- return Err(type_error("Invalid initialization vector"));
+ return Err(DecipherError::InvalidInitializationVector);
}
Aes256Cbc(Box::new(cbc::Decryptor::new(key.into(), iv.into())))
}
- _ => return Err(type_error(format!("Unknown cipher {algorithm_name}"))),
+ _ => {
+ return Err(DecipherError::UnknownCipher(algorithm_name.to_string()))
+ }
})
}
@@ -432,14 +495,14 @@ impl Decipher {
input: &[u8],
output: &mut [u8],
auth_tag: &[u8],
- ) -> Result<(), AnyError> {
+ ) -> Result<(), DecipherError> {
use Decipher::*;
match (self, auto_pad) {
(Aes128Cbc(decryptor), true) => {
assert!(input.len() == 16);
let _ = (*decryptor)
.decrypt_padded_b2b_mut::<Pkcs7>(input, output)
- .map_err(|_| type_error("Cannot unpad the input data"))?;
+ .map_err(|_| DecipherError::CannotUnpadInputData)?;
Ok(())
}
(Aes128Cbc(mut decryptor), false) => {
@@ -453,7 +516,7 @@ impl Decipher {
assert!(input.len() == 16);
let _ = (*decryptor)
.decrypt_padded_b2b_mut::<Pkcs7>(input, output)
- .map_err(|_| type_error("Cannot unpad the input data"))?;
+ .map_err(|_| DecipherError::CannotUnpadInputData)?;
Ok(())
}
(Aes128Ecb(mut decryptor), false) => {
@@ -467,7 +530,7 @@ impl Decipher {
assert!(input.len() == 16);
let _ = (*decryptor)
.decrypt_padded_b2b_mut::<Pkcs7>(input, output)
- .map_err(|_| type_error("Cannot unpad the input data"))?;
+ .map_err(|_| DecipherError::CannotUnpadInputData)?;
Ok(())
}
(Aes192Ecb(mut decryptor), false) => {
@@ -481,7 +544,7 @@ impl Decipher {
assert!(input.len() == 16);
let _ = (*decryptor)
.decrypt_padded_b2b_mut::<Pkcs7>(input, output)
- .map_err(|_| type_error("Cannot unpad the input data"))?;
+ .map_err(|_| DecipherError::CannotUnpadInputData)?;
Ok(())
}
(Aes256Ecb(mut decryptor), false) => {
@@ -496,28 +559,28 @@ impl Decipher {
if tag.as_slice() == auth_tag {
Ok(())
} else {
- Err(type_error("Failed to authenticate data"))
+ Err(DecipherError::DataAuthenticationFailed)
}
}
- (Aes128Gcm(_), false) => Err(type_error(
- "setAutoPadding(false) not supported for Aes256Gcm yet",
- )),
+ (Aes128Gcm(_), false) => {
+ Err(DecipherError::SetAutoPaddingFalseAes128GcmUnsupported)
+ }
(Aes256Gcm(decipher), true) => {
let tag = decipher.finish();
if tag.as_slice() == auth_tag {
Ok(())
} else {
- Err(type_error("Failed to authenticate data"))
+ Err(DecipherError::DataAuthenticationFailed)
}
}
- (Aes256Gcm(_), false) => Err(type_error(
- "setAutoPadding(false) not supported for Aes256Gcm yet",
- )),
+ (Aes256Gcm(_), false) => {
+ Err(DecipherError::SetAutoPaddingFalseAes256GcmUnsupported)
+ }
(Aes256Cbc(decryptor), true) => {
assert!(input.len() == 16);
let _ = (*decryptor)
.decrypt_padded_b2b_mut::<Pkcs7>(input, output)
- .map_err(|_| type_error("Cannot unpad the input data"))?;
+ .map_err(|_| DecipherError::CannotUnpadInputData)?;
Ok(())
}
(Aes256Cbc(mut decryptor), false) => {
diff --git a/ext/node/ops/crypto/digest.rs b/ext/node/ops/crypto/digest.rs
index 293e8e063..a7d8fb51f 100644
--- a/ext/node/ops/crypto/digest.rs
+++ b/ext/node/ops/crypto/digest.rs
@@ -1,6 +1,4 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
-use deno_core::error::generic_error;
-use deno_core::error::AnyError;
use deno_core::GarbageCollected;
use digest::Digest;
use digest::DynDigest;
@@ -19,7 +17,7 @@ impl Hasher {
pub fn new(
algorithm: &str,
output_length: Option<usize>,
- ) -> Result<Self, AnyError> {
+ ) -> Result<Self, HashError> {
let hash = Hash::new(algorithm, output_length)?;
Ok(Self {
@@ -44,7 +42,7 @@ impl Hasher {
pub fn clone_inner(
&self,
output_length: Option<usize>,
- ) -> Result<Option<Self>, AnyError> {
+ ) -> Result<Option<Self>, HashError> {
let hash = self.hash.borrow();
let Some(hash) = hash.as_ref() else {
return Ok(None);
@@ -184,11 +182,19 @@ pub enum Hash {
use Hash::*;
+#[derive(Debug, thiserror::Error)]
+pub enum HashError {
+ #[error("Output length mismatch for non-extendable algorithm")]
+ OutputLengthMismatch,
+ #[error("Digest method not supported: {0}")]
+ DigestMethodUnsupported(String),
+}
+
impl Hash {
pub fn new(
algorithm_name: &str,
output_length: Option<usize>,
- ) -> Result<Self, AnyError> {
+ ) -> Result<Self, HashError> {
match algorithm_name {
"shake128" => return Ok(Shake128(Default::default(), output_length)),
"shake256" => return Ok(Shake256(Default::default(), output_length)),
@@ -201,17 +207,13 @@ impl Hash {
let digest: D = Digest::new();
if let Some(length) = output_length {
if length != digest.output_size() {
- return Err(generic_error(
- "Output length mismatch for non-extendable algorithm",
- ));
+ return Err(HashError::OutputLengthMismatch);
}
}
FixedSize(Box::new(digest))
},
_ => {
- return Err(generic_error(format!(
- "Digest method not supported: {algorithm_name}"
- )))
+ return Err(HashError::DigestMethodUnsupported(algorithm_name.to_string()))
}
);
@@ -243,14 +245,12 @@ impl Hash {
pub fn clone_hash(
&self,
output_length: Option<usize>,
- ) -> Result<Self, AnyError> {
+ ) -> Result<Self, HashError> {
let hash = match self {
FixedSize(context) => {
if let Some(length) = output_length {
if length != context.output_size() {
- return Err(generic_error(
- "Output length mismatch for non-extendable algorithm",
- ));
+ return Err(HashError::OutputLengthMismatch);
}
}
FixedSize(context.box_clone())
diff --git a/ext/node/ops/crypto/keys.rs b/ext/node/ops/crypto/keys.rs
index 867b34e04..f164972d4 100644
--- a/ext/node/ops/crypto/keys.rs
+++ b/ext/node/ops/crypto/keys.rs
@@ -4,9 +4,7 @@ use std::borrow::Cow;
use std::cell::RefCell;
use base64::Engine;
-use deno_core::error::generic_error;
use deno_core::error::type_error;
-use deno_core::error::AnyError;
use deno_core::op2;
use deno_core::serde_v8::BigInt as V8BigInt;
use deno_core::unsync::spawn_blocking;
@@ -46,6 +44,7 @@ use spki::der::Reader as _;
use spki::DecodePublicKey as _;
use spki::EncodePublicKey as _;
use spki::SubjectPublicKeyInfoRef;
+use x509_parser::error::X509Error;
use x509_parser::x509;
use super::dh;
@@ -236,9 +235,11 @@ impl RsaPssPrivateKey {
}
impl EcPublicKey {
- pub fn to_jwk(&self) -> Result<elliptic_curve::JwkEcKey, AnyError> {
+ pub fn to_jwk(&self) -> Result<JwkEcKey, AsymmetricPublicKeyJwkError> {
match self {
- EcPublicKey::P224(_) => Err(type_error("Unsupported JWK EC curve: P224")),
+ EcPublicKey::P224(_) => {
+ Err(AsymmetricPublicKeyJwkError::UnsupportedJwkEcCurveP224)
+ }
EcPublicKey::P256(key) => Ok(key.to_jwk()),
EcPublicKey::P384(key) => Ok(key.to_jwk()),
}
@@ -363,49 +364,201 @@ impl<'a> TryFrom<rsa::pkcs8::der::asn1::AnyRef<'a>> for RsaPssParameters<'a> {
}
}
+#[derive(Debug, thiserror::Error)]
+pub enum X509PublicKeyError {
+ #[error(transparent)]
+ X509(#[from] x509_parser::error::X509Error),
+ #[error(transparent)]
+ Rsa(#[from] rsa::Error),
+ #[error(transparent)]
+ Asn1(#[from] x509_parser::der_parser::asn1_rs::Error),
+ #[error(transparent)]
+ Ec(#[from] elliptic_curve::Error),
+ #[error("unsupported ec named curve")]
+ UnsupportedEcNamedCurve,
+ #[error("missing ec parameters")]
+ MissingEcParameters,
+ #[error("malformed DSS public key")]
+ MalformedDssPublicKey,
+ #[error("unsupported x509 public key type")]
+ UnsupportedX509KeyType,
+}
+
+#[derive(Debug, thiserror::Error)]
+pub enum RsaJwkError {
+ #[error(transparent)]
+ Base64(#[from] base64::DecodeError),
+ #[error(transparent)]
+ Rsa(#[from] rsa::Error),
+ #[error("missing RSA private component")]
+ MissingRsaPrivateComponent,
+}
+
+#[derive(Debug, thiserror::Error)]
+pub enum EcJwkError {
+ #[error(transparent)]
+ Ec(#[from] elliptic_curve::Error),
+ #[error("unsupported curve: {0}")]
+ UnsupportedCurve(String),
+}
+
+#[derive(Debug, thiserror::Error)]
+pub enum EdRawError {
+ #[error(transparent)]
+ Ed25519Signature(#[from] ed25519_dalek::SignatureError),
+ #[error("invalid Ed25519 key")]
+ InvalidEd25519Key,
+ #[error("unsupported curve")]
+ UnsupportedCurve,
+}
+
+#[derive(Debug, thiserror::Error)]
+pub enum AsymmetricPrivateKeyError {
+ #[error("invalid PEM private key: not valid utf8 starting at byte {0}")]
+ InvalidPemPrivateKeyInvalidUtf8(usize),
+ #[error("invalid encrypted PEM private key")]
+ InvalidEncryptedPemPrivateKey,
+ #[error("invalid PEM private key")]
+ InvalidPemPrivateKey,
+ #[error("encrypted private key requires a passphrase to decrypt")]
+ EncryptedPrivateKeyRequiresPassphraseToDecrypt,
+ #[error("invalid PKCS#1 private key")]
+ InvalidPkcs1PrivateKey,
+ #[error("invalid SEC1 private key")]
+ InvalidSec1PrivateKey,
+ #[error("unsupported PEM label: {0}")]
+ UnsupportedPemLabel(String),
+ #[error(transparent)]
+ RsaPssParamsParse(#[from] RsaPssParamsParseError),
+ #[error("invalid encrypted PKCS#8 private key")]
+ InvalidEncryptedPkcs8PrivateKey,
+ #[error("invalid PKCS#8 private key")]
+ InvalidPkcs8PrivateKey,
+ #[error("PKCS#1 private key does not support encryption with passphrase")]
+ Pkcs1PrivateKeyDoesNotSupportEncryptionWithPassphrase,
+ #[error("SEC1 private key does not support encryption with passphrase")]
+ Sec1PrivateKeyDoesNotSupportEncryptionWithPassphrase,
+ #[error("unsupported ec named curve")]
+ UnsupportedEcNamedCurve,
+ #[error("invalid private key")]
+ InvalidPrivateKey,
+ #[error("invalid DSA private key")]
+ InvalidDsaPrivateKey,
+ #[error("malformed or missing named curve in ec parameters")]
+ MalformedOrMissingNamedCurveInEcParameters,
+ #[error("unsupported key type: {0}")]
+ UnsupportedKeyType(String),
+ #[error("unsupported key format: {0}")]
+ UnsupportedKeyFormat(String),
+ #[error("invalid x25519 private key")]
+ InvalidX25519PrivateKey,
+ #[error("x25519 private key is the wrong length")]
+ X25519PrivateKeyIsWrongLength,
+ #[error("invalid Ed25519 private key")]
+ InvalidEd25519PrivateKey,
+ #[error("missing dh parameters")]
+ MissingDhParameters,
+ #[error("unsupported private key oid")]
+ UnsupportedPrivateKeyOid,
+}
+
+#[derive(Debug, thiserror::Error)]
+pub enum AsymmetricPublicKeyError {
+ #[error("invalid PEM private key: not valid utf8 starting at byte {0}")]
+ InvalidPemPrivateKeyInvalidUtf8(usize),
+ #[error("invalid PEM public key")]
+ InvalidPemPublicKey,
+ #[error("invalid PKCS#1 public key")]
+ InvalidPkcs1PublicKey,
+ #[error(transparent)]
+ AsymmetricPrivateKey(#[from] AsymmetricPrivateKeyError),
+ #[error("invalid x509 certificate")]
+ InvalidX509Certificate,
+ #[error(transparent)]
+ X509(#[from] x509_parser::nom::Err<X509Error>),
+ #[error(transparent)]
+ X509PublicKey(#[from] X509PublicKeyError),
+ #[error("unsupported PEM label: {0}")]
+ UnsupportedPemLabel(String),
+ #[error("invalid SPKI public key")]
+ InvalidSpkiPublicKey,
+ #[error("unsupported key type: {0}")]
+ UnsupportedKeyType(String),
+ #[error("unsupported key format: {0}")]
+ UnsupportedKeyFormat(String),
+ #[error(transparent)]
+ Spki(#[from] spki::Error),
+ #[error(transparent)]
+ Pkcs1(#[from] rsa::pkcs1::Error),
+ #[error(transparent)]
+ RsaPssParamsParse(#[from] RsaPssParamsParseError),
+ #[error("malformed DSS public key")]
+ MalformedDssPublicKey,
+ #[error("malformed or missing named curve in ec parameters")]
+ MalformedOrMissingNamedCurveInEcParameters,
+ #[error("malformed or missing public key in ec spki")]
+ MalformedOrMissingPublicKeyInEcSpki,
+ #[error(transparent)]
+ Ec(#[from] elliptic_curve::Error),
+ #[error("unsupported ec named curve")]
+ UnsupportedEcNamedCurve,
+ #[error("malformed or missing public key in x25519 spki")]
+ MalformedOrMissingPublicKeyInX25519Spki,
+ #[error("x25519 public key is too short")]
+ X25519PublicKeyIsTooShort,
+ #[error("invalid Ed25519 public key")]
+ InvalidEd25519PublicKey,
+ #[error("missing dh parameters")]
+ MissingDhParameters,
+ #[error("malformed dh parameters")]
+ MalformedDhParameters,
+ #[error("malformed or missing public key in dh spki")]
+ MalformedOrMissingPublicKeyInDhSpki,
+ #[error("unsupported private key oid")]
+ UnsupportedPrivateKeyOid,
+}
+
impl KeyObjectHandle {
pub fn new_asymmetric_private_key_from_js(
key: &[u8],
format: &str,
typ: &str,
passphrase: Option<&[u8]>,
- ) -> Result<KeyObjectHandle, AnyError> {
+ ) -> Result<KeyObjectHandle, AsymmetricPrivateKeyError> {
let document = match format {
"pem" => {
let pem = std::str::from_utf8(key).map_err(|err| {
- type_error(format!(
- "invalid PEM private key: not valid utf8 starting at byte {}",
- err.valid_up_to()
- ))
+ AsymmetricPrivateKeyError::InvalidPemPrivateKeyInvalidUtf8(
+ err.valid_up_to(),
+ )
})?;
if let Some(passphrase) = passphrase {
- SecretDocument::from_pkcs8_encrypted_pem(pem, passphrase)
- .map_err(|_| type_error("invalid encrypted PEM private key"))?
+ SecretDocument::from_pkcs8_encrypted_pem(pem, passphrase).map_err(
+ |_| AsymmetricPrivateKeyError::InvalidEncryptedPemPrivateKey,
+ )?
} else {
let (label, doc) = SecretDocument::from_pem(pem)
- .map_err(|_| type_error("invalid PEM private key"))?;
+ .map_err(|_| AsymmetricPrivateKeyError::InvalidPemPrivateKey)?;
match label {
EncryptedPrivateKeyInfo::PEM_LABEL => {
- return Err(type_error(
- "encrypted private key requires a passphrase to decrypt",
- ))
+ return Err(AsymmetricPrivateKeyError::EncryptedPrivateKeyRequiresPassphraseToDecrypt);
}
PrivateKeyInfo::PEM_LABEL => doc,
rsa::pkcs1::RsaPrivateKey::PEM_LABEL => {
- SecretDocument::from_pkcs1_der(doc.as_bytes())
- .map_err(|_| type_error("invalid PKCS#1 private key"))?
+ SecretDocument::from_pkcs1_der(doc.as_bytes()).map_err(|_| {
+ AsymmetricPrivateKeyError::InvalidPkcs1PrivateKey
+ })?
}
sec1::EcPrivateKey::PEM_LABEL => {
SecretDocument::from_sec1_der(doc.as_bytes())
- .map_err(|_| type_error("invalid SEC1 private key"))?
+ .map_err(|_| AsymmetricPrivateKeyError::InvalidSec1PrivateKey)?
}
_ => {
- return Err(type_error(format!(
- "unsupported PEM label: {}",
- label
- )))
+ return Err(AsymmetricPrivateKeyError::UnsupportedPemLabel(
+ label.to_string(),
+ ))
}
}
}
@@ -413,54 +566,57 @@ impl KeyObjectHandle {
"der" => match typ {
"pkcs8" => {
if let Some(passphrase) = passphrase {
- SecretDocument::from_pkcs8_encrypted_der(key, passphrase)
- .map_err(|_| type_error("invalid encrypted PKCS#8 private key"))?
+ SecretDocument::from_pkcs8_encrypted_der(key, passphrase).map_err(
+ |_| AsymmetricPrivateKeyError::InvalidEncryptedPkcs8PrivateKey,
+ )?
} else {
SecretDocument::from_pkcs8_der(key)
- .map_err(|_| type_error("invalid PKCS#8 private key"))?
+ .map_err(|_| AsymmetricPrivateKeyError::InvalidPkcs8PrivateKey)?
}
}
"pkcs1" => {
if passphrase.is_some() {
- return Err(type_error(
- "PKCS#1 private key does not support encryption with passphrase",
- ));
+ return Err(AsymmetricPrivateKeyError::Pkcs1PrivateKeyDoesNotSupportEncryptionWithPassphrase);
}
SecretDocument::from_pkcs1_der(key)
- .map_err(|_| type_error("invalid PKCS#1 private key"))?
+ .map_err(|_| AsymmetricPrivateKeyError::InvalidPkcs1PrivateKey)?
}
"sec1" => {
if passphrase.is_some() {
- return Err(type_error(
- "SEC1 private key does not support encryption with passphrase",
- ));
+ return Err(AsymmetricPrivateKeyError::Sec1PrivateKeyDoesNotSupportEncryptionWithPassphrase);
}
SecretDocument::from_sec1_der(key)
- .map_err(|_| type_error("invalid SEC1 private key"))?
+ .map_err(|_| AsymmetricPrivateKeyError::InvalidSec1PrivateKey)?
+ }
+ _ => {
+ return Err(AsymmetricPrivateKeyError::UnsupportedKeyType(
+ typ.to_string(),
+ ))
}
- _ => return Err(type_error(format!("unsupported key type: {}", typ))),
},
_ => {
- return Err(type_error(format!("unsupported key format: {}", format)))
+ return Err(AsymmetricPrivateKeyError::UnsupportedKeyFormat(
+ format.to_string(),
+ ))
}
};
let pk_info = PrivateKeyInfo::try_from(document.as_bytes())
- .map_err(|_| type_error("invalid private key"))?;
+ .map_err(|_| AsymmetricPrivateKeyError::InvalidPrivateKey)?;
let alg = pk_info.algorithm.oid;
let private_key = match alg {
RSA_ENCRYPTION_OID => {
let private_key =
rsa::RsaPrivateKey::from_pkcs1_der(pk_info.private_key)
- .map_err(|_| type_error("invalid PKCS#1 private key"))?;
+ .map_err(|_| AsymmetricPrivateKeyError::InvalidPkcs1PrivateKey)?;
AsymmetricPrivateKey::Rsa(private_key)
}
RSASSA_PSS_OID => {
let details = parse_rsa_pss_params(pk_info.algorithm.parameters)?;
let private_key =
rsa::RsaPrivateKey::from_pkcs1_der(pk_info.private_key)
- .map_err(|_| type_error("invalid PKCS#1 private key"))?;
+ .map_err(|_| AsymmetricPrivateKeyError::InvalidPkcs1PrivateKey)?;
AsymmetricPrivateKey::RsaPss(RsaPssPrivateKey {
key: private_key,
details,
@@ -468,40 +624,43 @@ impl KeyObjectHandle {
}
DSA_OID => {
let private_key = dsa::SigningKey::try_from(pk_info)
- .map_err(|_| type_error("invalid DSA private key"))?;
+ .map_err(|_| AsymmetricPrivateKeyError::InvalidDsaPrivateKey)?;
AsymmetricPrivateKey::Dsa(private_key)
}
EC_OID => {
let named_curve = pk_info.algorithm.parameters_oid().map_err(|_| {
- type_error("malformed or missing named curve in ec parameters")
+ AsymmetricPrivateKeyError::MalformedOrMissingNamedCurveInEcParameters
})?;
match named_curve {
ID_SECP224R1_OID => {
- let secret_key =
- p224::SecretKey::from_sec1_der(pk_info.private_key)
- .map_err(|_| type_error("invalid SEC1 private key"))?;
+ let secret_key = p224::SecretKey::from_sec1_der(
+ pk_info.private_key,
+ )
+ .map_err(|_| AsymmetricPrivateKeyError::InvalidSec1PrivateKey)?;
AsymmetricPrivateKey::Ec(EcPrivateKey::P224(secret_key))
}
ID_SECP256R1_OID => {
- let secret_key =
- p256::SecretKey::from_sec1_der(pk_info.private_key)
- .map_err(|_| type_error("invalid SEC1 private key"))?;
+ let secret_key = p256::SecretKey::from_sec1_der(
+ pk_info.private_key,
+ )
+ .map_err(|_| AsymmetricPrivateKeyError::InvalidSec1PrivateKey)?;
AsymmetricPrivateKey::Ec(EcPrivateKey::P256(secret_key))
}
ID_SECP384R1_OID => {
- let secret_key =
- p384::SecretKey::from_sec1_der(pk_info.private_key)
- .map_err(|_| type_error("invalid SEC1 private key"))?;
+ let secret_key = p384::SecretKey::from_sec1_der(
+ pk_info.private_key,
+ )
+ .map_err(|_| AsymmetricPrivateKeyError::InvalidSec1PrivateKey)?;
AsymmetricPrivateKey::Ec(EcPrivateKey::P384(secret_key))
}
- _ => return Err(type_error("unsupported ec named curve")),
+ _ => return Err(AsymmetricPrivateKeyError::UnsupportedEcNamedCurve),
}
}
X25519_OID => {
let string_ref = OctetStringRef::from_der(pk_info.private_key)
- .map_err(|_| type_error("invalid x25519 private key"))?;
+ .map_err(|_| AsymmetricPrivateKeyError::InvalidX25519PrivateKey)?;
if string_ref.as_bytes().len() != 32 {
- return Err(type_error("x25519 private key is the wrong length"));
+ return Err(AsymmetricPrivateKeyError::X25519PrivateKeyIsWrongLength);
}
let mut bytes = [0; 32];
bytes.copy_from_slice(string_ref.as_bytes());
@@ -509,22 +668,22 @@ impl KeyObjectHandle {
}
ED25519_OID => {
let signing_key = ed25519_dalek::SigningKey::try_from(pk_info)
- .map_err(|_| type_error("invalid Ed25519 private key"))?;
+ .map_err(|_| AsymmetricPrivateKeyError::InvalidEd25519PrivateKey)?;
AsymmetricPrivateKey::Ed25519(signing_key)
}
DH_KEY_AGREEMENT_OID => {
let params = pk_info
.algorithm
.parameters
- .ok_or_else(|| type_error("missing dh parameters"))?;
+ .ok_or(AsymmetricPrivateKeyError::MissingDhParameters)?;
let params = pkcs3::DhParameter::from_der(&params.to_der().unwrap())
- .map_err(|_| type_error("malformed dh parameters"))?;
+ .map_err(|_| AsymmetricPrivateKeyError::MissingDhParameters)?;
AsymmetricPrivateKey::Dh(DhPrivateKey {
key: dh::PrivateKey::from_bytes(pk_info.private_key),
params,
})
}
- _ => return Err(type_error("unsupported private key oid")),
+ _ => return Err(AsymmetricPrivateKeyError::UnsupportedPrivateKeyOid),
};
Ok(KeyObjectHandle::AsymmetricPrivate(private_key))
@@ -532,7 +691,7 @@ impl KeyObjectHandle {
pub fn new_x509_public_key(
spki: &x509::SubjectPublicKeyInfo,
- ) -> Result<KeyObjectHandle, AnyError> {
+ ) -> Result<KeyObjectHandle, X509PublicKeyError> {
use x509_parser::der_parser::asn1_rs::oid;
use x509_parser::public_key::PublicKey;
@@ -565,18 +724,18 @@ impl KeyObjectHandle {
let public_key = p384::PublicKey::from_sec1_bytes(data)?;
AsymmetricPublicKey::Ec(EcPublicKey::P384(public_key))
}
- _ => return Err(type_error("unsupported ec named curve")),
+ _ => return Err(X509PublicKeyError::UnsupportedEcNamedCurve),
}
} else {
- return Err(type_error("missing ec parameters"));
+ return Err(X509PublicKeyError::MissingEcParameters);
}
}
PublicKey::DSA(_) => {
let verifying_key = dsa::VerifyingKey::from_public_key_der(spki.raw)
- .map_err(|_| type_error("malformed DSS public key"))?;
+ .map_err(|_| X509PublicKeyError::MalformedDssPublicKey)?;
AsymmetricPublicKey::Dsa(verifying_key)
}
- _ => return Err(type_error("unsupported x509 public key type")),
+ _ => return Err(X509PublicKeyError::UnsupportedX509KeyType),
};
Ok(KeyObjectHandle::AsymmetricPublic(key))
@@ -585,7 +744,7 @@ impl KeyObjectHandle {
pub fn new_rsa_jwk(
jwk: RsaJwkKey,
is_public: bool,
- ) -> Result<KeyObjectHandle, AnyError> {
+ ) -> Result<KeyObjectHandle, RsaJwkError> {
use base64::prelude::BASE64_URL_SAFE_NO_PAD;
let n = BASE64_URL_SAFE_NO_PAD.decode(jwk.n.as_bytes())?;
@@ -604,19 +763,19 @@ impl KeyObjectHandle {
let d = BASE64_URL_SAFE_NO_PAD.decode(
jwk
.d
- .ok_or_else(|| type_error("missing RSA private component"))?
+ .ok_or(RsaJwkError::MissingRsaPrivateComponent)?
.as_bytes(),
)?;
let p = BASE64_URL_SAFE_NO_PAD.decode(
jwk
.p
- .ok_or_else(|| type_error("missing RSA private component"))?
+ .ok_or(RsaJwkError::MissingRsaPrivateComponent)?
.as_bytes(),
)?;
let q = BASE64_URL_SAFE_NO_PAD.decode(
jwk
.q
- .ok_or_else(|| type_error("missing RSA private component"))?
+ .ok_or(RsaJwkError::MissingRsaPrivateComponent)?
.as_bytes(),
)?;
@@ -640,7 +799,7 @@ impl KeyObjectHandle {
pub fn new_ec_jwk(
jwk: &JwkEcKey,
is_public: bool,
- ) -> Result<KeyObjectHandle, AnyError> {
+ ) -> Result<KeyObjectHandle, EcJwkError> {
// https://datatracker.ietf.org/doc/html/rfc7518#section-6.2.1.1
let handle = match jwk.crv() {
"P-256" if is_public => {
@@ -660,7 +819,7 @@ impl KeyObjectHandle {
EcPrivateKey::P384(p384::SecretKey::from_jwk(jwk)?),
)),
_ => {
- return Err(type_error(format!("unsupported curve: {}", jwk.crv())));
+ return Err(EcJwkError::UnsupportedCurve(jwk.crv().to_string()));
}
};
@@ -671,12 +830,11 @@ impl KeyObjectHandle {
curve: &str,
data: &[u8],
is_public: bool,
- ) -> Result<KeyObjectHandle, AnyError> {
+ ) -> Result<KeyObjectHandle, EdRawError> {
match curve {
"Ed25519" => {
- let data = data
- .try_into()
- .map_err(|_| type_error("invalid Ed25519 key"))?;
+ let data =
+ data.try_into().map_err(|_| EdRawError::InvalidEd25519Key)?;
if !is_public {
Ok(KeyObjectHandle::AsymmetricPrivate(
AsymmetricPrivateKey::Ed25519(
@@ -692,9 +850,8 @@ impl KeyObjectHandle {
}
}
"X25519" => {
- let data: [u8; 32] = data
- .try_into()
- .map_err(|_| type_error("invalid x25519 key"))?;
+ let data: [u8; 32] =
+ data.try_into().map_err(|_| EdRawError::InvalidEd25519Key)?;
if !is_public {
Ok(KeyObjectHandle::AsymmetricPrivate(
AsymmetricPrivateKey::X25519(x25519_dalek::StaticSecret::from(
@@ -707,7 +864,7 @@ impl KeyObjectHandle {
))
}
}
- _ => Err(type_error("unsupported curve")),
+ _ => Err(EdRawError::UnsupportedCurve),
}
}
@@ -716,24 +873,23 @@ impl KeyObjectHandle {
format: &str,
typ: &str,
passphrase: Option<&[u8]>,
- ) -> Result<KeyObjectHandle, AnyError> {
+ ) -> Result<KeyObjectHandle, AsymmetricPublicKeyError> {
let document = match format {
"pem" => {
let pem = std::str::from_utf8(key).map_err(|err| {
- type_error(format!(
- "invalid PEM public key: not valid utf8 starting at byte {}",
- err.valid_up_to()
- ))
+ AsymmetricPublicKeyError::InvalidPemPrivateKeyInvalidUtf8(
+ err.valid_up_to(),
+ )
})?;
let (label, document) = Document::from_pem(pem)
- .map_err(|_| type_error("invalid PEM public key"))?;
+ .map_err(|_| AsymmetricPublicKeyError::InvalidPemPublicKey)?;
match label {
SubjectPublicKeyInfoRef::PEM_LABEL => document,
rsa::pkcs1::RsaPublicKey::PEM_LABEL => {
Document::from_pkcs1_der(document.as_bytes())
- .map_err(|_| type_error("invalid PKCS#1 public key"))?
+ .map_err(|_| AsymmetricPublicKeyError::InvalidPkcs1PublicKey)?
}
EncryptedPrivateKeyInfo::PEM_LABEL
| PrivateKeyInfo::PEM_LABEL
@@ -754,27 +910,36 @@ impl KeyObjectHandle {
}
"CERTIFICATE" => {
let (_, pem) = x509_parser::pem::parse_x509_pem(pem.as_bytes())
- .map_err(|_| type_error("invalid x509 certificate"))?;
+ .map_err(|_| AsymmetricPublicKeyError::InvalidX509Certificate)?;
let cert = pem.parse_x509()?;
let public_key = cert.tbs_certificate.subject_pki;
- return KeyObjectHandle::new_x509_public_key(&public_key);
+ return KeyObjectHandle::new_x509_public_key(&public_key)
+ .map_err(Into::into);
}
_ => {
- return Err(type_error(format!("unsupported PEM label: {}", label)))
+ return Err(AsymmetricPublicKeyError::UnsupportedPemLabel(
+ label.to_string(),
+ ))
}
}
}
"der" => match typ {
"pkcs1" => Document::from_pkcs1_der(key)
- .map_err(|_| type_error("invalid PKCS#1 public key"))?,
+ .map_err(|_| AsymmetricPublicKeyError::InvalidPkcs1PublicKey)?,
"spki" => Document::from_public_key_der(key)
- .map_err(|_| type_error("invalid SPKI public key"))?,
- _ => return Err(type_error(format!("unsupported key type: {}", typ))),
+ .map_err(|_| AsymmetricPublicKeyError::InvalidSpkiPublicKey)?,
+ _ => {
+ return Err(AsymmetricPublicKeyError::UnsupportedKeyType(
+ typ.to_string(),
+ ))
+ }
},
_ => {
- return Err(type_error(format!("unsupported key format: {}", format)))
+ return Err(AsymmetricPublicKeyError::UnsupportedKeyType(
+ format.to_string(),
+ ))
}
};
@@ -799,16 +964,16 @@ impl KeyObjectHandle {
}
DSA_OID => {
let verifying_key = dsa::VerifyingKey::try_from(spki)
- .map_err(|_| type_error("malformed DSS public key"))?;
+ .map_err(|_| AsymmetricPublicKeyError::MalformedDssPublicKey)?;
AsymmetricPublicKey::Dsa(verifying_key)
}
EC_OID => {
let named_curve = spki.algorithm.parameters_oid().map_err(|_| {
- type_error("malformed or missing named curve in ec parameters")
- })?;
- let data = spki.subject_public_key.as_bytes().ok_or_else(|| {
- type_error("malformed or missing public key in ec spki")
+ AsymmetricPublicKeyError::MalformedOrMissingNamedCurveInEcParameters
})?;
+ let data = spki.subject_public_key.as_bytes().ok_or(
+ AsymmetricPublicKeyError::MalformedOrMissingPublicKeyInEcSpki,
+ )?;
match named_curve {
ID_SECP224R1_OID => {
@@ -823,54 +988,68 @@ impl KeyObjectHandle {
let public_key = p384::PublicKey::from_sec1_bytes(data)?;
AsymmetricPublicKey::Ec(EcPublicKey::P384(public_key))
}
- _ => return Err(type_error("unsupported ec named curve")),
+ _ => return Err(AsymmetricPublicKeyError::UnsupportedEcNamedCurve),
}
}
X25519_OID => {
let mut bytes = [0; 32];
- let data = spki.subject_public_key.as_bytes().ok_or_else(|| {
- type_error("malformed or missing public key in x25519 spki")
- })?;
+ let data = spki.subject_public_key.as_bytes().ok_or(
+ AsymmetricPublicKeyError::MalformedOrMissingPublicKeyInX25519Spki,
+ )?;
if data.len() < 32 {
- return Err(type_error("x25519 public key is too short"));
+ return Err(AsymmetricPublicKeyError::X25519PublicKeyIsTooShort);
}
bytes.copy_from_slice(&data[0..32]);
AsymmetricPublicKey::X25519(x25519_dalek::PublicKey::from(bytes))
}
ED25519_OID => {
let verifying_key = ed25519_dalek::VerifyingKey::try_from(spki)
- .map_err(|_| type_error("invalid Ed25519 private key"))?;
+ .map_err(|_| AsymmetricPublicKeyError::InvalidEd25519PublicKey)?;
AsymmetricPublicKey::Ed25519(verifying_key)
}
DH_KEY_AGREEMENT_OID => {
let params = spki
.algorithm
.parameters
- .ok_or_else(|| type_error("missing dh parameters"))?;
+ .ok_or(AsymmetricPublicKeyError::MissingDhParameters)?;
let params = pkcs3::DhParameter::from_der(&params.to_der().unwrap())
- .map_err(|_| type_error("malformed dh parameters"))?;
+ .map_err(|_| AsymmetricPublicKeyError::MalformedDhParameters)?;
let Some(subject_public_key) = spki.subject_public_key.as_bytes()
else {
- return Err(type_error("malformed or missing public key in dh spki"));
+ return Err(
+ AsymmetricPublicKeyError::MalformedOrMissingPublicKeyInDhSpki,
+ );
};
AsymmetricPublicKey::Dh(DhPublicKey {
key: dh::PublicKey::from_bytes(subject_public_key),
params,
})
}
- _ => return Err(type_error("unsupported public key oid")),
+ _ => return Err(AsymmetricPublicKeyError::UnsupportedPrivateKeyOid),
};
Ok(KeyObjectHandle::AsymmetricPublic(public_key))
}
}
+#[derive(Debug, thiserror::Error)]
+pub enum RsaPssParamsParseError {
+ #[error("malformed pss private key parameters")]
+ MalformedPssPrivateKeyParameters,
+ #[error("unsupported pss hash algorithm")]
+ UnsupportedPssHashAlgorithm,
+ #[error("unsupported pss mask gen algorithm")]
+ UnsupportedPssMaskGenAlgorithm,
+ #[error("malformed or missing pss mask gen algorithm parameters")]
+ MalformedOrMissingPssMaskGenAlgorithm,
+}
+
fn parse_rsa_pss_params(
parameters: Option<AnyRef<'_>>,
-) -> Result<Option<RsaPssDetails>, deno_core::anyhow::Error> {
+) -> Result<Option<RsaPssDetails>, RsaPssParamsParseError> {
let details = if let Some(parameters) = parameters {
let params = RsaPssParameters::try_from(parameters)
- .map_err(|_| type_error("malformed pss private key parameters"))?;
+ .map_err(|_| RsaPssParamsParseError::MalformedPssPrivateKeyParameters)?;
let hash_algorithm = match params.hash_algorithm.map(|k| k.oid) {
Some(ID_SHA1_OID) => RsaPssHashAlgorithm::Sha1,
@@ -881,16 +1060,16 @@ fn parse_rsa_pss_params(
Some(ID_SHA512_224_OID) => RsaPssHashAlgorithm::Sha512_224,
Some(ID_SHA512_256_OID) => RsaPssHashAlgorithm::Sha512_256,
None => RsaPssHashAlgorithm::Sha1,
- _ => return Err(type_error("unsupported pss hash algorithm")),
+ _ => return Err(RsaPssParamsParseError::UnsupportedPssHashAlgorithm),
};
let mf1_hash_algorithm = match params.mask_gen_algorithm {
Some(alg) => {
if alg.oid != ID_MFG1 {
- return Err(type_error("unsupported pss mask gen algorithm"));
+ return Err(RsaPssParamsParseError::UnsupportedPssMaskGenAlgorithm);
}
let params = alg.parameters_oid().map_err(|_| {
- type_error("malformed or missing pss mask gen algorithm parameters")
+ RsaPssParamsParseError::MalformedOrMissingPssMaskGenAlgorithm
})?;
match params {
ID_SHA1_OID => RsaPssHashAlgorithm::Sha1,
@@ -900,7 +1079,9 @@ fn parse_rsa_pss_params(
ID_SHA512_OID => RsaPssHashAlgorithm::Sha512,
ID_SHA512_224_OID => RsaPssHashAlgorithm::Sha512_224,
ID_SHA512_256_OID => RsaPssHashAlgorithm::Sha512_256,
- _ => return Err(type_error("unsupported pss mask gen algorithm")),
+ _ => {
+ return Err(RsaPssParamsParseError::UnsupportedPssMaskGenAlgorithm)
+ }
}
}
None => hash_algorithm,
@@ -921,14 +1102,49 @@ fn parse_rsa_pss_params(
Ok(details)
}
-use base64::prelude::BASE64_URL_SAFE_NO_PAD;
-
fn bytes_to_b64(bytes: &[u8]) -> String {
+ use base64::prelude::BASE64_URL_SAFE_NO_PAD;
BASE64_URL_SAFE_NO_PAD.encode(bytes)
}
+#[derive(Debug, thiserror::Error)]
+pub enum AsymmetricPublicKeyJwkError {
+ #[error("key is not an asymmetric public key")]
+ KeyIsNotAsymmetricPublicKey,
+ #[error("Unsupported JWK EC curve: P224")]
+ UnsupportedJwkEcCurveP224,
+ #[error("jwk export not implemented for this key type")]
+ JwkExportNotImplementedForKeyType,
+}
+
+#[derive(Debug, thiserror::Error)]
+pub enum AsymmetricPublicKeyDerError {
+ #[error("key is not an asymmetric public key")]
+ KeyIsNotAsymmetricPublicKey,
+ #[error("invalid RSA public key")]
+ InvalidRsaPublicKey,
+ #[error("exporting non-RSA public key as PKCS#1 is not supported")]
+ ExportingNonRsaPublicKeyAsPkcs1Unsupported,
+ #[error("invalid EC public key")]
+ InvalidEcPublicKey,
+ #[error("exporting RSA-PSS public key as SPKI is not supported yet")]
+ ExportingNonRsaPssPublicKeyAsSpkiUnsupported,
+ #[error("invalid DSA public key")]
+ InvalidDsaPublicKey,
+ #[error("invalid X25519 public key")]
+ InvalidX25519PublicKey,
+ #[error("invalid Ed25519 public key")]
+ InvalidEd25519PublicKey,
+ #[error("invalid DH public key")]
+ InvalidDhPublicKey,
+ #[error("unsupported key type: {0}")]
+ UnsupportedKeyType(String),
+}
+
impl AsymmetricPublicKey {
- fn export_jwk(&self) -> Result<deno_core::serde_json::Value, AnyError> {
+ fn export_jwk(
+ &self,
+ ) -> Result<deno_core::serde_json::Value, AsymmetricPublicKeyJwkError> {
match self {
AsymmetricPublicKey::Ec(key) => {
let jwk = key.to_jwk()?;
@@ -974,40 +1190,39 @@ impl AsymmetricPublicKey {
});
Ok(jwk)
}
- _ => Err(type_error("jwk export not implemented for this key type")),
+ _ => Err(AsymmetricPublicKeyJwkError::JwkExportNotImplementedForKeyType),
}
}
- fn export_der(&self, typ: &str) -> Result<Box<[u8]>, AnyError> {
+ fn export_der(
+ &self,
+ typ: &str,
+ ) -> Result<Box<[u8]>, AsymmetricPublicKeyDerError> {
match typ {
"pkcs1" => match self {
AsymmetricPublicKey::Rsa(key) => {
let der = key
.to_pkcs1_der()
- .map_err(|_| type_error("invalid RSA public key"))?
+ .map_err(|_| AsymmetricPublicKeyDerError::InvalidRsaPublicKey)?
.into_vec()
.into_boxed_slice();
Ok(der)
}
- _ => Err(type_error(
- "exporting non-RSA public key as PKCS#1 is not supported",
- )),
+ _ => Err(AsymmetricPublicKeyDerError::ExportingNonRsaPublicKeyAsPkcs1Unsupported),
},
"spki" => {
let der = match self {
AsymmetricPublicKey::Rsa(key) => key
.to_public_key_der()
- .map_err(|_| type_error("invalid RSA public key"))?
+ .map_err(|_| AsymmetricPublicKeyDerError::InvalidRsaPublicKey)?
.into_vec()
.into_boxed_slice(),
AsymmetricPublicKey::RsaPss(_key) => {
- return Err(generic_error(
- "exporting RSA-PSS public key as SPKI is not supported yet",
- ))
+ return Err(AsymmetricPublicKeyDerError::ExportingNonRsaPssPublicKeyAsSpkiUnsupported)
}
AsymmetricPublicKey::Dsa(key) => key
.to_public_key_der()
- .map_err(|_| type_error("invalid DSA public key"))?
+ .map_err(|_| AsymmetricPublicKeyDerError::InvalidDsaPublicKey)?
.into_vec()
.into_boxed_slice(),
AsymmetricPublicKey::Ec(key) => {
@@ -1023,12 +1238,12 @@ impl AsymmetricPublicKey {
parameters: Some(asn1::AnyRef::from(&oid)),
},
subject_public_key: BitStringRef::from_bytes(&sec1)
- .map_err(|_| type_error("invalid EC public key"))?,
+ .map_err(|_| AsymmetricPublicKeyDerError::InvalidEcPublicKey)?,
};
spki
.to_der()
- .map_err(|_| type_error("invalid EC public key"))?
+ .map_err(|_| AsymmetricPublicKeyDerError::InvalidEcPublicKey)?
.into_boxed_slice()
}
AsymmetricPublicKey::X25519(key) => {
@@ -1038,12 +1253,12 @@ impl AsymmetricPublicKey {
parameters: None,
},
subject_public_key: BitStringRef::from_bytes(key.as_bytes())
- .map_err(|_| type_error("invalid X25519 public key"))?,
+ .map_err(|_| AsymmetricPublicKeyDerError::InvalidX25519PublicKey)?,
};
spki
.to_der()
- .map_err(|_| type_error("invalid X25519 public key"))?
+ .map_err(|_| AsymmetricPublicKeyDerError::InvalidX25519PublicKey)?
.into_boxed_slice()
}
AsymmetricPublicKey::Ed25519(key) => {
@@ -1053,12 +1268,12 @@ impl AsymmetricPublicKey {
parameters: None,
},
subject_public_key: BitStringRef::from_bytes(key.as_bytes())
- .map_err(|_| type_error("invalid Ed25519 public key"))?,
+ .map_err(|_| AsymmetricPublicKeyDerError::InvalidEd25519PublicKey)?,
};
spki
.to_der()
- .map_err(|_| type_error("invalid Ed25519 public key"))?
+ .map_err(|_| AsymmetricPublicKeyDerError::InvalidEd25519PublicKey)?
.into_boxed_slice()
}
AsymmetricPublicKey::Dh(key) => {
@@ -1071,43 +1286,67 @@ impl AsymmetricPublicKey {
},
subject_public_key: BitStringRef::from_bytes(&public_key_bytes)
.map_err(|_| {
- type_error("invalid DH public key")
+ AsymmetricPublicKeyDerError::InvalidDhPublicKey
})?,
};
spki
.to_der()
- .map_err(|_| type_error("invalid DH public key"))?
+ .map_err(|_| AsymmetricPublicKeyDerError::InvalidDhPublicKey)?
.into_boxed_slice()
}
};
Ok(der)
}
- _ => Err(type_error(format!("unsupported key type: {}", typ))),
+ _ => Err(AsymmetricPublicKeyDerError::UnsupportedKeyType(typ.to_string())),
}
}
}
+#[derive(Debug, thiserror::Error)]
+pub enum AsymmetricPrivateKeyDerError {
+ #[error("key is not an asymmetric private key")]
+ KeyIsNotAsymmetricPrivateKey,
+ #[error("invalid RSA private key")]
+ InvalidRsaPrivateKey,
+ #[error("exporting non-RSA private key as PKCS#1 is not supported")]
+ ExportingNonRsaPrivateKeyAsPkcs1Unsupported,
+ #[error("invalid EC private key")]
+ InvalidEcPrivateKey,
+ #[error("exporting non-EC private key as SEC1 is not supported")]
+ ExportingNonEcPrivateKeyAsSec1Unsupported,
+ #[error("exporting RSA-PSS private key as PKCS#8 is not supported yet")]
+ ExportingNonRsaPssPrivateKeyAsPkcs8Unsupported,
+ #[error("invalid DSA private key")]
+ InvalidDsaPrivateKey,
+ #[error("invalid X25519 private key")]
+ InvalidX25519PrivateKey,
+ #[error("invalid Ed25519 private key")]
+ InvalidEd25519PrivateKey,
+ #[error("invalid DH private key")]
+ InvalidDhPrivateKey,
+ #[error("unsupported key type: {0}")]
+ UnsupportedKeyType(String),
+}
+
impl AsymmetricPrivateKey {
fn export_der(
&self,
typ: &str,
// cipher: Option<&str>,
// passphrase: Option<&str>,
- ) -> Result<Box<[u8]>, AnyError> {
+ ) -> Result<Box<[u8]>, AsymmetricPrivateKeyDerError> {
match typ {
"pkcs1" => match self {
AsymmetricPrivateKey::Rsa(key) => {
let der = key
.to_pkcs1_der()
- .map_err(|_| type_error("invalid RSA private key"))?
+ .map_err(|_| AsymmetricPrivateKeyDerError::InvalidRsaPrivateKey)?
.to_bytes()
.to_vec()
.into_boxed_slice();
Ok(der)
}
- _ => Err(type_error(
- "exporting non-RSA private key as PKCS#1 is not supported",
- )),
+ _ => Err(AsymmetricPrivateKeyDerError::ExportingNonRsaPrivateKeyAsPkcs1Unsupported),
},
"sec1" => match self {
AsymmetricPrivateKey::Ec(key) => {
@@ -1116,30 +1355,26 @@ impl AsymmetricPrivateKey {
EcPrivateKey::P256(key) => key.to_sec1_der(),
EcPrivateKey::P384(key) => key.to_sec1_der(),
}
- .map_err(|_| type_error("invalid EC private key"))?;
+ .map_err(|_| AsymmetricPrivateKeyDerError::InvalidEcPrivateKey)?;
Ok(sec1.to_vec().into_boxed_slice())
}
- _ => Err(type_error(
- "exporting non-EC private key as SEC1 is not supported",
- )),
+ _ => Err(AsymmetricPrivateKeyDerError::ExportingNonEcPrivateKeyAsSec1Unsupported),
},
"pkcs8" => {
let der = match self {
AsymmetricPrivateKey::Rsa(key) => {
let document = key
.to_pkcs8_der()
- .map_err(|_| type_error("invalid RSA private key"))?;
+ .map_err(|_| AsymmetricPrivateKeyDerError::InvalidRsaPrivateKey)?;
document.to_bytes().to_vec().into_boxed_slice()
}
AsymmetricPrivateKey::RsaPss(_key) => {
- return Err(generic_error(
- "exporting RSA-PSS private key as PKCS#8 is not supported yet",
- ))
+ return Err(AsymmetricPrivateKeyDerError::ExportingNonRsaPssPrivateKeyAsPkcs8Unsupported)
}
AsymmetricPrivateKey::Dsa(key) => {
let document = key
.to_pkcs8_der()
- .map_err(|_| type_error("invalid DSA private key"))?;
+ .map_err(|_| AsymmetricPrivateKeyDerError::InvalidDsaPrivateKey)?;
document.to_bytes().to_vec().into_boxed_slice()
}
AsymmetricPrivateKey::Ec(key) => {
@@ -1148,14 +1383,14 @@ impl AsymmetricPrivateKey {
EcPrivateKey::P256(key) => key.to_pkcs8_der(),
EcPrivateKey::P384(key) => key.to_pkcs8_der(),
}
- .map_err(|_| type_error("invalid EC private key"))?;
+ .map_err(|_| AsymmetricPrivateKeyDerError::InvalidEcPrivateKey)?;
document.to_bytes().to_vec().into_boxed_slice()
}
AsymmetricPrivateKey::X25519(key) => {
let private_key = OctetStringRef::new(key.as_bytes())
- .map_err(|_| type_error("invalid X25519 private key"))?
+ .map_err(|_| AsymmetricPrivateKeyDerError::InvalidX25519PrivateKey)?
.to_der()
- .map_err(|_| type_error("invalid X25519 private key"))?;
+ .map_err(|_| AsymmetricPrivateKeyDerError::InvalidX25519PrivateKey)?;
let private_key = PrivateKeyInfo {
algorithm: rsa::pkcs8::AlgorithmIdentifierRef {
@@ -1168,15 +1403,15 @@ impl AsymmetricPrivateKey {
let der = private_key
.to_der()
- .map_err(|_| type_error("invalid X25519 private key"))?
+ .map_err(|_| AsymmetricPrivateKeyDerError::InvalidX25519PrivateKey)?
.into_boxed_slice();
return Ok(der);
}
AsymmetricPrivateKey::Ed25519(key) => {
let private_key = OctetStringRef::new(key.as_bytes())
- .map_err(|_| type_error("invalid Ed25519 private key"))?
+ .map_err(|_| AsymmetricPrivateKeyDerError::InvalidEd25519PrivateKey)?
.to_der()
- .map_err(|_| type_error("invalid Ed25519 private key"))?;
+ .map_err(|_| AsymmetricPrivateKeyDerError::InvalidEd25519PrivateKey)?;
let private_key = PrivateKeyInfo {
algorithm: rsa::pkcs8::AlgorithmIdentifierRef {
@@ -1189,7 +1424,7 @@ impl AsymmetricPrivateKey {
private_key
.to_der()
- .map_err(|_| type_error("invalid ED25519 private key"))?
+ .map_err(|_| AsymmetricPrivateKeyDerError::InvalidEd25519PrivateKey)?
.into_boxed_slice()
}
AsymmetricPrivateKey::Dh(key) => {
@@ -1206,14 +1441,14 @@ impl AsymmetricPrivateKey {
private_key
.to_der()
- .map_err(|_| type_error("invalid DH private key"))?
+ .map_err(|_| AsymmetricPrivateKeyDerError::InvalidDhPrivateKey)?
.into_boxed_slice()
}
};
Ok(der)
}
- _ => Err(type_error(format!("unsupported key type: {}", typ))),
+ _ => Err(AsymmetricPrivateKeyDerError::UnsupportedKeyType(typ.to_string())),
}
}
}
@@ -1225,7 +1460,7 @@ pub fn op_node_create_private_key(
#[string] format: &str,
#[string] typ: &str,
#[buffer] passphrase: Option<&[u8]>,
-) -> Result<KeyObjectHandle, AnyError> {
+) -> Result<KeyObjectHandle, AsymmetricPrivateKeyError> {
KeyObjectHandle::new_asymmetric_private_key_from_js(
key, format, typ, passphrase,
)
@@ -1237,7 +1472,7 @@ pub fn op_node_create_ed_raw(
#[string] curve: &str,
#[buffer] key: &[u8],
is_public: bool,
-) -> Result<KeyObjectHandle, AnyError> {
+) -> Result<KeyObjectHandle, EdRawError> {
KeyObjectHandle::new_ed_raw(curve, key, is_public)
}
@@ -1255,16 +1490,16 @@ pub struct RsaJwkKey {
pub fn op_node_create_rsa_jwk(
#[serde] jwk: RsaJwkKey,
is_public: bool,
-) -> Result<KeyObjectHandle, AnyError> {
+) -> Result<KeyObjectHandle, RsaJwkError> {
KeyObjectHandle::new_rsa_jwk(jwk, is_public)
}
#[op2]
#[cppgc]
pub fn op_node_create_ec_jwk(
- #[serde] jwk: elliptic_curve::JwkEcKey,
+ #[serde] jwk: JwkEcKey,
is_public: bool,
-) -> Result<KeyObjectHandle, AnyError> {
+) -> Result<KeyObjectHandle, EcJwkError> {
KeyObjectHandle::new_ec_jwk(&jwk, is_public)
}
@@ -1275,7 +1510,7 @@ pub fn op_node_create_public_key(
#[string] format: &str,
#[string] typ: &str,
#[buffer] passphrase: Option<&[u8]>,
-) -> Result<KeyObjectHandle, AnyError> {
+) -> Result<KeyObjectHandle, AsymmetricPublicKeyError> {
KeyObjectHandle::new_asymmetric_public_key_from_js(
key, format, typ, passphrase,
)
@@ -1293,7 +1528,7 @@ pub fn op_node_create_secret_key(
#[string]
pub fn op_node_get_asymmetric_key_type(
#[cppgc] handle: &KeyObjectHandle,
-) -> Result<&'static str, AnyError> {
+) -> Result<&'static str, deno_core::error::AnyError> {
match handle {
KeyObjectHandle::AsymmetricPrivate(AsymmetricPrivateKey::Rsa(_))
| KeyObjectHandle::AsymmetricPublic(AsymmetricPublicKey::Rsa(_)) => {
@@ -1364,7 +1599,7 @@ pub enum AsymmetricKeyDetails {
#[serde]
pub fn op_node_get_asymmetric_key_details(
#[cppgc] handle: &KeyObjectHandle,
-) -> Result<AsymmetricKeyDetails, AnyError> {
+) -> Result<AsymmetricKeyDetails, deno_core::error::AnyError> {
match handle {
KeyObjectHandle::AsymmetricPrivate(private_key) => match private_key {
AsymmetricPrivateKey::Rsa(key) => {
@@ -1482,12 +1717,10 @@ pub fn op_node_get_asymmetric_key_details(
#[smi]
pub fn op_node_get_symmetric_key_size(
#[cppgc] handle: &KeyObjectHandle,
-) -> Result<usize, AnyError> {
+) -> Result<usize, deno_core::error::AnyError> {
match handle {
- KeyObjectHandle::AsymmetricPrivate(_) => {
- Err(type_error("asymmetric key is not a symmetric key"))
- }
- KeyObjectHandle::AsymmetricPublic(_) => {
+ KeyObjectHandle::AsymmetricPrivate(_)
+ | KeyObjectHandle::AsymmetricPublic(_) => {
Err(type_error("asymmetric key is not a symmetric key"))
}
KeyObjectHandle::Secret(key) => Ok(key.len() * 8),
@@ -1592,13 +1825,17 @@ pub async fn op_node_generate_rsa_key_async(
.unwrap()
}
+#[derive(Debug, thiserror::Error)]
+#[error("digest not allowed for RSA-PSS keys{}", .0.as_ref().map(|digest| format!(": {digest}")).unwrap_or_default())]
+pub struct GenerateRsaPssError(Option<String>);
+
fn generate_rsa_pss(
modulus_length: usize,
public_exponent: usize,
hash_algorithm: Option<&str>,
mf1_hash_algorithm: Option<&str>,
salt_length: Option<u32>,
-) -> Result<KeyObjectHandlePair, AnyError> {
+) -> Result<KeyObjectHandlePair, GenerateRsaPssError> {
let key = RsaPrivateKey::new_with_exp(
&mut thread_rng(),
modulus_length,
@@ -1617,25 +1854,19 @@ fn generate_rsa_pss(
let hash_algorithm = match_fixed_digest_with_oid!(
hash_algorithm,
fn (algorithm: Option<RsaPssHashAlgorithm>) {
- algorithm.ok_or_else(|| type_error("digest not allowed for RSA-PSS keys: {}"))?
+ algorithm.ok_or(GenerateRsaPssError(None))?
},
_ => {
- return Err(type_error(format!(
- "digest not allowed for RSA-PSS keys: {}",
- hash_algorithm
- )))
+ return Err(GenerateRsaPssError(Some(hash_algorithm.to_string())))
}
);
let mf1_hash_algorithm = match_fixed_digest_with_oid!(
mf1_hash_algorithm,
fn (algorithm: Option<RsaPssHashAlgorithm>) {
- algorithm.ok_or_else(|| type_error("digest not allowed for RSA-PSS keys: {}"))?
+ algorithm.ok_or(GenerateRsaPssError(None))?
},
_ => {
- return Err(type_error(format!(
- "digest not allowed for RSA-PSS keys: {}",
- mf1_hash_algorithm
- )))
+ return Err(GenerateRsaPssError(Some(mf1_hash_algorithm.to_string())))
}
);
let salt_length =
@@ -1663,7 +1894,7 @@ pub fn op_node_generate_rsa_pss_key(
#[string] hash_algorithm: Option<String>, // todo: Option<&str> not supproted in ops yet
#[string] mf1_hash_algorithm: Option<String>, // todo: Option<&str> not supproted in ops yet
#[smi] salt_length: Option<u32>,
-) -> Result<KeyObjectHandlePair, AnyError> {
+) -> Result<KeyObjectHandlePair, GenerateRsaPssError> {
generate_rsa_pss(
modulus_length,
public_exponent,
@@ -1681,7 +1912,7 @@ pub async fn op_node_generate_rsa_pss_key_async(
#[string] hash_algorithm: Option<String>, // todo: Option<&str> not supproted in ops yet
#[string] mf1_hash_algorithm: Option<String>, // todo: Option<&str> not supproted in ops yet
#[smi] salt_length: Option<u32>,
-) -> Result<KeyObjectHandlePair, AnyError> {
+) -> Result<KeyObjectHandlePair, GenerateRsaPssError> {
spawn_blocking(move || {
generate_rsa_pss(
modulus_length,
@@ -1698,7 +1929,7 @@ pub async fn op_node_generate_rsa_pss_key_async(
fn dsa_generate(
modulus_length: usize,
divisor_length: usize,
-) -> Result<KeyObjectHandlePair, AnyError> {
+) -> Result<KeyObjectHandlePair, deno_core::error::AnyError> {
let mut rng = rand::thread_rng();
use dsa::Components;
use dsa::KeySize;
@@ -1729,7 +1960,7 @@ fn dsa_generate(
pub fn op_node_generate_dsa_key(
#[smi] modulus_length: usize,
#[smi] divisor_length: usize,
-) -> Result<KeyObjectHandlePair, AnyError> {
+) -> Result<KeyObjectHandlePair, deno_core::error::AnyError> {
dsa_generate(modulus_length, divisor_length)
}
@@ -1738,13 +1969,15 @@ pub fn op_node_generate_dsa_key(
pub async fn op_node_generate_dsa_key_async(
#[smi] modulus_length: usize,
#[smi] divisor_length: usize,
-) -> Result<KeyObjectHandlePair, AnyError> {
+) -> Result<KeyObjectHandlePair, deno_core::error::AnyError> {
spawn_blocking(move || dsa_generate(modulus_length, divisor_length))
.await
.unwrap()
}
-fn ec_generate(named_curve: &str) -> Result<KeyObjectHandlePair, AnyError> {
+fn ec_generate(
+ named_curve: &str,
+) -> Result<KeyObjectHandlePair, deno_core::error::AnyError> {
let mut rng = rand::thread_rng();
// TODO(@littledivy): Support public key point encoding.
// Default is uncompressed.
@@ -1776,7 +2009,7 @@ fn ec_generate(named_curve: &str) -> Result<KeyObjectHandlePair, AnyError> {
#[cppgc]
pub fn op_node_generate_ec_key(
#[string] named_curve: &str,
-) -> Result<KeyObjectHandlePair, AnyError> {
+) -> Result<KeyObjectHandlePair, deno_core::error::AnyError> {
ec_generate(named_curve)
}
@@ -1784,7 +2017,7 @@ pub fn op_node_generate_ec_key(
#[cppgc]
pub async fn op_node_generate_ec_key_async(
#[string] named_curve: String,
-) -> Result<KeyObjectHandlePair, AnyError> {
+) -> Result<KeyObjectHandlePair, deno_core::error::AnyError> {
spawn_blocking(move || ec_generate(&named_curve))
.await
.unwrap()
@@ -1840,7 +2073,7 @@ fn u32_slice_to_u8_slice(slice: &[u32]) -> &[u8] {
fn dh_group_generate(
group_name: &str,
-) -> Result<KeyObjectHandlePair, AnyError> {
+) -> Result<KeyObjectHandlePair, deno_core::error::AnyError> {
let (dh, prime, generator) = match group_name {
"modp5" => (
dh::DiffieHellman::group::<dh::Modp1536>(),
@@ -1895,7 +2128,7 @@ fn dh_group_generate(
#[cppgc]
pub fn op_node_generate_dh_group_key(
#[string] group_name: &str,
-) -> Result<KeyObjectHandlePair, AnyError> {
+) -> Result<KeyObjectHandlePair, deno_core::error::AnyError> {
dh_group_generate(group_name)
}
@@ -1903,7 +2136,7 @@ pub fn op_node_generate_dh_group_key(
#[cppgc]
pub async fn op_node_generate_dh_group_key_async(
#[string] group_name: String,
-) -> Result<KeyObjectHandlePair, AnyError> {
+) -> Result<KeyObjectHandlePair, deno_core::error::AnyError> {
spawn_blocking(move || dh_group_generate(&group_name))
.await
.unwrap()
@@ -1913,7 +2146,7 @@ fn dh_generate(
prime: Option<&[u8]>,
prime_len: usize,
generator: usize,
-) -> Result<KeyObjectHandlePair, AnyError> {
+) -> KeyObjectHandlePair {
let prime = prime
.map(|p| p.into())
.unwrap_or_else(|| Prime::generate(prime_len));
@@ -1923,7 +2156,7 @@ fn dh_generate(
base: asn1::Int::new(generator.to_be_bytes().as_slice()).unwrap(),
private_value_length: None,
};
- Ok(KeyObjectHandlePair::new(
+ KeyObjectHandlePair::new(
AsymmetricPrivateKey::Dh(DhPrivateKey {
key: dh.private_key,
params: params.clone(),
@@ -1932,7 +2165,7 @@ fn dh_generate(
key: dh.public_key,
params,
}),
- ))
+ )
}
#[op2]
@@ -1941,7 +2174,7 @@ pub fn op_node_generate_dh_key(
#[buffer] prime: Option<&[u8]>,
#[smi] prime_len: usize,
#[smi] generator: usize,
-) -> Result<KeyObjectHandlePair, AnyError> {
+) -> KeyObjectHandlePair {
dh_generate(prime, prime_len, generator)
}
@@ -1951,7 +2184,7 @@ pub async fn op_node_generate_dh_key_async(
#[buffer(copy)] prime: Option<Box<[u8]>>,
#[smi] prime_len: usize,
#[smi] generator: usize,
-) -> Result<KeyObjectHandlePair, AnyError> {
+) -> KeyObjectHandlePair {
spawn_blocking(move || dh_generate(prime.as_deref(), prime_len, generator))
.await
.unwrap()
@@ -1963,21 +2196,21 @@ pub fn op_node_dh_keys_generate_and_export(
#[buffer] prime: Option<&[u8]>,
#[smi] prime_len: usize,
#[smi] generator: usize,
-) -> Result<(ToJsBuffer, ToJsBuffer), AnyError> {
+) -> (ToJsBuffer, ToJsBuffer) {
let prime = prime
.map(|p| p.into())
.unwrap_or_else(|| Prime::generate(prime_len));
let dh = dh::DiffieHellman::new(prime, generator);
let private_key = dh.private_key.into_vec().into_boxed_slice();
let public_key = dh.public_key.into_vec().into_boxed_slice();
- Ok((private_key.into(), public_key.into()))
+ (private_key.into(), public_key.into())
}
#[op2]
#[buffer]
pub fn op_node_export_secret_key(
#[cppgc] handle: &KeyObjectHandle,
-) -> Result<Box<[u8]>, AnyError> {
+) -> Result<Box<[u8]>, deno_core::error::AnyError> {
let key = handle
.as_secret_key()
.ok_or_else(|| type_error("key is not a secret key"))?;
@@ -1988,7 +2221,7 @@ pub fn op_node_export_secret_key(
#[string]
pub fn op_node_export_secret_key_b64url(
#[cppgc] handle: &KeyObjectHandle,
-) -> Result<String, AnyError> {
+) -> Result<String, deno_core::error::AnyError> {
let key = handle
.as_secret_key()
.ok_or_else(|| type_error("key is not a secret key"))?;
@@ -1999,23 +2232,33 @@ pub fn op_node_export_secret_key_b64url(
#[serde]
pub fn op_node_export_public_key_jwk(
#[cppgc] handle: &KeyObjectHandle,
-) -> Result<deno_core::serde_json::Value, AnyError> {
+) -> Result<deno_core::serde_json::Value, AsymmetricPublicKeyJwkError> {
let public_key = handle
.as_public_key()
- .ok_or_else(|| type_error("key is not an asymmetric public key"))?;
+ .ok_or(AsymmetricPublicKeyJwkError::KeyIsNotAsymmetricPublicKey)?;
public_key.export_jwk()
}
+#[derive(Debug, thiserror::Error)]
+pub enum ExportPublicKeyPemError {
+ #[error(transparent)]
+ AsymmetricPublicKeyDer(#[from] AsymmetricPublicKeyDerError),
+ #[error("very large data")]
+ VeryLargeData,
+ #[error(transparent)]
+ Der(#[from] der::Error),
+}
+
#[op2]
#[string]
pub fn op_node_export_public_key_pem(
#[cppgc] handle: &KeyObjectHandle,
#[string] typ: &str,
-) -> Result<String, AnyError> {
+) -> Result<String, ExportPublicKeyPemError> {
let public_key = handle
.as_public_key()
- .ok_or_else(|| type_error("key is not an asymmetric public key"))?;
+ .ok_or(AsymmetricPublicKeyDerError::KeyIsNotAsymmetricPublicKey)?;
let data = public_key.export_der(typ)?;
let label = match typ {
@@ -2024,7 +2267,9 @@ pub fn op_node_export_public_key_pem(
_ => unreachable!("export_der would have errored"),
};
- let mut out = vec![0; 2048];
+ let pem_len = der::pem::encapsulated_len(label, LineEnding::LF, data.len())
+ .map_err(|_| ExportPublicKeyPemError::VeryLargeData)?;
+ let mut out = vec![0; pem_len];
let mut writer = PemWriter::new(label, LineEnding::LF, &mut out)?;
writer.write(&data)?;
let len = writer.finish()?;
@@ -2038,22 +2283,32 @@ pub fn op_node_export_public_key_pem(
pub fn op_node_export_public_key_der(
#[cppgc] handle: &KeyObjectHandle,
#[string] typ: &str,
-) -> Result<Box<[u8]>, AnyError> {
+) -> Result<Box<[u8]>, AsymmetricPublicKeyDerError> {
let public_key = handle
.as_public_key()
- .ok_or_else(|| type_error("key is not an asymmetric public key"))?;
+ .ok_or(AsymmetricPublicKeyDerError::KeyIsNotAsymmetricPublicKey)?;
public_key.export_der(typ)
}
+#[derive(Debug, thiserror::Error)]
+pub enum ExportPrivateKeyPemError {
+ #[error(transparent)]
+ AsymmetricPublicKeyDer(#[from] AsymmetricPrivateKeyDerError),
+ #[error("very large data")]
+ VeryLargeData,
+ #[error(transparent)]
+ Der(#[from] der::Error),
+}
+
#[op2]
#[string]
pub fn op_node_export_private_key_pem(
#[cppgc] handle: &KeyObjectHandle,
#[string] typ: &str,
-) -> Result<String, AnyError> {
+) -> Result<String, ExportPrivateKeyPemError> {
let private_key = handle
.as_private_key()
- .ok_or_else(|| type_error("key is not an asymmetric private key"))?;
+ .ok_or(AsymmetricPrivateKeyDerError::KeyIsNotAsymmetricPrivateKey)?;
let data = private_key.export_der(typ)?;
let label = match typ {
@@ -2063,7 +2318,9 @@ pub fn op_node_export_private_key_pem(
_ => unreachable!("export_der would have errored"),
};
- let mut out = vec![0; 2048];
+ let pem_len = der::pem::encapsulated_len(label, LineEnding::LF, data.len())
+ .map_err(|_| ExportPrivateKeyPemError::VeryLargeData)?;
+ let mut out = vec![0; pem_len];
let mut writer = PemWriter::new(label, LineEnding::LF, &mut out)?;
writer.write(&data)?;
let len = writer.finish()?;
@@ -2077,10 +2334,10 @@ pub fn op_node_export_private_key_pem(
pub fn op_node_export_private_key_der(
#[cppgc] handle: &KeyObjectHandle,
#[string] typ: &str,
-) -> Result<Box<[u8]>, AnyError> {
+) -> Result<Box<[u8]>, AsymmetricPrivateKeyDerError> {
let private_key = handle
.as_private_key()
- .ok_or_else(|| type_error("key is not an asymmetric private key"))?;
+ .ok_or(AsymmetricPrivateKeyDerError::KeyIsNotAsymmetricPrivateKey)?;
private_key.export_der(typ)
}
@@ -2098,7 +2355,7 @@ pub fn op_node_key_type(#[cppgc] handle: &KeyObjectHandle) -> &'static str {
#[cppgc]
pub fn op_node_derive_public_key_from_private_key(
#[cppgc] handle: &KeyObjectHandle,
-) -> Result<KeyObjectHandle, AnyError> {
+) -> Result<KeyObjectHandle, deno_core::error::AnyError> {
let Some(private_key) = handle.as_private_key() else {
return Err(type_error("expected private key"));
};
diff --git a/ext/node/ops/crypto/mod.rs b/ext/node/ops/crypto/mod.rs
index 600d31558..e90e82090 100644
--- a/ext/node/ops/crypto/mod.rs
+++ b/ext/node/ops/crypto/mod.rs
@@ -1,7 +1,6 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
use deno_core::error::generic_error;
use deno_core::error::type_error;
-use deno_core::error::AnyError;
use deno_core::op2;
use deno_core::unsync::spawn_blocking;
use deno_core::JsBuffer;
@@ -34,14 +33,14 @@ use rsa::Pkcs1v15Encrypt;
use rsa::RsaPrivateKey;
use rsa::RsaPublicKey;
-mod cipher;
+pub mod cipher;
mod dh;
-mod digest;
+pub mod digest;
pub mod keys;
mod md5_sha1;
mod pkcs3;
mod primes;
-mod sign;
+pub mod sign;
pub mod x509;
use self::digest::match_fixed_digest_with_eager_block_buffer;
@@ -58,38 +57,31 @@ pub fn op_node_check_prime(
pub fn op_node_check_prime_bytes(
#[anybuffer] bytes: &[u8],
#[number] checks: usize,
-) -> Result<bool, AnyError> {
+) -> bool {
let candidate = BigInt::from_bytes_be(num_bigint::Sign::Plus, bytes);
- Ok(primes::is_probably_prime(&candidate, checks))
+ primes::is_probably_prime(&candidate, checks)
}
#[op2(async)]
pub async fn op_node_check_prime_async(
#[bigint] num: i64,
#[number] checks: usize,
-) -> Result<bool, AnyError> {
+) -> Result<bool, tokio::task::JoinError> {
// TODO(@littledivy): use rayon for CPU-bound tasks
- Ok(
- spawn_blocking(move || {
- primes::is_probably_prime(&BigInt::from(num), checks)
- })
- .await?,
- )
+ spawn_blocking(move || primes::is_probably_prime(&BigInt::from(num), checks))
+ .await
}
#[op2(async)]
pub fn op_node_check_prime_bytes_async(
#[anybuffer] bytes: &[u8],
#[number] checks: usize,
-) -> Result<impl Future<Output = Result<bool, AnyError>>, AnyError> {
+) -> impl Future<Output = Result<bool, tokio::task::JoinError>> {
let candidate = BigInt::from_bytes_be(num_bigint::Sign::Plus, bytes);
// TODO(@littledivy): use rayon for CPU-bound tasks
- Ok(async move {
- Ok(
- spawn_blocking(move || primes::is_probably_prime(&candidate, checks))
- .await?,
- )
- })
+ async move {
+ spawn_blocking(move || primes::is_probably_prime(&candidate, checks)).await
+ }
}
#[op2]
@@ -97,7 +89,7 @@ pub fn op_node_check_prime_bytes_async(
pub fn op_node_create_hash(
#[string] algorithm: &str,
output_length: Option<u32>,
-) -> Result<digest::Hasher, AnyError> {
+) -> Result<digest::Hasher, digest::HashError> {
digest::Hasher::new(algorithm, output_length.map(|l| l as usize))
}
@@ -145,17 +137,31 @@ pub fn op_node_hash_digest_hex(
pub fn op_node_hash_clone(
#[cppgc] hasher: &digest::Hasher,
output_length: Option<u32>,
-) -> Result<Option<digest::Hasher>, AnyError> {
+) -> Result<Option<digest::Hasher>, digest::HashError> {
hasher.clone_inner(output_length.map(|l| l as usize))
}
+#[derive(Debug, thiserror::Error)]
+pub enum PrivateEncryptDecryptError {
+ #[error(transparent)]
+ Pkcs8(#[from] pkcs8::Error),
+ #[error(transparent)]
+ Spki(#[from] spki::Error),
+ #[error(transparent)]
+ Utf8(#[from] std::str::Utf8Error),
+ #[error(transparent)]
+ Rsa(#[from] rsa::Error),
+ #[error("Unknown padding")]
+ UnknownPadding,
+}
+
#[op2]
#[serde]
pub fn op_node_private_encrypt(
#[serde] key: StringOrBuffer,
#[serde] msg: StringOrBuffer,
#[smi] padding: u32,
-) -> Result<ToJsBuffer, AnyError> {
+) -> Result<ToJsBuffer, PrivateEncryptDecryptError> {
let key = RsaPrivateKey::from_pkcs8_pem((&key).try_into()?)?;
let mut rng = rand::thread_rng();
@@ -172,7 +178,7 @@ pub fn op_node_private_encrypt(
.encrypt(&mut rng, Oaep::new::<sha1::Sha1>(), &msg)?
.into(),
),
- _ => Err(type_error("Unknown padding")),
+ _ => Err(PrivateEncryptDecryptError::UnknownPadding),
}
}
@@ -182,13 +188,13 @@ pub fn op_node_private_decrypt(
#[serde] key: StringOrBuffer,
#[serde] msg: StringOrBuffer,
#[smi] padding: u32,
-) -> Result<ToJsBuffer, AnyError> {
+) -> Result<ToJsBuffer, PrivateEncryptDecryptError> {
let key = RsaPrivateKey::from_pkcs8_pem((&key).try_into()?)?;
match padding {
1 => Ok(key.decrypt(Pkcs1v15Encrypt, &msg)?.into()),
4 => Ok(key.decrypt(Oaep::new::<sha1::Sha1>(), &msg)?.into()),
- _ => Err(type_error("Unknown padding")),
+ _ => Err(PrivateEncryptDecryptError::UnknownPadding),
}
}
@@ -198,7 +204,7 @@ pub fn op_node_public_encrypt(
#[serde] key: StringOrBuffer,
#[serde] msg: StringOrBuffer,
#[smi] padding: u32,
-) -> Result<ToJsBuffer, AnyError> {
+) -> Result<ToJsBuffer, PrivateEncryptDecryptError> {
let key = RsaPublicKey::from_public_key_pem((&key).try_into()?)?;
let mut rng = rand::thread_rng();
@@ -209,7 +215,7 @@ pub fn op_node_public_encrypt(
.encrypt(&mut rng, Oaep::new::<sha1::Sha1>(), &msg)?
.into(),
),
- _ => Err(type_error("Unknown padding")),
+ _ => Err(PrivateEncryptDecryptError::UnknownPadding),
}
}
@@ -220,7 +226,7 @@ pub fn op_node_create_cipheriv(
#[string] algorithm: &str,
#[buffer] key: &[u8],
#[buffer] iv: &[u8],
-) -> Result<u32, AnyError> {
+) -> Result<u32, cipher::CipherContextError> {
let context = cipher::CipherContext::new(algorithm, key, iv)?;
Ok(state.resource_table.add(context))
}
@@ -262,11 +268,14 @@ pub fn op_node_cipheriv_final(
auto_pad: bool,
#[buffer] input: &[u8],
#[anybuffer] output: &mut [u8],
-) -> Result<Option<Vec<u8>>, AnyError> {
- let context = state.resource_table.take::<cipher::CipherContext>(rid)?;
+) -> Result<Option<Vec<u8>>, cipher::CipherContextError> {
+ let context = state
+ .resource_table
+ .take::<cipher::CipherContext>(rid)
+ .map_err(cipher::CipherContextError::Resource)?;
let context = Rc::try_unwrap(context)
- .map_err(|_| type_error("Cipher context is already in use"))?;
- context.r#final(auto_pad, input, output)
+ .map_err(|_| cipher::CipherContextError::ContextInUse)?;
+ context.r#final(auto_pad, input, output).map_err(Into::into)
}
#[op2]
@@ -274,10 +283,13 @@ pub fn op_node_cipheriv_final(
pub fn op_node_cipheriv_take(
state: &mut OpState,
#[smi] rid: u32,
-) -> Result<Option<Vec<u8>>, AnyError> {
- let context = state.resource_table.take::<cipher::CipherContext>(rid)?;
+) -> Result<Option<Vec<u8>>, cipher::CipherContextError> {
+ let context = state
+ .resource_table
+ .take::<cipher::CipherContext>(rid)
+ .map_err(cipher::CipherContextError::Resource)?;
let context = Rc::try_unwrap(context)
- .map_err(|_| type_error("Cipher context is already in use"))?;
+ .map_err(|_| cipher::CipherContextError::ContextInUse)?;
Ok(context.take_tag())
}
@@ -288,7 +300,7 @@ pub fn op_node_create_decipheriv(
#[string] algorithm: &str,
#[buffer] key: &[u8],
#[buffer] iv: &[u8],
-) -> Result<u32, AnyError> {
+) -> Result<u32, cipher::DecipherContextError> {
let context = cipher::DecipherContext::new(algorithm, key, iv)?;
Ok(state.resource_table.add(context))
}
@@ -326,10 +338,13 @@ pub fn op_node_decipheriv_decrypt(
pub fn op_node_decipheriv_take(
state: &mut OpState,
#[smi] rid: u32,
-) -> Result<(), AnyError> {
- let context = state.resource_table.take::<cipher::DecipherContext>(rid)?;
+) -> Result<(), cipher::DecipherContextError> {
+ let context = state
+ .resource_table
+ .take::<cipher::DecipherContext>(rid)
+ .map_err(cipher::DecipherContextError::Resource)?;
Rc::try_unwrap(context)
- .map_err(|_| type_error("Cipher context is already in use"))?;
+ .map_err(|_| cipher::DecipherContextError::ContextInUse)?;
Ok(())
}
@@ -341,11 +356,16 @@ pub fn op_node_decipheriv_final(
#[buffer] input: &[u8],
#[anybuffer] output: &mut [u8],
#[buffer] auth_tag: &[u8],
-) -> Result<(), AnyError> {
- let context = state.resource_table.take::<cipher::DecipherContext>(rid)?;
+) -> Result<(), cipher::DecipherContextError> {
+ let context = state
+ .resource_table
+ .take::<cipher::DecipherContext>(rid)
+ .map_err(cipher::DecipherContextError::Resource)?;
let context = Rc::try_unwrap(context)
- .map_err(|_| type_error("Cipher context is already in use"))?;
- context.r#final(auto_pad, input, output, auth_tag)
+ .map_err(|_| cipher::DecipherContextError::ContextInUse)?;
+ context
+ .r#final(auto_pad, input, output, auth_tag)
+ .map_err(Into::into)
}
#[op2]
@@ -356,7 +376,7 @@ pub fn op_node_sign(
#[string] digest_type: &str,
#[smi] pss_salt_length: Option<u32>,
#[smi] dsa_signature_encoding: u32,
-) -> Result<Box<[u8]>, AnyError> {
+) -> Result<Box<[u8]>, sign::KeyObjectHandlePrehashedSignAndVerifyError> {
handle.sign_prehashed(
digest_type,
digest,
@@ -373,7 +393,7 @@ pub fn op_node_verify(
#[buffer] signature: &[u8],
#[smi] pss_salt_length: Option<u32>,
#[smi] dsa_signature_encoding: u32,
-) -> Result<bool, AnyError> {
+) -> Result<bool, sign::KeyObjectHandlePrehashedSignAndVerifyError> {
handle.verify_prehashed(
digest_type,
digest,
@@ -383,13 +403,21 @@ pub fn op_node_verify(
)
}
+#[derive(Debug, thiserror::Error)]
+pub enum Pbkdf2Error {
+ #[error("unsupported digest: {0}")]
+ UnsupportedDigest(String),
+ #[error(transparent)]
+ Join(#[from] tokio::task::JoinError),
+}
+
fn pbkdf2_sync(
password: &[u8],
salt: &[u8],
iterations: u32,
algorithm_name: &str,
derived_key: &mut [u8],
-) -> Result<(), AnyError> {
+) -> Result<(), Pbkdf2Error> {
match_fixed_digest_with_eager_block_buffer!(
algorithm_name,
fn <D>() {
@@ -397,10 +425,7 @@ fn pbkdf2_sync(
Ok(())
},
_ => {
- Err(type_error(format!(
- "unsupported digest: {}",
- algorithm_name
- )))
+ Err(Pbkdf2Error::UnsupportedDigest(algorithm_name.to_string()))
}
)
}
@@ -424,7 +449,7 @@ pub async fn op_node_pbkdf2_async(
#[smi] iterations: u32,
#[string] digest: String,
#[number] keylen: usize,
-) -> Result<ToJsBuffer, AnyError> {
+) -> Result<ToJsBuffer, Pbkdf2Error> {
spawn_blocking(move || {
let mut derived_key = vec![0; keylen];
pbkdf2_sync(&password, &salt, iterations, &digest, &mut derived_key)
@@ -450,15 +475,27 @@ pub async fn op_node_fill_random_async(#[smi] len: i32) -> ToJsBuffer {
.unwrap()
}
+#[derive(Debug, thiserror::Error)]
+pub enum HkdfError {
+ #[error("expected secret key")]
+ ExpectedSecretKey,
+ #[error("HKDF-Expand failed")]
+ HkdfExpandFailed,
+ #[error("Unsupported digest: {0}")]
+ UnsupportedDigest(String),
+ #[error(transparent)]
+ Join(#[from] tokio::task::JoinError),
+}
+
fn hkdf_sync(
digest_algorithm: &str,
handle: &KeyObjectHandle,
salt: &[u8],
info: &[u8],
okm: &mut [u8],
-) -> Result<(), AnyError> {
+) -> Result<(), HkdfError> {
let Some(ikm) = handle.as_secret_key() else {
- return Err(type_error("expected secret key"));
+ return Err(HkdfError::ExpectedSecretKey);
};
match_fixed_digest_with_eager_block_buffer!(
@@ -466,10 +503,10 @@ fn hkdf_sync(
fn <D>() {
let hk = Hkdf::<D>::new(Some(salt), ikm);
hk.expand(info, okm)
- .map_err(|_| type_error("HKDF-Expand failed"))
+ .map_err(|_| HkdfError::HkdfExpandFailed)
},
_ => {
- Err(type_error(format!("Unsupported digest: {}", digest_algorithm)))
+ Err(HkdfError::UnsupportedDigest(digest_algorithm.to_string()))
}
)
}
@@ -481,7 +518,7 @@ pub fn op_node_hkdf(
#[buffer] salt: &[u8],
#[buffer] info: &[u8],
#[buffer] okm: &mut [u8],
-) -> Result<(), AnyError> {
+) -> Result<(), HkdfError> {
hkdf_sync(digest_algorithm, handle, salt, info, okm)
}
@@ -493,7 +530,7 @@ pub async fn op_node_hkdf_async(
#[buffer] salt: JsBuffer,
#[buffer] info: JsBuffer,
#[number] okm_len: usize,
-) -> Result<ToJsBuffer, AnyError> {
+) -> Result<ToJsBuffer, HkdfError> {
let handle = handle.clone();
spawn_blocking(move || {
let mut okm = vec![0u8; okm_len];
@@ -509,27 +546,24 @@ pub fn op_node_dh_compute_secret(
#[buffer] prime: JsBuffer,
#[buffer] private_key: JsBuffer,
#[buffer] their_public_key: JsBuffer,
-) -> Result<ToJsBuffer, AnyError> {
+) -> ToJsBuffer {
let pubkey: BigUint = BigUint::from_bytes_be(their_public_key.as_ref());
let privkey: BigUint = BigUint::from_bytes_be(private_key.as_ref());
let primei: BigUint = BigUint::from_bytes_be(prime.as_ref());
let shared_secret: BigUint = pubkey.modpow(&privkey, &primei);
- Ok(shared_secret.to_bytes_be().into())
+ shared_secret.to_bytes_be().into()
}
#[op2(fast)]
-#[smi]
-pub fn op_node_random_int(
- #[smi] min: i32,
- #[smi] max: i32,
-) -> Result<i32, AnyError> {
+#[number]
+pub fn op_node_random_int(#[number] min: i64, #[number] max: i64) -> i64 {
let mut rng = rand::thread_rng();
// Uniform distribution is required to avoid Modulo Bias
// https://en.wikipedia.org/wiki/Fisher–Yates_shuffle#Modulo_bias
let dist = Uniform::from(min..max);
- Ok(dist.sample(&mut rng))
+ dist.sample(&mut rng)
}
#[allow(clippy::too_many_arguments)]
@@ -542,7 +576,7 @@ fn scrypt(
parallelization: u32,
_maxmem: u32,
output_buffer: &mut [u8],
-) -> Result<(), AnyError> {
+) -> Result<(), deno_core::error::AnyError> {
// Construct Params
let params = scrypt::Params::new(
cost as u8,
@@ -573,7 +607,7 @@ pub fn op_node_scrypt_sync(
#[smi] parallelization: u32,
#[smi] maxmem: u32,
#[anybuffer] output_buffer: &mut [u8],
-) -> Result<(), AnyError> {
+) -> Result<(), deno_core::error::AnyError> {
scrypt(
password,
salt,
@@ -586,6 +620,14 @@ pub fn op_node_scrypt_sync(
)
}
+#[derive(Debug, thiserror::Error)]
+pub enum ScryptAsyncError {
+ #[error(transparent)]
+ Join(#[from] tokio::task::JoinError),
+ #[error(transparent)]
+ Other(deno_core::error::AnyError),
+}
+
#[op2(async)]
#[serde]
pub async fn op_node_scrypt_async(
@@ -596,10 +638,11 @@ pub async fn op_node_scrypt_async(
#[smi] block_size: u32,
#[smi] parallelization: u32,
#[smi] maxmem: u32,
-) -> Result<ToJsBuffer, AnyError> {
+) -> Result<ToJsBuffer, ScryptAsyncError> {
spawn_blocking(move || {
let mut output_buffer = vec![0u8; keylen as usize];
- let res = scrypt(
+
+ scrypt(
password,
salt,
keylen,
@@ -608,25 +651,30 @@ pub async fn op_node_scrypt_async(
parallelization,
maxmem,
&mut output_buffer,
- );
-
- if res.is_ok() {
- Ok(output_buffer.into())
- } else {
- // TODO(lev): rethrow the error?
- Err(generic_error("scrypt failure"))
- }
+ )
+ .map(|_| output_buffer.into())
+ .map_err(ScryptAsyncError::Other)
})
.await?
}
+#[derive(Debug, thiserror::Error)]
+pub enum EcdhEncodePubKey {
+ #[error("Invalid public key")]
+ InvalidPublicKey,
+ #[error("Unsupported curve")]
+ UnsupportedCurve,
+ #[error(transparent)]
+ Sec1(#[from] sec1::Error),
+}
+
#[op2]
#[buffer]
pub fn op_node_ecdh_encode_pubkey(
#[string] curve: &str,
#[buffer] pubkey: &[u8],
compress: bool,
-) -> Result<Vec<u8>, AnyError> {
+) -> Result<Vec<u8>, EcdhEncodePubKey> {
use elliptic_curve::sec1::FromEncodedPoint;
match curve {
@@ -639,7 +687,7 @@ pub fn op_node_ecdh_encode_pubkey(
);
// CtOption does not expose its variants.
if pubkey.is_none().into() {
- return Err(type_error("Invalid public key"));
+ return Err(EcdhEncodePubKey::InvalidPublicKey);
}
let pubkey = pubkey.unwrap();
@@ -652,7 +700,7 @@ pub fn op_node_ecdh_encode_pubkey(
);
// CtOption does not expose its variants.
if pubkey.is_none().into() {
- return Err(type_error("Invalid public key"));
+ return Err(EcdhEncodePubKey::InvalidPublicKey);
}
let pubkey = pubkey.unwrap();
@@ -665,7 +713,7 @@ pub fn op_node_ecdh_encode_pubkey(
);
// CtOption does not expose its variants.
if pubkey.is_none().into() {
- return Err(type_error("Invalid public key"));
+ return Err(EcdhEncodePubKey::InvalidPublicKey);
}
let pubkey = pubkey.unwrap();
@@ -678,14 +726,14 @@ pub fn op_node_ecdh_encode_pubkey(
);
// CtOption does not expose its variants.
if pubkey.is_none().into() {
- return Err(type_error("Invalid public key"));
+ return Err(EcdhEncodePubKey::InvalidPublicKey);
}
let pubkey = pubkey.unwrap();
Ok(pubkey.to_encoded_point(compress).as_ref().to_vec())
}
- &_ => Err(type_error("Unsupported curve")),
+ &_ => Err(EcdhEncodePubKey::UnsupportedCurve),
}
}
@@ -695,7 +743,7 @@ pub fn op_node_ecdh_generate_keys(
#[buffer] pubbuf: &mut [u8],
#[buffer] privbuf: &mut [u8],
#[string] format: &str,
-) -> Result<(), AnyError> {
+) -> Result<(), deno_core::error::AnyError> {
let mut rng = rand::thread_rng();
let compress = format == "compressed";
match curve {
@@ -742,7 +790,7 @@ pub fn op_node_ecdh_compute_secret(
#[buffer] this_priv: Option<JsBuffer>,
#[buffer] their_pub: &mut [u8],
#[buffer] secret: &mut [u8],
-) -> Result<(), AnyError> {
+) {
match curve {
"secp256k1" => {
let their_public_key =
@@ -760,8 +808,6 @@ pub fn op_node_ecdh_compute_secret(
their_public_key.as_affine(),
);
secret.copy_from_slice(shared_secret.raw_secret_bytes());
-
- Ok(())
}
"prime256v1" | "secp256r1" => {
let their_public_key =
@@ -776,8 +822,6 @@ pub fn op_node_ecdh_compute_secret(
their_public_key.as_affine(),
);
secret.copy_from_slice(shared_secret.raw_secret_bytes());
-
- Ok(())
}
"secp384r1" => {
let their_public_key =
@@ -792,8 +836,6 @@ pub fn op_node_ecdh_compute_secret(
their_public_key.as_affine(),
);
secret.copy_from_slice(shared_secret.raw_secret_bytes());
-
- Ok(())
}
"secp224r1" => {
let their_public_key =
@@ -808,8 +850,6 @@ pub fn op_node_ecdh_compute_secret(
their_public_key.as_affine(),
);
secret.copy_from_slice(shared_secret.raw_secret_bytes());
-
- Ok(())
}
&_ => todo!(),
}
@@ -820,7 +860,7 @@ pub fn op_node_ecdh_compute_public_key(
#[string] curve: &str,
#[buffer] privkey: &[u8],
#[buffer] pubkey: &mut [u8],
-) -> Result<(), AnyError> {
+) {
match curve {
"secp256k1" => {
let this_private_key =
@@ -828,8 +868,6 @@ pub fn op_node_ecdh_compute_public_key(
.expect("bad private key");
let public_key = this_private_key.public_key();
pubkey.copy_from_slice(public_key.to_sec1_bytes().as_ref());
-
- Ok(())
}
"prime256v1" | "secp256r1" => {
let this_private_key =
@@ -837,7 +875,6 @@ pub fn op_node_ecdh_compute_public_key(
.expect("bad private key");
let public_key = this_private_key.public_key();
pubkey.copy_from_slice(public_key.to_sec1_bytes().as_ref());
- Ok(())
}
"secp384r1" => {
let this_private_key =
@@ -845,7 +882,6 @@ pub fn op_node_ecdh_compute_public_key(
.expect("bad private key");
let public_key = this_private_key.public_key();
pubkey.copy_from_slice(public_key.to_sec1_bytes().as_ref());
- Ok(())
}
"secp224r1" => {
let this_private_key =
@@ -853,7 +889,6 @@ pub fn op_node_ecdh_compute_public_key(
.expect("bad private key");
let public_key = this_private_key.public_key();
pubkey.copy_from_slice(public_key.to_sec1_bytes().as_ref());
- Ok(())
}
&_ => todo!(),
}
@@ -874,8 +909,20 @@ pub fn op_node_gen_prime(#[number] size: usize) -> ToJsBuffer {
#[serde]
pub async fn op_node_gen_prime_async(
#[number] size: usize,
-) -> Result<ToJsBuffer, AnyError> {
- Ok(spawn_blocking(move || gen_prime(size)).await?)
+) -> Result<ToJsBuffer, tokio::task::JoinError> {
+ spawn_blocking(move || gen_prime(size)).await
+}
+
+#[derive(Debug, thiserror::Error)]
+pub enum DiffieHellmanError {
+ #[error("Expected private key")]
+ ExpectedPrivateKey,
+ #[error("Expected public key")]
+ ExpectedPublicKey,
+ #[error("DH parameters mismatch")]
+ DhParametersMismatch,
+ #[error("Unsupported key type for diffie hellman, or key type mismatch")]
+ UnsupportedKeyTypeForDiffieHellmanOrKeyTypeMismatch,
}
#[op2]
@@ -883,117 +930,134 @@ pub async fn op_node_gen_prime_async(
pub fn op_node_diffie_hellman(
#[cppgc] private: &KeyObjectHandle,
#[cppgc] public: &KeyObjectHandle,
-) -> Result<Box<[u8]>, AnyError> {
+) -> Result<Box<[u8]>, DiffieHellmanError> {
let private = private
.as_private_key()
- .ok_or_else(|| type_error("Expected private key"))?;
+ .ok_or(DiffieHellmanError::ExpectedPrivateKey)?;
let public = public
.as_public_key()
- .ok_or_else(|| type_error("Expected public key"))?;
-
- let res = match (private, &*public) {
- (
- AsymmetricPrivateKey::Ec(EcPrivateKey::P224(private)),
- AsymmetricPublicKey::Ec(EcPublicKey::P224(public)),
- ) => p224::ecdh::diffie_hellman(
- private.to_nonzero_scalar(),
- public.as_affine(),
- )
- .raw_secret_bytes()
- .to_vec()
- .into_boxed_slice(),
- (
- AsymmetricPrivateKey::Ec(EcPrivateKey::P256(private)),
- AsymmetricPublicKey::Ec(EcPublicKey::P256(public)),
- ) => p256::ecdh::diffie_hellman(
- private.to_nonzero_scalar(),
- public.as_affine(),
- )
- .raw_secret_bytes()
- .to_vec()
- .into_boxed_slice(),
- (
- AsymmetricPrivateKey::Ec(EcPrivateKey::P384(private)),
- AsymmetricPublicKey::Ec(EcPublicKey::P384(public)),
- ) => p384::ecdh::diffie_hellman(
- private.to_nonzero_scalar(),
- public.as_affine(),
- )
- .raw_secret_bytes()
- .to_vec()
- .into_boxed_slice(),
- (
- AsymmetricPrivateKey::X25519(private),
- AsymmetricPublicKey::X25519(public),
- ) => private
- .diffie_hellman(public)
- .to_bytes()
- .into_iter()
- .collect(),
- (AsymmetricPrivateKey::Dh(private), AsymmetricPublicKey::Dh(public)) => {
- if private.params.prime != public.params.prime
- || private.params.base != public.params.base
- {
- return Err(type_error("DH parameters mismatch"));
+ .ok_or(DiffieHellmanError::ExpectedPublicKey)?;
+
+ let res =
+ match (private, &*public) {
+ (
+ AsymmetricPrivateKey::Ec(EcPrivateKey::P224(private)),
+ AsymmetricPublicKey::Ec(EcPublicKey::P224(public)),
+ ) => p224::ecdh::diffie_hellman(
+ private.to_nonzero_scalar(),
+ public.as_affine(),
+ )
+ .raw_secret_bytes()
+ .to_vec()
+ .into_boxed_slice(),
+ (
+ AsymmetricPrivateKey::Ec(EcPrivateKey::P256(private)),
+ AsymmetricPublicKey::Ec(EcPublicKey::P256(public)),
+ ) => p256::ecdh::diffie_hellman(
+ private.to_nonzero_scalar(),
+ public.as_affine(),
+ )
+ .raw_secret_bytes()
+ .to_vec()
+ .into_boxed_slice(),
+ (
+ AsymmetricPrivateKey::Ec(EcPrivateKey::P384(private)),
+ AsymmetricPublicKey::Ec(EcPublicKey::P384(public)),
+ ) => p384::ecdh::diffie_hellman(
+ private.to_nonzero_scalar(),
+ public.as_affine(),
+ )
+ .raw_secret_bytes()
+ .to_vec()
+ .into_boxed_slice(),
+ (
+ AsymmetricPrivateKey::X25519(private),
+ AsymmetricPublicKey::X25519(public),
+ ) => private
+ .diffie_hellman(public)
+ .to_bytes()
+ .into_iter()
+ .collect(),
+ (AsymmetricPrivateKey::Dh(private), AsymmetricPublicKey::Dh(public)) => {
+ if private.params.prime != public.params.prime
+ || private.params.base != public.params.base
+ {
+ return Err(DiffieHellmanError::DhParametersMismatch);
+ }
+
+ // OSIP - Octet-String-to-Integer primitive
+ let public_key = public.key.clone().into_vec();
+ let pubkey = BigUint::from_bytes_be(&public_key);
+
+ // Exponentiation (z = y^x mod p)
+ let prime = BigUint::from_bytes_be(private.params.prime.as_bytes());
+ let private_key = private.key.clone().into_vec();
+ let private_key = BigUint::from_bytes_be(&private_key);
+ let shared_secret = pubkey.modpow(&private_key, &prime);
+
+ shared_secret.to_bytes_be().into()
}
-
- // OSIP - Octet-String-to-Integer primitive
- let public_key = public.key.clone().into_vec();
- let pubkey = BigUint::from_bytes_be(&public_key);
-
- // Exponentiation (z = y^x mod p)
- let prime = BigUint::from_bytes_be(private.params.prime.as_bytes());
- let private_key = private.key.clone().into_vec();
- let private_key = BigUint::from_bytes_be(&private_key);
- let shared_secret = pubkey.modpow(&private_key, &prime);
-
- shared_secret.to_bytes_be().into()
- }
- _ => {
- return Err(type_error(
- "Unsupported key type for diffie hellman, or key type mismatch",
- ))
- }
- };
+ _ => return Err(
+ DiffieHellmanError::UnsupportedKeyTypeForDiffieHellmanOrKeyTypeMismatch,
+ ),
+ };
Ok(res)
}
+#[derive(Debug, thiserror::Error)]
+pub enum SignEd25519Error {
+ #[error("Expected private key")]
+ ExpectedPrivateKey,
+ #[error("Expected Ed25519 private key")]
+ ExpectedEd25519PrivateKey,
+ #[error("Invalid Ed25519 private key")]
+ InvalidEd25519PrivateKey,
+}
+
#[op2(fast)]
pub fn op_node_sign_ed25519(
#[cppgc] key: &KeyObjectHandle,
#[buffer] data: &[u8],
#[buffer] signature: &mut [u8],
-) -> Result<(), AnyError> {
+) -> Result<(), SignEd25519Error> {
let private = key
.as_private_key()
- .ok_or_else(|| type_error("Expected private key"))?;
+ .ok_or(SignEd25519Error::ExpectedPrivateKey)?;
let ed25519 = match private {
AsymmetricPrivateKey::Ed25519(private) => private,
- _ => return Err(type_error("Expected Ed25519 private key")),
+ _ => return Err(SignEd25519Error::ExpectedEd25519PrivateKey),
};
let pair = Ed25519KeyPair::from_seed_unchecked(ed25519.as_bytes().as_slice())
- .map_err(|_| type_error("Invalid Ed25519 private key"))?;
+ .map_err(|_| SignEd25519Error::InvalidEd25519PrivateKey)?;
signature.copy_from_slice(pair.sign(data).as_ref());
Ok(())
}
+#[derive(Debug, thiserror::Error)]
+pub enum VerifyEd25519Error {
+ #[error("Expected public key")]
+ ExpectedPublicKey,
+ #[error("Expected Ed25519 public key")]
+ ExpectedEd25519PublicKey,
+}
+
#[op2(fast)]
pub fn op_node_verify_ed25519(
#[cppgc] key: &KeyObjectHandle,
#[buffer] data: &[u8],
#[buffer] signature: &[u8],
-) -> Result<bool, AnyError> {
+) -> Result<bool, VerifyEd25519Error> {
let public = key
.as_public_key()
- .ok_or_else(|| type_error("Expected public key"))?;
+ .ok_or(VerifyEd25519Error::ExpectedPublicKey)?;
let ed25519 = match &*public {
AsymmetricPublicKey::Ed25519(public) => public,
- _ => return Err(type_error("Expected Ed25519 public key")),
+ _ => return Err(VerifyEd25519Error::ExpectedEd25519PublicKey),
};
let verified = ring::signature::UnparsedPublicKey::new(
diff --git a/ext/node/ops/crypto/sign.rs b/ext/node/ops/crypto/sign.rs
index b7779a5d8..30094c076 100644
--- a/ext/node/ops/crypto/sign.rs
+++ b/ext/node/ops/crypto/sign.rs
@@ -1,7 +1,4 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
-use deno_core::error::generic_error;
-use deno_core::error::type_error;
-use deno_core::error::AnyError;
use rand::rngs::OsRng;
use rsa::signature::hazmat::PrehashSigner as _;
use rsa::signature::hazmat::PrehashVerifier as _;
@@ -26,7 +23,7 @@ use elliptic_curve::FieldBytesSize;
fn dsa_signature<C: elliptic_curve::PrimeCurve>(
encoding: u32,
signature: ecdsa::Signature<C>,
-) -> Result<Box<[u8]>, AnyError>
+) -> Result<Box<[u8]>, KeyObjectHandlePrehashedSignAndVerifyError>
where
MaxSize<C>: ArrayLength<u8>,
<FieldBytesSize<C> as Add>::Output: Add<MaxOverhead> + ArrayLength<u8>,
@@ -36,10 +33,54 @@ where
0 => Ok(signature.to_der().to_bytes().to_vec().into_boxed_slice()),
// IEEE P1363
1 => Ok(signature.to_bytes().to_vec().into_boxed_slice()),
- _ => Err(type_error("invalid DSA signature encoding")),
+ _ => Err(
+ KeyObjectHandlePrehashedSignAndVerifyError::InvalidDsaSignatureEncoding,
+ ),
}
}
+#[derive(Debug, thiserror::Error)]
+pub enum KeyObjectHandlePrehashedSignAndVerifyError {
+ #[error("invalid DSA signature encoding")]
+ InvalidDsaSignatureEncoding,
+ #[error("key is not a private key")]
+ KeyIsNotPrivate,
+ #[error("digest not allowed for RSA signature: {0}")]
+ DigestNotAllowedForRsaSignature(String),
+ #[error("failed to sign digest with RSA")]
+ FailedToSignDigestWithRsa,
+ #[error("digest not allowed for RSA-PSS signature: {0}")]
+ DigestNotAllowedForRsaPssSignature(String),
+ #[error("failed to sign digest with RSA-PSS")]
+ FailedToSignDigestWithRsaPss,
+ #[error("failed to sign digest with DSA")]
+ FailedToSignDigestWithDsa,
+ #[error("rsa-pss with different mf1 hash algorithm and hash algorithm is not supported")]
+ RsaPssHashAlgorithmUnsupported,
+ #[error(
+ "private key does not allow {actual} to be used, expected {expected}"
+ )]
+ PrivateKeyDisallowsUsage { actual: String, expected: String },
+ #[error("failed to sign digest")]
+ FailedToSignDigest,
+ #[error("x25519 key cannot be used for signing")]
+ X25519KeyCannotBeUsedForSigning,
+ #[error("Ed25519 key cannot be used for prehashed signing")]
+ Ed25519KeyCannotBeUsedForPrehashedSigning,
+ #[error("DH key cannot be used for signing")]
+ DhKeyCannotBeUsedForSigning,
+ #[error("key is not a public or private key")]
+ KeyIsNotPublicOrPrivate,
+ #[error("Invalid DSA signature")]
+ InvalidDsaSignature,
+ #[error("x25519 key cannot be used for verification")]
+ X25519KeyCannotBeUsedForVerification,
+ #[error("Ed25519 key cannot be used for prehashed verification")]
+ Ed25519KeyCannotBeUsedForPrehashedVerification,
+ #[error("DH key cannot be used for verification")]
+ DhKeyCannotBeUsedForVerification,
+}
+
impl KeyObjectHandle {
pub fn sign_prehashed(
&self,
@@ -47,10 +88,10 @@ impl KeyObjectHandle {
digest: &[u8],
pss_salt_length: Option<u32>,
dsa_signature_encoding: u32,
- ) -> Result<Box<[u8]>, AnyError> {
+ ) -> Result<Box<[u8]>, KeyObjectHandlePrehashedSignAndVerifyError> {
let private_key = self
.as_private_key()
- .ok_or_else(|| type_error("key is not a private key"))?;
+ .ok_or(KeyObjectHandlePrehashedSignAndVerifyError::KeyIsNotPrivate)?;
match private_key {
AsymmetricPrivateKey::Rsa(key) => {
@@ -63,34 +104,26 @@ impl KeyObjectHandle {
rsa::pkcs1v15::Pkcs1v15Sign::new::<D>()
},
_ => {
- return Err(type_error(format!(
- "digest not allowed for RSA signature: {}",
- digest_type
- )))
+ return Err(KeyObjectHandlePrehashedSignAndVerifyError::DigestNotAllowedForRsaSignature(digest_type.to_string()))
}
)
};
let signature = signer
.sign(Some(&mut OsRng), key, digest)
- .map_err(|_| generic_error("failed to sign digest with RSA"))?;
+ .map_err(|_| KeyObjectHandlePrehashedSignAndVerifyError::FailedToSignDigestWithRsa)?;
Ok(signature.into())
}
AsymmetricPrivateKey::RsaPss(key) => {
let mut hash_algorithm = None;
let mut salt_length = None;
- match &key.details {
- Some(details) => {
- if details.hash_algorithm != details.mf1_hash_algorithm {
- return Err(type_error(
- "rsa-pss with different mf1 hash algorithm and hash algorithm is not supported",
- ));
- }
- hash_algorithm = Some(details.hash_algorithm);
- salt_length = Some(details.salt_length as usize);
+ if let Some(details) = &key.details {
+ if details.hash_algorithm != details.mf1_hash_algorithm {
+ return Err(KeyObjectHandlePrehashedSignAndVerifyError::RsaPssHashAlgorithmUnsupported);
}
- None => {}
- };
+ hash_algorithm = Some(details.hash_algorithm);
+ salt_length = Some(details.salt_length as usize);
+ }
if let Some(s) = pss_salt_length {
salt_length = Some(s as usize);
}
@@ -99,10 +132,10 @@ impl KeyObjectHandle {
fn <D>(algorithm: Option<RsaPssHashAlgorithm>) {
if let Some(hash_algorithm) = hash_algorithm.take() {
if Some(hash_algorithm) != algorithm {
- return Err(type_error(format!(
- "private key does not allow {} to be used, expected {}",
- digest_type, hash_algorithm.as_str()
- )));
+ return Err(KeyObjectHandlePrehashedSignAndVerifyError::PrivateKeyDisallowsUsage {
+ actual: digest_type.to_string(),
+ expected: hash_algorithm.as_str().to_string(),
+ });
}
}
if let Some(salt_length) = salt_length {
@@ -112,15 +145,12 @@ impl KeyObjectHandle {
}
},
_ => {
- return Err(type_error(format!(
- "digest not allowed for RSA-PSS signature: {}",
- digest_type
- )))
+ return Err(KeyObjectHandlePrehashedSignAndVerifyError::DigestNotAllowedForRsaPssSignature(digest_type.to_string()));
}
);
let signature = pss
.sign(Some(&mut OsRng), &key.key, digest)
- .map_err(|_| generic_error("failed to sign digest with RSA-PSS"))?;
+ .map_err(|_| KeyObjectHandlePrehashedSignAndVerifyError::FailedToSignDigestWithRsaPss)?;
Ok(signature.into())
}
AsymmetricPrivateKey::Dsa(key) => {
@@ -130,15 +160,12 @@ impl KeyObjectHandle {
key.sign_prehashed_rfc6979::<D>(digest)
},
_ => {
- return Err(type_error(format!(
- "digest not allowed for RSA signature: {}",
- digest_type
- )))
+ return Err(KeyObjectHandlePrehashedSignAndVerifyError::DigestNotAllowedForRsaSignature(digest_type.to_string()))
}
);
let signature =
- res.map_err(|_| generic_error("failed to sign digest with DSA"))?;
+ res.map_err(|_| KeyObjectHandlePrehashedSignAndVerifyError::FailedToSignDigestWithDsa)?;
Ok(signature.into())
}
AsymmetricPrivateKey::Ec(key) => match key {
@@ -146,7 +173,7 @@ impl KeyObjectHandle {
let signing_key = p224::ecdsa::SigningKey::from(key);
let signature: p224::ecdsa::Signature = signing_key
.sign_prehash(digest)
- .map_err(|_| type_error("failed to sign digest"))?;
+ .map_err(|_| KeyObjectHandlePrehashedSignAndVerifyError::FailedToSignDigest)?;
dsa_signature(dsa_signature_encoding, signature)
}
@@ -154,7 +181,7 @@ impl KeyObjectHandle {
let signing_key = p256::ecdsa::SigningKey::from(key);
let signature: p256::ecdsa::Signature = signing_key
.sign_prehash(digest)
- .map_err(|_| type_error("failed to sign digest"))?;
+ .map_err(|_| KeyObjectHandlePrehashedSignAndVerifyError::FailedToSignDigest)?;
dsa_signature(dsa_signature_encoding, signature)
}
@@ -162,19 +189,17 @@ impl KeyObjectHandle {
let signing_key = p384::ecdsa::SigningKey::from(key);
let signature: p384::ecdsa::Signature = signing_key
.sign_prehash(digest)
- .map_err(|_| type_error("failed to sign digest"))?;
+ .map_err(|_| KeyObjectHandlePrehashedSignAndVerifyError::FailedToSignDigest)?;
dsa_signature(dsa_signature_encoding, signature)
}
},
AsymmetricPrivateKey::X25519(_) => {
- Err(type_error("x25519 key cannot be used for signing"))
+ Err(KeyObjectHandlePrehashedSignAndVerifyError::X25519KeyCannotBeUsedForSigning)
}
- AsymmetricPrivateKey::Ed25519(_) => Err(type_error(
- "Ed25519 key cannot be used for prehashed signing",
- )),
+ AsymmetricPrivateKey::Ed25519(_) => Err(KeyObjectHandlePrehashedSignAndVerifyError::Ed25519KeyCannotBeUsedForPrehashedSigning),
AsymmetricPrivateKey::Dh(_) => {
- Err(type_error("DH key cannot be used for signing"))
+ Err(KeyObjectHandlePrehashedSignAndVerifyError::DhKeyCannotBeUsedForSigning)
}
}
}
@@ -186,10 +211,10 @@ impl KeyObjectHandle {
signature: &[u8],
pss_salt_length: Option<u32>,
dsa_signature_encoding: u32,
- ) -> Result<bool, AnyError> {
- let public_key = self
- .as_public_key()
- .ok_or_else(|| type_error("key is not a public or private key"))?;
+ ) -> Result<bool, KeyObjectHandlePrehashedSignAndVerifyError> {
+ let public_key = self.as_public_key().ok_or(
+ KeyObjectHandlePrehashedSignAndVerifyError::KeyIsNotPublicOrPrivate,
+ )?;
match &*public_key {
AsymmetricPublicKey::Rsa(key) => {
@@ -202,10 +227,7 @@ impl KeyObjectHandle {
rsa::pkcs1v15::Pkcs1v15Sign::new::<D>()
},
_ => {
- return Err(type_error(format!(
- "digest not allowed for RSA signature: {}",
- digest_type
- )))
+ return Err(KeyObjectHandlePrehashedSignAndVerifyError::DigestNotAllowedForRsaSignature(digest_type.to_string()))
}
)
};
@@ -215,18 +237,13 @@ impl KeyObjectHandle {
AsymmetricPublicKey::RsaPss(key) => {
let mut hash_algorithm = None;
let mut salt_length = None;
- match &key.details {
- Some(details) => {
- if details.hash_algorithm != details.mf1_hash_algorithm {
- return Err(type_error(
- "rsa-pss with different mf1 hash algorithm and hash algorithm is not supported",
- ));
- }
- hash_algorithm = Some(details.hash_algorithm);
- salt_length = Some(details.salt_length as usize);
+ if let Some(details) = &key.details {
+ if details.hash_algorithm != details.mf1_hash_algorithm {
+ return Err(KeyObjectHandlePrehashedSignAndVerifyError::RsaPssHashAlgorithmUnsupported);
}
- None => {}
- };
+ hash_algorithm = Some(details.hash_algorithm);
+ salt_length = Some(details.salt_length as usize);
+ }
if let Some(s) = pss_salt_length {
salt_length = Some(s as usize);
}
@@ -235,10 +252,10 @@ impl KeyObjectHandle {
fn <D>(algorithm: Option<RsaPssHashAlgorithm>) {
if let Some(hash_algorithm) = hash_algorithm.take() {
if Some(hash_algorithm) != algorithm {
- return Err(type_error(format!(
- "private key does not allow {} to be used, expected {}",
- digest_type, hash_algorithm.as_str()
- )));
+ return Err(KeyObjectHandlePrehashedSignAndVerifyError::PrivateKeyDisallowsUsage {
+ actual: digest_type.to_string(),
+ expected: hash_algorithm.as_str().to_string(),
+ });
}
}
if let Some(salt_length) = salt_length {
@@ -248,17 +265,14 @@ impl KeyObjectHandle {
}
},
_ => {
- return Err(type_error(format!(
- "digest not allowed for RSA-PSS signature: {}",
- digest_type
- )))
+ return Err(KeyObjectHandlePrehashedSignAndVerifyError::DigestNotAllowedForRsaPssSignature(digest_type.to_string()));
}
);
Ok(pss.verify(&key.key, digest, signature).is_ok())
}
AsymmetricPublicKey::Dsa(key) => {
let signature = dsa::Signature::from_der(signature)
- .map_err(|_| type_error("Invalid DSA signature"))?;
+ .map_err(|_| KeyObjectHandlePrehashedSignAndVerifyError::InvalidDsaSignature)?;
Ok(key.verify_prehash(digest, &signature).is_ok())
}
AsymmetricPublicKey::Ec(key) => match key {
@@ -300,13 +314,11 @@ impl KeyObjectHandle {
}
},
AsymmetricPublicKey::X25519(_) => {
- Err(type_error("x25519 key cannot be used for verification"))
+ Err(KeyObjectHandlePrehashedSignAndVerifyError::X25519KeyCannotBeUsedForVerification)
}
- AsymmetricPublicKey::Ed25519(_) => Err(type_error(
- "Ed25519 key cannot be used for prehashed verification",
- )),
+ AsymmetricPublicKey::Ed25519(_) => Err(KeyObjectHandlePrehashedSignAndVerifyError::Ed25519KeyCannotBeUsedForPrehashedVerification),
AsymmetricPublicKey::Dh(_) => {
- Err(type_error("DH key cannot be used for verification"))
+ Err(KeyObjectHandlePrehashedSignAndVerifyError::DhKeyCannotBeUsedForVerification)
}
}
}
diff --git a/ext/node/ops/crypto/x509.rs b/ext/node/ops/crypto/x509.rs
index b44ff3a4b..ab8e52f70 100644
--- a/ext/node/ops/crypto/x509.rs
+++ b/ext/node/ops/crypto/x509.rs
@@ -1,11 +1,11 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
-use deno_core::error::AnyError;
use deno_core::op2;
use x509_parser::der_parser::asn1_rs::Any;
use x509_parser::der_parser::asn1_rs::Tag;
use x509_parser::der_parser::oid::Oid;
+pub use x509_parser::error::X509Error;
use x509_parser::extensions;
use x509_parser::pem;
use x509_parser::prelude::*;
@@ -65,7 +65,7 @@ impl<'a> Deref for CertificateView<'a> {
#[cppgc]
pub fn op_node_x509_parse(
#[buffer] buf: &[u8],
-) -> Result<Certificate, AnyError> {
+) -> Result<Certificate, X509Error> {
let source = match pem::parse_x509_pem(buf) {
Ok((_, pem)) => CertificateSources::Pem(pem),
Err(_) => CertificateSources::Der(buf.to_vec().into_boxed_slice()),
@@ -81,7 +81,7 @@ pub fn op_node_x509_parse(
X509Certificate::from_der(buf).map(|(_, cert)| cert)?
}
};
- Ok::<_, AnyError>(CertificateView { cert })
+ Ok::<_, X509Error>(CertificateView { cert })
},
)?;
@@ -89,23 +89,23 @@ pub fn op_node_x509_parse(
}
#[op2(fast)]
-pub fn op_node_x509_ca(#[cppgc] cert: &Certificate) -> Result<bool, AnyError> {
+pub fn op_node_x509_ca(#[cppgc] cert: &Certificate) -> bool {
let cert = cert.inner.get().deref();
- Ok(cert.is_ca())
+ cert.is_ca()
}
#[op2(fast)]
pub fn op_node_x509_check_email(
#[cppgc] cert: &Certificate,
#[string] email: &str,
-) -> Result<bool, AnyError> {
+) -> bool {
let cert = cert.inner.get().deref();
let subject = cert.subject();
if subject
.iter_email()
.any(|e| e.as_str().unwrap_or("") == email)
{
- return Ok(true);
+ return true;
}
let subject_alt = cert
@@ -121,62 +121,60 @@ pub fn op_node_x509_check_email(
for name in &subject_alt.general_names {
if let extensions::GeneralName::RFC822Name(n) = name {
if *n == email {
- return Ok(true);
+ return true;
}
}
}
}
- Ok(false)
+ false
}
#[op2]
#[string]
-pub fn op_node_x509_fingerprint(
- #[cppgc] cert: &Certificate,
-) -> Result<Option<String>, AnyError> {
- Ok(cert.fingerprint::<sha1::Sha1>())
+pub fn op_node_x509_fingerprint(#[cppgc] cert: &Certificate) -> Option<String> {
+ cert.fingerprint::<sha1::Sha1>()
}
#[op2]
#[string]
pub fn op_node_x509_fingerprint256(
#[cppgc] cert: &Certificate,
-) -> Result<Option<String>, AnyError> {
- Ok(cert.fingerprint::<sha2::Sha256>())
+) -> Option<String> {
+ cert.fingerprint::<sha2::Sha256>()
}
#[op2]
#[string]
pub fn op_node_x509_fingerprint512(
#[cppgc] cert: &Certificate,
-) -> Result<Option<String>, AnyError> {
- Ok(cert.fingerprint::<sha2::Sha512>())
+) -> Option<String> {
+ cert.fingerprint::<sha2::Sha512>()
}
#[op2]
#[string]
pub fn op_node_x509_get_issuer(
#[cppgc] cert: &Certificate,
-) -> Result<String, AnyError> {
+) -> Result<String, X509Error> {
let cert = cert.inner.get().deref();
- Ok(x509name_to_string(cert.issuer(), oid_registry())?)
+ x509name_to_string(cert.issuer(), oid_registry())
}
#[op2]
#[string]
pub fn op_node_x509_get_subject(
#[cppgc] cert: &Certificate,
-) -> Result<String, AnyError> {
+) -> Result<String, X509Error> {
let cert = cert.inner.get().deref();
- Ok(x509name_to_string(cert.subject(), oid_registry())?)
+ x509name_to_string(cert.subject(), oid_registry())
}
#[op2]
#[cppgc]
pub fn op_node_x509_public_key(
#[cppgc] cert: &Certificate,
-) -> Result<KeyObjectHandle, AnyError> {
+) -> Result<KeyObjectHandle, super::keys::X509PublicKeyError> {
let cert = cert.inner.get().deref();
let public_key = &cert.tbs_certificate.subject_pki;
@@ -245,37 +243,29 @@ fn x509name_to_string(
#[op2]
#[string]
-pub fn op_node_x509_get_valid_from(
- #[cppgc] cert: &Certificate,
-) -> Result<String, AnyError> {
+pub fn op_node_x509_get_valid_from(#[cppgc] cert: &Certificate) -> String {
let cert = cert.inner.get().deref();
- Ok(cert.validity().not_before.to_string())
+ cert.validity().not_before.to_string()
}
#[op2]
#[string]
-pub fn op_node_x509_get_valid_to(
- #[cppgc] cert: &Certificate,
-) -> Result<String, AnyError> {
+pub fn op_node_x509_get_valid_to(#[cppgc] cert: &Certificate) -> String {
let cert = cert.inner.get().deref();
- Ok(cert.validity().not_after.to_string())
+ cert.validity().not_after.to_string()
}
#[op2]
#[string]
-pub fn op_node_x509_get_serial_number(
- #[cppgc] cert: &Certificate,
-) -> Result<String, AnyError> {
+pub fn op_node_x509_get_serial_number(#[cppgc] cert: &Certificate) -> String {
let cert = cert.inner.get().deref();
let mut s = cert.serial.to_str_radix(16);
s.make_ascii_uppercase();
- Ok(s)
+ s
}
#[op2(fast)]
-pub fn op_node_x509_key_usage(
- #[cppgc] cert: &Certificate,
-) -> Result<u16, AnyError> {
+pub fn op_node_x509_key_usage(#[cppgc] cert: &Certificate) -> u16 {
let cert = cert.inner.get().deref();
let key_usage = cert
.extensions()
@@ -286,5 +276,5 @@ pub fn op_node_x509_key_usage(
_ => None,
});
- Ok(key_usage.map(|k| k.flags).unwrap_or(0))
+ key_usage.map(|k| k.flags).unwrap_or(0)
}
diff --git a/ext/node/ops/fs.rs b/ext/node/ops/fs.rs
index 6253f32d0..9c0e4e1cc 100644
--- a/ext/node/ops/fs.rs
+++ b/ext/node/ops/fs.rs
@@ -3,7 +3,6 @@
use std::cell::RefCell;
use std::rc::Rc;
-use deno_core::error::AnyError;
use deno_core::op2;
use deno_core::OpState;
use deno_fs::FileSystemRc;
@@ -11,11 +10,27 @@ use serde::Serialize;
use crate::NodePermissions;
+#[derive(Debug, thiserror::Error)]
+pub enum FsError {
+ #[error(transparent)]
+ Permission(#[from] deno_permissions::PermissionCheckError),
+ #[error("{0}")]
+ Io(#[from] std::io::Error),
+ #[cfg(windows)]
+ #[error("Path has no root.")]
+ PathHasNoRoot,
+ #[cfg(not(any(unix, windows)))]
+ #[error("Unsupported platform.")]
+ UnsupportedPlatform,
+ #[error(transparent)]
+ Fs(#[from] deno_io::fs::FsError),
+}
+
#[op2(fast)]
pub fn op_node_fs_exists_sync<P>(
state: &mut OpState,
#[string] path: String,
-) -> Result<bool, AnyError>
+) -> Result<bool, deno_core::error::AnyError>
where
P: NodePermissions + 'static,
{
@@ -30,7 +45,7 @@ where
pub async fn op_node_fs_exists<P>(
state: Rc<RefCell<OpState>>,
#[string] path: String,
-) -> Result<bool, AnyError>
+) -> Result<bool, FsError>
where
P: NodePermissions + 'static,
{
@@ -50,7 +65,7 @@ pub fn op_node_cp_sync<P>(
state: &mut OpState,
#[string] path: &str,
#[string] new_path: &str,
-) -> Result<(), AnyError>
+) -> Result<(), FsError>
where
P: NodePermissions + 'static,
{
@@ -71,7 +86,7 @@ pub async fn op_node_cp<P>(
state: Rc<RefCell<OpState>>,
#[string] path: String,
#[string] new_path: String,
-) -> Result<(), AnyError>
+) -> Result<(), FsError>
where
P: NodePermissions + 'static,
{
@@ -108,7 +123,7 @@ pub fn op_node_statfs<P>(
state: Rc<RefCell<OpState>>,
#[string] path: String,
bigint: bool,
-) -> Result<StatFs, AnyError>
+) -> Result<StatFs, FsError>
where
P: NodePermissions + 'static,
{
@@ -130,13 +145,21 @@ where
let mut cpath = path.as_bytes().to_vec();
cpath.push(0);
if bigint {
- #[cfg(not(target_os = "macos"))]
+ #[cfg(not(any(
+ target_os = "macos",
+ target_os = "freebsd",
+ target_os = "openbsd"
+ )))]
// SAFETY: `cpath` is NUL-terminated and result is pointer to valid statfs memory.
let (code, result) = unsafe {
let mut result: libc::statfs64 = std::mem::zeroed();
(libc::statfs64(cpath.as_ptr() as _, &mut result), result)
};
- #[cfg(target_os = "macos")]
+ #[cfg(any(
+ target_os = "macos",
+ target_os = "freebsd",
+ target_os = "openbsd"
+ ))]
// SAFETY: `cpath` is NUL-terminated and result is pointer to valid statfs memory.
let (code, result) = unsafe {
let mut result: libc::statfs = std::mem::zeroed();
@@ -146,7 +169,10 @@ where
return Err(std::io::Error::last_os_error().into());
}
Ok(StatFs {
+ #[cfg(not(target_os = "openbsd"))]
typ: result.f_type as _,
+ #[cfg(target_os = "openbsd")]
+ typ: 0 as _,
bsize: result.f_bsize as _,
blocks: result.f_blocks as _,
bfree: result.f_bfree as _,
@@ -164,7 +190,10 @@ where
return Err(std::io::Error::last_os_error().into());
}
Ok(StatFs {
+ #[cfg(not(target_os = "openbsd"))]
typ: result.f_type as _,
+ #[cfg(target_os = "openbsd")]
+ typ: 0 as _,
bsize: result.f_bsize as _,
blocks: result.f_blocks as _,
bfree: result.f_bfree as _,
@@ -176,7 +205,6 @@ where
}
#[cfg(windows)]
{
- use deno_core::anyhow::anyhow;
use std::ffi::OsStr;
use std::os::windows::ffi::OsStrExt;
use windows_sys::Win32::Storage::FileSystem::GetDiskFreeSpaceW;
@@ -186,10 +214,7 @@ where
// call below.
#[allow(clippy::disallowed_methods)]
let path = path.canonicalize()?;
- let root = path
- .ancestors()
- .last()
- .ok_or(anyhow!("Path has no root."))?;
+ let root = path.ancestors().last().ok_or(FsError::PathHasNoRoot)?;
let mut root = OsStr::new(root).encode_wide().collect::<Vec<_>>();
root.push(0);
let mut sectors_per_cluster = 0;
@@ -229,7 +254,7 @@ where
{
let _ = path;
let _ = bigint;
- Err(anyhow!("Unsupported platform."))
+ Err(FsError::UnsupportedPlatform)
}
}
@@ -241,7 +266,7 @@ pub fn op_node_lutimes_sync<P>(
#[smi] atime_nanos: u32,
#[number] mtime_secs: i64,
#[smi] mtime_nanos: u32,
-) -> Result<(), AnyError>
+) -> Result<(), FsError>
where
P: NodePermissions + 'static,
{
@@ -262,7 +287,7 @@ pub async fn op_node_lutimes<P>(
#[smi] atime_nanos: u32,
#[number] mtime_secs: i64,
#[smi] mtime_nanos: u32,
-) -> Result<(), AnyError>
+) -> Result<(), FsError>
where
P: NodePermissions + 'static,
{
@@ -286,7 +311,7 @@ pub fn op_node_lchown_sync<P>(
#[string] path: String,
uid: Option<u32>,
gid: Option<u32>,
-) -> Result<(), AnyError>
+) -> Result<(), FsError>
where
P: NodePermissions + 'static,
{
@@ -304,7 +329,7 @@ pub async fn op_node_lchown<P>(
#[string] path: String,
uid: Option<u32>,
gid: Option<u32>,
-) -> Result<(), AnyError>
+) -> Result<(), FsError>
where
P: NodePermissions + 'static,
{
diff --git a/ext/node/ops/http.rs b/ext/node/ops/http.rs
index 773902ded..69571078f 100644
--- a/ext/node/ops/http.rs
+++ b/ext/node/ops/http.rs
@@ -8,14 +8,12 @@ use std::task::Context;
use std::task::Poll;
use bytes::Bytes;
-use deno_core::anyhow;
-use deno_core::error::type_error;
-use deno_core::error::AnyError;
use deno_core::futures::stream::Peekable;
use deno_core::futures::Future;
use deno_core::futures::FutureExt;
use deno_core::futures::Stream;
use deno_core::futures::StreamExt;
+use deno_core::futures::TryFutureExt;
use deno_core::op2;
use deno_core::serde::Serialize;
use deno_core::unsync::spawn;
@@ -33,6 +31,7 @@ use deno_core::Resource;
use deno_core::ResourceId;
use deno_fetch::get_or_create_client_from_state;
use deno_fetch::FetchCancelHandle;
+use deno_fetch::FetchError;
use deno_fetch::FetchRequestResource;
use deno_fetch::FetchReturn;
use deno_fetch::HttpClientResource;
@@ -59,12 +58,15 @@ pub fn op_node_http_request<P>(
#[serde] headers: Vec<(ByteString, ByteString)>,
#[smi] client_rid: Option<u32>,
#[smi] body: Option<ResourceId>,
-) -> Result<FetchReturn, AnyError>
+) -> Result<FetchReturn, FetchError>
where
P: crate::NodePermissions + 'static,
{
let client = if let Some(rid) = client_rid {
- let r = state.resource_table.get::<HttpClientResource>(rid)?;
+ let r = state
+ .resource_table
+ .get::<HttpClientResource>(rid)
+ .map_err(FetchError::Resource)?;
r.client.clone()
} else {
get_or_create_client_from_state(state)?
@@ -81,10 +83,8 @@ where
let mut header_map = HeaderMap::new();
for (key, value) in headers {
- let name = HeaderName::from_bytes(&key)
- .map_err(|err| type_error(err.to_string()))?;
- let v = HeaderValue::from_bytes(&value)
- .map_err(|err| type_error(err.to_string()))?;
+ let name = HeaderName::from_bytes(&key)?;
+ let v = HeaderValue::from_bytes(&value)?;
header_map.append(name, v);
}
@@ -92,7 +92,10 @@ where
let (body, con_len) = if let Some(body) = body {
(
BodyExt::boxed(NodeHttpResourceToBodyAdapter::new(
- state.resource_table.take_any(body)?,
+ state
+ .resource_table
+ .take_any(body)
+ .map_err(FetchError::Resource)?,
)),
None,
)
@@ -117,7 +120,7 @@ where
*request.uri_mut() = url
.as_str()
.parse()
- .map_err(|_| type_error("Invalid URL"))?;
+ .map_err(|_| FetchError::InvalidUrl(url.clone()))?;
*request.headers_mut() = header_map;
if let Some((username, password)) = maybe_authority {
@@ -136,9 +139,9 @@ where
let fut = async move {
client
.send(request)
+ .map_err(Into::into)
.or_cancel(cancel_handle_)
.await
- .map(|res| res.map_err(|err| type_error(err.to_string())))
};
let request_rid = state.resource_table.add(FetchRequestResource {
@@ -174,11 +177,12 @@ pub struct NodeHttpFetchResponse {
pub async fn op_node_http_fetch_send(
state: Rc<RefCell<OpState>>,
#[smi] rid: ResourceId,
-) -> Result<NodeHttpFetchResponse, AnyError> {
+) -> Result<NodeHttpFetchResponse, FetchError> {
let request = state
.borrow_mut()
.resource_table
- .take::<FetchRequestResource>(rid)?;
+ .take::<FetchRequestResource>(rid)
+ .map_err(FetchError::Resource)?;
let request = Rc::try_unwrap(request)
.ok()
@@ -191,22 +195,23 @@ pub async fn op_node_http_fetch_send(
// If any error in the chain is a hyper body error, return that as a special result we can use to
// reconstruct an error chain (eg: `new TypeError(..., { cause: new Error(...) })`).
// TODO(mmastrac): it would be a lot easier if we just passed a v8::Global through here instead
- let mut err_ref: &dyn std::error::Error = err.as_ref();
- while let Some(err) = std::error::Error::source(err_ref) {
- if let Some(err) = err.downcast_ref::<hyper::Error>() {
- if let Some(err) = std::error::Error::source(err) {
- return Ok(NodeHttpFetchResponse {
- error: Some(err.to_string()),
- ..Default::default()
- });
+
+ if let FetchError::ClientSend(err_src) = &err {
+ if let Some(client_err) = std::error::Error::source(&err_src.source) {
+ if let Some(err_src) = client_err.downcast_ref::<hyper::Error>() {
+ if let Some(err_src) = std::error::Error::source(err_src) {
+ return Ok(NodeHttpFetchResponse {
+ error: Some(err_src.to_string()),
+ ..Default::default()
+ });
+ }
}
}
- err_ref = err;
}
- return Err(type_error(err.to_string()));
+ return Err(err);
}
- Err(_) => return Err(type_error("request was cancelled")),
+ Err(_) => return Err(FetchError::RequestCanceled),
};
let status = res.status();
@@ -250,11 +255,12 @@ pub async fn op_node_http_fetch_send(
pub async fn op_node_http_fetch_response_upgrade(
state: Rc<RefCell<OpState>>,
#[smi] rid: ResourceId,
-) -> Result<ResourceId, AnyError> {
+) -> Result<ResourceId, FetchError> {
let raw_response = state
.borrow_mut()
.resource_table
- .take::<NodeHttpFetchResponseResource>(rid)?;
+ .take::<NodeHttpFetchResponseResource>(rid)
+ .map_err(FetchError::Resource)?;
let raw_response = Rc::try_unwrap(raw_response)
.expect("Someone is holding onto NodeHttpFetchResponseResource");
@@ -277,7 +283,7 @@ pub async fn op_node_http_fetch_response_upgrade(
}
read_tx.write_all(&buf[..read]).await?;
}
- Ok::<_, AnyError>(())
+ Ok::<_, FetchError>(())
});
spawn(async move {
let mut buf = [0; 1024];
@@ -288,7 +294,7 @@ pub async fn op_node_http_fetch_response_upgrade(
}
upgraded_tx.write_all(&buf[..read]).await?;
}
- Ok::<_, AnyError>(())
+ Ok::<_, FetchError>(())
});
}
@@ -318,23 +324,26 @@ impl UpgradeStream {
}
}
- async fn read(self: Rc<Self>, buf: &mut [u8]) -> Result<usize, AnyError> {
+ async fn read(
+ self: Rc<Self>,
+ buf: &mut [u8],
+ ) -> Result<usize, std::io::Error> {
let cancel_handle = RcRef::map(self.clone(), |this| &this.cancel_handle);
async {
let read = RcRef::map(self, |this| &this.read);
let mut read = read.borrow_mut().await;
- Ok(Pin::new(&mut *read).read(buf).await?)
+ Pin::new(&mut *read).read(buf).await
}
.try_or_cancel(cancel_handle)
.await
}
- async fn write(self: Rc<Self>, buf: &[u8]) -> Result<usize, AnyError> {
+ async fn write(self: Rc<Self>, buf: &[u8]) -> Result<usize, std::io::Error> {
let cancel_handle = RcRef::map(self.clone(), |this| &this.cancel_handle);
async {
let write = RcRef::map(self, |this| &this.write);
let mut write = write.borrow_mut().await;
- Ok(Pin::new(&mut *write).write(buf).await?)
+ Pin::new(&mut *write).write(buf).await
}
.try_or_cancel(cancel_handle)
.await
@@ -387,7 +396,7 @@ impl NodeHttpFetchResponseResource {
}
}
- pub async fn upgrade(self) -> Result<hyper::upgrade::Upgraded, AnyError> {
+ pub async fn upgrade(self) -> Result<hyper::upgrade::Upgraded, hyper::Error> {
let reader = self.response_reader.into_inner();
match reader {
NodeHttpFetchResponseReader::Start(resp) => {
@@ -445,7 +454,9 @@ impl Resource for NodeHttpFetchResponseResource {
// safely call `await` on it without creating a race condition.
Some(_) => match reader.as_mut().next().await.unwrap() {
Ok(chunk) => assert!(chunk.is_empty()),
- Err(err) => break Err(type_error(err.to_string())),
+ Err(err) => {
+ break Err(deno_core::error::type_error(err.to_string()))
+ }
},
None => break Ok(BufView::empty()),
}
@@ -453,7 +464,7 @@ impl Resource for NodeHttpFetchResponseResource {
};
let cancel_handle = RcRef::map(self, |r| &r.cancel);
- fut.try_or_cancel(cancel_handle).await
+ fut.try_or_cancel(cancel_handle).await.map_err(Into::into)
})
}
@@ -469,7 +480,9 @@ impl Resource for NodeHttpFetchResponseResource {
#[allow(clippy::type_complexity)]
pub struct NodeHttpResourceToBodyAdapter(
Rc<dyn Resource>,
- Option<Pin<Box<dyn Future<Output = Result<BufView, anyhow::Error>>>>>,
+ Option<
+ Pin<Box<dyn Future<Output = Result<BufView, deno_core::anyhow::Error>>>>,
+ >,
);
impl NodeHttpResourceToBodyAdapter {
@@ -485,7 +498,7 @@ unsafe impl Send for NodeHttpResourceToBodyAdapter {}
unsafe impl Sync for NodeHttpResourceToBodyAdapter {}
impl Stream for NodeHttpResourceToBodyAdapter {
- type Item = Result<Bytes, anyhow::Error>;
+ type Item = Result<Bytes, deno_core::anyhow::Error>;
fn poll_next(
self: Pin<&mut Self>,
@@ -515,7 +528,7 @@ impl Stream for NodeHttpResourceToBodyAdapter {
impl hyper::body::Body for NodeHttpResourceToBodyAdapter {
type Data = Bytes;
- type Error = anyhow::Error;
+ type Error = deno_core::anyhow::Error;
fn poll_frame(
self: Pin<&mut Self>,
diff --git a/ext/node/ops/http2.rs b/ext/node/ops/http2.rs
index 9595cb33d..53dada9f4 100644
--- a/ext/node/ops/http2.rs
+++ b/ext/node/ops/http2.rs
@@ -7,7 +7,6 @@ use std::rc::Rc;
use std::task::Poll;
use bytes::Bytes;
-use deno_core::error::AnyError;
use deno_core::futures::future::poll_fn;
use deno_core::op2;
use deno_core::serde::Serialize;
@@ -110,17 +109,28 @@ impl Resource for Http2ServerSendResponse {
}
}
+#[derive(Debug, thiserror::Error)]
+pub enum Http2Error {
+ #[error(transparent)]
+ Resource(deno_core::error::AnyError),
+ #[error(transparent)]
+ UrlParse(#[from] url::ParseError),
+ #[error(transparent)]
+ H2(#[from] h2::Error),
+}
+
#[op2(async)]
#[serde]
pub async fn op_http2_connect(
state: Rc<RefCell<OpState>>,
#[smi] rid: ResourceId,
#[string] url: String,
-) -> Result<(ResourceId, ResourceId), AnyError> {
+) -> Result<(ResourceId, ResourceId), Http2Error> {
// No permission check necessary because we're using an existing connection
let network_stream = {
let mut state = state.borrow_mut();
- take_network_stream_resource(&mut state.resource_table, rid)?
+ take_network_stream_resource(&mut state.resource_table, rid)
+ .map_err(Http2Error::Resource)?
};
let url = Url::parse(&url)?;
@@ -144,9 +154,10 @@ pub async fn op_http2_connect(
pub async fn op_http2_listen(
state: Rc<RefCell<OpState>>,
#[smi] rid: ResourceId,
-) -> Result<ResourceId, AnyError> {
+) -> Result<ResourceId, Http2Error> {
let stream =
- take_network_stream_resource(&mut state.borrow_mut().resource_table, rid)?;
+ take_network_stream_resource(&mut state.borrow_mut().resource_table, rid)
+ .map_err(Http2Error::Resource)?;
let conn = h2::server::Builder::new().handshake(stream).await?;
Ok(
@@ -166,12 +177,13 @@ pub async fn op_http2_accept(
#[smi] rid: ResourceId,
) -> Result<
Option<(Vec<(ByteString, ByteString)>, ResourceId, ResourceId)>,
- AnyError,
+ Http2Error,
> {
let resource = state
.borrow()
.resource_table
- .get::<Http2ServerConnection>(rid)?;
+ .get::<Http2ServerConnection>(rid)
+ .map_err(Http2Error::Resource)?;
let mut conn = RcRef::map(&resource, |r| &r.conn).borrow_mut().await;
if let Some(res) = conn.accept().await {
let (req, resp) = res?;
@@ -233,11 +245,12 @@ pub async fn op_http2_send_response(
#[smi] rid: ResourceId,
#[smi] status: u16,
#[serde] headers: Vec<(ByteString, ByteString)>,
-) -> Result<(ResourceId, u32), AnyError> {
+) -> Result<(ResourceId, u32), Http2Error> {
let resource = state
.borrow()
.resource_table
- .get::<Http2ServerSendResponse>(rid)?;
+ .get::<Http2ServerSendResponse>(rid)
+ .map_err(Http2Error::Resource)?;
let mut send_response = RcRef::map(resource, |r| &r.send_response)
.borrow_mut()
.await;
@@ -262,8 +275,12 @@ pub async fn op_http2_send_response(
pub async fn op_http2_poll_client_connection(
state: Rc<RefCell<OpState>>,
#[smi] rid: ResourceId,
-) -> Result<(), AnyError> {
- let resource = state.borrow().resource_table.get::<Http2ClientConn>(rid)?;
+) -> Result<(), Http2Error> {
+ let resource = state
+ .borrow()
+ .resource_table
+ .get::<Http2ClientConn>(rid)
+ .map_err(Http2Error::Resource)?;
let cancel_handle = RcRef::map(resource.clone(), |this| &this.cancel_handle);
let mut conn = RcRef::map(resource, |this| &this.conn).borrow_mut().await;
@@ -289,11 +306,12 @@ pub async fn op_http2_client_request(
// 4 strings of keys?
#[serde] mut pseudo_headers: HashMap<String, String>,
#[serde] headers: Vec<(ByteString, ByteString)>,
-) -> Result<(ResourceId, u32), AnyError> {
+) -> Result<(ResourceId, u32), Http2Error> {
let resource = state
.borrow()
.resource_table
- .get::<Http2Client>(client_rid)?;
+ .get::<Http2Client>(client_rid)
+ .map_err(Http2Error::Resource)?;
let url = resource.url.clone();
@@ -326,7 +344,10 @@ pub async fn op_http2_client_request(
let resource = {
let state = state.borrow();
- state.resource_table.get::<Http2Client>(client_rid)?
+ state
+ .resource_table
+ .get::<Http2Client>(client_rid)
+ .map_err(Http2Error::Resource)?
};
let mut client = RcRef::map(&resource, |r| &r.client).borrow_mut().await;
poll_fn(|cx| client.poll_ready(cx)).await?;
@@ -345,11 +366,12 @@ pub async fn op_http2_client_send_data(
#[smi] stream_rid: ResourceId,
#[buffer] data: JsBuffer,
end_of_stream: bool,
-) -> Result<(), AnyError> {
+) -> Result<(), Http2Error> {
let resource = state
.borrow()
.resource_table
- .get::<Http2ClientStream>(stream_rid)?;
+ .get::<Http2ClientStream>(stream_rid)
+ .map_err(Http2Error::Resource)?;
let mut stream = RcRef::map(&resource, |r| &r.stream).borrow_mut().await;
stream.send_data(data.to_vec().into(), end_of_stream)?;
@@ -361,7 +383,7 @@ pub async fn op_http2_client_reset_stream(
state: Rc<RefCell<OpState>>,
#[smi] stream_rid: ResourceId,
#[smi] code: u32,
-) -> Result<(), AnyError> {
+) -> Result<(), deno_core::error::AnyError> {
let resource = state
.borrow()
.resource_table
@@ -376,11 +398,12 @@ pub async fn op_http2_client_send_trailers(
state: Rc<RefCell<OpState>>,
#[smi] stream_rid: ResourceId,
#[serde] trailers: Vec<(ByteString, ByteString)>,
-) -> Result<(), AnyError> {
+) -> Result<(), Http2Error> {
let resource = state
.borrow()
.resource_table
- .get::<Http2ClientStream>(stream_rid)?;
+ .get::<Http2ClientStream>(stream_rid)
+ .map_err(Http2Error::Resource)?;
let mut stream = RcRef::map(&resource, |r| &r.stream).borrow_mut().await;
let mut trailers_map = http::HeaderMap::new();
@@ -408,11 +431,12 @@ pub struct Http2ClientResponse {
pub async fn op_http2_client_get_response(
state: Rc<RefCell<OpState>>,
#[smi] stream_rid: ResourceId,
-) -> Result<(Http2ClientResponse, bool), AnyError> {
+) -> Result<(Http2ClientResponse, bool), Http2Error> {
let resource = state
.borrow()
.resource_table
- .get::<Http2ClientStream>(stream_rid)?;
+ .get::<Http2ClientStream>(stream_rid)
+ .map_err(Http2Error::Resource)?;
let mut response_future =
RcRef::map(&resource, |r| &r.response).borrow_mut().await;
@@ -478,23 +502,22 @@ fn poll_data_or_trailers(
pub async fn op_http2_client_get_response_body_chunk(
state: Rc<RefCell<OpState>>,
#[smi] body_rid: ResourceId,
-) -> Result<(Option<Vec<u8>>, bool, bool), AnyError> {
+) -> Result<(Option<Vec<u8>>, bool, bool), Http2Error> {
let resource = state
.borrow()
.resource_table
- .get::<Http2ClientResponseBody>(body_rid)?;
+ .get::<Http2ClientResponseBody>(body_rid)
+ .map_err(Http2Error::Resource)?;
let mut body = RcRef::map(&resource, |r| &r.body).borrow_mut().await;
loop {
let result = poll_fn(|cx| poll_data_or_trailers(cx, &mut body)).await;
if let Err(err) = result {
- let reason = err.reason();
- if let Some(reason) = reason {
- if reason == Reason::CANCEL {
- return Ok((None, false, true));
- }
+ match err.reason() {
+ Some(Reason::NO_ERROR) => return Ok((None, true, false)),
+ Some(Reason::CANCEL) => return Ok((None, false, true)),
+ _ => return Err(err.into()),
}
- return Err(err.into());
}
match result.unwrap() {
DataOrTrailers::Data(data) => {
@@ -527,7 +550,7 @@ pub async fn op_http2_client_get_response_body_chunk(
pub async fn op_http2_client_get_response_trailers(
state: Rc<RefCell<OpState>>,
#[smi] body_rid: ResourceId,
-) -> Result<Option<Vec<(ByteString, ByteString)>>, AnyError> {
+) -> Result<Option<Vec<(ByteString, ByteString)>>, deno_core::error::AnyError> {
let resource = state
.borrow()
.resource_table
diff --git a/ext/node/ops/idna.rs b/ext/node/ops/idna.rs
index 9c9450c70..a3d85e77c 100644
--- a/ext/node/ops/idna.rs
+++ b/ext/node/ops/idna.rs
@@ -1,7 +1,5 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
-use deno_core::anyhow::Error;
-use deno_core::error::range_error;
use deno_core::op2;
use std::borrow::Cow;
@@ -11,19 +9,21 @@ use std::borrow::Cow;
const PUNY_PREFIX: &str = "xn--";
-fn invalid_input_err() -> Error {
- range_error("Invalid input")
-}
-
-fn not_basic_err() -> Error {
- range_error("Illegal input >= 0x80 (not a basic code point)")
+#[derive(Debug, thiserror::Error)]
+pub enum IdnaError {
+ #[error("Invalid input")]
+ InvalidInput,
+ #[error("Input would take more than 63 characters to encode")]
+ InputTooLong,
+ #[error("Illegal input >= 0x80 (not a basic code point)")]
+ IllegalInput,
}
/// map a domain by mapping each label with the given function
-fn map_domain<E>(
+fn map_domain(
domain: &str,
- f: impl Fn(&str) -> Result<Cow<'_, str>, E>,
-) -> Result<String, E> {
+ f: impl Fn(&str) -> Result<Cow<'_, str>, IdnaError>,
+) -> Result<String, IdnaError> {
let mut result = String::with_capacity(domain.len());
let mut domain = domain;
@@ -48,7 +48,7 @@ fn map_domain<E>(
/// Maps a unicode domain to ascii by punycode encoding each label
///
/// Note this is not IDNA2003 or IDNA2008 compliant, rather it matches node.js's punycode implementation
-fn to_ascii(input: &str) -> Result<String, Error> {
+fn to_ascii(input: &str) -> Result<String, IdnaError> {
if input.is_ascii() {
return Ok(input.into());
}
@@ -61,9 +61,7 @@ fn to_ascii(input: &str) -> Result<String, Error> {
} else {
idna::punycode::encode_str(label)
.map(|encoded| [PUNY_PREFIX, &encoded].join("").into()) // add the prefix
- .ok_or_else(|| {
- Error::msg("Input would take more than 63 characters to encode") // only error possible per the docs
- })
+ .ok_or(IdnaError::InputTooLong) // only error possible per the docs
}
})?;
@@ -74,13 +72,13 @@ fn to_ascii(input: &str) -> Result<String, Error> {
/// Maps an ascii domain to unicode by punycode decoding each label
///
/// Note this is not IDNA2003 or IDNA2008 compliant, rather it matches node.js's punycode implementation
-fn to_unicode(input: &str) -> Result<String, Error> {
+fn to_unicode(input: &str) -> Result<String, IdnaError> {
map_domain(input, |s| {
if let Some(puny) = s.strip_prefix(PUNY_PREFIX) {
// it's a punycode encoded label
Ok(
idna::punycode::decode_to_string(&puny.to_lowercase())
- .ok_or_else(invalid_input_err)?
+ .ok_or(IdnaError::InvalidInput)?
.into(),
)
} else {
@@ -95,7 +93,7 @@ fn to_unicode(input: &str) -> Result<String, Error> {
#[string]
pub fn op_node_idna_punycode_to_ascii(
#[string] domain: String,
-) -> Result<String, Error> {
+) -> Result<String, IdnaError> {
to_ascii(&domain)
}
@@ -105,7 +103,7 @@ pub fn op_node_idna_punycode_to_ascii(
#[string]
pub fn op_node_idna_punycode_to_unicode(
#[string] domain: String,
-) -> Result<String, Error> {
+) -> Result<String, IdnaError> {
to_unicode(&domain)
}
@@ -115,8 +113,8 @@ pub fn op_node_idna_punycode_to_unicode(
#[string]
pub fn op_node_idna_domain_to_ascii(
#[string] domain: String,
-) -> Result<String, Error> {
- idna::domain_to_ascii(&domain).map_err(|e| e.into())
+) -> Result<String, idna::Errors> {
+ idna::domain_to_ascii(&domain)
}
/// Converts a domain to Unicode as per the IDNA spec
@@ -131,7 +129,7 @@ pub fn op_node_idna_domain_to_unicode(#[string] domain: String) -> String {
#[string]
pub fn op_node_idna_punycode_decode(
#[string] domain: String,
-) -> Result<String, Error> {
+) -> Result<String, IdnaError> {
if domain.is_empty() {
return Ok(domain);
}
@@ -147,11 +145,10 @@ pub fn op_node_idna_punycode_decode(
.unwrap_or(domain.len() - 1);
if !domain[..last_dash].is_ascii() {
- return Err(not_basic_err());
+ return Err(IdnaError::IllegalInput);
}
- idna::punycode::decode_to_string(&domain)
- .ok_or_else(|| deno_core::error::range_error("Invalid input"))
+ idna::punycode::decode_to_string(&domain).ok_or(IdnaError::InvalidInput)
}
#[op2]
diff --git a/ext/node/ops/inspector.rs b/ext/node/ops/inspector.rs
new file mode 100644
index 000000000..34a7e004c
--- /dev/null
+++ b/ext/node/ops/inspector.rs
@@ -0,0 +1,161 @@
+// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
+
+use crate::NodePermissions;
+use deno_core::anyhow::Error;
+use deno_core::error::generic_error;
+use deno_core::futures::channel::mpsc;
+use deno_core::op2;
+use deno_core::v8;
+use deno_core::GarbageCollected;
+use deno_core::InspectorSessionKind;
+use deno_core::InspectorSessionOptions;
+use deno_core::JsRuntimeInspector;
+use deno_core::OpState;
+use std::cell::RefCell;
+use std::rc::Rc;
+
+#[op2(fast)]
+pub fn op_inspector_enabled() -> bool {
+ // TODO: hook up to InspectorServer
+ false
+}
+
+#[op2]
+pub fn op_inspector_open<P>(
+ _state: &mut OpState,
+ _port: Option<u16>,
+ #[string] _host: Option<String>,
+) -> Result<(), Error>
+where
+ P: NodePermissions + 'static,
+{
+ // TODO: hook up to InspectorServer
+ /*
+ let server = state.borrow_mut::<InspectorServer>();
+ if let Some(host) = host {
+ server.set_host(host);
+ }
+ if let Some(port) = port {
+ server.set_port(port);
+ }
+ state
+ .borrow_mut::<P>()
+ .check_net((server.host(), Some(server.port())), "inspector.open")?;
+ */
+
+ Ok(())
+}
+
+#[op2(fast)]
+pub fn op_inspector_close() {
+ // TODO: hook up to InspectorServer
+}
+
+#[op2]
+#[string]
+pub fn op_inspector_url() -> Option<String> {
+ // TODO: hook up to InspectorServer
+ None
+}
+
+#[op2(fast)]
+pub fn op_inspector_wait(state: &OpState) -> bool {
+ match state.try_borrow::<Rc<RefCell<JsRuntimeInspector>>>() {
+ Some(inspector) => {
+ inspector
+ .borrow_mut()
+ .wait_for_session_and_break_on_next_statement();
+ true
+ }
+ None => false,
+ }
+}
+
+#[op2(fast)]
+pub fn op_inspector_emit_protocol_event(
+ #[string] _event_name: String,
+ #[string] _params: String,
+) {
+ // TODO: inspector channel & protocol notifications
+}
+
+struct JSInspectorSession {
+ tx: RefCell<Option<mpsc::UnboundedSender<String>>>,
+}
+
+impl GarbageCollected for JSInspectorSession {}
+
+#[op2]
+#[cppgc]
+pub fn op_inspector_connect<'s, P>(
+ isolate: *mut v8::Isolate,
+ scope: &mut v8::HandleScope<'s>,
+ state: &mut OpState,
+ connect_to_main_thread: bool,
+ callback: v8::Local<'s, v8::Function>,
+) -> Result<JSInspectorSession, Error>
+where
+ P: NodePermissions + 'static,
+{
+ state
+ .borrow_mut::<P>()
+ .check_sys("inspector", "inspector.Session.connect")?;
+
+ if connect_to_main_thread {
+ return Err(generic_error("connectToMainThread not supported"));
+ }
+
+ let context = scope.get_current_context();
+ let context = v8::Global::new(scope, context);
+ let callback = v8::Global::new(scope, callback);
+
+ let inspector = state
+ .borrow::<Rc<RefCell<JsRuntimeInspector>>>()
+ .borrow_mut();
+
+ let tx = inspector.create_raw_session(
+ InspectorSessionOptions {
+ kind: InspectorSessionKind::NonBlocking {
+ wait_for_disconnect: false,
+ },
+ },
+ // The inspector connection does not keep the event loop alive but
+ // when the inspector sends a message to the frontend, the JS that
+    // runs may keep the event loop alive so we have to call back
+ // synchronously, instead of using the usual LocalInspectorSession
+ // UnboundedReceiver<InspectorMsg> API.
+ Box::new(move |message| {
+ // SAFETY: This function is called directly by the inspector, so
+ // 1) The isolate is still valid
+ // 2) We are on the same thread as the Isolate
+ let scope = unsafe { &mut v8::CallbackScope::new(&mut *isolate) };
+ let context = v8::Local::new(scope, context.clone());
+ let scope = &mut v8::ContextScope::new(scope, context);
+ let scope = &mut v8::TryCatch::new(scope);
+ let recv = v8::undefined(scope);
+ if let Some(message) = v8::String::new(scope, &message.content) {
+ let callback = v8::Local::new(scope, callback.clone());
+ callback.call(scope, recv.into(), &[message.into()]);
+ }
+ }),
+ );
+
+ Ok(JSInspectorSession {
+ tx: RefCell::new(Some(tx)),
+ })
+}
+
+#[op2(fast)]
+pub fn op_inspector_dispatch(
+ #[cppgc] session: &JSInspectorSession,
+ #[string] message: String,
+) {
+ if let Some(tx) = &*session.tx.borrow() {
+ let _ = tx.unbounded_send(message);
+ }
+}
+
+#[op2(fast)]
+pub fn op_inspector_disconnect(#[cppgc] session: &JSInspectorSession) {
+ drop(session.tx.borrow_mut().take());
+}
diff --git a/ext/node/ops/ipc.rs b/ext/node/ops/ipc.rs
index 59b6fece1..672cf0d70 100644
--- a/ext/node/ops/ipc.rs
+++ b/ext/node/ops/ipc.rs
@@ -17,8 +17,6 @@ mod impl_ {
use std::task::Context;
use std::task::Poll;
- use deno_core::error::bad_resource_id;
- use deno_core::error::AnyError;
use deno_core::op2;
use deno_core::serde;
use deno_core::serde::Serializer;
@@ -167,7 +165,7 @@ mod impl_ {
#[smi]
pub fn op_node_child_ipc_pipe(
state: &mut OpState,
- ) -> Result<Option<ResourceId>, AnyError> {
+ ) -> Result<Option<ResourceId>, io::Error> {
let fd = match state.try_borrow_mut::<crate::ChildPipeFd>() {
Some(child_pipe_fd) => child_pipe_fd.0,
None => return Ok(None),
@@ -180,6 +178,18 @@ mod impl_ {
))
}
+ #[derive(Debug, thiserror::Error)]
+ pub enum IpcError {
+ #[error(transparent)]
+ Resource(deno_core::error::AnyError),
+ #[error(transparent)]
+ IpcJsonStream(#[from] IpcJsonStreamError),
+ #[error(transparent)]
+ Canceled(#[from] deno_core::Canceled),
+ #[error("failed to serialize json value: {0}")]
+ SerdeJson(serde_json::Error),
+ }
+
#[op2(async)]
pub fn op_node_ipc_write<'a>(
scope: &mut v8::HandleScope<'a>,
@@ -192,34 +202,37 @@ mod impl_ {
// ideally we would just return `Result<(impl Future, bool), ..>`, but that's not
// supported by `op2` currently.
queue_ok: v8::Local<'a, v8::Array>,
- ) -> Result<impl Future<Output = Result<(), AnyError>>, AnyError> {
+ ) -> Result<impl Future<Output = Result<(), io::Error>>, IpcError> {
let mut serialized = Vec::with_capacity(64);
let mut ser = serde_json::Serializer::new(&mut serialized);
- serialize_v8_value(scope, value, &mut ser).map_err(|e| {
- deno_core::error::type_error(format!(
- "failed to serialize json value: {e}"
- ))
- })?;
+ serialize_v8_value(scope, value, &mut ser).map_err(IpcError::SerdeJson)?;
serialized.push(b'\n');
let stream = state
.borrow()
.resource_table
.get::<IpcJsonStreamResource>(rid)
- .map_err(|_| bad_resource_id())?;
+ .map_err(IpcError::Resource)?;
let old = stream
.queued_bytes
.fetch_add(serialized.len(), std::sync::atomic::Ordering::Relaxed);
if old + serialized.len() > 2 * INITIAL_CAPACITY {
// sending messages too fast
- let v = false.to_v8(scope)?;
+ let v = false.to_v8(scope).unwrap(); // Infallible
queue_ok.set_index(scope, 0, v);
}
Ok(async move {
- stream.clone().write_msg_bytes(&serialized).await?;
+ let cancel = stream.cancel.clone();
+ let result = stream
+ .clone()
+ .write_msg_bytes(&serialized)
+ .or_cancel(cancel)
+ .await;
+ // adjust count even on error
stream
.queued_bytes
.fetch_sub(serialized.len(), std::sync::atomic::Ordering::Relaxed);
+ result??;
Ok(())
})
}
@@ -239,12 +252,12 @@ mod impl_ {
pub async fn op_node_ipc_read(
state: Rc<RefCell<OpState>>,
#[smi] rid: ResourceId,
- ) -> Result<serde_json::Value, AnyError> {
+ ) -> Result<serde_json::Value, IpcError> {
let stream = state
.borrow()
.resource_table
.get::<IpcJsonStreamResource>(rid)
- .map_err(|_| bad_resource_id())?;
+ .map_err(IpcError::Resource)?;
let cancel = stream.cancel.clone();
let mut stream = RcRef::map(stream, |r| &r.read_half).borrow_mut().await;
@@ -400,7 +413,7 @@ mod impl_ {
async fn write_msg_bytes(
self: Rc<Self>,
msg: &[u8],
- ) -> Result<(), AnyError> {
+ ) -> Result<(), io::Error> {
let mut write_half =
RcRef::map(self, |r| &r.write_half).borrow_mut().await;
write_half.write_all(msg).await?;
@@ -455,6 +468,14 @@ mod impl_ {
}
}
+ #[derive(Debug, thiserror::Error)]
+ pub enum IpcJsonStreamError {
+ #[error("{0}")]
+ Io(#[source] std::io::Error),
+ #[error("{0}")]
+ SimdJson(#[source] simd_json::Error),
+ }
+
// JSON serialization stream over IPC pipe.
//
// `\n` is used as a delimiter between messages.
@@ -475,7 +496,7 @@ mod impl_ {
async fn read_msg(
&mut self,
- ) -> Result<Option<serde_json::Value>, AnyError> {
+ ) -> Result<Option<serde_json::Value>, IpcJsonStreamError> {
let mut json = None;
let nread = read_msg_inner(
&mut self.pipe,
@@ -483,7 +504,8 @@ mod impl_ {
&mut json,
&mut self.read_buffer,
)
- .await?;
+ .await
+ .map_err(IpcJsonStreamError::Io)?;
if nread == 0 {
// EOF.
return Ok(None);
@@ -493,7 +515,8 @@ mod impl_ {
Some(v) => v,
None => {
// Took more than a single read and some buffering.
- simd_json::from_slice(&mut self.buffer[..nread])?
+ simd_json::from_slice(&mut self.buffer[..nread])
+ .map_err(IpcJsonStreamError::SimdJson)?
}
};
diff --git a/ext/node/ops/mod.rs b/ext/node/ops/mod.rs
index b562261f3..e5ea8b417 100644
--- a/ext/node/ops/mod.rs
+++ b/ext/node/ops/mod.rs
@@ -7,8 +7,10 @@ pub mod fs;
pub mod http;
pub mod http2;
pub mod idna;
+pub mod inspector;
pub mod ipc;
pub mod os;
+pub mod perf_hooks;
pub mod process;
pub mod require;
pub mod tls;
diff --git a/ext/node/ops/os/mod.rs b/ext/node/ops/os/mod.rs
index ca91895f2..d291277ad 100644
--- a/ext/node/ops/os/mod.rs
+++ b/ext/node/ops/os/mod.rs
@@ -1,19 +1,31 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
+use std::mem::MaybeUninit;
+
use crate::NodePermissions;
-use deno_core::error::type_error;
-use deno_core::error::AnyError;
use deno_core::op2;
use deno_core::OpState;
mod cpus;
-mod priority;
+pub mod priority;
+
+#[derive(Debug, thiserror::Error)]
+pub enum OsError {
+ #[error(transparent)]
+ Priority(priority::PriorityError),
+ #[error(transparent)]
+ Permission(#[from] deno_permissions::PermissionCheckError),
+ #[error("Failed to get cpu info")]
+ FailedToGetCpuInfo,
+ #[error("Failed to get user info")]
+ FailedToGetUserInfo(#[source] std::io::Error),
+}
#[op2(fast)]
pub fn op_node_os_get_priority<P>(
state: &mut OpState,
pid: u32,
-) -> Result<i32, AnyError>
+) -> Result<i32, OsError>
where
P: NodePermissions + 'static,
{
@@ -22,7 +34,7 @@ where
permissions.check_sys("getPriority", "node:os.getPriority()")?;
}
- priority::get_priority(pid)
+ priority::get_priority(pid).map_err(OsError::Priority)
}
#[op2(fast)]
@@ -30,7 +42,7 @@ pub fn op_node_os_set_priority<P>(
state: &mut OpState,
pid: u32,
priority: i32,
-) -> Result<(), AnyError>
+) -> Result<(), OsError>
where
P: NodePermissions + 'static,
{
@@ -39,25 +51,171 @@ where
permissions.check_sys("setPriority", "node:os.setPriority()")?;
}
- priority::set_priority(pid, priority)
+ priority::set_priority(pid, priority).map_err(OsError::Priority)
+}
+
+#[derive(serde::Serialize)]
+pub struct UserInfo {
+ username: String,
+ homedir: String,
+ shell: Option<String>,
+}
+
+#[cfg(unix)]
+fn get_user_info(uid: u32) -> Result<UserInfo, OsError> {
+ use std::ffi::CStr;
+ let mut pw: MaybeUninit<libc::passwd> = MaybeUninit::uninit();
+ let mut result: *mut libc::passwd = std::ptr::null_mut();
+ // SAFETY: libc call, no invariants
+ let max_buf_size = unsafe { libc::sysconf(libc::_SC_GETPW_R_SIZE_MAX) };
+ let buf_size = if max_buf_size < 0 {
+ // from the man page
+ 16_384
+ } else {
+ max_buf_size as usize
+ };
+ let mut buf = {
+ let mut b = Vec::<MaybeUninit<libc::c_char>>::with_capacity(buf_size);
+ // SAFETY: MaybeUninit has no initialization invariants, and len == cap
+ unsafe {
+ b.set_len(buf_size);
+ }
+ b
+ };
+ // SAFETY: libc call, args are correct
+ let s = unsafe {
+ libc::getpwuid_r(
+ uid,
+ pw.as_mut_ptr(),
+ buf.as_mut_ptr().cast(),
+ buf_size,
+ std::ptr::addr_of_mut!(result),
+ )
+ };
+ if result.is_null() {
+ if s != 0 {
+ return Err(
+ OsError::FailedToGetUserInfo(std::io::Error::last_os_error()),
+ );
+ } else {
+ return Err(OsError::FailedToGetUserInfo(std::io::Error::from(
+ std::io::ErrorKind::NotFound,
+ )));
+ }
+ }
+ // SAFETY: pw was initialized by the call to `getpwuid_r` above
+ let pw = unsafe { pw.assume_init() };
+ // SAFETY: initialized above, pw alive until end of function, nul terminated
+ let username = unsafe { CStr::from_ptr(pw.pw_name) };
+ // SAFETY: initialized above, pw alive until end of function, nul terminated
+ let homedir = unsafe { CStr::from_ptr(pw.pw_dir) };
+ // SAFETY: initialized above, pw alive until end of function, nul terminated
+ let shell = unsafe { CStr::from_ptr(pw.pw_shell) };
+ Ok(UserInfo {
+ username: username.to_string_lossy().into_owned(),
+ homedir: homedir.to_string_lossy().into_owned(),
+ shell: Some(shell.to_string_lossy().into_owned()),
+ })
+}
+
+#[cfg(windows)]
+fn get_user_info(_uid: u32) -> Result<UserInfo, OsError> {
+ use std::ffi::OsString;
+ use std::os::windows::ffi::OsStringExt;
+
+ use windows_sys::Win32::Foundation::CloseHandle;
+ use windows_sys::Win32::Foundation::GetLastError;
+ use windows_sys::Win32::Foundation::ERROR_INSUFFICIENT_BUFFER;
+ use windows_sys::Win32::Foundation::HANDLE;
+ use windows_sys::Win32::System::Threading::GetCurrentProcess;
+ use windows_sys::Win32::System::Threading::OpenProcessToken;
+ use windows_sys::Win32::UI::Shell::GetUserProfileDirectoryW;
+ struct Handle(HANDLE);
+ impl Drop for Handle {
+ fn drop(&mut self) {
+ // SAFETY: win32 call
+ unsafe {
+ CloseHandle(self.0);
+ }
+ }
+ }
+ let mut token: MaybeUninit<HANDLE> = MaybeUninit::uninit();
+
+ // Get a handle to the current process
+ // SAFETY: win32 call
+ unsafe {
+ if OpenProcessToken(
+ GetCurrentProcess(),
+ windows_sys::Win32::Security::TOKEN_READ,
+ token.as_mut_ptr(),
+ ) == 0
+ {
+ return Err(
+ OsError::FailedToGetUserInfo(std::io::Error::last_os_error()),
+ );
+ }
+ }
+
+ // SAFETY: initialized by call above
+ let token = Handle(unsafe { token.assume_init() });
+
+ let mut bufsize = 0;
+ // get the size for the homedir buf (it'll end up in `bufsize`)
+ // SAFETY: win32 call
+ unsafe {
+ GetUserProfileDirectoryW(token.0, std::ptr::null_mut(), &mut bufsize);
+ let err = GetLastError();
+ if err != ERROR_INSUFFICIENT_BUFFER {
+ return Err(OsError::FailedToGetUserInfo(
+ std::io::Error::from_raw_os_error(err as i32),
+ ));
+ }
+ }
+ let mut path = vec![0; bufsize as usize];
+ // Actually get the homedir
+ // SAFETY: path is `bufsize` elements
+ unsafe {
+ if GetUserProfileDirectoryW(token.0, path.as_mut_ptr(), &mut bufsize) == 0 {
+ return Err(
+ OsError::FailedToGetUserInfo(std::io::Error::last_os_error()),
+ );
+ }
+ }
+ // remove trailing nul
+ path.pop();
+ let homedir_wide = OsString::from_wide(&path);
+ let homedir = homedir_wide.to_string_lossy().into_owned();
+
+ Ok(UserInfo {
+ username: deno_whoami::username(),
+ homedir,
+ shell: None,
+ })
}
#[op2]
-#[string]
-pub fn op_node_os_username<P>(state: &mut OpState) -> Result<String, AnyError>
+#[serde]
+pub fn op_node_os_user_info<P>(
+ state: &mut OpState,
+ #[smi] uid: u32,
+) -> Result<UserInfo, OsError>
where
P: NodePermissions + 'static,
{
{
let permissions = state.borrow_mut::<P>();
- permissions.check_sys("username", "node:os.userInfo()")?;
+ permissions
+ .check_sys("userInfo", "node:os.userInfo()")
+ .map_err(OsError::Permission)?;
}
- Ok(deno_whoami::username())
+ get_user_info(uid)
}
#[op2(fast)]
-pub fn op_geteuid<P>(state: &mut OpState) -> Result<u32, AnyError>
+pub fn op_geteuid<P>(
+ state: &mut OpState,
+) -> Result<u32, deno_core::error::AnyError>
where
P: NodePermissions + 'static,
{
@@ -76,7 +234,9 @@ where
}
#[op2(fast)]
-pub fn op_getegid<P>(state: &mut OpState) -> Result<u32, AnyError>
+pub fn op_getegid<P>(
+ state: &mut OpState,
+) -> Result<u32, deno_core::error::AnyError>
where
P: NodePermissions + 'static,
{
@@ -96,7 +256,7 @@ where
#[op2]
#[serde]
-pub fn op_cpus<P>(state: &mut OpState) -> Result<Vec<cpus::CpuInfo>, AnyError>
+pub fn op_cpus<P>(state: &mut OpState) -> Result<Vec<cpus::CpuInfo>, OsError>
where
P: NodePermissions + 'static,
{
@@ -105,12 +265,14 @@ where
permissions.check_sys("cpus", "node:os.cpus()")?;
}
- cpus::cpu_info().ok_or_else(|| type_error("Failed to get cpu info"))
+ cpus::cpu_info().ok_or(OsError::FailedToGetCpuInfo)
}
#[op2]
#[string]
-pub fn op_homedir<P>(state: &mut OpState) -> Result<Option<String>, AnyError>
+pub fn op_homedir<P>(
+ state: &mut OpState,
+) -> Result<Option<String>, deno_core::error::AnyError>
where
P: NodePermissions + 'static,
{
diff --git a/ext/node/ops/os/priority.rs b/ext/node/ops/os/priority.rs
index 043928e2a..9a1ebcca7 100644
--- a/ext/node/ops/os/priority.rs
+++ b/ext/node/ops/os/priority.rs
@@ -1,12 +1,18 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
-use deno_core::error::AnyError;
-
pub use impl_::*;
+#[derive(Debug, thiserror::Error)]
+pub enum PriorityError {
+ #[error("{0}")]
+ Io(#[from] std::io::Error),
+ #[cfg(windows)]
+ #[error("Invalid priority")]
+ InvalidPriority,
+}
+
#[cfg(unix)]
mod impl_ {
- use super::*;
use errno::errno;
use errno::set_errno;
use errno::Errno;
@@ -16,7 +22,7 @@ mod impl_ {
const PRIORITY_HIGH: i32 = -14;
// Ref: https://github.com/libuv/libuv/blob/55376b044b74db40772e8a6e24d67a8673998e02/src/unix/core.c#L1533-L1547
- pub fn get_priority(pid: u32) -> Result<i32, AnyError> {
+ pub fn get_priority(pid: u32) -> Result<i32, super::PriorityError> {
set_errno(Errno(0));
match (
// SAFETY: libc::getpriority is unsafe
@@ -29,7 +35,10 @@ mod impl_ {
}
}
- pub fn set_priority(pid: u32, priority: i32) -> Result<(), AnyError> {
+ pub fn set_priority(
+ pid: u32,
+ priority: i32,
+ ) -> Result<(), super::PriorityError> {
// SAFETY: libc::setpriority is unsafe
match unsafe { libc::setpriority(PRIO_PROCESS, pid as id_t, priority) } {
-1 => Err(std::io::Error::last_os_error().into()),
@@ -40,8 +49,6 @@ mod impl_ {
#[cfg(windows)]
mod impl_ {
- use super::*;
- use deno_core::error::type_error;
use winapi::shared::minwindef::DWORD;
use winapi::shared::minwindef::FALSE;
use winapi::shared::ntdef::NULL;
@@ -67,7 +74,7 @@ mod impl_ {
const PRIORITY_HIGHEST: i32 = -20;
// Ported from: https://github.com/libuv/libuv/blob/a877ca2435134ef86315326ef4ef0c16bdbabf17/src/win/util.c#L1649-L1685
- pub fn get_priority(pid: u32) -> Result<i32, AnyError> {
+ pub fn get_priority(pid: u32) -> Result<i32, super::PriorityError> {
// SAFETY: Windows API calls
unsafe {
let handle = if pid == 0 {
@@ -95,7 +102,10 @@ mod impl_ {
}
// Ported from: https://github.com/libuv/libuv/blob/a877ca2435134ef86315326ef4ef0c16bdbabf17/src/win/util.c#L1688-L1719
- pub fn set_priority(pid: u32, priority: i32) -> Result<(), AnyError> {
+ pub fn set_priority(
+ pid: u32,
+ priority: i32,
+ ) -> Result<(), super::PriorityError> {
// SAFETY: Windows API calls
unsafe {
let handle = if pid == 0 {
@@ -109,7 +119,7 @@ mod impl_ {
#[allow(clippy::manual_range_contains)]
let priority_class =
if priority < PRIORITY_HIGHEST || priority > PRIORITY_LOW {
- return Err(type_error("Invalid priority"));
+ return Err(super::PriorityError::InvalidPriority);
} else if priority < PRIORITY_HIGH {
REALTIME_PRIORITY_CLASS
} else if priority < PRIORITY_ABOVE_NORMAL {
diff --git a/ext/node/ops/perf_hooks.rs b/ext/node/ops/perf_hooks.rs
new file mode 100644
index 000000000..636d0b2ad
--- /dev/null
+++ b/ext/node/ops/perf_hooks.rs
@@ -0,0 +1,135 @@
+// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
+
+use deno_core::op2;
+use deno_core::GarbageCollected;
+
+use std::cell::Cell;
+
+#[derive(Debug, thiserror::Error)]
+pub enum PerfHooksError {
+ #[error(transparent)]
+ TokioEld(#[from] tokio_eld::Error),
+}
+
+pub struct EldHistogram {
+ eld: tokio_eld::EldHistogram<u64>,
+ started: Cell<bool>,
+}
+
+impl GarbageCollected for EldHistogram {}
+
+#[op2]
+impl EldHistogram {
+ // Creates an interval EldHistogram object that samples and reports the event
+ // loop delay over time.
+ //
+ // The delays will be reported in nanoseconds.
+ #[constructor]
+ #[cppgc]
+ pub fn new(#[smi] resolution: u32) -> Result<EldHistogram, PerfHooksError> {
+ Ok(EldHistogram {
+ eld: tokio_eld::EldHistogram::new(resolution as usize)?,
+ started: Cell::new(false),
+ })
+ }
+
+  // Enables the update interval timer.
+  //
+  // Returns true if the timer was started, false if it was already started.
+ #[fast]
+ fn enable(&self) -> bool {
+ if self.started.get() {
+ return false;
+ }
+
+ self.eld.start();
+ self.started.set(true);
+
+ true
+ }
+
+  // Disables the update interval timer.
+  //
+  // Returns true if the timer was stopped, false if it was already stopped.
+ #[fast]
+ fn disable(&self) -> bool {
+ if !self.started.get() {
+ return false;
+ }
+
+ self.eld.stop();
+ self.started.set(false);
+
+ true
+ }
+
+ // Returns the value at the given percentile.
+ //
+ // `percentile` ∈ (0, 100]
+ #[fast]
+ #[number]
+ fn percentile(&self, percentile: f64) -> u64 {
+ self.eld.value_at_percentile(percentile)
+ }
+
+ // Returns the value at the given percentile as a bigint.
+ #[fast]
+ #[bigint]
+ fn percentile_big_int(&self, percentile: f64) -> u64 {
+ self.eld.value_at_percentile(percentile)
+ }
+
+ // The number of samples recorded by the histogram.
+ #[getter]
+ #[number]
+ fn count(&self) -> u64 {
+ self.eld.len()
+ }
+
+ // The number of samples recorded by the histogram as a bigint.
+ #[getter]
+ #[bigint]
+ fn count_big_int(&self) -> u64 {
+ self.eld.len()
+ }
+
+ // The maximum recorded event loop delay.
+ #[getter]
+ #[number]
+ fn max(&self) -> u64 {
+ self.eld.max()
+ }
+
+ // The maximum recorded event loop delay as a bigint.
+ #[getter]
+ #[bigint]
+ fn max_big_int(&self) -> u64 {
+ self.eld.max()
+ }
+
+ // The mean of the recorded event loop delays.
+ #[getter]
+ fn mean(&self) -> f64 {
+ self.eld.mean()
+ }
+
+ // The minimum recorded event loop delay.
+ #[getter]
+ #[number]
+ fn min(&self) -> u64 {
+ self.eld.min()
+ }
+
+ // The minimum recorded event loop delay as a bigint.
+ #[getter]
+ #[bigint]
+ fn min_big_int(&self) -> u64 {
+ self.eld.min()
+ }
+
+ // The standard deviation of the recorded event loop delays.
+ #[getter]
+ fn stddev(&self) -> f64 {
+ self.eld.stdev()
+ }
+}
diff --git a/ext/node/ops/process.rs b/ext/node/ops/process.rs
index 0992c46c6..282567226 100644
--- a/ext/node/ops/process.rs
+++ b/ext/node/ops/process.rs
@@ -1,6 +1,5 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
-use deno_core::error::AnyError;
use deno_core::op2;
use deno_core::OpState;
use deno_permissions::PermissionsContainer;
@@ -51,7 +50,7 @@ pub fn op_node_process_kill(
state: &mut OpState,
#[smi] pid: i32,
#[smi] sig: i32,
-) -> Result<i32, AnyError> {
+) -> Result<i32, deno_core::error::AnyError> {
state
.borrow_mut::<PermissionsContainer>()
.check_run_all("process.kill")?;
diff --git a/ext/node/ops/require.rs b/ext/node/ops/require.rs
index 547336981..06c034fd5 100644
--- a/ext/node/ops/require.rs
+++ b/ext/node/ops/require.rs
@@ -1,18 +1,19 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
-use deno_core::anyhow::Context;
-use deno_core::error::generic_error;
+use boxed_error::Boxed;
use deno_core::error::AnyError;
use deno_core::op2;
use deno_core::url::Url;
use deno_core::v8;
use deno_core::JsRuntimeInspector;
-use deno_core::ModuleSpecifier;
use deno_core::OpState;
use deno_fs::FileSystemRc;
+use deno_package_json::NodeModuleKind;
use deno_package_json::PackageJsonRc;
use deno_path_util::normalize_path;
-use node_resolver::NodeModuleKind;
+use deno_path_util::url_from_file_path;
+use deno_path_util::url_to_file_path;
+use node_resolver::errors::ClosestPkgJsonError;
use node_resolver::NodeResolutionMode;
use node_resolver::REQUIRE_CONDITIONS;
use std::borrow::Cow;
@@ -22,21 +23,55 @@ use std::path::PathBuf;
use std::rc::Rc;
use crate::NodePermissions;
-use crate::NodeRequireResolverRc;
+use crate::NodeRequireLoaderRc;
use crate::NodeResolverRc;
-use crate::NpmResolverRc;
+use crate::NpmPackageFolderResolverRc;
+use crate::PackageJsonResolverRc;
#[must_use = "the resolved return value to mitigate time-of-check to time-of-use issues"]
fn ensure_read_permission<'a, P>(
state: &mut OpState,
file_path: &'a Path,
-) -> Result<Cow<'a, Path>, AnyError>
+) -> Result<Cow<'a, Path>, deno_core::error::AnyError>
where
P: NodePermissions + 'static,
{
- let resolver = state.borrow::<NodeRequireResolverRc>().clone();
+ let loader = state.borrow::<NodeRequireLoaderRc>().clone();
let permissions = state.borrow_mut::<P>();
- resolver.ensure_read_permission(permissions, file_path)
+ loader.ensure_read_permission(permissions, file_path)
+}
+
+#[derive(Debug, Boxed)]
+pub struct RequireError(pub Box<RequireErrorKind>);
+
+#[derive(Debug, thiserror::Error)]
+pub enum RequireErrorKind {
+ #[error(transparent)]
+ UrlParse(#[from] url::ParseError),
+ #[error(transparent)]
+ Permission(deno_core::error::AnyError),
+ #[error(transparent)]
+ PackageExportsResolve(
+ #[from] node_resolver::errors::PackageExportsResolveError,
+ ),
+ #[error(transparent)]
+ PackageJsonLoad(#[from] node_resolver::errors::PackageJsonLoadError),
+ #[error(transparent)]
+ ClosestPkgJson(#[from] node_resolver::errors::ClosestPkgJsonError),
+ #[error(transparent)]
+ PackageImportsResolve(
+ #[from] node_resolver::errors::PackageImportsResolveError,
+ ),
+ #[error(transparent)]
+ FilePathConversion(#[from] deno_path_util::UrlToFilePathError),
+ #[error(transparent)]
+ UrlConversion(#[from] deno_path_util::PathToUrlError),
+ #[error(transparent)]
+ Fs(#[from] deno_io::fs::FsError),
+ #[error(transparent)]
+ ReadModule(deno_core::error::AnyError),
+ #[error("Unable to get CWD: {0}")]
+ UnableToGetCwd(deno_io::fs::FsError),
}
#[op2]
@@ -95,7 +130,7 @@ pub fn op_require_init_paths() -> Vec<String> {
pub fn op_require_node_module_paths<P>(
state: &mut OpState,
#[string] from: String,
-) -> Result<Vec<String>, AnyError>
+) -> Result<Vec<String>, RequireError>
where
P: NodePermissions + 'static,
{
@@ -104,13 +139,10 @@ where
let from = if from.starts_with("file:///") {
url_to_file_path(&Url::parse(&from)?)?
} else {
- let current_dir =
- &(fs.cwd().map_err(AnyError::from)).context("Unable to get CWD")?;
- deno_path_util::normalize_path(current_dir.join(from))
+ let current_dir = &fs.cwd().map_err(RequireErrorKind::UnableToGetCwd)?;
+ normalize_path(current_dir.join(from))
};
- let from = ensure_read_permission::<P>(state, &from)?;
-
if cfg!(windows) {
// return root node_modules when path is 'D:\\'.
let from_str = from.to_str().unwrap();
@@ -131,7 +163,7 @@ where
}
let mut paths = Vec::with_capacity(from.components().count());
- let mut current_path = from.as_ref();
+ let mut current_path = from.as_path();
let mut maybe_parent = Some(current_path);
while let Some(parent) = maybe_parent {
if !parent.ends_with("node_modules") {
@@ -191,17 +223,17 @@ pub fn op_require_resolve_deno_dir(
state: &mut OpState,
#[string] request: String,
#[string] parent_filename: String,
-) -> Option<String> {
- let resolver = state.borrow::<NpmResolverRc>();
- resolver
- .resolve_package_folder_from_package(
- &request,
- &ModuleSpecifier::from_file_path(&parent_filename).unwrap_or_else(|_| {
- panic!("Url::from_file_path: [{:?}]", parent_filename)
- }),
- )
- .ok()
- .map(|p| p.to_string_lossy().into_owned())
+) -> Result<Option<String>, AnyError> {
+ let resolver = state.borrow::<NpmPackageFolderResolverRc>();
+ Ok(
+ resolver
+ .resolve_package_folder_from_package(
+ &request,
+ &url_from_file_path(&PathBuf::from(parent_filename))?,
+ )
+ .ok()
+ .map(|p| p.to_string_lossy().into_owned()),
+ )
}
#[op2(fast)]
@@ -209,8 +241,11 @@ pub fn op_require_is_deno_dir_package(
state: &mut OpState,
#[string] path: String,
) -> bool {
- let resolver = state.borrow::<NpmResolverRc>();
- resolver.in_npm_package_at_file_path(&PathBuf::from(path))
+ let resolver = state.borrow::<NodeResolverRc>();
+ match deno_path_util::url_from_file_path(&PathBuf::from(path)) {
+ Ok(specifier) => resolver.in_npm_package(&specifier),
+ Err(_) => false,
+ }
}
#[op2]
@@ -264,7 +299,7 @@ pub fn op_require_path_is_absolute(#[string] p: String) -> bool {
pub fn op_require_stat<P>(
state: &mut OpState,
#[string] path: String,
-) -> Result<i32, AnyError>
+) -> Result<i32, deno_core::error::AnyError>
where
P: NodePermissions + 'static,
{
@@ -287,15 +322,16 @@ where
pub fn op_require_real_path<P>(
state: &mut OpState,
#[string] request: String,
-) -> Result<String, AnyError>
+) -> Result<String, RequireError>
where
P: NodePermissions + 'static,
{
let path = PathBuf::from(request);
- let path = ensure_read_permission::<P>(state, &path)?;
+ let path = ensure_read_permission::<P>(state, &path)
+ .map_err(RequireErrorKind::Permission)?;
let fs = state.borrow::<FileSystemRc>();
let canonicalized_path =
- deno_core::strip_unc_prefix(fs.realpath_sync(&path)?);
+ deno_path_util::strip_unc_prefix(fs.realpath_sync(&path)?);
Ok(canonicalized_path.to_string_lossy().into_owned())
}
@@ -319,12 +355,14 @@ pub fn op_require_path_resolve(#[serde] parts: Vec<String>) -> String {
#[string]
pub fn op_require_path_dirname(
#[string] request: String,
-) -> Result<String, AnyError> {
+) -> Result<String, deno_core::error::AnyError> {
let p = PathBuf::from(request);
if let Some(parent) = p.parent() {
Ok(parent.to_string_lossy().into_owned())
} else {
- Err(generic_error("Path doesn't have a parent"))
+ Err(deno_core::error::generic_error(
+ "Path doesn't have a parent",
+ ))
}
}
@@ -332,12 +370,14 @@ pub fn op_require_path_dirname(
#[string]
pub fn op_require_path_basename(
#[string] request: String,
-) -> Result<String, AnyError> {
+) -> Result<String, deno_core::error::AnyError> {
let p = PathBuf::from(request);
if let Some(path) = p.file_name() {
Ok(path.to_string_lossy().into_owned())
} else {
- Err(generic_error("Path doesn't have a file name"))
+ Err(deno_core::error::generic_error(
+ "Path doesn't have a file name",
+ ))
}
}
@@ -348,7 +388,7 @@ pub fn op_require_try_self_parent_path<P>(
has_parent: bool,
#[string] maybe_parent_filename: Option<String>,
#[string] maybe_parent_id: Option<String>,
-) -> Result<Option<String>, AnyError>
+) -> Result<Option<String>, deno_core::error::AnyError>
where
P: NodePermissions + 'static,
{
@@ -378,7 +418,7 @@ pub fn op_require_try_self<P>(
state: &mut OpState,
#[string] parent_path: Option<String>,
#[string] request: String,
-) -> Result<Option<String>, AnyError>
+) -> Result<Option<String>, RequireError>
where
P: NodePermissions + 'static,
{
@@ -386,8 +426,8 @@ where
return Ok(None);
}
- let node_resolver = state.borrow::<NodeResolverRc>();
- let pkg = node_resolver
+ let pkg_json_resolver = state.borrow::<PackageJsonResolverRc>();
+ let pkg = pkg_json_resolver
.get_closest_package_json_from_path(&PathBuf::from(parent_path.unwrap()))
.ok()
.flatten();
@@ -416,6 +456,7 @@ where
let referrer = deno_core::url::Url::from_file_path(&pkg.path).unwrap();
if let Some(exports) = &pkg.exports {
+ let node_resolver = state.borrow::<NodeResolverRc>();
let r = node_resolver.package_exports_resolve(
&pkg.path,
&expansion,
@@ -440,14 +481,18 @@ where
pub fn op_require_read_file<P>(
state: &mut OpState,
#[string] file_path: String,
-) -> Result<String, AnyError>
+) -> Result<String, RequireError>
where
P: NodePermissions + 'static,
{
let file_path = PathBuf::from(file_path);
- let file_path = ensure_read_permission::<P>(state, &file_path)?;
- let fs = state.borrow::<FileSystemRc>();
- Ok(fs.read_text_file_lossy_sync(&file_path, None)?)
+ // todo(dsherret): there's multiple borrows to NodeRequireLoaderRc here
+ let file_path = ensure_read_permission::<P>(state, &file_path)
+ .map_err(RequireErrorKind::Permission)?;
+ let loader = state.borrow::<NodeRequireLoaderRc>();
+ loader
+ .load_text_file_lossy(&file_path)
+ .map_err(|e| RequireErrorKind::ReadModule(e).into_box())
}
#[op2]
@@ -472,16 +517,17 @@ pub fn op_require_resolve_exports<P>(
#[string] name: String,
#[string] expansion: String,
#[string] parent_path: String,
-) -> Result<Option<String>, AnyError>
+) -> Result<Option<String>, RequireError>
where
P: NodePermissions + 'static,
{
let fs = state.borrow::<FileSystemRc>();
- let npm_resolver = state.borrow::<NpmResolverRc>();
let node_resolver = state.borrow::<NodeResolverRc>();
+ let pkg_json_resolver = state.borrow::<PackageJsonResolverRc>();
let modules_path = PathBuf::from(&modules_path_str);
- let pkg_path = if npm_resolver.in_npm_package_at_file_path(&modules_path)
+ let modules_specifier = deno_path_util::url_from_file_path(&modules_path)?;
+ let pkg_path = if node_resolver.in_npm_package(&modules_specifier)
&& !uses_local_node_modules_dir
{
modules_path
@@ -495,7 +541,7 @@ where
}
};
let Some(pkg) =
- node_resolver.load_package_json(&pkg_path.join("package.json"))?
+ pkg_json_resolver.load_package_json(&pkg_path.join("package.json"))?
else {
return Ok(None);
};
@@ -503,12 +549,16 @@ where
return Ok(None);
};
- let referrer = Url::from_file_path(parent_path).unwrap();
+ let referrer = if parent_path.is_empty() {
+ None
+ } else {
+ Some(Url::from_file_path(parent_path).unwrap())
+ };
let r = node_resolver.package_exports_resolve(
&pkg.path,
&format!(".{expansion}"),
exports,
- Some(&referrer),
+ referrer.as_ref(),
NodeModuleKind::Cjs,
REQUIRE_CONDITIONS,
NodeResolutionMode::Execution,
@@ -520,21 +570,17 @@ where
}))
}
-#[op2]
-#[serde]
-pub fn op_require_read_closest_package_json<P>(
+#[op2(fast)]
+pub fn op_require_is_maybe_cjs(
state: &mut OpState,
#[string] filename: String,
-) -> Result<Option<PackageJsonRc>, AnyError>
-where
- P: NodePermissions + 'static,
-{
+) -> Result<bool, ClosestPkgJsonError> {
let filename = PathBuf::from(filename);
- // permissions: allow reading the closest package.json files
- let node_resolver = state.borrow::<NodeResolverRc>().clone();
- node_resolver
- .get_closest_package_json_from_path(&filename)
- .map_err(AnyError::from)
+ let Ok(url) = url_from_file_path(&filename) else {
+ return Ok(false);
+ };
+ let loader = state.borrow::<NodeRequireLoaderRc>();
+ loader.is_maybe_cjs(&url)
}
#[op2]
@@ -546,13 +592,13 @@ pub fn op_require_read_package_scope<P>(
where
P: NodePermissions + 'static,
{
- let node_resolver = state.borrow::<NodeResolverRc>().clone();
+ let pkg_json_resolver = state.borrow::<PackageJsonResolverRc>();
let package_json_path = PathBuf::from(package_json_path);
if package_json_path.file_name() != Some("package.json".as_ref()) {
// permissions: do not allow reading a non-package.json file
return None;
}
- node_resolver
+ pkg_json_resolver
.load_package_json(&package_json_path)
.ok()
.flatten()
@@ -564,22 +610,23 @@ pub fn op_require_package_imports_resolve<P>(
state: &mut OpState,
#[string] referrer_filename: String,
#[string] request: String,
-) -> Result<Option<String>, AnyError>
+) -> Result<Option<String>, RequireError>
where
P: NodePermissions + 'static,
{
let referrer_path = PathBuf::from(&referrer_filename);
- let referrer_path = ensure_read_permission::<P>(state, &referrer_path)?;
- let node_resolver = state.borrow::<NodeResolverRc>();
+ let referrer_path = ensure_read_permission::<P>(state, &referrer_path)
+ .map_err(RequireErrorKind::Permission)?;
+ let pkg_json_resolver = state.borrow::<PackageJsonResolverRc>();
let Some(pkg) =
- node_resolver.get_closest_package_json_from_path(&referrer_path)?
+ pkg_json_resolver.get_closest_package_json_from_path(&referrer_path)?
else {
return Ok(None);
};
if pkg.imports.is_some() {
- let referrer_url =
- deno_core::url::Url::from_file_path(&referrer_filename).unwrap();
+ let node_resolver = state.borrow::<NodeResolverRc>();
+ let referrer_url = Url::from_file_path(&referrer_filename).unwrap();
let url = node_resolver.package_imports_resolve(
&request,
Some(&referrer_url),
@@ -604,20 +651,11 @@ pub fn op_require_break_on_next_statement(state: Rc<RefCell<OpState>>) {
inspector.wait_for_session_and_break_on_next_statement()
}
-fn url_to_file_path_string(url: &Url) -> Result<String, AnyError> {
+fn url_to_file_path_string(url: &Url) -> Result<String, RequireError> {
let file_path = url_to_file_path(url)?;
Ok(file_path.to_string_lossy().into_owned())
}
-fn url_to_file_path(url: &Url) -> Result<PathBuf, AnyError> {
- match url.to_file_path() {
- Ok(file_path) => Ok(file_path),
- Err(()) => {
- deno_core::anyhow::bail!("failed to convert '{}' to file path", url)
- }
- }
-}
-
#[op2(fast)]
pub fn op_require_can_parse_as_esm(
scope: &mut v8::HandleScope,
diff --git a/ext/node/ops/util.rs b/ext/node/ops/util.rs
index 533d51c92..1c177ac04 100644
--- a/ext/node/ops/util.rs
+++ b/ext/node/ops/util.rs
@@ -1,6 +1,5 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
-use deno_core::error::AnyError;
use deno_core::op2;
use deno_core::OpState;
use deno_core::ResourceHandle;
@@ -22,7 +21,7 @@ enum HandleType {
pub fn op_node_guess_handle_type(
state: &mut OpState,
rid: u32,
-) -> Result<u32, AnyError> {
+) -> Result<u32, deno_core::error::AnyError> {
let handle = state.resource_table.get_handle(rid)?;
let handle_type = match handle {
diff --git a/ext/node/ops/v8.rs b/ext/node/ops/v8.rs
index 8813d2e18..61f67f11f 100644
--- a/ext/node/ops/v8.rs
+++ b/ext/node/ops/v8.rs
@@ -1,7 +1,5 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
-use deno_core::error::generic_error;
-use deno_core::error::type_error;
-use deno_core::error::AnyError;
+
use deno_core::op2;
use deno_core::v8;
use deno_core::FastString;
@@ -206,10 +204,9 @@ pub fn op_v8_write_value(
scope: &mut v8::HandleScope,
#[cppgc] ser: &Serializer,
value: v8::Local<v8::Value>,
-) -> Result<(), AnyError> {
+) {
let context = scope.get_current_context();
ser.inner.write_value(context, value);
- Ok(())
}
struct DeserBuffer {
@@ -271,11 +268,13 @@ pub fn op_v8_new_deserializer(
scope: &mut v8::HandleScope,
obj: v8::Local<v8::Object>,
buffer: v8::Local<v8::ArrayBufferView>,
-) -> Result<Deserializer<'static>, AnyError> {
+) -> Result<Deserializer<'static>, deno_core::error::AnyError> {
let offset = buffer.byte_offset();
let len = buffer.byte_length();
let backing_store = buffer.get_backing_store().ok_or_else(|| {
- generic_error("deserialization buffer has no backing store")
+ deno_core::error::generic_error(
+ "deserialization buffer has no backing store",
+ )
})?;
let (buf_slice, buf_ptr) = if let Some(data) = backing_store.data() {
// SAFETY: the offset is valid for the underlying buffer because we're getting it directly from v8
@@ -317,10 +316,10 @@ pub fn op_v8_transfer_array_buffer_de(
#[op2(fast)]
pub fn op_v8_read_double(
#[cppgc] deser: &Deserializer,
-) -> Result<f64, AnyError> {
+) -> Result<f64, deno_core::error::AnyError> {
let mut double = 0f64;
if !deser.inner.read_double(&mut double) {
- return Err(type_error("ReadDouble() failed"));
+ return Err(deno_core::error::type_error("ReadDouble() failed"));
}
Ok(double)
}
@@ -355,10 +354,10 @@ pub fn op_v8_read_raw_bytes(
#[op2(fast)]
pub fn op_v8_read_uint32(
#[cppgc] deser: &Deserializer,
-) -> Result<u32, AnyError> {
+) -> Result<u32, deno_core::error::AnyError> {
let mut value = 0;
if !deser.inner.read_uint32(&mut value) {
- return Err(type_error("ReadUint32() failed"));
+ return Err(deno_core::error::type_error("ReadUint32() failed"));
}
Ok(value)
@@ -368,10 +367,10 @@ pub fn op_v8_read_uint32(
#[serde]
pub fn op_v8_read_uint64(
#[cppgc] deser: &Deserializer,
-) -> Result<(u32, u32), AnyError> {
+) -> Result<(u32, u32), deno_core::error::AnyError> {
let mut val = 0;
if !deser.inner.read_uint64(&mut val) {
- return Err(type_error("ReadUint64() failed"));
+ return Err(deno_core::error::type_error("ReadUint64() failed"));
}
Ok(((val >> 32) as u32, val as u32))
diff --git a/ext/node/ops/winerror.rs b/ext/node/ops/winerror.rs
index c0d66f7d0..cb053774e 100644
--- a/ext/node/ops/winerror.rs
+++ b/ext/node/ops/winerror.rs
@@ -62,10 +62,11 @@ pub fn op_node_sys_to_uv_error(err: i32) -> String {
WSAEHOSTUNREACH => "EHOSTUNREACH",
ERROR_INSUFFICIENT_BUFFER => "EINVAL",
ERROR_INVALID_DATA => "EINVAL",
- ERROR_INVALID_NAME => "EINVAL",
+ ERROR_INVALID_NAME => "ENOENT",
ERROR_INVALID_PARAMETER => "EINVAL",
WSAEINVAL => "EINVAL",
WSAEPFNOSUPPORT => "EINVAL",
+ ERROR_NOT_A_REPARSE_POINT => "EINVAL",
ERROR_BEGINNING_OF_MEDIA => "EIO",
ERROR_BUS_RESET => "EIO",
ERROR_CRC => "EIO",
diff --git a/ext/node/ops/worker_threads.rs b/ext/node/ops/worker_threads.rs
index 4c50092f2..d2e575882 100644
--- a/ext/node/ops/worker_threads.rs
+++ b/ext/node/ops/worker_threads.rs
@@ -1,39 +1,56 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
-use deno_core::error::generic_error;
-use deno_core::error::AnyError;
use deno_core::op2;
use deno_core::url::Url;
use deno_core::OpState;
use deno_fs::FileSystemRc;
-use node_resolver::NodeResolution;
use std::borrow::Cow;
use std::path::Path;
use std::path::PathBuf;
use crate::NodePermissions;
-use crate::NodeRequireResolverRc;
-use crate::NodeResolverRc;
+use crate::NodeRequireLoaderRc;
#[must_use = "the resolved return value to mitigate time-of-check to time-of-use issues"]
fn ensure_read_permission<'a, P>(
state: &mut OpState,
file_path: &'a Path,
-) -> Result<Cow<'a, Path>, AnyError>
+) -> Result<Cow<'a, Path>, deno_core::error::AnyError>
where
P: NodePermissions + 'static,
{
- let resolver = state.borrow::<NodeRequireResolverRc>().clone();
+ let loader = state.borrow::<NodeRequireLoaderRc>().clone();
let permissions = state.borrow_mut::<P>();
- resolver.ensure_read_permission(permissions, file_path)
+ loader.ensure_read_permission(permissions, file_path)
}
+#[derive(Debug, thiserror::Error)]
+pub enum WorkerThreadsFilenameError {
+ #[error(transparent)]
+ Permission(deno_core::error::AnyError),
+ #[error("{0}")]
+ UrlParse(#[from] url::ParseError),
+ #[error("Relative path entries must start with '.' or '..'")]
+ InvalidRelativeUrl,
+ #[error("URL from Path-String")]
+ UrlFromPathString,
+ #[error("URL to Path-String")]
+ UrlToPathString,
+ #[error("URL to Path")]
+ UrlToPath,
+ #[error("File not found [{0:?}]")]
+ FileNotFound(PathBuf),
+ #[error(transparent)]
+ Fs(#[from] deno_io::fs::FsError),
+}
+
+// todo(dsherret): we should remove this and do all this work inside op_create_worker
#[op2]
#[string]
pub fn op_worker_threads_filename<P>(
state: &mut OpState,
#[string] specifier: String,
-) -> Result<String, AnyError>
+) -> Result<String, WorkerThreadsFilenameError>
where
P: NodePermissions + 'static,
{
@@ -45,44 +62,26 @@ where
} else {
let path = PathBuf::from(&specifier);
if path.is_relative() && !specifier.starts_with('.') {
- return Err(generic_error(
- "Relative path entries must start with '.' or '..'",
- ));
+ return Err(WorkerThreadsFilenameError::InvalidRelativeUrl);
}
- let path = ensure_read_permission::<P>(state, &path)?;
+ let path = ensure_read_permission::<P>(state, &path)
+ .map_err(WorkerThreadsFilenameError::Permission)?;
let fs = state.borrow::<FileSystemRc>();
let canonicalized_path =
- deno_core::strip_unc_prefix(fs.realpath_sync(&path)?);
+ deno_path_util::strip_unc_prefix(fs.realpath_sync(&path)?);
Url::from_file_path(canonicalized_path)
- .map_err(|e| generic_error(format!("URL from Path-String: {:#?}", e)))?
+ .map_err(|_| WorkerThreadsFilenameError::UrlFromPathString)?
};
let url_path = url
.to_file_path()
- .map_err(|e| generic_error(format!("URL to Path-String: {:#?}", e)))?;
- let url_path = ensure_read_permission::<P>(state, &url_path)?;
+ .map_err(|_| WorkerThreadsFilenameError::UrlToPathString)?;
+ let url_path = ensure_read_permission::<P>(state, &url_path)
+ .map_err(WorkerThreadsFilenameError::Permission)?;
let fs = state.borrow::<FileSystemRc>();
if !fs.exists_sync(&url_path) {
- return Err(generic_error(format!("File not found [{:?}]", url_path)));
- }
- let node_resolver = state.borrow::<NodeResolverRc>();
- match node_resolver.url_to_node_resolution(url)? {
- NodeResolution::Esm(u) => Ok(u.to_string()),
- NodeResolution::CommonJs(u) => wrap_cjs(u),
- NodeResolution::BuiltIn(_) => Err(generic_error("Neither ESM nor CJS")),
+ return Err(WorkerThreadsFilenameError::FileNotFound(
+ url_path.to_path_buf(),
+ ));
}
-}
-
-///
-/// Wrap a CJS file-URL and the required setup in a stringified `data:`-URL
-///
-fn wrap_cjs(url: Url) -> Result<String, AnyError> {
- let path = url
- .to_file_path()
- .map_err(|e| generic_error(format!("URL to Path: {:#?}", e)))?;
- let filename = path.file_name().unwrap().to_string_lossy();
- Ok(format!(
- "data:text/javascript,import {{ createRequire }} from \"node:module\";\
- const require = createRequire(\"{}\"); require(\"./{}\");",
- url, filename,
- ))
+ Ok(url.to_string())
}
diff --git a/ext/node/ops/zlib/brotli.rs b/ext/node/ops/zlib/brotli.rs
index 3e3905fc3..1a681ff7f 100644
--- a/ext/node/ops/zlib/brotli.rs
+++ b/ext/node/ops/zlib/brotli.rs
@@ -9,8 +9,6 @@ use brotli::BrotliDecompressStream;
use brotli::BrotliResult;
use brotli::BrotliState;
use brotli::Decompressor;
-use deno_core::error::type_error;
-use deno_core::error::AnyError;
use deno_core::op2;
use deno_core::JsBuffer;
use deno_core::OpState;
@@ -19,7 +17,23 @@ use deno_core::ToJsBuffer;
use std::cell::RefCell;
use std::io::Read;
-fn encoder_mode(mode: u32) -> Result<BrotliEncoderMode, AnyError> {
+#[derive(Debug, thiserror::Error)]
+pub enum BrotliError {
+ #[error("Invalid encoder mode")]
+ InvalidEncoderMode,
+ #[error("Failed to compress")]
+ CompressFailed,
+ #[error("Failed to decompress")]
+ DecompressFailed,
+ #[error(transparent)]
+ Join(#[from] tokio::task::JoinError),
+ #[error(transparent)]
+ Resource(deno_core::error::AnyError),
+ #[error("{0}")]
+ Io(std::io::Error),
+}
+
+fn encoder_mode(mode: u32) -> Result<BrotliEncoderMode, BrotliError> {
Ok(match mode {
0 => BrotliEncoderMode::BROTLI_MODE_GENERIC,
1 => BrotliEncoderMode::BROTLI_MODE_TEXT,
@@ -28,7 +42,7 @@ fn encoder_mode(mode: u32) -> Result<BrotliEncoderMode, AnyError> {
4 => BrotliEncoderMode::BROTLI_FORCE_MSB_PRIOR,
5 => BrotliEncoderMode::BROTLI_FORCE_UTF8_PRIOR,
6 => BrotliEncoderMode::BROTLI_FORCE_SIGNED_PRIOR,
- _ => return Err(type_error("Invalid encoder mode")),
+ _ => return Err(BrotliError::InvalidEncoderMode),
})
}
@@ -40,7 +54,7 @@ pub fn op_brotli_compress(
#[smi] quality: i32,
#[smi] lgwin: i32,
#[smi] mode: u32,
-) -> Result<usize, AnyError> {
+) -> Result<usize, BrotliError> {
let mode = encoder_mode(mode)?;
let mut out_size = out.len();
@@ -57,7 +71,7 @@ pub fn op_brotli_compress(
&mut |_, _, _, _| (),
);
if result != 1 {
- return Err(type_error("Failed to compress"));
+ return Err(BrotliError::CompressFailed);
}
Ok(out_size)
@@ -87,7 +101,7 @@ pub async fn op_brotli_compress_async(
#[smi] quality: i32,
#[smi] lgwin: i32,
#[smi] mode: u32,
-) -> Result<ToJsBuffer, AnyError> {
+) -> Result<ToJsBuffer, BrotliError> {
let mode = encoder_mode(mode)?;
tokio::task::spawn_blocking(move || {
let input = &*input;
@@ -107,7 +121,7 @@ pub async fn op_brotli_compress_async(
&mut |_, _, _, _| (),
);
if result != 1 {
- return Err(type_error("Failed to compress"));
+ return Err(BrotliError::CompressFailed);
}
out.truncate(out_size);
@@ -151,8 +165,11 @@ pub fn op_brotli_compress_stream(
#[smi] rid: u32,
#[buffer] input: &[u8],
#[buffer] output: &mut [u8],
-) -> Result<usize, AnyError> {
- let ctx = state.resource_table.get::<BrotliCompressCtx>(rid)?;
+) -> Result<usize, BrotliError> {
+ let ctx = state
+ .resource_table
+ .get::<BrotliCompressCtx>(rid)
+ .map_err(BrotliError::Resource)?;
let mut inst = ctx.inst.borrow_mut();
let mut output_offset = 0;
@@ -168,7 +185,7 @@ pub fn op_brotli_compress_stream(
&mut |_, _, _, _| (),
);
if !result {
- return Err(type_error("Failed to compress"));
+ return Err(BrotliError::CompressFailed);
}
Ok(output_offset)
@@ -180,8 +197,11 @@ pub fn op_brotli_compress_stream_end(
state: &mut OpState,
#[smi] rid: u32,
#[buffer] output: &mut [u8],
-) -> Result<usize, AnyError> {
- let ctx = state.resource_table.get::<BrotliCompressCtx>(rid)?;
+) -> Result<usize, BrotliError> {
+ let ctx = state
+ .resource_table
+ .get::<BrotliCompressCtx>(rid)
+ .map_err(BrotliError::Resource)?;
let mut inst = ctx.inst.borrow_mut();
let mut output_offset = 0;
@@ -197,13 +217,13 @@ pub fn op_brotli_compress_stream_end(
&mut |_, _, _, _| (),
);
if !result {
- return Err(type_error("Failed to compress"));
+ return Err(BrotliError::CompressFailed);
}
Ok(output_offset)
}
-fn brotli_decompress(buffer: &[u8]) -> Result<ToJsBuffer, AnyError> {
+fn brotli_decompress(buffer: &[u8]) -> Result<ToJsBuffer, std::io::Error> {
let mut output = Vec::with_capacity(4096);
let mut decompressor = Decompressor::new(buffer, buffer.len());
decompressor.read_to_end(&mut output)?;
@@ -214,7 +234,7 @@ fn brotli_decompress(buffer: &[u8]) -> Result<ToJsBuffer, AnyError> {
#[serde]
pub fn op_brotli_decompress(
#[buffer] buffer: &[u8],
-) -> Result<ToJsBuffer, AnyError> {
+) -> Result<ToJsBuffer, std::io::Error> {
brotli_decompress(buffer)
}
@@ -222,8 +242,11 @@ pub fn op_brotli_decompress(
#[serde]
pub async fn op_brotli_decompress_async(
#[buffer] buffer: JsBuffer,
-) -> Result<ToJsBuffer, AnyError> {
- tokio::task::spawn_blocking(move || brotli_decompress(&buffer)).await?
+) -> Result<ToJsBuffer, BrotliError> {
+ tokio::task::spawn_blocking(move || {
+ brotli_decompress(&buffer).map_err(BrotliError::Io)
+ })
+ .await?
}
struct BrotliDecompressCtx {
@@ -252,8 +275,11 @@ pub fn op_brotli_decompress_stream(
#[smi] rid: u32,
#[buffer] input: &[u8],
#[buffer] output: &mut [u8],
-) -> Result<usize, AnyError> {
- let ctx = state.resource_table.get::<BrotliDecompressCtx>(rid)?;
+) -> Result<usize, BrotliError> {
+ let ctx = state
+ .resource_table
+ .get::<BrotliDecompressCtx>(rid)
+ .map_err(BrotliError::Resource)?;
let mut inst = ctx.inst.borrow_mut();
let mut output_offset = 0;
@@ -268,7 +294,7 @@ pub fn op_brotli_decompress_stream(
&mut inst,
);
if matches!(result, BrotliResult::ResultFailure) {
- return Err(type_error("Failed to decompress"));
+ return Err(BrotliError::DecompressFailed);
}
Ok(output_offset)
@@ -280,8 +306,11 @@ pub fn op_brotli_decompress_stream_end(
state: &mut OpState,
#[smi] rid: u32,
#[buffer] output: &mut [u8],
-) -> Result<usize, AnyError> {
- let ctx = state.resource_table.get::<BrotliDecompressCtx>(rid)?;
+) -> Result<usize, BrotliError> {
+ let ctx = state
+ .resource_table
+ .get::<BrotliDecompressCtx>(rid)
+ .map_err(BrotliError::Resource)?;
let mut inst = ctx.inst.borrow_mut();
let mut output_offset = 0;
@@ -296,7 +325,7 @@ pub fn op_brotli_decompress_stream_end(
&mut inst,
);
if matches!(result, BrotliResult::ResultFailure) {
- return Err(type_error("Failed to decompress"));
+ return Err(BrotliError::DecompressFailed);
}
Ok(output_offset)
diff --git a/ext/node/ops/zlib/mod.rs b/ext/node/ops/zlib/mod.rs
index b1d6d21d2..991c0925d 100644
--- a/ext/node/ops/zlib/mod.rs
+++ b/ext/node/ops/zlib/mod.rs
@@ -1,14 +1,14 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
-use deno_core::error::type_error;
-use deno_core::error::AnyError;
+
use deno_core::op2;
+use libc::c_ulong;
use std::borrow::Cow;
use std::cell::RefCell;
use zlib::*;
mod alloc;
pub mod brotli;
-mod mode;
+pub mod mode;
mod stream;
use mode::Flush;
@@ -17,11 +17,11 @@ use mode::Mode;
use self::stream::StreamWrapper;
#[inline]
-fn check(condition: bool, msg: &str) -> Result<(), AnyError> {
+fn check(condition: bool, msg: &str) -> Result<(), deno_core::error::AnyError> {
if condition {
Ok(())
} else {
- Err(type_error(msg.to_string()))
+ Err(deno_core::error::type_error(msg.to_string()))
}
}
@@ -56,7 +56,7 @@ impl ZlibInner {
out_off: u32,
out_len: u32,
flush: Flush,
- ) -> Result<(), AnyError> {
+ ) -> Result<(), deno_core::error::AnyError> {
check(self.init_done, "write before init")?;
check(!self.write_in_progress, "write already in progress")?;
check(!self.pending_close, "close already in progress")?;
@@ -65,11 +65,11 @@ impl ZlibInner {
let next_in = input
.get(in_off as usize..in_off as usize + in_len as usize)
- .ok_or_else(|| type_error("invalid input range"))?
+ .ok_or_else(|| deno_core::error::type_error("invalid input range"))?
.as_ptr() as *mut _;
let next_out = out
.get_mut(out_off as usize..out_off as usize + out_len as usize)
- .ok_or_else(|| type_error("invalid output range"))?
+ .ok_or_else(|| deno_core::error::type_error("invalid output range"))?
.as_mut_ptr();
self.strm.avail_in = in_len;
@@ -81,7 +81,10 @@ impl ZlibInner {
Ok(())
}
- fn do_write(&mut self, flush: Flush) -> Result<(), AnyError> {
+ fn do_write(
+ &mut self,
+ flush: Flush,
+ ) -> Result<(), deno_core::error::AnyError> {
self.flush = flush;
match self.mode {
Mode::Deflate | Mode::Gzip | Mode::DeflateRaw => {
@@ -127,7 +130,7 @@ impl ZlibInner {
self.mode = Mode::Inflate;
}
} else if next_expected_header_byte.is_some() {
- return Err(type_error(
+ return Err(deno_core::error::type_error(
"invalid number of gzip magic number bytes read",
));
}
@@ -181,7 +184,7 @@ impl ZlibInner {
Ok(())
}
- fn init_stream(&mut self) -> Result<(), AnyError> {
+ fn init_stream(&mut self) -> Result<(), deno_core::error::AnyError> {
match self.mode {
Mode::Gzip | Mode::Gunzip => self.window_bits += 16,
Mode::Unzip => self.window_bits += 32,
@@ -199,7 +202,7 @@ impl ZlibInner {
Mode::Inflate | Mode::Gunzip | Mode::InflateRaw | Mode::Unzip => {
self.strm.inflate_init(self.window_bits)
}
- Mode::None => return Err(type_error("Unknown mode")),
+ Mode::None => return Err(deno_core::error::type_error("Unknown mode")),
};
self.write_in_progress = false;
@@ -208,7 +211,7 @@ impl ZlibInner {
Ok(())
}
- fn close(&mut self) -> Result<bool, AnyError> {
+ fn close(&mut self) -> Result<bool, deno_core::error::AnyError> {
if self.write_in_progress {
self.pending_close = true;
return Ok(false);
@@ -222,10 +225,8 @@ impl ZlibInner {
Ok(true)
}
- fn reset_stream(&mut self) -> Result<(), AnyError> {
+ fn reset_stream(&mut self) {
self.err = self.strm.reset(self.mode);
-
- Ok(())
}
}
@@ -243,7 +244,7 @@ impl deno_core::Resource for Zlib {
#[op2]
#[cppgc]
-pub fn op_zlib_new(#[smi] mode: i32) -> Result<Zlib, AnyError> {
+pub fn op_zlib_new(#[smi] mode: i32) -> Result<Zlib, mode::ModeError> {
let mode = Mode::try_from(mode)?;
let inner = ZlibInner {
@@ -256,12 +257,20 @@ pub fn op_zlib_new(#[smi] mode: i32) -> Result<Zlib, AnyError> {
})
}
+#[derive(Debug, thiserror::Error)]
+pub enum ZlibError {
+ #[error("zlib not initialized")]
+ NotInitialized,
+ #[error(transparent)]
+ Mode(#[from] mode::ModeError),
+ #[error(transparent)]
+ Other(#[from] deno_core::error::AnyError),
+}
+
#[op2(fast)]
-pub fn op_zlib_close(#[cppgc] resource: &Zlib) -> Result<(), AnyError> {
+pub fn op_zlib_close(#[cppgc] resource: &Zlib) -> Result<(), ZlibError> {
let mut resource = resource.inner.borrow_mut();
- let zlib = resource
- .as_mut()
- .ok_or_else(|| type_error("zlib not initialized"))?;
+ let zlib = resource.as_mut().ok_or(ZlibError::NotInitialized)?;
// If there is a pending write, defer the close until the write is done.
zlib.close()?;
@@ -282,11 +291,9 @@ pub fn op_zlib_write(
#[smi] out_off: u32,
#[smi] out_len: u32,
#[buffer] result: &mut [u32],
-) -> Result<i32, AnyError> {
+) -> Result<i32, ZlibError> {
let mut zlib = resource.inner.borrow_mut();
- let zlib = zlib
- .as_mut()
- .ok_or_else(|| type_error("zlib not initialized"))?;
+ let zlib = zlib.as_mut().ok_or(ZlibError::NotInitialized)?;
let flush = Flush::try_from(flush)?;
zlib.start_write(input, in_off, in_len, out, out_off, out_len, flush)?;
@@ -307,11 +314,9 @@ pub fn op_zlib_init(
#[smi] mem_level: i32,
#[smi] strategy: i32,
#[buffer] dictionary: &[u8],
-) -> Result<i32, AnyError> {
+) -> Result<i32, ZlibError> {
let mut zlib = resource.inner.borrow_mut();
- let zlib = zlib
- .as_mut()
- .ok_or_else(|| type_error("zlib not initialized"))?;
+ let zlib = zlib.as_mut().ok_or(ZlibError::NotInitialized)?;
check((8..=15).contains(&window_bits), "invalid windowBits")?;
check((-1..=9).contains(&level), "invalid level")?;
@@ -348,13 +353,11 @@ pub fn op_zlib_init(
#[op2(fast)]
#[smi]
-pub fn op_zlib_reset(#[cppgc] resource: &Zlib) -> Result<i32, AnyError> {
+pub fn op_zlib_reset(#[cppgc] resource: &Zlib) -> Result<i32, ZlibError> {
let mut zlib = resource.inner.borrow_mut();
- let zlib = zlib
- .as_mut()
- .ok_or_else(|| type_error("zlib not initialized"))?;
+ let zlib = zlib.as_mut().ok_or(ZlibError::NotInitialized)?;
- zlib.reset_stream()?;
+ zlib.reset_stream();
Ok(zlib.err)
}
@@ -362,12 +365,10 @@ pub fn op_zlib_reset(#[cppgc] resource: &Zlib) -> Result<i32, AnyError> {
#[op2(fast)]
pub fn op_zlib_close_if_pending(
#[cppgc] resource: &Zlib,
-) -> Result<(), AnyError> {
+) -> Result<(), ZlibError> {
let pending_close = {
let mut zlib = resource.inner.borrow_mut();
- let zlib = zlib
- .as_mut()
- .ok_or_else(|| type_error("zlib not initialized"))?;
+ let zlib = zlib.as_mut().ok_or(ZlibError::NotInitialized)?;
zlib.write_in_progress = false;
zlib.pending_close
@@ -381,6 +382,15 @@ pub fn op_zlib_close_if_pending(
Ok(())
}
+#[op2(fast)]
+#[smi]
+pub fn op_zlib_crc32(#[buffer] data: &[u8], #[smi] value: u32) -> u32 {
+ // SAFETY: `data` is a valid buffer.
+ unsafe {
+ zlib::crc32(value as c_ulong, data.as_ptr(), data.len() as u32) as u32
+ }
+}
+
#[cfg(test)]
mod tests {
use super::*;
diff --git a/ext/node/ops/zlib/mode.rs b/ext/node/ops/zlib/mode.rs
index 753300cc4..41565f9b1 100644
--- a/ext/node/ops/zlib/mode.rs
+++ b/ext/node/ops/zlib/mode.rs
@@ -1,19 +1,8 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
-#[derive(Debug)]
-pub enum Error {
- BadArgument,
-}
-
-impl std::fmt::Display for Error {
- fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
- match self {
- Error::BadArgument => write!(f, "bad argument"),
- }
- }
-}
-
-impl std::error::Error for Error {}
+#[derive(Debug, thiserror::Error)]
+#[error("bad argument")]
+pub struct ModeError;
macro_rules! repr_i32 {
($(#[$meta:meta])* $vis:vis enum $name:ident {
@@ -25,12 +14,12 @@ macro_rules! repr_i32 {
}
impl core::convert::TryFrom<i32> for $name {
- type Error = Error;
+ type Error = ModeError;
fn try_from(v: i32) -> Result<Self, Self::Error> {
match v {
$(x if x == $name::$vname as i32 => Ok($name::$vname),)*
- _ => Err(Error::BadArgument),
+ _ => Err(ModeError),
}
}
}