author    David Sherret <dsherret@users.noreply.github.com>    2024-11-15 23:22:50 -0500
committer GitHub <noreply@github.com>                          2024-11-15 23:22:50 -0500
commit    48b94c099526eb262287e101a75cb4571b8972b0 (patch)
tree      073d15b1d403f4b1172b2a954e0cdf7d1e9fce76 /ext
parent    dee94473c435b38b2d7829731804ac96e6856d9c (diff)
refactor: use boxed_error in some places (#26887)
Diffstat (limited to 'ext')
-rw-r--r--  ext/fs/Cargo.toml          1
-rw-r--r--  ext/fs/lib.rs              1
-rw-r--r--  ext/fs/ops.rs             65
-rw-r--r--  ext/kv/Cargo.toml          1
-rw-r--r--  ext/kv/lib.rs            100
-rw-r--r--  ext/node/Cargo.toml        1
-rw-r--r--  ext/node/ops/require.rs   16
7 files changed, 111 insertions, 74 deletions
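
Note on the pattern: the refactor renames each error enum to a *Kind enum and wraps it in a single-field newtype that holds a Box of that enum, so the public error type stays one pointer wide. The snippet below is a hand-written approximation of what the boxed_error Boxed derive is assumed to provide; only the into_box() method and the Kind naming are taken from this diff, while the From, Display, and Error forwarding are an illustrative assumption.

use std::fmt;

// Hand-rolled approximation of what `#[derive(Boxed)]` is assumed to provide
// for the wrappers introduced in this commit; the real derive expansion may
// differ.
#[derive(Debug)]
pub struct FsOpsError(pub Box<FsOpsErrorKind>);

#[derive(Debug, thiserror::Error)]
pub enum FsOpsErrorKind {
    #[error("{0}")]
    Io(#[source] std::io::Error),
    #[error("Invalid seek mode: {0}")] // illustrative message
    InvalidSeekMode(i32),
}

impl FsOpsErrorKind {
    // Mirrors the `.into_box()` calls used at construction sites below.
    pub fn into_box(self) -> FsOpsError {
        FsOpsError(Box::new(self))
    }
}

// Assumed: a `From<Kind>` impl so `?` and `.map_err(FsOpsErrorKind::Io)`
// convert to the boxed wrapper implicitly.
impl From<FsOpsErrorKind> for FsOpsError {
    fn from(kind: FsOpsErrorKind) -> Self {
        kind.into_box()
    }
}

// Forward Display and Error to the inner kind so callers see one error type.
impl fmt::Display for FsOpsError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Display::fmt(&self.0, f)
    }
}

impl std::error::Error for FsOpsError {
    fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
        std::error::Error::source(&*self.0)
    }
}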
diff --git a/ext/fs/Cargo.toml b/ext/fs/Cargo.toml
index e85f349b1..ace1b89f3 100644
--- a/ext/fs/Cargo.toml
+++ b/ext/fs/Cargo.toml
@@ -19,6 +19,7 @@ sync_fs = []
[dependencies]
async-trait.workspace = true
base32.workspace = true
+boxed_error.workspace = true
deno_core.workspace = true
deno_io.workspace = true
deno_path_util.workspace = true
diff --git a/ext/fs/lib.rs b/ext/fs/lib.rs
index dd852e6be..aed9a7085 100644
--- a/ext/fs/lib.rs
+++ b/ext/fs/lib.rs
@@ -15,6 +15,7 @@ pub use crate::interface::FsDirEntry;
pub use crate::interface::FsFileType;
pub use crate::interface::OpenOptions;
pub use crate::ops::FsOpsError;
+pub use crate::ops::FsOpsErrorKind;
pub use crate::ops::OperationError;
pub use crate::std_fs::RealFs;
pub use crate::sync::MaybeSend;
diff --git a/ext/fs/ops.rs b/ext/fs/ops.rs
index 3d0d96ce6..e3a511f8e 100644
--- a/ext/fs/ops.rs
+++ b/ext/fs/ops.rs
@@ -16,6 +16,7 @@ use crate::interface::FsDirEntry;
use crate::interface::FsFileType;
use crate::FsPermissions;
use crate::OpenOptions;
+use boxed_error::Boxed;
use deno_core::op2;
use deno_core::CancelFuture;
use deno_core::CancelHandle;
@@ -32,8 +33,11 @@ use rand::thread_rng;
use rand::Rng;
use serde::Serialize;
+#[derive(Debug, Boxed)]
+pub struct FsOpsError(pub Box<FsOpsErrorKind>);
+
#[derive(Debug, thiserror::Error)]
-pub enum FsOpsError {
+pub enum FsOpsErrorKind {
#[error("{0}")]
Io(#[source] std::io::Error),
#[error("{0}")]
@@ -73,15 +77,16 @@ pub enum FsOpsError {
impl From<FsError> for FsOpsError {
fn from(err: FsError) -> Self {
match err {
- FsError::Io(err) => FsOpsError::Io(err),
+ FsError::Io(err) => FsOpsErrorKind::Io(err),
FsError::FileBusy => {
- FsOpsError::Other(deno_core::error::resource_unavailable())
+ FsOpsErrorKind::Other(deno_core::error::resource_unavailable())
}
FsError::NotSupported => {
- FsOpsError::Other(deno_core::error::not_supported())
+ FsOpsErrorKind::Other(deno_core::error::not_supported())
}
- FsError::NotCapable(err) => FsOpsError::NotCapable(err),
+ FsError::NotCapable(err) => FsOpsErrorKind::NotCapable(err),
}
+ .into_box()
}
}
@@ -127,11 +132,12 @@ fn map_permission_error(
(path.as_str(), "")
};
- FsOpsError::NotCapableAccess {
+ FsOpsErrorKind::NotCapableAccess {
standalone: deno_permissions::is_standalone(),
err,
path: format!("{path}{truncated}"),
}
+ .into_box()
}
err => Err::<(), _>(err)
.context_path(operation, path)
@@ -1176,7 +1182,9 @@ fn validate_temporary_filename_component(
) -> Result<(), FsOpsError> {
// Ban ASCII and Unicode control characters: these will often fail
if let Some(c) = component.matches(|c: char| c.is_control()).next() {
- return Err(FsOpsError::InvalidControlCharacter(c.to_string()));
+ return Err(
+ FsOpsErrorKind::InvalidControlCharacter(c.to_string()).into_box(),
+ );
}
// Windows has the most restrictive filenames. As temp files aren't normal files, we just
// use this set of banned characters for all platforms because wildcard-like files can also
@@ -1192,13 +1200,13 @@ fn validate_temporary_filename_component(
.matches(|c: char| "<>:\"/\\|?*".contains(c))
.next()
{
- return Err(FsOpsError::InvalidCharacter(c.to_string()));
+ return Err(FsOpsErrorKind::InvalidCharacter(c.to_string()).into_box());
}
// This check is only for Windows
#[cfg(windows)]
if suffix && component.ends_with(|c: char| ". ".contains(c)) {
- return Err(FsOpsError::InvalidTrailingCharacter);
+ return Err(FsOpsErrorKind::InvalidTrailingCharacter.into_box());
}
Ok(())
@@ -1440,7 +1448,7 @@ fn to_seek_from(offset: i64, whence: i32) -> Result<SeekFrom, FsOpsError> {
1 => SeekFrom::Current(offset),
2 => SeekFrom::End(offset),
_ => {
- return Err(FsOpsError::InvalidSeekMode(whence));
+ return Err(FsOpsErrorKind::InvalidSeekMode(whence).into_box());
}
};
Ok(seek_from)
@@ -1456,7 +1464,7 @@ pub fn op_fs_seek_sync(
) -> Result<u64, FsOpsError> {
let pos = to_seek_from(offset, whence)?;
let file =
- FileResource::get_file(state, rid).map_err(FsOpsError::Resource)?;
+ FileResource::get_file(state, rid).map_err(FsOpsErrorKind::Resource)?;
let cursor = file.seek_sync(pos)?;
Ok(cursor)
}
@@ -1471,7 +1479,7 @@ pub async fn op_fs_seek_async(
) -> Result<u64, FsOpsError> {
let pos = to_seek_from(offset, whence)?;
let file = FileResource::get_file(&state.borrow(), rid)
- .map_err(FsOpsError::Resource)?;
+ .map_err(FsOpsErrorKind::Resource)?;
let cursor = file.seek_async(pos).await?;
Ok(cursor)
}
@@ -1482,7 +1490,7 @@ pub fn op_fs_file_sync_data_sync(
#[smi] rid: ResourceId,
) -> Result<(), FsOpsError> {
let file =
- FileResource::get_file(state, rid).map_err(FsOpsError::Resource)?;
+ FileResource::get_file(state, rid).map_err(FsOpsErrorKind::Resource)?;
file.datasync_sync()?;
Ok(())
}
@@ -1493,7 +1501,7 @@ pub async fn op_fs_file_sync_data_async(
#[smi] rid: ResourceId,
) -> Result<(), FsOpsError> {
let file = FileResource::get_file(&state.borrow(), rid)
- .map_err(FsOpsError::Resource)?;
+ .map_err(FsOpsErrorKind::Resource)?;
file.datasync_async().await?;
Ok(())
}
@@ -1504,7 +1512,7 @@ pub fn op_fs_file_sync_sync(
#[smi] rid: ResourceId,
) -> Result<(), FsOpsError> {
let file =
- FileResource::get_file(state, rid).map_err(FsOpsError::Resource)?;
+ FileResource::get_file(state, rid).map_err(FsOpsErrorKind::Resource)?;
file.sync_sync()?;
Ok(())
}
@@ -1515,7 +1523,7 @@ pub async fn op_fs_file_sync_async(
#[smi] rid: ResourceId,
) -> Result<(), FsOpsError> {
let file = FileResource::get_file(&state.borrow(), rid)
- .map_err(FsOpsError::Resource)?;
+ .map_err(FsOpsErrorKind::Resource)?;
file.sync_async().await?;
Ok(())
}
@@ -1527,7 +1535,7 @@ pub fn op_fs_file_stat_sync(
#[buffer] stat_out_buf: &mut [u32],
) -> Result<(), FsOpsError> {
let file =
- FileResource::get_file(state, rid).map_err(FsOpsError::Resource)?;
+ FileResource::get_file(state, rid).map_err(FsOpsErrorKind::Resource)?;
let stat = file.stat_sync()?;
let serializable_stat = SerializableStat::from(stat);
serializable_stat.write(stat_out_buf);
@@ -1541,7 +1549,7 @@ pub async fn op_fs_file_stat_async(
#[smi] rid: ResourceId,
) -> Result<SerializableStat, FsOpsError> {
let file = FileResource::get_file(&state.borrow(), rid)
- .map_err(FsOpsError::Resource)?;
+ .map_err(FsOpsErrorKind::Resource)?;
let stat = file.stat_async().await?;
Ok(stat.into())
}
@@ -1553,7 +1561,7 @@ pub fn op_fs_flock_sync(
exclusive: bool,
) -> Result<(), FsOpsError> {
let file =
- FileResource::get_file(state, rid).map_err(FsOpsError::Resource)?;
+ FileResource::get_file(state, rid).map_err(FsOpsErrorKind::Resource)?;
file.lock_sync(exclusive)?;
Ok(())
}
@@ -1565,7 +1573,7 @@ pub async fn op_fs_flock_async(
exclusive: bool,
) -> Result<(), FsOpsError> {
let file = FileResource::get_file(&state.borrow(), rid)
- .map_err(FsOpsError::Resource)?;
+ .map_err(FsOpsErrorKind::Resource)?;
file.lock_async(exclusive).await?;
Ok(())
}
@@ -1576,7 +1584,7 @@ pub fn op_fs_funlock_sync(
#[smi] rid: ResourceId,
) -> Result<(), FsOpsError> {
let file =
- FileResource::get_file(state, rid).map_err(FsOpsError::Resource)?;
+ FileResource::get_file(state, rid).map_err(FsOpsErrorKind::Resource)?;
file.unlock_sync()?;
Ok(())
}
@@ -1587,7 +1595,7 @@ pub async fn op_fs_funlock_async(
#[smi] rid: ResourceId,
) -> Result<(), FsOpsError> {
let file = FileResource::get_file(&state.borrow(), rid)
- .map_err(FsOpsError::Resource)?;
+ .map_err(FsOpsErrorKind::Resource)?;
file.unlock_async().await?;
Ok(())
}
@@ -1599,7 +1607,7 @@ pub fn op_fs_ftruncate_sync(
#[number] len: u64,
) -> Result<(), FsOpsError> {
let file =
- FileResource::get_file(state, rid).map_err(FsOpsError::Resource)?;
+ FileResource::get_file(state, rid).map_err(FsOpsErrorKind::Resource)?;
file.truncate_sync(len)?;
Ok(())
}
@@ -1611,7 +1619,7 @@ pub async fn op_fs_file_truncate_async(
#[number] len: u64,
) -> Result<(), FsOpsError> {
let file = FileResource::get_file(&state.borrow(), rid)
- .map_err(FsOpsError::Resource)?;
+ .map_err(FsOpsErrorKind::Resource)?;
file.truncate_async(len).await?;
Ok(())
}
@@ -1626,7 +1634,7 @@ pub fn op_fs_futime_sync(
#[smi] mtime_nanos: u32,
) -> Result<(), FsOpsError> {
let file =
- FileResource::get_file(state, rid).map_err(FsOpsError::Resource)?;
+ FileResource::get_file(state, rid).map_err(FsOpsErrorKind::Resource)?;
file.utime_sync(atime_secs, atime_nanos, mtime_secs, mtime_nanos)?;
Ok(())
}
@@ -1641,7 +1649,7 @@ pub async fn op_fs_futime_async(
#[smi] mtime_nanos: u32,
) -> Result<(), FsOpsError> {
let file = FileResource::get_file(&state.borrow(), rid)
- .map_err(FsOpsError::Resource)?;
+ .map_err(FsOpsErrorKind::Resource)?;
file
.utime_async(atime_secs, atime_nanos, mtime_secs, mtime_nanos)
.await?;
@@ -1717,7 +1725,7 @@ impl<T> MapErrContext for Result<T, FsError> {
where
F: FnOnce(FsError) -> OperationError,
{
- self.map_err(|err| FsOpsError::OperationError(f(err)))
+ self.map_err(|err| FsOpsErrorKind::OperationError(f(err)).into_box())
}
fn context(self, operation: &'static str) -> Self::R {
@@ -1754,7 +1762,8 @@ impl<T> MapErrContext for Result<T, FsError> {
}
fn path_into_string(s: std::ffi::OsString) -> Result<String, FsOpsError> {
- s.into_string().map_err(FsOpsError::InvalidUtf8)
+ s.into_string()
+ .map_err(|e| FsOpsErrorKind::InvalidUtf8(e).into_box())
}
macro_rules! create_struct_writer {
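
The point of the indirection (assumed from the general boxed-error rationale rather than stated in this commit) is that Result<T, FsOpsError> no longer carries the full size of the largest enum variant through every op. A hypothetical, self-contained size comparison with stand-in field types:

// Hypothetical size comparison: the widest variant sets the size of the
// unboxed enum (and of every Result carrying it), while the boxed wrapper
// stays pointer-sized.
#[allow(dead_code)]
#[derive(Debug)]
enum WideErrorKind {
    Io(std::io::Error),
    NotCapableAccess {
        standalone: bool,
        err: &'static str,
        path: String,
    },
}

#[allow(dead_code)]
#[derive(Debug)]
struct WideError(Box<WideErrorKind>);

fn main() {
    println!(
        "Result<u64, WideErrorKind>: {} bytes",
        std::mem::size_of::<Result<u64, WideErrorKind>>()
    );
    println!(
        "Result<u64, WideError>:     {} bytes",
        std::mem::size_of::<Result<u64, WideError>>()
    );
}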
diff --git a/ext/kv/Cargo.toml b/ext/kv/Cargo.toml
index 1d7b91770..aa7381766 100644
--- a/ext/kv/Cargo.toml
+++ b/ext/kv/Cargo.toml
@@ -17,6 +17,7 @@ path = "lib.rs"
anyhow.workspace = true
async-trait.workspace = true
base64.workspace = true
+boxed_error.workspace = true
bytes.workspace = true
chrono = { workspace = true, features = ["now"] }
deno_core.workspace = true
diff --git a/ext/kv/lib.rs b/ext/kv/lib.rs
index a4ccfe3d6..5392b9721 100644
--- a/ext/kv/lib.rs
+++ b/ext/kv/lib.rs
@@ -14,6 +14,7 @@ use std::time::Duration;
use base64::prelude::BASE64_URL_SAFE;
use base64::Engine;
+use boxed_error::Boxed;
use chrono::DateTime;
use chrono::Utc;
use deno_core::error::get_custom_error_class;
@@ -114,8 +115,11 @@ impl Resource for DatabaseWatcherResource {
}
}
+#[derive(Debug, Boxed)]
+pub struct KvError(pub Box<KvErrorKind>);
+
#[derive(Debug, thiserror::Error)]
-pub enum KvError {
+pub enum KvErrorKind {
#[error(transparent)]
DatabaseHandler(deno_core::error::AnyError),
#[error(transparent)]
@@ -193,7 +197,7 @@ where
let db = handler
.open(state.clone(), path)
.await
- .map_err(KvError::DatabaseHandler)?;
+ .map_err(KvErrorKind::DatabaseHandler)?;
let rid = state.borrow_mut().resource_table.add(DatabaseResource {
db,
cancel_handle: CancelHandle::new_rc(),
@@ -329,7 +333,7 @@ where
let resource = state
.resource_table
.get::<DatabaseResource<DBH::DB>>(rid)
- .map_err(KvError::Resource)?;
+ .map_err(KvErrorKind::Resource)?;
resource.db.clone()
};
@@ -339,7 +343,7 @@ where
};
if ranges.len() > config.max_read_ranges {
- return Err(KvError::TooManyRanges(config.max_read_ranges));
+ return Err(KvErrorKind::TooManyRanges(config.max_read_ranges).into_box());
}
let mut total_entries = 0usize;
@@ -358,14 +362,16 @@ where
Ok(ReadRange {
start,
end,
- limit: NonZeroU32::new(limit).ok_or(KvError::InvalidLimit)?,
+ limit: NonZeroU32::new(limit).ok_or(KvErrorKind::InvalidLimit)?,
reverse,
})
})
.collect::<Result<Vec<_>, KvError>>()?;
if total_entries > config.max_read_entries {
- return Err(KvError::TooManyEntries(config.max_read_entries));
+ return Err(
+ KvErrorKind::TooManyEntries(config.max_read_entries).into_box(),
+ );
}
let opts = SnapshotReadOptions {
@@ -374,7 +380,7 @@ where
let output_ranges = db
.snapshot_read(read_ranges, opts)
.await
- .map_err(KvError::Kv)?;
+ .map_err(KvErrorKind::Kv)?;
let output_ranges = output_ranges
.into_iter()
.map(|x| {
@@ -415,7 +421,7 @@ where
if get_custom_error_class(&err) == Some("BadResource") {
return Ok(None);
} else {
- return Err(KvError::Resource(err));
+ return Err(KvErrorKind::Resource(err).into_box());
}
}
};
@@ -423,11 +429,11 @@ where
};
let Some(mut handle) =
- db.dequeue_next_message().await.map_err(KvError::Kv)?
+ db.dequeue_next_message().await.map_err(KvErrorKind::Kv)?
else {
return Ok(None);
};
- let payload = handle.take_payload().await.map_err(KvError::Kv)?.into();
+ let payload = handle.take_payload().await.map_err(KvErrorKind::Kv)?.into();
let handle_rid = {
let mut state = state.borrow_mut();
state.resource_table.add(QueueMessageResource { handle })
@@ -448,11 +454,11 @@ where
let resource = state
.resource_table
.get::<DatabaseResource<DBH::DB>>(rid)
- .map_err(KvError::Resource)?;
+ .map_err(KvErrorKind::Resource)?;
let config = state.borrow::<Rc<KvConfig>>().clone();
if keys.len() > config.max_watched_keys {
- return Err(KvError::TooManyKeys(config.max_watched_keys));
+ return Err(KvErrorKind::TooManyKeys(config.max_watched_keys).into_box());
}
let keys: Vec<Vec<u8>> = keys
@@ -493,7 +499,7 @@ async fn op_kv_watch_next(
let resource = state
.resource_table
.get::<DatabaseWatcherResource>(rid)
- .map_err(KvError::Resource)?;
+ .map_err(KvErrorKind::Resource)?;
resource.clone()
};
@@ -519,7 +525,7 @@ async fn op_kv_watch_next(
return Ok(None);
};
- let entries = res.map_err(KvError::Kv)?;
+ let entries = res.map_err(KvErrorKind::Kv)?;
let entries = entries
.into_iter()
.map(|entry| {
@@ -549,9 +555,9 @@ where
let handle = state
.resource_table
.take::<QueueMessageResource<<<DBH>::DB as Database>::QMH>>(handle_rid)
- .map_err(|_| KvError::QueueMessageNotFound)?;
+ .map_err(|_| KvErrorKind::QueueMessageNotFound)?;
Rc::try_unwrap(handle)
- .map_err(|_| KvError::QueueMessageNotFound)?
+ .map_err(|_| KvErrorKind::QueueMessageNotFound)?
.handle
};
// if we fail to finish the message, there is not much we can do and the
@@ -692,7 +698,7 @@ impl RawSelector {
}),
(Some(prefix), Some(start), None) => {
if !start.starts_with(&prefix) || start.len() == prefix.len() {
- return Err(KvError::StartKeyNotInKeyspace);
+ return Err(KvErrorKind::StartKeyNotInKeyspace.into_box());
}
Ok(Self::Prefixed {
prefix,
@@ -702,7 +708,7 @@ impl RawSelector {
}
(Some(prefix), None, Some(end)) => {
if !end.starts_with(&prefix) || end.len() == prefix.len() {
- return Err(KvError::EndKeyNotInKeyspace);
+ return Err(KvErrorKind::EndKeyNotInKeyspace.into_box());
}
Ok(Self::Prefixed {
prefix,
@@ -712,7 +718,7 @@ impl RawSelector {
}
(None, Some(start), Some(end)) => {
if start > end {
- return Err(KvError::StartKeyGreaterThanEndKey);
+ return Err(KvErrorKind::StartKeyGreaterThanEndKey.into_box());
}
Ok(Self::Range { start, end })
}
@@ -720,7 +726,7 @@ impl RawSelector {
let end = start.iter().copied().chain(Some(0)).collect();
Ok(Self::Range { start, end })
}
- _ => Err(KvError::InvalidRange),
+ _ => Err(KvErrorKind::InvalidRange.into_box()),
}
}
@@ -782,7 +788,7 @@ fn encode_cursor(
) -> Result<String, KvError> {
let common_prefix = selector.common_prefix();
if !boundary_key.starts_with(common_prefix) {
- return Err(KvError::InvalidBoundaryKey);
+ return Err(KvErrorKind::InvalidBoundaryKey.into_box());
}
Ok(BASE64_URL_SAFE.encode(&boundary_key[common_prefix.len()..]))
}
@@ -799,7 +805,7 @@ fn decode_selector_and_cursor(
let common_prefix = selector.common_prefix();
let cursor = BASE64_URL_SAFE
.decode(cursor)
- .map_err(|_| KvError::InvalidCursor)?;
+ .map_err(|_| KvErrorKind::InvalidCursor)?;
let first_key: Vec<u8>;
let last_key: Vec<u8>;
@@ -824,13 +830,13 @@ fn decode_selector_and_cursor(
// Defend against out-of-bounds reading
if let Some(start) = selector.start() {
if &first_key[..] < start {
- return Err(KvError::CursorOutOfBounds);
+ return Err(KvErrorKind::CursorOutOfBounds.into_box());
}
}
if let Some(end) = selector.end() {
if &last_key[..] > end {
- return Err(KvError::CursorOutOfBounds);
+ return Err(KvErrorKind::CursorOutOfBounds.into_box());
}
}
@@ -855,7 +861,7 @@ where
let resource = state
.resource_table
.get::<DatabaseResource<DBH::DB>>(rid)
- .map_err(KvError::Resource)?;
+ .map_err(KvErrorKind::Resource)?;
resource.db.clone()
};
@@ -865,28 +871,28 @@ where
};
if checks.len() > config.max_checks {
- return Err(KvError::TooManyChecks(config.max_checks));
+ return Err(KvErrorKind::TooManyChecks(config.max_checks).into_box());
}
if mutations.len() + enqueues.len() > config.max_mutations {
- return Err(KvError::TooManyMutations(config.max_mutations));
+ return Err(KvErrorKind::TooManyMutations(config.max_mutations).into_box());
}
let checks = checks
.into_iter()
.map(check_from_v8)
.collect::<Result<Vec<Check>, KvCheckError>>()
- .map_err(KvError::InvalidCheck)?;
+ .map_err(KvErrorKind::InvalidCheck)?;
let mutations = mutations
.into_iter()
.map(|mutation| mutation_from_v8((mutation, current_timestamp)))
.collect::<Result<Vec<Mutation>, KvMutationError>>()
- .map_err(KvError::InvalidMutation)?;
+ .map_err(KvErrorKind::InvalidMutation)?;
let enqueues = enqueues
.into_iter()
.map(|e| enqueue_from_v8(e, current_timestamp))
.collect::<Result<Vec<Enqueue>, std::io::Error>>()
- .map_err(KvError::InvalidEnqueue)?;
+ .map_err(KvErrorKind::InvalidEnqueue)?;
let mut total_payload_size = 0usize;
let mut total_key_size = 0usize;
@@ -897,7 +903,7 @@ where
.chain(mutations.iter().map(|m| &m.key))
{
if key.is_empty() {
- return Err(KvError::EmptyKey);
+ return Err(KvErrorKind::EmptyKey.into_box());
}
total_payload_size += check_write_key_size(key, &config)?;
@@ -921,13 +927,16 @@ where
}
if total_payload_size > config.max_total_mutation_size_bytes {
- return Err(KvError::TotalMutationTooLarge(
- config.max_total_mutation_size_bytes,
- ));
+ return Err(
+ KvErrorKind::TotalMutationTooLarge(config.max_total_mutation_size_bytes)
+ .into_box(),
+ );
}
if total_key_size > config.max_total_key_size_bytes {
- return Err(KvError::TotalKeyTooLarge(config.max_total_key_size_bytes));
+ return Err(
+ KvErrorKind::TotalKeyTooLarge(config.max_total_key_size_bytes).into_box(),
+ );
}
let atomic_write = AtomicWrite {
@@ -936,7 +945,10 @@ where
enqueues,
};
- let result = db.atomic_write(atomic_write).await.map_err(KvError::Kv)?;
+ let result = db
+ .atomic_write(atomic_write)
+ .await
+ .map_err(KvErrorKind::Kv)?;
Ok(result.map(|res| faster_hex::hex_string(&res.versionstamp)))
}
@@ -958,7 +970,9 @@ fn op_kv_encode_cursor(
fn check_read_key_size(key: &[u8], config: &KvConfig) -> Result<(), KvError> {
if key.len() > config.max_read_key_size_bytes {
- Err(KvError::KeyTooLargeToRead(config.max_read_key_size_bytes))
+ Err(
+ KvErrorKind::KeyTooLargeToRead(config.max_read_key_size_bytes).into_box(),
+ )
} else {
Ok(())
}
@@ -969,7 +983,10 @@ fn check_write_key_size(
config: &KvConfig,
) -> Result<usize, KvError> {
if key.len() > config.max_write_key_size_bytes {
- Err(KvError::KeyTooLargeToWrite(config.max_write_key_size_bytes))
+ Err(
+ KvErrorKind::KeyTooLargeToWrite(config.max_write_key_size_bytes)
+ .into_box(),
+ )
} else {
Ok(key.len())
}
@@ -986,7 +1003,7 @@ fn check_value_size(
};
if payload.len() > config.max_value_size_bytes {
- Err(KvError::ValueTooLarge(config.max_value_size_bytes))
+ Err(KvErrorKind::ValueTooLarge(config.max_value_size_bytes).into_box())
} else {
Ok(payload.len())
}
@@ -997,7 +1014,10 @@ fn check_enqueue_payload_size(
config: &KvConfig,
) -> Result<usize, KvError> {
if payload.len() > config.max_value_size_bytes {
- Err(KvError::EnqueuePayloadTooLarge(config.max_value_size_bytes))
+ Err(
+ KvErrorKind::EnqueuePayloadTooLarge(config.max_value_size_bytes)
+ .into_box(),
+ )
} else {
Ok(payload.len())
}
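
A recurring call-site change above is mapping straight to the Kind variant and letting the surrounding function return the boxed type, e.g. .map_err(KvErrorKind::Resource)? inside a function returning Result<_, KvError>. That only compiles if a From<KvErrorKind> for KvError conversion exists for ? to apply; the snippet below reproduces that mechanism with stand-in types (everything other than the into-box shape is hypothetical):

// Stand-in types showing why `.map_err(KindVariant)?` type-checks when the
// function returns the boxed wrapper: `?` inserts the `From` conversion.
#[allow(dead_code)]
#[derive(Debug)]
enum DemoErrorKind {
    Resource(String),
    InvalidLimit,
}

#[derive(Debug)]
struct DemoError(Box<DemoErrorKind>);

impl From<DemoErrorKind> for DemoError {
    fn from(kind: DemoErrorKind) -> Self {
        DemoError(Box::new(kind))
    }
}

fn lookup(rid: u32) -> Result<&'static str, String> {
    if rid == 0 { Err("unknown rid".to_string()) } else { Ok("db") }
}

fn open(rid: u32) -> Result<&'static str, DemoError> {
    // Maps to the Kind first; `?` then boxes it via `From<DemoErrorKind>`.
    let db = lookup(rid).map_err(DemoErrorKind::Resource)?;
    Ok(db)
}

fn main() {
    println!("{:?}", open(0));
    println!("{:?}", open(1));
}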
diff --git a/ext/node/Cargo.toml b/ext/node/Cargo.toml
index 9e1a3495b..c4ee86a95 100644
--- a/ext/node/Cargo.toml
+++ b/ext/node/Cargo.toml
@@ -22,6 +22,7 @@ aes.workspace = true
async-trait.workspace = true
base64.workspace = true
blake2 = "0.10.6"
+boxed_error.workspace = true
brotli.workspace = true
bytes.workspace = true
cbc.workspace = true
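
ext/node/ops/require.rs (diffed next) gets the same treatment. Because the wrapper is declared with a public field (pub Box<RequireErrorKind>) and the Kind enums remain public (see the FsOpsErrorKind export added to ext/fs/lib.rs above), callers can still branch on concrete variants by going through the box; a sketch with stand-in types:

// Stand-in types showing how a caller can still match on concrete variants
// through the public boxed field.
#[allow(dead_code)]
#[derive(Debug)]
enum DemoRequireErrorKind {
    Permission(String),
    ReadModule(String),
}

#[derive(Debug)]
struct DemoRequireError(pub Box<DemoRequireErrorKind>);

fn classify(err: &DemoRequireError) -> &'static str {
    // Deref through the Box and match the kind, as before the refactor.
    match &*err.0 {
        DemoRequireErrorKind::Permission(_) => "permission denied",
        DemoRequireErrorKind::ReadModule(_) => "failed to read module",
    }
}

fn main() {
    let err = DemoRequireError(Box::new(DemoRequireErrorKind::ReadModule(
        "file not found".to_string(),
    )));
    println!("{}", classify(&err));
}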
diff --git a/ext/node/ops/require.rs b/ext/node/ops/require.rs
index e381ee91d..06c034fd5 100644
--- a/ext/node/ops/require.rs
+++ b/ext/node/ops/require.rs
@@ -1,5 +1,6 @@
// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
+use boxed_error::Boxed;
use deno_core::error::AnyError;
use deno_core::op2;
use deno_core::url::Url;
@@ -40,8 +41,11 @@ where
loader.ensure_read_permission(permissions, file_path)
}
+#[derive(Debug, Boxed)]
+pub struct RequireError(pub Box<RequireErrorKind>);
+
#[derive(Debug, thiserror::Error)]
-pub enum RequireError {
+pub enum RequireErrorKind {
#[error(transparent)]
UrlParse(#[from] url::ParseError),
#[error(transparent)]
@@ -135,7 +139,7 @@ where
let from = if from.starts_with("file:///") {
url_to_file_path(&Url::parse(&from)?)?
} else {
- let current_dir = &fs.cwd().map_err(RequireError::UnableToGetCwd)?;
+ let current_dir = &fs.cwd().map_err(RequireErrorKind::UnableToGetCwd)?;
normalize_path(current_dir.join(from))
};
@@ -324,7 +328,7 @@ where
{
let path = PathBuf::from(request);
let path = ensure_read_permission::<P>(state, &path)
- .map_err(RequireError::Permission)?;
+ .map_err(RequireErrorKind::Permission)?;
let fs = state.borrow::<FileSystemRc>();
let canonicalized_path =
deno_path_util::strip_unc_prefix(fs.realpath_sync(&path)?);
@@ -484,11 +488,11 @@ where
let file_path = PathBuf::from(file_path);
// todo(dsherret): there's multiple borrows to NodeRequireLoaderRc here
let file_path = ensure_read_permission::<P>(state, &file_path)
- .map_err(RequireError::Permission)?;
+ .map_err(RequireErrorKind::Permission)?;
let loader = state.borrow::<NodeRequireLoaderRc>();
loader
.load_text_file_lossy(&file_path)
- .map_err(RequireError::ReadModule)
+ .map_err(|e| RequireErrorKind::ReadModule(e).into_box())
}
#[op2]
@@ -612,7 +616,7 @@ where
{
let referrer_path = PathBuf::from(&referrer_filename);
let referrer_path = ensure_read_permission::<P>(state, &referrer_path)
- .map_err(RequireError::Permission)?;
+ .map_err(RequireErrorKind::Permission)?;
let pkg_json_resolver = state.borrow::<PackageJsonResolverRc>();
let Some(pkg) =
pkg_json_resolver.get_closest_package_json_from_path(&referrer_path)?