path: root/ext/kv/lib.rs
author    Igor Zinkovsky <igor@deno.com>    2023-08-26 18:26:09 -0700
committer GitHub <noreply@github.com>       2023-08-26 18:26:09 -0700
commit    e4cebf3e0da539321727b0f0e43ddce5679635b1 (patch)
tree      8c3999e9d7dfe219e8e8a1036d058c89285c631b /ext/kv/lib.rs
parent    d104a09f796d3176ccd73a24906da87073cab85f (diff)
fix(kv) increase number of allowed mutations in atomic (#20126)
fixes #19741
Impose a limit on the total atomic payload size
Diffstat (limited to 'ext/kv/lib.rs')
-rw-r--r--  ext/kv/lib.rs  32
1 file changed, 21 insertions(+), 11 deletions(-)
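Why a total cap lands in the same patch that raises MAX_MUTATIONS: with up to 1000 mutations per atomic write, the per-value limit alone would still admit tens of megabytes in a single commit call. The snippet below is not part of the patch; it only works through that arithmetic, using the constants visible in the diff.

// Not part of the patch: quick arithmetic behind the new total cap, using the
// constants that appear in the diff below.
const MAX_VALUE_SIZE_BYTES: usize = 65536; // per-value limit (64 KiB)
const MAX_MUTATIONS: usize = 1000; // raised from 10 in this commit
const MAX_TOTAL_MUTATION_SIZE_BYTES: usize = 819200; // new total cap (800 KiB)

fn main() {
  // Worst case if only per-value limits applied: 1000 maximum-size values.
  let uncapped_worst_case = MAX_MUTATIONS * MAX_VALUE_SIZE_BYTES;
  assert_eq!(uncapped_worst_case, 65_536_000); // ~62.5 MiB in one atomic write

  // The new cap bounds the whole atomic payload to 800 KiB instead.
  assert_eq!(MAX_TOTAL_MUTATION_SIZE_BYTES, 800 * 1024);
  println!(
    "worst case without cap: {} bytes; capped at: {} bytes",
    uncapped_worst_case, MAX_TOTAL_MUTATION_SIZE_BYTES
  );
}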
diff --git a/ext/kv/lib.rs b/ext/kv/lib.rs
index f226b11ae..ab78fe4c3 100644
--- a/ext/kv/lib.rs
+++ b/ext/kv/lib.rs
@@ -38,7 +38,8 @@ const MAX_VALUE_SIZE_BYTES: usize = 65536;
const MAX_READ_RANGES: usize = 10;
const MAX_READ_ENTRIES: usize = 1000;
const MAX_CHECKS: usize = 10;
-const MAX_MUTATIONS: usize = 10;
+const MAX_MUTATIONS: usize = 1000;
+const MAX_TOTAL_MUTATION_SIZE_BYTES: usize = 819200;

struct UnstableChecker {
  pub unstable: bool,
@@ -638,6 +639,8 @@ where
    .collect::<Result<Vec<Enqueue>, AnyError>>()
    .with_context(|| "invalid enqueue")?;

+  let mut total_payload_size = 0usize;
+
  for key in checks
    .iter()
    .map(|c| &c.key)
@@ -647,15 +650,22 @@ where
      return Err(type_error("key cannot be empty"));
    }

-    check_write_key_size(key)?;
+    total_payload_size += check_write_key_size(key)?;
  }

  for value in mutations.iter().flat_map(|m| m.kind.value()) {
-    check_value_size(value)?;
+    total_payload_size += check_value_size(value)?;
  }

  for enqueue in &enqueues {
-    check_enqueue_payload_size(&enqueue.payload)?;
+    total_payload_size += check_enqueue_payload_size(&enqueue.payload)?;
+  }
+
+  if total_payload_size > MAX_TOTAL_MUTATION_SIZE_BYTES {
+    return Err(type_error(format!(
+      "total mutation size too large (max {} bytes)",
+      MAX_TOTAL_MUTATION_SIZE_BYTES
+    )));
  }

  let atomic_write = AtomicWrite {
@@ -694,22 +704,22 @@ fn check_read_key_size(key: &[u8]) -> Result<(), AnyError> {
  }
}

-fn check_write_key_size(key: &[u8]) -> Result<(), AnyError> {
+fn check_write_key_size(key: &[u8]) -> Result<usize, AnyError> {
  if key.len() > MAX_WRITE_KEY_SIZE_BYTES {
    Err(type_error(format!(
      "key too large for write (max {} bytes)",
      MAX_WRITE_KEY_SIZE_BYTES
    )))
  } else {
-    Ok(())
+    Ok(key.len())
  }
}

-fn check_value_size(value: &Value) -> Result<(), AnyError> {
+fn check_value_size(value: &Value) -> Result<usize, AnyError> {
  let payload = match value {
    Value::Bytes(x) => x,
    Value::V8(x) => x,
-    Value::U64(_) => return Ok(()),
+    Value::U64(_) => return Ok(8),
  };

  if payload.len() > MAX_VALUE_SIZE_BYTES {
@@ -718,17 +728,17 @@ fn check_value_size(value: &Value) -> Result<(), AnyError> {
      MAX_VALUE_SIZE_BYTES
    )))
  } else {
-    Ok(())
+    Ok(payload.len())
  }
}

-fn check_enqueue_payload_size(payload: &[u8]) -> Result<(), AnyError> {
+fn check_enqueue_payload_size(payload: &[u8]) -> Result<usize, AnyError> {
  if payload.len() > MAX_VALUE_SIZE_BYTES {
    Err(type_error(format!(
      "enqueue payload too large (max {} bytes)",
      MAX_VALUE_SIZE_BYTES
    )))
  } else {
-    Ok(())
+    Ok(payload.len())
  }
}
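The shape of the change, separated from the deno_core plumbing: each per-item check now reports how many bytes it accepted, and the caller folds those counts into one running total that is compared against the new cap. The code below is a simplified standalone sketch, not the actual ext/kv code; it swaps deno_core's AnyError/type_error for plain String errors, models values as raw byte slices, and omits the key and enqueue checks that the patch also folds into the total.

// Simplified sketch of the accumulate-and-cap pattern from the patch above.
// Plain String errors stand in for deno_core's AnyError/type_error; the
// constants are copied from the diff.
const MAX_VALUE_SIZE_BYTES: usize = 65536;
const MAX_TOTAL_MUTATION_SIZE_BYTES: usize = 819200;

// Per-value check: rejects oversized values and reports the accepted size.
fn check_value_size(value: &[u8]) -> Result<usize, String> {
  if value.len() > MAX_VALUE_SIZE_BYTES {
    Err(format!("value too large (max {} bytes)", MAX_VALUE_SIZE_BYTES))
  } else {
    Ok(value.len())
  }
}

// Whole-batch check: sums the per-value sizes and enforces the total cap.
fn check_total_payload(values: &[&[u8]]) -> Result<usize, String> {
  let mut total_payload_size = 0usize;
  for value in values {
    total_payload_size += check_value_size(value)?;
  }
  if total_payload_size > MAX_TOTAL_MUTATION_SIZE_BYTES {
    return Err(format!(
      "total mutation size too large (max {} bytes)",
      MAX_TOTAL_MUTATION_SIZE_BYTES
    ));
  }
  Ok(total_payload_size)
}

fn main() {
  // 100 values of 1 KiB each stay under the cap...
  let small = vec![0u8; 1024];
  let ok_batch: Vec<&[u8]> = (0..100).map(|_| small.as_slice()).collect();
  assert!(check_total_payload(&ok_batch).is_ok());

  // ...but 1000 of them exceed it, even though each value passes on its own.
  let big_batch: Vec<&[u8]> = (0..1000).map(|_| small.as_slice()).collect();
  assert!(check_total_payload(&big_batch).is_err());
}

Returning the accepted size from each validator keeps the sizing and the per-item limit checks in a single pass, which is essentially what the patch does across keys, values, and enqueue payloads before constructing the AtomicWrite.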