author     Heyang Zhou <zhy20000919@hotmail.com>      2023-09-07 15:07:04 +0800
committer  GitHub <noreply@github.com>                2023-09-07 15:07:04 +0800
commit     01a761f1d4f7ff4943fbf80464a276b434d8a8f7 (patch)
tree       6dc4a065f46a2bbaa91ecac9b265ef9910d22a35
parent     2cbd1b40cb4392b6a4f6b2e7315e610f488d66b7 (diff)
chore(ext/kv): limit total key size in an atomic op to 80 KiB (#20395)
Keys are expensive metadata. We track them for various purposes, e.g. transaction conflict checks and key expiration. This patch limits the total key size in an atomic operation to 80 KiB (81920 bytes), which helps keep implementations efficient.
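
For reference, a minimal sketch of how the new limit surfaces to callers of the unstable Deno KV API (run with --unstable-kv). The key sizes below are illustrative, loosely mirroring the added test; the error text matches the message introduced in this patch.

const db = await Deno.openKv();

// ~80 keys of roughly 1.1 KB of key material each already exceeds the
// 81920-byte (80 KiB) cap, so the commit is rejected.
const longPart = "a".repeat(1100);
const atomic = db.atomic();
for (let i = 0; i < 80; i++) {
  atomic.set([longPart, i], "value");
}

try {
  await atomic.commit();
} catch (err) {
  // TypeError: total key size too large (max 81920 bytes)
  console.error(err);
}

db.close();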
-rw-r--r--  cli/tests/unit/kv_test.ts  15
-rw-r--r--  ext/kv/lib.rs              15
2 files changed, 28 insertions, 2 deletions
diff --git a/cli/tests/unit/kv_test.ts b/cli/tests/unit/kv_test.ts
index de764cf80..ea1e328bc 100644
--- a/cli/tests/unit/kv_test.ts
+++ b/cli/tests/unit/kv_test.ts
@@ -1283,6 +1283,21 @@ dbTest("total mutation size limit", async (db) => {
);
});
+dbTest("total key size limit", async (db) => {
+ const longString = new Array(1100).fill("a").join("");
+ const keys: Deno.KvKey[] = new Array(80).fill(0).map(() => [longString]);
+
+ const atomic = db.atomic();
+ for (const key of keys) {
+ atomic.set(key, "foo");
+ }
+ await assertRejects(
+ () => atomic.commit(),
+ TypeError,
+ "total key size too large (max 81920 bytes)",
+ );
+});
+
dbTest("keys must be arrays", async (db) => {
await assertRejects(
// @ts-expect-error invalid type
diff --git a/ext/kv/lib.rs b/ext/kv/lib.rs
index ab78fe4c3..294f03289 100644
--- a/ext/kv/lib.rs
+++ b/ext/kv/lib.rs
@@ -39,7 +39,8 @@ const MAX_READ_RANGES: usize = 10;
const MAX_READ_ENTRIES: usize = 1000;
const MAX_CHECKS: usize = 10;
const MAX_MUTATIONS: usize = 1000;
-const MAX_TOTAL_MUTATION_SIZE_BYTES: usize = 819200;
+const MAX_TOTAL_MUTATION_SIZE_BYTES: usize = 800 * 1024;
+const MAX_TOTAL_KEY_SIZE_BYTES: usize = 80 * 1024;
struct UnstableChecker {
pub unstable: bool,
@@ -640,6 +641,7 @@ where
.with_context(|| "invalid enqueue")?;
let mut total_payload_size = 0usize;
+ let mut total_key_size = 0usize;
for key in checks
.iter()
@@ -650,7 +652,9 @@ where
return Err(type_error("key cannot be empty"));
}
- total_payload_size += check_write_key_size(key)?;
+ let checked_size = check_write_key_size(key)?;
+ total_payload_size += checked_size;
+ total_key_size += checked_size;
}
for value in mutations.iter().flat_map(|m| m.kind.value()) {
@@ -668,6 +672,13 @@ where
)));
}
+ if total_key_size > MAX_TOTAL_KEY_SIZE_BYTES {
+ return Err(type_error(format!(
+ "total key size too large (max {} bytes)",
+ MAX_TOTAL_KEY_SIZE_BYTES
+ )));
+ }
+
let atomic_write = AtomicWrite {
checks,
mutations,
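
Together with the existing caps in ext/kv/lib.rs (checks, mutations, read ranges, total mutation payload), an atomic write is now also bounded by total key bytes. Below is a hedged sketch of a client-side pre-check that approximates the new server-side limit. The server measures the encoded key size, and the per-part overhead used here is an assumption rather than something taken from this patch, so keep a safety margin instead of treating the estimate as exact.

const MAX_TOTAL_KEY_SIZE_BYTES = 80 * 1024;

// Rough, assumption-based estimate of a key's encoded size.
function approximateKeySize(key: Deno.KvKey): number {
  let size = 0;
  for (const part of key) {
    if (typeof part === "string") {
      size += new TextEncoder().encode(part).length + 2; // assumed per-part overhead
    } else if (part instanceof Uint8Array) {
      size += part.length + 2; // assumed per-part overhead
    } else {
      size += 9; // numbers, bigints, booleans: assumed rough upper bound
    }
  }
  return size;
}

// Throws before commit() would, if the keys clearly cannot fit.
function assertKeysFit(keys: Deno.KvKey[]) {
  const total = keys.reduce((sum, k) => sum + approximateKeySize(k), 0);
  if (total > MAX_TOTAL_KEY_SIZE_BYTES) {
    throw new TypeError(
      `total key size too large (max ${MAX_TOTAL_KEY_SIZE_BYTES} bytes)`,
    );
  }
}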