Diffstat (limited to 'cli/tests')
-rw-r--r--  cli/tests/unit/kv_test.ts  133
1 file changed, 133 insertions, 0 deletions
diff --git a/cli/tests/unit/kv_test.ts b/cli/tests/unit/kv_test.ts
index fcb0c7894..f13479388 100644
--- a/cli/tests/unit/kv_test.ts
+++ b/cli/tests/unit/kv_test.ts
@@ -1004,3 +1004,136 @@ dbTest("key ordering", async (db) => {
[true],
]);
});
+
+dbTest("key size limit", async (db) => {
+ // 1 byte prefix + 1 byte suffix + 2045 bytes key
+ const lastValidKey = new Uint8Array(2046).fill(1);
+ const firstInvalidKey = new Uint8Array(2047).fill(1);
+
+ await db.set([lastValidKey], 1);
+
+ assertEquals(await db.get([lastValidKey]), {
+ key: [lastValidKey],
+ value: 1,
+ versionstamp: "00000000000000010000",
+ });
+
+ await assertRejects(
+ async () => await db.set([firstInvalidKey], 1),
+ TypeError,
+ "key too large for write (max 2048 bytes)",
+ );
+
+ await assertRejects(
+ async () => await db.get([firstInvalidKey]),
+ TypeError,
+ "key too large for read (max 2049 bytes)",
+ );
+});
+
+dbTest("value size limit", async (db) => {
+ const lastValidValue = new Uint8Array(65536);
+ const firstInvalidValue = new Uint8Array(65537);
+
+ await db.set(["a"], lastValidValue);
+ assertEquals(await db.get(["a"]), {
+ key: ["a"],
+ value: lastValidValue,
+ versionstamp: "00000000000000010000",
+ });
+
+ await assertRejects(
+ async () => await db.set(["b"], firstInvalidValue),
+ TypeError,
+ "value too large (max 65536 bytes)",
+ );
+});
+
+dbTest("operation size limit", async (db) => {
+ const lastValidKeys: Deno.KvKey[] = new Array(10).fill(0).map((
+ _,
+ i,
+ ) => ["a", i]);
+ const firstInvalidKeys: Deno.KvKey[] = new Array(11).fill(0).map((
+ _,
+ i,
+ ) => ["a", i]);
+
+ assertEquals((await db.getMany(lastValidKeys)).length, 10);
+
+ await assertRejects(
+ async () => await db.getMany(firstInvalidKeys),
+ TypeError,
+ "too many ranges (max 10)",
+ );
+
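+  // list() accepts a batchSize of at most 1000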
+ assertEquals(
+ (await collect(db.list({
+ prefix: ["a"],
+ }, {
+ batchSize: 1000,
+ }))).length,
+ 0,
+ );
+
+  await assertRejects(
+ async () =>
+ await collect(db.list({
+ prefix: ["a"],
+ }, {
+ batchSize: 1001,
+ })),
+ TypeError,
+ "too many entries (max 1000)",
+ );
+
+ // when batchSize is not specified, limit is used but is clamped to 500
+ assertEquals(
+ (await collect(db.list({
+ prefix: ["a"],
+ }, {
+ limit: 1001,
+ }))).length,
+ 0,
+ );
+
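+  // a single atomic operation accepts at most 10 checks and 10 mutations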
+ assertEquals(
+ await db.atomic().check(...lastValidKeys.map((key) => ({
+ key,
+ versionstamp: null,
+ }))).mutate(...lastValidKeys.map((key) => ({
+ key,
+ type: "set",
+ value: 1,
+ } satisfies Deno.KvMutation))).commit(),
+ true,
+ );
+
+ await assertRejects(
+ async () =>
+ await db.atomic().check(...firstInvalidKeys.map((key) => ({
+ key,
+ versionstamp: null,
+ }))).mutate(...lastValidKeys.map((key) => ({
+ key,
+ type: "set",
+ value: 1,
+ } satisfies Deno.KvMutation))).commit(),
+ TypeError,
+ "too many checks (max 10)",
+ );
+
+ await assertRejects(
+ async () =>
+ await db.atomic().check(...lastValidKeys.map((key) => ({
+ key,
+ versionstamp: null,
+ }))).mutate(...firstInvalidKeys.map((key) => ({
+ key,
+ type: "set",
+ value: 1,
+ } satisfies Deno.KvMutation))).commit(),
+ TypeError,
+ "too many mutations (max 10)",
+ );
+});