author    | David Sherret <dsherret@users.noreply.github.com> | 2021-10-11 09:45:02 -0400
committer | GitHub <noreply@github.com>                       | 2021-10-11 09:45:02 -0400
commit    | 426ebf854a82c63cdaa2413fbd1b005025dba95b (patch)
tree      | 316a426e280db29745444e7606952c8c235c846a /cli
parent    | 668b400ff2fa5634f575e54f40ab1f0b78fcdf16 (diff)
feat(unstable/test): imperative test steps API (#12190)
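For orientation, the API added by this commit lets a test callback receive a `Deno.TestContext` and run awaited sub-steps. The sketch below is illustrative only and is not part of the commit; the test and step names ("database", "connect", etc.) are made up, and it assumes `deno test --unstable`, which the feature requires:

```ts
// Illustrative sketch (not from the commit). Each t.step(...) call
// resolves to a boolean indicating whether that step (and its nested
// steps) passed; a failing step is reported but does not reject.
Deno.test("database", async (t) => {
  const ok = await t.step("connect", async (t) => {
    // Steps can nest; the inner callback receives its own TestContext.
    await t.step("open pool", () => {});
    await t.step("ping", () => {});
  });

  if (!ok) {
    throw new Error("a sub-step failed");
  }
});
```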
Diffstat (limited to 'cli')
-rw-r--r-- | cli/dts/lib.deno.ns.d.ts                           |  12
-rw-r--r-- | cli/dts/lib.deno.unstable.d.ts                     |  37
-rw-r--r-- | cli/tests/integration/test_tests.rs                |  36
-rw-r--r-- | cli/tests/testdata/test/steps/failing_steps.out    |  53
-rw-r--r-- | cli/tests/testdata/test/steps/failing_steps.ts     |  27
-rw-r--r-- | cli/tests/testdata/test/steps/ignored_steps.out    |   8
-rw-r--r-- | cli/tests/testdata/test/steps/ignored_steps.ts     |  16
-rw-r--r-- | cli/tests/testdata/test/steps/invalid_usage.out    | 111
-rw-r--r-- | cli/tests/testdata/test/steps/invalid_usage.ts     | 122
-rw-r--r-- | cli/tests/testdata/test/steps/no_unstable_flag.out |  13
-rw-r--r-- | cli/tests/testdata/test/steps/no_unstable_flag.ts  |   4
-rw-r--r-- | cli/tests/testdata/test/steps/passing_steps.out    |  38
-rw-r--r-- | cli/tests/testdata/test/steps/passing_steps.ts     | 120
-rw-r--r-- | cli/tests/unit/test_util.ts                        |   2
-rw-r--r-- | cli/tests/unit/testing_test.ts                     |  40
-rw-r--r-- | cli/tools/test.rs                                  | 175
16 files changed, 803 insertions, 11 deletions
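One more illustrative sketch before the diff itself (not part of the commit; step and promise names are hypothetical): steps can also be declared with a `TestStepDefinition` object, and steps that disable the op/resource/exit sanitizers may run concurrently, mirroring the `passing_steps.ts` fixture added below.

```ts
// Illustrative sketch (not from the commit): concurrent steps are only
// allowed when the sanitizers are disabled on the steps involved.
Deno.test("parallel work", async (t) => {
  await Promise.all([
    t.step({
      name: "fetch a",
      fn: async () => {
        await new Promise((resolve) => setTimeout(resolve, 10));
      },
      sanitizeOps: false,
      sanitizeResources: false,
      sanitizeExit: false,
    }),
    t.step({
      name: "fetch b",
      fn: async () => {
        await new Promise((resolve) => setTimeout(resolve, 10));
      },
      sanitizeOps: false,
      sanitizeResources: false,
      sanitizeExit: false,
    }),
  ]);
});
```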
diff --git a/cli/dts/lib.deno.ns.d.ts b/cli/dts/lib.deno.ns.d.ts
index 4d3dfe0d3..eb91d6fa4 100644
--- a/cli/dts/lib.deno.ns.d.ts
+++ b/cli/dts/lib.deno.ns.d.ts
@@ -113,8 +113,12 @@ declare namespace Deno {
    * See: https://no-color.org/
    */
   export const noColor: boolean;
+  /** **UNSTABLE**: New option, yet to be vetted. */
+  export interface TestContext {
+  }
+
   export interface TestDefinition {
-    fn: () => void | Promise<void>;
+    fn: (t: TestContext) => void | Promise<void>;
     name: string;
     ignore?: boolean;
     /** If at least one test has `only` set to true, only run tests that have
@@ -127,7 +131,6 @@ declare namespace Deno {
      * after the test has exactly the same contents as before the test. Defaults
      * to true. */
     sanitizeResources?: boolean;
-
     /** Ensure the test case does not prematurely cause the process to exit,
      * for example via a call to `Deno.exit`. Defaults to true. */
     sanitizeExit?: boolean;
@@ -184,7 +187,10 @@ declare namespace Deno {
    * });
    * ```
    */
-  export function test(name: string, fn: () => void | Promise<void>): void;
+  export function test(
+    name: string,
+    fn: (t: TestContext) => void | Promise<void>,
+  ): void;
 
   /** Exit the Deno process with optional exit code. If no exit code is supplied
    * then Deno will exit with return code of 0.
diff --git a/cli/dts/lib.deno.unstable.d.ts b/cli/dts/lib.deno.unstable.d.ts
index 73f4bfcb2..3bea165e5 100644
--- a/cli/dts/lib.deno.unstable.d.ts
+++ b/cli/dts/lib.deno.unstable.d.ts
@@ -948,6 +948,43 @@ declare namespace Deno {
     };
   }
+  /** **UNSTABLE**: New option, yet to be vetted. */
+  export interface TestContext {
+    /** Run a sub step of the parent test with a given name. Returns a promise
+     * that resolves to a boolean signifying if the step completed successfully.
+     * The returned promise never rejects unless the arguments are invalid.
+     * If the test was ignored, the promise returns `false`.
+     */
+    step(t: TestStepDefinition): Promise<boolean>;
+
+    /** Run a sub step of the parent test with a given name. Returns a promise
+     * that resolves to a boolean signifying if the step completed successfully.
+     * The returned promise never rejects unless the arguments are invalid.
+     * If the test was ignored, the promise returns `false`.
+     */
+    step(
+      name: string,
+      fn: (t: TestContext) => void | Promise<void>,
+    ): Promise<boolean>;
+  }
+
+  /** **UNSTABLE**: New option, yet to be vetted. */
+  export interface TestStepDefinition {
+    fn: (t: TestContext) => void | Promise<void>;
+    name: string;
+    ignore?: boolean;
+    /** Check that the number of async completed ops after the test is the same
+     * as number of dispatched ops. Defaults to true. */
+    sanitizeOps?: boolean;
+    /** Ensure the test case does not "leak" resources - ie. the resource table
+     * after the test has exactly the same contents as before the test. Defaults
+     * to true. */
+    sanitizeResources?: boolean;
+    /** Ensure the test case does not prematurely cause the process to exit,
+     * for example via a call to `Deno.exit`. Defaults to true. */
+    sanitizeExit?: boolean;
+  }
+
   /** **UNSTABLE**: new API, yet to be vetted.
    *
    * A generic transport listener for message-oriented protocols.
   */
diff --git a/cli/tests/integration/test_tests.rs b/cli/tests/integration/test_tests.rs
index 24ceeefb4..3ea8186b8 100644
--- a/cli/tests/integration/test_tests.rs
+++ b/cli/tests/integration/test_tests.rs
@@ -186,3 +186,39 @@ itest!(aggregate_error {
   exit_code: 1,
   output: "test/aggregate_error.out",
 });
+
+itest!(steps_passing_steps {
+  args: "test --unstable test/steps/passing_steps.ts",
+  exit_code: 0,
+  output: "test/steps/passing_steps.out",
+});
+
+itest!(steps_passing_steps_concurrent {
+  args: "test --unstable --jobs=2 test/steps/passing_steps.ts",
+  exit_code: 0,
+  output: "test/steps/passing_steps.out",
+});
+
+itest!(steps_failing_steps {
+  args: "test --unstable test/steps/failing_steps.ts",
+  exit_code: 1,
+  output: "test/steps/failing_steps.out",
+});
+
+itest!(steps_ignored_steps {
+  args: "test --unstable test/steps/ignored_steps.ts",
+  exit_code: 0,
+  output: "test/steps/ignored_steps.out",
+});
+
+itest!(steps_invalid_usage {
+  args: "test --unstable test/steps/invalid_usage.ts",
+  exit_code: 1,
+  output: "test/steps/invalid_usage.out",
+});
+
+itest!(steps_no_unstable_flag {
+  args: "test test/steps/no_unstable_flag.ts",
+  exit_code: 1,
+  output: "test/steps/no_unstable_flag.out",
+});
diff --git a/cli/tests/testdata/test/steps/failing_steps.out b/cli/tests/testdata/test/steps/failing_steps.out
new file mode 100644
index 000000000..1c5e2e591
--- /dev/null
+++ b/cli/tests/testdata/test/steps/failing_steps.out
@@ -0,0 +1,53 @@
+[WILDCARD]
+running 3 tests from [WILDCARD]/failing_steps.ts
+test nested failure ...
+  test step 1 ...
+    test inner 1 ... FAILED ([WILDCARD])
+      Error: Failed.
+          at [WILDCARD]/failing_steps.ts:[WILDCARD]
+          [WILDCARD]
+    test inner 2 ... ok ([WILDCARD])
+  FAILED ([WILDCARD])
+FAILED ([WILDCARD])
+test multiple test step failures ...
+  test step 1 ... FAILED ([WILDCARD])
+    Error: Fail.
+    [WILDCARD]
+  test step 2 ... FAILED ([WILDCARD])
+    Error: Fail.
+        at [WILDCARD]/failing_steps.ts:[WILDCARD]
+    [WILDCARD]
+FAILED ([WILDCARD])
+test failing step in failing test ...
+  test step 1 ... FAILED ([WILDCARD])
+    Error: Fail.
+        at [WILDCARD]/failing_steps.ts:[WILDCARD]
+        at [WILDCARD]
+FAILED ([WILDCARD])
+
+failures:
+
+nested failure
+Error: 1 test step failed.
+    at runTest (deno:runtime/js/40_testing.js:[WILDCARD])
+    at async Object.runTests (deno:runtime/js/40_testing.js:[WILDCARD])
+
+multiple test step failures
+Error: 2 test steps failed.
+    at runTest (deno:runtime/js/40_testing.js:[WILDCARD])
+    at async Object.runTests (deno:runtime/js/40_testing.js:[WILDCARD])
+
+failing step in failing test
+Error: Fail test.
+    at [WILDCARD]/failing_steps.ts:[WILDCARD]
+    at [WILDCARD]
+
+failures:
+
+  nested failure
+  multiple test step failures
+  failing step in failing test
+
+test result: FAILED. 0 passed; 3 failed; 0 ignored; 0 measured; 0 filtered out ([WILDCARD])
+
+error: Test failed
diff --git a/cli/tests/testdata/test/steps/failing_steps.ts b/cli/tests/testdata/test/steps/failing_steps.ts
new file mode 100644
index 000000000..efa18d54e
--- /dev/null
+++ b/cli/tests/testdata/test/steps/failing_steps.ts
@@ -0,0 +1,27 @@
+Deno.test("nested failure", async (t) => {
+  const success = await t.step("step 1", async (t) => {
+    let success = await t.step("inner 1", () => {
+      throw new Error("Failed.");
+    });
+    if (success) throw new Error("Expected failure");
+
+    success = await t.step("inner 2", () => {});
+    if (!success) throw new Error("Expected success");
+  });
+
+  if (success) throw new Error("Expected failure");
+});
+
+Deno.test("multiple test step failures", async (t) => {
+  await t.step("step 1", () => {
+    throw new Error("Fail.");
+  });
+  await t.step("step 2", () => Promise.reject(new Error("Fail.")));
+});
+
+Deno.test("failing step in failing test", async (t) => {
+  await t.step("step 1", () => {
+    throw new Error("Fail.");
+  });
+  throw new Error("Fail test.");
+});
diff --git a/cli/tests/testdata/test/steps/ignored_steps.out b/cli/tests/testdata/test/steps/ignored_steps.out
new file mode 100644
index 000000000..c667a3d95
--- /dev/null
+++ b/cli/tests/testdata/test/steps/ignored_steps.out
@@ -0,0 +1,8 @@
+[WILDCARD]
+running 1 test from [WILDCARD]/ignored_steps.ts
+test ignored step ...
+  test step 1 ... ignored ([WILDCARD])
+  test step 2 ... ok ([WILDCARD])
+ok ([WILDCARD])
+
+test result: ok. 1 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out [WILDCARD]
diff --git a/cli/tests/testdata/test/steps/ignored_steps.ts b/cli/tests/testdata/test/steps/ignored_steps.ts
new file mode 100644
index 000000000..102b481fb
--- /dev/null
+++ b/cli/tests/testdata/test/steps/ignored_steps.ts
@@ -0,0 +1,16 @@
+Deno.test("ignored step", async (t) => {
+  let result = await t.step({
+    name: "step 1",
+    ignore: true,
+    fn: () => {
+      throw new Error("Fail.");
+    },
+  });
+  if (result !== false) throw new Error("Expected false.");
+  result = await t.step({
+    name: "step 2",
+    ignore: false,
+    fn: () => {},
+  });
+  if (result !== true) throw new Error("Expected true.");
+});
diff --git a/cli/tests/testdata/test/steps/invalid_usage.out b/cli/tests/testdata/test/steps/invalid_usage.out
new file mode 100644
index 000000000..b03ca57b6
--- /dev/null
+++ b/cli/tests/testdata/test/steps/invalid_usage.out
@@ -0,0 +1,111 @@
+[WILDCARD]
+running 7 tests from [WILDCARD]/invalid_usage.ts
+test capturing ...
+  test some step ... ok ([WILDCARD])
+FAILED ([WILDCARD])
+test top level missing await ...
+  test step ... pending ([WILDCARD])
+FAILED ([WILDCARD])
+test inner missing await ...
+  test step ...
+    test inner ... pending ([WILDCARD])
+      Error: Parent scope completed before test step finished execution. Ensure all steps are awaited (ex. `await t.step(...)`).
+          at postValidation [WILDCARD]
+          at testStepSanitizer [WILDCARD]
+  FAILED ([WILDCARD])
+    Error: There were still test steps running after the current scope finished execution. Ensure all steps are awaited (ex. `await t.step(...)`).
+        at postValidation [WILDCARD]
+        at testStepSanitizer [WILDCARD]
+        at async fn ([WILDCARD]/invalid_usage.ts:[WILDCARD])
+        at async Object.testStepSanitizer [WILDCARD]
+FAILED ([WILDCARD])
+test parallel steps with sanitizers ...
+  test step 1 ... pending ([WILDCARD])
+  test step 2 ... FAILED ([WILDCARD])
+    Error: Cannot start test step while another test step with sanitizers is running.
+ * parallel steps with sanitizers > step 1 + at preValidation ([WILDCARD]) + at testStepSanitizer ([WILDCARD]) + at [WILDCARD]/invalid_usage.ts:[WILDCARD] + at [WILDCARD] +FAILED ([WILDCARD]) +test parallel steps when first has sanitizer ... + test step 1 ... pending ([WILDCARD]) + test step 2 ... FAILED ([WILDCARD]) + Error: Cannot start test step while another test step with sanitizers is running. + * parallel steps when first has sanitizer > step 1 + at preValidation ([WILDCARD]) + at testStepSanitizer ([WILDCARD]) + at [WILDCARD]/invalid_usage.ts:[WILDCARD] + at [WILDCARD] +FAILED ([WILDCARD]) +test parallel steps when second has sanitizer ... + test step 1 ... ok ([WILDCARD]) + test step 2 ... FAILED ([WILDCARD]) + Error: Cannot start test step with sanitizers while another test step is running. + * parallel steps when second has sanitizer > step 1 + at preValidation ([WILDCARD]) + at testStepSanitizer ([WILDCARD]) + at [WILDCARD]/invalid_usage.ts:[WILDCARD] + at [WILDCARD] +FAILED ([WILDCARD]) +test parallel steps where only inner tests have sanitizers ... + test step 1 ... + test step inner ... ok ([WILDCARD]) + ok ([WILDCARD]) + test step 2 ... + test step inner ... FAILED ([WILDCARD]) + Error: Cannot start test step with sanitizers while another test step is running. + * parallel steps where only inner tests have sanitizers > step 1 + at preValidation ([WILDCARD]) + at testStepSanitizer ([WILDCARD]) + at [WILDCARD]/invalid_usage.ts:[WILDCARD] + FAILED ([WILDCARD]) +FAILED ([WILDCARD]) + +failures: + +capturing +Error: Cannot run test step after parent scope has finished execution. Ensure any `.step(...)` calls are executed before their parent scope completes execution. + at TestContext.step ([WILDCARD]) + at [WILDCARD]/invalid_usage.ts:[WILDCARD] + at [WILDCARD] + +top level missing await +Error: There were still test steps running after the current scope finished execution. Ensure all steps are awaited (ex. `await t.step(...)`). + at postValidation [WILDCARD] + at testStepSanitizer ([WILDCARD]) + [WILDCARD] + +inner missing await +Error: 1 test step failed. + at [WILDCARD] + +parallel steps with sanitizers +Error: 1 test step failed. + at runTest ([WILDCARD]) + at [WILDCARD] + +parallel steps when first has sanitizer +Error: 1 test step failed. + at runTest ([WILDCARD]) + at [WILDCARD] + +parallel steps when second has sanitizer +Error: 1 test step failed. + at runTest ([WILDCARD]) + at [WILDCARD] + +failures: + + capturing + top level missing await + inner missing await + parallel steps with sanitizers + parallel steps when first has sanitizer + parallel steps when second has sanitizer + parallel steps where only inner tests have sanitizers + +test result: FAILED. 
0 passed; 7 failed; 0 ignored; 0 measured; 0 filtered out ([WILDCARD]) + +error: Test failed diff --git a/cli/tests/testdata/test/steps/invalid_usage.ts b/cli/tests/testdata/test/steps/invalid_usage.ts new file mode 100644 index 000000000..f670c842e --- /dev/null +++ b/cli/tests/testdata/test/steps/invalid_usage.ts @@ -0,0 +1,122 @@ +import { deferred } from "../../../../../test_util/std/async/deferred.ts"; + +Deno.test("capturing", async (t) => { + let capturedContext!: Deno.TestContext; + await t.step("some step", (t) => { + capturedContext = t; + }); + // this should error because the scope of the tester has already completed + await capturedContext.step("next step", () => {}); +}); + +Deno.test("top level missing await", (t) => { + t.step("step", () => { + return new Promise((resolve) => setTimeout(resolve, 10)); + }); +}); + +Deno.test({ + name: "inner missing await", + fn: async (t) => { + await t.step("step", (t) => { + t.step("inner", () => { + return new Promise((resolve) => setTimeout(resolve, 10)); + }); + }); + await new Promise((resolve) => setTimeout(resolve, 10)); + }, + sanitizeResources: false, + sanitizeOps: false, + sanitizeExit: false, +}); + +Deno.test("parallel steps with sanitizers", async (t) => { + // not allowed because steps with sanitizers cannot be run in parallel + const step1Entered = deferred(); + const step2Finished = deferred(); + const step1 = t.step("step 1", async () => { + step1Entered.resolve(); + await step2Finished; + }); + await step1Entered; + await t.step("step 2", () => {}); + step2Finished.resolve(); + await step1; +}); + +Deno.test("parallel steps when first has sanitizer", async (t) => { + const step1Entered = deferred(); + const step2Finished = deferred(); + const step1 = t.step({ + name: "step 1", + fn: async () => { + step1Entered.resolve(); + await step2Finished; + }, + }); + await step1Entered; + await t.step({ + name: "step 2", + fn: () => {}, + sanitizeOps: false, + sanitizeResources: false, + sanitizeExit: false, + }); + step2Finished.resolve(); + await step1; +}); + +Deno.test("parallel steps when second has sanitizer", async (t) => { + const step1Entered = deferred(); + const step2Finished = deferred(); + const step1 = t.step({ + name: "step 1", + fn: async () => { + step1Entered.resolve(); + await step2Finished; + }, + sanitizeOps: false, + sanitizeResources: false, + sanitizeExit: false, + }); + await step1Entered; + await t.step({ + name: "step 2", + fn: async () => { + await new Promise((resolve) => setTimeout(resolve, 100)); + }, + }); + step2Finished.resolve(); + await step1; +}); + +Deno.test({ + name: "parallel steps where only inner tests have sanitizers", + fn: async (t) => { + const step1Entered = deferred(); + const step2Finished = deferred(); + const step1 = t.step("step 1", async (t) => { + await t.step({ + name: "step inner", + fn: async () => { + step1Entered.resolve(); + await step2Finished; + }, + sanitizeOps: true, + }); + }); + await step1Entered; + await t.step("step 2", async (t) => { + await t.step({ + name: "step inner", + fn: () => {}, + sanitizeOps: true, + }); + }); + step2Finished.resolve(); + await step1; + }, + sanitizeResources: false, + sanitizeOps: false, + sanitizeExit: false, +}); diff --git a/cli/tests/testdata/test/steps/no_unstable_flag.out b/cli/tests/testdata/test/steps/no_unstable_flag.out new file mode 100644 index 000000000..8fe6ba4f7 --- /dev/null +++ b/cli/tests/testdata/test/steps/no_unstable_flag.out @@ -0,0 +1,13 @@ +[WILDCARD] +running 1 test from [WILDCARD]/no_unstable_flag.ts +test 
description ... FAILED ([WILDCARD]) + +failures: + +description +Error: Test steps are unstable. The --unstable flag must be provided. + at [WILDCARD] + +failures: + +[WILDCARD] diff --git a/cli/tests/testdata/test/steps/no_unstable_flag.ts b/cli/tests/testdata/test/steps/no_unstable_flag.ts new file mode 100644 index 000000000..737efba11 --- /dev/null +++ b/cli/tests/testdata/test/steps/no_unstable_flag.ts @@ -0,0 +1,4 @@ +Deno.test("description", async (t) => { + // deno-lint-ignore no-explicit-any + await (t as any).step("step", () => {}); +}); diff --git a/cli/tests/testdata/test/steps/passing_steps.out b/cli/tests/testdata/test/steps/passing_steps.out new file mode 100644 index 000000000..b92327d17 --- /dev/null +++ b/cli/tests/testdata/test/steps/passing_steps.out @@ -0,0 +1,38 @@ +[WILDCARD] +running 5 tests from [WILDCARD] +test description ... + test step 1 ... + test inner 1 ... ok ([WILDCARD]ms) + test inner 2 ... ok ([WILDCARD]ms) + ok ([WILDCARD]ms) +ok ([WILDCARD]ms) +test parallel steps without sanitizers ... + test step 1 ... ok ([WILDCARD]) + test step 2 ... ok ([WILDCARD]) +ok ([WILDCARD]) +test parallel steps without sanitizers due to parent ... + test step 1 ... ok ([WILDCARD]) + test step 2 ... ok ([WILDCARD]) +ok ([WILDCARD]) +test steps with disabled sanitizers, then enabled, then parallel disabled ... + test step 1 ... + test step 1 ... + test step 1 ... + test step 1 ... ok ([WILDCARD]) + test step 1 ... ok ([WILDCARD]) + ok ([WILDCARD]) + test step 2 ... ok ([WILDCARD]) + ok ([WILDCARD]) + ok ([WILDCARD]) +ok ([WILDCARD]) +test steps buffered then streaming reporting ... + test step 1 ... + test step 1 - 1 ... ok ([WILDCARD]) + test step 1 - 2 ... + test step 1 - 2 - 1 ... ok ([WILDCARD]) + ok ([WILDCARD]) + ok ([WILDCARD]) + test step 2 ... ok ([WILDCARD]) +ok ([WILDCARD]) + +test result: ok. 
5 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out [WILDCARD] diff --git a/cli/tests/testdata/test/steps/passing_steps.ts b/cli/tests/testdata/test/steps/passing_steps.ts new file mode 100644 index 000000000..fbd52e2d3 --- /dev/null +++ b/cli/tests/testdata/test/steps/passing_steps.ts @@ -0,0 +1,120 @@ +import { deferred } from "../../../../../test_util/std/async/deferred.ts"; + +Deno.test("description", async (t) => { + const success = await t.step("step 1", async (t) => { + await t.step("inner 1", () => {}); + await t.step("inner 2", () => {}); + }); + + if (!success) throw new Error("Expected the step to return true."); +}); + +Deno.test("parallel steps without sanitizers", async (t) => { + // allowed + await Promise.all([ + t.step({ + name: "step 1", + fn: async () => { + await new Promise((resolve) => setTimeout(resolve, 10)); + }, + sanitizeOps: false, + sanitizeResources: false, + sanitizeExit: false, + }), + t.step({ + name: "step 2", + fn: async () => { + await new Promise((resolve) => setTimeout(resolve, 10)); + }, + sanitizeOps: false, + sanitizeResources: false, + sanitizeExit: false, + }), + ]); +}); + +Deno.test({ + name: "parallel steps without sanitizers due to parent", + fn: async (t) => { + // allowed because parent disabled the sanitizers + await Promise.all([ + t.step("step 1", async () => { + await new Promise((resolve) => setTimeout(resolve, 10)); + }), + t.step("step 2", async () => { + await new Promise((resolve) => setTimeout(resolve, 10)); + }), + ]); + }, + sanitizeResources: false, + sanitizeOps: false, + sanitizeExit: false, +}); + +Deno.test({ + name: "steps with disabled sanitizers, then enabled, then parallel disabled", + fn: async (t) => { + await t.step("step 1", async (t) => { + await t.step({ + name: "step 1", + fn: async (t) => { + await Promise.all([ + t.step({ + name: "step 1", + fn: async (t) => { + await new Promise((resolve) => setTimeout(resolve, 10)); + await Promise.all([ + t.step("step 1", () => {}), + t.step("step 1", () => {}), + ]); + }, + sanitizeExit: false, + sanitizeResources: false, + sanitizeOps: false, + }), + t.step({ + name: "step 2", + fn: () => {}, + sanitizeResources: false, + sanitizeOps: false, + sanitizeExit: false, + }), + ]); + }, + sanitizeResources: true, + sanitizeOps: true, + sanitizeExit: true, + }); + }); + }, + sanitizeResources: false, + sanitizeOps: false, + sanitizeExit: false, +}); + +Deno.test("steps buffered then streaming reporting", async (t) => { + // no sanitizers so this will be buffered + await t.step({ + name: "step 1", + fn: async (t) => { + // also ensure the buffered tests display in order regardless of the second one finishing first + const step2Finished = deferred(); + const step1 = t.step("step 1 - 1", async () => { + await step2Finished; + }); + const step2 = t.step("step 1 - 2", async (t) => { + await t.step("step 1 - 2 - 1", () => {}); + }); + await step2; + step2Finished.resolve(); + await step1; + }, + sanitizeResources: false, + sanitizeOps: false, + sanitizeExit: false, + }); + + // now this will start streaming and we want to + // ensure it flushes the buffer of the last test + await t.step("step 2", async () => {}); +}); diff --git a/cli/tests/unit/test_util.ts b/cli/tests/unit/test_util.ts index ee924fe8a..65d23af65 100644 --- a/cli/tests/unit/test_util.ts +++ b/cli/tests/unit/test_util.ts @@ -39,7 +39,7 @@ interface UnitTestOptions { permissions?: UnitTestPermissions; } -type TestFunction = () => void | Promise<void>; +type TestFunction = (tester: Deno.TestContext) => void | 
Promise<void>; export function unitTest(fn: TestFunction): void; export function unitTest(options: UnitTestOptions, fn: TestFunction): void; diff --git a/cli/tests/unit/testing_test.ts b/cli/tests/unit/testing_test.ts index 89b3cc31f..144246002 100644 --- a/cli/tests/unit/testing_test.ts +++ b/cli/tests/unit/testing_test.ts @@ -1,5 +1,5 @@ // Copyright 2018-2021 the Deno authors. All rights reserved. MIT license. -import { assertThrows, unitTest } from "./test_util.ts"; +import { assertRejects, assertThrows, unitTest } from "./test_util.ts"; unitTest(function testFnOverloading() { // just verifying that you can use this test definition syntax @@ -25,3 +25,41 @@ unitTest(function nameOfTestCaseCantBeEmpty() { "The test name can't be empty", ); }); + +unitTest(function invalidStepArguments(t) { + assertRejects( + async () => { + // deno-lint-ignore no-explicit-any + await (t as any).step("test"); + }, + TypeError, + "Expected function for second argument.", + ); + + assertRejects( + async () => { + // deno-lint-ignore no-explicit-any + await (t as any).step("test", "not a function"); + }, + TypeError, + "Expected function for second argument.", + ); + + assertRejects( + async () => { + // deno-lint-ignore no-explicit-any + await (t as any).step(); + }, + TypeError, + "Expected a test definition or name and function.", + ); + + assertRejects( + async () => { + // deno-lint-ignore no-explicit-any + await (t as any).step(() => {}); + }, + TypeError, + "Expected a test definition or name and function.", + ); +}); diff --git a/cli/tools/test.rs b/cli/tools/test.rs index e14b5cc8b..aec6e6856 100644 --- a/cli/tools/test.rs +++ b/cli/tools/test.rs @@ -39,7 +39,9 @@ use rand::seq::SliceRandom; use rand::SeedableRng; use regex::Regex; use serde::Deserialize; +use std::collections::HashMap; use std::collections::HashSet; +use std::io::Write; use std::num::NonZeroUsize; use std::path::PathBuf; use std::sync::mpsc::channel; @@ -60,7 +62,7 @@ enum TestMode { Both, } -#[derive(Debug, Clone, PartialEq, Deserialize)] +#[derive(Debug, Clone, PartialEq, Deserialize, Eq, Hash)] #[serde(rename_all = "camelCase")] pub struct TestDescription { pub origin: String, @@ -84,6 +86,33 @@ pub enum TestResult { #[derive(Debug, Clone, PartialEq, Deserialize)] #[serde(rename_all = "camelCase")] +pub struct TestStepDescription { + pub test: TestDescription, + pub level: usize, + pub name: String, +} + +#[derive(Debug, Clone, PartialEq, Deserialize)] +#[serde(rename_all = "camelCase")] +pub enum TestStepResult { + Ok, + Ignored, + Failed(Option<String>), + Pending(Option<String>), +} + +impl TestStepResult { + fn error(&self) -> Option<&str> { + match self { + TestStepResult::Failed(Some(text)) => Some(text.as_str()), + TestStepResult::Pending(Some(text)) => Some(text.as_str()), + _ => None, + } + } +} + +#[derive(Debug, Clone, PartialEq, Deserialize)] +#[serde(rename_all = "camelCase")] pub struct TestPlan { pub origin: String, pub total: usize, @@ -98,6 +127,8 @@ pub enum TestEvent { Wait(TestDescription), Output(TestOutput), Result(TestDescription, TestResult, u64), + StepWait(TestStepDescription), + StepResult(TestStepDescription, TestStepResult, u64), } #[derive(Debug, Clone, Deserialize)] @@ -143,12 +174,26 @@ trait TestReporter { result: &TestResult, elapsed: u64, ); + fn report_step_wait(&mut self, description: &TestStepDescription); + fn report_step_result( + &mut self, + description: &TestStepDescription, + result: &TestStepResult, + elapsed: u64, + ); fn report_summary(&mut self, summary: &TestSummary, elapsed: 
&Duration); } +enum DeferredStepOutput { + StepWait(TestStepDescription), + StepResult(TestStepDescription, TestStepResult, u64), +} + struct PrettyTestReporter { concurrent: bool, echo_output: bool, + deferred_step_output: HashMap<TestDescription, Vec<DeferredStepOutput>>, + last_wait_output_level: usize, } impl PrettyTestReporter { @@ -156,6 +201,61 @@ impl PrettyTestReporter { PrettyTestReporter { concurrent, echo_output, + deferred_step_output: HashMap::new(), + last_wait_output_level: 0, + } + } + + fn force_report_wait(&mut self, description: &TestDescription) { + print!("test {} ...", description.name); + // flush for faster feedback when line buffered + std::io::stdout().flush().unwrap(); + self.last_wait_output_level = 0; + } + + fn force_report_step_wait(&mut self, description: &TestStepDescription) { + if self.last_wait_output_level < description.level { + println!(); + } + print!( + "{}test {} ...", + " ".repeat(description.level), + description.name + ); + // flush for faster feedback when line buffered + std::io::stdout().flush().unwrap(); + self.last_wait_output_level = description.level; + } + + fn force_report_step_result( + &mut self, + description: &TestStepDescription, + result: &TestStepResult, + elapsed: u64, + ) { + let status = match result { + TestStepResult::Ok => colors::green("ok").to_string(), + TestStepResult::Ignored => colors::yellow("ignored").to_string(), + TestStepResult::Pending(_) => colors::gray("pending").to_string(), + TestStepResult::Failed(_) => colors::red("FAILED").to_string(), + }; + + if self.last_wait_output_level == description.level { + print!(" "); + } else { + print!("{}", " ".repeat(description.level)); + } + + println!( + "{} {}", + status, + colors::gray(format!("({}ms)", elapsed)).to_string() + ); + + if let Some(error_text) = result.error() { + for line in error_text.lines() { + println!("{}{}", " ".repeat(description.level + 1), line); + } } } } @@ -168,7 +268,7 @@ impl TestReporter for PrettyTestReporter { fn report_wait(&mut self, description: &TestDescription) { if !self.concurrent { - print!("test {} ...", description.name); + self.force_report_wait(description); } } @@ -187,7 +287,27 @@ impl TestReporter for PrettyTestReporter { elapsed: u64, ) { if self.concurrent { - print!("test {} ...", description.name); + self.force_report_wait(description); + + if let Some(step_outputs) = self.deferred_step_output.remove(description) + { + for step_output in step_outputs { + match step_output { + DeferredStepOutput::StepWait(description) => { + self.force_report_step_wait(&description) + } + DeferredStepOutput::StepResult( + step_description, + step_result, + elapsed, + ) => self.force_report_step_result( + &step_description, + &step_result, + elapsed, + ), + } + } + } } let status = match result { @@ -196,13 +316,50 @@ impl TestReporter for PrettyTestReporter { TestResult::Failed(_) => colors::red("FAILED").to_string(), }; + if self.last_wait_output_level == 0 { + print!(" "); + } + println!( - " {} {}", + "{} {}", status, colors::gray(format!("({}ms)", elapsed)).to_string() ); } + fn report_step_wait(&mut self, description: &TestStepDescription) { + if self.concurrent { + self + .deferred_step_output + .entry(description.test.to_owned()) + .or_insert_with(Vec::new) + .push(DeferredStepOutput::StepWait(description.clone())); + } else { + self.force_report_step_wait(description); + } + } + + fn report_step_result( + &mut self, + description: &TestStepDescription, + result: &TestStepResult, + elapsed: u64, + ) { + if self.concurrent { + 
self + .deferred_step_output + .entry(description.test.to_owned()) + .or_insert_with(Vec::new) + .push(DeferredStepOutput::StepResult( + description.clone(), + result.clone(), + elapsed, + )); + } else { + self.force_report_step_result(description, result, elapsed); + } + } + fn report_summary(&mut self, summary: &TestSummary, elapsed: &Duration) { if !summary.failures.is_empty() { println!("\nfailures:\n"); @@ -650,11 +807,9 @@ async fn test_specifiers( TestResult::Ok => { summary.passed += 1; } - TestResult::Ignored => { summary.ignored += 1; } - TestResult::Failed(error) => { summary.failed += 1; summary.failures.push((description.clone(), error.clone())); @@ -663,6 +818,14 @@ async fn test_specifiers( reporter.report_result(&description, &result, elapsed); } + + TestEvent::StepWait(description) => { + reporter.report_step_wait(&description); + } + + TestEvent::StepResult(description, result, duration) => { + reporter.report_step_result(&description, &result, duration); + } } if let Some(x) = fail_fast { |
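As a closing illustration of the constraints the new step sanitizer enforces (mirroring the `ignored_steps.ts` and `invalid_usage.ts` fixtures above), here is one more sketch; it is not part of the commit and the step names are hypothetical:

```ts
// Illustrative sketch (not from the commit).
Deno.test("step constraints", async (t) => {
  // An ignored step never runs its body and resolves to `false`.
  const ran = await t.step({
    name: "windows only",
    ignore: Deno.build.os !== "windows",
    fn: () => {},
  });
  console.log(`step ran: ${ran}`);

  // Every step must be awaited before the enclosing scope finishes;
  // a dangling t.step(...) promise is reported as "pending" and the
  // test fails, as exercised by invalid_usage.ts.
  await t.step("always awaited", () => {});
});
```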