Diffstat (limited to 'tools')
 -rwxr-xr-x  tools/unit_tests.py  | 58
 -rw-r--r--  tools/util.py        | 22
 -rw-r--r--  tools/util_test.py   | 32
 3 files changed, 9 insertions, 103 deletions
diff --git a/tools/unit_tests.py b/tools/unit_tests.py
index 14284b325..10f6a4a47 100755
--- a/tools/unit_tests.py
+++ b/tools/unit_tests.py
@@ -1,67 +1,27 @@
#!/usr/bin/env python
# Copyright 2018-2019 the Deno authors. All rights reserved. MIT license.
-import util
import sys
import subprocess
-import re
+import http_server
-def run_unit_test2(cmd):
+def unit_tests(deno_exe):
+    cmd = [
+        deno_exe, "run", "--reload", "--allow-run", "js/unit_test_runner.ts"
+    ]
    process = subprocess.Popen(
-        cmd,
-        bufsize=1,
-        universal_newlines=True,
-        stdout=subprocess.PIPE,
-        stderr=subprocess.STDOUT)
-    (actual, expected) = util.parse_unit_test_output(process.stdout, True)
-    process.wait()
-    errcode = process.returncode
-    if errcode != 0:
-        sys.exit(errcode)
-    # To avoid the case where we silently filter out all tests.
-    assert expected > 0
-    if actual == None and expected == None:
-        raise AssertionError("Bad js/unit_test.ts output")
-    if expected != actual:
-        print "expected", expected, "actual", actual
-        raise AssertionError("expected tests did not equal actual")
+        cmd, bufsize=1, universal_newlines=True, stderr=subprocess.STDOUT)
+
    process.wait()
    errcode = process.returncode
    if errcode != 0:
        sys.exit(errcode)
-def run_unit_test(deno_exe, permStr, flags=None):
-    if flags is None:
-        flags = []
-    cmd = [deno_exe, "run"] + flags + ["js/unit_tests.ts", permStr]
-    run_unit_test2(cmd)
-
-
-# We want to test many ops in deno which have different behavior depending on
-# the permissions set. These tests can specify which permissions they expect,
-# which appends a special string like "permW1N0" to the end of the test name.
-# Here we run several copies of deno with different permissions, filtering the
-# tests by the special string. permW1N0 means allow-write but not allow-net.
-# See js/test_util.ts for more details.
-def unit_tests(deno_exe):
-    run_unit_test(deno_exe, "permR0W0N0E0U0H0", ["--reload"])
-    run_unit_test(deno_exe, "permR1W0N0E0U0H0", ["--allow-read"])
-    run_unit_test(deno_exe, "permR0W1N0E0U0H0", ["--allow-write"])
-    run_unit_test(deno_exe, "permR0W0N1E0U0H0", ["--allow-net"])
-    run_unit_test(deno_exe, "permR1W1N0E0U0H0",
-                  ["--allow-read", "--allow-write"])
-    run_unit_test(deno_exe, "permR0W0N0E1U0H0", ["--allow-env"])
-    run_unit_test(deno_exe, "permR0W0N0E0U0H1", ["--allow-high-precision"])
-    run_unit_test(deno_exe, "permR0W0N0E0U1H0", ["--allow-run"])
-    run_unit_test(deno_exe, "permR0W1N0E0U1H0",
-                  ["--allow-run", "--allow-write"])
-    # TODO We might accidentally miss some. We should be smarter about which we
-    # run. Maybe we can use the "filtered out" number to check this.
-
-
if __name__ == '__main__':
    if len(sys.argv) < 2:
        print "Usage ./tools/unit_tests.py target/debug/deno"
        sys.exit(1)
+
+    http_server.spawn()
    unit_tests(sys.argv[1])
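Context for the hunk above: the deleted matrix ran one deno process per permission set, tagging each set with a suffix like permR1W0N0E0U0H0 that js/test_util.ts used to filter tests, while the new js/unit_test_runner.ts (spawned with --allow-run so it can re-exec deno itself) takes over that re-spawning. A minimal sketch of how those suffixes line up with the CLI flags, using a hypothetical perm_string() helper that appears nowhere in this change:

# Illustrative only, not part of the diff: how the removed matrix's
# "permR1W0N0E0U0H0"-style strings line up with CLI flags. perm_string()
# is a hypothetical helper; the letters follow js/test_util.ts
# (R=read, W=write, N=net, E=env, U=run, H=high-precision).
PERM_FLAGS = [
    ("R", "--allow-read"),
    ("W", "--allow-write"),
    ("N", "--allow-net"),
    ("E", "--allow-env"),
    ("U", "--allow-run"),
    ("H", "--allow-high-precision"),
]


def perm_string(flags):
    return "perm" + "".join(
        letter + ("1" if flag in flags else "0")
        for letter, flag in PERM_FLAGS)


assert perm_string(["--allow-read"]) == "permR1W0N0E0U0H0"
assert perm_string(["--allow-run", "--allow-write"]) == "permR0W1N0E0U1H0"

The two asserts reproduce rows from the removed table verbatim, so the encoding can be checked directly against the hunk; the removed TODO about accidentally missing combinations now lives with the TypeScript runner rather than this script.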
diff --git a/tools/util.py b/tools/util.py
index 5576bef91..007e21ba1 100644
--- a/tools/util.py
+++ b/tools/util.py
@@ -329,28 +329,6 @@ def enable_ansi_colors_win10():
    return True
-def parse_unit_test_output(output, print_to_stdout):
-    expected = None
-    actual = None
-    result = None
-    for line in iter(output.readline, ''):
-        if expected is None:
-            # expect "running 30 tests"
-            expected = extract_number(r'running (\d+) tests', line)
-        elif "test result:" in line:
-            result = line
-        if print_to_stdout:
-            sys.stdout.write(line)
-            sys.stdout.flush()
-    # Check that the number of expected tests equals what was reported at the
-    # bottom.
-    if result:
-        # result should be a string like this:
-        # "test result: ok. 1 passed; 0 failed; 0 ignored; 0 measured; ..."
-        actual = extract_number(r'(\d+) passed', result)
-    return (actual, expected)
-
-
def extract_number(pattern, string):
    matches = re.findall(pattern, string)
    if len(matches) != 1:
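The removed parse_unit_test_output() leaned on extract_number(), which stays behind (its body is cut off by the hunk above). A short sketch of the parsing it performed, assuming the truncated tail of extract_number() returns None unless the pattern matches exactly once:

import re


def extract_number(pattern, string):
    # Mirrors tools/util.py: a count is only trusted if the pattern
    # matches exactly once (assumed from the truncated context above).
    matches = re.findall(pattern, string)
    if len(matches) != 1:
        return None
    return int(matches[0])


header = "running 96 tests"
footer = "test result: ok. 96 passed; 0 failed; 0 ignored; 0 measured"
expected = extract_number(r'running (\d+) tests', header)  # -> 96
actual = extract_number(r'(\d+) passed', footer)           # -> 96
assert (actual, expected) == (96, 96)

The unit_test_output2.txt case deleted from tools/util_test.py below is exactly what this guarded against: a run that prints the "running N tests" header but dies before the footer comes back as actual == None.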
diff --git a/tools/util_test.py b/tools/util_test.py
index e4c8e697b..b5d5dcc98 100644
--- a/tools/util_test.py
+++ b/tools/util_test.py
@@ -48,37 +48,6 @@ def shell_quote_win_test():
'a"b""c\\d\\"e\\\\')
-def parse_unit_test_output_test():
-    print "Testing util.parse_unit_test_output()..."
-    # This is an example of a successful unit test output.
-    output = open(
-        os.path.join(util.root_path, "tools/testdata/unit_test_output1.txt"))
-    (actual, expected) = util.parse_unit_test_output(output, False)
-    assert actual == 96
-    assert expected == 96
-
-    # This is an example of a silently dying unit test.
-    output = open(
-        os.path.join(util.root_path, "tools/testdata/unit_test_output2.txt"))
-    (actual, expected) = util.parse_unit_test_output(output, False)
-    assert actual == None
-    assert expected == 96
-
-    # This is an example of compiling before successful unit tests.
-    output = open(
-        os.path.join(util.root_path, "tools/testdata/unit_test_output3.txt"))
-    (actual, expected) = util.parse_unit_test_output(output, False)
-    assert actual == 96
-    assert expected == 96
-
-    # Check what happens on empty output.
-    from StringIO import StringIO
-    output = StringIO("\n\n\n")
-    (actual, expected) = util.parse_unit_test_output(output, False)
-    assert actual == None
-    assert expected == None
-
-
def parse_wrk_output_test():
    print "Testing util.parse_wrk_output_test()..."
    f = open(os.path.join(util.root_path, "tools/testdata/wrk1.txt"))
@@ -101,7 +70,6 @@ def util_test():
    pattern_match_test()
    parse_exit_code_test()
    shell_quote_win_test()
-    parse_unit_test_output_test()
    parse_wrk_output_test()
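These tests follow a plain print-and-assert convention: each one announces itself, asserts, and is wired into util_test(). A hypothetical test for the surviving extract_number(), in the same Python 2 style as the rest of tools/ and assuming, as above, that a non-unique match yields None:

# Hypothetical example, not in the diff: a test in the same
# print-and-assert style as the surviving tests, wired up by
# calling it from util_test(). Python 2, like the rest of tools/.
def extract_number_test():
    print "Testing util.extract_number()..."
    assert util.extract_number(r'running (\d+) tests',
                               "running 96 tests") == 96
    # Assumed behavior: anything but exactly one match yields None.
    assert util.extract_number(r'(\d+)', "1 and 2") is None

Hooking it in would mean one more call inside util_test(), mirroring the line this diff removes.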