author    Bernard Lin <bernardlin12@gmail.com>  2019-03-24 23:36:27 -0400
committer Ryan Dahl <ry@tinyclouds.org>         2019-03-24 23:36:27 -0400
commit    3cc90d9bcf4be58bf88b433ae410f42fa4ad69c7 (patch)
tree      8e92d0266bd71d5c92c2352e943f5b78ef19a097
parent    129eae0265e3bd93c26c272d253ae9cbc6b88991 (diff)
Add benchmark for max latency (#1975)
-rwxr-xr-x  tools/benchmark.py        12
-rwxr-xr-x  tools/http_benchmark.py   24
-rw-r--r--  tools/testdata/wrk2.txt    8
-rw-r--r--  tools/testdata/wrk3.txt    8
-rw-r--r--  tools/util.py             28
-rw-r--r--  tools/util_test.py        15
-rw-r--r--  website/app.js             6
-rw-r--r--  website/benchmarks.html    8
8 files changed, 90 insertions, 19 deletions
diff --git a/tools/benchmark.py b/tools/benchmark.py
index 53037b1e7..6a4ec29c8 100755
--- a/tools/benchmark.py
+++ b/tools/benchmark.py
@@ -206,8 +206,16 @@ def main(argv):
hyper_hello_path = os.path.join(build_dir, "hyper_hello")
core_http_bench_exe = os.path.join(build_dir, "deno_core_http_bench")
new_data["throughput"] = run_throughput(deno_path)
- new_data["req_per_sec"] = http_benchmark(deno_path, hyper_hello_path,
- core_http_bench_exe)
+ stats = http_benchmark(deno_path, hyper_hello_path,
+ core_http_bench_exe)
+ new_data["req_per_sec"] = {
+ k: v["req_per_sec"]
+ for k, v in stats.items()
+ }
+ new_data["max_latency"] = {
+ k: v["max_latency"]
+ for k, v in stats.items()
+ }
if "linux" in sys.platform:
# Thread count test, only on linux
new_data["thread_count"] = run_thread_count_benchmark(deno_path)
diff --git a/tools/http_benchmark.py b/tools/http_benchmark.py
index 2df5fcb89..bd43a5ec4 100755
--- a/tools/http_benchmark.py
+++ b/tools/http_benchmark.py
@@ -59,16 +59,18 @@ def hyper_http_benchmark(hyper_hello_exe):
def http_benchmark(deno_exe, hyper_hello_exe, core_http_bench_exe):
- r = {}
+
# TODO Rename to "deno_tcp"
- r["deno"] = deno_http_benchmark(deno_exe)
- r["deno_net_http"] = deno_net_http_benchmark(deno_exe)
- r["deno_core_single"] = deno_core_single(core_http_bench_exe)
- r["deno_core_multi"] = deno_core_multi(core_http_bench_exe)
- r["node"] = node_http_benchmark()
- r["node_tcp"] = node_tcp_benchmark()
- r["hyper"] = hyper_http_benchmark(hyper_hello_exe)
- return r
+
+ return {
+ "deno": deno_http_benchmark(deno_exe),
+ "deno_net_http": deno_net_http_benchmark(deno_exe),
+ "deno_core_single": deno_core_single(core_http_bench_exe),
+ "deno_core_multi": deno_core_multi(core_http_bench_exe),
+ "node": node_http_benchmark(),
+ "node_tcp": node_tcp_benchmark(),
+ "hyper": hyper_http_benchmark(hyper_hello_exe)
+ }
def run(server_cmd, merge_env=None):
@@ -93,9 +95,9 @@ def run(server_cmd, merge_env=None):
DURATION, ADDR)
print cmd
output = subprocess.check_output(cmd, shell=True)
- req_per_sec = util.parse_wrk_output(output)
+ stats = util.parse_wrk_output(output)
print output
- return req_per_sec
+ return stats
finally:
server.kill()
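
Since run() now returns the parsed stats dict instead of a bare requests/sec number, every entry produced by http_benchmark() shares the same shape. A minimal sketch of the flow, substituting one of the wrk fixtures added below for live wrk output:

    # Sketch only: `output` stands in for the text wrk prints; the fixture
    # added below (tools/testdata/wrk2.txt) is a real sample of it.
    import util  # tools/util.py; assumes tools/ is on sys.path

    with open("tools/testdata/wrk2.txt") as f:
        output = f.read()
    stats = util.parse_wrk_output(output)
    assert stats == {"req_per_sec": 53435, "max_latency": 0.00125}
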
diff --git a/tools/testdata/wrk2.txt b/tools/testdata/wrk2.txt
new file mode 100644
index 000000000..3be41437c
--- /dev/null
+++ b/tools/testdata/wrk2.txt
@@ -0,0 +1,8 @@
+Running 10s test @ http://127.0.0.1:4544/
+ 2 threads and 10 connections
+ Thread Stats Avg Stdev Max +/- Stdev
+ Latency 402.90us 1.15ms 1.25us 94.86%
+ Req/Sec 26.86k 2.01k 31.81k 78.71%
+ 539721 requests in 10.10s, 26.25MB read
+Requests/sec: 53435.75
+Transfer/sec:      2.60MB
\ No newline at end of file
diff --git a/tools/testdata/wrk3.txt b/tools/testdata/wrk3.txt
new file mode 100644
index 000000000..71150f9f3
--- /dev/null
+++ b/tools/testdata/wrk3.txt
@@ -0,0 +1,8 @@
+Running 10s test @ http://127.0.0.1:4544/
+ 2 threads and 10 connections
+ Thread Stats Avg Stdev Max +/- Stdev
+ Latency 26.55ms 152.26ms 1.63s 97.45%
+ Req/Sec 48.26k 3.13k 61.41k 93.00%
+ 960491 requests in 10.00s, 80.61MB read
+Requests/sec: 96037.58
+Transfer/sec:      8.06MB
\ No newline at end of file
diff --git a/tools/util.py b/tools/util.py
index fc307512d..5576bef91 100644
--- a/tools/util.py
+++ b/tools/util.py
@@ -358,12 +358,32 @@ def extract_number(pattern, string):
return int(matches[0])
+def extract_max_latency_in_milliseconds(pattern, string):
+ matches = re.findall(pattern, string)
+ if len(matches) != 1:
+ return None
+ num = float(matches[0][0])
+ unit = matches[0][1]
+ if (unit == 'ms'):
+ return num
+ elif (unit == 'us'):
+ return num / 1000
+ elif (unit == 's'):
+ return num * 1000
+
+
def parse_wrk_output(output):
- req_per_sec = None
+ stats = {}
+ stats['req_per_sec'] = None
+ stats['max_latency'] = None
for line in output.split("\n"):
- if req_per_sec is None:
- req_per_sec = extract_number(r'Requests/sec:\s+(\d+)', line)
- return req_per_sec
+ if stats['req_per_sec'] is None:
+ stats['req_per_sec'] = extract_number(r'Requests/sec:\s+(\d+)',
+ line)
+ if stats['max_latency'] is None:
+ stats['max_latency'] = extract_max_latency_in_milliseconds(
+ r'Latency(?:\s+(\d+.\d+)([a-z]+)){3}', line)
+ return stats
def platform():
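
A subtlety in the regex above: when a capturing group sits inside a repeated pattern, re.findall keeps only the last repetition's capture, so the {3} quantifier makes the capture land on the third Latency column, which is the Max value. A self-contained sketch of the capture and the unit normalization (the sample line mirrors tools/testdata/wrk2.txt):

    import re

    # Self-contained sketch of extract_max_latency_in_milliseconds above.
    line = "    Latency   402.90us    1.15ms     1.25us   94.86%"
    matches = re.findall(r'Latency(?:\s+(\d+.\d+)([a-z]+)){3}', line)
    num, unit = float(matches[0][0]), matches[0][1]  # last repetition: Max
    assert (num, unit) == (1.25, 'us')
    # Normalize to milliseconds, mirroring the unit branches above.
    to_ms = {'ms': num, 'us': num / 1000, 's': num * 1000}
    assert to_ms[unit] == 0.00125
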
diff --git a/tools/util_test.py b/tools/util_test.py
index b7c054b92..e4c8e697b 100644
--- a/tools/util_test.py
+++ b/tools/util_test.py
@@ -82,8 +82,19 @@ def parse_unit_test_output_test():
def parse_wrk_output_test():
print "Testing util.parse_wrk_output_test()..."
f = open(os.path.join(util.root_path, "tools/testdata/wrk1.txt"))
- req_per_sec = util.parse_wrk_output(f.read())
- assert req_per_sec == 1837
+ stats = util.parse_wrk_output(f.read())
+ assert stats['req_per_sec'] == 1837
+ assert stats['max_latency'] == 34.96
+
+ f2 = open(os.path.join(util.root_path, "tools/testdata/wrk2.txt"))
+ stats2 = util.parse_wrk_output(f2.read())
+ assert stats2['req_per_sec'] == 53435
+ assert stats2['max_latency'] == 0.00125
+
+ f3 = open(os.path.join(util.root_path, "tools/testdata/wrk3.txt"))
+ stats3 = util.parse_wrk_output(f3.read())
+ assert stats3['req_per_sec'] == 96037
+ assert stats3['max_latency'] == 1630.0
def util_test():
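
Two notes on these fixtures: wrk2.txt is synthetic, apparently crafted to exercise the microsecond branch of the converter (its Max column, 1.25us, is smaller than its Avg, which real wrk output would not produce), and wrk3.txt covers the seconds branch (1.63s becomes 1630.0 ms). The same assertions can be written with context managers so the file handles close deterministically; a sketch:

    # Sketch only: equivalent to the assertions above, using with-blocks;
    # assumes tools/ is on sys.path, as in util_test.py.
    import os
    import util

    cases = [("wrk1.txt", 1837, 34.96),
             ("wrk2.txt", 53435, 0.00125),
             ("wrk3.txt", 96037, 1630.0)]
    for name, rps, max_ms in cases:
        with open(os.path.join(util.root_path, "tools/testdata", name)) as f:
            stats = util.parse_wrk_output(f.read())
        assert stats["req_per_sec"] == rps
        assert stats["max_latency"] == max_ms
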
diff --git a/website/app.js b/website/app.js
index 6e23befa9..c012dfb4a 100644
--- a/website/app.js
+++ b/website/app.js
@@ -46,6 +46,10 @@ export function createReqPerSecColumns(data) {
return createColumns(data, "req_per_sec");
}
+export function createMaxLatencyColumns(data) {
+ return createColumns(data, "max_latency");
+}
+
export function createBinarySizeColumns(data) {
const propName = "binary_size";
const binarySizeNames = Object.keys(data[data.length - 1][propName]);
@@ -198,6 +202,7 @@ export async function drawChartsFromBenchmarkData(dataUrl) {
const execTimeColumns = createExecTimeColumns(data);
const throughputColumns = createThroughputColumns(data);
const reqPerSecColumns = createReqPerSecColumns(data);
+ const maxLatencyColumns = createMaxLatencyColumns(data);
const binarySizeColumns = createBinarySizeColumns(data);
const threadCountColumns = createThreadCountColumns(data);
const syscallCountColumns = createSyscallCountColumns(data);
@@ -225,6 +230,7 @@ export async function drawChartsFromBenchmarkData(dataUrl) {
gen("#exec-time-chart", execTimeColumns, "seconds", logScale);
gen("#throughput-chart", throughputColumns, "seconds", logScale);
gen("#req-per-sec-chart", reqPerSecColumns, "1000 req/sec", formatReqSec);
+ gen("#max-latency-chart", maxLatencyColumns, "milliseconds", logScale);
gen("#binary-size-chart", binarySizeColumns, "megabytes", formatMB);
gen("#thread-count-chart", threadCountColumns, "threads");
gen("#syscall-count-chart", syscallCountColumns, "syscalls");
diff --git a/website/benchmarks.html b/website/benchmarks.html
index 3fd4765d8..4d6d543dc 100644
--- a/website/benchmarks.html
+++ b/website/benchmarks.html
@@ -110,6 +110,14 @@
<div id="req-per-sec-chart"></div>
+ <h3 id="max-latency">Max Latency <a href="#max-latency">#</a></h3>
+
+ <p>
+    Maximum latency observed during the same test used above for requests/second. Smaller is better.
+ </p>
+
+ <div id="max-latency-chart"></div>
+
<h3 id="size">Executable size <a href="#size">#</a></h3>
<p>deno ships only a single binary. We track its size here.</p>
<div id="binary-size-chart"></div>