summary | refs | log | tree | commit | diff
path: root/ext/http/00_serve.js
diff options
context:
space:
mode:
author: Matt Mastracci <matthew@mastracci.com> 2023-04-30 10:50:24 +0200
committer: GitHub <noreply@github.com> 2023-04-30 08:50:24 +0000
commit: bb1f5e4262940a966e6314f57a4267514911d262 (patch)
tree: 0b5b870e34fca10daf8e664eb4214e5e756daf53 /ext/http/00_serve.js
parent: 9c8ebce3dcc784f1a6ecd29d5fe0b3d35256ab82 (diff)
perf(core): async op pseudo-codegen and performance work (#18887)
Performance:
```
async_ops.js: 760k -> 1030k (!)
async_ops_deferred.js: 730k -> 770k
Deno.serve bench: 118k -> 124k
WS test w/ third_party/prebuilt/mac/load_test 100 localhost 8000 0 0: unchanged
Startup time: approx 0.5ms slower (13.7 -> 14.2ms)
```
Diffstat (limited to 'ext/http/00_serve.js')
-rw-r--r--  ext/http/00_serve.js  81
1 file changed, 57 insertions, 24 deletions
diff --git a/ext/http/00_serve.js b/ext/http/00_serve.js
index 0b2c60538..33742e122 100644
--- a/ext/http/00_serve.js
+++ b/ext/http/00_serve.js
@@ -1,4 +1,5 @@
// Copyright 2018-2023 the Deno authors. All rights reserved. MIT license.
+// deno-lint-ignore-file camelcase
const core = globalThis.Deno.core;
const primordials = globalThis.__bootstrap.primordials;
const internals = globalThis.__bootstrap.internals;
@@ -46,6 +47,39 @@ const {
Uint8Array,
} = primordials;
+const {
+ op_http_wait,
+ op_upgrade,
+ op_get_request_headers,
+ op_get_request_method_and_url,
+ op_read_request_body,
+ op_serve_http,
+ op_set_promise_complete,
+ op_set_response_body_bytes,
+ op_set_response_body_resource,
+ op_set_response_body_stream,
+ op_set_response_body_text,
+ op_set_response_header,
+ op_set_response_headers,
+ op_upgrade_raw,
+ op_ws_server_create,
+} = Deno.core.generateAsyncOpHandler(
+ "op_http_wait",
+ "op_upgrade",
+ "op_get_request_headers",
+ "op_get_request_method_and_url",
+ "op_read_request_body",
+ "op_serve_http",
+ "op_set_promise_complete",
+ "op_set_response_body_bytes",
+ "op_set_response_body_resource",
+ "op_set_response_body_stream",
+ "op_set_response_body_text",
+ "op_set_response_header",
+ "op_set_response_headers",
+ "op_upgrade_raw",
+ "op_ws_server_create",
+);
const _upgraded = Symbol("_upgraded");
function internalServerError() {
@@ -143,7 +177,7 @@ class InnerRequest {
this.#upgraded = () => {};
- const upgradeRid = core.ops.op_upgrade_raw(slabId);
+ const upgradeRid = op_upgrade_raw(slabId);
const conn = new TcpConn(
upgradeRid,
@@ -174,12 +208,11 @@ class InnerRequest {
(async () => {
try {
// Returns the connection and extra bytes, which we can pass directly to op_ws_server_create
- const upgrade = await core.opAsync2(
- "op_upgrade",
+ const upgrade = await op_upgrade(
slabId,
response.headerList,
);
- const wsRid = core.ops.op_ws_server_create(upgrade[0], upgrade[1]);
+ const wsRid = op_ws_server_create(upgrade[0], upgrade[1]);
// We have to wait for the go-ahead signal
await goAhead;
@@ -214,7 +247,7 @@ class InnerRequest {
}
// TODO(mmastrac): This is quite slow as we're serializing a large number of values. We may want to consider
// splitting this up into multiple ops.
- this.#methodAndUri = core.ops.op_get_request_method_and_url(this.#slabId);
+ this.#methodAndUri = op_get_request_method_and_url(this.#slabId);
}
const path = this.#methodAndUri[2];
@@ -249,7 +282,7 @@ class InnerRequest {
if (this.#slabId === undefined) {
throw new TypeError("request closed");
}
- this.#methodAndUri = core.ops.op_get_request_method_and_url(this.#slabId);
+ this.#methodAndUri = op_get_request_method_and_url(this.#slabId);
}
return {
transport: "tcp",
@@ -263,7 +296,7 @@ class InnerRequest {
if (this.#slabId === undefined) {
throw new TypeError("request closed");
}
- this.#methodAndUri = core.ops.op_get_request_method_and_url(this.#slabId);
+ this.#methodAndUri = op_get_request_method_and_url(this.#slabId);
}
return this.#methodAndUri[0];
}
@@ -281,7 +314,7 @@ class InnerRequest {
this.#body = null;
return null;
}
- this.#streamRid = core.ops.op_read_request_body(this.#slabId);
+ this.#streamRid = op_read_request_body(this.#slabId);
this.#body = new InnerBody(readableStreamForRid(this.#streamRid, false));
return this.#body;
}
@@ -290,7 +323,7 @@ class InnerRequest {
if (this.#slabId === undefined) {
throw new TypeError("request closed");
}
- return core.ops.op_get_request_headers(this.#slabId);
+ return op_get_request_headers(this.#slabId);
}
get slabId() {
@@ -331,12 +364,12 @@ function fastSyncResponseOrStream(req, respBody) {
const body = stream.body;
if (ObjectPrototypeIsPrototypeOf(Uint8ArrayPrototype, body)) {
- core.ops.op_set_response_body_bytes(req, body);
+ op_set_response_body_bytes(req, body);
return null;
}
if (typeof body === "string") {
- core.ops.op_set_response_body_text(req, body);
+ op_set_response_body_text(req, body);
return null;
}
@@ -346,7 +379,7 @@ function fastSyncResponseOrStream(req, respBody) {
}
const resourceBacking = getReadableStreamResourceBacking(stream);
if (resourceBacking) {
- core.ops.op_set_response_body_resource(
+ op_set_response_body_resource(
req,
resourceBacking.rid,
resourceBacking.autoClose,
@@ -382,9 +415,9 @@ async function asyncResponse(responseBodies, req, status, stream) {
// and we race it.
let timeoutPromise;
timeout = setTimeout(() => {
- responseRid = core.ops.op_set_response_body_stream(req);
+ responseRid = op_set_response_body_stream(req);
SetPrototypeAdd(responseBodies, responseRid);
- core.ops.op_set_promise_complete(req, status);
+ op_set_promise_complete(req, status);
timeoutPromise = core.writeAll(responseRid, value1);
}, 250);
const { value: value2, done: done2 } = await reader.read();
@@ -409,13 +442,13 @@ async function asyncResponse(responseBodies, req, status, stream) {
// Reader will be closed by finally block
// No response stream
closed = true;
- core.ops.op_set_response_body_bytes(req, value1);
+ op_set_response_body_bytes(req, value1);
return;
}
- responseRid = core.ops.op_set_response_body_stream(req);
+ responseRid = op_set_response_body_stream(req);
SetPrototypeAdd(responseBodies, responseRid);
- core.ops.op_set_promise_complete(req, status);
+ op_set_promise_complete(req, status);
// Write our first packet
await core.writeAll(responseRid, value1);
}
@@ -447,7 +480,7 @@ async function asyncResponse(responseBodies, req, status, stream) {
core.tryClose(responseRid);
SetPrototypeDelete(responseBodies, responseRid);
} else {
- core.ops.op_set_promise_complete(req, status);
+ op_set_promise_complete(req, status);
}
}
}
@@ -511,9 +544,9 @@ function mapToCallback(responseBodies, context, signal, callback, onError) {
const headers = inner.headerList;
if (headers && headers.length > 0) {
if (headers.length == 1) {
- core.ops.op_set_response_header(req, headers[0][0], headers[0][1]);
+ op_set_response_header(req, headers[0][0], headers[0][1]);
} else {
- core.ops.op_set_response_headers(req, headers);
+ op_set_response_headers(req, headers);
}
}
@@ -523,7 +556,7 @@ function mapToCallback(responseBodies, context, signal, callback, onError) {
// Handle the stream asynchronously
await asyncResponse(responseBodies, req, status, stream);
} else {
- core.ops.op_set_promise_complete(req, status);
+ op_set_promise_complete(req, status);
}
innerRequest?.close();
@@ -591,13 +624,13 @@ async function serve(arg1, arg2) {
listenOpts.alpnProtocols = ["h2", "http/1.1"];
const listener = Deno.listenTls(listenOpts);
listenOpts.port = listener.addr.port;
- context.initialize(core.ops.op_serve_http(
+ context.initialize(op_serve_http(
listener.rid,
));
} else {
const listener = Deno.listen(listenOpts);
listenOpts.port = listener.addr.port;
- context.initialize(core.ops.op_serve_http(
+ context.initialize(op_serve_http(
listener.rid,
));
}
@@ -624,7 +657,7 @@ async function serve(arg1, arg2) {
const rid = context.serverRid;
let req;
try {
- req = await core.opAsync2("op_http_wait", rid);
+ req = await op_http_wait(rid);
} catch (error) {
if (ObjectPrototypeIsPrototypeOf(BadResourcePrototype, error)) {
break;