Diffstat (limited to 'core/examples')
-rw-r--r-- | core/examples/http_bench.js | 139
-rw-r--r-- | core/examples/http_bench.rs | 302
2 files changed, 441 insertions, 0 deletions
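Before the diff itself, it helps to know the wire format the two files share: every dispatch carries a fixed 12-byte record of three i32 values (promise id, argument, result). The sketch below is not part of the commit; the names encode_record/decode_record are made up for illustration, and it uses explicit little-endian helpers where the commit relies on raw pointer casts in native byte order (its test only asserts the byte layout under #[cfg(target_endian = "little")]).

// Illustrative sketch (not part of the commit) of the 12-byte record that
// http_bench.js and http_bench.rs exchange: three i32 values in order
// (promise id, argument, result), encoded here as little-endian words.
fn encode_record(promise_id: i32, arg: i32, result: i32) -> [u8; 12] {
  let mut buf = [0u8; 12];
  buf[0..4].copy_from_slice(&promise_id.to_le_bytes());
  buf[4..8].copy_from_slice(&arg.to_le_bytes());
  buf[8..12].copy_from_slice(&result.to_le_bytes());
  buf
}

fn decode_record(buf: &[u8; 12]) -> (i32, i32, i32) {
  let word = |i: usize| i32::from_le_bytes([buf[i], buf[i + 1], buf[i + 2], buf[i + 3]]);
  (word(0), word(4), word(8))
}

fn main() {
  // Same byte layout that test_record_from in http_bench.rs checks.
  let buf = encode_record(1, 3, 4);
  assert_eq!(buf, [1, 0, 0, 0, 3, 0, 0, 0, 4, 0, 0, 0]);
  assert_eq!(decode_record(&buf), (1, 3, 4));
}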
diff --git a/core/examples/http_bench.js b/core/examples/http_bench.js
new file mode 100644
index 000000000..a7142b09d
--- /dev/null
+++ b/core/examples/http_bench.js
@@ -0,0 +1,139 @@
+// This is not a real HTTP server. We read blindly one time into 'requestBuf',
+// then write this fixed 'responseBuf'. The point of this benchmark is to
+// exercise the event loop in a simple yet semi-realistic way.
+const requestBuf = new Uint8Array(64 * 1024);
+const responseBuf = new Uint8Array(
+  "HTTP/1.1 200 OK\r\nContent-Length: 12\r\n\r\nHello World\n"
+    .split("")
+    .map(c => c.charCodeAt(0))
+);
+const promiseMap = new Map();
+let nextPromiseId = 1;
+
+function assert(cond) {
+  if (!cond) {
+    throw Error("assert");
+  }
+}
+
+function createResolvable() {
+  let methods;
+  const promise = new Promise((resolve, reject) => {
+    methods = { resolve, reject };
+  });
+  return Object.assign(promise, methods);
+}
+
+const scratch32 = new Int32Array(3);
+const scratchBytes = new Uint8Array(
+  scratch32.buffer,
+  scratch32.byteOffset,
+  scratch32.byteLength
+);
+assert(scratchBytes.byteLength === 3 * 4);
+
+function send(promiseId, opId, arg, zeroCopy = null) {
+  scratch32[0] = promiseId;
+  scratch32[1] = arg;
+  scratch32[2] = -1;
+  return Deno.core.dispatch(opId, scratchBytes, zeroCopy);
+}
+
+/** Returns Promise<number> */
+function sendAsync(opId, arg, zeroCopy = null) {
+  const promiseId = nextPromiseId++;
+  const p = createResolvable();
+  promiseMap.set(promiseId, p);
+  send(promiseId, opId, arg, zeroCopy);
+  return p;
+}
+
+function recordFromBuf(buf) {
+  assert(buf.byteLength === 3 * 4);
+  const buf32 = new Int32Array(buf.buffer, buf.byteOffset, buf.byteLength / 4);
+  return {
+    promiseId: buf32[0],
+    arg: buf32[1],
+    result: buf32[2]
+  };
+}
+
+/** Returns i32 number */
+function sendSync(opId, arg) {
+  const buf = send(0, opId, arg);
+  const record = recordFromBuf(buf);
+  return record.result;
+}
+
+function handleAsyncMsgFromRust(opId, buf) {
+  const record = recordFromBuf(buf);
+  const { promiseId, result } = record;
+  const p = promiseMap.get(promiseId);
+  promiseMap.delete(promiseId);
+  p.resolve(result);
+}
+
+/** Listens on 127.0.0.1:4544, returns rid. */
+function listen() {
+  return sendSync(ops["listen"], -1);
+}
+
+/** Accepts a connection, returns rid. */
+async function accept(rid) {
+  return await sendAsync(ops["accept"], rid);
+}
+
+/**
+ * Reads a packet from the rid, presumably an http request. data is ignored.
+ * Returns bytes read.
+ */
+async function read(rid, data) {
+  return await sendAsync(ops["read"], rid, data);
+}
+
+/** Writes a fixed HTTP response to the socket rid. Returns bytes written. */
+async function write(rid, data) {
+  return await sendAsync(ops["write"], rid, data);
+}
+
+function close(rid) {
+  return sendSync(ops["close"], rid);
+}
+
+async function serve(rid) {
+  while (true) {
+    const nread = await read(rid, requestBuf);
+    if (nread <= 0) {
+      break;
+    }
+
+    const nwritten = await write(rid, responseBuf);
+    if (nwritten < 0) {
+      break;
+    }
+  }
+  close(rid);
+}
+
+let ops;
+
+async function main() {
+  Deno.core.setAsyncHandler(handleAsyncMsgFromRust);
+  ops = Deno.core.ops();
+
+  Deno.core.print("http_bench.js start\n");
+
+  const listenerRid = listen();
+  Deno.core.print(`listening http://127.0.0.1:4544/ rid = ${listenerRid}\n`);
+  while (true) {
+    const rid = await accept(listenerRid);
+    // Deno.core.print(`accepted ${rid}`);
+    if (rid < 0) {
+      Deno.core.print(`accept error ${rid}`);
+      return;
+    }
+    serve(rid);
+  }
+}
+
+main();
diff --git a/core/examples/http_bench.rs b/core/examples/http_bench.rs
new file mode 100644
index 000000000..c019d8a11
--- /dev/null
+++ b/core/examples/http_bench.rs
@@ -0,0 +1,302 @@
+/// To run this benchmark:
+///
+/// > DENO_BUILD_MODE=release ./tools/build.py && \
+///   ./target/release/deno_core_http_bench --multi-thread
+extern crate deno;
+extern crate futures;
+extern crate libc;
+extern crate tokio;
+
+#[macro_use]
+extern crate log;
+#[macro_use]
+extern crate lazy_static;
+
+use deno::*;
+use futures::future::lazy;
+use std::collections::HashMap;
+use std::env;
+use std::net::SocketAddr;
+use std::sync::atomic::AtomicUsize;
+use std::sync::atomic::Ordering;
+use std::sync::Mutex;
+use tokio::prelude::*;
+
+static LOGGER: Logger = Logger;
+struct Logger;
+impl log::Log for Logger {
+  fn enabled(&self, metadata: &log::Metadata) -> bool {
+    metadata.level() <= log::max_level()
+  }
+  fn log(&self, record: &log::Record) {
+    if self.enabled(record.metadata()) {
+      println!("{} - {}", record.level(), record.args());
+    }
+  }
+  fn flush(&self) {}
+}
+
+#[derive(Clone, Debug, PartialEq)]
+pub struct Record {
+  pub promise_id: i32,
+  pub arg: i32,
+  pub result: i32,
+}
+
+impl Into<Buf> for Record {
+  fn into(self) -> Buf {
+    let buf32 = vec![self.promise_id, self.arg, self.result].into_boxed_slice();
+    let ptr = Box::into_raw(buf32) as *mut [u8; 3 * 4];
+    unsafe { Box::from_raw(ptr) }
+  }
+}
+
+impl From<&[u8]> for Record {
+  fn from(s: &[u8]) -> Record {
+    #[allow(clippy::cast_ptr_alignment)]
+    let ptr = s.as_ptr() as *const i32;
+    let ints = unsafe { std::slice::from_raw_parts(ptr, 3) };
+    Record {
+      promise_id: ints[0],
+      arg: ints[1],
+      result: ints[2],
+    }
+  }
+}
+
+impl From<Buf> for Record {
+  fn from(buf: Buf) -> Record {
+    assert_eq!(buf.len(), 3 * 4);
+    #[allow(clippy::cast_ptr_alignment)]
+    let ptr = Box::into_raw(buf) as *mut [i32; 3];
+    let ints: Box<[i32]> = unsafe { Box::from_raw(ptr) };
+    assert_eq!(ints.len(), 3);
+    Record {
+      promise_id: ints[0],
+      arg: ints[1],
+      result: ints[2],
+    }
+  }
+}
+
+#[test]
+fn test_record_from() {
+  let r = Record {
+    promise_id: 1,
+    arg: 3,
+    result: 4,
+  };
+  let expected = r.clone();
+  let buf: Buf = r.into();
+  #[cfg(target_endian = "little")]
+  assert_eq!(
+    buf,
+    vec![1u8, 0, 0, 0, 3, 0, 0, 0, 4, 0, 0, 0].into_boxed_slice()
+  );
+  let actual = Record::from(buf);
+  assert_eq!(actual, expected);
+  // TODO test From<&[u8]> for Record
+}
+
+pub type HttpOp = dyn Future<Item = i32, Error = std::io::Error> + Send;
+
+pub type HttpOpHandler =
+  fn(record: Record, zero_copy_buf: Option<PinnedBuf>) -> Box<HttpOp>;
+
+fn http_op(
+  handler: HttpOpHandler,
+) -> impl Fn(&[u8], Option<PinnedBuf>) -> CoreOp {
+  move |control: &[u8], zero_copy_buf: Option<PinnedBuf>| -> CoreOp {
+    let record = Record::from(control);
+    let is_sync = record.promise_id == 0;
+    let op = handler(record.clone(), zero_copy_buf);
+
+    let mut record_a = record.clone();
+    let mut record_b = record.clone();
+
+    let fut = Box::new(
+      op.and_then(move |result| {
+        record_a.result = result;
+        Ok(record_a)
+      })
+      .or_else(|err| -> Result<Record, ()> {
+        eprintln!("unexpected err {}", err);
+        record_b.result = -1;
+        Ok(record_b)
+      })
+      .then(|result| -> Result<Buf, ()> {
+        let record = result.unwrap();
+        Ok(record.into())
+      }),
+    );
+
+    if is_sync {
+      Op::Sync(fut.wait().unwrap())
+    } else {
+      Op::Async(fut)
+    }
+  }
+}
+
+fn main() {
+  let main_future = lazy(move || {
+    // TODO currently isolate.execute() must be run inside tokio, hence the
+    // lazy(). It would be nice to not have that constraint. Probably requires
+    // using v8::MicrotasksPolicy::kExplicit
+
+    let js_source = include_str!("http_bench.js");
+
+    let startup_data = StartupData::Script(Script {
+      source: js_source,
+      filename: "http_bench.js",
+    });
+
+    let mut isolate = deno::Isolate::new(startup_data, false);
+    isolate.register_op("listen", http_op(op_listen));
+    isolate.register_op("accept", http_op(op_accept));
+    isolate.register_op("read", http_op(op_read));
+    isolate.register_op("write", http_op(op_write));
+    isolate.register_op("close", http_op(op_close));
+
+    isolate.then(|r| {
+      js_check(r);
+      Ok(())
+    })
+  });
+
+  let args: Vec<String> = env::args().collect();
+  // NOTE: `--help` arg will display V8 help and exit
+  let args = deno::v8_set_flags(args);
+
+  log::set_logger(&LOGGER).unwrap();
+  log::set_max_level(if args.iter().any(|a| a == "-D") {
+    log::LevelFilter::Debug
+  } else {
+    log::LevelFilter::Warn
+  });
+
+  if args.iter().any(|a| a == "--multi-thread") {
+    println!("multi-thread");
+    tokio::run(main_future);
+  } else {
+    println!("single-thread");
+    tokio::runtime::current_thread::run(main_future);
+  }
+}
+
+enum Repr {
+  TcpListener(tokio::net::TcpListener),
+  TcpStream(tokio::net::TcpStream),
+}
+
+type ResourceTable = HashMap<i32, Repr>;
+lazy_static! {
+  static ref RESOURCE_TABLE: Mutex<ResourceTable> = Mutex::new(HashMap::new());
+  static ref NEXT_RID: AtomicUsize = AtomicUsize::new(3);
+}
+
+fn new_rid() -> i32 {
+  let rid = NEXT_RID.fetch_add(1, Ordering::SeqCst);
+  rid as i32
+}
+
+fn op_accept(record: Record, _zero_copy_buf: Option<PinnedBuf>) -> Box<HttpOp> {
+  let listener_rid = record.arg;
+  debug!("accept {}", listener_rid);
+  Box::new(
+    futures::future::poll_fn(move || {
+      let mut table = RESOURCE_TABLE.lock().unwrap();
+      let maybe_repr = table.get_mut(&listener_rid);
+      match maybe_repr {
+        Some(Repr::TcpListener(ref mut listener)) => listener.poll_accept(),
+        _ => panic!("bad rid {}", listener_rid),
+      }
+    })
+    .and_then(move |(stream, addr)| {
+      debug!("accept success {}", addr);
+      let rid = new_rid();
+
+      let mut guard = RESOURCE_TABLE.lock().unwrap();
+      guard.insert(rid, Repr::TcpStream(stream));
+
+      Ok(rid as i32)
+    }),
+  )
+}
+
+fn op_listen(
+  _record: Record,
+  _zero_copy_buf: Option<PinnedBuf>,
+) -> Box<HttpOp> {
+  debug!("listen");
+  Box::new(lazy(move || {
+    let addr = "127.0.0.1:4544".parse::<SocketAddr>().unwrap();
+    let listener = tokio::net::TcpListener::bind(&addr).unwrap();
+    let rid = new_rid();
+
+    let mut guard = RESOURCE_TABLE.lock().unwrap();
+    guard.insert(rid, Repr::TcpListener(listener));
+    futures::future::ok(rid)
+  }))
+}
+
+fn op_close(record: Record, _zero_copy_buf: Option<PinnedBuf>) -> Box<HttpOp> {
+  debug!("close");
+  let rid = record.arg;
+  Box::new(lazy(move || {
+    let mut table = RESOURCE_TABLE.lock().unwrap();
+    let r = table.remove(&rid);
+    let result = if r.is_some() { 0 } else { -1 };
+    futures::future::ok(result)
+  }))
+}
+
+fn op_read(record: Record, zero_copy_buf: Option<PinnedBuf>) -> Box<HttpOp> {
+  let rid = record.arg;
+  debug!("read rid={}", rid);
+  let mut zero_copy_buf = zero_copy_buf.unwrap();
+  Box::new(
+    futures::future::poll_fn(move || {
+      let mut table = RESOURCE_TABLE.lock().unwrap();
+      let maybe_repr = table.get_mut(&rid);
+      match maybe_repr {
+        Some(Repr::TcpStream(ref mut stream)) => {
+          stream.poll_read(&mut zero_copy_buf)
+        }
+        _ => panic!("bad rid"),
+      }
+    })
+    .and_then(move |nread| {
+      debug!("read success {}", nread);
+      Ok(nread as i32)
+    }),
+  )
+}
+
+fn op_write(record: Record, zero_copy_buf: Option<PinnedBuf>) -> Box<HttpOp> {
+  let rid = record.arg;
+  debug!("write rid={}", rid);
+  let zero_copy_buf = zero_copy_buf.unwrap();
+  Box::new(
+    futures::future::poll_fn(move || {
+      let mut table = RESOURCE_TABLE.lock().unwrap();
+      let maybe_repr = table.get_mut(&rid);
+      match maybe_repr {
+        Some(Repr::TcpStream(ref mut stream)) => {
+          stream.poll_write(&zero_copy_buf)
+        }
+        _ => panic!("bad rid"),
+      }
+    })
+    .and_then(move |nwritten| {
+      debug!("write success {}", nwritten);
+      Ok(nwritten as i32)
+    }),
+  )
+}
+
+fn js_check(r: Result<(), ErrBox>) {
+  if let Err(e) = r {
+    panic!(e.to_string());
+  }
+}
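For a quick manual check of the benchmark once it is running, a hypothetical standalone client (not part of the commit) can connect to 127.0.0.1:4544, write any bytes once, and read back the fixed reply; any HTTP load generator should also work, since the reply is a valid HTTP/1.1 200 response.

// Hypothetical client sketch, not part of the commit. Assumes
// deno_core_http_bench is already listening on 127.0.0.1:4544 (see op_listen).
use std::io::{Read, Write};
use std::net::TcpStream;

fn main() -> std::io::Result<()> {
  let mut stream = TcpStream::connect("127.0.0.1:4544")?;
  // The server reads exactly once and never inspects the bytes,
  // so any request body works.
  stream.write_all(b"GET / HTTP/1.1\r\nHost: 127.0.0.1\r\n\r\n")?;
  // Read and print the fixed "HTTP/1.1 200 OK ... Hello World" response.
  let mut response = [0u8; 1024];
  let n = stream.read(&mut response)?;
  print!("{}", String::from_utf8_lossy(&response[..n]));
  Ok(())
}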