-rw-r--r--  Cargo.lock                          |  1
-rw-r--r--  ext/http/lib.rs                     |  7
-rw-r--r--  serde_v8/Cargo.toml                 |  1
-rw-r--r--  serde_v8/magic/buffer.rs            | 16
-rw-r--r--  serde_v8/magic/mod.rs               |  1
-rw-r--r--  serde_v8/magic/rawbytes.rs          | 96
-rw-r--r--  serde_v8/magic/string_or_buffer.rs  |  9
-rw-r--r--  serde_v8/magic/v8slice.rs           | 51
-rw-r--r--  serde_v8/tests/magic.rs             | 14
9 files changed, 191 insertions(+), 5 deletions(-)
diff --git a/Cargo.lock b/Cargo.lock
index 3d4e932b0..f17dfdb22 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -3615,6 +3615,7 @@ name = "serde_v8"
version = "0.45.0"
dependencies = [
"bencher",
+ "bytes",
"derive_more",
"serde",
"serde_json",
diff --git a/ext/http/lib.rs b/ext/http/lib.rs
index e34d4b80f..d1d884457 100644
--- a/ext/http/lib.rs
+++ b/ext/http/lib.rs
@@ -589,10 +589,7 @@ fn http_response(
Some(data) => {
// If a buffer was passed, but isn't compressible, we use it to
// construct a response body.
- Ok((
- HttpResponseWriter::Closed,
- Bytes::copy_from_slice(&data).into(),
- ))
+ Ok((HttpResponseWriter::Closed, Bytes::from(data).into()))
}
None if compressing => {
// Create a one way pipe that implements tokio's async io traits. To do
@@ -766,7 +763,7 @@ async fn op_http_write(
}
}
HttpResponseWriter::BodyUncompressed(body) => {
- let bytes = Bytes::copy_from_slice(&buf[..]);
+ let bytes = Bytes::from(buf);
match body.send_data(bytes).await {
Ok(_) => Ok(()),
Err(err) => {
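
Both hunks above swap Bytes::copy_from_slice, which allocates and memcpys the payload, for Bytes::from, which reuses the buffer's backing storage via the From<ZeroCopyBuf> impl added below. A minimal stand-alone sketch of the difference, using Vec<u8> as a stand-in for ZeroCopyBuf:

// Sketch only: Vec<u8> stands in for ZeroCopyBuf.
fn main() {
  let owned: Vec<u8> = vec![1, 2, 3, 4];

  // Before: an extra allocation + memcpy of the payload.
  let copied = bytes::Bytes::copy_from_slice(&owned);

  // After: zero-copy; ownership of the existing allocation is transferred.
  let moved = bytes::Bytes::from(owned);

  assert_eq!(copied, moved);
}
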
diff --git a/serde_v8/Cargo.toml b/serde_v8/Cargo.toml
index 21fab88ea..9d6069e64 100644
--- a/serde_v8/Cargo.toml
+++ b/serde_v8/Cargo.toml
@@ -13,6 +13,7 @@ description = "Rust to V8 serialization and deserialization"
path = "lib.rs"
[dependencies]
+bytes = "1"
derive_more = "0.99.17"
serde = { version = "1.0.136", features = ["derive"] }
v8 = "0.42.0"
diff --git a/serde_v8/magic/buffer.rs b/serde_v8/magic/buffer.rs
index 81f550bcb..da87c8b86 100644
--- a/serde_v8/magic/buffer.rs
+++ b/serde_v8/magic/buffer.rs
@@ -144,3 +144,19 @@ impl FromV8 for ZeroCopyBuf {
Ok(Self::FromV8(V8Slice::from_v8(scope, value)?))
}
}
+
+impl From<ZeroCopyBuf> for bytes::Bytes {
+ fn from(zbuf: ZeroCopyBuf) -> bytes::Bytes {
+ match zbuf {
+ ZeroCopyBuf::FromV8(v) => v.into(),
+ // WARNING(AaronO): potential footgun, but will disappear in future ZeroCopyBuf refactor
+ ZeroCopyBuf::ToV8(v) => v
+ .lock()
+ .unwrap()
+ .take()
+ .expect("ZeroCopyBuf was empty")
+ .into(),
+ ZeroCopyBuf::Temp(v) => v.into(),
+ }
+ }
+}
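
The WARNING above flags that converting the ToV8 variant takes the buffer's contents, so converting the same cell twice would hit the expect and panic. A minimal sketch of that footgun, assuming (as the lock()/take() calls imply) that ToV8 wraps a Mutex<Option<Box<[u8]>>>:

use std::sync::Mutex;

// Hypothetical stand-in for the ToV8 variant's interior.
fn take_bytes(cell: &Mutex<Option<Box<[u8]>>>) -> bytes::Bytes {
  cell
    .lock()
    .unwrap()
    .take() // moves the contents out, leaving None behind
    .expect("ZeroCopyBuf was empty")
    .into()
}

fn main() {
  let cell = Mutex::new(Some(vec![1u8, 2, 3].into_boxed_slice()));
  assert_eq!(&take_bytes(&cell)[..], &[1, 2, 3]);
  // take_bytes(&cell); // second call would panic: contents already taken
}
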
diff --git a/serde_v8/magic/mod.rs b/serde_v8/magic/mod.rs
index 076699302..c9bfce573 100644
--- a/serde_v8/magic/mod.rs
+++ b/serde_v8/magic/mod.rs
@@ -2,6 +2,7 @@
pub mod buffer;
pub mod bytestring;
pub mod detached_buffer;
+pub(super) mod rawbytes;
pub mod string_or_buffer;
pub mod transl8;
pub mod u16string;
diff --git a/serde_v8/magic/rawbytes.rs b/serde_v8/magic/rawbytes.rs
new file mode 100644
index 000000000..d82e68717
--- /dev/null
+++ b/serde_v8/magic/rawbytes.rs
@@ -0,0 +1,96 @@
+// Copyright 2018-2022 the Deno authors. All rights reserved. MIT license.
+pub(crate) type AtomicPtr<T> = *mut T;
+#[allow(unused)]
+pub(crate) struct RawBytes {
+ ptr: *const u8,
+ len: usize,
+ // inlined "trait object"
+ data: AtomicPtr<()>,
+ vtable: &'static Vtable,
+}
+
+impl RawBytes {
+ pub fn new_raw(
+ ptr: *const u8,
+ len: usize,
+ data: AtomicPtr<()>,
+ vtable: &'static Vtable,
+ ) -> bytes::Bytes {
+ RawBytes {
+ ptr,
+ len,
+ data,
+ vtable,
+ }
+ .into()
+ }
+}
+
+#[allow(unused)]
+pub(crate) struct Vtable {
+ /// fn(data, ptr, len)
+ pub clone: unsafe fn(&AtomicPtr<()>, *const u8, usize) -> bytes::Bytes,
+ /// fn(data, ptr, len)
+ pub drop: unsafe fn(&mut AtomicPtr<()>, *const u8, usize),
+}
+
+impl From<RawBytes> for bytes::Bytes {
+ fn from(b: RawBytes) -> Self {
+ // SAFETY: RawBytes has the same layout as bytes::Bytes.
+ // This is tested below; both are composed of usize-sized ptrs/values,
+ // so they aren't currently subject to Rust's field re-ordering to minimize padding
+ unsafe { std::mem::transmute(b) }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use std::mem;
+
+ const HELLO: &str = "hello";
+
+ // ===== impl StaticVtable =====
+
+ const STATIC_VTABLE: Vtable = Vtable {
+ clone: static_clone,
+ drop: static_drop,
+ };
+
+ unsafe fn static_clone(
+ _: &AtomicPtr<()>,
+ ptr: *const u8,
+ len: usize,
+ ) -> bytes::Bytes {
+ from_static(std::slice::from_raw_parts(ptr, len)).into()
+ }
+
+ unsafe fn static_drop(_: &mut AtomicPtr<()>, _: *const u8, _: usize) {
+ // nothing to drop for &'static [u8]
+ }
+
+ fn from_static(bytes: &'static [u8]) -> RawBytes {
+ RawBytes {
+ ptr: bytes.as_ptr(),
+ len: bytes.len(),
+ data: std::ptr::null_mut(),
+ vtable: &STATIC_VTABLE,
+ }
+ }
+
+ #[test]
+ fn bytes_identity() {
+ let b1: bytes::Bytes = from_static(HELLO.as_bytes()).into();
+ let b2 = bytes::Bytes::from_static(HELLO.as_bytes());
+ assert_eq!(b1, b2); // Values are equal
+ }
+
+ #[test]
+ fn bytes_layout() {
+ let u1: [usize; 4] =
+ unsafe { mem::transmute(from_static(HELLO.as_bytes())) };
+ let u2: [usize; 4] =
+ unsafe { mem::transmute(bytes::Bytes::from_static(HELLO.as_bytes())) };
+ assert_eq!(u1[..3], u2[..3]); // Structs are equal except for their vtable pointers
+ }
+}
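
The transmute in From<RawBytes> for bytes::Bytes is only sound while the two types agree on size and field order; the bytes_layout test above checks this at runtime. Since both types are four pointer-sized words, the size half of the invariant could also be pinned at compile time, for example (a sketch, not part of the diff):

// Compile-time size check: fails to build if bytes ever changes the
// footprint of Bytes. Field *order* still needs the runtime test above.
const _: () = assert!(
  std::mem::size_of::<RawBytes>() == std::mem::size_of::<bytes::Bytes>()
);
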
diff --git a/serde_v8/magic/string_or_buffer.rs b/serde_v8/magic/string_or_buffer.rs
index a48378b52..78954e60c 100644
--- a/serde_v8/magic/string_or_buffer.rs
+++ b/serde_v8/magic/string_or_buffer.rs
@@ -48,3 +48,12 @@ impl FromV8 for StringOrBuffer {
Err(Error::ExpectedBuffer)
}
}
+
+impl From<StringOrBuffer> for bytes::Bytes {
+ fn from(sob: StringOrBuffer) -> Self {
+ match sob {
+ StringOrBuffer::Buffer(b) => b.into(),
+ StringOrBuffer::String(s) => s.into_bytes().into(),
+ }
+ }
+}
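
The String arm is also zero-copy: String::into_bytes hands back the backing Vec<u8> without copying, and Bytes::from(Vec<u8>) takes ownership of that allocation. A minimal sketch:

fn main() {
  let s = String::from("hello");
  // No copy: the String's heap buffer becomes the Bytes storage.
  let b: bytes::Bytes = s.into_bytes().into();
  assert_eq!(&b[..], b"hello");
}
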
diff --git a/serde_v8/magic/v8slice.rs b/serde_v8/magic/v8slice.rs
index 6950e29d9..452c857a3 100644
--- a/serde_v8/magic/v8slice.rs
+++ b/serde_v8/magic/v8slice.rs
@@ -3,7 +3,9 @@
use std::ops::Deref;
use std::ops::DerefMut;
use std::ops::Range;
+use std::rc::Rc;
+use super::rawbytes;
use super::transl8::FromV8;
/// A V8Slice encapsulates a slice that's been borrowed from a JavaScript
@@ -117,3 +119,52 @@ impl AsMut<[u8]> for V8Slice {
self.as_slice_mut()
}
}
+
+// Implement V8Slice -> bytes::Bytes
+impl V8Slice {
+ fn rc_into_byte_parts(self: Rc<Self>) -> (*const u8, usize, *mut V8Slice) {
+ let (ptr, len) = {
+ let slice = self.as_ref();
+ (slice.as_ptr(), slice.len())
+ };
+ let rc_raw = Rc::into_raw(self);
+ let data = rc_raw as *mut V8Slice;
+ (ptr, len, data)
+ }
+}
+
+impl From<V8Slice> for bytes::Bytes {
+ fn from(v8slice: V8Slice) -> Self {
+ let (ptr, len, data) = Rc::new(v8slice).rc_into_byte_parts();
+ rawbytes::RawBytes::new_raw(ptr, len, data.cast(), &V8SLICE_VTABLE)
+ }
+}
+
+// NOTE: in the limit we could avoid the extra indirection and use the C++ shared_ptr
+// directly, but we can't store both the underlying data ptr & ctrl ptr ... so instead we
+// use a shared Rust ptr (Rc/Arc) that itself controls the C++ shared_ptr
+const V8SLICE_VTABLE: rawbytes::Vtable = rawbytes::Vtable {
+ clone: v8slice_clone,
+ drop: v8slice_drop,
+};
+
+unsafe fn v8slice_clone(
+ data: &rawbytes::AtomicPtr<()>,
+ ptr: *const u8,
+ len: usize,
+) -> bytes::Bytes {
+ let rc = Rc::from_raw(*data as *const V8Slice);
+ let (_, _, data) = rc.clone().rc_into_byte_parts();
+ std::mem::forget(rc);
+ // NOTE: `bytes::Bytes` does its own bounds checking, so we trust the ptr/len it
+ // passes in and must use them so that Bytes values it has sliced remain cloneable
+ rawbytes::RawBytes::new_raw(ptr, len, data.cast(), &V8SLICE_VTABLE)
+}
+
+unsafe fn v8slice_drop(
+ data: &mut rawbytes::AtomicPtr<()>,
+ _: *const u8,
+ _: usize,
+) {
+ drop(Rc::from_raw(*data as *const V8Slice))
+}
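
The vtable above implements shared ownership by hand: v8slice_clone rehydrates the Rc from the raw data pointer, clones it to bump the strong count, and forgets the temporary so the count isn't decremented on scope exit; v8slice_drop releases one count. A stand-alone sketch of that from_raw/forget pattern, detached from V8Slice:

use std::rc::Rc;

// Sketch only: the same refcount dance the vtable performs.
unsafe fn bump_refcount<T>(data: *const T) -> *const T {
  let rc = Rc::from_raw(data); // borrow the Rc back from its raw ptr
  let cloned = rc.clone(); // strong count += 1
  std::mem::forget(rc); // don't decrement when `rc` goes out of scope
  Rc::into_raw(cloned)
}

fn main() {
  let raw = Rc::into_raw(Rc::new(vec![1u8, 2, 3]));
  let raw2 = unsafe { bump_refcount(raw) };
  // Two co-owners now; dropping both releases the allocation exactly once.
  unsafe {
    drop(Rc::from_raw(raw));
    drop(Rc::from_raw(raw2));
  }
}
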
diff --git a/serde_v8/tests/magic.rs b/serde_v8/tests/magic.rs
index 6d60efd66..3fc46abeb 100644
--- a/serde_v8/tests/magic.rs
+++ b/serde_v8/tests/magic.rs
@@ -129,6 +129,20 @@ fn magic_buffer() {
assert!(eq.is_true());
let eq = js_exec(scope, "t3.b[4] === 11");
assert!(eq.is_true());
+
+ // ZeroCopyBuf as bytes::Bytes
+ let v8_array = js_exec(scope, "new Uint8Array([1,2,3,4,5])");
+ let zbuf: serde_v8::ZeroCopyBuf =
+ serde_v8::from_v8(scope, v8_array).unwrap();
+ let buf: bytes::Bytes = zbuf.into();
+ assert_eq!(buf, bytes::Bytes::from_static(&[1, 2, 3, 4, 5]));
+ assert_eq!(buf, bytes::Bytes::from_static(&[1, 2, 3, 4, 5]));
+ assert_eq!(buf.slice(0..2), bytes::Bytes::from_static(&[1, 2]));
+ assert_eq!(buf.slice(2..), bytes::Bytes::from_static(&[3, 4, 5]));
+ // We're specifically testing that slices are preserved post-clone
+ #[allow(clippy::redundant_clone)]
+ let buf2 = buf.slice(2..).clone();
+ assert_eq!(buf2, bytes::Bytes::from_static(&[3, 4, 5]));
})
}