path: root/core/shared_queue.rs
author    Bert Belder <bertbelder@gmail.com>  2020-01-21 20:24:31 +0100
committer Bert Belder <bertbelder@gmail.com>  2020-01-21 21:31:52 +0100
commit    8c3cd634a86e19cb8bff6891b9eccbfd4bc051ee (patch)
tree      282d8b2c264dbe7d30591ca0296719918f6cc162 /core/shared_queue.rs
parent    fa7f34eb8cec07f4c68ca4e9c46a983bc3e2308f (diff)
Upgrade to rusty_v8 v0.1.1 (#3741)
Diffstat (limited to 'core/shared_queue.rs')
-rw-r--r--  core/shared_queue.rs | 44
1 file changed, 33 insertions(+), 11 deletions(-)
diff --git a/core/shared_queue.rs b/core/shared_queue.rs
index f449eb6b8..cd1856813 100644
--- a/core/shared_queue.rs
+++ b/core/shared_queue.rs
@@ -17,6 +17,7 @@ SharedQueue Binary Layout
*/
use crate::ops::OpId;
+use rusty_v8 as v8;

const MAX_RECORDS: usize = 100;

/// Total number of records added.
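
For orientation, the header indices used later in this diff (INDEX_NUM_RECORDS, INDEX_HEAD, INDEX_RECORDS) are defined alongside these constants in the surrounding file. A sketch of that header layout, with values as in deno_core of this era (illustrative, not part of this diff):

const INDEX_NUM_RECORDS: usize = 0; // u32 word 0: total records pushed
const INDEX_NUM_SHIFTED_OFF: usize = 1; // u32 word 1: records consumed so far
const INDEX_HEAD: usize = 2; // u32 word 2: byte offset of the next write
const INDEX_OFFSETS: usize = 3; // u32 words 3..: (end, op_id) pairs, two per record
const INDEX_RECORDS: usize = INDEX_OFFSETS + 2 * MAX_RECORDS; // record data starts here
// HEAD_INIT = 4 * INDEX_RECORDS is therefore the byte offset of the first record.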
@@ -34,18 +35,39 @@ const HEAD_INIT: usize = 4 * INDEX_RECORDS;
pub const RECOMMENDED_SIZE: usize = 128 * MAX_RECORDS;

pub struct SharedQueue {
-  pub bytes: Vec<u8>,
+  buf: v8::SharedRef<v8::BackingStore>,
}

impl SharedQueue {
  pub fn new(len: usize) -> Self {
-    let mut bytes = Vec::new();
-    bytes.resize(HEAD_INIT + len, 0);
-    let mut q = Self { bytes };
+    let mut buf = Vec::new();
+    buf.resize(HEAD_INIT + len, 0);
+    let buf = buf.into_boxed_slice();
+    let buf =
+      unsafe { v8::SharedArrayBuffer::new_backing_store_from_boxed_slice(buf) };
+    let mut q = Self { buf };
    q.reset();
    q
  }

+  pub fn get_backing_store(&mut self) -> &mut v8::SharedRef<v8::BackingStore> {
+    &mut self.buf
+  }
+
+  pub fn bytes(&self) -> &[u8] {
+    unsafe {
+      // This is quite bad. The rusty_v8 fix that makes it unnecessary just
+      // barely missed the rusty_v8 v0.1.1 release cutoff.
+      #[allow(clippy::cast_ref_to_mut)]
+      let self_mut = &mut *(self as *const _ as *mut Self);
+      self_mut.bytes_mut()
+    }
+  }
+
+  pub fn bytes_mut(&mut self) -> &mut [u8] {
+    self.buf.data_bytes()
+  }
+
  fn reset(&mut self) {
    debug!("rust:shared_queue:reset");
    let s: &mut [u32] = self.as_u32_slice_mut();
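
The bytes() accessor added above hands out a shared view through an API that only offers mutable access: it casts &self to &mut Self, evidently because BackingStore::data_bytes() requires &mut self in this rusty_v8 release (hence bytes_mut() next to it). A minimal sketch of the same pattern on a toy Buffer type (hypothetical, for illustration only; sound only while no other reference aliases the storage):

struct Buffer {
  data: Box<[u8]>,
}

impl Buffer {
  fn bytes_mut(&mut self) -> &mut [u8] {
    &mut self.data
  }

  // Mirrors SharedQueue::bytes(): cast away constness to reach a
  // mutable-only accessor, then return the borrow as shared.
  fn bytes(&self) -> &[u8] {
    unsafe {
      let self_mut = &mut *(self as *const Self as *mut Self);
      self_mut.bytes_mut()
    }
  }
}

fn main() {
  let buf = Buffer {
    data: vec![0u8; 8].into_boxed_slice(),
  };
  assert_eq!(buf.bytes().len(), 8);
}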
@@ -55,21 +77,21 @@ impl SharedQueue {
  }

  fn as_u32_slice(&self) -> &[u32] {
-    let p = self.bytes.as_ptr();
+    let p = self.bytes().as_ptr();
    // Assert pointer is 32 bit aligned before casting.
    assert_eq!((p as usize) % std::mem::align_of::<u32>(), 0);
    #[allow(clippy::cast_ptr_alignment)]
    let p32 = p as *const u32;
-    unsafe { std::slice::from_raw_parts(p32, self.bytes.len() / 4) }
+    unsafe { std::slice::from_raw_parts(p32, self.bytes().len() / 4) }
  }

  fn as_u32_slice_mut(&mut self) -> &mut [u32] {
-    let p = self.bytes.as_mut_ptr();
+    let p = self.bytes_mut().as_mut_ptr();
    // Assert pointer is 32 bit aligned before casting.
    assert_eq!((p as usize) % std::mem::align_of::<u32>(), 0);
    #[allow(clippy::cast_ptr_alignment)]
    let p32 = p as *mut u32;
-    unsafe { std::slice::from_raw_parts_mut(p32, self.bytes.len() / 4) }
+    unsafe { std::slice::from_raw_parts_mut(p32, self.bytes().len() / 4) }
  }

  pub fn size(&self) -> usize {
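
Both helpers above reinterpret the front of the byte buffer as u32 header words. A self-contained sketch of the same checked cast (same logic as as_u32_slice(), outside SharedQueue):

use std::mem::align_of;
use std::slice;

// View an aligned byte buffer as u32 words. The assert catches an
// under-aligned allocation before the pointer cast; a Vec<u8> does not
// guarantee 4-byte alignment by itself.
fn u32_view(bytes: &[u8]) -> &[u32] {
  let p = bytes.as_ptr();
  assert_eq!((p as usize) % align_of::<u32>(), 0);
  unsafe { slice::from_raw_parts(p as *const u32, bytes.len() / 4) }
}

fn main() {
  let bytes = vec![0u8; 16];
  assert_eq!(u32_view(&bytes).len(), 4);
}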
@@ -149,7 +171,7 @@ impl SharedQueue {
      self.num_shifted_off(),
      self.head()
    );
-    Some((op_id, &self.bytes[off..end]))
+    Some((op_id, &self.bytes()[off..end]))
  }

/// Because JS-side may cast `record` to Int32Array it is required
@@ -166,13 +188,13 @@ impl SharedQueue {
    );
    assert_eq!(record.len() % 4, 0);
    let index = self.num_records();
-    if end > self.bytes.len() || index >= MAX_RECORDS {
+    if end > self.bytes().len() || index >= MAX_RECORDS {
      debug!("WARNING the sharedQueue overflowed");
      return false;
    }
    self.set_meta(index, end, op_id);
    assert_eq!(end - off, record.len());
-    self.bytes[off..end].copy_from_slice(record);
+    self.bytes_mut()[off..end].copy_from_slice(record);
    let u32_slice = self.as_u32_slice_mut();
    u32_slice[INDEX_NUM_RECORDS] += 1;
    u32_slice[INDEX_HEAD] = end as u32;
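
Taken together, push() appends the record bytes after the u32 header block, records (end, op_id) metadata via set_meta(), and bumps the record count and head offset. A usage sketch in the style of this file's unit tests (assumes OpId is an alias for u32, as in deno_core of this era):

#[test]
fn push_records_and_report_size() {
  let mut q = SharedQueue::new(RECOMMENDED_SIZE);
  // Record length must be divisible by 4 because the JS side may view
  // the buffer as an Int32Array.
  let record = [1u8, 2, 3, 4];
  assert!(q.push(1, &record));
  assert!(q.push(2, &record));
  assert_eq!(q.size(), 2);
}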