Diffstat (limited to 'serde_v8')
-rw-r--r--  serde_v8/de.rs                2
-rw-r--r--  serde_v8/magic/bytestring.rs  2
-rw-r--r--  serde_v8/magic/rawbytes.rs    4
-rw-r--r--  serde_v8/magic/u16string.rs   2
-rw-r--r--  serde_v8/magic/v8slice.rs     2
5 files changed, 6 insertions, 6 deletions
diff --git a/serde_v8/de.rs b/serde_v8/de.rs
index 7b825f990..08b28c0f5 100644
--- a/serde_v8/de.rs
+++ b/serde_v8/de.rs
@@ -580,7 +580,7 @@ struct EnumAccess<'a, 'b, 's> {
// p1: std::marker::PhantomData<&'x ()>,
}
-impl<'de, 'a, 'b, 's, 'x> de::EnumAccess<'de> for EnumAccess<'a, 'b, 's> {
+impl<'de, 'a, 'b, 's> de::EnumAccess<'de> for EnumAccess<'a, 'b, 's> {
type Error = Error;
type Variant = VariantDeserializer<'a, 'b, 's>;
diff --git a/serde_v8/magic/bytestring.rs b/serde_v8/magic/bytestring.rs
index 3efb56f6a..c7c1b9de8 100644
--- a/serde_v8/magic/bytestring.rs
+++ b/serde_v8/magic/bytestring.rs
@@ -51,9 +51,9 @@ impl FromV8 for ByteString {
}
let len = v8str.length();
let mut buffer = SmallVec::with_capacity(len);
+ #[allow(clippy::uninit_vec)]
// SAFETY: we set length == capacity (see previous line),
// before immediately writing into that buffer and sanity check with an assert
- #[allow(clippy::uninit_vec)]
unsafe {
buffer.set_len(len);
let written = v8str.write_one_byte(
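Note: the pattern touched here (reserve a buffer, set its length to the reserved capacity, write every byte, then assert the written count) can be sketched standalone, with the #[allow] attribute placed above the SAFETY comment so the comment sits directly against the unsafe block. This is a minimal illustration only; fill_bytes and read_bytes are hypothetical stand-ins for v8::String::write_one_byte and the FromV8 impl above.

use smallvec::SmallVec;

// Hypothetical writer standing in for v8::String::write_one_byte: fills `dst`
// completely and reports how many bytes it wrote.
fn fill_bytes(dst: &mut [u8]) -> usize {
  for (i, b) in dst.iter_mut().enumerate() {
    *b = i as u8;
  }
  dst.len()
}

fn read_bytes(len: usize) -> SmallVec<[u8; 16]> {
  let mut buffer: SmallVec<[u8; 16]> = SmallVec::with_capacity(len);
  #[allow(clippy::uninit_vec)]
  // SAFETY: length is set to the capacity reserved on the previous line, and
  // every byte is written before the buffer is read; the assert checks that.
  unsafe {
    buffer.set_len(len);
    let written = fill_bytes(&mut buffer);
    assert!(written == len);
  }
  buffer
}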
diff --git a/serde_v8/magic/rawbytes.rs b/serde_v8/magic/rawbytes.rs
index 2189ebfc3..4e41d313a 100644
--- a/serde_v8/magic/rawbytes.rs
+++ b/serde_v8/magic/rawbytes.rs
@@ -87,11 +87,11 @@ mod tests {
#[test]
fn bytes_layout() {
- // SAFETY: ensuring layout is the same
let u1: [usize; 4] =
+ // SAFETY: ensuring layout is the same
unsafe { mem::transmute(from_static(HELLO.as_bytes())) };
- // SAFETY: ensuring layout is the same
let u2: [usize; 4] =
+ // SAFETY: ensuring layout is the same
unsafe { mem::transmute(bytes::Bytes::from_static(HELLO.as_bytes())) };
assert_eq!(u1[..3], u2[..3]); // Struct bytes are equal besides Vtables
}
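The layout-probing idea in this test (transmute a four-word value into [usize; 4] and compare the words) can be sketched on its own. RawParts below is a hypothetical stand-in, not the actual RawBytes or bytes::Bytes layout.

use std::mem;

// Hypothetical four-word struct; #[repr(C)] pins the field order so the
// word-by-word comparison below is deterministic.
#[repr(C)]
struct RawParts {
  ptr: *const u8,
  len: usize,
  cap: usize,
  vtable: usize,
}

#[test]
fn raw_parts_layout() {
  let parts = RawParts {
    ptr: b"hello".as_ptr(),
    len: 5,
    cap: 5,
    vtable: 0,
  };
  // SAFETY: RawParts is exactly four machine words, the same size as [usize; 4]
  let words: [usize; 4] = unsafe { mem::transmute(parts) };
  assert_eq!(words[1], 5); // the second word is `len`
}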
diff --git a/serde_v8/magic/u16string.rs b/serde_v8/magic/u16string.rs
index e304ea187..1e36879a4 100644
--- a/serde_v8/magic/u16string.rs
+++ b/serde_v8/magic/u16string.rs
@@ -34,9 +34,9 @@ impl FromV8 for U16String {
.map_err(|_| Error::ExpectedString)?;
let len = v8str.length();
let mut buffer = Vec::with_capacity(len);
+ #[allow(clippy::uninit_vec)]
// SAFETY: we set length == capacity (see previous line),
// before immediately writing into that buffer and sanity check with an assert
- #[allow(clippy::uninit_vec)]
unsafe {
buffer.set_len(len);
let written = v8str.write(
diff --git a/serde_v8/magic/v8slice.rs b/serde_v8/magic/v8slice.rs
index 452c857a3..67255fc53 100644
--- a/serde_v8/magic/v8slice.rs
+++ b/serde_v8/magic/v8slice.rs
@@ -50,6 +50,7 @@ impl V8Slice {
}
fn as_slice_mut(&mut self) -> &mut [u8] {
+ #[allow(clippy::cast_ref_to_mut)]
// SAFETY: v8::SharedRef<v8::BackingStore> is similar to Arc<[u8]>,
// it points to a fixed continuous slice of bytes on the heap.
// It's safe-ish to mutate concurrently because it can not be
@@ -59,7 +60,6 @@ impl V8Slice {
// concurrent mutation is simply an accepted fact of life.
// And in practice V8Slices also do not have overlapping read/write phases.
// TLDR: permissive interior mutability on slices of bytes is "fine"
- #[allow(clippy::cast_ref_to_mut)]
unsafe {
&mut *(&self.store[self.range.clone()] as *const _ as *mut [u8])
}
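For comparison, the interior mutability this comment describes can also be expressed with UnsafeCell, which makes the mutation through a shared handle explicit instead of casting a shared reference to a mutable one. SharedBytes below is an illustrative stand-in under that swapped-in approach, not the deno_core V8Slice type.

use std::cell::UnsafeCell;
use std::sync::Arc;

// Illustrative stand-in for a shared, fixed-length byte buffer.
struct SharedBytes(UnsafeCell<Box<[u8]>>);

// SAFETY: callers are expected to avoid overlapping read/write phases,
// the same informal contract the V8Slice comment spells out.
unsafe impl Sync for SharedBytes {}

impl SharedBytes {
  fn new(len: usize) -> Arc<Self> {
    Arc::new(SharedBytes(UnsafeCell::new(vec![0u8; len].into_boxed_slice())))
  }

  #[allow(clippy::mut_from_ref)]
  // SAFETY: UnsafeCell makes the aliasing explicit; soundness still relies on
  // callers not holding overlapping borrows of the same region.
  fn as_slice_mut(&self) -> &mut [u8] {
    unsafe { &mut (*self.0.get())[..] }
  }
}

fn main() {
  let bytes = SharedBytes::new(4);
  bytes.as_slice_mut().copy_from_slice(&[1, 2, 3, 4]);
  assert_eq!(bytes.as_slice_mut()[0], 1);
}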