diff options
author | Aapo Alasuutari <aapo.alasuutari@gmail.com> | 2024-05-30 05:30:11 +0300 |
---|---|---|
committer | GitHub <noreply@github.com> | 2024-05-30 08:00:11 +0530 |
commit | d67ee9a08be200dc1ce9a416c9cda82730e24b68 (patch) | |
tree | 40cfd61cfbf89e6a1347d40caaa9ab3cdd3b7e25 /ext/ffi | |
parent | a379009bfdddc56d6400740ad7be86f8930952ab (diff) |
BREAKING(ffi/unstable): use BigInt representation in turbocall (#23983)
Built on top of #23981, this sets FFI
turbocalls (Fast Call API) to use the BigInt representation.
Diffstat (limited to 'ext/ffi')
-rw-r--r-- | ext/ffi/00_ffi.js | 65 | ||||
-rw-r--r-- | ext/ffi/dlfcn.rs | 48 | ||||
-rw-r--r-- | ext/ffi/repr.rs | 56 | ||||
-rw-r--r-- | ext/ffi/turbocall.rs | 276 |
4 files changed, 37 insertions, 408 deletions
diff --git a/ext/ffi/00_ffi.js b/ext/ffi/00_ffi.js index 7f39db13e..06caf7c6c 100644 --- a/ext/ffi/00_ffi.js +++ b/ext/ffi/00_ffi.js @@ -42,19 +42,14 @@ const { ArrayBufferPrototypeGetByteLength, ArrayPrototypeMap, ArrayPrototypeJoin, + BigInt, DataViewPrototypeGetByteLength, ObjectDefineProperty, ObjectHasOwn, ObjectPrototypeIsPrototypeOf, - NumberIsSafeInteger, - TypedArrayPrototypeGetBuffer, TypedArrayPrototypeGetByteLength, TypeError, Uint8Array, - Int32Array, - Uint32Array, - BigInt64Array, - BigUint64Array, Function, ReflectHas, PromisePrototypeThen, @@ -79,9 +74,6 @@ function getBufferSourceByteLength(source) { } return ArrayBufferPrototypeGetByteLength(source); } -const U32_BUFFER = new Uint32Array(2); -const U64_BUFFER = new BigUint64Array(TypedArrayPrototypeGetBuffer(U32_BUFFER)); -const I64_BUFFER = new BigInt64Array(TypedArrayPrototypeGetBuffer(U32_BUFFER)); class UnsafePointerView { pointer; @@ -139,21 +131,21 @@ class UnsafePointerView { } getBigUint64(offset = 0) { - op_ffi_read_u64( + return op_ffi_read_u64( this.pointer, - offset, - U32_BUFFER, + // We return a BigInt, so the turbocall + // is forced to use BigInts everywhere. + BigInt(offset), ); - return U64_BUFFER[0]; } getBigInt64(offset = 0) { - op_ffi_read_i64( + return op_ffi_read_i64( this.pointer, - offset, - U32_BUFFER, + // We return a BigInt, so the turbocall + // is forced to use BigInts everywhere. 
+ BigInt(offset), ); - return I64_BUFFER[0]; } getFloat32(offset = 0) { @@ -226,10 +218,6 @@ class UnsafePointerView { } } -const OUT_BUFFER = new Uint32Array(2); -const OUT_BUFFER_64 = new BigInt64Array( - TypedArrayPrototypeGetBuffer(OUT_BUFFER), -); const POINTER_TO_BUFFER_WEAK_MAP = new SafeWeakMap(); class UnsafePointer { static create(value) { @@ -279,12 +267,7 @@ class UnsafePointer { if (ObjectPrototypeIsPrototypeOf(UnsafeCallbackPrototype, value)) { value = value.pointer; } - op_ffi_ptr_value(value, OUT_BUFFER); - const result = OUT_BUFFER[0] + 2 ** 32 * OUT_BUFFER[1]; - if (NumberIsSafeInteger(result)) { - return result; - } - return OUT_BUFFER_64[0]; + return op_ffi_ptr_value(value); } } @@ -342,11 +325,6 @@ class UnsafeFnPointer { } } -function isReturnedAsBigInt(type) { - return type === "u64" || type === "i64" || - type === "usize" || type === "isize"; -} - function isStruct(type) { return typeof type === "object" && type !== null && typeof type.struct === "object"; @@ -517,7 +495,6 @@ class DynamicLibrary { const structSize = isStructResult ? getTypeSizeAndAlignment(resultType)[0] : 0; - const needsUnpacking = isReturnedAsBigInt(resultType); const isNonBlocking = symbols[symbol].nonblocking; if (isNonBlocking) { @@ -553,27 +530,7 @@ class DynamicLibrary { ); } - if (needsUnpacking && !isNonBlocking) { - const call = this.symbols[symbol]; - const parameters = symbols[symbol].parameters; - const vi = new Int32Array(2); - const b = new BigInt64Array(TypedArrayPrototypeGetBuffer(vi)); - - const params = ArrayPrototypeJoin( - ArrayPrototypeMap(parameters, (_, index) => `p${index}`), - ", ", - ); - // Make sure V8 has no excuse to not optimize this function. - this.symbols[symbol] = new Function( - "vi", - "b", - "call", - `return function (${params}) { - call(${params}${parameters.length > 0 ? 
", " : ""}vi); - return b[0]; - }`, - )(vi, b, call); - } else if (isStructResult && !isNonBlocking) { + if (isStructResult && !isNonBlocking) { const call = this.symbols[symbol]; const parameters = symbols[symbol].parameters; const params = ArrayPrototypeJoin( diff --git a/ext/ffi/dlfcn.rs b/ext/ffi/dlfcn.rs index bd46f14b2..02ab4bb6a 100644 --- a/ext/ffi/dlfcn.rs +++ b/ext/ffi/dlfcn.rs @@ -52,17 +52,6 @@ impl DynamicLibraryResource { } } -pub fn needs_unwrap(rv: &NativeType) -> bool { - matches!( - rv, - NativeType::I64 | NativeType::ISize | NativeType::U64 | NativeType::USize - ) -} - -fn is_i64(rv: &NativeType) -> bool { - matches!(rv, NativeType::I64 | NativeType::ISize) -} - #[derive(Deserialize, Debug)] #[serde(rename_all = "camelCase")] pub struct ForeignFunction { @@ -242,10 +231,6 @@ fn make_sync_fn<'s>( // SAFETY: The pointer will not be deallocated until the function is // garbage collected. let symbol = unsafe { &*(external.value() as *const Symbol) }; - let needs_unwrap = match needs_unwrap(&symbol.result_type) { - true => Some(args.get(symbol.parameter_types.len() as i32)), - false => None, - }; let out_buffer = match symbol.result_type { NativeType::Struct(_) => { let argc = args.length(); @@ -261,35 +246,10 @@ fn make_sync_fn<'s>( }; match crate::call::ffi_call_sync(scope, args, symbol, out_buffer) { Ok(result) => { - match needs_unwrap { - Some(v) => { - let view: v8::Local<v8::ArrayBufferView> = v.try_into().unwrap(); - let pointer = - view.buffer(scope).unwrap().data().unwrap().as_ptr() as *mut u8; - - if is_i64(&symbol.result_type) { - // SAFETY: v8::SharedRef<v8::BackingStore> is similar to Arc<[u8]>, - // it points to a fixed continuous slice of bytes on the heap. 
- let bs = unsafe { &mut *(pointer as *mut i64) }; - // SAFETY: We already checked that type == I64 - let value = unsafe { result.i64_value }; - *bs = value; - } else { - // SAFETY: v8::SharedRef<v8::BackingStore> is similar to Arc<[u8]>, - // it points to a fixed continuous slice of bytes on the heap. - let bs = unsafe { &mut *(pointer as *mut u64) }; - // SAFETY: We checked that type == U64 - let value = unsafe { result.u64_value }; - *bs = value; - } - } - None => { - let result = - // SAFETY: Same return type declared to libffi; trust user to have it right beyond that. - unsafe { result.to_v8(scope, symbol.result_type.clone()) }; - rv.set(result); - } - } + let result = + // SAFETY: Same return type declared to libffi; trust user to have it right beyond that. + unsafe { result.to_v8(scope, symbol.result_type.clone()) }; + rv.set(result); } Err(err) => { deno_core::_ops::throw_type_error(scope, err.to_string()); diff --git a/ext/ffi/repr.rs b/ext/ffi/repr.rs index d6acc8ad2..c3656f0fe 100644 --- a/ext/ffi/repr.rs +++ b/ext/ffi/repr.rs @@ -112,11 +112,11 @@ unsafe extern "C" fn noop_deleter_callback( } #[op2(fast)] +#[bigint] pub fn op_ffi_ptr_value<FP>( state: &mut OpState, ptr: *mut c_void, - #[buffer] out: &mut [u32], -) -> Result<(), AnyError> +) -> Result<usize, AnyError> where FP: FfiPermissions + 'static, { @@ -124,18 +124,7 @@ where let permissions = state.borrow_mut::<FP>(); permissions.check_partial(None)?; - let outptr = out.as_ptr() as *mut usize; - let length = out.len(); - assert!( - length >= (std::mem::size_of::<usize>() / std::mem::size_of::<u32>()) - ); - assert_eq!(outptr as usize % std::mem::size_of::<usize>(), 0); - - // SAFETY: Out buffer was asserted to be at least large enough to hold a usize, and properly aligned. 
- let out = unsafe { &mut *outptr }; - *out = ptr as usize; - - Ok(()) + Ok(ptr as usize) } #[op2] @@ -398,12 +387,14 @@ where } #[op2(fast)] +#[bigint] pub fn op_ffi_read_u64<FP>( state: &mut OpState, ptr: *mut c_void, - #[number] offset: isize, - #[buffer] out: &mut [u32], -) -> Result<(), AnyError> + // Note: The representation of 64-bit integers is function-wide. We cannot + // choose to take this parameter as a number while returning a bigint. + #[bigint] offset: isize, +) -> Result<u64, AnyError> where FP: FfiPermissions + 'static, { @@ -412,13 +403,6 @@ where let permissions = state.borrow_mut::<FP>(); permissions.check_partial(None)?; - let outptr = out.as_mut_ptr() as *mut u64; - - assert!( - out.len() >= (std::mem::size_of::<u64>() / std::mem::size_of::<u32>()) - ); - assert_eq!((outptr as usize % std::mem::size_of::<u64>()), 0); - if ptr.is_null() { return Err(type_error("Invalid u64 pointer, pointer is null")); } @@ -427,33 +411,26 @@ where // SAFETY: ptr and offset are user provided. unsafe { ptr::read_unaligned::<u64>(ptr.offset(offset) as *const u64) }; - // SAFETY: Length and alignment of out slice were asserted to be correct. - unsafe { *outptr = value }; - Ok(()) + Ok(value) } #[op2(fast)] +#[bigint] pub fn op_ffi_read_i64<FP>( state: &mut OpState, ptr: *mut c_void, - #[number] offset: isize, - #[buffer] out: &mut [u32], -) -> Result<(), AnyError> + // Note: The representation of 64-bit integers is function-wide. We cannot + // choose to take this parameter as a number while returning a bigint. 
+ #[bigint] offset: isize, +) -> Result<i64, AnyError> where FP: FfiPermissions + 'static, { - check_unstable(state, "Deno.UnsafePointerView#getBigUint64"); + check_unstable(state, "Deno.UnsafePointerView#getBigInt64"); let permissions = state.borrow_mut::<FP>(); permissions.check_partial(None)?; - let outptr = out.as_mut_ptr() as *mut i64; - - assert!( - out.len() >= (std::mem::size_of::<i64>() / std::mem::size_of::<u32>()) - ); - assert_eq!((outptr as usize % std::mem::size_of::<i64>()), 0); - if ptr.is_null() { return Err(type_error("Invalid i64 pointer, pointer is null")); } @@ -462,8 +439,7 @@ where // SAFETY: ptr and offset are user provided. unsafe { ptr::read_unaligned::<i64>(ptr.offset(offset) as *const i64) }; // SAFETY: Length and alignment of out slice were asserted to be correct. - unsafe { *outptr = value }; - Ok(()) + Ok(value) } #[op2(fast)] diff --git a/ext/ffi/turbocall.rs b/ext/ffi/turbocall.rs index 0417da633..204388946 100644 --- a/ext/ffi/turbocall.rs +++ b/ext/ffi/turbocall.rs @@ -9,7 +9,6 @@ use dynasmrt::dynasm; use dynasmrt::DynasmApi; use dynasmrt::ExecutableBuffer; -use crate::dlfcn::needs_unwrap; use crate::NativeType; use crate::Symbol; @@ -46,21 +45,18 @@ pub(crate) fn make_template( sym: &Symbol, trampoline: &Trampoline, ) -> fast_api::FastFunction { - let mut params = once(fast_api::Type::V8Value) // Receiver + let params = once(fast_api::Type::V8Value) // Receiver .chain(sym.parameter_types.iter().map(|t| t.into())) .collect::<Vec<_>>(); - let ret = if needs_unwrap(&sym.result_type) { - params.push(fast_api::Type::TypedArray(fast_api::CType::Int32)); - fast_api::CType::Void - } else if sym.result_type == NativeType::Buffer { + let ret = if sym.result_type == NativeType::Buffer { // Buffer can be used as a return type and converts differently than in parameters. 
fast_api::CType::Pointer } else { fast_api::CType::from(&fast_api::Type::from(&sym.result_type)) }; - fast_api::FastFunction::new( + fast_api::FastFunction::new_with_bigint( Box::leak(params.into_boxed_slice()), ret, trampoline.ptr(), @@ -158,15 +154,9 @@ impl SysVAmd64 { let must_cast_return_value = compiler.must_cast_return_value(&sym.result_type); - let must_wrap_return_value = - compiler.must_wrap_return_value_in_typed_array(&sym.result_type); - let must_save_preserved_register = must_wrap_return_value; - let cannot_tailcall = must_cast_return_value || must_wrap_return_value; + let cannot_tailcall = must_cast_return_value; if cannot_tailcall { - if must_save_preserved_register { - compiler.save_preserved_register_to_stack(); - } compiler.allocate_stack(&sym.parameter_types); } @@ -177,22 +167,13 @@ impl SysVAmd64 { // the receiver object should never be expected. Avoid its unexpected or deliberate leak compiler.zero_first_arg(); } - if must_wrap_return_value { - compiler.save_out_array_to_preserved_register(); - } if cannot_tailcall { compiler.call(sym.ptr.as_ptr()); if must_cast_return_value { compiler.cast_return_value(&sym.result_type); } - if must_wrap_return_value { - compiler.wrap_return_value_in_out_array(); - } compiler.deallocate_stack(); - if must_save_preserved_register { - compiler.recover_preserved_register(); - } compiler.ret(); } else { compiler.tailcall(sym.ptr.as_ptr()); @@ -555,12 +536,6 @@ impl SysVAmd64 { ) } - fn must_wrap_return_value_in_typed_array(&self, rv: &NativeType) -> bool { - // V8 only supports i32 and u32 return types for integers - // We support 64 bit integers by wrapping them in a TypedArray out parameter - crate::dlfcn::needs_unwrap(rv) - } - fn finalize(self) -> ExecutableBuffer { self.assmblr.finalize().unwrap() } @@ -602,19 +577,6 @@ impl Aarch64Apple { fn compile(sym: &Symbol) -> Trampoline { let mut compiler = Self::new(); - let must_wrap_return_value = - compiler.must_wrap_return_value_in_typed_array(&sym.result_type); 
- let must_save_preserved_register = must_wrap_return_value; - let cannot_tailcall = must_wrap_return_value; - - if cannot_tailcall { - compiler.allocate_stack(sym); - compiler.save_frame_record(); - if compiler.must_save_preserved_register_to_stack(sym) { - compiler.save_preserved_register_to_stack(); - } - } - for param in sym.parameter_types.iter().cloned() { compiler.move_left(param) } @@ -622,24 +584,8 @@ impl Aarch64Apple { // the receiver object should never be expected. Avoid its unexpected or deliberate leak compiler.zero_first_arg(); } - if compiler.must_wrap_return_value_in_typed_array(&sym.result_type) { - compiler.save_out_array_to_preserved_register(); - } - if cannot_tailcall { - compiler.call(sym.ptr.as_ptr()); - if must_wrap_return_value { - compiler.wrap_return_value_in_out_array(); - } - if must_save_preserved_register { - compiler.recover_preserved_register(); - } - compiler.recover_frame_record(); - compiler.deallocate_stack(); - compiler.ret(); - } else { - compiler.tailcall(sym.ptr.as_ptr()); - } + compiler.tailcall(sym.ptr.as_ptr()); Trampoline(compiler.finalize()) } @@ -980,10 +926,6 @@ impl Aarch64Apple { // > Each frame shall link to the frame of its caller by means of a frame record of two 64-bit values on the stack stack_size += 16; - if self.must_save_preserved_register_to_stack(symbol) { - stack_size += 8; - } - // Section 6.2.2 of Aarch64 PCS: // > At any point at which memory is accessed via SP, the hardware requires that // > - SP mod 16 = 0. The stack must be quad-word aligned. 
@@ -1064,16 +1006,6 @@ impl Aarch64Apple { self.integral_params > 0 } - fn must_save_preserved_register_to_stack(&mut self, symbol: &Symbol) -> bool { - self.must_wrap_return_value_in_typed_array(&symbol.result_type) - } - - fn must_wrap_return_value_in_typed_array(&self, rv: &NativeType) -> bool { - // V8 only supports i32 and u32 return types for integers - // We support 64 bit integers by wrapping them in a TypedArray out parameter - crate::dlfcn::needs_unwrap(rv) - } - fn finalize(self) -> ExecutableBuffer { self.assmblr.finalize().unwrap() } @@ -1117,15 +1049,9 @@ impl Win64 { let must_cast_return_value = compiler.must_cast_return_value(&sym.result_type); - let must_wrap_return_value = - compiler.must_wrap_return_value_in_typed_array(&sym.result_type); - let must_save_preserved_register = must_wrap_return_value; - let cannot_tailcall = must_cast_return_value || must_wrap_return_value; + let cannot_tailcall = must_cast_return_value; if cannot_tailcall { - if must_save_preserved_register { - compiler.save_preserved_register_to_stack(); - } compiler.allocate_stack(&sym.parameter_types); } @@ -1136,22 +1062,13 @@ impl Win64 { // the receiver object should never be expected. 
Avoid its unexpected or deliberate leak compiler.zero_first_arg(); } - if must_wrap_return_value { - compiler.save_out_array_to_preserved_register(); - } if cannot_tailcall { compiler.call(sym.ptr.as_ptr()); if must_cast_return_value { compiler.cast_return_value(&sym.result_type); } - if must_wrap_return_value { - compiler.wrap_return_value_in_out_array(); - } compiler.deallocate_stack(); - if must_save_preserved_register { - compiler.recover_preserved_register(); - } compiler.ret(); } else { compiler.tailcall(sym.ptr.as_ptr()); @@ -1424,12 +1341,6 @@ impl Win64 { ) } - fn must_wrap_return_value_in_typed_array(&self, rv: &NativeType) -> bool { - // V8 only supports i32 and u32 return types for integers - // We support 64 bit integers by wrapping them in a TypedArray out parameter - crate::dlfcn::needs_unwrap(rv) - } - fn finalize(self) -> ExecutableBuffer { self.assmblr.finalize().unwrap() } @@ -1656,61 +1567,6 @@ mod tests { let expected = assembler.finalize().unwrap(); assert_eq!(trampoline.0.deref(), expected.deref()); } - - #[test] - fn return_u64_in_register_typed_array() { - let trampoline = SysVAmd64::compile(&symbol(vec![], U64)); - - let mut assembler = dynasmrt::x64::Assembler::new().unwrap(); - // See https://godbolt.org/z/8G7a488o7 - dynasm!(assembler - ; .arch x64 - ; push rbx - ; xor edi, edi // recv - ; mov rbx, [rsi + 8] // save data array pointer to non-volatile register - ; mov rax, QWORD 0 - ; call rax - ; mov [rbx], rax // copy return value to data pointer address - ; pop rbx - ; ret - ); - let expected = assembler.finalize().unwrap(); - assert_eq!(trampoline.0.deref(), expected.deref()); - } - - #[test] - fn return_u64_in_stack_typed_array() { - let trampoline = SysVAmd64::compile(&symbol( - vec![U64, U64, U64, U64, U64, U64, U64], - U64, - )); - - let mut assembler = dynasmrt::x64::Assembler::new().unwrap(); - // See https://godbolt.org/z/cPnPYWdWq - dynasm!(assembler - ; .arch x64 - ; push rbx - ; sub rsp, DWORD 16 - ; mov rdi, rsi // u64 - ; 
mov rsi, rdx // u64 - ; mov rdx, rcx // u64 - ; mov rcx, r8 // u64 - ; mov r8, r9 // u64 - ; mov r9, [DWORD rsp + 32] // u64 - ; mov rax, [DWORD rsp + 40] // u64 - ; mov [DWORD rsp + 0], rax // .. - ; mov rax, [DWORD rsp + 48] // save data array pointer to non-volatile register - ; mov rbx, [rax + 8] // .. - ; mov rax, QWORD 0 - ; call rax - ; mov [rbx], rax // copy return value to data pointer address - ; add rsp, DWORD 16 - ; pop rbx - ; ret - ); - let expected = assembler.finalize().unwrap(); - assert_eq!(trampoline.0.deref(), expected.deref()); - } } mod aarch64_apple { @@ -1832,73 +1688,6 @@ mod tests { let expected = assembler.finalize().unwrap(); assert_eq!(trampoline.0.deref(), expected.deref()); } - - #[test] - fn return_u64_in_register_typed_array() { - let trampoline = Aarch64Apple::compile(&symbol(vec![], U64)); - - let mut assembler = dynasmrt::aarch64::Assembler::new().unwrap(); - // See https://godbolt.org/z/47EvvYb83 - dynasm!(assembler - ; .arch aarch64 - ; sub sp, sp, 32 - ; stp x29, x30, [sp, 16] - ; add x29, sp, 16 - ; str x19, [sp, 8] - ; mov x0, xzr // recv - ; ldr x19, [x1, 8] // save data array pointer to non-volatile register - ; movz x8, 0 - ; blr x8 - ; str x0, [x19] // copy return value to data pointer address - ; ldr x19, [sp, 8] - ; ldp x29, x30, [sp, 16] - ; add sp, sp, 32 - ; ret - ); - let expected = assembler.finalize().unwrap(); - assert_eq!(trampoline.0.deref(), expected.deref()); - } - - #[test] - fn return_u64_in_stack_typed_array() { - let trampoline = Aarch64Apple::compile(&symbol( - vec![U64, U64, U64, U64, U64, U64, U64, U64, U8, U8], - U64, - )); - - let mut assembler = dynasmrt::aarch64::Assembler::new().unwrap(); - // See https://godbolt.org/z/PvYPbsE1b - dynasm!(assembler - ; .arch aarch64 - ; sub sp, sp, 32 - ; stp x29, x30, [sp, 16] - ; add x29, sp, 16 - ; str x19, [sp, 8] - ; mov x0, x1 // u64 - ; mov x1, x2 // u64 - ; mov x2, x3 // u64 - ; mov x3, x4 // u64 - ; mov x4, x5 // u64 - ; mov x5, x6 // u64 - ; mov x6, x7 
// u64 - ; ldr x7, [sp, 32] // u64 - ; ldr w8, [sp, 40] // u8 - ; strb w8, [sp] // .. - ; ldr w8, [sp, 48] // u8 - ; strb w8, [sp, 1] // .. - ; ldr x19, [sp, 56] // save data array pointer to non-volatile register - ; ldr x19, [x19, 8] // .. - ; movz x8, 0 - ; blr x8 - ; str x0, [x19] // copy return value to data pointer address - ; ldr x19, [sp, 8] - ; ldp x29, x30, [sp, 16] - ; add sp, sp, 32 - ; ret - ); - let expected = assembler.finalize().unwrap(); - assert_eq!(trampoline.0.deref(), expected.deref()); - } } mod x64_windows { @@ -2008,58 +1797,5 @@ mod tests { let expected = assembler.finalize().unwrap(); assert_eq!(trampoline.0.deref(), expected.deref()); } - - #[test] - fn return_u64_in_register_typed_array() { - let trampoline = Win64::compile(&symbol(vec![], U64)); - - let mut assembler = dynasmrt::x64::Assembler::new().unwrap(); - // See https://godbolt.org/z/7EnPE7o3T - dynasm!(assembler - ; .arch x64 - ; push rbx - ; sub rsp, DWORD 32 - ; xor ecx, ecx // recv - ; mov rbx, [rdx + 8] // save data array pointer to non-volatile register - ; mov rax, QWORD 0 - ; call rax - ; mov [rbx], rax // copy return value to data pointer address - ; add rsp, DWORD 32 - ; pop rbx - ; ret - ); - let expected = assembler.finalize().unwrap(); - assert_eq!(trampoline.0.deref(), expected.deref()); - } - - #[test] - fn return_u64_in_stack_typed_array() { - let trampoline = - Win64::compile(&symbol(vec![U64, U64, U64, U64, U64], U64)); - - let mut assembler = dynasmrt::x64::Assembler::new().unwrap(); - // See https://godbolt.org/z/3966sfEex - dynasm!(assembler - ; .arch x64 - ; push rbx - ; sub rsp, DWORD 48 - ; mov rcx, rdx // u64 - ; mov rdx, r8 // u64 - ; mov r8, r9 // u64 - ; mov r9, [DWORD rsp + 96] // u64 - ; mov rax, [DWORD rsp + 104] // u64 - ; mov [DWORD rsp + 32], rax // .. - ; mov rax, [DWORD rsp + 112] // save data array pointer to non-volatile register - ; mov rbx, [rax + 8] // .. 
- ; mov rax, QWORD 0 - ; call rax - ; mov [rbx], rax // copy return value to data pointer address - ; add rsp, DWORD 48 - ; pop rbx - ; ret - ); - let expected = assembler.finalize().unwrap(); - assert_eq!(trampoline.0.deref(), expected.deref()); - } } } |