path: root/ext/ffi/turbocall.rs
Diffstat (limited to 'ext/ffi/turbocall.rs')
-rw-r--r--  ext/ffi/turbocall.rs  52
1 file changed, 31 insertions(+), 21 deletions(-)
diff --git a/ext/ffi/turbocall.rs b/ext/ffi/turbocall.rs
index e8858df86..079743c16 100644
--- a/ext/ffi/turbocall.rs
+++ b/ext/ffi/turbocall.rs
@@ -14,11 +14,17 @@ use crate::NativeType;
use crate::Symbol;
pub(crate) fn is_compatible(sym: &Symbol) -> bool {
+ // TODO: Support structs by value in fast call
cfg!(any(
all(target_arch = "x86_64", target_family = "unix"),
all(target_arch = "x86_64", target_family = "windows"),
all(target_arch = "aarch64", target_vendor = "apple")
)) && !sym.can_callback
+ && !matches!(sym.result_type, NativeType::Struct(_))
+ && !sym
+ .parameter_types
+ .iter()
+ .any(|t| matches!(t, NativeType::Struct(_)))
}
pub(crate) fn compile_trampoline(sym: &Symbol) -> Trampoline {
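The gate above only enables the fast-call path on the three supported targets and rejects symbols that use callbacks or that pass or return structs by value. A minimal standalone sketch of the same predicate, using simplified stand-ins for the crate's Symbol and NativeType (the real definitions come from crate::NativeType and crate::Symbol):

enum NativeType {
    U32,
    Struct(Vec<NativeType>),
}

struct Symbol {
    parameter_types: Vec<NativeType>,
    result_type: NativeType,
    can_callback: bool,
}

fn is_compatible(sym: &Symbol) -> bool {
    // Fast calls are only compiled for x86_64 unix/windows and Apple aarch64 targets.
    cfg!(any(
        all(target_arch = "x86_64", target_family = "unix"),
        all(target_arch = "x86_64", target_family = "windows"),
        all(target_arch = "aarch64", target_vendor = "apple")
    ))
    // Callbacks fall back to the slow path.
    && !sym.can_callback
    // Structs by value are not supported yet (see the TODO above).
    && !matches!(sym.result_type, NativeType::Struct(_))
    && !sym
        .parameter_types
        .iter()
        .any(|t| matches!(t, NativeType::Struct(_)))
}

fn main() {
    let sym = Symbol {
        parameter_types: vec![NativeType::U32],
        result_type: NativeType::U32,
        can_callback: false,
    };
    println!("fast-call compatible: {}", is_compatible(&sym));
}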
@@ -39,7 +45,7 @@ pub(crate) fn make_template(sym: &Symbol, trampoline: &Trampoline) -> Template {
.chain(sym.parameter_types.iter().map(|t| t.into()))
.collect::<Vec<_>>();
- let ret = if needs_unwrap(sym.result_type) {
+ let ret = if needs_unwrap(&sym.result_type) {
params.push(fast_api::Type::TypedArray(fast_api::CType::Int32));
fast_api::Type::Void
} else {
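needs_unwrap marks result types that cannot be returned directly from a V8 fast call. When it returns true, the template declares the return type as Void and appends an Int32 typed-array parameter that the trampoline writes the real result into. A sketch of that signature rewrite with made-up types (not the real fast_api bindings):

#[derive(Debug, PartialEq)]
enum FastType {
    Void,
    Uint32,
    Int32Array, // stands in for fast_api::Type::TypedArray(CType::Int32)
}

// If the result needs unwrapping, append an out-array parameter and return Void.
fn rewrite_signature(
    mut params: Vec<FastType>,
    result_needs_unwrap: bool,
    ret: FastType,
) -> (Vec<FastType>, FastType) {
    if result_needs_unwrap {
        params.push(FastType::Int32Array);
        (params, FastType::Void)
    } else {
        (params, ret)
    }
}

fn main() {
    // E.g. a symbol returning a 64-bit integer: the fast call returns Void and
    // the value is written into the trailing Int32 array instead.
    let (params, ret) = rewrite_signature(vec![FastType::Uint32], true, FastType::Uint32);
    assert_eq!(ret, FastType::Void);
    assert_eq!(params.last(), Some(&FastType::Int32Array));
}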
@@ -104,6 +110,9 @@ impl From<&NativeType> for fast_api::Type {
fast_api::Type::Uint64
}
NativeType::Buffer => fast_api::Type::TypedArray(fast_api::CType::Uint8),
+ NativeType::Struct(_) => {
+ fast_api::Type::TypedArray(fast_api::CType::Uint8)
+ }
}
}
}
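For the fast-call template, a struct value is declared as a Uint8 typed array, i.e. the raw bytes of the struct, the same representation buffers already use. A sketch of that mapping with simplified stand-in enums (not the real fast_api module):

enum NativeType {
    U32,
    Buffer,
    Struct(Vec<NativeType>),
}

enum CType {
    Uint8,
}

enum FastApiType {
    Uint32,
    TypedArray(CType),
}

fn lower(t: &NativeType) -> FastApiType {
    match t {
        NativeType::U32 => FastApiType::Uint32,
        // Buffers and by-value structs are both handed to V8 as raw bytes.
        NativeType::Buffer | NativeType::Struct(_) => FastApiType::TypedArray(CType::Uint8),
    }
}

fn main() {
    let _ = lower(&NativeType::Struct(vec![NativeType::U32]));
}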
@@ -161,9 +170,9 @@ impl SysVAmd64 {
let mut compiler = Self::new();
let must_cast_return_value =
- compiler.must_cast_return_value(sym.result_type);
+ compiler.must_cast_return_value(&sym.result_type);
let must_wrap_return_value =
- compiler.must_wrap_return_value_in_typed_array(sym.result_type);
+ compiler.must_wrap_return_value_in_typed_array(&sym.result_type);
let must_save_preserved_register = must_wrap_return_value;
let cannot_tailcall = must_cast_return_value || must_wrap_return_value;
@@ -174,7 +183,7 @@ impl SysVAmd64 {
compiler.allocate_stack(&sym.parameter_types);
}
- for param in sym.parameter_types.iter().copied() {
+ for param in sym.parameter_types.iter().cloned() {
compiler.move_left(param)
}
if !compiler.is_recv_arg_overridden() {
@@ -188,7 +197,7 @@ impl SysVAmd64 {
if cannot_tailcall {
compiler.call(sym.ptr.as_ptr());
if must_cast_return_value {
- compiler.cast_return_value(sym.result_type);
+ compiler.cast_return_value(&sym.result_type);
}
if must_wrap_return_value {
compiler.wrap_return_value_in_out_array();
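The switch from .copied() to .cloned() throughout this file follows from NativeType itself: once it gains a Struct variant that owns its field list, the enum can no longer be Copy, only Clone, so iterators over parameter_types have to clone each element. A minimal sketch of that constraint, assuming the Struct variant owns heap data:

// Illustrative only: once a variant owns heap data, the enum cannot derive Copy.
#[derive(Clone)]
enum NativeType {
    U32,
    Struct(Vec<NativeType>), // owning a Vec rules out deriving Copy
}

fn main() {
    let parameter_types = vec![NativeType::U32, NativeType::Struct(vec![NativeType::U32])];
    // `.copied()` would require `NativeType: Copy` and no longer compiles;
    // `.cloned()` only needs `Clone`, which is why the compilers here switched.
    for _param in parameter_types.iter().cloned() {
        // ... classify the parameter, as the trampoline compilers do ...
    }
}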
@@ -400,7 +409,7 @@ impl SysVAmd64 {
);
}
- fn cast_return_value(&mut self, rv: NativeType) {
+ fn cast_return_value(&mut self, rv: &NativeType) {
let s = &mut self.assmblr;
// V8 only supports 32-bit integers. We support 8 and 16 bit integers by casting them to 32 bits.
// In SysV-AMD64 the convention dictates that the unused bits of the return value contain garbage, so we
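Because the SysV AMD64 ABI leaves the upper bits of a sub-32-bit return register unspecified, the trampoline must explicitly sign- or zero-extend 8- and 16-bit results before handing them to V8, which only deals in 32-bit integers. In Rust terms the extension amounts to something like the following (an illustration of the value-level semantics, not the emitted assembly):

// Illustrative only: the trampoline emits movsx/movzx-style instructions;
// this just shows the effect of the cast on the value.
fn extend_i8(raw: i8) -> i32 {
    raw as i32 // sign-extend: -1i8 stays -1i32
}

fn extend_u16(raw: u16) -> u32 {
    raw as u32 // zero-extend: the upper 16 bits become 0
}

fn main() {
    assert_eq!(extend_i8(-1), -1);
    assert_eq!(extend_u16(0xFFFF), 0x0000_FFFF);
}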
@@ -550,7 +559,7 @@ impl SysVAmd64 {
self.integral_params > 0
}
- fn must_cast_return_value(&self, rv: NativeType) -> bool {
+ fn must_cast_return_value(&self, rv: &NativeType) -> bool {
// V8 only supports i32 and u32 return types for integers
// We support 8 and 16 bit integers by extending them to 32 bits in the trampoline before returning
matches!(
@@ -559,7 +568,7 @@ impl SysVAmd64 {
)
}
- fn must_wrap_return_value_in_typed_array(&self, rv: NativeType) -> bool {
+ fn must_wrap_return_value_in_typed_array(&self, rv: &NativeType) -> bool {
// V8 only supports i32 and u32 return types for integers
// We support 64 bit integers by wrapping them in a TypedArray out parameter
crate::dlfcn::needs_unwrap(rv)
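must_wrap_return_value_in_typed_array delegates to crate::dlfcn::needs_unwrap, the same predicate make_template consults. Its definition is not part of this diff; conceptually it flags return types that cannot come back through a fast-call return register. A guess at its shape, for orientation only, not the actual dlfcn.rs code:

// Illustrative only: a guessed shape for needs_unwrap, not the actual dlfcn.rs definition.
#[derive(Clone)]
enum NativeType {
    U32,
    I64,
    U64,
    ISize,
    USize,
    Struct(Vec<NativeType>),
}

fn needs_unwrap(rv: &NativeType) -> bool {
    // Return values that cannot come back through the fast-call return value:
    // 64-bit integers (and, with this change, structs) are written to a
    // typed-array out parameter instead and unwrapped on the JS side.
    matches!(
        rv,
        NativeType::I64
            | NativeType::U64
            | NativeType::ISize
            | NativeType::USize
            | NativeType::Struct(_)
    )
}

fn main() {
    assert!(needs_unwrap(&NativeType::U64));
    assert!(!needs_unwrap(&NativeType::U32));
}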
@@ -607,7 +616,7 @@ impl Aarch64Apple {
let mut compiler = Self::new();
let must_wrap_return_value =
- compiler.must_wrap_return_value_in_typed_array(sym.result_type);
+ compiler.must_wrap_return_value_in_typed_array(&sym.result_type);
let must_save_preserved_register = must_wrap_return_value;
let cannot_tailcall = must_wrap_return_value;
@@ -619,14 +628,14 @@ impl Aarch64Apple {
}
}
- for param in sym.parameter_types.iter().copied() {
+ for param in sym.parameter_types.iter().cloned() {
compiler.move_left(param)
}
if !compiler.is_recv_arg_overridden() {
// the receiver object should never be expected. Avoid its unexpected or deliberate leak
compiler.zero_first_arg();
}
- if compiler.must_wrap_return_value_in_typed_array(sym.result_type) {
+ if compiler.must_wrap_return_value_in_typed_array(&sym.result_type) {
compiler.save_out_array_to_preserved_register();
}
@@ -963,7 +972,7 @@ impl Aarch64Apple {
let mut int_params = 0u32;
let mut float_params = 0u32;
let mut stack_size = 0u32;
- for param in symbol.parameter_types.iter().copied() {
+ for param in symbol.parameter_types.iter().cloned() {
match param.into() {
Float(float_param) => {
float_params += 1;
@@ -1069,10 +1078,10 @@ impl Aarch64Apple {
}
fn must_save_preserved_register_to_stack(&mut self, symbol: &Symbol) -> bool {
- self.must_wrap_return_value_in_typed_array(symbol.result_type)
+ self.must_wrap_return_value_in_typed_array(&symbol.result_type)
}
- fn must_wrap_return_value_in_typed_array(&self, rv: NativeType) -> bool {
+ fn must_wrap_return_value_in_typed_array(&self, rv: &NativeType) -> bool {
// V8 only supports i32 and u32 return types for integers
// We support 64 bit integers by wrapping them in a TypedArray out parameter
crate::dlfcn::needs_unwrap(rv)
@@ -1120,9 +1129,9 @@ impl Win64 {
let mut compiler = Self::new();
let must_cast_return_value =
- compiler.must_cast_return_value(sym.result_type);
+ compiler.must_cast_return_value(&sym.result_type);
let must_wrap_return_value =
- compiler.must_wrap_return_value_in_typed_array(sym.result_type);
+ compiler.must_wrap_return_value_in_typed_array(&sym.result_type);
let must_save_preserved_register = must_wrap_return_value;
let cannot_tailcall = must_cast_return_value || must_wrap_return_value;
@@ -1133,7 +1142,7 @@ impl Win64 {
compiler.allocate_stack(&sym.parameter_types);
}
- for param in sym.parameter_types.iter().copied() {
+ for param in sym.parameter_types.iter().cloned() {
compiler.move_left(param)
}
if !compiler.is_recv_arg_overridden() {
@@ -1147,7 +1156,7 @@ impl Win64 {
if cannot_tailcall {
compiler.call(sym.ptr.as_ptr());
if must_cast_return_value {
- compiler.cast_return_value(sym.result_type);
+ compiler.cast_return_value(&sym.result_type);
}
if must_wrap_return_value {
compiler.wrap_return_value_in_out_array();
@@ -1284,7 +1293,7 @@ impl Win64 {
x64!(self.assmblr; xor ecx, ecx);
}
- fn cast_return_value(&mut self, rv: NativeType) {
+ fn cast_return_value(&mut self, rv: &NativeType) {
let s = &mut self.assmblr;
// V8 only supports 32-bit integers. We support 8 and 16 bit integers by casting them to 32 bits.
// Section "Return Values" of the Windows x64 Calling Convention doc:
@@ -1419,7 +1428,7 @@ impl Win64 {
self.params > 0
}
- fn must_cast_return_value(&self, rv: NativeType) -> bool {
+ fn must_cast_return_value(&self, rv: &NativeType) -> bool {
// V8 only supports i32 and u32 return types for integers
// We support 8 and 16 bit integers by extending them to 32 bits in the trampoline before returning
matches!(
@@ -1428,7 +1437,7 @@ impl Win64 {
)
}
- fn must_wrap_return_value_in_typed_array(&self, rv: NativeType) -> bool {
+ fn must_wrap_return_value_in_typed_array(&self, rv: &NativeType) -> bool {
// V8 only supports i32 and u32 return types for integers
// We support 64 bit integers by wrapping them in a TypedArray out parameter
crate::dlfcn::needs_unwrap(rv)
@@ -1510,6 +1519,7 @@ impl From<NativeType> for Param {
NativeType::I32 => Int(I(DW)),
NativeType::I64 | NativeType::ISize => Int(I(QW)),
NativeType::Buffer => Int(Buffer),
+ NativeType::Struct(_) => unimplemented!(),
}
}
}
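Classifying a struct parameter into registers or stack slots is left as unimplemented!() here, which is sound only because is_compatible (top of this diff) refuses to compile a trampoline for any symbol that takes or returns a struct, so this arm should be unreachable in practice. The invariant, in miniature and with made-up stand-in types:

// Illustrative only: the invariant that keeps the unimplemented!() arm unreachable.
#[derive(Clone)]
enum NativeType {
    U32,
    Struct(Vec<NativeType>),
}

enum Param {
    Int,
}

// Mirrors the struct check in is_compatible at the top of this diff.
fn params_are_compatible(params: &[NativeType]) -> bool {
    !params.iter().any(|t| matches!(t, NativeType::Struct(_)))
}

fn classify(t: NativeType) -> Param {
    match t {
        NativeType::U32 => Param::Int,
        // Safe only because trampolines are never compiled for struct-taking symbols.
        NativeType::Struct(_) => unimplemented!("structs by value are not fast-call compatible yet"),
    }
}

fn main() {
    let parameter_types = vec![NativeType::U32];
    if params_are_compatible(&parameter_types) {
        for p in parameter_types.iter().cloned() {
            let _ = classify(p);
        }
    }
}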