path: root/ext/napi/lib.rs
author     Matt Mastracci <matthew@mastracci.com>   2023-03-17 12:22:15 -0600
committer  GitHub <noreply@github.com>              2023-03-17 18:22:15 +0000
commit     e55b448730160a6e4df9815a268d4049ac89deab (patch)
tree       35d80fd60f2f1d1d06903caff256484a7d703d76 /ext/napi/lib.rs
parent     0bc6bf5d33b8198253954d7f04558270de45c925 (diff)
feat(core) deno_core::extension! macro to simplify extension registration (#18210)
This implements two macros to simplify extension registration and centralize a lot of the boilerplate as a base for future improvements:

* `deno_core::ops!` registers a block of `#[op]`s, optionally with type parameters, useful for places where we share lists of ops
* `deno_core::extension!` is used to register an extension, and creates two methods that can be used at runtime/snapshot generation time: `init_ops` and `init_ops_and_esm`.

---------

Co-authored-by: Bartek IwaƄczuk <biwanczuk@gmail.com>
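For context, a minimal sketch of how an extension might be declared and registered with the new macro. The extension name `my_ext`, the op `op_hello`, and the embedding code are hypothetical examples, not part of this change, and assume the `deno_core::JsRuntime` API of this release:

    use deno_core::op;
    use deno_core::JsRuntime;
    use deno_core::RuntimeOptions;

    // A trivial synchronous op, exposed to JavaScript as Deno.core.ops.op_hello.
    #[op]
    fn op_hello(name: String) -> String {
      format!("hello, {name}")
    }

    // Declares the extension and generates `my_ext::init_ops()` and
    // `my_ext::init_ops_and_esm()`.
    deno_core::extension!(my_ext, ops = [op_hello]);

    fn main() {
      // At runtime (running from an existing snapshot) only the ops need to be
      // registered; at snapshot generation time `my_ext::init_ops_and_esm()`
      // would be used instead.
      let mut runtime = JsRuntime::new(RuntimeOptions {
        extensions: vec![my_ext::init_ops()],
        ..Default::default()
      });
      runtime
        .execute_script("<usage>", "Deno.core.ops.op_hello('deno');")
        .unwrap();
    }

When the macro is given `parameters = [P: NapiPermissions]`, as in the deno_napi change below, the generated initializers become generic and are invoked with a concrete permissions type, e.g. `deno_napi::init_ops::<SomePermissions>()` (the permissions type name here is illustrative).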
Diffstat (limited to 'ext/napi/lib.rs')
-rw-r--r--  ext/napi/lib.rs  125
1 file changed, 65 insertions, 60 deletions
diff --git a/ext/napi/lib.rs b/ext/napi/lib.rs
index 41004638e..2e7ceed67 100644
--- a/ext/napi/lib.rs
+++ b/ext/napi/lib.rs
@@ -13,7 +13,6 @@ use deno_core::futures::StreamExt;
use deno_core::op;
use deno_core::parking_lot::Mutex;
use deno_core::serde_v8;
-use deno_core::Extension;
use deno_core::OpState;
use std::cell::RefCell;
use std::ffi::CString;
@@ -514,72 +513,78 @@ impl Env {
}
}
-pub fn init_ops<P: NapiPermissions + 'static>() -> Extension {
- Extension::builder(env!("CARGO_PKG_NAME"))
- .ops(vec![op_napi_open::decl::<P>()])
- .event_loop_middleware(|op_state_rc, cx| {
- // `work` can call back into the runtime. It can also schedule an async task
- // but we don't know that now. We need to make the runtime re-poll to make
- // sure no pending NAPI tasks exist.
- let mut maybe_scheduling = false;
-
- {
- let mut op_state = op_state_rc.borrow_mut();
- let napi_state = op_state.borrow_mut::<NapiState>();
+deno_core::extension!(deno_napi,
+ parameters = [P: NapiPermissions],
+ ops = [
+ op_napi_open<P>
+ ],
+ state = |state| {
+ let (async_work_sender, async_work_receiver) =
+ mpsc::unbounded::<PendingNapiAsyncWork>();
+ let (threadsafe_function_sender, threadsafe_function_receiver) =
+ mpsc::unbounded::<ThreadSafeFunctionStatus>();
+ state.put(NapiState {
+ pending_async_work: Vec::new(),
+ async_work_sender,
+ async_work_receiver,
+ threadsafe_function_sender,
+ threadsafe_function_receiver,
+ active_threadsafe_functions: 0,
+ env_cleanup_hooks: Rc::new(RefCell::new(vec![])),
+ tsfn_ref_counters: Arc::new(Mutex::new(vec![])),
+ });
+ },
+ event_loop_middleware = event_loop_middleware,
+);
- while let Poll::Ready(Some(async_work_fut)) =
- napi_state.async_work_receiver.poll_next_unpin(cx)
- {
- napi_state.pending_async_work.push(async_work_fut);
- }
+fn event_loop_middleware(
+ op_state_rc: Rc<RefCell<OpState>>,
+ cx: &mut std::task::Context,
+) -> bool {
+ // `work` can call back into the runtime. It can also schedule an async task
+ // but we don't know that now. We need to make the runtime re-poll to make
+ // sure no pending NAPI tasks exist.
+ let mut maybe_scheduling = false;
+
+ {
+ let mut op_state = op_state_rc.borrow_mut();
+ let napi_state = op_state.borrow_mut::<NapiState>();
+
+ while let Poll::Ready(Some(async_work_fut)) =
+ napi_state.async_work_receiver.poll_next_unpin(cx)
+ {
+ napi_state.pending_async_work.push(async_work_fut);
+ }
- if napi_state.active_threadsafe_functions > 0 {
- maybe_scheduling = true;
- }
+ if napi_state.active_threadsafe_functions > 0 {
+ maybe_scheduling = true;
+ }
- let tsfn_ref_counters = napi_state.tsfn_ref_counters.lock().clone();
- for (_id, counter) in tsfn_ref_counters.iter() {
- if counter.load(std::sync::atomic::Ordering::SeqCst) > 0 {
- maybe_scheduling = true;
- break;
- }
- }
+ let tsfn_ref_counters = napi_state.tsfn_ref_counters.lock().clone();
+ for (_id, counter) in tsfn_ref_counters.iter() {
+ if counter.load(std::sync::atomic::Ordering::SeqCst) > 0 {
+ maybe_scheduling = true;
+ break;
}
+ }
+ }
- loop {
- let maybe_work = {
- let mut op_state = op_state_rc.borrow_mut();
- let napi_state = op_state.borrow_mut::<NapiState>();
- napi_state.pending_async_work.pop()
- };
+ loop {
+ let maybe_work = {
+ let mut op_state = op_state_rc.borrow_mut();
+ let napi_state = op_state.borrow_mut::<NapiState>();
+ napi_state.pending_async_work.pop()
+ };
- if let Some(work) = maybe_work {
- work();
- maybe_scheduling = true;
- } else {
- break;
- }
- }
+ if let Some(work) = maybe_work {
+ work();
+ maybe_scheduling = true;
+ } else {
+ break;
+ }
+ }
- maybe_scheduling
- })
- .state(move |state| {
- let (async_work_sender, async_work_receiver) =
- mpsc::unbounded::<PendingNapiAsyncWork>();
- let (threadsafe_function_sender, threadsafe_function_receiver) =
- mpsc::unbounded::<ThreadSafeFunctionStatus>();
- state.put(NapiState {
- pending_async_work: Vec::new(),
- async_work_sender,
- async_work_receiver,
- threadsafe_function_sender,
- threadsafe_function_receiver,
- active_threadsafe_functions: 0,
- env_cleanup_hooks: Rc::new(RefCell::new(vec![])),
- tsfn_ref_counters: Arc::new(Mutex::new(vec![])),
- });
- })
- .build()
+ maybe_scheduling
}
pub trait NapiPermissions {