author    Matt Mastracci <matthew@mastracci.com>  2023-11-23 09:39:17 -0700
committer GitHub <noreply@github.com>             2023-11-23 16:39:17 +0000
commit    68a0877f8dc4a821b3688105b24953ddfaa9e189 (patch)
tree      faffc99c4a1fbd986ef4c85a46fe8e4767da07c8 /ext/http/http_next.rs
parent    eda3850f84f24b71e02512c1ba2d6bf2e3daa2fd (diff)
fix(ext/http): avoid lockup in graceful shutdown (#21253)
Follow-up to #20822. cc @lrowe

The `httpServerExplicitResourceManagement` tests were randomly failing on CI because of a race: the `drain` waker was missing wakeup events if the listeners shut down after the last HTTP response finished. If we lost the race (rare), the server Rc would be dropped and we would never poll it again. This replaces the drain waker system with a signalling Rc that always resolves when the refcount is about to become 1.

Fix verified by running the serve tests in a loop:

```
for i in {0..100}; do
  cargo run --features=__http_tracing -- test -A --unstable \
    '/Users/matt/Documents/github/deno/deno/cli/tests/unit/serve_test.ts' \
    --filter httpServerExplicitResourceManagement
done
```
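For context, here is a minimal, hypothetical sketch of the signalling-Rc idea. The real `SignallingRc` lives in `ext/http/service.rs` and may differ in detail; this sketch only illustrates the mechanism the commit message describes: a single-threaded `Rc` wrapper that registers a waker on each poll and wakes it when a clone is dropped and the strong count is about to become 1.

```rust
use std::cell::RefCell;
use std::ops::Deref;
use std::rc::Rc;
use std::task::{Context, Poll, Waker};

// Hypothetical inner storage: the wrapped value plus the waker of the
// task waiting for all other references to go away.
struct Inner<T> {
  value: T,
  waker: RefCell<Option<Waker>>,
}

pub struct SignallingRc<T>(Rc<Inner<T>>);

impl<T> SignallingRc<T> {
  pub fn new(value: T) -> Self {
    Self(Rc::new(Inner {
      value,
      waker: RefCell::new(None),
    }))
  }

  /// Resolves once the caller holds the only remaining reference.
  pub fn poll_complete(&self, cx: &mut Context<'_>) -> Poll<()> {
    if Rc::strong_count(&self.0) == 1 {
      Poll::Ready(())
    } else {
      // Re-register on every poll so a clone dropped after this check
      // still finds a waker to wake. This is what closes the race that
      // a separate drain waker could lose.
      *self.0.waker.borrow_mut() = Some(cx.waker().clone());
      Poll::Pending
    }
  }
}

impl<T> Clone for SignallingRc<T> {
  fn clone(&self) -> Self {
    Self(self.0.clone())
  }
}

impl<T> Drop for SignallingRc<T> {
  fn drop(&mut self) {
    // A strong count of 2 here means it becomes 1 after this drop:
    // signal the task parked in poll_complete.
    if Rc::strong_count(&self.0) == 2 {
      if let Some(waker) = self.0.waker.borrow_mut().take() {
        waker.wake();
      }
    }
  }
}

impl<T> Deref for SignallingRc<T> {
  type Target = T;
  fn deref(&self) -> &T {
    &self.0.value
  }
}
```

With something like this in place, the graceful-shutdown path can wait via `poll_fn(|cx| server_state.poll_complete(cx)).await` (as the diff below does): the wakeup is tied directly to the refcount transition, so there is no window in which the last connection can finish without the waiting task being woken.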
Diffstat (limited to 'ext/http/http_next.rs')
-rw-r--r--  ext/http/http_next.rs  13
1 file changed, 10 insertions(+), 3 deletions(-)
diff --git a/ext/http/http_next.rs b/ext/http/http_next.rs
index 9504a6fa4..db63601a0 100644
--- a/ext/http/http_next.rs
+++ b/ext/http/http_next.rs
@@ -10,15 +10,18 @@ use crate::request_properties::HttpPropertyExtractor;
 use crate::response_body::Compression;
 use crate::response_body::ResponseBytesInner;
 use crate::service::handle_request;
+use crate::service::http_general_trace;
 use crate::service::http_trace;
 use crate::service::HttpRecord;
 use crate::service::HttpRecordResponse;
 use crate::service::HttpRequestBodyAutocloser;
 use crate::service::HttpServerState;
+use crate::service::SignallingRc;
 use crate::websocket_upgrade::WebSocketUpgrade;
 use crate::LocalExecutor;
 use cache_control::CacheControl;
 use deno_core::error::AnyError;
+use deno_core::futures::future::poll_fn;
 use deno_core::futures::TryFutureExt;
 use deno_core::op2;
 use deno_core::serde_v8::from_v8;
@@ -924,7 +927,7 @@ where
 struct HttpLifetime {
   connection_cancel_handle: Rc<CancelHandle>,
   listen_cancel_handle: Rc<CancelHandle>,
-  server_state: Rc<HttpServerState>,
+  server_state: SignallingRc<HttpServerState>,
 }

 struct HttpJoinHandle {
@@ -932,7 +935,7 @@ struct HttpJoinHandle {
   connection_cancel_handle: Rc<CancelHandle>,
   listen_cancel_handle: Rc<CancelHandle>,
   rx: AsyncRefCell<tokio::sync::mpsc::Receiver<Rc<HttpRecord>>>,
-  server_state: Rc<HttpServerState>,
+  server_state: SignallingRc<HttpServerState>,
 }

 impl HttpJoinHandle {
@@ -1179,6 +1182,7 @@ pub async fn op_http_close(
     .take::<HttpJoinHandle>(rid)?;

   if graceful {
+    http_general_trace!("graceful shutdown");
     // TODO(bartlomieju): replace with `state.feature_checker.check_or_exit`
     // once we phase out `check_or_exit_with_legacy_fallback`
     state
@@ -1191,8 +1195,9 @@ pub async fn op_http_close(

     // In a graceful shutdown, we close the listener and allow all the remaining connections to drain
     join_handle.listen_cancel_handle().cancel();
-    join_handle.server_state.drain().await;
+    poll_fn(|cx| join_handle.server_state.poll_complete(cx)).await;
   } else {
+    http_general_trace!("forceful shutdown");
     // In a forceful shutdown, we close everything
     join_handle.listen_cancel_handle().cancel();
     join_handle.connection_cancel_handle().cancel();
@@ -1200,6 +1205,8 @@ pub async fn op_http_close(
     tokio::task::yield_now().await;
   }

+  http_general_trace!("awaiting shutdown");
+
   let mut join_handle = RcRef::map(&join_handle, |this| &this.join_handle)
     .borrow_mut()
     .await;