author | Bartek Iwańczuk <biwanczuk@gmail.com> | 2023-12-27 17:59:57 +0100 |
---|---|---|
committer | GitHub <noreply@github.com> | 2023-12-27 11:59:57 -0500 |
commit | c2414db1f68d27db8ca6f192f0ff877f1394164c (patch) | |
tree | 528da9796f400557204bfdb8e4d44d64173036ce /ext/http/lib.rs | |
parent | 33acd437f52b418a8413a302dd8902abad2eabec (diff) | |
refactor: simplify hyper, http, h2 deps (#21715)
The main changes are:
- "hyper" has been renamed to "hyper_v014" to signal that it's legacy
- "hyper1" has been renamed to "hyper" and should be the default
Diffstat (limited to 'ext/http/lib.rs')
-rw-r--r-- | ext/http/lib.rs | 85 |
1 file changed, 44 insertions, 41 deletions
```diff
diff --git a/ext/http/lib.rs b/ext/http/lib.rs
index cae2fcfcc..c2607e4f9 100644
--- a/ext/http/lib.rs
+++ b/ext/http/lib.rs
@@ -41,18 +41,18 @@ use deno_net::raw::NetworkStream;
 use deno_websocket::ws_create_server_stream;
 use flate2::write::GzEncoder;
 use flate2::Compression;
-use hyper::body::Bytes;
-use hyper::body::HttpBody;
-use hyper::body::SizeHint;
-use hyper::header::HeaderName;
-use hyper::header::HeaderValue;
-use hyper::server::conn::Http;
-use hyper::service::Service;
-use hyper::Body;
-use hyper::HeaderMap;
-use hyper::Request;
-use hyper::Response;
 use hyper_util::rt::TokioIo;
+use hyper_v014::body::Bytes;
+use hyper_v014::body::HttpBody;
+use hyper_v014::body::SizeHint;
+use hyper_v014::header::HeaderName;
+use hyper_v014::header::HeaderValue;
+use hyper_v014::server::conn::Http;
+use hyper_v014::service::Service;
+use hyper_v014::Body;
+use hyper_v014::HeaderMap;
+use hyper_v014::Request;
+use hyper_v014::Response;
 use serde::Serialize;
 use std::borrow::Cow;
 use std::cell::RefCell;
@@ -157,7 +157,7 @@ struct HttpConnResource {
   addr: HttpSocketAddr,
   scheme: &'static str,
   acceptors_tx: mpsc::UnboundedSender<HttpAcceptor>,
-  closed_fut: Shared<RemoteHandle<Result<(), Arc<hyper::Error>>>>,
+  closed_fut: Shared<RemoteHandle<Result<(), Arc<hyper_v014::Error>>>>,
   cancel_handle: Rc<CancelHandle>, // Closes gracefully and cancels accept ops.
 }
 
@@ -470,10 +470,10 @@ impl Default for HttpResponseWriter {
   }
 }
 
-struct BodyUncompressedSender(Option<hyper::body::Sender>);
+struct BodyUncompressedSender(Option<hyper_v014::body::Sender>);
 
 impl BodyUncompressedSender {
-  fn sender(&mut self) -> &mut hyper::body::Sender {
+  fn sender(&mut self) -> &mut hyper_v014::body::Sender {
     // This is safe because we only ever take the sender out of the option
     // inside of the shutdown method.
     self.0.as_mut().unwrap()
@@ -486,8 +486,8 @@ impl BodyUncompressedSender {
   }
 }
 
-impl From<hyper::body::Sender> for BodyUncompressedSender {
-  fn from(sender: hyper::body::Sender) -> Self {
+impl From<hyper_v014::body::Sender> for BodyUncompressedSender {
+  fn from(sender: hyper_v014::body::Sender) -> Self {
     BodyUncompressedSender(Some(sender))
   }
 }
@@ -535,7 +535,7 @@ async fn op_http_accept(
 }
 
 fn req_url(
-  req: &hyper::Request<hyper::Body>,
+  req: &hyper_v014::Request<hyper_v014::Body>,
   scheme: &'static str,
   addr: &HttpSocketAddr,
 ) -> String {
@@ -601,7 +601,7 @@ fn req_headers(
 
   let mut headers = Vec::with_capacity(header_map.len());
   for (name, value) in header_map.iter() {
-    if name == hyper::header::COOKIE {
+    if name == hyper_v014::header::COOKIE {
       cookies.push(value.as_bytes());
     } else {
       let name: &[u8] = name.as_ref();
@@ -657,10 +657,10 @@ async fn op_http_write_headers(
   if compressing {
     weaken_etag(hmap);
     // Drop 'content-length' header. Hyper will update it using compressed body.
-    hmap.remove(hyper::header::CONTENT_LENGTH);
+    hmap.remove(hyper_v014::header::CONTENT_LENGTH);
     // Content-Encoding header
     hmap.insert(
-      hyper::header::CONTENT_ENCODING,
+      hyper_v014::header::CONTENT_ENCODING,
       HeaderValue::from_static(match encoding {
         Encoding::Brotli => "br",
         Encoding::Gzip => "gzip",
@@ -708,7 +708,7 @@ fn http_response(
   data: Option<StringOrBuffer>,
   compressing: bool,
   encoding: Encoding,
-) -> Result<(HttpResponseWriter, hyper::Body), AnyError> {
+) -> Result<(HttpResponseWriter, hyper_v014::Body), AnyError> {
   // Gzip, after level 1, doesn't produce significant size difference.
   // This default matches nginx default gzip compression level (1):
   // https://nginx.org/en/docs/http/ngx_http_gzip_module.html#gzip_comp_level
@@ -780,8 +780,8 @@ fn http_response(
 
 // If user provided a ETag header for uncompressed data, we need to
 // ensure it is a Weak Etag header ("W/").
-fn weaken_etag(hmap: &mut hyper::HeaderMap) {
-  if let Some(etag) = hmap.get_mut(hyper::header::ETAG) {
+fn weaken_etag(hmap: &mut hyper_v014::HeaderMap) {
+  if let Some(etag) = hmap.get_mut(hyper_v014::header::ETAG) {
     if !etag.as_bytes().starts_with(b"W/") {
       let mut v = Vec::with_capacity(etag.as_bytes().len() + 2);
       v.extend(b"W/");
@@ -795,8 +795,8 @@ fn weaken_etag(hmap: &mut hyper_v014::HeaderMap) {
 // Note: we set the header irrespective of whether or not we compress the data
 // to make sure cache services do not serve uncompressed data to clients that
 // support compression.
-fn ensure_vary_accept_encoding(hmap: &mut hyper::HeaderMap) {
-  if let Some(v) = hmap.get_mut(hyper::header::VARY) {
+fn ensure_vary_accept_encoding(hmap: &mut hyper_v014::HeaderMap) {
+  if let Some(v) = hmap.get_mut(hyper_v014::header::VARY) {
     if let Ok(s) = v.to_str() {
       if !s.to_lowercase().contains("accept-encoding") {
         *v = format!("Accept-Encoding, {s}").try_into().unwrap()
@@ -805,15 +805,17 @@ fn ensure_vary_accept_encoding(hmap: &mut hyper_v014::HeaderMap) {
     }
   }
   hmap.insert(
-    hyper::header::VARY,
+    hyper_v014::header::VARY,
    HeaderValue::from_static("Accept-Encoding"),
   );
 }
 
-fn should_compress(headers: &hyper::HeaderMap) -> bool {
+fn should_compress(headers: &hyper_v014::HeaderMap) -> bool {
   // skip compression if the cache-control header value is set to "no-transform" or not utf8
-  fn cache_control_no_transform(headers: &hyper::HeaderMap) -> Option<bool> {
-    let v = headers.get(hyper::header::CACHE_CONTROL)?;
+  fn cache_control_no_transform(
+    headers: &hyper_v014::HeaderMap,
+  ) -> Option<bool> {
+    let v = headers.get(hyper_v014::header::CACHE_CONTROL)?;
     let s = match std::str::from_utf8(v.as_bytes()) {
       Ok(s) => s,
       Err(_) => return Some(true),
@@ -824,15 +826,16 @@ fn should_compress(headers: &hyper_v014::HeaderMap) -> bool {
   // we skip compression if the `content-range` header value is set, as it
   // indicates the contents of the body were negotiated based directly
   // with the user code and we can't compress the response
-  let content_range = headers.contains_key(hyper::header::CONTENT_RANGE);
+  let content_range = headers.contains_key(hyper_v014::header::CONTENT_RANGE);
   // assume body is already compressed if Content-Encoding header present, thus avoid recompressing
-  let is_precompressed = headers.contains_key(hyper::header::CONTENT_ENCODING);
+  let is_precompressed =
+    headers.contains_key(hyper_v014::header::CONTENT_ENCODING);
 
   !content_range
     && !is_precompressed
     && !cache_control_no_transform(headers).unwrap_or_default()
     && headers
-      .get(hyper::header::CONTENT_TYPE)
+      .get(hyper_v014::header::CONTENT_TYPE)
       .map(compressible::is_content_compressible)
       .unwrap_or_default()
 }
@@ -1016,7 +1019,7 @@ async fn op_http_upgrade_websocket(
   };
 
   let (transport, bytes) =
-    extract_network_stream(hyper::upgrade::on(request).await?);
+    extract_network_stream(hyper_v014::upgrade::on(request).await?);
   let ws_rid =
     ws_create_server_stream(&mut state.borrow_mut(), transport, bytes)?;
   Ok(ws_rid)
@@ -1026,7 +1029,7 @@ async fn op_http_upgrade_websocket(
 #[derive(Clone)]
 struct LocalExecutor;
 
-impl<Fut> hyper::rt::Executor<Fut> for LocalExecutor
+impl<Fut> hyper_v014::rt::Executor<Fut> for LocalExecutor
 where
   Fut: Future + 'static,
   Fut::Output: 'static,
@@ -1036,7 +1039,7 @@ where
   }
 }
 
-impl<Fut> hyper1::rt::Executor<Fut> for LocalExecutor
+impl<Fut> hyper::rt::Executor<Fut> for LocalExecutor
 where
   Fut: Future + 'static,
   Fut::Output: 'static,
@@ -1052,8 +1055,8 @@ fn http_error(message: &'static str) -> AnyError {
 
 /// Filters out the ever-surprising 'shutdown ENOTCONN' errors.
 fn filter_enotconn(
-  result: Result<(), hyper::Error>,
-) -> Result<(), hyper::Error> {
+  result: Result<(), hyper_v014::Error>,
+) -> Result<(), hyper_v014::Error> {
   if result
     .as_ref()
     .err()
@@ -1079,21 +1082,21 @@ trait CanDowncastUpgrade: Sized {
   ) -> Result<(T, Bytes), Self>;
 }
 
-impl CanDowncastUpgrade for hyper1::upgrade::Upgraded {
+impl CanDowncastUpgrade for hyper::upgrade::Upgraded {
   fn downcast<T: AsyncRead + AsyncWrite + Unpin + 'static>(
     self,
   ) -> Result<(T, Bytes), Self> {
-    let hyper1::upgrade::Parts { io, read_buf, .. } =
+    let hyper::upgrade::Parts { io, read_buf, .. } =
       self.downcast::<TokioIo<T>>()?;
     Ok((io.into_inner(), read_buf))
   }
 }
 
-impl CanDowncastUpgrade for hyper::upgrade::Upgraded {
+impl CanDowncastUpgrade for hyper_v014::upgrade::Upgraded {
   fn downcast<T: AsyncRead + AsyncWrite + Unpin + 'static>(
     self,
   ) -> Result<(T, Bytes), Self> {
-    let hyper::upgrade::Parts { io, read_buf, .. } = self.downcast()?;
+    let hyper_v014::upgrade::Parts { io, read_buf, .. } = self.downcast()?;
     Ok((io, read_buf))
   }
 }
```
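
As a side note on the `LocalExecutor` changes near the end of the diff: each hyper major version defines its own `rt::Executor` trait, so a single spawner type needs one impl per version to serve both the legacy and the new code paths. Below is a minimal, self-contained sketch of that pattern; it is hypothetical (the type name `LocalSpawner` is invented, and `tokio::task::spawn_local` stands in for the spawn call the real code routes through `deno_core`):

```rust
use std::future::Future;

// A spawner for futures that are not `Send`; mirrors the shape of the
// `LocalExecutor` type touched by this diff.
#[derive(Clone)]
struct LocalSpawner;

// One impl per hyper major version: `hyper_v014` is the renamed 0.14 crate,
// `hyper` is 1.x, matching the aliases this PR introduces.
impl<Fut> hyper_v014::rt::Executor<Fut> for LocalSpawner
where
  Fut: Future + 'static,
  Fut::Output: 'static,
{
  fn execute(&self, fut: Fut) {
    // Must run inside a tokio `LocalSet`; the future does not need `Send`.
    tokio::task::spawn_local(fut);
  }
}

impl<Fut> hyper::rt::Executor<Fut> for LocalSpawner
where
  Fut: Future + 'static,
  Fut::Output: 'static,
{
  fn execute(&self, fut: Fut) {
    tokio::task::spawn_local(fut);
  }
}
```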