summaryrefslogtreecommitdiff
path: root/ext
diff options
context:
space:
mode:
authorBartek Iwańczuk <biwanczuk@gmail.com>2023-12-27 17:59:57 +0100
committerGitHub <noreply@github.com>2023-12-27 11:59:57 -0500
commitc2414db1f68d27db8ca6f192f0ff877f1394164c (patch)
tree528da9796f400557204bfdb8e4d44d64173036ce /ext
parent33acd437f52b418a8413a302dd8902abad2eabec (diff)
refactor: simplify hyper, http, h2 deps (#21715)
Main change is that: - "hyper" has been renamed to "hyper_v014" to signal that it's legacy - "hyper1" has been renamed to "hyper" and should be the default
Diffstat (limited to 'ext')
-rw-r--r--ext/fetch/Cargo.toml2
-rw-r--r--ext/fetch/fs_fetch_handler.rs2
-rw-r--r--ext/fetch/lib.rs13
-rw-r--r--ext/http/Cargo.toml6
-rw-r--r--ext/http/fly_accept_encoding.rs15
-rw-r--r--ext/http/http_next.rs44
-rw-r--r--ext/http/lib.rs85
-rw-r--r--ext/http/request_body.rs6
-rw-r--r--ext/http/request_properties.rs6
-rw-r--r--ext/http/response_body.rs6
-rw-r--r--ext/http/service.rs51
-rw-r--r--ext/http/websocket_upgrade.rs8
-rw-r--r--ext/node/Cargo.toml4
-rw-r--r--ext/node/ops/http2.rs14
-rw-r--r--ext/websocket/Cargo.toml6
-rw-r--r--ext/websocket/lib.rs2
-rw-r--r--ext/websocket/stream.rs2
17 files changed, 138 insertions, 134 deletions
diff --git a/ext/fetch/Cargo.toml b/ext/fetch/Cargo.toml
index 24d20b048..af4af29a6 100644
--- a/ext/fetch/Cargo.toml
+++ b/ext/fetch/Cargo.toml
@@ -19,7 +19,7 @@ data-url.workspace = true
deno_core.workspace = true
deno_tls.workspace = true
dyn-clone = "1"
-http.workspace = true
+http_v02.workspace = true
pin-project.workspace = true
reqwest.workspace = true
serde.workspace = true
diff --git a/ext/fetch/fs_fetch_handler.rs b/ext/fetch/fs_fetch_handler.rs
index 83880c4ca..76ef5ea24 100644
--- a/ext/fetch/fs_fetch_handler.rs
+++ b/ext/fetch/fs_fetch_handler.rs
@@ -31,7 +31,7 @@ impl FetchHandler for FsFetchHandler {
let file = tokio::fs::File::open(path).map_err(|_| ()).await?;
let stream = ReaderStream::new(file);
let body = reqwest::Body::wrap_stream(stream);
- let response = http::Response::builder()
+ let response = http_v02::Response::builder()
.status(StatusCode::OK)
.body(body)
.map_err(|_| ())?
diff --git a/ext/fetch/lib.rs b/ext/fetch/lib.rs
index 737bc45c1..6a2ac2ef9 100644
--- a/ext/fetch/lib.rs
+++ b/ext/fetch/lib.rs
@@ -44,8 +44,8 @@ use deno_tls::Proxy;
use deno_tls::RootCertStoreProvider;
use data_url::DataUrl;
-use http::header::CONTENT_LENGTH;
-use http::Uri;
+use http_v02::header::CONTENT_LENGTH;
+use http_v02::Uri;
use reqwest::header::HeaderMap;
use reqwest::header::HeaderName;
use reqwest::header::HeaderValue;
@@ -416,9 +416,12 @@ where
.decode_to_vec()
.map_err(|e| type_error(format!("{e:?}")))?;
- let response = http::Response::builder()
- .status(http::StatusCode::OK)
- .header(http::header::CONTENT_TYPE, data_url.mime_type().to_string())
+ let response = http_v02::Response::builder()
+ .status(http_v02::StatusCode::OK)
+ .header(
+ http_v02::header::CONTENT_TYPE,
+ data_url.mime_type().to_string(),
+ )
.body(reqwest::Body::from(body))?;
let fut = async move { Ok(Ok(Response::from(response))) };
diff --git a/ext/http/Cargo.toml b/ext/http/Cargo.toml
index 8192cf31e..a10a79449 100644
--- a/ext/http/Cargo.toml
+++ b/ext/http/Cargo.toml
@@ -32,11 +32,11 @@ deno_net.workspace = true
deno_websocket.workspace = true
flate2.workspace = true
http.workspace = true
-http_1 = { package = "http", version = "=1.0.0" }
+http_v02.workspace = true
httparse.workspace = true
-hyper = { workspace = true, features = ["server", "stream", "http1", "http2", "runtime"] }
+hyper.workspace = true
hyper-util.workspace = true
-hyper1.workspace = true
+hyper_v014 = { workspace = true, features = ["server", "stream", "http1", "http2", "runtime"] }
itertools = "0.10"
memmem.workspace = true
mime = "0.3.16"
diff --git a/ext/http/fly_accept_encoding.rs b/ext/http/fly_accept_encoding.rs
index af687c254..d48410d41 100644
--- a/ext/http/fly_accept_encoding.rs
+++ b/ext/http/fly_accept_encoding.rs
@@ -3,7 +3,6 @@
// Forked from https://github.com/superfly/accept-encoding/blob/1cded757ec7ff3916e5bfe7441db76cdc48170dc/
// Forked to support both http 0.2 and http 1.0 crates.
-use http as http_02;
use itertools::Itertools;
/// A list enumerating the categories of errors in this crate.
@@ -78,10 +77,10 @@ pub fn preferred(
///
/// Compatible with `http` crate for version 0.2.x.
pub fn encodings_iter_http_02(
- headers: &http_02::HeaderMap,
+ headers: &http_v02::HeaderMap,
) -> impl Iterator<Item = Result<(Option<Encoding>, f32), EncodingError>> + '_ {
let iter = headers
- .get_all(http_02::header::ACCEPT_ENCODING)
+ .get_all(http_v02::header::ACCEPT_ENCODING)
.iter()
.map(|hval| hval.to_str().map_err(|_| EncodingError::InvalidEncoding));
encodings_iter_inner(iter)
@@ -91,10 +90,10 @@ pub fn encodings_iter_http_02(
///
/// Compatible with `http` crate for version 1.x.
pub fn encodings_iter_http_1(
- headers: &http_1::HeaderMap,
+ headers: &http::HeaderMap,
) -> impl Iterator<Item = Result<(Option<Encoding>, f32), EncodingError>> + '_ {
let iter = headers
- .get_all(http_1::header::ACCEPT_ENCODING)
+ .get_all(http::header::ACCEPT_ENCODING)
.iter()
.map(|hval| hval.to_str().map_err(|_| EncodingError::InvalidEncoding));
encodings_iter_inner(iter)
@@ -126,9 +125,9 @@ fn encodings_iter_inner<'s>(
#[cfg(test)]
mod tests {
use super::*;
- use http::header::ACCEPT_ENCODING;
- use http::HeaderMap;
- use http::HeaderValue;
+ use http_v02::header::ACCEPT_ENCODING;
+ use http_v02::HeaderMap;
+ use http_v02::HeaderValue;
fn encodings(
headers: &HeaderMap,
diff --git a/ext/http/http_next.rs b/ext/http/http_next.rs
index 11efdad7b..b47e22fcc 100644
--- a/ext/http/http_next.rs
+++ b/ext/http/http_next.rs
@@ -43,22 +43,22 @@ use deno_core::ResourceId;
use deno_net::ops_tls::TlsStream;
use deno_net::raw::NetworkStream;
use deno_websocket::ws_create_server_stream;
-use hyper1::body::Incoming;
-use hyper1::header::HeaderMap;
-use hyper1::header::ACCEPT_ENCODING;
-use hyper1::header::CACHE_CONTROL;
-use hyper1::header::CONTENT_ENCODING;
-use hyper1::header::CONTENT_LENGTH;
-use hyper1::header::CONTENT_RANGE;
-use hyper1::header::CONTENT_TYPE;
-use hyper1::header::COOKIE;
-use hyper1::http::HeaderName;
-use hyper1::http::HeaderValue;
-use hyper1::server::conn::http1;
-use hyper1::server::conn::http2;
-use hyper1::service::service_fn;
-use hyper1::service::HttpService;
-use hyper1::StatusCode;
+use hyper::body::Incoming;
+use hyper::header::HeaderMap;
+use hyper::header::ACCEPT_ENCODING;
+use hyper::header::CACHE_CONTROL;
+use hyper::header::CONTENT_ENCODING;
+use hyper::header::CONTENT_LENGTH;
+use hyper::header::CONTENT_RANGE;
+use hyper::header::CONTENT_TYPE;
+use hyper::header::COOKIE;
+use hyper::http::HeaderName;
+use hyper::http::HeaderValue;
+use hyper::server::conn::http1;
+use hyper::server::conn::http2;
+use hyper::service::service_fn;
+use hyper::service::HttpService;
+use hyper::StatusCode;
use hyper_util::rt::TokioIo;
use once_cell::sync::Lazy;
use smallvec::SmallVec;
@@ -77,7 +77,7 @@ use fly_accept_encoding::Encoding;
use tokio::io::AsyncReadExt;
use tokio::io::AsyncWriteExt;
-type Request = hyper1::Request<Incoming>;
+type Request = hyper::Request<Incoming>;
static USE_WRITEV: Lazy<bool> = Lazy::new(|| {
let enable = std::env::var("DENO_USE_WRITEV").ok();
@@ -635,7 +635,7 @@ fn modify_compressibility_from_response(
/// If the user provided a ETag header for uncompressed data, we need to ensure it is a
/// weak Etag header ("W/").
fn weaken_etag(hmap: &mut HeaderMap) {
- if let Some(etag) = hmap.get_mut(hyper1::header::ETAG) {
+ if let Some(etag) = hmap.get_mut(hyper::header::ETAG) {
if !etag.as_bytes().starts_with(b"W/") {
let mut v = Vec::with_capacity(etag.as_bytes().len() + 2);
v.extend(b"W/");
@@ -650,7 +650,7 @@ fn weaken_etag(hmap: &mut HeaderMap) {
// to make sure cache services do not serve uncompressed data to clients that
// support compression.
fn ensure_vary_accept_encoding(hmap: &mut HeaderMap) {
- if let Some(v) = hmap.get_mut(hyper1::header::VARY) {
+ if let Some(v) = hmap.get_mut(hyper::header::VARY) {
if let Ok(s) = v.to_str() {
if !s.to_lowercase().contains("accept-encoding") {
*v = format!("Accept-Encoding, {s}").try_into().unwrap()
@@ -659,7 +659,7 @@ fn ensure_vary_accept_encoding(hmap: &mut HeaderMap) {
}
}
hmap.insert(
- hyper1::header::VARY,
+ hyper::header::VARY,
HeaderValue::from_static("Accept-Encoding"),
);
}
@@ -791,7 +791,7 @@ fn serve_http11_unconditional(
io: impl HttpServeStream,
svc: impl HttpService<Incoming, ResBody = HttpRecordResponse> + 'static,
cancel: Rc<CancelHandle>,
-) -> impl Future<Output = Result<(), hyper1::Error>> + 'static {
+) -> impl Future<Output = Result<(), hyper::Error>> + 'static {
let conn = http1::Builder::new()
.keep_alive(true)
.writev(*USE_WRITEV)
@@ -813,7 +813,7 @@ fn serve_http2_unconditional(
io: impl HttpServeStream,
svc: impl HttpService<Incoming, ResBody = HttpRecordResponse> + 'static,
cancel: Rc<CancelHandle>,
-) -> impl Future<Output = Result<(), hyper1::Error>> + 'static {
+) -> impl Future<Output = Result<(), hyper::Error>> + 'static {
let conn =
http2::Builder::new(LocalExecutor).serve_connection(TokioIo::new(io), svc);
async {
diff --git a/ext/http/lib.rs b/ext/http/lib.rs
index cae2fcfcc..c2607e4f9 100644
--- a/ext/http/lib.rs
+++ b/ext/http/lib.rs
@@ -41,18 +41,18 @@ use deno_net::raw::NetworkStream;
use deno_websocket::ws_create_server_stream;
use flate2::write::GzEncoder;
use flate2::Compression;
-use hyper::body::Bytes;
-use hyper::body::HttpBody;
-use hyper::body::SizeHint;
-use hyper::header::HeaderName;
-use hyper::header::HeaderValue;
-use hyper::server::conn::Http;
-use hyper::service::Service;
-use hyper::Body;
-use hyper::HeaderMap;
-use hyper::Request;
-use hyper::Response;
use hyper_util::rt::TokioIo;
+use hyper_v014::body::Bytes;
+use hyper_v014::body::HttpBody;
+use hyper_v014::body::SizeHint;
+use hyper_v014::header::HeaderName;
+use hyper_v014::header::HeaderValue;
+use hyper_v014::server::conn::Http;
+use hyper_v014::service::Service;
+use hyper_v014::Body;
+use hyper_v014::HeaderMap;
+use hyper_v014::Request;
+use hyper_v014::Response;
use serde::Serialize;
use std::borrow::Cow;
use std::cell::RefCell;
@@ -157,7 +157,7 @@ struct HttpConnResource {
addr: HttpSocketAddr,
scheme: &'static str,
acceptors_tx: mpsc::UnboundedSender<HttpAcceptor>,
- closed_fut: Shared<RemoteHandle<Result<(), Arc<hyper::Error>>>>,
+ closed_fut: Shared<RemoteHandle<Result<(), Arc<hyper_v014::Error>>>>,
cancel_handle: Rc<CancelHandle>, // Closes gracefully and cancels accept ops.
}
@@ -470,10 +470,10 @@ impl Default for HttpResponseWriter {
}
}
-struct BodyUncompressedSender(Option<hyper::body::Sender>);
+struct BodyUncompressedSender(Option<hyper_v014::body::Sender>);
impl BodyUncompressedSender {
- fn sender(&mut self) -> &mut hyper::body::Sender {
+ fn sender(&mut self) -> &mut hyper_v014::body::Sender {
// This is safe because we only ever take the sender out of the option
// inside of the shutdown method.
self.0.as_mut().unwrap()
@@ -486,8 +486,8 @@ impl BodyUncompressedSender {
}
}
-impl From<hyper::body::Sender> for BodyUncompressedSender {
- fn from(sender: hyper::body::Sender) -> Self {
+impl From<hyper_v014::body::Sender> for BodyUncompressedSender {
+ fn from(sender: hyper_v014::body::Sender) -> Self {
BodyUncompressedSender(Some(sender))
}
}
@@ -535,7 +535,7 @@ async fn op_http_accept(
}
fn req_url(
- req: &hyper::Request<hyper::Body>,
+ req: &hyper_v014::Request<hyper_v014::Body>,
scheme: &'static str,
addr: &HttpSocketAddr,
) -> String {
@@ -601,7 +601,7 @@ fn req_headers(
let mut headers = Vec::with_capacity(header_map.len());
for (name, value) in header_map.iter() {
- if name == hyper::header::COOKIE {
+ if name == hyper_v014::header::COOKIE {
cookies.push(value.as_bytes());
} else {
let name: &[u8] = name.as_ref();
@@ -657,10 +657,10 @@ async fn op_http_write_headers(
if compressing {
weaken_etag(hmap);
// Drop 'content-length' header. Hyper will update it using compressed body.
- hmap.remove(hyper::header::CONTENT_LENGTH);
+ hmap.remove(hyper_v014::header::CONTENT_LENGTH);
// Content-Encoding header
hmap.insert(
- hyper::header::CONTENT_ENCODING,
+ hyper_v014::header::CONTENT_ENCODING,
HeaderValue::from_static(match encoding {
Encoding::Brotli => "br",
Encoding::Gzip => "gzip",
@@ -708,7 +708,7 @@ fn http_response(
data: Option<StringOrBuffer>,
compressing: bool,
encoding: Encoding,
-) -> Result<(HttpResponseWriter, hyper::Body), AnyError> {
+) -> Result<(HttpResponseWriter, hyper_v014::Body), AnyError> {
// Gzip, after level 1, doesn't produce significant size difference.
// This default matches nginx default gzip compression level (1):
// https://nginx.org/en/docs/http/ngx_http_gzip_module.html#gzip_comp_level
@@ -780,8 +780,8 @@ fn http_response(
// If user provided a ETag header for uncompressed data, we need to
// ensure it is a Weak Etag header ("W/").
-fn weaken_etag(hmap: &mut hyper::HeaderMap) {
- if let Some(etag) = hmap.get_mut(hyper::header::ETAG) {
+fn weaken_etag(hmap: &mut hyper_v014::HeaderMap) {
+ if let Some(etag) = hmap.get_mut(hyper_v014::header::ETAG) {
if !etag.as_bytes().starts_with(b"W/") {
let mut v = Vec::with_capacity(etag.as_bytes().len() + 2);
v.extend(b"W/");
@@ -795,8 +795,8 @@ fn weaken_etag(hmap: &mut hyper::HeaderMap) {
// Note: we set the header irrespective of whether or not we compress the data
// to make sure cache services do not serve uncompressed data to clients that
// support compression.
-fn ensure_vary_accept_encoding(hmap: &mut hyper::HeaderMap) {
- if let Some(v) = hmap.get_mut(hyper::header::VARY) {
+fn ensure_vary_accept_encoding(hmap: &mut hyper_v014::HeaderMap) {
+ if let Some(v) = hmap.get_mut(hyper_v014::header::VARY) {
if let Ok(s) = v.to_str() {
if !s.to_lowercase().contains("accept-encoding") {
*v = format!("Accept-Encoding, {s}").try_into().unwrap()
@@ -805,15 +805,17 @@ fn ensure_vary_accept_encoding(hmap: &mut hyper::HeaderMap) {
}
}
hmap.insert(
- hyper::header::VARY,
+ hyper_v014::header::VARY,
HeaderValue::from_static("Accept-Encoding"),
);
}
-fn should_compress(headers: &hyper::HeaderMap) -> bool {
+fn should_compress(headers: &hyper_v014::HeaderMap) -> bool {
// skip compression if the cache-control header value is set to "no-transform" or not utf8
- fn cache_control_no_transform(headers: &hyper::HeaderMap) -> Option<bool> {
- let v = headers.get(hyper::header::CACHE_CONTROL)?;
+ fn cache_control_no_transform(
+ headers: &hyper_v014::HeaderMap,
+ ) -> Option<bool> {
+ let v = headers.get(hyper_v014::header::CACHE_CONTROL)?;
let s = match std::str::from_utf8(v.as_bytes()) {
Ok(s) => s,
Err(_) => return Some(true),
@@ -824,15 +826,16 @@ fn should_compress(headers: &hyper::HeaderMap) -> bool {
// we skip compression if the `content-range` header value is set, as it
// indicates the contents of the body were negotiated based directly
// with the user code and we can't compress the response
- let content_range = headers.contains_key(hyper::header::CONTENT_RANGE);
+ let content_range = headers.contains_key(hyper_v014::header::CONTENT_RANGE);
// assume body is already compressed if Content-Encoding header present, thus avoid recompressing
- let is_precompressed = headers.contains_key(hyper::header::CONTENT_ENCODING);
+ let is_precompressed =
+ headers.contains_key(hyper_v014::header::CONTENT_ENCODING);
!content_range
&& !is_precompressed
&& !cache_control_no_transform(headers).unwrap_or_default()
&& headers
- .get(hyper::header::CONTENT_TYPE)
+ .get(hyper_v014::header::CONTENT_TYPE)
.map(compressible::is_content_compressible)
.unwrap_or_default()
}
@@ -1016,7 +1019,7 @@ async fn op_http_upgrade_websocket(
};
let (transport, bytes) =
- extract_network_stream(hyper::upgrade::on(request).await?);
+ extract_network_stream(hyper_v014::upgrade::on(request).await?);
let ws_rid =
ws_create_server_stream(&mut state.borrow_mut(), transport, bytes)?;
Ok(ws_rid)
@@ -1026,7 +1029,7 @@ async fn op_http_upgrade_websocket(
#[derive(Clone)]
struct LocalExecutor;
-impl<Fut> hyper::rt::Executor<Fut> for LocalExecutor
+impl<Fut> hyper_v014::rt::Executor<Fut> for LocalExecutor
where
Fut: Future + 'static,
Fut::Output: 'static,
@@ -1036,7 +1039,7 @@ where
}
}
-impl<Fut> hyper1::rt::Executor<Fut> for LocalExecutor
+impl<Fut> hyper::rt::Executor<Fut> for LocalExecutor
where
Fut: Future + 'static,
Fut::Output: 'static,
@@ -1052,8 +1055,8 @@ fn http_error(message: &'static str) -> AnyError {
/// Filters out the ever-surprising 'shutdown ENOTCONN' errors.
fn filter_enotconn(
- result: Result<(), hyper::Error>,
-) -> Result<(), hyper::Error> {
+ result: Result<(), hyper_v014::Error>,
+) -> Result<(), hyper_v014::Error> {
if result
.as_ref()
.err()
@@ -1079,21 +1082,21 @@ trait CanDowncastUpgrade: Sized {
) -> Result<(T, Bytes), Self>;
}
-impl CanDowncastUpgrade for hyper1::upgrade::Upgraded {
+impl CanDowncastUpgrade for hyper::upgrade::Upgraded {
fn downcast<T: AsyncRead + AsyncWrite + Unpin + 'static>(
self,
) -> Result<(T, Bytes), Self> {
- let hyper1::upgrade::Parts { io, read_buf, .. } =
+ let hyper::upgrade::Parts { io, read_buf, .. } =
self.downcast::<TokioIo<T>>()?;
Ok((io.into_inner(), read_buf))
}
}
-impl CanDowncastUpgrade for hyper::upgrade::Upgraded {
+impl CanDowncastUpgrade for hyper_v014::upgrade::Upgraded {
fn downcast<T: AsyncRead + AsyncWrite + Unpin + 'static>(
self,
) -> Result<(T, Bytes), Self> {
- let hyper::upgrade::Parts { io, read_buf, .. } = self.downcast()?;
+ let hyper_v014::upgrade::Parts { io, read_buf, .. } = self.downcast()?;
Ok((io, read_buf))
}
}
diff --git a/ext/http/request_body.rs b/ext/http/request_body.rs
index 0c3f29320..0650892b6 100644
--- a/ext/http/request_body.rs
+++ b/ext/http/request_body.rs
@@ -9,9 +9,9 @@ use deno_core::AsyncResult;
use deno_core::BufView;
use deno_core::RcRef;
use deno_core::Resource;
-use hyper1::body::Body;
-use hyper1::body::Incoming;
-use hyper1::body::SizeHint;
+use hyper::body::Body;
+use hyper::body::Incoming;
+use hyper::body::SizeHint;
use std::borrow::Cow;
use std::pin::Pin;
use std::rc::Rc;
diff --git a/ext/http/request_properties.rs b/ext/http/request_properties.rs
index eb4232005..ee4c9c58c 100644
--- a/ext/http/request_properties.rs
+++ b/ext/http/request_properties.rs
@@ -8,9 +8,9 @@ use deno_net::raw::NetworkStream;
use deno_net::raw::NetworkStreamAddress;
use deno_net::raw::NetworkStreamListener;
use deno_net::raw::NetworkStreamType;
-use hyper1::header::HOST;
-use hyper1::HeaderMap;
-use hyper1::Uri;
+use hyper::header::HOST;
+use hyper::HeaderMap;
+use hyper::Uri;
use std::borrow::Cow;
use std::net::Ipv4Addr;
use std::net::SocketAddr;
diff --git a/ext/http/response_body.rs b/ext/http/response_body.rs
index 09365681b..8219c80ba 100644
--- a/ext/http/response_body.rs
+++ b/ext/http/response_body.rs
@@ -14,9 +14,9 @@ use deno_core::AsyncResult;
use deno_core::BufView;
use deno_core::Resource;
use flate2::write::GzEncoder;
-use hyper1::body::Frame;
-use hyper1::body::SizeHint;
-use hyper1::header::HeaderMap;
+use hyper::body::Frame;
+use hyper::body::SizeHint;
+use hyper::header::HeaderMap;
use pin_project::pin_project;
/// Simplification for nested types we use for our streams. We provide a way to convert from
diff --git a/ext/http/service.rs b/ext/http/service.rs
index 20e11e67f..7e76d00d7 100644
--- a/ext/http/service.rs
+++ b/ext/http/service.rs
@@ -7,13 +7,13 @@ use deno_core::futures::ready;
use deno_core::BufView;
use deno_core::OpState;
use deno_core::ResourceId;
-use http_1::request::Parts;
-use hyper1::body::Body;
-use hyper1::body::Frame;
-use hyper1::body::Incoming;
-use hyper1::body::SizeHint;
-use hyper1::header::HeaderMap;
-use hyper1::upgrade::OnUpgrade;
+use http::request::Parts;
+use hyper::body::Body;
+use hyper::body::Frame;
+use hyper::body::Incoming;
+use hyper::body::SizeHint;
+use hyper::header::HeaderMap;
+use hyper::upgrade::OnUpgrade;
use scopeguard::guard;
use scopeguard::ScopeGuard;
@@ -29,8 +29,8 @@ use std::task::Context;
use std::task::Poll;
use std::task::Waker;
-pub type Request = hyper1::Request<Incoming>;
-pub type Response = hyper1::Response<HttpRecordResponse>;
+pub type Request = hyper::Request<Incoming>;
+pub type Response = hyper::Response<HttpRecordResponse>;
#[cfg(feature = "__http_tracing")]
pub static RECORD_COUNT: std::sync::atomic::AtomicUsize =
@@ -181,7 +181,7 @@ pub(crate) async fn handle_request(
request_info: HttpConnectionProperties,
server_state: SignallingRc<HttpServerState>, // Keep server alive for duration of this future.
tx: tokio::sync::mpsc::Sender<Rc<HttpRecord>>,
-) -> Result<Response, hyper::Error> {
+) -> Result<Response, hyper_v014::Error> {
// If the underlying TCP connection is closed, this future will be dropped
// and execution could stop at any await point.
// The HttpRecord must live until JavaScript is done processing so is wrapped
@@ -209,9 +209,9 @@ pub(crate) async fn handle_request(
struct HttpRecordInner {
server_state: SignallingRc<HttpServerState>,
request_info: HttpConnectionProperties,
- request_parts: http_1::request::Parts,
+ request_parts: http::request::Parts,
request_body: Option<RequestBodyState>,
- response_parts: Option<http_1::response::Parts>,
+ response_parts: Option<http::response::Parts>,
response_ready: bool,
response_waker: Option<Waker>,
response_body: ResponseBytesInner,
@@ -244,7 +244,7 @@ impl HttpRecord {
) -> Rc<Self> {
let (request_parts, request_body) = request.into_parts();
let request_body = Some(request_body.into());
- let (mut response_parts, _) = http_1::Response::new(()).into_parts();
+ let (mut response_parts, _) = http::Response::new(()).into_parts();
let record =
if let Some((record, headers)) = server_state.borrow_mut().pool.pop() {
response_parts.headers = headers;
@@ -425,7 +425,7 @@ impl HttpRecord {
}
/// Get a mutable reference to the response status and headers.
- pub fn response_parts(&self) -> RefMut<'_, http_1::response::Parts> {
+ pub fn response_parts(&self) -> RefMut<'_, http::response::Parts> {
RefMut::map(self.self_mut(), |inner| {
inner.response_parts.as_mut().unwrap()
})
@@ -594,18 +594,18 @@ mod tests {
use crate::response_body::ResponseBytesInner;
use bytes::Buf;
use deno_net::raw::NetworkStreamType;
- use hyper1::body::Body;
- use hyper1::service::service_fn;
- use hyper1::service::HttpService;
+ use hyper::body::Body;
+ use hyper::service::service_fn;
+ use hyper::service::HttpService;
use hyper_util::rt::TokioIo;
use std::error::Error as StdError;
/// Execute client request on service and concurrently map the response.
async fn serve_request<B, S, T, F>(
- req: http_1::Request<B>,
+ req: http::Request<B>,
service: S,
- map_response: impl FnOnce(hyper1::Response<Incoming>) -> F,
- ) -> hyper1::Result<T>
+ map_response: impl FnOnce(hyper::Response<Incoming>) -> F,
+ ) -> hyper::Result<T>
where
B: Body + Send + 'static, // Send bound due to DuplexStream
B::Data: Send,
@@ -614,10 +614,10 @@ mod tests {
S::Error: Into<Box<dyn StdError + Send + Sync>>,
S::ResBody: 'static,
<S::ResBody as Body>::Error: Into<Box<dyn StdError + Send + Sync>>,
- F: std::future::Future<Output = hyper1::Result<T>>,
+ F: std::future::Future<Output = hyper::Result<T>>,
{
- use hyper1::client::conn::http1::handshake;
- use hyper1::server::conn::http1::Builder;
+ use hyper::client::conn::http1::handshake;
+ use hyper::server::conn::http1::Builder;
let (stream_client, stream_server) = tokio::io::duplex(16 * 1024);
let conn_server =
Builder::new().serve_connection(TokioIo::new(stream_server), service);
@@ -646,7 +646,7 @@ mod tests {
local_port: None,
stream_type: NetworkStreamType::Tcp,
};
- let svc = service_fn(move |req: hyper1::Request<Incoming>| {
+ let svc = service_fn(move |req: hyper::Request<Incoming>| {
handle_request(
req,
request_info.clone(),
@@ -655,8 +655,7 @@ mod tests {
)
});
- let client_req =
- http_1::Request::builder().uri("/").body("".to_string())?;
+ let client_req = http::Request::builder().uri("/").body("".to_string())?;
// Response produced by concurrent tasks
tokio::try_join!(
diff --git a/ext/http/websocket_upgrade.rs b/ext/http/websocket_upgrade.rs
index d1eabfdf5..91bb81c74 100644
--- a/ext/http/websocket_upgrade.rs
+++ b/ext/http/websocket_upgrade.rs
@@ -6,9 +6,9 @@ use bytes::Bytes;
use bytes::BytesMut;
use deno_core::error::AnyError;
use httparse::Status;
-use hyper1::header::HeaderName;
-use hyper1::header::HeaderValue;
-use hyper1::Response;
+use hyper::header::HeaderName;
+use hyper::header::HeaderValue;
+use hyper::Response;
use memmem::Searcher;
use memmem::TwoWaySearcher;
use once_cell::sync::OnceCell;
@@ -152,7 +152,7 @@ impl<T: Default> WebSocketUpgrade<T> {
#[cfg(test)]
mod tests {
use super::*;
- use hyper::Body;
+ use hyper_v014::Body;
type ExpectedResponseAndHead = Option<(Response<Body>, &'static [u8])>;
diff --git a/ext/node/Cargo.toml b/ext/node/Cargo.toml
index b349fdbcd..a92766d94 100644
--- a/ext/node/Cargo.toml
+++ b/ext/node/Cargo.toml
@@ -32,10 +32,10 @@ dsa = "0.6.1"
ecb.workspace = true
elliptic-curve.workspace = true
errno = "0.2.8"
-h2.workspace = true
+h2 = { version = "0.3.17", features = ["unstable"] }
hex.workspace = true
hkdf.workspace = true
-http.workspace = true
+http_v02.workspace = true
idna = "0.3.0"
indexmap.workspace = true
k256 = "0.13.1"
diff --git a/ext/node/ops/http2.rs b/ext/node/ops/http2.rs
index 353a42e8b..bf295d542 100644
--- a/ext/node/ops/http2.rs
+++ b/ext/node/ops/http2.rs
@@ -25,11 +25,11 @@ use deno_net::raw::take_network_stream_resource;
use deno_net::raw::NetworkStream;
use h2;
use h2::RecvStream;
-use http;
-use http::request::Parts;
-use http::HeaderMap;
-use http::Response;
-use http::StatusCode;
+use http_v02;
+use http_v02::request::Parts;
+use http_v02::HeaderMap;
+use http_v02::Response;
+use http_v02::StatusCode;
use reqwest::header::HeaderName;
use reqwest::header::HeaderValue;
use url::Url;
@@ -310,7 +310,7 @@ pub async fn op_http2_client_request(
let url = url.join(&pseudo_path)?;
- let mut req = http::Request::builder()
+ let mut req = http_v02::Request::builder()
.uri(url.as_str())
.method(pseudo_method.as_str());
@@ -398,7 +398,7 @@ pub async fn op_http2_client_send_trailers(
.get::<Http2ClientStream>(stream_rid)?;
let mut stream = RcRef::map(&resource, |r| &r.stream).borrow_mut().await;
- let mut trailers_map = http::HeaderMap::new();
+ let mut trailers_map = http_v02::HeaderMap::new();
for (name, value) in trailers {
trailers_map.insert(
HeaderName::from_bytes(&name).unwrap(),
diff --git a/ext/websocket/Cargo.toml b/ext/websocket/Cargo.toml
index ac210f9ba..1ccbefd56 100644
--- a/ext/websocket/Cargo.toml
+++ b/ext/websocket/Cargo.toml
@@ -19,11 +19,11 @@ deno_core.workspace = true
deno_net.workspace = true
deno_tls.workspace = true
fastwebsockets.workspace = true
-h2 = "0.4"
-http = "1.0"
+h2.workspace = true
+http.workspace = true
http-body-util.workspace = true
+hyper.workspace = true
hyper-util.workspace = true
-hyper1.workspace = true
once_cell.workspace = true
rustls-tokio-stream.workspace = true
serde.workspace = true
diff --git a/ext/websocket/lib.rs b/ext/websocket/lib.rs
index d1692fe19..c0bf4ad00 100644
--- a/ext/websocket/lib.rs
+++ b/ext/websocket/lib.rs
@@ -891,7 +891,7 @@ pub fn get_network_error_class_name(e: &AnyError) -> Option<&'static str> {
#[derive(Clone)]
struct LocalExecutor;
-impl<Fut> hyper1::rt::Executor<Fut> for LocalExecutor
+impl<Fut> hyper::rt::Executor<Fut> for LocalExecutor
where
Fut: Future + 'static,
Fut::Output: 'static,
diff --git a/ext/websocket/stream.rs b/ext/websocket/stream.rs
index 88c053dc5..e82d7d1fc 100644
--- a/ext/websocket/stream.rs
+++ b/ext/websocket/stream.rs
@@ -4,7 +4,7 @@ use bytes::Bytes;
use deno_net::raw::NetworkStream;
use h2::RecvStream;
use h2::SendStream;
-use hyper1::upgrade::Upgraded;
+use hyper::upgrade::Upgraded;
use hyper_util::rt::TokioIo;
use std::io::ErrorKind;
use std::pin::Pin;