author     Ryan Dahl <ry@tinyclouds.org>             2024-07-13 17:08:23 -0400
committer  GitHub <noreply@github.com>               2024-07-13 21:08:23 +0000
commit     e0cfc9da39e1d05e6a95c89c41cff8ae34fcbd66 (patch)
tree       97e291e29e8e0e49796f3929e9bf5f42d0e5f76c /ext/fetch
parent     f6fd6619e708a515831f707438368d81b0c9aa56 (diff)
Revert "refactor(fetch): reimplement fetch with hyper instead of reqwest (#24237)" (#24574)
This reverts commit f6fd6619e708a515831f707438368d81b0c9aa56.
I'm seeing a difference in behavior between canary and 1.45.2: in
`deno-docs/reference_gen` the `@david/dax` package fails to download when
running `deno task types`:
```
~/src/deno-docs/reference_gen# deno upgrade --canary
Looking up latest canary version
Found latest version f6fd6619e708a515831f707438368d81b0c9aa56
Downloading https://dl.deno.land/canary/f6fd6619e708a515831f707438368d81b0c9aa56/deno-aarch64-apple-darwin.zip
Deno is upgrading to version f6fd6619e708a515831f707438368d81b0c9aa56
Archive: /var/folders/9v/kys6gqns6kl8nksyn4l1f9v40000gn/T/.tmpb5lDnq/deno.zip
inflating: deno
Upgraded successfully
~/src/deno-docs/reference_gen# deno -v
deno 1.45.2+f6fd661
~/src/deno-docs/reference_gen# rm -rf /Users/ry/Library/Caches/deno
~/src/deno-docs/reference_gen# deno task types
Task types deno task types:deno && deno task types:node
Task types:deno deno run --allow-read --allow-write --allow-run --allow-env --allow-sys deno-docs.ts
error: JSR package manifest for '@david/dax' failed to load. expected value at line 1 column 1
at file:///Users/ry/src/deno-docs/reference_gen/deno-docs.ts:2:15
~/src/deno-docs/reference_gen# deno upgrade --version 1.45.2
Downloading https://github.com/denoland/deno/releases/download/v1.45.2/deno-aarch64-apple-darwin.zip
Deno is upgrading to version 1.45.2
Archive: /var/folders/9v/kys6gqns6kl8nksyn4l1f9v40000gn/T/.tmp3R7uhF/deno.zip
inflating: deno
Upgraded successfully
~/src/deno-docs/reference_gen# rm -rf /Users/ry/Library/Caches/deno
~/src/deno-docs/reference_gen# deno task types
Task types deno task types:deno && deno task types:node
Task types:deno deno run --allow-read --allow-write --allow-run --allow-env --allow-sys deno-docs.ts
Task types:node deno run --allow-read --allow-write=. --allow-env --allow-sys node-docs.ts
```
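For context on the error text above: `expected value at line 1 column 1` is serde_json's standard message when the first byte of its input is not the start of a JSON value, which suggests the JSR manifest response body arrived as something other than JSON. The commit does not pin down a root cause; it simply reverts the client change. A minimal sketch (not from the commit) showing where the string originates, using a made-up stand-in body:

```rust
// Sketch only: demonstrates the origin of the error string seen above.
// Assumes serde_json = "1" in Cargo.toml; the body below is hypothetical.
fn main() {
  // A non-JSON body (e.g. an error page or still-encoded bytes).
  let body = "<html>502 Bad Gateway</html>";
  let err = serde_json::from_str::<serde_json::Value>(body).unwrap_err();
  // serde_json reports the position of the first unexpected byte.
  assert_eq!(err.to_string(), "expected value at line 1 column 1");
}
```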
Diffstat (limited to 'ext/fetch')
-rw-r--r--  ext/fetch/Cargo.toml           |   12
-rw-r--r--  ext/fetch/fs_fetch_handler.rs  |   18
-rw-r--r--  ext/fetch/lib.rs               |  311
-rw-r--r--  ext/fetch/proxy.rs             |  860
4 files changed, 118 insertions(+), 1083 deletions(-)
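The most embedder-visible piece of this revert is the `request_builder_hook` option in the `lib.rs` hunks below, which goes back from mutating an `http::Request` to transforming a `reqwest::RequestBuilder`. A sketch (not part of the commit) of a hook with the restored signature; the header name is hypothetical, and `deno_fetch` re-exports `reqwest` per the `pub use reqwest;` hunk:

```rust
use deno_core::error::AnyError;
use deno_fetch::reqwest::RequestBuilder;

// Hook matching the reverted signature:
// Option<fn(RequestBuilder) -> Result<RequestBuilder, AnyError>>
fn add_trace_header(rb: RequestBuilder) -> Result<RequestBuilder, AnyError> {
  // "x-example-trace" is a made-up header for illustration.
  Ok(rb.header("x-example-trace", "1"))
}

// An embedder would install it via the options struct, e.g.:
// deno_fetch::Options { request_builder_hook: Some(add_trace_header), ..Default::default() }
```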
diff --git a/ext/fetch/Cargo.toml b/ext/fetch/Cargo.toml
index dc7cacd37..8785da7df 100644
--- a/ext/fetch/Cargo.toml
+++ b/ext/fetch/Cargo.toml
@@ -14,7 +14,6 @@ description = "Fetch API implementation for Deno"
 path = "lib.rs"
 
 [dependencies]
-base64.workspace = true
 bytes.workspace = true
 data-url.workspace = true
 deno_core.workspace = true
@@ -22,17 +21,8 @@ deno_permissions.workspace = true
 deno_tls.workspace = true
 dyn-clone = "1"
 http.workspace = true
-http-body-util.workspace = true
-hyper.workspace = true
-hyper-rustls.workspace = true
-hyper-util.workspace = true
-ipnet.workspace = true
+reqwest.workspace = true
 serde.workspace = true
 serde_json.workspace = true
 tokio.workspace = true
-tokio-rustls.workspace = true
-tokio-socks.workspace = true
 tokio-util = { workspace = true, features = ["io"] }
-tower.workspace = true
-tower-http.workspace = true
-tower-service.workspace = true
diff --git a/ext/fetch/fs_fetch_handler.rs b/ext/fetch/fs_fetch_handler.rs
index 6f45ee664..29bad5992 100644
--- a/ext/fetch/fs_fetch_handler.rs
+++ b/ext/fetch/fs_fetch_handler.rs
@@ -7,12 +7,10 @@ use crate::FetchHandler;
 use deno_core::error::type_error;
 use deno_core::futures::FutureExt;
 use deno_core::futures::TryFutureExt;
-use deno_core::futures::TryStreamExt;
 use deno_core::url::Url;
 use deno_core::CancelFuture;
 use deno_core::OpState;
-use http::StatusCode;
-use http_body_util::BodyExt;
+use reqwest::StatusCode;
 use std::rc::Rc;
 use tokio_util::io::ReaderStream;
@@ -25,21 +23,19 @@ impl FetchHandler for FsFetchHandler {
   fn fetch_file(
     &self,
     _state: &mut OpState,
-    url: &Url,
+    url: Url,
   ) -> (CancelableResponseFuture, Option<Rc<CancelHandle>>) {
     let cancel_handle = CancelHandle::new_rc();
-    let path_result = url.to_file_path();
     let response_fut = async move {
-      let path = path_result?;
+      let path = url.to_file_path()?;
       let file = tokio::fs::File::open(path).map_err(|_| ()).await?;
-      let stream = ReaderStream::new(file)
-        .map_ok(hyper::body::Frame::data)
-        .map_err(Into::into);
-      let body = http_body_util::StreamBody::new(stream).boxed();
+      let stream = ReaderStream::new(file);
+      let body = reqwest::Body::wrap_stream(stream);
       let response = http::Response::builder()
         .status(StatusCode::OK)
         .body(body)
-        .map_err(|_| ())?;
+        .map_err(|_| ())?
+        .into();
       Ok::<_, ()>(response)
     }
     .map_err(move |_| {
diff --git a/ext/fetch/lib.rs b/ext/fetch/lib.rs
index 1343a9f56..75ceb86d9 100644
--- a/ext/fetch/lib.rs
+++ b/ext/fetch/lib.rs
@@ -1,7 +1,6 @@
 // Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
 
 mod fs_fetch_handler;
-mod proxy;
 
 use std::borrow::Cow;
 use std::cell::RefCell;
@@ -15,7 +14,7 @@ use std::sync::Arc;
 use std::task::Context;
 use std::task::Poll;
 
-use deno_core::anyhow::anyhow;
+use bytes::Bytes;
 use deno_core::anyhow::Error;
 use deno_core::error::type_error;
 use deno_core::error::AnyError;
@@ -43,38 +42,34 @@ use deno_core::ResourceId;
 use deno_tls::rustls::RootCertStore;
 use deno_tls::Proxy;
 use deno_tls::RootCertStoreProvider;
+
+use data_url::DataUrl;
 use deno_tls::TlsKey;
 use deno_tls::TlsKeys;
 use deno_tls::TlsKeysHolder;
-
-use bytes::Bytes;
-use data_url::DataUrl;
-use http::header::HeaderName;
-use http::header::HeaderValue;
-use http::header::ACCEPT_ENCODING;
 use http::header::CONTENT_LENGTH;
-use http::header::HOST;
-use http::header::PROXY_AUTHORIZATION;
-use http::header::RANGE;
-use http::header::USER_AGENT;
-use http::Method;
 use http::Uri;
-use http_body_util::BodyExt;
-use hyper::body::Frame;
-use hyper_rustls::HttpsConnector;
-use hyper_util::client::legacy::connect::HttpConnector;
-use hyper_util::rt::TokioExecutor;
-use hyper_util::rt::TokioIo;
-use hyper_util::rt::TokioTimer;
+use reqwest::header::HeaderMap;
+use reqwest::header::HeaderName;
+use reqwest::header::HeaderValue;
+use reqwest::header::ACCEPT_ENCODING;
+use reqwest::header::HOST;
+use reqwest::header::RANGE;
+use reqwest::header::USER_AGENT;
+use reqwest::redirect::Policy;
+use reqwest::Body;
+use reqwest::Client;
+use reqwest::Method;
+use reqwest::RequestBuilder;
+use reqwest::Response;
 use serde::Deserialize;
 use serde::Serialize;
 use tokio::io::AsyncReadExt;
 use tokio::io::AsyncWriteExt;
-use tower::ServiceExt;
-use tower_http::decompression::Decompression;
 
-// Re-export data_url
+// Re-export reqwest and data_url
 pub use data_url;
+pub use reqwest;
 
 pub use fs_fetch_handler::FsFetchHandler;
 
@@ -83,9 +78,8 @@ pub struct Options {
   pub user_agent: String,
   pub root_cert_store_provider: Option<Arc<dyn RootCertStoreProvider>>,
   pub proxy: Option<Proxy>,
-  #[allow(clippy::type_complexity)]
   pub request_builder_hook:
-    Option<fn(&mut http::Request<ReqBody>) -> Result<(), AnyError>>,
+    Option<fn(RequestBuilder) -> Result<RequestBuilder, AnyError>>,
   pub unsafely_ignore_certificate_errors: Option<Vec<String>>,
   pub client_cert_chain_and_key: TlsKeys,
   pub file_fetch_handler: Rc<dyn FetchHandler>,
@@ -152,7 +146,7 @@ pub trait FetchHandler: dyn_clone::DynClone {
   fn fetch_file(
     &self,
     state: &mut OpState,
-    url: &Url,
+    url: Url,
   ) -> (CancelableResponseFuture, Option<Rc<CancelHandle>>);
 }
 
@@ -166,7 +160,7 @@ impl FetchHandler for DefaultFileFetchHandler {
   fn fetch_file(
     &self,
     _state: &mut OpState,
-    _url: &Url,
+    _url: Url,
   ) -> (CancelableResponseFuture, Option<Rc<CancelHandle>>) {
     let fut = async move {
       Ok(Err(type_error(
@@ -189,20 +183,20 @@ pub struct FetchReturn {
 
 pub fn get_or_create_client_from_state(
   state: &mut OpState,
-) -> Result<Client, AnyError> {
-  if let Some(client) = state.try_borrow::<Client>() {
+) -> Result<reqwest::Client, AnyError> {
+  if let Some(client) = state.try_borrow::<reqwest::Client>() {
     Ok(client.clone())
   } else {
     let options = state.borrow::<Options>();
     let client = create_client_from_options(options)?;
-    state.put::<Client>(client.clone());
+    state.put::<reqwest::Client>(client.clone());
     Ok(client)
   }
 }
 
 pub fn create_client_from_options(
   options: &Options,
-) -> Result<Client, AnyError> {
+) -> Result<reqwest::Client, AnyError> {
   create_http_client(
     &options.user_agent,
     CreateHttpClientOptions {
@@ -259,11 +253,11 @@ impl Stream for ResourceToBodyAdapter {
       }
       Poll::Ready(res) => match res {
         Ok(buf) if buf.is_empty() => Poll::Ready(None),
-        Ok(buf) => {
+        Ok(_) => {
           this.1 = Some(this.0.clone().read(64 * 1024));
-          Poll::Ready(Some(Ok(buf.to_vec().into())))
+          Poll::Ready(Some(res.map(|b| b.to_vec().into())))
         }
-        Err(err) => Poll::Ready(Some(Err(err))),
+        _ => Poll::Ready(Some(res.map(|b| b.to_vec().into()))),
       },
     }
   } else {
@@ -272,22 +266,6 @@ impl Stream for ResourceToBodyAdapter {
   }
 }
 
-impl hyper::body::Body for ResourceToBodyAdapter {
-  type Data = Bytes;
-  type Error = Error;
-
-  fn poll_frame(
-    self: Pin<&mut Self>,
-    cx: &mut Context<'_>,
-  ) -> Poll<Option<Result<Frame<Self::Data>, Self::Error>>> {
-    match self.poll_next(cx) {
-      Poll::Ready(Some(res)) => Poll::Ready(Some(res.map(Frame::data))),
-      Poll::Ready(None) => Poll::Ready(None),
-      Poll::Pending => Poll::Pending,
-    }
-  }
-}
-
 impl Drop for ResourceToBodyAdapter {
   fn drop(&mut self) {
     self.0.clone().close()
@@ -369,11 +347,9 @@ where
       file_fetch_handler, ..
     } = state.borrow_mut::<Options>();
     let file_fetch_handler = file_fetch_handler.clone();
-    let (future, maybe_cancel_handle) =
-      file_fetch_handler.fetch_file(state, &url);
-    let request_rid = state
-      .resource_table
-      .add(FetchRequestResource { future, url });
+    let (request, maybe_cancel_handle) =
+      file_fetch_handler.fetch_file(state, url);
+    let request_rid = state.resource_table.add(FetchRequestResource(request));
     let maybe_cancel_handle_rid = maybe_cancel_handle
       .map(|ch| state.resource_table.add(FetchCancelHandle(ch)));
@@ -383,31 +359,31 @@ where
     let permissions = state.borrow_mut::<FP>();
     permissions.check_net_url(&url, "fetch()")?;
 
-    let uri = url
-      .as_str()
-      .parse::<Uri>()
-      .map_err(|_| type_error("Invalid URL"))?;
+    // Make sure that we have a valid URI early, as reqwest's `RequestBuilder::send`
+    // internally uses `expect_uri`, which panics instead of returning a usable `Result`.
+    if url.as_str().parse::<Uri>().is_err() {
+      return Err(type_error("Invalid URL"));
+    }
 
-    let mut con_len = None;
-    let body = if has_body {
+    let mut request = client.request(method.clone(), url);
+
+    if has_body {
       match (data, resource) {
         (Some(data), _) => {
           // If a body is passed, we use it, and don't return a body for streaming.
-          con_len = Some(data.len() as u64);
-
-          http_body_util::Full::new(data.to_vec().into())
-            .map_err(|never| match never {})
-            .boxed()
+          request = request.body(data.to_vec());
         }
         (_, Some(resource)) => {
           let resource = state.resource_table.take_any(resource)?;
           match resource.size_hint() {
             (body_size, Some(n)) if body_size == n && body_size > 0 => {
-              con_len = Some(body_size);
+              request =
+                request.header(CONTENT_LENGTH, HeaderValue::from(body_size));
             }
             _ => {}
           }
-          ReqBody::new(ResourceToBodyAdapter::new(resource))
+          request = request
+            .body(Body::wrap_stream(ResourceToBodyAdapter::new(resource)))
         }
         (None, None) => unreachable!(),
       }
@@ -415,21 +391,11 @@ where
       // POST and PUT requests should always have a 0 length content-length,
      // if there is no body. https://fetch.spec.whatwg.org/#http-network-or-cache-fetch
       if matches!(method, Method::POST | Method::PUT) {
-        con_len = Some(0);
+        request = request.header(CONTENT_LENGTH, HeaderValue::from(0));
       }
-      http_body_util::Empty::new()
-        .map_err(|never| match never {})
-        .boxed()
     };
 
-    let mut request = http::Request::new(body);
-    *request.method_mut() = method.clone();
-    *request.uri_mut() = uri;
-
-    if let Some(len) = con_len {
-      request.headers_mut().insert(CONTENT_LENGTH, len.into());
-    }
-
+    let mut header_map = HeaderMap::new();
     for (key, value) in headers {
       let name = HeaderName::from_bytes(&key)
         .map_err(|err| type_error(err.to_string()))?;
@@ -437,34 +403,38 @@ where
         .map_err(|err| type_error(err.to_string()))?;
 
       if (name != HOST || allow_host) && name != CONTENT_LENGTH {
-        request.headers_mut().append(name, v);
+        header_map.append(name, v);
       }
     }
 
-    if request.headers().contains_key(RANGE) {
+    if header_map.contains_key(RANGE) {
       // https://fetch.spec.whatwg.org/#http-network-or-cache-fetch step 18
       // If httpRequest’s header list contains `Range`, then append (`Accept-Encoding`, `identity`)
-      request
-        .headers_mut()
+      header_map
         .insert(ACCEPT_ENCODING, HeaderValue::from_static("identity"));
     }
+    request = request.headers(header_map);
 
     let options = state.borrow::<Options>();
     if let Some(request_builder_hook) = options.request_builder_hook {
-      request_builder_hook(&mut request)
+      request = request_builder_hook(request)
         .map_err(|err| type_error(err.to_string()))?;
     }
 
     let cancel_handle = CancelHandle::new_rc();
     let cancel_handle_ = cancel_handle.clone();
 
-    let fut =
-      async move { client.send(request).or_cancel(cancel_handle_).await };
+    let fut = async move {
+      request
+        .send()
+        .or_cancel(cancel_handle_)
+        .await
+        .map(|res| res.map_err(|err| err.into()))
+    };
 
-    let request_rid = state.resource_table.add(FetchRequestResource {
-      future: Box::pin(fut),
-      url,
-    });
+    let request_rid = state
+      .resource_table
+      .add(FetchRequestResource(Box::pin(fut)));
 
     let cancel_handle_rid =
       state.resource_table.add(FetchCancelHandle(cancel_handle));
@@ -478,21 +448,17 @@ where
     let (body, _) = data_url
       .decode_to_vec()
       .map_err(|e| type_error(format!("{e:?}")))?;
-    let body = http_body_util::Full::new(body.into())
-      .map_err(|never| match never {})
-      .boxed();
 
     let response = http::Response::builder()
       .status(http::StatusCode::OK)
       .header(http::header::CONTENT_TYPE, data_url.mime_type().to_string())
-      .body(body)?;
+      .body(reqwest::Body::from(body))?;
 
-    let fut = async move { Ok(Ok(response)) };
+    let fut = async move { Ok(Ok(Response::from(response))) };
 
-    let request_rid = state.resource_table.add(FetchRequestResource {
-      future: Box::pin(fut),
-      url,
-    });
+    let request_rid = state
+      .resource_table
+      .add(FetchRequestResource(Box::pin(fut)));
 
     (request_rid, None)
   }
@@ -539,21 +505,24 @@ pub async fn op_fetch_send(
     .ok()
     .expect("multiple op_fetch_send ongoing");
 
-  let res = match request.future.await {
+  let res = match request.0.await {
     Ok(Ok(res)) => res,
     Ok(Err(err)) => {
       // We're going to try and rescue the error cause from a stream and return it from this fetch.
-      // If any error in the chain is a hyper body error, return that as a special result we can use to
+      // If any error in the chain is a reqwest body error, return that as a special result we can use to
       // reconstruct an error chain (eg: `new TypeError(..., { cause: new Error(...) })`).
       // TODO(mmastrac): it would be a lot easier if we just passed a v8::Global through here instead
       let mut err_ref: &dyn std::error::Error = err.as_ref();
       while let Some(err) = std::error::Error::source(err_ref) {
-        if let Some(err) = err.downcast_ref::<hyper::Error>() {
-          if let Some(err) = std::error::Error::source(err) {
-            return Ok(FetchResponse {
-              error: Some(err.to_string()),
-              ..Default::default()
-            });
+        if let Some(err) = err.downcast_ref::<reqwest::Error>() {
+          if err.is_body() {
+            // Extracts the next error cause and uses that for the message
+            if let Some(err) = std::error::Error::source(err) {
+              return Ok(FetchResponse {
+                error: Some(err.to_string()),
+                ..Default::default()
+              });
+            }
           }
         }
         err_ref = err;
@@ -565,17 +534,14 @@ pub async fn op_fetch_send(
   };
 
   let status = res.status();
-  let url = request.url.into();
+  let url = res.url().to_string();
   let mut res_headers = Vec::new();
   for (key, val) in res.headers().iter() {
     res_headers.push((key.as_str().into(), val.as_bytes().into()));
   }
 
-  let content_length = hyper::body::Body::size_hint(res.body()).exact();
-  let remote_addr = res
-    .extensions()
-    .get::<hyper_util::client::legacy::connect::HttpInfo>()
-    .map(|info| info.remote_addr());
+  let content_length = res.content_length();
+  let remote_addr = res.remote_addr();
   let (remote_addr_ip, remote_addr_port) = if let Some(addr) = remote_addr {
     (Some(addr.ip().to_string()), Some(addr.port()))
   } else {
@@ -619,8 +585,7 @@ pub async fn op_fetch_response_upgrade(
   let upgraded = raw_response.upgrade().await?;
   {
     // Stage 3: Pump the data
-    let (mut upgraded_rx, mut upgraded_tx) =
-      tokio::io::split(TokioIo::new(upgraded));
+    let (mut upgraded_rx, mut upgraded_tx) = tokio::io::split(upgraded);
 
     spawn(async move {
       let mut buf = [0; 1024];
@@ -708,13 +673,11 @@ impl Resource for UpgradeStream {
   }
 }
 
-type CancelableResponseResult =
-  Result<Result<http::Response<ResBody>, AnyError>, Canceled>;
+type CancelableResponseResult = Result<Result<Response, AnyError>, Canceled>;
 
-pub struct FetchRequestResource {
-  pub future: Pin<Box<dyn Future<Output = CancelableResponseResult>>>,
-  pub url: Url,
-}
+pub struct FetchRequestResource(
+  pub Pin<Box<dyn Future<Output = CancelableResponseResult>>>,
+);
 
 impl Resource for FetchRequestResource {
   fn name(&self) -> Cow<str> {
@@ -738,7 +701,7 @@ type BytesStream =
   Pin<Box<dyn Stream<Item = Result<bytes::Bytes, std::io::Error>> + Unpin>>;
 
 pub enum FetchResponseReader {
-  Start(http::Response<ResBody>),
+  Start(Response),
   BodyReader(Peekable<BytesStream>),
 }
 
@@ -756,7 +719,7 @@ pub struct FetchResponseResource {
 }
 
 impl FetchResponseResource {
-  pub fn new(response: http::Response<ResBody>, size: Option<u64>) -> Self {
+  pub fn new(response: Response, size: Option<u64>) -> Self {
     Self {
       response_reader: AsyncRefCell::new(FetchResponseReader::Start(response)),
       cancel: CancelHandle::default(),
@@ -764,10 +727,10 @@ impl FetchResponseResource {
     }
   }
 
-  pub async fn upgrade(self) -> Result<hyper::upgrade::Upgraded, AnyError> {
+  pub async fn upgrade(self) -> Result<reqwest::Upgraded, AnyError> {
     let reader = self.response_reader.into_inner();
     match reader {
-      FetchResponseReader::Start(resp) => Ok(hyper::upgrade::on(resp).await?),
+      FetchResponseReader::Start(resp) => Ok(resp.upgrade().await?),
       _ => unreachable!(),
     }
   }
@@ -791,12 +754,11 @@ impl Resource for FetchResponseResource {
 
         match std::mem::take(&mut *reader) {
           FetchResponseReader::Start(resp) => {
-            let stream: BytesStream =
-              Box::pin(resp.into_body().into_data_stream().map(|r| {
-                r.map_err(|err| {
-                  std::io::Error::new(std::io::ErrorKind::Other, err)
-                })
-              }));
+            let stream: BytesStream = Box::pin(resp.bytes_stream().map(|r| {
+              r.map_err(|err| {
+                std::io::Error::new(std::io::ErrorKind::Other, err)
+              })
+            }));
             *reader = FetchResponseReader::BodyReader(stream.peekable());
           }
           FetchResponseReader::BodyReader(_) => unreachable!(),
@@ -960,7 +922,7 @@ impl Default for CreateHttpClientOptions {
   }
 }
 
-/// Create new instance of async Client. This client supports
+/// Create new instance of async reqwest::Client. This client supports
 /// proxies and doesn't follow redirects.
 pub fn create_http_client(
   user_agent: &str,
@@ -982,64 +944,43 @@ pub fn create_http_client(
     alpn_protocols.push("http/1.1".into());
   }
   tls_config.alpn_protocols = alpn_protocols;
-  let tls_config = Arc::from(tls_config);
 
-  let mut http_connector = HttpConnector::new();
-  http_connector.enforce_http(false);
-  let connector = HttpsConnector::from((http_connector, tls_config.clone()));
+  let mut headers = HeaderMap::new();
+  headers.insert(USER_AGENT, user_agent.parse().unwrap());
+  let mut builder = Client::builder()
+    .redirect(Policy::none())
+    .default_headers(headers)
+    .use_preconfigured_tls(tls_config);
 
-  let user_agent = user_agent
-    .parse::<HeaderValue>()
-    .map_err(|_| type_error("illegal characters in User-Agent"))?;
-
-  let mut builder =
-    hyper_util::client::legacy::Builder::new(TokioExecutor::new());
-  builder.timer(TokioTimer::new());
-  builder.pool_timer(TokioTimer::new());
-
-  let mut proxies = proxy::from_env();
   if let Some(proxy) = options.proxy {
-    let mut intercept = proxy::Intercept::all(&proxy.url)
-      .ok_or_else(|| type_error("invalid proxy url"))?;
+    let mut reqwest_proxy = reqwest::Proxy::all(&proxy.url)?;
     if let Some(basic_auth) = &proxy.basic_auth {
-      intercept.set_auth(&basic_auth.username, &basic_auth.password);
+      reqwest_proxy =
+        reqwest_proxy.basic_auth(&basic_auth.username, &basic_auth.password);
    }
-    proxies.prepend(intercept);
+    builder = builder.proxy(reqwest_proxy);
   }
-  let proxies = Arc::new(proxies);
-  let mut connector =
-    proxy::ProxyConnector::new(proxies.clone(), connector, tls_config);
-  connector.user_agent(user_agent.clone());
 
   if let Some(pool_max_idle_per_host) = options.pool_max_idle_per_host {
-    builder.pool_max_idle_per_host(pool_max_idle_per_host);
+    builder = builder.pool_max_idle_per_host(pool_max_idle_per_host);
   }
 
   if let Some(pool_idle_timeout) = options.pool_idle_timeout {
-    builder.pool_idle_timeout(
+    builder = builder.pool_idle_timeout(
       pool_idle_timeout.map(std::time::Duration::from_millis),
     );
   }
 
   match (options.http1, options.http2) {
-    (true, false) => {} // noop, handled by ALPN above
-    (false, true) => {
-      builder.http2_only(true);
-    }
+    (true, false) => builder = builder.http1_only(),
+    (false, true) => builder = builder.http2_prior_knowledge(),
     (true, true) => {}
     (false, false) => {
       return Err(type_error("Either `http1` or `http2` needs to be true"))
     }
   }
 
-  let pooled_client = builder.build(connector);
-  let decompress = Decompression::new(pooled_client).gzip(true).br(true);
-
-  Ok(Client {
-    inner: decompress,
-    proxies,
-    user_agent,
-  })
+  builder.build().map_err(|e| e.into())
 }
 
 #[op2]
@@ -1049,35 +990,3 @@ pub fn op_utf8_to_byte_string(
 ) -> Result<ByteString, AnyError> {
   Ok(input.into())
 }
-
-#[derive(Clone, Debug)]
-pub struct Client {
-  inner: Decompression<hyper_util::client::legacy::Client<Connector, ReqBody>>,
-  // Used to check whether to include a proxy-authorization header
-  proxies: Arc<proxy::Proxies>,
-  user_agent: HeaderValue,
-}
-
-type Connector = proxy::ProxyConnector<HttpsConnector<HttpConnector>>;
-
-impl Client {
-  pub async fn send(
-    self,
-    mut req: http::Request<ReqBody>,
-  ) -> Result<http::Response<ResBody>, AnyError> {
-    req
-      .headers_mut()
-      .entry(USER_AGENT)
-      .or_insert_with(|| self.user_agent.clone());
-
-    if let Some(auth) = self.proxies.http_forward_auth(req.uri()) {
-      req.headers_mut().insert(PROXY_AUTHORIZATION, auth.clone());
-    }
-
-    let resp = self.inner.oneshot(req).await?;
-    Ok(resp.map(|b| b.map_err(|e| anyhow!(e)).boxed()))
-  }
-}
-
-pub type ReqBody = http_body_util::combinators::BoxBody<Bytes, Error>;
-pub type ResBody = http_body_util::combinators::BoxBody<Bytes, Error>;
diff --git a/ext/fetch/proxy.rs b/ext/fetch/proxy.rs
deleted file mode 100644
index db187c3f6..000000000
--- a/ext/fetch/proxy.rs
+++ /dev/null
@@ -1,860 +0,0 @@
-// Copyright 2018-2024 the Deno authors. All rights reserved. MIT license.
-
-//! Parts of this module should be able to be replaced with other crates
-//! eventually, once generic versions appear in hyper-util, et al.
-
-use std::env;
-use std::future::Future;
-use std::net::IpAddr;
-use std::pin::Pin;
-use std::sync::Arc;
-use std::task::Context;
-use std::task::Poll;
-
-use deno_core::futures::TryFutureExt;
-use deno_tls::rustls::ClientConfig as TlsConfig;
-
-use http::header::HeaderValue;
-use http::uri::Scheme;
-use http::Uri;
-use hyper_util::client::legacy::connect::Connected;
-use hyper_util::client::legacy::connect::Connection;
-use hyper_util::rt::TokioIo;
-use ipnet::IpNet;
-use tokio::net::TcpStream;
-use tokio_rustls::client::TlsStream;
-use tokio_rustls::TlsConnector;
-use tokio_socks::tcp::Socks5Stream;
-use tower_service::Service;
-
-#[derive(Debug, Clone)]
-pub(crate) struct ProxyConnector<C> {
-  connector: C,
-  proxies: Arc<Proxies>,
-  tls: Arc<TlsConfig>,
-  user_agent: Option<HeaderValue>,
-}
-
-#[derive(Debug)]
-pub(crate) struct Proxies {
-  no: Option<NoProxy>,
-  intercepts: Vec<Intercept>,
-}
-
-#[derive(Clone)]
-pub(crate) struct Intercept {
-  filter: Filter,
-  target: Target,
-}
-
-#[derive(Clone)]
-enum Target {
-  Http {
-    dst: Uri,
-    auth: Option<HeaderValue>,
-  },
-  Https {
-    dst: Uri,
-    auth: Option<HeaderValue>,
-  },
-  Socks {
-    dst: Uri,
-    auth: Option<(String, String)>,
-  },
-}
-
-#[derive(Debug, Clone, Copy)]
-enum Filter {
-  Http,
-  Https,
-  All,
-}
-
-pub(crate) fn from_env() -> Proxies {
-  let mut intercepts = Vec::new();
-
-  if let Some(proxy) = parse_env_var("ALL_PROXY", Filter::All) {
-    intercepts.push(proxy);
-  } else if let Some(proxy) = parse_env_var("all_proxy", Filter::All) {
-    intercepts.push(proxy);
-  }
-
-  if let Some(proxy) = parse_env_var("HTTPS_PROXY", Filter::Https) {
-    intercepts.push(proxy);
-  } else if let Some(proxy) = parse_env_var("https_proxy", Filter::Https) {
-    intercepts.push(proxy);
-  }
-
-  // In a CGI context, headers become environment variables. So, "Proxy:" becomes HTTP_PROXY.
-  // To prevent an attacker from injecting a proxy, check if we are in CGI.
- if env::var_os("REQUEST_METHOD").is_none() { - if let Some(proxy) = parse_env_var("HTTP_PROXY", Filter::Http) { - intercepts.push(proxy); - } else if let Some(proxy) = parse_env_var("http_proxy", Filter::Https) { - intercepts.push(proxy); - } - } - - let no = NoProxy::from_env(); - - Proxies { intercepts, no } -} - -pub(crate) fn basic_auth(user: &str, pass: &str) -> HeaderValue { - use base64::prelude::BASE64_STANDARD; - use base64::write::EncoderWriter; - use std::io::Write; - - let mut buf = b"Basic ".to_vec(); - { - let mut encoder = EncoderWriter::new(&mut buf, &BASE64_STANDARD); - let _ = write!(encoder, "{user}:{pass}"); - } - let mut header = - HeaderValue::from_bytes(&buf).expect("base64 is always valid HeaderValue"); - header.set_sensitive(true); - header -} - -fn parse_env_var(name: &str, filter: Filter) -> Option<Intercept> { - let val = env::var(name).ok()?; - let target = Target::parse(&val)?; - Some(Intercept { filter, target }) -} - -impl Intercept { - pub(crate) fn all(s: &str) -> Option<Self> { - let target = Target::parse(s)?; - Some(Intercept { - filter: Filter::All, - target, - }) - } - - pub(crate) fn set_auth(&mut self, user: &str, pass: &str) { - match self.target { - Target::Http { ref mut auth, .. } => { - *auth = Some(basic_auth(user, pass)); - } - Target::Https { ref mut auth, .. } => { - *auth = Some(basic_auth(user, pass)); - } - Target::Socks { ref mut auth, .. } => { - *auth = Some((user.into(), pass.into())); - } - } - } -} - -impl std::fmt::Debug for Intercept { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("Intercept") - .field("filter", &self.filter) - .finish() - } -} - -impl Target { - fn parse(val: &str) -> Option<Self> { - let uri = val.parse::<Uri>().ok()?; - - let mut builder = Uri::builder(); - let mut is_socks = false; - let mut http_auth = None; - let mut socks_auth = None; - - builder = builder.scheme(match uri.scheme() { - Some(s) => { - if s == &Scheme::HTTP || s == &Scheme::HTTPS { - s.clone() - } else if s.as_str() == "socks5" || s.as_str() == "socks5h" { - is_socks = true; - s.clone() - } else { - // can't use this proxy scheme - return None; - } - } - // if no scheme provided, assume they meant 'http' - None => Scheme::HTTP, - }); - - let authority = uri.authority()?; - - if let Some((userinfo, host_port)) = authority.as_str().split_once('@') { - let (user, pass) = userinfo.split_once(':')?; - if is_socks { - socks_auth = Some((user.into(), pass.into())); - } else { - http_auth = Some(basic_auth(user, pass)); - } - builder = builder.authority(host_port); - } else { - builder = builder.authority(authority.clone()); - } - - // removing any path, but we MUST specify one or the builder errors - builder = builder.path_and_query("/"); - - let dst = builder.build().ok()?; - - let target = match dst.scheme().unwrap().as_str() { - "https" => Target::Https { - dst, - auth: http_auth, - }, - "http" => Target::Http { - dst, - auth: http_auth, - }, - "socks5" | "socks5h" => Target::Socks { - dst, - auth: socks_auth, - }, - // shouldn't happen - _ => return None, - }; - - Some(target) - } -} - -#[derive(Debug)] -struct NoProxy { - domains: DomainMatcher, - ips: IpMatcher, -} - -/// Represents a possible matching entry for an IP address -#[derive(Clone, Debug)] -enum Ip { - Address(IpAddr), - Network(IpNet), -} - -/// A wrapper around a list of IP cidr blocks or addresses with a [IpMatcher::contains] method for -/// checking if an IP address is contained within the matcher -#[derive(Clone, Debug, Default)] 
-struct IpMatcher(Vec<Ip>);
-
-/// A wrapper around a list of domains with a [DomainMatcher::contains] method for checking if a
-/// domain is contained within the matcher
-#[derive(Clone, Debug, Default)]
-struct DomainMatcher(Vec<String>);
-
-impl NoProxy {
-  /// Returns a new no-proxy configuration based on environment variables (or `None` if no variables are set)
-  /// see [self::NoProxy::from_string()] for the string format
-  fn from_env() -> Option<NoProxy> {
-    let raw = env::var("NO_PROXY")
-      .or_else(|_| env::var("no_proxy"))
-      .unwrap_or_default();
-
-    Self::from_string(&raw)
-  }
-
-  /// Returns a new no-proxy configuration based on a `no_proxy` string (or `None` if no variables
-  /// are set)
-  /// The rules are as follows:
-  /// * The environment variable `NO_PROXY` is checked, if it is not set, `no_proxy` is checked
-  /// * If neither environment variable is set, `None` is returned
-  /// * Entries are expected to be comma-separated (whitespace between entries is ignored)
-  /// * IP addresses (both IPv4 and IPv6) are allowed, as are optional subnet masks (by adding /size,
-  ///   for example "`192.168.1.0/24`").
-  /// * An entry "`*`" matches all hostnames (this is the only wildcard allowed)
-  /// * Any other entry is considered a domain name (and may contain a leading dot, for example `google.com`
-  ///   and `.google.com` are equivalent) and would match both that domain AND all subdomains.
-  ///
-  /// For example, if `"NO_PROXY=google.com, 192.168.1.0/24"` was set, all of the following would match
-  /// (and therefore would bypass the proxy):
-  /// * `http://google.com/`
-  /// * `http://www.google.com/`
-  /// * `http://192.168.1.42/`
-  ///
-  /// The URL `http://notgoogle.com/` would not match.
-  fn from_string(no_proxy_list: &str) -> Option<Self> {
-    if no_proxy_list.is_empty() {
-      return None;
-    }
-    let mut ips = Vec::new();
-    let mut domains = Vec::new();
-    let parts = no_proxy_list.split(',').map(str::trim);
-    for part in parts {
-      match part.parse::<IpNet>() {
-        // If we can parse an IP net or address, then use it, otherwise, assume it is a domain
-        Ok(ip) => ips.push(Ip::Network(ip)),
-        Err(_) => match part.parse::<IpAddr>() {
-          Ok(addr) => ips.push(Ip::Address(addr)),
-          Err(_) => domains.push(part.to_owned()),
-        },
-      }
-    }
-    Some(NoProxy {
-      ips: IpMatcher(ips),
-      domains: DomainMatcher(domains),
-    })
-  }
-
-  fn contains(&self, host: &str) -> bool {
-    // According to RFC3986, raw IPv6 hosts will be wrapped in []. So we need to strip those off
-    // the end in order to parse correctly
-    let host = if host.starts_with('[') {
-      let x: &[_] = &['[', ']'];
-      host.trim_matches(x)
-    } else {
-      host
-    };
-    match host.parse::<IpAddr>() {
-      // If we can parse an IP addr, then use it, otherwise, assume it is a domain
-      Ok(ip) => self.ips.contains(ip),
-      Err(_) => self.domains.contains(host),
-    }
-  }
-}
-
-impl IpMatcher {
-  fn contains(&self, addr: IpAddr) -> bool {
-    for ip in &self.0 {
-      match ip {
-        Ip::Address(address) => {
-          if &addr == address {
-            return true;
-          }
-        }
-        Ip::Network(net) => {
-          if net.contains(&addr) {
-            return true;
-          }
-        }
-      }
-    }
-    false
-  }
-}
-
-impl DomainMatcher {
-  // The following links may be useful to understand the origin of these rules:
-  // * https://curl.se/libcurl/c/CURLOPT_NOPROXY.html
-  // * https://github.com/curl/curl/issues/1208
-  fn contains(&self, domain: &str) -> bool {
-    let domain_len = domain.len();
-    for d in &self.0 {
-      if d == domain || d.strip_prefix('.') == Some(domain) {
-        return true;
-      } else if domain.ends_with(d) {
-        if d.starts_with('.') {
-          // If the first character of d is a dot, that means the first character of domain
-          // must also be a dot, so we are looking at a subdomain of d and that matches
-          return true;
-        } else if domain.as_bytes().get(domain_len - d.len() - 1)
-          == Some(&b'.')
-        {
-          // Given that d is a prefix of domain, if the prior character in domain is a dot
-          // then that means we must be matching a subdomain of d, and that matches
-          return true;
-        }
-      } else if d == "*" {
-        return true;
-      }
-    }
-    false
-  }
-}
-
-impl<C> ProxyConnector<C> {
-  pub(crate) fn new(
-    proxies: Arc<Proxies>,
-    connector: C,
-    tls: Arc<TlsConfig>,
-  ) -> Self {
-    ProxyConnector {
-      connector,
-      proxies,
-      tls,
-      user_agent: None,
-    }
-  }
-
-  pub(crate) fn user_agent(&mut self, val: HeaderValue) {
-    self.user_agent = Some(val);
-  }
-
-  fn intercept(&self, dst: &Uri) -> Option<&Intercept> {
-    self.proxies.intercept(dst)
-  }
-}
-
-impl Proxies {
-  pub(crate) fn prepend(&mut self, intercept: Intercept) {
-    self.intercepts.insert(0, intercept);
-  }
-
-  pub(crate) fn http_forward_auth(&self, dst: &Uri) -> Option<&HeaderValue> {
-    let intercept = self.intercept(dst)?;
-    match intercept.target {
-      // Only if the proxy target is http
-      Target::Http { ref auth, .. } => auth.as_ref(),
-      _ => None,
-    }
-  }
-
-  fn intercept(&self, dst: &Uri) -> Option<&Intercept> {
-    if let Some(no_proxy) = self.no.as_ref() {
-      if no_proxy.contains(dst.host()?) {
-        return None;
-      }
-    }
-
-    for intercept in &self.intercepts {
-      return match (
-        intercept.filter,
-        dst.scheme().map(Scheme::as_str).unwrap_or(""),
-      ) {
-        (Filter::All, _) => Some(intercept),
-        (Filter::Https, "https") => Some(intercept),
-        (Filter::Http, "http") => Some(intercept),
-        _ => continue,
-      };
-    }
-    None
-  }
-}
-
-type BoxFuture<T> = Pin<Box<dyn Future<Output = T> + Send>>;
-type BoxError = Box<dyn std::error::Error + Send + Sync>;
-
-// These variants are not to be inspected.
-pub enum Proxied<T> {
-  /// Not proxied
-  PassThrough(T),
-  /// An HTTP forwarding proxy needed absolute-form
-  HttpForward(T),
-  /// Tunneled through HTTP CONNECT
-  HttpTunneled(Box<TokioIo<TlsStream<TokioIo<T>>>>),
-  /// Tunneled through SOCKS
-  Socks(TokioIo<TcpStream>),
-  /// Tunneled through SOCKS and TLS
-  SocksTls(TokioIo<TlsStream<TokioIo<TokioIo<TcpStream>>>>),
-}
-
-impl<C> Service<Uri> for ProxyConnector<C>
-where
-  C: Service<Uri>,
-  C::Response: hyper::rt::Read + hyper::rt::Write + Unpin + Send + 'static,
-  C::Future: Send + 'static,
-  C::Error: Into<BoxError> + 'static,
-{
-  type Response = Proxied<C::Response>;
-  type Error = BoxError;
-  type Future = BoxFuture<Result<Self::Response, Self::Error>>;
-
-  fn poll_ready(
-    &mut self,
-    cx: &mut Context<'_>,
-  ) -> Poll<Result<(), Self::Error>> {
-    self.connector.poll_ready(cx).map_err(Into::into)
-  }
-
-  fn call(&mut self, orig_dst: Uri) -> Self::Future {
-    if let Some(intercept) = self.intercept(&orig_dst).cloned() {
-      let is_https = orig_dst.scheme() == Some(&Scheme::HTTPS);
-      let user_agent = self.user_agent.clone();
-      return match intercept.target {
-        Target::Http {
-          dst: proxy_dst,
-          auth,
-        }
-        | Target::Https {
-          dst: proxy_dst,
-          auth,
-        } => {
-          let connecting = self.connector.call(proxy_dst);
-          let tls = TlsConnector::from(self.tls.clone());
-          Box::pin(async move {
-            let mut io = connecting.await.map_err(Into::into)?;
-
-            if is_https {
-              tunnel(&mut io, &orig_dst, user_agent, auth).await?;
-              let tokio_io = TokioIo::new(io);
-              let io = tls
-                .connect(
-                  TryFrom::try_from(orig_dst.host().unwrap().to_owned())?,
-                  tokio_io,
-                )
-                .await?;
-              Ok(Proxied::HttpTunneled(Box::new(TokioIo::new(io))))
-            } else {
-              Ok(Proxied::HttpForward(io))
-            }
-          })
-        }
-        Target::Socks {
-          dst: proxy_dst,
-          auth,
-        } => {
-          let tls = TlsConnector::from(self.tls.clone());
-          Box::pin(async move {
-            let socks_addr = (
-              proxy_dst.host().unwrap(),
-              proxy_dst.port().map(|p| p.as_u16()).unwrap_or(1080),
-            );
-            let host = orig_dst.host().ok_or("no host in url")?;
-            let port = match orig_dst.port() {
-              Some(p) => p.as_u16(),
-              None if is_https => 443,
-              _ => 80,
-            };
-            let io = if let Some((user, pass)) = auth {
-              Socks5Stream::connect_with_password(
-                socks_addr,
-                (host, port),
-                &user,
-                &pass,
-              )
-              .await?
-            } else {
-              Socks5Stream::connect(socks_addr, (host, port)).await?
-            };
-            let io = TokioIo::new(io.into_inner());
-
-            if is_https {
-              let tokio_io = TokioIo::new(io);
-              let io = tls
-                .connect(TryFrom::try_from(host.to_owned())?, tokio_io)
-                .await?;
-              Ok(Proxied::SocksTls(TokioIo::new(io)))
-            } else {
-              Ok(Proxied::Socks(io))
-            }
-          })
-        }
-      };
-    }
-    Box::pin(
-      self
-        .connector
-        .call(orig_dst)
-        .map_ok(Proxied::PassThrough)
-        .map_err(Into::into),
-    )
-  }
-}
-
-async fn tunnel<T>(
-  io: &mut T,
-  dst: &Uri,
-  user_agent: Option<HeaderValue>,
-  auth: Option<HeaderValue>,
-) -> Result<(), BoxError>
-where
-  T: hyper::rt::Read + hyper::rt::Write + Unpin,
-{
-  use tokio::io::AsyncReadExt;
-  use tokio::io::AsyncWriteExt;
-
-  let host = dst.host().expect("proxy dst has host");
-  let port = match dst.port() {
-    Some(p) => p.as_u16(),
-    None => match dst.scheme().map(Scheme::as_str).unwrap_or("") {
-      "https" => 443,
-      "http" => 80,
-      _ => return Err("proxy dst unexpected scheme".into()),
-    },
-  };
-
-  let mut buf = format!(
-    "\
-     CONNECT {host}:{port} HTTP/1.1\r\n\
-     Host: {host}:{port}\r\n\
-     "
-  )
-  .into_bytes();
-
-  // user-agent
-  if let Some(user_agent) = user_agent {
-    buf.extend_from_slice(b"User-Agent: ");
-    buf.extend_from_slice(user_agent.as_bytes());
-    buf.extend_from_slice(b"\r\n");
-  }
-
-  // proxy-authorization
-  if let Some(value) = auth {
-    buf.extend_from_slice(b"Proxy-Authorization: ");
-    buf.extend_from_slice(value.as_bytes());
-    buf.extend_from_slice(b"\r\n");
-  }
-
-  // headers end
-  buf.extend_from_slice(b"\r\n");
-
-  let mut tokio_conn = TokioIo::new(io);
-
-  tokio_conn.write_all(&buf).await?;
-
-  let mut buf = [0; 8192];
-  let mut pos = 0;
-
-  loop {
-    let n = tokio_conn.read(&mut buf[pos..]).await?;
-
-    if n == 0 {
-      return Err("unexpected eof while tunneling".into());
-    }
-    pos += n;
-
-    let recvd = &buf[..pos];
-    if recvd.starts_with(b"HTTP/1.1 200") || recvd.starts_with(b"HTTP/1.0 200")
-    {
-      if recvd.ends_with(b"\r\n\r\n") {
-        return Ok(());
-      }
-      if pos == buf.len() {
-        return Err("proxy headers too long for tunnel".into());
-      }
-      // else read more
-    } else if recvd.starts_with(b"HTTP/1.1 407") {
-      return Err("proxy authentication required".into());
-    } else {
-      return Err("unsuccessful tunnel".into());
-    }
-  }
-}
-
-impl<T> hyper::rt::Read for Proxied<T>
-where
-  T: hyper::rt::Read + hyper::rt::Write + Unpin,
-{
-  fn poll_read(
-    mut self: Pin<&mut Self>,
-    cx: &mut Context<'_>,
-    buf: hyper::rt::ReadBufCursor<'_>,
-  ) -> Poll<Result<(), std::io::Error>> {
-    match *self {
-      Proxied::PassThrough(ref mut p) => Pin::new(p).poll_read(cx, buf),
-      Proxied::HttpForward(ref mut p) => Pin::new(p).poll_read(cx, buf),
-      Proxied::HttpTunneled(ref mut p) => Pin::new(p).poll_read(cx, buf),
-      Proxied::Socks(ref mut p) => Pin::new(p).poll_read(cx, buf),
-      Proxied::SocksTls(ref mut p) => Pin::new(p).poll_read(cx, buf),
-    }
-  }
-}
-
-impl<T> hyper::rt::Write for Proxied<T>
-where
-  T: hyper::rt::Read + hyper::rt::Write + Unpin,
-{
-  fn poll_write(
-    mut self: Pin<&mut Self>,
-    cx: &mut Context<'_>,
-    buf: &[u8],
-  ) -> Poll<Result<usize, std::io::Error>> {
-    match *self {
-      Proxied::PassThrough(ref mut p) => Pin::new(p).poll_write(cx, buf),
-      Proxied::HttpForward(ref mut p) => Pin::new(p).poll_write(cx, buf),
-      Proxied::HttpTunneled(ref mut p) => Pin::new(p).poll_write(cx, buf),
-      Proxied::Socks(ref mut p) => Pin::new(p).poll_write(cx, buf),
-      Proxied::SocksTls(ref mut p) => Pin::new(p).poll_write(cx, buf),
-    }
-  }
-
-  fn poll_flush(
-    mut self: Pin<&mut Self>,
-    cx: &mut Context<'_>,
-  ) -> Poll<Result<(), std::io::Error>> {
-    match *self {
-      Proxied::PassThrough(ref mut p) => Pin::new(p).poll_flush(cx),
-      Proxied::HttpForward(ref mut p) => Pin::new(p).poll_flush(cx),
-      Proxied::HttpTunneled(ref mut p) => Pin::new(p).poll_flush(cx),
-      Proxied::Socks(ref mut p) => Pin::new(p).poll_flush(cx),
-      Proxied::SocksTls(ref mut p) => Pin::new(p).poll_flush(cx),
-    }
-  }
-
-  fn poll_shutdown(
-    mut self: Pin<&mut Self>,
-    cx: &mut Context<'_>,
-  ) -> Poll<Result<(), std::io::Error>> {
-    match *self {
-      Proxied::PassThrough(ref mut p) => Pin::new(p).poll_shutdown(cx),
-      Proxied::HttpForward(ref mut p) => Pin::new(p).poll_shutdown(cx),
-      Proxied::HttpTunneled(ref mut p) => Pin::new(p).poll_shutdown(cx),
-      Proxied::Socks(ref mut p) => Pin::new(p).poll_shutdown(cx),
-      Proxied::SocksTls(ref mut p) => Pin::new(p).poll_shutdown(cx),
-    }
-  }
-
-  fn is_write_vectored(&self) -> bool {
-    match *self {
-      Proxied::PassThrough(ref p) => p.is_write_vectored(),
-      Proxied::HttpForward(ref p) => p.is_write_vectored(),
-      Proxied::HttpTunneled(ref p) => p.is_write_vectored(),
-      Proxied::Socks(ref p) => p.is_write_vectored(),
-      Proxied::SocksTls(ref p) => p.is_write_vectored(),
-    }
-  }
-
-  fn poll_write_vectored(
-    mut self: Pin<&mut Self>,
-    cx: &mut Context<'_>,
-    bufs: &[std::io::IoSlice<'_>],
-  ) -> Poll<Result<usize, std::io::Error>> {
-    match *self {
-      Proxied::PassThrough(ref mut p) => {
-        Pin::new(p).poll_write_vectored(cx, bufs)
-      }
-      Proxied::HttpForward(ref mut p) => {
-        Pin::new(p).poll_write_vectored(cx, bufs)
-      }
-      Proxied::HttpTunneled(ref mut p) => {
-        Pin::new(p).poll_write_vectored(cx, bufs)
-      }
-      Proxied::Socks(ref mut p) => Pin::new(p).poll_write_vectored(cx, bufs),
-      Proxied::SocksTls(ref mut p) => Pin::new(p).poll_write_vectored(cx, bufs),
-    }
-  }
-}
-
-impl<T> Connection for Proxied<T>
-where
-  T: Connection,
-{
-  fn connected(&self) -> Connected {
-    match self {
-      Proxied::PassThrough(ref p) => p.connected(),
-      Proxied::HttpForward(ref p) => p.connected().proxy(true),
-      Proxied::HttpTunneled(ref p) => p.inner().get_ref().0.connected(),
-      Proxied::Socks(ref p) => p.connected(),
-      Proxied::SocksTls(ref p) => p.inner().get_ref().0.connected(),
-    }
-  }
-}
-
-#[test]
-fn test_proxy_parse_from_env() {
-  fn parse(s: &str) -> Target {
-    Target::parse(s).unwrap()
-  }
-
-  // normal
-  match parse("http://127.0.0.1:6666") {
-    Target::Http { dst, auth } => {
-      assert_eq!(dst, "http://127.0.0.1:6666");
-      assert!(auth.is_none());
-    }
-    _ => panic!("bad target"),
-  }
-
-  // without scheme
-  match parse("127.0.0.1:6666") {
-    Target::Http { dst, auth } => {
-      assert_eq!(dst, "http://127.0.0.1:6666");
-      assert!(auth.is_none());
-    }
-    _ => panic!("bad target"),
-  }
-
-  // with userinfo
-  match parse("user:pass@127.0.0.1:6666") {
-    Target::Http { dst, auth } => {
-      assert_eq!(dst, "http://127.0.0.1:6666");
-      assert!(auth.is_some());
-      assert!(auth.unwrap().is_sensitive());
-    }
-    _ => panic!("bad target"),
-  }
-
-  // socks
-  match parse("socks5://user:pass@127.0.0.1:6666") {
-    Target::Socks { dst, auth } => {
-      assert_eq!(dst, "socks5://127.0.0.1:6666");
-      assert!(auth.is_some());
-    }
-    _ => panic!("bad target"),
-  }
-
-  // socks5h
-  match parse("socks5h://localhost:6666") {
-    Target::Socks { dst, auth } => {
-      assert_eq!(dst, "socks5h://localhost:6666");
-      assert!(auth.is_none());
-    }
-    _ => panic!("bad target"),
-  }
-}
-
-#[test]
-fn test_domain_matcher() {
-  let domains = vec![".foo.bar".into(), "bar.foo".into()];
-  let matcher = DomainMatcher(domains);
-
-  // domains match with leading `.`
-  assert!(matcher.contains("foo.bar"));
-  // subdomains match with leading `.`
-  assert!(matcher.contains("www.foo.bar"));
-
-  // domains match with no leading `.`
-  assert!(matcher.contains("bar.foo"));
-  // subdomains match with no leading `.`
-  assert!(matcher.contains("www.bar.foo"));
-
-  // non-subdomain string prefixes don't match
-  assert!(!matcher.contains("notfoo.bar"));
-  assert!(!matcher.contains("notbar.foo"));
-}
-
-#[test]
-fn test_no_proxy_wildcard() {
-  let no_proxy = NoProxy::from_string("*").unwrap();
-  assert!(no_proxy.contains("any.where"));
-}
-
-#[test]
-fn test_no_proxy_ip_ranges() {
-  let no_proxy = NoProxy::from_string(
-    ".foo.bar, bar.baz,10.42.1.1/24,::1,10.124.7.8,2001::/17",
-  )
-  .unwrap();
-
-  let should_not_match = [
-    // random url, not in no_proxy
-    "deno.com",
-    // make sure that random non-subdomain string prefixes don't match
-    "notfoo.bar",
-    // make sure that random non-subdomain string prefixes don't match
-    "notbar.baz",
-    // ipv4 address out of range
-    "10.43.1.1",
-    // ipv4 address out of range
-    "10.124.7.7",
-    // ipv6 address out of range
-    "[ffff:db8:a0b:12f0::1]",
-    // ipv6 address out of range
-    "[2005:db8:a0b:12f0::1]",
-  ];
-
-  for host in &should_not_match {
-    assert!(!no_proxy.contains(host), "should not contain {:?}", host);
-  }
-
-  let should_match = [
-    // make sure subdomains (with leading .) match
-    "hello.foo.bar",
-    // make sure exact matches (without leading .) match (also makes sure spaces between entries work)
-    "bar.baz",
-    // make sure subdomains (without leading . in no_proxy) match
-    "foo.bar.baz",
-    // make sure subdomains (without leading . in no_proxy) match - this differs from cURL
-    "foo.bar",
-    // ipv4 address match within range
-    "10.42.1.100",
-    // ipv6 address exact match
-    "[::1]",
-    // ipv6 address match within range
-    "[2001:db8:a0b:12f0::1]",
-    // ipv4 address exact match
-    "10.124.7.8",
-  ];
-
-  for host in &should_match {
-    assert!(no_proxy.contains(host), "should contain {:?}", host);
-  }
-}
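The deleted `proxy.rs` above carries its NO_PROXY semantics in doc comments and tests, and the core domain rule is small enough to distill. A self-contained sketch (not part of the commit) of the rule the removed `DomainMatcher::contains` implements: an entry matches the exact host and all of its subdomains, with or without a leading dot, and `*` matches everything:

```rust
// Distilled NO_PROXY domain matching, mirroring the deleted DomainMatcher.
fn no_proxy_domain_match(entry: &str, host: &str) -> bool {
  if entry == "*" {
    return true; // the sole wildcard allowed
  }
  if entry == host || entry.strip_prefix('.') == Some(host) {
    return true; // exact match; a leading dot is optional
  }
  // Subdomain match: host ends with entry and the boundary falls on a dot.
  host.ends_with(entry)
    && (entry.starts_with('.')
      || host.as_bytes().get(host.len() - entry.len() - 1) == Some(&b'.'))
}

fn main() {
  assert!(no_proxy_domain_match(".foo.bar", "www.foo.bar"));
  assert!(no_proxy_domain_match("foo.bar", "www.foo.bar")); // differs from cURL
  assert!(!no_proxy_domain_match("foo.bar", "notfoo.bar"));
  println!("NO_PROXY domain rules hold");
}
```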