summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--Cargo.lock4
-rw-r--r--cli/Cargo.toml2
-rw-r--r--cli/args/flags.rs7
-rw-r--r--cli/args/mod.rs13
-rw-r--r--cli/args/package_json.rs27
-rw-r--r--cli/factory.rs50
-rw-r--r--cli/graph_util.rs4
-rw-r--r--cli/lsp/documents.rs6
-rw-r--r--cli/lsp/language_server.rs11
-rw-r--r--cli/main.rs3
-rw-r--r--cli/module_loader.rs76
-rw-r--r--cli/npm/installer.rs26
-rw-r--r--cli/npm/resolution.rs4
-rw-r--r--cli/npm/resolvers/common.rs6
-rw-r--r--cli/npm/resolvers/global.rs6
-rw-r--r--cli/npm/resolvers/local.rs5
-rw-r--r--cli/npm/resolvers/mod.rs26
-rw-r--r--cli/resolver.rs116
-rw-r--r--cli/standalone/binary.rs314
-rw-r--r--cli/standalone/file_system.rs337
-rw-r--r--cli/standalone/mod.rs255
-rw-r--r--cli/standalone/virtual_fs.rs983
-rw-r--r--cli/tests/integration/compile_tests.rs305
-rw-r--r--cli/tests/integration/npm_tests.rs10
-rw-r--r--cli/tests/testdata/compile/npm_fs/main.out1
-rw-r--r--cli/tests/testdata/compile/npm_fs/main.ts259
-rw-r--r--cli/tests/testdata/npm/registry/@denotest/esm-basic/1.0.0/main.d.mts1
-rw-r--r--cli/tests/testdata/npm/registry/@denotest/esm-basic/1.0.0/main.mjs2
-rw-r--r--cli/tests/testdata/package_json/basic/main.info.out2
-rw-r--r--cli/tools/compile.rs (renamed from cli/tools/standalone.rs)15
-rw-r--r--cli/tools/mod.rs2
-rw-r--r--cli/tools/task.rs3
-rw-r--r--cli/tools/vendor/test.rs9
-rw-r--r--cli/util/fs.rs9
-rw-r--r--ext/fs/interface.rs2
-rw-r--r--ext/io/fs.rs9
-rw-r--r--runtime/build.rs1
-rw-r--r--runtime/clippy.toml45
-rw-r--r--runtime/examples/hello_runtime.rs6
-rw-r--r--runtime/fs_util.rs25
-rw-r--r--runtime/ops/os/mod.rs1
-rw-r--r--runtime/ops/os/sys_info.rs1
-rw-r--r--test_util/src/builders.rs1
-rw-r--r--test_util/src/temp_dir.rs4
44 files changed, 2733 insertions, 261 deletions
diff --git a/Cargo.lock b/Cargo.lock
index 4fb268627..48351f7a3 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -2966,9 +2966,9 @@ dependencies = [
[[package]]
name = "monch"
-version = "0.4.1"
+version = "0.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1120c1ab92ab8cdacb3b89ac9a214f512d2e78e90e3b57c00d9551ced19f646f"
+checksum = "bb73e1dc7d232e1ab47ef27f45fa1d173a0979b370e763a9d0584556011150e0"
[[package]]
name = "napi-build"
diff --git a/cli/Cargo.toml b/cli/Cargo.toml
index 7d0e99d39..b415f53d8 100644
--- a/cli/Cargo.toml
+++ b/cli/Cargo.toml
@@ -84,7 +84,7 @@ lazy-regex.workspace = true
libc.workspace = true
log = { workspace = true, features = ["serde"] }
lsp-types.workspace = true
-monch = "=0.4.1"
+monch = "=0.4.2"
notify.workspace = true
once_cell.workspace = true
os_pipe.workspace = true
diff --git a/cli/args/flags.rs b/cli/args/flags.rs
index 3d88cda91..fa28241a3 100644
--- a/cli/args/flags.rs
+++ b/cli/args/flags.rs
@@ -527,8 +527,11 @@ impl Flags {
.ok()
}
Task(_) | Check(_) | Coverage(_) | Cache(_) | Info(_) | Eval(_)
- | Test(_) | Bench(_) | Repl(_) => std::env::current_dir().ok(),
- _ => None,
+ | Test(_) | Bench(_) | Repl(_) | Compile(_) => {
+ std::env::current_dir().ok()
+ }
+ Bundle(_) | Completions(_) | Doc(_) | Fmt(_) | Init(_) | Install(_)
+ | Uninstall(_) | Lsp | Lint(_) | Types | Upgrade(_) | Vendor(_) => None,
}
}
diff --git a/cli/args/mod.rs b/cli/args/mod.rs
index b5975536a..31035fdd0 100644
--- a/cli/args/mod.rs
+++ b/cli/args/mod.rs
@@ -33,6 +33,7 @@ pub use config_file::TsTypeLib;
pub use flags::*;
pub use lockfile::Lockfile;
pub use lockfile::LockfileError;
+pub use package_json::PackageJsonDepsProvider;
use deno_ast::ModuleSpecifier;
use deno_core::anyhow::anyhow;
@@ -556,7 +557,7 @@ struct CliOptionOverrides {
import_map_specifier: Option<Option<ModuleSpecifier>>,
}
-/// Holds the resolved options of many sources used by sub commands
+/// Holds the resolved options of many sources used by subcommands
/// and provides some helper function for creating common objects.
pub struct CliOptions {
// the source of the options is a detail the rest of the
@@ -1303,6 +1304,16 @@ fn has_flag_env_var(name: &str) -> bool {
matches!(value.as_ref().map(|s| s.as_str()), Ok("1"))
}
+pub fn npm_pkg_req_ref_to_binary_command(
+ req_ref: &NpmPackageReqReference,
+) -> String {
+ let binary_name = req_ref
+ .sub_path
+ .as_deref()
+ .unwrap_or(req_ref.req.name.as_str());
+ binary_name.to_string()
+}
+
#[cfg(test)]
mod test {
use super::*;
diff --git a/cli/args/package_json.rs b/cli/args/package_json.rs
index c4d4ce956..a8c6eaad4 100644
--- a/cli/args/package_json.rs
+++ b/cli/args/package_json.rs
@@ -28,6 +28,33 @@ pub enum PackageJsonDepValueParseError {
pub type PackageJsonDeps =
BTreeMap<String, Result<NpmPackageReq, PackageJsonDepValueParseError>>;
+#[derive(Debug, Default)]
+pub struct PackageJsonDepsProvider(Option<PackageJsonDeps>);
+
+impl PackageJsonDepsProvider {
+ pub fn new(deps: Option<PackageJsonDeps>) -> Self {
+ Self(deps)
+ }
+
+ pub fn deps(&self) -> Option<&PackageJsonDeps> {
+ self.0.as_ref()
+ }
+
+ pub fn reqs(&self) -> Vec<&NpmPackageReq> {
+ match &self.0 {
+ Some(deps) => {
+ let mut package_reqs = deps
+ .values()
+ .filter_map(|r| r.as_ref().ok())
+ .collect::<Vec<_>>();
+ package_reqs.sort(); // deterministic resolution
+ package_reqs
+ }
+ None => Vec::new(),
+ }
+ }
+}
+
/// Gets an application level package.json's npm package requirements.
///
/// Note that this function is not general purpose. It is specifically for
diff --git a/cli/factory.rs b/cli/factory.rs
index 3bc5ef9e2..a3da40036 100644
--- a/cli/factory.rs
+++ b/cli/factory.rs
@@ -1,9 +1,11 @@
// Copyright 2018-2023 the Deno authors. All rights reserved. MIT license.
+use crate::args::npm_pkg_req_ref_to_binary_command;
use crate::args::CliOptions;
use crate::args::DenoSubcommand;
use crate::args::Flags;
use crate::args::Lockfile;
+use crate::args::PackageJsonDepsProvider;
use crate::args::StorageKeyResolver;
use crate::args::TsConfigType;
use crate::cache::Caches;
@@ -30,6 +32,7 @@ use crate::npm::NpmCache;
use crate::npm::NpmResolution;
use crate::npm::PackageJsonDepsInstaller;
use crate::resolver::CliGraphResolver;
+use crate::standalone::DenoCompileBinaryWriter;
use crate::tools::check::TypeChecker;
use crate::util::progress_bar::ProgressBar;
use crate::util::progress_bar::ProgressBarStyle;
@@ -151,6 +154,7 @@ struct CliFactoryServices {
npm_cache: Deferred<Arc<NpmCache>>,
npm_resolver: Deferred<Arc<CliNpmResolver>>,
npm_resolution: Deferred<Arc<NpmResolution>>,
+ package_json_deps_provider: Deferred<Arc<PackageJsonDepsProvider>>,
package_json_deps_installer: Deferred<Arc<PackageJsonDepsInstaller>>,
text_only_progress_bar: Deferred<ProgressBar>,
type_checker: Deferred<Arc<TypeChecker>>,
@@ -301,8 +305,9 @@ impl CliFactory {
.npm_resolver
.get_or_try_init_async(async {
let npm_resolution = self.npm_resolution().await?;
+ let fs = self.fs().clone();
let npm_fs_resolver = create_npm_fs_resolver(
- self.fs().clone(),
+ fs.clone(),
self.npm_cache()?.clone(),
self.text_only_progress_bar(),
CliNpmRegistryApi::default_url().to_owned(),
@@ -310,6 +315,7 @@ impl CliFactory {
self.options.node_modules_dir_path(),
);
Ok(Arc::new(CliNpmResolver::new(
+ fs.clone(),
npm_resolution.clone(),
npm_fs_resolver,
self.maybe_lockfile().as_ref().cloned(),
@@ -318,6 +324,14 @@ impl CliFactory {
.await
}
+ pub fn package_json_deps_provider(&self) -> &Arc<PackageJsonDepsProvider> {
+ self.services.package_json_deps_provider.get_or_init(|| {
+ Arc::new(PackageJsonDepsProvider::new(
+ self.options.maybe_package_json_deps(),
+ ))
+ })
+ }
+
pub async fn package_json_deps_installer(
&self,
) -> Result<&Arc<PackageJsonDepsInstaller>, AnyError> {
@@ -325,12 +339,10 @@ impl CliFactory {
.services
.package_json_deps_installer
.get_or_try_init_async(async {
- let npm_api = self.npm_api()?;
- let npm_resolution = self.npm_resolution().await?;
Ok(Arc::new(PackageJsonDepsInstaller::new(
- npm_api.clone(),
- npm_resolution.clone(),
- self.options.maybe_package_json_deps(),
+ self.package_json_deps_provider().clone(),
+ self.npm_api()?.clone(),
+ self.npm_resolution().await?.clone(),
)))
})
.await
@@ -365,6 +377,7 @@ impl CliFactory {
self.options.no_npm(),
self.npm_api()?.clone(),
self.npm_resolution().await?.clone(),
+ self.package_json_deps_provider().clone(),
self.package_json_deps_installer().await?.clone(),
)))
})
@@ -535,6 +548,21 @@ impl CliFactory {
self.services.cjs_resolutions.get_or_init(Default::default)
}
+ pub async fn create_compile_binary_writer(
+ &self,
+ ) -> Result<DenoCompileBinaryWriter, AnyError> {
+ Ok(DenoCompileBinaryWriter::new(
+ self.file_fetcher()?,
+ self.http_client(),
+ self.deno_dir()?,
+ self.npm_api()?,
+ self.npm_cache()?,
+ self.npm_resolver().await?,
+ self.npm_resolution().await?,
+ self.package_json_deps_provider(),
+ ))
+ }
+
/// Gets a function that can be used to create a CliMainWorkerFactory
/// for a file watcher.
pub async fn create_cli_main_worker_factory_func(
@@ -572,6 +600,7 @@ impl CliFactory {
NpmModuleLoader::new(
cjs_resolutions.clone(),
node_code_translator.clone(),
+ fs.clone(),
node_resolver.clone(),
),
)),
@@ -587,6 +616,7 @@ impl CliFactory {
&self,
) -> Result<CliMainWorkerFactory, AnyError> {
let node_resolver = self.node_resolver().await?;
+ let fs = self.fs();
Ok(CliMainWorkerFactory::new(
StorageKeyResolver::from_options(&self.options),
self.npm_resolver().await?.clone(),
@@ -603,6 +633,7 @@ impl CliFactory {
NpmModuleLoader::new(
self.cjs_resolutions().clone(),
self.node_code_translator().await?.clone(),
+ fs.clone(),
node_resolver.clone(),
),
)),
@@ -637,11 +668,8 @@ impl CliFactory {
if let Ok(pkg_ref) = NpmPackageReqReference::from_str(&flags.script) {
// if the user ran a binary command, we'll need to set process.argv[0]
// to be the name of the binary command instead of deno
- let binary_name = pkg_ref
- .sub_path
- .as_deref()
- .unwrap_or(pkg_ref.req.name.as_str());
- maybe_binary_command_name = Some(binary_name.to_string());
+ maybe_binary_command_name =
+ Some(npm_pkg_req_ref_to_binary_command(&pkg_ref));
}
}
maybe_binary_command_name
diff --git a/cli/graph_util.rs b/cli/graph_util.rs
index f9dafbb57..53d06071c 100644
--- a/cli/graph_util.rs
+++ b/cli/graph_util.rs
@@ -345,10 +345,10 @@ pub fn error_for_any_npm_specifier(
for module in graph.modules() {
match module {
Module::Npm(module) => {
- bail!("npm specifiers have not yet been implemented for this sub command (https://github.com/denoland/deno/issues/15960). Found: {}", module.specifier)
+ bail!("npm specifiers have not yet been implemented for this subcommand (https://github.com/denoland/deno/issues/15960). Found: {}", module.specifier)
}
Module::Node(module) => {
- bail!("Node specifiers have not yet been implemented for this sub command (https://github.com/denoland/deno/issues/15960). Found: node:{}", module.module_name)
+ bail!("Node specifiers have not yet been implemented for this subcommand (https://github.com/denoland/deno/issues/15960). Found: node:{}", module.module_name)
}
Module::Esm(_) | Module::Json(_) | Module::External(_) => {}
}
diff --git a/cli/lsp/documents.rs b/cli/lsp/documents.rs
index 3f77eaaa2..b55d3ca20 100644
--- a/cli/lsp/documents.rs
+++ b/cli/lsp/documents.rs
@@ -46,6 +46,7 @@ use deno_semver::npm::NpmPackageReqReference;
use indexmap::IndexMap;
use lsp::Url;
use once_cell::sync::Lazy;
+use package_json::PackageJsonDepsProvider;
use std::collections::BTreeMap;
use std::collections::HashMap;
use std::collections::HashSet;
@@ -1218,10 +1219,12 @@ impl Documents {
maybe_jsx_config.as_ref(),
maybe_package_json_deps.as_ref(),
);
+ let deps_provider =
+ Arc::new(PackageJsonDepsProvider::new(maybe_package_json_deps));
let deps_installer = Arc::new(PackageJsonDepsInstaller::new(
+ deps_provider.clone(),
npm_registry_api.clone(),
npm_resolution.clone(),
- maybe_package_json_deps,
));
self.resolver = Arc::new(CliGraphResolver::new(
maybe_jsx_config,
@@ -1229,6 +1232,7 @@ impl Documents {
false,
npm_registry_api,
npm_resolution,
+ deps_provider,
deps_installer,
));
self.imports = Arc::new(
diff --git a/cli/lsp/language_server.rs b/cli/lsp/language_server.rs
index d00b8f313..d32d12ec8 100644
--- a/cli/lsp/language_server.rs
+++ b/cli/lsp/language_server.rs
@@ -457,8 +457,9 @@ fn create_lsp_structs(
));
let resolution =
Arc::new(NpmResolution::from_serialized(api.clone(), None, None));
+ let fs = Arc::new(deno_fs::RealFs);
let fs_resolver = create_npm_fs_resolver(
- Arc::new(deno_fs::RealFs),
+ fs.clone(),
npm_cache.clone(),
&progress_bar,
registry_url.clone(),
@@ -468,7 +469,12 @@ fn create_lsp_structs(
(
api,
npm_cache,
- Arc::new(CliNpmResolver::new(resolution.clone(), fs_resolver, None)),
+ Arc::new(CliNpmResolver::new(
+ fs,
+ resolution.clone(),
+ fs_resolver,
+ None,
+ )),
resolution,
)
}
@@ -711,6 +717,7 @@ impl Inner {
));
let node_fs = Arc::new(deno_fs::RealFs);
let npm_resolver = Arc::new(CliNpmResolver::new(
+ node_fs.clone(),
npm_resolution.clone(),
create_npm_fs_resolver(
node_fs.clone(),
diff --git a/cli/main.rs b/cli/main.rs
index c3421b0cd..03e7cf41e 100644
--- a/cli/main.rs
+++ b/cli/main.rs
@@ -36,7 +36,6 @@ static GLOBAL: Jemalloc = Jemalloc;
use crate::args::flags_from_vec;
use crate::args::DenoSubcommand;
use crate::args::Flags;
-use crate::resolver::CliGraphResolver;
use crate::util::display;
use crate::util::v8::get_v8_flags_from_env;
use crate::util::v8::init_v8_flags;
@@ -97,7 +96,7 @@ async fn run_subcommand(flags: Flags) -> Result<i32, AnyError> {
Ok(0)
}
DenoSubcommand::Compile(compile_flags) => {
- tools::standalone::compile(flags, compile_flags).await?;
+ tools::compile::compile(flags, compile_flags).await?;
Ok(0)
}
DenoSubcommand::Coverage(coverage_flags) => {
diff --git a/cli/module_loader.rs b/cli/module_loader.rs
index 0ed84a20f..5465ad1b8 100644
--- a/cli/module_loader.rs
+++ b/cli/module_loader.rs
@@ -46,11 +46,13 @@ use deno_graph::JsonModule;
use deno_graph::Module;
use deno_graph::Resolution;
use deno_lockfile::Lockfile;
+use deno_runtime::deno_fs;
use deno_runtime::deno_node;
use deno_runtime::deno_node::NodeResolution;
use deno_runtime::deno_node::NodeResolutionMode;
use deno_runtime::deno_node::NodeResolver;
use deno_runtime::permissions::PermissionsContainer;
+use deno_semver::npm::NpmPackageNvReference;
use deno_semver::npm::NpmPackageReqReference;
use std::borrow::Cow;
use std::cell::RefCell;
@@ -417,13 +419,12 @@ impl CliModuleLoader {
} else {
&self.root_permissions
};
- let code_source = if let Some(code_source) =
- self.shared.npm_module_loader.load_sync(
- specifier,
- maybe_referrer,
- permissions,
- )? {
- code_source
+ let code_source = if let Some(result) = self
+ .shared
+ .npm_module_loader
+ .load_sync_if_in_npm_package(specifier, maybe_referrer, permissions)
+ {
+ result?
} else {
self
.shared
@@ -494,7 +495,7 @@ impl ModuleLoader for CliModuleLoader {
Some(Module::Npm(module)) => self
.shared
.npm_module_loader
- .resolve_npm_module(module, permissions),
+ .resolve_nv_ref(&module.nv_reference, permissions),
Some(Module::Node(module)) => {
deno_node::resolve_builtin_node_module(&module.module_name)
}
@@ -547,7 +548,7 @@ impl ModuleLoader for CliModuleLoader {
return self
.shared
.npm_module_loader
- .resolve_for_repl(&reference, permissions);
+ .resolve_req_reference(&reference, permissions);
}
}
}
@@ -652,6 +653,7 @@ impl SourceMapGetter for CliSourceMapGetter {
pub struct NpmModuleLoader {
cjs_resolutions: Arc<CjsResolutionStore>,
node_code_translator: Arc<CliNodeCodeTranslator>,
+ fs: Arc<dyn deno_fs::FileSystem>,
node_resolver: Arc<NodeResolver>,
}
@@ -659,11 +661,13 @@ impl NpmModuleLoader {
pub fn new(
cjs_resolutions: Arc<CjsResolutionStore>,
node_code_translator: Arc<CliNodeCodeTranslator>,
+ fs: Arc<dyn deno_fs::FileSystem>,
node_resolver: Arc<NodeResolver>,
) -> Self {
Self {
cjs_resolutions,
node_code_translator,
+ fs,
node_resolver,
}
}
@@ -693,21 +697,21 @@ impl NpmModuleLoader {
}
}
- pub fn resolve_npm_module(
+ pub fn resolve_nv_ref(
&self,
- module: &deno_graph::NpmModule,
+ nv_ref: &NpmPackageNvReference,
permissions: &PermissionsContainer,
) -> Result<ModuleSpecifier, AnyError> {
self
.handle_node_resolve_result(self.node_resolver.resolve_npm_reference(
- &module.nv_reference,
+ nv_ref,
NodeResolutionMode::Execution,
permissions,
))
- .with_context(|| format!("Could not resolve '{}'.", module.nv_reference))
+ .with_context(|| format!("Could not resolve '{}'.", nv_ref))
}
- pub fn resolve_for_repl(
+ pub fn resolve_req_reference(
&self,
reference: &NpmPackageReqReference,
permissions: &PermissionsContainer,
@@ -733,25 +737,39 @@ impl NpmModuleLoader {
}
}
- pub fn load_sync(
+ pub fn load_sync_if_in_npm_package(
&self,
specifier: &ModuleSpecifier,
maybe_referrer: Option<&ModuleSpecifier>,
permissions: &PermissionsContainer,
- ) -> Result<Option<ModuleCodeSource>, AnyError> {
- if !self.node_resolver.in_npm_package(specifier) {
- return Ok(None);
+ ) -> Option<Result<ModuleCodeSource, AnyError>> {
+ if self.node_resolver.in_npm_package(specifier) {
+ Some(self.load_sync(specifier, maybe_referrer, permissions))
+ } else {
+ None
}
+ }
+
+ fn load_sync(
+ &self,
+ specifier: &ModuleSpecifier,
+ maybe_referrer: Option<&ModuleSpecifier>,
+ permissions: &PermissionsContainer,
+ ) -> Result<ModuleCodeSource, AnyError> {
let file_path = specifier.to_file_path().unwrap();
- let code = std::fs::read_to_string(&file_path).with_context(|| {
- let mut msg = "Unable to load ".to_string();
- msg.push_str(&file_path.to_string_lossy());
- if let Some(referrer) = &maybe_referrer {
- msg.push_str(" imported from ");
- msg.push_str(referrer.as_str());
- }
- msg
- })?;
+ let code = self
+ .fs
+ .read_to_string(&file_path)
+ .map_err(AnyError::from)
+ .with_context(|| {
+ let mut msg = "Unable to load ".to_string();
+ msg.push_str(&file_path.to_string_lossy());
+ if let Some(referrer) = &maybe_referrer {
+ msg.push_str(" imported from ");
+ msg.push_str(referrer.as_str());
+ }
+ msg
+ })?;
let code = if self.cjs_resolutions.contains(specifier) {
// translate cjs to esm if it's cjs and inject node globals
@@ -766,11 +784,11 @@ impl NpmModuleLoader {
.node_code_translator
.esm_code_with_node_globals(specifier, &code)?
};
- Ok(Some(ModuleCodeSource {
+ Ok(ModuleCodeSource {
code: code.into(),
found_url: specifier.clone(),
media_type: MediaType::from_specifier(specifier),
- }))
+ })
}
fn handle_node_resolve_result(
diff --git a/cli/npm/installer.rs b/cli/npm/installer.rs
index bdcafb542..43f79d8f0 100644
--- a/cli/npm/installer.rs
+++ b/cli/npm/installer.rs
@@ -10,7 +10,7 @@ use deno_npm::registry::NpmRegistryApi;
use deno_npm::registry::NpmRegistryPackageInfoLoadError;
use deno_semver::npm::NpmPackageReq;
-use crate::args::package_json::PackageJsonDeps;
+use crate::args::PackageJsonDepsProvider;
use crate::util::sync::AtomicFlag;
use super::CliNpmRegistryApi;
@@ -18,23 +18,13 @@ use super::NpmResolution;
#[derive(Debug)]
struct PackageJsonDepsInstallerInner {
+ deps_provider: Arc<PackageJsonDepsProvider>,
has_installed_flag: AtomicFlag,
npm_registry_api: Arc<CliNpmRegistryApi>,
npm_resolution: Arc<NpmResolution>,
- package_deps: PackageJsonDeps,
}
impl PackageJsonDepsInstallerInner {
- pub fn reqs(&self) -> Vec<&NpmPackageReq> {
- let mut package_reqs = self
- .package_deps
- .values()
- .filter_map(|r| r.as_ref().ok())
- .collect::<Vec<_>>();
- package_reqs.sort(); // deterministic resolution
- package_reqs
- }
-
pub fn reqs_with_info_futures(
&self,
) -> FuturesOrdered<
@@ -45,7 +35,7 @@ impl PackageJsonDepsInstallerInner {
>,
>,
> {
- let package_reqs = self.reqs();
+ let package_reqs = self.deps_provider.reqs();
FuturesOrdered::from_iter(package_reqs.into_iter().map(|req| {
let api = self.npm_registry_api.clone();
@@ -63,22 +53,18 @@ pub struct PackageJsonDepsInstaller(Option<PackageJsonDepsInstallerInner>);
impl PackageJsonDepsInstaller {
pub fn new(
+ deps_provider: Arc<PackageJsonDepsProvider>,
npm_registry_api: Arc<CliNpmRegistryApi>,
npm_resolution: Arc<NpmResolution>,
- deps: Option<PackageJsonDeps>,
) -> Self {
- Self(deps.map(|package_deps| PackageJsonDepsInstallerInner {
+ Self(Some(PackageJsonDepsInstallerInner {
+ deps_provider,
has_installed_flag: Default::default(),
npm_registry_api,
npm_resolution,
- package_deps,
}))
}
- pub fn package_deps(&self) -> Option<&PackageJsonDeps> {
- self.0.as_ref().map(|inner| &inner.package_deps)
- }
-
/// Installs the top level dependencies in the package.json file
/// without going through and resolving the descendant dependencies yet.
pub async fn ensure_top_level_install(&self) -> Result<(), AnyError> {
diff --git a/cli/npm/resolution.rs b/cli/npm/resolution.rs
index 1b191b245..edc7ec647 100644
--- a/cli/npm/resolution.rs
+++ b/cli/npm/resolution.rs
@@ -237,6 +237,10 @@ impl NpmResolution {
Ok(nv)
}
+ pub fn all_packages(&self) -> Vec<NpmResolutionPackage> {
+ self.snapshot.read().all_packages()
+ }
+
pub fn all_packages_partitioned(&self) -> NpmPackagesPartitioned {
self.snapshot.read().all_packages_partitioned()
}
diff --git a/cli/npm/resolvers/common.rs b/cli/npm/resolvers/common.rs
index ccba00d43..fc040a7cc 100644
--- a/cli/npm/resolvers/common.rs
+++ b/cli/npm/resolvers/common.rs
@@ -12,6 +12,7 @@ use deno_core::futures;
use deno_core::url::Url;
use deno_npm::NpmPackageId;
use deno_npm::NpmResolutionPackage;
+use deno_runtime::deno_fs::FileSystem;
use deno_runtime::deno_node::NodePermissions;
use deno_runtime::deno_node::NodeResolutionMode;
@@ -90,6 +91,7 @@ pub async fn cache_packages(
}
pub fn ensure_registry_read_permission(
+ fs: &Arc<dyn FileSystem>,
permissions: &dyn NodePermissions,
registry_path: &Path,
path: &Path,
@@ -101,8 +103,8 @@ pub fn ensure_registry_read_permission(
.all(|c| !matches!(c, std::path::Component::ParentDir))
{
// todo(dsherret): cache this?
- if let Ok(registry_path) = std::fs::canonicalize(registry_path) {
- match std::fs::canonicalize(path) {
+ if let Ok(registry_path) = fs.realpath_sync(registry_path) {
+ match fs.realpath_sync(path) {
Ok(path) if path.starts_with(registry_path) => {
return Ok(());
}
diff --git a/cli/npm/resolvers/global.rs b/cli/npm/resolvers/global.rs
index 79df16187..fe8764b0c 100644
--- a/cli/npm/resolvers/global.rs
+++ b/cli/npm/resolvers/global.rs
@@ -14,6 +14,7 @@ use deno_npm::resolution::PackageNotFoundFromReferrerError;
use deno_npm::NpmPackageCacheFolderId;
use deno_npm::NpmPackageId;
use deno_npm::NpmResolutionPackage;
+use deno_runtime::deno_fs::FileSystem;
use deno_runtime::deno_node::NodePermissions;
use deno_runtime::deno_node::NodeResolutionMode;
@@ -28,6 +29,7 @@ use super::common::NpmPackageFsResolver;
/// Resolves packages from the global npm cache.
#[derive(Debug)]
pub struct GlobalNpmPackageResolver {
+ fs: Arc<dyn FileSystem>,
cache: Arc<NpmCache>,
resolution: Arc<NpmResolution>,
registry_url: Url,
@@ -35,11 +37,13 @@ pub struct GlobalNpmPackageResolver {
impl GlobalNpmPackageResolver {
pub fn new(
+ fs: Arc<dyn FileSystem>,
cache: Arc<NpmCache>,
registry_url: Url,
resolution: Arc<NpmResolution>,
) -> Self {
Self {
+ fs,
cache,
resolution,
registry_url,
@@ -130,7 +134,7 @@ impl NpmPackageFsResolver for GlobalNpmPackageResolver {
path: &Path,
) -> Result<(), AnyError> {
let registry_path = self.cache.registry_folder(&self.registry_url);
- ensure_registry_read_permission(permissions, &registry_path, path)
+ ensure_registry_read_permission(&self.fs, permissions, &registry_path, path)
}
}
diff --git a/cli/npm/resolvers/local.rs b/cli/npm/resolvers/local.rs
index 038d9eea1..cd1dc3671 100644
--- a/cli/npm/resolvers/local.rs
+++ b/cli/npm/resolvers/local.rs
@@ -154,7 +154,7 @@ impl NpmPackageFsResolver for LocalNpmPackageResolver {
loop {
current_folder = get_next_node_modules_ancestor(current_folder);
let sub_dir = join_package_name(current_folder, name);
- if sub_dir.is_dir() {
+ if self.fs.is_dir(&sub_dir) {
// if doing types resolution, only resolve the package if it specifies a types property
if mode.is_types() && !name.starts_with("@types/") {
let package_json = PackageJson::load_skip_read_permission(
@@ -173,7 +173,7 @@ impl NpmPackageFsResolver for LocalNpmPackageResolver {
if mode.is_types() && !name.starts_with("@types/") {
let sub_dir =
join_package_name(current_folder, &types_package_name(name));
- if sub_dir.is_dir() {
+ if self.fs.is_dir(&sub_dir) {
return Ok(sub_dir);
}
}
@@ -214,6 +214,7 @@ impl NpmPackageFsResolver for LocalNpmPackageResolver {
path: &Path,
) -> Result<(), AnyError> {
ensure_registry_read_permission(
+ &self.fs,
permissions,
&self.root_node_modules_path,
path,
diff --git a/cli/npm/resolvers/mod.rs b/cli/npm/resolvers/mod.rs
index 86d3840f3..f54e509f0 100644
--- a/cli/npm/resolvers/mod.rs
+++ b/cli/npm/resolvers/mod.rs
@@ -18,7 +18,7 @@ use deno_npm::resolution::NpmResolutionSnapshot;
use deno_npm::resolution::PackageReqNotFoundError;
use deno_npm::resolution::SerializedNpmResolutionSnapshot;
use deno_npm::NpmPackageId;
-use deno_runtime::deno_fs;
+use deno_runtime::deno_fs::FileSystem;
use deno_runtime::deno_node::NodePermissions;
use deno_runtime::deno_node::NodeResolutionMode;
use deno_runtime::deno_node::NpmResolver;
@@ -32,7 +32,7 @@ use serde::Deserialize;
use serde::Serialize;
use crate::args::Lockfile;
-use crate::util::fs::canonicalize_path_maybe_not_exists;
+use crate::util::fs::canonicalize_path_maybe_not_exists_with_fs;
use crate::util::progress_bar::ProgressBar;
use self::common::NpmPackageFsResolver;
@@ -49,6 +49,7 @@ pub struct NpmProcessState {
/// Brings together the npm resolution with the file system.
pub struct CliNpmResolver {
+ fs: Arc<dyn FileSystem>,
fs_resolver: Arc<dyn NpmPackageFsResolver>,
resolution: Arc<NpmResolution>,
maybe_lockfile: Option<Arc<Mutex<Lockfile>>>,
@@ -57,6 +58,7 @@ pub struct CliNpmResolver {
impl std::fmt::Debug for CliNpmResolver {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("NpmPackageResolver")
+ .field("fs", &"<omitted>")
.field("fs_resolver", &"<omitted>")
.field("resolution", &"<omitted>")
.field("maybe_lockfile", &"<omitted>")
@@ -66,11 +68,13 @@ impl std::fmt::Debug for CliNpmResolver {
impl CliNpmResolver {
pub fn new(
+ fs: Arc<dyn FileSystem>,
resolution: Arc<NpmResolution>,
fs_resolver: Arc<dyn NpmPackageFsResolver>,
maybe_lockfile: Option<Arc<Mutex<Lockfile>>>,
) -> Self {
Self {
+ fs,
fs_resolver,
resolution,
maybe_lockfile,
@@ -81,6 +85,10 @@ impl CliNpmResolver {
self.fs_resolver.root_dir_url()
}
+ pub fn node_modules_path(&self) -> Option<PathBuf> {
+ self.fs_resolver.node_modules_path()
+ }
+
pub fn resolve_pkg_id_from_pkg_req(
&self,
req: &NpmPackageReq,
@@ -88,12 +96,17 @@ impl CliNpmResolver {
self.resolution.resolve_pkg_id_from_pkg_req(req)
}
- fn resolve_pkg_folder_from_deno_module_at_pkg_id(
+ pub fn resolve_pkg_folder_from_pkg_id(
&self,
pkg_id: &NpmPackageId,
) -> Result<PathBuf, AnyError> {
let path = self.fs_resolver.package_folder(pkg_id)?;
- let path = canonicalize_path_maybe_not_exists(&path)?;
+ let path = canonicalize_path_maybe_not_exists_with_fs(&path, |path| {
+ self
+ .fs
+ .realpath_sync(path)
+ .map_err(|err| err.into_io_error())
+ })?;
log::debug!(
"Resolved package folder of {} to {}",
pkg_id.as_serialized(),
@@ -237,7 +250,7 @@ impl NpmResolver for CliNpmResolver {
pkg_nv: &NpmPackageNv,
) -> Result<PathBuf, AnyError> {
let pkg_id = self.resolution.resolve_pkg_id_from_deno_module(pkg_nv)?;
- self.resolve_pkg_folder_from_deno_module_at_pkg_id(&pkg_id)
+ self.resolve_pkg_folder_from_pkg_id(&pkg_id)
}
fn resolve_pkg_id_from_pkg_req(
@@ -270,7 +283,7 @@ impl NpmResolver for CliNpmResolver {
}
pub fn create_npm_fs_resolver(
- fs: Arc<dyn deno_fs::FileSystem>,
+ fs: Arc<dyn FileSystem>,
cache: Arc<NpmCache>,
progress_bar: &ProgressBar,
registry_url: Url,
@@ -287,6 +300,7 @@ pub fn create_npm_fs_resolver(
resolution,
)),
None => Arc::new(GlobalNpmPackageResolver::new(
+ fs,
cache,
registry_url,
resolution,
diff --git a/cli/resolver.rs b/cli/resolver.rs
index 113a06fe7..7f49116f4 100644
--- a/cli/resolver.rs
+++ b/cli/resolver.rs
@@ -20,16 +20,86 @@ use std::sync::Arc;
use crate::args::package_json::PackageJsonDeps;
use crate::args::JsxImportSourceConfig;
+use crate::args::PackageJsonDepsProvider;
use crate::npm::CliNpmRegistryApi;
use crate::npm::NpmResolution;
use crate::npm::PackageJsonDepsInstaller;
use crate::util::sync::AtomicFlag;
+/// Result of checking if a specifier is mapped via
+/// an import map or package.json.
+pub enum MappedResolution {
+ None,
+ PackageJson(ModuleSpecifier),
+ ImportMap(ModuleSpecifier),
+}
+
+impl MappedResolution {
+ pub fn into_specifier(self) -> Option<ModuleSpecifier> {
+ match self {
+ MappedResolution::None => Option::None,
+ MappedResolution::PackageJson(specifier) => Some(specifier),
+ MappedResolution::ImportMap(specifier) => Some(specifier),
+ }
+ }
+}
+
+/// Resolver for specifiers that could be mapped via an
+/// import map or package.json.
+#[derive(Debug)]
+pub struct MappedSpecifierResolver {
+ maybe_import_map: Option<Arc<ImportMap>>,
+ package_json_deps_provider: Arc<PackageJsonDepsProvider>,
+}
+
+impl MappedSpecifierResolver {
+ pub fn new(
+ maybe_import_map: Option<Arc<ImportMap>>,
+ package_json_deps_provider: Arc<PackageJsonDepsProvider>,
+ ) -> Self {
+ Self {
+ maybe_import_map,
+ package_json_deps_provider,
+ }
+ }
+
+ pub fn resolve(
+ &self,
+ specifier: &str,
+ referrer: &ModuleSpecifier,
+ ) -> Result<MappedResolution, AnyError> {
+ // attempt to resolve with the import map first
+ let maybe_import_map_err = match self
+ .maybe_import_map
+ .as_ref()
+ .map(|import_map| import_map.resolve(specifier, referrer))
+ {
+ Some(Ok(value)) => return Ok(MappedResolution::ImportMap(value)),
+ Some(Err(err)) => Some(err),
+ None => None,
+ };
+
+ // then with package.json
+ if let Some(deps) = self.package_json_deps_provider.deps() {
+ if let Some(specifier) = resolve_package_json_dep(specifier, deps)? {
+ return Ok(MappedResolution::PackageJson(specifier));
+ }
+ }
+
+ // otherwise, surface the import map error or try resolving when has no import map
+ if let Some(err) = maybe_import_map_err {
+ Err(err.into())
+ } else {
+ Ok(MappedResolution::None)
+ }
+ }
+}
+
/// A resolver that takes care of resolution, taking into account loaded
/// import map, JSX settings.
#[derive(Debug)]
pub struct CliGraphResolver {
- maybe_import_map: Option<Arc<ImportMap>>,
+ mapped_specifier_resolver: MappedSpecifierResolver,
maybe_default_jsx_import_source: Option<String>,
maybe_jsx_import_source_module: Option<String>,
no_npm: bool,
@@ -51,7 +121,10 @@ impl Default for CliGraphResolver {
None,
));
Self {
- maybe_import_map: Default::default(),
+ mapped_specifier_resolver: MappedSpecifierResolver {
+ maybe_import_map: Default::default(),
+ package_json_deps_provider: Default::default(),
+ },
maybe_default_jsx_import_source: Default::default(),
maybe_jsx_import_source_module: Default::default(),
no_npm: false,
@@ -71,10 +144,14 @@ impl CliGraphResolver {
no_npm: bool,
npm_registry_api: Arc<CliNpmRegistryApi>,
npm_resolution: Arc<NpmResolution>,
+ package_json_deps_provider: Arc<PackageJsonDepsProvider>,
package_json_deps_installer: Arc<PackageJsonDepsInstaller>,
) -> Self {
Self {
- maybe_import_map,
+ mapped_specifier_resolver: MappedSpecifierResolver {
+ maybe_import_map,
+ package_json_deps_provider,
+ },
maybe_default_jsx_import_source: maybe_jsx_import_source_config
.as_ref()
.and_then(|c| c.default_specifier.clone()),
@@ -135,31 +212,20 @@ impl Resolver for CliGraphResolver {
specifier: &str,
referrer: &ModuleSpecifier,
) -> Result<ModuleSpecifier, AnyError> {
- // attempt to resolve with the import map first
- let maybe_import_map_err = match self
- .maybe_import_map
- .as_ref()
- .map(|import_map| import_map.resolve(specifier, referrer))
- {
- Some(Ok(value)) => return Ok(value),
- Some(Err(err)) => Some(err),
- None => None,
- };
-
- // then with package.json
- if let Some(deps) = self.package_json_deps_installer.package_deps().as_ref()
+ use MappedResolution::*;
+ match self
+ .mapped_specifier_resolver
+ .resolve(specifier, referrer)?
{
- if let Some(specifier) = resolve_package_json_dep(specifier, deps)? {
+ ImportMap(specifier) => Ok(specifier),
+ PackageJson(specifier) => {
+ // found a specifier in the package.json, so mark that
+ // we need to do an "npm install" later
self.found_package_json_dep_flag.raise();
- return Ok(specifier);
+ Ok(specifier)
}
- }
-
- // otherwise, surface the import map error or try resolving when has no import map
- if let Some(err) = maybe_import_map_err {
- Err(err.into())
- } else {
- deno_graph::resolve_import(specifier, referrer).map_err(|err| err.into())
+ None => deno_graph::resolve_import(specifier, referrer)
+ .map_err(|err| err.into()),
}
}
}
diff --git a/cli/standalone/binary.rs b/cli/standalone/binary.rs
index 51d8db79e..9ccb39e54 100644
--- a/cli/standalone/binary.rs
+++ b/cli/standalone/binary.rs
@@ -1,10 +1,13 @@
// Copyright 2018-2023 the Deno authors. All rights reserved. MIT license.
+use std::collections::BTreeMap;
+use std::env::current_exe;
use std::io::Read;
use std::io::Seek;
use std::io::SeekFrom;
use std::io::Write;
use std::path::Path;
+use std::path::PathBuf;
use deno_ast::ModuleSpecifier;
use deno_core::anyhow::Context;
@@ -14,22 +17,112 @@ use deno_core::futures::AsyncReadExt;
use deno_core::futures::AsyncSeekExt;
use deno_core::serde_json;
use deno_core::url::Url;
+use deno_npm::registry::PackageDepNpmSchemeValueParseError;
+use deno_npm::resolution::SerializedNpmResolutionSnapshot;
use deno_runtime::permissions::PermissionsOptions;
+use deno_semver::npm::NpmPackageReq;
+use deno_semver::npm::NpmVersionReqSpecifierParseError;
use log::Level;
use serde::Deserialize;
use serde::Serialize;
+use crate::args::package_json::PackageJsonDepValueParseError;
+use crate::args::package_json::PackageJsonDeps;
use crate::args::CaData;
use crate::args::CliOptions;
use crate::args::CompileFlags;
+use crate::args::PackageJsonDepsProvider;
use crate::cache::DenoDir;
use crate::file_fetcher::FileFetcher;
use crate::http_util::HttpClient;
+use crate::npm::CliNpmRegistryApi;
+use crate::npm::CliNpmResolver;
+use crate::npm::NpmCache;
+use crate::npm::NpmResolution;
use crate::util::progress_bar::ProgressBar;
use crate::util::progress_bar::ProgressBarStyle;
+use super::virtual_fs::FileBackedVfs;
+use super::virtual_fs::VfsBuilder;
+use super::virtual_fs::VfsRoot;
+use super::virtual_fs::VirtualDirectory;
+
const MAGIC_TRAILER: &[u8; 8] = b"d3n0l4nd";
+#[derive(Serialize, Deserialize)]
+enum SerializablePackageJsonDepValueParseError {
+ SchemeValue(String),
+ Specifier(String),
+ Unsupported { scheme: String },
+}
+
+impl SerializablePackageJsonDepValueParseError {
+ pub fn from_err(err: PackageJsonDepValueParseError) -> Self {
+ match err {
+ PackageJsonDepValueParseError::SchemeValue(err) => {
+ Self::SchemeValue(err.value)
+ }
+ PackageJsonDepValueParseError::Specifier(err) => {
+ Self::Specifier(err.source.to_string())
+ }
+ PackageJsonDepValueParseError::Unsupported { scheme } => {
+ Self::Unsupported { scheme }
+ }
+ }
+ }
+
+ pub fn into_err(self) -> PackageJsonDepValueParseError {
+ match self {
+ SerializablePackageJsonDepValueParseError::SchemeValue(value) => {
+ PackageJsonDepValueParseError::SchemeValue(
+ PackageDepNpmSchemeValueParseError { value },
+ )
+ }
+ SerializablePackageJsonDepValueParseError::Specifier(source) => {
+ PackageJsonDepValueParseError::Specifier(
+ NpmVersionReqSpecifierParseError {
+ source: monch::ParseErrorFailureError::new(source),
+ },
+ )
+ }
+ SerializablePackageJsonDepValueParseError::Unsupported { scheme } => {
+ PackageJsonDepValueParseError::Unsupported { scheme }
+ }
+ }
+ }
+}
+
+#[derive(Serialize, Deserialize)]
+pub struct SerializablePackageJsonDeps(
+ BTreeMap<
+ String,
+ Result<NpmPackageReq, SerializablePackageJsonDepValueParseError>,
+ >,
+);
+
+impl SerializablePackageJsonDeps {
+ pub fn from_deps(deps: PackageJsonDeps) -> Self {
+ Self(
+ deps
+ .into_iter()
+ .map(|(name, req)| {
+ let res =
+ req.map_err(SerializablePackageJsonDepValueParseError::from_err);
+ (name, res)
+ })
+ .collect(),
+ )
+ }
+
+ pub fn into_deps(self) -> PackageJsonDeps {
+ self
+ .0
+ .into_iter()
+ .map(|(name, res)| (name, res.map_err(|err| err.into_err())))
+ .collect()
+ }
+}
+
#[derive(Deserialize, Serialize)]
pub struct Metadata {
pub argv: Vec<String>,
@@ -44,27 +137,74 @@ pub struct Metadata {
pub unsafely_ignore_certificate_errors: Option<Vec<String>>,
pub maybe_import_map: Option<(Url, String)>,
pub entrypoint: ModuleSpecifier,
+ /// Whether this uses a node_modules directory (true) or the global cache (false).
+ pub node_modules_dir: bool,
+ pub npm_snapshot: Option<SerializedNpmResolutionSnapshot>,
+ pub package_json_deps: Option<SerializablePackageJsonDeps>,
+}
+
+pub fn load_npm_vfs(root_dir_path: PathBuf) -> Result<FileBackedVfs, AnyError> {
+ let file_path = current_exe().unwrap();
+ let mut file = std::fs::File::open(file_path)?;
+ file.seek(SeekFrom::End(-(TRAILER_SIZE as i64)))?;
+ let mut trailer = [0; TRAILER_SIZE];
+ file.read_exact(&mut trailer)?;
+ let trailer = Trailer::parse(&trailer)?.unwrap();
+ file.seek(SeekFrom::Start(trailer.npm_vfs_pos))?;
+ let mut vfs_data = vec![0; trailer.npm_vfs_len() as usize];
+ file.read_exact(&mut vfs_data)?;
+ let mut dir: VirtualDirectory = serde_json::from_slice(&vfs_data)?;
+
+  // rename the deserialized root dir so it matches the runtime root path's name
+ dir.name = root_dir_path
+ .file_name()
+ .unwrap()
+ .to_string_lossy()
+ .to_string();
+
+ let fs_root = VfsRoot {
+ dir,
+ root_path: root_dir_path,
+ start_file_offset: trailer.npm_files_pos,
+ };
+ Ok(FileBackedVfs::new(file, fs_root))
}
-pub fn write_binary_bytes(
+fn write_binary_bytes(
writer: &mut impl Write,
original_bin: Vec<u8>,
metadata: &Metadata,
eszip: eszip::EszipV2,
+ npm_vfs: Option<&VirtualDirectory>,
+ npm_files: &Vec<Vec<u8>>,
) -> Result<(), AnyError> {
let metadata = serde_json::to_string(metadata)?.as_bytes().to_vec();
+ let npm_vfs = serde_json::to_string(&npm_vfs)?.as_bytes().to_vec();
let eszip_archive = eszip.into_bytes();
- let eszip_pos = original_bin.len();
- let metadata_pos = eszip_pos + eszip_archive.len();
- let mut trailer = MAGIC_TRAILER.to_vec();
- trailer.write_all(&eszip_pos.to_be_bytes())?;
- trailer.write_all(&metadata_pos.to_be_bytes())?;
-
writer.write_all(&original_bin)?;
writer.write_all(&eszip_archive)?;
writer.write_all(&metadata)?;
- writer.write_all(&trailer)?;
+ writer.write_all(&npm_vfs)?;
+ for file in npm_files {
+ writer.write_all(file)?;
+ }
+
+ // write the trailer, which includes the positions
+ // of the data blocks in the file
+ writer.write_all(&{
+ let eszip_pos = original_bin.len() as u64;
+ let metadata_pos = eszip_pos + (eszip_archive.len() as u64);
+ let npm_vfs_pos = metadata_pos + (metadata.len() as u64);
+ let npm_files_pos = npm_vfs_pos + (npm_vfs.len() as u64);
+ Trailer {
+ eszip_pos,
+ metadata_pos,
+ npm_vfs_pos,
+ npm_files_pos,
+ }
+ .as_bytes()
+ })?;
Ok(())
}
@@ -73,12 +213,15 @@ pub fn is_standalone_binary(exe_path: &Path) -> bool {
let Ok(mut output_file) = std::fs::File::open(exe_path) else {
return false;
};
- if output_file.seek(SeekFrom::End(-24)).is_err() {
+ if output_file
+ .seek(SeekFrom::End(-(TRAILER_SIZE as i64)))
+ .is_err()
+ {
// This seek may fail because the file is too small to possibly be
// `deno compile` output.
return false;
}
- let mut trailer = [0; 24];
+ let mut trailer = [0; TRAILER_SIZE];
if output_file.read_exact(&mut trailer).is_err() {
return false;
};
@@ -88,13 +231,9 @@ pub fn is_standalone_binary(exe_path: &Path) -> bool {
/// This function will try to run this binary as a standalone binary
/// produced by `deno compile`. It determines if this is a standalone
-/// binary by checking for the magic trailer string `d3n0l4nd` at EOF-24 (8 bytes * 3).
-/// The magic trailer is followed by:
-/// - a u64 pointer to the JS bundle embedded in the binary
-/// - a u64 pointer to JSON metadata (serialized flags) embedded in the binary
-/// These are dereferenced, and the bundle is executed under the configuration
-/// specified by the metadata. If no magic trailer is present, this function
-/// exits with `Ok(None)`.
+/// binary by skipping over the trailer width at the end of the file,
+/// then checking for the magic trailer string `d3n0l4nd`. If found,
+/// the bundle is executed. If not, this function exits with `Ok(None)`.
pub async fn extract_standalone(
exe_path: &Path,
cli_args: Vec<String>,
@@ -104,21 +243,17 @@ pub async fn extract_standalone(
let mut bufreader =
deno_core::futures::io::BufReader::new(AllowStdIo::new(file));
- let trailer_pos = bufreader.seek(SeekFrom::End(-24)).await?;
- let mut trailer = [0; 24];
+ let _trailer_pos = bufreader
+ .seek(SeekFrom::End(-(TRAILER_SIZE as i64)))
+ .await?;
+ let mut trailer = [0; TRAILER_SIZE];
bufreader.read_exact(&mut trailer).await?;
- let (magic_trailer, rest) = trailer.split_at(8);
- if magic_trailer != MAGIC_TRAILER {
- return Ok(None);
- }
-
- let (eszip_archive_pos, rest) = rest.split_at(8);
- let metadata_pos = rest;
- let eszip_archive_pos = u64_from_bytes(eszip_archive_pos)?;
- let metadata_pos = u64_from_bytes(metadata_pos)?;
- let metadata_len = trailer_pos - metadata_pos;
+ let trailer = match Trailer::parse(&trailer)? {
+ None => return Ok(None),
+ Some(trailer) => trailer,
+ };
- bufreader.seek(SeekFrom::Start(eszip_archive_pos)).await?;
+ bufreader.seek(SeekFrom::Start(trailer.eszip_pos)).await?;
let (eszip, loader) = eszip::EszipV2::parse(bufreader)
.await
@@ -126,12 +261,14 @@ pub async fn extract_standalone(
let mut bufreader = loader.await.context("Failed to parse eszip archive")?;
- bufreader.seek(SeekFrom::Start(metadata_pos)).await?;
+ bufreader
+ .seek(SeekFrom::Start(trailer.metadata_pos))
+ .await?;
let mut metadata = String::new();
bufreader
- .take(metadata_len)
+ .take(trailer.metadata_len())
.read_to_string(&mut metadata)
.await
.context("Failed to read metadata from the current executable")?;
@@ -142,6 +279,57 @@ pub async fn extract_standalone(
Ok(Some((metadata, eszip)))
}
+const TRAILER_SIZE: usize = std::mem::size_of::<Trailer>() + 8; // 8 bytes for the magic trailer string
+
+struct Trailer {
+ eszip_pos: u64,
+ metadata_pos: u64,
+ npm_vfs_pos: u64,
+ npm_files_pos: u64,
+}
+
+impl Trailer {
+ pub fn parse(trailer: &[u8]) -> Result<Option<Trailer>, AnyError> {
+ let (magic_trailer, rest) = trailer.split_at(8);
+ if magic_trailer != MAGIC_TRAILER {
+ return Ok(None);
+ }
+
+ let (eszip_archive_pos, rest) = rest.split_at(8);
+ let (metadata_pos, rest) = rest.split_at(8);
+ let (npm_vfs_pos, npm_files_pos) = rest.split_at(8);
+ let eszip_archive_pos = u64_from_bytes(eszip_archive_pos)?;
+ let metadata_pos = u64_from_bytes(metadata_pos)?;
+ let npm_vfs_pos = u64_from_bytes(npm_vfs_pos)?;
+ let npm_files_pos = u64_from_bytes(npm_files_pos)?;
+ Ok(Some(Trailer {
+ eszip_pos: eszip_archive_pos,
+ metadata_pos,
+ npm_vfs_pos,
+ npm_files_pos,
+ }))
+ }
+
+ pub fn metadata_len(&self) -> u64 {
+ self.npm_vfs_pos - self.metadata_pos
+ }
+
+ pub fn npm_vfs_len(&self) -> u64 {
+ self.npm_files_pos - self.npm_vfs_pos
+ }
+
+ pub fn as_bytes(&self) -> Vec<u8> {
+ let mut trailer = MAGIC_TRAILER.to_vec();
+ trailer.write_all(&self.eszip_pos.to_be_bytes()).unwrap();
+ trailer.write_all(&self.metadata_pos.to_be_bytes()).unwrap();
+ trailer.write_all(&self.npm_vfs_pos.to_be_bytes()).unwrap();
+ trailer
+ .write_all(&self.npm_files_pos.to_be_bytes())
+ .unwrap();
+ trailer
+ }
+}
+
fn u64_from_bytes(arr: &[u8]) -> Result<u64, AnyError> {
let fixed_arr: &[u8; 8] = arr
.try_into()
@@ -153,18 +341,34 @@ pub struct DenoCompileBinaryWriter<'a> {
file_fetcher: &'a FileFetcher,
client: &'a HttpClient,
deno_dir: &'a DenoDir,
+ npm_api: &'a CliNpmRegistryApi,
+ npm_cache: &'a NpmCache,
+ npm_resolver: &'a CliNpmResolver,
+ resolution: &'a NpmResolution,
+ package_json_deps_provider: &'a PackageJsonDepsProvider,
}
impl<'a> DenoCompileBinaryWriter<'a> {
+ #[allow(clippy::too_many_arguments)]
pub fn new(
file_fetcher: &'a FileFetcher,
client: &'a HttpClient,
deno_dir: &'a DenoDir,
+ npm_api: &'a CliNpmRegistryApi,
+ npm_cache: &'a NpmCache,
+ npm_resolver: &'a CliNpmResolver,
+ resolution: &'a NpmResolution,
+ package_json_deps_provider: &'a PackageJsonDepsProvider,
) -> Self {
Self {
file_fetcher,
client,
deno_dir,
+ npm_api,
+ npm_cache,
+ npm_resolver,
+ resolution,
+ package_json_deps_provider,
}
}
@@ -284,6 +488,14 @@ impl<'a> DenoCompileBinaryWriter<'a> {
.resolve_import_map(self.file_fetcher)
.await?
.map(|import_map| (import_map.base_url().clone(), import_map.to_json()));
+ let (npm_snapshot, npm_vfs, npm_files) = if self.resolution.has_packages() {
+ let (root_dir, files) = self.build_vfs()?.into_dir_and_files();
+ let snapshot = self.resolution.serialized_snapshot();
+ (Some(snapshot), Some(root_dir), files)
+ } else {
+ (None, None, Vec::new())
+ };
+
let metadata = Metadata {
argv: compile_flags.args.clone(),
unstable: cli_options.unstable(),
@@ -299,8 +511,44 @@ impl<'a> DenoCompileBinaryWriter<'a> {
ca_data,
entrypoint: entrypoint.clone(),
maybe_import_map,
+ node_modules_dir: self.npm_resolver.node_modules_path().is_some(),
+ npm_snapshot,
+ package_json_deps: self
+ .package_json_deps_provider
+ .deps()
+ .map(|deps| SerializablePackageJsonDeps::from_deps(deps.clone())),
};
- write_binary_bytes(writer, original_bin, &metadata, eszip)
+ write_binary_bytes(
+ writer,
+ original_bin,
+ &metadata,
+ eszip,
+ npm_vfs.as_ref(),
+ &npm_files,
+ )
+ }
+
+ fn build_vfs(&self) -> Result<VfsBuilder, AnyError> {
+ if let Some(node_modules_path) = self.npm_resolver.node_modules_path() {
+ let mut builder = VfsBuilder::new(node_modules_path.clone());
+ builder.add_dir_recursive(&node_modules_path)?;
+ Ok(builder)
+ } else {
+      // DO NOT include the user's registry url as it may contain credentials,
+      // and avoid making the embedded directory layout depend on that url
+ let registry_url = self.npm_api.base_url();
+ let root_path = self.npm_cache.registry_folder(registry_url);
+ let mut builder = VfsBuilder::new(root_path);
+ for package in self.resolution.all_packages() {
+ let folder = self
+ .npm_resolver
+ .resolve_pkg_folder_from_pkg_id(&package.pkg_id)?;
+ builder.add_dir_recursive(&folder)?;
+ }
+ // overwrite the root directory's name to obscure the user's registry url
+ builder.set_root_dir_name("node_modules".to_string());
+ Ok(builder)
+ }
}
}
diff --git a/cli/standalone/file_system.rs b/cli/standalone/file_system.rs
new file mode 100644
index 000000000..f0891f71d
--- /dev/null
+++ b/cli/standalone/file_system.rs
@@ -0,0 +1,337 @@
+// Copyright 2018-2023 the Deno authors. All rights reserved. MIT license.
+
+use std::path::Path;
+use std::path::PathBuf;
+use std::rc::Rc;
+use std::sync::Arc;
+
+use deno_runtime::deno_fs::FileSystem;
+use deno_runtime::deno_fs::FsDirEntry;
+use deno_runtime::deno_fs::FsFileType;
+use deno_runtime::deno_fs::OpenOptions;
+use deno_runtime::deno_fs::RealFs;
+use deno_runtime::deno_io::fs::File;
+use deno_runtime::deno_io::fs::FsError;
+use deno_runtime::deno_io::fs::FsResult;
+use deno_runtime::deno_io::fs::FsStat;
+
+use super::virtual_fs::FileBackedVfs;
+
+#[derive(Debug, Clone)]
+pub struct DenoCompileFileSystem(Arc<FileBackedVfs>);
+
+impl DenoCompileFileSystem {
+ pub fn new(vfs: FileBackedVfs) -> Self {
+ Self(Arc::new(vfs))
+ }
+
+ fn error_if_in_vfs(&self, path: &Path) -> FsResult<()> {
+ if self.0.is_path_within(path) {
+ Err(FsError::NotSupported)
+ } else {
+ Ok(())
+ }
+ }
+
+ fn copy_to_real_path(&self, oldpath: &Path, newpath: &Path) -> FsResult<()> {
+ let old_file = self.0.file_entry(oldpath)?;
+ let old_file_bytes = self.0.read_file_all(old_file)?;
+ RealFs.write_file_sync(
+ newpath,
+ OpenOptions {
+ read: false,
+ write: true,
+ create: true,
+ truncate: true,
+ append: false,
+ create_new: false,
+ mode: None,
+ },
+ &old_file_bytes,
+ )
+ }
+}
+
+#[async_trait::async_trait(?Send)]
+impl FileSystem for DenoCompileFileSystem {
+ fn cwd(&self) -> FsResult<PathBuf> {
+ RealFs.cwd()
+ }
+
+ fn tmp_dir(&self) -> FsResult<PathBuf> {
+ RealFs.tmp_dir()
+ }
+
+ fn chdir(&self, path: &Path) -> FsResult<()> {
+ self.error_if_in_vfs(path)?;
+ RealFs.chdir(path)
+ }
+
+ fn umask(&self, mask: Option<u32>) -> FsResult<u32> {
+ RealFs.umask(mask)
+ }
+
+ fn open_sync(
+ &self,
+ path: &Path,
+ options: OpenOptions,
+ ) -> FsResult<Rc<dyn File>> {
+ if self.0.is_path_within(path) {
+ Ok(self.0.open_file(path)?)
+ } else {
+ RealFs.open_sync(path, options)
+ }
+ }
+ async fn open_async(
+ &self,
+ path: PathBuf,
+ options: OpenOptions,
+ ) -> FsResult<Rc<dyn File>> {
+ if self.0.is_path_within(&path) {
+ Ok(self.0.open_file(&path)?)
+ } else {
+ RealFs.open_async(path, options).await
+ }
+ }
+
+ fn mkdir_sync(
+ &self,
+ path: &Path,
+ recursive: bool,
+ mode: u32,
+ ) -> FsResult<()> {
+ self.error_if_in_vfs(path)?;
+ RealFs.mkdir_sync(path, recursive, mode)
+ }
+ async fn mkdir_async(
+ &self,
+ path: PathBuf,
+ recursive: bool,
+ mode: u32,
+ ) -> FsResult<()> {
+ self.error_if_in_vfs(&path)?;
+ RealFs.mkdir_async(path, recursive, mode).await
+ }
+
+ fn chmod_sync(&self, path: &Path, mode: u32) -> FsResult<()> {
+ self.error_if_in_vfs(path)?;
+ RealFs.chmod_sync(path, mode)
+ }
+ async fn chmod_async(&self, path: PathBuf, mode: u32) -> FsResult<()> {
+ self.error_if_in_vfs(&path)?;
+ RealFs.chmod_async(path, mode).await
+ }
+
+ fn chown_sync(
+ &self,
+ path: &Path,
+ uid: Option<u32>,
+ gid: Option<u32>,
+ ) -> FsResult<()> {
+ self.error_if_in_vfs(path)?;
+ RealFs.chown_sync(path, uid, gid)
+ }
+ async fn chown_async(
+ &self,
+ path: PathBuf,
+ uid: Option<u32>,
+ gid: Option<u32>,
+ ) -> FsResult<()> {
+ self.error_if_in_vfs(&path)?;
+ RealFs.chown_async(path, uid, gid).await
+ }
+
+ fn remove_sync(&self, path: &Path, recursive: bool) -> FsResult<()> {
+ self.error_if_in_vfs(path)?;
+ RealFs.remove_sync(path, recursive)
+ }
+ async fn remove_async(&self, path: PathBuf, recursive: bool) -> FsResult<()> {
+ self.error_if_in_vfs(&path)?;
+ RealFs.remove_async(path, recursive).await
+ }
+
+ fn copy_file_sync(&self, oldpath: &Path, newpath: &Path) -> FsResult<()> {
+ self.error_if_in_vfs(newpath)?;
+ if self.0.is_path_within(oldpath) {
+ self.copy_to_real_path(oldpath, newpath)
+ } else {
+ RealFs.copy_file_sync(oldpath, newpath)
+ }
+ }
+ async fn copy_file_async(
+ &self,
+ oldpath: PathBuf,
+ newpath: PathBuf,
+ ) -> FsResult<()> {
+ self.error_if_in_vfs(&newpath)?;
+ if self.0.is_path_within(&oldpath) {
+ let fs = self.clone();
+ tokio::task::spawn_blocking(move || {
+ fs.copy_to_real_path(&oldpath, &newpath)
+ })
+ .await?
+ } else {
+ RealFs.copy_file_async(oldpath, newpath).await
+ }
+ }
+
+ fn stat_sync(&self, path: &Path) -> FsResult<FsStat> {
+ if self.0.is_path_within(path) {
+ Ok(self.0.stat(path)?)
+ } else {
+ RealFs.stat_sync(path)
+ }
+ }
+ async fn stat_async(&self, path: PathBuf) -> FsResult<FsStat> {
+ if self.0.is_path_within(&path) {
+ Ok(self.0.stat(&path)?)
+ } else {
+ RealFs.stat_async(path).await
+ }
+ }
+
+ fn lstat_sync(&self, path: &Path) -> FsResult<FsStat> {
+ if self.0.is_path_within(path) {
+ Ok(self.0.lstat(path)?)
+ } else {
+ RealFs.lstat_sync(path)
+ }
+ }
+ async fn lstat_async(&self, path: PathBuf) -> FsResult<FsStat> {
+ if self.0.is_path_within(&path) {
+ Ok(self.0.lstat(&path)?)
+ } else {
+ RealFs.lstat_async(path).await
+ }
+ }
+
+ fn realpath_sync(&self, path: &Path) -> FsResult<PathBuf> {
+ if self.0.is_path_within(path) {
+ Ok(self.0.canonicalize(path)?)
+ } else {
+ RealFs.realpath_sync(path)
+ }
+ }
+ async fn realpath_async(&self, path: PathBuf) -> FsResult<PathBuf> {
+ if self.0.is_path_within(&path) {
+ Ok(self.0.canonicalize(&path)?)
+ } else {
+ RealFs.realpath_async(path).await
+ }
+ }
+
+ fn read_dir_sync(&self, path: &Path) -> FsResult<Vec<FsDirEntry>> {
+ if self.0.is_path_within(path) {
+ Ok(self.0.read_dir(path)?)
+ } else {
+ RealFs.read_dir_sync(path)
+ }
+ }
+ async fn read_dir_async(&self, path: PathBuf) -> FsResult<Vec<FsDirEntry>> {
+ if self.0.is_path_within(&path) {
+ Ok(self.0.read_dir(&path)?)
+ } else {
+ RealFs.read_dir_async(path).await
+ }
+ }
+
+ fn rename_sync(&self, oldpath: &Path, newpath: &Path) -> FsResult<()> {
+ self.error_if_in_vfs(oldpath)?;
+ self.error_if_in_vfs(newpath)?;
+ RealFs.rename_sync(oldpath, newpath)
+ }
+ async fn rename_async(
+ &self,
+ oldpath: PathBuf,
+ newpath: PathBuf,
+ ) -> FsResult<()> {
+ self.error_if_in_vfs(&oldpath)?;
+ self.error_if_in_vfs(&newpath)?;
+ RealFs.rename_async(oldpath, newpath).await
+ }
+
+ fn link_sync(&self, oldpath: &Path, newpath: &Path) -> FsResult<()> {
+ self.error_if_in_vfs(oldpath)?;
+ self.error_if_in_vfs(newpath)?;
+ RealFs.link_sync(oldpath, newpath)
+ }
+ async fn link_async(
+ &self,
+ oldpath: PathBuf,
+ newpath: PathBuf,
+ ) -> FsResult<()> {
+ self.error_if_in_vfs(&oldpath)?;
+ self.error_if_in_vfs(&newpath)?;
+ RealFs.link_async(oldpath, newpath).await
+ }
+
+ fn symlink_sync(
+ &self,
+ oldpath: &Path,
+ newpath: &Path,
+ file_type: Option<FsFileType>,
+ ) -> FsResult<()> {
+ self.error_if_in_vfs(oldpath)?;
+ self.error_if_in_vfs(newpath)?;
+ RealFs.symlink_sync(oldpath, newpath, file_type)
+ }
+ async fn symlink_async(
+ &self,
+ oldpath: PathBuf,
+ newpath: PathBuf,
+ file_type: Option<FsFileType>,
+ ) -> FsResult<()> {
+ self.error_if_in_vfs(&oldpath)?;
+ self.error_if_in_vfs(&newpath)?;
+ RealFs.symlink_async(oldpath, newpath, file_type).await
+ }
+
+ fn read_link_sync(&self, path: &Path) -> FsResult<PathBuf> {
+ if self.0.is_path_within(path) {
+ Ok(self.0.read_link(path)?)
+ } else {
+ RealFs.read_link_sync(path)
+ }
+ }
+ async fn read_link_async(&self, path: PathBuf) -> FsResult<PathBuf> {
+ if self.0.is_path_within(&path) {
+ Ok(self.0.read_link(&path)?)
+ } else {
+ RealFs.read_link_async(path).await
+ }
+ }
+
+ fn truncate_sync(&self, path: &Path, len: u64) -> FsResult<()> {
+ self.error_if_in_vfs(path)?;
+ RealFs.truncate_sync(path, len)
+ }
+ async fn truncate_async(&self, path: PathBuf, len: u64) -> FsResult<()> {
+ self.error_if_in_vfs(&path)?;
+ RealFs.truncate_async(path, len).await
+ }
+
+ fn utime_sync(
+ &self,
+ path: &Path,
+ atime_secs: i64,
+ atime_nanos: u32,
+ mtime_secs: i64,
+ mtime_nanos: u32,
+ ) -> FsResult<()> {
+ self.error_if_in_vfs(path)?;
+ RealFs.utime_sync(path, atime_secs, atime_nanos, mtime_secs, mtime_nanos)
+ }
+ async fn utime_async(
+ &self,
+ path: PathBuf,
+ atime_secs: i64,
+ atime_nanos: u32,
+ mtime_secs: i64,
+ mtime_nanos: u32,
+ ) -> FsResult<()> {
+ self.error_if_in_vfs(&path)?;
+ RealFs
+ .utime_async(path, atime_secs, atime_nanos, mtime_secs, mtime_nanos)
+ .await
+ }
+}
diff --git a/cli/standalone/mod.rs b/cli/standalone/mod.rs
index e00ab8ab2..db2743be8 100644
--- a/cli/standalone/mod.rs
+++ b/cli/standalone/mod.rs
@@ -1,17 +1,25 @@
// Copyright 2018-2023 the Deno authors. All rights reserved. MIT license.
use crate::args::get_root_cert_store;
+use crate::args::npm_pkg_req_ref_to_binary_command;
use crate::args::CaData;
use crate::args::CacheSetting;
+use crate::args::PackageJsonDepsProvider;
use crate::args::StorageKeyResolver;
+use crate::cache::Caches;
use crate::cache::DenoDir;
+use crate::cache::NodeAnalysisCache;
use crate::file_fetcher::get_source_from_data_url;
use crate::http_util::HttpClient;
+use crate::module_loader::CjsResolutionStore;
+use crate::module_loader::NpmModuleLoader;
+use crate::node::CliCjsEsmCodeAnalyzer;
use crate::npm::create_npm_fs_resolver;
use crate::npm::CliNpmRegistryApi;
use crate::npm::CliNpmResolver;
use crate::npm::NpmCache;
use crate::npm::NpmResolution;
+use crate::resolver::MappedSpecifierResolver;
use crate::util::progress_bar::ProgressBar;
use crate::util::progress_bar::ProgressBarStyle;
use crate::util::v8::construct_v8_flags;
@@ -19,7 +27,7 @@ use crate::worker::CliMainWorkerFactory;
use crate::worker::CliMainWorkerOptions;
use crate::worker::HasNodeSpecifierChecker;
use crate::worker::ModuleLoaderFactory;
-use crate::CliGraphResolver;
+use deno_ast::MediaType;
use deno_core::anyhow::Context;
use deno_core::error::type_error;
use deno_core::error::AnyError;
@@ -29,31 +37,44 @@ use deno_core::ModuleLoader;
use deno_core::ModuleSpecifier;
use deno_core::ModuleType;
use deno_core::ResolutionKind;
-use deno_graph::source::Resolver;
use deno_runtime::deno_fs;
+use deno_runtime::deno_node;
+use deno_runtime::deno_node::analyze::NodeCodeTranslator;
use deno_runtime::deno_node::NodeResolver;
use deno_runtime::deno_tls::rustls::RootCertStore;
use deno_runtime::deno_tls::RootCertStoreProvider;
use deno_runtime::deno_web::BlobStore;
use deno_runtime::permissions::Permissions;
use deno_runtime::permissions::PermissionsContainer;
+use deno_semver::npm::NpmPackageReqReference;
use import_map::parse_from_json;
use std::pin::Pin;
use std::rc::Rc;
use std::sync::Arc;
mod binary;
+mod file_system;
+mod virtual_fs;
pub use binary::extract_standalone;
pub use binary::is_standalone_binary;
pub use binary::DenoCompileBinaryWriter;
+use self::binary::load_npm_vfs;
use self::binary::Metadata;
+use self::file_system::DenoCompileFileSystem;
+
+struct SharedModuleLoaderState {
+ eszip: eszip::EszipV2,
+ mapped_specifier_resolver: MappedSpecifierResolver,
+ npm_module_loader: Arc<NpmModuleLoader>,
+}
#[derive(Clone)]
struct EmbeddedModuleLoader {
- eszip: Arc<eszip::EszipV2>,
- maybe_import_map_resolver: Option<Arc<CliGraphResolver>>,
+ shared: Arc<SharedModuleLoaderState>,
+ root_permissions: PermissionsContainer,
+ dynamic_permissions: PermissionsContainer,
}
impl ModuleLoader for EmbeddedModuleLoader {
@@ -61,10 +82,10 @@ impl ModuleLoader for EmbeddedModuleLoader {
&self,
specifier: &str,
referrer: &str,
- _kind: ResolutionKind,
+ kind: ResolutionKind,
) -> Result<ModuleSpecifier, AnyError> {
// Try to follow redirects when resolving.
- let referrer = match self.eszip.get_module(referrer) {
+ let referrer = match self.shared.eszip.get_module(referrer) {
Some(eszip::Module { ref specifier, .. }) => {
ModuleSpecifier::parse(specifier)?
}
@@ -74,27 +95,93 @@ impl ModuleLoader for EmbeddedModuleLoader {
}
};
- self
- .maybe_import_map_resolver
+ let permissions = if matches!(kind, ResolutionKind::DynamicImport) {
+ &self.dynamic_permissions
+ } else {
+ &self.root_permissions
+ };
+
+ if let Some(result) = self
+ .shared
+ .npm_module_loader
+ .resolve_if_in_npm_package(specifier, &referrer, permissions)
+ {
+ return result;
+ }
+
+ let maybe_mapped = self
+ .shared
+ .mapped_specifier_resolver
+ .resolve(specifier, &referrer)?
+ .into_specifier();
+
+ // npm specifier
+ let specifier_text = maybe_mapped
.as_ref()
- .map(|r| r.resolve(specifier, &referrer))
- .unwrap_or_else(|| {
- deno_core::resolve_import(specifier, referrer.as_str())
- .map_err(|err| err.into())
- })
+ .map(|r| r.as_str())
+ .unwrap_or(specifier);
+ if let Ok(reference) = NpmPackageReqReference::from_str(specifier_text) {
+ return self
+ .shared
+ .npm_module_loader
+ .resolve_req_reference(&reference, permissions);
+ }
+
+ // Built-in Node modules
+ if let Some(module_name) = specifier_text.strip_prefix("node:") {
+ return deno_node::resolve_builtin_node_module(module_name);
+ }
+
+ match maybe_mapped {
+ Some(resolved) => Ok(resolved),
+ None => deno_core::resolve_import(specifier, referrer.as_str())
+ .map_err(|err| err.into()),
+ }
}
fn load(
&self,
module_specifier: &ModuleSpecifier,
- _maybe_referrer: Option<&ModuleSpecifier>,
- _is_dynamic: bool,
+ maybe_referrer: Option<&ModuleSpecifier>,
+ is_dynamic: bool,
) -> Pin<Box<deno_core::ModuleSourceFuture>> {
let is_data_uri = get_source_from_data_url(module_specifier).ok();
+ let permissions = if is_dynamic {
+ &self.dynamic_permissions
+ } else {
+ &self.root_permissions
+ };
+
+ if let Some(result) =
+ self.shared.npm_module_loader.load_sync_if_in_npm_package(
+ module_specifier,
+ maybe_referrer,
+ permissions,
+ )
+ {
+ return match result {
+ Ok(code_source) => Box::pin(deno_core::futures::future::ready(Ok(
+ deno_core::ModuleSource::new_with_redirect(
+ match code_source.media_type {
+ MediaType::Json => ModuleType::Json,
+ _ => ModuleType::JavaScript,
+ },
+ code_source.code,
+ module_specifier,
+ &code_source.found_url,
+ ),
+ ))),
+ Err(err) => Box::pin(deno_core::futures::future::ready(Err(err))),
+ };
+ }
+
let module = self
+ .shared
.eszip
.get_module(module_specifier.as_str())
- .ok_or_else(|| type_error("Module not found"));
+ .ok_or_else(|| {
+ type_error(format!("Module not found: {}", module_specifier))
+ });
// TODO(mmastrac): This clone can probably be removed in the future if ModuleSpecifier is no longer a full-fledged URL
let module_specifier = module_specifier.clone();
@@ -128,24 +215,32 @@ impl ModuleLoader for EmbeddedModuleLoader {
}
struct StandaloneModuleLoaderFactory {
- loader: EmbeddedModuleLoader,
+ shared: Arc<SharedModuleLoaderState>,
}
impl ModuleLoaderFactory for StandaloneModuleLoaderFactory {
fn create_for_main(
&self,
- _root_permissions: PermissionsContainer,
- _dynamic_permissions: PermissionsContainer,
+ root_permissions: PermissionsContainer,
+ dynamic_permissions: PermissionsContainer,
) -> Rc<dyn ModuleLoader> {
- Rc::new(self.loader.clone())
+ Rc::new(EmbeddedModuleLoader {
+ shared: self.shared.clone(),
+ root_permissions,
+ dynamic_permissions,
+ })
}
fn create_for_worker(
&self,
- _root_permissions: PermissionsContainer,
- _dynamic_permissions: PermissionsContainer,
+ root_permissions: PermissionsContainer,
+ dynamic_permissions: PermissionsContainer,
) -> Rc<dyn ModuleLoader> {
- Rc::new(self.loader.clone())
+ Rc::new(EmbeddedModuleLoader {
+ shared: self.shared.clone(),
+ root_permissions,
+ dynamic_permissions,
+ })
}
fn create_source_map_getter(
@@ -183,6 +278,9 @@ pub async fn run(
metadata: Metadata,
) -> Result<(), AnyError> {
let main_module = &metadata.entrypoint;
+ let current_exe_path = std::env::current_exe().unwrap();
+ let current_exe_name =
+ current_exe_path.file_name().unwrap().to_string_lossy();
let dir = DenoDir::new(None)?;
let root_cert_store_provider = Arc::new(StandaloneRootCertStoreProvider {
ca_stores: metadata.ca_stores,
@@ -194,9 +292,14 @@ pub async fn run(
Some(root_cert_store_provider.clone()),
metadata.unsafely_ignore_certificate_errors.clone(),
));
- let npm_registry_url = CliNpmRegistryApi::default_url().to_owned();
+ // use a dummy npm registry url
+ let npm_registry_url = ModuleSpecifier::parse("https://localhost/").unwrap();
+ let root_path = std::env::temp_dir()
+ .join(format!("deno-compile-{}", current_exe_name))
+ .join("node_modules");
+
let npm_cache = Arc::new(NpmCache::new(
- dir.npm_folder_path(),
+ root_path.clone(),
CacheSetting::Use,
http_client.clone(),
progress_bar.clone(),
@@ -207,44 +310,92 @@ pub async fn run(
http_client.clone(),
progress_bar.clone(),
));
- let fs = Arc::new(deno_fs::RealFs);
- let npm_resolution =
- Arc::new(NpmResolution::from_serialized(npm_api.clone(), None, None));
+ let (fs, node_modules_path, snapshot) = if let Some(snapshot) =
+ metadata.npm_snapshot
+ {
+ let vfs_root_dir_path = if metadata.node_modules_dir {
+ root_path
+ } else {
+ npm_cache.registry_folder(&npm_registry_url)
+ };
+ let vfs =
+ load_npm_vfs(vfs_root_dir_path).context("Failed to load npm vfs.")?;
+ let node_modules_path = if metadata.node_modules_dir {
+ Some(vfs.root().to_path_buf())
+ } else {
+ None
+ };
+ (
+ Arc::new(DenoCompileFileSystem::new(vfs)) as Arc<dyn deno_fs::FileSystem>,
+ node_modules_path,
+ Some(snapshot.into_valid()?),
+ )
+ } else {
+ (
+ Arc::new(deno_fs::RealFs) as Arc<dyn deno_fs::FileSystem>,
+ None,
+ None,
+ )
+ };
+ let npm_resolution = Arc::new(NpmResolution::from_serialized(
+ npm_api.clone(),
+ snapshot,
+ None,
+ ));
+ let has_node_modules_dir = node_modules_path.is_some();
let npm_fs_resolver = create_npm_fs_resolver(
fs.clone(),
npm_cache,
&progress_bar,
npm_registry_url,
npm_resolution.clone(),
- None,
+ node_modules_path,
);
let npm_resolver = Arc::new(CliNpmResolver::new(
+ fs.clone(),
npm_resolution.clone(),
npm_fs_resolver,
None,
));
let node_resolver =
Arc::new(NodeResolver::new(fs.clone(), npm_resolver.clone()));
+ let cjs_resolutions = Arc::new(CjsResolutionStore::default());
+ let cache_db = Caches::new(dir.clone());
+ let node_analysis_cache = NodeAnalysisCache::new(cache_db.node_analysis_db());
+ let cjs_esm_code_analyzer = CliCjsEsmCodeAnalyzer::new(node_analysis_cache);
+ let node_code_translator = Arc::new(NodeCodeTranslator::new(
+ cjs_esm_code_analyzer,
+ fs.clone(),
+ node_resolver.clone(),
+ npm_resolver.clone(),
+ ));
+ let package_json_deps_provider = Arc::new(PackageJsonDepsProvider::new(
+ metadata
+ .package_json_deps
+ .map(|serialized| serialized.into_deps()),
+ ));
+ let maybe_import_map = metadata.maybe_import_map.map(|(base, source)| {
+ Arc::new(parse_from_json(&base, &source).unwrap().import_map)
+ });
let module_loader_factory = StandaloneModuleLoaderFactory {
- loader: EmbeddedModuleLoader {
- eszip: Arc::new(eszip),
- maybe_import_map_resolver: metadata.maybe_import_map.map(
- |(base, source)| {
- Arc::new(CliGraphResolver::new(
- None,
- Some(Arc::new(
- parse_from_json(&base, &source).unwrap().import_map,
- )),
- false,
- npm_api.clone(),
- npm_resolution.clone(),
- Default::default(),
- ))
- },
+ shared: Arc::new(SharedModuleLoaderState {
+ eszip,
+ mapped_specifier_resolver: MappedSpecifierResolver::new(
+ maybe_import_map.clone(),
+ package_json_deps_provider.clone(),
),
- },
+ npm_module_loader: Arc::new(NpmModuleLoader::new(
+ cjs_resolutions,
+ node_code_translator,
+ fs.clone(),
+ node_resolver.clone(),
+ )),
+ }),
};
+ let permissions = PermissionsContainer::new(Permissions::from_options(
+ &metadata.permissions,
+ )?);
let worker_factory = CliMainWorkerFactory::new(
StorageKeyResolver::empty(),
npm_resolver.clone(),
@@ -260,14 +411,17 @@ pub async fn run(
debug: false,
coverage_dir: None,
enable_testing_features: false,
- has_node_modules_dir: false,
+ has_node_modules_dir,
inspect_brk: false,
inspect_wait: false,
is_inspecting: false,
- is_npm_main: false,
+ is_npm_main: main_module.scheme() == "npm",
location: metadata.location,
- // todo(dsherret): support a binary command being compiled
- maybe_binary_npm_command_name: None,
+ maybe_binary_npm_command_name: NpmPackageReqReference::from_specifier(
+ main_module,
+ )
+ .ok()
+ .map(|req_ref| npm_pkg_req_ref_to_binary_command(&req_ref)),
origin_data_folder_path: None,
seed: metadata.seed,
unsafely_ignore_certificate_errors: metadata
@@ -278,9 +432,6 @@ pub async fn run(
v8_set_flags(construct_v8_flags(&metadata.v8_flags, vec![]));
- let permissions = PermissionsContainer::new(Permissions::from_options(
- &metadata.permissions,
- )?);
let mut worker = worker_factory
.create_main_worker(main_module.clone(), permissions)
.await?;
diff --git a/cli/standalone/virtual_fs.rs b/cli/standalone/virtual_fs.rs
new file mode 100644
index 000000000..9c0601bcc
--- /dev/null
+++ b/cli/standalone/virtual_fs.rs
@@ -0,0 +1,983 @@
+// Copyright 2018-2023 the Deno authors. All rights reserved. MIT license.
+
+use std::borrow::Cow;
+use std::collections::HashMap;
+use std::collections::HashSet;
+use std::fs::File;
+use std::io::Read;
+use std::io::Seek;
+use std::io::SeekFrom;
+use std::path::Path;
+use std::path::PathBuf;
+use std::rc::Rc;
+use std::sync::Arc;
+
+use deno_core::anyhow::Context;
+use deno_core::error::AnyError;
+use deno_core::parking_lot::Mutex;
+use deno_core::BufMutView;
+use deno_core::BufView;
+use deno_runtime::deno_fs::FsDirEntry;
+use deno_runtime::deno_io;
+use deno_runtime::deno_io::fs::FsError;
+use deno_runtime::deno_io::fs::FsResult;
+use deno_runtime::deno_io::fs::FsStat;
+use serde::Deserialize;
+use serde::Serialize;
+
+use crate::util;
+
/// Builder that constructs an in-memory virtual file system tree plus the
/// concatenated, de-duplicated file data that backs it.
pub struct VfsBuilder {
  // absolute path that all added paths are made relative to
  root_path: PathBuf,
  // tree of directories/files/symlinks being built
  root_dir: VirtualDirectory,
  // raw contents of each unique file, in offset order
  files: Vec<Vec<u8>>,
  // byte offset where the next unique file's data will start
  current_offset: u64,
  // content checksum -> data offset, used to de-duplicate identical files
  file_offsets: HashMap<String, u64>,
}
+
impl VfsBuilder {
  /// Creates a builder rooted at `root_path`; the virtual root directory is
  /// named after the path's file stem.
  pub fn new(root_path: PathBuf) -> Self {
    Self {
      root_dir: VirtualDirectory {
        // NOTE(review): `file_stem()` is `None` for paths like "/" — this
        // assumes root_path always has a final component; confirm callers.
        name: root_path
          .file_stem()
          .unwrap()
          .to_string_lossy()
          .into_owned(),
        entries: Vec::new(),
      },
      root_path,
      files: Vec::new(),
      current_offset: 0,
      file_offsets: Default::default(),
    }
  }

  /// Overrides the name of the virtual root directory.
  pub fn set_root_dir_name(&mut self, name: String) {
    self.root_dir.name = name;
  }

  /// Walks `path` on the real file system and mirrors every directory,
  /// regular file, and symlink into the virtual file system.
  pub fn add_dir_recursive(&mut self, path: &Path) -> Result<(), AnyError> {
    self.add_dir(path);
    let read_dir = std::fs::read_dir(path)
      .with_context(|| format!("Reading {}", path.display()))?;

    for entry in read_dir {
      let entry = entry?;
      let file_type = entry.file_type()?;
      let path = entry.path();

      if file_type.is_dir() {
        self.add_dir_recursive(&path)?;
      } else if file_type.is_file() {
        let file_bytes = std::fs::read(&path)
          .with_context(|| format!("Reading {}", path.display()))?;
        self.add_file(&path, file_bytes);
      } else if file_type.is_symlink() {
        let target = std::fs::read_link(&path)
          .with_context(|| format!("Reading symlink {}", path.display()))?;
        self.add_symlink(&path, &target);
      }
    }

    Ok(())
  }

  /// Ensures every directory along `path` (which must be under the root)
  /// exists, creating missing ones, and returns the innermost directory.
  pub fn add_dir(&mut self, path: &Path) -> &mut VirtualDirectory {
    // panics if `path` is not under the builder's root
    let path = path.strip_prefix(&self.root_path).unwrap();
    let mut current_dir = &mut self.root_dir;

    for component in path.components() {
      let name = component.as_os_str().to_string_lossy();
      // entries are kept sorted by name so lookups can binary search
      let index = match current_dir
        .entries
        .binary_search_by(|e| e.name().cmp(&name))
      {
        Ok(index) => index,
        Err(insert_index) => {
          current_dir.entries.insert(
            insert_index,
            VfsEntry::Dir(VirtualDirectory {
              name: name.to_string(),
              entries: Vec::new(),
            }),
          );
          insert_index
        }
      };
      match &mut current_dir.entries[index] {
        VfsEntry::Dir(dir) => {
          current_dir = dir;
        }
        // only directories are ever inserted along an ancestor chain
        _ => unreachable!(),
      };
    }

    current_dir
  }

  /// Adds a file, de-duplicating its contents by checksum so identical
  /// data is stored only once in the backing blob.
  pub fn add_file(&mut self, path: &Path, data: Vec<u8>) {
    let checksum = util::checksum::gen(&[&data]);
    let offset = if let Some(offset) = self.file_offsets.get(&checksum) {
      // duplicate file, reuse an old offset
      *offset
    } else {
      self.file_offsets.insert(checksum, self.current_offset);
      self.current_offset
    };

    let dir = self.add_dir(path.parent().unwrap());
    let name = path.file_name().unwrap().to_string_lossy();
    let data_len = data.len();
    match dir.entries.binary_search_by(|e| e.name().cmp(&name)) {
      // a given path is only ever added once
      Ok(_) => unreachable!(),
      Err(insert_index) => {
        dir.entries.insert(
          insert_index,
          VfsEntry::File(VirtualFile {
            name: name.to_string(),
            offset,
            len: data.len() as u64,
          }),
        );
      }
    }

    // new file, update the list of files
    if self.current_offset == offset {
      self.files.push(data);
      self.current_offset += data_len as u64;
    }
  }

  /// Adds a symlink whose target must also live under the root; the
  /// destination is stored as components relative to the root.
  pub fn add_symlink(&mut self, path: &Path, target: &Path) {
    let dest = target.strip_prefix(&self.root_path).unwrap().to_path_buf();
    let dir = self.add_dir(path.parent().unwrap());
    let name = path.file_name().unwrap().to_string_lossy();
    match dir.entries.binary_search_by(|e| e.name().cmp(&name)) {
      Ok(_) => unreachable!(),
      Err(insert_index) => {
        dir.entries.insert(
          insert_index,
          VfsEntry::Symlink(VirtualSymlink {
            name: name.to_string(),
            dest_parts: dest
              .components()
              .map(|c| c.as_os_str().to_string_lossy().to_string())
              .collect::<Vec<_>>(),
          }),
        );
      }
    }
  }

  /// Consumes the builder, returning the directory tree and the unique
  /// file contents in the order their offsets were assigned.
  pub fn into_dir_and_files(self) -> (VirtualDirectory, Vec<Vec<u8>>) {
    (self.root_dir, self.files)
  }
}
+
/// Borrowed view of a single entry, used when walking the tree without
/// taking ownership.
#[derive(Debug)]
enum VfsEntryRef<'a> {
  Dir(&'a VirtualDirectory),
  File(&'a VirtualFile),
  Symlink(&'a VirtualSymlink),
}
+
+impl<'a> VfsEntryRef<'a> {
+ pub fn as_fs_stat(&self) -> FsStat {
+ match self {
+ VfsEntryRef::Dir(_) => FsStat {
+ is_directory: true,
+ is_file: false,
+ is_symlink: false,
+ atime: None,
+ birthtime: None,
+ mtime: None,
+ blksize: 0,
+ size: 0,
+ dev: 0,
+ ino: 0,
+ mode: 0,
+ nlink: 0,
+ uid: 0,
+ gid: 0,
+ rdev: 0,
+ blocks: 0,
+ },
+ VfsEntryRef::File(file) => FsStat {
+ is_directory: false,
+ is_file: true,
+ is_symlink: false,
+ atime: None,
+ birthtime: None,
+ mtime: None,
+ blksize: 0,
+ size: file.len,
+ dev: 0,
+ ino: 0,
+ mode: 0,
+ nlink: 0,
+ uid: 0,
+ gid: 0,
+ rdev: 0,
+ blocks: 0,
+ },
+ VfsEntryRef::Symlink(_) => FsStat {
+ is_directory: false,
+ is_file: false,
+ is_symlink: true,
+ atime: None,
+ birthtime: None,
+ mtime: None,
+ blksize: 0,
+ size: 0,
+ dev: 0,
+ ino: 0,
+ mode: 0,
+ nlink: 0,
+ uid: 0,
+ gid: 0,
+ rdev: 0,
+ blocks: 0,
+ },
+ }
+ }
+}
+
// todo(dsherret): we should store this more efficiently in the binary
/// An owned node in the serialized virtual file system tree.
#[derive(Debug, Serialize, Deserialize)]
pub enum VfsEntry {
  Dir(VirtualDirectory),
  File(VirtualFile),
  Symlink(VirtualSymlink),
}
+
+impl VfsEntry {
+ pub fn name(&self) -> &str {
+ match self {
+ VfsEntry::Dir(dir) => &dir.name,
+ VfsEntry::File(file) => &file.name,
+ VfsEntry::Symlink(symlink) => &symlink.name,
+ }
+ }
+
+ fn as_ref(&self) -> VfsEntryRef {
+ match self {
+ VfsEntry::Dir(dir) => VfsEntryRef::Dir(dir),
+ VfsEntry::File(file) => VfsEntryRef::File(file),
+ VfsEntry::Symlink(symlink) => VfsEntryRef::Symlink(symlink),
+ }
+ }
+}
+
/// A directory in the virtual file system.
#[derive(Debug, Serialize, Deserialize)]
pub struct VirtualDirectory {
  pub name: String,
  // should be sorted by name (lookups binary search on it)
  pub entries: Vec<VfsEntry>,
}
+
/// A file in the virtual file system; its contents live in the backing
/// data section at `offset..offset + len`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VirtualFile {
  pub name: String,
  // byte offset of this file's data within the backing data section
  pub offset: u64,
  // length of this file's data in bytes
  pub len: u64,
}
+
/// A symlink in the virtual file system; the destination is stored as
/// path components relative to the vfs root.
#[derive(Debug, Serialize, Deserialize)]
pub struct VirtualSymlink {
  pub name: String,
  pub dest_parts: Vec<String>,
}
+
+impl VirtualSymlink {
+ pub fn resolve_dest_from_root(&self, root: &Path) -> PathBuf {
+ let mut dest = root.to_path_buf();
+ for part in &self.dest_parts {
+ dest.push(part);
+ }
+ dest
+ }
+}
+
/// The deserialized virtual file system tree plus the location it is
/// "mounted" at and where its file data starts in the backing file.
#[derive(Debug)]
pub struct VfsRoot {
  pub dir: VirtualDirectory,
  // absolute path the virtual tree appears under
  pub root_path: PathBuf,
  // byte offset in the backing file where file data begins
  pub start_file_offset: u64,
}
+
impl VfsRoot {
  /// Looks up `path`, following symlinks, returning the fully resolved
  /// path together with the entry it designates.
  fn find_entry<'a>(
    &'a self,
    path: &Path,
  ) -> std::io::Result<(PathBuf, VfsEntryRef<'a>)> {
    self.find_entry_inner(path, &mut HashSet::new())
  }

  /// Like `find_entry`, but shares the `seen` set with the caller so that
  /// nested resolution can detect symlink cycles.
  fn find_entry_inner<'a>(
    &'a self,
    path: &Path,
    seen: &mut HashSet<PathBuf>,
  ) -> std::io::Result<(PathBuf, VfsEntryRef<'a>)> {
    let mut path = Cow::Borrowed(path);
    loop {
      let (resolved_path, entry) =
        self.find_entry_no_follow_inner(&path, seen)?;
      match entry {
        VfsEntryRef::Symlink(symlink) => {
          // revisiting a path while following links means a cycle
          if !seen.insert(path.to_path_buf()) {
            return Err(std::io::Error::new(
              std::io::ErrorKind::Other,
              "circular symlinks",
            ));
          }
          // keep resolving from the symlink's destination
          path = Cow::Owned(symlink.resolve_dest_from_root(&self.root_path));
        }
        _ => {
          return Ok((resolved_path, entry));
        }
      }
    }
  }

  /// Looks up `path` without following a symlink at the final component
  /// (intermediate symlinks are still resolved), as `lstat` would.
  fn find_entry_no_follow(
    &self,
    path: &Path,
  ) -> std::io::Result<(PathBuf, VfsEntryRef)> {
    self.find_entry_no_follow_inner(path, &mut HashSet::new())
  }

  fn find_entry_no_follow_inner<'a>(
    &'a self,
    path: &Path,
    seen: &mut HashSet<PathBuf>,
  ) -> std::io::Result<(PathBuf, VfsEntryRef<'a>)> {
    // anything outside the vfs root cannot exist in the vfs
    let relative_path = match path.strip_prefix(&self.root_path) {
      Ok(p) => p,
      Err(_) => {
        return Err(std::io::Error::new(
          std::io::ErrorKind::NotFound,
          "path not found",
        ));
      }
    };
    let mut final_path = self.root_path.clone();
    let mut current_entry = VfsEntryRef::Dir(&self.dir);
    // walk one component at a time, descending through directories and
    // resolving any symlink that appears mid-path
    for component in relative_path.components() {
      let component = component.as_os_str().to_string_lossy();
      let current_dir = match current_entry {
        VfsEntryRef::Dir(dir) => {
          final_path.push(component.as_ref());
          dir
        }
        VfsEntryRef::Symlink(symlink) => {
          let dest = symlink.resolve_dest_from_root(&self.root_path);
          let (resolved_path, entry) = self.find_entry_inner(&dest, seen)?;
          final_path = resolved_path; // overwrite with the new resolved path
          match entry {
            VfsEntryRef::Dir(dir) => {
              final_path.push(component.as_ref());
              dir
            }
            _ => {
              // a mid-path symlink must resolve to a directory
              return Err(std::io::Error::new(
                std::io::ErrorKind::NotFound,
                "path not found",
              ));
            }
          }
        }
        _ => {
          // a file cannot have children
          return Err(std::io::Error::new(
            std::io::ErrorKind::NotFound,
            "path not found",
          ));
        }
      };
      // entries are sorted by name, so binary search finds the child
      match current_dir
        .entries
        .binary_search_by(|e| e.name().cmp(&component))
      {
        Ok(index) => {
          current_entry = current_dir.entries[index].as_ref();
        }
        Err(_) => {
          return Err(std::io::Error::new(
            std::io::ErrorKind::NotFound,
            "path not found",
          ));
        }
      }
    }

    Ok((final_path, current_entry))
  }
}
+
/// An open handle to a virtual file: the entry metadata, a cursor shared
/// between clones of the handle, and the vfs that owns the data.
#[derive(Clone)]
struct FileBackedVfsFile {
  file: VirtualFile,
  pos: Arc<Mutex<u64>>,
  vfs: Arc<FileBackedVfs>,
}
+
impl FileBackedVfsFile {
  /// Moves this handle's cursor and returns the new position.
  ///
  /// NOTE(review): for `SeekFrom::End(n)` this computes `len - n` when
  /// `n >= 0` (and `len + |n|` when `n < 0`), which is inverted relative
  /// to `std::io::Seek` semantics. The in-file `test_open_file` test pins
  /// this behavior, so it is documented rather than changed — confirm
  /// whether it is intentional.
  fn seek(&self, pos: SeekFrom) -> FsResult<u64> {
    match pos {
      SeekFrom::Start(pos) => {
        *self.pos.lock() = pos;
        Ok(pos)
      }
      SeekFrom::End(offset) => {
        if offset < 0 && -offset as u64 > self.file.len {
          Err(std::io::Error::new(std::io::ErrorKind::PermissionDenied, "An attempt was made to move the file pointer before the beginning of the file.").into())
        } else {
          let mut current_pos = self.pos.lock();
          *current_pos = if offset >= 0 {
            self.file.len - (offset as u64)
          } else {
            self.file.len + (-offset as u64)
          };
          Ok(*current_pos)
        }
      }
      SeekFrom::Current(offset) => {
        let mut current_pos = self.pos.lock();
        if offset >= 0 {
          *current_pos += offset as u64;
        } else if -offset as u64 > *current_pos {
          return Err(std::io::Error::new(std::io::ErrorKind::PermissionDenied, "An attempt was made to move the file pointer before the beginning of the file.").into());
        } else {
          *current_pos -= -offset as u64;
        }
        Ok(*current_pos)
      }
    }
  }

  /// Reads into `buf` at the current cursor. The cursor is advanced by the
  /// requested amount clamped to the file length (not by the bytes the
  /// underlying read actually returned).
  fn read_to_buf(&self, buf: &mut [u8]) -> FsResult<usize> {
    let pos = {
      let mut pos = self.pos.lock();
      let read_pos = *pos;
      // advance the position due to the read
      *pos = std::cmp::min(self.file.len, *pos + buf.len() as u64);
      read_pos
    };
    self
      .vfs
      .read_file(&self.file, pos, buf)
      .map_err(|err| err.into())
  }

  /// Reads from the current cursor to the end of the file, moving the
  /// cursor to the end; a cursor already past the end reads nothing.
  fn read_to_end(&self) -> FsResult<Vec<u8>> {
    let pos = {
      let mut pos = self.pos.lock();
      let read_pos = *pos;
      // todo(dsherret): should this always set it to the end of the file?
      if *pos < self.file.len {
        // advance the position due to the read
        *pos = self.file.len;
      }
      read_pos
    };
    if pos > self.file.len {
      return Ok(Vec::new());
    }
    let size = (self.file.len - pos) as usize;
    let mut buf = vec![0; size];
    self.vfs.read_file(&self.file, pos, &mut buf)?;
    Ok(buf)
  }
}
+
// Implements the runtime's file abstraction on top of the virtual file
// system. Read/seek operations delegate to the shared vfs; everything
// that would mutate, lock, or stat via the handle is unsupported because
// the vfs is read-only.
#[async_trait::async_trait(?Send)]
impl deno_io::fs::File for FileBackedVfsFile {
  fn read_sync(self: Rc<Self>, buf: &mut [u8]) -> FsResult<usize> {
    self.read_to_buf(buf)
  }
  async fn read_byob(
    self: Rc<Self>,
    mut buf: BufMutView,
  ) -> FsResult<(usize, BufMutView)> {
    // clone the inner handle (cursor is shared via Arc) so the read can
    // run on a spawned task
    // NOTE(review): this uses tokio::task::spawn while read_all_async
    // below uses spawn_blocking — presumably fine for short reads, but
    // confirm the inconsistency is intentional.
    let inner = (*self).clone();
    tokio::task::spawn(async move {
      let nread = inner.read_to_buf(&mut buf)?;
      Ok((nread, buf))
    })
    .await?
  }

  fn write_sync(self: Rc<Self>, _buf: &[u8]) -> FsResult<usize> {
    Err(FsError::NotSupported)
  }
  async fn write(
    self: Rc<Self>,
    _buf: BufView,
  ) -> FsResult<deno_core::WriteOutcome> {
    Err(FsError::NotSupported)
  }

  fn write_all_sync(self: Rc<Self>, _buf: &[u8]) -> FsResult<()> {
    Err(FsError::NotSupported)
  }
  async fn write_all(self: Rc<Self>, _buf: BufView) -> FsResult<()> {
    Err(FsError::NotSupported)
  }

  fn read_all_sync(self: Rc<Self>) -> FsResult<Vec<u8>> {
    self.read_to_end()
  }
  async fn read_all_async(self: Rc<Self>) -> FsResult<Vec<u8>> {
    let inner = (*self).clone();
    tokio::task::spawn_blocking(move || inner.read_to_end()).await?
  }

  fn chmod_sync(self: Rc<Self>, _pathmode: u32) -> FsResult<()> {
    Err(FsError::NotSupported)
  }
  async fn chmod_async(self: Rc<Self>, _mode: u32) -> FsResult<()> {
    Err(FsError::NotSupported)
  }

  fn seek_sync(self: Rc<Self>, pos: SeekFrom) -> FsResult<u64> {
    self.seek(pos)
  }
  async fn seek_async(self: Rc<Self>, pos: SeekFrom) -> FsResult<u64> {
    self.seek(pos)
  }

  fn datasync_sync(self: Rc<Self>) -> FsResult<()> {
    Err(FsError::NotSupported)
  }
  async fn datasync_async(self: Rc<Self>) -> FsResult<()> {
    Err(FsError::NotSupported)
  }

  fn sync_sync(self: Rc<Self>) -> FsResult<()> {
    Err(FsError::NotSupported)
  }
  async fn sync_async(self: Rc<Self>) -> FsResult<()> {
    Err(FsError::NotSupported)
  }

  fn stat_sync(self: Rc<Self>) -> FsResult<FsStat> {
    Err(FsError::NotSupported)
  }
  async fn stat_async(self: Rc<Self>) -> FsResult<FsStat> {
    Err(FsError::NotSupported)
  }

  fn lock_sync(self: Rc<Self>, _exclusive: bool) -> FsResult<()> {
    Err(FsError::NotSupported)
  }
  async fn lock_async(self: Rc<Self>, _exclusive: bool) -> FsResult<()> {
    Err(FsError::NotSupported)
  }

  fn unlock_sync(self: Rc<Self>) -> FsResult<()> {
    Err(FsError::NotSupported)
  }
  async fn unlock_async(self: Rc<Self>) -> FsResult<()> {
    Err(FsError::NotSupported)
  }

  fn truncate_sync(self: Rc<Self>, _len: u64) -> FsResult<()> {
    Err(FsError::NotSupported)
  }
  async fn truncate_async(self: Rc<Self>, _len: u64) -> FsResult<()> {
    Err(FsError::NotSupported)
  }

  fn utime_sync(
    self: Rc<Self>,
    _atime_secs: i64,
    _atime_nanos: u32,
    _mtime_secs: i64,
    _mtime_nanos: u32,
  ) -> FsResult<()> {
    Err(FsError::NotSupported)
  }
  async fn utime_async(
    self: Rc<Self>,
    _atime_secs: i64,
    _atime_nanos: u32,
    _mtime_secs: i64,
    _mtime_nanos: u32,
  ) -> FsResult<()> {
    Err(FsError::NotSupported)
  }

  // lower level functionality
  fn as_stdio(self: Rc<Self>) -> FsResult<std::process::Stdio> {
    Err(FsError::NotSupported)
  }
  #[cfg(unix)]
  fn backing_fd(self: Rc<Self>) -> Option<std::os::unix::prelude::RawFd> {
    // no OS-level descriptor exists for a virtual file
    None
  }
  #[cfg(windows)]
  fn backing_fd(self: Rc<Self>) -> Option<std::os::windows::io::RawHandle> {
    None
  }
  fn try_clone_inner(self: Rc<Self>) -> FsResult<Rc<dyn deno_io::fs::File>> {
    Ok(self)
  }
}
+
/// A virtual file system whose file contents are served from a single
/// backing file (shared behind a mutex because reads must seek+read).
#[derive(Debug)]
pub struct FileBackedVfs {
  file: Mutex<File>,
  fs_root: VfsRoot,
}
+
+impl FileBackedVfs {
+ pub fn new(file: File, fs_root: VfsRoot) -> Self {
+ Self {
+ file: Mutex::new(file),
+ fs_root,
+ }
+ }
+
+ pub fn root(&self) -> &Path {
+ &self.fs_root.root_path
+ }
+
+ pub fn is_path_within(&self, path: &Path) -> bool {
+ path.starts_with(&self.fs_root.root_path)
+ }
+
+ pub fn open_file(
+ self: &Arc<Self>,
+ path: &Path,
+ ) -> std::io::Result<Rc<dyn deno_io::fs::File>> {
+ let file = self.file_entry(path)?;
+ Ok(Rc::new(FileBackedVfsFile {
+ file: file.clone(),
+ vfs: self.clone(),
+ pos: Default::default(),
+ }))
+ }
+
+ pub fn read_dir(&self, path: &Path) -> std::io::Result<Vec<FsDirEntry>> {
+ let dir = self.dir_entry(path)?;
+ Ok(
+ dir
+ .entries
+ .iter()
+ .map(|entry| FsDirEntry {
+ name: entry.name().to_string(),
+ is_file: matches!(entry, VfsEntry::File(_)),
+ is_directory: matches!(entry, VfsEntry::Dir(_)),
+ is_symlink: matches!(entry, VfsEntry::Symlink(_)),
+ })
+ .collect(),
+ )
+ }
+
+ pub fn read_link(&self, path: &Path) -> std::io::Result<PathBuf> {
+ let (_, entry) = self.fs_root.find_entry_no_follow(path)?;
+ match entry {
+ VfsEntryRef::Symlink(symlink) => {
+ Ok(symlink.resolve_dest_from_root(&self.fs_root.root_path))
+ }
+ VfsEntryRef::Dir(_) | VfsEntryRef::File(_) => Err(std::io::Error::new(
+ std::io::ErrorKind::Other,
+ "not a symlink",
+ )),
+ }
+ }
+
+ pub fn lstat(&self, path: &Path) -> std::io::Result<FsStat> {
+ let (_, entry) = self.fs_root.find_entry_no_follow(path)?;
+ Ok(entry.as_fs_stat())
+ }
+
+ pub fn stat(&self, path: &Path) -> std::io::Result<FsStat> {
+ let (_, entry) = self.fs_root.find_entry(path)?;
+ Ok(entry.as_fs_stat())
+ }
+
+ pub fn canonicalize(&self, path: &Path) -> std::io::Result<PathBuf> {
+ let (path, _) = self.fs_root.find_entry(path)?;
+ Ok(path)
+ }
+
+ pub fn read_file_all(&self, file: &VirtualFile) -> std::io::Result<Vec<u8>> {
+ let mut buf = vec![0; file.len as usize];
+ self.read_file(file, 0, &mut buf)?;
+ Ok(buf)
+ }
+
+ pub fn read_file(
+ &self,
+ file: &VirtualFile,
+ pos: u64,
+ buf: &mut [u8],
+ ) -> std::io::Result<usize> {
+ let mut fs_file = self.file.lock();
+ fs_file.seek(SeekFrom::Start(
+ self.fs_root.start_file_offset + file.offset + pos,
+ ))?;
+ fs_file.read(buf)
+ }
+
+ pub fn dir_entry(&self, path: &Path) -> std::io::Result<&VirtualDirectory> {
+ let (_, entry) = self.fs_root.find_entry(path)?;
+ match entry {
+ VfsEntryRef::Dir(dir) => Ok(dir),
+ VfsEntryRef::Symlink(_) => unreachable!(),
+ VfsEntryRef::File(_) => Err(std::io::Error::new(
+ std::io::ErrorKind::Other,
+ "path is a file",
+ )),
+ }
+ }
+
+ pub fn file_entry(&self, path: &Path) -> std::io::Result<&VirtualFile> {
+ let (_, entry) = self.fs_root.find_entry(path)?;
+ match entry {
+ VfsEntryRef::Dir(_) => Err(std::io::Error::new(
+ std::io::ErrorKind::Other,
+ "path is a directory",
+ )),
+ VfsEntryRef::Symlink(_) => unreachable!(),
+ VfsEntryRef::File(file) => Ok(file),
+ }
+ }
+}
+
#[cfg(test)]
mod test {
  use std::io::Write;
  use test_util::TempDir;

  use super::*;

  // helper: look up `path` in the vfs and return its contents as a string
  fn read_file(vfs: &FileBackedVfs, path: &Path) -> String {
    let file = vfs.file_entry(path).unwrap();
    String::from_utf8(vfs.read_file_all(file).unwrap()).unwrap()
  }

  #[test]
  fn builds_and_uses_virtual_fs() {
    let temp_dir = TempDir::new();
    let src_path = temp_dir.path().join("src");
    let mut builder = VfsBuilder::new(src_path.clone());
    builder.add_file(&src_path.join("a.txt"), "data".into());
    builder.add_file(&src_path.join("b.txt"), "data".into());
    assert_eq!(builder.files.len(), 1); // because duplicate data
    builder.add_file(&src_path.join("c.txt"), "c".into());
    builder.add_file(&src_path.join("sub_dir").join("d.txt"), "d".into());
    builder.add_file(&src_path.join("e.txt"), "e".into());
    builder.add_symlink(
      &src_path.join("sub_dir").join("e.txt"),
      &src_path.join("e.txt"),
    );

    // get the virtual fs
    let (dest_path, virtual_fs) = into_virtual_fs(builder, &temp_dir);

    assert_eq!(read_file(&virtual_fs, &dest_path.join("a.txt")), "data");
    assert_eq!(read_file(&virtual_fs, &dest_path.join("b.txt")), "data");

    // attempt reading a symlink
    assert_eq!(
      read_file(&virtual_fs, &dest_path.join("sub_dir").join("e.txt")),
      "e",
    );

    // canonicalize symlink
    assert_eq!(
      virtual_fs
        .canonicalize(&dest_path.join("sub_dir").join("e.txt"))
        .unwrap(),
      dest_path.join("e.txt"),
    );

    // metadata
    assert!(
      virtual_fs
        .lstat(&dest_path.join("sub_dir").join("e.txt"))
        .unwrap()
        .is_symlink
    );
    assert!(
      virtual_fs
        .stat(&dest_path.join("sub_dir").join("e.txt"))
        .unwrap()
        .is_file
    );
    assert!(
      virtual_fs
        .stat(&dest_path.join("sub_dir"))
        .unwrap()
        .is_directory,
    );
    assert!(virtual_fs.stat(&dest_path.join("e.txt")).unwrap().is_file,);
  }

  #[test]
  fn test_include_dir_recursive() {
    let temp_dir = TempDir::new();
    temp_dir.create_dir_all("src/nested/sub_dir");
    temp_dir.write("src/a.txt", "data");
    temp_dir.write("src/b.txt", "data");
    util::fs::symlink_dir(
      &temp_dir.path().join("src/nested/sub_dir"),
      &temp_dir.path().join("src/sub_dir_link"),
    )
    .unwrap();
    temp_dir.write("src/nested/sub_dir/c.txt", "c");

    // build and create the virtual fs
    let src_path = temp_dir.path().join("src");
    let mut builder = VfsBuilder::new(src_path.clone());
    builder.add_dir_recursive(&src_path).unwrap();
    let (dest_path, virtual_fs) = into_virtual_fs(builder, &temp_dir);

    assert_eq!(read_file(&virtual_fs, &dest_path.join("a.txt")), "data",);
    assert_eq!(read_file(&virtual_fs, &dest_path.join("b.txt")), "data",);

    // files are reachable both via the real path and via the symlink
    assert_eq!(
      read_file(
        &virtual_fs,
        &dest_path.join("nested").join("sub_dir").join("c.txt")
      ),
      "c",
    );
    assert_eq!(
      read_file(&virtual_fs, &dest_path.join("sub_dir_link").join("c.txt")),
      "c",
    );
    assert!(
      virtual_fs
        .lstat(&dest_path.join("sub_dir_link"))
        .unwrap()
        .is_symlink
    );

    assert_eq!(
      virtual_fs
        .canonicalize(&dest_path.join("sub_dir_link").join("c.txt"))
        .unwrap(),
      dest_path.join("nested").join("sub_dir").join("c.txt"),
    );
  }

  // helper: serialize the builder's file data to disk, then mount the
  // tree at a "dest" path backed by that data file
  fn into_virtual_fs(
    builder: VfsBuilder,
    temp_dir: &TempDir,
  ) -> (PathBuf, FileBackedVfs) {
    let virtual_fs_file = temp_dir.path().join("virtual_fs");
    let (root_dir, files) = builder.into_dir_and_files();
    {
      let mut file = std::fs::File::create(&virtual_fs_file).unwrap();
      for file_data in &files {
        file.write_all(file_data).unwrap();
      }
    }
    let file = std::fs::File::open(&virtual_fs_file).unwrap();
    let dest_path = temp_dir.path().join("dest");
    (
      dest_path.clone(),
      FileBackedVfs::new(
        file,
        VfsRoot {
          dir: root_dir,
          root_path: dest_path,
          start_file_offset: 0,
        },
      ),
    )
  }

  #[test]
  fn circular_symlink() {
    let temp_dir = TempDir::new();
    let src_path = temp_dir.path().join("src");
    let mut builder = VfsBuilder::new(src_path.clone());
    builder.add_symlink(&src_path.join("a.txt"), &src_path.join("b.txt"));
    builder.add_symlink(&src_path.join("b.txt"), &src_path.join("c.txt"));
    builder.add_symlink(&src_path.join("c.txt"), &src_path.join("a.txt"));
    let (dest_path, virtual_fs) = into_virtual_fs(builder, &temp_dir);
    // following the cycle errors, but reading a single link still works
    assert_eq!(
      virtual_fs
        .file_entry(&dest_path.join("a.txt"))
        .err()
        .unwrap()
        .to_string(),
      "circular symlinks",
    );
    assert_eq!(
      virtual_fs.read_link(&dest_path.join("a.txt")).unwrap(),
      dest_path.join("b.txt")
    );
    assert_eq!(
      virtual_fs.read_link(&dest_path.join("b.txt")).unwrap(),
      dest_path.join("c.txt")
    );
  }

  #[tokio::test]
  async fn test_open_file() {
    let temp_dir = TempDir::new();
    let temp_path = temp_dir.path();
    let mut builder = VfsBuilder::new(temp_path.to_path_buf());
    builder.add_file(
      &temp_path.join("a.txt"),
      "0123456789".to_string().into_bytes(),
    );
    let (dest_path, virtual_fs) = into_virtual_fs(builder, &temp_dir);
    let virtual_fs = Arc::new(virtual_fs);
    let file = virtual_fs.open_file(&dest_path.join("a.txt")).unwrap();
    file.clone().seek_sync(SeekFrom::Current(2)).unwrap();
    let mut buf = vec![0; 2];
    file.clone().read_sync(&mut buf).unwrap();
    assert_eq!(buf, b"23");
    file.clone().read_sync(&mut buf).unwrap();
    assert_eq!(buf, b"45");
    file.clone().seek_sync(SeekFrom::Current(-4)).unwrap();
    file.clone().read_sync(&mut buf).unwrap();
    assert_eq!(buf, b"23");
    file.clone().seek_sync(SeekFrom::Start(2)).unwrap();
    file.clone().read_sync(&mut buf).unwrap();
    assert_eq!(buf, b"23");
    // note: End(2) here means "2 bytes before the end" in this impl
    file.clone().seek_sync(SeekFrom::End(2)).unwrap();
    file.clone().read_sync(&mut buf).unwrap();
    assert_eq!(buf, b"89");
    file.clone().seek_sync(SeekFrom::Current(-8)).unwrap();
    file.clone().read_sync(&mut buf).unwrap();
    assert_eq!(buf, b"23");
    assert_eq!(
      file
        .clone()
        .seek_sync(SeekFrom::Current(-5))
        .err()
        .unwrap()
        .into_io_error()
        .to_string(),
      "An attempt was made to move the file pointer before the beginning of the file."
    );
    // go beyond the file length, then back
    file.clone().seek_sync(SeekFrom::Current(40)).unwrap();
    file.clone().seek_sync(SeekFrom::Current(-38)).unwrap();
    let read_buf = file.clone().read(2).await.unwrap();
    assert_eq!(read_buf.to_vec(), b"67");
    file.clone().seek_sync(SeekFrom::Current(-2)).unwrap();

    // read to the end of the file
    let all_buf = file.clone().read_all_sync().unwrap();
    assert_eq!(all_buf.to_vec(), b"6789");
    file.clone().seek_sync(SeekFrom::Current(-9)).unwrap();

    // exercise try_clone_inner and read_all_async
    let all_buf = file
      .try_clone_inner()
      .unwrap()
      .read_all_async()
      .await
      .unwrap();
    assert_eq!(all_buf.to_vec(), b"123456789");
  }
}
diff --git a/cli/tests/integration/compile_tests.rs b/cli/tests/integration/compile_tests.rs
index 7835d7f0d..ac088ca90 100644
--- a/cli/tests/integration/compile_tests.rs
+++ b/cli/tests/integration/compile_tests.rs
@@ -5,6 +5,8 @@ use std::process::Command;
use test_util as util;
use test_util::TempDir;
use util::assert_contains;
+use util::env_vars_for_npm_tests;
+use util::TestContextBuilder;
#[test]
fn compile() {
@@ -675,30 +677,40 @@ fn workers_basic() {
#[test]
fn workers_not_in_module_map() {
- let _guard = util::http_server();
- let dir = TempDir::new();
+ let context = TestContextBuilder::for_npm()
+ .use_http_server()
+ .use_temp_cwd()
+ .build();
+ let temp_dir = context.temp_dir();
let exe = if cfg!(windows) {
- dir.path().join("not_in_module_map.exe")
+ temp_dir.path().join("not_in_module_map.exe")
} else {
- dir.path().join("not_in_module_map")
+ temp_dir.path().join("not_in_module_map")
};
- let output = util::deno_cmd()
- .current_dir(util::root_path())
- .arg("compile")
- .arg("--output")
- .arg(&exe)
- .arg(util::testdata_path().join("./compile/workers/not_in_module_map.ts"))
- .output()
- .unwrap();
- assert!(output.status.success());
+ let main_path =
+ util::testdata_path().join("./compile/workers/not_in_module_map.ts");
+ let output = context
+ .new_command()
+ .args_vec([
+ "compile",
+ "--output",
+ &exe.to_string_lossy(),
+ &main_path.to_string_lossy(),
+ ])
+ .run();
+ output.assert_exit_code(0);
+ output.skip_output_check();
- let output = Command::new(&exe).env("NO_COLOR", "").output().unwrap();
- assert!(!output.status.success());
- let stderr = String::from_utf8(output.stderr).unwrap();
- assert!(stderr.starts_with(concat!(
- "error: Uncaught (in worker \"\") Module not found\n",
- "error: Uncaught (in promise) Error: Unhandled error in child worker.\n"
- )));
+ let output = context
+ .new_command()
+ .command_name(exe.to_string_lossy())
+ .env("NO_COLOR", "")
+ .run();
+ output.assert_exit_code(1);
+ output.assert_matches_text(concat!(
+ "error: Uncaught (in worker \"\") Module not found: [WILDCARD]",
+ "error: Uncaught (in promise) Error: Unhandled error in child worker.\n[WILDCARD]"
+ ));
}
#[test]
@@ -790,3 +802,256 @@ fn dynamic_import_unanalyzable() {
.unwrap();
assert_eq!(String::from_utf8(output.stdout).unwrap(), expected);
}
+
// compiling an npm specifier without --unstable must fail with an error
// that points the user at the missing flag
itest!(npm_specifiers_errors_no_unstable {
  args: "compile -A --quiet npm/cached_only/main.ts",
  output_str: Some(
    concat!(
      "error: Using npm specifiers with deno compile requires the --unstable flag.",
      "\n\n",
      "Caused by:\n",
      " npm specifiers have not yet been implemented for this subcommand (https://github.com/denoland/deno/issues/15960). Found: npm:chalk@5.0.1\n"
    )
  ),
  exit_code: 1,
  envs: env_vars_for_npm_tests(),
  http_server: true,
});
+
// end-to-end: compile a program that imports npm and node: specifiers,
// run the produced binary, and check its output — with and without
// --node-modules-dir, and once resolving through a package.json
#[test]
fn compile_npm_specifiers() {
  let context = TestContextBuilder::for_npm()
    .use_sync_npm_download()
    .use_temp_cwd()
    .build();

  let temp_dir = context.temp_dir();
  temp_dir.write(
    "main.ts",
    concat!(
      "import path from 'node:path';\n",
      "import { getValue, setValue } from 'npm:@denotest/esm-basic';\n",
      "import getValueDefault from 'npm:@denotest/esm-import-cjs-default';\n",
      "setValue(2);\n",
      "console.log(path.join('testing', 'this'));",
      "console.log(getValue());",
      "console.log(getValueDefault());",
    ),
  );

  let binary_path = if cfg!(windows) {
    temp_dir.path().join("binary.exe")
  } else {
    temp_dir.path().join("binary")
  };

  // try with and without --node-modules-dir
  let compile_commands = &[
    "compile --unstable --output binary main.ts",
    "compile --unstable --node-modules-dir --output binary main.ts",
  ];

  for compile_command in compile_commands {
    let output = context.new_command().args(compile_command).run();
    output.assert_exit_code(0);
    output.skip_output_check();

    let output = context
      .new_command()
      .command_name(binary_path.to_string_lossy())
      .run();
    output.assert_matches_text(
      r#"Node esm importing node cjs
===========================
{
  default: [Function (anonymous)],
  named: [Function (anonymous)],
  MyClass: [class MyClass]
}
{ default: [Function (anonymous)], named: [Function (anonymous)] }
[Module: null prototype] {
  MyClass: [class MyClass],
  __esModule: true,
  default: {
    default: [Function (anonymous)],
    named: [Function (anonymous)],
    MyClass: [class MyClass]
  },
  named: [Function (anonymous)]
}
[Module: null prototype] {
  __esModule: true,
  default: { default: [Function (anonymous)], named: [Function (anonymous)] },
  named: [Function (anonymous)]
}
===========================
static method
testing[WILDCARD]this
2
5
"#,
    );
  }

  // try with a package.json
  temp_dir.remove_dir_all("node_modules");
  temp_dir.write(
    "main.ts",
    concat!(
      "import { getValue, setValue } from '@denotest/esm-basic';\n",
      "setValue(2);\n",
      "console.log(getValue());",
    ),
  );
  temp_dir.write(
    "package.json",
    r#"{ "dependencies": { "@denotest/esm-basic": "1" } }"#,
  );

  let output = context
    .new_command()
    .args("compile --unstable --output binary main.ts")
    .run();
  output.assert_exit_code(0);
  output.skip_output_check();

  let output = context
    .new_command()
    .command_name(binary_path.to_string_lossy())
    .run();
  output.assert_matches_text("2\n");
}
+
// exercises file-system operations against the embedded virtual fs
#[test]
fn compile_npm_file_system() {
  run_npm_bin_compile_test(RunNpmBinCompileOptions {
    input_specifier: "compile/npm_fs/main.ts",
    input_name: Some("binary"),
    expected_name: "binary",
    output_file: "compile/npm_fs/main.out",
    node_modules_dir: true,
    run_args: vec![],
  });
}
+
// compiling an npm bin entrypoint (ESM flavor) names the binary after it
#[test]
fn compile_npm_bin_esm() {
  run_npm_bin_compile_test(RunNpmBinCompileOptions {
    input_specifier: "npm:@denotest/bin/cli-esm",
    input_name: None,
    expected_name: "cli-esm",
    output_file: "npm/deno_run_esm.out",
    node_modules_dir: false,
    run_args: vec!["this", "is", "a", "test"],
  });
}
+
// compiling an npm bin entrypoint (CJS flavor) names the binary after it
#[test]
fn compile_npm_bin_cjs() {
  run_npm_bin_compile_test(RunNpmBinCompileOptions {
    input_specifier: "npm:@denotest/bin/cli-cjs",
    input_name: None,
    expected_name: "cli-cjs",
    output_file: "npm/deno_run_cjs.out",
    node_modules_dir: false,
    run_args: vec!["this", "is", "a", "test"],
  });
}
+
// a real-world package: the default bin of cowsay
#[test]
fn compile_npm_cowsay() {
  run_npm_bin_compile_test(RunNpmBinCompileOptions {
    input_specifier: "npm:cowsay@1.5.0",
    input_name: None,
    expected_name: "cowsay",
    output_file: "npm/deno_run_cowsay.out",
    node_modules_dir: false,
    run_args: vec!["Hello"],
  });
}
+
// same package, but with the bin name spelled out in the specifier
#[test]
fn compile_npm_cowsay_explicit() {
  run_npm_bin_compile_test(RunNpmBinCompileOptions {
    input_specifier: "npm:cowsay@1.5.0/cowsay",
    input_name: None,
    expected_name: "cowsay",
    output_file: "npm/deno_run_cowsay.out",
    node_modules_dir: false,
    run_args: vec!["Hello"],
  });
}
+
// a secondary bin of the same package
#[test]
fn compile_npm_cowthink() {
  run_npm_bin_compile_test(RunNpmBinCompileOptions {
    input_specifier: "npm:cowsay@1.5.0/cowthink",
    input_name: None,
    expected_name: "cowthink",
    output_file: "npm/deno_run_cowthink.out",
    node_modules_dir: false,
    run_args: vec!["Hello"],
  });
}
+
/// Options for `run_npm_bin_compile_test`.
struct RunNpmBinCompileOptions<'a> {
  // what to compile: an npm: specifier or a path relative to testdata
  input_specifier: &'a str,
  // testdata file the binary's output is compared against
  output_file: &'a str,
  // whether to compile with --node-modules-dir
  node_modules_dir: bool,
  // explicit --output name; None lets deno derive the binary name
  input_name: Option<&'a str>,
  // name of the produced binary (without the Windows .exe suffix)
  expected_name: &'a str,
  // arguments passed to the produced binary when running it
  run_args: Vec<&'a str>,
}
+
+fn run_npm_bin_compile_test(opts: RunNpmBinCompileOptions) {
+ let context = TestContextBuilder::for_npm()
+ .use_sync_npm_download()
+ .use_temp_cwd()
+ .build();
+
+ let temp_dir = context.temp_dir();
+ let testdata_path = context.testdata_path();
+ let main_specifier = if opts.input_specifier.starts_with("npm:") {
+ opts.input_specifier.to_string()
+ } else {
+ testdata_path
+ .join(opts.input_specifier)
+ .to_string_lossy()
+ .to_string()
+ };
+
+ let mut args = vec![
+ "compile".to_string(),
+ "-A".to_string(),
+ "--unstable".to_string(),
+ ];
+
+ if opts.node_modules_dir {
+ args.push("--node-modules-dir".to_string());
+ }
+
+ if let Some(bin_name) = opts.input_name {
+ args.push("--output".to_string());
+ args.push(bin_name.to_string());
+ }
+
+ args.push(main_specifier);
+
+ // compile
+ let output = context.new_command().args_vec(args).run();
+ output.assert_exit_code(0);
+ output.skip_output_check();
+
+ // run
+ let binary_path = if cfg!(windows) {
+ temp_dir.path().join(format!("{}.exe", opts.expected_name))
+ } else {
+ temp_dir.path().join(opts.expected_name)
+ };
+ let output = context
+ .new_command()
+ .command_name(binary_path.to_string_lossy())
+ .args_vec(opts.run_args)
+ .run();
+ output.assert_matches_file(opts.output_file);
+}
diff --git a/cli/tests/integration/npm_tests.rs b/cli/tests/integration/npm_tests.rs
index d4f2d3e45..c04322027 100644
--- a/cli/tests/integration/npm_tests.rs
+++ b/cli/tests/integration/npm_tests.rs
@@ -855,17 +855,9 @@ fn ensure_registry_files_local() {
}
}
-itest!(compile_errors {
- args: "compile -A --quiet npm/cached_only/main.ts",
- output_str: Some("error: npm specifiers have not yet been implemented for this sub command (https://github.com/denoland/deno/issues/15960). Found: npm:chalk@5.0.1\n"),
- exit_code: 1,
- envs: env_vars_for_npm_tests(),
- http_server: true,
- });
-
itest!(bundle_errors {
args: "bundle --quiet npm/esm/main.js",
- output_str: Some("error: npm specifiers have not yet been implemented for this sub command (https://github.com/denoland/deno/issues/15960). Found: npm:chalk@5.0.1\n"),
+ output_str: Some("error: npm specifiers have not yet been implemented for this subcommand (https://github.com/denoland/deno/issues/15960). Found: npm:chalk@5.0.1\n"),
exit_code: 1,
envs: env_vars_for_npm_tests(),
http_server: true,
diff --git a/cli/tests/testdata/compile/npm_fs/main.out b/cli/tests/testdata/compile/npm_fs/main.out
new file mode 100644
index 000000000..2e9ba477f
--- /dev/null
+++ b/cli/tests/testdata/compile/npm_fs/main.out
@@ -0,0 +1 @@
+success
diff --git a/cli/tests/testdata/compile/npm_fs/main.ts b/cli/tests/testdata/compile/npm_fs/main.ts
new file mode 100644
index 000000000..f9951d7a4
--- /dev/null
+++ b/cli/tests/testdata/compile/npm_fs/main.ts
@@ -0,0 +1,259 @@
+import { url } from "npm:@denotest/esm-basic";
+import { fileURLToPath } from "node:url";
+import path from "node:path";
+import assert from "node:assert/strict";
+
+// will be at node_modules\.deno\@denotest+esm-basic@1.0.0\node_modules\@denotest\esm-basic
+const dirPath = path.dirname(fileURLToPath(url));
+const nodeModulesPath = path.join(dirPath, "../../../../../");
+const packageJsonText = `{
+ "name": "@denotest/esm-basic",
+ "version": "1.0.0",
+ "type": "module",
+ "main": "main.mjs",
+ "types": "main.d.mts"
+}
+`;
+const vfsPackageJsonPath = path.join(dirPath, "package.json");
+
+// reading a file in vfs
+{
+ const text = Deno.readTextFileSync(vfsPackageJsonPath);
+ assert.equal(text, packageJsonText);
+}
+
+// reading a file async in vfs
+{
+ const text = await Deno.readTextFile(vfsPackageJsonPath);
+ assert.equal(text, packageJsonText);
+}
+
+// copy file from vfs to real fs
+{
+ Deno.copyFileSync(vfsPackageJsonPath, "package.json");
+ assert.equal(Deno.readTextFileSync("package.json"), packageJsonText);
+}
+
+// copy to vfs
+assert.throws(
+ () => Deno.copyFileSync("package.json", vfsPackageJsonPath),
+ Deno.errors.NotSupported,
+);
+Deno.removeSync("package.json");
+
+// copy file async from vfs to real fs
+{
+ await Deno.copyFile(vfsPackageJsonPath, "package.json");
+ assert.equal(Deno.readTextFileSync("package.json"), packageJsonText);
+}
+
+// copy to vfs async
+await assert.rejects(
+ () => Deno.copyFile("package.json", vfsPackageJsonPath),
+ Deno.errors.NotSupported,
+);
+Deno.removeSync("package.json");
+
+// open
+{
+ const file = Deno.openSync(vfsPackageJsonPath);
+ const bytes = new Uint8Array(10);
+ file.seekSync(2, Deno.SeekMode.Start);
+ assert.equal(file.readSync(bytes), 10);
+ const text = new TextDecoder().decode(bytes);
+ assert.equal(text, packageJsonText.slice(2, 12));
+}
+{
+ const file = await Deno.open(vfsPackageJsonPath);
+ const bytes = new Uint8Array(10);
+ await file.seek(2, Deno.SeekMode.Start);
+ assert.equal(await file.read(bytes), 10);
+ const text = new TextDecoder().decode(bytes);
+ assert.equal(text, packageJsonText.slice(2, 12));
+}
+
+// chdir
+assert.throws(() => Deno.chdir(dirPath), Deno.errors.NotSupported);
+
+// mkdir
+assert.throws(
+ () => Deno.mkdirSync(path.join(dirPath, "subDir")),
+ Deno.errors.NotSupported,
+);
+await assert.rejects(
+ () => Deno.mkdir(path.join(dirPath, "subDir")),
+ Deno.errors.NotSupported,
+);
+
+// chmod
+assert.throws(
+ () => Deno.chmodSync(vfsPackageJsonPath, 0o777),
+ Deno.errors.NotSupported,
+);
+await assert.rejects(
+ () => Deno.chmod(vfsPackageJsonPath, 0o777),
+ Deno.errors.NotSupported,
+);
+
+// chown
+assert.throws(
+ () => Deno.chownSync(vfsPackageJsonPath, 1000, 1000),
+ Deno.errors.NotSupported,
+);
+await assert.rejects(
+ () => Deno.chown(vfsPackageJsonPath, 1000, 1000),
+ Deno.errors.NotSupported,
+);
+
+// remove
+assert.throws(
+ () => Deno.removeSync(vfsPackageJsonPath),
+ Deno.errors.NotSupported,
+);
+await assert.rejects(
+ () => Deno.remove(vfsPackageJsonPath),
+ Deno.errors.NotSupported,
+);
+
+// stat
+{
+ const result = Deno.statSync(vfsPackageJsonPath);
+ assert(result.isFile);
+}
+{
+ const result = await Deno.stat(vfsPackageJsonPath);
+ assert(result.isFile);
+}
+
+// lstat
+{
+ const result = Deno.lstatSync(
+ path.join(nodeModulesPath, "@denotest", "esm-basic"),
+ );
+ assert(result.isSymlink);
+}
+{
+ const result = await Deno.lstat(
+ path.join(nodeModulesPath, "@denotest", "esm-basic"),
+ );
+ assert(result.isSymlink);
+}
+
+// realpath
+{
+ const result = Deno.realPathSync(
+ path.join(nodeModulesPath, "@denotest", "esm-basic", "package.json"),
+ );
+ assert.equal(result, vfsPackageJsonPath);
+}
+{
+ const result = await Deno.realPath(
+ path.join(nodeModulesPath, "@denotest", "esm-basic", "package.json"),
+ );
+ assert.equal(result, vfsPackageJsonPath);
+}
+
+// read dir
+const readDirNames = ["main.d.mts", "main.mjs", "package.json"];
+{
+ const names = Array.from(Deno.readDirSync(dirPath))
+ .map((e) => e.name);
+ assert.deepEqual(readDirNames, names);
+}
+{
+ const names = [];
+ for await (const entry of Deno.readDir(dirPath)) {
+ names.push(entry.name);
+ }
+ assert.deepEqual(readDirNames, names);
+}
+
+// rename
+assert.throws(
+ () => Deno.renameSync("package.json", vfsPackageJsonPath),
+ Deno.errors.NotSupported,
+);
+assert.throws(
+ () => Deno.renameSync(vfsPackageJsonPath, "package.json"),
+ Deno.errors.NotSupported,
+);
+await assert.rejects(
+ () => Deno.rename("package.json", vfsPackageJsonPath),
+ Deno.errors.NotSupported,
+);
+await assert.rejects(
+ () => Deno.rename(vfsPackageJsonPath, "package.json"),
+ Deno.errors.NotSupported,
+);
+
+// link
+assert.throws(
+ () => Deno.linkSync("package.json", vfsPackageJsonPath),
+ Deno.errors.NotSupported,
+);
+assert.throws(
+ () => Deno.linkSync(vfsPackageJsonPath, "package.json"),
+ Deno.errors.NotSupported,
+);
+await assert.rejects(
+ () => Deno.link("package.json", vfsPackageJsonPath),
+ Deno.errors.NotSupported,
+);
+await assert.rejects(
+ () => Deno.link(vfsPackageJsonPath, "package.json"),
+ Deno.errors.NotSupported,
+);
+
+// symlink
+assert.throws(
+ () => Deno.symlinkSync("package.json", vfsPackageJsonPath),
+ Deno.errors.NotSupported,
+);
+assert.throws(
+ () => Deno.symlinkSync(vfsPackageJsonPath, "package.json"),
+ Deno.errors.NotSupported,
+);
+await assert.rejects(
+ () => Deno.symlink("package.json", vfsPackageJsonPath),
+ Deno.errors.NotSupported,
+);
+await assert.rejects(
+ () => Deno.symlink(vfsPackageJsonPath, "package.json"),
+ Deno.errors.NotSupported,
+);
+
+// read link
+{
+ const result = Deno.readLinkSync(
+ path.join(nodeModulesPath, "@denotest", "esm-basic"),
+ );
+ assert.equal(result, dirPath);
+}
+{
+ const result = await Deno.readLink(
+ path.join(nodeModulesPath, "@denotest", "esm-basic"),
+ );
+ assert.equal(result, dirPath);
+}
+
+// truncate
+assert.throws(
+ () => Deno.truncateSync(vfsPackageJsonPath, 0),
+ Deno.errors.NotSupported,
+);
+await assert.rejects(
+ () => Deno.truncate(vfsPackageJsonPath, 0),
+ Deno.errors.NotSupported,
+);
+
+// utime
+assert.throws(
+ () => Deno.utimeSync(vfsPackageJsonPath, 0, 0),
+ Deno.errors.NotSupported,
+);
+await assert.rejects(
+ () => Deno.utime(vfsPackageJsonPath, 0, 0),
+ Deno.errors.NotSupported,
+);
+
+console.log("success");
diff --git a/cli/tests/testdata/npm/registry/@denotest/esm-basic/1.0.0/main.d.mts b/cli/tests/testdata/npm/registry/@denotest/esm-basic/1.0.0/main.d.mts
index fa7814911..29da1e6d7 100644
--- a/cli/tests/testdata/npm/registry/@denotest/esm-basic/1.0.0/main.d.mts
+++ b/cli/tests/testdata/npm/registry/@denotest/esm-basic/1.0.0/main.d.mts
@@ -1,2 +1,3 @@
export declare function setValue(val: number): void;
export declare function getValue(): number;
+export declare const url: string;
diff --git a/cli/tests/testdata/npm/registry/@denotest/esm-basic/1.0.0/main.mjs b/cli/tests/testdata/npm/registry/@denotest/esm-basic/1.0.0/main.mjs
index 23df4221c..0a44f7585 100644
--- a/cli/tests/testdata/npm/registry/@denotest/esm-basic/1.0.0/main.mjs
+++ b/cli/tests/testdata/npm/registry/@denotest/esm-basic/1.0.0/main.mjs
@@ -7,3 +7,5 @@ export function setValue(newValue) {
export function getValue() {
return value;
}
+
+export const url = import.meta.url;
diff --git a/cli/tests/testdata/package_json/basic/main.info.out b/cli/tests/testdata/package_json/basic/main.info.out
index bf36f4f19..3572c75e1 100644
--- a/cli/tests/testdata/package_json/basic/main.info.out
+++ b/cli/tests/testdata/package_json/basic/main.info.out
@@ -5,4 +5,4 @@ size: [WILDCARD]
file:///[WILDCARD]/main.ts (63B)
└─┬ file:///[WILDCARD]/lib.ts (166B)
- └── npm:@denotest/esm-basic@1.0.0 (345B)
+ └── npm:@denotest/esm-basic@1.0.0 (416B)
diff --git a/cli/tools/standalone.rs b/cli/tools/compile.rs
index d34e5da83..f10a2d025 100644
--- a/cli/tools/standalone.rs
+++ b/cli/tools/compile.rs
@@ -5,7 +5,6 @@ use crate::args::Flags;
use crate::factory::CliFactory;
use crate::graph_util::error_for_any_npm_specifier;
use crate::standalone::is_standalone_binary;
-use crate::standalone::DenoCompileBinaryWriter;
use crate::util::path::path_has_trailing_slash;
use deno_core::anyhow::bail;
use deno_core::anyhow::Context;
@@ -25,14 +24,9 @@ pub async fn compile(
) -> Result<(), AnyError> {
let factory = CliFactory::from_flags(flags).await?;
let cli_options = factory.cli_options();
- let file_fetcher = factory.file_fetcher()?;
- let http_client = factory.http_client();
- let deno_dir = factory.deno_dir()?;
let module_graph_builder = factory.module_graph_builder().await?;
let parsed_source_cache = factory.parsed_source_cache()?;
-
- let binary_writer =
- DenoCompileBinaryWriter::new(file_fetcher, http_client, deno_dir);
+ let binary_writer = factory.create_compile_binary_writer().await?;
let module_specifier = cli_options.resolve_main_module()?;
let module_roots = {
let mut vec = Vec::with_capacity(compile_flags.include.len() + 1);
@@ -56,8 +50,11 @@ pub async fn compile(
)
.unwrap();
- // at the moment, we don't support npm specifiers in deno_compile, so show an error
- error_for_any_npm_specifier(&graph)?;
+ if !cli_options.unstable() {
+ error_for_any_npm_specifier(&graph).context(
+ "Using npm specifiers with deno compile requires the --unstable flag.",
+ )?;
+ }
let parser = parsed_source_cache.as_capturing_parser();
let eszip = eszip::EszipV2::from_graph(graph, &parser, Default::default())?;
diff --git a/cli/tools/mod.rs b/cli/tools/mod.rs
index cf29435a7..c4a8306ab 100644
--- a/cli/tools/mod.rs
+++ b/cli/tools/mod.rs
@@ -3,6 +3,7 @@
pub mod bench;
pub mod bundle;
pub mod check;
+pub mod compile;
pub mod coverage;
pub mod doc;
pub mod fmt;
@@ -12,7 +13,6 @@ pub mod installer;
pub mod lint;
pub mod repl;
pub mod run;
-pub mod standalone;
pub mod task;
pub mod test;
pub mod upgrade;
diff --git a/cli/tools/task.rs b/cli/tools/task.rs
index 6380d3822..bf972e2db 100644
--- a/cli/tools/task.rs
+++ b/cli/tools/task.rs
@@ -64,12 +64,13 @@ pub async fn execute_script(
.await;
Ok(exit_code)
} else if let Some(script) = package_json_scripts.get(task_name) {
+ let package_json_deps_provider = factory.package_json_deps_provider();
let package_json_deps_installer =
factory.package_json_deps_installer().await?;
let npm_resolver = factory.npm_resolver().await?;
let node_resolver = factory.node_resolver().await?;
- if let Some(package_deps) = package_json_deps_installer.package_deps() {
+ if let Some(package_deps) = package_json_deps_provider.deps() {
for (key, value) in package_deps {
if let Err(err) = value {
log::info!(
diff --git a/cli/tools/vendor/test.rs b/cli/tools/vendor/test.rs
index 774ff0d58..e8a474ed3 100644
--- a/cli/tools/vendor/test.rs
+++ b/cli/tools/vendor/test.rs
@@ -22,7 +22,6 @@ use import_map::ImportMap;
use crate::cache::ParsedSourceCache;
use crate::npm::CliNpmRegistryApi;
use crate::npm::NpmResolution;
-use crate::npm::PackageJsonDepsInstaller;
use crate::resolver::CliGraphResolver;
use super::build::VendorEnvironment;
@@ -270,18 +269,14 @@ async fn build_test_graph(
None,
None,
));
- let deps_installer = Arc::new(PackageJsonDepsInstaller::new(
- npm_registry_api.clone(),
- npm_resolution.clone(),
- None,
- ));
CliGraphResolver::new(
None,
Some(Arc::new(original_import_map)),
false,
npm_registry_api,
npm_resolution,
- deps_installer,
+ Default::default(),
+ Default::default(),
)
});
let mut graph = ModuleGraph::default();
diff --git a/cli/util/fs.rs b/cli/util/fs.rs
index 9d3c6fccb..94ec24fe6 100644
--- a/cli/util/fs.rs
+++ b/cli/util/fs.rs
@@ -93,11 +93,18 @@ pub fn canonicalize_path(path: &Path) -> Result<PathBuf, Error> {
pub fn canonicalize_path_maybe_not_exists(
path: &Path,
) -> Result<PathBuf, Error> {
+ canonicalize_path_maybe_not_exists_with_fs(path, canonicalize_path)
+}
+
+pub fn canonicalize_path_maybe_not_exists_with_fs(
+ path: &Path,
+ canonicalize: impl Fn(&Path) -> Result<PathBuf, Error>,
+) -> Result<PathBuf, Error> {
let path = path.to_path_buf().clean();
let mut path = path.as_path();
let mut names_stack = Vec::new();
loop {
- match canonicalize_path(path) {
+ match canonicalize(path) {
Ok(mut canonicalized_path) => {
for name in names_stack.into_iter().rev() {
canonicalized_path = canonicalized_path.join(name);
diff --git a/ext/fs/interface.rs b/ext/fs/interface.rs
index 2d9b68f55..7624535c9 100644
--- a/ext/fs/interface.rs
+++ b/ext/fs/interface.rs
@@ -100,7 +100,7 @@ pub trait FileSystem: std::fmt::Debug + MaybeSend + MaybeSync {
async fn mkdir_async(
&self,
path: PathBuf,
- recusive: bool,
+ recursive: bool,
mode: u32,
) -> FsResult<()>;
diff --git a/ext/io/fs.rs b/ext/io/fs.rs
index a333e1dd5..e335324f5 100644
--- a/ext/io/fs.rs
+++ b/ext/io/fs.rs
@@ -15,6 +15,7 @@ use deno_core::OpState;
use deno_core::ResourceId;
use tokio::task::JoinError;
+#[derive(Debug)]
pub enum FsError {
Io(io::Error),
FileBusy,
@@ -29,6 +30,14 @@ impl FsError {
Self::NotSupported => io::ErrorKind::Other,
}
}
+
+ pub fn into_io_error(self) -> io::Error {
+ match self {
+ FsError::Io(err) => err,
+ FsError::FileBusy => io::Error::new(self.kind(), "file busy"),
+ FsError::NotSupported => io::Error::new(self.kind(), "not supported"),
+ }
+ }
}
impl From<io::Error> for FsError {
diff --git a/runtime/build.rs b/runtime/build.rs
index 412257f12..bd141d297 100644
--- a/runtime/build.rs
+++ b/runtime/build.rs
@@ -358,6 +358,7 @@ fn main() {
if env::var_os("DOCS_RS").is_some() {
let snapshot_slice = &[];
#[allow(clippy::needless_borrow)]
+ #[allow(clippy::disallowed_methods)]
std::fs::write(&runtime_snapshot_path, snapshot_slice).unwrap();
}
diff --git a/runtime/clippy.toml b/runtime/clippy.toml
new file mode 100644
index 000000000..53676a90e
--- /dev/null
+++ b/runtime/clippy.toml
@@ -0,0 +1,45 @@
+disallowed-methods = [
+ { path = "std::env::current_dir", reason = "File system operations should be done using FileSystem trait" },
+ { path = "std::path::Path::canonicalize", reason = "File system operations should be done using NodeFs trait" },
+ { path = "std::path::Path::is_dir", reason = "File system operations should be done using NodeFs trait" },
+ { path = "std::path::Path::is_file", reason = "File system operations should be done using NodeFs trait" },
+ { path = "std::path::Path::is_symlink", reason = "File system operations should be done using NodeFs trait" },
+ { path = "std::path::Path::metadata", reason = "File system operations should be done using NodeFs trait" },
+ { path = "std::path::Path::read_dir", reason = "File system operations should be done using NodeFs trait" },
+ { path = "std::path::Path::read_link", reason = "File system operations should be done using NodeFs trait" },
+ { path = "std::path::Path::symlink_metadata", reason = "File system operations should be done using NodeFs trait" },
+ { path = "std::path::Path::try_exists", reason = "File system operations should be done using NodeFs trait" },
+ { path = "std::path::PathBuf::exists", reason = "File system operations should be done using NodeFs trait" },
+ { path = "std::path::PathBuf::canonicalize", reason = "File system operations should be done using NodeFs trait" },
+ { path = "std::path::PathBuf::is_dir", reason = "File system operations should be done using NodeFs trait" },
+ { path = "std::path::PathBuf::is_file", reason = "File system operations should be done using NodeFs trait" },
+ { path = "std::path::PathBuf::is_symlink", reason = "File system operations should be done using NodeFs trait" },
+ { path = "std::path::PathBuf::metadata", reason = "File system operations should be done using NodeFs trait" },
+ { path = "std::path::PathBuf::read_dir", reason = "File system operations should be done using NodeFs trait" },
+ { path = "std::path::PathBuf::read_link", reason = "File system operations should be done using NodeFs trait" },
+ { path = "std::path::PathBuf::symlink_metadata", reason = "File system operations should be done using NodeFs trait" },
+ { path = "std::path::PathBuf::try_exists", reason = "File system operations should be done using NodeFs trait" },
+ { path = "std::env::set_current_dir", reason = "File system operations should be done using FileSystem trait" },
+ { path = "std::env::temp_dir", reason = "File system operations should be done using FileSystem trait" },
+ { path = "std::fs::canonicalize", reason = "File system operations should be done using FileSystem trait" },
+ { path = "std::fs::copy", reason = "File system operations should be done using FileSystem trait" },
+ { path = "std::fs::create_dir_all", reason = "File system operations should be done using FileSystem trait" },
+ { path = "std::fs::create_dir", reason = "File system operations should be done using FileSystem trait" },
+ { path = "std::fs::DirBuilder::new", reason = "File system operations should be done using FileSystem trait" },
+ { path = "std::fs::hard_link", reason = "File system operations should be done using FileSystem trait" },
+ { path = "std::fs::metadata", reason = "File system operations should be done using FileSystem trait" },
+ { path = "std::fs::OpenOptions::new", reason = "File system operations should be done using FileSystem trait" },
+ { path = "std::fs::read_dir", reason = "File system operations should be done using FileSystem trait" },
+ { path = "std::fs::read_link", reason = "File system operations should be done using FileSystem trait" },
+ { path = "std::fs::read_to_string", reason = "File system operations should be done using FileSystem trait" },
+ { path = "std::fs::read", reason = "File system operations should be done using FileSystem trait" },
+ { path = "std::fs::remove_dir_all", reason = "File system operations should be done using FileSystem trait" },
+ { path = "std::fs::remove_dir", reason = "File system operations should be done using FileSystem trait" },
+ { path = "std::fs::remove_file", reason = "File system operations should be done using FileSystem trait" },
+ { path = "std::fs::rename", reason = "File system operations should be done using FileSystem trait" },
+ { path = "std::fs::set_permissions", reason = "File system operations should be done using FileSystem trait" },
+ { path = "std::fs::symlink_metadata", reason = "File system operations should be done using FileSystem trait" },
+ { path = "std::fs::write", reason = "File system operations should be done using FileSystem trait" },
+ { path = "std::path::Path::canonicalize", reason = "File system operations should be done using FileSystem trait" },
+ { path = "std::path::Path::exists", reason = "File system operations should be done using FileSystem trait" },
+]
diff --git a/runtime/examples/hello_runtime.rs b/runtime/examples/hello_runtime.rs
index 157a200f4..2bc371b68 100644
--- a/runtime/examples/hello_runtime.rs
+++ b/runtime/examples/hello_runtime.rs
@@ -2,6 +2,7 @@
use deno_core::error::AnyError;
use deno_core::FsModuleLoader;
+use deno_core::ModuleSpecifier;
use deno_runtime::permissions::PermissionsContainer;
use deno_runtime::worker::MainWorker;
use deno_runtime::worker::WorkerOptions;
@@ -14,10 +15,7 @@ deno_core::extension!(hello_runtime, esm = ["hello_runtime_bootstrap.js"]);
async fn main() -> Result<(), AnyError> {
let js_path =
Path::new(env!("CARGO_MANIFEST_DIR")).join("examples/hello_runtime.js");
- let main_module = deno_core::resolve_path(
- &js_path.to_string_lossy(),
- &std::env::current_dir()?,
- )?;
+ let main_module = ModuleSpecifier::from_file_path(js_path).unwrap();
let mut worker = MainWorker::bootstrap_from_options(
main_module.clone(),
PermissionsContainer::allow_all(),
diff --git a/runtime/fs_util.rs b/runtime/fs_util.rs
index eb4a2f899..204b0e4e8 100644
--- a/runtime/fs_util.rs
+++ b/runtime/fs_util.rs
@@ -3,23 +3,17 @@
use deno_core::anyhow::Context;
use deno_core::error::AnyError;
pub use deno_core::normalize_path;
-use std::env::current_dir;
-use std::io::Error;
use std::path::Path;
use std::path::PathBuf;
-/// Similar to `std::fs::canonicalize()` but strips UNC prefixes on Windows.
-pub fn canonicalize_path(path: &Path) -> Result<PathBuf, Error> {
- Ok(deno_core::strip_unc_prefix(path.canonicalize()?))
-}
-
#[inline]
pub fn resolve_from_cwd(path: &Path) -> Result<PathBuf, AnyError> {
if path.is_absolute() {
Ok(normalize_path(path))
} else {
- let cwd =
- current_dir().context("Failed to get current working directory")?;
+ #[allow(clippy::disallowed_methods)]
+ let cwd = std::env::current_dir()
+ .context("Failed to get current working directory")?;
Ok(normalize_path(cwd.join(path)))
}
}
@@ -28,21 +22,26 @@ pub fn resolve_from_cwd(path: &Path) -> Result<PathBuf, AnyError> {
mod tests {
use super::*;
+ fn current_dir() -> PathBuf {
+ #[allow(clippy::disallowed_methods)]
+ std::env::current_dir().unwrap()
+ }
+
#[test]
fn resolve_from_cwd_child() {
- let cwd = current_dir().unwrap();
+ let cwd = current_dir();
assert_eq!(resolve_from_cwd(Path::new("a")).unwrap(), cwd.join("a"));
}
#[test]
fn resolve_from_cwd_dot() {
- let cwd = current_dir().unwrap();
+ let cwd = current_dir();
assert_eq!(resolve_from_cwd(Path::new(".")).unwrap(), cwd);
}
#[test]
fn resolve_from_cwd_parent() {
- let cwd = current_dir().unwrap();
+ let cwd = current_dir();
assert_eq!(resolve_from_cwd(Path::new("a/..")).unwrap(), cwd);
}
@@ -66,7 +65,7 @@ mod tests {
#[test]
fn resolve_from_cwd_absolute() {
let expected = Path::new("a");
- let cwd = current_dir().unwrap();
+ let cwd = current_dir();
let absolute_expected = cwd.join(expected);
assert_eq!(resolve_from_cwd(expected).unwrap(), absolute_expected);
}
diff --git a/runtime/ops/os/mod.rs b/runtime/ops/os/mod.rs
index 911cd327c..b997a89d9 100644
--- a/runtime/ops/os/mod.rs
+++ b/runtime/ops/os/mod.rs
@@ -339,6 +339,7 @@ fn rss() -> usize {
(out, idx)
}
+ #[allow(clippy::disallowed_methods)]
let statm_content = if let Ok(c) = std::fs::read_to_string("/proc/self/statm")
{
c
diff --git a/runtime/ops/os/sys_info.rs b/runtime/ops/os/sys_info.rs
index 1a9358dc0..795e6bb0a 100644
--- a/runtime/ops/os/sys_info.rs
+++ b/runtime/ops/os/sys_info.rs
@@ -48,6 +48,7 @@ pub fn loadavg() -> LoadAvg {
pub fn os_release() -> String {
#[cfg(target_os = "linux")]
{
+ #[allow(clippy::disallowed_methods)]
match std::fs::read_to_string("/proc/sys/kernel/osrelease") {
Ok(mut s) => {
s.pop(); // pop '\n'
diff --git a/test_util/src/builders.rs b/test_util/src/builders.rs
index a5f192b73..33a1a98f0 100644
--- a/test_util/src/builders.rs
+++ b/test_util/src/builders.rs
@@ -341,6 +341,7 @@ impl TestCommandBuilder {
))
}
+ #[track_caller]
pub fn run(&self) -> TestCommandOutput {
fn read_pipe_to_string(mut pipe: os_pipe::PipeReader) -> String {
let mut output = String::new();
diff --git a/test_util/src/temp_dir.rs b/test_util/src/temp_dir.rs
index db3c246dc..dc638c7ea 100644
--- a/test_util/src/temp_dir.rs
+++ b/test_util/src/temp_dir.rs
@@ -58,6 +58,10 @@ impl TempDir {
fs::create_dir_all(self.path().join(path)).unwrap();
}
+ pub fn remove_dir_all(&self, path: impl AsRef<Path>) {
+ fs::remove_dir_all(self.path().join(path)).unwrap();
+ }
+
pub fn read_to_string(&self, path: impl AsRef<Path>) -> String {
let file_path = self.path().join(path);
fs::read_to_string(&file_path)