From 38ff9faff639385c10ccb6412470e1355c73327c Mon Sep 17 00:00:00 2001
From: David Sherret
Date: Fri, 31 May 2024 23:25:08 -0400
Subject: fix: retry writing lockfile on failure (#24052)

Ran into this running the deno_graph ecosystem tests where many
processes writing to the same path at the same time would cause an
error.
---
 cli/args/lockfile.rs | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/cli/args/lockfile.rs b/cli/args/lockfile.rs
index 30e91eb92..7e59853b0 100644
--- a/cli/args/lockfile.rs
+++ b/cli/args/lockfile.rs
@@ -8,7 +8,7 @@ use deno_runtime::deno_node::PackageJson;
 
 use crate::args::ConfigFile;
 use crate::cache;
-use crate::util::fs::atomic_write_file;
+use crate::util::fs::atomic_write_file_with_retries;
 use crate::Flags;
 
 use super::DenoSubcommand;
@@ -84,7 +84,7 @@ pub fn write_lockfile_if_has_changes(
   };
   // do an atomic write to reduce the chance of multiple deno
   // processes corrupting the file
-  atomic_write_file(&lockfile.filename, bytes, cache::CACHE_PERM)
+  atomic_write_file_with_retries(&lockfile.filename, bytes, cache::CACHE_PERM)
     .context("Failed writing lockfile.")?;
   lockfile.has_content_changed = false;
   Ok(())
--
cgit v1.2.3
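
The diff only changes the call site: atomic_write_file is swapped for atomic_write_file_with_retries, whose definition lives outside this patch (in cli/util/fs.rs) and is not shown here. As a minimal sketch of the idea, and not the actual helper, a retrying atomic write could look like the following; the retry limit, backoff, and temp-file naming are assumptions for illustration only.

use std::io;
use std::path::Path;
use std::time::{SystemTime, UNIX_EPOCH};

// Sketch of an atomic write: write to a uniquely named temp file in the
// same directory, then rename it over the destination. The `mode`
// parameter is accepted for parity with the call site but ignored here.
fn atomic_write_file(file_path: &Path, data: &[u8], _mode: u32) -> io::Result<()> {
  let nanos = SystemTime::now()
    .duration_since(UNIX_EPOCH)
    .map(|d| d.subsec_nanos())
    .unwrap_or(0);
  // include the process id and a timestamp so concurrent processes do not
  // collide on the temp file name (hypothetical scheme, not from the patch)
  let temp_name = format!(
    "{}.{}.{}.tmp",
    file_path.file_name().and_then(|n| n.to_str()).unwrap_or("file"),
    std::process::id(),
    nanos
  );
  let temp_path = file_path.with_file_name(temp_name);
  std::fs::write(&temp_path, data)?;
  std::fs::rename(&temp_path, file_path)
}

// Sketch of the retry wrapper: retry the whole write a few times with a
// short backoff, since another deno process may be writing the same path.
fn atomic_write_file_with_retries(
  file_path: &Path,
  data: &[u8],
  mode: u32,
) -> io::Result<()> {
  const MAX_RETRIES: u32 = 5; // assumed retry limit
  let mut attempt = 0;
  loop {
    match atomic_write_file(file_path, data, mode) {
      Ok(()) => return Ok(()),
      Err(_) if attempt < MAX_RETRIES => {
        attempt += 1;
        // back off briefly before trying again
        std::thread::sleep(std::time::Duration::from_millis(10 * attempt as u64));
      }
      Err(err) => return Err(err),
    }
  }
}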