diff --git a/Cargo.lock b/Cargo.lock
index f33233ab7256c0..5906abe3f9e909 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1092,6 +1092,12 @@ version = "0.3.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "41b319d1b62ffbd002e057f36bebd1f42b9f97927c9577461d855f3513c4289f"

+[[package]]
+name = "debug-ignore"
+version = "1.0.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ffe7ed1d93f4553003e20b629abe9085e1e81b1429520f897f8f8860bc6dfc21"
+
 [[package]]
 name = "debugid"
 version = "0.8.0"
@@ -1160,6 +1166,7 @@ dependencies = [
  "junction",
  "lazy-regex",
  "libc",
+ "libsui",
  "libz-sys",
  "log",
  "lsp-types",
@@ -2524,6 +2531,19 @@ dependencies = [
  "spki",
 ]

+[[package]]
+name = "editpe"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "48cede2bb1b07dd598d269f973792c43e0cd92686d3b452bd6e01d7a8eb01211"
+dependencies = [
+ "debug-ignore",
+ "indexmap",
+ "log",
+ "thiserror",
+ "zerocopy",
+]
+
 [[package]]
 name = "either"
 version = "1.10.0"
@@ -4025,6 +4045,19 @@ dependencies = [
  "vcpkg",
 ]

+[[package]]
+name = "libsui"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0c2fedcf6cb4dd935f94a90e1c4300c727fe7112b8455615e902828c7401f84d"
+dependencies = [
+ "editpe",
+ "libc",
+ "sha2",
+ "windows-sys 0.48.0",
+ "zerocopy",
+]
+
 [[package]]
 name = "libz-sys"
 version = "1.1.16"
@@ -8281,6 +8314,7 @@ version = "0.7.32"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "74d4d3961e53fa4c9a25a8637fc2bfaf2595b3d3ae34875568a5cf64787716be"
 dependencies = [
+ "byteorder",
  "zerocopy-derive",
 ]
diff --git a/cli/Cargo.toml b/cli/Cargo.toml
index a2d63ff328c176..fed1e5f4d82d25 100644
--- a/cli/Cargo.toml
+++ b/cli/Cargo.toml
@@ -79,6 +79,7 @@ deno_semver = "=0.5.7"
 deno_task_shell = "=0.17.0"
 deno_terminal.workspace = true
 eszip = "=0.73.0"
+libsui = "0.1.0"
 napi_sym.workspace = true
 node_resolver.workspace = true
diff --git a/cli/mainrt.rs b/cli/mainrt.rs
index aafbf793208f60..e890b5dc05da46 100644
--- a/cli/mainrt.rs
+++ b/cli/mainrt.rs
@@ -82,9 +82,7 @@ fn load_env_vars(env_vars: &HashMap<String, String>) {
 fn main() {
   let args: Vec<_> = env::args_os().collect();
-  let current_exe_path = current_exe().unwrap();
-  let standalone =
-    standalone::extract_standalone(&current_exe_path, Cow::Owned(args));
+  let standalone = standalone::extract_standalone(Cow::Owned(args));
   let future = async move {
     match standalone {
       Ok(Some(future)) => {
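Note (illustrative, not part of the patch): extract_standalone no longer needs the path of the running executable because libsui locates the embedded section in the current binary by name. A minimal sketch of that startup probe, assuming libsui 0.1.0 as added in cli/Cargo.toml above; the printed messages are placeholders:

fn main() {
  // `find_section` looks up the named section/resource in the currently
  // running executable and returns its bytes if present.
  match libsui::find_section("d3n0l4nd") {
    // Section present: this binary was produced by `deno compile`, so the
    // standalone runtime would take over from here.
    Some(payload) => println!("embedded payload: {} bytes", payload.len()),
    // Section absent: behave like the regular CLI.
    None => println!("no embedded payload"),
  }
}
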
diff --git a/cli/standalone/binary.rs b/cli/standalone/binary.rs
index 85d131f42c2903..588f0f7b7c405c 100644
--- a/cli/standalone/binary.rs
+++ b/cli/standalone/binary.rs
@@ -7,6 +7,7 @@ use std::collections::VecDeque;
 use std::env::current_exe;
 use std::ffi::OsString;
 use std::fs;
+use std::fs::File;
 use std::future::Future;
 use std::io::Read;
 use std::io::Seek;
@@ -106,16 +107,19 @@ pub struct Metadata {
 }

 pub fn load_npm_vfs(root_dir_path: PathBuf) -> Result<FileBackedVfs, AnyError> {
-  let file_path = current_exe().unwrap();
-  let mut file = std::fs::File::open(file_path)?;
-  file.seek(SeekFrom::End(-(TRAILER_SIZE as i64)))?;
-  let mut trailer = [0; TRAILER_SIZE];
-  file.read_exact(&mut trailer)?;
-  let trailer = Trailer::parse(&trailer)?.unwrap();
-  file.seek(SeekFrom::Start(trailer.npm_vfs_pos))?;
-  let mut vfs_data = vec![0; trailer.npm_vfs_len() as usize];
-  file.read_exact(&mut vfs_data)?;
-  let mut dir: VirtualDirectory = serde_json::from_slice(&vfs_data)?;
+  let data = libsui::find_section("d3n0l4nd").unwrap();
+
+  // We do the first part sync so it can complete quickly
+  let trailer: [u8; TRAILER_SIZE] = data[0..TRAILER_SIZE].try_into().unwrap();
+  let trailer = match Trailer::parse(&trailer)? {
+    None => panic!("Could not find trailer"),
+    Some(trailer) => trailer,
+  };
+  let data = &data[TRAILER_SIZE..];
+
+  let vfs_data =
+    &data[trailer.npm_vfs_pos as usize..trailer.npm_files_pos as usize];
+  let mut dir: VirtualDirectory = serde_json::from_slice(vfs_data)?;

   // align the name of the directory with the root dir
   dir.name = root_dir_path
@@ -129,38 +133,32 @@ pub fn load_npm_vfs(root_dir_path: PathBuf) -> Result<FileBackedVfs, AnyError> {
     root_path: root_dir_path,
     start_file_offset: trailer.npm_files_pos,
   };
-  Ok(FileBackedVfs::new(file, fs_root))
+  Ok(FileBackedVfs::new(data.to_vec(), fs_root))
 }

 fn write_binary_bytes(
-  writer: &mut impl Write,
+  mut file_writer: File,
   original_bin: Vec<u8>,
   metadata: &Metadata,
   eszip: eszip::EszipV2,
   npm_vfs: Option<&VirtualDirectory>,
   npm_files: &Vec<Vec<u8>>,
+  compile_flags: &CompileFlags,
 ) -> Result<(), AnyError> {
   let metadata = serde_json::to_string(metadata)?.as_bytes().to_vec();
   let npm_vfs = serde_json::to_string(&npm_vfs)?.as_bytes().to_vec();
   let eszip_archive = eszip.into_bytes();

-  writer.write_all(&original_bin)?;
-  writer.write_all(&eszip_archive)?;
-  writer.write_all(&metadata)?;
-  writer.write_all(&npm_vfs)?;
-  for file in npm_files {
-    writer.write_all(file)?;
-  }
+  let mut writer = Vec::new();

   // write the trailer, which includes the positions
   // of the data blocks in the file
   writer.write_all(&{
-    let eszip_pos = original_bin.len() as u64;
-    let metadata_pos = eszip_pos + (eszip_archive.len() as u64);
+    let metadata_pos = eszip_archive.len() as u64;
     let npm_vfs_pos = metadata_pos + (metadata.len() as u64);
     let npm_files_pos = npm_vfs_pos + (npm_vfs.len() as u64);
     Trailer {
-      eszip_pos,
+      eszip_pos: 0,
       metadata_pos,
       npm_vfs_pos,
       npm_files_pos,
@@ -168,27 +166,36 @@ fn write_binary_bytes(
     .as_bytes()
   })?;

+  writer.write_all(&eszip_archive)?;
+  writer.write_all(&metadata)?;
+  writer.write_all(&npm_vfs)?;
+  for file in npm_files {
+    writer.write_all(file)?;
+  }
+
+  let target = compile_flags.resolve_target();
+  if target.contains("linux") {
+    libsui::Elf::new(&original_bin).append(&writer, &mut file_writer)?;
+  } else if target.contains("windows") {
+    libsui::PortableExecutable::from(&original_bin)?
+      .write_resource("d3n0l4nd", writer)?
+      .build(&mut file_writer)?;
+  } else if target.contains("darwin") {
+    libsui::Macho::from(original_bin)?
+      .write_section("d3n0l4nd", writer)?
+      .build(&mut file_writer)?;
+  }
   Ok(())
 }

 pub fn is_standalone_binary(exe_path: &Path) -> bool {
-  let Ok(mut output_file) = std::fs::File::open(exe_path) else {
-    return false;
-  };
-  if output_file
-    .seek(SeekFrom::End(-(TRAILER_SIZE as i64)))
-    .is_err()
-  {
-    // This seek may fail because the file is too small to possibly be
-    // `deno compile` output.
-    return false;
-  }
-  let mut trailer = [0; TRAILER_SIZE];
-  if output_file.read_exact(&mut trailer).is_err() {
+  let Ok(data) = std::fs::read(exe_path) else {
     return false;
   };
-  let (magic_trailer, _) = trailer.split_at(8);
-  magic_trailer == MAGIC_TRAILER
+
+  libsui::utils::is_elf(&data)
+    | libsui::utils::is_pe(&data)
+    | libsui::utils::is_macho(&data)
 }

 /// This function will try to run this binary as a standalone binary
@@ -197,40 +204,32 @@ pub fn is_standalone_binary(exe_path: &Path) -> bool {
 /// then checking for the magic trailer string `d3n0l4nd`. If found,
 /// the bundle is executed. If not, this function exits with `Ok(None)`.
 pub fn extract_standalone(
-  exe_path: &Path,
   cli_args: Cow<Vec<OsString>>,
 ) -> Result<
   Option<impl Future<Output = Result<(Metadata, eszip::EszipV2), AnyError>>>,
   AnyError,
 > {
+  let Some(data) = libsui::find_section("d3n0l4nd") else {
+    return Ok(None);
+  };
+
   // We do the first part sync so it can complete quickly
-  let mut file = std::fs::File::open(exe_path)?;
-  file.seek(SeekFrom::End(-(TRAILER_SIZE as i64)))?;
-  let mut trailer = [0; TRAILER_SIZE];
-  file.read_exact(&mut trailer)?;
-  let trailer = match Trailer::parse(&trailer)? {
+  let trailer = match Trailer::parse(&data[0..TRAILER_SIZE])? {
     None => return Ok(None),
     Some(trailer) => trailer,
   };
-  file.seek(SeekFrom::Start(trailer.eszip_pos))?;
-
   let cli_args = cli_args.into_owned();
   // If we have an eszip, read it out
   Ok(Some(async move {
     let bufreader =
-      deno_core::futures::io::BufReader::new(AllowStdIo::new(file));
+      deno_core::futures::io::BufReader::new(&data[TRAILER_SIZE..]);

     let (eszip, loader) = eszip::EszipV2::parse(bufreader)
       .await
       .context("Failed to parse eszip header")?;

-    let mut bufreader =
-      loader.await.context("Failed to parse eszip archive")?;
-
-    bufreader
-      .seek(SeekFrom::Start(trailer.metadata_pos))
-      .await?;
+    let bufreader = loader.await.context("Failed to parse eszip archive")?;

     let mut metadata = String::new();
@@ -405,7 +404,7 @@ impl<'a> DenoCompileBinaryWriter<'a> {
   pub async fn write_bin(
     &self,
-    writer: &mut impl Write,
+    writer: File,
     eszip: eszip::EszipV2,
     root_dir_url: EszipRelativeFileBaseUrl<'_>,
     entrypoint: &ModuleSpecifier,
@@ -518,7 +517,7 @@ impl<'a> DenoCompileBinaryWriter<'a> {
   #[allow(clippy::too_many_arguments)]
   fn write_standalone_binary(
     &self,
-    writer: &mut impl Write,
+    writer: File,
     original_bin: Vec<u8>,
     mut eszip: eszip::EszipV2,
     root_dir_url: EszipRelativeFileBaseUrl<'_>,
@@ -654,6 +653,7 @@ impl<'a> DenoCompileBinaryWriter<'a> {
       eszip,
       npm_vfs.as_ref(),
       &npm_files,
+      compile_flags,
     )
   }
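Note (illustrative, not part of the patch): because the payload is now injected as a named section instead of being appended after the original executable, the offsets stored in the Trailer are relative to the byte immediately after the trailer itself, which is why eszip_pos is written as 0. A minimal sketch of the layout and the offset arithmetic; the helper name trailer_offsets and the sizes in the example are hypothetical:

// Layout of the "d3n0l4nd" payload assembled by write_binary_bytes:
//
//   [ trailer ][ eszip ][ metadata ][ npm vfs ][ npm files ... ]
//              ^ eszip_pos = 0
//                        ^ metadata_pos
//                                    ^ npm_vfs_pos
//                                               ^ npm_files_pos
fn trailer_offsets(
  eszip: &[u8],
  metadata: &[u8],
  npm_vfs: &[u8],
) -> (u64, u64, u64, u64) {
  let eszip_pos = 0u64; // the eszip now starts right after the trailer
  let metadata_pos = eszip_pos + eszip.len() as u64;
  let npm_vfs_pos = metadata_pos + metadata.len() as u64;
  let npm_files_pos = npm_vfs_pos + npm_vfs.len() as u64;
  (eszip_pos, metadata_pos, npm_vfs_pos, npm_files_pos)
}

For example, with a 100-byte eszip, 20 bytes of metadata and a 30-byte vfs, load_npm_vfs above slices the section (after skipping the trailer) at 120..150 for the vfs, and extract_standalone hands everything after the trailer to the eszip parser.
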
diff --git a/cli/standalone/virtual_fs.rs b/cli/standalone/virtual_fs.rs
index 0d39f8e958c2c9..c44e2227babbfb 100644
--- a/cli/standalone/virtual_fs.rs
+++ b/cli/standalone/virtual_fs.rs
@@ -748,12 +748,12 @@ impl deno_io::fs::File for FileBackedVfsFile {

 #[derive(Debug)]
 pub struct FileBackedVfs {
-  file: Mutex<File>,
+  file: Mutex<Vec<u8>>,
   fs_root: VfsRoot,
 }

 impl FileBackedVfs {
-  pub fn new(file: File, fs_root: VfsRoot) -> Self {
+  pub fn new(file: Vec<u8>, fs_root: VfsRoot) -> Self {
     Self {
       file: Mutex::new(file),
       fs_root,
@@ -836,11 +836,18 @@ impl FileBackedVfs {
     pos: u64,
     buf: &mut [u8],
   ) -> std::io::Result<usize> {
-    let mut fs_file = self.file.lock();
-    fs_file.seek(SeekFrom::Start(
-      self.fs_root.start_file_offset + file.offset + pos,
-    ))?;
-    fs_file.read(buf)
+    let data = self.file.lock();
+    let start = self.fs_root.start_file_offset + file.offset + pos;
+    let end = start + buf.len() as u64;
+    if end > data.len() as u64 {
+      return Err(std::io::Error::new(
+        std::io::ErrorKind::UnexpectedEof,
+        "unexpected EOF",
+      ));
+    }
+
+    buf.copy_from_slice(&data[start as usize..end as usize]);
+    Ok(buf.len())
   }

   pub fn dir_entry(&self, path: &Path) -> std::io::Result<&VirtualDirectory> {
@@ -1016,12 +1023,12 @@ mod test {
         file.write_all(file_data).unwrap();
       }
     }
-    let file = std::fs::File::open(&virtual_fs_file).unwrap();
     let dest_path = temp_dir.path().join("dest");
+    let data = std::fs::read(&virtual_fs_file).unwrap();
     (
       dest_path.to_path_buf(),
       FileBackedVfs::new(
-        file,
+        data,
         VfsRoot {
           dir: root_dir,
           root_path: dest_path.to_path_buf(),
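Note (illustrative, not part of the patch): FileBackedVfs now keeps the whole embedded payload in memory, so a read is slice arithmetic rather than seek-and-read, and a request that runs past the end of the payload fails with UnexpectedEof instead of returning a short read. A standalone restatement of the arithmetic from read_file above; the function name is hypothetical:

fn read_at(
  data: &[u8],            // the embedded payload bytes
  start_file_offset: u64, // start of the npm files block within the payload
  file_offset: u64,       // offset of this virtual file within that block
  pos: u64,               // caller's position within the virtual file
  buf: &mut [u8],
) -> std::io::Result<usize> {
  let start = start_file_offset + file_offset + pos;
  let end = start + buf.len() as u64;
  if end > data.len() as u64 {
    // The payload has a fixed size, so reading past it is reported as EOF.
    return Err(std::io::Error::new(
      std::io::ErrorKind::UnexpectedEof,
      "unexpected EOF",
    ));
  }
  buf.copy_from_slice(&data[start as usize..end as usize]);
  Ok(buf.len())
}
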
diff --git a/cli/tools/compile.rs b/cli/tools/compile.rs
index bb03f03d2e227c..f0534abc39705f 100644
--- a/cli/tools/compile.rs
+++ b/cli/tools/compile.rs
@@ -124,12 +124,13 @@ pub async fn compile(
   ));
   let temp_path = output_path.with_file_name(temp_filename);

-  let mut file = std::fs::File::create(&temp_path).with_context(|| {
+  let file = std::fs::File::create(&temp_path).with_context(|| {
     format!("Opening temporary file '{}'", temp_path.display())
   })?;
+
   let write_result = binary_writer
     .write_bin(
-      &mut file,
+      file,
       eszip,
       root_dir_url,
       &module_specifier,
@@ -140,7 +141,6 @@
     .with_context(|| {
       format!("Writing temporary file '{}'", temp_path.display())
     });
-  drop(file);

   // set it as executable
   #[cfg(unix)]
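Note (illustrative, not part of the patch): write_bin now takes the temporary File by value, so the handle is consumed and closed by the time the writer returns, and the explicit drop(file) in compile.rs is no longer needed. A minimal sketch of that ownership transfer; the function name and payload are hypothetical:

use std::fs::File;
use std::io::Write;

fn write_payload(mut file: File, payload: &[u8]) -> std::io::Result<()> {
  file.write_all(payload)?;
  // `file` was moved into this function, so it is dropped (and the OS handle
  // closed) on return, before the caller marks the temp file executable and
  // renames it into place.
  Ok(())
}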