diff --git a/Cargo.lock b/Cargo.lock
index f357d4b..eae2709 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -130,7 +130,7 @@ checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8"
 
 [[package]]
 name = "avocado-cli"
-version = "0.21.0"
+version = "0.22.0"
 dependencies = [
  "anyhow",
  "base64",
diff --git a/Cargo.toml b/Cargo.toml
index 814cc68..5142423 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "avocado-cli"
-version = "0.21.0"
+version = "0.22.0"
 edition = "2021"
 description = "Command line interface for Avocado."
 authors = ["Avocado"]
diff --git a/src/commands/clean.rs b/src/commands/clean.rs
index 40d0d3c..f6c461c 100644
--- a/src/commands/clean.rs
+++ b/src/commands/clean.rs
@@ -4,6 +4,7 @@ use std::path::{Path, PathBuf};
 
 use crate::utils::config::Config;
 use crate::utils::container::{RunConfig, SdkContainer};
+use crate::utils::lockfile::LockFile;
 use crate::utils::output::{print_error, print_info, print_success, OutputLevel};
 use crate::utils::target::resolve_target_required;
 use crate::utils::volume::{VolumeManager, VolumeState};
@@ -22,12 +23,14 @@ pub struct CleanCommand {
     verbose: bool,
     /// Whether to remove stamp files
     stamps: bool,
-    /// Path to configuration file (needed for --stamps)
+    /// Path to configuration file (needed for --stamps and --unlock)
     config_path: Option<String>,
-    /// Target architecture (needed for --stamps)
+    /// Target architecture (needed for --stamps and --unlock)
     target: Option<String>,
     /// Force removal by killing and removing containers using the volume
     force: bool,
+    /// Whether to unlock (clear lock file entries) for all sysroots
+    unlock: bool,
 }
 
 impl CleanCommand {
@@ -53,6 +56,7 @@ impl CleanCommand {
             config_path: None,
             target: None,
             force: false,
+            unlock: false,
         }
     }
 
@@ -80,6 +84,12 @@ impl CleanCommand {
         self
     }
 
+    /// Set whether to unlock (clear lock file entries) for all sysroots
+    pub fn with_unlock(mut self, unlock: bool) -> Self {
+        self.unlock = unlock;
+        self
+    }
+
     /// Executes the clean command, removing volumes, state files, and optionally legacy directories.
/// /// # Returns @@ -125,6 +135,64 @@ impl CleanCommand { self.clean_stamps(&directory_path).await?; } + // Unlock (clear lock file entries) if requested + if self.unlock { + self.unlock_all(&directory_path)?; + } + + Ok(()) + } + + /// Unlock (clear lock file entries) for all sysroots + fn unlock_all(&self, _directory_path: &Path) -> Result<()> { + let config_path = self.config_path.as_ref().ok_or_else(|| { + anyhow::anyhow!("--unlock requires a config file to be specified with -C/--config") + })?; + + let config = Config::load(config_path)?; + let target = resolve_target_required(self.target.as_deref(), &config)?; + + // Get src_dir from config + let src_dir = config.get_resolved_src_dir(config_path).unwrap_or_else(|| { + Path::new(config_path) + .parent() + .unwrap_or(Path::new(".")) + .to_path_buf() + }); + + // Load lock file + let mut lock_file = LockFile::load(&src_dir) + .with_context(|| format!("Failed to load lock file from {}", src_dir.display()))?; + + if lock_file.is_empty() { + if self.verbose { + print_info( + "Lock file is empty, nothing to unlock.", + OutputLevel::Normal, + ); + } + return Ok(()); + } + + // Clear all entries for the target + if self.verbose { + print_info( + &format!("Unlocking all entries for target '{}'", target), + OutputLevel::Normal, + ); + } + lock_file.clear_all(&target); + + // Save updated lock file + lock_file + .save(&src_dir) + .with_context(|| "Failed to save lock file")?; + + print_success( + &format!("Unlocked all entries for target '{}'.", target), + OutputLevel::Normal, + ); + Ok(()) } diff --git a/src/commands/ext/install.rs b/src/commands/ext/install.rs index 505f441..e4efd66 100644 --- a/src/commands/ext/install.rs +++ b/src/commands/ext/install.rs @@ -635,6 +635,7 @@ $DNF_SDK_HOST \ repo_url.cloned(), repo_release.cloned(), merged_container_args.clone(), + runs_on_context, ) .await?; diff --git a/src/commands/install.rs b/src/commands/install.rs index d796868..776a63b 100644 --- a/src/commands/install.rs +++ b/src/commands/install.rs @@ -11,6 +11,7 @@ use crate::utils::{ container::SdkContainer, lockfile::{build_package_spec_with_lock, LockFile, SysrootType}, output::{print_info, print_success, OutputLevel}, + stamps::get_local_arch, target::validate_and_log_target, }; @@ -807,6 +808,7 @@ $DNF_SDK_HOST \ repo_url, repo_release, merged_container_args, + None, // TODO: Add runs_on_context support to install.rs ) .await?; @@ -1050,6 +1052,7 @@ $DNF_SDK_HOST \ repo_url, repo_release, merged_container_args, + None, // TODO: Add runs_on_context support to install.rs ) .await?; @@ -1145,6 +1148,9 @@ $DNF_SDK_HOST \ }; // Build list of SDK packages to install (using lock file for version pinning) + // SDK packages are keyed by host architecture since they run on the host + let sdk_sysroot = SysrootType::Sdk(get_local_arch().to_string()); + let mut sdk_packages = Vec::new(); let mut sdk_package_names = Vec::new(); for (pkg_name_val, version_spec) in sdk_deps_map { @@ -1166,7 +1172,7 @@ $DNF_SDK_HOST \ let package_spec = build_package_spec_with_lock( lock_file, target, - &SysrootType::Sdk, + &sdk_sysroot, pkg_name, &config_version, ); @@ -1261,18 +1267,19 @@ $DNF_SDK_HOST \ if !sdk_package_names.is_empty() { let installed_versions = container_helper .query_installed_packages( - &SysrootType::Sdk, + &sdk_sysroot, &sdk_package_names, container_image, target, repo_url, repo_release, merged_container_args, + None, // TODO: Add runs_on_context support to install.rs ) .await?; if !installed_versions.is_empty() { - 
lock_file.update_sysroot_versions(target, &SysrootType::Sdk, installed_versions); + lock_file.update_sysroot_versions(target, &sdk_sysroot, installed_versions); if self.verbose { print_info( &format!("Updated lock file with SDK dependencies from external config '{external_config_path}'."), diff --git a/src/commands/mod.rs b/src/commands/mod.rs index 4e9c432..6f7050f 100644 --- a/src/commands/mod.rs +++ b/src/commands/mod.rs @@ -11,4 +11,5 @@ pub mod runtime; pub mod sdk; pub mod sign; pub mod signing_keys; +pub mod unlock; pub mod upgrade; diff --git a/src/commands/runtime/install.rs b/src/commands/runtime/install.rs index e388cc5..8e30479 100644 --- a/src/commands/runtime/install.rs +++ b/src/commands/runtime/install.rs @@ -527,6 +527,7 @@ $DNF_SDK_HOST \ repo_url.cloned(), repo_release.cloned(), merged_container_args.clone(), + runs_on_context, ) .await?; diff --git a/src/commands/sdk/install.rs b/src/commands/sdk/install.rs index c6414c5..27ff0db 100644 --- a/src/commands/sdk/install.rs +++ b/src/commands/sdk/install.rs @@ -182,6 +182,30 @@ impl SdkInstallCommand { merged_container_args: Option<&Vec>, runs_on_context: Option<&RunsOnContext>, ) -> Result<()> { + // Determine host architecture for SDK package tracking + // For remote execution, query the remote host; for local, use local arch + let host_arch = if let Some(context) = runs_on_context { + context + .get_host_arch() + .await + .with_context(|| "Failed to get remote host architecture")? + } else { + get_local_arch().to_string() + }; + + // Create SDK sysroot type with the host architecture + let sdk_sysroot = SysrootType::Sdk(host_arch.clone()); + + if self.verbose { + print_info( + &format!( + "Using host architecture '{}' for SDK package tracking.", + host_arch + ), + OutputLevel::Normal, + ); + } + // Load lock file for reproducible builds let src_dir = config .get_resolved_src_dir(&self.config_path) @@ -390,7 +414,7 @@ MACROS_EOF let sdk_target_pkg = build_package_spec_with_lock( &lock_file, target, - &SysrootType::Sdk, + &sdk_sysroot, &sdk_target_pkg_name, sdk_target_config_version, ); @@ -445,13 +469,15 @@ $DNF_SDK_HOST $DNF_NO_SCRIPTS \ )); } - // Run check-update to refresh metadata + // Run check-update to refresh metadata using the combined repo config. + // This uses arch-specific varsdir for correct architecture filtering, + // with repos from both arch-specific SDK and target-repoconf. let check_update_command = r#" RPM_CONFIGDIR=$AVOCADO_SDK_PREFIX/usr/lib/rpm \ RPM_ETCCONFIGDIR=$AVOCADO_SDK_PREFIX \ $DNF_SDK_HOST \ $DNF_SDK_HOST_OPTS \ - $DNF_SDK_REPO_CONF \ + $DNF_SDK_COMBINED_REPO_CONF \ check-update || true "#; @@ -484,18 +510,21 @@ $DNF_SDK_HOST \ let bootstrap_pkg = build_package_spec_with_lock( &lock_file, target, - &SysrootType::Sdk, + &sdk_sysroot, bootstrap_pkg_name, bootstrap_config_version, ); + // Use combined repo config for bootstrap installation. + // The bootstrap package is a nativesdk package that needs both the base repos + // (from arch-specific SDK) and target-specific repos (from target-repoconf). 
let bootstrap_command = format!( r#" RPM_CONFIGDIR=$AVOCADO_SDK_PREFIX/usr/lib/rpm \ RPM_ETCCONFIGDIR=$AVOCADO_SDK_PREFIX \ $DNF_SDK_HOST $DNF_NO_SCRIPTS \ $DNF_SDK_HOST_OPTS \ - $DNF_SDK_REPO_CONF \ + $DNF_SDK_COMBINED_REPO_CONF \ -y \ install \ {} @@ -583,7 +612,7 @@ fi dependencies, &lock_file, target, - &SysrootType::Sdk, + &sdk_sysroot, )); sdk_package_names.extend(self.extract_package_names(dependencies)); } @@ -597,7 +626,7 @@ fi ); } let ext_packages = - self.build_package_list_with_lock(ext_deps, &lock_file, target, &SysrootType::Sdk); + self.build_package_list_with_lock(ext_deps, &lock_file, target, &sdk_sysroot); sdk_packages.extend(ext_packages); sdk_package_names.extend(self.extract_package_names(ext_deps)); } @@ -610,13 +639,18 @@ fi String::new() }; + // Use combined repo config for SDK dependencies. + // SDK dependencies are nativesdk packages that need both the base repos + // (from arch-specific SDK) and target-specific repos (from target-repoconf). + // The combined config uses arch-specific varsdir for correct architecture + // filtering, which is critical for --runs-on with cross-arch targets. let command = format!( r#" RPM_ETCCONFIGDIR=$AVOCADO_SDK_PREFIX \ RPM_CONFIGDIR=$AVOCADO_SDK_PREFIX/usr/lib/rpm \ $DNF_SDK_HOST \ $DNF_SDK_HOST_OPTS \ - $DNF_SDK_REPO_CONF \ + $DNF_SDK_COMBINED_REPO_CONF \ --disablerepo=${{AVOCADO_TARGET}}-target-ext \ {} \ {} \ @@ -663,18 +697,19 @@ $DNF_SDK_HOST \ if !all_sdk_package_names.is_empty() { let installed_versions = container_helper .query_installed_packages( - &SysrootType::Sdk, + &sdk_sysroot, &all_sdk_package_names, container_image, target, repo_url.map(|s| s.to_string()), repo_release.map(|s| s.to_string()), merged_container_args.cloned(), + runs_on_context, ) .await?; if !installed_versions.is_empty() { - lock_file.update_sysroot_versions(target, &SysrootType::Sdk, installed_versions); + lock_file.update_sysroot_versions(target, &sdk_sysroot, installed_versions); if self.verbose { print_info( &format!( @@ -753,6 +788,7 @@ $DNF_SDK_HOST $DNF_NO_SCRIPTS $DNF_SDK_TARGET_REPO_CONF \ repo_url.map(|s| s.to_string()), repo_release.map(|s| s.to_string()), merged_container_args.cloned(), + runs_on_context, ) .await?; @@ -880,6 +916,7 @@ $DNF_SDK_HOST $DNF_NO_SCRIPTS $DNF_SDK_TARGET_REPO_CONF \ repo_url.map(|s| s.to_string()), repo_release.map(|s| s.to_string()), merged_container_args.cloned(), + runs_on_context, ) .await?; @@ -1019,6 +1056,7 @@ mod tests { let cmd = SdkInstallCommand::new("test.yaml".to_string(), false, false, None, None, None); let lock_file = LockFile::new(); let target = "qemux86-64"; + let sdk_x86 = SysrootType::Sdk("x86_64".to_string()); let mut deps = HashMap::new(); deps.insert("package1".to_string(), Value::String("*".to_string())); @@ -1028,8 +1066,7 @@ mod tests { serde_yaml::Value::Mapping(serde_yaml::Mapping::new()), ); - let packages = - cmd.build_package_list_with_lock(&deps, &lock_file, target, &SysrootType::Sdk); + let packages = cmd.build_package_list_with_lock(&deps, &lock_file, target, &sdk_x86); assert_eq!(packages.len(), 3); assert!(packages.contains(&"package1".to_string())); @@ -1042,11 +1079,12 @@ mod tests { let cmd = SdkInstallCommand::new("test.yaml".to_string(), false, false, None, None, None); let mut lock_file = LockFile::new(); let target = "qemux86-64"; + let sdk_x86 = SysrootType::Sdk("x86_64".to_string()); // Add a locked version for package1 lock_file.update_sysroot_versions( target, - &SysrootType::Sdk, + &sdk_x86, [("package1".to_string(), "2.0.0-r0.x86_64".to_string())] .into_iter() 
.collect(), @@ -1056,8 +1094,7 @@ mod tests { deps.insert("package1".to_string(), Value::String("*".to_string())); deps.insert("package2".to_string(), Value::String("1.0.0".to_string())); - let packages = - cmd.build_package_list_with_lock(&deps, &lock_file, target, &SysrootType::Sdk); + let packages = cmd.build_package_list_with_lock(&deps, &lock_file, target, &sdk_x86); assert_eq!(packages.len(), 2); // package1 should use locked version instead of "*" diff --git a/src/commands/unlock.rs b/src/commands/unlock.rs new file mode 100644 index 0000000..a6a55c6 --- /dev/null +++ b/src/commands/unlock.rs @@ -0,0 +1,373 @@ +//! Unlock command implementation for removing lock file entries. + +use anyhow::{Context, Result}; +use std::path::Path; + +use crate::utils::config::Config; +use crate::utils::lockfile::LockFile; +use crate::utils::output::{print_info, print_success, OutputLevel}; +use crate::utils::target::resolve_target_required; + +/// Command to unlock (remove lock entries for) sysroots. +/// +/// This command removes entries from the lock file, allowing packages to be +/// updated to newer versions on the next install. +pub struct UnlockCommand { + /// Path to configuration file + config_path: String, + /// Enable verbose output + verbose: bool, + /// Target architecture + target: Option, + /// Unlock specific extension + extension: Option, + /// Unlock specific runtime + runtime: Option, + /// Unlock SDK (includes rootfs, target-sysroot, and all SDK arches) + sdk: bool, +} + +impl UnlockCommand { + /// Create a new UnlockCommand instance + pub fn new( + config_path: String, + verbose: bool, + target: Option, + extension: Option, + runtime: Option, + sdk: bool, + ) -> Self { + Self { + config_path, + verbose, + target, + extension, + runtime, + sdk, + } + } + + /// Execute the unlock command + pub fn execute(&self) -> Result<()> { + // Load configuration + let config = Config::load(&self.config_path) + .with_context(|| format!("Failed to load config from {}", self.config_path))?; + + // Resolve target + let target = resolve_target_required(self.target.as_deref(), &config)?; + + // Get src_dir from config + let src_dir = config + .get_resolved_src_dir(&self.config_path) + .unwrap_or_else(|| { + Path::new(&self.config_path) + .parent() + .unwrap_or(Path::new(".")) + .to_path_buf() + }); + + // Load lock file + let mut lock_file = LockFile::load(&src_dir) + .with_context(|| format!("Failed to load lock file from {}", src_dir.display()))?; + + if lock_file.is_empty() { + print_info( + "Lock file is empty, nothing to unlock.", + OutputLevel::Normal, + ); + return Ok(()); + } + + // Determine what to unlock + let unlock_all = !self.sdk && self.extension.is_none() && self.runtime.is_none(); + + let mut unlocked_something = false; + + if unlock_all { + // Unlock everything for the target + if self.verbose { + print_info( + &format!("Unlocking all entries for target '{}'", target), + OutputLevel::Normal, + ); + } + lock_file.clear_all(&target); + unlocked_something = true; + print_success( + &format!("Unlocked all entries for target '{}'.", target), + OutputLevel::Normal, + ); + } else { + // Unlock SDK if requested + if self.sdk { + if self.verbose { + print_info( + &format!( + "Unlocking SDK, rootfs, and target-sysroot for target '{}'", + target + ), + OutputLevel::Normal, + ); + } + lock_file.clear_sdk(&target); + lock_file.clear_rootfs(&target); + lock_file.clear_target_sysroot(&target); + unlocked_something = true; + print_success( + &format!( + "Unlocked SDK, rootfs, and target-sysroot 
for target '{}'.", + target + ), + OutputLevel::Normal, + ); + } + + // Unlock extension if specified + if let Some(ref ext_name) = self.extension { + if self.verbose { + print_info( + &format!("Unlocking extension '{}' for target '{}'", ext_name, target), + OutputLevel::Normal, + ); + } + lock_file.clear_extension(&target, ext_name); + unlocked_something = true; + print_success( + &format!("Unlocked extension '{}' for target '{}'.", ext_name, target), + OutputLevel::Normal, + ); + } + + // Unlock runtime if specified + if let Some(ref runtime_name) = self.runtime { + if self.verbose { + print_info( + &format!( + "Unlocking runtime '{}' for target '{}'", + runtime_name, target + ), + OutputLevel::Normal, + ); + } + lock_file.clear_runtime(&target, runtime_name); + unlocked_something = true; + print_success( + &format!( + "Unlocked runtime '{}' for target '{}'.", + runtime_name, target + ), + OutputLevel::Normal, + ); + } + } + + if unlocked_something { + // Save updated lock file + lock_file + .save(&src_dir) + .with_context(|| "Failed to save lock file")?; + + if self.verbose { + print_info("Lock file updated.", OutputLevel::Normal); + } + } + + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::utils::lockfile::SysrootType; + use std::fs; + use tempfile::TempDir; + + fn create_test_config(temp_dir: &TempDir) -> String { + let config_content = r#" +default_target: "qemux86-64" +sdk: + image: "test-image" +ext: + my-app: + version: "1.0.0" +runtime: + dev: + target: "qemux86-64" +"#; + let config_path = temp_dir.path().join("avocado.yaml"); + fs::write(&config_path, config_content).unwrap(); + config_path.to_string_lossy().to_string() + } + + fn create_test_lock_file(temp_dir: &TempDir) { + let mut lock = LockFile::new(); + let target = "qemux86-64"; + + // Add some test entries + lock.set_locked_version( + target, + &SysrootType::Sdk("x86_64".to_string()), + "test-sdk-pkg", + "1.0.0-r0", + ); + lock.set_locked_version(target, &SysrootType::Rootfs, "test-rootfs-pkg", "1.0.0-r0"); + lock.set_locked_version( + target, + &SysrootType::TargetSysroot, + "test-sysroot-pkg", + "1.0.0-r0", + ); + lock.set_locked_version( + target, + &SysrootType::Extension("my-app".to_string()), + "test-ext-pkg", + "1.0.0-r0", + ); + lock.set_locked_version( + target, + &SysrootType::Runtime("dev".to_string()), + "test-runtime-pkg", + "1.0.0-r0", + ); + + lock.save(temp_dir.path()).unwrap(); + } + + #[test] + fn test_unlock_all() { + let temp_dir = TempDir::new().unwrap(); + let config_path = create_test_config(&temp_dir); + create_test_lock_file(&temp_dir); + + let cmd = UnlockCommand::new(config_path, false, None, None, None, false); + let result = cmd.execute(); + assert!(result.is_ok()); + + // Verify lock file is now empty for target + let lock = LockFile::load(temp_dir.path()).unwrap(); + assert!(lock + .get_locked_version( + "qemux86-64", + &SysrootType::Sdk("x86_64".to_string()), + "test-sdk-pkg" + ) + .is_none()); + } + + #[test] + fn test_unlock_sdk() { + let temp_dir = TempDir::new().unwrap(); + let config_path = create_test_config(&temp_dir); + create_test_lock_file(&temp_dir); + + let cmd = UnlockCommand::new(config_path, false, None, None, None, true); + let result = cmd.execute(); + assert!(result.is_ok()); + + // Verify SDK, rootfs, and target-sysroot are cleared but extensions/runtimes remain + let lock = LockFile::load(temp_dir.path()).unwrap(); + assert!(lock + .get_locked_version( + "qemux86-64", + &SysrootType::Sdk("x86_64".to_string()), + "test-sdk-pkg" + ) + .is_none()); 
+ assert!(lock + .get_locked_version("qemux86-64", &SysrootType::Rootfs, "test-rootfs-pkg") + .is_none()); + assert!(lock + .get_locked_version( + "qemux86-64", + &SysrootType::TargetSysroot, + "test-sysroot-pkg" + ) + .is_none()); + // Extensions and runtimes should still be present + assert!(lock + .get_locked_version( + "qemux86-64", + &SysrootType::Extension("my-app".to_string()), + "test-ext-pkg" + ) + .is_some()); + assert!(lock + .get_locked_version( + "qemux86-64", + &SysrootType::Runtime("dev".to_string()), + "test-runtime-pkg" + ) + .is_some()); + } + + #[test] + fn test_unlock_extension() { + let temp_dir = TempDir::new().unwrap(); + let config_path = create_test_config(&temp_dir); + create_test_lock_file(&temp_dir); + + let cmd = UnlockCommand::new( + config_path, + false, + None, + Some("my-app".to_string()), + None, + false, + ); + let result = cmd.execute(); + assert!(result.is_ok()); + + // Verify extension is cleared but others remain + let lock = LockFile::load(temp_dir.path()).unwrap(); + assert!(lock + .get_locked_version( + "qemux86-64", + &SysrootType::Extension("my-app".to_string()), + "test-ext-pkg" + ) + .is_none()); + assert!(lock + .get_locked_version( + "qemux86-64", + &SysrootType::Sdk("x86_64".to_string()), + "test-sdk-pkg" + ) + .is_some()); + } + + #[test] + fn test_unlock_runtime() { + let temp_dir = TempDir::new().unwrap(); + let config_path = create_test_config(&temp_dir); + create_test_lock_file(&temp_dir); + + let cmd = UnlockCommand::new( + config_path, + false, + None, + None, + Some("dev".to_string()), + false, + ); + let result = cmd.execute(); + assert!(result.is_ok()); + + // Verify runtime is cleared but others remain + let lock = LockFile::load(temp_dir.path()).unwrap(); + assert!(lock + .get_locked_version( + "qemux86-64", + &SysrootType::Runtime("dev".to_string()), + "test-runtime-pkg" + ) + .is_none()); + assert!(lock + .get_locked_version( + "qemux86-64", + &SysrootType::Sdk("x86_64".to_string()), + "test-sdk-pkg" + ) + .is_some()); + } +} diff --git a/src/main.rs b/src/main.rs index 10a150e..b78f1fc 100644 --- a/src/main.rs +++ b/src/main.rs @@ -30,6 +30,7 @@ use commands::sign::SignCommand; use commands::signing_keys::{ SigningKeysCreateCommand, SigningKeysListCommand, SigningKeysRemoveCommand, }; +use commands::unlock::UnlockCommand; use commands::upgrade::UpgradeCommand; #[derive(Parser)] @@ -116,15 +117,18 @@ enum Commands { /// Also remove stamp files (requires -C/--config and --target) #[arg(long)] stamps: bool, - /// Path to avocado.yaml configuration file (required when --stamps is used) + /// Path to avocado.yaml configuration file (required when --stamps or --unlock is used) #[arg(short = 'C', long)] config: Option, - /// Target architecture (required when --stamps is used) + /// Target architecture (required when --stamps or --unlock is used) #[arg(long)] target: Option, /// Force removal by killing and removing containers using the volume #[arg(short, long)] force: bool, + /// Also unlock (clear lock file entries) for all sysroots (requires -C/--config) + #[arg(long)] + unlock: bool, }, /// Install all components (SDK, extensions, and runtime dependencies) Install { @@ -300,6 +304,27 @@ enum Commands { #[arg(long)] dry_run: bool, }, + /// Unlock (remove lock entries for) sysroots to allow package updates + Unlock { + /// Path to avocado.yaml configuration file + #[arg(short = 'C', long, default_value = "avocado.yaml")] + config: String, + /// Enable verbose output + #[arg(short, long)] + verbose: bool, + /// Target architecture 
+ #[arg(short, long)] + target: Option, + /// Unlock a specific extension + #[arg(short = 'e', long = "extension")] + extension: Option, + /// Unlock a specific runtime + #[arg(short = 'r', long = "runtime")] + runtime: Option, + /// Unlock SDK (rootfs, target-sysroot, and all SDK arches) + #[arg(long)] + sdk: bool, + }, } #[derive(Subcommand)] @@ -747,13 +772,15 @@ async fn main() -> Result<()> { config, target, force, + unlock, } => { let clean_cmd = CleanCommand::new(directory, !skip_volumes, Some(container_tool), verbose) .with_stamps(stamps) .with_config_path(config) .with_target(target.or(cli.target.clone())) - .with_force(force); + .with_force(force) + .with_unlock(unlock); clean_cmd.execute().await?; Ok(()) } @@ -943,6 +970,25 @@ async fn main() -> Result<()> { prune_cmd.execute().await?; Ok(()) } + Commands::Unlock { + config, + verbose, + target, + extension, + runtime, + sdk, + } => { + let unlock_cmd = UnlockCommand::new( + config, + verbose, + target.or(cli.target), + extension, + runtime, + sdk, + ); + unlock_cmd.execute()?; + Ok(()) + } Commands::Runtime { command } => match command { RuntimeCommands::Install { runtime, diff --git a/src/utils/container.rs b/src/utils/container.rs index 64de531..db67afd 100644 --- a/src/utils/container.rs +++ b/src/utils/container.rs @@ -777,6 +777,7 @@ impl SdkContainer { /// * `repo_url` - Optional repository URL /// * `repo_release` - Optional repository release /// * `container_args` - Optional additional container arguments + /// * `runs_on_context` - Optional remote execution context for --runs-on support /// /// # Returns /// A HashMap of package name to version string (NEVRA format without name prefix) @@ -790,6 +791,7 @@ impl SdkContainer { repo_url: Option, repo_release: Option, container_args: Option>, + runs_on_context: Option<&crate::utils::runs_on::RunsOnContext>, ) -> Result> { if packages.is_empty() { return Ok(std::collections::HashMap::new()); @@ -824,10 +826,18 @@ impl SdkContainer { ..Default::default() }; - match self.run_in_container_with_output(run_config).await? { + // Use remote execution context if provided, otherwise run locally + let output = if let Some(context) = runs_on_context { + self.run_in_container_with_output_remote(&run_config, context) + .await? + } else { + self.run_in_container_with_output(run_config).await? + }; + + match output { Some(output) => { - // For SDK sysroots, strip architecture to make lock file portable across host architectures - let strip_arch = matches!(sysroot, crate::utils::lockfile::SysrootType::Sdk); + // For SDK sysroots, strip architecture from version string (but arch is tracked in sysroot key) + let strip_arch = matches!(sysroot, crate::utils::lockfile::SysrootType::Sdk(_)); let versions = crate::utils::lockfile::parse_rpm_query_output(&output, strip_arch); if self.verbose { print_info( @@ -871,6 +881,92 @@ impl SdkContainer { } } + /// Run a command in a remote container and capture its output + /// + /// This is similar to `run_in_container_with_output` but uses the provided + /// RunsOnContext for remote execution. 
+ async fn run_in_container_with_output_remote( + &self, + config: &RunConfig, + context: &crate::utils::runs_on::RunsOnContext, + ) -> Result> { + if !context.is_active() { + anyhow::bail!("RunsOnContext is not active (already torn down)"); + } + + // Build environment variables + let mut env_vars = config.env_vars.clone().unwrap_or_default(); + + // Set host platform - the remote is running the container + env_vars.insert("AVOCADO_HOST_PLATFORM".to_string(), "linux".to_string()); + + if let Some(url) = &config.repo_url { + env_vars.insert("AVOCADO_SDK_REPO_URL".to_string(), url.clone()); + } + if let Some(release) = &config.repo_release { + env_vars.insert("AVOCADO_SDK_REPO_RELEASE".to_string(), release.clone()); + } + if let Some(dnf_args) = &config.dnf_args { + env_vars.insert("AVOCADO_DNF_ARGS".to_string(), dnf_args.join(" ")); + } + if config.verbose || self.verbose { + env_vars.insert("AVOCADO_VERBOSE".to_string(), "1".to_string()); + } + + // Set target and SDK-related env vars + env_vars.insert("AVOCADO_TARGET".to_string(), config.target.clone()); + env_vars.insert("AVOCADO_SDK_TARGET".to_string(), config.target.clone()); + env_vars.insert("AVOCADO_SRC_DIR".to_string(), "/opt/src".to_string()); + + // Set host UID/GID for bindfs permission mapping on remote + let (host_uid, host_gid) = crate::utils::config::resolve_host_uid_gid(None); + env_vars.insert("AVOCADO_HOST_UID".to_string(), host_uid.to_string()); + env_vars.insert("AVOCADO_HOST_GID".to_string(), host_gid.to_string()); + env_vars.insert( + "AVOCADO_SDK_IMAGE".to_string(), + config.container_image.clone(), + ); + + // Build the complete command with entrypoint + let mut full_command = String::new(); + if config.use_entrypoint { + full_command.push_str(&self.create_entrypoint_script_for_remote( + config.source_environment, + config.extension_sysroot.as_deref(), + config.runtime_sysroot.as_deref(), + &config.target, + config.no_bootstrap, + config.disable_weak_dependencies, + )); + full_command.push('\n'); + } + full_command.push_str(&config.command); + + // Build extra Docker args + let mut extra_args: Vec = vec![ + "--device".to_string(), + "/dev/fuse".to_string(), + "--cap-add".to_string(), + "SYS_ADMIN".to_string(), + "--security-opt".to_string(), + "label=disable".to_string(), + ]; + + if let Some(ref args) = config.container_args { + extra_args.extend(args.clone()); + } + + // Run the container on the remote and capture output + context + .run_container_command_with_output( + &config.container_image, + &full_command, + env_vars, + &extra_args, + ) + .await + } + /// Execute the container command async fn execute_container_command( &self, @@ -1157,6 +1253,16 @@ export DNF_SDK_REPO_CONF="\ --setopt=reposdir=${{DNF_SDK_TARGET_PREFIX}}/etc/yum.repos.d \ " +# Combined repo config for SDK package installations (nativesdk packages). +# Uses arch-specific varsdir for correct architecture filtering, but includes +# BOTH repo directories: arch-specific SDK repos (base repos from container) +# and target-repoconf repos (target-specific repos like {target}-sdk). +# This ensures correct arch selection when running --runs-on with cross-arch targets. 
+export DNF_SDK_COMBINED_REPO_CONF="\ +--setopt=varsdir=${{DNF_SDK_HOST_PREFIX}}/etc/dnf/vars \ +--setopt=reposdir=${{DNF_SDK_HOST_PREFIX}}/etc/yum.repos.d,${{DNF_SDK_TARGET_PREFIX}}/etc/yum.repos.d \ +" + export DNF_SDK_TARGET_REPO_CONF="\ --setopt=varsdir=${{DNF_SDK_TARGET_PREFIX}}/etc/dnf/vars \ --setopt=reposdir=${{DNF_SDK_TARGET_PREFIX}}/etc/yum.repos.d \ @@ -1328,6 +1434,16 @@ export DNF_SDK_REPO_CONF="\ --setopt=reposdir=${{DNF_SDK_TARGET_PREFIX}}/etc/yum.repos.d \ " +# Combined repo config for SDK package installations (nativesdk packages). +# Uses arch-specific varsdir for correct architecture filtering, but includes +# BOTH repo directories: arch-specific SDK repos (base repos from container) +# and target-repoconf repos (target-specific repos like {target}-sdk). +# This ensures correct arch selection when running --runs-on with cross-arch targets. +export DNF_SDK_COMBINED_REPO_CONF="\ +--setopt=varsdir=${{DNF_SDK_HOST_PREFIX}}/etc/dnf/vars \ +--setopt=reposdir=${{DNF_SDK_HOST_PREFIX}}/etc/yum.repos.d,${{DNF_SDK_TARGET_PREFIX}}/etc/yum.repos.d \ +" + export DNF_SDK_TARGET_REPO_CONF="\ --setopt=varsdir=${{DNF_SDK_TARGET_PREFIX}}/etc/dnf/vars \ --setopt=reposdir=${{DNF_SDK_TARGET_PREFIX}}/etc/yum.repos.d \ diff --git a/src/utils/lockfile.rs b/src/utils/lockfile.rs index 6a51512..2725a1e 100644 --- a/src/utils/lockfile.rs +++ b/src/utils/lockfile.rs @@ -10,7 +10,8 @@ use std::fs; use std::path::{Path, PathBuf}; /// Current lock file format version -const LOCKFILE_VERSION: u32 = 1; +/// Version 2: SDK packages are now keyed by host architecture (sdk/{arch}) instead of just "sdk" +const LOCKFILE_VERSION: u32 = 2; /// Lock file name const LOCKFILE_NAME: &str = "lock.json"; @@ -21,8 +22,11 @@ const LOCKFILE_DIR: &str = ".avocado"; /// Represents different sysroot types for package installation #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub enum SysrootType { - /// SDK sysroot ($AVOCADO_SDK_PREFIX) - Sdk, + /// SDK sysroot ($AVOCADO_SDK_PREFIX) - keyed by host architecture + /// SDK packages are nativesdk packages that run on the host, so they need + /// to be tracked per host architecture (e.g., x86_64, aarch64). + /// The String parameter is the host architecture. 
+ Sdk(String), /// Rootfs sysroot ($AVOCADO_PREFIX/rootfs) Rootfs, /// Target sysroot ($AVOCADO_PREFIX/sdk/target-sysroot) @@ -38,38 +42,6 @@ pub enum SysrootType { } impl SysrootType { - /// Convert sysroot type to its string key for the lock file - pub fn to_key(&self) -> String { - match self { - SysrootType::Sdk => "sdk".to_string(), - SysrootType::Rootfs => "rootfs".to_string(), - SysrootType::TargetSysroot => "target-sysroot".to_string(), - // Both Extension and VersionedExtension use the same key format - // They're distinguished at query time but stored the same in lock file - SysrootType::Extension(name) | SysrootType::VersionedExtension(name) => { - format!("extensions/{}", name) - } - SysrootType::Runtime(name) => format!("runtimes/{}", name), - } - } - - /// Parse a string key back to a SysrootType - #[allow(dead_code)] - pub fn from_key(key: &str) -> Option { - match key { - "sdk" => Some(SysrootType::Sdk), - "rootfs" => Some(SysrootType::Rootfs), - "target-sysroot" => Some(SysrootType::TargetSysroot), - _ if key.starts_with("extensions/") => Some(SysrootType::Extension( - key.strip_prefix("extensions/")?.to_string(), - )), - _ if key.starts_with("runtimes/") => Some(SysrootType::Runtime( - key.strip_prefix("runtimes/")?.to_string(), - )), - _ => None, - } - } - /// Get the RPM query command environment and root path for this sysroot type /// Returns (rpm_etcconfigdir, rpm_configdir, root_path) as shell variable expressions /// @@ -78,7 +50,8 @@ impl SysrootType { /// point to $AVOCADO_SDK_PREFIX/var/lib/rpm. pub fn get_rpm_query_config(&self) -> RpmQueryConfig { match self { - SysrootType::Sdk => RpmQueryConfig { + // SDK config is the same regardless of which host arch we're tracking + SysrootType::Sdk(_arch) => RpmQueryConfig { // SDK needs custom RPM config to find the SDK's RPM database rpm_etcconfigdir: Some("$AVOCADO_SDK_PREFIX".to_string()), rpm_configdir: Some("$AVOCADO_SDK_PREFIX/usr/lib/rpm".to_string()), @@ -184,15 +157,50 @@ impl RpmQueryConfig { } } +/// Package versions map: package_name -> version +pub type PackageVersions = HashMap; + +/// Nested package versions map: sub_key -> package_name -> version +/// Used for SDK (keyed by host arch), extensions (keyed by name), runtimes (keyed by name) +pub type NestedPackageVersions = HashMap; + +/// Lock data for a single target +#[derive(Debug, Clone, Default, Serialize, Deserialize)] +pub struct TargetLocks { + /// SDK packages keyed by host architecture (x86_64, aarch64, etc.) + /// SDK packages are nativesdk packages that run on the host, so versions + /// can differ per host architecture. 
+ #[serde(default, skip_serializing_if = "HashMap::is_empty")] + pub sdk: NestedPackageVersions, + + /// Rootfs packages (shared across all host architectures) + #[serde(default, skip_serializing_if = "HashMap::is_empty")] + pub rootfs: PackageVersions, + + /// Target-sysroot packages (shared across all host architectures) + #[serde( + default, + skip_serializing_if = "HashMap::is_empty", + rename = "target-sysroot" + )] + pub target_sysroot: PackageVersions, + + /// Extension packages keyed by extension name + #[serde(default, skip_serializing_if = "HashMap::is_empty")] + pub extensions: NestedPackageVersions, + + /// Runtime packages keyed by runtime name + #[serde(default, skip_serializing_if = "HashMap::is_empty")] + pub runtimes: NestedPackageVersions, +} + /// Lock file structure for tracking installed package versions #[derive(Debug, Clone, Serialize, Deserialize)] pub struct LockFile { /// Lock file format version pub version: u32, - /// Package versions organized by target architecture, then by sysroot - /// Structure: targets -> target_name -> sysroot_key -> package_name -> version - /// Example: targets["qemux86-64"]["sdk"]["avocado-sdk-toolchain"] = "0.1.0-r0.x86_64" - pub targets: HashMap>>, + /// Package versions organized by target + pub targets: HashMap, } impl Default for LockFile { @@ -216,6 +224,10 @@ impl LockFile { } /// Load lock file from disk, or return a new one if it doesn't exist + /// + /// This function also handles migration from older lock file versions: + /// - Version 1 -> 2: SDK packages now nested under arch key, extensions/runtimes + /// restructured. Old format is migrated automatically. pub fn load(src_dir: &Path) -> Result { let path = Self::get_path(src_dir); @@ -226,19 +238,98 @@ impl LockFile { let content = fs::read_to_string(&path) .with_context(|| format!("Failed to read lock file: {}", path.display()))?; - let lock_file: LockFile = serde_json::from_str(&content) + // First, try to parse as v2 format + if let Ok(lock_file) = serde_json::from_str::(&content) { + if lock_file.version >= LOCKFILE_VERSION { + if lock_file.version > LOCKFILE_VERSION { + anyhow::bail!( + "Lock file version {} is newer than supported version {}. Please upgrade avocado-cli.", + lock_file.version, + LOCKFILE_VERSION + ); + } + return Ok(lock_file); + } + } + + // Try to parse as v1 format and migrate + let v1_lock: serde_json::Value = serde_json::from_str(&content) .with_context(|| format!("Failed to parse lock file: {}", path.display()))?; - // Check version compatibility - if lock_file.version > LOCKFILE_VERSION { - anyhow::bail!( - "Lock file version {} is newer than supported version {}. Please upgrade avocado-cli.", - lock_file.version, - LOCKFILE_VERSION - ); + let version = v1_lock.get("version").and_then(|v| v.as_u64()).unwrap_or(1) as u32; + + if version == 1 { + return Ok(Self::migrate_v1_to_v2(&v1_lock)); } - Ok(lock_file) + // Unknown format + anyhow::bail!("Unable to parse lock file format"); + } + + /// Migrate lock file from version 1 to version 2 + /// + /// Version 1 stored: + /// - SDK packages under flat "sdk" key + /// - Extensions under "extensions/{name}" + /// - Runtimes under "runtimes/{name}" + /// + /// Version 2 stores: + /// - SDK packages under nested sdk -> {arch} -> packages + /// - Extensions under nested extensions -> {name} -> packages + /// - Runtimes under nested runtimes -> {name} -> packages + /// + /// Since we can't know what architecture the v1 SDK packages were installed for, + /// we discard them. 
Users will need to re-run `avocado sdk install`. + fn migrate_v1_to_v2(v1_lock: &serde_json::Value) -> LockFile { + let mut lock_file = LockFile::new(); + + if let Some(targets) = v1_lock.get("targets").and_then(|t| t.as_object()) { + for (target_name, sysroots) in targets { + let target_locks = lock_file.targets.entry(target_name.clone()).or_default(); + + if let Some(sysroots_map) = sysroots.as_object() { + for (key, packages) in sysroots_map { + if let Some(packages_map) = packages.as_object() { + let pkg_versions: PackageVersions = packages_map + .iter() + .filter_map(|(k, v)| v.as_str().map(|s| (k.clone(), s.to_string()))) + .collect(); + + match key.as_str() { + "sdk" => { + // Discard - we don't know the host arch + } + "rootfs" => { + target_locks.rootfs = pkg_versions; + } + "target-sysroot" => { + target_locks.target_sysroot = pkg_versions; + } + _ if key.starts_with("extensions/") => { + if let Some(name) = key.strip_prefix("extensions/") { + target_locks + .extensions + .insert(name.to_string(), pkg_versions); + } + } + _ if key.starts_with("runtimes/") => { + if let Some(name) = key.strip_prefix("runtimes/") { + target_locks + .runtimes + .insert(name.to_string(), pkg_versions); + } + } + _ => { + // Unknown key, ignore + } + } + } + } + } + } + } + + lock_file } /// Save lock file to disk using JSON Canonicalization Scheme (RFC 8785) @@ -273,11 +364,24 @@ impl LockFile { sysroot: &SysrootType, package: &str, ) -> Option<&String> { - let sysroot_key = sysroot.to_key(); - self.targets - .get(target) - .and_then(|sysroots| sysroots.get(&sysroot_key)) - .and_then(|packages| packages.get(package)) + let target_locks = self.targets.get(target)?; + + match sysroot { + SysrootType::Sdk(arch) => target_locks + .sdk + .get(arch) + .and_then(|pkgs| pkgs.get(package)), + SysrootType::Rootfs => target_locks.rootfs.get(package), + SysrootType::TargetSysroot => target_locks.target_sysroot.get(package), + SysrootType::Extension(name) | SysrootType::VersionedExtension(name) => target_locks + .extensions + .get(name) + .and_then(|pkgs| pkgs.get(package)), + SysrootType::Runtime(name) => target_locks + .runtimes + .get(name) + .and_then(|pkgs| pkgs.get(package)), + } } /// Set the locked version for a package in a specific target and sysroot @@ -289,13 +393,19 @@ impl LockFile { package: &str, version: &str, ) { - let sysroot_key = sysroot.to_key(); - self.targets - .entry(target.to_string()) - .or_default() - .entry(sysroot_key) - .or_default() - .insert(package.to_string(), version.to_string()); + let target_locks = self.targets.entry(target.to_string()).or_default(); + + let packages = match sysroot { + SysrootType::Sdk(arch) => target_locks.sdk.entry(arch.clone()).or_default(), + SysrootType::Rootfs => &mut target_locks.rootfs, + SysrootType::TargetSysroot => &mut target_locks.target_sysroot, + SysrootType::Extension(name) | SysrootType::VersionedExtension(name) => { + target_locks.extensions.entry(name.clone()).or_default() + } + SysrootType::Runtime(name) => target_locks.runtimes.entry(name.clone()).or_default(), + }; + + packages.insert(package.to_string(), version.to_string()); } /// Update multiple package versions for a target and sysroot at once @@ -305,38 +415,126 @@ impl LockFile { sysroot: &SysrootType, versions: HashMap, ) { - let sysroot_key = sysroot.to_key(); - let entry = self - .targets - .entry(target.to_string()) - .or_default() - .entry(sysroot_key) - .or_default(); + let target_locks = self.targets.entry(target.to_string()).or_default(); + + let packages = match sysroot 
{ + SysrootType::Sdk(arch) => target_locks.sdk.entry(arch.clone()).or_default(), + SysrootType::Rootfs => &mut target_locks.rootfs, + SysrootType::TargetSysroot => &mut target_locks.target_sysroot, + SysrootType::Extension(name) | SysrootType::VersionedExtension(name) => { + target_locks.extensions.entry(name.clone()).or_default() + } + SysrootType::Runtime(name) => target_locks.runtimes.entry(name.clone()).or_default(), + }; + for (package, version) in versions { - entry.insert(package, version); + packages.insert(package, version); } } /// Get all locked versions for a target and sysroot + /// Returns None if no packages are recorded for this sysroot #[allow(dead_code)] pub fn get_sysroot_versions( &self, target: &str, sysroot: &SysrootType, ) -> Option<&HashMap> { - let sysroot_key = sysroot.to_key(); - self.targets - .get(target) - .and_then(|sysroots| sysroots.get(&sysroot_key)) + let target_locks = self.targets.get(target)?; + + let result = match sysroot { + SysrootType::Sdk(arch) => target_locks.sdk.get(arch), + SysrootType::Rootfs => Some(&target_locks.rootfs), + SysrootType::TargetSysroot => Some(&target_locks.target_sysroot), + SysrootType::Extension(name) | SysrootType::VersionedExtension(name) => { + target_locks.extensions.get(name) + } + SysrootType::Runtime(name) => target_locks.runtimes.get(name), + }; + + // Return None for empty collections (matches expected behavior) + result.filter(|m| !m.is_empty()) } /// Check if the lock file has any entries pub fn is_empty(&self) -> bool { self.targets.is_empty() - || self.targets.values().all(|sysroots| { - sysroots.is_empty() || sysroots.values().all(|packages| packages.is_empty()) + || self.targets.values().all(|target_locks| { + target_locks.sdk.is_empty() + && target_locks.rootfs.is_empty() + && target_locks.target_sysroot.is_empty() + && target_locks.extensions.is_empty() + && target_locks.runtimes.is_empty() }) } + + /// Clear all SDK entries for a specific target (all architectures) + pub fn clear_sdk(&mut self, target: &str) { + if let Some(target_locks) = self.targets.get_mut(target) { + target_locks.sdk.clear(); + } + } + + /// Clear rootfs entries for a specific target + pub fn clear_rootfs(&mut self, target: &str) { + if let Some(target_locks) = self.targets.get_mut(target) { + target_locks.rootfs.clear(); + } + } + + /// Clear target-sysroot entries for a specific target + pub fn clear_target_sysroot(&mut self, target: &str) { + if let Some(target_locks) = self.targets.get_mut(target) { + target_locks.target_sysroot.clear(); + } + } + + /// Clear a specific extension's entries for a target + pub fn clear_extension(&mut self, target: &str, extension_name: &str) { + if let Some(target_locks) = self.targets.get_mut(target) { + target_locks.extensions.remove(extension_name); + } + } + + /// Clear all extension entries for a target + #[allow(dead_code)] + pub fn clear_all_extensions(&mut self, target: &str) { + if let Some(target_locks) = self.targets.get_mut(target) { + target_locks.extensions.clear(); + } + } + + /// Clear a specific runtime's entries for a target + pub fn clear_runtime(&mut self, target: &str, runtime_name: &str) { + if let Some(target_locks) = self.targets.get_mut(target) { + target_locks.runtimes.remove(runtime_name); + } + } + + /// Clear all runtime entries for a target + #[allow(dead_code)] + pub fn clear_all_runtimes(&mut self, target: &str) { + if let Some(target_locks) = self.targets.get_mut(target) { + target_locks.runtimes.clear(); + } + } + + /// Clear all entries for a target (SDK, rootfs, 
target-sysroot, extensions, runtimes) + pub fn clear_all(&mut self, target: &str) { + if let Some(target_locks) = self.targets.get_mut(target) { + target_locks.sdk.clear(); + target_locks.rootfs.clear(); + target_locks.target_sysroot.clear(); + target_locks.extensions.clear(); + target_locks.runtimes.clear(); + } + } + + /// Get all target names in the lock file + #[allow(dead_code)] + pub fn get_targets(&self) -> Vec { + self.targets.keys().cloned().collect() + } } /// Parse rpm -q output into a map of package names to versions @@ -421,46 +619,6 @@ mod tests { use super::*; use tempfile::TempDir; - #[test] - fn test_sysroot_type_to_key() { - assert_eq!(SysrootType::Sdk.to_key(), "sdk"); - assert_eq!(SysrootType::Rootfs.to_key(), "rootfs"); - assert_eq!(SysrootType::TargetSysroot.to_key(), "target-sysroot"); - assert_eq!( - SysrootType::Extension("my-app".to_string()).to_key(), - "extensions/my-app" - ); - // VersionedExtension and Extension produce the same key format - // They're only distinguished at query time, not in the lock file - assert_eq!( - SysrootType::VersionedExtension("my-versioned-app".to_string()).to_key(), - "extensions/my-versioned-app" - ); - assert_eq!( - SysrootType::Runtime("dev".to_string()).to_key(), - "runtimes/dev" - ); - } - - #[test] - fn test_sysroot_type_from_key() { - assert_eq!(SysrootType::from_key("sdk"), Some(SysrootType::Sdk)); - assert_eq!(SysrootType::from_key("rootfs"), Some(SysrootType::Rootfs)); - assert_eq!( - SysrootType::from_key("target-sysroot"), - Some(SysrootType::TargetSysroot) - ); - assert_eq!( - SysrootType::from_key("extensions/my-app"), - Some(SysrootType::Extension("my-app".to_string())) - ); - assert_eq!( - SysrootType::from_key("runtimes/dev"), - Some(SysrootType::Runtime("dev".to_string())) - ); - assert_eq!(SysrootType::from_key("invalid"), None); - } - #[test] fn test_lock_file_new() { let lock = LockFile::new(); @@ -472,16 +630,24 @@ mod tests { fn test_lock_file_get_set_version() { let mut lock = LockFile::new(); let target = "qemux86-64"; + let sdk_x86 = SysrootType::Sdk("x86_64".to_string()); + let sdk_aarch64 = SysrootType::Sdk("aarch64".to_string()); - lock.set_locked_version(target, &SysrootType::Sdk, "test-package", "1.0.0-r0.x86_64"); + lock.set_locked_version(target, &sdk_x86, "test-package", "1.0.0-r0.x86_64"); assert_eq!( - lock.get_locked_version(target, &SysrootType::Sdk, "test-package"), + lock.get_locked_version(target, &sdk_x86, "test-package"), Some(&"1.0.0-r0.x86_64".to_string()) ); assert_eq!( - lock.get_locked_version(target, &SysrootType::Sdk, "nonexistent"), + lock.get_locked_version(target, &sdk_x86, "nonexistent"), + None + ); + + // Different host architecture should not have the package + assert_eq!( + lock.get_locked_version(target, &sdk_aarch64, "test-package"), None ); @@ -492,7 +658,7 @@ mod tests { // Different target should not have the package assert_eq!( - lock.get_locked_version("qemuarm64", &SysrootType::Sdk, "test-package"), + lock.get_locked_version("qemuarm64", &sdk_x86, "test-package"), None ); } @@ -566,29 +732,37 @@ wget 1.21-r0.core2_64 fn test_build_package_spec_with_lock() { let mut lock = LockFile::new(); let target = "qemux86-64"; - lock.set_locked_version(target, &SysrootType::Sdk, "curl", "7.88.1-r0.x86_64"); + let sdk_x86 = SysrootType::Sdk("x86_64".to_string()); + let sdk_aarch64 = SysrootType::Sdk("aarch64".to_string()); + lock.set_locked_version(target, &sdk_x86, "curl", "7.88.1-r0.x86_64"); // Should use locked version assert_eq!( - build_package_spec_with_lock(&lock, target, 
&SysrootType::Sdk, "curl", "*"), + build_package_spec_with_lock(&lock, target, &sdk_x86, "curl", "*"), "curl-7.88.1-r0.x86_64" ); // No lock, config says latest assert_eq!( - build_package_spec_with_lock(&lock, target, &SysrootType::Sdk, "wget", "*"), + build_package_spec_with_lock(&lock, target, &sdk_x86, "wget", "*"), "wget" ); // No lock, config specifies version assert_eq!( - build_package_spec_with_lock(&lock, target, &SysrootType::Sdk, "wget", "1.21"), + build_package_spec_with_lock(&lock, target, &sdk_x86, "wget", "1.21"), "wget-1.21" ); // Different target should not have curl locked assert_eq!( - build_package_spec_with_lock(&lock, "qemuarm64", &SysrootType::Sdk, "curl", "*"), + build_package_spec_with_lock(&lock, "qemuarm64", &sdk_x86, "curl", "*"), + "curl" + ); + + // Different host architecture should not have curl locked + assert_eq!( + build_package_spec_with_lock(&lock, target, &sdk_aarch64, "curl", "*"), "curl" ); } @@ -627,39 +801,49 @@ wget 1.21-r0.core2_64 fn test_update_sysroot_versions() { let mut lock = LockFile::new(); let target = "qemux86-64"; + let sdk_x86 = SysrootType::Sdk("x86_64".to_string()); let mut versions = HashMap::new(); versions.insert("pkg1".to_string(), "1.0.0-r0.x86_64".to_string()); versions.insert("pkg2".to_string(), "2.0.0-r0.x86_64".to_string()); - lock.update_sysroot_versions(target, &SysrootType::Sdk, versions); + lock.update_sysroot_versions(target, &sdk_x86, versions); assert_eq!( - lock.get_locked_version(target, &SysrootType::Sdk, "pkg1"), + lock.get_locked_version(target, &sdk_x86, "pkg1"), Some(&"1.0.0-r0.x86_64".to_string()) ); assert_eq!( - lock.get_locked_version(target, &SysrootType::Sdk, "pkg2"), + lock.get_locked_version(target, &sdk_x86, "pkg2"), Some(&"2.0.0-r0.x86_64".to_string()) ); } #[test] - fn test_multiple_targets() { + fn test_multiple_targets_and_host_archs() { let mut lock = LockFile::new(); + let sdk_x86 = SysrootType::Sdk("x86_64".to_string()); + let sdk_aarch64 = SysrootType::Sdk("aarch64".to_string()); - // Set versions for two different targets - lock.set_locked_version("qemux86-64", &SysrootType::Sdk, "curl", "7.88.1-r0.x86_64"); - lock.set_locked_version("qemuarm64", &SysrootType::Sdk, "curl", "7.88.1-r0.aarch64"); + // Set versions for two different targets on same host arch + lock.set_locked_version("qemux86-64", &sdk_x86, "curl", "7.88.1-r0"); + lock.set_locked_version("qemuarm64", &sdk_x86, "curl", "7.88.1-r0"); - // Each target should have its own version + // Set version for same target but different host arch + lock.set_locked_version("qemux86-64", &sdk_aarch64, "curl", "7.88.1-r0.4"); + + // Each target+arch combo should have its own version assert_eq!( - lock.get_locked_version("qemux86-64", &SysrootType::Sdk, "curl"), - Some(&"7.88.1-r0.x86_64".to_string()) + lock.get_locked_version("qemux86-64", &sdk_x86, "curl"), + Some(&"7.88.1-r0".to_string()) + ); + assert_eq!( + lock.get_locked_version("qemuarm64", &sdk_x86, "curl"), + Some(&"7.88.1-r0".to_string()) ); assert_eq!( - lock.get_locked_version("qemuarm64", &SysrootType::Sdk, "curl"), - Some(&"7.88.1-r0.aarch64".to_string()) + lock.get_locked_version("qemux86-64", &sdk_aarch64, "curl"), + Some(&"7.88.1-r0.4".to_string()) ); } @@ -669,7 +853,8 @@ wget 1.21-r0.core2_64 assert!(lock.is_empty()); let mut lock = LockFile::new(); - lock.set_locked_version("qemux86-64", &SysrootType::Sdk, "curl", "7.88.1-r0.x86_64"); + let sdk_x86 = SysrootType::Sdk("x86_64".to_string()); + lock.set_locked_version("qemux86-64", &sdk_x86, "curl", "7.88.1-r0.x86_64"); 
assert!(!lock.is_empty()); } @@ -686,9 +871,10 @@ wget 1.21-r0.core2_64 fn test_multiple_sysroots_same_target() { let mut lock = LockFile::new(); let target = "qemux86-64"; + let sdk_x86 = SysrootType::Sdk("x86_64".to_string()); // Set versions for different sysroots under the same target - lock.set_locked_version(target, &SysrootType::Sdk, "toolchain", "1.0.0-r0.x86_64"); + lock.set_locked_version(target, &sdk_x86, "toolchain", "1.0.0-r0.x86_64"); lock.set_locked_version( target, &SysrootType::Rootfs, @@ -716,7 +902,7 @@ wget 1.21-r0.core2_64 // Verify each sysroot has its package assert_eq!( - lock.get_locked_version(target, &SysrootType::Sdk, "toolchain"), + lock.get_locked_version(target, &sdk_x86, "toolchain"), Some(&"1.0.0-r0.x86_64".to_string()) ); assert_eq!( @@ -746,7 +932,7 @@ wget 1.21-r0.core2_64 // Verify cross-sysroot isolation assert_eq!( - lock.get_locked_version(target, &SysrootType::Sdk, "base-files"), + lock.get_locked_version(target, &sdk_x86, "base-files"), None ); } @@ -755,17 +941,18 @@ wget 1.21-r0.core2_64 fn test_version_update_overwrites() { let mut lock = LockFile::new(); let target = "qemux86-64"; + let sdk_x86 = SysrootType::Sdk("x86_64".to_string()); - lock.set_locked_version(target, &SysrootType::Sdk, "curl", "7.88.0-r0.x86_64"); + lock.set_locked_version(target, &sdk_x86, "curl", "7.88.0-r0.x86_64"); assert_eq!( - lock.get_locked_version(target, &SysrootType::Sdk, "curl"), + lock.get_locked_version(target, &sdk_x86, "curl"), Some(&"7.88.0-r0.x86_64".to_string()) ); // Update to new version - lock.set_locked_version(target, &SysrootType::Sdk, "curl", "7.88.1-r0.x86_64"); + lock.set_locked_version(target, &sdk_x86, "curl", "7.88.1-r0.x86_64"); assert_eq!( - lock.get_locked_version(target, &SysrootType::Sdk, "curl"), + lock.get_locked_version(target, &sdk_x86, "curl"), Some(&"7.88.1-r0.x86_64".to_string()) ); } @@ -774,24 +961,25 @@ wget 1.21-r0.core2_64 fn test_update_sysroot_versions_merges() { let mut lock = LockFile::new(); let target = "qemux86-64"; + let sdk_x86 = SysrootType::Sdk("x86_64".to_string()); // Add initial packages let mut versions1 = HashMap::new(); versions1.insert("pkg1".to_string(), "1.0.0-r0.x86_64".to_string()); - lock.update_sysroot_versions(target, &SysrootType::Sdk, versions1); + lock.update_sysroot_versions(target, &sdk_x86, versions1); // Add more packages (should merge, not replace) let mut versions2 = HashMap::new(); versions2.insert("pkg2".to_string(), "2.0.0-r0.x86_64".to_string()); - lock.update_sysroot_versions(target, &SysrootType::Sdk, versions2); + lock.update_sysroot_versions(target, &sdk_x86, versions2); // Both packages should exist assert_eq!( - lock.get_locked_version(target, &SysrootType::Sdk, "pkg1"), + lock.get_locked_version(target, &sdk_x86, "pkg1"), Some(&"1.0.0-r0.x86_64".to_string()) ); assert_eq!( - lock.get_locked_version(target, &SysrootType::Sdk, "pkg2"), + lock.get_locked_version(target, &sdk_x86, "pkg2"), Some(&"2.0.0-r0.x86_64".to_string()) ); } @@ -800,11 +988,12 @@ wget 1.21-r0.core2_64 fn test_get_sysroot_versions() { let mut lock = LockFile::new(); let target = "qemux86-64"; + let sdk_x86 = SysrootType::Sdk("x86_64".to_string()); - lock.set_locked_version(target, &SysrootType::Sdk, "pkg1", "1.0.0-r0.x86_64"); - lock.set_locked_version(target, &SysrootType::Sdk, "pkg2", "2.0.0-r0.x86_64"); + lock.set_locked_version(target, &sdk_x86, "pkg1", "1.0.0-r0.x86_64"); + lock.set_locked_version(target, &sdk_x86, "pkg2", "2.0.0-r0.x86_64"); - let versions = lock.get_sysroot_versions(target, &SysrootType::Sdk); 
+ let versions = lock.get_sysroot_versions(target, &sdk_x86); assert!(versions.is_some()); let versions = versions.unwrap(); assert_eq!(versions.len(), 2); @@ -817,9 +1006,7 @@ wget 1.21-r0.core2_64 .is_none()); // Non-existent target should return None - assert!(lock - .get_sysroot_versions("nonexistent", &SysrootType::Sdk) - .is_none()); + assert!(lock.get_sysroot_versions("nonexistent", &sdk_x86).is_none()); } #[test] @@ -939,7 +1126,8 @@ avocado-sdk-toolchain 0.1.0-r0.x86_64_avocadosdk #[test] fn test_sysroot_type_get_rpm_query_config() { // Test SDK config - no root_path because SDK uses native container with custom RPM_CONFIGDIR - let sdk_config = SysrootType::Sdk.get_rpm_query_config(); + // The arch in SDK sysroot type doesn't affect the RPM query config + let sdk_config = SysrootType::Sdk("x86_64".to_string()).get_rpm_query_config(); assert_eq!( sdk_config.rpm_etcconfigdir, Some("$AVOCADO_SDK_PREFIX".to_string()) @@ -947,6 +1135,14 @@ avocado-sdk-toolchain 0.1.0-r0.x86_64_avocadosdk assert!(sdk_config.rpm_configdir.is_some()); assert!(sdk_config.root_path.is_none()); // SDK doesn't use --root + // Different arch should produce the same RPM query config + let sdk_config_aarch64 = SysrootType::Sdk("aarch64".to_string()).get_rpm_query_config(); + assert_eq!( + sdk_config.rpm_etcconfigdir, + sdk_config_aarch64.rpm_etcconfigdir + ); + assert_eq!(sdk_config.rpm_configdir, sdk_config_aarch64.rpm_configdir); + // Test Rootfs config - installroots don't need RPM_ETCCONFIGDIR, just --root let rootfs_config = SysrootType::Rootfs.get_rpm_query_config(); assert!(rootfs_config.rpm_etcconfigdir.is_none()); @@ -1000,50 +1196,46 @@ avocado-sdk-toolchain 0.1.0-r0.x86_64_avocadosdk #[test] fn test_lock_file_json_format() { let mut lock = LockFile::new(); - lock.set_locked_version("qemux86-64", &SysrootType::Sdk, "curl", "7.88.1-r0.x86_64"); + let sdk_x86 = SysrootType::Sdk("x86_64".to_string()); + let sdk_aarch64 = SysrootType::Sdk("aarch64".to_string()); + lock.set_locked_version("qemux86-64", &sdk_x86, "curl", "7.88.1-r0.x86_64"); lock.set_locked_version( "qemux86-64", &SysrootType::Extension("app".to_string()), "libfoo", "1.0.0-r0.core2_64", ); - lock.set_locked_version("qemuarm64", &SysrootType::Sdk, "curl", "7.88.1-r0.aarch64"); + lock.set_locked_version("qemuarm64", &sdk_aarch64, "curl", "7.88.1-r0.aarch64"); let json = serde_json::to_string_pretty(&lock).unwrap(); - // Verify JSON structure + // Verify JSON structure - SDK is nested under arch, extensions under name let parsed: serde_json::Value = serde_json::from_str(&json).unwrap(); - assert_eq!(parsed["version"], 1); - assert!(parsed["targets"]["qemux86-64"]["sdk"]["curl"].is_string()); - assert!(parsed["targets"]["qemux86-64"]["extensions/app"]["libfoo"].is_string()); - assert!(parsed["targets"]["qemuarm64"]["sdk"]["curl"].is_string()); + assert_eq!(parsed["version"], 2); + // SDK packages nested under sdk -> {arch} -> {package} + assert!(parsed["targets"]["qemux86-64"]["sdk"]["x86_64"]["curl"].is_string()); + // Extensions nested under extensions -> {name} -> {package} + assert!(parsed["targets"]["qemux86-64"]["extensions"]["app"]["libfoo"].is_string()); + assert!(parsed["targets"]["qemuarm64"]["sdk"]["aarch64"]["curl"].is_string()); } #[test] fn test_lock_file_persistence_multiple_targets() { let temp_dir = TempDir::new().unwrap(); let src_dir = temp_dir.path(); + let sdk_x86 = SysrootType::Sdk("x86_64".to_string()); + let sdk_aarch64 = SysrootType::Sdk("aarch64".to_string()); // Create lock file with multiple targets and sysroots let 
mut lock = LockFile::new(); - lock.set_locked_version( - "qemux86-64", - &SysrootType::Sdk, - "toolchain", - "1.0.0-r0.x86_64", - ); + lock.set_locked_version("qemux86-64", &sdk_x86, "toolchain", "1.0.0-r0.x86_64"); lock.set_locked_version( "qemux86-64", &SysrootType::Rootfs, "base", "1.0.0-r0.core2_64", ); - lock.set_locked_version( - "qemuarm64", - &SysrootType::Sdk, - "toolchain", - "1.0.0-r0.aarch64", - ); + lock.set_locked_version("qemuarm64", &sdk_aarch64, "toolchain", "1.0.0-r0.aarch64"); lock.set_locked_version( "qemuarm64", &SysrootType::Extension("app".to_string()), @@ -1057,7 +1249,7 @@ avocado-sdk-toolchain 0.1.0-r0.x86_64_avocadosdk let loaded = LockFile::load(src_dir).unwrap(); assert_eq!( - loaded.get_locked_version("qemux86-64", &SysrootType::Sdk, "toolchain"), + loaded.get_locked_version("qemux86-64", &sdk_x86, "toolchain"), Some(&"1.0.0-r0.x86_64".to_string()) ); assert_eq!( @@ -1065,7 +1257,7 @@ avocado-sdk-toolchain 0.1.0-r0.x86_64_avocadosdk Some(&"1.0.0-r0.core2_64".to_string()) ); assert_eq!( - loaded.get_locked_version("qemuarm64", &SysrootType::Sdk, "toolchain"), + loaded.get_locked_version("qemuarm64", &sdk_aarch64, "toolchain"), Some(&"1.0.0-r0.aarch64".to_string()) ); assert_eq!( @@ -1082,19 +1274,20 @@ avocado-sdk-toolchain 0.1.0-r0.x86_64_avocadosdk fn test_build_package_spec_locked_overrides_config() { let mut lock = LockFile::new(); let target = "qemux86-64"; + let sdk_x86 = SysrootType::Sdk("x86_64".to_string()); // Set a locked version - lock.set_locked_version(target, &SysrootType::Sdk, "curl", "7.88.1-r0.x86_64"); + lock.set_locked_version(target, &sdk_x86, "curl", "7.88.1-r0.x86_64"); // Even if config specifies a different version, locked version should be used assert_eq!( - build_package_spec_with_lock(&lock, target, &SysrootType::Sdk, "curl", "7.80.0"), + build_package_spec_with_lock(&lock, target, &sdk_x86, "curl", "7.80.0"), "curl-7.88.1-r0.x86_64" ); // And if config says "*", locked version should still be used assert_eq!( - build_package_spec_with_lock(&lock, target, &SysrootType::Sdk, "curl", "*"), + build_package_spec_with_lock(&lock, target, &sdk_x86, "curl", "*"), "curl-7.88.1-r0.x86_64" ); } @@ -1113,23 +1306,25 @@ avocado-sdk-toolchain 0.1.0-r0.x86_64_avocadosdk let temp_dir = TempDir::new().unwrap(); let src_dir = temp_dir.path(); + let sdk_x86 = SysrootType::Sdk("x86_64".to_string()); + let sdk_aarch64 = SysrootType::Sdk("aarch64".to_string()); // Create a lock file with packages in non-alphabetical order let mut lock1 = LockFile::new(); - lock1.set_locked_version("qemux86-64", &SysrootType::Sdk, "zebra", "1.0.0-r0"); - lock1.set_locked_version("qemux86-64", &SysrootType::Sdk, "alpha", "2.0.0-r0"); + lock1.set_locked_version("qemux86-64", &sdk_x86, "zebra", "1.0.0-r0"); + lock1.set_locked_version("qemux86-64", &sdk_x86, "alpha", "2.0.0-r0"); lock1.set_locked_version("qemux86-64", &SysrootType::Rootfs, "beta", "3.0.0-r0"); - lock1.set_locked_version("qemuarm64", &SysrootType::Sdk, "gamma", "4.0.0-r0"); + lock1.set_locked_version("qemuarm64", &sdk_aarch64, "gamma", "4.0.0-r0"); lock1.save(src_dir).unwrap(); let content1 = fs::read_to_string(LockFile::get_path(src_dir)).unwrap(); // Create another lock file with same data but added in different order let mut lock2 = LockFile::new(); - lock2.set_locked_version("qemuarm64", &SysrootType::Sdk, "gamma", "4.0.0-r0"); + lock2.set_locked_version("qemuarm64", &sdk_aarch64, "gamma", "4.0.0-r0"); lock2.set_locked_version("qemux86-64", &SysrootType::Rootfs, "beta", "3.0.0-r0"); - 
lock2.set_locked_version("qemux86-64", &SysrootType::Sdk, "alpha", "2.0.0-r0"); - lock2.set_locked_version("qemux86-64", &SysrootType::Sdk, "zebra", "1.0.0-r0"); + lock2.set_locked_version("qemux86-64", &sdk_x86, "alpha", "2.0.0-r0"); + lock2.set_locked_version("qemux86-64", &sdk_x86, "zebra", "1.0.0-r0"); // Remove the first lock file and save the second fs::remove_file(LockFile::get_path(src_dir)).unwrap(); @@ -1148,13 +1343,83 @@ avocado-sdk-toolchain 0.1.0-r0.x86_64_avocadosdk "Target keys should be alphabetically sorted" ); - // Verify package keys are sorted within each sysroot + // Verify package keys are sorted within sdk -> x86_64 nested object let sdk_start = content1.find("\"sdk\"").unwrap(); - let alpha_pos = content1[sdk_start..].find("\"alpha\"").unwrap() + sdk_start; - let zebra_pos = content1[sdk_start..].find("\"zebra\"").unwrap() + sdk_start; + let x86_start = content1[sdk_start..].find("\"x86_64\"").unwrap() + sdk_start; + let alpha_pos = content1[x86_start..].find("\"alpha\"").unwrap() + x86_start; + let zebra_pos = content1[x86_start..].find("\"zebra\"").unwrap() + x86_start; assert!( alpha_pos < zebra_pos, "Package keys should be alphabetically sorted" ); } + + #[test] + fn test_migrate_v1_to_v2() { + use tempfile::TempDir; + + let temp_dir = TempDir::new().unwrap(); + let src_dir = temp_dir.path(); + + // Create a v1 lock file manually (with old flat key format) + // v1 had: "sdk", "rootfs", "extensions/name", "runtimes/name" as flat keys + let v1_content = r#"{"targets":{"jetson-orin-nano-devkit":{"extensions/my-app":{"libfoo":"1.0.0-r0"},"rootfs":{"avocado-pkg-rootfs":"0.1.0-r0.0.avocado_jetson_orin_nano_devkit"},"runtimes/dev":{"runtime-base":"2.0.0-r0"},"sdk":{"avocado-sdk-bootstrap":"0.1.0-r0.0","avocado-sdk-toolchain":"0.1.0-r0.4"}}},"version":1} +"#; + + let lock_path = LockFile::get_path(src_dir); + std::fs::create_dir_all(lock_path.parent().unwrap()).unwrap(); + std::fs::write(&lock_path, v1_content).unwrap(); + + // Load the lock file - should trigger migration + let lock = LockFile::load(src_dir).unwrap(); + + // Version should be updated to 2 + assert_eq!(lock.version, 2); + + // Old "sdk" entries should be removed (we can't determine their host arch) + let sdk_x86 = SysrootType::Sdk("x86_64".to_string()); + let sdk_aarch64 = SysrootType::Sdk("aarch64".to_string()); + assert_eq!( + lock.get_locked_version("jetson-orin-nano-devkit", &sdk_x86, "avocado-sdk-toolchain"), + None + ); + assert_eq!( + lock.get_locked_version( + "jetson-orin-nano-devkit", + &sdk_aarch64, + "avocado-sdk-toolchain" + ), + None + ); + + // Rootfs entries should be preserved (they're not arch-dependent) + assert_eq!( + lock.get_locked_version( + "jetson-orin-nano-devkit", + &SysrootType::Rootfs, + "avocado-pkg-rootfs" + ), + Some(&"0.1.0-r0.0.avocado_jetson_orin_nano_devkit".to_string()) + ); + + // Extensions should be migrated from "extensions/name" to nested structure + assert_eq!( + lock.get_locked_version( + "jetson-orin-nano-devkit", + &SysrootType::Extension("my-app".to_string()), + "libfoo" + ), + Some(&"1.0.0-r0".to_string()) + ); + + // Runtimes should be migrated from "runtimes/name" to nested structure + assert_eq!( + lock.get_locked_version( + "jetson-orin-nano-devkit", + &SysrootType::Runtime("dev".to_string()), + "runtime-base" + ), + Some(&"2.0.0-r0".to_string()) + ); + } } diff --git a/src/utils/nfs_server.rs b/src/utils/nfs_server.rs index fd66d3f..f7d7b86 100644 --- a/src/utils/nfs_server.rs +++ b/src/utils/nfs_server.rs @@ -134,12 +134,18 @@ NFS_Core_Param {{ 
allow_set_io_flusher_fail = true; Nb_Max_Fd = 65536; Max_Open_Files = 10000; - DRC_Max_Size = 32768; + # Large duplicate request cache for builds with many files + DRC_Max_Size = 65536; # Short attribute cache for fresher file metadata during builds Attr_Expiration_Time = 3; # Single-client use case doesn't need many workers Nb_Worker = 32; Bind_addr = {}; + # TCP keepalive for long-running builds over potentially unstable networks + Enable_TCP_Keepalive = true; + TCP_Keepidle = 60; + TCP_Keepintvl = 10; + TCP_Keepcnt = 5; }} NFSV4 {{ diff --git a/src/utils/remote.rs b/src/utils/remote.rs index d60266a..20995df 100644 --- a/src/utils/remote.rs +++ b/src/utils/remote.rs @@ -69,6 +69,8 @@ impl RemoteHost { pub struct SshClient { remote: RemoteHost, verbose: bool, + /// Optional path to SSH ControlMaster socket for connection reuse + control_path: Option<std::path::PathBuf>, } impl SshClient { @@ -77,6 +79,7 @@ Self { remote, verbose: false, + control_path: None, } } @@ -86,6 +89,30 @@ self } + /// Set the ControlMaster socket path for SSH connection reuse + pub fn with_control_path(mut self, control_path: std::path::PathBuf) -> Self { + self.control_path = Some(control_path); + self + } + + /// Get base SSH arguments including ControlMaster options if configured + fn base_ssh_args(&self) -> Vec<String> { + let mut args = vec![ + "-o".to_string(), + "BatchMode=yes".to_string(), + "-o".to_string(), + "StrictHostKeyChecking=accept-new".to_string(), + ]; + + // If ControlMaster is configured, use the existing connection + if let Some(ref control_path) = self.control_path { + args.push("-o".to_string()); + args.push(format!("ControlPath={}", control_path.display())); + } + + args + } + /// Check SSH connectivity to the remote host /// /// This runs a simple command to verify we can connect via SSH. @@ -100,18 +127,17 @@ ); } + let mut args = self.base_ssh_args(); + args.extend([ + "-o".to_string(), + "ConnectTimeout=10".to_string(), + self.remote.ssh_target(), + "echo".to_string(), + "ok".to_string(), + ]); + let output = AsyncCommand::new("ssh") - .args([ - "-o", - "BatchMode=yes", - "-o", - "ConnectTimeout=10", - "-o", - "StrictHostKeyChecking=accept-new", - &self.remote.ssh_target(), - "echo", - "ok", - ]) + .args(&args) .output() .await .context("Failed to execute SSH command")?; @@ -175,17 +201,16 @@ // ~/.cargo/bin, ~/.local/bin, or other user-specific locations. // We use POSIX-compatible syntax (test -f && . instead of source) because // some embedded systems use /bin/sh which doesn't support bash-specific commands. + let mut args = self.base_ssh_args(); + args.extend([ + "-o".to_string(), + "ConnectTimeout=10".to_string(), + self.remote.ssh_target(), + "test -f ~/.profile && . ~/.profile; test -f ~/.bashrc && . ~/.bashrc; avocado --version 2>/dev/null || echo 'not-installed'".to_string(), + ]); + let output = AsyncCommand::new("ssh") - .args([ - "-o", - "BatchMode=yes", - "-o", - "ConnectTimeout=10", - "-o", - "StrictHostKeyChecking=accept-new", - &self.remote.ssh_target(), - "test -f ~/.profile && . ~/.profile; test -f ~/.bashrc && . 
~/.bashrc; avocado --version 2>/dev/null || echo 'not-installed'", - ]) + .args(&args) .output() .await .context("Failed to check remote avocado version")?; @@ -248,15 +273,11 @@ impl SshClient { ); } + let mut args = self.base_ssh_args(); + args.extend([self.remote.ssh_target(), command.to_string()]); + let output = AsyncCommand::new("ssh") - .args([ - "-o", - "BatchMode=yes", - "-o", - "StrictHostKeyChecking=accept-new", - &self.remote.ssh_target(), - command, - ]) + .args(&args) .output() .await .with_context(|| format!("Failed to run command on remote: {}", command))?; @@ -273,7 +294,10 @@ impl SshClient { Ok(String::from_utf8_lossy(&output.stdout).trim().to_string()) } - /// Run a command on the remote host, inheriting stdout/stderr + /// Run a command on the remote host, inheriting stdin/stdout/stderr + /// + /// This method properly forwards Ctrl+C and other signals to the remote process + /// by allocating a pseudo-terminal and inheriting all standard streams. pub async fn run_command_interactive(&self, command: &str) -> Result { if self.verbose { print_info( @@ -282,16 +306,28 @@ impl SshClient { ); } + // Build args manually for interactive commands to avoid BatchMode=yes + // which can interfere with signal handling + let mut args = vec![ + "-o".to_string(), + "StrictHostKeyChecking=accept-new".to_string(), + ]; + + // If ControlMaster is configured, use the existing connection + if let Some(ref control_path) = self.control_path { + args.push("-o".to_string()); + args.push(format!("ControlPath={}", control_path.display())); + } + + args.extend([ + "-tt".to_string(), // Force pseudo-terminal allocation (double -t for forced allocation) + self.remote.ssh_target(), + command.to_string(), + ]); + let status = AsyncCommand::new("ssh") - .args([ - "-o", - "BatchMode=yes", - "-o", - "StrictHostKeyChecking=accept-new", - "-t", // Force pseudo-terminal allocation for interactive commands - &self.remote.ssh_target(), - command, - ]) + .args(&args) + .stdin(Stdio::inherit()) // Inherit stdin for Ctrl+C forwarding .stdout(Stdio::inherit()) .stderr(Stdio::inherit()) .status() @@ -323,17 +359,16 @@ impl SshClient { ); } + let mut args = self.base_ssh_args(); + args.extend([ + "-o".to_string(), + "ConnectTimeout=10".to_string(), + self.remote.ssh_target(), + "uname -m".to_string(), + ]); + let output = AsyncCommand::new("ssh") - .args([ - "-o", - "BatchMode=yes", - "-o", - "ConnectTimeout=10", - "-o", - "StrictHostKeyChecking=accept-new", - &self.remote.ssh_target(), - "uname -m", - ]) + .args(&args) .output() .await .context("Failed to get remote architecture")?; @@ -360,6 +395,155 @@ impl SshClient { } } +/// SSH ControlMaster for persistent SSH connection reuse +/// +/// This creates a background SSH connection that can be reused by multiple +/// SSH commands via the ControlPath socket. This significantly reduces +/// connection overhead when running many commands on the same remote host. 
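// --- Illustrative sketch (editorial annotation, not part of this patch) ---
// How a caller might drive the ControlMaster for a batch of commands. The
// helper name `run_batch` and its signature are hypothetical; the
// SshControlMaster / SshClient calls below are the ones added in this diff.
async fn run_batch(remote: RemoteHost, commands: &[&str]) -> anyhow::Result<Vec<String>> {
    let mut master = SshControlMaster::start(remote, false).await?;
    let client = master.create_client();
    let mut outputs = Vec::new();
    for cmd in commands {
        // Each call reuses the multiplexed connection via ControlPath instead
        // of paying a full TCP + SSH handshake per command.
        outputs.push(client.run_command(cmd).await?);
    }
    // Tear the master down explicitly; Drop is only a best-effort fallback.
    master.stop().await?;
    Ok(outputs)
}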
+pub struct SshControlMaster { + /// Path to the control socket + control_path: std::path::PathBuf, + /// The master SSH process + process: Option<tokio::process::Child>, + /// Remote host for connection + remote: RemoteHost, + /// Whether verbose output is enabled + verbose: bool, +} + +impl SshControlMaster { + /// Create and start a new ControlMaster connection + pub async fn start(remote: RemoteHost, verbose: bool) -> Result<Self> { + // Create a unique control socket path + let session_id = uuid::Uuid::new_v4().to_string()[..8].to_string(); + let control_path = + std::path::PathBuf::from(format!("/tmp/avocado-ssh-{}-{}", remote.host, session_id)); + + if verbose { + print_info( + &format!("Starting SSH ControlMaster for {}...", remote.ssh_target()), + OutputLevel::Normal, + ); + } + + // Start the ControlMaster connection + // -M: Master mode + // -N: Don't execute a remote command + // (no -f: keep the child process attached so stop()/Drop can terminate it) + // -o ControlPath: Path to the control socket + // -o ControlPersist: Keep the master connection alive + let process = AsyncCommand::new("ssh") + .args([ + "-o", + "BatchMode=yes", + "-o", + "StrictHostKeyChecking=accept-new", + "-o", + "ConnectTimeout=10", + "-M", // Master mode + "-N", // Don't execute a remote command + "-o", + &format!("ControlPath={}", control_path.display()), + "-o", + "ControlPersist=yes", + &remote.ssh_target(), + ]) + .stdout(Stdio::null()) + .stderr(Stdio::null()) + .spawn() + .context("Failed to start SSH ControlMaster")?; + + // Give it a moment to establish the connection + tokio::time::sleep(tokio::time::Duration::from_millis(500)).await; + + // Verify the control socket was created + if !control_path.exists() { + // Wait a bit longer and try again + tokio::time::sleep(tokio::time::Duration::from_millis(1000)).await; + } + + if verbose { + print_info( + &format!( + "SSH ControlMaster established at {}", + control_path.display() + ), + OutputLevel::Normal, + ); + } + + Ok(Self { + control_path, + process: Some(process), + remote, + verbose, + }) + } + + /// Get the control socket path + #[allow(dead_code)] + pub fn control_path(&self) -> &std::path::Path { + &self.control_path + } + + /// Create an SshClient that uses this ControlMaster + pub fn create_client(&self) -> SshClient { + SshClient::new(self.remote.clone()) + .with_verbose(self.verbose) + .with_control_path(self.control_path.clone()) + } + + /// Stop the ControlMaster connection + pub async fn stop(&mut self) -> Result<()> { + if self.verbose { + print_info("Stopping SSH ControlMaster...", OutputLevel::Normal); + } + + // Send exit command to the control socket + let _ = AsyncCommand::new("ssh") + .args([ + "-o", + &format!("ControlPath={}", self.control_path.display()), + "-O", + "exit", + &self.remote.ssh_target(), + ]) + .output() + .await; + + // Kill the process if still running + if let Some(mut process) = self.process.take() { + let _ = process.kill().await; + } + + // Clean up the socket file + if self.control_path.exists() { + let _ = std::fs::remove_file(&self.control_path); + } + + Ok(()) + } +} + +impl Drop for SshControlMaster { + fn drop(&mut self) { + // Best effort cleanup + if let Some(ref mut process) = self.process { + #[cfg(unix)] + if let Some(pid) = process.id() { + unsafe { + libc::kill(pid as i32, libc::SIGTERM); + } + } + } + + // Clean up the socket file + if self.control_path.exists() { + let _ = std::fs::remove_file(&self.control_path); + } + } +} + /// Manager for creating and removing NFS-backed Docker volumes on remote hosts pub struct RemoteVolumeManager { ssh:
SshClient, @@ -389,26 +573,84 @@ impl RemoteVolumeManager { nfs_port: u16, export_path: &str, ) -> Result<()> { - // Mount options: + // Mount options for reliability: + // - hard: Never give up retrying requests (safer for builds) + // - timeo=600: 60-second timeout per retry (in tenths of seconds) + // - retrans=5: Retry 5 times before marking server unreachable // - actimeo=3: Short attribute cache timeout (3 seconds) for fresher metadata // - lookupcache=positive: Only cache successful lookups, not failures - // These help with stale handle issues from Docker Desktop's VirtioFS - // while maintaining reasonable performance + // - noatime: Don't update access times (reduces NFS traffic) + // - nconnect=4: Use multiple TCP connections for better throughput (kernel 5.3+) + // These help with stale handle issues and network reliability let command = format!( "{} volume create \ --driver local \ --opt type=nfs \ - --opt o=addr={},rw,nfsvers=4,port={},actimeo=3,lookupcache=positive \ + --opt o=addr={},rw,nfsvers=4,port={},hard,timeo=600,retrans=5,actimeo=3,lookupcache=positive,noatime,nconnect=4 \ --opt device=:{} \ {}", self.container_tool, nfs_host, nfs_port, export_path, volume_name ); - self.ssh.run_command(&command).await?; + // Retry logic for transient network issues + const MAX_RETRIES: u32 = 3; + const RETRY_DELAY_SECS: u64 = 2; + let mut last_error = None; + + for attempt in 1..=MAX_RETRIES { + match self.ssh.run_command(&command).await { + Ok(_) => { + if self.ssh.verbose { + print_info( + &format!("Created NFS volume '{}' on remote", volume_name), + OutputLevel::Normal, + ); + } + + // Verify the mount is functional by doing a quick I/O test + self.verify_nfs_volume(volume_name).await?; + + return Ok(()); + } + Err(e) => { + last_error = Some(e); + if attempt < MAX_RETRIES { + if self.ssh.verbose { + print_info( + &format!( + "NFS volume creation attempt {}/{} failed, retrying in {}s...", + attempt, MAX_RETRIES, RETRY_DELAY_SECS + ), + OutputLevel::Normal, + ); + } + tokio::time::sleep(tokio::time::Duration::from_secs(RETRY_DELAY_SECS)) + .await; + } + } + } + } + + Err(last_error.unwrap_or_else(|| anyhow::anyhow!("Failed to create NFS volume"))) + } + + /// Verify an NFS volume is functional by performing a quick I/O test + async fn verify_nfs_volume(&self, volume_name: &str) -> Result<()> { + // Run a quick container that writes and removes a test file + // This verifies the NFS mount is actually working + let verify_command = format!( + "{} run --rm -v {}:/test:rw alpine:latest sh -c 'touch /test/.nfs-health-check && rm /test/.nfs-health-check'", + self.container_tool, volume_name + ); + + self.ssh + .run_command(&verify_command) + .await + .context("NFS volume health check failed - mount may not be functional")?; if self.ssh.verbose { print_info( - &format!("Created NFS volume '{}' on remote", volume_name), + &format!("NFS volume '{}' health check passed", volume_name), OutputLevel::Normal, ); } diff --git a/src/utils/runs_on.rs b/src/utils/runs_on.rs index 9ad56b3..2c99840 100644 --- a/src/utils/runs_on.rs +++ b/src/utils/runs_on.rs @@ -14,7 +14,9 @@ use crate::utils::nfs_server::{ NfsServerConfig, DEFAULT_NFS_PORT_RANGE, }; use crate::utils::output::{print_info, print_success, OutputLevel}; -use crate::utils::remote::{get_local_ip_for_remote, RemoteHost, RemoteVolumeManager, SshClient}; +use crate::utils::remote::{ + get_local_ip_for_remote, RemoteHost, RemoteVolumeManager, SshClient, SshControlMaster, +}; #[cfg(unix)] use crate::utils::remote::SshTunnel; @@ -22,13 +24,16 
@@ use crate::utils::remote::SshTunnel; /// Context for remote execution via `--runs-on` /// /// This manages the lifecycle of: +/// - SSH ControlMaster for connection reuse /// - NFS server on the local host /// - NFS-backed Docker volumes on the remote host /// - SSH tunnel for signing (if needed) pub struct RunsOnContext { /// The remote host remote_host: RemoteHost, - /// SSH client for remote operations + /// SSH ControlMaster for connection reuse + ssh_master: Option<SshControlMaster>, + /// SSH client for remote operations (uses ControlMaster if available) ssh: SshClient, /// The running NFS server nfs_server: Option, @@ -88,23 +93,35 @@ println!(); print_info( &format!( - "🌐 Remote execution mode: running on {}", + "Remote execution mode: running on {}", remote_host.ssh_target() ), OutputLevel::Normal, ); println!(); - // Create SSH client and verify connectivity + // Start SSH ControlMaster for connection reuse + // This creates a persistent SSH connection that all subsequent commands will share + print_info( + "Establishing persistent SSH connection...", + OutputLevel::Normal, + ); + let ssh_master = SshControlMaster::start(remote_host.clone(), verbose) + .await + .context("Failed to establish SSH ControlMaster connection")?; + + // Create SSH client that uses the ControlMaster + let ssh = ssh_master.create_client(); + + // Verify connectivity using the multiplexed connection print_info("Checking SSH connectivity...", OutputLevel::Normal); - let ssh = SshClient::new(remote_host.clone()).with_verbose(verbose); ssh.check_connectivity().await?; // Check remote CLI version compatibility print_info("Checking remote avocado version...", OutputLevel::Normal); let remote_version = ssh.check_cli_version().await?; print_success( - &format!("Remote avocado version: {} ✓", remote_version), + &format!("Remote avocado version: {}", remote_version), OutputLevel::Normal, ); @@ -221,15 +238,13 @@ let src_volume_name = format!("avocado-src-{}", session_id); let state_volume_name = format!("avocado-state-{}", session_id); - // Create NFS-backed volumes on remote + // Create NFS-backed volumes on remote (use ControlMaster client) print_info( "Creating NFS volumes on remote host...", OutputLevel::Normal, ); - let remote_vm = RemoteVolumeManager::new( - SshClient::new(remote_host.clone()).with_verbose(verbose), - container_tool.to_string(), - ); + let remote_vm = + RemoteVolumeManager::new(ssh_master.create_client(), container_tool.to_string()); // Create source volume remote_vm @@ -253,15 +268,15 @@ ) })?; - print_success("Remote NFS volumes ready ✓", OutputLevel::Normal); + print_success("Remote NFS volumes ready.", OutputLevel::Normal); println!(); print_info( - &format!("📂 src_dir: {} → remote:/opt/src", src_dir.display()), + &format!("src_dir: {} -> remote:/opt/src", src_dir.display()), OutputLevel::Normal, ); print_info( &format!( - "📂 _avocado: {} → remote:/opt/_avocado", + "_avocado: {} -> remote:/opt/_avocado", state_export_path.display() ), OutputLevel::Normal, @@ -270,6 +285,7 @@ Ok(Self { remote_host, + ssh_master: Some(ssh_master), ssh, nfs_server: Some(nfs_server), nfs_port: port, @@ -310,6 +326,14 @@ self.remote_state_volume.as_deref() } + /// Get the CPU architecture of the remote host + /// + /// Returns the architecture string from `uname -m` (e.g., "x86_64", "aarch64"). + /// This is used to track SDK packages per host architecture in the lock file.
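// --- Illustrative sketch (editorial annotation, not part of this patch) ---
// How the remote architecture could feed the arch-keyed SDK entries of the v2
// lock file. `pin_sdk_package` is a hypothetical helper, and it assumes that
// LockFile, SysrootType, and build_package_spec_with_lock from the lock-file
// module touched earlier in this diff are in scope here.
async fn pin_sdk_package(
    ctx: &RunsOnContext,
    lock: &LockFile,
    target: &str,
    package: &str,
) -> anyhow::Result<String> {
    // SDK packages run on the build host, so they are keyed by its CPU arch
    // ("x86_64", "aarch64", ...) rather than by the image target.
    let arch = ctx.get_host_arch().await?;
    let sdk_sysroot = SysrootType::Sdk(arch);
    // Uses the locked version when one exists, otherwise falls back to the
    // version spec from the config ("*" here means "any").
    Ok(build_package_spec_with_lock(lock, target, &sdk_sysroot, package, "*"))
}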
+ pub async fn get_host_arch(&self) -> anyhow::Result<String> { + self.ssh.get_architecture().await + } + /// Setup SSH tunnel for signing /// /// This creates an SSH tunnel that forwards signing requests from the remote @@ -389,6 +413,69 @@ env_vars: HashMap<String, String>, extra_docker_args: &[String], ) -> Result { + let docker_cmd = self.build_docker_command(image, command, &env_vars, extra_docker_args)?; + + print_info( + &format!("Executing on {}...", self.remote_host.ssh_target()), + OutputLevel::Normal, + ); + println!(); + + if self.verbose { + print_info( + &format!("Running on remote: {}", docker_cmd), + OutputLevel::Verbose, + ); + } + + // Execute on remote + self.ssh.run_command_interactive(&docker_cmd).await + } + + /// Run a command on the remote host inside a container and capture output + /// + /// This is similar to `run_container_command` but captures stdout instead + /// of inheriting it. Used for commands that need to return output (like rpm queries). + /// + /// # Arguments + /// * `image` - Container image to use + /// * `command` - Command to run inside the container + /// * `env_vars` - Environment variables to set + /// * `extra_docker_args` - Additional Docker arguments + /// + /// # Returns + /// Some(output) if the command succeeded, None if it failed + pub async fn run_container_command_with_output( + &self, + image: &str, + command: &str, + env_vars: HashMap<String, String>, + extra_docker_args: &[String], + ) -> Result<Option<String>> { + let docker_cmd = self.build_docker_command(image, command, &env_vars, extra_docker_args)?; + + if self.verbose { + print_info( + &format!("Running on remote (capturing output): {}", docker_cmd), + OutputLevel::Verbose, + ); + } + + // Execute on remote and capture output + match self.ssh.run_command(&docker_cmd).await { + Ok(output) => Ok(Some(output)), + Err(_) => Ok(None), + } + } + + /// Build the docker run command string + fn build_docker_command( + &self, + image: &str, + command: &str, + env_vars: &HashMap<String, String>, + extra_docker_args: &[String], + ) -> Result<String> { let src_volume = self .remote_src_volume .as_ref() .context("Source volume not created")?; let state_volume = self .remote_state_volume .as_ref() .context("State volume not created")?; - print_info( - &format!("▶ Executing on {}...", self.remote_host.ssh_target()), - OutputLevel::Normal, - ); - println!(); - // Build the docker run command with --rm to ensure cleanup // Mount src volume to /mnt/src so bindfs can remap to /opt/src with UID translation // Mount state volume directly to /opt/_avocado (no UID mapping needed) @@ -418,7 +499,7 @@ ); // Add environment variables - for (key, value) in &env_vars { + for (key, value) in env_vars { docker_cmd.push_str(&format!(" -e {}={}", key, shell_escape(value))); } @@ -449,15 +530,7 @@ // Add image and command docker_cmd.push_str(&format!(" {} bash -c {}", image, shell_escape(command))); - if self.verbose { - print_info( - &format!("Running on remote: {}", docker_cmd), - OutputLevel::Verbose, - ); - } - - // Execute on remote - self.ssh.run_command_interactive(&docker_cmd).await + Ok(docker_cmd) } /// Check if the context is still active (not yet torn down) @@ -476,6 +549,7 @@ /// - Remove NFS-backed volumes from remote /// - Close SSH tunnel (if any) /// - Stop NFS server + /// - Stop SSH ControlMaster /// /// After calling this method, the context should not be used for running commands. /// This method can be called multiple times safely (subsequent calls are no-ops).
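// --- Illustrative sketch (editorial annotation, not part of this patch) ---
// A caller-side view of the captured-output path added above. The image name
// and the query command are placeholders; real callers (such as the install
// flow) pass their own values and parse the returned text.
async fn query_remote_packages(ctx: &RunsOnContext) -> anyhow::Result<Option<String>> {
    use std::collections::HashMap;
    let env: HashMap<String, String> = HashMap::new();
    // Ok(None) signals "the remote command failed"; callers can treat that as
    // "no version data available" instead of aborting the whole run.
    ctx.run_container_command_with_output(
        "example.invalid/avocado-sdk:latest", // placeholder image
        "rpm -qa",                            // placeholder query
        env,
        &[],
    )
    .await
}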
@@ -484,12 +558,13 @@ impl RunsOnContext { if !self.is_active() && self.remote_src_volume.is_none() && self.remote_state_volume.is_none() + && self.ssh_master.is_none() { return Ok(()); } println!(); - print_info("🧹 Cleaning up remote resources...", OutputLevel::Normal); + print_info("Cleaning up remote resources...", OutputLevel::Normal); // Close signing tunnel first #[cfg(unix)] @@ -506,11 +581,14 @@ .await; } - // Remove remote volumes - let remote_vm = RemoteVolumeManager::new( - SshClient::new(self.remote_host.clone()).with_verbose(self.verbose), - self.container_tool.clone(), - ); + // Remove remote volumes - use the existing SSH client which has ControlMaster + // Create a new client from the master if available, otherwise use a plain one + let cleanup_ssh = if let Some(ref master) = self.ssh_master { + master.create_client() + } else { + SshClient::new(self.remote_host.clone()).with_verbose(self.verbose) + }; + let remote_vm = RemoteVolumeManager::new(cleanup_ssh, self.container_tool.clone()); let mut cleanup_errors = Vec::new(); @@ -548,16 +626,26 @@ } } + // Stop SSH ControlMaster (do this last since other cleanup uses it) + if self.verbose { + print_info("Closing SSH connection...", OutputLevel::Normal); + } + if let Some(mut master) = self.ssh_master.take() { + if let Err(e) = master.stop().await { + cleanup_errors.push(format!("Failed to stop SSH ControlMaster: {}", e)); + } + } + // Report any cleanup errors (but don't fail - cleanup is best-effort) if !cleanup_errors.is_empty() { for error in &cleanup_errors { - print_info(&format!("⚠ {}", error), OutputLevel::Normal); + print_info(&format!("Warning: {}", error), OutputLevel::Normal); } } print_success( &format!( - "🌐 Remote volumes cleaned up on {}", + "Remote resources cleaned up on {}.", self.remote_host.ssh_target() ), OutputLevel::Normal,