diff --git a/Cargo.lock b/Cargo.lock index a3db91e..ee97179 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -130,7 +130,7 @@ checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" [[package]] name = "avocado-cli" -version = "0.22.1" +version = "0.23.0" dependencies = [ "anyhow", "base64", @@ -159,7 +159,6 @@ dependencies = [ "thiserror", "tokio", "tokio-test", - "toml", "uuid", "walkdir", ] @@ -1509,15 +1508,6 @@ dependencies = [ "serde_core", ] -[[package]] -name = "serde_spanned" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8bbf91e5a4d6315eee45e704372590b30e260ee83af6639d64557f51b067776" -dependencies = [ - "serde_core", -] - [[package]] name = "serde_urlencoded" version = "0.7.1" @@ -1814,45 +1804,6 @@ dependencies = [ "tokio", ] -[[package]] -name = "toml" -version = "0.9.9+spec-1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb5238e643fc34a1d5d7e753e1532a91912d74b63b92b3ea51fde8d1b7bc79dd" -dependencies = [ - "indexmap", - "serde_core", - "serde_spanned", - "toml_datetime", - "toml_parser", - "toml_writer", - "winnow", -] - -[[package]] -name = "toml_datetime" -version = "0.7.4+spec-1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe3cea6b2aa3b910092f6abd4053ea464fab5f9c170ba5e9a6aead16ec4af2b6" -dependencies = [ - "serde_core", -] - -[[package]] -name = "toml_parser" -version = "1.0.5+spec-1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c03bee5ce3696f31250db0bbaff18bc43301ce0e8db2ed1f07cbb2acf89984c" -dependencies = [ - "winnow", -] - -[[package]] -name = "toml_writer" -version = "1.0.5+spec-1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9cd6190959dce0994aa8970cd32ab116d1851ead27e866039acaf2524ce44fa" - [[package]] name = "tower" version = "0.5.2" @@ -2367,12 +2318,6 @@ version = "0.53.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "d6bbff5f0aada427a1e5a6da5f1f98158182f26556f345ac9e04d36d0ebed650" -[[package]] -name = "winnow" -version = "0.7.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a5364e9d77fcdeeaa6062ced926ee3381faa2ee02d3eb83a5c27a8825540829" - [[package]] name = "wit-bindgen" version = "0.46.0" diff --git a/Cargo.toml b/Cargo.toml index 4c05524..fbf608a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "avocado-cli" -version = "0.22.1" +version = "0.23.0" edition = "2021" description = "Command line interface for Avocado." authors = ["Avocado"] @@ -19,7 +19,6 @@ path = "src/lib.rs" [dependencies] serde = { version = "1.0", features = ["derive"] } -toml = "0.9" serde_yaml = "0.9" anyhow = "1.0" clap = { version = "4.0", features = ["derive"] } diff --git a/configs/advantech/icam-540.yaml b/configs/advantech/icam-540.yaml index 02cbe0a..ec56752 100644 --- a/configs/advantech/icam-540.yaml +++ b/configs/advantech/icam-540.yaml @@ -5,32 +5,35 @@ src_dir: ../../ distro: channel: apollo-edge version: 0.1.0 -runtime: +runtimes: dev: - dependencies: + extensions: + - avocado-ext-dev + - avocado-bsp-{{ avocado.target }} + - config + - app + packages: avocado-img-bootfiles: '*' avocado-img-rootfs: '*' avocado-img-initramfs: '*' - avocado-ext-dev: - ext: avocado-ext-dev - vsn: "*" - avocado-bsp: - ext: avocado-bsp-{{ avocado.target }} - vsn: "*" - config: - ext: config - app: - ext: app icam-540: - dependencies: + packages: avocado-img-tegraflash: '*' -ext: +extensions: + avocado-ext-dev: + source: + type: package + version: '*' + avocado-bsp-{{ avocado.target }}: + source: + type: package + version: '*' app: types: - sysext - confext version: 0.1.0 - dependencies: + packages: i2c-tools: '*' pylon: '*' pylon-dev: '*' @@ -58,11 +61,11 @@ ext: password: '' sdk: image: docker.io/avocadolinux/sdk:{{ config.distro.channel }} - dependencies: + packages: 
avocado-sdk-toolchain: "{{ config.distro.version }}" nativesdk-avocado-ext-dev: "*" icam-540: - dependencies: + packages: nativesdk-util-linux-mount: '*' container_args: - --network=host @@ -71,7 +74,7 @@ sdk: - -v - /sys:/sys:ro - --privileged -provision: +provision_profiles: tegraflash: container_args: - -v diff --git a/configs/default.yaml b/configs/default.yaml index 581c398..8775bad 100644 --- a/configs/default.yaml +++ b/configs/default.yaml @@ -16,31 +16,39 @@ distro: ## Runtimes ## -runtime: +runtimes: dev: - dependencies: + extensions: + - avocado-ext-dev + - avocado-ext-sshd-dev + - avocado-bsp-{{ avocado.target }} + - config + - app + packages: avocado-img-bootfiles: "*" avocado-img-rootfs: "*" avocado-img-initramfs: "*" - avocado-ext-dev: - ext: avocado-ext-dev - vsn: "*" - avocado-ext-sshd-dev: - ext: avocado-ext-sshd-dev - vsn: "*" - avocado-bsp: - ext: avocado-bsp-{{ avocado.target }} - vsn: "*" - config: - ext: config - app: - ext: app ## ## Extensions ## -ext: +extensions: + avocado-ext-dev: + source: + type: package + version: "*" + + avocado-ext-sshd-dev: + source: + type: package + version: "*" + + avocado-bsp-{{ avocado.target }}: + source: + type: package + version: "*" + # Generated default app extension # Use or modify this to install dependencies and or include sdk compiled code app: @@ -50,7 +58,7 @@ ext: version: "0.1.0" # Install application dependencies - # dependencies: + # packages: #curl = "*" #iperf3 = "*" @@ -74,7 +82,7 @@ ext: sdk: image: "docker.io/avocadolinux/sdk:{{ config.distro.channel }}" - dependencies: + packages: avocado-sdk-toolchain: "{{ config.distro.version }}" nativesdk-avocado-ext-dev: "*" @@ -89,7 +97,7 @@ sdk: # When provisioning using usb or sd provisioning profiles, set extra sdk # container arguments to allow access to these devices -provision: +provision_profiles: usb: container_args: - -v diff --git a/configs/nvidia/jetson-orin-nano-devkit.yaml b/configs/nvidia/jetson-orin-nano-devkit.yaml index 
ed3b21d..012a3f1 100644 --- a/configs/nvidia/jetson-orin-nano-devkit.yaml +++ b/configs/nvidia/jetson-orin-nano-devkit.yaml @@ -4,29 +4,30 @@ supported_targets: distro: channel: apollo-edge version: 0.1.0 -runtime: +runtimes: dev: - dependencies: + extensions: + - avocado-bsp-{{ avocado.target }} + - config + - app + packages: avocado-img-bootfiles: '*' avocado-img-rootfs: '*' avocado-img-initramfs: '*' - avocado-bsp: - ext: avocado-bsp-{{ avocado.target }} - vsn: "*" - config: - ext: config - app: - ext: app jetson-orin-nano-devkit: - dependencies: + packages: avocado-img-tegraflash: '*' -ext: +extensions: + avocado-bsp-{{ avocado.target }}: + source: + type: package + version: '*' app: types: - sysext - confext version: 0.1.0 - dependencies: {} + packages: {} config: types: - confext @@ -36,7 +37,7 @@ ext: password: '' sdk: image: docker.io/avocadolinux/sdk:{{ config.distro.channel }} - dependencies: + packages: avocado-sdk-toolchain: "{{ config.distro.version }}" nativesdk-avocado-ext-dev: "*" jetson-orin-nano-devkit: @@ -47,9 +48,9 @@ sdk: - -v - /sys:/sys:ro - --privileged - dependencies: + packages: nativesdk-util-linux-mount: '*' -provision: +provision_profiles: tegraflash: container_args: - -v diff --git a/configs/raspberry-pi/raspberrypi-4-model-b.yaml b/configs/raspberry-pi/raspberrypi-4-model-b.yaml index ab434f7..19ca35b 100644 --- a/configs/raspberry-pi/raspberrypi-4-model-b.yaml +++ b/configs/raspberry-pi/raspberrypi-4-model-b.yaml @@ -4,32 +4,37 @@ supported_targets: distro: channel: apollo-edge version: 0.1.0 -runtime: +runtimes: dev: - dependencies: + extensions: + - avocado-ext-dev + - avocado-ext-sshd-dev + - avocado-bsp-{{ avocado.target }} + - config + - app + packages: avocado-img-bootfiles: '*' avocado-img-rootfs: '*' avocado-img-initramfs: '*' - avocado-ext-dev: - ext: avocado-ext-dev - vsn: '*' - avocado-ext-sshd-dev: - ext: avocado-ext-sshd-dev - vsn: '*' - avocado-bsp: - ext: avocado-bsp-{{ avocado.target }} - vsn: "*" - config: - ext: 
config - app: - ext: app -ext: +extensions: + avocado-ext-dev: + source: + type: package + version: '*' + avocado-ext-sshd-dev: + source: + type: package + version: '*' + avocado-bsp-{{ avocado.target }}: + source: + type: package + version: '*' app: types: - sysext - confext version: 0.1.0 - dependencies: {} + packages: {} config: types: - confext @@ -39,13 +44,13 @@ ext: password: '' sdk: image: docker.io/avocadolinux/sdk:{{ config.distro.channel }} - dependencies: + packages: avocado-sdk-toolchain: "{{ config.distro.version }}" nativesdk-avocado-ext-dev: "*" raspberrypi4: container_args: - --network=host -provision: +provision_profiles: img: container_args: - -v diff --git a/configs/raspberry-pi/raspberrypi-5.yaml b/configs/raspberry-pi/raspberrypi-5.yaml index bbc7e48..a66c175 100644 --- a/configs/raspberry-pi/raspberrypi-5.yaml +++ b/configs/raspberry-pi/raspberrypi-5.yaml @@ -4,32 +4,37 @@ supported_targets: distro: channel: apollo-edge version: 0.1.0 -runtime: +runtimes: dev: - dependencies: + extensions: + - avocado-ext-dev + - avocado-ext-sshd-dev + - avocado-bsp-{{ avocado.target }} + - config + - app + packages: avocado-img-bootfiles: '*' avocado-img-rootfs: '*' avocado-img-initramfs: '*' - avocado-ext-dev: - ext: avocado-ext-dev - vsn: '*' - avocado-ext-sshd-dev: - ext: avocado-ext-sshd-dev - vsn: '*' - avocado-bsp: - ext: avocado-bsp-{{ avocado.target }} - vsn: "*" - config: - ext: config - app: - ext: app -ext: +extensions: + avocado-ext-dev: + source: + type: package + version: '*' + avocado-ext-sshd-dev: + source: + type: package + version: '*' + avocado-bsp-{{ avocado.target }}: + source: + type: package + version: '*' app: types: - sysext - confext version: 0.1.0 - dependencies: {} + packages: {} config: types: - confext @@ -39,13 +44,13 @@ ext: password: '' sdk: image: docker.io/avocadolinux/sdk:{{ config.distro.channel }} - dependencies: + packages: avocado-sdk-toolchain: "{{ config.distro.version }}" nativesdk-avocado-ext-dev: "*" raspberrypi5: 
container_args: - --network=host -provision: +provision_profiles: img: container_args: - -v diff --git a/configs/seeed/reterminal-dm.yaml b/configs/seeed/reterminal-dm.yaml index 78fb998..26c1de1 100644 --- a/configs/seeed/reterminal-dm.yaml +++ b/configs/seeed/reterminal-dm.yaml @@ -4,32 +4,37 @@ supported_targets: distro: channel: apollo-edge version: 0.1.0 -runtime: +runtimes: dev: - dependencies: + extensions: + - avocado-ext-dev + - avocado-ext-sshd-dev + - avocado-bsp-{{ avocado.target }} + - config + - app + packages: avocado-img-bootfiles: '*' avocado-img-rootfs: '*' avocado-img-initramfs: '*' - avocado-ext-dev: - ext: avocado-ext-dev - vsn: '*' - avocado-ext-sshd-dev: - ext: avocado-ext-sshd-dev - vsn: '*' - avocado-bsp: - ext: avocado-bsp-{{ avocado.target }} - vsn: "*" - config: - ext: config - app: - ext: app -ext: +extensions: + avocado-ext-dev: + source: + type: package + version: '*' + avocado-ext-sshd-dev: + source: + type: package + version: '*' + avocado-bsp-{{ avocado.target }}: + source: + type: package + version: '*' app: types: - sysext - confext version: 0.1.0 - dependencies: {} + packages: {} config: types: - confext @@ -39,13 +44,13 @@ ext: password: '' sdk: image: docker.io/avocadolinux/sdk:{{ config.distro.channel }} - dependencies: + packages: avocado-sdk-toolchain: "{{ config.distro.version }}" nativesdk-avocado-ext-dev: "*" reterminal-dm: container_args: - --network=host -provision: +provision_profiles: usb: container_args: - -v diff --git a/configs/seeed/reterminal.yaml b/configs/seeed/reterminal.yaml index e0f374d..21166bc 100644 --- a/configs/seeed/reterminal.yaml +++ b/configs/seeed/reterminal.yaml @@ -4,32 +4,37 @@ supported_targets: distro: channel: apollo-edge version: 0.1.0 -runtime: +runtimes: dev: - dependencies: + extensions: + - avocado-ext-dev + - avocado-ext-sshd-dev + - avocado-bsp-{{ avocado.target }} + - config + - app + packages: avocado-img-bootfiles: '*' avocado-img-rootfs: '*' avocado-img-initramfs: '*' - 
avocado-ext-dev: - ext: avocado-ext-dev - vsn: '*' - avocado-ext-sshd-dev: - ext: avocado-ext-sshd-dev - vsn: '*' - avocado-bsp: - ext: avocado-bsp-{{ avocado.target }} - vsn: "*" - config: - ext: config - app: - ext: app -ext: +extensions: + avocado-ext-dev: + source: + type: package + version: '*' + avocado-ext-sshd-dev: + source: + type: package + version: '*' + avocado-bsp-{{ avocado.target }}: + source: + type: package + version: '*' app: types: - sysext - confext version: 0.1.0 - dependencies: {} + packages: {} config: types: - confext @@ -39,13 +44,13 @@ ext: password: '' sdk: image: docker.io/avocadolinux/sdk:{{ config.distro.channel }} - dependencies: + packages: avocado-sdk-toolchain: "{{ config.distro.version }}" nativesdk-avocado-ext-dev: "*" reterminal: container_args: - --network=host -provision: +provision_profiles: usb: container_args: - -v diff --git a/src/commands/build.rs b/src/commands/build.rs index af8a57e..1781b75 100644 --- a/src/commands/build.rs +++ b/src/commands/build.rs @@ -2,25 +2,31 @@ use anyhow::{Context, Result}; use std::collections::HashSet; +use std::sync::Arc; use crate::commands::{ ext::{ExtBuildCommand, ExtImageCommand}, runtime::RuntimeBuildCommand, }; use crate::utils::{ - config::Config, + config::{ComposedConfig, Config, ExtensionSource}, output::{print_info, print_success, OutputLevel}, }; -/// Represents an extension dependency that can be either local, external, or version-based +/// Represents an extension dependency that can be either local, external, remote, or version-based #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub enum ExtensionDependency { /// Extension defined in the main config file Local(String), - /// Extension defined in an external config file + /// Extension defined in an external config file (deprecated) External { name: String, config_path: String }, /// Extension resolved via DNF with a version specification Versioned { name: String, version: String }, + /// Remote extension with source field 
(repo, git, or path) + Remote { + name: String, + source: ExtensionSource, + }, } /// Implementation of the 'build' command that runs all build subcommands. @@ -45,6 +51,10 @@ pub struct BuildCommand { pub runs_on: Option, /// NFS port for remote execution pub nfs_port: Option, + /// SDK container architecture for cross-arch emulation + pub sdk_arch: Option, + /// Pre-composed configuration to avoid reloading + composed_config: Option>, } impl BuildCommand { @@ -69,6 +79,8 @@ impl BuildCommand { no_stamps: false, runs_on: None, nfs_port: None, + sdk_arch: None, + composed_config: None, } } @@ -85,32 +97,46 @@ impl BuildCommand { self } + /// Set SDK container architecture for cross-arch emulation + pub fn with_sdk_arch(mut self, sdk_arch: Option) -> Self { + self.sdk_arch = sdk_arch; + self + } + + /// Set pre-composed configuration to avoid reloading + #[allow(dead_code)] + pub fn with_composed_config(mut self, config: Arc) -> Self { + self.composed_config = Some(config); + self + } + /// Execute the build command pub async fn execute(&self) -> Result<()> { - // Early target validation - load basic config first - let basic_config = Config::load(&self.config_path) - .with_context(|| format!("Failed to load config from {}", self.config_path))?; - let target = - crate::utils::target::validate_and_log_target(self.target.as_deref(), &basic_config)?; - - // Load the composed configuration (merges external configs, applies interpolation) - let composed = Config::load_composed(&self.config_path, self.target.as_deref()) - .with_context(|| format!("Failed to load composed config from {}", self.config_path))?; + // Use provided config or load fresh + let composed = match &self.composed_config { + Some(cc) => Arc::clone(cc), + None => Arc::new( + Config::load_composed(&self.config_path, self.target.as_deref()).with_context( + || format!("Failed to load composed config from {}", self.config_path), + )?, + ), + }; let config = &composed.config; let parsed = 
&composed.merged_value; + let target = crate::utils::target::validate_and_log_target(self.target.as_deref(), config)?; // If a specific extension is requested, build only that extension if let Some(ref ext_name) = self.extension { return self - .build_single_extension(config, parsed, ext_name, &target) + .build_single_extension(&composed, ext_name, &target) .await; } // If a specific runtime is requested, build only that runtime and its dependencies if let Some(ref runtime_name) = self.runtime { return self - .build_single_runtime(config, parsed, runtime_name, &target) + .build_single_runtime(&composed, runtime_name, &target) .await; } @@ -157,7 +183,9 @@ impl BuildCommand { self.dnf_args.clone(), ) .with_no_stamps(self.no_stamps) - .with_runs_on(self.runs_on.clone(), self.nfs_port); + .with_runs_on(self.runs_on.clone(), self.nfs_port) + .with_sdk_arch(self.sdk_arch.clone()) + .with_composed_config(Arc::clone(&composed)); ext_build_cmd.execute().await.with_context(|| { format!("Failed to build extension '{extension_name}'") })?; @@ -199,6 +227,31 @@ impl BuildCommand { } // Versioned extensions are installed via DNF and don't need building } + ExtensionDependency::Remote { name, source: _ } => { + if self.verbose { + print_info( + &format!("Building remote extension '{name}'"), + OutputLevel::Normal, + ); + } + + // Build remote extension - ExtBuildCommand will load config from container + let ext_build_cmd = ExtBuildCommand::new( + name.clone(), + self.config_path.clone(), + self.verbose, + self.target.clone(), + self.container_args.clone(), + self.dnf_args.clone(), + ) + .with_no_stamps(self.no_stamps) + .with_runs_on(self.runs_on.clone(), self.nfs_port) + .with_sdk_arch(self.sdk_arch.clone()) + .with_composed_config(Arc::clone(&composed)); + ext_build_cmd.execute().await.with_context(|| { + format!("Failed to build remote extension '{name}'") + })?; + } } } } else { @@ -227,7 +280,9 @@ impl BuildCommand { self.dnf_args.clone(), ) .with_no_stamps(self.no_stamps) 
- .with_runs_on(self.runs_on.clone(), self.nfs_port); + .with_runs_on(self.runs_on.clone(), self.nfs_port) + .with_sdk_arch(self.sdk_arch.clone()) + .with_composed_config(Arc::clone(&composed)); ext_image_cmd.execute().await.with_context(|| { format!("Failed to create image for extension '{extension_name}'") })?; @@ -258,6 +313,31 @@ impl BuildCommand { format!("Failed to create image for versioned extension '{name}' version '{version}'") })?; } + ExtensionDependency::Remote { name, source: _ } => { + if self.verbose { + print_info( + &format!("Creating image for remote extension '{name}'"), + OutputLevel::Normal, + ); + } + + // Create image for remote extension - ExtImageCommand will load config from container + let ext_image_cmd = ExtImageCommand::new( + name.clone(), + self.config_path.clone(), + self.verbose, + self.target.clone(), + self.container_args.clone(), + self.dnf_args.clone(), + ) + .with_no_stamps(self.no_stamps) + .with_runs_on(self.runs_on.clone(), self.nfs_port) + .with_sdk_arch(self.sdk_arch.clone()) + .with_composed_config(Arc::clone(&composed)); + ext_image_cmd.execute().await.with_context(|| { + format!("Failed to create image for remote extension '{name}'") + })?; + } } } } else { @@ -291,7 +371,9 @@ impl BuildCommand { self.dnf_args.clone(), ) .with_no_stamps(self.no_stamps) - .with_runs_on(self.runs_on.clone(), self.nfs_port); + .with_runs_on(self.runs_on.clone(), self.nfs_port) + .with_sdk_arch(self.sdk_arch.clone()) + .with_composed_config(Arc::clone(&composed)); runtime_build_cmd .execute() .await @@ -310,7 +392,7 @@ impl BuildCommand { target: &str, ) -> Result> { let runtime_section = parsed - .get("runtime") + .get("runtimes") .and_then(|r| r.as_mapping()) .ok_or_else(|| anyhow::anyhow!("No runtime configuration found"))?; @@ -379,55 +461,54 @@ impl BuildCommand { runtimes: &[String], target: &str, ) -> Result> { + use crate::utils::interpolation::interpolate_name; + let mut required_extensions = HashSet::new(); - let mut visited = 
HashSet::new(); // For cycle detection + let _visited = HashSet::::new(); // For cycle detection // If no runtimes are found for this target, don't build any extensions if runtimes.is_empty() { return Ok(vec![]); } - let _runtime_section = parsed.get("runtime").and_then(|r| r.as_mapping()).unwrap(); + // Build a map of interpolated ext names to their source config + // This is needed because ext section keys may contain templates like {{ avocado.target }} + let mut ext_sources: std::collections::HashMap> = + std::collections::HashMap::new(); + if let Some(ext_section) = parsed.get("extensions").and_then(|e| e.as_mapping()) { + for (ext_key, ext_config) in ext_section { + if let Some(raw_name) = ext_key.as_str() { + // Interpolate the extension name with the target + let interpolated_name = interpolate_name(raw_name, target); + // Use parse_extension_source which properly deserializes the source field + let source = Config::parse_extension_source(&interpolated_name, ext_config) + .ok() + .flatten(); + ext_sources.insert(interpolated_name, source); + } + } + } for runtime_name in runtimes { // Get merged runtime config for this target let merged_runtime = config.get_merged_runtime_config(runtime_name, target, &self.config_path)?; if let Some(merged_value) = merged_runtime { - if let Some(dependencies) = merged_value - .get("dependencies") - .and_then(|d| d.as_mapping()) + // Read extensions from the new `extensions` array format + if let Some(extensions) = + merged_value.get("extensions").and_then(|e| e.as_sequence()) { - for (_dep_name, dep_spec) in dependencies { - // Check for extension dependency - if let Some(ext_name) = dep_spec.get("ext").and_then(|v| v.as_str()) { - // Check if this is a versioned extension (has vsn field) - if let Some(version) = dep_spec.get("vsn").and_then(|v| v.as_str()) { - let ext_dep = ExtensionDependency::Versioned { + for ext in extensions { + if let Some(ext_name) = ext.as_str() { + // Check if this extension has a source: field 
(remote extension) + if let Some(Some(source)) = ext_sources.get(ext_name) { + // Remote extension with source field + required_extensions.insert(ExtensionDependency::Remote { name: ext_name.to_string(), - version: version.to_string(), - }; - required_extensions.insert(ext_dep); - } - // Check if this is an external extension (has config field) - else if let Some(external_config) = - dep_spec.get("config").and_then(|v| v.as_str()) - { - let ext_dep = ExtensionDependency::External { - name: ext_name.to_string(), - config_path: external_config.to_string(), - }; - required_extensions.insert(ext_dep.clone()); - - // Recursively find nested external extension dependencies - self.find_nested_external_extensions( - config, - &ext_dep, - &mut required_extensions, - &mut visited, - )?; + source: source.clone(), + }); } else { - // Local extension + // Local extension (defined in ext section without source, or not in ext section) required_extensions .insert(ExtensionDependency::Local(ext_name.to_string())); } @@ -443,133 +524,19 @@ impl BuildCommand { ExtensionDependency::Local(name) => name, ExtensionDependency::External { name, .. } => name, ExtensionDependency::Versioned { name, .. } => name, + ExtensionDependency::Remote { name, .. } => name, }; let name_b = match b { ExtensionDependency::Local(name) => name, ExtensionDependency::External { name, .. } => name, ExtensionDependency::Versioned { name, .. } => name, + ExtensionDependency::Remote { name, .. 
} => name, }; name_a.cmp(name_b) }); Ok(extensions) } - /// Recursively find nested external extension dependencies - fn find_nested_external_extensions( - &self, - config: &Config, - ext_dep: &ExtensionDependency, - required_extensions: &mut HashSet, - visited: &mut HashSet, - ) -> Result<()> { - let (ext_name, ext_config_path) = match ext_dep { - ExtensionDependency::External { name, config_path } => (name, config_path), - ExtensionDependency::Local(_) => return Ok(()), // Local extensions don't have nested external deps - ExtensionDependency::Versioned { .. } => return Ok(()), // Versioned extensions don't have nested deps - }; - - // Cycle detection: check if we've already processed this extension - let ext_key = format!("{ext_name}:{ext_config_path}"); - if visited.contains(&ext_key) { - if self.verbose { - print_info( - &format!("Skipping already processed extension '{ext_name}' to avoid cycles"), - OutputLevel::Normal, - ); - } - return Ok(()); - } - visited.insert(ext_key); - - // Load the external extension configuration - let resolved_external_config_path = - config.resolve_path_relative_to_src_dir(&self.config_path, ext_config_path); - let external_extensions = - config.load_external_extensions(&self.config_path, ext_config_path)?; - - let extension_config = external_extensions.get(ext_name).ok_or_else(|| { - anyhow::anyhow!( - "Extension '{ext_name}' not found in external config file '{ext_config_path}'" - ) - })?; - - // Load the nested config file to get its src_dir setting - let nested_config_content = std::fs::read_to_string(&resolved_external_config_path) - .with_context(|| { - format!( - "Failed to read nested config file: {}", - resolved_external_config_path.display() - ) - })?; - let nested_config: serde_yaml::Value = serde_yaml::from_str(&nested_config_content) - .with_context(|| { - format!( - "Failed to parse nested config file: {}", - resolved_external_config_path.display() - ) - })?; - - // Create a temporary Config object for the nested 
config to handle its src_dir - let nested_config_obj = serde_yaml::from_value::(nested_config.clone())?; - - // Check if this external extension has dependencies - if let Some(dependencies) = extension_config - .get("dependencies") - .and_then(|d| d.as_mapping()) - { - for (_dep_name, dep_spec) in dependencies { - // Check for nested extension dependency - if let Some(nested_ext_name) = dep_spec.get("ext").and_then(|v| v.as_str()) { - // Check if this is a nested external extension (has config field) - if let Some(nested_external_config) = - dep_spec.get("config").and_then(|v| v.as_str()) - { - // Resolve the nested config path relative to the nested config's src_dir - let nested_config_path = nested_config_obj - .resolve_path_relative_to_src_dir( - &resolved_external_config_path, - nested_external_config, - ); - - let nested_ext_dep = ExtensionDependency::External { - name: nested_ext_name.to_string(), - config_path: nested_config_path.to_string_lossy().to_string(), - }; - - // Add the nested extension to required extensions - required_extensions.insert(nested_ext_dep.clone()); - - if self.verbose { - print_info( - &format!("Found nested external extension '{nested_ext_name}' required by '{ext_name}' at '{}'", nested_config_path.display()), - OutputLevel::Normal, - ); - } - - // Recursively process the nested extension - self.find_nested_external_extensions( - config, - &nested_ext_dep, - required_extensions, - visited, - )?; - } else { - // This is a local extension dependency within the external config - // We don't need to process it further as it will be handled during build - if self.verbose { - print_info( - &format!("Found local extension dependency '{nested_ext_name}' in external extension '{ext_name}'"), - OutputLevel::Normal, - ); - } - } - } - } - } - - Ok(()) - } - /// Build an external extension using its own config file async fn build_external_extension( &self, @@ -617,7 +584,8 @@ impl BuildCommand { self.dnf_args.clone(), ) 
.with_no_stamps(self.no_stamps) - .with_runs_on(self.runs_on.clone(), self.nfs_port); + .with_runs_on(self.runs_on.clone(), self.nfs_port) + .with_sdk_arch(self.sdk_arch.clone()); // Execute the extension build using the external config match ext_build_cmd.execute().await { @@ -679,7 +647,8 @@ impl BuildCommand { self.dnf_args.clone(), ) .with_no_stamps(self.no_stamps) - .with_runs_on(self.runs_on.clone(), self.nfs_port); + .with_runs_on(self.runs_on.clone(), self.nfs_port) + .with_sdk_arch(self.sdk_arch.clone()); // Execute the image creation ext_image_cmd.execute().await.with_context(|| { @@ -893,18 +862,22 @@ echo "Successfully created image for versioned extension '$EXT_NAME-$EXT_VERSION /// Build a single extension without building runtimes async fn build_single_extension( &self, - config: &Config, - parsed: &serde_yaml::Value, + composed: &Arc, extension_name: &str, target: &str, ) -> Result<()> { + let config = &composed.config; + let parsed = &composed.merged_value; + print_info( &format!("Building single extension '{extension_name}' for target '{target}'"), OutputLevel::Normal, ); // Check if this is a local extension or needs to be found in external configs - let ext_config = parsed.get("ext").and_then(|ext| ext.get(extension_name)); + let ext_config = parsed + .get("extensions") + .and_then(|ext| ext.get(extension_name)); let extension_dep = if ext_config.is_some() { // Local extension @@ -936,7 +909,9 @@ echo "Successfully created image for versioned extension '$EXT_NAME-$EXT_VERSION self.dnf_args.clone(), ) .with_no_stamps(self.no_stamps) - .with_runs_on(self.runs_on.clone(), self.nfs_port); + .with_runs_on(self.runs_on.clone(), self.nfs_port) + .with_sdk_arch(self.sdk_arch.clone()) + .with_composed_config(Arc::clone(composed)); ext_build_cmd .execute() .await @@ -955,6 +930,24 @@ echo "Successfully created image for versioned extension '$EXT_NAME-$EXT_VERSION "Cannot build individual versioned extension '{name}' version '{version}'. 
Versioned extensions are installed via DNF." )); } + ExtensionDependency::Remote { name, source: _ } => { + let ext_build_cmd = ExtBuildCommand::new( + name.clone(), + self.config_path.clone(), + self.verbose, + self.target.clone(), + self.container_args.clone(), + self.dnf_args.clone(), + ) + .with_no_stamps(self.no_stamps) + .with_runs_on(self.runs_on.clone(), self.nfs_port) + .with_sdk_arch(self.sdk_arch.clone()) + .with_composed_config(Arc::clone(composed)); + ext_build_cmd + .execute() + .await + .with_context(|| format!("Failed to build remote extension '{name}'"))?; + } } // Step 2: Create extension image @@ -974,7 +967,9 @@ echo "Successfully created image for versioned extension '$EXT_NAME-$EXT_VERSION self.dnf_args.clone(), ) .with_no_stamps(self.no_stamps) - .with_runs_on(self.runs_on.clone(), self.nfs_port); + .with_runs_on(self.runs_on.clone(), self.nfs_port) + .with_sdk_arch(self.sdk_arch.clone()) + .with_composed_config(Arc::clone(composed)); ext_image_cmd.execute().await.with_context(|| { format!("Failed to create image for extension '{ext_name}'") })?; @@ -998,6 +993,23 @@ echo "Successfully created image for versioned extension '$EXT_NAME-$EXT_VERSION "Cannot create image for individual versioned extension '{name}' version '{version}'. Versioned extensions are installed via DNF." 
)); } + ExtensionDependency::Remote { name, source: _ } => { + let ext_image_cmd = ExtImageCommand::new( + name.clone(), + self.config_path.clone(), + self.verbose, + self.target.clone(), + self.container_args.clone(), + self.dnf_args.clone(), + ) + .with_no_stamps(self.no_stamps) + .with_runs_on(self.runs_on.clone(), self.nfs_port) + .with_sdk_arch(self.sdk_arch.clone()) + .with_composed_config(Arc::clone(composed)); + ext_image_cmd.execute().await.with_context(|| { + format!("Failed to create image for remote extension '{name}'") + })?; + } } print_success( @@ -1010,11 +1022,13 @@ echo "Successfully created image for versioned extension '$EXT_NAME-$EXT_VERSION /// Build a single runtime and its required extensions async fn build_single_runtime( &self, - config: &Config, - parsed: &serde_yaml::Value, + composed: &Arc, runtime_name: &str, target: &str, ) -> Result<()> { + let config = &composed.config; + let parsed = &composed.merged_value; + print_info( &format!("Building single runtime '{runtime_name}' for target '{target}'"), OutputLevel::Normal, @@ -1022,7 +1036,7 @@ echo "Successfully created image for versioned extension '$EXT_NAME-$EXT_VERSION // Verify the runtime exists and is configured for this target let runtime_section = parsed - .get("runtime") + .get("runtimes") .and_then(|r| r.as_mapping()) .ok_or_else(|| anyhow::anyhow!("No runtime configuration found"))?; @@ -1091,7 +1105,9 @@ echo "Successfully created image for versioned extension '$EXT_NAME-$EXT_VERSION self.dnf_args.clone(), ) .with_no_stamps(self.no_stamps) - .with_runs_on(self.runs_on.clone(), self.nfs_port); + .with_runs_on(self.runs_on.clone(), self.nfs_port) + .with_sdk_arch(self.sdk_arch.clone()) + .with_composed_config(Arc::clone(composed)); ext_build_cmd.execute().await.with_context(|| { format!("Failed to build extension '{extension_name}'") })?; @@ -1106,7 +1122,9 @@ echo "Successfully created image for versioned extension '$EXT_NAME-$EXT_VERSION self.dnf_args.clone(), ) 
.with_no_stamps(self.no_stamps) - .with_runs_on(self.runs_on.clone(), self.nfs_port); + .with_runs_on(self.runs_on.clone(), self.nfs_port) + .with_sdk_arch(self.sdk_arch.clone()) + .with_composed_config(Arc::clone(composed)); ext_image_cmd.execute().await.with_context(|| { format!("Failed to create image for extension '{extension_name}'") })?; @@ -1152,6 +1170,48 @@ echo "Successfully created image for versioned extension '$EXT_NAME-$EXT_VERSION format!("Failed to create image for versioned extension '{name}' version '{version}'") })?; } + ExtensionDependency::Remote { name, source: _ } => { + if self.verbose { + print_info( + &format!("Building remote extension '{name}'"), + OutputLevel::Normal, + ); + } + + // Build remote extension + let ext_build_cmd = ExtBuildCommand::new( + name.clone(), + self.config_path.clone(), + self.verbose, + self.target.clone(), + self.container_args.clone(), + self.dnf_args.clone(), + ) + .with_no_stamps(self.no_stamps) + .with_runs_on(self.runs_on.clone(), self.nfs_port) + .with_sdk_arch(self.sdk_arch.clone()) + .with_composed_config(Arc::clone(composed)); + ext_build_cmd.execute().await.with_context(|| { + format!("Failed to build remote extension '{name}'") + })?; + + // Create extension image + let ext_image_cmd = ExtImageCommand::new( + name.clone(), + self.config_path.clone(), + self.verbose, + self.target.clone(), + self.container_args.clone(), + self.dnf_args.clone(), + ) + .with_no_stamps(self.no_stamps) + .with_runs_on(self.runs_on.clone(), self.nfs_port) + .with_sdk_arch(self.sdk_arch.clone()) + .with_composed_config(Arc::clone(composed)); + ext_image_cmd.execute().await.with_context(|| { + format!("Failed to create image for remote extension '{name}'") + })?; + } } } } else { @@ -1173,7 +1233,9 @@ echo "Successfully created image for versioned extension '$EXT_NAME-$EXT_VERSION self.dnf_args.clone(), ) .with_no_stamps(self.no_stamps) - .with_runs_on(self.runs_on.clone(), self.nfs_port); + 
.with_runs_on(self.runs_on.clone(), self.nfs_port) + .with_sdk_arch(self.sdk_arch.clone()) + .with_composed_config(Arc::clone(composed)); runtime_build_cmd .execute() .await @@ -1196,13 +1258,10 @@ echo "Successfully created image for versioned extension '$EXT_NAME-$EXT_VERSION let mut visited = HashSet::new(); // Check runtime dependencies for extensions - if let Some(dependencies) = runtime_config - .get("dependencies") - .and_then(|d| d.as_mapping()) - { + if let Some(dependencies) = runtime_config.get("packages").and_then(|d| d.as_mapping()) { for (_dep_name, dep_spec) in dependencies { // Check for extension dependency - if let Some(ext_name) = dep_spec.get("ext").and_then(|v| v.as_str()) { + if let Some(ext_name) = dep_spec.get("extensions").and_then(|v| v.as_str()) { // Check if this is a versioned extension (has vsn field) if let Some(version) = dep_spec.get("vsn").and_then(|v| v.as_str()) { let ext_dep = ExtensionDependency::Versioned { @@ -1252,11 +1311,13 @@ echo "Successfully created image for versioned extension '$EXT_NAME-$EXT_VERSION ExtensionDependency::Local(name) => name, ExtensionDependency::External { name, .. } => name, ExtensionDependency::Versioned { name, .. } => name, + ExtensionDependency::Remote { name, .. } => name, }; let name_b = match b { ExtensionDependency::Local(name) => name, ExtensionDependency::External { name, .. } => name, ExtensionDependency::Versioned { name, .. } => name, + ExtensionDependency::Remote { name, .. 
} => name, }; name_a.cmp(name_b) }); @@ -1277,7 +1338,7 @@ echo "Successfully created image for versioned extension '$EXT_NAME-$EXT_VERSION // Get all extensions from runtime dependencies (this will recursively traverse) let runtime_section = parsed - .get("runtime") + .get("runtimes") .and_then(|r| r.as_mapping()) .ok_or_else(|| anyhow::anyhow!("No runtime configuration found"))?; @@ -1292,13 +1353,13 @@ echo "Successfully created image for versioned extension '$EXT_NAME-$EXT_VERSION let merged_runtime = config.get_merged_runtime_config(runtime_name, target, &self.config_path)?; if let Some(merged_value) = merged_runtime { - if let Some(dependencies) = merged_value - .get("dependencies") - .and_then(|d| d.as_mapping()) + if let Some(dependencies) = + merged_value.get("packages").and_then(|d| d.as_mapping()) { for (_dep_name, dep_spec) in dependencies { // Check for extension dependency - if let Some(ext_name) = dep_spec.get("ext").and_then(|v| v.as_str()) { + if let Some(ext_name) = dep_spec.get("extensions").and_then(|v| v.as_str()) + { // Check if this is a versioned extension (has vsn field) if let Some(version) = dep_spec.get("vsn").and_then(|v| v.as_str()) { let ext_dep = ExtensionDependency::Versioned { @@ -1350,6 +1411,7 @@ echo "Successfully created image for versioned extension '$EXT_NAME-$EXT_VERSION ExtensionDependency::Local(name) => name, ExtensionDependency::External { name, .. } => name, ExtensionDependency::Versioned { name, .. } => name, + ExtensionDependency::Remote { name, .. } => name, }; if found_name == extension_name { @@ -1381,6 +1443,7 @@ echo "Successfully created image for versioned extension '$EXT_NAME-$EXT_VERSION ); } ExtensionDependency::Versioned { .. } => return Ok(()), // Versioned extensions don't have nested deps + ExtensionDependency::Remote { .. 
} => return Ok(()), // Remote extensions are handled separately }; // Cycle detection: check if we've already processed this extension @@ -1429,12 +1492,12 @@ echo "Successfully created image for versioned extension '$EXT_NAME-$EXT_VERSION // Check if this external extension has dependencies if let Some(dependencies) = extension_config - .get("dependencies") + .get("packages") .and_then(|d| d.as_mapping()) { for (_dep_name, dep_spec) in dependencies { // Check for nested extension dependency - if let Some(nested_ext_name) = dep_spec.get("ext").and_then(|v| v.as_str()) { + if let Some(nested_ext_name) = dep_spec.get("extensions").and_then(|v| v.as_str()) { // Check if this is a nested external extension (has config field) if let Some(nested_external_config) = dep_spec.get("config").and_then(|v| v.as_str()) @@ -1534,13 +1597,17 @@ echo "Successfully created image for versioned extension '$EXT_NAME-$EXT_VERSION visited.insert(ext_key); // Get the local extension configuration - if let Some(ext_config) = parsed_config.get("ext").and_then(|ext| ext.get(ext_name)) { + if let Some(ext_config) = parsed_config + .get("extensions") + .and_then(|ext| ext.get(ext_name)) + { // Check if this local extension has dependencies - if let Some(dependencies) = ext_config.get("dependencies").and_then(|d| d.as_mapping()) - { + if let Some(dependencies) = ext_config.get("packages").and_then(|d| d.as_mapping()) { for (_dep_name, dep_spec) in dependencies { // Check for extension dependency - if let Some(nested_ext_name) = dep_spec.get("ext").and_then(|v| v.as_str()) { + if let Some(nested_ext_name) = + dep_spec.get("extensions").and_then(|v| v.as_str()) + { // Check if this is an external extension (has config field) if let Some(external_config) = dep_spec.get("config").and_then(|v| v.as_str()) diff --git a/src/commands/clean.rs b/src/commands/clean.rs index ce39228..5acb7a2 100644 --- a/src/commands/clean.rs +++ b/src/commands/clean.rs @@ -263,11 +263,13 @@ fi ) })?; } else { + // 
Default behavior: automatically stop VS Code explorer containers + // (these are safe to stop), but fail if other containers are using the volume volume_manager - .remove_volume(&volume_state.volume_name) + .remove_volume_with_explorer_cleanup(&volume_state.volume_name) .await .with_context(|| { - format!("Failed to remove volume: {}", volume_state.volume_name) + format!("Failed to remove volume: {}. If other containers are using this volume, try using --force to remove them.", volume_state.volume_name) })?; } diff --git a/src/commands/ext/build.rs b/src/commands/ext/build.rs index 7baeab8..fd3324c 100644 --- a/src/commands/ext/build.rs +++ b/src/commands/ext/build.rs @@ -1,7 +1,11 @@ +// Allow deprecated variants for backward compatibility during migration +#![allow(deprecated)] + use anyhow::{Context, Result}; +use std::sync::Arc; use crate::commands::sdk::SdkCompileCommand; -use crate::utils::config::{Config, ExtensionLocation}; +use crate::utils::config::{ComposedConfig, Config, ExtensionLocation}; use crate::utils::container::{RunConfig, SdkContainer}; use crate::utils::output::{print_error, print_info, print_success, OutputLevel}; use crate::utils::stamps::{ @@ -34,6 +38,9 @@ pub struct ExtBuildCommand { pub no_stamps: bool, pub runs_on: Option, pub nfs_port: Option, + pub sdk_arch: Option, + /// Pre-composed configuration to avoid reloading + composed_config: Option>, } impl ExtBuildCommand { @@ -55,6 +62,8 @@ impl ExtBuildCommand { no_stamps: false, runs_on: None, nfs_port: None, + sdk_arch: None, + composed_config: None, } } @@ -71,11 +80,30 @@ impl ExtBuildCommand { self } + /// Set SDK container architecture for cross-arch emulation + pub fn with_sdk_arch(mut self, sdk_arch: Option) -> Self { + self.sdk_arch = sdk_arch; + self + } + + /// Set pre-composed configuration to avoid reloading + pub fn with_composed_config(mut self, config: Arc) -> Self { + self.composed_config = Some(config); + self + } + pub async fn execute(&self) -> Result<()> { - // Load 
configuration and parse raw TOML - let config = Config::load(&self.config_path)?; - let content = std::fs::read_to_string(&self.config_path)?; - let _parsed: serde_yaml::Value = serde_yaml::from_str(&content)?; + // Use provided config or load fresh + let composed = match &self.composed_config { + Some(cc) => Arc::clone(cc), + None => Arc::new( + Config::load_composed(&self.config_path, self.target.as_deref()).with_context( + || format!("Failed to load composed config from {}", self.config_path), + )?, + ), + }; + let config = &composed.config; + let parsed = &composed.merged_value; // Merge container args from config and CLI (similar to SDK commands) let processed_container_args = @@ -83,7 +111,7 @@ impl ExtBuildCommand { // Get repo_url and repo_release from config let repo_url = config.get_sdk_repo_url(); let repo_release = config.get_sdk_repo_release(); - let target = resolve_target_required(self.target.as_deref(), &config)?; + let target = resolve_target_required(self.target.as_deref(), config)?; // Get SDK configuration from interpolated config let container_image = config @@ -93,7 +121,7 @@ impl ExtBuildCommand { // Validate stamps before proceeding (unless --no-stamps) if !self.no_stamps { let container_helper = - SdkContainer::from_config(&self.config_path, &config)?.verbose(self.verbose); + SdkContainer::from_config(&self.config_path, config)?.verbose(self.verbose); // Resolve required stamps for extension build let required = resolve_required_stamps( @@ -116,6 +144,7 @@ impl ExtBuildCommand { repo_release: repo_release.clone(), container_args: processed_container_args.clone(), dnf_args: self.dnf_args.clone(), + sdk_arch: self.sdk_arch.clone(), ..Default::default() }; @@ -142,9 +171,19 @@ impl ExtBuildCommand { })?; // Get the config path where this extension is actually defined + // Note: For SDK compile operations, we need a path that's accessible from the host. 
+ // For remote extensions, the sdk.compile sections are already merged into the main + // config via load_composed(), so we use the main config path for SDK compile. + // The ext_src_path (for overlay/scripts) is computed separately below. let ext_config_path = match &extension_location { ExtensionLocation::Local { config_path, .. } => config_path.clone(), ExtensionLocation::External { config_path, .. } => config_path.clone(), + ExtensionLocation::Remote { .. } => { + // For remote extensions, use the main config path because: + // 1. Remote extension sdk.compile sections are merged into main config via load_composed + // 2. The Docker volume path is not accessible from the host for SDK compile operations + self.config_path.clone() + } }; if self.verbose { @@ -161,21 +200,73 @@ impl ExtBuildCommand { OutputLevel::Normal, ); } + ExtensionLocation::Remote { name, source } => { + print_info( + &format!("Found remote extension '{name}' with source: {source:?}"), + OutputLevel::Normal, + ); + } } } - // Get merged extension configuration with target-specific overrides - // Use the config path where the extension is actually defined for proper interpolation - let ext_config = config - .get_merged_ext_config(&self.extension, &target, &ext_config_path)? - .ok_or_else(|| { - anyhow::anyhow!("Extension '{}' not found in configuration.", self.extension) - })?; + // Get extension configuration from the composed/merged config + // For remote extensions, this comes from the merged remote extension config (already read via container) + // For local extensions, this uses get_merged_ext_config which reads from the file + let ext_config = match &extension_location { + ExtensionLocation::Remote { .. 
} => { + // Use the already-merged config from `parsed` which contains remote extension configs + // Then apply target-specific overrides manually + let ext_section = parsed + .get("extensions") + .and_then(|ext| ext.get(&self.extension)); + if let Some(ext_val) = ext_section { + let base_ext = ext_val.clone(); + // Check for target-specific override within this extension + let target_override = ext_val.get(&target).cloned(); + if let Some(override_val) = target_override { + // Merge target override into base, filtering out other target sections + Some(config.merge_target_override(base_ext, override_val, &target)) + } else { + Some(base_ext) + } + } else { + None + } + } + ExtensionLocation::Local { config_path, .. } => { + // For local extensions, read from the file with proper target merging + config.get_merged_ext_config(&self.extension, &target, config_path)? + } + #[allow(deprecated)] + ExtensionLocation::External { config_path, .. } => { + // For deprecated external configs, read from the file + config.get_merged_ext_config(&self.extension, &target, config_path)? + } + } + .ok_or_else(|| { + anyhow::anyhow!("Extension '{}' not found in configuration.", self.extension) + })?; + + // Determine the extension source path for compile/install scripts + // For remote extensions, scripts are in $AVOCADO_PREFIX/includes// + // For local extensions, scripts are in /opt/src (the mounted src_dir) + let ext_script_workdir = match &extension_location { + ExtensionLocation::Remote { name, .. } => { + Some(format!("$AVOCADO_PREFIX/includes/{name}")) + } + ExtensionLocation::Local { .. } | ExtensionLocation::External { .. 
} => None, + }; // Handle compile dependencies with install scripts before building the extension // Pass the ext_config_path so SDK compile sections are loaded from the correct config - self.handle_compile_dependencies(&config, &ext_config, &target, &ext_config_path) - .await?; + self.handle_compile_dependencies( + config, + &ext_config, + &target, + &ext_config_path, + ext_script_workdir.as_deref(), + ) + .await?; // Get extension types from the types array (defaults to ["sysext", "confext"]) let ext_types = ext_config @@ -338,10 +429,22 @@ impl ExtBuildCommand { .ok_or_else(|| anyhow::anyhow!("No SDK container image specified in configuration."))?; // Resolve target with proper precedence - let target_arch = resolve_target_required(self.target.as_deref(), &config)?; + let target_arch = resolve_target_required(self.target.as_deref(), config)?; // Initialize SDK container helper - let container_helper = SdkContainer::from_config(&self.config_path, &config)?; + let container_helper = SdkContainer::from_config(&self.config_path, config)?; + + // Determine the extension source path for overlay resolution + // For remote extensions, files are in $AVOCADO_PREFIX/includes// + // For local extensions, files are in /opt/src (the mounted src_dir) + let ext_src_path = match &extension_location { + ExtensionLocation::Remote { name, .. } => { + format!("$AVOCADO_PREFIX/includes/{name}") + } + ExtensionLocation::Local { .. } | ExtensionLocation::External { .. } => { + "/opt/src".to_string() + } + }; // Build extensions based on configuration let mut overall_success = true; @@ -370,6 +473,7 @@ impl ExtBuildCommand { users_config, groups_config, reload_service_manager, + &ext_src_path, ) .await? } @@ -390,6 +494,7 @@ impl ExtBuildCommand { users_config, groups_config, reload_service_manager, + &ext_src_path, ) .await? 
} @@ -442,11 +547,12 @@ impl ExtBuildCommand { repo_release: repo_release.clone(), container_args: processed_container_args.clone(), dnf_args: self.dnf_args.clone(), + sdk_arch: self.sdk_arch.clone(), ..Default::default() }; let container_helper = - SdkContainer::from_config(&self.config_path, &config)?.verbose(self.verbose); + SdkContainer::from_config(&self.config_path, config)?.verbose(self.verbose); container_helper.run_in_container(run_config).await?; if self.verbose { @@ -478,6 +584,7 @@ impl ExtBuildCommand { users_config: Option<&serde_yaml::Mapping>, groups_config: Option<&serde_yaml::Mapping>, reload_service_manager: bool, + ext_src_path: &str, ) -> Result { // Create the build script for sysext extension let build_script = self.create_sysext_build_script( @@ -490,6 +597,7 @@ impl ExtBuildCommand { users_config, groups_config, reload_service_manager, + ext_src_path, ); // Execute the build script in the SDK container @@ -511,6 +619,7 @@ impl ExtBuildCommand { repo_release: repo_release.cloned(), container_args: processed_container_args.clone(), dnf_args: self.dnf_args.clone(), + sdk_arch: self.sdk_arch.clone(), ..Default::default() }; let result = container_helper.run_in_container(config).await?; @@ -543,6 +652,7 @@ impl ExtBuildCommand { users_config: Option<&serde_yaml::Mapping>, groups_config: Option<&serde_yaml::Mapping>, reload_service_manager: bool, + ext_src_path: &str, ) -> Result { // Create the build script for confext extension let build_script = self.create_confext_build_script( @@ -555,6 +665,7 @@ impl ExtBuildCommand { users_config, groups_config, reload_service_manager, + ext_src_path, ); // Execute the build script in the SDK container @@ -576,6 +687,7 @@ impl ExtBuildCommand { repo_release: repo_release.cloned(), container_args: processed_container_args.clone(), dnf_args: self.dnf_args.clone(), + sdk_arch: self.sdk_arch.clone(), ..Default::default() }; let result = container_helper.run_in_container(config).await?; @@ -602,58 +714,52 @@ 
impl ExtBuildCommand { users_config: Option<&serde_yaml::Mapping>, groups_config: Option<&serde_yaml::Mapping>, reload_service_manager: bool, + ext_src_path: &str, ) -> String { let overlay_section = if let Some(overlay_config) = overlay_config { match overlay_config.mode { OverlayMode::Merge => format!( r#" # Merge overlay directory into extension sysroot -if [ -d "/opt/src/{}" ]; then - echo "Merging overlay directory '{}' into extension sysroot with root:root ownership" +if [ -d "{src_path}/{overlay_dir}" ]; then + echo "Merging overlay directory '{overlay_dir}' into extension sysroot with root:root ownership" # Use rsync to merge directories and set ownership during copy - rsync -a --chown=root:root /opt/src/{}/ "$AVOCADO_EXT_SYSROOTS/{}/" + rsync -a --chown=root:root {src_path}/{overlay_dir}/ "$AVOCADO_EXT_SYSROOTS/{ext_name}/" else - echo "Error: Overlay directory '{}' not found in source" + echo "Error: Overlay directory '{overlay_dir}' not found in source" exit 1 fi "#, - overlay_config.dir, - overlay_config.dir, - overlay_config.dir, - self.extension, - overlay_config.dir + src_path = ext_src_path, + overlay_dir = overlay_config.dir, + ext_name = self.extension, ), OverlayMode::Opaque => format!( r#" # Copy overlay directory to extension sysroot (opaque mode) -if [ -d "/opt/src/{}" ]; then - echo "Copying overlay directory '{}' to extension sysroot (opaque mode)" +if [ -d "{src_path}/{overlay_dir}" ]; then + echo "Copying overlay directory '{overlay_dir}' to extension sysroot (opaque mode)" # Use cp -a to replace directory contents completely while preserving permissions - cp -a /opt/src/{}/* "$AVOCADO_EXT_SYSROOTS/{}/" + cp -a {src_path}/{overlay_dir}/* "$AVOCADO_EXT_SYSROOTS/{ext_name}/" # Fix ownership to root:root for copied overlay files only (permissions are preserved) echo "Setting ownership to root:root for overlay files" - find "/opt/src/{}" -mindepth 1 | while IFS= read -r srcpath; do - relpath="$(echo "$srcpath" | sed "s|^/opt/src/{}||" | sed 
"s|^/||")" + find "{src_path}/{overlay_dir}" -mindepth 1 | while IFS= read -r srcpath; do + relpath="$(echo "$srcpath" | sed "s|^{src_path}/{overlay_dir}||" | sed "s|^/||")" if [ -n "$relpath" ]; then - destpath="$AVOCADO_EXT_SYSROOTS/{}/$relpath" + destpath="$AVOCADO_EXT_SYSROOTS/{ext_name}/$relpath" if [ -e "$destpath" ]; then chown root:root "$destpath" 2>/dev/null || true fi fi done else - echo "Error: Overlay directory '{}' not found in source" + echo "Error: Overlay directory '{overlay_dir}' not found in source" exit 1 fi "#, - overlay_config.dir, - overlay_config.dir, - overlay_config.dir, - self.extension, - overlay_config.dir, - overlay_config.dir, - self.extension, - overlay_config.dir + src_path = ext_src_path, + overlay_dir = overlay_config.dir, + ext_name = self.extension, ), } } else { @@ -775,58 +881,52 @@ fi users_config: Option<&serde_yaml::Mapping>, groups_config: Option<&serde_yaml::Mapping>, reload_service_manager: bool, + ext_src_path: &str, ) -> String { let overlay_section = if let Some(overlay_config) = overlay_config { match overlay_config.mode { OverlayMode::Merge => format!( r#" # Merge overlay directory into extension sysroot -if [ -d "/opt/src/{}" ]; then - echo "Merging overlay directory '{}' into extension sysroot with root:root ownership" +if [ -d "{src_path}/{overlay_dir}" ]; then + echo "Merging overlay directory '{overlay_dir}' into extension sysroot with root:root ownership" # Use rsync to merge directories and set ownership during copy - rsync -a --chown=root:root /opt/src/{}/ "$AVOCADO_EXT_SYSROOTS/{}/" + rsync -a --chown=root:root {src_path}/{overlay_dir}/ "$AVOCADO_EXT_SYSROOTS/{ext_name}/" else - echo "Error: Overlay directory '{}' not found in source" + echo "Error: Overlay directory '{overlay_dir}' not found in source" exit 1 fi "#, - overlay_config.dir, - overlay_config.dir, - overlay_config.dir, - self.extension, - overlay_config.dir + src_path = ext_src_path, + overlay_dir = overlay_config.dir, + ext_name = 
self.extension, ), OverlayMode::Opaque => format!( r#" # Copy overlay directory to extension sysroot (opaque mode) -if [ -d "/opt/src/{}" ]; then - echo "Copying overlay directory '{}' to extension sysroot (opaque mode)" +if [ -d "{src_path}/{overlay_dir}" ]; then + echo "Copying overlay directory '{overlay_dir}' to extension sysroot (opaque mode)" # Use cp -a to replace directory contents completely while preserving permissions - cp -a /opt/src/{}/* "$AVOCADO_EXT_SYSROOTS/{}/" + cp -a {src_path}/{overlay_dir}/* "$AVOCADO_EXT_SYSROOTS/{ext_name}/" # Fix ownership to root:root for copied overlay files only (permissions are preserved) echo "Setting ownership to root:root for overlay files" - find "/opt/src/{}" -mindepth 1 | while IFS= read -r srcpath; do - relpath="$(echo "$srcpath" | sed "s|^/opt/src/{}||" | sed "s|^/||")" + find "{src_path}/{overlay_dir}" -mindepth 1 | while IFS= read -r srcpath; do + relpath="$(echo "$srcpath" | sed "s|^{src_path}/{overlay_dir}||" | sed "s|^/||")" if [ -n "$relpath" ]; then - destpath="$AVOCADO_EXT_SYSROOTS/{}/$relpath" + destpath="$AVOCADO_EXT_SYSROOTS/{ext_name}/$relpath" if [ -e "$destpath" ]; then chown root:root "$destpath" 2>/dev/null || true fi fi done else - echo "Error: Overlay directory '{}' not found in source" + echo "Error: Overlay directory '{overlay_dir}' not found in source" exit 1 fi "#, - overlay_config.dir, - overlay_config.dir, - overlay_config.dir, - self.extension, - overlay_config.dir, - overlay_config.dir, - self.extension, - overlay_config.dir + src_path = ext_src_path, + overlay_dir = overlay_config.dir, + ext_name = self.extension, ), } } else { @@ -1435,15 +1535,19 @@ echo "Set proper permissions on authentication files""#, /// /// `sdk_config_path` is the path to the config file that contains the sdk.compile sections. /// For external extensions, this should be the external config path, not the main config. 
+ /// + /// `ext_script_workdir` is the optional working directory for compile/install scripts + /// (container path). For remote extensions, this is `$AVOCADO_PREFIX/includes//`. async fn handle_compile_dependencies( &self, config: &Config, ext_config: &serde_yaml::Value, target: &str, sdk_config_path: &str, + ext_script_workdir: Option<&str>, ) -> Result<()> { // Get dependencies from extension configuration - let dependencies = ext_config.get("dependencies").and_then(|v| v.as_mapping()); + let dependencies = ext_config.get("packages").and_then(|v| v.as_mapping()); let Some(deps_table) = dependencies else { return Ok(()); @@ -1510,6 +1614,12 @@ echo "Set proper permissions on authentication files""#, &format!("Using config path for SDK compile: {sdk_config_path}"), OutputLevel::Normal, ); + if let Some(workdir) = ext_script_workdir { + print_info( + &format!("Using script workdir: {workdir}"), + OutputLevel::Normal, + ); + } } let compile_command = SdkCompileCommand::new( sdk_config_path.to_string(), @@ -1518,7 +1628,8 @@ echo "Set proper permissions on authentication files""#, Some(target.to_string()), self.container_args.clone(), self.dnf_args.clone(), - ); + ) + .with_workdir(ext_script_workdir.map(|s| s.to_string())); compile_command.execute().await.with_context(|| { format!( @@ -1527,12 +1638,20 @@ echo "Set proper permissions on authentication files""#, })?; // Then, run the install script - // Note: install_script is already relative to /opt/src (the mounted src_dir in the container) - // so we don't need to prepend src_dir here - just use it directly like compile scripts do - let install_command = format!( - r#"if [ -f '{install_script}' ]; then echo 'Running install script: {install_script}'; export AVOCADO_BUILD_EXT_SYSROOT="$AVOCADO_EXT_SYSROOTS/{extension_name}"; AVOCADO_SDK_PREFIX=$AVOCADO_SDK_PREFIX bash '{install_script}'; else echo 'Install script {install_script} not found.'; ls -la; exit 1; fi"#, - extension_name = self.extension - ); + // For 
remote extensions, use ext_script_workdir to find the script + // For local extensions, scripts are relative to /opt/src (the mounted src_dir) + // Note: Use double quotes for workdir so $AVOCADO_PREFIX gets expanded by the shell + let install_command = if let Some(workdir) = ext_script_workdir { + format!( + r#"cd "{workdir}" && if [ -f '{install_script}' ]; then echo 'Running install script: {install_script}'; export AVOCADO_BUILD_EXT_SYSROOT="$AVOCADO_EXT_SYSROOTS/{extension_name}"; AVOCADO_SDK_PREFIX=$AVOCADO_SDK_PREFIX bash '{install_script}'; else echo 'Install script {install_script} not found.'; ls -la; exit 1; fi"#, + extension_name = self.extension + ) + } else { + format!( + r#"if [ -f '{install_script}' ]; then echo 'Running install script: {install_script}'; export AVOCADO_BUILD_EXT_SYSROOT="$AVOCADO_EXT_SYSROOTS/{extension_name}"; AVOCADO_SDK_PREFIX=$AVOCADO_SDK_PREFIX bash '{install_script}'; else echo 'Install script {install_script} not found.'; ls -la; exit 1; fi"#, + extension_name = self.extension + ) + }; if self.verbose { print_info( @@ -1561,6 +1680,7 @@ echo "Set proper permissions on authentication files""#, repo_release: repo_release.clone(), container_args: merged_container_args.clone(), dnf_args: self.dnf_args.clone(), + sdk_arch: self.sdk_arch.clone(), ..Default::default() }; @@ -1640,6 +1760,7 @@ mod tests { None, None, false, + "/opt/src", ); // Print the actual script for debugging @@ -1698,6 +1819,7 @@ mod tests { None, None, false, + "/opt/src", ); assert!(script @@ -1753,6 +1875,7 @@ mod tests { None, None, false, + "/opt/src", ); assert!(script.contains("echo \"SYSEXT_SCOPE=system portable\" >> \"$release_file\"")); @@ -1782,6 +1905,7 @@ mod tests { None, None, false, + "/opt/src", ); assert!(script.contains("echo \"CONFEXT_SCOPE=system portable\" >> \"$release_file\"")); @@ -1812,6 +1936,7 @@ mod tests { None, None, false, + "/opt/src", ); // Check that service enabling commands are present using [Install] section parser @@ 
-1864,6 +1989,7 @@ mod tests { None, None, false, + "/opt/src", ); // Verify the find command looks for common kernel module extensions @@ -1902,6 +2028,7 @@ mod tests { None, None, false, + "/opt/src", ); // Verify overlay merging commands are present @@ -1942,6 +2069,7 @@ mod tests { None, None, false, + "/opt/src", ); // Verify overlay merging commands are present @@ -1982,6 +2110,7 @@ mod tests { None, None, false, + "/opt/src", ); // Verify overlay opaque mode commands are present @@ -2024,6 +2153,7 @@ mod tests { None, None, false, + "/opt/src", ); // Verify overlay opaque mode commands are present @@ -2062,6 +2192,7 @@ mod tests { None, None, false, + "/opt/src", ); let script_confext = cmd.create_confext_build_script( "1.0", @@ -2073,6 +2204,7 @@ mod tests { None, None, false, + "/opt/src", ); // Verify no overlay merging commands are present @@ -2108,6 +2240,7 @@ mod tests { None, None, false, + "/opt/src", ); // Verify separate AVOCADO_ON_MERGE entries are added for kernel modules and modprobe commands @@ -2152,6 +2285,7 @@ mod tests { None, None, false, + "/opt/src", ); // Verify sysusers.d detection logic is present @@ -2190,6 +2324,7 @@ mod tests { None, None, false, + "/opt/src", ); // Verify sysusers.d detection logic is present for confext @@ -2225,6 +2360,7 @@ mod tests { None, None, false, + "/opt/src", ); // Verify ld.so.conf.d detection logic is present for confext @@ -2263,6 +2399,7 @@ mod tests { None, None, false, + "/opt/src", ); // Verify custom on_merge commands are added as separate entries @@ -2303,6 +2440,7 @@ mod tests { None, None, false, + "/opt/src", ); // Verify custom on_unmerge commands are added as separate entries @@ -2343,6 +2481,7 @@ mod tests { None, None, false, + "/opt/src", ); // Verify custom on_unmerge commands are added as separate entries @@ -2378,6 +2517,7 @@ mod tests { None, None, false, + "/opt/src", ); // Verify custom on_merge commands are added as separate entries @@ -2412,6 +2552,7 @@ mod tests { None, None, 
false, + "/opt/src", ); // Verify both kernel modules and sysusers.d are handled correctly with separate lines @@ -2445,6 +2586,7 @@ mod tests { None, None, false, + "/opt/src", ); // Verify that without modprobe modules, only depmod is added when kernel modules are found @@ -2478,6 +2620,7 @@ mod tests { None, None, false, + "/opt/src", ); // Verify separate AVOCADO_ON_MERGE entries are added @@ -2531,6 +2674,7 @@ mod tests { None, None, false, + "/opt/src", ); // Verify that each command gets its own separate AVOCADO_ON_MERGE entry @@ -2735,6 +2879,7 @@ mod tests { Some(&users_config), None, false, + "/opt/src", ); // Verify the complete build script includes users functionality @@ -2790,6 +2935,7 @@ mod tests { Some(&users_config), None, false, + "/opt/src", ); // Verify the complete build script includes users functionality @@ -3274,6 +3420,7 @@ mod tests { None, None, true, + "/opt/src", ); // Verify that reload_service_manager = true sets EXTENSION_RELOAD_MANAGER=1 @@ -3303,6 +3450,7 @@ mod tests { None, None, true, + "/opt/src", ); // Verify that reload_service_manager = true sets EXTENSION_RELOAD_MANAGER=1 @@ -3313,11 +3461,11 @@ mod tests { fn test_handle_compile_dependencies_parsing() { // Test that the new compile dependency syntax is properly parsed let config_content = r#" -ext: +extensions: my-extension: types: - sysext - dependencies: + packages: my-app: compile: my-app install: ext-install.sh @@ -3335,19 +3483,19 @@ sdk: let parsed: serde_yaml::Value = serde_yaml::from_str(config_content).unwrap(); let ext_config = parsed - .get(serde_yaml::Value::String("ext".to_string())) + .get(serde_yaml::Value::String("extensions".to_string())) .unwrap() .get(serde_yaml::Value::String("my-extension".to_string())) .unwrap(); - let dependencies = ext_config - .get(serde_yaml::Value::String("dependencies".to_string())) + let packages = ext_config + .get(serde_yaml::Value::String("packages".to_string())) .unwrap() .as_mapping() .unwrap(); // Check that we can 
identify compile dependencies with install scripts let mut compile_install_deps = Vec::new(); - for (dep_name, dep_spec) in dependencies { + for (dep_name, dep_spec) in packages { if let serde_yaml::Value::Mapping(spec_map) = dep_spec { if let ( Some(serde_yaml::Value::String(compile_section)), diff --git a/src/commands/ext/checkout.rs b/src/commands/ext/checkout.rs index f5e7bf6..7ad3713 100644 --- a/src/commands/ext/checkout.rs +++ b/src/commands/ext/checkout.rs @@ -1,8 +1,9 @@ use anyhow::{Context, Result}; use std::path::Path; +use std::sync::Arc; use tokio::process::Command as AsyncCommand; -use crate::utils::config::Config; +use crate::utils::config::{ComposedConfig, Config}; use crate::utils::container::{RunConfig, SdkContainer}; use crate::utils::output::{print_error, print_info, print_success, OutputLevel}; use crate::utils::stamps::{ @@ -20,6 +21,9 @@ pub struct ExtCheckoutCommand { container_tool: String, target: Option, no_stamps: bool, + sdk_arch: Option, + /// Pre-composed configuration to avoid reloading + composed_config: Option>, } impl ExtCheckoutCommand { @@ -41,6 +45,8 @@ impl ExtCheckoutCommand { container_tool, target, no_stamps: false, + sdk_arch: None, + composed_config: None, } } @@ -50,18 +56,40 @@ impl ExtCheckoutCommand { self } + /// Set SDK container architecture for cross-arch emulation + pub fn with_sdk_arch(mut self, sdk_arch: Option) -> Self { + self.sdk_arch = sdk_arch; + self + } + + /// Set pre-composed configuration to avoid reloading + #[allow(dead_code)] + pub fn with_composed_config(mut self, config: Arc) -> Self { + self.composed_config = Some(config); + self + } + pub async fn execute(&self) -> Result<()> { let cwd = std::env::current_dir().context("Failed to get current directory")?; + // Use provided config or load fresh + let composed = match &self.composed_config { + Some(cc) => Arc::clone(cc), + None => Arc::new( + Config::load_composed(&self.config_path, self.target.as_deref()) + .context("Failed to load config")?, + 
), + }; + let config = &composed.config; + // Validate stamps before proceeding (unless --no-stamps) // Checkout requires extension to be installed if !self.no_stamps { - let config = Config::load(&self.config_path)?; - let target = resolve_target_required(self.target.as_deref(), &config)?; + let target = resolve_target_required(self.target.as_deref(), config)?; if let Some(container_image) = config.get_sdk_image() { let container_helper = - SdkContainer::from_config(&self.config_path, &config)?.verbose(self.verbose); + SdkContainer::from_config(&self.config_path, config)?.verbose(self.verbose); let requirements = vec![ StampRequirement::sdk_install(), @@ -79,6 +107,7 @@ impl ExtCheckoutCommand { repo_url: config.get_sdk_repo_url(), repo_release: config.get_sdk_repo_release(), container_args: config.merge_sdk_container_args(None), + sdk_arch: self.sdk_arch.clone(), ..Default::default() }; @@ -291,7 +320,7 @@ impl ExtCheckoutCommand { // Get target from runtime configuration let target = parsed - .get("runtime") + .get("runtimes") .and_then(|runtime| runtime.as_mapping()) .and_then(|runtime_table| { if runtime_table.len() == 1 { diff --git a/src/commands/ext/clean.rs b/src/commands/ext/clean.rs index 2f5f0d9..7a279ca 100644 --- a/src/commands/ext/clean.rs +++ b/src/commands/ext/clean.rs @@ -1,8 +1,15 @@ -use anyhow::Result; +// Allow deprecated variants for backward compatibility during migration +#![allow(deprecated)] -use crate::utils::config::{Config, ExtensionLocation}; +use anyhow::{Context, Result}; +use std::sync::Arc; + +use crate::utils::config::{ComposedConfig, Config, ExtensionLocation}; use crate::utils::container::{RunConfig, SdkContainer}; use crate::utils::output::{print_error, print_info, print_success, OutputLevel}; +use crate::utils::stamps::{ + generate_batch_read_stamps_script, validate_stamps_batch, StampRequirement, +}; use crate::utils::target::resolve_target_required; pub struct ExtCleanCommand { @@ -12,6 +19,9 @@ pub struct ExtCleanCommand { 
target: Option, container_args: Option>, dnf_args: Option>, + sdk_arch: Option, + /// Pre-composed configuration to avoid reloading + composed_config: Option>, } impl ExtCleanCommand { @@ -30,21 +40,292 @@ impl ExtCleanCommand { target, container_args, dnf_args, + sdk_arch: None, + composed_config: None, } } + /// Set SDK container architecture for cross-arch emulation + pub fn with_sdk_arch(mut self, sdk_arch: Option) -> Self { + self.sdk_arch = sdk_arch; + self + } + + /// Set pre-composed configuration to avoid reloading + #[allow(dead_code)] + pub fn with_composed_config(mut self, config: Arc) -> Self { + self.composed_config = Some(config); + self + } + pub async fn execute(&self) -> Result<()> { - let config = Config::load(&self.config_path)?; - let content = std::fs::read_to_string(&self.config_path)?; - let _parsed: serde_yaml::Value = serde_yaml::from_str(&content)?; + // Use provided config or load fresh + let composed = match &self.composed_config { + Some(cc) => Arc::clone(cc), + None => Arc::new( + Config::load_composed(&self.config_path, self.target.as_deref()).with_context( + || format!("Failed to load composed config from {}", self.config_path), + )?, + ), + }; + let config = &composed.config; + let parsed = &composed.merged_value; + + let target = resolve_target_required(self.target.as_deref(), config)?; + let extension_location = self.find_extension_in_dependency_tree(config, &target)?; + let container_image = self.get_container_image(config)?; + + // Get extension configuration from the composed/merged config + let ext_config = self.get_extension_config(config, parsed, &extension_location, &target)?; + + // Determine the extension source path for clean scripts + // For remote extensions, scripts are in $AVOCADO_PREFIX/includes// + // For local extensions, scripts are in /opt/src (the mounted src_dir) + let ext_script_workdir = match &extension_location { + ExtensionLocation::Remote { name, .. 
} => { + Some(format!("$AVOCADO_PREFIX/includes/{name}")) + } + ExtensionLocation::Local { .. } | ExtensionLocation::External { .. } => None, + }; - let target = resolve_target_required(self.target.as_deref(), &config)?; - let _extension_location = self.find_extension_in_dependency_tree(&config, &target)?; - let container_image = self.get_container_image(&config)?; + // Execute clean scripts for compile dependencies BEFORE cleaning the extension + // This allows clean scripts to access build artifacts if needed + self.execute_compile_clean_scripts( + config, + &ext_config, + &container_image, + &target, + ext_script_workdir.as_deref(), + ) + .await?; self.clean_extension(&container_image, &target).await } + /// Get extension configuration from the composed/merged config + fn get_extension_config( + &self, + config: &Config, + parsed: &serde_yaml::Value, + extension_location: &ExtensionLocation, + target: &str, + ) -> Result { + match extension_location { + ExtensionLocation::Remote { .. } => { + // Use the already-merged config from `parsed` which contains remote extension configs + let ext_section = parsed + .get("extensions") + .and_then(|ext| ext.get(&self.extension)); + if let Some(ext_val) = ext_section { + let base_ext = ext_val.clone(); + // Check for target-specific override within this extension + let target_override = ext_val.get(target).cloned(); + if let Some(override_val) = target_override { + Ok(config.merge_target_override(base_ext, override_val, target)) + } else { + Ok(base_ext) + } + } else { + Ok(serde_yaml::Value::Mapping(serde_yaml::Mapping::new())) + } + } + ExtensionLocation::Local { config_path, .. } => config + .get_merged_ext_config(&self.extension, target, config_path)? + .ok_or_else(|| { + anyhow::anyhow!("Extension '{}' not found in configuration.", self.extension) + }), + #[allow(deprecated)] + ExtensionLocation::External { config_path, .. } => config + .get_merged_ext_config(&self.extension, target, config_path)? 
+ .ok_or_else(|| { + anyhow::anyhow!("Extension '{}' not found in configuration.", self.extension) + }), + } + } + + /// Execute clean scripts for compile dependencies + async fn execute_compile_clean_scripts( + &self, + config: &Config, + ext_config: &serde_yaml::Value, + container_image: &str, + target: &str, + ext_script_workdir: Option<&str>, + ) -> Result<()> { + // Get dependencies from extension configuration + let dependencies = ext_config.get("packages").and_then(|v| v.as_mapping()); + + let Some(deps_table) = dependencies else { + return Ok(()); + }; + + // Find compile dependencies that may have clean scripts + let mut compile_sections_to_clean = Vec::new(); + + for (dep_name_val, dep_spec) in deps_table { + if let Some(dep_name) = dep_name_val.as_str() { + if let serde_yaml::Value::Mapping(spec_map) = dep_spec { + // Check for compile dependency: { compile = "section-name", ... } + if let Some(serde_yaml::Value::String(compile_section)) = + spec_map.get("compile") + { + compile_sections_to_clean + .push((dep_name.to_string(), compile_section.clone())); + } + } + } + } + + if compile_sections_to_clean.is_empty() { + return Ok(()); + } + + // Get clean scripts from SDK compile sections + let clean_scripts = self.get_clean_scripts_for_sections(config, &compile_sections_to_clean); + + if clean_scripts.is_empty() { + if self.verbose { + print_info( + "No clean scripts defined for compile dependencies", + OutputLevel::Normal, + ); + } + return Ok(()); + } + + // Get SDK configuration for container setup + let repo_url = config.get_sdk_repo_url(); + let repo_release = config.get_sdk_repo_release(); + let merged_container_args = config.merge_sdk_container_args(self.container_args.as_ref()); + + // Initialize SDK container helper + let container_helper = SdkContainer::from_config(&self.config_path, config)?; + + // Validate SDK is installed before running clean scripts + let requirements = vec![StampRequirement::sdk_install()]; + let batch_script = 
generate_batch_read_stamps_script(&requirements); + let run_config = RunConfig { + container_image: container_image.to_string(), + target: target.to_string(), + command: batch_script, + verbose: false, + source_environment: true, + interactive: false, + repo_url: repo_url.clone(), + repo_release: repo_release.clone(), + container_args: merged_container_args.clone(), + dnf_args: self.dnf_args.clone(), + sdk_arch: self.sdk_arch.clone(), + ..Default::default() + }; + + let output = container_helper + .run_in_container_with_output(run_config) + .await?; + + let validation = + validate_stamps_batch(&requirements, output.as_deref().unwrap_or(""), None); + + if !validation.is_satisfied() { + let error = validation.into_error("Cannot run clean scripts for compile dependencies"); + return Err(error.into()); + } + + print_info( + &format!( + "Executing {} clean script(s) for compile dependencies", + clean_scripts.len() + ), + OutputLevel::Normal, + ); + + // Execute each clean script + for (section_name, clean_script) in clean_scripts { + print_info( + &format!( + "Running clean script for compile section '{section_name}': {clean_script}" + ), + OutputLevel::Normal, + ); + + // Build clean command with optional workdir prefix + // For remote extensions, scripts are in $AVOCADO_PREFIX/includes// instead of /opt/src + let clean_command = if let Some(workdir) = ext_script_workdir { + format!( + r#"cd "{workdir}" && if [ -f '{clean_script}' ]; then echo 'Running clean script: {clean_script}'; AVOCADO_SDK_PREFIX=$AVOCADO_SDK_PREFIX bash '{clean_script}'; else echo 'Clean script {clean_script} not found, skipping.'; fi"# + ) + } else { + format!( + r#"if [ -f '{clean_script}' ]; then echo 'Running clean script: {clean_script}'; AVOCADO_SDK_PREFIX=$AVOCADO_SDK_PREFIX bash '{clean_script}'; else echo 'Clean script {clean_script} not found, skipping.'; fi"# + ) + }; + + if self.verbose { + print_info( + &format!("Running command: {clean_command}"), + OutputLevel::Normal, + ); + } + + 
let run_config = RunConfig { + container_image: container_image.to_string(), + target: target.to_string(), + command: clean_command, + verbose: self.verbose, + source_environment: true, + interactive: false, + repo_url: repo_url.clone(), + repo_release: repo_release.clone(), + container_args: merged_container_args.clone(), + dnf_args: self.dnf_args.clone(), + sdk_arch: self.sdk_arch.clone(), + ..Default::default() + }; + + let success = container_helper.run_in_container(run_config).await?; + + if success { + print_success( + &format!("Completed clean script for section '{section_name}'."), + OutputLevel::Normal, + ); + } else { + print_error( + &format!("Failed to run clean script for section '{section_name}'."), + OutputLevel::Normal, + ); + return Err(anyhow::anyhow!( + "Clean script failed for section '{section_name}'" + )); + } + } + + Ok(()) + } + + /// Get clean scripts for the specified compile sections + fn get_clean_scripts_for_sections( + &self, + config: &Config, + compile_sections: &[(String, String)], + ) -> Vec<(String, String)> { + let mut clean_scripts = Vec::new(); + + if let Some(sdk) = &config.sdk { + if let Some(compile) = &sdk.compile { + for (_dep_name, section_name) in compile_sections { + if let Some(section_config) = compile.get(section_name) { + if let Some(clean_script) = §ion_config.clean { + clean_scripts.push((section_name.clone(), clean_script.clone())); + } + } + } + } + } + + clean_scripts + } + fn find_extension_in_dependency_tree( &self, config: &Config, @@ -74,6 +355,12 @@ impl ExtCleanCommand { OutputLevel::Normal, ); } + ExtensionLocation::Remote { name, source } => { + print_info( + &format!("Found remote extension '{name}' with source: {source:?}"), + OutputLevel::Normal, + ); + } } } Ok(location) @@ -127,7 +414,7 @@ rm -rf "$AVOCADO_PREFIX/.stamps/ext/{ext}" ); } - let config = RunConfig { + let run_config = RunConfig { container_image: container_image.to_string(), target: target.to_string(), command: clean_command, @@ 
-138,9 +425,10 @@ rm -rf "$AVOCADO_PREFIX/.stamps/ext/{ext}" self.container_args.as_ref(), ), dnf_args: self.dnf_args.clone(), + sdk_arch: self.sdk_arch.clone(), ..Default::default() }; - let success = container_helper.run_in_container(config).await?; + let success = container_helper.run_in_container(run_config).await?; if success { print_success( @@ -294,4 +582,170 @@ mod tests { ); assert!(script.contains(".stamps/ext"), "Should clean stamps"); } + + #[test] + fn test_get_clean_scripts_for_sections_with_clean_script() { + use std::io::Write; + use tempfile::NamedTempFile; + + let config_content = r#" +sdk: + image: "test-image" + compile: + my-library: + compile: "build.sh" + clean: "clean.sh" + packages: + gcc: "*" + other-library: + compile: "build-other.sh" + packages: + make: "*" +"#; + let mut temp_file = NamedTempFile::new().unwrap(); + write!(temp_file, "{config_content}").unwrap(); + let config = Config::load(temp_file.path()).unwrap(); + + let cmd = ExtCleanCommand::new( + "test-ext".to_string(), + temp_file.path().to_string_lossy().to_string(), + false, + None, + None, + None, + ); + + // Test with compile sections - one has clean script, one doesn't + let compile_sections = vec![ + ("dep1".to_string(), "my-library".to_string()), + ("dep2".to_string(), "other-library".to_string()), + ]; + + let clean_scripts = cmd.get_clean_scripts_for_sections(&config, &compile_sections); + + // Only my-library has a clean script + assert_eq!(clean_scripts.len(), 1); + assert_eq!(clean_scripts[0].0, "my-library"); + assert_eq!(clean_scripts[0].1, "clean.sh"); + } + + #[test] + fn test_get_clean_scripts_for_sections_no_clean_scripts() { + use std::io::Write; + use tempfile::NamedTempFile; + + let config_content = r#" +sdk: + image: "test-image" + compile: + my-library: + compile: "build.sh" + packages: + gcc: "*" +"#; + let mut temp_file = NamedTempFile::new().unwrap(); + write!(temp_file, "{config_content}").unwrap(); + let config = 
Config::load(temp_file.path()).unwrap(); + + let cmd = ExtCleanCommand::new( + "test-ext".to_string(), + temp_file.path().to_string_lossy().to_string(), + false, + None, + None, + None, + ); + + let compile_sections = vec![("dep1".to_string(), "my-library".to_string())]; + + let clean_scripts = cmd.get_clean_scripts_for_sections(&config, &compile_sections); + + // No clean script defined + assert!(clean_scripts.is_empty()); + } + + #[test] + fn test_get_clean_scripts_for_nonexistent_section() { + use std::io::Write; + use tempfile::NamedTempFile; + + let config_content = r#" +sdk: + image: "test-image" + compile: + my-library: + compile: "build.sh" + clean: "clean.sh" +"#; + let mut temp_file = NamedTempFile::new().unwrap(); + write!(temp_file, "{config_content}").unwrap(); + let config = Config::load(temp_file.path()).unwrap(); + + let cmd = ExtCleanCommand::new( + "test-ext".to_string(), + temp_file.path().to_string_lossy().to_string(), + false, + None, + None, + None, + ); + + // Reference a section that doesn't exist + let compile_sections = vec![("dep1".to_string(), "nonexistent-library".to_string())]; + + let clean_scripts = cmd.get_clean_scripts_for_sections(&config, &compile_sections); + + // No clean script found for nonexistent section + assert!(clean_scripts.is_empty()); + } + + #[test] + fn test_get_clean_scripts_multiple_sections_with_clean() { + use std::io::Write; + use tempfile::NamedTempFile; + + let config_content = r#" +sdk: + image: "test-image" + compile: + lib-a: + compile: "build-a.sh" + clean: "clean-a.sh" + lib-b: + compile: "build-b.sh" + clean: "clean-b.sh" + lib-c: + compile: "build-c.sh" +"#; + let mut temp_file = NamedTempFile::new().unwrap(); + write!(temp_file, "{config_content}").unwrap(); + let config = Config::load(temp_file.path()).unwrap(); + + let cmd = ExtCleanCommand::new( + "test-ext".to_string(), + temp_file.path().to_string_lossy().to_string(), + false, + None, + None, + None, + ); + + let compile_sections = vec![ + 
("dep-a".to_string(), "lib-a".to_string()), + ("dep-b".to_string(), "lib-b".to_string()), + ("dep-c".to_string(), "lib-c".to_string()), + ]; + + let clean_scripts = cmd.get_clean_scripts_for_sections(&config, &compile_sections); + + // lib-a and lib-b have clean scripts, lib-c doesn't + assert_eq!(clean_scripts.len(), 2); + + let section_names: Vec<&str> = clean_scripts + .iter() + .map(|(name, _)| name.as_str()) + .collect(); + assert!(section_names.contains(&"lib-a")); + assert!(section_names.contains(&"lib-b")); + } } diff --git a/src/commands/ext/deps.rs b/src/commands/ext/deps.rs index 7c1cf95..6418f28 100644 --- a/src/commands/ext/deps.rs +++ b/src/commands/ext/deps.rs @@ -1,7 +1,11 @@ +// Allow deprecated variants for backward compatibility during migration +#![allow(deprecated)] + use anyhow::Result; use std::collections::HashSet; +use std::sync::Arc; -use crate::utils::config::{Config, ExtensionLocation}; +use crate::utils::config::{ComposedConfig, Config, ExtensionLocation}; use crate::utils::output::{print_error, print_info, OutputLevel}; use crate::utils::target::resolve_target_required; @@ -9,6 +13,8 @@ pub struct ExtDepsCommand { config_path: String, extension: Option, target: Option, + /// Pre-composed configuration to avoid reloading + composed_config: Option>, } impl ExtDepsCommand { @@ -17,18 +23,33 @@ impl ExtDepsCommand { config_path, extension, target, + composed_config: None, } } + /// Set pre-composed configuration to avoid reloading + #[allow(dead_code)] + pub fn with_composed_config(mut self, config: Arc) -> Self { + self.composed_config = Some(config); + self + } + pub fn execute(&self) -> Result<()> { - let config = Config::load(&self.config_path)?; - let content = std::fs::read_to_string(&self.config_path)?; - let parsed: serde_yaml::Value = serde_yaml::from_str(&content)?; + // Use provided config or load fresh + let composed = match &self.composed_config { + Some(cc) => Arc::clone(cc), + None => Arc::new(Config::load_composed( + 
&self.config_path, + self.target.as_deref(), + )?), + }; + let config = &composed.config; + let parsed = &composed.merged_value; - let target = resolve_target_required(self.target.as_deref(), &config)?; - let extensions_to_process = self.get_extensions_to_process(&config, &parsed, &target)?; + let target = resolve_target_required(self.target.as_deref(), config)?; + let extensions_to_process = self.get_extensions_to_process(config, parsed, &target)?; - self.display_dependencies(&parsed, &extensions_to_process); + self.display_dependencies(parsed, &extensions_to_process); Ok(()) } @@ -65,7 +86,7 @@ impl ExtDepsCommand { } None => { // For listing all extensions, still use local extensions only - let ext_section = parsed.get("ext"); + let ext_section = parsed.get("extensions"); match ext_section { Some(ext) => { let ext_table = ext @@ -162,7 +183,7 @@ impl ExtDepsCommand { } // Try extension reference - if let Some(serde_yaml::Value::String(ext_name)) = spec_map.get("ext") { + if let Some(serde_yaml::Value::String(ext_name)) = spec_map.get("extensions") { // Check if this is a versioned extension (has vsn field) if let Some(serde_yaml::Value::String(version)) = spec_map.get("vsn") { return vec![("ext".to_string(), ext_name.clone(), version.clone())]; @@ -203,7 +224,7 @@ impl ExtDepsCommand { ext_name: &str, ) -> Vec<(String, String, String)> { let version = config - .get("ext") + .get("extensions") .and_then(|ext_section| ext_section.get(ext_name)) .and_then(|ext_config| ext_config.get("version")) .and_then(|v| v.as_str()) @@ -221,7 +242,7 @@ impl ExtDepsCommand { .get("sdk") .and_then(|sdk| sdk.get("compile")) .and_then(|compile| compile.get(compile_name)) - .and_then(|compile_config| compile_config.get("dependencies")) + .and_then(|compile_config| compile_config.get("packages")) .and_then(|deps| deps.as_mapping()); let Some(deps_table) = compile_deps else { @@ -261,9 +282,9 @@ impl ExtDepsCommand { extension: &str, ) -> Vec<(String, String, String)> { let 
dependencies = config - .get("ext") + .get("extensions") .and_then(|ext_section| ext_section.get(extension)) - .and_then(|ext_config| ext_config.get("dependencies")) + .and_then(|ext_config| ext_config.get("packages")) .and_then(|deps| deps.as_mapping()); let Some(deps_table) = dependencies else { @@ -311,11 +332,11 @@ mod tests { #[test] fn test_resolve_compile_dependency_with_install() { let config_content = r#" -ext: +extensions: my-extension: types: - sysext - dependencies: + packages: my-app: compile: my-app install: ext-install.sh @@ -336,6 +357,7 @@ sdk: config_path: "test.yaml".to_string(), extension: Some("my-extension".to_string()), target: None, + composed_config: None, }; // Test new syntax with install script @@ -370,7 +392,7 @@ sdk: #[test] fn test_resolve_regular_dependencies() { let config_content = r#" -ext: +extensions: test-ext: types: - sysext @@ -381,6 +403,7 @@ ext: config_path: "test.yaml".to_string(), extension: Some("test-ext".to_string()), target: None, + composed_config: None, }; // Test version dependency diff --git a/src/commands/ext/dnf.rs b/src/commands/ext/dnf.rs index f5d1abb..4144492 100644 --- a/src/commands/ext/dnf.rs +++ b/src/commands/ext/dnf.rs @@ -1,6 +1,10 @@ -use anyhow::Result; +// Allow deprecated variants for backward compatibility during migration +#![allow(deprecated)] -use crate::utils::config::{Config, ExtensionLocation}; +use anyhow::{Context, Result}; +use std::sync::Arc; + +use crate::utils::config::{ComposedConfig, Config, ExtensionLocation}; use crate::utils::container::{RunConfig, SdkContainer}; use crate::utils::output::{print_error, print_info, print_success, OutputLevel}; use crate::utils::target::resolve_target_required; @@ -13,6 +17,9 @@ pub struct ExtDnfCommand { target: Option, container_args: Option>, dnf_args: Option>, + sdk_arch: Option, + /// Pre-composed configuration to avoid reloading + composed_config: Option>, } impl ExtDnfCommand { @@ -33,25 +40,47 @@ impl ExtDnfCommand { target, 
container_args, dnf_args, + sdk_arch: None, + composed_config: None, } } + /// Set SDK container architecture for cross-arch emulation + pub fn with_sdk_arch(mut self, sdk_arch: Option) -> Self { + self.sdk_arch = sdk_arch; + self + } + + /// Set pre-composed configuration to avoid reloading + #[allow(dead_code)] + pub fn with_composed_config(mut self, config: Arc) -> Self { + self.composed_config = Some(config); + self + } + pub async fn execute(&self) -> Result<()> { - let config = Config::load(&self.config_path)?; + // Use provided config or load fresh + let composed = match &self.composed_config { + Some(cc) => Arc::clone(cc), + None => Arc::new( + Config::load_composed(&self.config_path, self.target.as_deref()) + .context("Failed to load composed config")?, + ), + }; + let config = &composed.config; let merged_container_args = config.merge_sdk_container_args(self.container_args.as_ref()); - let content = std::fs::read_to_string(&self.config_path)?; - let parsed: serde_yaml::Value = serde_yaml::from_str(&content)?; + let parsed = &composed.merged_value; - let target = self.resolve_target_architecture(&config)?; - let extension_location = self.find_extension_in_dependency_tree(&config, &target)?; - let container_image = self.get_container_image(&config)?; + let target = self.resolve_target_architecture(config)?; + let extension_location = self.find_extension_in_dependency_tree(config, &target)?; + let container_image = self.get_container_image(config)?; // Get repo_url and repo_release from config let repo_url = config.get_sdk_repo_url(); let repo_release = config.get_sdk_repo_release(); self.execute_dnf_command( - &parsed, + parsed, &container_image, &target, repo_url.as_ref(), @@ -91,6 +120,12 @@ impl ExtDnfCommand { OutputLevel::Normal, ); } + ExtensionLocation::Remote { name, source } => { + print_info( + &format!("Found remote extension '{name}' with source: {source:?}"), + OutputLevel::Normal, + ); + } } } Ok(location) @@ -173,6 +208,7 @@ impl 
ExtDnfCommand { let extension_name = match extension_location { ExtensionLocation::Local { name, .. } => name, ExtensionLocation::External { name, .. } => name, + ExtensionLocation::Remote { name, .. } => name, }; let check_cmd = format!("test -d $AVOCADO_EXT_SYSROOTS/{extension_name}"); @@ -187,6 +223,7 @@ impl ExtDnfCommand { repo_release: repo_release.cloned(), container_args: merged_container_args.clone(), dnf_args: self.dnf_args.clone(), + sdk_arch: self.sdk_arch.clone(), ..Default::default() }; let dir_exists = container_helper.run_in_container(config).await?; @@ -222,6 +259,7 @@ impl ExtDnfCommand { let extension_name = match extension_location { ExtensionLocation::Local { name, .. } => name, ExtensionLocation::External { name, .. } => name, + ExtensionLocation::Remote { name, .. } => name, }; let setup_cmd = format!( "mkdir -p $AVOCADO_EXT_SYSROOTS/{extension_name}/var/lib && cp -rf $AVOCADO_PREFIX/rootfs/var/lib/rpm $AVOCADO_EXT_SYSROOTS/{extension_name}/var/lib" @@ -238,6 +276,7 @@ impl ExtDnfCommand { repo_release: repo_release.cloned(), container_args: merged_container_args.clone(), dnf_args: self.dnf_args.clone(), + sdk_arch: self.sdk_arch.clone(), ..Default::default() }; let setup_success = container_helper.run_in_container(config).await?; @@ -289,6 +328,7 @@ impl ExtDnfCommand { repo_release: repo_release.cloned(), container_args: merged_container_args.clone(), dnf_args: self.dnf_args.clone(), + sdk_arch: self.sdk_arch.clone(), ..Default::default() }; let success = container_helper.run_in_container(config).await?; @@ -306,6 +346,7 @@ impl ExtDnfCommand { let extension_name = match extension_location { ExtensionLocation::Local { name, .. } => name, ExtensionLocation::External { name, .. } => name, + ExtensionLocation::Remote { name, .. 
} => name, }; let installroot = format!("$AVOCADO_EXT_SYSROOTS/{extension_name}"); let command_args_str = self.command.join(" "); diff --git a/src/commands/ext/fetch.rs b/src/commands/ext/fetch.rs new file mode 100644 index 0000000..b71cc91 --- /dev/null +++ b/src/commands/ext/fetch.rs @@ -0,0 +1,259 @@ +//! Extension fetch command implementation. +//! +//! This command fetches remote extensions from various sources (repo, git, path) +//! and installs them to `$AVOCADO_PREFIX/includes//`. + +use anyhow::{Context, Result}; +use std::sync::Arc; + +use crate::utils::config::{ComposedConfig, Config, ExtensionSource}; +use crate::utils::ext_fetch::ExtensionFetcher; +use crate::utils::output::{print_info, print_success, OutputLevel}; +use crate::utils::target::resolve_target_required; + +/// Command to fetch remote extensions +pub struct ExtFetchCommand { + /// Path to configuration file + pub config_path: String, + /// Specific extension to fetch (if None, fetches all remote extensions) + pub extension: Option, + /// Enable verbose output + pub verbose: bool, + /// Force re-fetch even if already installed + pub force: bool, + /// Target architecture + pub target: Option, + /// Additional arguments to pass to the container runtime + pub container_args: Option>, + /// SDK container architecture for cross-arch emulation + pub sdk_arch: Option, + /// Run command on remote host + pub runs_on: Option, + /// NFS port for remote execution + pub nfs_port: Option, + /// Pre-composed configuration to avoid reloading + composed_config: Option>, +} + +impl ExtFetchCommand { + /// Create a new ExtFetchCommand instance + pub fn new( + config_path: String, + extension: Option, + verbose: bool, + force: bool, + target: Option, + container_args: Option>, + ) -> Self { + Self { + config_path, + extension, + verbose, + force, + target, + container_args, + sdk_arch: None, + runs_on: None, + nfs_port: None, + composed_config: None, + } + } + + /// Set SDK container architecture for 
cross-arch emulation + pub fn with_sdk_arch(mut self, sdk_arch: Option) -> Self { + self.sdk_arch = sdk_arch; + self + } + + /// Set remote execution host and NFS port + pub fn with_runs_on(mut self, runs_on: String, nfs_port: Option) -> Self { + self.runs_on = Some(runs_on); + self.nfs_port = nfs_port; + self + } + + /// Set pre-composed configuration to avoid reloading + #[allow(dead_code)] + pub fn with_composed_config(mut self, config: Arc) -> Self { + self.composed_config = Some(config); + self + } + + /// Execute the fetch command + pub async fn execute(&self) -> Result<()> { + // Use provided config or load fresh + let composed = match &self.composed_config { + Some(cc) => Arc::clone(cc), + None => Arc::new( + Config::load_composed(&self.config_path, self.target.as_deref()) + .with_context(|| format!("Failed to load config from {}", self.config_path))?, + ), + }; + let config = &composed.config; + + // Resolve target + let target = resolve_target_required(self.target.as_deref(), config)?; + + // Get container image + let container_image = config + .get_sdk_image() + .ok_or_else(|| anyhow::anyhow!("No SDK container image specified in configuration"))?; + + // Discover remote extensions (with target interpolation for extension names) + let remote_extensions = + Config::discover_remote_extensions(&self.config_path, Some(&target))?; + + if remote_extensions.is_empty() { + print_info( + "No remote extensions found in configuration.", + OutputLevel::Normal, + ); + return Ok(()); + } + + // Filter to specific extension if requested + let extensions_to_fetch: Vec<(String, ExtensionSource)> = + if let Some(ref ext_name) = self.extension { + remote_extensions + .into_iter() + .filter(|(name, _)| name == ext_name) + .collect() + } else { + remote_extensions + }; + + if extensions_to_fetch.is_empty() { + if let Some(ref ext_name) = self.extension { + return Err(anyhow::anyhow!( + "Extension '{ext_name}' not found in configuration or is not a remote extension" + )); + } 
+ return Ok(()); + } + + // Get the extensions install directory (container path) + // The directory will be created inside the container, not on the host + let extensions_dir = config.get_extensions_dir(&self.config_path, &target); + + if self.verbose { + print_info( + &format!( + "Fetching {} remote extension(s) to {}", + extensions_to_fetch.len(), + extensions_dir.display() + ), + OutputLevel::Normal, + ); + } + + // Create the fetcher + // If container_args were already passed (e.g., from sdk install), use them directly + // Otherwise, merge from config + let effective_container_args = if self.container_args.is_some() { + self.container_args.clone() + } else { + config.merge_sdk_container_args(None) + }; + + // Get the resolved src_dir for resolving relative extension paths + let src_dir = config.get_resolved_src_dir(&self.config_path); + + let fetcher = ExtensionFetcher::new( + self.config_path.clone(), + target.clone(), + container_image.to_string(), + self.verbose, + ) + .with_repo_url(config.get_sdk_repo_url()) + .with_repo_release(config.get_sdk_repo_release()) + .with_container_args(effective_container_args) + .with_sdk_arch(self.sdk_arch.clone()) + .with_src_dir(src_dir); + + // Fetch each extension + let mut fetched_count = 0; + let mut skipped_count = 0; + + for (ext_name, source) in &extensions_to_fetch { + // Check if already installed + if !self.force && ExtensionFetcher::is_extension_installed(&extensions_dir, ext_name) { + if self.verbose { + print_info( + &format!("Extension '{ext_name}' is already installed, skipping (use --force to re-fetch)"), + OutputLevel::Normal, + ); + } + skipped_count += 1; + continue; + } + + print_info( + &format!("Fetching extension '{ext_name}'..."), + OutputLevel::Normal, + ); + + match fetcher.fetch(ext_name, source, &extensions_dir).await { + Ok(install_path) => { + print_success( + &format!( + "Successfully fetched extension '{ext_name}' to {}", + install_path.display() + ), + OutputLevel::Normal, + ); + 
fetched_count += 1; + } + Err(e) => { + return Err(anyhow::anyhow!( + "Failed to fetch extension '{ext_name}': {e}" + )); + } + } + } + + // Summary + if fetched_count > 0 || skipped_count > 0 { + let mut summary_parts = Vec::new(); + if fetched_count > 0 { + summary_parts.push(format!("{fetched_count} fetched")); + } + if skipped_count > 0 { + summary_parts.push(format!("{skipped_count} skipped")); + } + print_info( + &format!("Extension fetch complete: {}", summary_parts.join(", ")), + OutputLevel::Normal, + ); + } + + Ok(()) + } + + /// Get the list of remote extensions that would be fetched + #[allow(dead_code)] + pub fn get_remote_extensions(&self) -> Result> { + Config::discover_remote_extensions(&self.config_path, self.target.as_deref()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_ext_fetch_command_creation() { + let cmd = ExtFetchCommand::new( + "avocado.yaml".to_string(), + Some("test-ext".to_string()), + true, + false, + Some("x86_64-unknown-linux-gnu".to_string()), + None, + ); + + assert_eq!(cmd.config_path, "avocado.yaml"); + assert_eq!(cmd.extension, Some("test-ext".to_string())); + assert!(cmd.verbose); + assert!(!cmd.force); + } +} diff --git a/src/commands/ext/image.rs b/src/commands/ext/image.rs index 82a6145..11bc927 100644 --- a/src/commands/ext/image.rs +++ b/src/commands/ext/image.rs @@ -1,6 +1,10 @@ +// Allow deprecated variants for backward compatibility during migration +#![allow(deprecated)] + use anyhow::{Context, Result}; +use std::sync::Arc; -use crate::utils::config::{Config, ExtensionLocation}; +use crate::utils::config::{ComposedConfig, Config, ExtensionLocation}; use crate::utils::container::{RunConfig, SdkContainer}; use crate::utils::output::{print_info, print_success, OutputLevel}; use crate::utils::stamps::{ @@ -20,6 +24,9 @@ pub struct ExtImageCommand { no_stamps: bool, runs_on: Option, nfs_port: Option, + sdk_arch: Option, + /// Pre-composed configuration to avoid reloading + composed_config: 
Option>, } impl ExtImageCommand { @@ -41,6 +48,8 @@ impl ExtImageCommand { no_stamps: false, runs_on: None, nfs_port: None, + sdk_arch: None, + composed_config: None, } } @@ -57,11 +66,30 @@ impl ExtImageCommand { self } + /// Set SDK container architecture for cross-arch emulation + pub fn with_sdk_arch(mut self, sdk_arch: Option) -> Self { + self.sdk_arch = sdk_arch; + self + } + + /// Set pre-composed configuration to avoid reloading + pub fn with_composed_config(mut self, config: Arc) -> Self { + self.composed_config = Some(config); + self + } + pub async fn execute(&self) -> Result<()> { - // Load configuration and parse raw TOML - let config = Config::load(&self.config_path)?; - let content = std::fs::read_to_string(&self.config_path)?; - let parsed: serde_yaml::Value = serde_yaml::from_str(&content)?; + // Use provided config or load fresh + let composed = match &self.composed_config { + Some(cc) => Arc::clone(cc), + None => Arc::new( + Config::load_composed(&self.config_path, self.target.as_deref()).with_context( + || format!("Failed to load composed config from {}", self.config_path), + )?, + ), + }; + let config = &composed.config; + let parsed = &composed.merged_value; // Merge container args from config and CLI let merged_container_args = config.merge_sdk_container_args(self.container_args.as_ref()); @@ -69,7 +97,7 @@ impl ExtImageCommand { // Get repo_url and repo_release from config let repo_url = config.get_sdk_repo_url(); let repo_release = config.get_sdk_repo_release(); - let target = resolve_target_required(self.target.as_deref(), &config)?; + let target = resolve_target_required(self.target.as_deref(), config)?; // Get SDK configuration from interpolated config (needed for stamp validation) let container_image = config @@ -79,7 +107,7 @@ impl ExtImageCommand { // Validate stamps before proceeding (unless --no-stamps) if !self.no_stamps { let container_helper = - SdkContainer::from_config(&self.config_path, &config)?.verbose(self.verbose); + 
SdkContainer::from_config(&self.config_path, config)?.verbose(self.verbose); // Resolve required stamps for extension image let required = resolve_required_stamps( @@ -104,6 +132,7 @@ impl ExtImageCommand { dnf_args: self.dnf_args.clone(), runs_on: self.runs_on.clone(), nfs_port: self.nfs_port, + sdk_arch: self.sdk_arch.clone(), ..Default::default() }; @@ -124,17 +153,63 @@ impl ExtImageCommand { } } - // Find extension using comprehensive lookup - let extension_location = config - .find_extension_in_dependency_tree(&self.config_path, &self.extension, &target)? - .ok_or_else(|| { - anyhow::anyhow!("Extension '{}' not found in configuration.", self.extension) - })?; + // Determine extension location by checking the composed (interpolated) config + // This is more reliable than find_extension_in_dependency_tree which reads the raw file + // and may not find templated extension names like "avocado-bsp-{{ avocado.target }}" + let extension_location = { + // First check if extension exists in the composed config's ext section + let ext_in_composed = parsed + .get("extensions") + .and_then(|e| e.get(&self.extension)); + + if let Some(ext_config) = ext_in_composed { + // Check if it has a source: field (indicating remote extension) + if ext_config.get("source").is_some() { + // Parse the source to get ExtensionSource + let source = Config::parse_extension_source(&self.extension, ext_config)? + .ok_or_else(|| { + anyhow::anyhow!( + "Extension '{}' has source field but failed to parse it", + self.extension + ) + })?; + ExtensionLocation::Remote { + name: self.extension.clone(), + source, + } + } else { + // Local extension defined in main config + ExtensionLocation::Local { + name: self.extension.clone(), + config_path: self.config_path.clone(), + } + } + } else { + // Fall back to comprehensive lookup for external extensions + config + .find_extension_in_dependency_tree(&self.config_path, &self.extension, &target)? 
+ .ok_or_else(|| { + anyhow::anyhow!( + "Extension '{}' not found in configuration.", + self.extension + ) + })? + } + }; // Get the config path where this extension is actually defined - let ext_config_path = match &extension_location { + let _ext_config_path = match &extension_location { ExtensionLocation::Local { config_path, .. } => config_path.clone(), ExtensionLocation::External { config_path, .. } => config_path.clone(), + ExtensionLocation::Remote { name, .. } => { + // Remote extensions are installed to $AVOCADO_PREFIX/includes// + let ext_install_path = + config.get_extension_install_path(&self.config_path, name, &target); + ext_install_path + .join("avocado.yaml") + .to_string_lossy() + .to_string() + } }; if self.verbose { @@ -151,16 +226,85 @@ impl ExtImageCommand { OutputLevel::Normal, ); } + ExtensionLocation::Remote { name, source } => { + print_info( + &format!("Found remote extension '{name}' with source: {source:?}"), + OutputLevel::Normal, + ); + } } } - // Get merged extension configuration with target-specific overrides - // Use the config path where the extension is actually defined for proper interpolation - let ext_config = config - .get_merged_ext_config(&self.extension, &target, &ext_config_path)? - .ok_or_else(|| { - anyhow::anyhow!("Extension '{}' not found in configuration.", self.extension) - })?; + // Get extension configuration from the composed/merged config + // For remote extensions, this comes from the merged remote extension config (already read via container) + // For local extensions, this uses get_merged_ext_config which reads from the file + let ext_config = match &extension_location { + ExtensionLocation::Remote { .. 
} => { + // Use the already-merged config from `parsed` which contains remote extension configs + // Then apply target-specific overrides manually + let ext_section = parsed + .get("extensions") + .and_then(|ext| ext.get(&self.extension)); + + if self.verbose { + if let Some(all_ext) = parsed.get("extensions") { + if let Some(ext_map) = all_ext.as_mapping() { + let ext_names: Vec<_> = + ext_map.keys().filter_map(|k| k.as_str()).collect(); + eprintln!( + "[DEBUG] Available extensions in composed config: {ext_names:?}" + ); + } + } + eprintln!( + "[DEBUG] Looking for extension '{}' in composed config, found: {}", + self.extension, + ext_section.is_some() + ); + if let Some(ext_val) = &ext_section { + eprintln!( + "[DEBUG] Extension '{}' config:\n{}", + self.extension, + serde_yaml::to_string(ext_val).unwrap_or_default() + ); + } + } + + if let Some(ext_val) = ext_section { + let base_ext = ext_val.clone(); + // Check for target-specific override within this extension + let target_override = ext_val.get(&target).cloned(); + if let Some(override_val) = target_override { + // Merge target override into base, filtering out other target sections + Some(config.merge_target_override(base_ext, override_val, &target)) + } else { + Some(base_ext) + } + } else { + None + } + } + ExtensionLocation::Local { config_path, .. } => { + // For local extensions, read from the file with proper target merging + config.get_merged_ext_config(&self.extension, &target, config_path)? + } + #[allow(deprecated)] + ExtensionLocation::External { config_path, .. } => { + // For deprecated external configs, read from the file + config.get_merged_ext_config(&self.extension, &target, config_path)? 
+ } + } + .ok_or_else(|| { + anyhow::anyhow!("Extension '{}' not found in configuration.", self.extension) + })?; + + if self.verbose { + eprintln!( + "[DEBUG] Final ext_config for '{}':\n{}", + self.extension, + serde_yaml::to_string(&ext_config).unwrap_or_default() + ); + } // Get extension version let ext_version = ext_config @@ -190,7 +334,7 @@ impl ExtImageCommand { // Use resolved target (from CLI/env) if available, otherwise fall back to config let _config_target = parsed - .get("runtime") + .get("runtimes") .and_then(|runtime| runtime.as_mapping()) .and_then(|runtime_table| { if runtime_table.len() == 1 { @@ -202,7 +346,7 @@ impl ExtImageCommand { .and_then(|runtime_config| runtime_config.get("target")) .and_then(|target| target.as_str()) .map(|s| s.to_string()); - let target_arch = resolve_target_required(self.target.as_deref(), &config)?; + let target_arch = resolve_target_required(self.target.as_deref(), config)?; // Initialize SDK container helper let container_helper = SdkContainer::new(); @@ -244,7 +388,7 @@ impl ExtImageCommand { // Write extension image stamp (unless --no-stamps) if !self.no_stamps { - let inputs = compute_ext_input_hash(&parsed, &self.extension)?; + let inputs = compute_ext_input_hash(parsed, &self.extension)?; let outputs = StampOutputs::default(); let stamp = Stamp::ext_image(&self.extension, &target, inputs, outputs); let stamp_script = generate_write_stamp_script(&stamp)?; @@ -260,11 +404,12 @@ impl ExtImageCommand { repo_release: repo_release.clone(), container_args: merged_container_args.clone(), dnf_args: self.dnf_args.clone(), + sdk_arch: self.sdk_arch.clone(), ..Default::default() }; let container_helper = - SdkContainer::from_config(&self.config_path, &config)?.verbose(self.verbose); + SdkContainer::from_config(&self.config_path, config)?.verbose(self.verbose); container_helper.run_in_container(run_config).await?; if self.verbose { diff --git a/src/commands/ext/install.rs b/src/commands/ext/install.rs index 
f225b11..f4feade 100644 --- a/src/commands/ext/install.rs +++ b/src/commands/ext/install.rs @@ -1,7 +1,11 @@ +// Allow deprecated variants for backward compatibility during migration +#![allow(deprecated)] + use anyhow::{Context, Result}; use std::path::{Path, PathBuf}; +use std::sync::Arc; -use crate::utils::config::{Config, ExtensionLocation}; +use crate::utils::config::{ComposedConfig, Config, ExtensionLocation}; use crate::utils::container::{RunConfig, SdkContainer}; use crate::utils::lockfile::{build_package_spec_with_lock, LockFile, SysrootType}; use crate::utils::output::{print_debug, print_error, print_info, print_success, OutputLevel}; @@ -22,6 +26,9 @@ pub struct ExtInstallCommand { no_stamps: bool, runs_on: Option, nfs_port: Option, + sdk_arch: Option, + /// Pre-composed configuration to avoid reloading + composed_config: Option>, } impl ExtInstallCommand { @@ -45,6 +52,8 @@ impl ExtInstallCommand { no_stamps: false, runs_on: None, nfs_port: None, + sdk_arch: None, + composed_config: None, } } @@ -61,10 +70,28 @@ impl ExtInstallCommand { self } + /// Set SDK container architecture for cross-arch emulation + pub fn with_sdk_arch(mut self, sdk_arch: Option) -> Self { + self.sdk_arch = sdk_arch; + self + } + + /// Set pre-composed configuration to avoid reloading + pub fn with_composed_config(mut self, config: Arc) -> Self { + self.composed_config = Some(config); + self + } + pub async fn execute(&self) -> Result<()> { - // Load the composed configuration (merges external configs, applies interpolation) - let composed = Config::load_composed(&self.config_path, self.target.as_deref()) - .with_context(|| format!("Failed to load composed config from {}", self.config_path))?; + // Use provided config or load fresh + let composed = match &self.composed_config { + Some(cc) => Arc::clone(cc), + None => Arc::new( + Config::load_composed(&self.config_path, self.target.as_deref()).with_context( + || format!("Failed to load composed config from {}", self.config_path), 
+ )?, + ), + }; let config = &composed.config; let parsed = &composed.merged_value; @@ -106,6 +133,14 @@ impl ExtInstallCommand { OutputLevel::Normal, ); } + ExtensionLocation::Remote { name, source } => { + print_info( + &format!( + "Found remote extension '{name}' with source: {source:?}" + ), + OutputLevel::Normal, + ); + } } } vec![(extension_name.clone(), location)] @@ -120,7 +155,7 @@ impl ExtInstallCommand { } } else { // No extension specified - install all local extensions - match parsed.get("ext") { + match parsed.get("extensions") { Some(ext_section) => match ext_section.as_mapping() { Some(table) => table .keys() @@ -170,7 +205,7 @@ impl ExtInstallCommand { // Use resolved target (from CLI/env) if available, otherwise fall back to config let _config_target = parsed - .get("runtime") + .get("runtimes") .and_then(|runtime| runtime.as_mapping()) .and_then(|runtime_table| { if runtime_table.len() == 1 { @@ -271,26 +306,12 @@ impl ExtInstallCommand { ); } - // Get the config path where this extension is actually defined - let ext_config_path = match ext_location { - ExtensionLocation::Local { config_path, .. } => config_path.clone(), - ExtensionLocation::External { config_path, .. 
} => { - // Resolve relative path against main config directory - let main_config_dir = std::path::Path::new(&self.config_path) - .parent() - .unwrap_or(std::path::Path::new(".")); - main_config_dir - .join(config_path) - .to_string_lossy() - .to_string() - } - }; - if !self .install_single_extension( config, + parsed, ext_name, - &ext_config_path, + ext_location, container_helper, container_image, target, @@ -326,6 +347,7 @@ impl ExtInstallCommand { container_args: merged_container_args.clone(), dnf_args: self.dnf_args.clone(), // runs_on handled by shared context + sdk_arch: self.sdk_arch.clone(), ..Default::default() }; @@ -354,8 +376,9 @@ impl ExtInstallCommand { async fn install_single_extension( &self, config: &Config, + parsed: &serde_yaml::Value, extension: &str, - ext_config_path: &str, + ext_location: &ExtensionLocation, container_helper: &SdkContainer, container_image: &str, target: &str, @@ -424,12 +447,26 @@ impl ExtInstallCommand { } } - // Get merged extension configuration from the correct config file - // This properly handles both local and external extensions - let ext_config = config.get_merged_ext_config(extension, target, ext_config_path)?; + // Get extension configuration from the composed/merged config + // For remote extensions, this comes from the merged remote extension config + // For local extensions, this comes from the main config's ext section + let ext_config = match ext_location { + ExtensionLocation::Remote { .. } | ExtensionLocation::Local { .. } => { + // Use the already-merged config from `parsed` which contains remote extension configs + parsed + .get("extensions") + .and_then(|ext| ext.get(extension)) + .cloned() + } + #[allow(deprecated)] + ExtensionLocation::External { config_path, .. } => { + // For deprecated external configs, read from the file + config.get_merged_ext_config(extension, target, config_path)? 
+ } + }; // Install dependencies if they exist - let dependencies = ext_config.as_ref().and_then(|ec| ec.get("dependencies")); + let dependencies = ext_config.as_ref().and_then(|ec| ec.get("packages")); let sysroot = SysrootType::Extension(extension.to_string()); @@ -477,7 +514,8 @@ impl ExtInstallCommand { // Check for extension dependency // Format: { ext: "extension-name" } or { ext: "name", config: "path" } or { ext: "name", vsn: "version" } - if let Some(ext_name) = spec_map.get("ext").and_then(|v| v.as_str()) { + if let Some(ext_name) = spec_map.get("extensions").and_then(|v| v.as_str()) + { // Check if this is a versioned extension (has vsn field) if let Some(version) = spec_map.get("vsn").and_then(|v| v.as_str()) { extension_dependencies @@ -611,6 +649,7 @@ $DNF_SDK_HOST \ dnf_args: self.dnf_args.clone(), disable_weak_dependencies, // runs_on handled by shared context + sdk_arch: self.sdk_arch.clone(), ..Default::default() }; let install_success = @@ -636,6 +675,7 @@ $DNF_SDK_HOST \ repo_release.cloned(), merged_container_args.clone(), runs_on_context, + self.sdk_arch.as_ref(), ) .await?; diff --git a/src/commands/ext/list.rs b/src/commands/ext/list.rs index 7b1bee8..7a3027a 100644 --- a/src/commands/ext/list.rs +++ b/src/commands/ext/list.rs @@ -1,23 +1,39 @@ use anyhow::Result; +use std::sync::Arc; -use crate::utils::config::load_config; +use crate::utils::config::{ComposedConfig, Config}; use crate::utils::output::{print_success, OutputLevel}; pub struct ExtListCommand { config_path: String, + /// Pre-composed configuration to avoid reloading + composed_config: Option>, } impl ExtListCommand { pub fn new(config_path: String) -> Self { - Self { config_path } + Self { + config_path, + composed_config: None, + } + } + + /// Set pre-composed configuration to avoid reloading + #[allow(dead_code)] + pub fn with_composed_config(mut self, config: Arc) -> Self { + self.composed_config = Some(config); + self } pub fn execute(&self) -> Result<()> { - let _config = 
load_config(&self.config_path)?; - let content = std::fs::read_to_string(&self.config_path)?; - let parsed: serde_yaml::Value = serde_yaml::from_str(&content)?; + // Use provided config or load fresh + let composed = match &self.composed_config { + Some(cc) => Arc::clone(cc), + None => Arc::new(Config::load_composed(&self.config_path, None)?), + }; + let parsed = &composed.merged_value; - let extensions = self.get_extensions(&parsed); + let extensions = self.get_extensions(parsed); self.display_extensions(&extensions); print_success( @@ -30,7 +46,7 @@ impl ExtListCommand { fn get_extensions(&self, parsed: &serde_yaml::Value) -> Vec { parsed - .get("ext") + .get("extensions") .and_then(|ext_section| ext_section.as_mapping()) .map(|table| { table diff --git a/src/commands/ext/mod.rs b/src/commands/ext/mod.rs index b8c5f0e..36297f5 100644 --- a/src/commands/ext/mod.rs +++ b/src/commands/ext/mod.rs @@ -3,6 +3,7 @@ pub mod checkout; pub mod clean; pub mod deps; pub mod dnf; +pub mod fetch; pub mod image; pub mod install; pub mod list; @@ -17,6 +18,7 @@ pub use clean::ExtCleanCommand; pub use deps::ExtDepsCommand; #[allow(unused_imports)] pub use dnf::ExtDnfCommand; +pub use fetch::ExtFetchCommand; pub use image::ExtImageCommand; pub use install::ExtInstallCommand; #[allow(unused_imports)] diff --git a/src/commands/ext/package.rs b/src/commands/ext/package.rs index f67fd81..421d905 100644 --- a/src/commands/ext/package.rs +++ b/src/commands/ext/package.rs @@ -1,15 +1,17 @@ +// Allow deprecated variants for backward compatibility during migration +#![allow(deprecated)] + use anyhow::{Context, Result}; +use std::sync::Arc; -use std::collections::HashMap; use std::fs; use std::path::PathBuf; -use crate::utils::config::{Config, ExtensionLocation}; -use crate::utils::container::{RunConfig, SdkContainer}; -use crate::utils::output::{print_info, print_success, OutputLevel}; -use crate::utils::stamps::{ - generate_batch_read_stamps_script, validate_stamps_batch, 
StampRequirement, -}; +use crate::utils::config::{ComposedConfig, Config, ExtensionLocation}; +use crate::utils::container::SdkContainer; +use crate::utils::output::{print_info, print_success, print_warning, OutputLevel}; +// Note: Stamp imports removed - we no longer validate build stamps for packaging +// since we now package src_dir instead of built sysroot use crate::utils::target::resolve_target_required; /// Command to package an extension sysroot into an RPM @@ -22,7 +24,13 @@ pub struct ExtPackageCommand { pub container_args: Option>, #[allow(dead_code)] pub dnf_args: Option>, + /// Note: no_stamps is kept for API compatibility but is not used for ext package + /// since we now package src_dir directly without requiring build stamps. + #[allow(dead_code)] pub no_stamps: bool, + pub sdk_arch: Option, + /// Pre-composed configuration to avoid reloading + composed_config: Option>, } impl ExtPackageCommand { @@ -44,6 +52,8 @@ impl ExtPackageCommand { container_args, dnf_args, no_stamps: false, + sdk_arch: None, + composed_config: None, } } @@ -53,58 +63,48 @@ impl ExtPackageCommand { self } - pub async fn execute(&self) -> Result<()> { - // Load configuration - let config = Config::load(&self.config_path)?; - - // Resolve target early for stamp validation - let target = resolve_target_required(self.target.as_deref(), &config)?; - - // Validate stamps before proceeding (unless --no-stamps) - // Package requires extension to be installed AND built - if !self.no_stamps { - let container_image = config - .get_sdk_image() - .context("No SDK container image specified in configuration")?; - let container_helper = - SdkContainer::from_config(&self.config_path, &config)?.verbose(self.verbose); - - let requirements = vec![ - StampRequirement::sdk_install(), - StampRequirement::ext_install(&self.extension), - StampRequirement::ext_build(&self.extension), - ]; - - let batch_script = generate_batch_read_stamps_script(&requirements); - let run_config = RunConfig { - 
container_image: container_image.to_string(), - target: target.clone(), - command: batch_script, - verbose: false, - source_environment: true, - interactive: false, - repo_url: config.get_sdk_repo_url(), - repo_release: config.get_sdk_repo_release(), - container_args: config.merge_sdk_container_args(self.container_args.as_ref()), - ..Default::default() - }; - - let output = container_helper - .run_in_container_with_output(run_config) - .await?; + /// Set SDK container architecture for cross-arch emulation + pub fn with_sdk_arch(mut self, sdk_arch: Option) -> Self { + self.sdk_arch = sdk_arch; + self + } - let validation = - validate_stamps_batch(&requirements, output.as_deref().unwrap_or(""), None); + /// Set pre-composed configuration to avoid reloading + #[allow(dead_code)] + pub fn with_composed_config(mut self, config: Arc) -> Self { + self.composed_config = Some(config); + self + } - if !validation.is_satisfied() { - let error = validation - .into_error(&format!("Cannot package extension '{}'", self.extension)); - return Err(error.into()); - } - } + pub async fn execute(&self) -> Result<()> { + // Use provided config or load fresh + let composed = match &self.composed_config { + Some(cc) => Arc::clone(cc), + None => Arc::new( + Config::load_composed(&self.config_path, self.target.as_deref()).with_context( + || format!("Failed to load composed config from {}", self.config_path), + )?, + ), + }; + let config = &composed.config; + let parsed = &composed.merged_value; + + // Resolve target + let target = resolve_target_required(self.target.as_deref(), config)?; + + // With the new src_dir packaging approach, we no longer require + // ext_install and ext_build stamps. We're packaging the source directory, + // not the built sysroot. The consumer will build the extension themselves. + // + // Issue a warning to remind users to test builds before packaging. + print_warning( + "Packaging extension source directory. 
It is recommended to run \ + 'avocado ext build' before packaging to verify the extension builds correctly.", + OutputLevel::Normal, + ); - // Read config content for extension SDK dependencies parsing - let content = std::fs::read_to_string(&self.config_path)?; + // Note: We no longer need to parse SDK dependencies since they're merged + // from the extension's config when it's installed // Find extension using comprehensive lookup let extension_location = config @@ -117,6 +117,15 @@ impl ExtPackageCommand { let ext_config_path = match &extension_location { ExtensionLocation::Local { config_path, .. } => config_path.clone(), ExtensionLocation::External { config_path, .. } => config_path.clone(), + ExtensionLocation::Remote { name, .. } => { + // Remote extensions are installed to $AVOCADO_PREFIX/includes// + let ext_install_path = + config.get_extension_install_path(&self.config_path, name, &target); + ext_install_path + .join("avocado.yaml") + .to_string_lossy() + .to_string() + } }; if self.verbose { @@ -133,20 +142,71 @@ impl ExtPackageCommand { OutputLevel::Normal, ); } + ExtensionLocation::Remote { name, source } => { + print_info( + &format!("Found remote extension '{name}' with source: {source:?}"), + OutputLevel::Normal, + ); + } } } - // Get merged extension configuration with target-specific overrides and interpolation - // Use the config path where the extension is actually defined for proper interpolation - let ext_config = config - .get_merged_ext_config(&self.extension, &target, &ext_config_path)? - .ok_or_else(|| { - anyhow::anyhow!("Extension '{}' not found in configuration.", self.extension) - })?; + // Get extension configuration from the composed/merged config + // For remote extensions, this comes from the merged remote extension config (already read via container) + // For local extensions, this uses get_merged_ext_config which reads from the file + let ext_config = match &extension_location { + ExtensionLocation::Remote { .. 
} => { + // Use the already-merged config from `parsed` which contains remote extension configs + // Then apply target-specific overrides manually + let ext_section = parsed + .get("extensions") + .and_then(|ext| ext.get(&self.extension)); + if let Some(ext_val) = ext_section { + let base_ext = ext_val.clone(); + // Check for target-specific override within this extension + let target_override = ext_val.get(&target).cloned(); + if let Some(override_val) = target_override { + // Merge target override into base, filtering out other target sections + Some(config.merge_target_override(base_ext, override_val, &target)) + } else { + Some(base_ext) + } + } else { + None + } + } + ExtensionLocation::Local { config_path, .. } => { + // For local extensions, read from the file with proper target merging + config.get_merged_ext_config(&self.extension, &target, config_path)? + } + #[allow(deprecated)] + ExtensionLocation::External { config_path, .. } => { + // For deprecated external configs, read from the file + config.get_merged_ext_config(&self.extension, &target, config_path)? + } + } + .ok_or_else(|| { + anyhow::anyhow!("Extension '{}' not found in configuration.", self.extension) + })?; + + // Also get the raw (unmerged) extension config to find all target-specific overlays + // For remote extensions, use the parsed config; for local, read from file + let raw_ext_config = match &extension_location { + ExtensionLocation::Remote { .. 
} => parsed + .get("extensions") + .and_then(|ext| ext.get(&self.extension)) + .cloned(), + _ => self.get_raw_extension_config(&ext_config_path)?, + }; // Extract RPM metadata with defaults let rpm_metadata = self.extract_rpm_metadata(&ext_config, &target)?; + // Determine which files to package + // Pass both merged config (for package_files), raw config (for all target overlays), + // and full parsed config (for sdk.compile scripts) + let package_files = self.get_package_files(&ext_config, raw_ext_config.as_ref(), parsed); + if self.verbose { print_info( &format!( @@ -155,11 +215,22 @@ impl ExtPackageCommand { ), OutputLevel::Normal, ); + print_info( + &format!("Package files: {package_files:?}"), + OutputLevel::Normal, + ); } // Create main RPM package in container + // This packages the extension's src_dir (directory containing avocado.yaml) let output_path = self - .create_rpm_package_in_container(&rpm_metadata, &config, &target) + .create_rpm_package_in_container( + &rpm_metadata, + config, + &target, + &ext_config_path, + &package_files, + ) .await?; print_success( @@ -170,45 +241,158 @@ impl ExtPackageCommand { OutputLevel::Normal, ); - // Check if extension has SDK dependencies and create SDK package if needed - let sdk_dependencies = self.get_extension_sdk_dependencies(&config, &content, &target)?; - if !sdk_dependencies.is_empty() { - if self.verbose { - print_info( - &format!( - "Extension '{}' has SDK dependencies, creating SDK package...", - self.extension - ), - OutputLevel::Normal, - ); + // Note: SDK dependencies are now merged from the extension's config when installed, + // so we no longer need to create a separate SDK package. + + Ok(()) + } + + /// Get the raw (unmerged) extension configuration from the config file. + /// + /// This is used to find all target-specific overlays that should be included + /// in the package (since the package is noarch and needs all target overlays). 
+ fn get_raw_extension_config(&self, ext_config_path: &str) -> Result> { + let content = fs::read_to_string(ext_config_path) + .with_context(|| format!("Failed to read config file: {ext_config_path}"))?; + + let parsed: serde_yaml::Value = serde_yaml::from_str(&content) + .with_context(|| format!("Failed to parse config file: {ext_config_path}"))?; + + // Get the ext section + let ext_section = parsed.get("extensions"); + if ext_section.is_none() { + return Ok(None); + } + + // Get this specific extension's config + Ok(ext_section + .and_then(|ext| ext.get(&self.extension)) + .cloned()) + } + + /// Extract overlay directory from an overlay configuration value. + fn extract_overlay_dir(overlay_value: &serde_yaml::Value) -> Option { + if let Some(overlay_dir) = overlay_value.as_str() { + // Simple string format: overlay = "directory" + Some(overlay_dir.to_string()) + } else if let Some(overlay_table) = overlay_value.as_mapping() { + // Table format: overlay = { dir = "directory", ... } + overlay_table + .get("dir") + .and_then(|d| d.as_str()) + .map(|s| s.to_string()) + } else { + None + } + } + + /// Determine which files to package based on the extension configuration. + /// + /// If `package_files` is specified in the extension config, use those patterns. 
+ /// Otherwise, default to: + /// - The avocado config file (avocado.yaml or avocado.yml) + /// - All overlay directories (base level and target-specific) + /// - Compile scripts from sdk.compile sections + /// - Install scripts from extension package dependencies + /// + /// # Arguments + /// * `ext_config` - The merged extension config (for package_files check) + /// * `raw_ext_config` - The raw unmerged extension config (to find all target-specific overlays) + /// * `full_parsed_config` - The full parsed config (to find sdk.compile scripts) + fn get_package_files( + &self, + ext_config: &serde_yaml::Value, + raw_ext_config: Option<&serde_yaml::Value>, + full_parsed_config: &serde_yaml::Value, + ) -> Vec { + // Check if package_files is explicitly defined + if let Some(package_files) = ext_config.get("package_files") { + if let Some(files_array) = package_files.as_sequence() { + let files: Vec = files_array + .iter() + .filter_map(|v| v.as_str().map(|s| s.to_string())) + .collect(); + if !files.is_empty() { + return files; + } } + } - let sdk_output_path = self - .create_sdk_rpm_package_in_container( - &rpm_metadata, - &config, - &sdk_dependencies, - &target, - ) - .await?; + // Default behavior: avocado.yaml + overlays + compile scripts + install scripts + let mut default_files = vec!["avocado.yaml".to_string()]; + let mut seen_files = std::collections::HashSet::new(); + + // If we have the raw extension config, scan for all overlays + if let Some(raw_config) = raw_ext_config { + if let Some(mapping) = raw_config.as_mapping() { + for (key, value) in mapping { + // Check if this is the base-level overlay + if key.as_str() == Some("overlay") { + if let Some(overlay_dir) = Self::extract_overlay_dir(value) { + if seen_files.insert(overlay_dir.clone()) { + default_files.push(overlay_dir); + } + } + } + // Check if this is a target-specific section with an overlay + else if let Some(target_config) = value.as_mapping() { + if let Some(overlay_value) = 
target_config.get("overlay") { + if let Some(overlay_dir) = Self::extract_overlay_dir(overlay_value) { + if seen_files.insert(overlay_dir.clone()) { + default_files.push(overlay_dir); + } + } + } + } + } + } + } else { + // Fallback: just check the merged config for overlay (current target only) + if let Some(overlay) = ext_config.get("overlay") { + if let Some(overlay_dir) = Self::extract_overlay_dir(overlay) { + if seen_files.insert(overlay_dir.clone()) { + default_files.push(overlay_dir); + } + } + } + } - print_success( - &format!( - "Successfully created SDK RPM package: {}", - sdk_output_path.display() - ), - OutputLevel::Normal, - ); + // Collect compile scripts from sdk.compile sections + if let Some(sdk_compile) = full_parsed_config + .get("sdk") + .and_then(|s| s.get("compile")) + .and_then(|c| c.as_mapping()) + { + for (_section_name, section_config) in sdk_compile { + if let Some(compile_script) = section_config.get("compile").and_then(|c| c.as_str()) + { + if seen_files.insert(compile_script.to_string()) { + default_files.push(compile_script.to_string()); + } + } + } } - Ok(()) + // Collect install scripts from extension package dependencies + // Format: extensions..packages..install = "script.sh" + if let Some(packages) = ext_config.get("packages").and_then(|p| p.as_mapping()) { + for (_dep_name, dep_spec) in packages { + if let Some(install_script) = dep_spec.get("install").and_then(|i| i.as_str()) { + if seen_files.insert(install_script.to_string()) { + default_files.push(install_script.to_string()); + } + } + } + } + + default_files } /// Extract RPM metadata from extension configuration with defaults fn extract_rpm_metadata( &self, ext_config: &serde_yaml::Value, - target: &str, + _target: &str, // Not used - extensions default to noarch ) -> Result { // Version is required let version = ext_config @@ -256,11 +440,13 @@ impl ExtPackageCommand { .unwrap_or("Unspecified") .to_string(); + // Default to noarch for extension source packages since they 
contain + // configs/code, not compiled binaries. Can be overridden in ext config. let arch = ext_config .get("arch") .and_then(|v| v.as_str()) .map(|s| s.to_string()) - .unwrap_or_else(|| self.generate_arch_from_target(target)); + .unwrap_or_else(|| "noarch".to_string()); let vendor = ext_config .get("vendor") @@ -312,11 +498,6 @@ impl ExtPackageCommand { format!("System extension package for {name}") } - /// Generate architecture from target by replacing dashes with underscores - fn generate_arch_from_target(&self, target: &str) -> String { - format!("avocado_{}", target.replace('-', "_")) - } - /// Validate semantic versioning format (X.Y.Z where X, Y, Z are non-negative integers) fn validate_semver(version: &str) -> Result<()> { let parts: Vec<&str> = version.split('.').collect(); @@ -348,12 +529,25 @@ impl ExtPackageCommand { Ok(()) } - /// Create the RPM package inside the container at $AVOCADO_PREFIX/output/extensions + /// Create the RPM package containing the extension's src_dir + /// + /// The package root (/) maps to the extension's src_dir contents. + /// This allows the extension to be installed to $AVOCADO_PREFIX/includes// + /// and its config merged into the main config. 
+ /// + /// # Arguments + /// * `metadata` - RPM metadata for the package + /// * `config` - The avocado configuration + /// * `target` - The target architecture + /// * `ext_config_path` - Path to the extension's config file + /// * `package_files` - List of files/directories to package (supports glob patterns like * and **) async fn create_rpm_package_in_container( &self, metadata: &RpmMetadata, config: &Config, target: &str, + ext_config_path: &str, + package_files: &[String], ) -> Result { let container_image = config .get_sdk_image() @@ -367,55 +561,115 @@ impl ExtPackageCommand { crate::utils::volume::VolumeManager::new("docker".to_string(), self.verbose); let volume_state = volume_manager.get_or_create_volume(&cwd).await?; + // Determine the extension's src_dir (directory containing avocado.yaml) + let ext_src_dir = std::path::Path::new(ext_config_path) + .parent() + .unwrap_or(std::path::Path::new(".")) + .to_string_lossy() + .to_string(); + + // Convert to container path (relative paths become /opt/src/) + let container_src_dir = if ext_src_dir.starts_with('/') { + ext_src_dir.clone() + } else { + format!("/opt/src/{ext_src_dir}") + }; + // Create the RPM filename let rpm_filename = format!( "{}-{}-{}.{}.rpm", metadata.name, metadata.version, metadata.release, metadata.arch ); + // Convert package_files to a space-separated string for the shell script + let package_files_str = package_files.join(" "); + // Create RPM using rpmbuild in container + // Package root (/) maps to the extension's src_dir contents let rpm_build_script = format!( r#" +set -e + +# Extension source directory +EXT_SRC_DIR="{container_src_dir}" + +# Package files patterns (may contain globs like * and **) +PACKAGE_FILES="{package_files_str}" + # Ensure output directory exists mkdir -p $AVOCADO_PREFIX/output/extensions -# Check if extension sysroot exists -if [ ! 
-d "$AVOCADO_EXT_SYSROOTS/{}" ]; then - echo "Extension sysroot not found: $AVOCADO_EXT_SYSROOTS/{}" +# Check if extension source directory exists +if [ ! -d "$EXT_SRC_DIR" ]; then + echo "Extension source directory not found: $EXT_SRC_DIR" exit 1 fi -# Count files -FILE_COUNT=$(find "$AVOCADO_EXT_SYSROOTS/{}" -type f | wc -l) -echo "Creating RPM with $FILE_COUNT files..." - -if [ "$FILE_COUNT" -eq 0 ]; then - echo "No files found in sysroot" +# Check for avocado config file +if [ ! -f "$EXT_SRC_DIR/avocado.yaml" ] && [ ! -f "$EXT_SRC_DIR/avocado.yml" ]; then + echo "No avocado.yaml/yml found in $EXT_SRC_DIR" exit 1 fi # Create temporary directory for RPM build TMPDIR=$(mktemp -d) +STAGING_DIR="$TMPDIR/staging" +mkdir -p "$STAGING_DIR" cd "$TMPDIR" # Create directory structure for rpmbuild mkdir -p BUILD RPMS SOURCES SPECS SRPMS +# Enable globstar for ** pattern support +shopt -s globstar nullglob + +# Copy files matching patterns to staging directory +cd "$EXT_SRC_DIR" +FILE_COUNT=0 +for pattern in $PACKAGE_FILES; do + # Expand the glob pattern + for file in $pattern; do + if [ -e "$file" ]; then + # Create parent directory in staging and copy + parent_dir=$(dirname "$file") + if [ "$parent_dir" != "." ]; then + mkdir -p "$STAGING_DIR/$parent_dir" + fi + cp -rp "$file" "$STAGING_DIR/$file" + if [ -f "$file" ]; then + FILE_COUNT=$((FILE_COUNT + 1)) + elif [ -d "$file" ]; then + dir_files=$(find "$file" -type f | wc -l) + FILE_COUNT=$((FILE_COUNT + dir_files)) + fi + fi + done +done +cd "$TMPDIR" + +echo "Creating RPM with $FILE_COUNT files from source directory..." 
+ +if [ "$FILE_COUNT" -eq 0 ]; then + echo "No files matched the package_files patterns: $PACKAGE_FILES" + exit 1 +fi + # Create spec file -cat > SPECS/package.spec << 'SPEC_EOF' +# Package root (/) maps to the extension's src_dir +cat > SPECS/package.spec << SPEC_EOF %define _buildhost reproducible AutoReqProv: no -Name: {} -Version: {} -Release: {} -Summary: {} -License: {} -Vendor: {} -Group: {}{} +Name: {name} +Version: {version} +Release: {release} +Summary: {summary} +License: {license} +Vendor: {vendor} +Group: {group}{url_line} %description -{} +{description} %files /* @@ -428,7 +682,9 @@ Group: {}{} %install mkdir -p %{{buildroot}} -cp -rp $AVOCADO_EXT_SYSROOTS/{}/* %{{buildroot}}/ +# Copy staged files to buildroot root +# This allows installation to \$AVOCADO_PREFIX/includes// +cp -rp "$STAGING_DIR"/* %{{buildroot}}/ %clean # Skip clean section - not needed for our use case @@ -436,45 +692,39 @@ cp -rp $AVOCADO_EXT_SYSROOTS/{}/* %{{buildroot}}/ %changelog SPEC_EOF -# Build the RPM with custom architecture target and define the arch macro -rpmbuild --define "_topdir $TMPDIR" --define "_arch {}" --target {} -bb SPECS/package.spec +# Build the RPM with custom architecture target +rpmbuild --define "_topdir $TMPDIR" --define "_arch {arch}" --target {arch} -bb SPECS/package.spec # Move RPM to output directory -mv RPMS/{}/*.rpm $AVOCADO_PREFIX/output/extensions/{} || {{ - mv RPMS/*/*.rpm $AVOCADO_PREFIX/output/extensions/{} 2>/dev/null || {{ +mv RPMS/{arch}/*.rpm $AVOCADO_PREFIX/output/extensions/{rpm_filename} || {{ + mv RPMS/*/*.rpm $AVOCADO_PREFIX/output/extensions/{rpm_filename} 2>/dev/null || {{ echo "Failed to find built RPM" exit 1 }} }} -echo "RPM created successfully: $AVOCADO_PREFIX/output/extensions/{}" +echo "RPM created successfully: $AVOCADO_PREFIX/output/extensions/{rpm_filename}" # Cleanup rm -rf "$TMPDIR" "#, - self.extension, - self.extension, - self.extension, - metadata.name, - metadata.version, - metadata.release, - metadata.summary, - 
metadata.license, - metadata.vendor, - metadata.group, - if let Some(url) = &metadata.url { + name = metadata.name, + version = metadata.version, + release = metadata.release, + summary = metadata.summary, + license = metadata.license, + vendor = metadata.vendor, + group = metadata.group, + url_line = if let Some(url) = &metadata.url { format!("\nURL: {url}") } else { String::new() }, - metadata.description, - self.extension, - metadata.arch, - metadata.arch, - metadata.arch, - rpm_filename, - rpm_filename, - rpm_filename, + description = metadata.description, + arch = metadata.arch, + rpm_filename = rpm_filename, + container_src_dir = container_src_dir, + package_files_str = package_files_str, ); // Run the RPM build in the container @@ -490,6 +740,7 @@ rm -rf "$TMPDIR" repo_release: config.get_sdk_repo_release(), container_args: merged_container_args, dnf_args: self.dnf_args.clone(), + sdk_arch: self.sdk_arch.clone(), ..Default::default() }; @@ -634,232 +885,6 @@ rm -rf "$TMPDIR" let container_id = String::from_utf8_lossy(&output.stdout).trim().to_string(); Ok(container_id) } - - /// Get SDK dependencies for the current extension - fn get_extension_sdk_dependencies( - &self, - config: &Config, - config_content: &str, - target: &str, - ) -> Result> { - let extension_sdk_deps = config - .get_extension_sdk_dependencies_with_config_path_and_target( - config_content, - Some(&self.config_path), - Some(target), - )?; - - // Return the SDK dependencies for this specific extension, or empty if none - Ok(extension_sdk_deps - .get(&self.extension) - .cloned() - .unwrap_or_default()) - } - - /// Create the SDK RPM package inside the container at $AVOCADO_PREFIX/output/extensions - async fn create_sdk_rpm_package_in_container( - &self, - metadata: &RpmMetadata, - config: &Config, - sdk_dependencies: &HashMap, - target: &str, - ) -> Result { - let container_image = config - .get_sdk_image() - .ok_or_else(|| anyhow::anyhow!("No SDK container image specified in 
configuration."))?; - - let merged_container_args = config.merge_sdk_container_args(self.container_args.as_ref()); - - // Get the volume state - let cwd = std::env::current_dir().context("Failed to get current directory")?; - let volume_manager = - crate::utils::volume::VolumeManager::new("docker".to_string(), self.verbose); - let volume_state = volume_manager.get_or_create_volume(&cwd).await?; - - // Create SDK RPM metadata with nativesdk- prefix and all_avocadosdk architecture - let sdk_metadata = RpmMetadata { - name: format!("nativesdk-{}", metadata.name), - version: metadata.version.clone(), - release: metadata.release.clone(), - summary: format!("{} SDK dependencies", metadata.summary), - description: format!("SDK dependencies for {}", metadata.description), - license: metadata.license.clone(), - arch: "all_avocadosdk".to_string(), - vendor: metadata.vendor.clone(), - group: metadata.group.clone(), - url: metadata.url.clone(), - }; - - // Create the RPM filename - let rpm_filename = format!( - "{}-{}-{}.{}.rpm", - sdk_metadata.name, sdk_metadata.version, sdk_metadata.release, sdk_metadata.arch - ); - - // Build dependency list for RPM spec - let mut requires_list = Vec::new(); - for (dep_name, dep_value) in sdk_dependencies { - let version_spec = match dep_value { - serde_yaml::Value::String(version) if version == "*" => String::new(), - serde_yaml::Value::String(version) => format!(" = {version}"), - _ => String::new(), - }; - requires_list.push(format!("{dep_name}{version_spec}")); - } - let requires_section = if requires_list.is_empty() { - String::new() - } else { - format!("Requires: {}", requires_list.join(", ")) - }; - - // Create SDK RPM using rpmbuild in container - let rpm_build_script = format!( - r#" -# Ensure output directory exists -mkdir -p $AVOCADO_PREFIX/output/extensions - -# Create temporary directory for RPM build -TMPDIR=$(mktemp -d) -cd "$TMPDIR" - -# Create directory structure for rpmbuild -mkdir -p BUILD RPMS SOURCES SPECS SRPMS - -# 
Create spec file for SDK package (no files, only dependencies) -cat > SPECS/sdk-package.spec << 'SPEC_EOF' -%define _buildhost reproducible - -Name: {} -Version: {} -Release: {} -Summary: {} -License: {} -Vendor: {} -Group: {}{} -{} - -%description -{} - -%files -# No files - this is a dependency-only package - -%prep -# No prep needed - -%build -# No build needed - -%install -# No install needed - dependency-only package - -%clean -# Skip clean section - not needed for our use case - -%changelog -SPEC_EOF - -# Build the RPM with custom architecture target and define the arch macro -rpmbuild --define "_topdir $TMPDIR" --define "_arch {}" --target {} -bb SPECS/sdk-package.spec - -# Move RPM to output directory -mv RPMS/{}/*.rpm $AVOCADO_PREFIX/output/extensions/{} || {{ - mv RPMS/*/*.rpm $AVOCADO_PREFIX/output/extensions/{} 2>/dev/null || {{ - echo "Failed to find built SDK RPM" - exit 1 - }} -}} - -echo "SDK RPM created successfully: $AVOCADO_PREFIX/output/extensions/{}" - -# Cleanup -rm -rf "$TMPDIR" -"#, - sdk_metadata.name, - sdk_metadata.version, - sdk_metadata.release, - sdk_metadata.summary, - sdk_metadata.license, - sdk_metadata.vendor, - sdk_metadata.group, - if let Some(url) = &sdk_metadata.url { - format!("\nURL: {url}") - } else { - String::new() - }, - requires_section, - sdk_metadata.description, - sdk_metadata.arch, - sdk_metadata.arch, - sdk_metadata.arch, - rpm_filename, - rpm_filename, - rpm_filename, - ); - - // Run the RPM build in the container - let container_helper = SdkContainer::new(); - let run_config = crate::utils::container::RunConfig { - container_image: container_image.to_string(), - target: target.to_string(), - command: rpm_build_script, - verbose: self.verbose, - source_environment: true, - interactive: false, - repo_url: config.get_sdk_repo_url(), - repo_release: config.get_sdk_repo_release(), - container_args: merged_container_args, - dnf_args: self.dnf_args.clone(), - ..Default::default() - }; - - if self.verbose { - print_info( 
- "Creating SDK RPM package in container...", - OutputLevel::Normal, - ); - } - - let success = container_helper.run_in_container(run_config).await?; - if !success { - return Err(anyhow::anyhow!( - "Failed to create SDK RPM package in container" - )); - } - - // RPM is now created in the container at $AVOCADO_PREFIX/output/extensions/{rpm_filename} - let container_rpm_path = format!("/opt/_avocado/{target}/output/extensions/{rpm_filename}"); - - // If --out is specified, copy the RPM to the host - if let Some(output_dir) = &self.output_dir { - self.copy_rpm_to_host( - &volume_state.volume_name, - &container_rpm_path, - output_dir, - &rpm_filename, - container_image, - ) - .await?; - - // Return the host path (canonicalized for clean display) - let host_output_path = if output_dir.starts_with('/') { - // Absolute path - PathBuf::from(output_dir).join(&rpm_filename) - } else { - // Relative path from current directory - std::env::current_dir()? - .join(output_dir) - .join(&rpm_filename) - }; - - // Canonicalize the path to resolve . and .. 
components for clean display - let canonical_path = host_output_path.canonicalize().unwrap_or(host_output_path); - Ok(canonical_path) - } else { - // Return the container path for informational purposes - Ok(PathBuf::from(container_rpm_path)) - } - } } /// RPM metadata structure @@ -933,36 +958,6 @@ mod tests { ); } - #[test] - fn test_generate_arch_from_target() { - let cmd = ExtPackageCommand::new( - "test.yaml".to_string(), - "test-ext".to_string(), - Some("x86_64-unknown-linux-gnu".to_string()), - None, - false, - None, - None, - ); - - assert_eq!( - cmd.generate_arch_from_target("x86_64-unknown-linux-gnu"), - "avocado_x86_64_unknown_linux_gnu" - ); - assert_eq!( - cmd.generate_arch_from_target("aarch64-unknown-linux-gnu"), - "avocado_aarch64_unknown_linux_gnu" - ); - assert_eq!( - cmd.generate_arch_from_target("riscv64-unknown-linux-gnu"), - "avocado_riscv64_unknown_linux_gnu" - ); - assert_eq!( - cmd.generate_arch_from_target("i686-unknown-linux-gnu"), - "avocado_i686_unknown_linux_gnu" - ); - } - #[test] fn test_extract_rpm_metadata_minimal() { let cmd = ExtPackageCommand::new( @@ -994,7 +989,7 @@ mod tests { "System extension package for test-extension" ); assert_eq!(metadata.license, "Unspecified"); - assert_eq!(metadata.arch, "avocado_x86_64_unknown_linux_gnu"); + assert_eq!(metadata.arch, "noarch"); // Extension source packages default to noarch assert_eq!(metadata.vendor, "Unspecified"); assert_eq!(metadata.group, "system-extension"); assert_eq!(metadata.url, None); @@ -1088,7 +1083,7 @@ mod tests { } #[test] - fn test_arch_generation_with_different_targets() { + fn test_arch_defaults_to_noarch_for_all_targets() { let cmd = ExtPackageCommand::new( "test.yaml".to_string(), "test-ext".to_string(), @@ -1105,148 +1100,123 @@ mod tests { serde_yaml::Value::String("1.0.0".to_string()), ); - // Test various target architectures - let test_cases = vec![ - ( - "x86_64-unknown-linux-gnu", - "avocado_x86_64_unknown_linux_gnu", - ), - ( - 
"aarch64-unknown-linux-gnu", - "avocado_aarch64_unknown_linux_gnu", - ), - ( - "riscv64-unknown-linux-gnu", - "avocado_riscv64_unknown_linux_gnu", - ), - ("i686-unknown-linux-gnu", "avocado_i686_unknown_linux_gnu"), - ( - "armv7-unknown-linux-gnueabihf", - "avocado_armv7_unknown_linux_gnueabihf", - ), + // Extension source packages should default to noarch regardless of target + // since they contain configs/code, not compiled binaries + let targets = vec![ + "x86_64-unknown-linux-gnu", + "aarch64-unknown-linux-gnu", + "riscv64-unknown-linux-gnu", + "i686-unknown-linux-gnu", + "armv7-unknown-linux-gnueabihf", + "raspberrypi4", ]; - for (target, expected_arch) in test_cases { + for target in targets { let metadata = cmd.extract_rpm_metadata(&ext_config, target).unwrap(); - assert_eq!(metadata.arch, expected_arch, "Failed for target: {target}"); + assert_eq!( + metadata.arch, "noarch", + "Extension should default to noarch for target: {target}" + ); } } - #[test] - fn test_get_extension_sdk_dependencies_empty() { - use crate::utils::config::Config; + // ======================================================================== + // Note: Stamp Dependency Tests Removed + // ======================================================================== + // The stamp validation tests have been removed because ext package now + // packages the extension's src_dir directly instead of the built sysroot. + // This means we no longer require ext_install and ext_build stamps before + // packaging - the consumer will build the extension themselves. + // + // The old behavior required: + // - SDK install stamp + // - Extension install stamp + // - Extension build stamp + // + // The new behavior only requires the extension's avocado.yaml to exist + // in its src_dir. 
+ #[test] + fn test_package_with_no_stamps_flag() { let cmd = ExtPackageCommand::new( "test.yaml".to_string(), "test-ext".to_string(), - Some("x86_64-unknown-linux-gnu".to_string()), + None, None, false, None, None, ); - // Create a minimal config without SDK dependencies - let config_content = r#" -ext: - test-ext: - version: "1.0.0" -"#; - - let config = serde_yaml::from_str::(config_content).unwrap(); - let sdk_deps = cmd - .get_extension_sdk_dependencies(&config, config_content, "x86_64-unknown-linux-gnu") - .unwrap(); + // Default should have stamps enabled (though not used for src_dir packaging) + assert!(!cmd.no_stamps); - assert!(sdk_deps.is_empty()); + // Test with_no_stamps builder + let cmd = cmd.with_no_stamps(true); + assert!(cmd.no_stamps); } #[test] - fn test_get_extension_sdk_dependencies_with_deps() { - use crate::utils::config::Config; - + fn test_get_package_files_default_no_overlay() { let cmd = ExtPackageCommand::new( "test.yaml".to_string(), "test-ext".to_string(), - Some("x86_64-unknown-linux-gnu".to_string()), + None, None, false, None, None, ); - // Create a config with SDK dependencies - let config_content = r#" -ext: - test-ext: - version: "1.0.0" - sdk: - dependencies: - nativesdk-avocado-hitl: "*" - nativesdk-openssh-ssh: "*" - nativesdk-rsync: "1.2.3" -"#; - - let config = serde_yaml::from_str::(config_content).unwrap(); - let sdk_deps = cmd - .get_extension_sdk_dependencies(&config, config_content, "x86_64-unknown-linux-gnu") - .unwrap(); - - assert_eq!(sdk_deps.len(), 3); - assert!(sdk_deps.contains_key("nativesdk-avocado-hitl")); - assert!(sdk_deps.contains_key("nativesdk-openssh-ssh")); - assert!(sdk_deps.contains_key("nativesdk-rsync")); - - // Check version values - assert_eq!( - sdk_deps["nativesdk-avocado-hitl"], - serde_yaml::Value::String("*".to_string()) - ); - assert_eq!( - sdk_deps["nativesdk-openssh-ssh"], - serde_yaml::Value::String("*".to_string()) - ); - assert_eq!( - sdk_deps["nativesdk-rsync"], - 
serde_yaml::Value::String("1.2.3".to_string()) + // Config without package_files or overlay - should default to just avocado.yaml + let mut ext_config = serde_yaml::Value::Mapping(serde_yaml::Mapping::new()); + ext_config.as_mapping_mut().unwrap().insert( + serde_yaml::Value::String("version".to_string()), + serde_yaml::Value::String("1.0.0".to_string()), ); - } - // ======================================================================== - // Stamp Dependency Tests - // ======================================================================== + // Pass empty full config since we're not testing compile script extraction + let empty_full_config = serde_yaml::Value::Mapping(serde_yaml::Mapping::new()); + let files = cmd.get_package_files(&ext_config, None, &empty_full_config); + assert_eq!(files, vec!["avocado.yaml".to_string()]); + } #[test] - fn test_package_stamp_requirements() { - use crate::utils::stamps::get_local_arch; - - // ext package requires: SDK install + ext install + ext build - // Verify the stamp requirements are correct - let requirements = [ - StampRequirement::sdk_install(), - StampRequirement::ext_install("my-ext"), - StampRequirement::ext_build("my-ext"), - ]; + fn test_get_package_files_default_with_overlay_string() { + let cmd = ExtPackageCommand::new( + "test.yaml".to_string(), + "test-ext".to_string(), + None, + None, + false, + None, + None, + ); - // Verify correct stamp paths (SDK path includes local architecture) - assert_eq!( - requirements[0].relative_path(), - format!("sdk/{}/install.stamp", get_local_arch()) + // Config with overlay as string - should include avocado.yaml and overlay dir + let mut ext_config = serde_yaml::Value::Mapping(serde_yaml::Mapping::new()); + let config_map = ext_config.as_mapping_mut().unwrap(); + config_map.insert( + serde_yaml::Value::String("version".to_string()), + serde_yaml::Value::String("1.0.0".to_string()), + ); + config_map.insert( + serde_yaml::Value::String("overlay".to_string()), + 
serde_yaml::Value::String("my-overlay".to_string()), ); - assert_eq!(requirements[1].relative_path(), "ext/my-ext/install.stamp"); - assert_eq!(requirements[2].relative_path(), "ext/my-ext/build.stamp"); - // Verify fix commands are correct - assert_eq!(requirements[0].fix_command(), "avocado sdk install"); + // Use the same config as raw config to test overlay extraction + // Pass empty full config since we're not testing compile script extraction + let empty_full_config = serde_yaml::Value::Mapping(serde_yaml::Mapping::new()); + let files = cmd.get_package_files(&ext_config, Some(&ext_config), &empty_full_config); assert_eq!( - requirements[1].fix_command(), - "avocado ext install -e my-ext" + files, + vec!["avocado.yaml".to_string(), "my-overlay".to_string()] ); - assert_eq!(requirements[2].fix_command(), "avocado ext build -e my-ext"); } #[test] - fn test_package_with_no_stamps_flag() { + fn test_get_package_files_default_with_overlay_table() { let cmd = ExtPackageCommand::new( "test.yaml".to_string(), "test-ext".to_string(), @@ -1257,192 +1227,185 @@ ext: None, ); - // Default should have stamps enabled - assert!(!cmd.no_stamps); - - // Test with_no_stamps builder - let cmd = cmd.with_no_stamps(true); - assert!(cmd.no_stamps); - } - - #[test] - fn test_package_fails_without_sdk_install() { - use crate::utils::stamps::{get_local_arch, validate_stamps_batch}; - - let requirements = vec![ - StampRequirement::sdk_install(), - StampRequirement::ext_install("my-ext"), - StampRequirement::ext_build("my-ext"), - ]; + // Config with overlay as table { dir = "..." 
} - should include avocado.yaml and overlay dir + let mut ext_config = serde_yaml::Value::Mapping(serde_yaml::Mapping::new()); + let config_map = ext_config.as_mapping_mut().unwrap(); + config_map.insert( + serde_yaml::Value::String("version".to_string()), + serde_yaml::Value::String("1.0.0".to_string()), + ); - // All stamps missing - let output = format!( - "sdk/{}/install.stamp:::null\next/my-ext/install.stamp:::null\next/my-ext/build.stamp:::null", - get_local_arch() + let mut overlay_table = serde_yaml::Mapping::new(); + overlay_table.insert( + serde_yaml::Value::String("dir".to_string()), + serde_yaml::Value::String("overlays/prod".to_string()), + ); + overlay_table.insert( + serde_yaml::Value::String("mode".to_string()), + serde_yaml::Value::String("opaque".to_string()), + ); + config_map.insert( + serde_yaml::Value::String("overlay".to_string()), + serde_yaml::Value::Mapping(overlay_table), ); - let result = validate_stamps_batch(&requirements, &output, None); - assert!(!result.is_satisfied()); - assert_eq!(result.missing.len(), 3); + // Use the same config as raw config to test overlay extraction + // Pass empty full config since we're not testing compile script extraction + let empty_full_config = serde_yaml::Value::Mapping(serde_yaml::Mapping::new()); + let files = cmd.get_package_files(&ext_config, Some(&ext_config), &empty_full_config); + assert_eq!( + files, + vec!["avocado.yaml".to_string(), "overlays/prod".to_string()] + ); } #[test] - fn test_package_fails_without_ext_build() { - use crate::utils::stamps::{ - get_local_arch, validate_stamps_batch, Stamp, StampInputs, StampOutputs, - }; - - let requirements = vec![ - StampRequirement::sdk_install(), - StampRequirement::ext_install("my-ext"), - StampRequirement::ext_build("my-ext"), - ]; - - // SDK and ext install present, but build missing - let sdk_stamp = Stamp::sdk_install( - get_local_arch(), - StampInputs::new("hash1".to_string()), - StampOutputs::default(), - ); - let ext_install_stamp = 
Stamp::ext_install( - "my-ext", - "qemux86-64", - StampInputs::new("hash2".to_string()), - StampOutputs::default(), + fn test_get_package_files_explicit_list() { + let cmd = ExtPackageCommand::new( + "test.yaml".to_string(), + "test-ext".to_string(), + None, + None, + false, + None, + None, ); - let sdk_json = serde_json::to_string(&sdk_stamp).unwrap(); - let ext_json = serde_json::to_string(&ext_install_stamp).unwrap(); + // Config with explicit package_files list + let mut ext_config = serde_yaml::Value::Mapping(serde_yaml::Mapping::new()); + let config_map = ext_config.as_mapping_mut().unwrap(); + config_map.insert( + serde_yaml::Value::String("version".to_string()), + serde_yaml::Value::String("1.0.0".to_string()), + ); - let output = format!( - "sdk/{}/install.stamp:::{}\next/my-ext/install.stamp:::{}\next/my-ext/build.stamp:::null", - get_local_arch(), - sdk_json, - ext_json + let package_files = vec![ + serde_yaml::Value::String("avocado.yaml".to_string()), + serde_yaml::Value::String("config/**".to_string()), + serde_yaml::Value::String("scripts/*.sh".to_string()), + serde_yaml::Value::String("README.md".to_string()), + ]; + config_map.insert( + serde_yaml::Value::String("package_files".to_string()), + serde_yaml::Value::Sequence(package_files), ); - let result = validate_stamps_batch(&requirements, &output, None); + // Also add overlay - should be ignored when package_files is set + config_map.insert( + serde_yaml::Value::String("overlay".to_string()), + serde_yaml::Value::String("my-overlay".to_string()), + ); - assert!(!result.is_satisfied()); - assert_eq!(result.missing.len(), 1); - assert_eq!(result.missing[0].relative_path(), "ext/my-ext/build.stamp"); + // Pass empty full config since we're not testing compile script extraction + let empty_full_config = serde_yaml::Value::Mapping(serde_yaml::Mapping::new()); + let files = cmd.get_package_files(&ext_config, Some(&ext_config), &empty_full_config); + assert_eq!( + files, + vec![ + 
"avocado.yaml".to_string(), + "config/**".to_string(), + "scripts/*.sh".to_string(), + "README.md".to_string(), + ] + ); } #[test] - fn test_package_succeeds_with_all_stamps() { - use crate::utils::stamps::{ - get_local_arch, validate_stamps_batch, Stamp, StampInputs, StampOutputs, - }; - - let requirements = vec![ - StampRequirement::sdk_install(), - StampRequirement::ext_install("my-ext"), - StampRequirement::ext_build("my-ext"), - ]; - - // All stamps present - let sdk_stamp = Stamp::sdk_install( - get_local_arch(), - StampInputs::new("hash1".to_string()), - StampOutputs::default(), - ); - let ext_install_stamp = Stamp::ext_install( - "my-ext", - "qemux86-64", - StampInputs::new("hash2".to_string()), - StampOutputs::default(), - ); - let ext_build_stamp = Stamp::ext_build( - "my-ext", - "qemux86-64", - StampInputs::new("hash3".to_string()), - StampOutputs::default(), + fn test_get_package_files_empty_list_uses_default() { + let cmd = ExtPackageCommand::new( + "test.yaml".to_string(), + "test-ext".to_string(), + None, + None, + false, + None, + None, ); - let sdk_json = serde_json::to_string(&sdk_stamp).unwrap(); - let ext_install_json = serde_json::to_string(&ext_install_stamp).unwrap(); - let ext_build_json = serde_json::to_string(&ext_build_stamp).unwrap(); - - let output = format!( - "sdk/{}/install.stamp:::{}\next/my-ext/install.stamp:::{}\next/my-ext/build.stamp:::{}", - get_local_arch(), - sdk_json, - ext_install_json, - ext_build_json + // Config with empty package_files list - should fall back to default + let mut ext_config = serde_yaml::Value::Mapping(serde_yaml::Mapping::new()); + let config_map = ext_config.as_mapping_mut().unwrap(); + config_map.insert( + serde_yaml::Value::String("version".to_string()), + serde_yaml::Value::String("1.0.0".to_string()), + ); + config_map.insert( + serde_yaml::Value::String("package_files".to_string()), + serde_yaml::Value::Sequence(vec![]), ); - let result = validate_stamps_batch(&requirements, &output, None); - - 
assert!(result.is_satisfied()); - assert_eq!(result.satisfied.len(), 3); + // Pass empty full config since we're not testing compile script extraction + let empty_full_config = serde_yaml::Value::Mapping(serde_yaml::Mapping::new()); + let files = cmd.get_package_files(&ext_config, None, &empty_full_config); + assert_eq!(files, vec!["avocado.yaml".to_string()]); } #[test] - fn test_package_clean_lifecycle() { - use crate::utils::stamps::{ - get_local_arch, validate_stamps_batch, Stamp, StampInputs, StampOutputs, - }; - - let requirements = vec![ - StampRequirement::sdk_install(), - StampRequirement::ext_install("gpu-driver"), - StampRequirement::ext_build("gpu-driver"), - ]; - - // Before clean: all stamps present - let sdk_stamp = Stamp::sdk_install( - get_local_arch(), - StampInputs::new("hash1".to_string()), - StampOutputs::default(), - ); - let ext_install = Stamp::ext_install( - "gpu-driver", - "qemux86-64", - StampInputs::new("hash2".to_string()), - StampOutputs::default(), - ); - let ext_build = Stamp::ext_build( - "gpu-driver", - "qemux86-64", - StampInputs::new("hash3".to_string()), - StampOutputs::default(), + fn test_get_package_files_with_target_specific_overlays() { + let cmd = ExtPackageCommand::new( + "test.yaml".to_string(), + "test-ext".to_string(), + None, + None, + false, + None, + None, ); - let sdk_json = serde_json::to_string(&sdk_stamp).unwrap(); - let install_json = serde_json::to_string(&ext_install).unwrap(); - let build_json = serde_json::to_string(&ext_build).unwrap(); + // Create a raw config that simulates target-specific overlays + // like: ext.test-ext.reterminal.overlay and ext.test-ext.reterminal-dm.overlay + let mut raw_config = serde_yaml::Value::Mapping(serde_yaml::Mapping::new()); + let config_map = raw_config.as_mapping_mut().unwrap(); - let output_before = format!( - "sdk/{}/install.stamp:::{}\next/gpu-driver/install.stamp:::{}\next/gpu-driver/build.stamp:::{}", - get_local_arch(), - sdk_json, - install_json, - build_json + 
config_map.insert( + serde_yaml::Value::String("version".to_string()), + serde_yaml::Value::String("1.0.0".to_string()), ); - let result_before = validate_stamps_batch(&requirements, &output_before, None); - assert!( - result_before.is_satisfied(), - "Should be satisfied before clean" + // Target: reterminal with overlay + let mut reterminal_config = serde_yaml::Mapping::new(); + reterminal_config.insert( + serde_yaml::Value::String("overlay".to_string()), + serde_yaml::Value::String("overlays/reterminal".to_string()), + ); + config_map.insert( + serde_yaml::Value::String("reterminal".to_string()), + serde_yaml::Value::Mapping(reterminal_config), ); - // After ext clean: SDK still there, ext stamps gone (simulating rm -rf .stamps/ext/gpu-driver) - let output_after = format!( - "sdk/{}/install.stamp:::{}\next/gpu-driver/install.stamp:::null\next/gpu-driver/build.stamp:::null", - get_local_arch(), - sdk_json + // Target: reterminal-dm with overlay + let mut reterminal_dm_config = serde_yaml::Mapping::new(); + reterminal_dm_config.insert( + serde_yaml::Value::String("overlay".to_string()), + serde_yaml::Value::String("overlays/reterminal-dm".to_string()), + ); + config_map.insert( + serde_yaml::Value::String("reterminal-dm".to_string()), + serde_yaml::Value::Mapping(reterminal_dm_config), ); - let result_after = validate_stamps_batch(&requirements, &output_after, None); - assert!(!result_after.is_satisfied(), "Should fail after clean"); - assert_eq!( - result_after.missing.len(), - 2, - "Both ext stamps should be missing" + // Target: icam-540 without overlay (should not add anything) + let mut icam_config = serde_yaml::Mapping::new(); + icam_config.insert( + serde_yaml::Value::String("some_other_setting".to_string()), + serde_yaml::Value::String("value".to_string()), ); - assert!( - result_after.satisfied.len() == 1, - "Only SDK should be satisfied" + config_map.insert( + serde_yaml::Value::String("icam-540".to_string()), + serde_yaml::Value::Mapping(icam_config), ); 
+ + // Merged config (for a specific target, but package_files not set) + let merged_config = serde_yaml::Value::Mapping(serde_yaml::Mapping::new()); + + // Pass empty full config since we're not testing compile script extraction + let empty_full_config = serde_yaml::Value::Mapping(serde_yaml::Mapping::new()); + let files = cmd.get_package_files(&merged_config, Some(&raw_config), &empty_full_config); + + // Should include avocado.yaml and both target-specific overlays + assert!(files.contains(&"avocado.yaml".to_string())); + assert!(files.contains(&"overlays/reterminal".to_string())); + assert!(files.contains(&"overlays/reterminal-dm".to_string())); + assert_eq!(files.len(), 3); } } diff --git a/src/commands/fetch.rs b/src/commands/fetch.rs index 3255534..0c61881 100644 --- a/src/commands/fetch.rs +++ b/src/commands/fetch.rs @@ -1,10 +1,14 @@ +// Allow deprecated variants for backward compatibility during migration +#![allow(deprecated)] + use anyhow::{Context, Result}; use std::collections::HashSet; +use std::sync::Arc; use tokio::process::Command as AsyncCommand; use crate::commands::install::ExtensionDependency; use crate::utils::{ - config::Config, + config::{ComposedConfig, Config}, container::{RunConfig, SdkContainer}, output::{print_error, print_info, print_success, OutputLevel}, target::resolve_target_required, @@ -29,6 +33,9 @@ pub struct FetchCommand { target: Option, container_args: Option>, dnf_args: Option>, + sdk_arch: Option, + /// Pre-composed configuration to avoid reloading + composed_config: Option>, } impl FetchCommand { @@ -49,17 +56,38 @@ impl FetchCommand { target, container_args, dnf_args, + sdk_arch: None, + composed_config: None, } } + /// Set SDK container architecture for cross-arch emulation + pub fn with_sdk_arch(mut self, sdk_arch: Option) -> Self { + self.sdk_arch = sdk_arch; + self + } + + /// Set pre-composed configuration to avoid reloading + #[allow(dead_code)] + pub fn with_composed_config(mut self, config: Arc) -> Self { + 
self.composed_config = Some(config); + self + } + pub async fn execute(&self) -> Result<()> { - // Load configuration - let config = Config::load(&self.config_path)?; - let content = std::fs::read_to_string(&self.config_path)?; - let config_toml: serde_yaml::Value = serde_yaml::from_str(&content)?; + // Use provided config or load fresh + let composed = match &self.composed_config { + Some(cc) => Arc::clone(cc), + None => Arc::new( + Config::load_composed(&self.config_path, self.target.as_deref()) + .with_context(|| format!("Failed to load config from {}", self.config_path))?, + ), + }; + let config = &composed.config; + let config_toml = &composed.merged_value; // Resolve target architecture - let target_arch = resolve_target_required(self.target.as_deref(), &config)?; + let target_arch = resolve_target_required(self.target.as_deref(), config)?; // Get container configuration from interpolated config let container_image = config @@ -99,7 +127,7 @@ impl FetchCommand { repo_release: repo_release.as_ref(), container_args: &merged_container_args, }; - self.fetch_extension_metadata(&config_toml, extension, &container_config) + self.fetch_extension_metadata(config_toml, extension, &container_config) .await?; } (None, Some(runtime)) => { @@ -112,7 +140,7 @@ impl FetchCommand { repo_release: repo_release.as_ref(), container_args: &merged_container_args, }; - self.fetch_runtime_metadata(&config_toml, runtime, &container_config) + self.fetch_runtime_metadata(config_toml, runtime, &container_config) .await?; } (None, None) => { @@ -125,7 +153,7 @@ impl FetchCommand { repo_release: repo_release.as_ref(), container_args: &merged_container_args, }; - self.fetch_all_metadata(&config_toml, &container_config) + self.fetch_all_metadata(config_toml, &container_config) .await?; } (Some(_), Some(_)) => { @@ -152,7 +180,7 @@ impl FetchCommand { // Check if extension exists in configuration if config_toml - .get("ext") + .get("extensions") .and_then(|ext| ext.get(extension)) .is_none() 
{ @@ -174,6 +202,7 @@ impl FetchCommand { repo_release: container_config.repo_release.cloned(), container_args: container_config.container_args.clone(), dnf_args: self.dnf_args.clone(), + sdk_arch: self.sdk_arch.clone(), ..Default::default() }; let sysroot_exists = container_config.helper.run_in_container(run_config).await?; @@ -224,6 +253,7 @@ $DNF_SDK_HOST \ repo_release: container_config.repo_release.cloned(), container_args: container_config.container_args.clone(), dnf_args: self.dnf_args.clone(), + sdk_arch: self.sdk_arch.clone(), ..Default::default() }; let success = container_config.helper.run_in_container(run_config).await?; @@ -254,7 +284,7 @@ $DNF_SDK_HOST \ // Check if runtime exists in configuration if config_toml - .get("runtime") + .get("runtimes") .and_then(|rt| rt.get(runtime)) .is_none() { @@ -277,6 +307,7 @@ $DNF_SDK_HOST \ repo_release: container_config.repo_release.cloned(), container_args: container_config.container_args.clone(), dnf_args: self.dnf_args.clone(), + sdk_arch: self.sdk_arch.clone(), ..Default::default() }; let sysroot_exists = container_config.helper.run_in_container(run_config).await?; @@ -326,6 +357,7 @@ $DNF_SDK_HOST \ repo_release: container_config.repo_release.cloned(), container_args: container_config.container_args.clone(), dnf_args: self.dnf_args.clone(), + sdk_arch: self.sdk_arch.clone(), ..Default::default() }; let success = container_config.helper.run_in_container(run_config).await?; @@ -363,7 +395,10 @@ $DNF_SDK_HOST \ self.fetch_sdk_target_metadata(container_config).await?; // 4. Fetch all extension metadata (including nested external extensions) - if let Some(extensions) = config_toml.get("ext").and_then(|ext| ext.as_mapping()) { + if let Some(extensions) = config_toml + .get("extensions") + .and_then(|ext| ext.as_mapping()) + { for extension_name_val in extensions.keys() { if let Some(extension_name) = extension_name_val.as_str() { if let Err(e) = self @@ -404,7 +439,7 @@ $DNF_SDK_HOST \ } // 5. 
Fetch all runtime metadata - if let Some(runtimes) = config_toml.get("runtime").and_then(|rt| rt.as_mapping()) { + if let Some(runtimes) = config_toml.get("runtimes").and_then(|rt| rt.as_mapping()) { for runtime_name_val in runtimes.keys() { if let Some(runtime_name) = runtime_name_val.as_str() { if let Err(e) = self @@ -461,6 +496,7 @@ $DNF_SDK_HOST $DNF_SDK_HOST_OPTS $DNF_SDK_HOST_REPO_CONF \ repo_release: container_config.repo_release.cloned(), container_args: container_config.container_args.clone(), dnf_args: self.dnf_args.clone(), + sdk_arch: self.sdk_arch.clone(), ..Default::default() }; let success = container_config.helper.run_in_container(run_config).await?; @@ -492,6 +528,7 @@ $DNF_SDK_HOST $DNF_SDK_HOST_OPTS $DNF_SDK_HOST_REPO_CONF \ repo_release: container_config.repo_release.cloned(), container_args: container_config.container_args.clone(), dnf_args: self.dnf_args.clone(), + sdk_arch: self.sdk_arch.clone(), ..Default::default() }; let rootfs_exists = container_config.helper.run_in_container(run_config).await?; @@ -540,6 +577,7 @@ $DNF_SDK_HOST \ repo_release: container_config.repo_release.cloned(), container_args: container_config.container_args.clone(), dnf_args: self.dnf_args.clone(), + sdk_arch: self.sdk_arch.clone(), ..Default::default() }; let success = container_config.helper.run_in_container(run_config).await?; @@ -571,6 +609,7 @@ $DNF_SDK_HOST \ repo_release: container_config.repo_release.cloned(), container_args: container_config.container_args.clone(), dnf_args: self.dnf_args.clone(), + sdk_arch: self.sdk_arch.clone(), ..Default::default() }; let target_sysroot_exists = container_config.helper.run_in_container(run_config).await?; @@ -619,6 +658,7 @@ $DNF_SDK_HOST \ repo_release: container_config.repo_release.cloned(), container_args: container_config.container_args.clone(), dnf_args: self.dnf_args.clone(), + sdk_arch: self.sdk_arch.clone(), ..Default::default() }; let success = container_config.helper.run_in_container(run_config).await?; @@ 
-686,11 +726,14 @@ $DNF_SDK_HOST \ let mut visited = HashSet::new(); // Find external extensions from main config - if let Some(extensions) = config_toml.get("ext").and_then(|ext| ext.as_mapping()) { + if let Some(extensions) = config_toml + .get("extensions") + .and_then(|ext| ext.as_mapping()) + { for (ext_name_val, ext_config) in extensions { if let Some(ext_name) = ext_name_val.as_str() { if let Some(dependencies) = - ext_config.get("dependencies").and_then(|d| d.as_mapping()) + ext_config.get("packages").and_then(|d| d.as_mapping()) { for (_dep_name, dep_spec) in dependencies { // Check for external extension dependency @@ -780,12 +823,12 @@ $DNF_SDK_HOST \ // Check if this external extension has dependencies if let Some(dependencies) = extension_config - .get("dependencies") + .get("packages") .and_then(|d| d.as_mapping()) { for (_dep_name, dep_spec) in dependencies { // Check for nested extension dependency - if let Some(nested_ext_name) = dep_spec.get("ext").and_then(|v| v.as_str()) { + if let Some(nested_ext_name) = dep_spec.get("extensions").and_then(|v| v.as_str()) { // Check if this is a nested external extension (has config field) if let Some(nested_external_config) = dep_spec.get("config").and_then(|v| v.as_str()) @@ -856,6 +899,7 @@ $DNF_SDK_HOST \ repo_release: container_config.repo_release.cloned(), container_args: container_config.container_args.clone(), dnf_args: self.dnf_args.clone(), + sdk_arch: self.sdk_arch.clone(), ..Default::default() }; let sysroot_exists = container_config.helper.run_in_container(run_config).await?; @@ -910,6 +954,7 @@ $DNF_SDK_HOST \ repo_release: container_config.repo_release.cloned(), container_args: container_config.container_args.clone(), dnf_args: self.dnf_args.clone(), + sdk_arch: self.sdk_arch.clone(), ..Default::default() }; let success = container_config.helper.run_in_container(run_config).await?; diff --git a/src/commands/hitl/server.rs b/src/commands/hitl/server.rs index ddae204..22b2ce0 100644 --- 
a/src/commands/hitl/server.rs +++ b/src/commands/hitl/server.rs @@ -1,4 +1,4 @@ -use crate::utils::config::Config; +use crate::utils::config::{ComposedConfig, Config}; use crate::utils::container::{is_docker_desktop, RunConfig, SdkContainer}; use crate::utils::nfs_server::{NfsExport, HITL_DEFAULT_PORT}; use crate::utils::output::{print_debug, print_info, OutputLevel}; @@ -6,13 +6,14 @@ use crate::utils::stamps::{ generate_batch_read_stamps_script, validate_stamps_batch, StampRequirement, }; use crate::utils::target::validate_and_log_target; -use anyhow::Result; +use anyhow::{Context, Result}; use clap::Args; use std::path::PathBuf; +use std::sync::Arc; #[derive(Args, Debug)] pub struct HitlServerCommand { - /// Path to the avocado.toml configuration file + /// Path to the avocado.yaml configuration file #[arg(short, long, default_value = "avocado.yaml")] pub config_path: String, @@ -42,15 +43,38 @@ pub struct HitlServerCommand { /// Disable stamp validation #[arg(long)] pub no_stamps: bool, + + /// SDK container architecture for cross-arch emulation + #[arg(skip)] + pub sdk_arch: Option, + + /// Pre-composed configuration to avoid reloading + #[arg(skip)] + pub composed_config: Option>, } impl HitlServerCommand { + /// Set pre-composed configuration to avoid reloading + #[allow(dead_code)] + pub fn with_composed_config(mut self, config: Arc) -> Self { + self.composed_config = Some(config); + self + } + pub async fn execute(&self) -> Result<()> { - let config = Config::load(&self.config_path)?; + // Use provided config or load fresh + let composed = match &self.composed_config { + Some(cc) => Arc::clone(cc), + None => Arc::new( + Config::load_composed(&self.config_path, self.target.as_deref()) + .with_context(|| format!("Failed to load config from {}", self.config_path))?, + ), + }; + let config = &composed.config; let container_helper = SdkContainer::new().verbose(self.verbose); // Use shared target resolution logic with early validation and logging - let target = 
validate_and_log_target(self.target.as_deref(), &config)?; + let target = validate_and_log_target(self.target.as_deref(), config)?; // Get SDK configuration let (container_image, repo_url, repo_release) = if let Some(sdk_config) = &config.sdk { @@ -96,6 +120,7 @@ impl HitlServerCommand { interactive: false, repo_url: repo_url.cloned(), repo_release: repo_release.cloned(), + sdk_arch: self.sdk_arch.clone(), ..Default::default() }; @@ -198,6 +223,7 @@ impl HitlServerCommand { repo_release: repo_release.cloned(), container_args: Some(container_args), dnf_args: self.dnf_args.clone(), + sdk_arch: self.sdk_arch.clone(), ..Default::default() }; @@ -307,6 +333,8 @@ mod tests { verbose: false, port: None, no_stamps: false, + sdk_arch: None, + composed_config: None, }; let commands = cmd.generate_export_setup_commands(); @@ -329,6 +357,8 @@ mod tests { verbose: false, port: Some(2049), no_stamps: false, + sdk_arch: None, + composed_config: None, }; let commands = cmd.generate_export_setup_commands(); @@ -349,6 +379,8 @@ mod tests { verbose: true, port: Some(3049), no_stamps: false, + sdk_arch: None, + composed_config: None, }; let commands = cmd.generate_export_setup_commands(); @@ -370,6 +402,8 @@ mod tests { verbose: false, port: Some(4049), no_stamps: false, + sdk_arch: None, + composed_config: None, }; let commands = cmd.generate_export_setup_commands(); @@ -418,6 +452,8 @@ mod tests { verbose: false, port: None, no_stamps: true, + sdk_arch: None, + composed_config: None, }; // With no_stamps, validation should be skipped @@ -436,6 +472,8 @@ mod tests { verbose: false, port: None, no_stamps: false, + sdk_arch: None, + composed_config: None, }; // With no extensions, the stamp validation loop is skipped entirely diff --git a/src/commands/init.rs b/src/commands/init.rs index 7b9cbaf..b0c5a0d 100644 --- a/src/commands/init.rs +++ b/src/commands/init.rs @@ -278,16 +278,16 @@ impl InitCommand { /// Checks if a target is supported in the given TOML content. 
/// /// # Arguments - /// * `toml_content` - The content of the avocado.yaml file + /// * `yaml_content` - The content of the avocado.yaml file /// * `target` - The target to check for /// /// # Returns /// * `Ok(true)` if the target is supported or if supported_targets contains "*" /// * `Ok(false)` if the target is not supported - /// * `Err` if the TOML cannot be parsed or doesn't have supported_targets - fn is_target_supported(toml_content: &str, target: &str) -> Result { - let config: toml::Value = - toml::from_str(toml_content).with_context(|| "Failed to parse avocado.yaml")?; + /// * `Err` if the YAML cannot be parsed or doesn't have supported_targets + fn is_target_supported(yaml_content: &str, target: &str) -> Result { + let config: serde_yaml::Value = + serde_yaml::from_str(yaml_content).with_context(|| "Failed to parse avocado.yaml")?; let supported_targets_value = config.get("supported_targets").ok_or_else(|| { anyhow::anyhow!("Reference avocado.yaml missing 'supported_targets' field") @@ -299,7 +299,7 @@ impl InitCommand { } // Handle supported_targets as an array - if let Some(array) = supported_targets_value.as_array() { + if let Some(array) = supported_targets_value.as_sequence() { // Check if "*" is in supported_targets (means all targets supported) let has_wildcard = array.iter().any(|v| v.as_str() == Some("*")); @@ -319,38 +319,38 @@ impl InitCommand { /// Updates the default_target in the avocado.yaml file. 
/// /// # Arguments - /// * `toml_path` - Path to the avocado.yaml file + /// * `yaml_path` - Path to the avocado.yaml file /// * `new_target` - The new target to set as default /// /// # Returns /// * `Ok(())` if successful /// * `Err` if the file cannot be read, parsed, or written - fn update_default_target(toml_path: &Path, new_target: &str) -> Result<()> { - let content = fs::read_to_string(toml_path) - .with_context(|| format!("Failed to read '{}'", toml_path.display()))?; + fn update_default_target(yaml_path: &Path, new_target: &str) -> Result<()> { + let content = fs::read_to_string(yaml_path) + .with_context(|| format!("Failed to read '{}'", yaml_path.display()))?; - // Parse as toml::Value to preserve structure - let mut config: toml::Value = - toml::from_str(&content).with_context(|| "Failed to parse avocado.yaml")?; + // Parse as serde_yaml::Value to preserve structure + let mut config: serde_yaml::Value = + serde_yaml::from_str(&content).with_context(|| "Failed to parse avocado.yaml")?; // Update the default_target field - if let Some(table) = config.as_table_mut() { - table.insert( - "default_target".to_string(), - toml::Value::String(new_target.to_string()), + if let Some(mapping) = config.as_mapping_mut() { + mapping.insert( + serde_yaml::Value::String("default_target".to_string()), + serde_yaml::Value::String(new_target.to_string()), ); } else { - anyhow::bail!("avocado.yaml is not a valid TOML table"); + anyhow::bail!("avocado.yaml is not a valid YAML mapping"); } // Write back to file - let updated_content = toml::to_string_pretty(&config) - .with_context(|| "Failed to serialize updated config")?; + let updated_content = + serde_yaml::to_string(&config).with_context(|| "Failed to serialize updated config")?; - fs::write(toml_path, updated_content).with_context(|| { + fs::write(yaml_path, updated_content).with_context(|| { format!( "Failed to write updated config to '{}'", - toml_path.display() + yaml_path.display() ) })?; @@ -913,19 +913,18 @@ mod 
tests { assert!(content.contains("distro:")); assert!(content.contains("channel: apollo-edge")); assert!(content.contains("version: 0.1.0")); - assert!(content.contains("runtime:")); + assert!(content.contains("runtimes:")); assert!(content.contains("dev:")); - assert!(content.contains("dependencies:")); - assert!(content.contains("avocado-img-bootfiles: \"*\"")); - assert!(content.contains("avocado-img-rootfs: \"*\"")); - assert!(content.contains("avocado-img-initramfs: \"*\"")); + assert!(content.contains("packages:")); + assert!(content.contains("avocado-img-bootfiles:")); + assert!(content.contains("avocado-img-rootfs:")); + assert!(content.contains("avocado-img-initramfs:")); assert!(content.contains("avocado-ext-dev:")); - assert!(content.contains("ext: avocado-ext-dev")); - assert!(content.contains("vsn: \"*\"")); + assert!(content.contains("type: package")); assert!( content.contains("image: \"docker.io/avocadolinux/sdk:{{ config.distro.channel }}\"") ); - assert!(content.contains("ext:")); + assert!(content.contains("extensions:")); assert!(content.contains("app:")); assert!(content.contains("- sysext")); assert!(content.contains("- confext")); diff --git a/src/commands/install.rs b/src/commands/install.rs index 776a63b..8d6d4d6 100644 --- a/src/commands/install.rs +++ b/src/commands/install.rs @@ -1,7 +1,11 @@ //! Install command implementation that runs SDK, extension, and runtime installs. 
+// Allow deprecated variants for backward compatibility during migration +#![allow(deprecated)] + use anyhow::{Context, Result}; use std::path::PathBuf; +use std::sync::Arc; use crate::commands::{ ext::ExtInstallCommand, runtime::RuntimeInstallCommand, sdk::SdkInstallCommand, @@ -15,14 +19,17 @@ use crate::utils::{ target::validate_and_log_target, }; -/// Represents an extension dependency that can be either local or external +/// Represents an extension dependency #[derive(Debug, Clone, PartialEq, Eq, Hash)] +#[allow(dead_code)] // Deprecated variants kept for backward compatibility pub enum ExtensionDependency { - /// Extension defined in the main config file + /// Extension defined in the config (local or fetched remote) Local(String), - /// Extension defined in an external config file + /// DEPRECATED: Extension from an external config file + /// Use source: path in the extensions section instead External { name: String, config_path: String }, - /// Extension resolved via DNF with a version specification + /// DEPRECATED: Extension resolved via DNF with a version specification + /// Use source: package in the extensions section instead Versioned { name: String, version: String }, } @@ -48,6 +55,10 @@ pub struct InstallCommand { pub runs_on: Option, /// NFS port for remote execution pub nfs_port: Option, + /// SDK container architecture for cross-arch emulation + pub sdk_arch: Option, + /// Pre-composed configuration to avoid reloading + composed_config: Option>, } impl InstallCommand { @@ -72,6 +83,8 @@ impl InstallCommand { no_stamps: false, runs_on: None, nfs_port: None, + sdk_arch: None, + composed_config: None, } } @@ -88,19 +101,35 @@ impl InstallCommand { self } + /// Set SDK container architecture for cross-arch emulation + pub fn with_sdk_arch(mut self, sdk_arch: Option) -> Self { + self.sdk_arch = sdk_arch; + self + } + + /// Set pre-composed configuration to avoid reloading + #[allow(dead_code)] + pub fn with_composed_config(mut self, config: Arc) -> 
Self { + self.composed_config = Some(config); + self + } + /// Execute the install command pub async fn execute(&self) -> Result<()> { - // Early target validation - load basic config first to validate target - let basic_config = Config::load(&self.config_path) - .with_context(|| format!("Failed to load config from {}", self.config_path))?; - let _target = validate_and_log_target(self.target.as_deref(), &basic_config)?; - - // Load the composed configuration (merges external configs, applies interpolation) - let composed = Config::load_composed(&self.config_path, self.target.as_deref()) - .with_context(|| format!("Failed to load composed config from {}", self.config_path))?; + // Use provided config or load fresh + let composed = match &self.composed_config { + Some(cc) => Arc::clone(cc), + None => Arc::new( + Config::load_composed(&self.config_path, self.target.as_deref()).with_context( + || format!("Failed to load composed config from {}", self.config_path), + )?, + ), + }; let config = &composed.config; - let parsed = &composed.merged_value; + // parsed from initial load is not used after sdk install reloads config + let _parsed = &composed.merged_value; + let _target = validate_and_log_target(self.target.as_deref(), config)?; print_info( "Starting comprehensive install process...", @@ -108,7 +137,7 @@ impl InstallCommand { ); // Load lock file for reproducible builds (used for versioned extensions in this command) - let src_dir = config + let _src_dir = config .get_resolved_src_dir(&self.config_path) .unwrap_or_else(|| { PathBuf::from(&self.config_path) @@ -117,8 +146,8 @@ impl InstallCommand { .to_path_buf() }); - // We'll load the lock file lazily when needed (for external/versioned extensions) - let mut lock_file; + // Note: Lock file loading for external/versioned extensions has been removed + // as those deprecated code paths now error out with migration messages // 1. 
Install SDK dependencies print_info("Step 1/3: Installing SDK dependencies", OutputLevel::Normal); @@ -131,12 +160,29 @@ impl InstallCommand { self.dnf_args.clone(), ) .with_no_stamps(self.no_stamps) - .with_runs_on(self.runs_on.clone(), self.nfs_port); + .with_runs_on(self.runs_on.clone(), self.nfs_port) + .with_sdk_arch(self.sdk_arch.clone()) + .with_composed_config(Arc::clone(&composed)); sdk_install_cmd .execute() .await .with_context(|| "Failed to install SDK dependencies")?; + // Reload composed config after SDK install to pick up newly fetched remote extensions + // SDK install includes ext fetch which downloads remote extensions to $AVOCADO_PREFIX/includes/ + let composed = Arc::new( + Config::load_composed(&self.config_path, self.target.as_deref()).with_context( + || { + format!( + "Failed to reload composed config from {} after SDK install", + self.config_path + ) + }, + )?, + ); + let config = &composed.config; + let parsed = &composed.merged_value; + // 2. Install extension dependencies print_info( "Step 2/3: Installing extension dependencies", @@ -167,7 +213,9 @@ impl InstallCommand { self.dnf_args.clone(), ) .with_no_stamps(self.no_stamps) - .with_runs_on(self.runs_on.clone(), self.nfs_port); + .with_runs_on(self.runs_on.clone(), self.nfs_port) + .with_sdk_arch(self.sdk_arch.clone()) + .with_composed_config(Arc::clone(&composed)); ext_install_cmd.execute().await.with_context(|| { format!( "Failed to install extension dependencies for '{extension_name}'" @@ -178,38 +226,31 @@ impl InstallCommand { name, config_path: ext_config_path, } => { - if self.verbose { - print_info( - &format!("Installing external extension dependencies for '{name}' from config '{ext_config_path}'"), - OutputLevel::Normal, - ); - } - - // Reload lock file from disk to get latest state from previous installs - lock_file = LockFile::load(&src_dir)?; - - // Install external extension to ${AVOCADO_PREFIX}/extensions/ - self.install_external_extension(config, &self.config_path, name, 
ext_config_path, &_target, &mut lock_file).await.with_context(|| {
-                        format!("Failed to install external extension '{name}' from config '{ext_config_path}'")
-                    })?;
+                    // DEPRECATED: config: syntax is no longer supported
+                    // Users should migrate to the new source-based approach in the extensions section
+                    return Err(anyhow::anyhow!(
+                        "Deprecated 'config:' syntax found for extension '{name}' with config '{ext_config_path}'.\n\n\
+                         The 'config:' syntax for external extensions is no longer supported.\n\n\
+                         To use extensions from another path, define them in the 'extensions' section with a 'source' field:\n\n\
+                         extensions:\n  {name}:\n    source:\n      type: path\n      path: \"{ext_config_path}\"\n\n\
+                         Then reference the extension in runtimes packages simply by name:\n\n\
+                         runtimes:\n  your-runtime:\n    packages:\n      {name}: ext\n\n\
+                         Path-based extensions are automatically processed during config loading."
+                    ));
                 }
                 ExtensionDependency::Versioned { name, version } => {
-                    if self.verbose {
-                        print_info(
-                            &format!(
-                                "Installing versioned extension '{name}' version '{version}'"
-                            ),
-                            OutputLevel::Normal,
-                        );
-                    }
-
-                    // Reload lock file from disk to get latest state from previous installs
-                    lock_file = LockFile::load(&src_dir)?;
-
-                    // Install versioned extension to its own sysroot
-                    self.install_versioned_extension(config, name, version, &_target, &mut lock_file).await.with_context(|| {
-                        format!("Failed to install versioned extension '{name}' version '{version}'")
-                    })?;
+                    // DEPRECATED: vsn: syntax is no longer supported
+                    // Users should migrate to the new source-based approach in the extensions section
+                    return Err(anyhow::anyhow!(
+                        "Deprecated 'vsn:' syntax found for extension '{name}' version '{version}'.\n\n\
+                         The 'vsn:' syntax for versioned extensions is no longer supported.\n\n\
+                         To use remote extensions, define them in the 'extensions' section with a 'source' field:\n\n\
+                         extensions:\n  {name}:\n    source:\n      type: package\n      version: \"{version}\"\n\n\
+                         Then reference the extension in 
runtimes packages simply by name:\n\n\ + runtimes:\n your-runtime:\n packages:\n {name}: ext\n\n\ + Remote extensions are automatically fetched during 'avocado sdk install' or\n\ + can be manually fetched with 'avocado ext fetch'." + )); } } } @@ -259,7 +300,9 @@ impl InstallCommand { self.dnf_args.clone(), ) .with_no_stamps(self.no_stamps) - .with_runs_on(self.runs_on.clone(), self.nfs_port); + .with_runs_on(self.runs_on.clone(), self.nfs_port) + .with_sdk_arch(self.sdk_arch.clone()) + .with_composed_config(Arc::clone(&composed)); runtime_install_cmd.execute().await.with_context(|| { format!("Failed to install runtime dependencies for '{runtime_name}'") })?; @@ -282,7 +325,6 @@ impl InstallCommand { use std::collections::HashSet; let mut required_extensions = HashSet::new(); - let mut visited = HashSet::new(); // For cycle detection let config = &composed.config; let parsed = &composed.merged_value; @@ -299,7 +341,7 @@ impl InstallCommand { ); } // If no runtimes match this target, install all local extensions - if let Some(ext_section) = parsed.get("ext").and_then(|e| e.as_mapping()) { + if let Some(ext_section) = parsed.get("extensions").and_then(|e| e.as_mapping()) { for ext_name_val in ext_section.keys() { if let Some(ext_name) = ext_name_val.as_str() { required_extensions @@ -309,56 +351,22 @@ impl InstallCommand { } } else { // Only install extensions needed by the target-relevant runtimes - if let Some(runtime_section) = parsed.get("runtime").and_then(|r| r.as_mapping()) { + if let Some(runtime_section) = parsed.get("runtimes").and_then(|r| r.as_mapping()) { for runtime_name in &target_runtimes { if let Some(_runtime_config) = runtime_section.get(runtime_name) { // Check both base dependencies and target-specific dependencies let merged_runtime = config.get_merged_runtime_config(runtime_name, target, config_path)?; if let Some(merged_value) = merged_runtime { - if let Some(dependencies) = merged_value - .get("dependencies") - .and_then(|d| d.as_mapping()) + 
// NEW FORMAT: Extensions are listed directly under runtimes..extensions + if let Some(extensions_list) = + merged_value.get("extensions").and_then(|e| e.as_sequence()) { - for (_dep_name, dep_spec) in dependencies { - // Check for extension dependency - if let Some(ext_name) = - dep_spec.get("ext").and_then(|v| v.as_str()) - { - // Check if this is a versioned extension (has vsn field) - if let Some(version) = - dep_spec.get("vsn").and_then(|v| v.as_str()) - { - let ext_dep = ExtensionDependency::Versioned { - name: ext_name.to_string(), - version: version.to_string(), - }; - required_extensions.insert(ext_dep); - } - // Check if this is an external extension (has config field) - else if let Some(external_config) = - dep_spec.get("config").and_then(|v| v.as_str()) - { - let ext_dep = ExtensionDependency::External { - name: ext_name.to_string(), - config_path: external_config.to_string(), - }; - required_extensions.insert(ext_dep.clone()); - - // Recursively find nested external extension dependencies - self.find_nested_external_extensions( - config, - config_path, - &ext_dep, - &mut required_extensions, - &mut visited, - )?; - } else { - // Local extension - required_extensions.insert(ExtensionDependency::Local( - ext_name.to_string(), - )); - } + for ext_val in extensions_list { + if let Some(ext_name) = ext_val.as_str() { + required_extensions.insert(ExtensionDependency::Local( + ext_name.to_string(), + )); } } } @@ -385,124 +393,6 @@ impl InstallCommand { Ok(extensions) } - /// Recursively find nested external extension dependencies - fn find_nested_external_extensions( - &self, - config: &Config, - base_config_path: &str, - ext_dep: &ExtensionDependency, - required_extensions: &mut std::collections::HashSet, - visited: &mut std::collections::HashSet, - ) -> Result<()> { - let (ext_name, ext_config_path) = match ext_dep { - ExtensionDependency::External { name, config_path } => (name, config_path), - ExtensionDependency::Local(_) => return Ok(()), // Local 
extensions don't have nested external deps - ExtensionDependency::Versioned { .. } => return Ok(()), // Versioned extensions don't have nested deps - }; - - // Cycle detection: check if we've already processed this extension - let ext_key = format!("{ext_name}:{ext_config_path}"); - if visited.contains(&ext_key) { - if self.verbose { - print_info( - &format!("Skipping already processed extension '{ext_name}' to avoid cycles"), - OutputLevel::Normal, - ); - } - return Ok(()); - } - visited.insert(ext_key); - - // Load the external extension configuration - let resolved_external_config_path = - config.resolve_path_relative_to_src_dir(base_config_path, ext_config_path); - let external_extensions = - config.load_external_extensions(base_config_path, ext_config_path)?; - - let extension_config = external_extensions.get(ext_name).ok_or_else(|| { - anyhow::anyhow!( - "Extension '{ext_name}' not found in external config file '{ext_config_path}'" - ) - })?; - - // Load the nested config file to get its src_dir setting - let nested_config_content = std::fs::read_to_string(&resolved_external_config_path) - .with_context(|| { - format!( - "Failed to read nested config file: {}", - resolved_external_config_path.display() - ) - })?; - let nested_config: serde_yaml::Value = serde_yaml::from_str(&nested_config_content) - .with_context(|| { - format!( - "Failed to parse nested config file: {}", - resolved_external_config_path.display() - ) - })?; - - // Create a temporary Config object for the nested config to handle its src_dir - let nested_config_obj = serde_yaml::from_value::(nested_config.clone())?; - - // Check if this external extension has dependencies - if let Some(dependencies) = extension_config - .get("dependencies") - .and_then(|d| d.as_mapping()) - { - for (_dep_name, dep_spec) in dependencies { - // Check for nested extension dependency - if let Some(nested_ext_name) = dep_spec.get("ext").and_then(|v| v.as_str()) { - // Check if this is a nested external extension 
(has config field) - if let Some(nested_external_config) = - dep_spec.get("config").and_then(|v| v.as_str()) - { - // Resolve the nested config path relative to the nested config's src_dir - let nested_config_path = nested_config_obj - .resolve_path_relative_to_src_dir( - &resolved_external_config_path, - nested_external_config, - ); - - let nested_ext_dep = ExtensionDependency::External { - name: nested_ext_name.to_string(), - config_path: nested_config_path.to_string_lossy().to_string(), - }; - - // Add the nested extension to required extensions - required_extensions.insert(nested_ext_dep.clone()); - - if self.verbose { - print_info( - &format!("Found nested external extension '{nested_ext_name}' required by '{ext_name}' at '{}'", nested_config_path.display()), - OutputLevel::Normal, - ); - } - - // Recursively process the nested extension - self.find_nested_external_extensions( - config, - base_config_path, - &nested_ext_dep, - required_extensions, - visited, - )?; - } else { - // This is a local extension dependency within the external config - // We don't need to process it further as it will be handled during installation - if self.verbose { - print_info( - &format!("Found local extension dependency '{nested_ext_name}' in external extension '{ext_name}'"), - OutputLevel::Normal, - ); - } - } - } - } - } - - Ok(()) - } - /// Find runtimes that are relevant for the specified target fn find_target_relevant_runtimes( &self, @@ -512,7 +402,7 @@ impl InstallCommand { ) -> Result> { let mut relevant_runtimes = Vec::new(); - if let Some(runtime_section) = parsed.get("runtime").and_then(|r| r.as_mapping()) { + if let Some(runtime_section) = parsed.get("runtimes").and_then(|r| r.as_mapping()) { for runtime_name_val in runtime_section.keys() { if let Some(runtime_name) = runtime_name_val.as_str() { // If a specific runtime is requested, only check that one @@ -564,6 +454,7 @@ impl InstallCommand { } /// Install an external extension to ${AVOCADO_PREFIX}/extensions/ + 
#[allow(dead_code)] async fn install_external_extension( &self, config: &Config, @@ -610,6 +501,7 @@ impl InstallCommand { container_args: merged_container_args.clone(), dnf_args: self.dnf_args.clone(), disable_weak_dependencies: config.get_sdk_disable_weak_dependencies(), + sdk_arch: self.sdk_arch.clone(), ..Default::default() }; let sysroot_exists = container_helper.run_in_container(run_config).await?; @@ -631,6 +523,7 @@ impl InstallCommand { container_args: merged_container_args.clone(), dnf_args: self.dnf_args.clone(), disable_weak_dependencies: config.get_sdk_disable_weak_dependencies(), + sdk_arch: self.sdk_arch.clone(), ..Default::default() }; let success = container_helper.run_in_container(run_config).await?; @@ -678,7 +571,7 @@ impl InstallCommand { // Process the extension's dependencies (packages, not extension or compile dependencies) let sysroot = SysrootType::Extension(extension_name.to_string()); - if let Some(serde_yaml::Value::Mapping(deps_map)) = extension_config.get("dependencies") { + if let Some(serde_yaml::Value::Mapping(deps_map)) = extension_config.get("packages") { if !deps_map.is_empty() { let mut packages = Vec::new(); let mut package_names = Vec::new(); @@ -694,7 +587,7 @@ impl InstallCommand { // Skip non-package dependencies (extension or compile dependencies) if let serde_yaml::Value::Mapping(spec_map) = version_spec { // Skip extension dependencies (they have "ext" field) - handled by recursive logic - if spec_map.get("ext").is_some() { + if spec_map.get("extensions").is_some() { continue; } // Skip compile dependencies (they have "compile" field) - SDK-compiled, not from repo @@ -786,6 +679,7 @@ $DNF_SDK_HOST \ container_args: merged_container_args.clone(), dnf_args: self.dnf_args.clone(), disable_weak_dependencies: config.get_sdk_disable_weak_dependencies(), + sdk_arch: self.sdk_arch.clone(), ..Default::default() }; @@ -809,6 +703,7 @@ $DNF_SDK_HOST \ repo_release, merged_container_args, None, // TODO: Add runs_on_context support 
to install.rs + self.sdk_arch.as_ref(), ) .await?; @@ -889,6 +784,7 @@ $DNF_SDK_HOST \ repo_release: stamp_repo_release, container_args: stamp_container_args, dnf_args: self.dnf_args.clone(), + sdk_arch: self.sdk_arch.clone(), ..Default::default() }; @@ -906,6 +802,7 @@ $DNF_SDK_HOST \ } /// Install a versioned extension using DNF to its own sysroot + #[allow(dead_code)] async fn install_versioned_extension( &self, config: &Config, @@ -940,6 +837,7 @@ $DNF_SDK_HOST \ container_args: merged_container_args.clone(), dnf_args: self.dnf_args.clone(), disable_weak_dependencies: config.get_sdk_disable_weak_dependencies(), + sdk_arch: self.sdk_arch.clone(), ..Default::default() }; let sysroot_exists = container_helper.run_in_container(run_config).await?; @@ -960,6 +858,7 @@ $DNF_SDK_HOST \ container_args: merged_container_args.clone(), dnf_args: self.dnf_args.clone(), disable_weak_dependencies: config.get_sdk_disable_weak_dependencies(), + sdk_arch: self.sdk_arch.clone(), ..Default::default() }; let success = container_helper.run_in_container(run_config).await?; @@ -1031,6 +930,7 @@ $DNF_SDK_HOST \ container_args: merged_container_args.clone(), dnf_args: self.dnf_args.clone(), disable_weak_dependencies: config.get_sdk_disable_weak_dependencies(), + sdk_arch: self.sdk_arch.clone(), ..Default::default() }; @@ -1053,6 +953,7 @@ $DNF_SDK_HOST \ repo_release, merged_container_args, None, // TODO: Add runs_on_context support to install.rs + self.sdk_arch.as_ref(), ) .await?; @@ -1091,6 +992,7 @@ $DNF_SDK_HOST \ } /// Install SDK dependencies from an external extension's config + #[allow(dead_code)] async fn install_external_extension_sdk_deps( &self, config: &Config, @@ -1132,7 +1034,7 @@ $DNF_SDK_HOST \ // Check if the external config has SDK dependencies let sdk_deps = external_config .get("sdk") - .and_then(|sdk| sdk.get("dependencies")) + .and_then(|sdk| sdk.get("packages")) .and_then(|deps| deps.as_mapping()); let Some(sdk_deps_map) = sdk_deps else { @@ -1252,6 +1154,7 @@ 
$DNF_SDK_HOST \ container_args: merged_container_args.clone(), dnf_args: self.dnf_args.clone(), disable_weak_dependencies: config.get_sdk_disable_weak_dependencies(), + sdk_arch: self.sdk_arch.clone(), ..Default::default() }; @@ -1275,6 +1178,7 @@ $DNF_SDK_HOST \ repo_release, merged_container_args, None, // TODO: Add runs_on_context support to install.rs + self.sdk_arch.as_ref(), ) .await?; diff --git a/src/commands/provision.rs b/src/commands/provision.rs index d61ddcc..e864b72 100644 --- a/src/commands/provision.rs +++ b/src/commands/provision.rs @@ -1,9 +1,11 @@ //! Provision command implementation that acts as a shortcut to runtime provision. -use anyhow::Result; +use anyhow::{Context, Result}; use std::collections::HashMap; +use std::sync::Arc; use crate::commands::runtime::RuntimeProvisionCommand; +use crate::utils::config::{ComposedConfig, Config}; /// Configuration for provision command pub struct ProvisionConfig { @@ -33,23 +35,46 @@ pub struct ProvisionConfig { pub runs_on: Option, /// NFS port for remote execution pub nfs_port: Option, + /// SDK container architecture for cross-arch emulation + pub sdk_arch: Option, } /// Implementation of the 'provision' command that calls through to runtime provision. 
pub struct ProvisionCommand { config: ProvisionConfig, + /// Pre-composed configuration to avoid reloading + composed_config: Option>, } impl ProvisionCommand { /// Create a new ProvisionCommand instance pub fn new(config: ProvisionConfig) -> Self { - Self { config } + Self { + config, + composed_config: None, + } + } + + /// Set pre-composed configuration to avoid reloading + #[allow(dead_code)] + pub fn with_composed_config(mut self, config: Arc) -> Self { + self.composed_config = Some(config); + self } /// Execute the provision command by calling runtime provision pub async fn execute(&self) -> Result<()> { - // Load config to access provision profiles - let config = crate::utils::config::Config::load(&self.config.config_path)?; + // Use provided config or load fresh + let composed = match &self.composed_config { + Some(cc) => Arc::clone(cc), + None => Arc::new( + Config::load_composed(&self.config.config_path, self.config.target.as_deref()) + .with_context(|| { + format!("Failed to load config from {}", self.config.config_path) + })?, + ), + }; + let config = &composed.config; // Get state file path from provision profile if available let state_file = self @@ -76,8 +101,10 @@ impl ProvisionCommand { no_stamps: self.config.no_stamps, runs_on: self.config.runs_on.clone(), nfs_port: self.config.nfs_port, + sdk_arch: self.config.sdk_arch.clone(), }, - ); + ) + .with_composed_config(composed); runtime_provision_cmd.execute().await } @@ -106,6 +133,7 @@ mod tests { no_stamps: false, runs_on: None, nfs_port: None, + sdk_arch: None, }; let cmd = ProvisionCommand::new(config); @@ -139,6 +167,7 @@ mod tests { no_stamps: false, runs_on: None, nfs_port: None, + sdk_arch: None, }; let cmd = ProvisionCommand::new(config); @@ -175,6 +204,7 @@ mod tests { no_stamps: false, runs_on: None, nfs_port: None, + sdk_arch: None, }; let cmd = ProvisionCommand::new(config); @@ -198,6 +228,7 @@ mod tests { no_stamps: false, runs_on: None, nfs_port: None, + sdk_arch: None, }; let cmd = 
ProvisionCommand::new(config); diff --git a/src/commands/runtime/build.rs b/src/commands/runtime/build.rs index bc7b400..14cb534 100644 --- a/src/commands/runtime/build.rs +++ b/src/commands/runtime/build.rs @@ -1,5 +1,5 @@ use crate::utils::{ - config::load_config, + config::{ComposedConfig, Config}, container::{RunConfig, SdkContainer}, output::{print_error, print_info, print_success, OutputLevel}, runs_on::RunsOnContext, @@ -11,6 +11,7 @@ use crate::utils::{ }; use anyhow::{Context, Result}; use std::collections::{HashMap, HashSet}; +use std::sync::Arc; pub struct RuntimeBuildCommand { runtime_name: String, @@ -22,6 +23,9 @@ pub struct RuntimeBuildCommand { no_stamps: bool, runs_on: Option, nfs_port: Option, + sdk_arch: Option, + /// Pre-composed configuration to avoid reloading + composed_config: Option>, } impl RuntimeBuildCommand { @@ -43,6 +47,8 @@ impl RuntimeBuildCommand { no_stamps: false, runs_on: None, nfs_port: None, + sdk_arch: None, + composed_config: None, } } @@ -59,11 +65,30 @@ impl RuntimeBuildCommand { self } + /// Set SDK container architecture for cross-arch emulation + pub fn with_sdk_arch(mut self, sdk_arch: Option) -> Self { + self.sdk_arch = sdk_arch; + self + } + + /// Set pre-composed configuration to avoid reloading + pub fn with_composed_config(mut self, config: Arc) -> Self { + self.composed_config = Some(config); + self + } + pub async fn execute(&self) -> Result<()> { - // Load configuration and parse raw TOML - let config = load_config(&self.config_path)?; - let content = std::fs::read_to_string(&self.config_path)?; - let parsed: serde_yaml::Value = serde_yaml::from_str(&content)?; + // Use provided config or load fresh + let composed = match &self.composed_config { + Some(cc) => Arc::clone(cc), + None => Arc::new( + Config::load_composed(&self.config_path, self.target.as_deref()).with_context( + || format!("Failed to load composed config from {}", self.config_path), + )?, + ), + }; + let config = &composed.config; + let parsed = 
&composed.merged_value; // Merge container args from config and CLI with environment variable expansion let merged_container_args = config.merge_sdk_container_args(self.container_args.as_ref()); @@ -79,7 +104,7 @@ impl RuntimeBuildCommand { // Get runtime configuration let runtime_config = parsed - .get("runtime") + .get("runtimes") .context("No runtime configuration found")?; // Check if runtime exists @@ -94,11 +119,11 @@ impl RuntimeBuildCommand { .map(|s| s.to_string()); // Resolve target architecture - let target_arch = resolve_target_required(self.target.as_deref(), &config)?; + let target_arch = resolve_target_required(self.target.as_deref(), config)?; // Initialize SDK container helper let container_helper = - SdkContainer::from_config(&self.config_path, &config)?.verbose(self.verbose); + SdkContainer::from_config(&self.config_path, config)?.verbose(self.verbose); // Create shared RunsOnContext if running on remote host let mut runs_on_context: Option = if let Some(ref runs_on) = self.runs_on { @@ -114,8 +139,8 @@ impl RuntimeBuildCommand { // Execute the build and ensure cleanup let result = self .execute_build_internal( - &config, - &parsed, + config, + parsed, container_image, &target_arch, &merged_container_args, @@ -182,6 +207,7 @@ impl RuntimeBuildCommand { container_args: merged_container_args.clone(), dnf_args: self.dnf_args.clone(), // runs_on handled by shared context + sdk_arch: self.sdk_arch.clone(), ..Default::default() }; @@ -220,7 +246,8 @@ impl RuntimeBuildCommand { .await?; // Build var image - let build_script = self.create_build_script(parsed, target_arch, &resolved_extensions)?; + let build_script = + self.create_build_script(config, parsed, target_arch, &resolved_extensions)?; if self.verbose { print_info( @@ -295,6 +322,7 @@ impl RuntimeBuildCommand { dnf_args: self.dnf_args.clone(), env_vars, // runs_on handled by shared context + sdk_arch: self.sdk_arch.clone(), ..Default::default() }; let complete_result = 
run_container_command(container_helper, run_config, runs_on_context) @@ -332,6 +360,7 @@ impl RuntimeBuildCommand { container_args: merged_container_args.clone(), dnf_args: self.dnf_args.clone(), // runs_on handled by shared context + sdk_arch: self.sdk_arch.clone(), ..Default::default() }; @@ -350,12 +379,12 @@ impl RuntimeBuildCommand { fn create_build_script( &self, + config: &Config, parsed: &serde_yaml::Value, target_arch: &str, resolved_extensions: &[String], ) -> Result { // Get merged runtime configuration including target-specific dependencies - let config = crate::utils::config::Config::load(&self.config_path)?; let merged_runtime = config .get_merged_runtime_config(&self.runtime_name, target_arch, &self.config_path)? .with_context(|| { @@ -365,38 +394,25 @@ impl RuntimeBuildCommand { ) })?; - let binding = serde_yaml::Mapping::new(); - let runtime_deps = merged_runtime - .get("dependencies") - .and_then(|v| v.as_mapping()) - .unwrap_or(&binding); - - // Extract extension names and any type overrides from runtime dependencies + // Extract extension names from the `extensions` array let mut required_extensions = HashSet::new(); - let mut extension_type_overrides: HashMap> = HashMap::new(); - - // First, collect direct runtime dependencies - for (_dep_name, dep_spec) in runtime_deps { - if let Some(ext_name) = dep_spec.get("ext").and_then(|v| v.as_str()) { - required_extensions.insert(ext_name.to_string()); - - // Check if the runtime dependency specifies custom types - if let Some(types) = dep_spec.get("types").and_then(|v| v.as_sequence()) { - let type_strings: Vec = types - .iter() - .filter_map(|v| v.as_str()) - .map(|s| s.to_string()) - .collect(); - if !type_strings.is_empty() { - extension_type_overrides.insert(ext_name.to_string(), type_strings); - } + let _extension_type_overrides: HashMap> = HashMap::new(); + + // Collect extensions from the new `extensions` array format + if let Some(extensions) = merged_runtime + .get("extensions") + 
.and_then(|e| e.as_sequence()) + { + for ext in extensions { + if let Some(ext_name) = ext.as_str() { + required_extensions.insert(ext_name.to_string()); } } } // Recursively discover all extension dependencies (including nested external extensions) let all_required_extensions = - self.find_all_extension_dependencies(&config, &required_extensions, target_arch)?; + self.find_all_extension_dependencies(config, &required_extensions, target_arch)?; // Build a map from extension name to versioned name from resolved_extensions // Format of resolved_extensions items: "ext_name-version" (e.g., "my-ext-1.0.0") @@ -426,7 +442,7 @@ impl RuntimeBuildCommand { let mut processed_extensions = HashSet::new(); // Process local extensions defined in [ext.*] sections - if let Some(ext_config) = parsed.get("ext").and_then(|v| v.as_mapping()) { + if let Some(ext_config) = parsed.get("extensions").and_then(|v| v.as_mapping()) { for (ext_name_val, ext_data) in ext_config { if let Some(ext_name) = ext_name_val.as_str() { // Only process extensions that are required by this runtime @@ -634,11 +650,11 @@ avocado-build-$TARGET_ARCH $RUNTIME_NAME /// Recursively collect all dependencies for a single extension fn collect_extension_dependencies( &self, - config: &crate::utils::config::Config, + _config: &crate::utils::config::Config, ext_name: &str, all_extensions: &mut HashSet, visited: &mut HashSet, - target_arch: &str, + _target_arch: &str, ) -> Result<()> { // Avoid infinite loops if visited.contains(ext_name) { @@ -653,119 +669,26 @@ avocado-build-$TARGET_ARCH $RUNTIME_NAME let content = std::fs::read_to_string(&self.config_path)?; let parsed: serde_yaml::Value = serde_yaml::from_str(&content)?; - // Check if this is a local extension + // Check if this is a local extension defined in the ext section + // Extension source configuration (repo, git, path) is now in the ext section if let Some(ext_config) = parsed - .get("ext") + .get("extensions") .and_then(|e| e.as_mapping()) 
.and_then(|table| table.get(ext_name)) { - // This is a local extension - check its dependencies - if let Some(dependencies) = ext_config.get("dependencies").and_then(|d| d.as_mapping()) + // This is a local extension - check if it has an extensions array for nested deps + if let Some(nested_extensions) = + ext_config.get("extensions").and_then(|e| e.as_sequence()) { - for (_dep_name, dep_spec) in dependencies { - if let Some(nested_ext_name) = dep_spec.get("ext").and_then(|v| v.as_str()) { - // Check if this is an external extension dependency - if let Some(external_config_path) = - dep_spec.get("config").and_then(|v| v.as_str()) - { - // This is an external extension - load its config and process recursively - let external_extensions = config.load_external_extensions( - &self.config_path, - external_config_path, - )?; - - // Add the external extension itself - self.collect_extension_dependencies( - config, - nested_ext_name, - all_extensions, - visited, - target_arch, - )?; - - // Process its dependencies from the external config - if let Some(ext_config) = external_extensions.get(nested_ext_name) { - if let Some(nested_deps) = - ext_config.get("dependencies").and_then(|d| d.as_mapping()) - { - for (_nested_dep_name, nested_dep_spec) in nested_deps { - if let Some(nested_nested_ext_name) = - nested_dep_spec.get("ext").and_then(|v| v.as_str()) - { - self.collect_extension_dependencies( - config, - nested_nested_ext_name, - all_extensions, - visited, - target_arch, - )?; - } - } - } - } - } else { - // This is a local extension dependency - self.collect_extension_dependencies( - config, - nested_ext_name, - all_extensions, - visited, - target_arch, - )?; - } - } - } - } - } else { - // This might be an external extension - we need to find it in the runtime dependencies - // to get its config path, then process its dependencies - let merged_runtime = config - .get_merged_runtime_config(&self.runtime_name, target_arch, &self.config_path)? 
- .with_context(|| { - format!( - "Runtime '{}' not found or has no configuration for target '{}'", - self.runtime_name, target_arch - ) - })?; - - if let Some(runtime_deps) = merged_runtime - .get("dependencies") - .and_then(|v| v.as_mapping()) - { - for (_dep_name, dep_spec) in runtime_deps { - if let Some(dep_ext_name) = dep_spec.get("ext").and_then(|v| v.as_str()) { - if dep_ext_name == ext_name { - if let Some(external_config_path) = - dep_spec.get("config").and_then(|v| v.as_str()) - { - // Found the external extension - process its dependencies - let external_extensions = config.load_external_extensions( - &self.config_path, - external_config_path, - )?; - - if let Some(ext_config) = external_extensions.get(ext_name) { - if let Some(nested_deps) = - ext_config.get("dependencies").and_then(|d| d.as_mapping()) - { - for (_nested_dep_name, nested_dep_spec) in nested_deps { - if let Some(nested_ext_name) = - nested_dep_spec.get("ext").and_then(|v| v.as_str()) - { - self.collect_extension_dependencies( - config, - nested_ext_name, - all_extensions, - visited, - target_arch, - )?; - } - } - } - } - } - break; - } + for nested_ext in nested_extensions { + if let Some(nested_ext_name) = nested_ext.as_str() { + self.collect_extension_dependencies( + _config, + nested_ext_name, + all_extensions, + visited, + _target_arch, + )?; } } } @@ -793,29 +716,29 @@ avocado-build-$TARGET_ARCH $RUNTIME_NAME let merged_runtime = config.get_merged_runtime_config(runtime_name, target_arch, config_path)?; - let runtime_dep_table = merged_runtime + let mut extensions = Vec::new(); + + // Read extensions from the new `extensions` array format + let ext_list = merged_runtime .as_ref() - .and_then(|value| value.get("dependencies").and_then(|d| d.as_mapping())) + .and_then(|value| value.get("extensions").and_then(|e| e.as_sequence())) .or_else(|| { parsed - .get("runtime") + .get("runtimes") .and_then(|r| r.get(runtime_name)) - .and_then(|runtime_value| runtime_value.get("dependencies")) 
- .and_then(|d| d.as_mapping()) + .and_then(|runtime_value| runtime_value.get("extensions")) + .and_then(|e| e.as_sequence()) }); - let mut extensions = Vec::new(); - - if let Some(deps) = runtime_dep_table { - for dep_spec in deps.values() { - if let Some(ext_name) = dep_spec.get("ext").and_then(|v| v.as_str()) { + if let Some(ext_seq) = ext_list { + for ext in ext_seq { + if let Some(ext_name) = ext.as_str() { let version = self .resolve_extension_version( parsed, config, config_path, ext_name, - dep_spec, container_image, target_arch, container_args.clone(), @@ -832,54 +755,25 @@ avocado-build-$TARGET_ARCH $RUNTIME_NAME Ok(extensions) } - /// Resolve the version for an extension dependency. + /// Resolve the version for an extension. /// /// Priority order: - /// 1. Explicit `vsn` field in the dependency spec (unless it's "*") - /// 2. Version from external config file (if `config` field is specified) - /// 3. Version from local `[ext]` section - /// 4. Query RPM database for installed version + /// 1. Version from local `[ext]` section + /// 2. 
Query RPM database for installed version (for repo-sourced extensions) #[allow(clippy::too_many_arguments)] async fn resolve_extension_version( &self, parsed: &serde_yaml::Value, - config: &crate::utils::config::Config, - config_path: &str, + _config: &crate::utils::config::Config, + _config_path: &str, ext_name: &str, - dep_spec: &serde_yaml::Value, container_image: &str, target_arch: &str, container_args: Option>, ) -> Result { - // If version is explicitly specified with vsn field, use it (unless it's a wildcard) - if let Some(version) = dep_spec.get("vsn").and_then(|v| v.as_str()) { - if version != "*" { - return Ok(version.to_string()); - } - // If vsn is "*", fall through to query RPM for the actual installed version - } - - // If external config is specified, try to get version from it - if let Some(external_config_path) = dep_spec.get("config").and_then(|v| v.as_str()) { - let external_extensions = - config.load_external_extensions(config_path, external_config_path)?; - if let Some(ext_config) = external_extensions.get(ext_name) { - if let Some(version) = ext_config.get("version").and_then(|v| v.as_str()) { - if version != "*" { - return Ok(version.to_string()); - } - // If version is "*", fall through to query RPM - } - } - // External config but no version found or version is "*" - query RPM database - return self - .query_rpm_version(ext_name, container_image, target_arch, container_args) - .await; - } - // Try to get version from local [ext] section if let Some(version) = parsed - .get("ext") + .get("extensions") .and_then(|ext_section| ext_section.as_mapping()) .and_then(|ext_table| ext_table.get(ext_name)) .and_then(|ext_config| ext_config.get("version")) @@ -930,6 +824,7 @@ rpm --root="$AVOCADO_EXT_SYSROOTS/{ext_name}" --dbpath=/var/lib/extension.d/rpm interactive: false, // runs_on handled by shared context container_args, + sdk_arch: self.sdk_arch.clone(), ..Default::default() }; @@ -1012,10 +907,10 @@ mod tests { sdk: image: "test-image" -runtime: 
+runtimes: test-runtime: target: "x86_64" - dependencies: + packages: test-dep: ext: test-ext "#; @@ -1031,9 +926,10 @@ runtime: ); // Pass empty resolved_extensions since no extensions are defined with versions + let config = Config::load(&cmd.config_path).unwrap(); let resolved_extensions: Vec = vec![]; let script = cmd - .create_build_script(&parsed, "x86_64", &resolved_extensions) + .create_build_script(&config, &parsed, "x86_64", &resolved_extensions) .unwrap(); assert!(script.contains("RUNTIME_NAME=\"test-runtime\"")); @@ -1050,14 +946,13 @@ runtime: sdk: image: "test-image" -runtime: +runtimes: test-runtime: target: "x86_64" - dependencies: - test-dep: - ext: test-ext + extensions: + - test-ext -ext: +extensions: test-ext: version: "1.0.0" types: @@ -1074,9 +969,10 @@ ext: None, ); + let config = Config::load(&cmd.config_path).unwrap(); let resolved_extensions = vec!["test-ext-1.0.0".to_string()]; let script = cmd - .create_build_script(&parsed, "x86_64", &resolved_extensions) + .create_build_script(&config, &parsed, "x86_64", &resolved_extensions) .unwrap(); assert!(script.contains("test-ext-1.0.0.raw")); @@ -1087,22 +983,19 @@ ext: } #[test] - fn test_create_build_script_with_type_overrides() { + fn test_create_build_script_with_extension_types() { let temp_dir = TempDir::new().unwrap(); let config_content = r#" sdk: image: "test-image" -runtime: +runtimes: test-runtime: target: "x86_64" - dependencies: - test-dep: - ext: test-ext - types: - - sysext + extensions: + - test-ext -ext: +extensions: test-ext: version: "1.0.0" types: @@ -1120,9 +1013,10 @@ ext: None, ); + let config = Config::load(&cmd.config_path).unwrap(); let resolved_extensions = vec!["test-ext-1.0.0".to_string()]; let script = cmd - .create_build_script(&parsed, "x86_64", &resolved_extensions) + .create_build_script(&config, &parsed, "x86_64", &resolved_extensions) .unwrap(); // Extension should be copied from output/extensions to runtime-specific directory @@ -1135,20 +1029,19 @@ ext: } 
#[test] - fn test_create_build_script_no_type_override_uses_extension_defaults() { + fn test_create_build_script_uses_extension_defaults() { let temp_dir = TempDir::new().unwrap(); let config_content = r#" sdk: image: "test-image" -runtime: +runtimes: test-runtime: target: "x86_64" - dependencies: - test-dep: - ext: test-ext + extensions: + - test-ext -ext: +extensions: test-ext: version: "1.0.0" types: @@ -1165,9 +1058,10 @@ ext: None, ); + let config = Config::load(&cmd.config_path).unwrap(); let resolved_extensions = vec!["test-ext-1.0.0".to_string()]; let script = cmd - .create_build_script(&parsed, "x86_64", &resolved_extensions) + .create_build_script(&config, &parsed, "x86_64", &resolved_extensions) .unwrap(); // Extension should be copied from output/extensions to runtime-specific directory diff --git a/src/commands/runtime/clean.rs b/src/commands/runtime/clean.rs index fd4f7d5..6b6bb0d 100644 --- a/src/commands/runtime/clean.rs +++ b/src/commands/runtime/clean.rs @@ -1,6 +1,7 @@ use anyhow::Result; +use std::sync::Arc; -use crate::utils::config::{load_config, Config}; +use crate::utils::config::{ComposedConfig, Config}; use crate::utils::container::{RunConfig, SdkContainer}; use crate::utils::output::{print_error, print_info, print_success, OutputLevel}; use crate::utils::target::resolve_target_required; @@ -12,6 +13,9 @@ pub struct RuntimeCleanCommand { target: Option, container_args: Option>, dnf_args: Option>, + sdk_arch: Option, + /// Pre-composed configuration to avoid reloading + composed_config: Option>, } impl RuntimeCleanCommand { @@ -30,23 +34,45 @@ impl RuntimeCleanCommand { target, container_args, dnf_args, + sdk_arch: None, + composed_config: None, } } + /// Set SDK container architecture for cross-arch emulation + pub fn with_sdk_arch(mut self, sdk_arch: Option) -> Self { + self.sdk_arch = sdk_arch; + self + } + + /// Set pre-composed configuration to avoid reloading + #[allow(dead_code)] + pub fn with_composed_config(mut self, config: Arc) 
-> Self { + self.composed_config = Some(config); + self + } + pub async fn execute(&self) -> Result<()> { - let config = load_config(&self.config_path)?; - let content = std::fs::read_to_string(&self.config_path)?; - let parsed: serde_yaml::Value = serde_yaml::from_str(&content)?; + // Use provided config or load fresh + let composed = match &self.composed_config { + Some(cc) => Arc::clone(cc), + None => Arc::new(Config::load_composed( + &self.config_path, + self.target.as_deref(), + )?), + }; + let config = &composed.config; + let parsed = &composed.merged_value; - self.validate_runtime_exists(&parsed)?; - let container_image = self.get_container_image(&config)?; - let target = self.resolve_target_architecture(&config)?; + self.validate_runtime_exists(parsed)?; + let container_image = self.get_container_image(config)?; + let target = self.resolve_target_architecture(config)?; self.clean_runtime(&container_image, &target).await } fn validate_runtime_exists(&self, parsed: &serde_yaml::Value) -> Result<()> { - let runtime_section = parsed.get("runtime").ok_or_else(|| { + let runtime_section = parsed.get("runtimes").ok_or_else(|| { print_error( &format!("Runtime '{}' not found in configuration.", self.runtime), OutputLevel::Normal, @@ -122,6 +148,7 @@ rm -rf "$AVOCADO_PREFIX/.stamps/runtime/{runtime}" self.container_args.as_ref(), ), dnf_args: self.dnf_args.clone(), + sdk_arch: self.sdk_arch.clone(), ..Default::default() }; let success = container_helper.run_in_container(config).await?; diff --git a/src/commands/runtime/deploy.rs b/src/commands/runtime/deploy.rs index 6c0688e..4fe6d58 100644 --- a/src/commands/runtime/deploy.rs +++ b/src/commands/runtime/deploy.rs @@ -1,5 +1,5 @@ use crate::utils::{ - config::load_config, + config::{ComposedConfig, Config}, container::{RunConfig, SdkContainer}, output::{print_info, print_success, OutputLevel}, stamps::{generate_batch_read_stamps_script, validate_stamps_batch, StampRequirement}, @@ -7,6 +7,7 @@ use crate::utils::{ }; 
use anyhow::{Context, Result}; use std::collections::HashMap; +use std::sync::Arc; pub struct RuntimeDeployCommand { runtime_name: String, @@ -17,6 +18,9 @@ pub struct RuntimeDeployCommand { container_args: Option>, dnf_args: Option>, no_stamps: bool, + sdk_arch: Option, + /// Pre-composed configuration to avoid reloading + composed_config: Option>, } impl RuntimeDeployCommand { @@ -38,6 +42,8 @@ impl RuntimeDeployCommand { container_args, dnf_args, no_stamps: false, + sdk_arch: None, + composed_config: None, } } @@ -47,11 +53,30 @@ impl RuntimeDeployCommand { self } + /// Set SDK container architecture for cross-arch emulation + pub fn with_sdk_arch(mut self, sdk_arch: Option) -> Self { + self.sdk_arch = sdk_arch; + self + } + + /// Set pre-composed configuration to avoid reloading + #[allow(dead_code)] + pub fn with_composed_config(mut self, config: Arc) -> Self { + self.composed_config = Some(config); + self + } + pub async fn execute(&self) -> Result<()> { - // Load configuration - let config = load_config(&self.config_path)?; - let content = std::fs::read_to_string(&self.config_path)?; - let parsed: serde_yaml::Value = serde_yaml::from_str(&content)?; + // Use provided config or load fresh + let composed = match &self.composed_config { + Some(cc) => Arc::clone(cc), + None => Arc::new(Config::load_composed( + &self.config_path, + self.target.as_deref(), + )?), + }; + let config = &composed.config; + let parsed = &composed.merged_value; // Get SDK configuration from interpolated config let container_image = config @@ -60,7 +85,7 @@ impl RuntimeDeployCommand { // Get runtime configuration let runtime_config = parsed - .get("runtime") + .get("runtimes") .context("No runtime configuration found")?; // Check if runtime exists @@ -75,11 +100,11 @@ impl RuntimeDeployCommand { .map(|s| s.to_string()); // Resolve target architecture - let target_arch = resolve_target_required(self.target.as_deref(), &config)?; + let target_arch = 
resolve_target_required(self.target.as_deref(), config)?; // Initialize SDK container helper let container_helper = - SdkContainer::from_config(&self.config_path, &config)?.verbose(self.verbose); + SdkContainer::from_config(&self.config_path, config)?.verbose(self.verbose); // Validate stamps before proceeding (unless --no-stamps) if !self.no_stamps { @@ -99,6 +124,7 @@ impl RuntimeDeployCommand { verbose: false, source_environment: true, interactive: false, + sdk_arch: self.sdk_arch.clone(), ..Default::default() }; @@ -149,6 +175,7 @@ impl RuntimeDeployCommand { env_vars: Some(env_vars), container_args: config.merge_sdk_container_args(self.container_args.as_ref()), dnf_args: self.dnf_args.clone(), + sdk_arch: self.sdk_arch.clone(), ..Default::default() }; let deploy_result = container_helper diff --git a/src/commands/runtime/deps.rs b/src/commands/runtime/deps.rs index 8ac2295..f31e838 100644 --- a/src/commands/runtime/deps.rs +++ b/src/commands/runtime/deps.rs @@ -1,12 +1,15 @@ use crate::utils::{ - config::load_config, + config::{ComposedConfig, Config}, output::{print_success, OutputLevel}, }; use anyhow::{Context, Result}; +use std::sync::Arc; pub struct RuntimeDepsCommand { config_path: String, runtime_name: String, + /// Pre-composed configuration to avoid reloading + composed_config: Option>, } impl RuntimeDepsCommand { @@ -14,16 +17,27 @@ impl RuntimeDepsCommand { Self { config_path, runtime_name, + composed_config: None, } } + /// Set pre-composed configuration to avoid reloading + #[allow(dead_code)] + pub fn with_composed_config(mut self, config: Arc) -> Self { + self.composed_config = Some(config); + self + } + pub fn execute(&self) -> Result<()> { - let _config = load_config(&self.config_path)?; - let content = std::fs::read_to_string(&self.config_path)?; - let parsed: serde_yaml::Value = serde_yaml::from_str(&content)?; + // Use provided config or load fresh + let composed = match &self.composed_config { + Some(cc) => Arc::clone(cc), + None => 
Arc::new(Config::load_composed(&self.config_path, None)?), + }; + let parsed = &composed.merged_value; - self.validate_runtime_exists(&parsed)?; - let dependencies = self.list_runtime_dependencies(&parsed, &self.runtime_name)?; + self.validate_runtime_exists(parsed)?; + let dependencies = self.list_runtime_dependencies(parsed, &self.runtime_name)?; self.display_dependencies(&dependencies); print_success( @@ -36,7 +50,7 @@ impl RuntimeDepsCommand { fn validate_runtime_exists(&self, parsed: &serde_yaml::Value) -> Result<()> { let runtime_config = parsed - .get("runtime") + .get("runtimes") .context("No runtime configuration found")?; runtime_config.get(&self.runtime_name).with_context(|| { @@ -58,25 +72,29 @@ impl RuntimeDepsCommand { runtime_name: &str, ) -> Result> { let runtime_config = parsed - .get("runtime") + .get("runtimes") .context("No runtime configuration found")?; let runtime_spec = runtime_config .get(runtime_name) .with_context(|| format!("Runtime '{runtime_name}' not found"))?; - let runtime_deps = runtime_spec - .get("dependencies") - .and_then(|v| v.as_mapping()); - let mut dependencies = Vec::new(); - if let Some(deps_table) = runtime_deps { + // New way: Read extensions from the `extensions` array + if let Some(extensions) = runtime_spec.get("extensions").and_then(|e| e.as_sequence()) { + for ext in extensions { + if let Some(ext_name) = ext.as_str() { + dependencies.push(self.resolve_extension_dependency(parsed, ext_name)); + } + } + } + + // Read package dependencies from the `packages` section + if let Some(deps_table) = runtime_spec.get("packages").and_then(|v| v.as_mapping()) { for (dep_name_val, dep_spec) in deps_table { if let Some(dep_name) = dep_name_val.as_str() { - if let Some(dependency) = self.resolve_dependency(parsed, dep_name, dep_spec) { - dependencies.push(dependency); - } + dependencies.push(self.resolve_package_dependency(dep_name, dep_spec)); } } } @@ -85,39 +103,13 @@ impl RuntimeDepsCommand { Ok(dependencies) } - fn
resolve_dependency( - &self, - parsed: &serde_yaml::Value, - dep_name: &str, - dep_spec: &serde_yaml::Value, - ) -> Option<(String, String, String)> { - // Try to resolve as extension reference first - if let Some(ext_name) = dep_spec.get("ext").and_then(|v| v.as_str()) { - // Check if this is a versioned extension (has vsn field) - if let Some(version) = dep_spec.get("vsn").and_then(|v| v.as_str()) { - return Some(("ext".to_string(), ext_name.to_string(), version.to_string())); - } - // Check if this is an external extension (has config field) - else if dep_spec.get("config").is_some() { - // For external extensions, we don't have a local version, so use "*" - return Some(("ext".to_string(), ext_name.to_string(), "*".to_string())); - } else { - // Local extension - resolve from local config - return Some(self.resolve_extension_dependency(parsed, ext_name)); - } - } - - // Otherwise treat as package dependency - Some(self.resolve_package_dependency(dep_name, dep_spec)) - } - fn resolve_extension_dependency( &self, parsed: &serde_yaml::Value, ext_name: &str, ) -> (String, String, String) { let version = parsed - .get("ext") + .get("extensions") .and_then(|ext_config| ext_config.as_mapping()) .and_then(|ext_table| ext_table.get(ext_name)) .and_then(|ext_spec| ext_spec.get("version")) @@ -132,12 +124,18 @@ impl RuntimeDepsCommand { dep_name: &str, dep_spec: &serde_yaml::Value, ) -> (String, String, String) { - let version = dep_spec - .get("version") - .and_then(|v| v.as_str()) - .unwrap_or("*"); - - ("pkg".to_string(), dep_name.to_string(), version.to_string()) + // Version can be a string directly or in a mapping with 'version' key + let version = if let Some(v) = dep_spec.as_str() { + v.to_string() + } else { + dep_spec + .get("version") + .and_then(|v| v.as_str()) + .unwrap_or("*") + .to_string() + }; + + ("pkg".to_string(), dep_name.to_string(), version) } fn sort_dependencies(&self, dependencies: &mut [(String, String, String)]) { @@ -166,16 +164,15 @@ mod tests 
{ sdk: image: "test-image" -runtime: +runtimes: test-runtime: target: "x86_64" - dependencies: - gcc: - version: "11.0" - app-ext: - ext: my-extension + extensions: + - my-extension + packages: + gcc: "11.0" -ext: +extensions: my-extension: version: "2.0.0" types: diff --git a/src/commands/runtime/dnf.rs b/src/commands/runtime/dnf.rs index 4ef18a8..98f7d4b 100644 --- a/src/commands/runtime/dnf.rs +++ b/src/commands/runtime/dnf.rs @@ -1,6 +1,7 @@ -use anyhow::Result; +use anyhow::{Context, Result}; +use std::sync::Arc; -use crate::utils::config::Config; +use crate::utils::config::{ComposedConfig, Config}; use crate::utils::container::{RunConfig, SdkContainer}; use crate::utils::output::{print_error, print_info, print_success, OutputLevel}; use crate::utils::target::resolve_target_required; @@ -13,6 +14,9 @@ pub struct RuntimeDnfCommand { target: Option, container_args: Option>, dnf_args: Option>, + sdk_arch: Option, + /// Pre-composed configuration to avoid reloading + composed_config: Option>, } impl RuntimeDnfCommand { @@ -33,25 +37,47 @@ impl RuntimeDnfCommand { target, container_args, dnf_args, + sdk_arch: None, + composed_config: None, } } + /// Set SDK container architecture for cross-arch emulation + pub fn with_sdk_arch(mut self, sdk_arch: Option) -> Self { + self.sdk_arch = sdk_arch; + self + } + + /// Set pre-composed configuration to avoid reloading + #[allow(dead_code)] + pub fn with_composed_config(mut self, config: Arc) -> Self { + self.composed_config = Some(config); + self + } + pub async fn execute(&self) -> Result<()> { - let config = Config::load(&self.config_path)?; + // Use provided config or load fresh + let composed = match &self.composed_config { + Some(cc) => Arc::clone(cc), + None => Arc::new( + Config::load_composed(&self.config_path, self.target.as_deref()) + .context("Failed to load composed config")?, + ), + }; + let config = &composed.config; let merged_container_args = config.merge_sdk_container_args(self.container_args.as_ref()); - 
let content = std::fs::read_to_string(&self.config_path)?; - let parsed: serde_yaml::Value = serde_yaml::from_str(&content)?; + let parsed = &composed.merged_value; - self.validate_runtime_exists(&parsed)?; - let container_image = self.get_container_image(&config)?; - let target = self.resolve_target_architecture(&config)?; + self.validate_runtime_exists(parsed)?; + let container_image = self.get_container_image(config)?; + let target = self.resolve_target_architecture(config)?; // Get repo_url and repo_release from config let repo_url = config.get_sdk_repo_url(); let repo_release = config.get_sdk_repo_release(); self.execute_dnf_command( - &parsed, + parsed, &container_image, &target, repo_url.as_ref(), @@ -62,7 +88,7 @@ impl RuntimeDnfCommand { } fn validate_runtime_exists(&self, parsed: &serde_yaml::Value) -> Result<()> { - let runtime_section = parsed.get("runtime").ok_or_else(|| { + let runtime_section = parsed.get("runtimes").ok_or_else(|| { print_error( &format!("Runtime '{}' not found in configuration.", self.runtime), OutputLevel::Normal, @@ -160,6 +186,7 @@ impl RuntimeDnfCommand { repo_release: repo_release.cloned(), container_args: merged_container_args.clone(), dnf_args: self.dnf_args.clone(), + sdk_arch: self.sdk_arch.clone(), ..Default::default() }; let dir_exists = container_helper.run_in_container(config).await?; @@ -205,6 +232,7 @@ impl RuntimeDnfCommand { repo_release: repo_release.cloned(), container_args: merged_container_args.clone(), dnf_args: self.dnf_args.clone(), + sdk_arch: self.sdk_arch.clone(), ..Default::default() }; let setup_success = container_helper.run_in_container(config).await?; @@ -256,6 +284,7 @@ impl RuntimeDnfCommand { repo_release: repo_release.cloned(), container_args: merged_container_args.clone(), dnf_args: self.dnf_args.clone(), + sdk_arch: self.sdk_arch.clone(), ..Default::default() }; let success = container_helper.run_in_container(config).await?; diff --git a/src/commands/runtime/install.rs 
b/src/commands/runtime/install.rs index c4f4e50..2f5f227 100644 --- a/src/commands/runtime/install.rs +++ b/src/commands/runtime/install.rs @@ -1,7 +1,8 @@ use anyhow::{Context, Result}; use std::path::{Path, PathBuf}; +use std::sync::Arc; -use crate::utils::config::Config; +use crate::utils::config::{ComposedConfig, Config}; use crate::utils::container::{RunConfig, SdkContainer}; use crate::utils::lockfile::{build_package_spec_with_lock, LockFile, SysrootType}; use crate::utils::output::{print_debug, print_error, print_info, print_success, OutputLevel}; @@ -22,6 +23,9 @@ pub struct RuntimeInstallCommand { no_stamps: bool, runs_on: Option, nfs_port: Option, + sdk_arch: Option, + /// Pre-composed configuration to avoid reloading + composed_config: Option>, } impl RuntimeInstallCommand { @@ -45,6 +49,8 @@ impl RuntimeInstallCommand { no_stamps: false, runs_on: None, nfs_port: None, + sdk_arch: None, + composed_config: None, } } @@ -61,11 +67,30 @@ impl RuntimeInstallCommand { self } + /// Set SDK container architecture for cross-arch emulation + pub fn with_sdk_arch(mut self, sdk_arch: Option) -> Self { + self.sdk_arch = sdk_arch; + self + } + + /// Set pre-composed configuration to avoid reloading + pub fn with_composed_config(mut self, config: Arc) -> Self { + self.composed_config = Some(config); + self + } + pub async fn execute(&self) -> Result<()> { - // Load the configuration and parse raw TOML - let config = Config::load(&self.config_path)?; - let content = std::fs::read_to_string(&self.config_path)?; - let parsed: serde_yaml::Value = serde_yaml::from_str(&content)?; + // Use provided config or load fresh + let composed = match &self.composed_config { + Some(cc) => Arc::clone(cc), + None => Arc::new( + Config::load_composed(&self.config_path, self.target.as_deref()).with_context( + || format!("Failed to load composed config from {}", self.config_path), + )?, + ), + }; + let config = &composed.config; + let parsed = &composed.merged_value; // Merge container 
args from config and CLI (similar to SDK commands) let merged_container_args = config.merge_sdk_container_args(self.container_args.as_ref()); @@ -75,7 +100,7 @@ impl RuntimeInstallCommand { let repo_release = config.get_sdk_repo_release(); // Check if runtime section exists - let runtime_section = match parsed.get("runtime") { + let runtime_section = match parsed.get("runtimes") { Some(runtime) => runtime, None => { if self.runtime.is_some() { @@ -150,8 +175,8 @@ impl RuntimeInstallCommand { // Execute installation and ensure cleanup let result = self .execute_install_internal( - &parsed, - &config, + parsed, + config, &runtimes_to_install, &container_helper, container_image, @@ -264,6 +289,7 @@ impl RuntimeInstallCommand { container_args: merged_container_args.clone(), dnf_args: self.dnf_args.clone(), // runs_on handled by shared context + sdk_arch: self.sdk_arch.clone(), ..Default::default() }; @@ -337,6 +363,7 @@ impl RuntimeInstallCommand { container_args: merged_container_args.clone(), dnf_args: self.dnf_args.clone(), // runs_on handled by shared context + sdk_arch: self.sdk_arch.clone(), ..Default::default() }; let installroot_exists = @@ -356,6 +383,7 @@ impl RuntimeInstallCommand { container_args: merged_container_args.clone(), dnf_args: self.dnf_args.clone(), // runs_on handled by shared context + sdk_arch: self.sdk_arch.clone(), ..Default::default() }; let success = @@ -380,12 +408,14 @@ impl RuntimeInstallCommand { config.get_merged_runtime_config(runtime, &target_arch, &self.config_path)?; let dependencies = merged_runtime .as_ref() - .and_then(|merged| merged.get("dependencies")); + .and_then(|merged| merged.get("packages")); let sysroot = SysrootType::Runtime(runtime.to_string()); if let Some(serde_yaml::Value::Mapping(deps_map)) = dependencies { - // Build list of packages to install (excluding extension references) + // Build list of packages to install + // Note: Extensions are now listed in the separate `extensions` array, + // so dependencies 
should only contain package references. let mut packages = Vec::new(); let mut package_names = Vec::new(); for (package_name_val, version_spec) in deps_map { @@ -395,32 +425,6 @@ impl RuntimeInstallCommand { None => continue, // Skip if package name is not a string }; - // Skip extension dependencies (identified by 'ext' key) - // Note: Extension dependencies are handled by the main install command, - // not by individual runtime install - if let serde_yaml::Value::Mapping(spec_map) = version_spec { - if spec_map.contains_key(serde_yaml::Value::String("ext".to_string())) { - if self.verbose { - let dep_type = if spec_map - .contains_key(serde_yaml::Value::String("vsn".to_string())) - { - "versioned extension" - } else if spec_map - .contains_key(serde_yaml::Value::String("config".to_string())) - { - "external extension" - } else { - "local extension" - }; - print_debug( - &format!("Skipping {dep_type} dependency '{package_name}' (handled by main install command)"), - OutputLevel::Normal, - ); - } - continue; - } - } - let config_version = if let Some(version) = version_spec.as_str() { version.to_string() } else if let serde_yaml::Value::Mapping(spec_map) = version_spec { @@ -498,6 +502,7 @@ $DNF_SDK_HOST \ dnf_args: self.dnf_args.clone(), disable_weak_dependencies: config.get_sdk_disable_weak_dependencies(), // runs_on handled by shared context + sdk_arch: self.sdk_arch.clone(), ..Default::default() }; let success = @@ -528,6 +533,7 @@ $DNF_SDK_HOST \ repo_release.cloned(), merged_container_args.clone(), runs_on_context, + self.sdk_arch.as_ref(), ) .await?; @@ -664,7 +670,7 @@ sdk: sdk: image: "test-image" -runtime: +runtimes: other-runtime: target: "x86_64" "#; @@ -689,10 +695,10 @@ runtime: async fn test_execute_no_sdk_config() { let temp_dir = TempDir::new().unwrap(); let config_content = r#" -runtime: +runtimes: test-runtime: target: "x86_64" - dependencies: + packages: gcc: "11.0" "#; let config_path = create_test_config_file(&temp_dir, config_content); @@ 
-723,10 +729,10 @@ runtime: sdk: # Missing image field -runtime: +runtimes: test-runtime: target: "x86_64" - dependencies: + packages: gcc: "11.0" "#; let config_path = create_test_config_file(&temp_dir, config_content); diff --git a/src/commands/runtime/list.rs b/src/commands/runtime/list.rs index 77d9961..dc8be11 100644 --- a/src/commands/runtime/list.rs +++ b/src/commands/runtime/list.rs @@ -1,26 +1,41 @@ use crate::utils::{ - config::load_config, + config::{ComposedConfig, Config}, output::{print_success, OutputLevel}, }; use anyhow::Result; +use std::sync::Arc; pub struct RuntimeListCommand { config_path: String, + /// Pre-composed configuration to avoid reloading + composed_config: Option>, } impl RuntimeListCommand { pub fn new(config_path: String) -> Self { - Self { config_path } + Self { + config_path, + composed_config: None, + } + } + + /// Set pre-composed configuration to avoid reloading + #[allow(dead_code)] + pub fn with_composed_config(mut self, config: Arc) -> Self { + self.composed_config = Some(config); + self } pub fn execute(&self) -> Result<()> { - // Load configuration and parse raw TOML - let _config = load_config(&self.config_path)?; - let content = std::fs::read_to_string(&self.config_path)?; - let parsed: serde_yaml::Value = serde_yaml::from_str(&content)?; + // Use provided config or load fresh + let composed = match &self.composed_config { + Some(cc) => Arc::clone(cc), + None => Arc::new(Config::load_composed(&self.config_path, None)?), + }; + let parsed = &composed.merged_value; // Check if runtime section exists - if let Some(runtime_config) = parsed.get("runtime").and_then(|v| v.as_mapping()) { + if let Some(runtime_config) = parsed.get("runtimes").and_then(|v| v.as_mapping()) { // List all runtime names let mut runtimes: Vec = runtime_config .keys() @@ -69,7 +84,7 @@ mod tests { sdk: image: "test-image" -runtime: +runtimes: app: target: "x86_64" server: diff --git a/src/commands/runtime/provision.rs 
b/src/commands/runtime/provision.rs index f34981a..c55b4ca 100644 --- a/src/commands/runtime/provision.rs +++ b/src/commands/runtime/provision.rs @@ -1,7 +1,7 @@ #[cfg(unix)] use crate::utils::signing_service::{generate_helper_script, SigningService, SigningServiceConfig}; use crate::utils::{ - config::load_config, + config::{ComposedConfig, Config}, container::{RunConfig, SdkContainer}, output::{print_info, print_success, OutputLevel}, remote::{RemoteHost, SshClient}, @@ -16,6 +16,7 @@ use crate::utils::{ use anyhow::{Context, Result}; use std::collections::HashMap; use std::path::PathBuf; +use std::sync::Arc; pub struct RuntimeProvisionConfig { pub runtime_name: String, @@ -37,12 +38,16 @@ pub struct RuntimeProvisionConfig { pub runs_on: Option, /// NFS port for remote execution pub nfs_port: Option, + /// SDK container architecture for cross-arch emulation + pub sdk_arch: Option, } pub struct RuntimeProvisionCommand { config: RuntimeProvisionConfig, #[cfg(unix)] signing_service: Option, + /// Pre-composed configuration to avoid reloading + composed_config: Option>, } impl RuntimeProvisionCommand { @@ -51,14 +56,27 @@ impl RuntimeProvisionCommand { config, #[cfg(unix)] signing_service: None, + composed_config: None, } } + /// Set pre-composed configuration to avoid reloading + pub fn with_composed_config(mut self, config: Arc) -> Self { + self.composed_config = Some(config); + self + } + pub async fn execute(&mut self) -> Result<()> { - // Load configuration - let config = load_config(&self.config.config_path)?; - let content = std::fs::read_to_string(&self.config.config_path)?; - let parsed: serde_yaml::Value = serde_yaml::from_str(&content)?; + // Use provided config or load fresh + let composed = match &self.composed_config { + Some(cc) => Arc::clone(cc), + None => Arc::new(Config::load_composed( + &self.config.config_path, + self.config.target.as_deref(), + )?), + }; + let config = &composed.config; + let parsed = &composed.merged_value; // Get SDK 
configuration from interpolated config let container_image = config @@ -67,7 +85,7 @@ impl RuntimeProvisionCommand { // Get runtime configuration let runtime_config = parsed - .get("runtime") + .get("runtimes") .context("No runtime configuration found")?; // Check if runtime exists @@ -87,7 +105,7 @@ impl RuntimeProvisionCommand { .map(|s| s.to_string()); // Resolve target architecture - let target_arch = resolve_target_required(self.config.target.as_deref(), &config)?; + let target_arch = resolve_target_required(self.config.target.as_deref(), config)?; // Detect remote host architecture if using --runs-on // This is needed to check if the SDK is installed for the remote's architecture @@ -110,7 +128,7 @@ impl RuntimeProvisionCommand { // Validate stamps before proceeding (unless --no-stamps) if !self.config.no_stamps { - let container_helper = SdkContainer::from_config(&self.config.config_path, &config)? + let container_helper = SdkContainer::from_config(&self.config.config_path, config)? .verbose(self.config.verbose); // Provision requires runtime build stamp @@ -134,6 +152,7 @@ impl RuntimeProvisionCommand { interactive: false, runs_on: self.config.runs_on.clone(), nfs_port: self.config.nfs_port, + sdk_arch: self.config.sdk_arch.clone(), ..Default::default() }; @@ -176,8 +195,8 @@ impl RuntimeProvisionCommand { // For package repository extensions, we query the RPM database to get actual installed versions let resolved_extensions = self .collect_runtime_extensions( - &parsed, - &config, + parsed, + config, &self.config.runtime_name, target_arch.as_str(), &self.config.config_path, @@ -292,6 +311,7 @@ impl RuntimeProvisionCommand { state_file_path, container_state_path, &target_arch, + container_image, ) .await? 
} else { @@ -299,7 +319,7 @@ impl RuntimeProvisionCommand { }; // Check if runtime has signing configured - let signing_config = self.setup_signing_service(&config).await?; + let signing_config = self.setup_signing_service(config).await?; // Initialize SDK container helper let container_helper = SdkContainer::new(); @@ -326,6 +346,7 @@ impl RuntimeProvisionCommand { dnf_args: self.config.dnf_args.clone(), runs_on: self.config.runs_on.clone(), nfs_port: self.config.nfs_port, + sdk_arch: self.config.sdk_arch.clone(), ..Default::default() }; @@ -362,6 +383,7 @@ impl RuntimeProvisionCommand { container_state_path, &target_arch, state_file_existed, + container_image, ) .await?; } @@ -376,7 +398,7 @@ impl RuntimeProvisionCommand { // Write provision stamp (unless --no-stamps) if !self.config.no_stamps { - let container_helper = SdkContainer::from_config(&self.config.config_path, &config)? + let container_helper = SdkContainer::from_config(&self.config.config_path, config)? .verbose(self.config.verbose); let inputs = StampInputs::new("provision".to_string()); @@ -394,6 +416,7 @@ impl RuntimeProvisionCommand { interactive: false, runs_on: self.config.runs_on.clone(), nfs_port: self.config.nfs_port, + sdk_arch: self.config.sdk_arch.clone(), ..Default::default() }; @@ -452,7 +475,7 @@ impl RuntimeProvisionCommand { // Get checksum algorithm (defaults to sha256) let checksum_str = config - .runtime + .runtimes .as_ref() .and_then(|r| r.get(&self.config.runtime_name)) .and_then(|rc| rc.signing.as_ref()) @@ -554,6 +577,7 @@ avocado-provision-{} {} state_file_path: &str, container_state_path: &str, _target_arch: &str, + container_image: &str, ) -> Result { let host_state_file = src_dir.join(state_file_path); @@ -582,12 +606,6 @@ avocado-provision-{} {} ); } - // Load configuration to get container image - let config = load_config(&self.config.config_path)?; - let container_image = config - .get_sdk_image() - .context("No SDK container image specified in configuration")?; - let 
container_tool = "docker"; let volume_manager = VolumeManager::new(container_tool.to_string(), self.config.verbose); let volume_state = volume_manager.get_or_create_volume(src_dir).await?; @@ -664,6 +682,7 @@ avocado-provision-{} {} container_state_path: &str, _target_arch: &str, _original_existed: bool, + container_image: &str, ) -> Result<()> { if self.config.verbose { print_info( @@ -672,12 +691,6 @@ avocado-provision-{} {} ); } - // Load configuration to get container image - let config = load_config(&self.config.config_path)?; - let container_image = config - .get_sdk_image() - .context("No SDK container image specified in configuration")?; - let container_tool = "docker"; let volume_manager = VolumeManager::new(container_tool.to_string(), self.config.verbose); let volume_state = volume_manager.get_or_create_volume(src_dir).await?; @@ -783,12 +796,12 @@ avocado-provision-{} {} let runtime_dep_table = merged_runtime .as_ref() - .and_then(|value| value.get("dependencies").and_then(|d| d.as_mapping())) + .and_then(|value| value.get("packages").and_then(|d| d.as_mapping())) .or_else(|| { parsed - .get("runtime") + .get("runtimes") .and_then(|r| r.get(runtime_name)) - .and_then(|runtime_value| runtime_value.get("dependencies")) + .and_then(|runtime_value| runtime_value.get("packages")) .and_then(|d| d.as_mapping()) }); @@ -796,7 +809,7 @@ avocado-provision-{} {} if let Some(deps) = runtime_dep_table { for dep_spec in deps.values() { - if let Some(ext_name) = dep_spec.get("ext").and_then(|v| v.as_str()) { + if let Some(ext_name) = dep_spec.get("extensions").and_then(|v| v.as_str()) { let version = self .resolve_extension_version( parsed, @@ -858,7 +871,7 @@ avocado-provision-{} {} // Try to get version from local [ext] section if let Some(version) = parsed - .get("ext") + .get("extensions") .and_then(|ext_section| ext_section.as_mapping()) .and_then(|ext_table| ext_table.get(ext_name)) .and_then(|ext_config| ext_config.get("version")) @@ -960,6 +973,7 @@ mod tests { 
no_stamps: false, runs_on: None, nfs_port: None, + sdk_arch: None, }; let cmd = RuntimeProvisionCommand::new(config); @@ -989,6 +1003,7 @@ mod tests { no_stamps: false, runs_on: None, nfs_port: None, + sdk_arch: None, }; let cmd = RuntimeProvisionCommand::new(config); @@ -998,69 +1013,8 @@ mod tests { assert!(script.contains("Running SDK lifecycle hook 'avocado-provision'")); } - #[tokio::test] - async fn test_collect_runtime_extensions() { - use std::fs; - use tempfile::TempDir; - - let config_content = r#" -sdk: - image: "docker.io/avocado/sdk:latest" - -runtime: - test-runtime: - dependencies: - ext_one: - ext: alpha-ext - vsn: "1.0.0" - ext_two: - ext: beta-ext - vsn: "2.0.0" - "#; - - let temp_dir = TempDir::new().unwrap(); - let config_path = temp_dir.path().join("avocado.yaml"); - fs::write(&config_path, config_content).unwrap(); - - let parsed: serde_yaml::Value = serde_yaml::from_str(config_content).unwrap(); - let config = crate::utils::config::Config::load(&config_path).unwrap(); - - let provision_config = RuntimeProvisionConfig { - runtime_name: "test-runtime".to_string(), - config_path: config_path.to_str().unwrap().to_string(), - verbose: false, - force: false, - target: Some("x86_64".to_string()), - provision_profile: None, - env_vars: None, - out: None, - container_args: None, - dnf_args: None, - state_file: None, - no_stamps: false, - runs_on: None, - nfs_port: None, - }; - - let command = RuntimeProvisionCommand::new(provision_config); - - let extensions = command - .collect_runtime_extensions( - &parsed, - &config, - "test-runtime", - "x86_64", - config_path.to_str().unwrap(), - "docker.io/avocado/sdk:latest", - ) - .await - .unwrap(); - - assert_eq!( - extensions, - vec!["alpha-ext-1.0.0".to_string(), "beta-ext-2.0.0".to_string()] - ); - } + // NOTE: test_collect_runtime_extensions was removed as it tested the deprecated + // ext:/vsn: format inside runtime packages. The new format uses an extensions array. 
#[test] fn test_new_with_container_args() { @@ -1085,6 +1039,7 @@ runtime: no_stamps: false, runs_on: None, nfs_port: None, + sdk_arch: None, }; let cmd = RuntimeProvisionCommand::new(config); @@ -1123,6 +1078,7 @@ runtime: no_stamps: false, runs_on: None, nfs_port: None, + sdk_arch: None, }; let cmd = RuntimeProvisionCommand::new(config); diff --git a/src/commands/runtime/sign.rs b/src/commands/runtime/sign.rs index 2686e2a..481c1c2 100644 --- a/src/commands/runtime/sign.rs +++ b/src/commands/runtime/sign.rs @@ -3,7 +3,7 @@ //! Signs runtime images (extension images) using configured signing keys. use crate::utils::{ - config::load_config, + config::{ComposedConfig, Config}, container::{RunConfig, SdkContainer}, image_signing::{validate_signing_key_for_use, ChecksumAlgorithm}, output::{print_info, print_success, print_warning, OutputLevel}, @@ -15,6 +15,7 @@ use crate::utils::{ }; use anyhow::{Context, Result}; use std::collections::HashSet; +use std::sync::Arc; /// Command to sign runtime images pub struct RuntimeSignCommand { @@ -27,6 +28,9 @@ pub struct RuntimeSignCommand { #[allow(dead_code)] // Included for API consistency with other commands dnf_args: Option>, no_stamps: bool, + sdk_arch: Option, + /// Pre-composed configuration to avoid reloading + composed_config: Option>, } impl RuntimeSignCommand { @@ -46,6 +50,8 @@ impl RuntimeSignCommand { container_args, dnf_args, no_stamps: false, + sdk_arch: None, + composed_config: None, } } @@ -55,14 +61,32 @@ impl RuntimeSignCommand { self } + /// Set SDK container architecture for cross-arch emulation + pub fn with_sdk_arch(mut self, sdk_arch: Option) -> Self { + self.sdk_arch = sdk_arch; + self + } + + /// Set pre-composed configuration to avoid reloading + pub fn with_composed_config(mut self, config: Arc) -> Self { + self.composed_config = Some(config); + self + } + pub async fn execute(&self) -> Result<()> { - // Load configuration - let config = load_config(&self.config_path)?; - let content = 
std::fs::read_to_string(&self.config_path)?; - let parsed: serde_yaml::Value = serde_yaml::from_str(&content)?; + // Use provided config or load fresh + let composed = match &self.composed_config { + Some(cc) => Arc::clone(cc), + None => Arc::new(Config::load_composed( + &self.config_path, + self.target.as_deref(), + )?), + }; + let config = &composed.config; + let parsed = &composed.merged_value; // Resolve target architecture - let target_arch = resolve_target_required(self.target.as_deref(), &config)?; + let target_arch = resolve_target_required(self.target.as_deref(), config)?; // Validate stamps before proceeding (unless --no-stamps) if !self.no_stamps { @@ -70,7 +94,7 @@ impl RuntimeSignCommand { .get_sdk_image() .context("No SDK container image specified in configuration")?; let container_helper = - SdkContainer::from_config(&self.config_path, &config)?.verbose(self.verbose); + SdkContainer::from_config(&self.config_path, config)?.verbose(self.verbose); // Sign requires runtime build stamp let required = resolve_required_stamps( @@ -89,6 +113,7 @@ impl RuntimeSignCommand { verbose: false, source_environment: true, interactive: false, + sdk_arch: self.sdk_arch.clone(), ..Default::default() }; @@ -117,7 +142,7 @@ impl RuntimeSignCommand { // Verify runtime exists let runtime_config = parsed - .get("runtime") + .get("runtimes") .context("No runtime configuration found")?; runtime_config.get(&self.runtime_name).with_context(|| { @@ -136,22 +161,22 @@ impl RuntimeSignCommand { let binding = serde_yaml::Mapping::new(); let runtime_deps = merged_runtime - .get("dependencies") + .get("packages") .and_then(|v| v.as_mapping()) .unwrap_or(&binding); let mut required_extensions = HashSet::new(); for (_dep_name, dep_spec) in runtime_deps { - if let Some(ext_name) = dep_spec.get("ext").and_then(|v| v.as_str()) { + if let Some(ext_name) = dep_spec.get("extensions").and_then(|v| v.as_str()) { required_extensions.insert(ext_name.to_string()); } } let all_required_extensions 
= - self.find_all_extension_dependencies(&config, &required_extensions, &target_arch)?; + self.find_all_extension_dependencies(config, &required_extensions, &target_arch)?; // Sign images - self.sign_runtime_images(&config, &target_arch, &all_required_extensions) + self.sign_runtime_images(config, &target_arch, &all_required_extensions) .await?; print_success( @@ -165,7 +190,7 @@ impl RuntimeSignCommand { .get_sdk_image() .context("No SDK container image specified")?; let container_helper = - SdkContainer::from_config(&self.config_path, &config)?.verbose(self.verbose); + SdkContainer::from_config(&self.config_path, config)?.verbose(self.verbose); let inputs = StampInputs::new("sign".to_string()); let outputs = StampOutputs::default(); @@ -179,6 +204,7 @@ impl RuntimeSignCommand { verbose: self.verbose, source_environment: true, interactive: false, + sdk_arch: self.sdk_arch.clone(), ..Default::default() }; @@ -243,15 +269,16 @@ impl RuntimeSignCommand { // Check if this is a local extension if let Some(ext_config) = parsed - .get("ext") + .get("extensions") .and_then(|e| e.as_mapping()) .and_then(|table| table.get(ext_name)) { // This is a local extension - check its dependencies - if let Some(dependencies) = ext_config.get("dependencies").and_then(|d| d.as_mapping()) - { + if let Some(dependencies) = ext_config.get("packages").and_then(|d| d.as_mapping()) { for (_dep_name, dep_spec) in dependencies { - if let Some(nested_ext_name) = dep_spec.get("ext").and_then(|v| v.as_str()) { + if let Some(nested_ext_name) = + dep_spec.get("extensions").and_then(|v| v.as_str()) + { // Check if this is an external extension dependency if let Some(external_config_path) = dep_spec.get("config").and_then(|v| v.as_str()) @@ -274,11 +301,12 @@ impl RuntimeSignCommand { // Process its dependencies from the external config if let Some(ext_config) = external_extensions.get(nested_ext_name) { if let Some(nested_deps) = - ext_config.get("dependencies").and_then(|d| d.as_mapping()) + 
ext_config.get("packages").and_then(|d| d.as_mapping()) { for (_nested_dep_name, nested_dep_spec) in nested_deps { - if let Some(nested_nested_ext_name) = - nested_dep_spec.get("ext").and_then(|v| v.as_str()) + if let Some(nested_nested_ext_name) = nested_dep_spec + .get("extensions") + .and_then(|v| v.as_str()) { self.collect_extension_dependencies( config, @@ -316,12 +344,11 @@ impl RuntimeSignCommand { ) })?; - if let Some(runtime_deps) = merged_runtime - .get("dependencies") - .and_then(|v| v.as_mapping()) + if let Some(runtime_deps) = merged_runtime.get("packages").and_then(|v| v.as_mapping()) { for (_dep_name, dep_spec) in runtime_deps { - if let Some(dep_ext_name) = dep_spec.get("ext").and_then(|v| v.as_str()) { + if let Some(dep_ext_name) = dep_spec.get("extensions").and_then(|v| v.as_str()) + { if dep_ext_name == ext_name { if let Some(external_config_path) = dep_spec.get("config").and_then(|v| v.as_str()) @@ -334,11 +361,12 @@ impl RuntimeSignCommand { if let Some(ext_config) = external_extensions.get(ext_name) { if let Some(nested_deps) = - ext_config.get("dependencies").and_then(|d| d.as_mapping()) + ext_config.get("packages").and_then(|d| d.as_mapping()) { for (_nested_dep_name, nested_dep_spec) in nested_deps { - if let Some(nested_ext_name) = - nested_dep_spec.get("ext").and_then(|v| v.as_str()) + if let Some(nested_ext_name) = nested_dep_spec + .get("extensions") + .and_then(|v| v.as_str()) { self.collect_extension_dependencies( config, @@ -402,7 +430,7 @@ impl RuntimeSignCommand { // Get checksum algorithm (defaults to sha256) let checksum_str = config - .runtime + .runtimes .as_ref() .and_then(|r| r.get(&self.runtime_name)) .and_then(|rc| rc.signing.as_ref()) diff --git a/src/commands/sdk/clean.rs b/src/commands/sdk/clean.rs index 2f0bd73..24780e0 100644 --- a/src/commands/sdk/clean.rs +++ b/src/commands/sdk/clean.rs @@ -1,26 +1,44 @@ //! SDK clean command implementation. 
use anyhow::{Context, Result}; +use std::sync::Arc; use crate::utils::{ - config::Config, + config::{ComposedConfig, Config}, container::{RunConfig, SdkContainer}, output::{print_error, print_info, print_success, OutputLevel}, + stamps::{generate_batch_read_stamps_script, validate_stamps_batch, StampRequirement}, target::resolve_target_required, }; +/// Context for running clean operations in containers +struct CleanContext<'a> { + container_helper: &'a SdkContainer, + container_image: &'a str, + target: &'a str, + repo_url: Option<String>, + repo_release: Option<String>, + merged_container_args: Option<Vec<String>>, +} /// Implementation of the 'sdk clean' command. pub struct SdkCleanCommand { /// Path to configuration file pub config_path: String, /// Enable verbose output pub verbose: bool, + /// Specific compile sections to clean + pub sections: Vec<String>, /// Global target architecture pub target: Option<String>, /// Additional arguments to pass to the container runtime pub container_args: Option<Vec<String>>, /// Additional arguments to pass to DNF commands pub dnf_args: Option<Vec<String>>, + /// SDK container architecture for cross-arch emulation + pub sdk_arch: Option<String>, + /// Pre-composed configuration to avoid reloading + composed_config: Option<Arc<ComposedConfig>>, } impl SdkCleanCommand { @@ -28,6 +46,7 @@ pub fn new( config_path: String, verbose: bool, + sections: Vec<String>, target: Option<String>, container_args: Option<Vec<String>>, dnf_args: Option<Vec<String>>, @@ -35,17 +54,39 @@ Self { config_path, verbose, + sections, target, container_args, dnf_args, + sdk_arch: None, + composed_config: None, } } + /// Set SDK container architecture for cross-arch emulation + pub fn with_sdk_arch(mut self, sdk_arch: Option<String>) -> Self { + self.sdk_arch = sdk_arch; + self + } + + /// Set pre-composed configuration to avoid reloading + #[allow(dead_code)] + pub fn with_composed_config(mut self, config: Arc<ComposedConfig>) -> Self { + self.composed_config = Some(config); + self + } + /// Execute the sdk clean command pub async fn execute(&self) -> 
Result<()> { - // Load the configuration - let config = Config::load(&self.config_path) - .with_context(|| format!("Failed to load config from {}", self.config_path))?; + // Use provided config or load fresh + let composed = match &self.composed_config { + Some(cc) => Arc::clone(cc), + None => Arc::new( + Config::load_composed(&self.config_path, self.target.as_deref()) + .with_context(|| format!("Failed to load config from {}", self.config_path))?, + ), + }; + let config = &composed.config; // Merge container args from config with CLI args let merged_container_args = config.merge_sdk_container_args(self.container_args.as_ref()); @@ -60,12 +101,56 @@ impl SdkCleanCommand { let repo_release = config.get_sdk_repo_release(); // Resolve target with proper precedence - let target = resolve_target_required(self.target.as_deref(), &config)?; + let target = resolve_target_required(self.target.as_deref(), config)?; // Create container helper - let container_helper = SdkContainer::new().verbose(self.verbose); + let container_helper = + SdkContainer::from_config(&self.config_path, config)?.verbose(self.verbose); + + // If sections are specified, run clean scripts for those sections + if !self.sections.is_empty() { + // Validate SDK is installed before running clean scripts + let requirements = vec![StampRequirement::sdk_install()]; + let batch_script = generate_batch_read_stamps_script(&requirements); + let run_config = RunConfig { + container_image: container_image.to_string(), + target: target.clone(), + command: batch_script, + verbose: false, + source_environment: true, + interactive: false, + repo_url: repo_url.clone(), + repo_release: repo_release.clone(), + container_args: merged_container_args.clone(), + dnf_args: self.dnf_args.clone(), + sdk_arch: self.sdk_arch.clone(), + ..Default::default() + }; + + let output = container_helper + .run_in_container_with_output(run_config) + .await?; + + let validation = + validate_stamps_batch(&requirements, 
output.as_deref().unwrap_or(""), None); + + if !validation.is_satisfied() { + let error = validation.into_error("Cannot run SDK clean scripts"); + return Err(error.into()); + } + + let ctx = CleanContext { + container_helper: &container_helper, + container_image, + target: &target, + repo_url: repo_url.clone(), + repo_release: repo_release.clone(), + merged_container_args: merged_container_args.clone(), + }; + return self.clean_sections(config, &ctx).await; + } - // Remove the directory using container helper + // Default behavior: Remove the entire SDK directory if self.verbose { print_info( "Removing SDK directory: $AVOCADO_SDK_PREFIX", @@ -74,7 +159,7 @@ } let remove_command = "rm -rf $AVOCADO_SDK_PREFIX"; - let config = RunConfig { + let run_config = RunConfig { container_image: container_image.to_string(), target: target.clone(), command: remove_command.to_string(), @@ -85,9 +170,10 @@ repo_release, container_args: merged_container_args.clone(), dnf_args: self.dnf_args.clone(), + sdk_arch: self.sdk_arch.clone(), ..Default::default() }; - let success = container_helper.run_in_container(config).await?; + let success = container_helper.run_in_container(run_config).await?; if success { print_success("Successfully removed SDK directory.", OutputLevel::Normal); @@ -98,17 +184,164 @@ Ok(()) } + + /// Clean specific compile sections by running their clean scripts + async fn clean_sections(&self, config: &Config, ctx: &CleanContext<'_>) -> Result<()> { + // Get clean scripts for the requested sections + let clean_scripts = self.get_clean_scripts_for_sections(config)?; + + if clean_scripts.is_empty() { + print_info( + "No clean scripts defined for the specified sections.", + OutputLevel::Normal, + ); + return Ok(()); + } + + let section_list = clean_scripts + .iter() + .map(|(name, _)| name.as_str()) + .collect::<Vec<_>>() + .join(", "); + print_info( + &format!( + "Executing clean scripts for {} section(s): 
{section_list}", + clean_scripts.len() + ), + OutputLevel::Normal, + ); + + let mut overall_success = true; + + for (section_name, clean_script) in &clean_scripts { + print_info( + &format!("Running clean script for section '{section_name}': {clean_script}"), + OutputLevel::Normal, + ); + + // Build clean command - scripts are relative to src_dir (/opt/src in container) + let clean_command = format!( + r#"if [ -f '{clean_script}' ]; then echo 'Running clean script: {clean_script}'; AVOCADO_SDK_PREFIX=$AVOCADO_SDK_PREFIX bash '{clean_script}'; else echo 'Clean script {clean_script} not found, skipping.'; fi"# + ); + + if self.verbose { + print_info( + &format!("Running command: {clean_command}"), + OutputLevel::Normal, + ); + } + + let run_config = RunConfig { + container_image: ctx.container_image.to_string(), + target: ctx.target.to_string(), + command: clean_command, + verbose: self.verbose, + source_environment: true, + interactive: false, + repo_url: ctx.repo_url.clone(), + repo_release: ctx.repo_release.clone(), + container_args: ctx.merged_container_args.clone(), + dnf_args: self.dnf_args.clone(), + sdk_arch: self.sdk_arch.clone(), + ..Default::default() + }; + + let success = ctx.container_helper.run_in_container(run_config).await?; + + if success { + print_success( + &format!("Completed clean script for section '{section_name}'."), + OutputLevel::Normal, + ); + } else { + print_error( + &format!("Failed to run clean script for section '{section_name}'."), + OutputLevel::Normal, + ); + overall_success = false; + } + } + + if overall_success { + print_success( + &format!( + "All {} clean script(s) completed successfully!", + clean_scripts.len() + ), + OutputLevel::Normal, + ); + } + + if !overall_success { + return Err(anyhow::anyhow!("One or more clean scripts failed.")); + } + + Ok(()) + } + + /// Get clean scripts for the specified sections + fn get_clean_scripts_for_sections(&self, config: &Config) -> Result<Vec<(String, String)>> { + let mut clean_scripts = Vec::new(); + let
mut missing_sections = Vec::new(); + let mut sections_without_clean = Vec::new(); + + if let Some(sdk) = &config.sdk { + if let Some(compile) = &sdk.compile { + for section_name in &self.sections { + if let Some(section_config) = compile.get(section_name) { + if let Some(clean_script) = &section_config.clean { + clean_scripts.push((section_name.clone(), clean_script.clone())); + } else { + sections_without_clean.push(section_name.clone()); + } + } else { + missing_sections.push(section_name.clone()); + } + } + } else { + // No compile sections at all + missing_sections = self.sections.clone(); + } + } else { + // No SDK section at all + missing_sections = self.sections.clone(); + } + + // Report missing sections as errors + if !missing_sections.is_empty() { + return Err(anyhow::anyhow!( + "The following compile sections were not found: {}", + missing_sections.join(", ") + )); + } + + // Report sections without clean scripts as info + if !sections_without_clean.is_empty() && self.verbose { + print_info( + &format!( + "The following sections have no clean script defined: {}", + sections_without_clean.join(", ") + ), + OutputLevel::Normal, + ); + } + + Ok(clean_scripts) + } } #[cfg(test)] mod tests { use super::*; + use std::io::Write; + use tempfile::NamedTempFile; #[test] fn test_new() { let cmd = SdkCleanCommand::new( "config.toml".to_string(), true, + vec!["section1".to_string()], Some("test-target".to_string()), None, None, @@ -116,15 +349,160 @@ assert_eq!(cmd.config_path, "config.toml"); assert!(cmd.verbose); + assert_eq!(cmd.sections, vec!["section1"]); assert_eq!(cmd.target, Some("test-target".to_string())); } #[test] fn test_new_minimal() { - let cmd = SdkCleanCommand::new("config.toml".to_string(), false, None, None, None); + let cmd = SdkCleanCommand::new("config.toml".to_string(), false, vec![], None, None, None); assert_eq!(cmd.config_path, "config.toml"); assert!(!cmd.verbose); + assert!(cmd.sections.is_empty()); assert_eq!(cmd.target, None); }
+ + #[test] + fn test_get_clean_scripts_for_sections_with_clean_script() { + let config_content = r#" +sdk: + image: "test-image" + compile: + my-library: + compile: "build.sh" + clean: "clean.sh" + packages: + gcc: "*" + other-library: + compile: "build-other.sh" + packages: + make: "*" +"#; + let mut temp_file = NamedTempFile::new().unwrap(); + write!(temp_file, "{config_content}").unwrap(); + let config = Config::load(temp_file.path()).unwrap(); + + let cmd = SdkCleanCommand::new( + temp_file.path().to_string_lossy().to_string(), + false, + vec!["my-library".to_string()], + None, + None, + None, + ); + + let clean_scripts = cmd.get_clean_scripts_for_sections(&config).unwrap(); + + assert_eq!(clean_scripts.len(), 1); + assert_eq!(clean_scripts[0].0, "my-library"); + assert_eq!(clean_scripts[0].1, "clean.sh"); + } + + #[test] + fn test_get_clean_scripts_for_sections_no_clean_script() { + let config_content = r#" +sdk: + image: "test-image" + compile: + my-library: + compile: "build.sh" + packages: + gcc: "*" +"#; + let mut temp_file = NamedTempFile::new().unwrap(); + write!(temp_file, "{config_content}").unwrap(); + let config = Config::load(temp_file.path()).unwrap(); + + let cmd = SdkCleanCommand::new( + temp_file.path().to_string_lossy().to_string(), + false, + vec!["my-library".to_string()], + None, + None, + None, + ); + + let clean_scripts = cmd.get_clean_scripts_for_sections(&config).unwrap(); + + // Section exists but has no clean script + assert!(clean_scripts.is_empty()); + } + + #[test] + fn test_get_clean_scripts_for_nonexistent_section() { + let config_content = r#" +sdk: + image: "test-image" + compile: + my-library: + compile: "build.sh" + clean: "clean.sh" +"#; + let mut temp_file = NamedTempFile::new().unwrap(); + write!(temp_file, "{config_content}").unwrap(); + let config = Config::load(temp_file.path()).unwrap(); + + let cmd = SdkCleanCommand::new( + temp_file.path().to_string_lossy().to_string(), + false, + 
vec!["nonexistent-library".to_string()], + None, + None, + None, + ); + + // Should return an error for nonexistent section + let result = cmd.get_clean_scripts_for_sections(&config); + assert!(result.is_err()); + assert!(result + .unwrap_err() + .to_string() + .contains("nonexistent-library")); + } + + #[test] + fn test_get_clean_scripts_multiple_sections() { + let config_content = r#" +sdk: + image: "test-image" + compile: + lib-a: + compile: "build-a.sh" + clean: "clean-a.sh" + lib-b: + compile: "build-b.sh" + clean: "clean-b.sh" + lib-c: + compile: "build-c.sh" +"#; + let mut temp_file = NamedTempFile::new().unwrap(); + write!(temp_file, "{config_content}").unwrap(); + let config = Config::load(temp_file.path()).unwrap(); + + let cmd = SdkCleanCommand::new( + temp_file.path().to_string_lossy().to_string(), + false, + vec![ + "lib-a".to_string(), + "lib-b".to_string(), + "lib-c".to_string(), + ], + None, + None, + None, + ); + + let clean_scripts = cmd.get_clean_scripts_for_sections(&config).unwrap(); + + // lib-a and lib-b have clean scripts, lib-c doesn't + assert_eq!(clean_scripts.len(), 2); + + let section_names: Vec<&str> = clean_scripts + .iter() + .map(|(name, _)| name.as_str()) + .collect(); + assert!(section_names.contains(&"lib-a")); + assert!(section_names.contains(&"lib-b")); + } } diff --git a/src/commands/sdk/compile.rs b/src/commands/sdk/compile.rs index 73ba11f..9f060fa 100644 --- a/src/commands/sdk/compile.rs +++ b/src/commands/sdk/compile.rs @@ -1,9 +1,10 @@ //! SDK compile command implementation. 
use anyhow::{Context, Result}; +use std::sync::Arc; use crate::utils::{ - config::Config, + config::{ComposedConfig, Config}, container::{RunConfig, SdkContainer}, output::{print_error, print_info, print_success, OutputLevel}, stamps::{generate_batch_read_stamps_script, validate_stamps_batch, StampRequirement}, @@ -33,6 +34,14 @@ pub struct SdkCompileCommand { pub dnf_args: Option>, /// Disable stamp validation pub no_stamps: bool, + /// SDK container architecture for cross-arch emulation + pub sdk_arch: Option, + /// Working directory for compile scripts (container path). + /// If set, scripts are executed from this directory instead of /opt/src. + /// Used for remote extensions where scripts are in $AVOCADO_PREFIX/includes// + pub workdir: Option, + /// Pre-composed configuration to avoid reloading + composed_config: Option>, } impl SdkCompileCommand { @@ -53,26 +62,55 @@ impl SdkCompileCommand { container_args, dnf_args, no_stamps: false, + sdk_arch: None, + workdir: None, + composed_config: None, } } + /// Set the working directory for compile scripts (container path) + pub fn with_workdir(mut self, workdir: Option) -> Self { + self.workdir = workdir; + self + } + /// Set the no_stamps flag pub fn with_no_stamps(mut self, no_stamps: bool) -> Self { self.no_stamps = no_stamps; self } + /// Set SDK container architecture for cross-arch emulation + pub fn with_sdk_arch(mut self, sdk_arch: Option) -> Self { + self.sdk_arch = sdk_arch; + self + } + + /// Set pre-composed configuration to avoid reloading + #[allow(dead_code)] + pub fn with_composed_config(mut self, config: Arc) -> Self { + self.composed_config = Some(config); + self + } + /// Execute the sdk compile command pub async fn execute(&self) -> Result<()> { - // Load the configuration + // Use provided config or load fresh if self.verbose { print_info( &format!("Loading SDK compile config from: {}", self.config_path), OutputLevel::Normal, ); } - let config = Config::load(&self.config_path) - 
.with_context(|| format!("Failed to load config from {}", self.config_path))?; + let composed = match &self.composed_config { + Some(cc) => Arc::clone(cc), + None => Arc::new( + Config::load_composed(&self.config_path, self.target.as_deref()).with_context( + || format!("Failed to load composed config from {}", self.config_path), + )?, + ), + }; + let config = &composed.config; // Validate stamps before proceeding (unless --no-stamps) // SDK compile requires SDK to be installed @@ -80,9 +118,9 @@ impl SdkCompileCommand { let container_image = config .get_sdk_image() .context("No SDK container image specified in configuration")?; - let target = resolve_target_required(self.target.as_deref(), &config)?; + let target = resolve_target_required(self.target.as_deref(), config)?; let container_helper = - SdkContainer::from_config(&self.config_path, &config)?.verbose(self.verbose); + SdkContainer::from_config(&self.config_path, config)?.verbose(self.verbose); let requirements = vec![StampRequirement::sdk_install()]; @@ -98,6 +136,7 @@ impl SdkCompileCommand { repo_release: config.get_sdk_repo_release(), container_args: config.merge_sdk_container_args(self.container_args.as_ref()), dnf_args: self.dnf_args.clone(), + sdk_arch: self.sdk_arch.clone(), ..Default::default() }; @@ -143,7 +182,7 @@ impl SdkCompileCommand { let merged_container_args = config.merge_sdk_container_args(self.container_args.as_ref()); // Get compile sections from config - let compile_sections = self.get_compile_sections_from_config(&config); + let compile_sections = self.get_compile_sections_from_config(config); if compile_sections.is_empty() { // If specific sections were requested but none found, this is an error @@ -210,7 +249,7 @@ impl SdkCompileCommand { let repo_release = config.get_sdk_repo_release(); // Resolve target with proper precedence - let target = resolve_target_required(self.target.as_deref(), &config)?; + let target = resolve_target_required(self.target.as_deref(), config)?; let mut 
overall_success = true; @@ -224,12 +263,22 @@ impl SdkCompileCommand { ); let container_helper = - SdkContainer::from_config(&self.config_path, &config)?.verbose(self.verbose); - - let compile_command = format!( - r#"if [ -f '{}' ]; then echo 'Running compile script: {}'; AVOCADO_SDK_PREFIX=$AVOCADO_SDK_PREFIX bash '{}'; else echo 'Compile script {} not found.' && ls -la; exit 1; fi"#, - section.script, section.script, section.script, section.script - ); + SdkContainer::from_config(&self.config_path, config)?.verbose(self.verbose); + + // Build compile command with optional workdir prefix + // For remote extensions, scripts are in $AVOCADO_PREFIX/includes// instead of /opt/src + // Note: Use double quotes for workdir so $AVOCADO_PREFIX gets expanded by the shell + let compile_command = if let Some(ref workdir) = self.workdir { + format!( + r#"cd "{workdir}" && if [ -f '{}' ]; then echo 'Running compile script: {}'; AVOCADO_SDK_PREFIX=$AVOCADO_SDK_PREFIX bash '{}'; else echo 'Compile script {} not found.' && ls -la; exit 1; fi"#, + section.script, section.script, section.script, section.script + ) + } else { + format!( + r#"if [ -f '{}' ]; then echo 'Running compile script: {}'; AVOCADO_SDK_PREFIX=$AVOCADO_SDK_PREFIX bash '{}'; else echo 'Compile script {} not found.' 
&& ls -la; exit 1; fi"#, + section.script, section.script, section.script, section.script + ) + }; let config = RunConfig { container_image: container_image.to_string(), @@ -242,6 +291,7 @@ repo_release: repo_release.clone(), container_args: merged_container_args.clone(), dnf_args: self.dnf_args.clone(), + sdk_arch: self.sdk_arch.clone(), ..Default::default() }; let success = container_helper.run_in_container(config).await?; @@ -337,16 +387,17 @@ mod tests { let cmd = SdkCompileCommand::new("test.yaml".to_string(), false, vec![], None, None, None); let config_content = r#" -[sdk] -image = "test-image" - -[sdk.compile.app] -compile = "build.sh" -dependencies = { gcc = "*" } - -[sdk.compile.library] -compile = "lib_build.sh" -dependencies = { make = "*" } +sdk: + image: "test-image" + compile: + app: + compile: "build.sh" + packages: + gcc: "*" + library: + compile: "lib_build.sh" + packages: + make: "*" "#; let mut temp_file = NamedTempFile::new().unwrap(); write!(temp_file, "{config_content}").unwrap(); @@ -405,7 +456,8 @@ dependencies = { gcc = "*" } let section_config = crate::utils::config::CompileConfig { compile: Some("my_script.sh".to_string()), - dependencies: Some(deps), + clean: None, + packages: Some(deps), }; let script = cmd.find_compile_script_in_section(&section_config); @@ -414,7 +466,8 @@ dependencies = { gcc = "*" } // Test section with no compile script let section_config_no_script = crate::utils::config::CompileConfig { compile: None, - dependencies: None, + clean: None, + packages: None, }; let script = cmd.find_compile_script_in_section(&section_config_no_script); diff --git a/src/commands/sdk/deps.rs b/src/commands/sdk/deps.rs index 1cfecc4..3acfe92 100644 --- a/src/commands/sdk/deps.rs +++ b/src/commands/sdk/deps.rs @@ -2,9 +2,10 @@ use anyhow::{Context, Result}; use std::collections::HashMap; +use std::sync::Arc; use crate::utils::{ - config::Config, + config::{ComposedConfig, Config}, output::{print_success, OutputLevel}, 
}; @@ -15,24 +16,43 @@ type DependencySections = HashMap>; pub struct SdkDepsCommand { /// Path to configuration file pub config_path: String, + /// Pre-composed configuration to avoid reloading + composed_config: Option>, } impl SdkDepsCommand { /// Create a new SdkDepsCommand instance pub fn new(config_path: String) -> Self { - Self { config_path } + Self { + config_path, + composed_config: None, + } + } + + /// Set pre-composed configuration to avoid reloading + #[allow(dead_code)] + pub fn with_composed_config(mut self, config: Arc) -> Self { + self.composed_config = Some(config); + self } /// Execute the sdk deps command pub fn execute(&self) -> Result<()> { - let config = Config::load(&self.config_path) - .with_context(|| format!("Failed to load config from {}", self.config_path))?; + // Use provided config or load fresh + let composed = match &self.composed_config { + Some(cc) => Arc::clone(cc), + None => Arc::new( + Config::load_composed(&self.config_path, None) + .with_context(|| format!("Failed to load config from {}", self.config_path))?, + ), + }; + let config = &composed.config; // Read the config file content for extension parsing let config_content = std::fs::read_to_string(&self.config_path) .with_context(|| format!("Failed to read config file {}", self.config_path))?; - let sections = self.list_packages_by_section(&config, &config_content)?; + let sections = self.list_packages_by_section(config, &config_content)?; let total_count = self.display_packages_by_section(§ions); print_success( @@ -209,7 +229,7 @@ impl SdkDepsCommand { } // Try extension reference - if let Some(serde_yaml::Value::String(ext_name)) = table.get("ext") { + if let Some(serde_yaml::Value::String(ext_name)) = table.get("extensions") { let version = self.get_extension_version(config, ext_name); return vec![("ext".to_string(), ext_name.clone(), version)]; } @@ -240,7 +260,7 @@ impl SdkDepsCommand { .as_ref() .and_then(|sdk| sdk.compile.as_ref()) .and_then(|compile| 
compile.get(compile_name)) - .and_then(|compile_config| compile_config.dependencies.as_ref()); + .and_then(|compile_config| compile_config.packages.as_ref()); let Some(deps) = compile_deps else { return Vec::new(); @@ -287,11 +307,10 @@ mod tests { // Create a minimal config for testing let config_content = r#" -[sdk] -image = "test-image" - -[sdk.dependencies] -cmake = "*" +sdk: + image: "test-image" + packages: + cmake: "*" "#; let mut temp_file = NamedTempFile::new().unwrap(); write!(temp_file, "{config_content}").unwrap(); @@ -342,12 +361,12 @@ cmake = "*" let config_content = r#" sdk: image: "test-image" - dependencies: + packages: cmake: "*" gcc: "11.0.0" compile: app: - dependencies: + packages: make: "4.3" "#; let mut temp_file = tempfile::Builder::new().suffix(".yaml").tempfile().unwrap(); @@ -384,16 +403,16 @@ sdk: let config_content = r#" sdk: image: "test-image" - dependencies: + packages: cmake: "*" -ext: +extensions: avocado-dev: types: - sysext - confext sdk: - dependencies: + packages: nativesdk-avocado-hitl: "*" avocado-dev1: @@ -401,7 +420,7 @@ ext: - sysext - confext sdk: - dependencies: + packages: nativesdk-avocado-hitl: "*" "#; let mut temp_file = tempfile::Builder::new().suffix(".yaml").tempfile().unwrap(); diff --git a/src/commands/sdk/dnf.rs b/src/commands/sdk/dnf.rs index ed7b25e..a089260 100644 --- a/src/commands/sdk/dnf.rs +++ b/src/commands/sdk/dnf.rs @@ -1,9 +1,10 @@ //! SDK DNF command implementation. 
use anyhow::{Context, Result}; +use std::sync::Arc; use crate::utils::{ - config::Config, + config::{ComposedConfig, Config}, container::{RunConfig, SdkContainer}, output::{print_error, print_success, OutputLevel}, target::resolve_target_required, @@ -23,6 +24,10 @@ pub struct SdkDnfCommand { pub container_args: Option>, /// Additional arguments to pass to DNF commands pub dnf_args: Option>, + /// SDK container architecture for cross-arch emulation + pub sdk_arch: Option, + /// Pre-composed configuration to avoid reloading + composed_config: Option>, } impl SdkDnfCommand { @@ -42,9 +47,24 @@ impl SdkDnfCommand { target, container_args, dnf_args, + sdk_arch: None, + composed_config: None, } } + /// Set SDK container architecture for cross-arch emulation + pub fn with_sdk_arch(mut self, sdk_arch: Option) -> Self { + self.sdk_arch = sdk_arch; + self + } + + /// Set pre-composed configuration to avoid reloading + #[allow(dead_code)] + pub fn with_composed_config(mut self, config: Arc) -> Self { + self.composed_config = Some(config); + self + } + /// Execute the sdk dnf command pub async fn execute(&self) -> Result<()> { if self.command.is_empty() { @@ -53,9 +73,16 @@ impl SdkDnfCommand { )); } - // Load the configuration - let config = Config::load(&self.config_path) - .with_context(|| format!("Failed to load config from {}", self.config_path))?; + // Use provided config or load fresh + let composed = match &self.composed_config { + Some(cc) => Arc::clone(cc), + None => Arc::new( + Config::load_composed(&self.config_path, self.target.as_deref()).with_context( + || format!("Failed to load composed config from {}", self.config_path), + )?, + ), + }; + let config = &composed.config; // Get the SDK image from configuration let container_image = config.get_sdk_image().ok_or_else(|| { @@ -70,18 +97,20 @@ impl SdkDnfCommand { let merged_container_args = config.merge_sdk_container_args(self.container_args.as_ref()); // Resolve target with proper precedence - let target = 
resolve_target_required(self.target.as_deref(), &config)?; + let target = resolve_target_required(self.target.as_deref(), config)?; let container_helper = SdkContainer::new(); // Build DNF command + // Use $DNF_SDK_COMBINED_REPO_CONF to include both SDK repos and target-specific repos + // (including the extension repo: ${AVOCADO_TARGET}-target-ext) let dnf_args_str = if let Some(args) = &self.dnf_args { format!(" {} ", args.join(" ")) } else { String::new() }; let command = format!( - "RPM_CONFIGDIR=$AVOCADO_SDK_PREFIX/usr/lib/rpm $DNF_SDK_HOST $DNF_SDK_HOST_OPTS $DNF_SDK_REPO_CONF --disablerepo=${{AVOCADO_TARGET}}-target-ext {} {}", + "RPM_CONFIGDIR=$AVOCADO_SDK_PREFIX/usr/lib/rpm $DNF_SDK_HOST $DNF_SDK_HOST_OPTS $DNF_SDK_COMBINED_REPO_CONF {} {}", dnf_args_str, self.command.join(" ") ); @@ -134,6 +163,7 @@ impl SdkDnfCommand { repo_release: repo_release.cloned(), container_args: container_args.cloned(), dnf_args: self.dnf_args.clone(), + sdk_arch: self.sdk_arch.clone(), ..Default::default() }; container_helper.run_in_container(config).await diff --git a/src/commands/sdk/install.rs b/src/commands/sdk/install.rs index 460b683..0bc5c3f 100644 --- a/src/commands/sdk/install.rs +++ b/src/commands/sdk/install.rs @@ -3,10 +3,11 @@ use anyhow::{Context, Result}; use std::collections::HashMap; use std::path::PathBuf; +use std::sync::Arc; use crate::utils::{ - config::Config, - container::{RunConfig, SdkContainer}, + config::{ComposedConfig, Config}, + container::{normalize_sdk_arch, RunConfig, SdkContainer}, lockfile::{build_package_spec_with_lock, LockFile, SysrootType}, output::{print_error, print_info, print_success, OutputLevel}, runs_on::RunsOnContext, @@ -37,6 +38,10 @@ pub struct SdkInstallCommand { pub runs_on: Option, /// NFS port for remote execution pub nfs_port: Option, + /// SDK container architecture for cross-arch emulation + pub sdk_arch: Option, + /// Pre-composed configuration to avoid reloading + composed_config: Option>, } impl SdkInstallCommand { @@ 
-59,6 +64,8 @@ impl SdkInstallCommand { no_stamps: false, runs_on: None, nfs_port: None, + sdk_arch: None, + composed_config: None, } } @@ -75,26 +82,36 @@ impl SdkInstallCommand { self } + /// Set SDK container architecture for cross-arch emulation + pub fn with_sdk_arch(mut self, sdk_arch: Option) -> Self { + self.sdk_arch = sdk_arch; + self + } + + /// Set pre-composed configuration to avoid reloading + pub fn with_composed_config(mut self, config: Arc) -> Self { + self.composed_config = Some(config); + self + } + /// Execute the sdk install command pub async fn execute(&self) -> Result<()> { - // Early target validation - load basic config first - let basic_config = Config::load(&self.config_path) - .with_context(|| format!("Failed to load config from {}", self.config_path))?; - let target = validate_and_log_target(self.target.as_deref(), &basic_config)?; - - // Load the composed configuration (merges external configs, applies interpolation) - let composed = Config::load_composed(&self.config_path, self.target.as_deref()) - .with_context(|| format!("Failed to load composed config from {}", self.config_path))?; + // Use provided config or load fresh + let composed = match &self.composed_config { + Some(cc) => Arc::clone(cc), + None => Arc::new( + Config::load_composed(&self.config_path, self.target.as_deref()).with_context( + || format!("Failed to load composed config from {}", self.config_path), + )?, + ), + }; let config = &composed.config; + let target = validate_and_log_target(self.target.as_deref(), config)?; // Merge container args from config with CLI args let merged_container_args = config.merge_sdk_container_args(self.container_args.as_ref()); - // Serialize the merged config back to string for extension parsing methods - let config_content = serde_yaml::to_string(&composed.merged_value) - .with_context(|| "Failed to serialize composed config")?; - // Get the SDK image from configuration let container_image = config.get_sdk_image().ok_or_else(|| { 
anyhow::anyhow!("No container image specified in config under 'sdk.image'") @@ -107,14 +124,8 @@ impl SdkInstallCommand { .get_sdk_dependencies_for_target(&self.config_path, &target) .with_context(|| "Failed to get SDK dependencies with target interpolation")?; - // Get extension SDK dependencies (from the composed, interpolated config) - let extension_sdk_dependencies = config - .get_extension_sdk_dependencies_with_config_path_and_target( - &config_content, - Some(&self.config_path), - Some(&target), - ) - .with_context(|| "Failed to parse extension SDK dependencies")?; + // Note: extension_sdk_dependencies is computed inside execute_install after + // fetching remote extensions, since we need SDK repos to be available first // Get repo_url and repo_release from config let repo_url = config.get_sdk_repo_url(); @@ -140,11 +151,9 @@ impl SdkInstallCommand { let result = self .execute_install( config, - &composed, &target, container_image, &sdk_dependencies, - &extension_sdk_dependencies, repo_url.as_deref(), repo_release.as_deref(), &container_helper, @@ -166,16 +175,62 @@ impl SdkInstallCommand { result } + /// Fetch remote extensions after SDK bootstrap + /// + /// This discovers extensions with a `source` field and fetches them + /// using the SDK environment where repos are already configured. 
+ async fn fetch_remote_extensions_in_sdk( + &self, + target: &str, + merged_container_args: Option<&Vec>, + ) -> Result<()> { + use crate::commands::ext::ExtFetchCommand; + + // Discover remote extensions (with target interpolation for extension names) + let remote_extensions = + Config::discover_remote_extensions(&self.config_path, Some(target))?; + + if remote_extensions.is_empty() { + return Ok(()); + } + + print_info( + &format!( + "Fetching {} remote extension(s)...", + remote_extensions.len() + ), + OutputLevel::Normal, + ); + + // Use ExtFetchCommand to fetch extensions with SDK environment + let mut fetch_cmd = ExtFetchCommand::new( + self.config_path.clone(), + None, // Fetch all remote extensions + self.verbose, + false, // Don't force re-fetch + Some(target.to_string()), + merged_container_args.cloned(), + ) + .with_sdk_arch(self.sdk_arch.clone()); + + // Pass through the runs_on context for remote execution + if let Some(runs_on) = &self.runs_on { + fetch_cmd = fetch_cmd.with_runs_on(runs_on.clone(), self.nfs_port); + } + + fetch_cmd.execute().await?; + + Ok(()) + } + /// Internal implementation of the install logic #[allow(clippy::too_many_arguments)] async fn execute_install( &self, config: &Config, - composed: &crate::utils::config::ComposedConfig, target: &str, container_image: &str, sdk_dependencies: &Option>, - extension_sdk_dependencies: &HashMap>, repo_url: Option<&str>, repo_release: Option<&str>, container_helper: &SdkContainer, @@ -183,8 +238,11 @@ impl SdkInstallCommand { runs_on_context: Option<&RunsOnContext>, ) -> Result<()> { // Determine host architecture for SDK package tracking - // For remote execution, query the remote host; for local, use local arch - let host_arch = if let Some(context) = runs_on_context { + // Priority: sdk_arch (for cross-arch emulation) > runs_on remote arch > local arch + let host_arch = if let Some(ref arch) = self.sdk_arch { + // Convert sdk_arch to normalized architecture name (e.g., "aarch64", "x86_64") + 
normalize_sdk_arch(arch)? + } else if let Some(context) = runs_on_context { context .get_host_arch() .await @@ -387,8 +445,13 @@ MACROS_EOF ..Default::default() }; - let init_success = - run_container_command(container_helper, run_config, runs_on_context).await?; + let init_success = run_container_command( + container_helper, + run_config, + runs_on_context, + self.sdk_arch.as_ref(), + ) + .await?; if init_success { print_success("Initialized SDK environment.", OutputLevel::Normal); @@ -445,8 +508,13 @@ $DNF_SDK_HOST $DNF_NO_SCRIPTS \ ..Default::default() }; - let sdk_target_success = - run_container_command(container_helper, run_config, runs_on_context).await?; + let sdk_target_success = run_container_command( + container_helper, + run_config, + runs_on_context, + self.sdk_arch.as_ref(), + ) + .await?; // Track all SDK packages installed for lock file update at the end let mut all_sdk_package_names: Vec = Vec::new(); @@ -492,7 +560,13 @@ $DNF_SDK_HOST \ ..Default::default() }; - run_container_command(container_helper, run_config, runs_on_context).await?; + run_container_command( + container_helper, + run_config, + runs_on_context, + self.sdk_arch.as_ref(), + ) + .await?; // Install avocado-sdk-bootstrap with version from distro.version print_info("Installing SDK bootstrap.", OutputLevel::Normal); @@ -542,8 +616,13 @@ $DNF_SDK_HOST $DNF_NO_SCRIPTS \ ..Default::default() }; - let bootstrap_success = - run_container_command(container_helper, run_config, runs_on_context).await?; + let bootstrap_success = run_container_command( + container_helper, + run_config, + runs_on_context, + self.sdk_arch.as_ref(), + ) + .await?; if bootstrap_success { print_success("Installed SDK bootstrap.", OutputLevel::Normal); @@ -553,6 +632,27 @@ $DNF_SDK_HOST $DNF_NO_SCRIPTS \ return Err(anyhow::anyhow!("Failed to install SDK bootstrap.")); } + // Fetch remote extensions now that SDK repos are available + // This uses the SDK environment with configured repos to download extension 
packages + self.fetch_remote_extensions_in_sdk(target, merged_container_args) + .await?; + + // Reload composed config to include extension configs + let composed = Config::load_composed(&self.config_path, Some(target)) + .with_context(|| "Failed to reload composed config after fetching extensions")?; + let config = &composed.config; + + // Re-compute extension SDK dependencies now that extension configs are available + let config_content = serde_yaml::to_string(&composed.merged_value) + .with_context(|| "Failed to serialize composed config")?; + let extension_sdk_dependencies = config + .get_extension_sdk_dependencies_with_config_path_and_target( + &config_content, + Some(&self.config_path), + Some(target), + ) + .with_context(|| "Failed to parse extension SDK dependencies")?; + // After bootstrap, source environment-setup and configure SSL certs for subsequent commands if self.verbose { print_info( @@ -594,7 +694,13 @@ fi ..Default::default() }; - run_container_command(container_helper, run_config, runs_on_context).await?; + run_container_command( + container_helper, + run_config, + runs_on_context, + self.sdk_arch.as_ref(), + ) + .await?; // Install SDK dependencies (into SDK) let mut sdk_packages = Vec::new(); @@ -612,7 +718,7 @@ fi } // Add extension SDK dependencies to the package list - for (ext_name, ext_deps) in extension_sdk_dependencies { + for (ext_name, ext_deps) in &extension_sdk_dependencies { if self.verbose { print_info( &format!("Adding SDK dependencies from extension '{ext_name}'"), @@ -672,8 +778,13 @@ $DNF_SDK_HOST \ // runs_on handled by shared context ..Default::default() }; - let install_success = - run_container_command(container_helper, run_config, runs_on_context).await?; + let install_success = run_container_command( + container_helper, + run_config, + runs_on_context, + self.sdk_arch.as_ref(), + ) + .await?; if install_success { print_success("Installed SDK dependencies.", OutputLevel::Normal); @@ -699,6 +810,7 @@ $DNF_SDK_HOST \ 
repo_release.map(|s| s.to_string()), merged_container_args.cloned(), runs_on_context, + self.sdk_arch.as_ref(), ) .await?; @@ -765,8 +877,13 @@ $DNF_SDK_HOST $DNF_NO_SCRIPTS $DNF_SDK_TARGET_REPO_CONF \ ..Default::default() }; - let rootfs_success = - run_container_command(container_helper, run_config, runs_on_context).await?; + let rootfs_success = run_container_command( + container_helper, + run_config, + runs_on_context, + self.sdk_arch.as_ref(), + ) + .await?; if rootfs_success { print_success("Installed rootfs sysroot.", OutputLevel::Normal); @@ -782,6 +899,7 @@ $DNF_SDK_HOST $DNF_NO_SCRIPTS $DNF_SDK_TARGET_REPO_CONF \ repo_release.map(|s| s.to_string()), merged_container_args.cloned(), runs_on_context, + self.sdk_arch.as_ref(), ) .await?; @@ -887,8 +1005,13 @@ $DNF_SDK_HOST $DNF_NO_SCRIPTS $DNF_SDK_TARGET_REPO_CONF \ ..Default::default() }; - let install_success = - run_container_command(container_helper, run_config, runs_on_context).await?; + let install_success = run_container_command( + container_helper, + run_config, + runs_on_context, + self.sdk_arch.as_ref(), + ) + .await?; if install_success { print_success( @@ -910,6 +1033,7 @@ $DNF_SDK_HOST $DNF_NO_SCRIPTS $DNF_SDK_TARGET_REPO_CONF \ repo_release.map(|s| s.to_string()), merged_container_args.cloned(), runs_on_context, + self.sdk_arch.as_ref(), ) .await?; @@ -972,7 +1096,13 @@ $DNF_SDK_HOST $DNF_NO_SCRIPTS $DNF_SDK_TARGET_REPO_CONF \ ..Default::default() }; - run_container_command(container_helper, run_config, runs_on_context).await?; + run_container_command( + container_helper, + run_config, + runs_on_context, + self.sdk_arch.as_ref(), + ) + .await?; if self.verbose { print_info("Wrote SDK install stamp.", OutputLevel::Normal); @@ -1024,9 +1154,15 @@ $DNF_SDK_HOST $DNF_NO_SCRIPTS $DNF_SDK_TARGET_REPO_CONF \ /// Helper function to run a container command, using shared context if available async fn run_container_command( container_helper: &SdkContainer, - config: RunConfig, + mut config: RunConfig, 
runs_on_context: Option<&RunsOnContext>, + sdk_arch: Option<&String>, ) -> Result { + // Inject sdk_arch if provided + if let Some(arch) = sdk_arch { + config.sdk_arch = Some(arch.clone()); + } + if let Some(context) = runs_on_context { // Use the shared context - don't set runs_on in config as we're handling it container_helper diff --git a/src/commands/sdk/run.rs b/src/commands/sdk/run.rs index 3c2c760..0a03b00 100644 --- a/src/commands/sdk/run.rs +++ b/src/commands/sdk/run.rs @@ -5,9 +5,10 @@ use crate::utils::signing_service::{generate_helper_script, SigningService, Sign use anyhow::{Context, Result}; #[cfg(unix)] use std::path::PathBuf; +use std::sync::Arc; use crate::utils::{ - config::Config, + config::{ComposedConfig, Config}, container::{RunConfig, SdkContainer}, output::{print_info, print_success, OutputLevel}, target::validate_and_log_target, @@ -47,9 +48,13 @@ pub struct SdkRunCommand { pub runs_on: Option, /// NFS port for remote execution pub nfs_port: Option, + /// SDK container architecture for cross-arch emulation + pub sdk_arch: Option, /// Signing service handle (Unix only) #[cfg(unix)] signing_service: Option, + /// Pre-composed configuration to avoid reloading + composed_config: Option>, } impl SdkRunCommand { @@ -88,8 +93,10 @@ impl SdkRunCommand { no_bootstrap, runs_on: None, nfs_port: None, + sdk_arch: None, #[cfg(unix)] signing_service: None, + composed_config: None, } } @@ -100,6 +107,19 @@ impl SdkRunCommand { self } + /// Set SDK container architecture for cross-arch emulation + pub fn with_sdk_arch(mut self, sdk_arch: Option) -> Self { + self.sdk_arch = sdk_arch; + self + } + + /// Set pre-composed configuration to avoid reloading + #[allow(dead_code)] + pub fn with_composed_config(mut self, config: Arc) -> Self { + self.composed_config = Some(config); + self + } + /// Setup signing service for runtime if signing is configured #[cfg(unix)] async fn setup_signing_service( @@ -138,7 +158,7 @@ impl SdkRunCommand { // Get checksum algorithm 
(defaults to sha256) let checksum_str = config - .runtime + .runtimes .as_ref() .and_then(|r| r.get(runtime_name)) .and_then(|rc| rc.signing.as_ref()) @@ -242,12 +262,18 @@ impl SdkRunCommand { )); } - // Load the configuration - let config = Config::load(&self.config_path) - .with_context(|| format!("Failed to load config from {}", self.config_path))?; + // Use provided config or load fresh + let composed = match &self.composed_config { + Some(cc) => Arc::clone(cc), + None => Arc::new( + Config::load_composed(&self.config_path, self.target.as_deref()) + .with_context(|| format!("Failed to load config from {}", self.config_path))?, + ), + }; + let config = &composed.config; // Early target validation and logging - fail fast if target is unsupported - let target = validate_and_log_target(self.target.as_deref(), &config)?; + let target = validate_and_log_target(self.target.as_deref(), config)?; // Get merged SDK configuration for the target let merged_sdk_config = config.get_merged_sdk_config(&target, &self.config_path)?; @@ -297,14 +323,14 @@ impl SdkRunCommand { // Setup signing service if a runtime is specified let signing_config = if let Some(runtime_name) = self.runtime.clone() { - self.setup_signing_service(&config, &runtime_name).await? + self.setup_signing_service(config, &runtime_name).await? 
} else { None }; // Use the container helper to run the command let container_helper = - SdkContainer::from_config(&self.config_path, &config)?.verbose(self.verbose); + SdkContainer::from_config(&self.config_path, config)?.verbose(self.verbose); // Create RunConfig - detach mode is now handled by the shared run_in_container let mut run_config = RunConfig { @@ -326,6 +352,7 @@ impl SdkRunCommand { no_bootstrap: self.no_bootstrap, runs_on: self.runs_on.clone(), nfs_port: self.nfs_port, + sdk_arch: self.sdk_arch.clone(), ..Default::default() }; diff --git a/src/commands/sign.rs b/src/commands/sign.rs index 9273554..ff4612f 100644 --- a/src/commands/sign.rs +++ b/src/commands/sign.rs @@ -4,10 +4,11 @@ //! It signs all runtimes with signing configuration, or a specific runtime with `-r`. use anyhow::{Context, Result}; +use std::sync::Arc; use crate::commands::runtime::RuntimeSignCommand; use crate::utils::{ - config::Config, + config::{ComposedConfig, Config}, output::{print_info, print_success, OutputLevel}, }; @@ -25,6 +26,8 @@ pub struct SignCommand { pub container_args: Option>, /// Additional arguments to pass to DNF commands pub dnf_args: Option>, + /// Pre-composed configuration to avoid reloading + composed_config: Option>, } impl SignCommand { @@ -44,30 +47,50 @@ impl SignCommand { target, container_args, dnf_args, + composed_config: None, } } + /// Set pre-composed configuration to avoid reloading + #[allow(dead_code)] + pub fn with_composed_config(mut self, config: Arc) -> Self { + self.composed_config = Some(config); + self + } + /// Execute the sign command pub async fn execute(&self) -> Result<()> { - // Load the configuration - let config = Config::load(&self.config_path) - .with_context(|| format!("Failed to load config from {}", self.config_path))?; + // Use provided config or load fresh + let composed = match &self.composed_config { + Some(cc) => Arc::clone(cc), + None => Arc::new( + Config::load_composed(&self.config_path, self.target.as_deref()) + 
.with_context(|| format!("Failed to load config from {}", self.config_path))?, + ), + }; + let config = &composed.config; // Early target validation and logging - fail fast if target is unsupported - let target = - crate::utils::target::validate_and_log_target(self.target.as_deref(), &config)?; + let target = crate::utils::target::validate_and_log_target(self.target.as_deref(), config)?; // If a specific runtime is requested, sign only that runtime if let Some(ref runtime_name) = self.runtime { - return self.sign_single_runtime(runtime_name, &target).await; + return self + .sign_single_runtime(runtime_name, &target, Arc::clone(&composed)) + .await; } // Otherwise, sign all runtimes that have signing configuration - self.sign_all_runtimes(&config, &target).await + self.sign_all_runtimes(&composed, &target).await } /// Sign a single runtime - async fn sign_single_runtime(&self, runtime_name: &str, target: &str) -> Result<()> { + async fn sign_single_runtime( + &self, + runtime_name: &str, + target: &str, + composed: Arc, + ) -> Result<()> { print_info( &format!("Signing runtime '{runtime_name}' for target '{target}'"), OutputLevel::Normal, @@ -80,7 +103,8 @@ impl SignCommand { Some(target.to_string()), self.container_args.clone(), self.dnf_args.clone(), - ); + ) + .with_composed_config(composed); sign_cmd .execute() @@ -91,12 +115,12 @@ impl SignCommand { } /// Sign all runtimes that have signing configuration - async fn sign_all_runtimes(&self, config: &Config, target: &str) -> Result<()> { - let content = std::fs::read_to_string(&self.config_path)?; - let parsed: serde_yaml::Value = serde_yaml::from_str(&content)?; + async fn sign_all_runtimes(&self, composed: &Arc, target: &str) -> Result<()> { + let config = &composed.config; + let parsed = &composed.merged_value; let runtime_section = parsed - .get("runtime") + .get("runtimes") .and_then(|r| r.as_mapping()) .ok_or_else(|| anyhow::anyhow!("No runtime configuration found"))?; @@ -189,7 +213,8 @@ impl SignCommand { 
Some(target.to_string()), self.container_args.clone(), self.dnf_args.clone(), - ); + ) + .with_composed_config(Arc::clone(composed)); sign_cmd .execute() diff --git a/src/commands/unlock.rs b/src/commands/unlock.rs index 7ee099a..5112015 100644 --- a/src/commands/unlock.rs +++ b/src/commands/unlock.rs @@ -2,8 +2,9 @@ use anyhow::{Context, Result}; use std::path::Path; +use std::sync::Arc; -use crate::utils::config::Config; +use crate::utils::config::{ComposedConfig, Config}; use crate::utils::lockfile::LockFile; use crate::utils::output::{print_info, print_success, OutputLevel}; use crate::utils::target::resolve_target_required; @@ -25,6 +26,8 @@ pub struct UnlockCommand { runtime: Option, /// Unlock SDK (includes rootfs, target-sysroot, and all SDK arches) sdk: bool, + /// Pre-composed configuration to avoid reloading + composed_config: Option>, } impl UnlockCommand { @@ -44,17 +47,31 @@ impl UnlockCommand { extension, runtime, sdk, + composed_config: None, } } + /// Set pre-composed configuration to avoid reloading + #[allow(dead_code)] + pub fn with_composed_config(mut self, config: Arc) -> Self { + self.composed_config = Some(config); + self + } + /// Execute the unlock command pub fn execute(&self) -> Result<()> { - // Load configuration - let config = Config::load(&self.config_path) - .with_context(|| format!("Failed to load config from {}", self.config_path))?; + // Use provided config or load fresh + let composed = match &self.composed_config { + Some(cc) => Arc::clone(cc), + None => Arc::new( + Config::load_composed(&self.config_path, self.target.as_deref()) + .with_context(|| format!("Failed to load config from {}", self.config_path))?, + ), + }; + let config = &composed.config; // Resolve target - let target = resolve_target_required(self.target.as_deref(), &config)?; + let target = resolve_target_required(self.target.as_deref(), config)?; // Get src_dir from config let src_dir = config @@ -168,6 +185,8 @@ impl UnlockCommand { mod tests { use super::*; 
use crate::utils::lockfile::SysrootType; + use serial_test::serial; + use std::env; use std::fs; use tempfile::TempDir; @@ -176,10 +195,10 @@ mod tests { default_target: "qemux86-64" sdk: image: "test-image" -ext: +extensions: my-app: version: "1.0.0" -runtime: +runtimes: dev: target: "qemux86-64" "#; @@ -223,7 +242,9 @@ runtime: } #[test] + #[serial] fn test_unlock_all() { + env::remove_var("AVOCADO_TARGET"); let temp_dir = TempDir::new().unwrap(); let config_path = create_test_config(&temp_dir); create_test_lock_file(&temp_dir); @@ -244,7 +265,9 @@ runtime: } #[test] + #[serial] fn test_unlock_sdk() { + env::remove_var("AVOCADO_TARGET"); let temp_dir = TempDir::new().unwrap(); let config_path = create_test_config(&temp_dir); create_test_lock_file(&temp_dir); @@ -290,7 +313,9 @@ runtime: } #[test] + #[serial] fn test_unlock_extension() { + env::remove_var("AVOCADO_TARGET"); let temp_dir = TempDir::new().unwrap(); let config_path = create_test_config(&temp_dir); create_test_lock_file(&temp_dir); @@ -325,7 +350,9 @@ runtime: } #[test] + #[serial] fn test_unlock_runtime() { + env::remove_var("AVOCADO_TARGET"); let temp_dir = TempDir::new().unwrap(); let config_path = create_test_config(&temp_dir); create_test_lock_file(&temp_dir); diff --git a/src/main.rs b/src/main.rs index b78f1fc..4ddda6d 100644 --- a/src/main.rs +++ b/src/main.rs @@ -9,7 +9,7 @@ use commands::build::BuildCommand; use commands::clean::CleanCommand; use commands::ext::{ ExtBuildCommand, ExtCheckoutCommand, ExtCleanCommand, ExtDepsCommand, ExtDnfCommand, - ExtImageCommand, ExtInstallCommand, ExtListCommand, ExtPackageCommand, + ExtFetchCommand, ExtImageCommand, ExtInstallCommand, ExtListCommand, ExtPackageCommand, }; use commands::fetch::FetchCommand; use commands::hitl::HitlServerCommand; @@ -57,6 +57,10 @@ struct Cli { /// NFS port for remote execution (auto-selects from 12050-12099 if not specified) #[arg(long, global = true)] nfs_port: Option, + + /// SDK container architecture for cross-arch 
emulation via Docker buildx/QEMU (aarch64 or x86-64) + #[arg(long, value_name = "ARCH", global = true)] + sdk_arch: Option, } #[derive(Subcommand)] @@ -226,7 +230,7 @@ enum Commands { #[arg(short, long)] target: Option, /// Provision profile to use - #[arg(long = "provision-profile")] + #[arg(long = "profile")] provision_profile: Option, /// Environment variables to pass to the provision process #[arg(long = "env", num_args = 1, action = clap::ArgAction::Append)] @@ -489,6 +493,7 @@ enum SdkCommands { dnf_args: Option>, }, /// Remove the SDK directory + /// Clean the SDK or run clean scripts for specific compile sections Clean { /// Path to avocado.yaml configuration file #[arg(short = 'C', long, default_value = "avocado.yaml")] @@ -499,6 +504,8 @@ enum SdkCommands { /// Target architecture #[arg(short, long)] target: Option, + /// Specific compile sections to clean (runs their clean scripts) + sections: Vec, /// Additional arguments to pass to the container runtime #[arg(long = "container-arg", num_args = 1, allow_hyphen_values = true, action = clap::ArgAction::Append)] container_args: Option>, @@ -576,7 +583,7 @@ enum RuntimeCommands { #[arg(short, long)] target: Option, /// Provision profile to use - #[arg(long = "provision-profile")] + #[arg(long = "profile")] provision_profile: Option, /// Environment variables to pass to the provision process #[arg(long = "env", num_args = 1, action = clap::ArgAction::Append)] @@ -803,7 +810,8 @@ async fn main() -> Result<()> { dnf_args, ) .with_no_stamps(cli.no_stamps) - .with_runs_on(cli.runs_on.clone(), cli.nfs_port); + .with_runs_on(cli.runs_on.clone(), cli.nfs_port) + .with_sdk_arch(cli.sdk_arch.clone()); install_cmd.execute().await?; Ok(()) } @@ -826,7 +834,8 @@ async fn main() -> Result<()> { dnf_args, ) .with_no_stamps(cli.no_stamps) - .with_runs_on(cli.runs_on.clone(), cli.nfs_port); + .with_runs_on(cli.runs_on.clone(), cli.nfs_port) + .with_sdk_arch(cli.sdk_arch.clone()); build_cmd.execute().await?; Ok(()) } @@ 
-847,7 +856,8 @@ async fn main() -> Result<()> { target.or(cli.target), container_args, dnf_args, - ); + ) + .with_sdk_arch(cli.sdk_arch.clone()); fetch_cmd.execute().await?; Ok(()) } @@ -883,6 +893,7 @@ async fn main() -> Result<()> { no_stamps: cli.no_stamps, runs_on: cli.runs_on.clone(), nfs_port: cli.nfs_port, + sdk_arch: cli.sdk_arch.clone(), }); provision_cmd.execute().await?; Ok(()) @@ -905,7 +916,8 @@ async fn main() -> Result<()> { container_args, dnf_args, ) - .with_no_stamps(cli.no_stamps); + .with_no_stamps(cli.no_stamps) + .with_sdk_arch(cli.sdk_arch.clone()); deploy_cmd.execute().await?; Ok(()) } @@ -1008,7 +1020,8 @@ async fn main() -> Result<()> { container_args, dnf_args, ) - .with_no_stamps(cli.no_stamps); + .with_no_stamps(cli.no_stamps) + .with_sdk_arch(cli.sdk_arch.clone()); install_cmd.execute().await?; Ok(()) } @@ -1029,7 +1042,8 @@ async fn main() -> Result<()> { container_args, dnf_args, ) - .with_no_stamps(cli.no_stamps); + .with_no_stamps(cli.no_stamps) + .with_sdk_arch(cli.sdk_arch.clone()); build_cmd.execute().await?; Ok(()) } @@ -1061,6 +1075,7 @@ async fn main() -> Result<()> { no_stamps: cli.no_stamps, runs_on: cli.runs_on.clone(), nfs_port: cli.nfs_port, + sdk_arch: cli.sdk_arch.clone(), }, ); provision_cmd.execute().await?; @@ -1097,7 +1112,8 @@ async fn main() -> Result<()> { target.or(cli.target), container_args, dnf_args, - ); + ) + .with_sdk_arch(cli.sdk_arch.clone()); dnf_cmd.execute().await?; Ok(()) } @@ -1116,7 +1132,8 @@ async fn main() -> Result<()> { target.or(cli.target), container_args, dnf_args, - ); + ) + .with_sdk_arch(cli.sdk_arch.clone()); clean_cmd.execute().await?; Ok(()) } @@ -1138,7 +1155,8 @@ async fn main() -> Result<()> { container_args, dnf_args, ) - .with_no_stamps(cli.no_stamps); + .with_no_stamps(cli.no_stamps) + .with_sdk_arch(cli.sdk_arch.clone()); deploy_cmd.execute().await?; Ok(()) } @@ -1158,7 +1176,8 @@ async fn main() -> Result<()> { container_args, dnf_args, ) - .with_no_stamps(cli.no_stamps); + 
.with_no_stamps(cli.no_stamps) + .with_sdk_arch(cli.sdk_arch.clone()); sign_cmd.execute().await?; Ok(()) } @@ -1182,10 +1201,31 @@ async fn main() -> Result<()> { container_args, dnf_args, ) - .with_no_stamps(cli.no_stamps); + .with_no_stamps(cli.no_stamps) + .with_sdk_arch(cli.sdk_arch.clone()); install_cmd.execute().await?; Ok(()) } + ExtCommands::Fetch { + config, + verbose, + force, + extension, + target, + container_args, + } => { + let fetch_cmd = ExtFetchCommand::new( + config, + extension, + verbose, + force, + target.or(cli.target.clone()), + container_args, + ) + .with_sdk_arch(cli.sdk_arch.clone()); + fetch_cmd.execute().await?; + Ok(()) + } ExtCommands::Build { extension, config, @@ -1202,7 +1242,8 @@ async fn main() -> Result<()> { container_args, dnf_args, ) - .with_no_stamps(cli.no_stamps); + .with_no_stamps(cli.no_stamps) + .with_sdk_arch(cli.sdk_arch.clone()); build_cmd.execute().await?; Ok(()) } @@ -1224,7 +1265,8 @@ async fn main() -> Result<()> { container_tool, target.or(cli.target), ) - .with_no_stamps(cli.no_stamps); + .with_no_stamps(cli.no_stamps) + .with_sdk_arch(cli.sdk_arch.clone()); checkout_cmd.execute().await?; Ok(()) } @@ -1259,7 +1301,8 @@ async fn main() -> Result<()> { target.or(cli.target), container_args, dnf_args, - ); + ) + .with_sdk_arch(cli.sdk_arch.clone()); dnf_cmd.execute().await?; Ok(()) } @@ -1278,7 +1321,8 @@ async fn main() -> Result<()> { target.or(cli.target), container_args, dnf_args, - ); + ) + .with_sdk_arch(cli.sdk_arch.clone()); clean_cmd.execute().await?; Ok(()) } @@ -1298,7 +1342,8 @@ async fn main() -> Result<()> { container_args, dnf_args, ) - .with_no_stamps(cli.no_stamps); + .with_no_stamps(cli.no_stamps) + .with_sdk_arch(cli.sdk_arch.clone()); image_cmd.execute().await?; Ok(()) } @@ -1320,7 +1365,8 @@ async fn main() -> Result<()> { container_args, dnf_args, ) - .with_no_stamps(cli.no_stamps); + .with_no_stamps(cli.no_stamps) + .with_sdk_arch(cli.sdk_arch.clone()); package_cmd.execute().await?; Ok(()) } 
@@ -1345,6 +1391,8 @@ async fn main() -> Result<()> { verbose, port, no_stamps: no_stamps || cli.no_stamps, + sdk_arch: cli.sdk_arch.clone(), + composed_config: None, }; hitl_cmd.execute().await?; Ok(()) @@ -1368,7 +1416,8 @@ async fn main() -> Result<()> { dnf_args, ) .with_no_stamps(cli.no_stamps) - .with_runs_on(cli.runs_on.clone(), cli.nfs_port); + .with_runs_on(cli.runs_on.clone(), cli.nfs_port) + .with_sdk_arch(cli.sdk_arch.clone()); install_cmd.execute().await?; Ok(()) } @@ -1409,7 +1458,8 @@ async fn main() -> Result<()> { dnf_args, no_bootstrap, ) - .with_runs_on(cli.runs_on.clone(), cli.nfs_port); + .with_runs_on(cli.runs_on.clone(), cli.nfs_port) + .with_sdk_arch(cli.sdk_arch.clone()); run_cmd.execute().await?; Ok(()) } @@ -1439,7 +1489,8 @@ async fn main() -> Result<()> { container_args, dnf_args, ) - .with_no_stamps(cli.no_stamps); + .with_no_stamps(cli.no_stamps) + .with_sdk_arch(cli.sdk_arch.clone()); compile_cmd.execute().await?; Ok(()) } @@ -1458,7 +1509,8 @@ async fn main() -> Result<()> { target.or(cli.target), container_args, dnf_args, - ); + ) + .with_sdk_arch(cli.sdk_arch.clone()); dnf_cmd.execute().await?; Ok(()) } @@ -1466,16 +1518,19 @@ async fn main() -> Result<()> { config, verbose, target, + sections, container_args, dnf_args, } => { let clean_cmd = SdkCleanCommand::new( config, verbose, + sections, target.or(cli.target), container_args, dnf_args, - ); + ) + .with_sdk_arch(cli.sdk_arch.clone()); clean_cmd.execute().await?; Ok(()) } @@ -1509,6 +1564,27 @@ enum ExtCommands { #[arg(long = "dnf-arg", num_args = 1, allow_hyphen_values = true, action = clap::ArgAction::Append)] dnf_args: Option>, }, + /// Fetch remote extensions from repo, git, or path sources + Fetch { + /// Path to avocado.yaml configuration file + #[arg(short = 'C', long, default_value = "avocado.yaml")] + config: String, + /// Enable verbose output + #[arg(short, long)] + verbose: bool, + /// Force re-fetch even if already installed + #[arg(short, long)] + force: bool, + 
/// Name of the extension to fetch (if not provided, fetches all remote extensions) + #[arg(short = 'e', long = "extension")] + extension: Option, + /// Target architecture + #[arg(short, long)] + target: Option, + /// Additional arguments to pass to the container runtime + #[arg(long = "container-arg", num_args = 1, allow_hyphen_values = true, action = clap::ArgAction::Append)] + container_args: Option>, + }, /// Build sysext and/or confext extensions from configuration Build { /// Path to avocado.yaml configuration file diff --git a/src/utils/config.rs b/src/utils/config.rs index 1fe3ac4..ef2e7c9 100644 --- a/src/utils/config.rs +++ b/src/utils/config.rs @@ -7,18 +7,6 @@ use std::env; use std::fs; use std::path::{Path, PathBuf}; -// ============================================================================= -// DEPRECATION NOTE: TOML Support (Pre-1.0.0) -// ============================================================================= -// TOML configuration file support is DEPRECATED and maintained only for -// backward compatibility and migration purposes. The default format is now YAML. -// -// TOML support will be removed before the 1.0.0 release. -// -// Migration: When a legacy avocado.toml file is detected, it will be -// automatically converted to avocado.yaml format. 
-// ============================================================================= - /// Custom deserializer module for container_args mod container_args_deserializer { use serde::{Deserialize, Deserializer}; @@ -103,24 +91,125 @@ mod container_args_deserializer { } } -/// Represents the location of an extension (local or external) +/// Represents the location of an extension #[derive(Debug, Clone, PartialEq, Eq, Hash)] +#[allow(dead_code)] pub enum ExtensionLocation { /// Extension defined in the main config file Local { name: String, config_path: String }, - /// Extension defined in an external config file + /// DEPRECATED: Extension from an external config file + /// Use source: path in the extensions section instead + #[deprecated(since = "0.23.0", note = "Use Local with source: path instead")] External { name: String, config_path: String }, + /// Remote extension fetched from a source (package, git, or path) + Remote { + name: String, + source: ExtensionSource, + }, +} + +/// Represents the source configuration for fetching a remote extension +#[derive(Debug, Clone, PartialEq, Eq, Hash, Deserialize, Serialize)] +#[serde(tag = "type", rename_all = "lowercase")] +pub enum ExtensionSource { + /// Extension from the avocado package repository + #[serde(alias = "repo")] + Package { + /// Version to fetch (e.g., "0.1.0" or "*") + version: String, + /// Optional RPM package name (defaults to extension name if not specified) + #[serde(skip_serializing_if = "Option::is_none")] + package: Option, + /// Optional custom repository name + #[serde(skip_serializing_if = "Option::is_none")] + repo_name: Option, + /// Optional list of config sections to include from the remote extension. + /// Supports dot-separated paths (e.g., "provision_profiles.tegraflash") and wildcards (e.g., "provision_profiles.*"). + /// The extension's own `ext.` section is always included. + /// Referenced `sdk.compile.*` sections are auto-included based on compile dependencies. 
+ #[serde(default, skip_serializing_if = "Option::is_none")] + include: Option>, + }, + /// Extension from a git repository + Git { + /// Git repository URL + url: String, + /// Git ref (branch, tag, or commit hash) + #[serde(rename = "ref", skip_serializing_if = "Option::is_none")] + git_ref: Option, + /// Optional sparse checkout paths + #[serde(skip_serializing_if = "Option::is_none")] + sparse_checkout: Option>, + /// Optional list of config sections to include from the remote extension. + /// Supports dot-separated paths (e.g., "provision_profiles.tegraflash") and wildcards (e.g., "provision_profiles.*"). + /// The extension's own `ext.` section is always included. + /// Referenced `sdk.compile.*` sections are auto-included based on compile dependencies. + #[serde(default, skip_serializing_if = "Option::is_none")] + include: Option>, + }, + /// Extension from a local filesystem path + Path { + /// Path to the extension directory (relative to config or absolute) + path: String, + /// Optional list of config sections to include from the remote extension. + /// Supports dot-separated paths (e.g., "provision_profiles.tegraflash") and wildcards (e.g., "provision_profiles.*"). + /// The extension's own `ext.` section is always included. + /// Referenced `sdk.compile.*` sections are auto-included based on compile dependencies. + #[serde(default, skip_serializing_if = "Option::is_none")] + include: Option>, + }, +} + +impl ExtensionSource { + /// Get the include patterns for this extension source. + /// Returns an empty slice if no include patterns are specified. + pub fn get_include_patterns(&self) -> &[String] { + match self { + ExtensionSource::Package { include, .. } => { + include.as_ref().map(|v| v.as_slice()).unwrap_or(&[]) + } + ExtensionSource::Git { include, .. } => { + include.as_ref().map(|v| v.as_slice()).unwrap_or(&[]) + } + ExtensionSource::Path { include, .. 
} => { + include.as_ref().map(|v| v.as_slice()).unwrap_or(&[]) + } + } + } + + /// Check if a config path matches any of the include patterns. + /// + /// Supports: + /// - Exact matches: "provision_profiles.tegraflash" matches "provision_profiles.tegraflash" + /// - Wildcard suffix: "provision_profiles.*" matches "provision_profiles.tegraflash", "provision_profiles.usb", etc. + /// + /// Returns true if the path matches at least one include pattern. + pub fn matches_include_pattern(config_path: &str, patterns: &[String]) -> bool { + for pattern in patterns { + if pattern.ends_with(".*") { + // Wildcard pattern: check if config_path starts with the prefix + let prefix = &pattern[..pattern.len() - 2]; // Remove ".*" + if config_path.starts_with(prefix) + && (config_path.len() == prefix.len() + || config_path.chars().nth(prefix.len()) == Some('.')) + { + return true; + } + } else if config_path == pattern { + // Exact match + return true; + } + } + false + } } /// Represents an extension dependency for a runtime with type information #[derive(Debug, Clone, PartialEq, Eq, Hash)] +#[allow(dead_code)] pub enum RuntimeExtDep { - /// Extension defined in the main config file (needs install + build) + /// Extension defined in the config (local or fetched remote) Local(String), - /// Extension from an external config file (needs install + build) - External { name: String, config_path: String }, - /// Prebuilt extension from package repo (needs install only, no build) - Versioned { name: String, version: String }, } impl RuntimeExtDep { @@ -128,8 +217,6 @@ impl RuntimeExtDep { pub fn name(&self) -> &str { match self { RuntimeExtDep::Local(name) => name, - RuntimeExtDep::External { name, .. } => name, - RuntimeExtDep::Versioned { name, .. 
} => name, } } } @@ -139,7 +226,7 @@ impl RuntimeExtDep { /// This struct provides a unified view where: /// - `distro`, `default_target`, `supported_targets` come from the main config only /// - `ext` sections are merged from both main and external configs -/// - `sdk.dependencies` and `sdk.compile` are merged from both main and external configs +/// - `sdk.packages` and `sdk.compile` are merged from both main and external configs /// /// Interpolation is applied after merging, so external configs can reference /// `{{ config.distro.version }}` and resolve to the main config's values. @@ -151,6 +238,94 @@ pub struct ComposedConfig { pub merged_value: serde_yaml::Value, /// The path to the main config file pub config_path: String, + /// Maps extension names to their source config file paths. + /// + /// This is used to resolve relative paths within extension configs. + /// Extensions from the main config will map to the main config path. + /// Extensions from remote/external sources will map to their respective config paths. + pub extension_sources: std::collections::HashMap, +} + +impl ComposedConfig { + /// Get the source config path for an extension. + /// + /// Returns the path to the config file where the extension is defined. + /// Falls back to the main config path if the extension is not found. + #[allow(dead_code)] + pub fn get_extension_source_config(&self, ext_name: &str) -> &str { + self.extension_sources + .get(ext_name) + .map(|s| s.as_str()) + .unwrap_or(&self.config_path) + } + + /// Resolve a path relative to an extension's source directory. + /// + /// For extensions from remote/external sources, paths are resolved relative to + /// that extension's src_dir (or config directory if src_dir is not specified). + /// For extensions from the main config, paths resolve relative to the main src_dir. 
+ /// + /// # Arguments + /// * `ext_name` - The name of the extension + /// * `path` - The path to resolve (may be relative or absolute) + /// + /// # Returns + /// The resolved absolute path + #[allow(dead_code)] + pub fn resolve_path_for_extension(&self, ext_name: &str, path: &str) -> PathBuf { + let target_path = Path::new(path); + + // If it's already absolute, return as-is + if target_path.is_absolute() { + return target_path.to_path_buf(); + } + + // Get the source config path for this extension + let source_config = self.get_extension_source_config(ext_name); + let source_config_path = Path::new(source_config); + + // Try to load the source config to get its src_dir + // This handles the case where the extension's config has its own src_dir + if let Ok(content) = fs::read_to_string(source_config_path) { + if let Ok(parsed) = Config::parse_config_value(source_config, &content) { + if let Ok(ext_config) = serde_yaml::from_value::(parsed) { + // Use the extension's resolved src_dir + if let Some(src_dir) = ext_config.get_resolved_src_dir(source_config) { + return src_dir.join(target_path); + } + } + } + } + + // Fallback: resolve relative to the source config's directory + let config_dir = source_config_path.parent().unwrap_or(Path::new(".")); + config_dir.join(target_path) + } + + /// Get the src_dir for an extension. + /// + /// Returns the src_dir from the extension's source config, or the directory + /// containing that config file if src_dir is not specified. 
+ #[allow(dead_code)] + pub fn get_extension_src_dir(&self, ext_name: &str) -> PathBuf { + let source_config = self.get_extension_source_config(ext_name); + let source_config_path = Path::new(source_config); + let config_dir = source_config_path.parent().unwrap_or(Path::new(".")); + + // Try to load the source config to get its src_dir + if let Ok(content) = fs::read_to_string(source_config_path) { + if let Ok(parsed) = Config::parse_config_value(source_config, &content) { + if let Ok(ext_config) = serde_yaml::from_value::(parsed) { + if let Some(src_dir) = ext_config.get_resolved_src_dir(source_config) { + return src_dir; + } + } + } + } + + // Fallback: use the config directory + config_dir.to_path_buf() + } } /// Configuration error type @@ -194,7 +369,8 @@ pub struct RuntimeConfig { #[derive(Debug, Clone, Deserialize, Serialize, Default)] pub struct SdkConfig { pub image: Option, - pub dependencies: Option>, + #[serde(alias = "dependencies")] + pub packages: Option>, pub compile: Option>, pub repo_url: Option, pub repo_release: Option, @@ -211,7 +387,10 @@ pub struct SdkConfig { #[derive(Debug, Clone, Deserialize, Serialize)] pub struct CompileConfig { pub compile: Option, - pub dependencies: Option>, + /// Path to clean script relative to src_dir, executed during `ext clean` + pub clean: Option, + #[serde(alias = "dependencies")] + pub packages: Option>, } /// Provision profile configuration @@ -277,9 +456,11 @@ pub struct Config { pub supported_targets: Option, pub src_dir: Option, pub distro: Option, - pub runtime: Option>, + #[serde(alias = "runtime")] + pub runtimes: Option>, pub sdk: Option, - pub provision: Option>, + #[serde(alias = "provision")] + pub provision_profiles: Option>, /// Signing keys mapping friendly names to key IDs /// Acts as a local bridge between the config and the global signing keys registry #[serde(default, deserialize_with = "signing_keys_deserializer::deserialize")] @@ -295,7 +476,7 @@ impl Config { /// - For named sections: 
[section_type.name] + [section_type.name.] /// /// # Arguments - /// * `section_path` - The base section path (e.g., "sdk", "runtime.prod", "ext.avocado-dev") + /// * `section_path` - The base section path (e.g., "sdk", "runtimes.prod", "extensions.avocado-dev") /// * `target` - The target architecture /// * `config_path` - Path to the configuration file for raw TOML access /// @@ -346,19 +527,10 @@ impl Config { } } - /// Parse a config file content into a YAML value (supports both YAML and TOML) + /// Parse a config file content into a YAML value fn parse_config_value(path: &str, content: &str) -> Result { - let is_yaml = path.ends_with(".yaml") || path.ends_with(".yml"); - - if is_yaml { - serde_yaml::from_str(content) - .with_context(|| format!("Failed to parse config file: {path}")) - } else { - // DEPRECATED: Parse TOML and convert to YAML value - let toml_val: toml::Value = toml::from_str(content) - .with_context(|| format!("Failed to parse config file: {path}"))?; - Self::toml_to_yaml(&toml_val) - } + serde_yaml::from_str(content) + .with_context(|| format!("Failed to parse config file: {path}")) } /// Parse config content and apply interpolation with the given target. @@ -384,10 +556,11 @@ impl Config { /// /// This method: /// 1. Loads the main config (raw, without interpolation) - /// 2. Discovers all external config references in runtime and ext dependencies - /// 3. Loads each external config (raw) - /// 4. Merges external `ext.*`, `sdk.dependencies`, and `sdk.compile` sections - /// 5. Applies interpolation to the composed model + /// 2. Discovers installed remote extensions in avocado-extensions/ and merges their configs + /// 3. Discovers all external config references in runtime and ext dependencies + /// 4. Loads each external config (raw) + /// 5. Merges external `extensions.*`, `sdk.packages`, and `sdk.compile` sections + /// 6. 
Applies interpolation to the composed model /// /// The `distro`, `default_target`, and `supported_targets` sections come from the main config only, /// allowing external configs to reference `{{ config.distro.version }}` and resolve to main config values. @@ -398,11 +571,31 @@ impl Config { let path = config_path.as_ref(); let config_path_str = path.to_string_lossy().to_string(); + // Track which config file each extension comes from + let mut extension_sources: std::collections::HashMap = + std::collections::HashMap::new(); + // Load main config content (raw, no interpolation yet) let content = fs::read_to_string(path) .with_context(|| format!("Failed to read config file: {}", path.display()))?; let mut main_config = Self::parse_config_value(&config_path_str, &content)?; + // Record extensions from the main config + if let Some(ext_section) = main_config.get("extensions").and_then(|e| e.as_mapping()) { + for (ext_key, _) in ext_section { + if let Some(ext_name) = ext_key.as_str() { + extension_sources.insert(ext_name.to_string(), config_path_str.clone()); + } + } + } + + // Discover and merge installed remote extension configs + // Remote extensions are those with a 'source' field that have been fetched + // to $AVOCADO_PREFIX/includes// + let remote_ext_sources = + Self::merge_installed_remote_extensions(&mut main_config, path, target)?; + extension_sources.extend(remote_ext_sources); + // Discover all external config references let external_refs = Self::discover_external_config_refs(&main_config); @@ -429,8 +622,41 @@ impl Config { &external_content, )?; + // For external configs (deprecated `config: path` syntax), use permissive include patterns + // to maintain backward compatibility - merge all sections + let legacy_include_patterns = vec![ + "provision_profiles.*".to_string(), + "sdk.packages.*".to_string(), + "sdk.compile.*".to_string(), + ]; + let auto_include_compile = + Self::find_compile_dependencies_in_ext(&external_config, ext_name); + // Merge 
external config into main config - Self::merge_external_config(&mut main_config, &external_config, ext_name); + Self::merge_external_config( + &mut main_config, + &external_config, + ext_name, + &legacy_include_patterns, + &auto_include_compile, + ); + + // Record this extension's source (the external config path) + let resolved_path_str = resolved_path.to_string_lossy().to_string(); + extension_sources.insert(ext_name.clone(), resolved_path_str.clone()); + + // Also record any extensions defined within this external config + if let Some(nested_ext_section) = external_config + .get("extensions") + .and_then(|e| e.as_mapping()) + { + for (nested_ext_key, _) in nested_ext_section { + if let Some(nested_ext_name) = nested_ext_key.as_str() { + extension_sources + .insert(nested_ext_name.to_string(), resolved_path_str.clone()); + } + } + } } // Apply interpolation to the composed model @@ -445,9 +671,437 @@ impl Config { config, merged_value: main_config, config_path: config_path_str, + extension_sources, }) } + /// Merge installed remote extension configs into the main config + /// + /// For each extension with a `source` field that has been installed to + /// `$AVOCADO_PREFIX/includes//`, load and merge its avocado.yaml + /// + /// Returns a HashMap mapping extension names to their source config file paths. 
+ fn merge_installed_remote_extensions( + main_config: &mut serde_yaml::Value, + config_path: &Path, + target: Option<&str>, + ) -> Result> { + let mut extension_sources: std::collections::HashMap = + std::collections::HashMap::new(); + + // Get the src_dir and target to find the extensions directory + // First deserialize just to get src_dir and default_target + let temp_config: Config = + serde_yaml::from_value(main_config.clone()).unwrap_or_else(|_| Config { + default_target: None, + supported_targets: None, + src_dir: None, + distro: None, + runtimes: None, + sdk: None, + provision_profiles: None, + signing_keys: None, + }); + + // Resolve target: CLI arg > env var > config default + let resolved_target = target + .map(|s| s.to_string()) + .or_else(|| std::env::var("AVOCADO_TARGET").ok()) + .or_else(|| temp_config.default_target.clone()); + + // If we don't have a target, we can't determine the extensions path + let resolved_target = match resolved_target { + Some(t) => t, + None => { + // No target available - can't locate extensions, skip merging + return Ok(extension_sources); + } + }; + + // Discover remote extensions from the main config (with target interpolation) + let remote_extensions = + Self::discover_remote_extensions_from_value(main_config, Some(&resolved_target))?; + + if remote_extensions.is_empty() { + return Ok(extension_sources); + } + + // Get src_dir for loading volume state + let config_path_str = config_path.to_string_lossy(); + let src_dir = temp_config + .get_resolved_src_dir(config_path_str.as_ref()) + .unwrap_or_else(|| config_path.parent().unwrap_or(Path::new(".")).to_path_buf()); + + // Try to load volume state for container-based config reading + let volume_state = crate::utils::volume::VolumeState::load_from_dir(&src_dir) + .ok() + .flatten(); + + // Check for verbose/debug mode via environment variable + let verbose = + std::env::var("AVOCADO_DEBUG").is_ok() || std::env::var("AVOCADO_VERBOSE").is_ok(); + + if verbose { + 
eprintln!( + "[DEBUG] merge_installed_remote_extensions: found {} remote extensions: {:?}", + remote_extensions.len(), + remote_extensions.iter().map(|(n, _)| n).collect::>() + ); + } + + // Load extension path state for path-based extensions + let ext_path_state = crate::utils::ext_fetch::ExtensionPathState::load_from_dir(&src_dir) + .ok() + .flatten(); + + // For each remote extension, try to read its config + for (ext_name, source) in remote_extensions { + // Try multiple methods to read the extension config: + // 0. Path-based extension: read directly from source path (for source: { type: path }) + // 1. Direct container path (when running inside a container) + // 2. Via container command (when running on host) + // 3. Local fallback path (for development) + + let ext_content = { + // Method 0: Check if this is a path-based extension (source: { type: path }) + // For path-based extensions, read from the registered source path on the host + if let Some(ref state) = ext_path_state { + if let Some(source_path) = state.path_mounts.get(&ext_name) { + let config_path_yaml = source_path.join("avocado.yaml"); + let config_path_yml = source_path.join("avocado.yml"); + + if verbose { + eprintln!( + "[DEBUG] Extension '{}' is path-based, checking: {}", + ext_name, + config_path_yaml.display() + ); + } + + if config_path_yaml.exists() { + match fs::read_to_string(&config_path_yaml) { + Ok(content) => { + if verbose { + eprintln!( + "[DEBUG] Read {} bytes from path-based source", + content.len() + ); + } + content + } + Err(e) => { + if verbose { + eprintln!("[DEBUG] Failed to read: {e}"); + } + continue; + } + } + } else if config_path_yml.exists() { + match fs::read_to_string(&config_path_yml) { + Ok(content) => { + if verbose { + eprintln!( + "[DEBUG] Read {} bytes from path-based source (.yml)", + content.len() + ); + } + content + } + Err(e) => { + if verbose { + eprintln!("[DEBUG] Failed to read: {e}"); + } + continue; + } + } + } else { + if verbose { + eprintln!( + 
"[DEBUG] Path-based source path has no avocado.yaml/yml: {}", + source_path.display() + ); + } + continue; + } + } else { + // Not a path-based extension, fall through to other methods + "".to_string() + } + } else { + // No path state, fall through to other methods + "".to_string() + } + }; + + // If we got content from path-based source, skip other methods + let ext_content = if !ext_content.is_empty() { + ext_content + } else { + // Method 1: Check if we're inside a container and can read directly + // The standard container path is /opt/_avocado//includes//avocado.yaml + let container_direct_path = + format!("/opt/_avocado/{resolved_target}/includes/{ext_name}/avocado.yaml"); + let container_path = Path::new(&container_direct_path); + + if verbose { + eprintln!( + "[DEBUG] Checking for remote extension '{ext_name}' config at: {container_direct_path}" + ); + eprintln!("[DEBUG] Path exists: {}", container_path.exists()); + } + + if container_path.exists() { + // We're inside a container, read directly + match fs::read_to_string(container_path) { + Ok(content) => { + if verbose { + eprintln!( + "[DEBUG] Read {} bytes from container path", + content.len() + ); + } + content + } + Err(e) => { + if verbose { + eprintln!("[DEBUG] Failed to read: {e}"); + } + continue; + } + } + } else if let Some(vs) = &volume_state { + // Method 2: Use container command to read from Docker volume + if verbose { + eprintln!( + "[DEBUG] Trying via container command (volume: {})", + vs.volume_name + ); + } + match Self::read_extension_config_via_container(vs, &resolved_target, &ext_name) + { + Ok(content) => { + if verbose { + eprintln!("[DEBUG] Read {} bytes via container", content.len()); + } + content + } + Err(e) => { + if verbose { + eprintln!("[DEBUG] Container read failed: {e}"); + } + // Extension not installed yet or config not found, skip + continue; + } + } + } else { + // Method 3: Fallback to local path (for development) + let fallback_dir = src_dir + .join(".avocado") + 
.join(&resolved_target) + .join("includes") + .join(&ext_name); + let config_path_local = fallback_dir.join("avocado.yaml"); + if verbose { + eprintln!( + "[DEBUG] Trying fallback path: {}", + config_path_local.display() + ); + } + if config_path_local.exists() { + match fs::read_to_string(&config_path_local) { + Ok(content) => content, + Err(_) => continue, + } + } else { + if verbose { + eprintln!("[DEBUG] No config found for '{ext_name}', skipping"); + } + continue; + } + } + }; + + // Use a .yaml extension so parse_config_value knows to parse as YAML + let ext_config_path = format!("{ext_name}/avocado.yaml"); + let ext_config = match Self::parse_config_value(&ext_config_path, &ext_content) { + Ok(cfg) => { + if verbose { + eprintln!("[DEBUG] Successfully parsed config for '{ext_name}'"); + // Show what extensions are defined in this remote config + if let Some(ext_section) = + cfg.get("extensions").and_then(|e| e.as_mapping()) + { + let ext_names: Vec<_> = + ext_section.keys().filter_map(|k| k.as_str()).collect(); + eprintln!("[DEBUG] Remote config defines extensions: {ext_names:?}"); + // Show the extension section that matches our name + if let Some(our_ext) = + ext_section.get(serde_yaml::Value::String(ext_name.clone())) + { + eprintln!( + "[DEBUG] Extension '{}' in remote config:\n{}", + ext_name, + serde_yaml::to_string(our_ext).unwrap_or_default() + ); + } else { + eprintln!( + "[DEBUG] Extension '{ext_name}' NOT found in remote config's ext section" + ); + } + } else { + eprintln!("[DEBUG] Remote config has no 'ext' section"); + } + } + cfg + } + Err(e) => { + if verbose { + eprintln!("[DEBUG] Failed to parse config for '{ext_name}': {e}"); + } + // Failed to parse config, skip this extension + continue; + } + }; + + // Record this extension's source path + // For path-based extensions, use the actual host path + // For other remote extensions, use the container path + let ext_config_path_str = if let Some(ref state) = ext_path_state { + if let 
Some(source_path) = state.path_mounts.get(&ext_name) { + source_path + .join("avocado.yaml") + .to_string_lossy() + .to_string() + } else { + format!("/opt/_avocado/{resolved_target}/includes/{ext_name}/avocado.yaml") + } + } else { + format!("/opt/_avocado/{resolved_target}/includes/{ext_name}/avocado.yaml") + }; + extension_sources.insert(ext_name.clone(), ext_config_path_str.clone()); + + // Also record any extensions defined within this remote extension's config + if let Some(nested_ext_section) = + ext_config.get("extensions").and_then(|e| e.as_mapping()) + { + for (nested_ext_key, _) in nested_ext_section { + if let Some(nested_ext_name) = nested_ext_key.as_str() { + extension_sources + .insert(nested_ext_name.to_string(), ext_config_path_str.clone()); + } + } + } + + // Get include patterns from the extension source + // For path-based extensions (type: path), use permissive patterns similar to legacy + // external configs to ensure sdk.compile sections are included + let include_patterns: Vec = match &source { + ExtensionSource::Path { include, .. 
} => { + if let Some(patterns) = include { + patterns.clone() + } else { + // Default: include all sdk sections for path-based extensions + vec!["sdk.packages.*".to_string(), "sdk.compile.*".to_string()] + } + } + _ => source.get_include_patterns().to_vec(), + }; + let include_patterns = include_patterns.as_slice(); + + // Find compile dependencies to auto-include from the extension's own section + let auto_include_compile = + Self::find_compile_dependencies_in_ext(&ext_config, &ext_name); + + if verbose { + eprintln!( + "[DEBUG] Merging '{ext_name}' with include_patterns: {include_patterns:?}, auto_include_compile: {auto_include_compile:?}" + ); + } + + // Merge the remote extension config with include patterns + Self::merge_external_config( + main_config, + &ext_config, + &ext_name, + include_patterns, + &auto_include_compile, + ); + + if verbose { + // Show what the main config's ext section looks like after merge + if let Some(main_ext) = main_config.get("extensions").and_then(|e| e.get(&ext_name)) + { + eprintln!( + "[DEBUG] After merge, main config ext.{}:\n{}", + ext_name, + serde_yaml::to_string(main_ext).unwrap_or_default() + ); + } + } + } + + Ok(extension_sources) + } + + /// Read a remote extension's config file by running a container command. + /// + /// This runs a lightweight container to cat the extension's avocado.yaml from + /// the Docker volume, avoiding permission issues with direct host access. 
+ fn read_extension_config_via_container( + volume_state: &crate::utils::volume::VolumeState, + target: &str, + ext_name: &str, + ) -> Result { + // The extension config path inside the container + let container_config_path = + format!("/opt/_avocado/{target}/includes/{ext_name}/avocado.yaml"); + + // Run a minimal container to cat the config file + // We use busybox as a lightweight image, but fall back to alpine if needed + let images_to_try = [ + "busybox:latest", + "alpine:latest", + "docker.io/library/busybox:latest", + ]; + + for image in &images_to_try { + let output = std::process::Command::new(&volume_state.container_tool) + .args([ + "run", + "--rm", + "-v", + &format!("{}:/opt/_avocado:ro", volume_state.volume_name), + image, + "cat", + &container_config_path, + ]) + .output(); + + match output { + Ok(out) if out.status.success() => { + let content = String::from_utf8_lossy(&out.stdout).to_string(); + if content.is_empty() { + anyhow::bail!("Extension config file is empty"); + } + return Ok(content); + } + Ok(out) => { + let stderr = String::from_utf8_lossy(&out.stderr); + // If file not found, bail immediately (no point trying other images) + if stderr.contains("No such file") || stderr.contains("not found") { + anyhow::bail!("Extension config not found: {container_config_path}"); + } + // Otherwise, continue to try next image + } + Err(_) => { + // Continue to try next image + } + } + } + + anyhow::bail!("Failed to read extension config via container for '{ext_name}'") + } + /// Discover all external config references in runtime and ext dependencies. 
/// /// Scans these locations: @@ -461,7 +1115,7 @@ impl Config { let mut visited = std::collections::HashSet::new(); // Scan runtime dependencies - if let Some(runtime_section) = config.get("runtime").and_then(|r| r.as_mapping()) { + if let Some(runtime_section) = config.get("runtimes").and_then(|r| r.as_mapping()) { for (_runtime_name, runtime_config) in runtime_section { Self::collect_external_refs_from_dependencies( runtime_config, @@ -475,7 +1129,7 @@ impl Config { // Skip known non-target keys if let Some(key_str) = key.as_str() { if ![ - "dependencies", + "packages", "target", "stone_include_paths", "stone_manifest", @@ -497,7 +1151,7 @@ impl Config { } // Scan ext dependencies - if let Some(ext_section) = config.get("ext").and_then(|e| e.as_mapping()) { + if let Some(ext_section) = config.get("extensions").and_then(|e| e.as_mapping()) { for (_ext_name, ext_config) in ext_section { Self::collect_external_refs_from_dependencies(ext_config, &mut refs, &mut visited); @@ -516,7 +1170,6 @@ impl Config { "vendor", "types", "packages", - "dependencies", "sdk", "enable_services", "on_merge", @@ -553,14 +1206,14 @@ impl Config { refs: &mut Vec<(String, String)>, visited: &mut std::collections::HashSet, ) { - let dependencies = section.get("dependencies").and_then(|d| d.as_mapping()); + let dependencies = section.get("packages").and_then(|d| d.as_mapping()); if let Some(deps_map) = dependencies { for (_dep_name, dep_spec) in deps_map { if let Some(spec_map) = dep_spec.as_mapping() { // Check for external extension reference if let (Some(ext_name), Some(config_path)) = ( - spec_map.get("ext").and_then(|v| v.as_str()), + spec_map.get("extensions").and_then(|v| v.as_str()), spec_map.get("config").and_then(|v| v.as_str()), ) { let key = format!("{ext_name}:{config_path}"); @@ -576,92 +1229,353 @@ impl Config { /// Merge an external config into the main config. 
/// - /// Merges: - /// - `ext.*` sections (external extensions added to main ext section) - /// - `sdk.dependencies` (merged, main takes precedence on conflicts) - /// - `sdk.compile` (merged, main takes precedence on conflicts) + /// Always merges: + /// - `ext.` section (the extension's own section) + /// + /// Conditionally merges (based on include_patterns): + /// - `provision.` sections (if pattern matches) + /// - `sdk.packages.` (if pattern matches) + /// - `sdk.compile.
` (if pattern matches) + /// + /// Does NOT merge (main config only): + /// - `distro` + /// - `default_target` + /// - `supported_targets` + /// - `sdk.image`, `sdk.container_args`, etc. (base SDK settings) /// - /// Does NOT merge: - /// - `distro` (main config only) - /// - `default_target` (main config only) - /// - `supported_targets` (main config only) + /// # Arguments + /// * `main_config` - The main config to merge into + /// * `external_config` - The external config to merge from + /// * `ext_name` - The name of the extension (its `ext.` is always merged) + /// * `include_patterns` - Patterns for additional sections to include (e.g., "provision_profiles.*") + /// * `auto_include_compile` - List of sdk.compile section names to auto-include (from compile deps) fn merge_external_config( main_config: &mut serde_yaml::Value, external_config: &serde_yaml::Value, - _ext_name: &str, + ext_name: &str, + include_patterns: &[String], + auto_include_compile: &[String], ) { - // Merge ext sections - if let Some(external_ext) = external_config.get("ext").and_then(|e| e.as_mapping()) { + // Always merge the extension's own extensions. 
section + if let Some(external_ext) = external_config + .get("extensions") + .and_then(|e| e.as_mapping()) + { let main_ext = main_config .as_mapping_mut() .and_then(|m| { - if !m.contains_key(serde_yaml::Value::String("ext".to_string())) { + if !m.contains_key(serde_yaml::Value::String("extensions".to_string())) { m.insert( - serde_yaml::Value::String("ext".to_string()), + serde_yaml::Value::String("extensions".to_string()), serde_yaml::Value::Mapping(serde_yaml::Mapping::new()), ); } - m.get_mut(serde_yaml::Value::String("ext".to_string())) + m.get_mut(serde_yaml::Value::String("extensions".to_string())) }) .and_then(|e| e.as_mapping_mut()); if let Some(main_ext_map) = main_ext { - for (ext_key, ext_value) in external_ext { - // Only add if not already present in main config - if !main_ext_map.contains_key(ext_key) { - main_ext_map.insert(ext_key.clone(), ext_value.clone()); + // Deep-merge the extension's own section (ext.) + // This handles the case where main config has a stub with just `source:` + // and the remote extension has the full definition with `dependencies:` etc. + let ext_key = serde_yaml::Value::String(ext_name.to_string()); + + // Try to find the extension's config in the remote extension's config. + // We use multiple strategies: + // 1. Exact match (e.g., "avocado-bsp-raspberrypi4") + // 2. Base name match (e.g., "avocado-bsp" when looking for "avocado-bsp-raspberrypi4") + // 3. 
Single extension in remote config (if there's only one, use it) + let ext_value = external_ext + .get(&ext_key) + .or_else(|| { + // Try base name: strip common target suffixes from ext_name + let base_names = Self::get_base_extension_names(ext_name); + for base in &base_names { + let base_key = serde_yaml::Value::String(base.clone()); + if let Some(val) = external_ext.get(&base_key) { + return Some(val); + } + } + None + }) + .or_else(|| { + // If remote config has exactly one extension defined, use it + if external_ext.len() == 1 { + external_ext.values().next() + } else { + None + } + }); + + if let Some(ext_value) = ext_value { + // Try to find the existing key in main config that matches ext_name. + // This handles template keys like "avocado-bsp-{{ avocado.target }}" + // that interpolate to "avocado-bsp-raspberrypi4". + let existing_key = Self::find_matching_ext_key(main_ext_map, ext_name); + + if let Some(existing_key) = existing_key { + // Found an existing key (possibly a template) - merge into it + if let Some(existing_ext) = main_ext_map.get_mut(&existing_key) { + // Deep-merge: add fields from remote that don't exist in main + // Main config values take precedence on conflicts + Self::deep_merge_ext_section(existing_ext, ext_value); + } + } else { + // Extension not in main config, just add it + main_ext_map.insert(ext_key, ext_value.clone()); } } } } - // Merge sdk.dependencies + // Merge provision sections based on include patterns + if let Some(external_provision) = external_config + .get("provision_profiles") + .and_then(|p| p.as_mapping()) + { + for (profile_key, profile_value) in external_provision { + if let Some(profile_name) = profile_key.as_str() { + let config_path = format!("provision_profiles.{profile_name}"); + if ExtensionSource::matches_include_pattern(&config_path, include_patterns) { + Self::ensure_provision_section(main_config); + if let Some(main_provision) = main_config + .get_mut("provision_profiles") + .and_then(|p| 
p.as_mapping_mut()) + { + // Only add if not already present (main takes precedence) + if !main_provision.contains_key(profile_key) { + main_provision.insert(profile_key.clone(), profile_value.clone()); + } + } + } + } + } + } + + // Merge sdk.packages based on include patterns if let Some(external_sdk_deps) = external_config .get("sdk") - .and_then(|s| s.get("dependencies")) + .and_then(|s| s.get("packages")) .and_then(|d| d.as_mapping()) { - Self::ensure_sdk_dependencies_section(main_config); - - if let Some(main_sdk_deps) = main_config - .get_mut("sdk") - .and_then(|s| s.get_mut("dependencies")) - .and_then(|d| d.as_mapping_mut()) - { - for (dep_key, dep_value) in external_sdk_deps { - // Only add if not already present (main takes precedence) - if !main_sdk_deps.contains_key(dep_key) { - main_sdk_deps.insert(dep_key.clone(), dep_value.clone()); + for (dep_key, dep_value) in external_sdk_deps { + if let Some(dep_name) = dep_key.as_str() { + let config_path = format!("sdk.packages.{dep_name}"); + if ExtensionSource::matches_include_pattern(&config_path, include_patterns) { + Self::ensure_sdk_packages_section(main_config); + if let Some(main_sdk_deps) = main_config + .get_mut("sdk") + .and_then(|s| s.get_mut("packages")) + .and_then(|d| d.as_mapping_mut()) + { + // Only add if not already present (main takes precedence) + if !main_sdk_deps.contains_key(dep_key) { + main_sdk_deps.insert(dep_key.clone(), dep_value.clone()); + } + } } } } } - // Merge sdk.compile + // Merge sdk.compile based on include patterns OR auto_include_compile list if let Some(external_sdk_compile) = external_config .get("sdk") .and_then(|s| s.get("compile")) .and_then(|c| c.as_mapping()) { - Self::ensure_sdk_compile_section(main_config); + for (compile_key, compile_value) in external_sdk_compile { + if let Some(compile_name) = compile_key.as_str() { + let config_path = format!("sdk.compile.{compile_name}"); + let should_include = + ExtensionSource::matches_include_pattern(&config_path, 
include_patterns) + || auto_include_compile.contains(&compile_name.to_string()); + + if should_include { + Self::ensure_sdk_compile_section(main_config); + if let Some(main_sdk_compile) = main_config + .get_mut("sdk") + .and_then(|s| s.get_mut("compile")) + .and_then(|c| c.as_mapping_mut()) + { + // Only add if not already present (main takes precedence) + if !main_sdk_compile.contains_key(compile_key) { + main_sdk_compile.insert(compile_key.clone(), compile_value.clone()); + } + } + } + } + } + } + } - if let Some(main_sdk_compile) = main_config - .get_mut("sdk") - .and_then(|s| s.get_mut("compile")) - .and_then(|c| c.as_mapping_mut()) - { - for (compile_key, compile_value) in external_sdk_compile { - // Only add if not already present (main takes precedence) - if !main_sdk_compile.contains_key(compile_key) { - main_sdk_compile.insert(compile_key.clone(), compile_value.clone()); + /// Find a matching extension key in the main config's ext section. + /// + /// This handles the case where the main config has template keys like + /// "avocado-bsp-{{ avocado.target }}" that should match the interpolated + /// name "avocado-bsp-raspberrypi4". + /// + /// Returns the original key (possibly a template) if found. 
+ fn find_matching_ext_key( + ext_map: &serde_yaml::Mapping, + interpolated_name: &str, + ) -> Option { + // First try exact match + let exact_key = serde_yaml::Value::String(interpolated_name.to_string()); + if ext_map.contains_key(&exact_key) { + return Some(exact_key); + } + + // Look for template keys that would match after interpolation + // Common template patterns: {{ avocado.target }}, {{ config.* }} + for key in ext_map.keys() { + if let Some(key_str) = key.as_str() { + // Check if this is a template key + if key_str.contains("{{") && key_str.contains("}}") { + // Try to match by replacing common template patterns + // with regex-like patterns + + // Handle {{ avocado.target }} - this is the most common case + // The key might be "avocado-bsp-{{ avocado.target }}" + // and we're looking for "avocado-bsp-raspberrypi4" + if key_str.contains("{{ avocado.target }}") + || key_str.contains("{{avocado.target}}") + { + // Extract the prefix and suffix around the template + let parts: Vec<&str> = if key_str.contains("{{ avocado.target }}") { + key_str.split("{{ avocado.target }}").collect() + } else { + key_str.split("{{avocado.target}}").collect() + }; + + if parts.len() == 2 { + let prefix = parts[0]; + let suffix = parts[1]; + + // Check if the interpolated name matches the pattern + if interpolated_name.starts_with(prefix) + && interpolated_name.ends_with(suffix) + { + // Verify the middle part (the target) is reasonable + let middle_len = + interpolated_name.len() - prefix.len() - suffix.len(); + if middle_len > 0 { + return Some(key.clone()); + } + } + } } } } } + + None } - /// Ensure the sdk.dependencies section exists in the config. - fn ensure_sdk_dependencies_section(config: &mut serde_yaml::Value) { + /// Ensure the provision section exists in the config. 
+ fn ensure_provision_section(config: &mut serde_yaml::Value) { + if let Some(main_map) = config.as_mapping_mut() { + if !main_map.contains_key(serde_yaml::Value::String("provision_profiles".to_string())) { + main_map.insert( + serde_yaml::Value::String("provision_profiles".to_string()), + serde_yaml::Value::Mapping(serde_yaml::Mapping::new()), + ); + } + } + } + + /// Get possible base extension names by stripping common target suffixes. + /// + /// For an extension like "avocado-bsp-raspberrypi4", this returns: + /// - "avocado-bsp" (common pattern for BSP packages) + /// - Names with common target suffixes stripped + fn get_base_extension_names(ext_name: &str) -> Vec { + let mut names = Vec::new(); + + // Common target suffixes to try stripping + let target_suffixes = [ + "-raspberrypi4", + "-raspberrypi5", + "-rpi4", + "-rpi5", + "-jetson-orin-nano", + "-jetson-orin-nx", + "-jetson", + "-x86_64", + "-aarch64", + ]; + + for suffix in &target_suffixes { + if let Some(base) = ext_name.strip_suffix(suffix) { + if !base.is_empty() && !names.contains(&base.to_string()) { + names.push(base.to_string()); + } + } + } + + // Also try splitting on last dash as a generic approach + // e.g., "my-ext-target" -> "my-ext" + if let Some(last_dash) = ext_name.rfind('-') { + if last_dash > 0 { + let base = &ext_name[..last_dash]; + if !names.contains(&base.to_string()) { + names.push(base.to_string()); + } + } + } + + names + } + + /// Deep-merge an extension section from external config into main config. + /// + /// This handles the case where main config has a stub definition (with just `source:`) + /// and the remote extension has the full definition (with `dependencies:`, `version:`, etc.). + /// + /// Main config values take precedence on conflicts. 
+ fn deep_merge_ext_section(main_ext: &mut serde_yaml::Value, external_ext: &serde_yaml::Value) { + // Only merge if both are mappings + if let (Some(main_map), Some(external_map)) = + (main_ext.as_mapping_mut(), external_ext.as_mapping()) + { + for (key, external_value) in external_map { + if !main_map.contains_key(key) { + // Key doesn't exist in main, add it from external + main_map.insert(key.clone(), external_value.clone()); + } + // If key exists in main, keep main's value (main takes precedence) + } + } + } + + /// Find compile dependencies in an extension's dependencies section. + /// + /// Scans `ext..dependencies` for entries with a `compile` key + /// and returns the list of compile section names that should be auto-included. + fn find_compile_dependencies_in_ext( + ext_config: &serde_yaml::Value, + ext_name: &str, + ) -> Vec { + let mut compile_deps = Vec::new(); + + if let Some(ext_section) = ext_config + .get("extensions") + .and_then(|e| e.get(ext_name)) + .and_then(|e| e.get("packages")) + .and_then(|d| d.as_mapping()) + { + for (_dep_name, dep_spec) in ext_section { + if let Some(compile_name) = dep_spec.get("compile").and_then(|c| c.as_str()) { + compile_deps.push(compile_name.to_string()); + } + } + } + + compile_deps + } + + /// Ensure the sdk.packages section exists in the config. 
+ fn ensure_sdk_packages_section(config: &mut serde_yaml::Value) { if let Some(main_map) = config.as_mapping_mut() { // Ensure sdk section exists if !main_map.contains_key(serde_yaml::Value::String("sdk".to_string())) { @@ -671,13 +1585,12 @@ impl Config { ); } - // Ensure sdk.dependencies section exists + // Ensure sdk.packages section exists if let Some(sdk) = main_map.get_mut(serde_yaml::Value::String("sdk".to_string())) { if let Some(sdk_map) = sdk.as_mapping_mut() { - if !sdk_map.contains_key(serde_yaml::Value::String("dependencies".to_string())) - { + if !sdk_map.contains_key(serde_yaml::Value::String("packages".to_string())) { sdk_map.insert( - serde_yaml::Value::String("dependencies".to_string()), + serde_yaml::Value::String("packages".to_string()), serde_yaml::Value::Mapping(serde_yaml::Mapping::new()), ); } @@ -773,6 +1686,21 @@ impl Config { (_, target_value) => target_value, } } + /// Merge a target-specific override into a base config value + /// This filters out other target sections from the base and merges the override + pub fn merge_target_override( + &self, + base: serde_yaml::Value, + target_override: serde_yaml::Value, + _current_target: &str, + ) -> serde_yaml::Value { + // Filter out target-specific subsections from base before merging + let supported_targets = self.get_supported_targets().unwrap_or_default(); + let filtered_base = self.filter_target_subsections(base, &supported_targets); + // Merge the target override into the filtered base + self.merge_values(filtered_base, target_override) + } + /// Get merged runtime configuration for a specific runtime and target #[allow(dead_code)] // Future API for command integration pub fn get_merged_runtime_config( @@ -781,7 +1709,7 @@ impl Config { target: &str, config_path: &str, ) -> Result> { - let section_path = format!("runtime.{runtime_name}"); + let section_path = format!("runtimes.{runtime_name}"); self.get_merged_section(§ion_path, target, config_path) } @@ -793,7 +1721,7 @@ impl Config { 
target: &str, config_path: &str, ) -> Result> { - let section_path = format!("provision.{profile_name}"); + let section_path = format!("provision_profiles.{profile_name}"); self.get_merged_section(§ion_path, target, config_path) } @@ -804,16 +1732,26 @@ impl Config { target: &str, config_path: &str, ) -> Result> { - let section_path = format!("ext.{ext_name}"); + let section_path = format!("extensions.{ext_name}"); self.get_merged_section(§ion_path, target, config_path) } /// Get detailed extension dependencies for a runtime (with type information) /// - /// Returns a list of extension dependencies with their type: - /// - Local: extension defined in the main config file (needs install + build) - /// - External: extension from an external config file (needs install + build) - /// - Versioned: prebuilt extension from package repo (needs install only) + /// Returns a list of extension dependencies from the `extensions` array. + /// All extensions are returned as Local type - extension source configuration + /// (repo, git, path) is defined in the ext section, not in the runtime. 
+ /// + /// New format (extensions array): + /// ```yaml + /// runtime: + /// dev: + /// extensions: + /// - avocado-ext-dev + /// - avocado-ext-sshd-dev + /// packages: + /// avocado-runtime: '0.1.0' + /// ``` pub fn get_runtime_extension_dependencies_detailed( &self, runtime_name: &str, @@ -826,36 +1764,15 @@ impl Config { return Ok(vec![]); }; - let Some(dependencies) = runtime_config - .get("dependencies") - .and_then(|d| d.as_mapping()) - else { - return Ok(vec![]); - }; - let mut ext_deps = Vec::new(); - for (_dep_name, dep_spec) in dependencies { - // Check if this dependency references an extension - if let Some(ext_name) = dep_spec.get("ext").and_then(|v| v.as_str()) { - // Check if it has a version (versioned/prebuilt extension) - if let Some(version) = dep_spec.get("vsn").and_then(|v| v.as_str()) { - ext_deps.push(RuntimeExtDep::Versioned { - name: ext_name.to_string(), - version: version.to_string(), - }); - } - // Check if it has an external config - else if let Some(external_config) = - dep_spec.get("config").and_then(|v| v.as_str()) - { - ext_deps.push(RuntimeExtDep::External { - name: ext_name.to_string(), - config_path: external_config.to_string(), - }); - } - // Otherwise it's a local extension - else { + // New way: Read from the `extensions` array + if let Some(extensions) = runtime_config + .get("extensions") + .and_then(|e| e.as_sequence()) + { + for ext in extensions { + if let Some(ext_name) = ext.as_str() { ext_deps.push(RuntimeExtDep::Local(ext_name.to_string())); } } @@ -867,9 +1784,9 @@ impl Config { Ok(ext_deps) } - /// Get merged section for nested paths (e.g., "ext.name.dependencies", "runtime.name.dependencies") + /// Get merged section for nested paths (e.g., "extensions.name.packages", "runtimes.name.packages") /// For target-specific overrides, the target is inserted between base_path and nested_path: - /// Base: [ext.name.dependencies] + Target: [ext.name..dependencies] + /// Base: [extensions.name.packages] + Target: 
[extensions.name..packages] #[allow(dead_code)] // Future API for command integration pub fn get_merged_nested_section( &self, @@ -911,91 +1828,14 @@ impl Config { let path = config_path.as_ref(); if !path.exists() { - // If a YAML file is requested but doesn't exist, check for a TOML version - let is_yaml_request = path - .extension() - .and_then(|e| e.to_str()) - .map(|e| e == "yaml" || e == "yml") - .unwrap_or(false); - - if is_yaml_request { - // Try to find a corresponding TOML file - let toml_path = path.with_extension("toml"); - if toml_path.exists() { - println!( - "âš  Found legacy TOML config file: {}. Migrating to YAML format...", - toml_path.display() - ); - - // Migrate TOML to YAML - let migrated_path = Self::migrate_toml_to_yaml(&toml_path)?; - - // Load the migrated YAML file - let content = fs::read_to_string(&migrated_path).with_context(|| { - format!( - "Failed to read migrated config file: {}", - migrated_path.display() - ) - })?; - - return Self::load_from_yaml_str(&content).with_context(|| { - format!( - "Failed to parse migrated YAML config file: {}", - migrated_path.display() - ) - }); - } - } - return Err(ConfigError::FileNotFound(path.display().to_string()).into()); } let content = fs::read_to_string(path) .with_context(|| format!("Failed to read config file: {}", path.display()))?; - // Determine format based on file extension - let is_yaml = path - .extension() - .and_then(|e| e.to_str()) - .map(|e| e == "yaml" || e == "yml") - .unwrap_or(false); - - if is_yaml { - Self::load_from_yaml_str(&content) - .with_context(|| format!("Failed to parse YAML config file: {}", path.display())) - } else { - // TOML file detected - migrate to YAML - println!( - "âš  Found legacy TOML config file: {}. 
Migrating to YAML...", - path.display() - ); - - // Parse TOML, convert to YAML, and save - #[allow(deprecated)] - let config = Self::load_from_toml_str(&content) - .with_context(|| format!("Failed to parse TOML config file: {}", path.display()))?; - - // Convert to YAML value for saving - let toml_val: toml::Value = - toml::from_str(&content).with_context(|| "Failed to parse TOML for conversion")?; - let yaml_val = Self::toml_to_yaml(&toml_val)?; - - // Save as YAML in the same directory - let yaml_path = path.with_extension("yaml"); - if !yaml_path.exists() { - let yaml_content = serde_yaml::to_string(&yaml_val)?; - fs::write(&yaml_path, yaml_content).with_context(|| { - format!( - "Failed to write migrated YAML config to {}", - yaml_path.display() - ) - })?; - println!("✓ Migrated to {}", yaml_path.display()); - println!(" Note: The old TOML file has been preserved. You can remove it after verifying the migration."); - } - - Ok(config) - } + Self::load_from_yaml_str(&content) + .with_context(|| format!("Failed to parse YAML config file: {}", path.display())) } /// Load configuration from a YAML string @@ -1015,85 +1855,12 @@ impl Config { Ok(config) } - /// Load configuration from a string (auto-detects YAML or TOML format) - /// Used primarily in tests for flexible parsing + /// Load configuration from a YAML string + /// Used primarily in tests #[allow(dead_code)] pub fn load_from_str(content: &str) -> Result { - // Try YAML first (preferred format) - if let Ok(config) = serde_yaml::from_str::(content) { - return Ok(config); - } - - // Fall back to TOML for test compatibility - #[allow(deprecated)] - { - Self::load_from_toml_str(content) - } - } - - // ============================================================================= - // DEPRECATED: TOML Support Functions (Pre-1.0.0) - // ============================================================================= - // The following functions support legacy TOML configuration files. 
- // These will be removed before the 1.0.0 release. - // ============================================================================= - - /// DEPRECATED: Load configuration from a TOML string - #[allow(dead_code)] // Kept for backward compatibility until 1.0.0 - #[deprecated( - note = "TOML format is deprecated. Use YAML format instead. Will be removed before 1.0.0" - )] - pub fn load_from_toml_str(content: &str) -> Result { - let config: Config = - toml::from_str(content).with_context(|| "Failed to parse TOML configuration")?; - - Ok(config) - } - - /// Convert TOML value to YAML value - fn toml_to_yaml(toml_val: &toml::Value) -> Result { - let json_str = serde_json::to_string(toml_val)?; - let yaml_val = serde_json::from_str(&json_str)?; - Ok(yaml_val) - } - - /// Migrate a TOML config file to YAML format - /// Reads an avocado.toml file, converts it to YAML, and saves as avocado.yaml - #[allow(dead_code)] // Public API for manual migration, kept until 1.0.0 - pub fn migrate_toml_to_yaml>(toml_path: P) -> Result { - let toml_path = toml_path.as_ref(); - - // Read the TOML file - let toml_content = fs::read_to_string(toml_path) - .with_context(|| format!("Failed to read TOML config file: {}", toml_path.display()))?; - - // Parse as TOML - let toml_val: toml::Value = - toml::from_str(&toml_content).with_context(|| "Failed to parse TOML configuration")?; - - // Convert to YAML - let yaml_val = Self::toml_to_yaml(&toml_val)?; - - // Serialize to YAML string - let yaml_content = - serde_yaml::to_string(&yaml_val).with_context(|| "Failed to serialize to YAML")?; - - // Determine output path - let yaml_path = toml_path.with_file_name("avocado.yaml"); - - // Write YAML file - fs::write(&yaml_path, yaml_content).with_context(|| { - format!("Failed to write YAML config file: {}", yaml_path.display()) - })?; - - println!( - "✓ Migrated {} to {}", - toml_path.display(), - yaml_path.display() - ); - println!(" Note: The old TOML file has been preserved. 
You can remove it after verifying the migration."); - - Ok(yaml_path) + serde_yaml::from_str::(content) + .with_context(|| "Failed to parse YAML configuration") } /// Get the SDK image from configuration @@ -1103,7 +1870,7 @@ impl Config { /// Get SDK dependencies pub fn get_sdk_dependencies(&self) -> Option<&HashMap> { - self.sdk.as_ref()?.dependencies.as_ref() + self.sdk.as_ref()?.packages.as_ref() } /// Get SDK dependencies with target interpolation. @@ -1137,7 +1904,7 @@ impl Config { // Extract SDK dependencies from the interpolated config let sdk_deps = parsed .get("sdk") - .and_then(|sdk| sdk.get("dependencies")) + .and_then(|sdk| sdk.get("packages")) .and_then(|deps| deps.as_mapping()) .map(|mapping| { mapping @@ -1251,7 +2018,7 @@ impl Config { /// None if the runtime doesn't exist or has no signing section. #[allow(dead_code)] // Public API for future use pub fn get_runtime_signing_key_name(&self, runtime_name: &str) -> Option { - let runtime_config = self.runtime.as_ref()?.get(runtime_name)?; + let runtime_config = self.runtimes.as_ref()?.get(runtime_name)?; Some(runtime_config.signing.as_ref()?.key.clone()) } @@ -1264,7 +2031,7 @@ impl Config { /// Returns the resolved key ID. 
#[allow(dead_code)] // Public API for future use pub fn get_runtime_signing_key(&self, runtime_name: &str) -> Option { - let runtime_config = self.runtime.as_ref()?.get(runtime_name)?; + let runtime_config = self.runtimes.as_ref()?.get(runtime_name)?; let signing_key_name = &runtime_config.signing.as_ref()?.key; // First, check the local signing_keys mapping @@ -1287,7 +2054,7 @@ impl Config { /// Get provision profile configuration pub fn get_provision_profile(&self, profile_name: &str) -> Option<&ProvisionProfileConfig> { - self.provision.as_ref()?.get(profile_name) + self.provision_profiles.as_ref()?.get(profile_name) } /// Get container args from provision profile @@ -1478,7 +2245,7 @@ impl Config { let mut external_extensions = HashMap::new(); // Find all ext.* sections in the external config - if let Some(ext_section) = parsed.get("ext").and_then(|e| e.as_mapping()) { + if let Some(ext_section) = parsed.get("extensions").and_then(|e| e.as_mapping()) { for (ext_name_key, ext_config) in ext_section { if let Some(ext_name) = ext_name_key.as_str() { external_extensions.insert(ext_name.to_string(), ext_config.clone()); @@ -1489,297 +2256,285 @@ impl Config { Ok(external_extensions) } - /// Find an extension in the full dependency tree (local and external) - /// This is a comprehensive search that looks through all runtime dependencies - /// and their transitive extension dependencies - pub fn find_extension_in_dependency_tree( - &self, + /// Parse the source field from an extension configuration + /// + /// Returns Some(ExtensionSource) if the extension has a source field, + /// None if it's a local extension (no source field) + pub fn parse_extension_source( + ext_name: &str, + ext_config: &serde_yaml::Value, + ) -> Result> { + let source = ext_config.get("source"); + + match source { + None => Ok(None), // Local extension + Some(source_value) => { + // Deserialize the source block into ExtensionSource + let source: ExtensionSource = 
serde_yaml::from_value(source_value.clone()) + .with_context(|| { + format!("Failed to parse source configuration for extension '{ext_name}'") + })?; + Ok(Some(source)) + } + } + } + + /// Discover all remote extensions in the configuration + /// + /// Returns a list of (extension_name, ExtensionSource) tuples for extensions + /// that have a `source` field in their configuration. + /// + /// If `target` is provided, extension names containing `{{ avocado.target }}` + /// will be interpolated with the target value. + pub fn discover_remote_extensions( config_path: &str, - extension_name: &str, - target: &str, - ) -> Result> { - let content = std::fs::read_to_string(config_path)?; + target: Option<&str>, + ) -> Result> { + let content = std::fs::read_to_string(config_path) + .with_context(|| format!("Failed to read config file: {config_path}"))?; let parsed = Self::parse_config_value(config_path, &content)?; - // First check if it's a local extension - if let Some(ext_section) = parsed.get("ext") { - if let Some(ext_map) = ext_section.as_mapping() { - if ext_map.contains_key(serde_yaml::Value::String(extension_name.to_string())) { - return Ok(Some(ExtensionLocation::Local { - name: extension_name.to_string(), - config_path: config_path.to_string(), - })); - } - } - } + Self::discover_remote_extensions_from_value(&parsed, target) + } - // If not local, search through the full dependency tree - let mut all_extensions = std::collections::HashSet::new(); - let mut visited = std::collections::HashSet::new(); + /// Discover remote extensions from a parsed config value + /// + /// If `target` is provided, extension names containing `{{ avocado.target }}` + /// will be interpolated with the target value. 
+ pub fn discover_remote_extensions_from_value( + parsed: &serde_yaml::Value, + target: Option<&str>, + ) -> Result> { + use crate::utils::interpolation::interpolate_name; - // Get all extensions from runtime dependencies (this will recursively traverse) - let runtime_section = parsed.get("runtime").and_then(|r| r.as_mapping()); + let mut remote_extensions = Vec::new(); - if let Some(runtime_section) = runtime_section { - for (runtime_name_key, _) in runtime_section { - if let Some(runtime_name) = runtime_name_key.as_str() { - // Get merged runtime config for this target - let merged_runtime = - self.get_merged_runtime_config(runtime_name, target, config_path)?; - if let Some(merged_value) = merged_runtime { - if let Some(dependencies) = merged_value - .get("dependencies") - .and_then(|d| d.as_mapping()) - { - for (_dep_name, dep_spec) in dependencies { - // Check for extension dependency - if let Some(ext_name) = dep_spec.get("ext").and_then(|v| v.as_str()) - { - // Check if this is an external extension (has config field) - if let Some(external_config) = - dep_spec.get("config").and_then(|v| v.as_str()) - { - let ext_location = ExtensionLocation::External { - name: ext_name.to_string(), - config_path: external_config.to_string(), - }; - all_extensions.insert(ext_location.clone()); - - // Recursively find nested external extension dependencies - self.find_all_nested_extensions_for_lookup( - config_path, - &ext_location, - &mut all_extensions, - &mut visited, - )?; - } else { - // Local extension - all_extensions.insert(ExtensionLocation::Local { - name: ext_name.to_string(), - config_path: config_path.to_string(), - }); - - // Also check local extension dependencies - self.find_local_extension_dependencies_for_lookup( - config_path, - &parsed, - ext_name, - &mut all_extensions, - &mut visited, - )?; - } - } - } - } + if let Some(ext_section) = parsed.get("extensions").and_then(|e| e.as_mapping()) { + for (ext_name_key, ext_config) in ext_section { + if let 
Some(raw_ext_name) = ext_name_key.as_str() { + // Interpolate extension name if target is provided + let ext_name = if let Some(t) = target { + interpolate_name(raw_ext_name, t) + } else { + raw_ext_name.to_string() + }; + + if let Some(source) = Self::parse_extension_source(&ext_name, ext_config)? { + remote_extensions.push((ext_name, source)); } } } } - // Now search for the target extension in all collected extensions - for ext_location in all_extensions { - let found_name = match &ext_location { - ExtensionLocation::Local { name, .. } => name, - ExtensionLocation::External { name, .. } => name, - }; + Ok(remote_extensions) + } - if found_name == extension_name { - return Ok(Some(ext_location)); + /// Get the path where remote extensions should be installed on the host filesystem. + /// + /// This resolves the Docker volume mountpoint to access `$AVOCADO_PREFIX/includes` from the host. + /// Returns: `//includes/` + /// + /// Falls back to `/.avocado//includes/` if volume state is not available. 
+ pub fn get_extensions_dir(&self, config_path: &str, target: &str) -> PathBuf { + let src_dir = self.get_resolved_src_dir(config_path).unwrap_or_else(|| { + PathBuf::from(config_path) + .parent() + .unwrap_or(Path::new(".")) + .to_path_buf() + }); + + // Try to load volume state and get the mountpoint + if let Ok(Some(volume_state)) = crate::utils::volume::VolumeState::load_from_dir(&src_dir) { + // Use synchronous Docker inspect to get the mountpoint + if let Ok(mountpoint) = Self::get_volume_mountpoint_sync(&volume_state) { + return mountpoint.join(target).join("includes"); } } - Ok(None) + // Fallback: use a local path in src_dir for development/testing + src_dir.join(".avocado").join(target).join("includes") } - /// Recursively find all nested extensions for lookup - fn find_all_nested_extensions_for_lookup( + /// Get the path where a specific remote extension should be installed + /// + /// Returns: `//includes//` + pub fn get_extension_install_path( &self, - base_config_path: &str, - ext_location: &ExtensionLocation, - all_extensions: &mut std::collections::HashSet, - visited: &mut std::collections::HashSet, - ) -> Result<()> { - let (ext_name, ext_config_path) = match ext_location { - ExtensionLocation::External { name, config_path } => (name, config_path), - ExtensionLocation::Local { name, config_path } => { - // For local extensions, we need to check their dependencies too - let content = std::fs::read_to_string(config_path)?; - let parsed = Self::parse_config_value(config_path, &content)?; - return self.find_local_extension_dependencies_for_lookup( - config_path, - &parsed, - name, - all_extensions, - visited, - ); - } - }; - - // Cycle detection: check if we've already processed this extension - let ext_key = format!("{ext_name}:{ext_config_path}"); - if visited.contains(&ext_key) { - return Ok(()); - } - visited.insert(ext_key); - - // Load the external extension configuration - let resolved_external_config_path = - 
self.resolve_path_relative_to_src_dir(base_config_path, ext_config_path); - let external_extensions = - self.load_external_extensions(base_config_path, ext_config_path)?; - - let extension_config = external_extensions.get(ext_name).ok_or_else(|| { - anyhow::anyhow!( - "Extension '{ext_name}' not found in external config file '{ext_config_path}'" - ) - })?; + config_path: &str, + ext_name: &str, + target: &str, + ) -> PathBuf { + self.get_extensions_dir(config_path, target).join(ext_name) + } - // Load the nested config file to get its src_dir setting - let nested_config_content = std::fs::read_to_string(&resolved_external_config_path) + /// Get the container path expression for extensions directory + /// + /// Returns: `$AVOCADO_PREFIX/includes` + #[allow(dead_code)] + pub fn get_extensions_container_path() -> &'static str { + "$AVOCADO_PREFIX/includes" + } + + /// Get the volume mountpoint synchronously (for use in non-async contexts) + fn get_volume_mountpoint_sync( + volume_state: &crate::utils::volume::VolumeState, + ) -> Result { + let output = std::process::Command::new(&volume_state.container_tool) + .args([ + "volume", + "inspect", + &volume_state.volume_name, + "--format", + "{{.Mountpoint}}", + ]) + .output() .with_context(|| { format!( - "Failed to read nested config file: {}", - resolved_external_config_path.display() + "Failed to inspect Docker volume '{}'", + volume_state.volume_name ) })?; - let nested_config = Self::parse_config_value( - resolved_external_config_path - .to_str() - .unwrap_or(ext_config_path), - &nested_config_content, - ) - .with_context(|| { - format!( - "Failed to parse nested config file: {}", - resolved_external_config_path.display() - ) - })?; - - // Create a temporary Config object for the nested config to handle its src_dir - let nested_config_obj = serde_yaml::from_value::(nested_config.clone())?; - // Check if this external extension has dependencies - if let Some(dependencies) = extension_config - .get("dependencies") - 
.and_then(|d| d.as_mapping()) - { - for (_dep_name, dep_spec) in dependencies { - // Check for nested extension dependency - if let Some(nested_ext_name) = dep_spec.get("ext").and_then(|v| v.as_str()) { - // Check if this is a nested external extension (has config field) - if let Some(nested_external_config) = - dep_spec.get("config").and_then(|v| v.as_str()) - { - // Resolve the nested config path relative to the nested config's src_dir - let nested_config_path = nested_config_obj - .resolve_path_relative_to_src_dir( - &resolved_external_config_path, - nested_external_config, - ); - - let nested_ext_location = ExtensionLocation::External { - name: nested_ext_name.to_string(), - config_path: nested_config_path.to_string_lossy().to_string(), - }; - - // Add the nested extension to all extensions - all_extensions.insert(nested_ext_location.clone()); + if !output.status.success() { + let stderr = String::from_utf8_lossy(&output.stderr); + anyhow::bail!( + "Failed to get mountpoint for volume '{}': {}", + volume_state.volume_name, + stderr + ); + } - // Recursively process the nested extension - self.find_all_nested_extensions_for_lookup( - base_config_path, - &nested_ext_location, - all_extensions, - visited, - )?; - } else { - // This is a local extension dependency within the external config - all_extensions.insert(ExtensionLocation::Local { - name: nested_ext_name.to_string(), - config_path: resolved_external_config_path - .to_string_lossy() - .to_string(), - }); - - // Check dependencies of this local extension in the external config - self.find_local_extension_dependencies_for_lookup( - &resolved_external_config_path.to_string_lossy(), - &nested_config, - nested_ext_name, - all_extensions, - visited, - )?; - } - } - } + let mountpoint = String::from_utf8_lossy(&output.stdout).trim().to_string(); + if mountpoint.is_empty() { + anyhow::bail!( + "Docker volume '{}' has no mountpoint", + volume_state.volume_name + ); } - Ok(()) + Ok(PathBuf::from(mountpoint)) } - /// 
Find dependencies of local extensions for lookup - fn find_local_extension_dependencies_for_lookup( + /// Check if a remote extension is already installed + #[allow(dead_code)] + pub fn is_remote_extension_installed( &self, config_path: &str, - parsed_config: &serde_yaml::Value, ext_name: &str, - all_extensions: &mut std::collections::HashSet, - visited: &mut std::collections::HashSet, - ) -> Result<()> { - // Cycle detection for local extensions - let ext_key = format!("local:{ext_name}:{config_path}"); - if visited.contains(&ext_key) { - return Ok(()); + target: &str, + ) -> bool { + let install_path = self.get_extension_install_path(config_path, ext_name, target); + // Check if the directory exists and contains an avocado.yaml + install_path.exists() + && (install_path.join("avocado.yaml").exists() + || install_path.join("avocado.yml").exists()) + } + + /// Find an extension in the full dependency tree (local and external) + /// This is a comprehensive search that looks through all runtime dependencies + /// and their transitive extension dependencies + pub fn find_extension_in_dependency_tree( + &self, + config_path: &str, + extension_name: &str, + target: &str, + ) -> Result> { + use crate::utils::interpolation::interpolate_name; + + let content = std::fs::read_to_string(config_path)?; + let parsed = Self::parse_config_value(config_path, &content)?; + + // First check if it's defined in the ext section + // Need to iterate and interpolate keys since they may contain templates like {{ avocado.target }} + if let Some(ext_section) = parsed.get("extensions") { + if let Some(ext_map) = ext_section.as_mapping() { + for (ext_key, ext_config) in ext_map { + if let Some(raw_name) = ext_key.as_str() { + // Interpolate the extension name with the target + let interpolated_name = interpolate_name(raw_name, target); + if interpolated_name == extension_name { + // Check if this is a remote extension (has source: field) + if let Some(source) = + 
Self::parse_extension_source(extension_name, ext_config)? + { + return Ok(Some(ExtensionLocation::Remote { + name: extension_name.to_string(), + source, + })); + } + // Otherwise it's a local extension + return Ok(Some(ExtensionLocation::Local { + name: extension_name.to_string(), + config_path: config_path.to_string(), + })); + } + } + } + } } - visited.insert(ext_key); - // Get the local extension configuration - if let Some(ext_config) = parsed_config.get("ext").and_then(|ext| ext.get(ext_name)) { - // Check if this local extension has dependencies - if let Some(dependencies) = ext_config.get("dependencies").and_then(|d| d.as_mapping()) - { - for (_dep_name, dep_spec) in dependencies { - // Check for extension dependency - if let Some(nested_ext_name) = dep_spec.get("ext").and_then(|v| v.as_str()) { - // Check if this is an external extension (has config field) - if let Some(external_config) = - dep_spec.get("config").and_then(|v| v.as_str()) + // If not found in ext section, search through runtime extensions array + let runtime_section = parsed.get("runtimes").and_then(|r| r.as_mapping()); + + if let Some(runtime_section) = runtime_section { + for (runtime_name_key, _) in runtime_section { + if let Some(runtime_name) = runtime_name_key.as_str() { + // Get merged runtime config for this target + let merged_runtime = + self.get_merged_runtime_config(runtime_name, target, config_path)?; + if let Some(merged_value) = merged_runtime { + // Check the new `extensions` array format + if let Some(extensions) = + merged_value.get("extensions").and_then(|e| e.as_sequence()) { - let ext_location = ExtensionLocation::External { - name: nested_ext_name.to_string(), - config_path: external_config.to_string(), - }; - all_extensions.insert(ext_location.clone()); - - // Recursively find nested external extension dependencies - self.find_all_nested_extensions_for_lookup( - config_path, - &ext_location, - all_extensions, - visited, - )?; - } else { - // Local extension dependency 
- all_extensions.insert(ExtensionLocation::Local { - name: nested_ext_name.to_string(), - config_path: config_path.to_string(), - }); - - // Recursively check this local extension's dependencies - self.find_local_extension_dependencies_for_lookup( - config_path, - parsed_config, - nested_ext_name, - all_extensions, - visited, - )?; + for ext in extensions { + if let Some(ext_name) = ext.as_str() { + if ext_name == extension_name { + // Found in extensions array - now find its definition in ext section + if let Some(ext_section) = parsed.get("extensions") { + if let Some(ext_map) = ext_section.as_mapping() { + for (ext_key, ext_config) in ext_map { + if let Some(raw_name) = ext_key.as_str() { + let interpolated = + interpolate_name(raw_name, target); + if interpolated == extension_name { + if let Some(source) = + Self::parse_extension_source( + extension_name, + ext_config, + )? + { + return Ok(Some( + ExtensionLocation::Remote { + name: extension_name + .to_string(), + source, + }, + )); + } + return Ok(Some( + ExtensionLocation::Local { + name: extension_name + .to_string(), + config_path: config_path + .to_string(), + }, + )); + } + } + } + } + } + } + } + } } } } } } - Ok(()) + Ok(None) } /// Expand environment variables in a string @@ -1987,7 +2742,7 @@ impl Config { if let Some(sdk) = &self.sdk { if let Some(compile) = &sdk.compile { for (section_name, compile_config) in compile { - if let Some(dependencies) = &compile_config.dependencies { + if let Some(dependencies) = &compile_config.packages { compile_deps.insert(section_name.clone(), dependencies); } } @@ -2058,7 +2813,7 @@ impl Config { let mut visited = std::collections::HashSet::new(); // Process local extensions in the current config - if let Some(ext_section) = parsed.get("ext") { + if let Some(ext_section) = parsed.get("extensions") { if let Some(ext_table) = ext_section.as_mapping() { for (ext_name_val, ext_config) in ext_table { if let Some(ext_name) = ext_name_val.as_str() { @@ -2066,10 +2821,10 
@@ impl Config { // Extract SDK dependencies for this extension (base and target-specific) let mut merged_deps = HashMap::new(); - // First, collect base SDK dependencies from [ext..sdk.dependencies] + // First, collect base SDK dependencies from [extensions..sdk.packages] if let Some(sdk_section) = ext_config_table.get("sdk") { if let Some(sdk_table) = sdk_section.as_mapping() { - if let Some(dependencies) = sdk_table.get("dependencies") { + if let Some(dependencies) = sdk_table.get("packages") { if let Some(deps_table) = dependencies.as_mapping() { for (k, v) in deps_table.iter() { if let Some(key_str) = k.as_str() { @@ -2082,14 +2837,14 @@ impl Config { } } - // Then, if we have a target, collect target-specific dependencies from [ext...sdk.dependencies] + // Then, if we have a target, collect target-specific dependencies from [extensions...sdk.packages] if let Some(target) = target { if let Some(target_section) = ext_config_table.get(target) { if let Some(target_table) = target_section.as_mapping() { if let Some(sdk_section) = target_table.get("sdk") { if let Some(sdk_table) = sdk_section.as_mapping() { if let Some(dependencies) = - sdk_table.get("dependencies") + sdk_table.get("packages") { if let Some(deps_table) = dependencies.as_mapping() @@ -2118,7 +2873,7 @@ impl Config { // If we have a config path, traverse external extension dependencies if let Some(config_path) = config_path { - if let Some(dependencies) = ext_config_table.get("dependencies") { + if let Some(dependencies) = ext_config_table.get("packages") { if let Some(deps_table) = dependencies.as_mapping() { self.collect_external_extension_sdk_dependencies_with_target( config_path, @@ -2138,12 +2893,12 @@ impl Config { // Also process extensions referenced in runtime dependencies if let Some(config_path) = config_path { - if let Some(runtime_section) = parsed.get("runtime") { + if let Some(runtime_section) = parsed.get("runtimes") { if let Some(runtime_table) = runtime_section.as_mapping() { for 
(_runtime_name, runtime_config) in runtime_table { if let Some(runtime_config_table) = runtime_config.as_mapping() { // Check base runtime dependencies - if let Some(dependencies) = runtime_config_table.get("dependencies") { + if let Some(dependencies) = runtime_config_table.get("packages") { if let Some(deps_table) = dependencies.as_mapping() { self.collect_external_extension_sdk_dependencies_with_target( config_path, @@ -2159,8 +2914,7 @@ impl Config { if let Some(target) = target { if let Some(target_section) = runtime_config_table.get(target) { if let Some(target_table) = target_section.as_mapping() { - if let Some(dependencies) = target_table.get("dependencies") - { + if let Some(dependencies) = target_table.get("packages") { if let Some(deps_table) = dependencies.as_mapping() { self.collect_external_extension_sdk_dependencies_with_target( config_path, @@ -2195,7 +2949,7 @@ impl Config { for (_dep_name, dep_spec) in dependencies { if let Some(dep_spec_table) = dep_spec.as_mapping() { // Check for external extension dependency - if let Some(ext_name) = dep_spec_table.get("ext").and_then(|v| v.as_str()) { + if let Some(ext_name) = dep_spec_table.get("extensions").and_then(|v| v.as_str()) { if let Some(external_config) = dep_spec_table.get("config").and_then(|v| v.as_str()) { @@ -2229,7 +2983,9 @@ impl Config { ) { // Only process the specific extension that's being referenced - if let Some(ext_section) = external_parsed.get("ext") { + if let Some(ext_section) = + external_parsed.get("extensions") + { if let Some(ext_table) = ext_section.as_mapping() { if let Some(external_ext_config) = ext_table.get(ext_name) @@ -2240,7 +2996,7 @@ impl Config { // Extract SDK dependencies for this specific external extension (base and target-specific) let mut merged_deps = HashMap::new(); - // First, collect base SDK dependencies from [ext..sdk.dependencies] + // First, collect base SDK dependencies from [extensions..sdk.packages] if let Some(sdk_section) = 
external_ext_config_table.get("sdk") { @@ -2248,8 +3004,7 @@ impl Config { sdk_section.as_mapping() { if let Some(dependencies) = - sdk_table - .get("dependencies") + sdk_table.get("packages") { if let Some(deps_table) = dependencies @@ -2273,7 +3028,7 @@ impl Config { } } - // Then, if we have a target, collect target-specific dependencies from [ext...sdk.dependencies] + // Then, if we have a target, collect target-specific dependencies from [extensions...sdk.packages] if let Some(target) = target { if let Some(target_section) = external_ext_config_table @@ -2292,9 +3047,8 @@ impl Config { if let Some( dependencies, ) = sdk_table - .get( - "dependencies", - ) { + .get("packages") + { if let Some(deps_table) = dependencies.as_mapping() { // Target-specific dependencies override base dependencies for (k, v) in deps_table.iter() { @@ -2321,7 +3075,7 @@ impl Config { // Recursively process dependencies of this specific external extension if let Some(nested_dependencies) = external_ext_config_table - .get("dependencies") + .get("packages") { if let Some(nested_deps_table) = nested_dependencies.as_mapping() @@ -2362,7 +3116,7 @@ impl Config { /// Get target from configuration /// Returns the target if there's exactly one runtime configuration pub fn get_target(&self) -> Option { - let runtime = self.runtime.as_ref()?; + let runtime = self.runtimes.as_ref()?; // Find all runtime configurations (nested dictionaries) let runtime_configs: Vec<&RuntimeConfig> = runtime.values().collect(); @@ -2458,7 +3212,7 @@ impl Config { /// Get merged SDK dependencies for a specific target. /// - /// This merges [sdk.dependencies] with [sdk..dependencies], + /// This merges [sdk.packages] with [sdk..packages], /// where target-specific dependencies override base dependencies. 
/// /// # Arguments @@ -2482,7 +3236,7 @@ impl Config { // First, add base SDK dependencies if let Some(sdk_section) = parsed.get("sdk") { - if let Some(deps) = sdk_section.get("dependencies") { + if let Some(deps) = sdk_section.get("packages") { if let Some(deps_table) = deps.as_mapping() { for (key, value) in deps_table { if let Some(key_str) = key.as_str() { @@ -2494,7 +3248,7 @@ impl Config { // Then, add/override with target-specific dependencies if let Some(target_section) = sdk_section.get(target) { - if let Some(target_deps) = target_section.get("dependencies") { + if let Some(target_deps) = target_section.get("packages") { if let Some(target_deps_table) = target_deps.as_mapping() { for (key, value) in target_deps_table { if let Some(key_str) = key.as_str() { @@ -2555,18 +3309,18 @@ fn merge_sdk_configs(mut base: SdkConfig, target: SdkConfig) -> SdkConfig { base.host_gid = target.host_gid; } - // For dependencies and compile, merge the HashMaps - if let Some(target_deps) = target.dependencies { - match base.dependencies { + // For packages and compile, merge the HashMaps + if let Some(target_deps) = target.packages { + match base.packages { Some(ref mut base_deps) => { - // Merge target dependencies into base dependencies + // Merge target packages into base packages for (key, value) in target_deps { base_deps.insert(key, value); } } None => { - // No base dependencies, use target dependencies - base.dependencies = Some(target_deps); + // No base packages, use target packages + base.packages = Some(target_deps); } } } @@ -2655,20 +3409,20 @@ mod tests { #[test] fn test_load_valid_config() { let config_content = r#" -[runtime.default] -target = "qemux86-64" - -[runtime.default.dependencies] -nativesdk-avocado-images = "*" - -[sdk] -image = "docker.io/avocadolinux/sdk:apollo-edge" - -[sdk.dependencies] -cmake = "*" +runtimes: + default: + target: qemux86-64 + packages: + nativesdk-avocado-images: "*" -[sdk.compile.app] -dependencies = { gcc = "*" } +sdk: + 
image: docker.io/avocadolinux/sdk:apollo-edge + packages: + cmake: "*" + compile: + app: + packages: + gcc: "*" "#; let mut temp_file = NamedTempFile::new().unwrap(); @@ -2694,10 +3448,10 @@ dependencies = { gcc = "*" } #[test] fn test_src_dir_absolute_path() { let config_content = r#" -src_dir = "/absolute/path/to/source" +src_dir: "/absolute/path/to/source" -[sdk] -image = "docker.io/avocadolinux/sdk:apollo-edge" +sdk: + image: "docker.io/avocadolinux/sdk:apollo-edge" "#; let mut temp_file = NamedTempFile::new().unwrap(); @@ -2716,10 +3470,10 @@ image = "docker.io/avocadolinux/sdk:apollo-edge" #[test] fn test_src_dir_relative_path() { let config_content = r#" -src_dir = "../../" +src_dir: "../../" -[sdk] -image = "docker.io/avocadolinux/sdk:apollo-edge" +sdk: + image: "docker.io/avocadolinux/sdk:apollo-edge" "#; let mut temp_file = NamedTempFile::new().unwrap(); @@ -2744,8 +3498,8 @@ image = "docker.io/avocadolinux/sdk:apollo-edge" #[test] fn test_src_dir_not_configured() { let config_content = r#" -[sdk] -image = "docker.io/avocadolinux/sdk:apollo-edge" +sdk: + image: "docker.io/avocadolinux/sdk:apollo-edge" "#; let mut temp_file = NamedTempFile::new().unwrap(); @@ -2763,13 +3517,13 @@ image = "docker.io/avocadolinux/sdk:apollo-edge" sdk: image: "docker.io/avocadolinux/sdk:apollo-edge" -ext: +extensions: avocado-dev: types: - sysext - confext sdk: - dependencies: + packages: nativesdk-avocado-hitl: "*" nativesdk-something-else: "1.2.3" @@ -2777,7 +3531,7 @@ ext: types: - sysext sdk: - dependencies: + packages: nativesdk-tool: "*" "#; @@ -2821,18 +3575,18 @@ ext: sdk: image: "docker.io/avocadolinux/sdk:apollo-edge" -ext: +extensions: avocado-dev: types: - sysext - confext sdk: - dependencies: + packages: nativesdk-avocado-hitl: "*" nativesdk-base-tool: "1.0.0" qemux86-64: sdk: - dependencies: + packages: nativesdk-avocado-hitl: "2.0.0" nativesdk-target-specific: "*" @@ -2840,11 +3594,11 @@ ext: types: - sysext sdk: - dependencies: + packages: nativesdk-tool: "*" 
qemuarm64: sdk: - dependencies: + packages: nativesdk-arm-tool: "*" "#; @@ -2963,19 +3717,19 @@ ext: sdk: image: "docker.io/avocadolinux/sdk:apollo-edge" -runtime: +runtimes: dev: - dependencies: + packages: avocado-ext-dev: ext: avocado-ext-dev config: "extensions/dev/avocado.yaml" raspberrypi4: - dependencies: + packages: avocado-bsp-raspberrypi4: ext: avocado-bsp-raspberrypi4 config: "bsp/raspberrypi4/avocado.yaml" -ext: +extensions: config: types: - confext @@ -3018,9 +3772,9 @@ ext: #[test] fn test_sdk_container_args() { let config_content = r#" -[sdk] -image = "docker.io/avocadolinux/sdk:apollo-edge" -container_args = ["--network=$USER-avocado", "--privileged"] +sdk: + image: "docker.io/avocadolinux/sdk:apollo-edge" + container_args: ["--network=$USER-avocado", "--privileged"] "#; let config = Config::load_from_str(config_content).unwrap(); @@ -3037,11 +3791,12 @@ container_args = ["--network=$USER-avocado", "--privileged"] #[test] fn test_default_target_field() { let config_content = r#" -default_target = "qemux86-64" +default_target: "qemux86-64" -[runtime.dev] -target = "qemux86-64" -image = "avocadolinux/runtime:apollo-edge" +runtimes: + dev: + target: "qemux86-64" + image: "avocadolinux/runtime:apollo-edge" "#; let config = Config::load_from_str(config_content).unwrap(); @@ -3054,9 +3809,10 @@ image = "avocadolinux/runtime:apollo-edge" #[test] fn test_no_default_target_field() { let config_content = r#" -[runtime.dev] -target = "qemux86-64" -image = "avocadolinux/runtime:apollo-edge" +runtimes: + dev: + target: "qemux86-64" + image: "avocadolinux/runtime:apollo-edge" "#; let config = Config::load_from_str(config_content).unwrap(); @@ -3069,11 +3825,12 @@ image = "avocadolinux/runtime:apollo-edge" #[test] fn test_empty_default_target_field() { let config_content = r#" -default_target = "" +default_target: "" -[runtime.dev] -target = "qemux86-64" -image = "avocadolinux/runtime:apollo-edge" +runtimes: + dev: + target: "qemux86-64" + image: 
"avocadolinux/runtime:apollo-edge" "#; let config = Config::load_from_str(config_content).unwrap(); @@ -3086,9 +3843,9 @@ image = "avocadolinux/runtime:apollo-edge" #[test] fn test_merge_sdk_container_args() { let config_content = r#" -[sdk] -image = "docker.io/avocadolinux/sdk:apollo-edge" -container_args = ["--network=host", "--privileged"] +sdk: + image: "docker.io/avocadolinux/sdk:apollo-edge" + container_args: ["--network=host", "--privileged"] "#; let config = Config::load_from_str(config_content).unwrap(); @@ -3109,9 +3866,9 @@ container_args = ["--network=host", "--privileged"] #[test] fn test_merge_sdk_container_args_config_only() { let config_content = r#" -[sdk] -image = "docker.io/avocadolinux/sdk:apollo-edge" -container_args = ["--network=host"] +sdk: + image: "docker.io/avocadolinux/sdk:apollo-edge" + container_args: ["--network=host"] "#; let config = Config::load_from_str(config_content).unwrap(); @@ -3128,8 +3885,8 @@ container_args = ["--network=host"] #[test] fn test_merge_sdk_container_args_cli_only() { let config_content = r#" -[sdk] -image = "docker.io/avocadolinux/sdk:apollo-edge" +sdk: + image: "docker.io/avocadolinux/sdk:apollo-edge" "#; let config = Config::load_from_str(config_content).unwrap(); @@ -3147,8 +3904,8 @@ image = "docker.io/avocadolinux/sdk:apollo-edge" #[test] fn test_merge_sdk_container_args_none() { let config_content = r#" -[sdk] -image = "docker.io/avocadolinux/sdk:apollo-edge" +sdk: + image: "docker.io/avocadolinux/sdk:apollo-edge" "#; let config = Config::load_from_str(config_content).unwrap(); @@ -3163,9 +3920,9 @@ image = "docker.io/avocadolinux/sdk:apollo-edge" fn test_get_sdk_repo_url_env_override() { // Test environment variable override for SDK repo URL let config_content = r#" -[sdk] -image = "docker.io/avocadolinux/sdk:apollo-edge" -repo_url = "https://config.example.com" +sdk: + image: "docker.io/avocadolinux/sdk:apollo-edge" + repo_url: "https://config.example.com" "#; let config = 
Config::load_from_str(config_content).unwrap(); @@ -3188,9 +3945,9 @@ repo_url = "https://config.example.com" fn test_get_sdk_repo_release_env_override() { // Test environment variable override for SDK repo release let config_content = r#" -[sdk] -image = "docker.io/avocadolinux/sdk:apollo-edge" -repo_release = "config-release" +sdk: + image: "docker.io/avocadolinux/sdk:apollo-edge" + repo_release: "config-release" "#; let config = Config::load_from_str(config_content).unwrap(); @@ -3213,8 +3970,8 @@ repo_release = "config-release" fn test_get_sdk_repo_url_env_only() { // Test environment variable when no config value exists let config_content = r#" -[sdk] -image = "docker.io/avocadolinux/sdk:apollo-edge" +sdk: + image: "docker.io/avocadolinux/sdk:apollo-edge" "#; let config = Config::load_from_str(config_content).unwrap(); @@ -3270,9 +4027,9 @@ image = "docker.io/avocadolinux/sdk:apollo-edge" std::env::set_var("TEST_USER", "myuser"); let config_content = r#" -[sdk] -image = "docker.io/avocadolinux/sdk:apollo-edge" -container_args = ["--network=$TEST_USER-avocado", "--privileged"] +sdk: + image: "docker.io/avocadolinux/sdk:apollo-edge" + container_args: ["--network=$TEST_USER-avocado", "--privileged"] "#; let config = Config::load_from_str(config_content).unwrap(); @@ -3323,11 +4080,11 @@ container_args = ["--network=$TEST_USER-avocado", "--privileged"] #[test] fn test_provision_profile_config() { let config_content = r#" -[provision.usb] -container_args = ["-v", "/dev/usb:/dev/usb", "-v", "/sys:/sys:ro"] - -[provision.development] -container_args = ["--privileged", "--network=host"] +provision_profiles: + usb: + container_args: ["-v", "/dev/usb:/dev/usb", "-v", "/sys:/sys:ro"] + development: + container_args: ["--privileged", "--network=host"] "#; let config = Config::load_from_str(config_content).unwrap(); @@ -3354,8 +4111,9 @@ container_args = ["--privileged", "--network=host"] #[test] fn test_merge_provision_container_args() { let config_content = r#" 
-[provision.usb] -container_args = ["-v", "/dev/usb:/dev/usb", "--privileged"] +provision_profiles: + usb: + container_args: ["-v", "/dev/usb:/dev/usb", "--privileged"] "#; let config = Config::load_from_str(config_content).unwrap(); @@ -3377,8 +4135,9 @@ container_args = ["-v", "/dev/usb:/dev/usb", "--privileged"] #[test] fn test_merge_provision_container_args_profile_only() { let config_content = r#" -[provision.test] -container_args = ["--network=host"] +provision_profiles: + test: + container_args: ["--network=host"] "#; let config = Config::load_from_str(config_content).unwrap(); @@ -3395,8 +4154,8 @@ container_args = ["--network=host"] #[test] fn test_merge_provision_container_args_cli_only() { let config_content = r#" -[sdk] -image = "docker.io/avocadolinux/sdk:apollo-edge" +sdk: + image: "docker.io/avocadolinux/sdk:apollo-edge" "#; let config = Config::load_from_str(config_content).unwrap(); @@ -3414,8 +4173,8 @@ image = "docker.io/avocadolinux/sdk:apollo-edge" #[test] fn test_merge_provision_container_args_none() { let config_content = r#" -[sdk] -image = "docker.io/avocadolinux/sdk:apollo-edge" +sdk: + image: "docker.io/avocadolinux/sdk:apollo-edge" "#; let config = Config::load_from_str(config_content).unwrap(); @@ -3430,12 +4189,13 @@ image = "docker.io/avocadolinux/sdk:apollo-edge" fn test_merge_provision_container_args_with_sdk_defaults() { // Test that SDK container_args are included as base defaults let config_content = r#" -[sdk] -image = "docker.io/avocadolinux/sdk:apollo-edge" -container_args = ["--privileged", "--network=host"] +sdk: + image: "docker.io/avocadolinux/sdk:apollo-edge" + container_args: ["--privileged", "--network=host"] -[provision.usb] -container_args = ["-v", "/dev:/dev"] +provision_profiles: + usb: + container_args: ["-v", "/dev:/dev"] "#; let config = Config::load_from_str(config_content).unwrap(); @@ -3459,9 +4219,9 @@ container_args = ["-v", "/dev:/dev"] fn test_merge_provision_container_args_sdk_defaults_only() { // Test 
that SDK container_args are used when no provision profile or CLI args let config_content = r#" -[sdk] -image = "docker.io/avocadolinux/sdk:apollo-edge" -container_args = ["--privileged", "-v", "/dev:/dev"] +sdk: + image: "docker.io/avocadolinux/sdk:apollo-edge" + container_args: ["--privileged", "-v", "/dev:/dev"] "#; let config = Config::load_from_str(config_content).unwrap(); @@ -3480,12 +4240,13 @@ container_args = ["--privileged", "-v", "/dev:/dev"] fn test_merge_provision_container_args_deduplication() { // Test that duplicate args are removed (keeping the last occurrence) let config_content = r#" -[sdk] -image = "docker.io/avocadolinux/sdk:apollo-edge" -container_args = ["--privileged", "--network=host", "-v", "/dev:/dev"] +sdk: + image: "docker.io/avocadolinux/sdk:apollo-edge" + container_args: ["--privileged", "--network=host", "-v", "/dev:/dev"] -[provision.tegraflash] -container_args = ["--privileged", "--network=host", "-v", "/dev:/dev", "-v", "/sys:/sys"] +provision_profiles: + tegraflash: + container_args: ["--privileged", "--network=host", "-v", "/dev:/dev", "-v", "/sys:/sys"] "#; let config = Config::load_from_str(config_content).unwrap(); @@ -3518,7 +4279,7 @@ container_args = ["--privileged", "--network=host", "-v", "/dev:/dev", "-v", "/s fn test_provision_state_file_default() { // Test that state_file defaults to .avocado/provision-{profile}.state when not configured let config_content = r#" -provision: +provision_profiles: usb: container_args: - --privileged @@ -3539,7 +4300,7 @@ provision: fn test_provision_state_file_custom() { // Test that custom state_file is used when configured let config_content = r#" -provision: +provision_profiles: production: container_args: - --privileged @@ -3563,23 +4324,20 @@ provision: fn test_merged_sdk_config() { // Create a temporary config file for testing merging let config_content = r#" -default_target = "qemux86-64" -supported_targets = ["qemux86-64"] +default_target: "qemux86-64" +supported_targets: 
["qemux86-64"] -[sdk] -image = "base-image" -repo_url = "http://base-repo" -repo_release = "base-release" - -[sdk.dependencies] -base-package = "*" - -[sdk.qemux86-64] -image = "target-specific-image" -repo_url = "http://target-repo" - -[sdk.qemux86-64.dependencies] -target-package = "*" +sdk: + image: "base-image" + repo_url: "http://base-repo" + repo_release: "base-release" + packages: + base-package: "*" + qemux86-64: + image: "target-specific-image" + repo_url: "http://target-repo" + packages: + target-package: "*" "#; let temp_file = tempfile::NamedTempFile::new().unwrap(); @@ -3602,13 +4360,12 @@ target-package = "*" fn test_merged_sdk_config_with_container_args() { // Test that target-specific container_args are properly merged let config_content = r#" -default_target = "qemux86-64" +default_target: "qemux86-64" -[sdk] -image = "base-image" - -[sdk.qemux86-64] -container_args = ["--net=host", "--privileged"] +sdk: + image: "base-image" + qemux86-64: + container_args: ["--net=host", "--privileged"] "#; let temp_file = tempfile::NamedTempFile::new().unwrap(); @@ -3633,16 +4390,17 @@ container_args = ["--net=host", "--privileged"] fn test_merged_sdk_dependencies() { // Create a temporary config file for testing dependency merging let config_content = r#" -default_target = "qemux86-64" -supported_targets = ["qemux86-64"] +default_target: "qemux86-64" +supported_targets: ["qemux86-64"] -[sdk.dependencies] -base-package = "*" -shared-package = "1.0" - -[sdk.qemux86-64.dependencies] -target-package = "*" -shared-package = "2.0" +sdk: + packages: + base-package: "*" + shared-package: "1.0" + qemux86-64: + packages: + target-package: "*" + shared-package: "2.0" "#; let temp_file = tempfile::NamedTempFile::new().unwrap(); @@ -3669,12 +4427,12 @@ shared-package = "2.0" fn test_merged_sdk_config_no_target_section() { // Test merging when there's no target-specific section let config_content = r#" -default_target = "qemux86-64" -supported_targets = ["qemux86-64"] 
+default_target: "qemux86-64" +supported_targets: ["qemux86-64"] -[sdk] -image = "base-image" -repo_url = "http://base-repo" +sdk: + image: "base-image" + repo_url: "http://base-repo" "#; let temp_file = tempfile::NamedTempFile::new().unwrap(); @@ -3693,32 +4451,31 @@ repo_url = "http://base-repo" #[test] fn test_hierarchical_section_merging() { let config_content = r#" -default_target = "qemux86-64" -supported_targets = ["qemux86-64", "qemuarm64"] - -[sdk] -image = "base-image" -repo_url = "base-repo" - -[sdk.qemuarm64] -image = "arm64-image" - -[provision.usb] -container_args = ["--network=host"] +default_target: "qemux86-64" +supported_targets: ["qemux86-64", "qemuarm64"] -[provision.usb.qemuarm64] -container_args = ["--privileged"] +sdk: + image: "base-image" + repo_url: "base-repo" + qemuarm64: + image: "arm64-image" -[runtime.prod] -some_setting = "base-value" +provision_profiles: + usb: + container_args: ["--network=host"] + qemuarm64: + container_args: ["--privileged"] -[runtime.prod.qemuarm64] -some_setting = "arm64-value" -additional_setting = "arm64-only" +runtimes: + prod: + some_setting: "base-value" + qemuarm64: + some_setting: "arm64-value" + additional_setting: "arm64-only" "#; // Write test config to a temp file - let temp_file = std::env::temp_dir().join("hierarchical_test.toml"); + let temp_file = std::env::temp_dir().join("hierarchical_test.yaml"); std::fs::write(&temp_file, config_content).unwrap(); let config_path = temp_file.to_str().unwrap(); @@ -3829,27 +4586,29 @@ additional_setting = "arm64-only" #[test] fn test_nested_section_merging() { let config_content = r#" -default_target = "qemux86-64" -supported_targets = ["qemux86-64", "qemuarm64"] - -[ext.avocado-dev.dependencies] -base-dep = "*" -shared-dep = "1.0" - -[ext.avocado-dev.qemuarm64.dependencies] -arm64-dep = "*" -shared-dep = "2.0" - -[ext.avocado-dev.users.root] -password = "" -shell = "/bin/bash" +default_target: "qemux86-64" +supported_targets: ["qemux86-64", "qemuarm64"] 
-[ext.avocado-dev.qemuarm64.users.root] -password = "arm64-password" +extensions: + avocado-dev: + packages: + base-dep: "*" + shared-dep: "1.0" + users: + root: + password: "" + shell: "/bin/bash" + qemuarm64: + packages: + arm64-dep: "*" + shared-dep: "2.0" + users: + root: + password: "arm64-password" "#; // Write test config to a temp file - let temp_file = std::env::temp_dir().join("nested_test.toml"); + let temp_file = std::env::temp_dir().join("nested_test.yaml"); std::fs::write(&temp_file, config_content).unwrap(); let config_path = temp_file.to_str().unwrap(); @@ -3857,7 +4616,12 @@ password = "arm64-password" // Test nested dependencies merging let deps_x86 = config - .get_merged_nested_section("ext.avocado-dev", "dependencies", "qemux86-64", config_path) + .get_merged_nested_section( + "extensions.avocado-dev", + "packages", + "qemux86-64", + config_path, + ) .unwrap(); assert!(deps_x86.is_some()); let deps_x86_value = deps_x86.unwrap(); @@ -3873,7 +4637,12 @@ password = "arm64-password" assert!(deps_x86_table.get("arm64-dep").is_none()); let deps_arm64 = config - .get_merged_nested_section("ext.avocado-dev", "dependencies", "qemuarm64", config_path) + .get_merged_nested_section( + "extensions.avocado-dev", + "packages", + "qemuarm64", + config_path, + ) .unwrap(); assert!(deps_arm64.is_some()); let deps_arm64_value = deps_arm64.unwrap(); @@ -3897,7 +4666,12 @@ password = "arm64-password" // Test nested users merging let users_x86 = config - .get_merged_nested_section("ext.avocado-dev", "users.root", "qemux86-64", config_path) + .get_merged_nested_section( + "extensions.avocado-dev", + "users.root", + "qemux86-64", + config_path, + ) .unwrap(); assert!(users_x86.is_some()); let users_x86_value = users_x86.unwrap(); @@ -3912,7 +4686,12 @@ password = "arm64-password" ); let users_arm64 = config - .get_merged_nested_section("ext.avocado-dev", "users.root", "qemuarm64", config_path) + .get_merged_nested_section( + "extensions.avocado-dev", + "users.root", + 
"qemuarm64", + config_path, + ) .unwrap(); assert!(users_arm64.is_some()); let users_arm64_value = users_arm64.unwrap(); @@ -3933,15 +4712,19 @@ password = "arm64-password" #[test] fn test_target_only_sections() { let config_content = r#" -default_target = "qemux86-64" -supported_targets = ["qemux86-64", "qemuarm64"] +default_target: "qemux86-64" +supported_targets: ["qemux86-64", "qemuarm64"] # No base section, only target-specific -[runtime.special.qemuarm64] -special_setting = "arm64-only" +runtimes: + special: + qemuarm64: + special_setting: "arm64-only" -[ext.arm-only.qemuarm64] -types = ["sysext"] +extensions: + arm-only: + qemuarm64: + types: ["sysext"] "#; let temp_file = std::env::temp_dir().join("target_only_test.toml"); @@ -3993,11 +4776,11 @@ types = ["sysext"] #[test] fn test_supported_targets_all_format() { let config_content = r#" -default_target = "qemux86-64" -supported_targets = "*" +default_target: "qemux86-64" +supported_targets: "*" -[sdk] -image = "test-image" +sdk: + image: "test-image" "#; let config = Config::load_from_str(config_content).unwrap(); @@ -4013,11 +4796,11 @@ image = "test-image" #[test] fn test_supported_targets_list_format() { let config_content = r#" -default_target = "qemux86-64" -supported_targets = ["qemux86-64", "qemuarm64", "raspberrypi4"] +default_target: "qemux86-64" +supported_targets: ["qemux86-64", "qemuarm64", "raspberrypi4"] -[sdk] -image = "test-image" +sdk: + image: "test-image" "#; let config = Config::load_from_str(config_content).unwrap(); @@ -4039,11 +4822,11 @@ image = "test-image" #[test] fn test_supported_targets_empty_list() { let config_content = r#" -default_target = "qemux86-64" -supported_targets = [] +default_target: "qemux86-64" +supported_targets: [] -[sdk] -image = "test-image" +sdk: + image: "test-image" "#; let config = Config::load_from_str(config_content).unwrap(); @@ -4058,10 +4841,10 @@ image = "test-image" #[test] fn test_supported_targets_missing() { let config_content = r#" 
-default_target = "qemux86-64" +default_target: "qemux86-64" -[sdk] -image = "test-image" +sdk: + image: "test-image" "#; let config = Config::load_from_str(config_content).unwrap(); @@ -4074,44 +4857,39 @@ image = "test-image" #[test] fn test_comprehensive_sdk_section() { let config_content = r#" -default_target = "qemux86-64" -supported_targets = ["qemux86-64", "qemuarm64"] +default_target: "qemux86-64" +supported_targets: ["qemux86-64", "qemuarm64"] -[sdk] -image = "base-sdk-image" -repo_url = "http://base-repo.com" -repo_release = "main" -container_args = ["--network=host", "--privileged"] - -[sdk.dependencies] -cmake = "*" -gcc = ">=9.0" -build-essential = "*" - -[sdk.compile.app] -compile = "make" - -[sdk.compile.app.dependencies] -libfoo = "*" -libbar = "1.2.3" - -[sdk.qemuarm64] -image = "arm64-sdk-image" -repo_url = "http://arm64-repo.com" -container_args = ["--cap-add=SYS_ADMIN"] - -[sdk.qemuarm64.dependencies] -gcc-aarch64-linux-gnu = "*" -qemu-user-static = "*" - -[sdk.qemuarm64.compile.app] -compile = "cross-make" - -[sdk.qemuarm64.compile.app.dependencies] -libfoo-dev-arm64 = "*" +sdk: + image: "base-sdk-image" + repo_url: "http://base-repo.com" + repo_release: "main" + container_args: ["--network=host", "--privileged"] + packages: + cmake: "*" + gcc: ">=9.0" + build-essential: "*" + compile: + app: + compile: "make" + packages: + libfoo: "*" + libbar: "1.2.3" + qemuarm64: + image: "arm64-sdk-image" + repo_url: "http://arm64-repo.com" + container_args: ["--cap-add=SYS_ADMIN"] + packages: + gcc-aarch64-linux-gnu: "*" + qemu-user-static: "*" + compile: + app: + compile: "cross-make" + packages: + libfoo-dev-arm64: "*" "#; - let temp_file = std::env::temp_dir().join("comprehensive_sdk_test.toml"); + let temp_file = std::env::temp_dir().join("comprehensive_sdk_test.yaml"); std::fs::write(&temp_file, config_content).unwrap(); let config_path = temp_file.to_str().unwrap(); @@ -4129,8 +4907,8 @@ libfoo-dev-arm64 = "*" assert_eq!(merged_x86.repo_release, 
Some("main".to_string())); assert_eq!(merged_x86.container_args.as_ref().unwrap().len(), 2); - // Test dependencies for base - let deps_x86 = merged_x86.dependencies.unwrap(); + // Test packages for base + let deps_x86 = merged_x86.packages.unwrap(); assert!(deps_x86.contains_key("cmake")); assert!(deps_x86.contains_key("gcc")); assert!(deps_x86.contains_key("build-essential")); @@ -4147,8 +4925,8 @@ libfoo-dev-arm64 = "*" assert_eq!(merged_arm64.repo_release, Some("main".to_string())); // Inherited assert_eq!(merged_arm64.container_args.as_ref().unwrap().len(), 1); // Overridden - // Test merged dependencies - let deps_arm64 = merged_arm64.dependencies.unwrap(); + // Test merged packages + let deps_arm64 = merged_arm64.packages.unwrap(); assert!(deps_arm64.contains_key("cmake")); // From base assert!(deps_arm64.contains_key("gcc")); // From base assert!(deps_arm64.contains_key("gcc-aarch64-linux-gnu")); // Target-specific @@ -4172,13 +4950,14 @@ libfoo-dev-arm64 = "*" fn test_has_compile_sections() { // Test with compile sections defined let config_with_compile = r#" -default_target = "qemux86-64" +default_target: "qemux86-64" -[sdk.compile.app] -compile = "make" - -[sdk.compile.app.dependencies] -libfoo = "*" +sdk: + compile: + app: + compile: "make" + packages: + libfoo: "*" "#; let config = Config::load_from_str(config_with_compile).unwrap(); @@ -4186,10 +4965,12 @@ libfoo = "*" // Test with compile sections but no dependencies let config_no_deps = r#" -default_target = "qemux86-64" +default_target: "qemux86-64" -[sdk.compile.app] -compile = "make" +sdk: + compile: + app: + compile: "make" "#; let config = Config::load_from_str(config_no_deps).unwrap(); @@ -4197,10 +4978,10 @@ compile = "make" // Test with no compile sections let config_no_compile = r#" -default_target = "qemux86-64" +default_target: "qemux86-64" -[sdk] -image = "my-sdk-image" +sdk: + image: "my-sdk-image" "#; let config = Config::load_from_str(config_no_compile).unwrap(); @@ -4208,7 +4989,7 @@ 
image = "my-sdk-image" // Test with empty config (minimal) let config_minimal = r#" -default_target = "qemux86-64" +default_target: "qemux86-64" "#; let config = Config::load_from_str(config_minimal).unwrap(); @@ -4218,44 +4999,38 @@ default_target = "qemux86-64" #[test] fn test_comprehensive_runtime_section() { let config_content = r#" -default_target = "qemux86-64" -supported_targets = ["qemux86-64", "qemuarm64"] - -[runtime.production] -target = "qemux86-64" -image_version = "v1.0.0" -boot_timeout = 30 - -[runtime.production.dependencies] -avocado-img-bootfiles = "*" -avocado-img-rootfs = "*" -base-system = ">=2.0" - -[runtime.production.qemuarm64] -target = "qemuarm64" -image_version = "v1.0.0-arm64" -memory = "2G" - -[runtime.production.qemuarm64.dependencies] -avocado-img-bootfiles-arm64 = "*" -arm64-specific-pkg = "*" +default_target: "qemux86-64" +supported_targets: ["qemux86-64", "qemuarm64"] -[runtime.development] -target = "qemux86-64" -debug_mode = true - -[runtime.development.dependencies] -debug-tools = "*" -gdb = "*" - -[runtime.development.qemuarm64] -cross_debug = true - -[runtime.development.qemuarm64.dependencies] -gdb-multiarch = "*" +runtimes: + production: + target: "qemux86-64" + image_version: "v1.0.0" + boot_timeout: 30 + packages: + avocado-img-bootfiles: "*" + avocado-img-rootfs: "*" + base-system: ">=2.0" + qemuarm64: + target: "qemuarm64" + image_version: "v1.0.0-arm64" + memory: "2G" + packages: + avocado-img-bootfiles-arm64: "*" + arm64-specific-pkg: "*" + development: + target: "qemux86-64" + debug_mode: true + packages: + debug-tools: "*" + gdb: "*" + qemuarm64: + cross_debug: true + packages: + gdb-multiarch: "*" "#; - let temp_file = std::env::temp_dir().join("comprehensive_runtime_test.toml"); + let temp_file = std::env::temp_dir().join("comprehensive_runtime_test.yaml"); std::fs::write(&temp_file, config_content).unwrap(); let config_path = temp_file.to_str().unwrap(); @@ -4348,28 +5123,26 @@ gdb-multiarch = "*" #[test] fn 
test_comprehensive_provision_section() { let config_content = r#" -default_target = "qemux86-64" -supported_targets = ["qemux86-64", "qemuarm64"] - -[provision.usb] -container_args = ["--privileged", "-v", "/dev:/dev"] -timeout = 300 -retry_count = 3 +default_target: "qemux86-64" +supported_targets: ["qemux86-64", "qemuarm64"] -[provision.usb.qemuarm64] -container_args = ["--cap-add=SYS_ADMIN", "-v", "/dev:/dev:ro"] -emulation_mode = true - -[provision.network] -container_args = ["--network=host"] -protocol = "ssh" - -[provision.network.qemuarm64] -protocol = "serial" -baud_rate = 115200 +provision_profiles: + usb: + container_args: ["--privileged", "-v", "/dev:/dev"] + timeout: 300 + retry_count: 3 + qemuarm64: + container_args: ["--cap-add=SYS_ADMIN", "-v", "/dev:/dev:ro"] + emulation_mode: true + network: + container_args: ["--network=host"] + protocol: "ssh" + qemuarm64: + protocol: "serial" + baud_rate: 115200 "#; - let temp_file = std::env::temp_dir().join("comprehensive_provision_test.toml"); + let temp_file = std::env::temp_dir().join("comprehensive_provision_test.yaml"); std::fs::write(&temp_file, config_content).unwrap(); let config_path = temp_file.to_str().unwrap(); @@ -4451,63 +5224,58 @@ baud_rate = 115200 #[test] fn test_comprehensive_ext_section() { let config_content = r#" -default_target = "qemux86-64" -supported_targets = ["qemux86-64", "qemuarm64"] - -[ext.avocado-dev] -version = "1.0.0" -types = ["sysext", "confext"] -scopes = ["system"] -overlay = "overlays/avocado-dev" -enable_services = ["sshd.socket"] -modprobe = ["nfs", "overlay"] - -[ext.avocado-dev.dependencies] -openssh = "*" -nfs-utils = "*" -debug-tools = ">=1.0" - -[ext.avocado-dev.sdk.dependencies] -nativesdk-openssh = "*" -nativesdk-gdb = "*" - -[ext.avocado-dev.users.root] -password = "" -shell = "/bin/bash" -home = "/root" - -[ext.avocado-dev.users.developer] -password = "dev123" -groups = ["wheel", "docker"] -home = "/home/developer" - -[ext.avocado-dev.groups.docker] -gid = 999 
- -[ext.avocado-dev.qemuarm64] -version = "1.0.0-arm64" -overlay = "overlays/avocado-dev-arm64" - -[ext.avocado-dev.qemuarm64.dependencies] -gdb-multiarch = "*" -arm64-debug-tools = "*" - -[ext.avocado-dev.qemuarm64.sdk.dependencies] -nativesdk-gdb-cross-aarch64 = "*" - -[ext.avocado-dev.qemuarm64.users.root] -password = "arm64-root" - -[ext.peridio] -version = "2.0.0" -types = ["confext"] -enable_services = ["peridiod.service"] - -[ext.peridio.qemuarm64] -enable_services = ["peridiod.service", "peridio-agent.service"] +default_target: "qemux86-64" +supported_targets: ["qemux86-64", "qemuarm64"] + +extensions: + avocado-dev: + version: "1.0.0" + types: ["sysext", "confext"] + scopes: ["system"] + overlay: "overlays/avocado-dev" + enable_services: ["sshd.socket"] + modprobe: ["nfs", "overlay"] + packages: + openssh: "*" + nfs-utils: "*" + debug-tools: ">=1.0" + sdk: + packages: + nativesdk-openssh: "*" + nativesdk-gdb: "*" + users: + root: + password: "" + shell: "/bin/bash" + home: "/root" + developer: + password: "dev123" + groups: ["wheel", "docker"] + home: "/home/developer" + groups: + docker: + gid: 999 + qemuarm64: + version: "1.0.0-arm64" + overlay: "overlays/avocado-dev-arm64" + packages: + gdb-multiarch: "*" + arm64-debug-tools: "*" + sdk: + packages: + nativesdk-gdb-cross-aarch64: "*" + users: + root: + password: "arm64-root" + peridio: + version: "2.0.0" + types: ["confext"] + enable_services: ["peridiod.service"] + qemuarm64: + enable_services: ["peridiod.service", "peridio-agent.service"] "#; - let temp_file = std::env::temp_dir().join("comprehensive_ext_test.toml"); + let temp_file = std::env::temp_dir().join("comprehensive_ext_test.yaml"); std::fs::write(&temp_file, config_content).unwrap(); let config_path = temp_file.to_str().unwrap(); @@ -4551,7 +5319,12 @@ enable_services = ["peridiod.service", "peridio-agent.service"] // Test nested dependencies merging let deps_x86 = config - .get_merged_nested_section("ext.avocado-dev", "dependencies", 
"qemux86-64", config_path) + .get_merged_nested_section( + "extensions.avocado-dev", + "packages", + "qemux86-64", + config_path, + ) .unwrap(); assert!(deps_x86.is_some()); let deps_x86_value = deps_x86.unwrap(); @@ -4561,7 +5334,12 @@ enable_services = ["peridiod.service", "peridio-agent.service"] assert!(!deps_x86_table.contains_key("gdb-multiarch")); let deps_arm64 = config - .get_merged_nested_section("ext.avocado-dev", "dependencies", "qemuarm64", config_path) + .get_merged_nested_section( + "extensions.avocado-dev", + "packages", + "qemuarm64", + config_path, + ) .unwrap(); assert!(deps_arm64.is_some()); let deps_arm64_value = deps_arm64.unwrap(); @@ -4573,8 +5351,8 @@ enable_services = ["peridiod.service", "peridio-agent.service"] // Test SDK dependencies merging let sdk_deps_x86 = config .get_merged_nested_section( - "ext.avocado-dev", - "sdk.dependencies", + "extensions.avocado-dev", + "sdk.packages", "qemux86-64", config_path, ) @@ -4588,8 +5366,8 @@ enable_services = ["peridiod.service", "peridio-agent.service"] let sdk_deps_arm64 = config .get_merged_nested_section( - "ext.avocado-dev", - "sdk.dependencies", + "extensions.avocado-dev", + "sdk.packages", "qemuarm64", config_path, ) @@ -4602,7 +5380,12 @@ enable_services = ["peridiod.service", "peridio-agent.service"] // Test users merging let users_root_x86 = config - .get_merged_nested_section("ext.avocado-dev", "users.root", "qemux86-64", config_path) + .get_merged_nested_section( + "extensions.avocado-dev", + "users.root", + "qemux86-64", + config_path, + ) .unwrap(); assert!(users_root_x86.is_some()); let users_root_x86_value = users_root_x86.unwrap(); @@ -4621,7 +5404,12 @@ enable_services = ["peridiod.service", "peridio-agent.service"] ); let users_root_arm64 = config - .get_merged_nested_section("ext.avocado-dev", "users.root", "qemuarm64", config_path) + .get_merged_nested_section( + "extensions.avocado-dev", + "users.root", + "qemuarm64", + config_path, + ) .unwrap(); 
assert!(users_root_arm64.is_some()); let users_root_arm64_value = users_root_arm64.unwrap(); @@ -4678,11 +5466,11 @@ enable_services = ["peridiod.service", "peridio-agent.service"] fn test_invalid_config_handling() { // Test invalid supported_targets format let invalid_supported_targets = r#" -default_target = "qemux86-64" -supported_targets = 123 # Invalid - not string or array +default_target: "qemux86-64" +supported_targets: 123 # Invalid - not string or array -[sdk] -image = "test" +sdk: + image: "test" "#; let result = Config::load_from_str(invalid_supported_targets); @@ -4690,12 +5478,12 @@ image = "test" // Test missing required fields let missing_sdk_image = r#" -default_target = "qemux86-64" -supported_targets = ["qemux86-64"] +default_target: "qemux86-64" +supported_targets: ["qemux86-64"] -[sdk] +sdk: # Missing image field -repo_url = "http://example.com" + repo_url: "http://example.com" "#; let config = Config::load_from_str(missing_sdk_image).unwrap(); @@ -4707,32 +5495,40 @@ repo_url = "http://example.com" assert!(result.default_target.is_none()); assert!(result.supported_targets.is_none()); assert!(result.sdk.is_none()); - assert!(result.runtime.is_none()); - assert!(result.provision.is_none()); + assert!(result.runtimes.is_none()); + assert!(result.provision_profiles.is_none()); } #[test] fn test_complex_nested_overrides() { let config_content = r#" -default_target = "qemux86-64" -supported_targets = ["qemux86-64", "qemuarm64", "raspberrypi4"] +default_target: "qemux86-64" +supported_targets: ["qemux86-64", "qemuarm64", "raspberrypi4"] # Complex nested structure with target-specific overrides -[ext.complex.level1.level2.level3] -base_value = "original" -shared_value = "base" - -[ext.complex.qemuarm64.level1.level2.level3] -override_value = "arm64-specific" -shared_value = "arm64-override" -nested_override = "arm64-nested" - -[ext.complex.raspberrypi4.level1.level2.level3] -rpi_specific = true -shared_value = "rpi-override" +extensions: + complex: + 
level1: + level2: + level3: + base_value: "original" + shared_value: "base" + qemuarm64: + level1: + level2: + level3: + override_value: "arm64-specific" + shared_value: "arm64-override" + nested_override: "arm64-nested" + raspberrypi4: + level1: + level2: + level3: + rpi_specific: true + shared_value: "rpi-override" "#; - let temp_file = std::env::temp_dir().join("complex_nested_test.toml"); + let temp_file = std::env::temp_dir().join("complex_nested_test.yaml"); std::fs::write(&temp_file, config_content).unwrap(); let config_path = temp_file.to_str().unwrap(); @@ -4741,7 +5537,7 @@ shared_value = "rpi-override" // Test x86-64 (base only) let x86_nested = config .get_merged_nested_section( - "ext.complex", + "extensions.complex", "level1.level2.level3", "qemux86-64", config_path, @@ -4764,7 +5560,7 @@ shared_value = "rpi-override" // Test ARM64 (has target-specific override) let arm64_nested = config .get_merged_nested_section( - "ext.complex", + "extensions.complex", "level1.level2.level3", "qemuarm64", config_path, @@ -4797,7 +5593,7 @@ shared_value = "rpi-override" // Test RaspberryPi4 (different target-specific override) let rpi_nested = config .get_merged_nested_section( - "ext.complex", + "extensions.complex", "level1.level2.level3", "raspberrypi4", config_path, @@ -4826,18 +5622,23 @@ shared_value = "rpi-override" fn test_edge_cases_and_error_conditions() { // Test configuration with only target-specific sections let target_only_config = r#" -default_target = "qemux86-64" -supported_targets = ["qemux86-64", "qemuarm64"] +default_target: "qemux86-64" +supported_targets: ["qemux86-64", "qemuarm64"] # Only target-specific sections, no base -[sdk.qemuarm64] -image = "arm64-only-sdk" +sdk: + qemuarm64: + image: "arm64-only-sdk" -[runtime.special.qemuarm64] -special_mode = true +runtimes: + special: + qemuarm64: + special_mode: true -[ext.arm-only.qemuarm64] -types = ["sysext"] +extensions: + arm-only: + qemuarm64: + types: ["sysext"] "#; let temp_file = 
std::env::temp_dir().join("target_only_edge_test.toml"); @@ -4896,29 +5697,30 @@ types = ["sysext"] fn test_nested_target_config_merging() { // Create a temporary config file with nested target-specific configuration let config_content = r#" -default_target = "qemux86-64" -supported_targets = ["qemux86-64", "reterminal-dm"] - -[sdk] -image = "ghcr.io/avocado-framework/avocado-sdk:latest" - -[runtime.default] -target = "x86_64-unknown-linux-gnu" - -[ext.avocado-ext-webkit] -version = "1.0.0" -release = "r0" -vendor = "Avocado Linux " -summary = "WPE WebKit browser and display utilities" -description = "WPE WebKit browser and display utilities" -license = "Apache-2.0" -url = "https://github.com/avocadolinux/avocado-ext" -types = ["sysext", "confext"] -enable_services = ["cog.service"] -on_merge = ["systemctl restart --no-block cog.service"] - -[ext.avocado-ext-webkit.reterminal-dm] -overlay = "extensions/webkit/overlays/reterminal-dm" +default_target: "qemux86-64" +supported_targets: ["qemux86-64", "reterminal-dm"] + +sdk: + image: "ghcr.io/avocado-framework/avocado-sdk:latest" + +runtimes: + default: + target: "x86_64-unknown-linux-gnu" + +extensions: + avocado-ext-webkit: + version: "1.0.0" + release: "r0" + vendor: "Avocado Linux " + summary: "WPE WebKit browser and display utilities" + description: "WPE WebKit browser and display utilities" + license: "Apache-2.0" + url: "https://github.com/avocadolinux/avocado-ext" + types: ["sysext", "confext"] + enable_services: ["cog.service"] + on_merge: ["systemctl restart --no-block cog.service"] + reterminal-dm: + overlay: "extensions/webkit/overlays/reterminal-dm" "#; let mut temp_file = NamedTempFile::new().unwrap(); @@ -5003,12 +5805,13 @@ overlay = "extensions/webkit/overlays/reterminal-dm" #[test] fn test_stone_include_paths_basic() { let config_content = r#" -[sdk] -image = "docker.io/avocadolinux/sdk:latest" +sdk: + image: "docker.io/avocadolinux/sdk:latest" -[runtime.test-runtime] -target = "x86_64" 
-stone_include_paths = ["stone-qemux86-64"] +runtimes: + test-runtime: + target: "x86_64" + stone_include_paths: ["stone-qemux86-64"] "#; let mut temp_file = NamedTempFile::new().unwrap(); @@ -5027,12 +5830,13 @@ stone_include_paths = ["stone-qemux86-64"] #[test] fn test_stone_include_paths_multiple() { let config_content = r#" -[sdk] -image = "docker.io/avocadolinux/sdk:latest" +sdk: + image: "docker.io/avocadolinux/sdk:latest" -[runtime.test-runtime] -target = "x86_64" -stone_include_paths = ["stone-a", "stone-b", "stone-c"] +runtimes: + test-runtime: + target: "x86_64" + stone_include_paths: ["stone-a", "stone-b", "stone-c"] "#; let mut temp_file = NamedTempFile::new().unwrap(); @@ -5051,11 +5855,12 @@ stone_include_paths = ["stone-a", "stone-b", "stone-c"] #[test] fn test_stone_include_paths_not_configured() { let config_content = r#" -[sdk] -image = "docker.io/avocadolinux/sdk:latest" +sdk: + image: "docker.io/avocadolinux/sdk:latest" -[runtime.test-runtime] -target = "x86_64" +runtimes: + test-runtime: + target: "x86_64" "#; let mut temp_file = NamedTempFile::new().unwrap(); @@ -5072,15 +5877,15 @@ target = "x86_64" #[test] fn test_stone_include_paths_target_specific_override() { let config_content = r#" -[sdk] -image = "docker.io/avocadolinux/sdk:latest" - -[runtime.test-runtime] -target = "x86_64" -stone_include_paths = ["stone-default"] - -[runtime.test-runtime.aarch64] -stone_include_paths = ["stone-aarch64"] +sdk: + image: "docker.io/avocadolinux/sdk:latest" + +runtimes: + test-runtime: + target: "x86_64" + stone_include_paths: ["stone-default"] + aarch64: + stone_include_paths: ["stone-aarch64"] "#; let mut temp_file = NamedTempFile::new().unwrap(); @@ -5107,14 +5912,14 @@ stone_include_paths = ["stone-aarch64"] fn test_stone_include_paths_user_example() { // Test the exact example from the user's request let config_content = r#" -[sdk] -image = "docker.io/avocadolinux/sdk:latest" - -[runtime.dev] -stone_include_paths = ["stone-common"] +sdk: + image: 
"docker.io/avocadolinux/sdk:latest" -[runtime.dev.qemux86-64] -stone_include_paths = ["stone-qemux86-64"] +runtimes: + dev: + stone_include_paths: ["stone-common"] + qemux86-64: + stone_include_paths: ["stone-qemux86-64"] "#; let mut temp_file = NamedTempFile::new().unwrap(); @@ -5147,12 +5952,13 @@ stone_include_paths = ["stone-qemux86-64"] #[test] fn test_stone_include_paths_empty_array() { let config_content = r#" -[sdk] -image = "docker.io/avocadolinux/sdk:latest" +sdk: + image: "docker.io/avocadolinux/sdk:latest" -[runtime.test-runtime] -target = "x86_64" -stone_include_paths = [] +runtimes: + test-runtime: + target: "x86_64" + stone_include_paths: [] "#; let mut temp_file = NamedTempFile::new().unwrap(); @@ -5170,12 +5976,13 @@ stone_include_paths = [] #[test] fn test_stone_manifest_basic() { let config_content = r#" -[sdk] -image = "docker.io/avocadolinux/sdk:latest" +sdk: + image: "docker.io/avocadolinux/sdk:latest" -[runtime.test-runtime] -target = "x86_64" -stone_manifest = "stone-manifest.json" +runtimes: + test-runtime: + target: "x86_64" + stone_manifest: "stone-manifest.json" "#; let mut temp_file = NamedTempFile::new().unwrap(); @@ -5193,11 +6000,12 @@ stone_manifest = "stone-manifest.json" #[test] fn test_stone_manifest_not_configured() { let config_content = r#" -[sdk] -image = "docker.io/avocadolinux/sdk:latest" +sdk: + image: "docker.io/avocadolinux/sdk:latest" -[runtime.test-runtime] -target = "x86_64" +runtimes: + test-runtime: + target: "x86_64" "#; let mut temp_file = NamedTempFile::new().unwrap(); @@ -5215,14 +6023,14 @@ target = "x86_64" fn test_stone_manifest_target_specific_override() { // Test the exact example from the user's request let config_content = r#" -[sdk] -image = "docker.io/avocadolinux/sdk:latest" - -[runtime.dev] -stone_manifest = "stone-common.json" +sdk: + image: "docker.io/avocadolinux/sdk:latest" -[runtime.dev.qemux86-64] -stone_manifest = "stone-qemux86-64.json" +runtimes: + dev: + stone_manifest: "stone-common.json" + 
qemux86-64: + stone_manifest: "stone-qemux86-64.json" "#; let mut temp_file = NamedTempFile::new().unwrap(); @@ -5259,14 +6067,14 @@ stone_manifest = "stone-qemux86-64.json" fn test_stone_manifest_only_target_specific() { // Test when stone_manifest is only defined in target-specific section let config_content = r#" -[sdk] -image = "docker.io/avocadolinux/sdk:latest" - -[runtime.dev] -target = "x86_64" +sdk: + image: "docker.io/avocadolinux/sdk:latest" -[runtime.dev.qemux86-64] -stone_manifest = "stone-qemux86-64.json" +runtimes: + dev: + target: "x86_64" + qemux86-64: + stone_manifest: "stone-qemux86-64.json" "#; let mut temp_file = NamedTempFile::new().unwrap(); @@ -5383,7 +6191,7 @@ sdk: fn test_container_args_provision_as_string() { // Test that provision profile container_args also supports string format let config_content = r#" -provision: +provision_profiles: usb: container_args: "-v /dev:/dev -v /sys:/sys:ro --privileged" "#; @@ -5484,7 +6292,7 @@ signing_keys: - my-production-key: {production_keyid} - backup-key: {backup_keyid} -runtime: +runtimes: dev: signing: key: my-production-key @@ -5539,21 +6347,21 @@ runtime: assert_eq!(runtime_key, Some(production_keyid.to_string())); // Test runtime signing config - let runtime = config.runtime.as_ref().unwrap().get("dev").unwrap(); + let runtime = config.runtimes.as_ref().unwrap().get("dev").unwrap(); assert!(runtime.signing.is_some()); let signing = runtime.signing.as_ref().unwrap(); assert_eq!(signing.key, "my-production-key"); assert_eq!(signing.checksum_algorithm, "sha256"); // Test production runtime with blake3 - let production = config.runtime.as_ref().unwrap().get("production").unwrap(); + let production = config.runtimes.as_ref().unwrap().get("production").unwrap(); assert!(production.signing.is_some()); let prod_signing = production.signing.as_ref().unwrap(); assert_eq!(prod_signing.key, "backup-key"); assert_eq!(prod_signing.checksum_algorithm, "blake3"); // Test staging runtime with default 
checksum_algorithm - let staging = config.runtime.as_ref().unwrap().get("staging").unwrap(); + let staging = config.runtimes.as_ref().unwrap().get("staging").unwrap(); assert!(staging.signing.is_some()); let staging_signing = staging.signing.as_ref().unwrap(); assert_eq!(staging_signing.key, "my-production-key"); @@ -5589,7 +6397,7 @@ default_target: qemux86-64 sdk: image: ghcr.io/avocado-framework/avocado-sdk:latest -runtime: +runtimes: dev: signing: key: my-key @@ -5597,7 +6405,7 @@ runtime: signing: key: production-key no-signing: - dependencies: + packages: some-package: '*' "#; @@ -5641,7 +6449,7 @@ sdk: signing_keys: - existing-key: {keyid} -runtime: +runtimes: dev: signing: key: missing-key @@ -5675,45 +6483,52 @@ runtime: #[test] fn test_discover_external_config_refs_from_runtime() { + // New format: external config refs are defined in extensions section with source: path let config_content = r#" -runtime: +runtimes: prod: target: qemux86-64 - dependencies: - peridio: - ext: avocado-ext-peridio - config: avocado-ext-peridio/avocado.yml - local-ext: - ext: local-extension + extensions: + - avocado-ext-peridio + - local-extension +extensions: + avocado-ext-peridio: + source: + type: path + path: avocado-ext-peridio + local-extension: + version: "1.0.0" "#; let parsed: serde_yaml::Value = serde_yaml::from_str(config_content).unwrap(); let refs = Config::discover_external_config_refs(&parsed); - assert_eq!(refs.len(), 1); - assert_eq!(refs[0].0, "avocado-ext-peridio"); - assert_eq!(refs[0].1, "avocado-ext-peridio/avocado.yml"); + // In the new format, source: path extensions are handled differently + // so this test verifies no deprecated refs are found + assert_eq!(refs.len(), 0); } #[test] fn test_discover_external_config_refs_from_ext() { + // New format: external config refs are no longer in packages, they use source: path let config_content = r#" -ext: +extensions: main-ext: types: - sysext - dependencies: - external-dep: - ext: external-extension - 
config: external/config.yaml + packages: + some-package: "*" + external-extension: + source: + type: path + path: external "#; let parsed: serde_yaml::Value = serde_yaml::from_str(config_content).unwrap(); let refs = Config::discover_external_config_refs(&parsed); - assert_eq!(refs.len(), 1); - assert_eq!(refs[0].0, "external-extension"); - assert_eq!(refs[0].1, "external/config.yaml"); + // In the new format, source: path extensions are handled differently + assert_eq!(refs.len(), 0); } #[test] @@ -5721,13 +6536,13 @@ ext: let main_config_content = r#" distro: version: "1.0.0" -ext: +extensions: local-ext: types: - sysext "#; let external_config_content = r#" -ext: +extensions: external-ext: types: - sysext @@ -5738,10 +6553,11 @@ ext: let external_config: serde_yaml::Value = serde_yaml::from_str(external_config_content).unwrap(); - Config::merge_external_config(&mut main_config, &external_config, "external-ext"); + // Use empty include patterns - ext. is always merged + Config::merge_external_config(&mut main_config, &external_config, "external-ext", &[], &[]); // Check that both extensions are present - let ext_section = main_config.get("ext").unwrap().as_mapping().unwrap(); + let ext_section = main_config.get("extensions").unwrap().as_mapping().unwrap(); assert!(ext_section.contains_key(serde_yaml::Value::String("local-ext".to_string()))); assert!(ext_section.contains_key(serde_yaml::Value::String("external-ext".to_string()))); } @@ -5751,12 +6567,12 @@ ext: let main_config_content = r#" sdk: image: test-image - dependencies: + packages: main-package: "*" "#; let external_config_content = r#" sdk: - dependencies: + packages: external-package: "1.0.0" main-package: "2.0.0" # Should not override main config "#; @@ -5765,12 +6581,20 @@ sdk: let external_config: serde_yaml::Value = serde_yaml::from_str(external_config_content).unwrap(); - Config::merge_external_config(&mut main_config, &external_config, "test-ext"); + // Include sdk.packages.* to merge SDK packages 
+ let include_patterns = vec!["sdk.packages.*".to_string()]; + Config::merge_external_config( + &mut main_config, + &external_config, + "test-ext", + &include_patterns, + &[], + ); let sdk_deps = main_config .get("sdk") .unwrap() - .get("dependencies") + .get("packages") .unwrap() .as_mapping() .unwrap(); @@ -5812,7 +6636,15 @@ distro: let external_config: serde_yaml::Value = serde_yaml::from_str(external_config_content).unwrap(); - Config::merge_external_config(&mut main_config, &external_config, "test-ext"); + // Distro is never merged regardless of include patterns + let include_patterns = vec!["distro.*".to_string()]; // Even with this, distro won't be merged + Config::merge_external_config( + &mut main_config, + &external_config, + "test-ext", + &include_patterns, + &[], + ); // Distro should remain unchanged from main config let distro = main_config.get("distro").unwrap(); @@ -5820,92 +6652,315 @@ distro: assert_eq!(distro.get("channel").unwrap().as_str(), Some("stable")); } + // NOTE: test_load_composed_with_interpolation was removed as it tested the deprecated + // config: path syntax for external extension loading. The new format uses source: path + // in the extensions section and this functionality is handled by ext fetch. 
+ #[test] - fn test_load_composed_with_interpolation() { - use tempfile::TempDir; + fn test_extension_source_get_include_patterns() { + // Test Repo variant with include patterns + let source = ExtensionSource::Package { + version: "*".to_string(), + package: None, + repo_name: None, + include: Some(vec![ + "provision_profiles.tegraflash".to_string(), + "sdk.compile.*".to_string(), + ]), + }; + let patterns = source.get_include_patterns(); + assert_eq!(patterns.len(), 2); + assert_eq!(patterns[0], "provision_profiles.tegraflash"); + assert_eq!(patterns[1], "sdk.compile.*"); + + // Test Repo variant without include patterns + let source_no_include = ExtensionSource::Package { + version: "*".to_string(), + package: None, + repo_name: None, + include: None, + }; + assert!(source_no_include.get_include_patterns().is_empty()); + + // Test Git variant with include patterns + let git_source = ExtensionSource::Git { + url: "https://example.com/repo.git".to_string(), + git_ref: Some("main".to_string()), + sparse_checkout: None, + include: Some(vec!["provision_profiles.*".to_string()]), + }; + assert_eq!(git_source.get_include_patterns().len(), 1); - // Create a temp directory for our test configs - let temp_dir = TempDir::new().unwrap(); + // Test Path variant with include patterns + let path_source = ExtensionSource::Path { + path: "./external".to_string(), + include: Some(vec!["sdk.packages.*".to_string()]), + }; + assert_eq!(path_source.get_include_patterns().len(), 1); + } - // Create main config - let main_config_content = r#" -distro: - version: "1.0.0" - channel: apollo-edge -default_target: qemux86-64 -sdk: - image: "docker.io/test:{{ config.distro.channel }}" - dependencies: - main-sdk-dep: "*" -runtime: - prod: - target: qemux86-64 - dependencies: - peridio: - ext: test-ext - config: external/avocado.yml -"#; - let main_config_path = temp_dir.path().join("avocado.yaml"); - std::fs::write(&main_config_path, main_config_content).unwrap(); + #[test] + fn 
test_matches_include_pattern_exact() { + let patterns = vec![ + "provision_profiles.tegraflash".to_string(), + "sdk.compile.nvidia-l4t".to_string(), + ]; + + // Exact matches should return true + assert!(ExtensionSource::matches_include_pattern( + "provision_profiles.tegraflash", + &patterns + )); + assert!(ExtensionSource::matches_include_pattern( + "sdk.compile.nvidia-l4t", + &patterns + )); + + // Non-matches should return false + assert!(!ExtensionSource::matches_include_pattern( + "provision_profiles.usb", + &patterns + )); + assert!(!ExtensionSource::matches_include_pattern( + "sdk.compile.other", + &patterns + )); + assert!(!ExtensionSource::matches_include_pattern( + "provision", + &patterns + )); + } + + #[test] + fn test_matches_include_pattern_wildcard() { + let patterns = vec![ + "provision_profiles.*".to_string(), + "sdk.compile.*".to_string(), + ]; - // Create external config directory and file - let external_dir = temp_dir.path().join("external"); - std::fs::create_dir_all(&external_dir).unwrap(); + // Wildcard matches should work + assert!(ExtensionSource::matches_include_pattern( + "provision_profiles.tegraflash", + &patterns + )); + assert!(ExtensionSource::matches_include_pattern( + "provision_profiles.usb", + &patterns + )); + assert!(ExtensionSource::matches_include_pattern( + "sdk.compile.nvidia-l4t", + &patterns + )); + assert!(ExtensionSource::matches_include_pattern( + "sdk.compile.custom-lib", + &patterns + )); + + // Non-matches should return false + assert!(!ExtensionSource::matches_include_pattern( + "sdk.packages.package1", + &patterns + )); + assert!(!ExtensionSource::matches_include_pattern( + "runtimes.prod", + &patterns + )); + + // Partial prefix matches without proper dot separator should not match + assert!(!ExtensionSource::matches_include_pattern( + "provisionExtra", + &patterns + )); + } + + #[test] + fn test_matches_include_pattern_empty() { + let empty_patterns: Vec = vec![]; + + // Empty patterns should never match + 
assert!(!ExtensionSource::matches_include_pattern( + "provision_profiles.tegraflash", + &empty_patterns + )); + assert!(!ExtensionSource::matches_include_pattern( + "anything", + &empty_patterns + )); + } + #[test] + fn test_merge_external_config_with_include_patterns() { + let main_config_content = r#" +extensions: + local-ext: + types: + - sysext +provision_profiles: + existing-profile: + script: provision.sh +"#; let external_config_content = r#" -ext: - test-ext: - version: "{{ config.distro.version }}" +extensions: + remote-ext: types: - sysext + packages: + some-dep: "*" +provision_profiles: + tegraflash: + script: flash.sh + usb: + script: usb-provision.sh sdk: - dependencies: - external-sdk-dep: "*" + packages: + external-dep: "*" + compile: + nvidia-l4t: + compile: build.sh "#; - let external_config_path = external_dir.join("avocado.yml"); - std::fs::write(&external_config_path, external_config_content).unwrap(); - // Load composed config - let composed = Config::load_composed(&main_config_path, Some("qemux86-64")).unwrap(); + let mut main_config: serde_yaml::Value = serde_yaml::from_str(main_config_content).unwrap(); + let external_config: serde_yaml::Value = + serde_yaml::from_str(external_config_content).unwrap(); - // Verify the SDK image was interpolated using main config's distro - assert_eq!( - composed - .config - .sdk - .as_ref() - .unwrap() - .image - .as_ref() - .unwrap(), - "docker.io/test:apollo-edge" + // Only include provision_profiles.tegraflash (not provision_profiles.usb) + let include_patterns = vec!["provision_profiles.tegraflash".to_string()]; + Config::merge_external_config( + &mut main_config, + &external_config, + "remote-ext", + &include_patterns, + &[], ); - // Verify the external extension was merged - let ext_section = composed - .merged_value - .get("ext") + // Check that ext.remote-ext was merged (always happens) + let ext_section = main_config.get("extensions").unwrap().as_mapping().unwrap(); + 
assert!(ext_section.contains_key(serde_yaml::Value::String("remote-ext".to_string()))); + + // Check that provision_profiles.tegraflash was included + let provision = main_config + .get("provision_profiles") .unwrap() .as_mapping() .unwrap(); - assert!(ext_section.contains_key(serde_yaml::Value::String("test-ext".to_string()))); + assert!(provision.contains_key(serde_yaml::Value::String("tegraflash".to_string()))); + assert!(provision.contains_key(serde_yaml::Value::String("existing-profile".to_string()))); - // Verify the external extension's version was interpolated from main config's distro - let test_ext = ext_section - .get(serde_yaml::Value::String("test-ext".to_string())) - .unwrap(); - assert_eq!(test_ext.get("version").unwrap().as_str(), Some("1.0.0")); + // Check that provision.usb was NOT included (not in patterns) + assert!(!provision.contains_key(serde_yaml::Value::String("usb".to_string()))); + + // Check that sdk.packages was NOT merged (not in patterns) + assert!(main_config.get("sdk").is_none()); + } + + #[test] + fn test_merge_external_config_auto_include_compile() { + let main_config_content = r#" +extensions: + local-ext: + types: + - sysext +"#; + let external_config_content = r#" +extensions: + remote-ext: + types: + - sysext + packages: + nvidia-l4t: + compile: nvidia-l4t +sdk: + compile: + nvidia-l4t: + compile: build-nvidia.sh + other-lib: + compile: build-other.sh +"#; + + let mut main_config: serde_yaml::Value = serde_yaml::from_str(main_config_content).unwrap(); + let external_config: serde_yaml::Value = + serde_yaml::from_str(external_config_content).unwrap(); + + // Use auto_include_compile to include nvidia-l4t + let auto_include = vec!["nvidia-l4t".to_string()]; + Config::merge_external_config( + &mut main_config, + &external_config, + "remote-ext", + &[], // No explicit include patterns + &auto_include, // Auto-include nvidia-l4t compile section + ); - // Verify SDK dependencies were merged - let sdk_deps = composed - .merged_value 
+ // Check that sdk.compile.nvidia-l4t was included + let sdk_compile = main_config .get("sdk") .unwrap() - .get("dependencies") + .get("compile") .unwrap() .as_mapping() .unwrap(); - assert!(sdk_deps.contains_key(serde_yaml::Value::String("main-sdk-dep".to_string()))); - assert!(sdk_deps.contains_key(serde_yaml::Value::String("external-sdk-dep".to_string()))); + assert!(sdk_compile.contains_key(serde_yaml::Value::String("nvidia-l4t".to_string()))); + + // Check that sdk.compile.other-lib was NOT included + assert!(!sdk_compile.contains_key(serde_yaml::Value::String("other-lib".to_string()))); + } + + #[test] + fn test_find_compile_dependencies_in_ext() { + let ext_config_content = r#" +extensions: + my-extension: + packages: + nvidia-l4t: + compile: nvidia-l4t + some-package: + version: "1.0" + custom-lib: + compile: custom-compile-section +"#; + let ext_config: serde_yaml::Value = serde_yaml::from_str(ext_config_content).unwrap(); + + let compile_deps = Config::find_compile_dependencies_in_ext(&ext_config, "my-extension"); + + assert_eq!(compile_deps.len(), 2); + assert!(compile_deps.contains(&"nvidia-l4t".to_string())); + assert!(compile_deps.contains(&"custom-compile-section".to_string())); + } + + #[test] + fn test_extension_source_include_serialization() { + let source = ExtensionSource::Package { + version: "*".to_string(), + package: None, + repo_name: None, + include: Some(vec![ + "provision_profiles.tegraflash".to_string(), + "sdk.compile.*".to_string(), + ]), + }; + + let serialized = serde_yaml::to_string(&source).unwrap(); + assert!(serialized.contains("include:")); + assert!(serialized.contains("provision_profiles.tegraflash")); + assert!(serialized.contains("sdk.compile.*")); + + // Test deserialization + let yaml_content = r#" +type: repo +version: "*" +include: + - provision_profiles.tegraflash + - sdk.compile.* +"#; + let deserialized: ExtensionSource = serde_yaml::from_str(yaml_content).unwrap(); + match deserialized { + ExtensionSource::Package 
{ include, .. } => { + assert!(include.is_some()); + let patterns = include.unwrap(); + assert_eq!(patterns.len(), 2); + assert_eq!(patterns[0], "provision_profiles.tegraflash"); + } + _ => panic!("Expected Repo variant"), + } } } diff --git a/src/utils/container.rs b/src/utils/container.rs index c37f764..e90ff10 100644 --- a/src/utils/container.rs +++ b/src/utils/container.rs @@ -55,6 +55,65 @@ pub fn is_docker_desktop() -> bool { cfg!(target_os = "macos") || cfg!(target_os = "windows") } +/// Convert SDK arch specification to Docker platform format. +/// +/// # Arguments +/// * `sdk_arch` - Architecture string (e.g., "aarch64", "x86-64", "arm64", "amd64") +/// +/// # Returns +/// Docker platform string (e.g., "linux/arm64", "linux/amd64") +pub fn sdk_arch_to_platform(sdk_arch: &str) -> Result { + match sdk_arch.to_lowercase().as_str() { + "aarch64" | "arm64" => Ok("linux/arm64".to_string()), + "x86-64" | "x86_64" | "amd64" => Ok("linux/amd64".to_string()), + _ => Err(anyhow::anyhow!( + "Unsupported SDK architecture: '{sdk_arch}'. Supported values: aarch64, x86-64" + )), + } +} + +/// Normalize SDK arch specification to standard architecture name. +/// +/// # Arguments +/// * `sdk_arch` - Architecture string (e.g., "aarch64", "x86-64", "arm64", "amd64") +/// +/// # Returns +/// Normalized architecture name (e.g., "aarch64", "x86_64") +pub fn normalize_sdk_arch(sdk_arch: &str) -> Result { + match sdk_arch.to_lowercase().as_str() { + "aarch64" | "arm64" => Ok("aarch64".to_string()), + "x86-64" | "x86_64" | "amd64" => Ok("x86_64".to_string()), + _ => Err(anyhow::anyhow!( + "Unsupported SDK architecture: '{sdk_arch}'. Supported values: aarch64, x86-64" + )), + } +} + +/// Get the host's native platform in Docker format (e.g., "linux/amd64" or "linux/arm64"). +/// This is used to explicitly request the native platform variant from multi-arch images, +/// ensuring Docker keeps both variants cached when switching between native and emulated runs. 
+pub fn get_host_platform() -> String { + let arch = std::env::consts::ARCH; + match arch { + "x86_64" => "linux/amd64".to_string(), + "aarch64" => "linux/arm64".to_string(), + // Fallback for other architectures + "arm" => "linux/arm/v7".to_string(), + "riscv64" => "linux/riscv64".to_string(), + _ => format!("linux/{arch}"), + } +} + +/// Get the platform to use for container execution. +/// If sdk_arch is specified, use that platform (for cross-arch emulation). +/// Otherwise, use the host's native platform to ensure Docker pulls/uses the correct variant. +pub fn get_container_platform(sdk_arch: Option<&str>) -> Result { + match sdk_arch { + Some(arch) => sdk_arch_to_platform(arch), + None => Ok(get_host_platform()), + } +} + /// Add security options to container command based on host security module. /// - SELinux (Fedora/RHEL): adds --security-opt label=disable /// - AppArmor (Ubuntu/Debian): adds --security-opt apparmor=unconfined @@ -102,6 +161,11 @@ pub struct RunConfig { pub runs_on: Option, /// NFS port for remote execution (auto-selected if None) pub nfs_port: Option, + /// SDK container architecture for cross-arch emulation (e.g., "aarch64", "x86-64") + pub sdk_arch: Option, + /// Extension source paths to mount via bindfs (extension name -> host path) + /// These are mounted at /mnt/ext/ and bindfs'd to $AVOCADO_PREFIX/includes/ + pub ext_path_mounts: Option>, } impl Default for RunConfig { @@ -132,6 +196,8 @@ impl Default for RunConfig { signing_checksum_algorithm: None, runs_on: None, nfs_port: None, + sdk_arch: None, + ext_path_mounts: None, } } } @@ -190,6 +256,44 @@ impl SdkContainer { Ok(Self::new().with_src_dir(src_dir)) } + /// Load extension path mounts from the state file + /// + /// Returns a HashMap of extension name -> host path for extensions that use + /// `source: { type: path }` and were registered via `avocado ext fetch`. 
+ /// + /// These paths should be added to `RunConfig.ext_path_mounts` so they get + /// mounted via bindfs at container runtime. + pub fn load_ext_path_mounts(&self) -> Option> { + use crate::utils::ext_fetch::ExtensionPathState; + + let src_dir = self.src_dir.as_ref().unwrap_or(&self.cwd); + + match ExtensionPathState::load_from_dir(src_dir) { + Ok(Some(state)) if !state.path_mounts.is_empty() => { + if self.verbose { + print_info( + &format!( + "Loaded {} extension path mount(s) from state file", + state.path_mounts.len() + ), + OutputLevel::Normal, + ); + } + Some(state.path_mounts) + } + Ok(_) => None, + Err(e) => { + if self.verbose { + print_info( + &format!("Warning: Failed to load extension path state: {e}"), + OutputLevel::Normal, + ); + } + None + } + } + } + /// Create a shared RunsOnContext for running multiple commands on a remote host /// /// This sets up the NFS server and remote volumes once, which can then be reused @@ -314,9 +418,17 @@ impl SdkContainer { config.container_name.clone() }; - // Create a modified config with the container name + // Auto-populate ext_path_mounts from state file if not already set + let effective_ext_path_mounts = if config.ext_path_mounts.is_some() { + config.ext_path_mounts.clone() + } else { + self.load_ext_path_mounts() + }; + + // Create a modified config with the container name and ext_path_mounts let effective_config = RunConfig { container_name: effective_container_name.clone(), + ext_path_mounts: effective_ext_path_mounts, ..config }; @@ -494,6 +606,13 @@ impl SdkContainer { "label=disable".to_string(), ]; + // Always add platform flag to ensure Docker uses the correct image variant. + // This prevents Docker from caching only one variant when switching between + // native and cross-arch emulated runs. 
+ let platform = get_container_platform(config.sdk_arch.as_deref())?; + extra_args.push("--platform".to_string()); + extra_args.push(platform); + if let Some(ref args) = config.container_args { extra_args.extend(args.clone()); } @@ -536,6 +655,13 @@ impl SdkContainer { ) -> Result> { let mut container_cmd = vec![self.container_tool.clone(), "run".to_string()]; + // Always add platform flag to ensure Docker uses the correct image variant. + // This prevents Docker from caching only one variant when switching between + // native and cross-arch emulated runs. + let platform = get_container_platform(config.sdk_arch.as_deref())?; + container_cmd.push("--platform".to_string()); + container_cmd.push(platform); + // Container options if config.rm { container_cmd.push("--rm".to_string()); @@ -603,6 +729,30 @@ impl SdkContainer { None }; + // Mount extension source paths for bindfs + // Each extension path is mounted at /mnt/ext/ and will be bindfs'd + // to $AVOCADO_PREFIX/includes/ in the entrypoint script + let mut ext_path_names: Vec = Vec::new(); + if let Some(ref ext_mounts) = config.ext_path_mounts { + for (ext_name, host_path) in ext_mounts { + container_cmd.push("-v".to_string()); + container_cmd.push(format!("{}:/mnt/ext/{}:rw", host_path.display(), ext_name)); + ext_path_names.push(ext_name.clone()); + + if self.verbose { + print_info( + &format!( + "Mounting extension '{}' source: {} -> /mnt/ext/{}", + ext_name, + host_path.display(), + ext_name + ), + OutputLevel::Normal, + ); + } + } + } + // Note: Working directory is handled in the entrypoint script based on sysroot parameters // Add environment variables @@ -644,6 +794,16 @@ impl SdkContainer { container_cmd.push(format!("AVOCADO_SIGNING_KEYS_DIR={keys_dir}")); } + // Add extension path mounts env var for entrypoint script + // This is a space-separated list of extension names that have bindfs mounts + if !ext_path_names.is_empty() { + container_cmd.push("-e".to_string()); + container_cmd.push(format!( + 
"AVOCADO_EXT_PATH_MOUNTS={}", + ext_path_names.join(" ") + )); + } + for (key, value) in env_vars { container_cmd.push("-e".to_string()); container_cmd.push(format!("{key}={value}")); @@ -727,11 +887,24 @@ impl SdkContainer { let bash_cmd = vec!["bash".to_string(), "-c".to_string(), full_command]; + // Auto-populate ext_path_mounts from state file if not already set + let effective_ext_path_mounts = if config.ext_path_mounts.is_some() { + config.ext_path_mounts.clone() + } else { + self.load_ext_path_mounts() + }; + + // Create effective config with ext_path_mounts + let effective_config = RunConfig { + ext_path_mounts: effective_ext_path_mounts, + ..config + }; + // Build container command with volume state let container_cmd = - self.build_container_command(&config, &bash_cmd, &env_vars, &volume_state)?; + self.build_container_command(&effective_config, &bash_cmd, &env_vars, &volume_state)?; - if config.verbose || self.verbose { + if effective_config.verbose || self.verbose { print_info( &format!( "Mounting source directory: {} -> /mnt/src (bindfs -> /opt/src)", @@ -793,6 +966,7 @@ impl SdkContainer { /// * `repo_release` - Optional repository release /// * `container_args` - Optional additional container arguments /// * `runs_on_context` - Optional remote execution context for --runs-on support + /// * `sdk_arch` - Optional SDK architecture for cross-arch emulation /// /// # Returns /// A HashMap of package name to version string (NEVRA format without name prefix) @@ -807,6 +981,7 @@ impl SdkContainer { repo_release: Option, container_args: Option>, runs_on_context: Option<&crate::utils::runs_on::RunsOnContext>, + sdk_arch: Option<&String>, ) -> Result> { if packages.is_empty() { return Ok(std::collections::HashMap::new()); @@ -837,6 +1012,7 @@ impl SdkContainer { repo_url, repo_release, container_args, + sdk_arch: sdk_arch.cloned(), ..Default::default() }; @@ -966,6 +1142,13 @@ impl SdkContainer { "label=disable".to_string(), ]; + // Always add platform flag to 
ensure Docker uses the correct image variant. + // This prevents Docker from caching only one variant when switching between + // native and cross-arch emulated runs. + let platform = get_container_platform(config.sdk_arch.as_deref())?; + extra_args.push("--platform".to_string()); + extra_args.push(platform); + if let Some(ref args) = config.container_args { extra_args.extend(args.clone()); } @@ -1274,6 +1457,35 @@ else if [ -n "$AVOCADO_VERBOSE" ]; then echo "[INFO] Mounted /mnt/src -> /opt/src (no UID/GID mapping)"; fi fi +# Mount extension source paths with bindfs (for path-based remote extensions) +# These are mounted at /mnt/ext/ and need to be bindfs'd to $AVOCADO_PREFIX/includes/ +if [ -n "$AVOCADO_EXT_PATH_MOUNTS" ]; then + # AVOCADO_PREFIX must be set before this - use the target from environment + EXT_PREFIX="/opt/_avocado/${{AVOCADO_TARGET}}/includes" + for ext_name in $AVOCADO_EXT_PATH_MOUNTS; do + mnt_path="/mnt/ext/$ext_name" + target_path="$EXT_PREFIX/$ext_name" + + if [ -d "$mnt_path" ]; then + mkdir -p "$target_path" + if [ -n "$AVOCADO_HOST_UID" ] && [ -n "$AVOCADO_HOST_GID" ]; then + if [ "$AVOCADO_HOST_UID" = "0" ] && [ "$AVOCADO_HOST_GID" = "0" ]; then + mount --bind "$mnt_path" "$target_path" + if [ -n "$AVOCADO_VERBOSE" ]; then echo "[INFO] Mounted extension '$ext_name': $mnt_path -> $target_path (host is root)"; fi + else + bindfs --map=$AVOCADO_HOST_UID/0:@$AVOCADO_HOST_GID/@0 "$mnt_path" "$target_path" + if [ -n "$AVOCADO_VERBOSE" ]; then echo "[INFO] Mounted extension '$ext_name': $mnt_path -> $target_path with UID/GID mapping"; fi + fi + else + mount --bind "$mnt_path" "$target_path" + if [ -n "$AVOCADO_VERBOSE" ]; then echo "[INFO] Mounted extension '$ext_name': $mnt_path -> $target_path (no UID/GID mapping)"; fi + fi + else + echo "[WARNING] Extension mount path not found: $mnt_path" + fi + done +fi + # Get repo url from environment or default to prod if [ -n "$AVOCADO_SDK_REPO_URL" ]; then REPO_URL="$AVOCADO_SDK_REPO_URL" @@ -1466,6 
+1678,35 @@ else if [ -n "$AVOCADO_VERBOSE" ]; then echo "[INFO] Mounted /mnt/src -> /opt/src (no UID/GID mapping)"; fi fi +# Mount extension source paths with bindfs (for path-based remote extensions) +# These are mounted at /mnt/ext/ and need to be bindfs'd to $AVOCADO_PREFIX/includes/ +if [ -n "$AVOCADO_EXT_PATH_MOUNTS" ]; then + # AVOCADO_PREFIX must be set before this - use the target from environment + EXT_PREFIX="/opt/_avocado/${{AVOCADO_TARGET}}/includes" + for ext_name in $AVOCADO_EXT_PATH_MOUNTS; do + mnt_path="/mnt/ext/$ext_name" + target_path="$EXT_PREFIX/$ext_name" + + if [ -d "$mnt_path" ]; then + mkdir -p "$target_path" + if [ -n "$AVOCADO_HOST_UID" ] && [ -n "$AVOCADO_HOST_GID" ]; then + if [ "$AVOCADO_HOST_UID" = "0" ] && [ "$AVOCADO_HOST_GID" = "0" ]; then + mount --bind "$mnt_path" "$target_path" + if [ -n "$AVOCADO_VERBOSE" ]; then echo "[INFO] Mounted extension '$ext_name': $mnt_path -> $target_path (host is root)"; fi + else + bindfs --map=$AVOCADO_HOST_UID/0:@$AVOCADO_HOST_GID/@0 "$mnt_path" "$target_path" + if [ -n "$AVOCADO_VERBOSE" ]; then echo "[INFO] Mounted extension '$ext_name': $mnt_path -> $target_path with UID/GID mapping"; fi + fi + else + mount --bind "$mnt_path" "$target_path" + if [ -n "$AVOCADO_VERBOSE" ]; then echo "[INFO] Mounted extension '$ext_name': $mnt_path -> $target_path (no UID/GID mapping)"; fi + fi + else + echo "[WARNING] Extension mount path not found: $mnt_path" + fi + done +fi + # Get repo url from environment or default to prod if [ -n "$AVOCADO_SDK_REPO_URL" ]; then REPO_URL="$AVOCADO_SDK_REPO_URL" @@ -1840,6 +2081,8 @@ mod tests { signing_checksum_algorithm: None, runs_on: None, nfs_port: None, + sdk_arch: None, + ext_path_mounts: None, }; let result = container.build_container_command(&config, &command, &env_vars, &volume_state); @@ -1977,4 +2220,126 @@ mod tests { vec!["-e", "VAR=value with spaces", "--name", "test"] ); } + + #[test] + fn test_sdk_arch_to_platform_aarch64() { + let result = 
sdk_arch_to_platform("aarch64").unwrap(); + assert_eq!(result, "linux/arm64"); + } + + #[test] + fn test_sdk_arch_to_platform_arm64() { + let result = sdk_arch_to_platform("arm64").unwrap(); + assert_eq!(result, "linux/arm64"); + } + + #[test] + fn test_sdk_arch_to_platform_x86_64() { + let result = sdk_arch_to_platform("x86-64").unwrap(); + assert_eq!(result, "linux/amd64"); + } + + #[test] + fn test_sdk_arch_to_platform_x86_64_underscore() { + let result = sdk_arch_to_platform("x86_64").unwrap(); + assert_eq!(result, "linux/amd64"); + } + + #[test] + fn test_sdk_arch_to_platform_amd64() { + let result = sdk_arch_to_platform("amd64").unwrap(); + assert_eq!(result, "linux/amd64"); + } + + #[test] + fn test_sdk_arch_to_platform_case_insensitive() { + let result = sdk_arch_to_platform("AARCH64").unwrap(); + assert_eq!(result, "linux/arm64"); + } + + #[test] + fn test_sdk_arch_to_platform_unsupported() { + let result = sdk_arch_to_platform("riscv64"); + assert!(result.is_err()); + assert!(result + .unwrap_err() + .to_string() + .contains("Unsupported SDK architecture")); + } + + #[test] + fn test_normalize_sdk_arch_aarch64() { + let result = normalize_sdk_arch("aarch64").unwrap(); + assert_eq!(result, "aarch64"); + } + + #[test] + fn test_normalize_sdk_arch_arm64() { + let result = normalize_sdk_arch("arm64").unwrap(); + assert_eq!(result, "aarch64"); + } + + #[test] + fn test_normalize_sdk_arch_x86_64() { + let result = normalize_sdk_arch("x86-64").unwrap(); + assert_eq!(result, "x86_64"); + } + + #[test] + fn test_normalize_sdk_arch_x86_64_underscore() { + let result = normalize_sdk_arch("x86_64").unwrap(); + assert_eq!(result, "x86_64"); + } + + #[test] + fn test_normalize_sdk_arch_amd64() { + let result = normalize_sdk_arch("amd64").unwrap(); + assert_eq!(result, "x86_64"); + } + + #[test] + fn test_normalize_sdk_arch_case_insensitive() { + let result = normalize_sdk_arch("AARCH64").unwrap(); + assert_eq!(result, "aarch64"); + } + + #[test] + fn 
test_normalize_sdk_arch_unsupported() { + let result = normalize_sdk_arch("riscv64"); + assert!(result.is_err()); + assert!(result + .unwrap_err() + .to_string() + .contains("Unsupported SDK architecture")); + } + + #[test] + fn test_get_host_platform_returns_valid_format() { + let platform = get_host_platform(); + assert!(platform.starts_with("linux/")); + // Should be one of the common architectures + let valid_archs = ["amd64", "arm64", "arm/v7", "riscv64"]; + let arch_part = platform.strip_prefix("linux/").unwrap(); + assert!( + valid_archs.contains(&arch_part) || !arch_part.is_empty(), + "Unexpected platform: {platform}" + ); + } + + #[test] + fn test_get_container_platform_with_sdk_arch() { + let result = get_container_platform(Some("aarch64")).unwrap(); + assert_eq!(result, "linux/arm64"); + + let result = get_container_platform(Some("x86-64")).unwrap(); + assert_eq!(result, "linux/amd64"); + } + + #[test] + fn test_get_container_platform_without_sdk_arch() { + let result = get_container_platform(None).unwrap(); + // Should return the host platform + assert!(result.starts_with("linux/")); + assert_eq!(result, get_host_platform()); + } } diff --git a/src/utils/ext_fetch.rs b/src/utils/ext_fetch.rs new file mode 100644 index 0000000..fbc85b7 --- /dev/null +++ b/src/utils/ext_fetch.rs @@ -0,0 +1,538 @@ +//! Extension fetching utilities for remote extensions. +//! +//! This module provides functionality to fetch extensions from various sources: +//! - Package repository (avocado extension repo) +//! - Git repositories (with optional sparse checkout) +//! 
- Local filesystem paths (mounted via bindfs at runtime) + +use anyhow::{Context, Result}; +use serde::{Deserialize, Serialize}; +use std::collections::HashMap; +use std::fs; +use std::path::{Path, PathBuf}; + +use crate::utils::config::ExtensionSource; +use crate::utils::container::{RunConfig, SdkContainer}; +use crate::utils::output::{print_info, OutputLevel}; + +/// State for extension path mounts stored in .avocado/ext-paths.json +#[derive(Debug, Clone, Default, Deserialize, Serialize)] +pub struct ExtensionPathState { + /// Map of extension name to host path for bindfs mounting + pub path_mounts: HashMap, +} + +impl ExtensionPathState { + /// Load extension path state from .avocado/ext-paths.json in the given directory + pub fn load_from_dir(dir_path: &Path) -> Result> { + let state_file = dir_path.join(".avocado").join("ext-paths.json"); + + if !state_file.exists() { + return Ok(None); + } + + let content = fs::read_to_string(&state_file).with_context(|| { + format!( + "Failed to read extension path state file: {}", + state_file.display() + ) + })?; + + let state: Self = serde_json::from_str(&content).with_context(|| { + format!( + "Failed to parse extension path state file: {}", + state_file.display() + ) + })?; + + Ok(Some(state)) + } + + /// Save extension path state to .avocado/ext-paths.json in the given directory + pub fn save_to_dir(&self, dir_path: &Path) -> Result<()> { + let state_dir = dir_path.join(".avocado"); + fs::create_dir_all(&state_dir).with_context(|| { + format!( + "Failed to create .avocado directory: {}", + state_dir.display() + ) + })?; + + let state_file = state_dir.join("ext-paths.json"); + let content = serde_json::to_string_pretty(self) + .with_context(|| "Failed to serialize extension path state".to_string())?; + + fs::write(&state_file, content).with_context(|| { + format!( + "Failed to write extension path state file: {}", + state_file.display() + ) + })?; + + Ok(()) + } + + /// Add a path mount for an extension + pub fn 
add_path_mount(&mut self, ext_name: String, host_path: PathBuf) { + self.path_mounts.insert(ext_name, host_path); + } + + /// Remove a path mount for an extension + #[allow(dead_code)] + pub fn remove_path_mount(&mut self, ext_name: &str) { + self.path_mounts.remove(ext_name); + } + + /// Get the path mount for an extension + #[allow(dead_code)] + pub fn get_path_mount(&self, ext_name: &str) -> Option<&PathBuf> { + self.path_mounts.get(ext_name) + } +} + +/// Extension fetcher for downloading and installing remote extensions +pub struct ExtensionFetcher { + /// Path to the main configuration file + config_path: String, + /// Target architecture + target: String, + /// Enable verbose output + verbose: bool, + /// Container image for running fetch operations + container_image: String, + /// Repository URL for package fetching + repo_url: Option, + /// Repository release for package fetching + repo_release: Option, + /// Container arguments + container_args: Option>, + /// SDK container architecture for cross-arch emulation + sdk_arch: Option, + /// Source directory for resolving relative extension paths + src_dir: Option, +} + +impl ExtensionFetcher { + /// Create a new ExtensionFetcher + pub fn new( + config_path: String, + target: String, + container_image: String, + verbose: bool, + ) -> Self { + Self { + config_path, + target, + verbose, + container_image, + repo_url: None, + repo_release: None, + container_args: None, + sdk_arch: None, + src_dir: None, + } + } + + /// Set repository URL + pub fn with_repo_url(mut self, repo_url: Option) -> Self { + self.repo_url = repo_url; + self + } + + /// Set repository release + pub fn with_repo_release(mut self, repo_release: Option) -> Self { + self.repo_release = repo_release; + self + } + + /// Set container arguments + pub fn with_container_args(mut self, container_args: Option>) -> Self { + self.container_args = container_args; + self + } + + /// Set SDK container architecture for cross-arch emulation + pub fn 
with_sdk_arch(mut self, sdk_arch: Option) -> Self { + self.sdk_arch = sdk_arch; + self + } + + /// Set source directory for resolving relative extension paths + pub fn with_src_dir(mut self, src_dir: Option) -> Self { + self.src_dir = src_dir; + self + } + + /// Fetch an extension based on its source configuration + /// + /// Returns the path where the extension was installed + pub async fn fetch( + &self, + ext_name: &str, + source: &ExtensionSource, + install_dir: &Path, + ) -> Result { + let ext_install_path = install_dir.join(ext_name); + + match source { + ExtensionSource::Package { + version, + package, + repo_name, + .. // include field not needed for fetching + } => { + self.fetch_from_repo( + ext_name, + version, + package.as_deref(), + repo_name.as_deref(), + &ext_install_path, + ) + .await?; + } + ExtensionSource::Git { + url, + git_ref, + sparse_checkout, + .. // include field not needed for fetching + } => { + self.fetch_from_git( + ext_name, + url, + git_ref.as_deref(), + sparse_checkout.as_deref(), + &ext_install_path, + ) + .await?; + } + ExtensionSource::Path { path, .. 
} => { + self.fetch_from_path(ext_name, path, &ext_install_path) + .await?; + } + } + + Ok(ext_install_path) + } + + /// Fetch an extension from the avocado package repository + async fn fetch_from_repo( + &self, + ext_name: &str, + version: &str, + package: Option<&str>, + repo_name: Option<&str>, + _install_path: &Path, // Host path - not used, we use container path instead + ) -> Result<()> { + // Use explicit package name if provided, otherwise fall back to extension name + let package_name = package.unwrap_or(ext_name); + + if self.verbose { + print_info( + &format!( + "Fetching extension '{ext_name}' (package: '{package_name}') version '{version}' from package repository" + ), + OutputLevel::Normal, + ); + } + + // Build the package spec using the package name (not extension name) + let package_spec = if version == "*" { + package_name.to_string() + } else { + format!("{package_name}-{version}") + }; + + // Build the DNF command to download and extract the package + // We use --downloadonly and then extract the RPM contents + let repo_arg = repo_name.map(|r| format!("--repo={r}")).unwrap_or_default(); + + // Use container path $AVOCADO_PREFIX/includes/ instead of host path + // This ensures the directory is created inside the container with proper permissions + let container_install_path = format!("$AVOCADO_PREFIX/includes/{ext_name}"); + + // The fetch script downloads the package and extracts it to the install path + // Use $DNF_SDK_HOST with $DNF_SDK_COMBINED_REPO_CONF to access target-specific repos + let fetch_script = format!( + r#" +set -e + +# Create temp directory for download +TMPDIR=$(mktemp -d) + +# Download the extension package using SDK DNF with combined repo config +# This includes both SDK repos and target-specific repos (like $AVOCADO_TARGET-ext) +RPM_CONFIGDIR=$AVOCADO_SDK_PREFIX/usr/lib/rpm \ +RPM_ETCCONFIGDIR=$AVOCADO_SDK_PREFIX \ +$DNF_SDK_HOST \ + $DNF_SDK_HOST_OPTS \ + $DNF_SDK_COMBINED_REPO_CONF \ + {repo_arg} \ + --downloadonly \ + 
--downloaddir="$TMPDIR" \ + -y \ + install \ + {package_spec} + +# Find the downloaded RPM +RPM_FILE=$(ls -1 "$TMPDIR"/*.rpm 2>/dev/null | head -1) +if [ -z "$RPM_FILE" ]; then + echo "ERROR: Failed to download package '{package_spec}' for extension '{ext_name}'" + exit 1 +fi + +# Extract RPM contents to install path (using container path) +# The package root / maps to the extension's src_dir +mkdir -p "{container_install_path}" +cd "{container_install_path}" +rpm2cpio "$RPM_FILE" | cpio -idmv + +echo "Successfully fetched extension '{ext_name}' (package: {package_spec}) to {container_install_path}" + +# Cleanup +rm -rf "$TMPDIR" +"# + ); + + let container_helper = SdkContainer::new().verbose(self.verbose); + let run_config = RunConfig { + container_image: self.container_image.clone(), + target: self.target.clone(), + command: fetch_script, + verbose: self.verbose, + source_environment: true, + interactive: false, + repo_url: self.repo_url.clone(), + repo_release: self.repo_release.clone(), + container_args: self.container_args.clone(), + sdk_arch: self.sdk_arch.clone(), + ..Default::default() + }; + + let success = container_helper.run_in_container(run_config).await?; + if !success { + return Err(anyhow::anyhow!( + "Failed to fetch extension '{ext_name}' from package repository" + )); + } + + Ok(()) + } + + /// Fetch an extension from a git repository + async fn fetch_from_git( + &self, + ext_name: &str, + url: &str, + git_ref: Option<&str>, + sparse_checkout: Option<&[String]>, + _install_path: &Path, // Host path - not used, we use container path instead + ) -> Result<()> { + if self.verbose { + print_info( + &format!("Fetching extension '{ext_name}' from git: {url}"), + OutputLevel::Normal, + ); + } + + // Use container path $AVOCADO_PREFIX/includes/ instead of host path + let container_install_path = format!("$AVOCADO_PREFIX/includes/{ext_name}"); + let ref_arg = git_ref.unwrap_or("HEAD"); + + // Build the git clone command + let git_cmd = if let 
Some(sparse_paths) = sparse_checkout { + // Use sparse checkout for specific paths + let sparse_paths_str = sparse_paths.join(" "); + format!( + r#" +set -e +rm -rf "{container_install_path}" +mkdir -p "{container_install_path}" +cd "{container_install_path}" +git init +git remote add origin "{url}" +git config core.sparseCheckout true +echo "{sparse_paths_str}" | tr ' ' '\n' > .git/info/sparse-checkout +git fetch --depth 1 origin {ref_arg} +git checkout FETCH_HEAD +# Move sparse checkout contents to root if needed +if [ -d "{sparse_paths_str}" ]; then + mv {sparse_paths_str}/* . 2>/dev/null || true + rm -rf {sparse_paths_str} +fi +echo "Successfully fetched extension '{ext_name}' from git" +"# + ) + } else { + // Full clone + format!( + r#" +set -e +rm -rf "{container_install_path}" +git clone --depth 1 --branch {ref_arg} "{url}" "{container_install_path}" || \ +git clone --depth 1 "{url}" "{container_install_path}" +cd "{container_install_path}" +if [ "{ref_arg}" != "HEAD" ]; then + git checkout {ref_arg} 2>/dev/null || true +fi +echo "Successfully fetched extension '{ext_name}' from git" +"# + ) + }; + + let container_helper = SdkContainer::new().verbose(self.verbose); + let run_config = RunConfig { + container_image: self.container_image.clone(), + target: self.target.clone(), + command: git_cmd, + verbose: self.verbose, + source_environment: true, + interactive: false, + repo_url: self.repo_url.clone(), + repo_release: self.repo_release.clone(), + container_args: self.container_args.clone(), + sdk_arch: self.sdk_arch.clone(), + ..Default::default() + }; + + let success = container_helper.run_in_container(run_config).await?; + if !success { + return Err(anyhow::anyhow!( + "Failed to fetch extension '{ext_name}' from git repository" + )); + } + + Ok(()) + } + + /// Fetch an extension from a local filesystem path + /// + /// Instead of copying files, this validates the path exists and stores the + /// mapping for bindfs mounting at container runtime. 
The extension source + /// will be mounted at `/mnt/ext/` and bindfs'd to + /// `$AVOCADO_PREFIX/includes/`. + async fn fetch_from_path( + &self, + ext_name: &str, + source_path: &str, + _install_path: &Path, // Host path - not used, we use bindfs mounting instead + ) -> Result<()> { + if self.verbose { + print_info( + &format!("Registering extension '{ext_name}' from path: {source_path}"), + OutputLevel::Normal, + ); + } + + // Resolve the source path relative to src_dir (or config dir if src_dir not set) + let resolved_source = if Path::new(source_path).is_absolute() { + PathBuf::from(source_path) + } else { + // Use src_dir if available, otherwise fall back to config directory + if let Some(ref src_dir) = self.src_dir { + src_dir.join(source_path) + } else { + let config_dir = Path::new(&self.config_path) + .parent() + .unwrap_or(Path::new(".")); + config_dir.join(source_path) + } + }; + + // Canonicalize the path to get the absolute path + let resolved_source = resolved_source.canonicalize().unwrap_or(resolved_source); + + if !resolved_source.exists() { + return Err(anyhow::anyhow!( + "Extension source path does not exist: {}\n\ + Path was resolved relative to: {}", + resolved_source.display(), + self.src_dir + .as_ref() + .map(|p| p.display().to_string()) + .unwrap_or_else(|| "config directory".to_string()) + )); + } + + // Check that the path contains an avocado.yaml or avocado.yml file + let has_config = resolved_source.join("avocado.yaml").exists() + || resolved_source.join("avocado.yml").exists(); + if !has_config { + return Err(anyhow::anyhow!( + "Extension source path does not contain an avocado.yaml or avocado.yml file: {}", + resolved_source.display() + )); + } + + // Get the state directory (src_dir or config dir) + let state_dir = self.src_dir.clone().unwrap_or_else(|| { + Path::new(&self.config_path) + .parent() + .unwrap_or(Path::new(".")) + .to_path_buf() + }); + + // Load or create extension path state + let mut state = 
ExtensionPathState::load_from_dir(&state_dir)?.unwrap_or_default(); + + // Add the path mount for this extension + state.add_path_mount(ext_name.to_string(), resolved_source.clone()); + + // Save the state + state.save_to_dir(&state_dir)?; + + if self.verbose { + print_info( + &format!( + "Registered extension '{ext_name}' for bindfs mounting from: {}", + resolved_source.display() + ), + OutputLevel::Normal, + ); + } + + print_info( + &format!( + "Extension '{ext_name}' will be mounted via bindfs at runtime from: {}", + resolved_source.display() + ), + OutputLevel::Normal, + ); + + Ok(()) + } + + /// Check if an extension is already fetched/installed + pub fn is_extension_installed(install_dir: &Path, ext_name: &str) -> bool { + let ext_path = install_dir.join(ext_name); + // Check if the directory exists and has an avocado config file + ext_path.exists() + && (ext_path.join("avocado.yaml").exists() || ext_path.join("avocado.yml").exists()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_extension_fetcher_creation() { + let fetcher = ExtensionFetcher::new( + "avocado.yaml".to_string(), + "x86_64-unknown-linux-gnu".to_string(), + "docker.io/avocadolinux/sdk:latest".to_string(), + false, + ); + + assert!(!fetcher.verbose); + assert_eq!(fetcher.target, "x86_64-unknown-linux-gnu"); + } + + #[test] + fn test_is_extension_installed() { + // This would need a temp directory to test properly + // For now just verify the function exists + let result = + ExtensionFetcher::is_extension_installed(Path::new("/nonexistent"), "test-ext"); + assert!(!result); + } +} diff --git a/src/utils/interpolation/avocado.rs b/src/utils/interpolation/avocado.rs index 335e8b0..20587f9 100644 --- a/src/utils/interpolation/avocado.rs +++ b/src/utils/interpolation/avocado.rs @@ -4,42 +4,178 @@ //! //! **Available values:** //! - `{{ avocado.target }}` - Resolved target architecture +//! - `{{ avocado.distro.version }}` - Distro version from main config +//! 
- `{{ avocado.distro.channel }}` - Distro channel from main config //! //! **Behavior:** //! - Returns None if value is not available (leaves template as-is) //! - Never produces errors - CLI will handle validation later //! - Follows the same precedence as CLI: CLI args > env vars > config +//! - distro values come from the main config context, not the current config use anyhow::Result; use serde_yaml::Value; use std::env; -/// Resolve an avocado computed value. +/// Context for avocado interpolation values. +/// +/// This struct holds values that are set by the main config and should be +/// available to all subsequent configs during interpolation. This ensures +/// that `avocado.*` values always reference the main config's values, +/// while `config.*` values reference the current config being interpolated. +#[derive(Debug, Clone, Default)] +pub struct AvocadoContext { + /// Target architecture (CLI > env > config precedence) + pub target: Option, + /// Distro version from the main config + pub distro_version: Option, + /// Distro channel from the main config + pub distro_channel: Option, +} + +impl AvocadoContext { + /// Create a new empty context. + #[allow(dead_code)] + pub fn new() -> Self { + Self::default() + } + + /// Create a context with just the target value. + /// + /// This is useful for simple interpolation cases where only target is needed. + #[allow(dead_code)] + pub fn with_target(target: Option<&str>) -> Self { + Self { + target: target.map(|s| s.to_string()), + distro_version: None, + distro_channel: None, + } + } + + /// Create a context from a main config YAML value. + /// + /// Extracts target (with CLI override and env precedence) and distro values + /// from the config to be used for interpolation in all subsequent configs. 
+ /// + /// # Arguments + /// * `root` - The main config YAML value + /// * `cli_target` - Optional CLI target override (highest priority) + pub fn from_main_config(root: &Value, cli_target: Option<&str>) -> Self { + // Resolve target with precedence: CLI > env > config + let target = Self::resolve_target_value(root, cli_target); + + // Extract distro values from the main config + let (distro_version, distro_channel) = Self::extract_distro_values(root); + + Self { + target, + distro_version, + distro_channel, + } + } + + /// Resolve the target value with standard precedence. + fn resolve_target_value(root: &Value, cli_target: Option<&str>) -> Option { + // 1. CLI target (highest priority) + if let Some(target) = cli_target { + return Some(target.to_string()); + } + + // 2. Environment variable + if let Ok(target) = env::var("AVOCADO_TARGET") { + return Some(target); + } + + // 3. Config default_target + if let Some(default_target) = root.get("default_target") { + if let Some(target_str) = default_target.as_str() { + return Some(target_str.to_string()); + } + } + + None + } + + /// Extract distro version and channel from the config. + fn extract_distro_values(root: &Value) -> (Option, Option) { + let distro = match root.get("distro") { + Some(d) => d, + None => return (None, None), + }; + + let version = distro + .get("version") + .and_then(|v| v.as_str()) + .map(|s| s.to_string()); + + let channel = distro + .get("channel") + .and_then(|v| v.as_str()) + .map(|s| s.to_string()); + + (version, channel) + } + + /// Create a context with all values explicitly provided. + /// + /// This is useful when constructing from a deserialized Config struct. 
+ /// + /// # Arguments + /// * `target` - The resolved target (CLI > env > config precedence should be applied by caller) + /// * `distro_version` - The distro version from the main config + /// * `distro_channel` - The distro channel from the main config + #[allow(dead_code)] + pub fn with_values( + target: Option, + distro_version: Option, + distro_channel: Option, + ) -> Self { + Self { + target, + distro_version, + distro_channel, + } + } +} + +/// Resolve an avocado computed value using path segments. /// /// # Arguments -/// * `key` - The avocado key (e.g., "target") -/// * `root` - The root YAML value for fallback lookups -/// * `cli_target` - Optional CLI target value (highest priority) +/// * `path` - The avocado path segments (e.g., ["target"] or ["distro", "version"]) +/// * `root` - The root YAML value for fallback lookups (used for target resolution) +/// * `context` - Optional avocado context with pre-resolved values from main config /// /// # Returns /// Result with Option - Some(value) if available, None to leave template as-is /// /// # Examples /// ``` -/// # use avocado_cli::utils::interpolation::avocado::resolve; +/// # use avocado_cli::utils::interpolation::avocado::{resolve, AvocadoContext}; /// let yaml = serde_yaml::from_str("default_target: x86_64-unknown-linux-gnu").unwrap(); /// -/// // With CLI target (highest priority) -/// let result = resolve("target", &yaml, Some("cli-target")).unwrap(); +/// // With context containing target +/// let ctx = AvocadoContext::with_target(Some("cli-target")); +/// let result = resolve(&["target"], &yaml, Some(&ctx)).unwrap(); /// assert_eq!(result, Some("cli-target".to_string())); /// -/// // From config -/// let result = resolve("target", &yaml, None).unwrap(); -/// assert_eq!(result, Some("x86_64-unknown-linux-gnu".to_string())); +/// // With distro context +/// let ctx = AvocadoContext { +/// target: None, +/// distro_version: Some("1.0.0".to_string()), +/// distro_channel: 
Some("stable".to_string()), +/// }; +/// let result = resolve(&["distro", "version"], &yaml, Some(&ctx)).unwrap(); +/// assert_eq!(result, Some("1.0.0".to_string())); /// ``` -pub fn resolve(key: &str, root: &Value, cli_target: Option<&str>) -> Result> { - match key { - "target" => resolve_target(root, cli_target), +pub fn resolve( + path: &[&str], + root: &Value, + context: Option<&AvocadoContext>, +) -> Result> { + match path { + ["target"] => resolve_target(root, context), + ["distro", "version"] => resolve_distro_version(context), + ["distro", "channel"] => resolve_distro_channel(context), _ => { // Other avocado keys are not yet supported, but don't error // Just leave the template as-is for future extension @@ -50,21 +186,16 @@ pub fn resolve(key: &str, root: &Value, cli_target: Option<&str>) -> Result - Some(target) if available, None if not available -fn resolve_target(root: &Value, cli_target: Option<&str>) -> Result> { - // 1. CLI target (highest priority) - if let Some(target) = cli_target { - return Ok(Some(target.to_string())); +/// 3. Config default_target (from root - the current config) +fn resolve_target(root: &Value, context: Option<&AvocadoContext>) -> Result> { + // 1. Context target (highest priority - from CLI or pre-resolved) + if let Some(ctx) = context { + if let Some(ref target) = ctx.target { + return Ok(Some(target.clone())); + } } // 2. Environment variable @@ -72,7 +203,7 @@ fn resolve_target(root: &Value, cli_target: Option<&str>) -> Result) -> Result) -> Result> { + if let Some(ctx) = context { + if let Some(ref version) = ctx.distro_version { + return Ok(Some(version.clone())); + } + } + // Not available - leave template as-is + Ok(None) +} + +/// Resolve the distro channel from the avocado context. +/// +/// This value comes from the main config and is passed through the context, +/// ensuring all configs use the same distro channel. 
+fn resolve_distro_channel(context: Option<&AvocadoContext>) -> Result> { + if let Some(ctx) = context { + if let Some(ref channel) = ctx.distro_channel { + return Ok(Some(channel.clone())); + } + } + // Not available - leave template as-is + Ok(None) +} + #[cfg(test)] mod tests { use super::*; @@ -95,9 +254,10 @@ mod tests { #[test] #[serial] - fn test_resolve_target_from_cli() { + fn test_resolve_target_from_context() { let config = parse_yaml("default_target: config-target"); - let result = resolve("target", &config, Some("cli-target")).unwrap(); + let ctx = AvocadoContext::with_target(Some("cli-target")); + let result = resolve(&["target"], &config, Some(&ctx)).unwrap(); assert_eq!(result, Some("cli-target".to_string())); } @@ -106,7 +266,7 @@ mod tests { fn test_resolve_target_from_env() { env::set_var("AVOCADO_TARGET", "env-target"); let config = parse_yaml("default_target: config-target"); - let result = resolve("target", &config, None).unwrap(); + let result = resolve(&["target"], &config, None).unwrap(); assert_eq!(result, Some("env-target".to_string())); env::remove_var("AVOCADO_TARGET"); } @@ -116,7 +276,7 @@ mod tests { fn test_resolve_target_from_config() { env::remove_var("AVOCADO_TARGET"); let config = parse_yaml("default_target: config-target"); - let result = resolve("target", &config, None).unwrap(); + let result = resolve(&["target"], &config, None).unwrap(); assert_eq!(result, Some("config-target".to_string())); } @@ -125,16 +285,92 @@ mod tests { fn test_resolve_target_unavailable() { env::remove_var("AVOCADO_TARGET"); let config = parse_yaml("{}"); - let result = resolve("target", &config, None).unwrap(); + let result = resolve(&["target"], &config, None).unwrap(); // Should return None (leave template as-is) assert_eq!(result, None); } #[test] - fn test_resolve_unknown_key() { + fn test_resolve_unknown_path() { let config = parse_yaml("{}"); - let result = resolve("unknown", &config, None).unwrap(); + let result = resolve(&["unknown"], 
&config, None).unwrap(); // Should return None (not supported yet, but no error) assert_eq!(result, None); } + + #[test] + fn test_resolve_distro_version_from_context() { + let config = parse_yaml("{}"); + let ctx = AvocadoContext { + target: None, + distro_version: Some("1.2.3".to_string()), + distro_channel: None, + }; + let result = resolve(&["distro", "version"], &config, Some(&ctx)).unwrap(); + assert_eq!(result, Some("1.2.3".to_string())); + } + + #[test] + fn test_resolve_distro_channel_from_context() { + let config = parse_yaml("{}"); + let ctx = AvocadoContext { + target: None, + distro_version: None, + distro_channel: Some("apollo-edge".to_string()), + }; + let result = resolve(&["distro", "channel"], &config, Some(&ctx)).unwrap(); + assert_eq!(result, Some("apollo-edge".to_string())); + } + + #[test] + fn test_resolve_distro_without_context() { + let config = parse_yaml("{}"); + // Without context, distro values should return None + let result = resolve(&["distro", "version"], &config, None).unwrap(); + assert_eq!(result, None); + + let result = resolve(&["distro", "channel"], &config, None).unwrap(); + assert_eq!(result, None); + } + + #[test] + fn test_avocado_context_from_main_config() { + let config = parse_yaml( + r#" +default_target: x86_64-unknown-linux-gnu +distro: + version: 0.1.0 + channel: apollo-edge +"#, + ); + let ctx = AvocadoContext::from_main_config(&config, None); + assert_eq!(ctx.target, Some("x86_64-unknown-linux-gnu".to_string())); + assert_eq!(ctx.distro_version, Some("0.1.0".to_string())); + assert_eq!(ctx.distro_channel, Some("apollo-edge".to_string())); + } + + #[test] + fn test_avocado_context_cli_overrides_config() { + let config = parse_yaml( + r#" +default_target: config-target +distro: + version: 0.1.0 + channel: apollo-edge +"#, + ); + let ctx = AvocadoContext::from_main_config(&config, Some("cli-target")); + assert_eq!(ctx.target, Some("cli-target".to_string())); + assert_eq!(ctx.distro_version, Some("0.1.0".to_string())); 
+ assert_eq!(ctx.distro_channel, Some("apollo-edge".to_string())); + } + + #[test] + fn test_avocado_context_missing_distro() { + let config = parse_yaml("default_target: x86_64"); + let ctx = AvocadoContext::from_main_config(&config, None); + assert_eq!(ctx.target, Some("x86_64".to_string())); + assert_eq!(ctx.distro_version, None); + assert_eq!(ctx.distro_channel, None); + } } diff --git a/src/utils/interpolation/mod.rs b/src/utils/interpolation/mod.rs index 81caac3..1c3a16d 100644 --- a/src/utils/interpolation/mod.rs +++ b/src/utils/interpolation/mod.rs @@ -27,14 +27,21 @@ //! - Navigates the YAML tree using dot notation //! - Returns an error if path doesn't exist (fatal) //! - Converts non-string values to strings +//! - References are scoped to the current config being interpolated //! //! ## [`avocado`] - Computed Internal Values //! ```yaml //! target_pkg: "pkg-{{ avocado.target }}" +//! distro_image: "sdk:{{ avocado.distro.channel }}" +//! version_ref: "{{ avocado.distro.version }}" //! ``` -//! - Provides access to computed values like target architecture +//! - Provides access to computed values from the main config +//! - `avocado.target` - Target architecture (CLI > env > config precedence) +//! - `avocado.distro.version` - Distro version from the main config +//! - `avocado.distro.channel` - Distro channel from the main config //! - Leaves template as-is if value unavailable //! - Never produces errors (CLI handles validation) +//! - Ensures all configs use the same distro values from the main config //! //! # Features //! @@ -52,8 +59,34 @@ pub mod avocado; pub mod config; pub mod env; +// Re-export AvocadoContext for convenience +pub use avocado::AvocadoContext; + const MAX_ITERATIONS: usize = 100; +/// Interpolate a simple string with the target value. +/// +/// This is a lightweight interpolation for extension names and other strings +/// that only need `{{ avocado.target }}` interpolation without the full config context. 
+/// +/// # Arguments +/// * `input` - The string to interpolate +/// * `target` - The target architecture value +/// +/// # Returns +/// The interpolated string with `{{ avocado.target }}` replaced +/// +/// # Examples +/// ``` +/// # use avocado_cli::utils::interpolation::interpolate_name; +/// let result = interpolate_name("my-ext-{{ avocado.target }}", "raspberrypi4"); +/// assert_eq!(result, "my-ext-raspberrypi4"); +/// ``` +pub fn interpolate_name(input: &str, target: &str) -> String { + let re = Regex::new(r"\{\{\s*avocado\.target\s*\}\}").unwrap(); + re.replace_all(input, target).to_string() +} + /// Interpolate configuration values in a YAML structure. /// /// This function recursively walks the YAML structure and replaces template strings @@ -79,6 +112,47 @@ const MAX_ITERATIONS: usize = 100; /// assert_eq!(config.get("derived").unwrap().as_str().unwrap(), "value"); /// ``` pub fn interpolate_config(yaml_value: &mut Value, cli_target: Option<&str>) -> Result<()> { + // Create a context with just the target for backward compatibility + let context = AvocadoContext::from_main_config(yaml_value, cli_target); + interpolate_config_with_context(yaml_value, &context) +} + +/// Interpolate configuration values using a pre-built avocado context. +/// +/// This is the preferred method when interpolating multiple configs that should +/// share the same avocado context (e.g., main config + extension configs). 
+/// +/// # Arguments +/// * `yaml_value` - The YAML value to interpolate (modified in place) +/// * `context` - The avocado context with pre-resolved values from the main config +/// +/// # Returns +/// Result indicating success or error if config references cannot be resolved +/// +/// # Examples +/// ``` +/// # use avocado_cli::utils::interpolation::{interpolate_config_with_context, AvocadoContext}; +/// let main_config = serde_yaml::from_str(r#" +/// distro: +/// version: "1.0.0" +/// channel: "stable" +/// "#).unwrap(); +/// +/// // Create context from main config +/// let context = AvocadoContext::from_main_config(&main_config, Some("x86_64")); +/// +/// // Use context to interpolate an extension config +/// let mut ext_config = serde_yaml::from_str(r#" +/// image: "sdk:{{ avocado.distro.channel }}" +/// "#).unwrap(); +/// +/// interpolate_config_with_context(&mut ext_config, &context).unwrap(); +/// assert_eq!(ext_config.get("image").unwrap().as_str().unwrap(), "sdk:stable"); +/// ``` +pub fn interpolate_config_with_context( + yaml_value: &mut Value, + context: &AvocadoContext, +) -> Result<()> { let mut iteration = 0; let mut changed = true; let mut previous_states: Vec = Vec::new(); @@ -105,7 +179,7 @@ pub fn interpolate_config(yaml_value: &mut Value, cli_target: Option<&str>) -> R let mut resolving_stack = HashSet::new(); // Start with empty path at root level let path: Vec = Vec::new(); - changed = interpolate_value(yaml_value, &root, cli_target, &mut resolving_stack, &path)?; + changed = interpolate_value(yaml_value, &root, context, &mut resolving_stack, &path)?; iteration += 1; } @@ -148,7 +222,7 @@ fn format_yaml_path(path: &[String], location: &YamlLocation) -> String { /// # Arguments /// * `value` - The current value to interpolate /// * `root` - The root YAML value for config references -/// * `cli_target` - Optional CLI target value +/// * `context` - The avocado context /// * `resolving_stack` - Set of templates currently being resolved (for 
cycle detection) /// * `path` - The current YAML path for error messages /// @@ -157,7 +231,7 @@ fn format_yaml_path(path: &[String], location: &YamlLocation) -> String { fn interpolate_value( value: &mut Value, root: &Value, - cli_target: Option<&str>, + context: &AvocadoContext, resolving_stack: &mut HashSet, path: &[String], ) -> Result { @@ -167,7 +241,7 @@ fn interpolate_value( Value::String(s) => { let location = YamlLocation::Value; if let Some(new_value) = - interpolate_string(s, root, cli_target, resolving_stack, path, &location)? + interpolate_string(s, root, context, resolving_stack, path, &location)? { *s = new_value; changed = true; @@ -184,7 +258,7 @@ fn interpolate_value( if let Some(new_key) = interpolate_string( key_str, root, - cli_target, + context, resolving_stack, path, &location, @@ -209,7 +283,7 @@ fn interpolate_value( }; let mut child_path = path.to_vec(); child_path.push(key_str); - if interpolate_value(v, root, cli_target, resolving_stack, &child_path)? { + if interpolate_value(v, root, context, resolving_stack, &child_path)? { changed = true; } } @@ -218,7 +292,7 @@ fn interpolate_value( for (idx, item) in seq.iter_mut().enumerate() { let mut child_path = path.to_vec(); child_path.push(format!("[{idx}]")); - if interpolate_value(item, root, cli_target, resolving_stack, &child_path)? { + if interpolate_value(item, root, context, resolving_stack, &child_path)? 
{ changed = true; } } @@ -236,7 +310,7 @@ fn interpolate_value( /// # Arguments /// * `input` - The input string that may contain templates /// * `root` - The root YAML value for config references -/// * `cli_target` - Optional CLI target value +/// * `context` - The avocado context /// * `resolving_stack` - Set of templates currently being resolved (for cycle detection) /// * `path` - The current YAML path for error messages /// * `location` - Whether this is a key or value @@ -246,7 +320,7 @@ fn interpolate_value( fn interpolate_string( input: &str, root: &Value, - cli_target: Option<&str>, + context: &AvocadoContext, resolving_stack: &mut HashSet, path: &[String], location: &YamlLocation, @@ -266,7 +340,7 @@ fn interpolate_string( let full_match = capture.get(0).unwrap().as_str(); let template = capture.get(1).unwrap().as_str().trim(); - match resolve_template(template, root, cli_target, resolving_stack) { + match resolve_template(template, root, context, resolving_stack) { Ok(Some(replacement)) => { result = result.replace(full_match, &replacement); any_replaced = true; @@ -294,7 +368,7 @@ fn interpolate_string( /// # Arguments /// * `template` - The template expression (e.g., "env.VAR" or "config.key") /// * `root` - The root YAML value for config references -/// * `cli_target` - Optional CLI target value +/// * `context` - The avocado context /// * `resolving_stack` - Set of templates currently being resolved (for cycle detection) /// /// # Returns @@ -302,7 +376,7 @@ fn interpolate_string( fn resolve_template( template: &str, root: &Value, - cli_target: Option<&str>, + context: &AvocadoContext, resolving_stack: &mut HashSet, ) -> Result> { // Check for circular reference @@ -328,9 +402,9 @@ fn resolve_template( anyhow::bail!("Invalid template syntax: empty template"); } - let context = parts[0]; + let context_name = parts[0]; - let result = match context { + let result = match context_name { "env" => { if parts.len() < 2 { anyhow::bail!("Invalid env 
template: {template}"); @@ -349,12 +423,13 @@ fn resolve_template( if parts.len() < 2 { anyhow::bail!("Invalid avocado template: {template}"); } - let key = parts[1]; - avocado::resolve(key, root, cli_target) + // Pass the full path (excluding "avocado" prefix) + let path = &parts[1..]; + avocado::resolve(path, root, Some(context)) } _ => { anyhow::bail!( - "Unknown template context: {context}. Expected 'env', 'config', or 'avocado'" + "Unknown template context: {context_name}. Expected 'env', 'config', or 'avocado'" ); } }; @@ -700,9 +775,9 @@ key: "{{ env.TRIMMED }}" let mut config = parse_yaml( r#" default_target: "x86_64" -runtime: +runtimes: dev: - dependencies: + packages: pkg1: "{{ env.PKG_VERSION }}" pkg2: "{{ config.default_target }}" array: @@ -713,9 +788,9 @@ runtime: interpolate_config(&mut config, None).unwrap(); - let runtime = config.get("runtime").unwrap(); + let runtime = config.get("runtimes").unwrap(); let dev = runtime.get("dev").unwrap(); - let deps = dev.get("dependencies").unwrap(); + let deps = dev.get("packages").unwrap(); assert_eq!(deps.get("pkg1").unwrap().as_str().unwrap(), "1.2.3"); assert_eq!(deps.get("pkg2").unwrap().as_str().unwrap(), "x86_64"); @@ -786,7 +861,7 @@ distro: version: 0.1.0 sdk: image: "docker.io/avocadolinux/sdk:{{ config.distro.channel }}" - dependencies: + packages: avocado-sdk-toolchain: "{{ config.distro.version }}" "#, ); @@ -799,7 +874,7 @@ sdk: "docker.io/avocadolinux/sdk:apollo-edge" ); - let deps = sdk.get("dependencies").unwrap(); + let deps = sdk.get("packages").unwrap(); assert_eq!( deps.get("avocado-sdk-toolchain").unwrap().as_str().unwrap(), "0.1.0" @@ -814,7 +889,7 @@ sdk: r#" default_target: qemux86-64 sdk: - dependencies: + packages: packagegroup-rust-cross-canadian-{{ avocado.target }}: "*" regular-package: "1.0.0" "#, @@ -823,7 +898,7 @@ sdk: interpolate_config(&mut config, Some("qemux86-64")).unwrap(); let sdk = config.get("sdk").unwrap(); - let deps = sdk.get("dependencies").unwrap(); + let deps = 
sdk.get("packages").unwrap(); // The key should be interpolated with the target assert!(deps @@ -856,14 +931,14 @@ sdk: let mut config = parse_yaml( r#" -dependencies: +packages: package-{{ env.MY_SUFFIX }}: "1.0.0" "#, ); interpolate_config(&mut config, None).unwrap(); - let deps = config.get("dependencies").unwrap(); + let deps = config.get("packages").unwrap(); assert!(deps.get("package-custom").is_some()); assert_eq!( deps.get("package-custom").unwrap().as_str().unwrap(), @@ -892,4 +967,123 @@ mapping: "value" ); } + + #[test] + fn test_avocado_distro_version_interpolation() { + // Create main config with distro values + let main_config = parse_yaml( + r#" +distro: + version: "1.0.0" + channel: "apollo-edge" +"#, + ); + + // Create context from main config + let context = AvocadoContext::from_main_config(&main_config, Some("x86_64")); + + // Test interpolating an extension config + let mut ext_config = parse_yaml( + r#" +packages: + avocado-runtime: "{{ avocado.distro.version }}" + avocado-sdk: "{{ avocado.distro.channel }}" +"#, + ); + + interpolate_config_with_context(&mut ext_config, &context).unwrap(); + + let packages = ext_config.get("packages").unwrap(); + assert_eq!( + packages.get("avocado-runtime").unwrap().as_str().unwrap(), + "1.0.0" + ); + assert_eq!( + packages.get("avocado-sdk").unwrap().as_str().unwrap(), + "apollo-edge" + ); + } + + #[test] + fn test_avocado_distro_in_same_config() { + // When interpolating main config itself, avocado.distro should work + let mut config = parse_yaml( + r#" +distro: + version: "1.0.0" + channel: "apollo-edge" +sdk: + image: "docker.io/sdk:{{ avocado.distro.channel }}" + packages: + runtime: "{{ avocado.distro.version }}" +"#, + ); + + interpolate_config(&mut config, None).unwrap(); + + let sdk = config.get("sdk").unwrap(); + assert_eq!( + sdk.get("image").unwrap().as_str().unwrap(), + "docker.io/sdk:apollo-edge" + ); + + let packages = sdk.get("packages").unwrap(); + 
assert_eq!(packages.get("runtime").unwrap().as_str().unwrap(), "1.0.0"); + } + + #[test] + fn test_avocado_distro_unavailable() { + // When distro values are not set, template should be left as-is + let mut config = parse_yaml( + r#" +reference: "{{ avocado.distro.version }}" +"#, + ); + + // No distro in config, so it should remain unresolved + interpolate_config(&mut config, None).unwrap(); + + assert_eq!( + config.get("reference").unwrap().as_str().unwrap(), + "{{ avocado.distro.version }}" + ); + } + + #[test] + fn test_extension_uses_main_config_distro() { + // Main config has distro values + let main_config = parse_yaml( + r#" +distro: + version: "2.0.0" + channel: "stable" +"#, + ); + + let context = AvocadoContext::from_main_config(&main_config, Some("aarch64")); + + // Extension config has its OWN distro values, but avocado.distro should use main config + let mut ext_config = parse_yaml( + r#" +distro: + version: "1.0.0" + channel: "ext-channel" +avocado_version: "{{ avocado.distro.version }}" +config_version: "{{ config.distro.version }}" +"#, + ); + + interpolate_config_with_context(&mut ext_config, &context).unwrap(); + + // avocado.distro should use main config values + assert_eq!( + ext_config.get("avocado_version").unwrap().as_str().unwrap(), + "2.0.0" + ); + // config.distro should use extension's own values + assert_eq!( + ext_config.get("config_version").unwrap().as_str().unwrap(), + "1.0.0" + ); + } } diff --git a/src/utils/lockfile.rs b/src/utils/lockfile.rs index 9d9f40f..1cf11f3 100644 --- a/src/utils/lockfile.rs +++ b/src/utils/lockfile.rs @@ -3,6 +3,9 @@ //! This module provides functionality to track and pin package versions //! across different sysroots to ensure reproducible builds. 
+// Allow deprecated variants for backward compatibility during migration +#![allow(deprecated)] + use anyhow::{Context, Result}; use serde::{Deserialize, Serialize}; use std::collections::HashMap; @@ -34,8 +37,12 @@ pub enum SysrootType { /// Local/external extension sysroot ($AVOCADO_EXT_SYSROOTS/{name}) /// Uses ext-rpm-config-scripts for RPM database Extension(String), - /// Versioned extension sysroot ($AVOCADO_EXT_SYSROOTS/{name}) - /// Uses ext-rpm-config for RPM database (different location than local extensions) + /// DEPRECATED: Versioned extension sysroot + /// The vsn: syntax is no longer supported. Remote extensions are now defined + /// in the ext section with source: field and are treated as local extensions + /// after being fetched to $AVOCADO_PREFIX/includes//. + #[deprecated(since = "0.23.0", note = "Use Extension variant for all extensions")] + #[allow(dead_code)] VersionedExtension(String), /// Runtime sysroot ($AVOCADO_PREFIX/runtimes/{name}) Runtime(String), diff --git a/src/utils/mod.rs b/src/utils/mod.rs index c37ea6d..cc34db5 100644 --- a/src/utils/mod.rs +++ b/src/utils/mod.rs @@ -1,5 +1,6 @@ pub mod config; pub mod container; +pub mod ext_fetch; pub mod image_signing; pub mod interpolation; pub mod lockfile; diff --git a/src/utils/stamps.rs b/src/utils/stamps.rs index 2a7b29a..e618402 100644 --- a/src/utils/stamps.rs +++ b/src/utils/stamps.rs @@ -7,6 +7,9 @@ //! 2. Detects staleness via content-addressable hashing (config + package list) //! 3. 
Enforces command ordering with dependency resolution from config +// Allow deprecated variants for backward compatibility during migration +#![allow(deprecated)] + use anyhow::{Context, Result}; use chrono::{DateTime, Utc}; use serde::{Deserialize, Serialize}; @@ -618,11 +621,29 @@ impl fmt::Display for StampValidationError { // Collect unique fix commands, using runs_on hint for SDK install commands let runs_on_ref = self.runs_on.as_deref(); + let local_arch = get_local_arch(); + let mut fixes: Vec = self .missing .iter() .chain(self.stale.iter().map(|(req, _)| req)) - .map(|req| req.fix_command_with_remote(runs_on_ref)) + .flat_map(|req| { + // For SDK install stamps with a different architecture than local, + // offer both --runs-on and --sdk-arch alternatives + if req.component == StampComponent::Sdk + && req.command == StampCommand::Install + && req.host_arch.as_deref() != Some(local_arch) + { + if let Some(arch) = &req.host_arch { + let mut cmds = vec![format!("avocado sdk install --sdk-arch {arch}")]; + if let Some(remote) = runs_on_ref { + cmds.push(format!("avocado sdk install --runs-on {remote}")); + } + return cmds; + } + } + vec![req.fix_command_with_remote(runs_on_ref)] + }) .collect(); fixes.sort(); fixes.dedup(); @@ -657,7 +678,7 @@ pub fn compute_sdk_input_hash(config: &serde_yaml::Value) -> Result // Include sdk.dependencies if let Some(sdk) = config.get("sdk") { - if let Some(deps) = sdk.get("dependencies") { + if let Some(deps) = sdk.get("packages") { hash_data.insert( serde_yaml::Value::String("sdk.dependencies".to_string()), deps.clone(), @@ -693,8 +714,8 @@ pub fn compute_ext_input_hash(config: &serde_yaml::Value, ext_name: &str) -> Res let mut hash_data = serde_yaml::Mapping::new(); // Include ext..dependencies - if let Some(ext) = config.get("ext").and_then(|e| e.get(ext_name)) { - if let Some(deps) = ext.get("dependencies") { + if let Some(ext) = config.get("extensions").and_then(|e| e.get(ext_name)) { + if let Some(deps) = 
ext.get("packages") { hash_data.insert( serde_yaml::Value::String(format!("ext.{ext_name}.dependencies")), deps.clone(), @@ -722,7 +743,7 @@ pub fn compute_runtime_input_hash( let mut hash_data = serde_yaml::Mapping::new(); // Include the merged dependencies section - if let Some(deps) = merged_runtime.get("dependencies") { + if let Some(deps) = merged_runtime.get("packages") { hash_data.insert( serde_yaml::Value::String(format!("runtime.{runtime_name}.dependencies")), deps.clone(), @@ -964,16 +985,19 @@ pub fn resolve_required_stamps_for_arch( reqs } - // Sign requires runtime build + // Sign requires SDK install + runtime build + // SDK install is needed because signing runs in the SDK container (StampCommand::Sign, StampComponent::Runtime) => { let runtime_name = component_name.expect("Runtime name required"); - vec![StampRequirement::runtime_build(runtime_name)] + vec![sdk_install(), StampRequirement::runtime_build(runtime_name)] } - // Provision requires runtime build + // Provision requires SDK install + runtime build + // SDK install is needed because provisioning runs in the SDK container + // When using --runs-on, this ensures the SDK is installed for the remote's arch (StampCommand::Provision, StampComponent::Runtime) => { let runtime_name = component_name.expect("Runtime name required"); - vec![StampRequirement::runtime_build(runtime_name)] + vec![sdk_install(), StampRequirement::runtime_build(runtime_name)] } // Other combinations have no requirements @@ -986,9 +1010,8 @@ pub fn resolve_required_stamps_for_arch( /// This properly handles different extension types: /// - Local extensions: require install + build + image stamps /// - External extensions: require install + build + image stamps -/// - Versioned extensions: NO stamp requirements - they're prebuilt packages -/// installed directly via DNF during `runtime install`. The package repository -/// contains the complete extension images, so no local build/image steps needed. 
+/// - Versioned extensions: DEPRECATED - should error during config parsing +/// Remote extensions are now defined in the ext section with source: field pub fn resolve_required_stamps_for_runtime_build( runtime_name: &str, ext_dependencies: &[RuntimeExtDep], @@ -1012,30 +1035,13 @@ pub fn resolve_required_stamps_for_runtime_build_with_arch( let mut reqs = vec![sdk_install, StampRequirement::runtime_install(runtime_name)]; + // All extensions now require install + build + image stamps + // Extension source configuration (repo, git, path) is defined in the ext section for ext_dep in ext_dependencies { let ext_name = ext_dep.name(); - - match ext_dep { - // Local extensions: require install + build + image stamps - RuntimeExtDep::Local(_) => { - reqs.push(StampRequirement::ext_install(ext_name)); - reqs.push(StampRequirement::ext_build(ext_name)); - reqs.push(StampRequirement::ext_image(ext_name)); - } - // External extensions: require install + build + image stamps - RuntimeExtDep::External { .. } => { - reqs.push(StampRequirement::ext_install(ext_name)); - reqs.push(StampRequirement::ext_build(ext_name)); - reqs.push(StampRequirement::ext_image(ext_name)); - } - // Versioned extensions: NO stamp requirements - // They're prebuilt packages from the package repository, installed - // directly via DNF during `runtime install`. No local ext install, - // ext build, or ext image steps are needed. - RuntimeExtDep::Versioned { .. 
} => { - // No stamps required - covered by runtime install - } - } + reqs.push(StampRequirement::ext_install(ext_name)); + reqs.push(StampRequirement::ext_build(ext_name)); + reqs.push(StampRequirement::ext_image(ext_name)); } reqs @@ -1294,28 +1300,30 @@ mod tests { #[test] fn test_resolve_required_stamps_sign() { - // Sign requires runtime build + // Sign requires SDK install + runtime build let reqs = resolve_required_stamps( StampCommand::Sign, StampComponent::Runtime, Some("my-runtime"), &[], ); - assert_eq!(reqs.len(), 1); - assert_eq!(reqs[0], StampRequirement::runtime_build("my-runtime")); + assert_eq!(reqs.len(), 2); + assert_eq!(reqs[0], StampRequirement::sdk_install()); + assert_eq!(reqs[1], StampRequirement::runtime_build("my-runtime")); } #[test] fn test_resolve_required_stamps_provision() { - // Provision requires runtime build + // Provision requires SDK install + runtime build let reqs = resolve_required_stamps( StampCommand::Provision, StampComponent::Runtime, Some("my-runtime"), &[], ); - assert_eq!(reqs.len(), 1); - assert_eq!(reqs[0], StampRequirement::runtime_build("my-runtime")); + assert_eq!(reqs.len(), 2); + assert_eq!(reqs[0], StampRequirement::sdk_install()); + assert_eq!(reqs[1], StampRequirement::runtime_build("my-runtime")); } #[test] @@ -1427,23 +1435,15 @@ mod tests { } #[test] - fn test_resolve_required_stamps_for_runtime_build_with_mixed_extensions() { + fn test_resolve_required_stamps_for_runtime_build_with_multiple_extensions() { use crate::utils::config::RuntimeExtDep; - // Test with mixed extension types: - // - local-ext: needs install + build + image stamps - // - external-ext: needs install + build + image stamps - // - versioned-ext: NO stamps (prebuilt package from repo) + // Test with multiple extensions: + // All extensions are now Local type - source config (repo, git, path) is in ext section let ext_deps = vec![ - RuntimeExtDep::Local("local-ext".to_string()), - RuntimeExtDep::External { - name: 
"external-ext".to_string(), - config_path: "../external/avocado.yaml".to_string(), - }, - RuntimeExtDep::Versioned { - name: "versioned-ext".to_string(), - version: "1.0.0".to_string(), - }, + RuntimeExtDep::Local("app".to_string()), + RuntimeExtDep::Local("config-dev".to_string()), + RuntimeExtDep::Local("avocado-ext-dev".to_string()), ]; let reqs = resolve_required_stamps_for_runtime_build("my-runtime", &ext_deps); @@ -1451,108 +1451,35 @@ mod tests { // Should have: // - SDK install (1) // - Runtime install (1) - // - local-ext install + build + image (3) - // - external-ext install + build + image (3) - // - versioned-ext: NOTHING (prebuilt package from repo) - // Total: 8 - assert_eq!(reqs.len(), 8); + // - app install + build + image (3) + // - config-dev install + build + image (3) + // - avocado-ext-dev install + build + image (3) + // Total: 11 + assert_eq!(reqs.len(), 11); // Verify SDK and runtime install are present assert!(reqs.contains(&StampRequirement::sdk_install())); assert!(reqs.contains(&StampRequirement::runtime_install("my-runtime"))); - // Verify local extension has install, build, and image - assert!(reqs.contains(&StampRequirement::ext_install("local-ext"))); - assert!(reqs.contains(&StampRequirement::ext_build("local-ext"))); - assert!(reqs.contains(&StampRequirement::ext_image("local-ext"))); - - // Verify external extension has install, build, and image - assert!(reqs.contains(&StampRequirement::ext_install("external-ext"))); - assert!(reqs.contains(&StampRequirement::ext_build("external-ext"))); - assert!(reqs.contains(&StampRequirement::ext_image("external-ext"))); - - // Verify versioned extension has NO stamps at all - // (they're prebuilt packages installed via DNF during runtime install) - assert!(!reqs.contains(&StampRequirement::ext_install("versioned-ext"))); - assert!(!reqs.contains(&StampRequirement::ext_build("versioned-ext"))); - assert!(!reqs.contains(&StampRequirement::ext_image("versioned-ext"))); - } - - #[test] - fn 
test_resolve_required_stamps_runtime_build_only_versioned_extensions() { - use crate::utils::config::RuntimeExtDep; - - // Runtime with ONLY versioned extensions (common for prebuilt extensions from package repo) - // Example: avocado-ext-dev, avocado-ext-sshd-dev - // Versioned extensions are prebuilt packages - NO stamps required - let ext_deps = vec![ - RuntimeExtDep::Versioned { - name: "avocado-ext-dev".to_string(), - version: "0.1.0".to_string(), - }, - RuntimeExtDep::Versioned { - name: "avocado-ext-sshd-dev".to_string(), - version: "0.1.0".to_string(), - }, - ]; - - let reqs = resolve_required_stamps_for_runtime_build("dev", &ext_deps); - - // Should ONLY have SDK install + runtime install (2 total) - // Versioned extensions don't add any stamp requirements - assert_eq!(reqs.len(), 2); - assert!(reqs.contains(&StampRequirement::sdk_install())); - assert!(reqs.contains(&StampRequirement::runtime_install("dev"))); - - // Verify NO extension stamps are required for versioned extensions - assert!(!reqs.contains(&StampRequirement::ext_install("avocado-ext-dev"))); - assert!(!reqs.contains(&StampRequirement::ext_build("avocado-ext-dev"))); - assert!(!reqs.contains(&StampRequirement::ext_image("avocado-ext-dev"))); - assert!(!reqs.contains(&StampRequirement::ext_install("avocado-ext-sshd-dev"))); - assert!(!reqs.contains(&StampRequirement::ext_build("avocado-ext-sshd-dev"))); - assert!(!reqs.contains(&StampRequirement::ext_image("avocado-ext-sshd-dev"))); - } - - #[test] - fn test_resolve_required_stamps_runtime_build_only_external_extensions() { - use crate::utils::config::RuntimeExtDep; - - // Runtime with ONLY external extensions (from external config files) - let ext_deps = vec![ - RuntimeExtDep::External { - name: "avocado-ext-peridio".to_string(), - config_path: "avocado-ext-peridio/avocado.yml".to_string(), - }, - RuntimeExtDep::External { - name: "custom-ext".to_string(), - config_path: "../custom/avocado.yaml".to_string(), - }, - ]; - - let reqs = 
resolve_required_stamps_for_runtime_build("my-runtime", &ext_deps); + // Verify all extensions have install, build, and image + assert!(reqs.contains(&StampRequirement::ext_install("app"))); + assert!(reqs.contains(&StampRequirement::ext_build("app"))); + assert!(reqs.contains(&StampRequirement::ext_image("app"))); - // Should have: - // - SDK install (1) - // - Runtime install (1) - // - avocado-ext-peridio install + build + image (3) - // - custom-ext install + build + image (3) - // Total: 8 - assert_eq!(reqs.len(), 8); + assert!(reqs.contains(&StampRequirement::ext_install("config-dev"))); + assert!(reqs.contains(&StampRequirement::ext_build("config-dev"))); + assert!(reqs.contains(&StampRequirement::ext_image("config-dev"))); - // Verify external extensions require install, build, and image - assert!(reqs.contains(&StampRequirement::ext_install("avocado-ext-peridio"))); - assert!(reqs.contains(&StampRequirement::ext_build("avocado-ext-peridio"))); - assert!(reqs.contains(&StampRequirement::ext_image("avocado-ext-peridio"))); - assert!(reqs.contains(&StampRequirement::ext_install("custom-ext"))); - assert!(reqs.contains(&StampRequirement::ext_build("custom-ext"))); - assert!(reqs.contains(&StampRequirement::ext_image("custom-ext"))); + assert!(reqs.contains(&StampRequirement::ext_install("avocado-ext-dev"))); + assert!(reqs.contains(&StampRequirement::ext_build("avocado-ext-dev"))); + assert!(reqs.contains(&StampRequirement::ext_image("avocado-ext-dev"))); } #[test] - fn test_resolve_required_stamps_runtime_build_only_local_extensions() { + fn test_resolve_required_stamps_runtime_build_local_extensions() { use crate::utils::config::RuntimeExtDep; - // Runtime with ONLY local extensions (defined in main config) + // Runtime with extensions (all are now Local type) let ext_deps = vec![ RuntimeExtDep::Local("app".to_string()), RuntimeExtDep::Local("config-dev".to_string()), @@ -1631,20 +1558,9 @@ mod tests { fn test_runtime_ext_dep_name() { use 
crate::utils::config::RuntimeExtDep; + // Test the Local variant (the primary way to specify extensions) let local = RuntimeExtDep::Local("my-local-ext".to_string()); assert_eq!(local.name(), "my-local-ext"); - - let external = RuntimeExtDep::External { - name: "my-external-ext".to_string(), - config_path: "path/to/config.yaml".to_string(), - }; - assert_eq!(external.name(), "my-external-ext"); - - let versioned = RuntimeExtDep::Versioned { - name: "my-versioned-ext".to_string(), - version: "1.2.3".to_string(), - }; - assert_eq!(versioned.name(), "my-versioned-ext"); } #[test] @@ -2220,25 +2136,46 @@ runtime/my-runtime/build.stamp:::null"#, } #[test] - fn test_validation_error_includes_runs_on_hint() { + fn test_validation_error_includes_sdk_arch_hint_for_different_arch() { let mut result = StampValidationResult::new(); - result.add_missing(StampRequirement::sdk_install_for_arch("aarch64")); + // Use an architecture different from local to trigger --sdk-arch suggestion + let different_arch = if get_local_arch() == "aarch64" { + "x86_64" + } else { + "aarch64" + }; + result.add_missing(StampRequirement::sdk_install_for_arch(different_arch)); - // Without runs_on, fix should be regular install + // Without runs_on, fix should suggest --sdk-arch for different architecture let error = result.into_error("Cannot provision"); let msg = error.to_string(); - assert!(msg.contains("avocado sdk install")); - assert!(!msg.contains("--runs-on")); + assert!( + msg.contains(&format!("avocado sdk install --sdk-arch {different_arch}")), + "Expected --sdk-arch suggestion in: {msg}" + ); } #[test] - fn test_validation_error_with_runs_on_includes_remote_in_fix() { + fn test_validation_error_with_runs_on_includes_both_alternatives() { let mut result = StampValidationResult::new(); - result.add_missing(StampRequirement::sdk_install_for_arch("aarch64")); + // Use an architecture different from local to trigger both suggestions + let different_arch = if get_local_arch() == "aarch64" { + 
"x86_64" + } else { + "aarch64" + }; + result.add_missing(StampRequirement::sdk_install_for_arch(different_arch)); - // With runs_on, fix should include the remote + // With runs_on, fix should include BOTH --sdk-arch and --runs-on alternatives let error = result.into_error_with_runs_on("Cannot provision", Some("user@remote")); let msg = error.to_string(); - assert!(msg.contains("avocado sdk install --runs-on user@remote")); + assert!( + msg.contains(&format!("avocado sdk install --sdk-arch {different_arch}")), + "Expected --sdk-arch suggestion in: {msg}" + ); + assert!( + msg.contains("avocado sdk install --runs-on user@remote"), + "Expected --runs-on suggestion in: {msg}" + ); } } diff --git a/src/utils/target.rs b/src/utils/target.rs index 13109d0..72edba5 100644 --- a/src/utils/target.rs +++ b/src/utils/target.rs @@ -235,9 +235,9 @@ mod tests { supported_targets: None, src_dir: None, distro: None, - runtime: None, + runtimes: None, sdk: None, - provision: None, + provision_profiles: None, signing_keys: None, } } @@ -249,9 +249,9 @@ mod tests { supported_targets: Some(SupportedTargets::List(targets)), src_dir: None, distro: None, - runtime: None, + runtimes: None, sdk: None, - provision: None, + provision_profiles: None, signing_keys: None, } } @@ -263,9 +263,9 @@ mod tests { supported_targets: Some(SupportedTargets::All("*".to_string())), src_dir: None, distro: None, - runtime: None, + runtimes: None, sdk: None, - provision: None, + provision_profiles: None, signing_keys: None, } } diff --git a/src/utils/volume.rs b/src/utils/volume.rs index ca35a83..fdc3db0 100644 --- a/src/utils/volume.rs +++ b/src/utils/volume.rs @@ -258,6 +258,126 @@ impl VolumeManager { Ok(containers) } + + /// Name prefix for VS Code extension explorer containers + const EXPLORER_CONTAINER_PREFIX: &'static str = "avocado-explorer-"; + + /// Get list of VS Code extension explorer containers using a specific volume. 
+ /// These containers are created by the avocado-devtools VS Code extension + /// to browse volume contents and can be safely stopped automatically. + pub async fn get_explorer_containers_using_volume( + &self, + volume_name: &str, + ) -> Result> { + // Find containers that: + // 1. Use the specified volume + // 2. Have a name matching the explorer pattern (avocado-explorer-*) + let output = AsyncCommand::new(&self.container_tool) + .args([ + "ps", + "-a", + "--filter", + &format!("volume={volume_name}"), + "--filter", + &format!("name={}", Self::EXPLORER_CONTAINER_PREFIX), + "--format", + "{{.ID}}\t{{.Names}}", + ]) + .output() + .await + .with_context(|| "Failed to list explorer containers")?; + + if !output.status.success() { + return Ok(Vec::new()); + } + + let stdout = String::from_utf8_lossy(&output.stdout); + let containers: Vec = stdout + .lines() + .filter(|line| !line.is_empty()) + .filter_map(|line| { + let parts: Vec<&str> = line.split('\t').collect(); + if parts.len() >= 2 { + let id = parts[0]; + let name = parts[1]; + // Double-check the name starts with our prefix + if name.starts_with(Self::EXPLORER_CONTAINER_PREFIX) { + return Some(id.to_string()); + } + } + None + }) + .collect(); + + Ok(containers) + } + + /// Stop and remove VS Code extension explorer containers using a specific volume. + /// Returns the number of containers that were stopped. 
+ pub async fn stop_explorer_containers(&self, volume_name: &str) -> Result { + let containers = self + .get_explorer_containers_using_volume(volume_name) + .await?; + + if containers.is_empty() { + return Ok(0); + } + + if self.verbose { + print_info( + &format!( + "Found {} VS Code explorer container(s) using volume, stopping...", + containers.len() + ), + OutputLevel::Normal, + ); + } + + for container_id in &containers { + // Stop the container gracefully with short timeout + let _ = AsyncCommand::new(&self.container_tool) + .args(["stop", "-t", "1", container_id]) + .output() + .await; + + // Remove the container + let output = AsyncCommand::new(&self.container_tool) + .args(["rm", "-f", container_id]) + .output() + .await + .with_context(|| format!("Failed to remove explorer container {container_id}"))?; + + if self.verbose && output.status.success() { + print_info( + &format!( + "Stopped explorer container: {}", + &container_id[..12.min(container_id.len())] + ), + OutputLevel::Normal, + ); + } + } + + Ok(containers.len()) + } + + /// Remove a docker volume, automatically stopping any VS Code explorer containers first. + /// Unlike force_remove_volume, this only stops known safe containers (explorer containers) + /// and will still fail if other containers are using the volume. 
+ pub async fn remove_volume_with_explorer_cleanup(&self, volume_name: &str) -> Result<()> { + // First, stop any VS Code explorer containers that might be using this volume + let stopped = self.stop_explorer_containers(volume_name).await?; + + if stopped > 0 && self.verbose { + print_info( + &format!("Stopped {stopped} VS Code explorer container(s) before volume removal"), + OutputLevel::Normal, + ); + } + + // Now try to remove the volume + self.remove_volume(volume_name).await + } } /// Information about a docker volume diff --git a/tests/fixtures/configs/complex.yaml b/tests/fixtures/configs/complex.yaml index 6a5bd07..ec5c131 100644 --- a/tests/fixtures/configs/complex.yaml +++ b/tests/fixtures/configs/complex.yaml @@ -5,11 +5,11 @@ distro: sdk: image: ghcr.io/avocado-framework/avocado-sdk:v1.0.0 version: 1.0.0 -runtime: +runtimes: default: target: aarch64-unknown-linux-gnu board: raspberry-pi-4 -ext: +extensions: web-server: types: - sysext diff --git a/tests/fixtures/configs/external-config.yaml b/tests/fixtures/configs/external-config.yaml index e8d20f6..6c87ccf 100644 --- a/tests/fixtures/configs/external-config.yaml +++ b/tests/fixtures/configs/external-config.yaml @@ -1,11 +1,11 @@ src_dir: . 
-ext: +extensions: external-extension: types: - sysext packages: - curl - dependencies: + packages: nested-dep: ext: nested-extension config: nested-config.yaml diff --git a/tests/fixtures/configs/minimal.yaml b/tests/fixtures/configs/minimal.yaml index df5f59e..a80cdd7 100644 --- a/tests/fixtures/configs/minimal.yaml +++ b/tests/fixtures/configs/minimal.yaml @@ -4,7 +4,7 @@ distro: version: 0.1.0 sdk: image: ghcr.io/avocado-framework/avocado-sdk:latest -runtime: +runtimes: default: target: x86_64-unknown-linux-gnu -ext: {} +extensions: {} diff --git a/tests/fixtures/configs/nested-config.yaml b/tests/fixtures/configs/nested-config.yaml index 90c69cd..391593a 100644 --- a/tests/fixtures/configs/nested-config.yaml +++ b/tests/fixtures/configs/nested-config.yaml @@ -1,5 +1,5 @@ src_dir: . -ext: +extensions: nested-extension: types: - sysext diff --git a/tests/fixtures/configs/with-both-extensions.yaml b/tests/fixtures/configs/with-both-extensions.yaml index a548fcf..f45c7c3 100644 --- a/tests/fixtures/configs/with-both-extensions.yaml +++ b/tests/fixtures/configs/with-both-extensions.yaml @@ -1,10 +1,10 @@ default_target: qemux86-64 sdk: image: ghcr.io/avocado-framework/avocado-sdk:latest -runtime: +runtimes: default: target: x86_64-unknown-linux-gnu -ext: +extensions: test-both: sysext: true confext: true diff --git a/tests/fixtures/configs/with-confext.yaml b/tests/fixtures/configs/with-confext.yaml index 7fc5b69..20c548d 100644 --- a/tests/fixtures/configs/with-confext.yaml +++ b/tests/fixtures/configs/with-confext.yaml @@ -1,10 +1,10 @@ default_target: qemux86-64 sdk: image: ghcr.io/avocado-framework/avocado-sdk:latest -runtime: +runtimes: default: target: x86_64-unknown-linux-gnu -ext: +extensions: test-confext: types: - confext diff --git a/tests/fixtures/configs/with-external-extensions.yaml b/tests/fixtures/configs/with-external-extensions.yaml index f079526..8dd4bd5 100644 --- a/tests/fixtures/configs/with-external-extensions.yaml +++ 
b/tests/fixtures/configs/with-external-extensions.yaml @@ -2,13 +2,13 @@ sdk: image: registry.fedoraproject.org/fedora-toolbox:40 repo_url: https://mirrors.fedoraproject.org/metalink?repo=fedora-40&arch=x86_64 repo_release: '40' -ext: +extensions: main-extension: types: - sysext packages: - nginx - dependencies: + packages: external-dep: ext: external-extension config: external-config.yaml diff --git a/tests/fixtures/configs/with-interpolation.yaml b/tests/fixtures/configs/with-interpolation.yaml index 4e90a8c..61c15f3 100644 --- a/tests/fixtures/configs/with-interpolation.yaml +++ b/tests/fixtures/configs/with-interpolation.yaml @@ -15,29 +15,29 @@ nested: reference_nested: "{{ config.nested.value }}" -runtime: +runtimes: dev: # Test avocado.target interpolation target: "{{ avocado.target }}" - dependencies: + packages: # Test multiple interpolation types base-pkg: "{{ config.base_image }}" target-pkg: "avocado-os-{{ avocado.target }}" env-pkg: "{{ env.TEST_PKG_ENV_VAR_INTERP }}" prod: - dependencies: + packages: # Test combined interpolation combined: "{{ config.base_image }}-{{ avocado.target }}" sdk: # Test distro interpolation via config context image: "docker.io/avocadolinux/sdk:{{ config.distro.channel }}" - dependencies: + packages: avocado-sdk-toolchain: "{{ config.distro.version }}" nativesdk-avocado-ext-dev: "*" -ext: +extensions: test-ext: types: - sysext diff --git a/tests/fixtures/configs/with-nested-target-config.yaml b/tests/fixtures/configs/with-nested-target-config.yaml index 9d3f71d..2e638fc 100644 --- a/tests/fixtures/configs/with-nested-target-config.yaml +++ b/tests/fixtures/configs/with-nested-target-config.yaml @@ -7,10 +7,10 @@ distro: version: 0.1.0 sdk: image: ghcr.io/avocado-framework/avocado-sdk:latest -runtime: +runtimes: default: target: x86_64-unknown-linux-gnu -ext: +extensions: avocado-ext-webkit: version: 1.0.0 release: r0 diff --git a/tests/fixtures/configs/with-overlay-merge.yaml b/tests/fixtures/configs/with-overlay-merge.yaml 
index baa6bd1..0eda6a2 100644 --- a/tests/fixtures/configs/with-overlay-merge.yaml +++ b/tests/fixtures/configs/with-overlay-merge.yaml @@ -1,10 +1,10 @@ default_target: qemux86-64 sdk: image: ghcr.io/avocado-framework/avocado-sdk:latest -runtime: +runtimes: default: target: x86_64-unknown-linux-gnu -ext: +extensions: peridio: types: - sysext diff --git a/tests/fixtures/configs/with-overlay-opaque.yaml b/tests/fixtures/configs/with-overlay-opaque.yaml index e05db37..6835780 100644 --- a/tests/fixtures/configs/with-overlay-opaque.yaml +++ b/tests/fixtures/configs/with-overlay-opaque.yaml @@ -1,10 +1,10 @@ default_target: qemux86-64 sdk: image: ghcr.io/avocado-framework/avocado-sdk:latest -runtime: +runtimes: default: target: x86_64-unknown-linux-gnu -ext: +extensions: peridio: types: - sysext diff --git a/tests/fixtures/configs/with-overlay.yaml b/tests/fixtures/configs/with-overlay.yaml index 35a97a1..fcea122 100644 --- a/tests/fixtures/configs/with-overlay.yaml +++ b/tests/fixtures/configs/with-overlay.yaml @@ -1,14 +1,14 @@ default_target: qemux86-64 sdk: image: ghcr.io/avocado-framework/avocado-sdk:latest -runtime: +runtimes: default: target: x86_64-unknown-linux-gnu -ext: +extensions: peridio: types: - sysext - confext overlay: peridio - dependencies: + packages: curl: '*' diff --git a/tests/fixtures/configs/with-signing-keys.yaml b/tests/fixtures/configs/with-signing-keys.yaml index 2581319..f29ebaa 100644 --- a/tests/fixtures/configs/with-signing-keys.yaml +++ b/tests/fixtures/configs/with-signing-keys.yaml @@ -8,7 +8,7 @@ signing_keys: - my-production-key: abc123def456abc123def456abc123def456abc123def456abc123def456abc1 - backup-key: 789012fedcba789012fedcba789012fedcba789012fedcba789012fedcba7890 -runtime: +runtimes: default: target: x86_64-unknown-linux-gnu dev: diff --git a/tests/fixtures/configs/with-sysext.yaml b/tests/fixtures/configs/with-sysext.yaml index a1e29d1..f6c951f 100644 --- a/tests/fixtures/configs/with-sysext.yaml +++ 
b/tests/fixtures/configs/with-sysext.yaml @@ -1,10 +1,10 @@ default_target: qemux86-64 sdk: image: ghcr.io/avocado-framework/avocado-sdk:latest -runtime: +runtimes: default: target: x86_64-unknown-linux-gnu -ext: +extensions: test-sysext: types: - sysext diff --git a/tests/fixtures/configs/with-users.yaml b/tests/fixtures/configs/with-users.yaml index d046f77..c99a213 100644 --- a/tests/fixtures/configs/with-users.yaml +++ b/tests/fixtures/configs/with-users.yaml @@ -1,10 +1,10 @@ default_target: qemux86-64 sdk: image: ghcr.io/avocado-framework/avocado-sdk:latest -runtime: +runtimes: default: target: x86_64-unknown-linux-gnu -ext: +extensions: avocado-dev: types: - sysext diff --git a/tests/interpolation.rs b/tests/interpolation.rs index 9dfea1f..b6caff4 100644 --- a/tests/interpolation.rs +++ b/tests/interpolation.rs @@ -25,7 +25,7 @@ fn test_env_var_interpolation() { let config = avocado_cli::utils::config::Config::load(&config_path).unwrap(); // Verify that runtime dev dependencies include interpolated env var - let runtime = config.runtime.as_ref().unwrap(); + let runtime = config.runtimes.as_ref().unwrap(); let dev = runtime.get("dev").unwrap(); if let Some(deps) = &dev.dependencies { @@ -53,7 +53,7 @@ fn test_missing_env_var_warning() { // Should succeed but replace with empty string let config = avocado_cli::utils::config::Config::load(&config_path).unwrap(); - let runtime = config.runtime.as_ref().unwrap(); + let runtime = config.runtimes.as_ref().unwrap(); let dev = runtime.get("dev").unwrap(); if let Some(deps) = &dev.dependencies { @@ -102,7 +102,7 @@ fn test_avocado_target_from_env() { let config_path = get_interpolation_test_config(); let config = avocado_cli::utils::config::Config::load(&config_path).unwrap(); - let runtime = config.runtime.as_ref().unwrap(); + let runtime = config.runtimes.as_ref().unwrap(); let dev = runtime.get("dev").unwrap(); assert_eq!(dev.target.as_ref().unwrap(), "aarch64-unknown-linux-gnu"); @@ -118,7 +118,7 @@ fn 
test_avocado_target_from_config() { let config_path = get_interpolation_test_config(); let config = avocado_cli::utils::config::Config::load(&config_path).unwrap(); - let runtime = config.runtime.as_ref().unwrap(); + let runtime = config.runtimes.as_ref().unwrap(); let dev = runtime.get("dev").unwrap(); // Should use default_target from config @@ -132,7 +132,7 @@ fn test_avocado_target_unavailable() { // Create a test config without default_target let test_yaml = r#" -runtime: +runtimes: dev: target: "{{ avocado.target }}" "#; @@ -141,7 +141,7 @@ runtime: avocado_cli::utils::interpolation::interpolate_config(&mut parsed, None).unwrap(); // Should leave template as-is - let runtime = parsed.get("runtime").unwrap(); + let runtime = parsed.get("runtimes").unwrap(); let dev = runtime.get("dev").unwrap(); let target = dev.get("target").unwrap().as_str().unwrap(); @@ -189,7 +189,7 @@ fn test_multiple_interpolation_types() { let config_path = get_interpolation_test_config(); let config = avocado_cli::utils::config::Config::load(&config_path).unwrap(); - let runtime = config.runtime.as_ref().unwrap(); + let runtime = config.runtimes.as_ref().unwrap(); let dev = runtime.get("dev").unwrap(); if let Some(deps) = &dev.dependencies { @@ -224,7 +224,7 @@ fn test_combined_interpolation() { let config_path = get_interpolation_test_config(); let config = avocado_cli::utils::config::Config::load(&config_path).unwrap(); - let runtime = config.runtime.as_ref().unwrap(); + let runtime = config.runtimes.as_ref().unwrap(); let prod = runtime.get("prod").unwrap(); if let Some(deps) = &prod.dependencies { @@ -276,7 +276,7 @@ fn test_config_distro_interpolation_in_sdk() { ); // SDK dependencies should use config.distro.version interpolation - let deps = sdk.dependencies.as_ref().unwrap(); + let deps = sdk.packages.as_ref().unwrap(); let toolchain_version = deps.get("avocado-sdk-toolchain").unwrap(); assert_eq!(toolchain_version.as_str().unwrap(), "0.1.0"); } diff --git 
a/tests/target_precedence.rs b/tests/target_precedence.rs index b5fe6bb..09721a7 100644 --- a/tests/target_precedence.rs +++ b/tests/target_precedence.rs @@ -16,14 +16,18 @@ fn test_target_precedence_order() { env::remove_var("AVOCADO_TARGET"); let config_content = r#" -default_target = "config-target" -supported_targets = ["cli-target", "env-target", "config-target"] - -[sdk] -image = "ghcr.io/avocado-framework/avocado-sdk:latest" - -[runtime.dev] -target = "qemux86-64" +default_target: "config-target" +supported_targets: + - cli-target + - env-target + - config-target + +sdk: + image: "ghcr.io/avocado-framework/avocado-sdk:latest" + +runtimes: + dev: + target: "qemux86-64" "#; let mut config_file = NamedTempFile::new().unwrap(); @@ -85,11 +89,12 @@ target = "qemux86-64" #[serial] fn test_target_error_when_none_specified() { let config_content = r#" -[sdk] -image = "ghcr.io/avocado-framework/avocado-sdk:latest" +sdk: + image: "ghcr.io/avocado-framework/avocado-sdk:latest" -[runtime.dev] -target = "qemux86-64" +runtimes: + dev: + target: "qemux86-64" "#; let mut config_file = NamedTempFile::new().unwrap(); @@ -124,13 +129,15 @@ fn test_avocado_target_environment_variable() { env::remove_var("AVOCADO_TARGET"); let config_content = r#" -supported_targets = ["test-env-target"] +supported_targets: + - test-env-target -[sdk] -image = "ghcr.io/avocado-framework/avocado-sdk:latest" +sdk: + image: "ghcr.io/avocado-framework/avocado-sdk:latest" -[runtime.dev] -target = "qemux86-64" +runtimes: + dev: + target: "qemux86-64" "#; let mut config_file = NamedTempFile::new().unwrap(); @@ -206,17 +213,21 @@ fn test_all_commands_accept_target_flag() { // Test that major commands accept --target flag without error let config_content = r#" -default_target = "qemux86-64" -supported_targets = ["test", "qemux86-64"] +default_target: "qemux86-64" +supported_targets: + - test + - qemux86-64 -[sdk] -image = "ghcr.io/avocado-framework/avocado-sdk:latest" +sdk: + image: 
"ghcr.io/avocado-framework/avocado-sdk:latest" -[runtime.default] -target = "x86_64-unknown-linux-gnu" +runtimes: + default: + target: "x86_64-unknown-linux-gnu" -[ext.test-ext] -sysext = true +extensions: + test-ext: + sysext: true "#; let mut config_file = NamedTempFile::new().unwrap(); @@ -274,14 +285,16 @@ fn test_sdk_target_validation_supported() { env::remove_var("AVOCADO_TARGET"); let config_content = r#" -default_target = "qemux86-64" -supported_targets = ["qemux86-64"] +default_target: "qemux86-64" +supported_targets: + - qemux86-64 -[sdk] -image = "ghcr.io/avocado-framework/avocado-sdk:latest" +sdk: + image: "ghcr.io/avocado-framework/avocado-sdk:latest" -[runtime.dev] -target = "qemux86-64" +runtimes: + dev: + target: "qemux86-64" "#; let mut config_file = NamedTempFile::new().unwrap(); @@ -313,14 +326,16 @@ fn test_sdk_target_validation_unsupported() { env::remove_var("AVOCADO_TARGET"); let config_content = r#" -default_target = "unsupported-target" -supported_targets = ["qemux86-64"] +default_target: "unsupported-target" +supported_targets: + - qemux86-64 -[sdk] -image = "ghcr.io/avocado-framework/avocado-sdk:latest" +sdk: + image: "ghcr.io/avocado-framework/avocado-sdk:latest" -[runtime.dev] -target = "qemux86-64" +runtimes: + dev: + target: "qemux86-64" "#; let mut config_file = NamedTempFile::new().unwrap();