-
Notifications
You must be signed in to change notification settings - Fork 154
install/bootupd: chroot to deployment #1816
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: main
Are you sure you want to change the base?
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -8,6 +8,7 @@ use fn_error_context::context; | |
|
|
||
| use bootc_blockdev::{Partition, PartitionTable}; | ||
| use bootc_mount as mount; | ||
| use rustix::mount::UnmountFlags; | ||
|
|
||
| use crate::bootc_composefs::boot::mount_esp; | ||
| use crate::{discoverable_partition_specification, utils}; | ||
|
|
@@ -50,22 +51,60 @@ pub(crate) fn install_via_bootupd( | |
| // bootc defaults to only targeting the platform boot method. | ||
| let bootupd_opts = (!configopts.generic_image).then_some(["--update-firmware", "--auto"]); | ||
|
|
||
| let abs_deployment_path = deployment_path.map(|v| rootfs.join(v)); | ||
| let src_root_arg = if let Some(p) = abs_deployment_path.as_deref() { | ||
| vec!["--src-root", p.as_str()] | ||
| let abs_deployment_path = deployment_path.map(|deploy| rootfs.join(deploy)); | ||
| // When not running inside the target container (through `--src-imgref`) we chroot | ||
| // into the deployment before running bootupd. This makes sure we use binaries | ||
| // from the target image rather than the buildroot | ||
| let bind_mount_dirs = ["/dev", "/run", "/proc", "/sys"]; | ||
| let chroot_args = if let Some(target_root) = abs_deployment_path.as_deref() { | ||
| tracing::debug!("Setting up bind-mounts before chrooting to the target deployment"); | ||
| for src in bind_mount_dirs { | ||
| let dest = target_root | ||
| // joining an absolute path | ||
| // makes it replace self, so we strip the prefix | ||
| .join_os(src.strip_prefix("/").unwrap()); | ||
| tracing::debug!("bind mounting {}", dest.display()); | ||
| rustix::mount::mount_bind_recursive(src, dest)?; | ||
| } | ||
|
Comment on lines
+61
to
+68
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. If the command execution fails and returns early, the bind mounts are never cleaned up. To fix this, you should ensure that cleanup is always performed. A common pattern in Rust is to use a guard struct with a `Drop` implementation. An alternative is to restructure the code to ensure the cleanup block is always reached. For example, you could wrap the mounting and command execution in a closure that returns a `Result`. Example structure: let mut mounted_dirs = Vec::new();
let result = (|| {
// Mount loop, add to mounted_dirs on success
for src in bind_mount_dirs {
// ... mount logic ...
mounted_dirs.push(src);
}
// Execute command
// ...
Ok(())
})();
// Cleanup loop over mounted_dirs
// ...
// Return combined resultThis is a significant refactoring, but it's crucial for resource safety. |
||
| // Append the `bootupctl` command, it will be passed as | ||
| // an argument to chroot | ||
| vec![target_root.as_str(), "bootupctl"] | ||
| } else { | ||
| vec![] | ||
| }; | ||
|
|
||
| let devpath = device.path(); | ||
| println!("Installing bootloader via bootupd"); | ||
| Command::new("bootupctl") | ||
| let mut bootupctl = if abs_deployment_path.is_some() { | ||
| Command::new("chroot") | ||
| } else { | ||
| Command::new("bootupctl") | ||
| }; | ||
| let install_result = bootupctl | ||
| .args(chroot_args) | ||
| .args(["backend", "install", "--write-uuid"]) | ||
| .args(verbose) | ||
| .args(bootupd_opts.iter().copied().flatten()) | ||
| .args(src_root_arg) | ||
| .args(["--device", devpath.as_str(), rootfs.as_str()]) | ||
| .log_debug() | ||
| .run_inherited_with_cmd_context() | ||
| .run_inherited_with_cmd_context(); | ||
|
|
||
| // Clean up the mounts after ourselves | ||
|
Collaborator
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. We could entirely avoid the need to clean up by using the new mount API to get file descriptors instead, and then use https://docs.rs/cap-std-ext/latest/cap_std_ext/cmdext/trait.CapStdExtCommandExt.html#tymethod.cwd_dir with those file descriptors. |
||
| if let Some(target_root) = abs_deployment_path { | ||
| let mut unmount_res = Ok(()); | ||
| for dir in bind_mount_dirs { | ||
| let mount = target_root | ||
| .join(dir.strip_prefix("/").unwrap()) | ||
| .into_std_path_buf(); | ||
| if let Err(e) = rustix::mount::unmount(&mount, UnmountFlags::DETACH) { | ||
| tracing::warn!("Error unmounting {}: {e}", mount.display()); | ||
| unmount_res = Err(e.into()); | ||
| } | ||
|
Comment on lines
+99
to
+102
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. In the case of multiple unmount failures, this logic will overwrite previous errors, and only the last error will be propagated. To ensure the first error is preserved and reported, you should only set `unmount_res` when it is still `Ok`. For example: if let Err(e) = rustix::mount::unmount(&mount, UnmountFlags::DETACH) {
tracing::warn!("Error unmounting {}: {e}", mount.display());
if unmount_res.is_ok() {
unmount_res = Err(e.into());
}
} |
||
| } | ||
| install_result.and(unmount_res) | ||
| } else { | ||
| install_result | ||
| } | ||
| } | ||
|
|
||
| #[context("Installing bootloader")] | ||
|
|
||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
There are other threads where we talked about offering a
`bootc install mount` as a general ability to mount a deployment outside of booting it; were we to do that it would make a lot of sense for this code to use it. In ostree we resisted doing that for a long time but eventually did just internally for selinux, see https://github.com/ostreedev/ostree/blob/c6f0b5b2bc26b22fbceee0dc28a0f31349c28d41/src/libostree/ostree-sysroot-deploy.c#L3308
On that topic, it'd be a lot cleaner even here to use a more proper containerization than just setting up the mounts. It's a bit tricky though because we actually do need to e.g. pass through all of
`/dev` and `/sys` here (i.e. `--privileged` in docker/podman terms) in order to update the ESP if desired. I haven't looked at which of bwrap/{runc,crun}/nspawn/podman would make the most sense for this use case.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
I am not sure what you mean by this comment. Do you want to block this change until there are more proper containerization helpers in bootc, or are you just making a note that this should be revisited later on?