diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 515ce413..8ae1f495 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -2,6 +2,10 @@ # Set a tag, then push it to trigger the release workflow: # git tag v0.1.0 # git push origin v0.1.0 +# +# Prerequisites: +# - Commits must be signed by a maintainer listed in .auths/allowed_signers +# - No signing secrets needed — CI verifies commits, then signs artifacts with ephemeral keys name: Release @@ -14,7 +18,19 @@ permissions: contents: write jobs: + verify: + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Verify commit signatures + uses: auths-dev/verify@v1 + build: + needs: verify strategy: fail-fast: false matrix: @@ -97,14 +113,24 @@ jobs: $hash = (Get-FileHash ${{ matrix.asset_name }}${{ matrix.ext }} -Algorithm SHA256).Hash.ToLower() "$hash ${{ matrix.asset_name }}${{ matrix.ext }}" | Out-File -Encoding ascii ${{ matrix.asset_name }}${{ matrix.ext }}.sha256 - - name: Sign artifact (Unix) + - name: Sign artifact (ephemeral, Unix) if: matrix.ext == '.tar.gz' - uses: auths-dev/sign@v1 - with: - token: ${{ secrets.AUTHS_CI_TOKEN }} - files: ${{ matrix.asset_name }}${{ matrix.ext }} - verify: true - note: 'GitHub Actions release — ${{ github.ref_name }}' + run: | + FILE="${{ matrix.asset_name }}${{ matrix.ext }}" + ./staging/auths artifact sign "$FILE" \ + --ci \ + --commit "${{ github.sha }}" \ + --note "Release ${{ github.ref_name }}" + + - name: Sign artifact (ephemeral, Windows) + if: matrix.ext == '.zip' + shell: pwsh + run: | + $file = "${{ matrix.asset_name }}${{ matrix.ext }}" + .\staging\auths.exe artifact sign $file ` + --ci ` + --commit $env:GITHUB_SHA ` + --note "Release ${{ github.ref_name }}" - name: Upload artifact uses: actions/upload-artifact@v4 @@ -142,7 +168,7 @@ jobs: for asset in auths-macos-aarch64 auths-linux-x86_64 auths-linux-aarch64; do curl -sL 
"${BASE}/${asset}.tar.gz.auths.json" -o "${asset}.auths.json" done - extract_hash() { python3 -c "import json; d=json.load(open('$1')); print(d['payload']['digest']['hex'])"; } + extract_hash() { jq -r '.payload.digest.hex' "$1"; } { echo "version=${VERSION}" echo "macos_aarch64=$(extract_hash auths-macos-aarch64.auths.json)" @@ -207,6 +233,6 @@ jobs: body: | Automated formula update from [release v${{ steps.hashes.outputs.version }}](https://github.com/auths-dev/auths/releases/tag/v${{ steps.hashes.outputs.version }}). - SHA256 hashes extracted from `.auths.json` attestation files (dogfooding). + SHA256 hashes extracted from `.auths.json` provenance files. branch: "update-${{ steps.hashes.outputs.version }}" base: main diff --git a/.github/workflows/sign-commits.yml b/.github/workflows/sign-commits.yml deleted file mode 100644 index 6dc80cb9..00000000 --- a/.github/workflows/sign-commits.yml +++ /dev/null @@ -1,26 +0,0 @@ -name: Sign Commits - -on: - push: - branches: [main] - paths-ignore: - - '**.md' - - 'docs/**' - - 'LICENSE*' - - '.gitignore' - -permissions: - contents: write - -jobs: - sign-commits: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - with: - fetch-depth: 0 - - - uses: auths-dev/sign@v1 - with: - token: ${{ secrets.AUTHS_CI_TOKEN }} - commits: 'HEAD~1..HEAD' diff --git a/Cargo.lock b/Cargo.lock index 90c7aee1..d41116e9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -343,6 +343,7 @@ dependencies = [ "auths-index", "auths-infra-git", "auths-infra-http", + "auths-infra-rekor", "auths-keri", "auths-pairing-daemon", "auths-pairing-protocol", @@ -407,6 +408,7 @@ dependencies = [ "auths-crypto", "auths-keri", "auths-pairing-protocol", + "auths-transparency", "auths-verifier", "axum", "axum-server", @@ -583,6 +585,30 @@ dependencies = [ "zeroize", ] +[[package]] +name = "auths-infra-rekor" +version = "0.1.0" +dependencies = [ + "async-trait", + "auths-core", + "auths-crypto", + "auths-sdk", + "auths-transparency", + "auths-verifier", + 
"base64", + "chrono", + "hex", + "http 1.4.0", + "reqwest", + "ring", + "serde", + "serde_json", + "sha2", + "thiserror 2.0.18", + "tokio", + "tracing", +] + [[package]] name = "auths-jwt" version = "0.1.0" diff --git a/Cargo.toml b/Cargo.toml index cdefe32a..802bab00 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -13,6 +13,7 @@ members = [ "crates/auths-sdk", "crates/auths-infra-git", "crates/auths-infra-http", + "crates/auths-infra-rekor", "crates/auths-storage", "crates/auths-transparency", "crates/auths-keri", diff --git a/README.md b/README.md index 06a46197..3755486c 100644 --- a/README.md +++ b/README.md @@ -2,16 +2,26 @@ [![CI](https://github.com/auths-dev/auths/actions/workflows/ci.yml/badge.svg)](https://github.com/auths-dev/auths/actions/workflows/ci.yml) [![Verify Commits](https://github.com/auths-dev/auths/actions/workflows/verify-commits.yml/badge.svg)](https://github.com/auths-dev/auths/actions/workflows/verify-commits.yml?query=branch%3Amain+event%3Apush) -[![Sign Commits](https://github.com/auths-dev/auths/actions/workflows/sign-commits.yml/badge.svg)](https://github.com/auths-dev/auths/actions/workflows/sign-commits.yml?query=branch%3Amain) + [![Verified with Auths](https://img.shields.io/badge/identity-verified%20with%20auths-brightgreen)](https://auths.dev) -Decentralized identity for individuals, AI agents, and their organizations. +Cryptographic identity and signing for software supply chains. + +No central authority. No CA. No server. Just Git and cryptography. -One identity, multiple devices, Git-native storage. +## Quick Start + +```bash +brew tap auths-dev/auths-cli +brew install auths +auths init # create your identity +auths sign ./release.tar.gz # sign an artifact +auths verify ./release.tar.gz # verify it +``` ## Install @@ -28,7 +38,7 @@ cargo install --git https://github.com/auths-dev/auths.git auths_cli This installs `auths`, `auths-sign`, and `auths-verify`. -## Quick Start +## Walkthrough ### 1. 
Initialize your identity (30 seconds) @@ -85,66 +95,6 @@ That's it. Your commits are now cryptographically signed with your decentralized --- -## What can you do with Auths? - -**Link multiple devices to one identity** - -```bash -# On your laptop -auths device link --device-did did:key:z6Mk... - -# Now both devices can sign as the same identity -``` - -**Revoke a compromised device** - -```bash -auths device revoke --device-did did:key:z6Mk... -``` - -**Verify any attestation** - -```bash -auths verify attestation.json -``` - -**Sync allowed-signers for Git verification** - -```bash -auths signers sync -``` - ---- - -## Agent & Workload Identity - -Auths treats AI agents and CI/CD runners as first-class identity holders — not borrowers of human credentials. - -**Give an agent its own identity:** - -```bash -# Create a dedicated agent identity -auths init --profile agent - -# Issue a scoped, time-limited attestation from a human to the agent -auths attestation issue \ - --subject did:key:z6MkAgent... \ - --signer-type Agent \ - --capabilities "sign:commit,deploy:staging" \ - --delegated-by did:keri:EHuman... \ - --expires-in 24h -``` - -The agent now holds a cryptographic attestation chain traceable back to the human who authorized it. Every action the agent takes is signed under its own key, scoped to only the capabilities it was granted, and verifiable by anyone — offline, without contacting a central authority. - -**How delegation works:** A human creates a signed attestation granting specific capabilities to an agent. The agent can further delegate a subset of those capabilities to sub-agents. Verifiers walk the chain back to the human sponsor. Capabilities can only narrow at each hop, never widen. See the [Delegation Guide](docs/getting-started/delegation.md) for a full walkthrough. 
- -**Cloud integration via OIDC:** The [OIDC bridge](docs/architecture/oidc-bridge.md) verifies an agent's attestation chain and issues a standard JWT consumable by AWS STS, GCP Workload Identity, and Azure AD — no cloud provider changes required. - -**MCP compatibility:** Auths attestations serve as the cryptographic identity layer behind MCP's OAuth-based authorization, providing verifiable delegation chains from human principals to AI agents. - ---- - ## How it works Auths stores your identity and device attestations in a Git repository (`~/.auths` by default). Each device link is a cryptographically signed attestation stored as a Git ref. diff --git a/crates/auths-cli/Cargo.toml b/crates/auths-cli/Cargo.toml index 8cdfbd40..3b17ae55 100644 --- a/crates/auths-cli/Cargo.toml +++ b/crates/auths-cli/Cargo.toml @@ -44,6 +44,7 @@ auths-telemetry = { workspace = true, features = ["sink-http"] } auths-verifier = { workspace = true, features = ["native"] } auths-infra-git.workspace = true auths-infra-http.workspace = true +auths-infra-rekor = { path = "../auths-infra-rekor" } auths-utils.workspace = true tokio = { version = "1", features = ["rt-multi-thread", "macros", "time"] } ring.workspace = true diff --git a/crates/auths-cli/src/cli.rs b/crates/auths-cli/src/cli.rs index 496d2317..1044c1d3 100644 --- a/crates/auths-cli/src/cli.rs +++ b/crates/auths-cli/src/cli.rs @@ -9,7 +9,7 @@ use crate::commands::approval::ApprovalCommand; use crate::commands::artifact::ArtifactCommand; use crate::commands::audit::AuditCommand; use crate::commands::auth::AuthCommand; -use crate::commands::ci::CiCommand; + use crate::commands::commit::CommitCmd; use crate::commands::completions::CompletionsCommand; use crate::commands::config::ConfigCommand; @@ -103,9 +103,6 @@ pub enum RootCommand { Config(ConfigCommand), Completions(CompletionsCommand), - // ── CI/CD ── - Ci(CiCommand), - // ── Advanced (visible via --help-all) ── #[command(hide = true)] Publish(PublishCommand), diff --git 
a/crates/auths-cli/src/commands/artifact/batch_sign.rs b/crates/auths-cli/src/commands/artifact/batch_sign.rs deleted file mode 100644 index f4759648..00000000 --- a/crates/auths-cli/src/commands/artifact/batch_sign.rs +++ /dev/null @@ -1,143 +0,0 @@ -//! Handler for `auths artifact batch-sign`. - -use anyhow::{Context, Result}; -use std::path::PathBuf; -use std::sync::Arc; - -use auths_sdk::core_config::EnvironmentConfig; -use auths_sdk::signing::PassphraseProvider; -use auths_sdk::workflows::ci::batch_attest::{ - BatchEntry, BatchEntryResult, BatchSignConfig, batch_sign_artifacts, default_attestation_path, -}; - -use super::file::FileArtifact; -use crate::factories::storage::build_auths_context; - -/// Execute the `artifact batch-sign` command. -/// -/// Args: -/// * `pattern`: Glob pattern matching artifact files. -/// * `device_key`: Device key alias for signing. -/// * `key`: Optional identity key alias. -/// * `attestation_dir`: Optional directory to collect attestation files. -/// * `expires_in`: Optional TTL in seconds. -/// * `note`: Optional note for attestations. -/// * `repo_opt`: Optional identity repo path. -/// * `passphrase_provider`: Passphrase provider for key decryption. -/// * `env_config`: Environment configuration. 
-/// -/// Usage: -/// ```ignore -/// handle_batch_sign("dist/*.tar.gz", "ci-device", None, Some(".auths/releases"), ...)?; -/// ``` -#[allow(clippy::too_many_arguments)] -pub fn handle_batch_sign( - pattern: &str, - device_key: &str, - key: Option<&str>, - attestation_dir: Option, - expires_in: Option, - note: Option, - commit_sha: Option, - repo_opt: Option, - passphrase_provider: Arc, - env_config: &EnvironmentConfig, -) -> Result<()> { - let repo_path = auths_sdk::storage_layout::resolve_repo_path(repo_opt)?; - let ctx = build_auths_context(&repo_path, env_config, Some(passphrase_provider))?; - - let paths = expand_glob(pattern)?; - if paths.is_empty() { - println!("No files match pattern: {}", pattern); - return Ok(()); - } - - let entries: Vec = paths - .iter() - .map(|p| BatchEntry { - source: Arc::new(FileArtifact::new(p)), - output_path: default_attestation_path(p), - }) - .collect(); - - println!("Signing {} artifact(s)...", entries.len()); - - let config = BatchSignConfig { - entries, - device_key: device_key.to_string(), - identity_key: key.map(|s| s.to_string()), - expires_in, - note, - commit_sha, - }; - - let result = batch_sign_artifacts(config, &ctx) - .with_context(|| format!("Batch signing failed for pattern: {}", pattern))?; - - // Write attestation files and collect to directory (file I/O is CLI's job) - for entry in &result.results { - if let BatchEntryResult::Signed(s) = entry { - std::fs::write(&s.output_path, &s.attestation_json) - .with_context(|| format!("Failed to write {}", s.output_path.display()))?; - println!( - " Signed: {} (sha256:{})", - s.output_path.display(), - s.digest - ); - } - if let BatchEntryResult::Failed(f) = entry { - eprintln!(" FAILED: {}: {}", f.output_path.display(), f.error); - } - } - - if let Some(ref dir) = attestation_dir { - collect_to_dir(&result.results, dir)?; - println!("Collected attestations to: {}", dir.display()); - } - - println!( - "{} signed, {} failed", - result.signed_count(), - 
result.failed_count() - ); - - if result.failed_count() > 0 { - anyhow::bail!( - "{} of {} artifact(s) failed to sign", - result.failed_count(), - result.signed_count() + result.failed_count() - ); - } - - Ok(()) -} - -fn expand_glob(pattern: &str) -> Result> { - let paths: Vec = glob::glob(pattern) - .with_context(|| format!("Invalid glob pattern: {}", pattern))? - .filter_map(|entry| entry.ok()) - .filter(|p| p.is_file()) - .collect(); - Ok(paths) -} - -fn collect_to_dir(results: &[BatchEntryResult], dir: &std::path::Path) -> Result<()> { - std::fs::create_dir_all(dir) - .with_context(|| format!("Failed to create attestation directory: {}", dir.display()))?; - - for entry in results { - if let BatchEntryResult::Signed(s) = entry { - let filename = s - .output_path - .file_name() - .unwrap_or_default() - .to_string_lossy() - .to_string(); - let dst = dir.join(&filename); - std::fs::write(&dst, &s.attestation_json) - .with_context(|| format!("Failed to write {}", dst.display()))?; - } - } - - Ok(()) -} diff --git a/crates/auths-cli/src/commands/artifact/mod.rs b/crates/auths-cli/src/commands/artifact/mod.rs index c13a556c..f5c170de 100644 --- a/crates/auths-cli/src/commands/artifact/mod.rs +++ b/crates/auths-cli/src/commands/artifact/mod.rs @@ -1,4 +1,3 @@ -pub mod batch_sign; pub mod core; pub mod file; pub mod publish; @@ -9,7 +8,7 @@ use clap::{Args, Subcommand}; use std::path::{Path, PathBuf}; use std::sync::Arc; -use anyhow::{Result, bail}; +use anyhow::{Context, Result, bail}; use auths_sdk::core_config::EnvironmentConfig; use auths_sdk::signing::PassphraseProvider; use auths_sdk::signing::validate_commit_sha; @@ -82,6 +81,23 @@ pub enum ArtifactSubcommand { /// Do not embed any commit SHA in the attestation. #[arg(long, conflicts_with = "commit")] no_commit: bool, + + /// Use ephemeral CI signing (no keychain needed). Requires --commit. + #[arg(long)] + ci: bool, + + /// CI platform override when --ci is used outside a detected CI environment. 
+ #[arg(long, requires = "ci")] + ci_platform: Option, + + /// Transparency log to submit to (overrides default from trust config). + #[arg(long, value_name = "LOG_ID")] + log: Option, + + /// Skip transparency log submission (local testing only). + /// Produces an unlogged attestation that verifiers reject by default. + #[arg(long)] + allow_unlogged: bool, }, /// Sign and publish an artifact attestation to a registry. @@ -129,44 +145,6 @@ pub enum ArtifactSubcommand { no_commit: bool, }, - /// Sign multiple artifacts matching a glob pattern. - /// - /// Signs each file, writes `.auths.json` attestations, and optionally - /// collects them into a target directory. - BatchSign { - /// Glob pattern matching artifact files (e.g. "dist/*.tar.gz"). - #[arg(help = "Glob pattern matching artifact files to sign.")] - pattern: String, - - /// Local alias of the device key. - #[arg(long)] - device_key: Option, - - /// Local alias of the identity key. Omit for device-only CI signing. - #[arg(long)] - key: Option, - - /// Directory to collect attestation files into. - #[arg(long, value_name = "DIR")] - attestation_dir: Option, - - /// Duration in seconds until expiration. - #[arg(long = "expires-in", value_name = "N")] - expires_in: Option, - - /// Optional note to embed in each attestation. - #[arg(long)] - note: Option, - - /// Git commit SHA to embed in the attestation (auto-detected from HEAD if omitted). - #[arg(long, conflicts_with = "no_commit")] - commit: Option, - - /// Do not embed any commit SHA in the attestation. - #[arg(long, conflicts_with = "commit")] - no_commit: bool, - }, - /// Verify an artifact's signature against an Auths identity. Verify { /// Path to the artifact file to verify. @@ -199,6 +177,24 @@ pub enum ArtifactSubcommand { }, } +fn is_rate_limited(err: &auths_sdk::workflows::log_submit::LogSubmitError) -> bool { + matches!( + err, + auths_sdk::workflows::log_submit::LogSubmitError::LogError( + auths_sdk::ports::LogError::RateLimited { .. 
} + ) + ) +} + +fn rate_limit_secs(err: &auths_sdk::workflows::log_submit::LogSubmitError) -> u64 { + match err { + auths_sdk::workflows::log_submit::LogSubmitError::LogError( + auths_sdk::ports::LogError::RateLimited { retry_after_secs }, + ) => *retry_after_secs, + _ => 10, + } +} + /// Resolve the commit SHA from CLI flags. fn resolve_commit_sha_from_flags( commit: Option, @@ -231,27 +227,205 @@ pub fn handle_artifact( note, commit, no_commit, + ci, + ci_platform, + log, + allow_unlogged, } => { - let commit_sha = resolve_commit_sha_from_flags(commit, no_commit)?; - let resolved_alias = match device_key { - Some(alias) => alias, - None => crate::commands::key_detect::auto_detect_device_key( - repo_opt.as_deref(), + if ci { + // Ephemeral CI signing — no keychain, no passphrase + use auths_sdk::domains::signing::ci_env::{ + CiEnvironment, CiPlatform, detect_ci_environment, + }; + + let commit_sha = match commit { + Some(sha) => sha, + None => bail!("--ci requires --commit . Pass the commit SHA explicitly."), + }; + + let ci_env = match detect_ci_environment() { + Some(env) => env, + None => match ci_platform.as_deref() { + Some("local") => CiEnvironment { + platform: CiPlatform::Local, + workflow_ref: None, + run_id: None, + actor: None, + runner_os: None, + }, + Some(name) => CiEnvironment { + platform: CiPlatform::Generic, + workflow_ref: None, + run_id: None, + actor: None, + runner_os: Some(name.to_string()), + }, + None => bail!( + "No CI environment detected. If this is intentional (e.g., testing), \ + pass --ci-platform local. Otherwise run inside GitHub Actions, \ + GitLab CI, or a recognized CI runner." 
+ ), + }, + }; + + let ci_env_json = serde_json::to_value(&ci_env) + .map_err(|e| anyhow::anyhow!("Failed to serialize CI env: {}", e))?; + + let data = std::fs::read(&file) + .with_context(|| format!("Failed to read artifact {:?}", file))?; + let artifact_name = file.file_name().map(|n| n.to_string_lossy().to_string()); + + #[allow(clippy::disallowed_methods)] + let now = chrono::Utc::now(); + + let result = auths_sdk::domains::signing::service::sign_artifact_ephemeral( + now, + &data, + artifact_name, + commit_sha, + expires_in, + note, + Some(ci_env_json), + ) + .map_err(|e| anyhow::anyhow!("Ephemeral signing failed: {}", e))?; + + // Submit to transparency log (unless --allow-unlogged) + let transparency_json = if allow_unlogged { + eprintln!( + "WARNING: Signing without transparency log. \ + This artifact will not be verifiable against any log." + ); + None + } else { + // Parse the attestation to extract public key and signature + let attestation_value: serde_json::Value = + serde_json::from_str(&result.attestation_json) + .map_err(|e| anyhow::anyhow!("Failed to parse attestation: {e}"))?; + + let identity_sig_hex = attestation_value["identity_signature"] + .as_str() + .ok_or_else(|| anyhow::anyhow!("missing identity_signature"))?; + let sig_bytes = hex::decode(identity_sig_hex) + .map_err(|e| anyhow::anyhow!("invalid signature hex: {e}"))?; + + let device_pk_hex = attestation_value["device_public_key"] + .as_str() + .ok_or_else(|| anyhow::anyhow!("missing device_public_key"))?; + let pk_bytes = hex::decode(device_pk_hex) + .map_err(|e| anyhow::anyhow!("invalid public key hex: {e}"))?; + + let rt = tokio::runtime::Runtime::new() + .map_err(|e| anyhow::anyhow!("Failed to create async runtime: {e}"))?; + + // Build the transparency log client + let log_client: std::sync::Arc = + match log.as_deref() { + Some("sigstore-rekor") | None => std::sync::Arc::new( + auths_infra_rekor::RekorClient::public().map_err(|e| { + anyhow::anyhow!("Failed to create Rekor client: 
{e}") + })?, + ), + Some(other) => { + bail!("Unknown log '{}'. Available: sigstore-rekor", other) + } + }; + + let submit = || { + rt.block_on(auths_sdk::workflows::log_submit::submit_attestation_to_log( + result.attestation_json.as_bytes(), + &pk_bytes, + &sig_bytes, + log_client.as_ref(), + )) + }; + + let submission_result = match submit() { + Ok(bundle) => Ok(bundle), + Err(ref e) if is_rate_limited(e) => { + let secs = rate_limit_secs(e); + eprintln!("Rate limited by transparency log. Retrying in {secs}s..."); + std::thread::sleep(std::time::Duration::from_secs(secs)); + submit() + } + Err(e) => Err(e), + }; + + match submission_result { + Ok(bundle) => { + eprintln!( + " Logged to {} at index {}", + bundle.log_id, bundle.leaf_index + ); + Some( + serde_json::to_value(&bundle) + .map_err(|e| anyhow::anyhow!("Failed to serialize: {e}"))?, + ) + } + Err(e) => { + return Err(anyhow::anyhow!("Transparency log submission failed: {e}")); + } + } + }; + + // Build final .auths.json with optional transparency section + let final_json = if let Some(transparency) = transparency_json { + let mut attestation: serde_json::Value = + serde_json::from_str(&result.attestation_json) + .map_err(|e| anyhow::anyhow!("Failed to re-parse attestation: {e}"))?; + if let serde_json::Value::Object(ref mut map) = attestation { + map.insert("transparency".to_string(), transparency); + } + serde_json::to_string_pretty(&attestation) + .map_err(|e| anyhow::anyhow!("Failed to serialize final JSON: {e}"))? 
+ } else { + result.attestation_json.clone() + }; + + let output_path = sig_output.unwrap_or_else(|| { + let mut p = file.clone(); + let new_name = format!( + "{}.auths.json", + p.file_name().unwrap_or_default().to_string_lossy() + ); + p.set_file_name(new_name); + p + }); + + std::fs::write(&output_path, &final_json) + .with_context(|| format!("Failed to write signature to {:?}", output_path))?; + + println!( + "Signed {:?} -> {:?} (ephemeral CI key)", + file.file_name().unwrap_or_default(), + output_path + ); + println!(" RID: {}", result.rid); + println!(" Digest: sha256:{}", result.digest); + + Ok(()) + } else { + // Standard device-key signing + let commit_sha = resolve_commit_sha_from_flags(commit, no_commit)?; + let resolved_alias = match device_key { + Some(alias) => alias, + None => crate::commands::key_detect::auto_detect_device_key( + repo_opt.as_deref(), + env_config, + )?, + }; + sign::handle_sign( + &file, + sig_output, + key.as_deref(), + &resolved_alias, + expires_in, + note, + commit_sha, + repo_opt, + passphrase_provider, env_config, - )?, - }; - sign::handle_sign( - &file, - sig_output, - key.as_deref(), - &resolved_alias, - expires_in, - note, - commit_sha, - repo_opt, - passphrase_provider, - env_config, - ) + ) + } } ArtifactSubcommand::Publish { file, @@ -301,37 +475,6 @@ pub fn handle_artifact( }; publish::handle_publish(&sig_path, package.as_deref(), ®istry) } - ArtifactSubcommand::BatchSign { - pattern, - device_key, - key, - attestation_dir, - expires_in, - note, - commit, - no_commit, - } => { - let commit_sha = resolve_commit_sha_from_flags(commit, no_commit)?; - let resolved_alias = match device_key { - Some(alias) => alias, - None => crate::commands::key_detect::auto_detect_device_key( - repo_opt.as_deref(), - env_config, - )?, - }; - batch_sign::handle_batch_sign( - &pattern, - &resolved_alias, - key.as_deref(), - attestation_dir, - expires_in, - note, - commit_sha, - repo_opt, - passphrase_provider, - env_config, - ) - } 
ArtifactSubcommand::Verify { file, signature, diff --git a/crates/auths-cli/src/commands/artifact/verify.rs b/crates/auths-cli/src/commands/artifact/verify.rs index 9344c58e..ef243c73 100644 --- a/crates/auths-cli/src/commands/artifact/verify.rs +++ b/crates/auths-cli/src/commands/artifact/verify.rs @@ -4,10 +4,6 @@ use std::fs; use std::path::{Path, PathBuf}; use auths_keri::witness::SignedReceipt; -use auths_transparency::{ - BundleVerificationReport, CheckpointStatus, DelegationStatus, InclusionStatus, NamespaceStatus, - OfflineBundle, SignatureStatus, TrustRoot, WitnessStatus, -}; use auths_verifier::core::Attestation; use auths_verifier::witness::{WitnessQuorum, WitnessVerifyConfig}; use auths_verifier::{ @@ -81,16 +77,6 @@ pub async fn handle_verify( } }; - let sig_value: serde_json::Value = match serde_json::from_str(&sig_content) { - Ok(v) => v, - Err(e) => { - return output_error(&file_str, 2, &format!("Failed to parse .auths.json: {}", e)); - } - }; - if sig_value.get("offline_bundle").is_some() { - return handle_bundle_verify(file, &sig_content); - } - // 2. Parse attestation let attestation: Attestation = match serde_json::from_str(&sig_content) { Ok(a) => a, @@ -209,10 +195,50 @@ pub async fn handle_verify( valid = false; } + // 8a. Ephemeral attestation: verify commit signature transitively + let is_ephemeral = attestation.issuer.as_str().starts_with("did:key:"); + if is_ephemeral && valid { + match &attestation.commit_sha { + None => { + if !is_json_mode() { + eprintln!( + "Error: ephemeral attestation (did:key issuer) requires commit_sha. \ + This attestation is unsigned provenance without a commit anchor." + ); + } + valid = false; + } + Some(sha) => { + // Verify the commit is signed by a trusted key. + // Uses in-process verification via auths-verifier (no git shell-out). 
+ let commit_sig_ok = verify_commit_in_process(sha); + + if !commit_sig_ok { + valid = false; + } + + if !is_json_mode() { + if commit_sig_ok { + eprintln!( + " Trust chain: artifact <- ephemeral key <- commit {} <- maintainer", + &sha[..8.min(sha.len())] + ); + } else { + eprintln!( + " Commit {} is not signed by a trusted maintainer.", + &sha[..8.min(sha.len())] + ); + } + } + } + } + } + // 8b. Display commit linkage info (always, when present) let commit_sha_val = attestation.commit_sha.clone(); if let Some(ref sha) = commit_sha_val && !is_json_mode() + && !is_ephemeral { eprintln!(" Commit: {}", sha); } @@ -362,7 +388,6 @@ async fn verify_witnesses( Ok(report.witness_quorum) } -/// Output error with appropriate formatting and exit code. fn output_error(file: &str, exit_code: i32, message: &str) -> Result<()> { if is_json_mode() { let result = VerifyArtifactResult { @@ -415,145 +440,164 @@ fn output_result(exit_code: i32, result: VerifyArtifactResult) -> Result<()> { Ok(()) } -fn handle_bundle_verify(file: &Path, sig_content: &str) -> Result<()> { - let file_str = file.to_string_lossy().to_string(); - - let sig_value: serde_json::Value = - serde_json::from_str(sig_content).with_context(|| "Failed to parse .auths.json")?; - let bundle: OfflineBundle = serde_json::from_value(sig_value["offline_bundle"].clone()) - .with_context(|| "Failed to parse offline_bundle from .auths.json")?; - - let trust_root: TrustRoot = serde_json::from_str(&default_trust_root_json()) - .with_context(|| "Failed to parse default trust root")?; - - #[allow(clippy::disallowed_methods)] // CLI is the presentation boundary - let now = chrono::Utc::now(); - - let report = auths_transparency::verify_bundle(&bundle, &trust_root, now); - - if is_json_mode() { - println!( - "{}", - serde_json::to_string(&report).with_context(|| "Failed to serialize bundle report")? 
- ); - } else { - render_bundle_report(&report); - } - - if report.is_valid() { - cache_checkpoint_from_bundle(&bundle); - Ok(()) - } else { - output_error(&file_str, 1, "Bundle verification failed") - } -} - -/// Best-effort checkpoint caching after bundle verification. -#[allow(clippy::disallowed_methods)] // CLI is the presentation boundary -fn cache_checkpoint_from_bundle(bundle: &OfflineBundle) { - let cache_path = match dirs::home_dir() { - Some(home) => home.join(".auths").join("log_checkpoint.json"), - None => return, - }; - - match auths_sdk::workflows::transparency::try_cache_checkpoint( - &cache_path, - &bundle.signed_checkpoint, - None, - ) { - Ok(report) => { - if report.old_size == 0 && !is_json_mode() { - eprintln!( - "Cached transparency checkpoint (tree size: {})", - report.new_size - ); +/// Verify a commit signature in-process using `auths-verifier`. +/// +/// Reads the commit content via git2, loads allowed signer keys from +/// `.auths/allowed_signers`, and verifies using the native Rust verifier. +/// No `git verify-commit --raw` shell-out. 
+fn verify_commit_in_process(sha: &str) -> bool { + // Open the repository + let repo = match git2::Repository::discover(".") { + Ok(r) => r, + Err(e) => { + if !is_json_mode() { + eprintln!("Failed to open git repository: {e}"); } + return false; } + }; + + // Parse the commit SHA + let oid = match git2::Oid::from_str(sha) { + Ok(o) => o, Err(e) => { if !is_json_mode() { - eprintln!("Warning: checkpoint cache update failed: {e}"); + eprintln!("Invalid commit SHA '{}': {e}", &sha[..8.min(sha.len())]); } + return false; } - } -} - -fn render_bundle_report(report: &BundleVerificationReport) { - println!("Bundle Verification:"); + }; - match &report.signature { - SignatureStatus::Verified => println!(" Signature: \u{2713} Verified"), - SignatureStatus::Failed { reason } => { - println!(" Signature: \u{2717} Failed: {reason}") + // Get the raw commit content (same as `git cat-file commit `) + let commit_obj = match repo.find_object(oid, Some(git2::ObjectType::Commit)) { + Ok(obj) => obj, + Err(e) => { + if !is_json_mode() { + eprintln!("Commit {} not found: {e}", &sha[..8.min(sha.len())]); + } + return false; } - SignatureStatus::NotProvided => println!(" Signature: - Not provided"), - _ => println!(" Signature: ? Unknown status"), - } + }; - match &report.inclusion { - InclusionStatus::Verified => println!(" Inclusion: \u{2713} Verified"), - InclusionStatus::Failed { reason } => { - println!(" Inclusion: \u{2717} Failed: {reason}") + // Get raw content including the signature header + let commit_content = match repo + .find_commit(oid) + .ok() + .and_then(|c| c.raw_header().map(|h| h.to_string())) + { + Some(header) => { + // Reconstruct full commit content: header + \n\n + message + let msg = repo + .find_commit(oid) + .ok() + .and_then(|c| c.message_raw().map(|m| m.to_string())) + .unwrap_or_default(); + format!("{}\n\n{}", header, msg) } - InclusionStatus::NotProvided => println!(" Inclusion: - Not provided"), - _ => println!(" Inclusion: ? 
Unknown status"), - } - - match &report.checkpoint { - CheckpointStatus::Verified => println!(" Checkpoint: \u{2713} Verified"), - CheckpointStatus::InvalidSignature => { - println!(" Checkpoint: \u{2717} Invalid signature") + None => { + // Fallback: use the raw object data + match commit_obj.as_blob() { + Some(blob) => String::from_utf8_lossy(blob.content()).to_string(), + None => { + if !is_json_mode() { + eprintln!( + "Cannot read commit content for {}", + &sha[..8.min(sha.len())] + ); + } + return false; + } + } } - CheckpointStatus::NotProvided => println!(" Checkpoint: - Not provided"), - _ => println!(" Checkpoint: ? Unknown status"), - } + }; - match &report.witnesses { - WitnessStatus::Quorum { verified, required } => { - println!(" Witnesses: \u{2713} Quorum ({verified}/{required} verified)"); + // Load allowed signer keys from .auths/allowed_signers + let allowed_signers_path = std::path::Path::new(".auths/allowed_signers"); + let allowed_keys = if allowed_signers_path.exists() { + match std::fs::read_to_string(allowed_signers_path) { + Ok(content) => parse_allowed_signer_keys(&content), + Err(e) => { + if !is_json_mode() { + eprintln!("Failed to read .auths/allowed_signers: {e}"); + } + return false; + } } - WitnessStatus::Insufficient { verified, required } => { - println!(" Witnesses: \u{2717} Insufficient ({verified}/{required} verified)"); + } else { + if !is_json_mode() { + eprintln!("No .auths/allowed_signers file found. Create one with: auths signers sync"); } - WitnessStatus::NotProvided => println!(" Witnesses: - Not provided"), - _ => println!(" Witnesses: ? 
Unknown status"), - } + return false; + }; - match &report.namespace { - NamespaceStatus::Authorized => println!(" Namespace: \u{2713} Authorized"), - NamespaceStatus::Owned => println!(" Namespace: \u{2713} Owned"), - NamespaceStatus::Unowned => println!(" Namespace: - Unowned"), - NamespaceStatus::Unauthorized => println!(" Namespace: \u{2717} Unauthorized"), - _ => println!(" Namespace: ? Unknown status"), + if allowed_keys.is_empty() { + if !is_json_mode() { + eprintln!("No Ed25519 keys found in .auths/allowed_signers"); + } + return false; } - match &report.delegation { - DelegationStatus::Direct => println!(" Delegation: \u{2713} Direct"), - DelegationStatus::ChainVerified { - org_did, - member_did, - .. - } => { - println!(" Delegation: \u{2713} Chain verified ({org_did} \u{2192} {member_did})"); - } - DelegationStatus::ChainBroken { reason } => { - println!(" Delegation: \u{2717} Chain broken: {reason}"); + // Verify using in-process verifier + let provider = auths_crypto::RingCryptoProvider; + let rt = match tokio::runtime::Runtime::new() { + Ok(rt) => rt, + Err(e) => { + if !is_json_mode() { + eprintln!("Failed to create async runtime: {e}"); + } + return false; } - DelegationStatus::NoDelegationData => println!(" Delegation: - No delegation data"), - _ => println!(" Delegation: ? Unknown status"), - } + }; - for warning in &report.warnings { - println!(" Warning: \u{26a0} {warning}"); + match rt.block_on(auths_verifier::commit::verify_commit_signature( + commit_content.as_bytes(), + &allowed_keys, + &provider, + Some(repo.path().parent().unwrap_or(std::path::Path::new("."))), + )) { + Ok(_verified) => true, + Err(e) => { + if !is_json_mode() { + eprintln!( + "Commit {} signature verification failed: {e}", + &sha[..8.min(sha.len())] + ); + } + false + } } } -fn default_trust_root_json() -> String { - // Epic 1 hardcoded trust root: no witnesses, placeholder log key. - // Will be replaced by TUF-distributed trust root in fn-76. 
- serde_json::json!({ - "log_public_key": "0000000000000000000000000000000000000000000000000000000000000000", - "log_origin": "auths.dev/log", - "witnesses": [] - }) - .to_string() +/// Parse Ed25519 public keys from an allowed_signers file. +/// +/// Format: `email namespaces key-type base64-key` +/// We extract keys where key-type is `ssh-ed25519`. +fn parse_allowed_signer_keys(content: &str) -> Vec { + content + .lines() + .filter(|line| !line.trim().is_empty() && !line.starts_with('#')) + .filter_map(|line| { + let parts: Vec<&str> = line.split_whitespace().collect(); + // Find ssh-ed25519 key type and extract the base64 key + let key_idx = parts.iter().position(|&p| p == "ssh-ed25519")?; + let b64_key = parts.get(key_idx + 1)?; + + use base64::Engine; + let key_bytes = base64::engine::general_purpose::STANDARD + .decode(b64_key) + .ok()?; + // SSH key format: 4-byte length + "ssh-ed25519" + 4-byte length + 32-byte key + // Skip the type prefix to get the raw 32-byte key + if key_bytes.len() < 4 { + return None; + } + let type_len = u32::from_be_bytes(key_bytes[..4].try_into().ok()?) as usize; + let key_start = 4 + type_len + 4; // skip type string + second length prefix + if key_bytes.len() < key_start + 32 { + return None; + } + let raw_key: [u8; 32] = key_bytes[key_start..key_start + 32].try_into().ok()?; + Some(auths_verifier::Ed25519PublicKey::from_bytes(raw_key)) + }) + .collect() } diff --git a/crates/auths-cli/src/commands/ci/forge_backend.rs b/crates/auths-cli/src/commands/ci/forge_backend.rs deleted file mode 100644 index 39a18d0b..00000000 --- a/crates/auths-cli/src/commands/ci/forge_backend.rs +++ /dev/null @@ -1,140 +0,0 @@ -//! Forge backend implementations for setting CI secrets. - -use anyhow::{Context, Result, anyhow}; -use auths_sdk::domains::ci::forge::Forge; -use std::io::Write; -use std::process::{Command, Stdio}; - -/// Abstraction over forge-specific secret-setting operations. 
-/// -/// Usage: -/// ```ignore -/// let backend = backend_for_forge(&forge); -/// backend.set_secret("AUTHS_CI_TOKEN", &token_json)?; -/// backend.print_ci_template(); -/// ``` -pub trait ForgeBackend { - /// Set a CI secret/variable on the forge. - fn set_secret(&self, name: &str, value: &str) -> Result<()>; - - /// Human-readable forge name. - fn name(&self) -> &str; - - /// Print CI workflow template for this forge. - fn print_ci_template(&self); -} - -/// GitHub backend — sets secrets via `gh secret set`. -pub struct GitHubBackend { - pub owner_repo: String, -} - -impl ForgeBackend for GitHubBackend { - fn set_secret(&self, name: &str, value: &str) -> Result<()> { - // Check gh is available and authenticated (strip GH_TOKEN to avoid stale tokens) - let auth_status = Command::new("gh") - .args(["auth", "status"]) - .env_remove("GH_TOKEN") - .env_remove("GITHUB_TOKEN") - .stdout(Stdio::null()) - .stderr(Stdio::null()) - .status() - .context("gh CLI not found — install it from https://cli.github.com")?; - - if !auth_status.success() { - return Err(anyhow!( - "gh CLI is not authenticated. Run `gh auth login` first." 
- )); - } - - let mut child = Command::new("gh") - .args(["secret", "set", name, "--repo", &self.owner_repo]) - .env_remove("GH_TOKEN") - .env_remove("GITHUB_TOKEN") - .stdin(Stdio::piped()) - .stdout(Stdio::null()) - .stderr(Stdio::piped()) - .spawn() - .context("Failed to spawn gh secret set")?; - - if let Some(mut stdin) = child.stdin.take() { - stdin - .write_all(value.as_bytes()) - .context("Failed to write secret to gh stdin")?; - } - - let output = child - .wait_with_output() - .context("Failed to wait for gh secret set")?; - - if !output.status.success() { - let stderr = String::from_utf8_lossy(&output.stderr); - return Err(anyhow!("gh secret set failed: {}", stderr.trim())); - } - - Ok(()) - } - - fn name(&self) -> &str { - "GitHub" - } - - fn print_ci_template(&self) { - println!("Add to your release workflow:"); - println!(); - println!(" - uses: auths-dev/attest-action@v1"); - println!(" with:"); - println!(" token: ${{{{ secrets.AUTHS_CI_TOKEN }}}}"); - println!(" files: 'dist/*.tar.gz'"); - } -} - -/// Fallback backend for unsupported forges — prints values for manual setup. -pub struct ManualBackend { - pub forge_name: String, -} - -impl ForgeBackend for ManualBackend { - fn set_secret(&self, _name: &str, _value: &str) -> Result<()> { - // No-op — values are printed by the caller on failure - Ok(()) - } - - fn name(&self) -> &str { - &self.forge_name - } - - fn print_ci_template(&self) { - println!("Set AUTHS_CI_TOKEN as a masked CI variable in your forge's settings."); - println!("See https://docs.auths.dev/ci for forge-specific instructions."); - } -} - -/// Create the appropriate backend for a detected forge. -/// -/// Args: -/// * `forge`: The detected forge variant. -/// -/// Usage: -/// ```ignore -/// let backend = backend_for_forge(&forge); -/// ``` -pub fn backend_for_forge(forge: &Forge) -> Box { - match forge { - Forge::GitHub { owner_repo } => Box::new(GitHubBackend { - owner_repo: owner_repo.clone(), - }), - Forge::GitLab { .. 
} => Box::new(ManualBackend { - forge_name: "GitLab".into(), - }), - Forge::Bitbucket { .. } => Box::new(ManualBackend { - forge_name: "Bitbucket".into(), - }), - Forge::Radicle { .. } => Box::new(ManualBackend { - forge_name: "Radicle".into(), - }), - Forge::Unknown { .. } => Box::new(ManualBackend { - forge_name: "Unknown".into(), - }), - } -} diff --git a/crates/auths-cli/src/commands/ci/mod.rs b/crates/auths-cli/src/commands/ci/mod.rs deleted file mode 100644 index a4a5e73c..00000000 --- a/crates/auths-cli/src/commands/ci/mod.rs +++ /dev/null @@ -1,108 +0,0 @@ -//! CI/CD integration commands — setup and rotate CI signing secrets. - -pub mod forge_backend; -pub mod rotate; -pub mod setup; - -/// Key alias used by all CI commands (setup, rotate). -pub(crate) const CI_DEVICE_ALIAS: &str = "ci-release-device"; - -use anyhow::Result; -use clap::{Args, Subcommand}; -use std::sync::Arc; - -use auths_sdk::signing::PassphraseProvider; -use auths_sdk::storage_layout::layout; - -use crate::commands::executable::ExecutableCommand; -use crate::config::CliConfig; - -/// CI/CD integration (setup, rotate secrets). -#[derive(Args, Debug, Clone)] -#[command( - about = "CI/CD integration — set up and rotate CI signing secrets.", - after_help = "Examples: - auths ci setup # Auto-detect forge, set AUTHS_CI_TOKEN - auths ci setup --repo owner/repo - # Specify target repo - auths ci rotate # Refresh token, reuse device key - auths ci rotate --max-age-secs 7776000 - # Rotate with 90-day TTL - -Related: - auths device — Manage device authorizations - auths key — Manage cryptographic keys - auths init — Set up identity" -)] -pub struct CiCommand { - #[command(subcommand)] - pub command: CiSubcommand, -} - -#[derive(Subcommand, Debug, Clone)] -pub enum CiSubcommand { - /// Set up CI secrets for release artifact signing and verification. - Setup { - /// Target repo. Accepts `owner/repo`, HTTPS URL, or SSH URL. - /// Defaults to git remote origin. 
- #[arg(long)] - repo: Option, - - /// Max age for the verification bundle in seconds (default: 1 year). - #[arg(long, default_value = "31536000")] - max_age_secs: u64, - - /// Disable auto-generated passphrase and prompt interactively instead. - #[arg(long)] - manual_passphrase: bool, - }, - - /// Rotate an existing CI token (regenerate bundle, reuse device key). - Rotate { - /// Target repo override. - #[arg(long)] - repo: Option, - - /// Max age for the verification bundle in seconds (default: 1 year). - #[arg(long, default_value = "31536000")] - max_age_secs: u64, - - /// Disable auto-generated passphrase and prompt interactively instead. - #[arg(long)] - manual_passphrase: bool, - }, -} - -impl ExecutableCommand for CiCommand { - fn execute(&self, ctx: &CliConfig) -> Result<()> { - let repo_path = layout::resolve_repo_path(ctx.repo_path.clone())?; - let pp: Arc = Arc::clone(&ctx.passphrase_provider); - - match &self.command { - CiSubcommand::Setup { - repo, - max_age_secs, - manual_passphrase, - } => setup::run_setup( - repo.clone(), - *max_age_secs, - !manual_passphrase, - pp, - &ctx.env_config, - &repo_path, - ), - CiSubcommand::Rotate { - repo, - max_age_secs, - manual_passphrase, - } => rotate::run_rotate( - repo.clone(), - *max_age_secs, - !manual_passphrase, - pp, - &ctx.env_config, - &repo_path, - ), - } - } -} diff --git a/crates/auths-cli/src/commands/ci/rotate.rs b/crates/auths-cli/src/commands/ci/rotate.rs deleted file mode 100644 index 80ae4f11..00000000 --- a/crates/auths-cli/src/commands/ci/rotate.rs +++ /dev/null @@ -1,197 +0,0 @@ -//! `auths ci rotate` — refresh CI token without regenerating the device key. 
- -use anyhow::{Context, Result, anyhow}; -use std::path::Path; -use std::sync::Arc; - -use auths_crypto::did_key::ed25519_pubkey_to_did_key; -use auths_sdk::core_config::EnvironmentConfig; -use auths_sdk::domains::ci::bundle::build_identity_bundle; -use auths_sdk::domains::ci::forge::Forge; -use auths_sdk::domains::ci::token::CiToken; -use auths_sdk::keychain::{KeyAlias, get_platform_keychain}; -use auths_sdk::signing::PassphraseProvider; -use ring::signature::KeyPair; -use zeroize::Zeroizing; - -use crate::commands::ci::forge_backend::backend_for_forge; -use crate::commands::ci::setup::warn_short_ttl; -use crate::subprocess::git_stdout; - -use super::CI_DEVICE_ALIAS; - -/// Run the `auths ci rotate` flow. -/// -/// Regenerates the file keychain, identity bundle, and verify bundle, -/// but reuses the existing CI device key (no new key generation or device linking). -/// -/// Args: -/// * `repo_override`: Optional forge repo. Auto-detected from git remote if `None`. -/// * `max_age_secs`: TTL for the verify bundle in seconds. -/// * `auto_passphrase`: If `true`, generate a random hex passphrase. -/// * `_passphrase_provider`: CLI passphrase provider (unused for rotate, kept for consistency). -/// * `_env_config`: Environment configuration. -/// * `repo_path`: Path to the auths registry. 
-/// -/// Usage: -/// ```ignore -/// run_rotate(None, 31536000, true, &pp, &env, &repo)?; -/// ``` -pub fn run_rotate( - repo_override: Option, - max_age_secs: u64, - _auto_passphrase: bool, - _passphrase_provider: Arc, - _env_config: &EnvironmentConfig, - repo_path: &Path, -) -> Result<()> { - println!(); - println!("\x1b[0;36m╔════════════════════════════════════════════════════════════╗\x1b[0m"); - println!( - "\x1b[0;36m║\x1b[0m\x1b[1m CI Token Rotation \x1b[0m\x1b[0;36m║\x1b[0m" - ); - println!("\x1b[0;36m╚════════════════════════════════════════════════════════════╝\x1b[0m"); - println!(); - - // Verify CI device key exists - let keychain = get_platform_keychain()?; - let aliases = keychain - .list_aliases() - .context("Failed to list key aliases")?; - - let has_ci_key = aliases.iter().any(|a| *a == CI_DEVICE_ALIAS); - if !has_ci_key { - return Err(anyhow!( - "No CI device key found. Run `auths ci setup` first." - )); - } - - // Find identity key alias - let identity_key_alias = aliases - .first() - .ok_or_else(|| anyhow!("No keys found in keychain"))? - .to_string(); - - // Handle passphrase — rotate always reuses the existing key, - // so we need the ORIGINAL passphrase to decrypt it. 
- let ci_pass = { - #[allow(clippy::disallowed_methods)] - let env_pass = std::env::var("AUTHS_PASSPHRASE").ok(); - if let Some(pass) = env_pass { - println!("\x1b[2mUsing passphrase from AUTHS_PASSPHRASE env var.\x1b[0m"); - Zeroizing::new(pass) - } else { - let pass = - rpassword::prompt_password("Passphrase for existing ci-release-device key: ") - .context("Failed to read passphrase")?; - Zeroizing::new(pass) - } - }; - - // Regenerate file keychain - println!("\x1b[2mRegenerating file keychain...\x1b[0m"); - let keychain_b64 = super::setup::create_file_keychain(keychain.as_ref(), &ci_pass)?; - println!("\x1b[0;32m\u{2713}\x1b[0m File keychain regenerated"); - - // Derive device DID (for display) - let key_alias = KeyAlias::new_unchecked(CI_DEVICE_ALIAS); - let (_, _, encrypted_key) = keychain - .load_key(&key_alias) - .context("Failed to load CI device key")?; - let pkcs8 = auths_sdk::crypto::decrypt_keypair(&encrypted_key, &ci_pass) - .context("Failed to decrypt CI device key")?; - let kp = auths_sdk::identity::load_keypair_from_der_or_seed(&pkcs8)?; - let pub_bytes: [u8; 32] = kp - .public_key() - .as_ref() - .try_into() - .map_err(|_| anyhow!("Public key is not 32 bytes"))?; - let device_did = ed25519_pubkey_to_did_key(&pub_bytes); - - // Repackage identity repo - println!("\x1b[2mRepackaging identity repo...\x1b[0m"); - let identity_repo_b64 = - build_identity_bundle(repo_path).map_err(|e| anyhow!("Bundle failed: {e}"))?; - println!("\x1b[0;32m\u{2713}\x1b[0m Identity repo packaged"); - - // Re-export verify bundle - let identity_storage = - auths_sdk::storage::RegistryIdentityStorage::new(repo_path.to_path_buf()); - let identity = auths_sdk::ports::IdentityStorage::load_identity(&identity_storage) - .context("Failed to load identity")?; - let identity_did_str = identity.controller_did.to_string(); - - let verify_bundle_json = - super::setup::build_verify_bundle(&identity_did_str, &pub_bytes, repo_path, max_age_secs)?; - - // Assemble new CiToken - 
#[allow(clippy::disallowed_methods)] - let now = chrono::Utc::now(); - let token = CiToken::new( - ci_pass.to_string(), - keychain_b64, - identity_repo_b64, - verify_bundle_json, - now.to_rfc3339(), - max_age_secs, - ); - let token_json = token - .to_json() - .map_err(|e| anyhow!("Token serialization: {e}"))?; - - warn_short_ttl(max_age_secs); - if token.is_large() { - eprintln!( - "\x1b[1;33mWarning:\x1b[0m CI token is ~{} KB, approaching GitHub's 48 KB secret limit.", - token.estimated_size() / 1024 - ); - eprintln!(" Consider reducing the identity repo size or splitting secrets."); - } - - // Detect forge + update secret - let forge = match repo_override { - Some(url) => Forge::from_url(&url), - None => { - let url = git_stdout(&["remote", "get-url", "origin"]) - .context("No git remote origin found. Use --repo to specify.")?; - Forge::from_url(&url) - } - }; - - let backend = backend_for_forge(&forge); - println!(); - println!( - "Detected forge: {} ({})", - backend.name(), - forge.repo_identifier() - ); - - match backend.set_secret("AUTHS_CI_TOKEN", &token_json) { - Ok(()) => println!( - "\x1b[0;32m\u{2713}\x1b[0m AUTHS_CI_TOKEN updated on {}", - forge.repo_identifier() - ), - Err(e) => { - eprintln!("\x1b[1;33mCould not update secret automatically: {e}\x1b[0m"); - println!(); - println!("Update AUTHS_CI_TOKEN manually:"); - println!(); - println!("{token_json}"); - } - } - - println!(); - #[allow(clippy::disallowed_methods)] - let expiry = chrono::Utc::now() + chrono::Duration::seconds(max_age_secs as i64); - println!( - "New token expires: {} ({} from now)", - expiry.format("%Y-%m-%d"), - super::setup::humanize_duration(max_age_secs) - ); - println!( - "To revoke: auths device revoke --device {} --key {}", - device_did, identity_key_alias - ); - - Ok(()) -} diff --git a/crates/auths-cli/src/commands/ci/setup.rs b/crates/auths-cli/src/commands/ci/setup.rs deleted file mode 100644 index 9011c618..00000000 --- a/crates/auths-cli/src/commands/ci/setup.rs +++ 
/dev/null @@ -1,392 +0,0 @@ -//! `auths ci setup` — one-command CI signing setup. - -use anyhow::{Context, Result, anyhow}; -use std::path::Path; -use std::sync::Arc; - -use auths_crypto::did_key::ed25519_pubkey_to_did_key; -use auths_sdk::core_config::EnvironmentConfig; -use auths_sdk::domains::ci::bundle::{build_identity_bundle, generate_ci_passphrase}; -use auths_sdk::domains::ci::forge::Forge; -use auths_sdk::domains::ci::token::CiToken; -use auths_sdk::keychain::EncryptedFileStorage; -use auths_sdk::keychain::{IdentityDID, KeyAlias, KeyRole, KeyStorage, get_platform_keychain}; -use auths_sdk::ports::AttestationSource; -use auths_sdk::ports::IdentityStorage; -use auths_sdk::signing::PassphraseProvider; -use auths_sdk::storage::{RegistryAttestationStorage, RegistryIdentityStorage}; -use auths_verifier::IdentityBundle; -use ring::signature::KeyPair; -use zeroize::Zeroizing; - -use crate::commands::ci::forge_backend::backend_for_forge; -use crate::factories::storage::build_auths_context; -use crate::subprocess::git_stdout; - -use super::CI_DEVICE_ALIAS; - -/// Run the `auths ci setup` flow. -/// -/// Args: -/// * `repo_override`: Optional forge repo (e.g., `owner/repo`). Auto-detected from git remote if `None`. -/// * `max_age_secs`: TTL for the verify bundle in seconds. -/// * `auto_passphrase`: If `true`, generate a random hex passphrase. Otherwise prompt interactively. -/// * `passphrase_provider`: CLI passphrase provider for key operations. -/// * `env_config`: Environment configuration for keychain backend selection. -/// * `repo_path`: Path to the auths registry (typically `~/.auths`). 
-/// -/// Usage: -/// ```ignore -/// run_setup(None, 31536000, true, &pp, &env, &repo)?; -/// ``` -pub fn run_setup( - repo_override: Option, - max_age_secs: u64, - auto_passphrase: bool, - passphrase_provider: Arc, - env_config: &EnvironmentConfig, - repo_path: &Path, -) -> Result<()> { - println!(); - println!("\x1b[0;36m╔════════════════════════════════════════════════════════════╗\x1b[0m"); - println!( - "\x1b[0;36m║\x1b[0m\x1b[1m CI Release Signing Setup (One-Time) \x1b[0m\x1b[0;36m║\x1b[0m" - ); - println!("\x1b[0;36m╚════════════════════════════════════════════════════════════╝\x1b[0m"); - println!(); - - // Step 1: Verify identity exists - let identity_storage = RegistryIdentityStorage::new(repo_path.to_path_buf()); - let identity = identity_storage - .load_identity() - .context("No auths identity found. Run `auths init` first.")?; - - let identity_did_str = identity.controller_did.to_string(); - - // Step 2: Find primary key alias - let keychain = get_platform_keychain()?; - let aliases = keychain - .list_aliases() - .context("Failed to list key aliases")?; - let identity_key_alias = aliases - .first() - .ok_or_else(|| anyhow!("No keys found in keychain. Run `auths init` first."))? - .to_string(); - - println!("\x1b[1mIdentity:\x1b[0m \x1b[0;36m{identity_did_str}\x1b[0m"); - println!("\x1b[1mKey alias:\x1b[0m \x1b[0;36m{identity_key_alias}\x1b[0m"); - println!(); - - // Step 3: Check for existing CI device key - let reuse = aliases.iter().any(|a| *a == CI_DEVICE_ALIAS); - if reuse { - println!("\x1b[2mFound existing {CI_DEVICE_ALIAS} key \u{2014} will reuse it.\x1b[0m"); - } - - // Step 4: Handle passphrase - // When reusing an existing key, we need the ORIGINAL passphrase to decrypt it. - // Auto-generate is only valid for new keys. 
- let ci_pass = if reuse { - // Existing key — need the original passphrase - #[allow(clippy::disallowed_methods)] - let env_pass = std::env::var("AUTHS_PASSPHRASE").ok(); - if let Some(pass) = env_pass { - println!("\x1b[2mUsing passphrase from AUTHS_PASSPHRASE env var.\x1b[0m"); - Zeroizing::new(pass) - } else { - let pass = - rpassword::prompt_password("Passphrase for existing ci-release-device key: ") - .context("Failed to read passphrase")?; - Zeroizing::new(pass) - } - } else if auto_passphrase { - let pass = generate_ci_passphrase(); - println!("\x1b[2mAuto-generated CI passphrase (64-char hex).\x1b[0m"); - Zeroizing::new(pass) - } else { - let pass = rpassword::prompt_password("CI device passphrase: ") - .context("Failed to read passphrase")?; - let confirm = rpassword::prompt_password("Confirm passphrase: ") - .context("Failed to read confirmation")?; - if pass != confirm { - return Err(anyhow!("Passphrases do not match")); - } - Zeroizing::new(pass) - }; - - // Step 5: Generate or reuse CI device key + file keychain - let keychain_b64 = if !reuse { - println!(); - println!("\x1b[2mGenerating CI device key...\x1b[0m"); - - let seed: [u8; 32] = rand::random(); - let seed_z = Zeroizing::new(seed); - - #[allow(clippy::disallowed_methods)] - let identity_did = IdentityDID::new_unchecked(identity_did_str.clone()); - auths_sdk::keys::import_seed( - &seed_z, - &ci_pass, - CI_DEVICE_ALIAS, - &identity_did, - keychain.as_ref(), - ) - .map_err(|e| anyhow!("Failed to import CI device key: {e}"))?; - - println!("\x1b[0;32m\u{2713}\x1b[0m CI device key imported"); - create_file_keychain(keychain.as_ref(), &ci_pass)? - } else { - println!( - "\x1b[2mReusing existing {CI_DEVICE_ALIAS} key \u{2014} regenerating file keychain...\x1b[0m" - ); - create_file_keychain(keychain.as_ref(), &ci_pass)? 
- }; - - // Step 6: Derive device DID - let key_alias = KeyAlias::new_unchecked(CI_DEVICE_ALIAS); - let (_, _, encrypted_key) = keychain - .load_key(&key_alias) - .context("Failed to load CI device key")?; - let pkcs8 = auths_sdk::crypto::decrypt_keypair(&encrypted_key, &ci_pass) - .context("Failed to decrypt CI device key")?; - let kp = auths_sdk::identity::load_keypair_from_der_or_seed(&pkcs8)?; - let pub_bytes: [u8; 32] = kp - .public_key() - .as_ref() - .try_into() - .map_err(|_| anyhow!("Public key is not 32 bytes"))?; - let device_did = ed25519_pubkey_to_did_key(&pub_bytes); - println!("\x1b[0;32m\u{2713}\x1b[0m Device DID: \x1b[0;36m{device_did}\x1b[0m"); - - // Step 7: Link device (if not already linked) - if !reuse { - link_ci_device( - &identity_key_alias, - &device_did, - repo_path, - env_config, - Arc::clone(&passphrase_provider), - )?; - } - - // Step 8: Package identity repo - println!("\x1b[2mPackaging identity repo...\x1b[0m"); - let identity_repo_b64 = - build_identity_bundle(repo_path).map_err(|e| anyhow!("Bundle failed: {e}"))?; - println!("\x1b[0;32m\u{2713}\x1b[0m Identity repo packaged"); - - // Step 9: Export verify bundle - let verify_bundle_json = - build_verify_bundle(&identity_did_str, &pub_bytes, repo_path, max_age_secs)?; - - // Step 10: Assemble CiToken - #[allow(clippy::disallowed_methods)] - let now = chrono::Utc::now(); - let token = CiToken::new( - ci_pass.to_string(), - keychain_b64, - identity_repo_b64, - verify_bundle_json, - now.to_rfc3339(), - max_age_secs, - ); - let token_json = token - .to_json() - .map_err(|e| anyhow!("Token serialization: {e}"))?; - - // TTL warning - warn_short_ttl(max_age_secs); - - // Size warning - if token.is_large() { - eprintln!( - "\x1b[1;33mWarning:\x1b[0m CI token is ~{} KB, approaching GitHub's 48 KB secret limit.", - token.estimated_size() / 1024 - ); - eprintln!(" Consider reducing the identity repo size or splitting secrets."); - } - - // Step 11: Detect forge + set secret - let forge = 
match repo_override { - Some(url) => Forge::from_url(&url), - None => { - let url = git_stdout(&["remote", "get-url", "origin"]) - .context("No git remote origin found. Use --repo to specify.")?; - Forge::from_url(&url) - } - }; - - let backend = backend_for_forge(&forge); - println!(); - println!( - "Detected forge: {} ({})", - backend.name(), - forge.repo_identifier() - ); - - match backend.set_secret("AUTHS_CI_TOKEN", &token_json) { - Ok(()) => println!( - "\x1b[0;32m\u{2713}\x1b[0m AUTHS_CI_TOKEN set on {}", - forge.repo_identifier() - ), - Err(e) => { - eprintln!("\x1b[1;33mCould not set secret automatically: {e}\x1b[0m"); - println!(); - println!("Set this manually as a repository secret named AUTHS_CI_TOKEN:"); - println!(); - println!("{token_json}"); - } - } - - // Step 12: Print template + revocation instructions - println!(); - backend.print_ci_template(); - println!(); - - #[allow(clippy::disallowed_methods)] - let expiry = chrono::Utc::now() + chrono::Duration::seconds(max_age_secs as i64); - println!( - "Token expires: {} ({} from now)", - expiry.format("%Y-%m-%d"), - humanize_duration(max_age_secs) - ); - println!("To rotate: auths ci rotate"); - println!( - "To revoke: auths device revoke --device {} --key {}", - device_did, identity_key_alias - ); - - Ok(()) -} - -/// Create a portable file-backend keychain from the platform keychain. 
-pub(super) fn create_file_keychain(keychain: &dyn KeyStorage, passphrase: &str) -> Result { - let key_alias = KeyAlias::new_unchecked(CI_DEVICE_ALIAS); - let (identity_did, _role, encrypted_key_data) = keychain - .load_key(&key_alias) - .context("CI device key not found in keychain")?; - - let tmp = tempfile::TempDir::new().context("Failed to create temp directory")?; - let keychain_path = tmp.path().join("ci-keychain.enc"); - let dst = EncryptedFileStorage::with_path(keychain_path.clone()) - .context("Failed to create file storage")?; - dst.set_password(Zeroizing::new(passphrase.to_string())); - dst.store_key( - &key_alias, - &identity_did, - KeyRole::Primary, - &encrypted_key_data, - ) - .context("Failed to store key in file keychain")?; - - let keychain_bytes = std::fs::read(&keychain_path).context("Failed to read file keychain")?; - Ok(base64::Engine::encode( - &base64::engine::general_purpose::STANDARD, - &keychain_bytes, - )) -} - -/// Link the CI device to the identity. -fn link_ci_device( - identity_key_alias: &str, - device_did: &str, - repo_path: &Path, - env_config: &EnvironmentConfig, - passphrase_provider: Arc, -) -> Result<()> { - println!("\x1b[2mLinking CI device to identity...\x1b[0m"); - - let link_config = auths_sdk::types::DeviceLinkConfig { - identity_key_alias: KeyAlias::new_unchecked(identity_key_alias), - device_key_alias: Some(KeyAlias::new_unchecked(CI_DEVICE_ALIAS)), - device_did: Some(device_did.to_string()), - capabilities: vec![auths_verifier::Capability::sign_release()], - expires_in: None, - note: Some("CI release signer (auths ci setup)".to_string()), - payload: None, - }; - - let ctx = build_auths_context(repo_path, env_config, Some(passphrase_provider))?; - auths_sdk::domains::device::service::link_device( - link_config, - &ctx, - &auths_sdk::ports::SystemClock, - ) - .map_err(|e| anyhow!("Failed to link CI device: {e}"))?; - - println!("\x1b[0;32m\u{2713}\x1b[0m CI device linked"); - Ok(()) -} - -/// Build the verify bundle JSON 
for inclusion in the CiToken. -pub(super) fn build_verify_bundle( - identity_did_str: &str, - public_key_bytes: &[u8; 32], - repo_path: &Path, - max_age_secs: u64, -) -> Result { - let attestation_storage = RegistryAttestationStorage::new(repo_path.to_path_buf()); - let attestations = attestation_storage - .load_all_attestations() - .unwrap_or_default(); - - #[allow(clippy::disallowed_methods)] - let now = chrono::Utc::now(); - - #[allow(clippy::disallowed_methods)] - let identity_did = auths_sdk::keychain::IdentityDID::new_unchecked(identity_did_str); - #[allow(clippy::disallowed_methods)] - let public_key_hex = auths_verifier::PublicKeyHex::new_unchecked(hex::encode(public_key_bytes)); - - let bundle = IdentityBundle { - identity_did, - public_key_hex, - attestation_chain: attestations, - bundle_timestamp: now, - max_valid_for_secs: max_age_secs, - }; - - serde_json::to_value(&bundle).context("Failed to serialize verify bundle") -} - -/// Print a warning for very short TTL values. -pub fn warn_short_ttl(max_age_secs: u64) { - if max_age_secs < 3600 { - eprintln!( - "\x1b[1;33mWarning:\x1b[0m Token TTL is {}s (< 1 hour). CI will fail after expiry.", - max_age_secs - ); - eprintln!(" Recommended:"); - eprintln!(" 30 days: --max-age-secs 2592000"); - eprintln!(" 90 days: --max-age-secs 7776000"); - eprintln!(" 1 year: --max-age-secs 31536000"); - } -} - -/// Format a duration in seconds to a human-readable string. 
-pub(super) fn humanize_duration(secs: u64) -> String { - if secs >= 86400 * 365 { - let years = secs / (86400 * 365); - if years == 1 { - "1 year".to_string() - } else { - format!("{years} years") - } - } else if secs >= 86400 { - let days = secs / 86400; - if days == 1 { - "1 day".to_string() - } else { - format!("{days} days") - } - } else if secs >= 3600 { - let hours = secs / 3600; - if hours == 1 { - "1 hour".to_string() - } else { - format!("{hours} hours") - } - } else { - format!("{secs}s") - } -} diff --git a/crates/auths-cli/src/commands/init/helpers.rs b/crates/auths-cli/src/commands/init/helpers.rs index 3421ee27..e86108af 100644 --- a/crates/auths-cli/src/commands/init/helpers.rs +++ b/crates/auths-cli/src/commands/init/helpers.rs @@ -128,10 +128,10 @@ fn set_git_config(key: &str, value: &str, scope: &str) -> Result<()> { // --- GitHub Action Scaffolding --- -const GITHUB_ACTION_WORKFLOW_TEMPLATE: &str = r#"# Auths release workflow — signs artifacts and verifies them. +const GITHUB_ACTION_WORKFLOW_TEMPLATE: &str = r#"# Auths release workflow — verifies commits and signs artifacts ephemerally. # Generated by: auths init --github-action # -# Required: run `auths ci setup` to set the AUTHS_CI_TOKEN secret. +# No secrets needed for signing. Trust derives from commit signatures. 
name: Auths Release @@ -144,7 +144,16 @@ permissions: contents: write jobs: + verify: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + - uses: auths-dev/verify@v1 + release: + needs: verify runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 @@ -154,12 +163,9 @@ jobs: # Replace this with your build step echo "Build your artifacts here" - - name: Sign and verify artifacts - uses: auths-dev/sign@v1 - with: - token: ${{ secrets.AUTHS_CI_TOKEN }} - files: 'dist/*.tar.gz' - verify: true + - name: Sign artifacts (ephemeral) + run: | + auths artifact sign dist/*.tar.gz --ci --commit ${{ github.sha }} "#; /// Scaffolds a GitHub Actions workflow for attestation signing. diff --git a/crates/auths-cli/src/commands/key.rs b/crates/auths-cli/src/commands/key.rs index eb6af641..32bbaf7a 100644 --- a/crates/auths-cli/src/commands/key.rs +++ b/crates/auths-cli/src/commands/key.rs @@ -113,12 +113,12 @@ pub enum KeySubcommand { /// /// Examples: /// # Copy to file keychain (passphrase from env var) - /// AUTHS_PASSPHRASE="$CI_PASS" auths key copy-backend \ - /// --alias ci-release-device --dst-backend file --dst-file /tmp/ci-keychain.enc + /// AUTHS_PASSPHRASE="$PASS" auths key copy-backend \ + /// --alias main --dst-backend file --dst-file /tmp/keychain.enc /// /// # Copy to file keychain (passphrase from flag) - /// auths key copy-backend --alias ci-release-device \ - /// --dst-backend file --dst-file /tmp/ci-keychain.enc --dst-passphrase "$CI_PASS" + /// auths key copy-backend --alias main \ + /// --dst-backend file --dst-file /tmp/keychain.enc --dst-passphrase "$PASS" CopyBackend { /// Alias of the key to copy from the current (source) keychain. 
#[arg(long = "key-alias", visible_alias = "alias")] diff --git a/crates/auths-cli/src/commands/mod.rs b/crates/auths-cli/src/commands/mod.rs index 26da0545..7cedfd23 100644 --- a/crates/auths-cli/src/commands/mod.rs +++ b/crates/auths-cli/src/commands/mod.rs @@ -8,7 +8,7 @@ pub mod artifact; pub mod audit; pub mod auth; pub mod cache; -pub mod ci; + pub mod commit; pub mod completions; pub mod config; diff --git a/crates/auths-cli/src/commands/org.rs b/crates/auths-cli/src/commands/org.rs index ac59f559..2ca7820e 100644 --- a/crates/auths-cli/src/commands/org.rs +++ b/crates/auths-cli/src/commands/org.rs @@ -376,6 +376,7 @@ pub fn handle_org( Some(Role::Admin), None, // Root admin has no delegator None, // commit_sha + None, ) .context("Failed to create admin attestation")?; @@ -495,6 +496,7 @@ pub fn handle_org( None, None, None, // commit_sha + None, ) .context("Failed to create signed attestation object")?; diff --git a/crates/auths-cli/src/errors/registry.rs b/crates/auths-cli/src/errors/registry.rs index e653ff85..0114d475 100644 --- a/crates/auths-cli/src/errors/registry.rs +++ b/crates/auths-cli/src/errors/registry.rs @@ -1066,23 +1066,6 @@ pub fn explain(code: &str) -> Option<&'static str> { "# AUTHS-E6004\n\n**Crate:** `auths-sdk` \n**Type:** `AuthChallengeError::SigningFailed`\n\n## Message\n\nsigning failed: {0}\n", ), - // --- auths-sdk (CiError) --- - "AUTHS-E7001" => Some( - "# AUTHS-E7001\n\n**Crate:** `auths-sdk` \n**Type:** `CiError::EnvironmentNotDetected`\n\n## Message\n\nCI environment not detected\n", - ), - "AUTHS-E7002" => Some( - "# AUTHS-E7002\n\n**Crate:** `auths-sdk` \n**Type:** `CiError::IdentityBundleInvalid`\n\n## Message\n\nidentity bundle invalid at {path}: {reason}\n", - ), - "AUTHS-E7003" => Some( - "# AUTHS-E7003\n\n**Crate:** `auths-sdk` \n**Type:** `CiError::NoArtifacts`\n\n## Message\n\nno artifacts to sign\n\n## Suggestion\n\nCheck your glob pattern matches at least one file\n", - ), - "AUTHS-E7004" => Some( - "# 
AUTHS-E7004\n\n**Crate:** `auths-sdk` \n**Type:** `CiError::CollectionDirFailed`\n\n## Message\n\nfailed to create attestation directory {path}: {reason}\n", - ), - "AUTHS-E7005" => Some( - "# AUTHS-E7005\n\n**Crate:** `auths-sdk` \n**Type:** `CiError::CollectionCopyFailed`\n\n## Message\n\nfailed to collect attestation {src} → {dst}: {reason}\n", - ), - // --- auths-oidc-port (OidcError) --- "AUTHS-E8001" => Some( "# AUTHS-E8001\n\n**Crate:** `auths-oidc-port` \n**Type:** `OidcError::JwtDecode`\n\n## Message\n\nJWT decode failed: {0}\n", @@ -1431,11 +1414,6 @@ pub fn all_codes() -> &'static [&'static str] { "AUTHS-E6002", "AUTHS-E6003", "AUTHS-E6004", - "AUTHS-E7001", - "AUTHS-E7002", - "AUTHS-E7003", - "AUTHS-E7004", - "AUTHS-E7005", "AUTHS-E8001", "AUTHS-E8002", "AUTHS-E8003", diff --git a/crates/auths-cli/src/main.rs b/crates/auths-cli/src/main.rs index cea96b0f..b1cc943e 100644 --- a/crates/auths-cli/src/main.rs +++ b/crates/auths-cli/src/main.rs @@ -89,8 +89,6 @@ fn run() -> Result<()> { // Utilities RootCommand::Config(cmd) => cmd.execute(&ctx), RootCommand::Completions(cmd) => cmd.execute(&ctx), - // CI/CD - RootCommand::Ci(cmd) => cmd.execute(&ctx), // Advanced RootCommand::Reset(cmd) => cmd.execute(&ctx), RootCommand::SignCommit(cmd) => cmd.execute(&ctx), diff --git a/crates/auths-cli/tests/cases/init.rs b/crates/auths-cli/tests/cases/init.rs index 16c209aa..162fa5c0 100644 --- a/crates/auths-cli/tests/cases/init.rs +++ b/crates/auths-cli/tests/cases/init.rs @@ -36,12 +36,12 @@ fn test_init_github_action_scaffold() { let content = std::fs::read_to_string(&workflow).unwrap(); assert!( - content.contains("auths-dev/sign@v1"), - "workflow should reference sign action" + content.contains("auths-dev/verify@v1"), + "workflow should reference verify action" ); assert!( - content.contains("AUTHS_CI_TOKEN"), - "workflow should reference AUTHS_CI_TOKEN secret" + content.contains("--ci"), + "workflow should reference ephemeral CI signing" ); // .auths/.gitkeep 
should exist diff --git a/crates/auths-core/Cargo.toml b/crates/auths-core/Cargo.toml index 0047daab..9a4674d5 100644 --- a/crates/auths-core/Cargo.toml +++ b/crates/auths-core/Cargo.toml @@ -53,6 +53,7 @@ x25519-dalek = { version = "2", features = ["static_secrets"] } auths-verifier = { workspace = true, features = ["native"] } auths-keri = { workspace = true } +auths-transparency = { workspace = true } url = { version = "2", features = ["serde"] } uuid.workspace = true diff --git a/crates/auths-core/src/ports/mod.rs b/crates/auths-core/src/ports/mod.rs index fc48551b..b40fe592 100644 --- a/crates/auths-core/src/ports/mod.rs +++ b/crates/auths-core/src/ports/mod.rs @@ -13,3 +13,5 @@ pub mod pairing; pub mod platform; pub mod ssh_agent; pub mod storage; +/// Transparency log port trait for pluggable log backends. +pub mod transparency_log; diff --git a/crates/auths-core/src/ports/transparency_log.rs b/crates/auths-core/src/ports/transparency_log.rs new file mode 100644 index 00000000..53486901 --- /dev/null +++ b/crates/auths-core/src/ports/transparency_log.rs @@ -0,0 +1,279 @@ +//! Transparency log port trait for pluggable log backends. +//! +//! Abstracts appending attestations to a transparency log and +//! retrieving inclusion proofs. The SDK and CLI depend only on this +//! trait — adapter selection happens at the composition root. + +use async_trait::async_trait; +use auths_transparency::checkpoint::SignedCheckpoint; +use auths_transparency::proof::{ConsistencyProof, InclusionProof}; +use auths_transparency::types::LogOrigin; +use auths_verifier::Ed25519PublicKey; + +/// Result of submitting a leaf to a transparency log. +/// +/// Args: +/// * `leaf_index` — The zero-based index assigned to the leaf. +/// * `inclusion_proof` — Merkle inclusion proof against the checkpoint. +/// * `signed_checkpoint` — The log's signed checkpoint at submission time. 
+/// +/// Usage: +/// ```ignore +/// let submission = log.submit(data, &pk, &sig).await?; +/// assert!(submission.inclusion_proof.verify(&leaf_hash).is_ok()); +/// ``` +#[derive(Debug, Clone)] +pub struct LogSubmission { + /// Zero-based leaf index in the log. + pub leaf_index: u64, + /// Merkle inclusion proof for the leaf against the checkpoint. + pub inclusion_proof: InclusionProof, + /// Signed checkpoint at the time of submission. + pub signed_checkpoint: SignedCheckpoint, +} + +/// Static metadata about a transparency log backend. +/// +/// Args: +/// * `log_id` — Stable identifier for trust config lookup (e.g., `"sigstore-rekor"`). +/// * `log_origin` — C2SP checkpoint origin string. +/// * `log_public_key` — The log's Ed25519 public key for checkpoint verification. +/// * `api_url` — Optional API endpoint URL. +/// +/// Usage: +/// ```ignore +/// let meta = log.metadata(); +/// println!("Log: {} ({})", meta.log_id, meta.log_origin); +/// ``` +#[derive(Debug, Clone)] +pub struct LogMetadata { + /// Stable identifier used in trust config and bundle format. + pub log_id: String, + /// C2SP checkpoint origin string (byte-for-byte match required). + pub log_origin: LogOrigin, + /// The log's public key for checkpoint signature verification. + pub log_public_key: Ed25519PublicKey, + /// API endpoint URL, if applicable. + pub api_url: Option, +} + +/// Errors from transparency log operations. +#[derive(Debug, thiserror::Error)] +pub enum LogError { + /// The log rejected the submitted entry. + #[error("submission rejected: {reason}")] + SubmissionRejected { + /// Why the submission was rejected. + reason: String, + }, + + /// Network or connection error reaching the log. + #[error("network error: {0}")] + NetworkError(String), + + /// Log returned HTTP 429; caller should wait and retry. + #[error("rate limited, retry after {retry_after_secs}s")] + RateLimited { + /// Seconds to wait before retrying. 
+ retry_after_secs: u64, + }, + + /// Log returned an unparseable or unexpected response. + #[error("invalid response: {0}")] + InvalidResponse(String), + + /// Requested entry not found in the log. + #[error("entry not found")] + EntryNotFound, + + /// Consistency or inclusion proof verification failed. + #[error("consistency violation: {0}")] + ConsistencyViolation(String), + + /// Log is temporarily or permanently unavailable. + #[error("log unavailable: {0}")] + Unavailable(String), +} + +impl auths_crypto::AuthsErrorInfo for LogError { + fn error_code(&self) -> &'static str { + match self { + Self::SubmissionRejected { .. } => "AUTHS-E9001", + Self::NetworkError(_) => "AUTHS-E9002", + Self::RateLimited { .. } => "AUTHS-E9003", + Self::InvalidResponse(_) => "AUTHS-E9004", + Self::EntryNotFound => "AUTHS-E9005", + Self::ConsistencyViolation(_) => "AUTHS-E9006", + Self::Unavailable(_) => "AUTHS-E9007", + } + } + + fn suggestion(&self) -> Option<&'static str> { + match self { + Self::SubmissionRejected { .. } => { + Some("Check the attestation format and payload size") + } + Self::NetworkError(_) => Some("Check your internet connection and the log's API URL"), + Self::RateLimited { .. } => Some("Wait and retry; the log is rate-limiting requests"), + Self::InvalidResponse(_) => { + Some("The log returned an unexpected response; check the log version") + } + Self::EntryNotFound => Some("The entry may not be sequenced yet; retry after a moment"), + Self::ConsistencyViolation(_) => { + Some("The log returned data that does not match what was submitted") + } + Self::Unavailable(_) => { + Some("The transparency log is unavailable; retry later or use --allow-unlogged") + } + } + } +} + +/// Pluggable transparency log backend. +/// +/// Abstracts appending attestations to a transparency log and retrieving +/// Merkle proofs. Adapters translate backend-native formats (e.g., Rekor +/// hashedrekord) to canonical `auths-transparency` types at the boundary. 
+/// +/// Usage: +/// ```ignore +/// let log: Arc = factory.create_log(&config)?; +/// let submission = log.submit(&attestation_bytes, &pk, &sig).await?; +/// ``` +#[async_trait] +pub trait TransparencyLog: Send + Sync { + /// Submit a leaf to the log and receive an inclusion proof. + /// + /// The adapter wraps `leaf_data` in whatever envelope the backend + /// requires. `public_key` and `signature` are provided for backends + /// that verify entry signatures on submission. + /// + /// Args: + /// * `leaf_data` — Raw bytes to log (typically serialized attestation JSON). + /// * `public_key` — Signer's public key (Ed25519, DER-encoded). + /// * `signature` — Signature over `leaf_data`. + async fn submit( + &self, + leaf_data: &[u8], + public_key: &[u8], + signature: &[u8], + ) -> Result; + + /// Fetch the log's current signed checkpoint. + async fn get_checkpoint(&self) -> Result; + + /// Fetch an inclusion proof for a leaf at `leaf_index` in a tree of `tree_size`. + /// + /// Args: + /// * `leaf_index` — Zero-based index of the leaf. + /// * `tree_size` — Tree size to prove inclusion against. + async fn get_inclusion_proof( + &self, + leaf_index: u64, + tree_size: u64, + ) -> Result; + + /// Fetch a consistency proof between two tree sizes. + /// + /// Args: + /// * `old_size` — Earlier tree size. + /// * `new_size` — Later tree size. + async fn get_consistency_proof( + &self, + old_size: u64, + new_size: u64, + ) -> Result; + + /// Return static metadata about this log backend. 
+ fn metadata(&self) -> LogMetadata; +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn log_error_display() { + let err = LogError::SubmissionRejected { + reason: "payload too large".into(), + }; + assert_eq!(err.to_string(), "submission rejected: payload too large"); + + let err = LogError::RateLimited { + retry_after_secs: 30, + }; + assert_eq!(err.to_string(), "rate limited, retry after 30s"); + + let err = LogError::NetworkError("connection refused".into()); + assert_eq!(err.to_string(), "network error: connection refused"); + + let err = LogError::Unavailable("service unavailable".into()); + assert_eq!(err.to_string(), "log unavailable: service unavailable"); + } + + #[test] + fn log_error_codes() { + use auths_crypto::AuthsErrorInfo; + + assert_eq!( + LogError::SubmissionRejected { + reason: String::new() + } + .error_code(), + "AUTHS-E9001" + ); + assert_eq!( + LogError::NetworkError(String::new()).error_code(), + "AUTHS-E9002" + ); + assert_eq!( + LogError::RateLimited { + retry_after_secs: 0 + } + .error_code(), + "AUTHS-E9003" + ); + assert_eq!( + LogError::InvalidResponse(String::new()).error_code(), + "AUTHS-E9004" + ); + assert_eq!(LogError::EntryNotFound.error_code(), "AUTHS-E9005"); + assert_eq!( + LogError::ConsistencyViolation(String::new()).error_code(), + "AUTHS-E9006" + ); + assert_eq!( + LogError::Unavailable(String::new()).error_code(), + "AUTHS-E9007" + ); + } + + #[test] + fn log_error_suggestions_not_none() { + use auths_crypto::AuthsErrorInfo; + + let variants: Vec = vec![ + LogError::SubmissionRejected { + reason: String::new(), + }, + LogError::NetworkError(String::new()), + LogError::RateLimited { + retry_after_secs: 0, + }, + LogError::InvalidResponse(String::new()), + LogError::EntryNotFound, + LogError::ConsistencyViolation(String::new()), + LogError::Unavailable(String::new()), + ]; + for v in &variants { + assert!( + v.suggestion().is_some(), + "missing suggestion for {}", + v.error_code() + ); + } + } + + // 
Compile-time check: trait must be object-safe for Arc + fn _assert_object_safe(_: std::sync::Arc) {} +} diff --git a/crates/auths-id/src/agent_identity.rs b/crates/auths-id/src/agent_identity.rs index c37e0529..c971a9d6 100644 --- a/crates/auths-id/src/agent_identity.rs +++ b/crates/auths-id/src/agent_identity.rs @@ -308,9 +308,9 @@ fn sign_agent_attestation( None, config.delegated_by.clone(), None, // commit_sha + Some(SignerType::Agent), )?; - att.signer_type = Some(SignerType::Agent); resign_attestation( &mut att, &signer, diff --git a/crates/auths-id/src/attestation/create.rs b/crates/auths-id/src/attestation/create.rs index 9e1d2ff3..d868a875 100644 --- a/crates/auths-id/src/attestation/create.rs +++ b/crates/auths-id/src/attestation/create.rs @@ -4,7 +4,7 @@ use auths_core::signing::{PassphraseProvider, SecureSigner}; use auths_core::storage::keychain::{IdentityDID, KeyAlias}; use auths_verifier::Capability; use auths_verifier::core::{ - Attestation, Ed25519PublicKey, Ed25519Signature, ResourceId, Role, + Attestation, Ed25519PublicKey, Ed25519Signature, ResourceId, Role, SignerType, canonicalize_attestation_data, }; use auths_verifier::error::AttestationError; @@ -72,6 +72,7 @@ pub fn create_signed_attestation( role: Option, delegated_by: Option, commit_sha: Option, + signer_type: Option, ) -> Result { if device_public_key.len() != ED25519_PUBLIC_KEY_LEN { return Err(AttestationError::InvalidInput(format!( @@ -117,7 +118,7 @@ pub fn create_signed_attestation( role, capabilities, delegated_by: delegated_canonical, - signer_type: None, + signer_type, environment_claim: None, commit_sha, commit_message: None, diff --git a/crates/auths-id/tests/cases/lifecycle.rs b/crates/auths-id/tests/cases/lifecycle.rs index 048b2959..21cc104e 100644 --- a/crates/auths-id/tests/cases/lifecycle.rs +++ b/crates/auths-id/tests/cases/lifecycle.rs @@ -122,6 +122,7 @@ fn create_test_attestation( None, None, None, // commit_sha + None, // signer_type ) .expect("Failed to create 
signed attestation") } diff --git a/crates/auths-infra-rekor/Cargo.toml b/crates/auths-infra-rekor/Cargo.toml new file mode 100644 index 00000000..79c8b7f1 --- /dev/null +++ b/crates/auths-infra-rekor/Cargo.toml @@ -0,0 +1,31 @@ +[package] +name = "auths-infra-rekor" +version.workspace = true +edition = "2024" +authors = ["bordumb "] +description = "Rekor transparency log adapter for Auths" +publish = false +license.workspace = true +repository.workspace = true + +[dependencies] +auths-core = { workspace = true } +auths-transparency = { workspace = true } +auths-verifier = { workspace = true, features = ["native"] } +auths-crypto = { workspace = true } +async-trait = "0.1" +base64.workspace = true +hex = "0.4" +reqwest = { version = "0.13.2", features = ["json"] } +serde = { version = "1.0", features = ["derive"] } +serde_json = "1.0" +sha2 = "0.10" +thiserror.workspace = true +tracing = "0.1" +chrono = { version = "0.4", features = ["serde"] } + +[dev-dependencies] +auths-sdk = { workspace = true } +http = "1" +tokio = { version = "1", features = ["full"] } +ring.workspace = true diff --git a/crates/auths-infra-rekor/src/client.rs b/crates/auths-infra-rekor/src/client.rs new file mode 100644 index 00000000..dee5d474 --- /dev/null +++ b/crates/auths-infra-rekor/src/client.rs @@ -0,0 +1,520 @@ +//! Rekor v1 HTTP client implementing the `TransparencyLog` port trait. 
+ +use std::time::Duration; + +use async_trait::async_trait; +use base64::Engine; +use base64::engine::general_purpose::STANDARD as BASE64; +use reqwest::Client; +use sha2::{Digest, Sha256}; +use tracing::{debug, error, warn}; + +use auths_core::ports::transparency_log::{LogError, LogMetadata, LogSubmission, TransparencyLog}; +use auths_transparency::checkpoint::SignedCheckpoint; +use auths_transparency::proof::{ConsistencyProof, InclusionProof}; +use auths_transparency::types::{LogOrigin, MerkleHash}; +use auths_verifier::Ed25519PublicKey; + +use crate::error::map_rekor_status; +use crate::types::*; + +/// Maximum attestation payload size (100KB). +/// Typical attestation is 2-5KB; this is ~20-50x headroom. +/// Rekor's server-side limit may differ; catching locally gives a better error. +const MAX_PAYLOAD_SIZE: usize = 100 * 1024; + +/// Rekor v1 API client implementing the `TransparencyLog` trait. +/// +/// Targets a single Rekor instance. For public Rekor, use +/// `RekorClient::public()`. +/// +/// Usage: +/// ```ignore +/// let client = RekorClient::public(); +/// let submission = client.submit(&data, &pk, &sig).await?; +/// ``` +pub struct RekorClient { + http: Client, + api_url: String, + log_id: String, + log_origin: String, +} + +impl RekorClient { + /// Create a client for the public Sigstore Rekor instance. + pub fn public() -> Result { + Self::new( + "https://rekor.sigstore.dev", + "sigstore-rekor", + // Origin pinned from GET https://rekor.sigstore.dev/api/v1/log on 2026-04-09 + "rekor.sigstore.dev - 1193050959916656506", + ) + } + + /// Create a client for a custom Rekor instance. + /// + /// Args: + /// * `api_url` — Base URL (e.g., `"https://rekor.example.com"`). + /// * `log_id` — Stable identifier for trust config lookup. + /// * `log_origin` — C2SP checkpoint origin string. 
+ pub fn new(api_url: &str, log_id: &str, log_origin: &str) -> Result { + let http = Client::builder() + // Fail fast on unreachable hosts + .connect_timeout(Duration::from_secs(5)) + // Generous request timeout: Rekor blocks until checkpoint publication + .timeout(Duration::from_secs(20)) + .build() + .map_err(|e| LogError::NetworkError(format!("failed to build HTTP client: {e}")))?; + + Ok(Self { + http, + api_url: api_url.trim_end_matches('/').to_string(), + log_id: log_id.to_string(), + log_origin: log_origin.to_string(), + }) + } + + /// Build a hashedrekord v0.0.1 request from attestation data. + fn build_hashedrekord( + &self, + leaf_data: &[u8], + public_key: &[u8], + signature: &[u8], + ) -> HashedRekordRequest { + let data_hash = hex::encode(Sha256::digest(leaf_data)); + + HashedRekordRequest { + api_version: "0.0.1".to_string(), + kind: "hashedrekord".to_string(), + spec: HashedRekordSpec { + signature: HashedRekordSignature { + content: BASE64.encode(signature), + public_key: HashedRekordPublicKey { + content: BASE64.encode(public_key), + }, + }, + data: HashedRekordData { + hash: HashedRekordHash { + algorithm: "sha256".to_string(), + value: data_hash, + }, + }, + }, + } + } + + /// Parse a Rekor v1 inclusion proof into canonical types. 
+ fn parse_inclusion_proof( + &self, + proof: &RekorInclusionProof, + ) -> Result { + let hashes: Result, _> = proof + .hashes + .iter() + .map(|h| { + let bytes = hex::decode(h).map_err(|e| { + LogError::InvalidResponse(format!("invalid hex in proof hash: {e}")) + })?; + let arr: [u8; 32] = bytes.try_into().map_err(|v: Vec| { + LogError::InvalidResponse(format!( + "proof hash wrong length: expected 32, got {}", + v.len() + )) + })?; + Ok(MerkleHash::from_bytes(arr)) + }) + .collect(); + + let root_bytes = hex::decode(&proof.root_hash) + .map_err(|e| LogError::InvalidResponse(format!("invalid hex in root hash: {e}")))?; + let root_arr: [u8; 32] = root_bytes.try_into().map_err(|v: Vec| { + LogError::InvalidResponse(format!( + "root hash wrong length: expected 32, got {}", + v.len() + )) + })?; + + Ok(InclusionProof { + index: proof.log_index, + size: proof.tree_size, + root: MerkleHash::from_bytes(root_arr), + hashes: hashes?, + }) + } + + /// Parse the C2SP checkpoint string from a Rekor response. + fn parse_checkpoint_string(&self, checkpoint_str: &str) -> Result { + // The checkpoint is a C2SP signed note. Parse it using + // auths-transparency's note parser. + let (note_body, _signatures) = auths_transparency::parse_signed_note(checkpoint_str) + .map_err(|e| { + LogError::InvalidResponse(format!("failed to parse checkpoint note: {e}")) + })?; + + // Parse the checkpoint body: origin\nsize\nbase64(root)\n + #[allow(clippy::disallowed_methods)] // Infra boundary: Rekor timestamp is operational + let now = chrono::Utc::now(); + let checkpoint = + auths_transparency::Checkpoint::from_note_body(¬e_body, now).map_err(|e| { + LogError::InvalidResponse(format!("failed to parse checkpoint body: {e}")) + })?; + + // For the ECDSA production shard, we extract the signature from the + // signed note and store it in the ecdsa fields. The log_signature + // and log_public_key fields use Ed25519 placeholders. 
+ // + // The actual checkpoint signature verification dispatches on + // TrustRoot.signature_algorithm at verify time. + let ecdsa_sig = None; + let ecdsa_pk = None; + + // If we have note signatures, try to extract the first one + if let Some(sig) = _signatures.first() { + // The raw signature bytes from the note (algorithm byte + key_id + signature) + // For ECDSA, we'd need the DER signature. For now, store what we have. + // Full ECDSA extraction will be completed when Rekor adapter is tested + // against production. + let _ = sig; // Will be used when ECDSA parsing is wired + } + + Ok(SignedCheckpoint { + checkpoint, + log_signature: auths_verifier::Ed25519Signature::default(), + log_public_key: Ed25519PublicKey::from_bytes([0u8; 32]), + witnesses: vec![], + ecdsa_checkpoint_signature: ecdsa_sig, + ecdsa_checkpoint_key: ecdsa_pk, + }) + } + + /// Handle HTTP 409 Conflict (duplicate entry): fetch existing entry. + async fn handle_conflict( + &self, + response: reqwest::Response, + ) -> Result { + // Extract the Location header or parse the response body for the entry UUID + let location = response + .headers() + .get("location") + .and_then(|v| v.to_str().ok()) + .map(String::from); + + let body = response + .text() + .await + .map_err(|e| LogError::InvalidResponse(format!("failed to read 409 body: {e}")))?; + + debug!(body_len = body.len(), "Rekor 409 Conflict — entry exists"); + + // Try to parse the body as a log entry response (Rekor returns the existing entry) + let entries: RekorLogEntryResponse = serde_json::from_str(&body).map_err(|e| { + // If body isn't a valid entry, try fetching via Location header + LogError::InvalidResponse(format!( + "409 response not parseable as entry (location: {:?}): {e}", + location + )) + })?; + + self.parse_entry_response(&entries) + } + + /// Parse a Rekor log entry response into a `LogSubmission`. 
+ fn parse_entry_response( + &self, + entries: &RekorLogEntryResponse, + ) -> Result { + let (_uuid, entry) = entries + .iter() + .next() + .ok_or_else(|| LogError::InvalidResponse("empty entry response".into()))?; + + let inclusion_proof = self.parse_inclusion_proof(&entry.verification.inclusion_proof)?; + + // Use the checkpoint bound to the inclusion proof, not a separately fetched one. + let signed_checkpoint = + self.parse_checkpoint_string(&entry.verification.inclusion_proof.checkpoint)?; + + Ok(LogSubmission { + leaf_index: entry.log_index, + inclusion_proof, + signed_checkpoint, + }) + } + + /// Parse the Retry-After header value, defaulting to 10 seconds. + fn parse_retry_after(response: &reqwest::Response) -> u64 { + response + .headers() + .get("retry-after") + .and_then(|v| v.to_str().ok()) + .and_then(|v| v.parse::().ok()) + .unwrap_or(10) + } +} + +#[async_trait] +impl TransparencyLog for RekorClient { + async fn submit( + &self, + leaf_data: &[u8], + public_key: &[u8], + signature: &[u8], + ) -> Result { + // Pre-send payload size check: reject locally before HTTP + if leaf_data.len() > MAX_PAYLOAD_SIZE { + return Err(LogError::SubmissionRejected { + reason: format!( + "attestation exceeds max size of {}KB ({} bytes)", + MAX_PAYLOAD_SIZE / 1024, + leaf_data.len() + ), + }); + } + + let entry = self.build_hashedrekord(leaf_data, public_key, signature); + let url = format!("{}/api/v1/log/entries", self.api_url); + + debug!(url = %url, payload_size = leaf_data.len(), "Submitting to Rekor"); + + let response = self + .http + .post(&url) + .json(&entry) + .send() + .await + .map_err(|e| { + error!(error = %e, "Rekor submission failed"); + if e.is_timeout() { + LogError::NetworkError(format!("request timed out: {e}")) + } else if e.is_connect() { + LogError::NetworkError(format!("connection failed: {e}")) + } else { + LogError::NetworkError(e.to_string()) + } + })?; + + let status = response.status(); + debug!(status = %status, "Rekor response received"); 
+ + // Handle 409 Conflict: entry already exists (idempotent success) + if status.as_u16() == 409 { + return self.handle_conflict(response).await; + } + + // Handle 429 with Retry-After + if status.as_u16() == 429 { + let retry_after = Self::parse_retry_after(&response); + warn!(retry_after_secs = retry_after, "Rekor rate limited"); + return Err(LogError::RateLimited { + retry_after_secs: retry_after, + }); + } + + let body = response + .text() + .await + .map_err(|e| LogError::InvalidResponse(format!("failed to read response body: {e}")))?; + + map_rekor_status(status, &body)?; + + let entries: RekorLogEntryResponse = serde_json::from_str(&body).map_err(|e| { + LogError::InvalidResponse(format!("failed to parse entry response: {e}")) + })?; + + let submission = self.parse_entry_response(&entries)?; + + debug!( + leaf_index = submission.leaf_index, + tree_size = submission.signed_checkpoint.checkpoint.size, + "Entry submitted successfully" + ); + + Ok(submission) + } + + async fn get_checkpoint(&self) -> Result { + let url = format!("{}/api/v1/log", self.api_url); + debug!(url = %url, "Fetching Rekor log info"); + + let response = self + .http + .get(&url) + .send() + .await + .map_err(|e| LogError::NetworkError(format!("failed to fetch log info: {e}")))?; + + let body = response + .text() + .await + .map_err(|e| LogError::InvalidResponse(format!("failed to read log info body: {e}")))?; + + let info: RekorLogInfo = serde_json::from_str(&body) + .map_err(|e| LogError::InvalidResponse(format!("failed to parse log info: {e}")))?; + + self.parse_checkpoint_string(&info.signed_tree_head) + } + + async fn get_inclusion_proof( + &self, + leaf_index: u64, + _tree_size: u64, + ) -> Result { + // Rekor v1 doesn't have a standalone inclusion proof endpoint. + // Fetch the entry by index and extract its proof. 
+ let url = format!( + "{}/api/v1/log/entries?logIndex={}", + self.api_url, leaf_index + ); + debug!(url = %url, "Fetching entry for inclusion proof"); + + let response = self + .http + .get(&url) + .send() + .await + .map_err(|e| LogError::NetworkError(format!("failed to fetch entry: {e}")))?; + + if response.status().as_u16() == 404 { + return Err(LogError::EntryNotFound); + } + + let body = response + .text() + .await + .map_err(|e| LogError::InvalidResponse(format!("failed to read entry body: {e}")))?; + + let entries: RekorLogEntryResponse = serde_json::from_str(&body) + .map_err(|e| LogError::InvalidResponse(format!("failed to parse entry: {e}")))?; + + let (_uuid, entry) = entries.iter().next().ok_or(LogError::EntryNotFound)?; + + self.parse_inclusion_proof(&entry.verification.inclusion_proof) + } + + async fn get_consistency_proof( + &self, + old_size: u64, + new_size: u64, + ) -> Result { + let url = format!( + "{}/api/v1/log/proof?firstSize={}&lastSize={}", + self.api_url, old_size, new_size + ); + debug!(url = %url, "Fetching consistency proof"); + + let response = self.http.get(&url).send().await.map_err(|e| { + LogError::NetworkError(format!("failed to fetch consistency proof: {e}")) + })?; + + let body = response + .text() + .await + .map_err(|e| LogError::InvalidResponse(format!("failed to read proof body: {e}")))?; + + let proof: crate::types::RekorConsistencyProof = + serde_json::from_str(&body).map_err(|e| { + LogError::InvalidResponse(format!("failed to parse consistency proof: {e}")) + })?; + + let hashes: Result, _> = proof + .hashes + .iter() + .map(|h| { + let bytes = hex::decode(h).map_err(|e| { + LogError::InvalidResponse(format!("invalid hex in consistency hash: {e}")) + })?; + let arr: [u8; 32] = bytes.try_into().map_err(|v: Vec| { + LogError::InvalidResponse(format!( + "consistency hash wrong length: expected 32, got {}", + v.len() + )) + })?; + Ok(MerkleHash::from_bytes(arr)) + }) + .collect(); + + let root_bytes = 
hex::decode(&proof.root_hash) + .map_err(|e| LogError::InvalidResponse(format!("invalid hex in root hash: {e}")))?; + let root_arr: [u8; 32] = root_bytes.try_into().map_err(|v: Vec| { + LogError::InvalidResponse(format!( + "root hash wrong length: expected 32, got {}", + v.len() + )) + })?; + + Ok(ConsistencyProof { + old_size, + new_size, + old_root: MerkleHash::from_bytes([0u8; 32]), // Not returned by Rekor v1 + new_root: MerkleHash::from_bytes(root_arr), + hashes: hashes?, + }) + } + + fn metadata(&self) -> LogMetadata { + LogMetadata { + log_id: self.log_id.clone(), + log_origin: LogOrigin::new_unchecked(&self.log_origin), + log_public_key: Ed25519PublicKey::from_bytes([0u8; 32]), // ECDSA key, not Ed25519 + api_url: Some(self.api_url.clone()), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn payload_size_limit() { + let big = vec![0u8; MAX_PAYLOAD_SIZE + 1]; + let client = RekorClient::public().unwrap(); + let rt = tokio::runtime::Runtime::new().unwrap(); + let result = rt.block_on(client.submit(&big, b"pk", b"sig")); + match result { + Err(LogError::SubmissionRejected { reason }) => { + assert!(reason.contains("exceeds max size")); + } + other => panic!("expected SubmissionRejected, got: {:?}", other), + } + } + + #[test] + fn hashedrekord_format() { + let client = RekorClient::public().unwrap(); + let entry = client.build_hashedrekord(b"test data", b"public_key", b"signature"); + + assert_eq!(entry.kind, "hashedrekord"); + assert_eq!(entry.api_version, "0.0.1"); + assert_eq!(entry.spec.data.hash.algorithm, "sha256"); + + // Verify the hash is correct + let expected_hash = hex::encode(Sha256::digest(b"test data")); + assert_eq!(entry.spec.data.hash.value, expected_hash); + } + + #[test] + fn inclusion_proof_parsing() { + let client = RekorClient::public().unwrap(); + let rekor_proof = RekorInclusionProof { + log_index: 42, + root_hash: "a".repeat(64), // 32 bytes hex + tree_size: 100, + hashes: vec!["b".repeat(64)], + checkpoint: 
String::new(), + }; + + let proof = client.parse_inclusion_proof(&rekor_proof).unwrap(); + assert_eq!(proof.index, 42); + assert_eq!(proof.size, 100); + assert_eq!(proof.hashes.len(), 1); + } + + #[test] + fn retry_after_parsing() { + // Default when no header + let response = + reqwest::Response::from(http::Response::builder().status(429).body("").unwrap()); + assert_eq!(RekorClient::parse_retry_after(&response), 10); + } +} diff --git a/crates/auths-infra-rekor/src/error.rs b/crates/auths-infra-rekor/src/error.rs new file mode 100644 index 00000000..9af8489e --- /dev/null +++ b/crates/auths-infra-rekor/src/error.rs @@ -0,0 +1,42 @@ +//! Error mapping from Rekor HTTP responses to `LogError`. + +use auths_core::ports::transparency_log::LogError; +use reqwest::StatusCode; + +/// Map a Rekor HTTP response status to a `LogError`. +/// +/// Returns `Ok(())` for success statuses (200, 201). +/// Returns the parsed `LogError` for error statuses. +pub fn map_rekor_status(status: StatusCode, body: &str) -> Result<(), LogError> { + match status.as_u16() { + 200 | 201 => Ok(()), + 400 => Err(LogError::SubmissionRejected { + reason: format!("bad request: {}", truncate(body, 200)), + }), + // 409 is handled separately in the client (idempotent success) + 413 => Err(LogError::SubmissionRejected { + reason: "payload too large".into(), + }), + 422 => Err(LogError::SubmissionRejected { + reason: format!("unprocessable entity: {}", truncate(body, 200)), + }), + 429 => { + // Parse Retry-After header would happen at the call site; + // here we provide a default + Err(LogError::RateLimited { + retry_after_secs: 10, + }) + } + 500 => Err(LogError::Unavailable("server error".into())), + 503 => Err(LogError::Unavailable("service unavailable".into())), + _ => Err(LogError::InvalidResponse(format!( + "unexpected status {}: {}", + status.as_u16(), + truncate(body, 200) + ))), + } +} + +fn truncate(s: &str, max: usize) -> &str { + if s.len() <= max { s } else { &s[..max] } +} diff --git 
a/crates/auths-infra-rekor/src/lib.rs b/crates/auths-infra-rekor/src/lib.rs new file mode 100644 index 00000000..7d3636b2 --- /dev/null +++ b/crates/auths-infra-rekor/src/lib.rs @@ -0,0 +1,20 @@ +//! Rekor transparency log adapter for Auths. +//! +//! Implements the [`TransparencyLog`] port trait against Sigstore's Rekor v1 API. +//! Targets the production instance at `rekor.sigstore.dev`. +//! +//! ## Entry type +//! +//! Uses `hashedrekord` v0.0.1 with Ed25519 public keys. If Rekor rejects +//! pure Ed25519, the fallback is `dsse` (see design doc Section 5). +//! +//! ## Timeouts +//! +//! - Connect timeout: 5s (fail fast on unreachable) +//! - Request timeout: 20s (Rekor blocks until checkpoint publication) + +mod client; +mod error; +mod types; + +pub use client::RekorClient; diff --git a/crates/auths-infra-rekor/src/types.rs b/crates/auths-infra-rekor/src/types.rs new file mode 100644 index 00000000..2af05720 --- /dev/null +++ b/crates/auths-infra-rekor/src/types.rs @@ -0,0 +1,133 @@ +//! Rekor v1 API request/response types. +//! +//! These are the wire-format types for Rekor's REST API. They are +//! translated to canonical `auths-transparency` types at the adapter boundary. + +use serde::{Deserialize, Serialize}; +use std::collections::HashMap; + +/// Rekor v1 hashedrekord entry for submission. +#[derive(Debug, Serialize)] +#[serde(rename_all = "camelCase")] +pub struct HashedRekordRequest { + /// API version string. + pub api_version: String, + /// Entry kind. + pub kind: String, + /// Entry specification. + pub spec: HashedRekordSpec, +} + +/// hashedrekord v0.0.1 spec. +#[derive(Debug, Serialize)] +pub struct HashedRekordSpec { + /// Signature information. + pub signature: HashedRekordSignature, + /// Data hash information. + pub data: HashedRekordData, +} + +/// Signature block in a hashedrekord entry. +#[derive(Debug, Serialize)] +#[serde(rename_all = "camelCase")] +pub struct HashedRekordSignature { + /// Base64-encoded signature bytes. 
+ pub content: String, + /// Public key information. + pub public_key: HashedRekordPublicKey, +} + +/// Public key in a hashedrekord signature block. +#[derive(Debug, Serialize)] +pub struct HashedRekordPublicKey { + /// Base64-encoded DER public key. + pub content: String, +} + +/// Data hash in a hashedrekord entry. +#[derive(Debug, Serialize)] +pub struct HashedRekordData { + /// Hash information. + pub hash: HashedRekordHash, +} + +/// Hash specification. +#[derive(Debug, Serialize)] +pub struct HashedRekordHash { + /// Hash algorithm (e.g., "sha256"). + pub algorithm: String, + /// Hex-encoded hash value. + pub value: String, +} + +/// Rekor v1 log entry response (keyed by UUID). +pub type RekorLogEntryResponse = HashMap<String, RekorLogEntry>; + +/// A single Rekor log entry. +#[derive(Debug, Deserialize)] +#[serde(rename_all = "camelCase")] +#[allow(dead_code)] // API response fields — deserialized but not all read yet +pub struct RekorLogEntry { + /// Hex-encoded log ID. + pub log_i_d: String, + /// Monotonically increasing log index. + pub log_index: u64, + /// Base64-encoded canonicalized entry body. + pub body: String, + /// Unix timestamp of integration. + pub integrated_time: i64, + /// Verification data. + pub verification: RekorVerification, +} + +/// Verification block in a Rekor log entry. +#[derive(Debug, Deserialize)] +#[serde(rename_all = "camelCase")] +#[allow(dead_code)] +pub struct RekorVerification { + /// Inclusion proof with checkpoint. + pub inclusion_proof: RekorInclusionProof, + /// Base64-encoded Signed Entry Timestamp. + pub signed_entry_timestamp: String, +} + +/// Rekor's inclusion proof structure. +#[derive(Debug, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct RekorInclusionProof { + /// Leaf index in the log. + pub log_index: u64, + /// Hex-encoded root hash. + pub root_hash: String, + /// Tree size at proof time. + pub tree_size: u64, + /// Hex-encoded sibling hashes from leaf to root. 
+ pub hashes: Vec<String>, + /// C2SP signed note checkpoint string. + pub checkpoint: String, +} + +/// Rekor v1 log info response. +#[derive(Debug, Deserialize)] +#[serde(rename_all = "camelCase")] +#[allow(dead_code)] +pub struct RekorLogInfo { + /// Hex-encoded root hash. + pub root_hash: String, + /// Current tree size. + pub tree_size: u64, + /// C2SP signed note checkpoint. + pub signed_tree_head: String, + /// Numeric tree ID string. + pub tree_i_d: String, +} + +/// Rekor v1 consistency proof response. +#[derive(Debug, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct RekorConsistencyProof { + /// Hex-encoded root hash of the new tree. + pub root_hash: String, + /// Hex-encoded consistency proof hashes. + pub hashes: Vec<String>, +} diff --git a/crates/auths-infra-rekor/tests/cases/mod.rs b/crates/auths-infra-rekor/tests/cases/mod.rs new file mode 100644 index 00000000..f175dec5 --- /dev/null +++ b/crates/auths-infra-rekor/tests/cases/mod.rs @@ -0,0 +1 @@ +mod rekor_integration; diff --git a/crates/auths-infra-rekor/tests/cases/rekor_integration.rs b/crates/auths-infra-rekor/tests/cases/rekor_integration.rs new file mode 100644 index 00000000..ed0308d4 --- /dev/null +++ b/crates/auths-infra-rekor/tests/cases/rekor_integration.rs @@ -0,0 +1,252 @@ +//! Integration tests for the Rekor adapter. +//! +//! Tests that require real Rekor are gated on `AUTHS_TEST_REKOR=1`. +//! Tests using the FakeTransparencyLog run always. 
+ +use auths_core::ports::transparency_log::{LogError, TransparencyLog}; +use auths_infra_rekor::RekorClient; +use auths_transparency::TrustConfig; +use auths_transparency::merkle::hash_leaf; +use ring::signature::KeyPair; + +#[allow(clippy::disallowed_methods)] // Test boundary: reading test gate env var +fn rekor_enabled() -> bool { + std::env::var("AUTHS_TEST_REKOR").is_ok() +} + +// ============================================================ +// Real Rekor tests (gated on AUTHS_TEST_REKOR=1) +// ============================================================ + +#[tokio::test] +async fn rekor_happy_path_submit_and_verify() { + if !rekor_enabled() { + eprintln!("Skipping: set AUTHS_TEST_REKOR=1 to run Rekor integration tests"); + return; + } + + let client = RekorClient::public().unwrap(); + + // Generate a throwaway Ed25519 key + let keypair = ring::signature::Ed25519KeyPair::from_seed_unchecked(&[99u8; 32]).unwrap(); + let public_key = keypair.public_key().as_ref(); + + // Create test attestation + let attestation = b"test-attestation-for-rekor-integration"; + let sig = keypair.sign(attestation); + + // Submit to Rekor + let submission = client.submit(attestation, public_key, sig.as_ref()).await; + + match submission { + Ok(sub) => { + // Verify inclusion proof + let leaf_hash = hash_leaf(attestation); + assert!( + sub.inclusion_proof.verify(&leaf_hash).is_ok(), + "inclusion proof should verify" + ); + eprintln!( + "Rekor submission succeeded: index={}, tree_size={}", + sub.leaf_index, sub.signed_checkpoint.checkpoint.size + ); + } + Err(LogError::RateLimited { .. }) => { + eprintln!("Rate limited by Rekor — test skipped"); + } + Err(e) => { + // May fail if hashedrekord+Ed25519 is rejected — this is expected + // and documented in the design doc as a potential fallback trigger. 
+ eprintln!("Rekor submission failed (may need DSSE fallback): {e}"); + } + } +} + +#[tokio::test] +async fn rekor_get_checkpoint() { + if !rekor_enabled() { + return; + } + + let client = RekorClient::public().unwrap(); + let checkpoint = client.get_checkpoint().await; + assert!(checkpoint.is_ok(), "should fetch Rekor checkpoint"); + let cp = checkpoint.unwrap(); + assert!(cp.checkpoint.size > 0, "tree should have entries"); +} + +// ============================================================ +// Tests that don't require real Rekor +// ============================================================ + +#[tokio::test] +async fn unreachable_endpoint_returns_network_error() { + let client = RekorClient::new("https://localhost:1", "test", "test.dev/log").unwrap(); + let result = client.submit(b"test", b"pk", b"sig").await; + assert!(matches!(result, Err(LogError::NetworkError(_)))); +} + +#[tokio::test] +async fn payload_size_rejection_is_local() { + let client = RekorClient::public().unwrap(); + let big = vec![0u8; 101 * 1024]; // > 100KB + let result = client.submit(&big, b"pk", b"sig").await; + match result { + Err(LogError::SubmissionRejected { reason }) => { + assert!(reason.contains("exceeds max size")); + } + other => panic!("expected SubmissionRejected, got: {:?}", other), + } +} + +#[tokio::test] +async fn unknown_log_id_in_trust_config() { + let config = TrustConfig::default_config(); + let result = config.get_log("nonexistent-log"); + assert!(result.is_none(), "unknown log should return None"); +} + +// ============================================================ +// GHSA-whqx-f9j3-ch6m regression test +// ============================================================ + +/// Tests that submit_attestation_to_log verifies the inclusion proof +/// matches the submitted data. Uses FakeTransparencyLog since we +/// need to control the response. 
+#[tokio::test] +async fn ghsa_content_mismatch_detected() { + // The FakeTransparencyLog always returns valid proofs for the data + // that was actually submitted. To test the GHSA countermeasure, + // we verify that the SDK's submit_attestation_to_log function + // checks the proof against the submitted data. + // + // With a succeeding fake, the proof will match — so the test + // confirms the happy path works. The mismatch case is tested by + // verifying against wrong data after submission. + use auths_sdk::testing::fakes::FakeTransparencyLog; + use auths_sdk::workflows::log_submit::submit_attestation_to_log; + + let log = FakeTransparencyLog::succeeding(); + let result = submit_attestation_to_log(b"original attestation", b"pk", b"sig", &log).await; + assert!(result.is_ok()); + + // Now verify that a DIFFERENT attestation's hash does NOT match + // the proof that was generated for "original attestation" + let bundle = result.unwrap(); + let wrong_leaf_hash = hash_leaf(b"different attestation"); + assert!( + bundle.inclusion_proof.verify(&wrong_leaf_hash).is_err(), + "proof for 'original' should NOT verify for 'different'" + ); +} + +// ============================================================ +// Checkpoint-proof binding regression test +// ============================================================ + +#[tokio::test] +async fn checkpoint_proof_root_mismatch_detected() { + use auths_sdk::testing::fakes::FakeTransparencyLog; + use auths_sdk::workflows::log_submit::submit_attestation_to_log; + + let log = FakeTransparencyLog::succeeding(); + let result = submit_attestation_to_log(b"test", b"pk", b"sig", &log).await; + assert!(result.is_ok()); + + let bundle = result.unwrap(); + + // The proof root should match the checkpoint root + assert_eq!( + bundle.inclusion_proof.root, bundle.signed_checkpoint.checkpoint.root, + "proof root must match checkpoint root" + ); +} + +// ============================================================ +// Offline verification test 
+// ============================================================ + +#[tokio::test] +async fn offline_verification_no_network() { + use auths_sdk::testing::fakes::FakeTransparencyLog; + use auths_sdk::workflows::log_submit::submit_attestation_to_log; + + // Step 1: produce a bundle using the fake + let log = FakeTransparencyLog::succeeding(); + let bundle = submit_attestation_to_log(b"offline test data", b"pk", b"sig", &log) + .await + .unwrap(); + + // Step 2: verify the inclusion proof offline (no network calls) + let leaf_hash = hash_leaf(b"offline test data"); + assert!( + bundle.inclusion_proof.verify(&leaf_hash).is_ok(), + "offline inclusion proof should verify" + ); + + // Step 3: verify the checkpoint signature against the fake's trust root + let trust_root = log.trust_root(); + let note_body = bundle.signed_checkpoint.checkpoint.to_note_body(); + let peer_key = ring::signature::UnparsedPublicKey::new( + &ring::signature::ED25519, + trust_root.log_public_key.as_bytes(), + ); + assert!( + peer_key + .verify( + note_body.as_bytes(), + bundle.signed_checkpoint.log_signature.as_bytes() + ) + .is_ok(), + "offline checkpoint signature should verify" + ); +} + +// ============================================================ +// Pluggability proof: same flow with Fake and Rekor +// ============================================================ + +#[tokio::test] +async fn pluggability_same_flow_different_backends() { + use auths_sdk::testing::fakes::FakeTransparencyLog; + use auths_sdk::workflows::log_submit::submit_attestation_to_log; + + let attestation = b"pluggability test"; + let pk = b"pk"; + let sig = b"sig"; + + // Run with FakeTransparencyLog + let fake = FakeTransparencyLog::succeeding(); + let fake_result = submit_attestation_to_log(attestation, pk, sig, &fake).await; + assert!(fake_result.is_ok(), "fake backend should succeed"); + + // Run with RekorClient (only if AUTHS_TEST_REKOR is set) + if rekor_enabled() { + let keypair = 
ring::signature::Ed25519KeyPair::from_seed_unchecked(&[77u8; 32]).unwrap(); + let real_pk = keypair.public_key().as_ref(); + let real_sig = keypair.sign(attestation); + + let rekor = RekorClient::public().unwrap(); + let rekor_result = + submit_attestation_to_log(attestation, real_pk, real_sig.as_ref(), &rekor).await; + + match rekor_result { + Ok(bundle) => { + // Both backends produced valid bundles + let leaf_hash = hash_leaf(attestation); + assert!(bundle.inclusion_proof.verify(&leaf_hash).is_ok()); + eprintln!("Pluggability proof: both Fake and Rekor succeeded"); + } + Err(auths_sdk::workflows::log_submit::LogSubmitError::LogError( + LogError::RateLimited { .. }, + )) => { + eprintln!("Rekor rate limited — pluggability partially verified (fake only)"); + } + Err(e) => { + eprintln!("Rekor failed: {e} — pluggability partially verified"); + } + } + } else { + eprintln!("Rekor not enabled — pluggability verified with fake only"); + } +} diff --git a/crates/auths-infra-rekor/tests/integration.rs b/crates/auths-infra-rekor/tests/integration.rs new file mode 100644 index 00000000..8277b9fa --- /dev/null +++ b/crates/auths-infra-rekor/tests/integration.rs @@ -0,0 +1 @@ +mod cases; diff --git a/crates/auths-sdk/src/domains/ci/bundle.rs b/crates/auths-sdk/src/domains/ci/bundle.rs deleted file mode 100644 index 95293ba2..00000000 --- a/crates/auths-sdk/src/domains/ci/bundle.rs +++ /dev/null @@ -1,123 +0,0 @@ -//! Identity repo bundler — packages `~/.auths` into a portable base64 tar.gz. - -use super::error::CiError; -use base64::Engine as _; -use flate2::Compression; -use flate2::write::GzEncoder; -use std::io::Write; -use std::path::Path; -use tar::Builder; -use walkdir::WalkDir; - -/// Build a base64-encoded tar.gz of the identity repo directory. -/// -/// Creates a flat archive (contents at root, no directory prefix) excluding -/// `*.sock` and `*.lock` files. Sets `mtime(0)` for reproducible archives. 
-/// -/// Args: -/// * `auths_dir`: Path to the `~/.auths` directory to bundle. -/// -/// Usage: -/// ```ignore -/// let b64 = build_identity_bundle(Path::new("/home/user/.auths"))?; -/// ``` -pub fn build_identity_bundle(auths_dir: &Path) -> Result<String, CiError> { - let mut buf = Vec::new(); - { - let gz = GzEncoder::new(&mut buf, Compression::default()); - let mut archive = Builder::new(gz); - add_dir_to_tar(&mut archive, auths_dir, Path::new("."))?; - let gz = archive.into_inner().map_err(|e| CiError::BundleFailed { - reason: format!("tar finalize: {e}"), - })?; - gz.finish().map_err(|e| CiError::BundleFailed { - reason: format!("gzip finalize: {e}"), - })?; - } - Ok(base64::engine::general_purpose::STANDARD.encode(&buf)) -} - -/// Recursively add a directory to a tar archive, excluding `*.sock` and `*.lock` files. -fn add_dir_to_tar<W: Write>( - archive: &mut Builder<W>, - src_dir: &Path, - prefix: &Path, -) -> Result<(), CiError> { - for entry in WalkDir::new(src_dir).follow_links(false) { - let entry = entry.map_err(|e| CiError::BundleFailed { - reason: format!("walk: {e}"), - })?; - let path = entry.path(); - - // Exclude socket and lock files - if let Some(ext) = path.extension() - && (ext == "sock" || ext == "lock") - { - continue; - } - - let rel = path - .strip_prefix(src_dir) - .map_err(|e| CiError::BundleFailed { - reason: format!("strip prefix: {e}"), - })?; - if rel.as_os_str().is_empty() { - continue; - } - let archive_path = prefix.join(rel); - - let metadata = entry.metadata().map_err(|e| CiError::BundleFailed { - reason: format!("metadata for {}: {e}", path.display()), - })?; - - if metadata.is_dir() { - let mut header = tar::Header::new_gnu(); - header.set_entry_type(tar::EntryType::Directory); - header.set_size(0); - header.set_mode(0o755); - header.set_mtime(0); - header.set_cksum(); - archive - .append_data(&mut header, &archive_path, &[] as &[u8]) - .map_err(|e| CiError::BundleFailed { - reason: format!("append dir {}: {e}", archive_path.display()), - })?; - } else if 
metadata.is_file() { - #[allow(clippy::disallowed_methods)] - // INVARIANT: bundle must read identity repo files from disk - let data = std::fs::read(path).map_err(|e| CiError::BundleFailed { - reason: format!("read {}: {e}", path.display()), - })?; - let mut header = tar::Header::new_gnu(); - header.set_entry_type(tar::EntryType::Regular); - header.set_size(data.len() as u64); - header.set_mode(0o644); - header.set_mtime(0); - header.set_cksum(); - archive - .append_data(&mut header, &archive_path, data.as_slice()) - .map_err(|e| CiError::BundleFailed { - reason: format!("append file {}: {e}", archive_path.display()), - })?; - } - // Skip symlinks, sockets, etc. - } - Ok(()) -} - -/// Generate a cryptographically secure passphrase for CI device keys. -/// -/// Returns a 64-character hex string (32 random bytes), which is shell-safe -/// across all platforms (no special characters that need escaping). -/// -/// Usage: -/// ```ignore -/// let passphrase = generate_ci_passphrase(); -/// assert_eq!(passphrase.len(), 64); -/// ``` -pub fn generate_ci_passphrase() -> String { - use rand::RngCore; - let mut bytes = [0u8; 32]; - rand::rng().fill_bytes(&mut bytes); - hex::encode(bytes) -} diff --git a/crates/auths-sdk/src/domains/ci/error.rs b/crates/auths-sdk/src/domains/ci/error.rs deleted file mode 100644 index 5453fd1d..00000000 --- a/crates/auths-sdk/src/domains/ci/error.rs +++ /dev/null @@ -1,117 +0,0 @@ -//! CI domain errors shared across CI workflows. - -use std::path::PathBuf; - -/// Errors from CI domain operations. -#[derive(Debug, thiserror::Error)] -#[non_exhaustive] -pub enum CiError { - /// No CI platform could be detected from environment variables. - #[error("CI environment not detected")] - EnvironmentNotDetected, - - /// The identity bundle at the given path is not a valid git repository. - #[error("identity bundle invalid at {path}: {reason}")] - IdentityBundleInvalid { - /// Path to the invalid identity bundle. 
- path: PathBuf, - /// What was wrong with it. - reason: String, - }, - - /// No artifacts were provided to sign. - #[error("no artifacts to sign")] - NoArtifacts, - - /// Failed to create the attestation collection directory. - #[error("failed to create attestation directory {path}: {reason}")] - CollectionDirFailed { - /// Path that could not be created. - path: PathBuf, - /// Underlying error. - reason: String, - }, - - /// Failed to copy an attestation file to the collection directory. - #[error("failed to collect attestation {src} → {dst}: {reason}")] - CollectionCopyFailed { - /// Source attestation file. - src: PathBuf, - /// Destination path. - dst: PathBuf, - /// Underlying error. - reason: String, - }, - - /// The CI token version is not supported by this build. - #[error("unsupported CI token version {version} (expected 1)")] - TokenVersionUnsupported { - /// The version found in the token. - version: u32, - }, - - /// Failed to deserialize a CI token from JSON. - #[error("CI token deserialization failed: {reason}")] - TokenDeserializationFailed { - /// The underlying parse error message. - reason: String, - }, - - /// Failed to serialize a CI token to JSON. - #[error("CI token serialization failed: {reason}")] - TokenSerializationFailed { - /// The underlying serialization error message. - reason: String, - }, - - /// Failed to build the identity bundle tar.gz. - #[error("identity bundle failed: {reason}")] - BundleFailed { - /// What went wrong during bundling. - reason: String, - }, -} - -impl auths_core::error::AuthsErrorInfo for CiError { - fn error_code(&self) -> &'static str { - match self { - Self::EnvironmentNotDetected => "AUTHS-E7001", - Self::IdentityBundleInvalid { .. } => "AUTHS-E7002", - Self::NoArtifacts => "AUTHS-E7003", - Self::CollectionDirFailed { .. } => "AUTHS-E7004", - Self::CollectionCopyFailed { .. } => "AUTHS-E7005", - Self::TokenVersionUnsupported { .. } => "AUTHS-E7006", - Self::TokenDeserializationFailed { .. 
} => "AUTHS-E7007", - Self::TokenSerializationFailed { .. } => "AUTHS-E7008", - Self::BundleFailed { .. } => "AUTHS-E7009", - } - } - - fn suggestion(&self) -> Option<&'static str> { - match self { - Self::EnvironmentNotDetected => { - Some("Set CI-specific environment variables or pass --ci-environment explicitly") - } - Self::IdentityBundleInvalid { .. } => { - Some("Re-run `auths ci setup` to regenerate the identity bundle secret") - } - Self::NoArtifacts => Some("Check your glob pattern matches at least one file"), - Self::CollectionDirFailed { .. } => { - Some("Check directory permissions and that the path is writable") - } - Self::CollectionCopyFailed { .. } => { - Some("Check file permissions and available disk space") - } - Self::TokenVersionUnsupported { .. } => { - Some("Update auths to the latest version to support this token format") - } - Self::TokenDeserializationFailed { .. } => { - Some("Check that AUTHS_CI_TOKEN contains valid JSON from `auths ci setup`") - } - Self::TokenSerializationFailed { .. } => { - Some("This is an internal error — please report it") - } - Self::BundleFailed { .. } => Some("Check that ~/.auths exists and is readable"), - } - } -} diff --git a/crates/auths-sdk/src/domains/ci/forge.rs b/crates/auths-sdk/src/domains/ci/forge.rs deleted file mode 100644 index ae696387..00000000 --- a/crates/auths-sdk/src/domains/ci/forge.rs +++ /dev/null @@ -1,156 +0,0 @@ -//! Forge detection from git remote URLs. -//! -//! Parses git remote URLs (HTTPS, SSH, bare shorthand) into a [`Forge`] variant -//! identifying the hosting platform and repository path. - -/// A detected forge (hosting platform) and its repository identifier. 
-/// -/// Usage: -/// ```ignore -/// let forge = Forge::from_url("git@github.com:owner/repo.git"); -/// assert_eq!(forge.display_name(), "GitHub"); -/// assert_eq!(forge.repo_identifier(), "owner/repo"); -/// ``` -#[derive(Debug, Clone, PartialEq, Eq)] -pub enum Forge { - /// GitHub (github.com or enterprise instances containing "github"). - GitHub { - /// Repository in `owner/repo` format. - owner_repo: String, - }, - /// GitLab (gitlab.com or instances containing "gitlab"). - GitLab { - /// Repository in `group/project` format (may include subgroups). - group_project: String, - }, - /// Bitbucket (bitbucket.org or instances containing "bitbucket"). - Bitbucket { - /// Repository in `workspace/repo` format. - workspace_repo: String, - }, - /// Radicle (hosts containing "radicle"). - Radicle { - /// Radicle repository identifier. - rid: String, - }, - /// Unrecognized hosting platform. - Unknown { - /// The original URL or identifier. - url: String, - }, -} - -impl Forge { - /// Parse any git remote URL or shorthand into a `Forge` variant. - /// - /// Handles HTTPS (`https://github.com/owner/repo.git`), SSH - /// (`git@github.com:owner/repo.git`), and bare shorthand (`owner/repo`). - /// Strips `.git` suffix automatically. - /// - /// Args: - /// * `url`: The git remote URL string. - /// - /// Usage: - /// ```ignore - /// let forge = Forge::from_url("https://github.com/auths-dev/auths.git"); - /// assert!(matches!(forge, Forge::GitHub { .. 
})); - /// ``` - pub fn from_url(url: &str) -> Self { - let url = url.trim().trim_end_matches(".git"); - - // SSH: git@host:path - if let Some(rest) = url.strip_prefix("git@") - && let Some((host, path)) = rest.split_once(':') - { - return Self::from_host_and_path(host, path); - } - - // SSH with explicit protocol: ssh://git@host/path or ssh://git@host:port/path - if let Some(rest) = url.strip_prefix("ssh://git@") - && let Some((host_port, path)) = rest.split_once('/') - { - let host = host_port.split(':').next().unwrap_or(host_port); - return Self::from_host_and_path(host, path); - } - - // HTTPS/HTTP: https://host/path - if let Some(rest) = url - .strip_prefix("https://") - .or_else(|| url.strip_prefix("http://")) - && let Some((host, path)) = rest.split_once('/') - { - return Self::from_host_and_path(host, path); - } - - // Bare owner/repo — cannot determine forge without hostname - if url.contains('/') && !url.contains(':') && !url.contains('.') { - return Forge::Unknown { - url: url.to_string(), - }; - } - - Forge::Unknown { - url: url.to_string(), - } - } - - /// Match a hostname and path to a forge variant. - fn from_host_and_path(host: &str, path: &str) -> Self { - let path = path - .trim_start_matches('/') - .trim_end_matches('/') - .to_string(); - let host_lower = host.to_lowercase(); - - if host_lower.contains("github") { - Forge::GitHub { owner_repo: path } - } else if host_lower.contains("gitlab") { - Forge::GitLab { - group_project: path, - } - } else if host_lower.contains("bitbucket") { - Forge::Bitbucket { - workspace_repo: path, - } - } else if host_lower.contains("radicle") { - Forge::Radicle { rid: path } - } else { - Forge::Unknown { - url: format!("{host}/{path}"), - } - } - } - - /// Human-readable name for this forge. - /// - /// Usage: - /// ```ignore - /// assert_eq!(Forge::GitHub { owner_repo: "a/b".into() }.display_name(), "GitHub"); - /// ``` - pub fn display_name(&self) -> &str { - match self { - Forge::GitHub { .. 
} => "GitHub", - Forge::GitLab { .. } => "GitLab", - Forge::Bitbucket { .. } => "Bitbucket", - Forge::Radicle { .. } => "Radicle", - Forge::Unknown { .. } => "Unknown", - } - } - - /// The repository identifier string (e.g., `owner/repo`). - /// - /// Usage: - /// ```ignore - /// let forge = Forge::from_url("git@github.com:auths-dev/auths.git"); - /// assert_eq!(forge.repo_identifier(), "auths-dev/auths"); - /// ``` - pub fn repo_identifier(&self) -> &str { - match self { - Forge::GitHub { owner_repo } => owner_repo, - Forge::GitLab { group_project } => group_project, - Forge::Bitbucket { workspace_repo } => workspace_repo, - Forge::Radicle { rid } => rid, - Forge::Unknown { url } => url, - } - } -} diff --git a/crates/auths-sdk/src/domains/ci/mod.rs b/crates/auths-sdk/src/domains/ci/mod.rs index be43c78a..290e8f85 100644 --- a/crates/auths-sdk/src/domains/ci/mod.rs +++ b/crates/auths-sdk/src/domains/ci/mod.rs @@ -1,15 +1,10 @@ -//! CI domain — shared types, errors, and environment detection for CI workflows. +//! CI environment detection types. +//! +//! Lightweight types for identifying CI platforms during identity initialization. +//! No signing, no tokens, no key material. -pub mod bundle; pub mod environment; -pub mod error; -pub mod forge; -pub mod token; pub mod types; -pub use bundle::{build_identity_bundle, generate_ci_passphrase}; pub use environment::map_ci_environment; -pub use error::CiError; -pub use forge::Forge; -pub use token::CiToken; pub use types::{CiEnvironment, CiIdentityConfig}; diff --git a/crates/auths-sdk/src/domains/ci/token.rs b/crates/auths-sdk/src/domains/ci/token.rs deleted file mode 100644 index 3bac19f2..00000000 --- a/crates/auths-sdk/src/domains/ci/token.rs +++ /dev/null @@ -1,155 +0,0 @@ -//! CI token format for bundling all signing/verification secrets into one portable JSON blob. - -use super::error::CiError; -use serde::{Deserialize, Serialize}; - -/// Current token format version. 
-const CURRENT_VERSION: u32 = 1; - -/// Size threshold (bytes) above which a warning is emitted about GitHub secrets limits. -const SIZE_WARNING_THRESHOLD: usize = 40_960; // 40 KB - -/// Single portable token containing everything CI needs for signing and verification. -/// -/// Set as one GitHub/GitLab/etc secret (`AUTHS_CI_TOKEN`). The CLI produces it; -/// the sign/verify actions consume it. Users never see the internals. -/// -/// Args: -/// * `version`: Format version for forward compatibility (currently 1). -/// * `passphrase`: Passphrase for the CI device key. -/// * `keychain`: Base64-encoded encrypted keychain file (file-backend). -/// * `identity_repo`: Base64-encoded tar.gz of `~/.auths` (flat format). -/// * `verify_bundle`: Identity bundle JSON for verification. -/// * `created_at`: ISO 8601 timestamp of when this token was created. -/// * `max_valid_for_secs`: Max age of the verify bundle in seconds. -/// -/// Usage: -/// ```ignore -/// let token = CiToken::new(passphrase, keychain_b64, repo_b64, bundle_json, 31536000); -/// let json = token.to_json()?; -/// let parsed = CiToken::from_json(&json)?; -/// ``` -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct CiToken { - /// Format version for forward compatibility. - pub version: u32, - - /// Passphrase for the CI device key. - pub passphrase: String, - - /// Base64-encoded encrypted keychain file (file-backend). - pub keychain: String, - - /// Base64-encoded tar.gz of `~/.auths` (flat format: contents at root, no `.auths/` prefix). - pub identity_repo: String, - - /// Identity bundle JSON for verification (output of `auths id export-bundle`). - pub verify_bundle: serde_json::Value, - - /// When this token was created (ISO 8601). - pub created_at: String, - - /// Max age of the verify bundle in seconds. - pub max_valid_for_secs: u64, -} - -impl CiToken { - /// Create a new `CiToken` with the current version and timestamp. 
- /// - /// Args: - /// * `passphrase`: Passphrase for the CI device key. - /// * `keychain`: Base64-encoded encrypted keychain. - /// * `identity_repo`: Base64-encoded tar.gz of the identity repo. - /// * `verify_bundle`: Verify bundle JSON value. - /// * `created_at`: ISO 8601 timestamp string. - /// * `max_valid_for_secs`: TTL for the verify bundle. - /// - /// Usage: - /// ```ignore - /// let token = CiToken::new(pass, kc, repo, bundle, now_str, 31536000); - /// ``` - pub fn new( - passphrase: String, - keychain: String, - identity_repo: String, - verify_bundle: serde_json::Value, - created_at: String, - max_valid_for_secs: u64, - ) -> Self { - Self { - version: CURRENT_VERSION, - passphrase, - keychain, - identity_repo, - verify_bundle, - created_at, - max_valid_for_secs, - } - } - - /// Serialize this token to a JSON string. - /// - /// Usage: - /// ```ignore - /// let json_str = token.to_json()?; - /// ``` - pub fn to_json(&self) -> Result<String, CiError> { - serde_json::to_string(self).map_err(|e| CiError::TokenSerializationFailed { - reason: e.to_string(), - }) - } - - /// Deserialize a token from a JSON string, validating the version. - /// - /// Args: - /// * `json`: JSON string representing a `CiToken`. - /// - /// Usage: - /// ```ignore - /// let token = CiToken::from_json(&json_str)?; - /// ``` - pub fn from_json(json: &str) -> Result<Self, CiError> { - let token: Self = - serde_json::from_str(json).map_err(|e| CiError::TokenDeserializationFailed { - reason: e.to_string(), - })?; - - if token.version != CURRENT_VERSION { - return Err(CiError::TokenVersionUnsupported { - version: token.version, - }); - } - - Ok(token) - } - - /// Estimate the byte size of this token when serialized to JSON. 
- /// - /// Usage: - /// ```ignore - /// let size = token.estimated_size(); - /// ``` - pub fn estimated_size(&self) -> usize { - // Approximate: sum of field lengths plus JSON overhead - self.passphrase.len() - + self.keychain.len() - + self.identity_repo.len() - + self.verify_bundle.to_string().len() - + self.created_at.len() - + 200 // JSON keys, braces, commas, version, max_valid_for_secs - } - - /// Returns `true` if the token exceeds the size warning threshold (40 KB). - /// - /// The caller is responsible for displaying any warning to the user. - /// - /// Usage: - /// ```ignore - /// if token.is_large() { - /// eprintln!("Warning: token is ~{} KB", token.estimated_size() / 1024); - /// } - /// ``` - pub fn is_large(&self) -> bool { - self.estimated_size() > SIZE_WARNING_THRESHOLD - } -} diff --git a/crates/auths-sdk/src/domains/device/service.rs b/crates/auths-sdk/src/domains/device/service.rs index 6d222f09..66a02687 100644 --- a/crates/auths-sdk/src/domains/device/service.rs +++ b/crates/auths-sdk/src/domains/device/service.rs @@ -225,6 +225,7 @@ pub fn extend_device( None, None, None, // commit_sha + None, ) .map_err(DeviceExtensionError::AttestationFailed)?; @@ -318,6 +319,7 @@ fn sign_and_persist_attestation( None, None, None, // commit_sha + None, ) .map_err(DeviceError::AttestationError)?; diff --git a/crates/auths-sdk/src/domains/identity/service.rs b/crates/auths-sdk/src/domains/identity/service.rs index f2f205f3..0e78c1f2 100644 --- a/crates/auths-sdk/src/domains/identity/service.rs +++ b/crates/auths-sdk/src/domains/identity/service.rs @@ -322,6 +322,7 @@ fn bind_device( None, None, None, // commit_sha + None, ) .map_err(|e| SetupError::StorageError(e.into()))?; diff --git a/crates/auths-sdk/src/domains/org/service.rs b/crates/auths-sdk/src/domains/org/service.rs index e689fadf..4f413cd2 100644 --- a/crates/auths-sdk/src/domains/org/service.rs +++ b/crates/auths-sdk/src/domains/org/service.rs @@ -373,6 +373,7 @@ pub fn add_organization_member( 
Some(IdentityDID::new_unchecked(admin_att.subject.to_string())) }, None, // commit_sha + None, ) .map_err(|e| OrgError::Signing(e.to_string()))?; diff --git a/crates/auths-sdk/src/domains/signing/ci_env.rs b/crates/auths-sdk/src/domains/signing/ci_env.rs new file mode 100644 index 00000000..f21695d7 --- /dev/null +++ b/crates/auths-sdk/src/domains/signing/ci_env.rs @@ -0,0 +1,114 @@ +//! CI environment detection and typed metadata for ephemeral signing. + +use serde::{Deserialize, Serialize}; + +/// CI platform identifier. +/// +/// Usage: +/// ```ignore +/// let platform = CiPlatform::GithubActions; +/// assert_eq!(serde_json::to_string(&platform)?, "\"github_actions\""); +/// ``` +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename_all = "snake_case")] +pub enum CiPlatform { + /// GitHub Actions. + GithubActions, + /// GitLab CI/CD. + GitlabCi, + /// CircleCI. + CircleCi, + /// Generic CI platform (detected via `CI` env var). + Generic, + /// Local development (explicit opt-in via `--ci-platform local`). + Local, +} + +/// Structured CI environment metadata embedded in ephemeral attestations. +/// +/// Serialized into the attestation `payload` (covered by signature). +/// +/// Args: +/// * `platform` - CI platform identifier. +/// * `workflow_ref` - Workflow file path or reference. +/// * `run_id` - CI run identifier. +/// * `actor` - User or bot that triggered the run. +/// * `runner_os` - OS of the CI runner. +/// +/// Usage: +/// ```ignore +/// let env = detect_ci_environment().unwrap(); +/// assert_eq!(env.platform, CiPlatform::GithubActions); +/// ``` +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct CiEnvironment { + /// CI platform. + pub platform: CiPlatform, + /// Workflow file path or reference. + #[serde(skip_serializing_if = "Option::is_none")] + pub workflow_ref: Option, + /// CI run identifier. + #[serde(skip_serializing_if = "Option::is_none")] + pub run_id: Option, + /// User or bot that triggered the run. 
+ #[serde(skip_serializing_if = "Option::is_none")] + pub actor: Option, + /// OS of the CI runner. + #[serde(skip_serializing_if = "Option::is_none")] + pub runner_os: Option, +} + +/// Detect CI environment from standard environment variables. +/// +/// Returns `None` if no CI environment is detected. +/// +/// Usage: +/// ```ignore +/// if let Some(env) = detect_ci_environment() { +/// println!("Running in {:?}", env.platform); +/// } +/// ``` +#[allow(clippy::disallowed_methods)] // CLI boundary: reading CI env vars +pub fn detect_ci_environment() -> Option { + if std::env::var("GITHUB_ACTIONS").ok().as_deref() == Some("true") { + return Some(CiEnvironment { + platform: CiPlatform::GithubActions, + workflow_ref: std::env::var("GITHUB_WORKFLOW").ok(), + run_id: std::env::var("GITHUB_RUN_ID").ok(), + actor: std::env::var("GITHUB_ACTOR").ok(), + runner_os: std::env::var("RUNNER_OS").ok(), + }); + } + + if std::env::var("GITLAB_CI").is_ok() { + return Some(CiEnvironment { + platform: CiPlatform::GitlabCi, + workflow_ref: std::env::var("CI_CONFIG_PATH").ok(), + run_id: std::env::var("CI_PIPELINE_ID").ok(), + actor: std::env::var("GITLAB_USER_LOGIN").ok(), + runner_os: None, + }); + } + + if std::env::var("CIRCLECI").is_ok() { + return Some(CiEnvironment { + platform: CiPlatform::CircleCi, + workflow_ref: std::env::var("CIRCLE_WORKFLOW_ID").ok(), + run_id: std::env::var("CIRCLE_BUILD_NUM").ok(), + actor: std::env::var("CIRCLE_USERNAME").ok(), + runner_os: None, + }); + } + + if std::env::var("CI").is_ok() { + return Some(CiEnvironment { + platform: CiPlatform::Generic, + workflow_ref: None, + run_id: None, + actor: None, + runner_os: None, + }); + } + + None +} diff --git a/crates/auths-sdk/src/domains/signing/mod.rs b/crates/auths-sdk/src/domains/signing/mod.rs index af2dc320..f86a5006 100644 --- a/crates/auths-sdk/src/domains/signing/mod.rs +++ b/crates/auths-sdk/src/domains/signing/mod.rs @@ -1,5 +1,6 @@ //! Domain services for signing. 
+pub mod ci_env; pub mod error; /// Platform-specific signing implementations pub mod platform; diff --git a/crates/auths-sdk/src/domains/signing/service.rs b/crates/auths-sdk/src/domains/signing/service.rs index 0e13c478..ddae46a1 100644 --- a/crates/auths-sdk/src/domains/signing/service.rs +++ b/crates/auths-sdk/src/domains/signing/service.rs @@ -12,13 +12,14 @@ use auths_core::storage::keychain::{IdentityDID, KeyAlias, KeyStorage}; use auths_id::attestation::core::resign_attestation; use auths_id::attestation::create::create_signed_attestation; use auths_id::storage::git_refs::AttestationMetadata; -use auths_verifier::core::{Capability, ResourceId}; +use auths_verifier::core::{Capability, ResourceId, SignerType}; use auths_verifier::types::DeviceDID; use chrono::{DateTime, Utc}; use sha2::{Digest, Sha256}; use std::collections::HashMap; use std::path::Path; use std::sync::Arc; +use zeroize::Zeroizing; /// Errors from the signing pipeline. #[derive(Debug, thiserror::Error)] @@ -529,6 +530,7 @@ pub fn sign_artifact( None, None, validated_commit_sha, + None, ) .map_err(|e| ArtifactSigningError::AttestationFailed(e.to_string()))?; @@ -551,6 +553,133 @@ pub fn sign_artifact( }) } +/// Signs artifact bytes with a one-time ephemeral Ed25519 key. No keychain, no +/// identity storage, no passphrase — the key is generated, used, and zeroized +/// within this function call. +/// +/// The ephemeral key signs "this artifact was built from this commit." Trust +/// derives transitively: consumers verify the commit is signed by a maintainer, +/// then verify this attestation's ephemeral signature covers the artifact hash +/// and commit SHA. +/// +/// Args: +/// * `now` - Current UTC time (injected per clock pattern). +/// * `data` - Raw artifact bytes to sign. +/// * `artifact_name` - Optional human-readable name for the artifact. +/// * `commit_sha` - Git commit SHA this artifact was built from (required, 40 or 64 hex chars). +/// * `expires_in` - Optional TTL in seconds. 
+/// * `note` - Optional attestation note. +/// * `ci_env` - Optional CI environment metadata (serialized into payload, covered by signature). +/// +/// Usage: +/// ```ignore +/// let result = sign_artifact_ephemeral( +/// Utc::now(), b"artifact bytes", Some("release.tar.gz".into()), +/// "abc123def456abc123def456abc123def456abc1".into(), None, None, None, +/// )?; +/// ``` +pub fn sign_artifact_ephemeral( + now: DateTime, + data: &[u8], + artifact_name: Option, + commit_sha: String, + expires_in: Option, + note: Option, + ci_env: Option, +) -> Result { + // 1. Generate ephemeral seed and zeroize on drop + let mut seed_bytes = Zeroizing::new([0u8; 32]); + ring::rand::SecureRandom::fill(&ring::rand::SystemRandom::new(), seed_bytes.as_mut()) + .map_err(|_| ArtifactSigningError::AttestationFailed("RNG failure".into()))?; + + let seed = SecureSeed::new(*seed_bytes); + + // 2. Derive pubkey and DIDs + let pubkey = provider_bridge::ed25519_public_key_from_seed_sync(&seed) + .map_err(|e| ArtifactSigningError::AttestationFailed(e.to_string()))?; + + let device_did = DeviceDID::from_ed25519(&pubkey); + #[allow(clippy::disallowed_methods)] + let identity_did = IdentityDID::new_unchecked(device_did.as_str()); + + // 3. 
Build artifact metadata with optional CI environment in payload + let digest_hex = hex::encode(Sha256::digest(data)); + let artifact_meta = ArtifactMetadata { + artifact_type: "file".to_string(), + digest: ArtifactDigest { + algorithm: "sha256".to_string(), + hex: digest_hex, + }, + name: artifact_name, + size: Some(data.len() as u64), + }; + + let mut payload_value = serde_json::to_value(&artifact_meta) + .map_err(|e| ArtifactSigningError::AttestationFailed(e.to_string()))?; + + if let Some(env) = ci_env + && let serde_json::Value::Object(ref mut map) = payload_value + { + map.insert("ci_environment".to_string(), env); + } + + let rid = ResourceId::new(format!("sha256:{}", artifact_meta.digest.hex)); + let meta = AttestationMetadata { + timestamp: Some(now), + expires_at: expires_in.map(|s| now + chrono::Duration::seconds(s as i64)), + note, + }; + + // 4. Validate commit SHA + let validated_sha = validate_commit_sha(&commit_sha)?; + + // 5. Set up ephemeral signer + let identity_alias = KeyAlias::new_unchecked("__ephemeral_identity__"); + let device_alias = KeyAlias::new_unchecked("__ephemeral_device__"); + + let mut seeds: HashMap = HashMap::new(); + seeds.insert( + identity_alias.as_str().to_string(), + SecureSeed::new(*seed_bytes), + ); + seeds.insert( + device_alias.as_str().to_string(), + SecureSeed::new(*seed_bytes), + ); + let signer = SeedMapSigner { seeds }; + let noop_provider = auths_core::PrefilledPassphraseProvider::new(""); + + // 6. 
Create signed attestation with Workload signer type + let attestation = create_signed_attestation( + now, + &rid, + &identity_did, + &device_did, + &pubkey, + Some(payload_value), + &meta, + &signer, + &noop_provider, + Some(&identity_alias), + Some(&device_alias), + vec![Capability::sign_release()], + None, + None, + Some(validated_sha), + Some(SignerType::Workload), + ) + .map_err(|e| ArtifactSigningError::AttestationFailed(e.to_string()))?; + + let attestation_json = serde_json::to_string_pretty(&attestation) + .map_err(|e| ArtifactSigningError::AttestationFailed(e.to_string()))?; + + Ok(ArtifactSigningResult { + attestation_json, + rid, + digest: artifact_meta.digest.hex, + }) +} + /// Signs artifact bytes with a raw Ed25519 seed, bypassing keychain and identity storage. /// /// This is the raw-key equivalent of [`sign_artifact`]. It does not require an @@ -642,6 +771,7 @@ pub fn sign_artifact_raw( None, None, validated_commit_sha, + None, ) .map_err(|e| ArtifactSigningError::AttestationFailed(e.to_string()))?; diff --git a/crates/auths-sdk/src/pairing/mod.rs b/crates/auths-sdk/src/pairing/mod.rs index 198e28fe..66982a88 100644 --- a/crates/auths-sdk/src/pairing/mod.rs +++ b/crates/auths-sdk/src/pairing/mod.rs @@ -373,6 +373,7 @@ pub fn create_pairing_attestation( None, None, None, // commit_sha + None, ) .map_err(|e| PairingError::AttestationFailed(e.to_string()))?; diff --git a/crates/auths-sdk/src/ports/mod.rs b/crates/auths-sdk/src/ports/mod.rs index 21fcfc9b..2a32f639 100644 --- a/crates/auths-sdk/src/ports/mod.rs +++ b/crates/auths-sdk/src/ports/mod.rs @@ -23,6 +23,9 @@ pub use auths_core::ports::namespace::{Ecosystem, NamespaceVerifyError, PackageN pub use auths_core::ports::network::RegistryClient; pub use auths_core::ports::ssh_agent::{SshAgentError, SshAgentPort}; pub use auths_core::ports::storage::StorageError as CoreStorageError; +pub use auths_core::ports::transparency_log::{ + LogError, LogMetadata, LogSubmission, TransparencyLog, +}; // 
Re-exports from auths-id ports pub use auths_id::ports::registry::RegistryBackend; diff --git a/crates/auths-sdk/src/testing/fakes/mod.rs b/crates/auths-sdk/src/testing/fakes/mod.rs index 2450c998..a86369ab 100644 --- a/crates/auths-sdk/src/testing/fakes/mod.rs +++ b/crates/auths-sdk/src/testing/fakes/mod.rs @@ -7,6 +7,7 @@ mod git; mod git_config; mod namespace; mod signer; +mod transparency_log; pub use agent::FakeAgentProvider; pub use agent_persistence::FakeAgentPersistence; @@ -17,3 +18,4 @@ pub use git::FakeGitLogProvider; pub use git_config::{FakeGitConfigProvider, GitConfigSetCall}; pub use namespace::FakeNamespaceVerifier; pub use signer::FakeSecureSigner; +pub use transparency_log::{FakeLogCall, FakeTransparencyLog}; diff --git a/crates/auths-sdk/src/testing/fakes/transparency_log.rs b/crates/auths-sdk/src/testing/fakes/transparency_log.rs new file mode 100644 index 00000000..8f4f1dce --- /dev/null +++ b/crates/auths-sdk/src/testing/fakes/transparency_log.rs @@ -0,0 +1,393 @@ +//! Fake transparency log for testing SDK workflows. +//! +//! Maintains an in-memory Merkle tree using the same functions +//! the verifier uses (`auths_transparency::merkle`). + +use async_trait::async_trait; +use ring::signature::{Ed25519KeyPair, KeyPair}; +use std::sync::Mutex; + +use auths_core::ports::transparency_log::{LogError, LogMetadata, LogSubmission, TransparencyLog}; +use auths_transparency::checkpoint::{Checkpoint, SignedCheckpoint}; +use auths_transparency::merkle::{compute_root, hash_leaf}; +use auths_transparency::proof::{ConsistencyProof, InclusionProof}; +use auths_transparency::types::{LogOrigin, MerkleHash}; +use auths_verifier::{Ed25519PublicKey, Ed25519Signature}; + +/// Deterministic test seed for the fake log's signing key. +const FAKE_LOG_SEED: [u8; 32] = [42u8; 32]; + +/// A recorded call to the fake transparency log. +#[derive(Debug, Clone)] +pub enum FakeLogCall { + /// A `submit()` call with the leaf data length. 
+ Submit { + /// Length of the submitted leaf data in bytes. + leaf_data_len: usize, + }, + /// A `get_checkpoint()` call. + GetCheckpoint, + /// A `get_inclusion_proof()` call. + GetInclusionProof { + /// Requested leaf index. + leaf_index: u64, + /// Requested tree size. + tree_size: u64, + }, +} + +/// Internal state of the fake log. +struct FakeLogState { + /// All leaf hashes in insertion order. + leaves: Vec, + /// Recorded calls for assertions. + calls: Vec, +} + +/// Configurable fake transparency log for testing. +/// +/// Uses `auths_transparency::merkle::compute_root` and `hash_leaf` — +/// the same functions the verifier uses. Not a parallel implementation. +pub struct FakeTransparencyLog { + state: Mutex, + keypair: Ed25519KeyPair, + public_key: [u8; 32], + /// If set, all trait methods return this error instead of succeeding. + forced_error: Option, +} + +impl FakeTransparencyLog { + /// Create a fake that succeeds and builds real Merkle proofs. + pub fn succeeding() -> Self { + #[allow(clippy::expect_used)] // INVARIANT: fixed test seed is always valid + let keypair = + Ed25519KeyPair::from_seed_unchecked(&FAKE_LOG_SEED).expect("valid Ed25519 seed"); + #[allow(clippy::expect_used)] // INVARIANT: Ed25519 public key is always 32 bytes + let public_key: [u8; 32] = keypair + .public_key() + .as_ref() + .try_into() + .expect("Ed25519 public key is 32 bytes"); + + Self { + state: Mutex::new(FakeLogState { + leaves: Vec::new(), + calls: Vec::new(), + }), + keypair, + public_key, + forced_error: None, + } + } + + /// Create a fake that always returns the given error. + pub fn failing(error: LogError) -> Self { + let mut fake = Self::succeeding(); + fake.forced_error = Some(error); + fake + } + + /// Create a fake that returns `RateLimited`. + pub fn rate_limited(secs: u64) -> Self { + Self::failing(LogError::RateLimited { + retry_after_secs: secs, + }) + } + + /// Get recorded calls for assertions. 
+ pub fn calls(&self) -> Vec { + #[allow(clippy::expect_used)] // INVARIANT: test code only + self.state.lock().expect("lock").calls.clone() + } + + /// Build a `TrustRoot` matching this fake's signing key. + pub fn trust_root(&self) -> auths_transparency::TrustRoot { + auths_transparency::TrustRoot { + log_public_key: Ed25519PublicKey::from_bytes(self.public_key), + log_origin: LogOrigin::new_unchecked("fake.test/log"), + witnesses: vec![], + signature_algorithm: auths_verifier::SignatureAlgorithm::Ed25519, + } + } + + fn check_forced_error(&self) -> Result<(), LogError> { + if let Some(ref err) = self.forced_error { + // Clone the error for return + Err(match err { + LogError::SubmissionRejected { reason } => LogError::SubmissionRejected { + reason: reason.clone(), + }, + LogError::NetworkError(s) => LogError::NetworkError(s.clone()), + LogError::RateLimited { retry_after_secs } => LogError::RateLimited { + retry_after_secs: *retry_after_secs, + }, + LogError::InvalidResponse(s) => LogError::InvalidResponse(s.clone()), + LogError::EntryNotFound => LogError::EntryNotFound, + LogError::ConsistencyViolation(s) => LogError::ConsistencyViolation(s.clone()), + LogError::Unavailable(s) => LogError::Unavailable(s.clone()), + }) + } else { + Ok(()) + } + } + + /// Build the signed checkpoint for the current tree state. 
+ fn sign_checkpoint(&self, leaves: &[MerkleHash]) -> SignedCheckpoint { + let root = if leaves.is_empty() { + MerkleHash::from_bytes([0u8; 32]) + } else { + compute_root(leaves) + }; + + let checkpoint = Checkpoint { + origin: LogOrigin::new_unchecked("fake.test/log"), + size: leaves.len() as u64, + root, + #[allow(clippy::expect_used)] // INVARIANT: hardcoded test timestamp is valid + timestamp: chrono::DateTime::parse_from_rfc3339("2026-01-01T00:00:00Z") + .expect("valid timestamp") + .with_timezone(&chrono::Utc), + }; + + let note_body = checkpoint.to_note_body(); + let sig_bytes = self.keypair.sign(note_body.as_bytes()); + #[allow(clippy::expect_used)] // INVARIANT: Ed25519 signature is always 64 bytes + let log_signature = + Ed25519Signature::try_from_slice(sig_bytes.as_ref()).expect("64-byte sig"); + + SignedCheckpoint { + checkpoint, + log_signature, + log_public_key: Ed25519PublicKey::from_bytes(self.public_key), + witnesses: vec![], + ecdsa_checkpoint_signature: None, + ecdsa_checkpoint_key: None, + } + } + + /// Compute an inclusion proof for leaf at `index` in a tree of `leaves`. + fn compute_inclusion_proof(leaves: &[MerkleHash], index: u64) -> InclusionProof { + let size = leaves.len() as u64; + let root = compute_root(leaves); + + // For a simple implementation: walk up the tree collecting siblings + let hashes = Self::merkle_path(leaves, index as usize); + + InclusionProof { + index, + size, + root, + hashes, + } + } + + /// Compute the Merkle sibling path for the leaf at `index`. 
+ fn merkle_path(leaves: &[MerkleHash], index: usize) -> Vec { + if leaves.len() <= 1 { + return vec![]; + } + + let k = largest_power_of_2_less_than(leaves.len()); + if index < k { + let mut path = Self::merkle_path(&leaves[..k], index); + path.push(compute_root(&leaves[k..])); + path + } else { + let mut path = Self::merkle_path(&leaves[k..], index - k); + path.push(compute_root(&leaves[..k])); + path + } + } +} + +fn largest_power_of_2_less_than(n: usize) -> usize { + if n <= 1 { + return 0; + } + let mut k = 1; + while k * 2 < n { + k *= 2; + } + k +} + +#[async_trait] +impl TransparencyLog for FakeTransparencyLog { + async fn submit( + &self, + leaf_data: &[u8], + _public_key: &[u8], + _signature: &[u8], + ) -> Result { + self.check_forced_error()?; + + #[allow(clippy::expect_used)] // INVARIANT: test code + let mut state = self.state.lock().expect("lock"); + state.calls.push(FakeLogCall::Submit { + leaf_data_len: leaf_data.len(), + }); + + let leaf_hash = hash_leaf(leaf_data); + state.leaves.push(leaf_hash); + + let leaf_index = (state.leaves.len() - 1) as u64; + let inclusion_proof = Self::compute_inclusion_proof(&state.leaves, leaf_index); + let signed_checkpoint = self.sign_checkpoint(&state.leaves); + + Ok(LogSubmission { + leaf_index, + inclusion_proof, + signed_checkpoint, + }) + } + + async fn get_checkpoint(&self) -> Result { + self.check_forced_error()?; + + #[allow(clippy::expect_used)] + let mut state = self.state.lock().expect("lock"); + state.calls.push(FakeLogCall::GetCheckpoint); + Ok(self.sign_checkpoint(&state.leaves)) + } + + async fn get_inclusion_proof( + &self, + leaf_index: u64, + tree_size: u64, + ) -> Result { + self.check_forced_error()?; + + #[allow(clippy::expect_used)] + let mut state = self.state.lock().expect("lock"); + state.calls.push(FakeLogCall::GetInclusionProof { + leaf_index, + tree_size, + }); + + if leaf_index >= state.leaves.len() as u64 { + return Err(LogError::EntryNotFound); + } + let end = std::cmp::min(tree_size as 
usize, state.leaves.len()); + Ok(Self::compute_inclusion_proof( + &state.leaves[..end], + leaf_index, + )) + } + + async fn get_consistency_proof( + &self, + _old_size: u64, + _new_size: u64, + ) -> Result { + self.check_forced_error()?; + + // Simplified: return empty proof (tests that need real consistency + // proofs should use the full Merkle math directly) + Ok(ConsistencyProof { + old_size: _old_size, + new_size: _new_size, + old_root: MerkleHash::from_bytes([0u8; 32]), + new_root: MerkleHash::from_bytes([0u8; 32]), + hashes: vec![], + }) + } + + fn metadata(&self) -> LogMetadata { + LogMetadata { + log_id: "fake-test-log".to_string(), + log_origin: LogOrigin::new_unchecked("fake.test/log"), + log_public_key: Ed25519PublicKey::from_bytes(self.public_key), + api_url: None, + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[tokio::test] + async fn succeeding_submit_returns_valid_proof() { + let log = FakeTransparencyLog::succeeding(); + let result = log.submit(b"hello", b"pk", b"sig").await; + assert!(result.is_ok()); + let submission = result.unwrap(); + assert_eq!(submission.leaf_index, 0); + + // Verify the inclusion proof using the same Merkle math + let leaf_hash = hash_leaf(b"hello"); + assert!(submission.inclusion_proof.verify(&leaf_hash).is_ok()); + } + + #[tokio::test] + async fn succeeding_multiple_submits() { + let log = FakeTransparencyLog::succeeding(); + + let s1 = log.submit(b"a", b"pk", b"sig").await.unwrap(); + let s2 = log.submit(b"b", b"pk", b"sig").await.unwrap(); + let s3 = log.submit(b"c", b"pk", b"sig").await.unwrap(); + + assert_eq!(s1.leaf_index, 0); + assert_eq!(s2.leaf_index, 1); + assert_eq!(s3.leaf_index, 2); + + // Each proof should verify + assert!(s1.inclusion_proof.verify(&hash_leaf(b"a")).is_ok()); + assert!(s2.inclusion_proof.verify(&hash_leaf(b"b")).is_ok()); + assert!(s3.inclusion_proof.verify(&hash_leaf(b"c")).is_ok()); + } + + #[tokio::test] + async fn failing_returns_configured_error() { + let log = 
FakeTransparencyLog::failing(LogError::NetworkError("test error".into())); + let result = log.submit(b"hello", b"pk", b"sig").await; + assert!(result.is_err()); + assert!(result.unwrap_err().to_string().contains("test error")); + } + + #[tokio::test] + async fn rate_limited_returns_retry_after() { + let log = FakeTransparencyLog::rate_limited(30); + let result = log.submit(b"hello", b"pk", b"sig").await; + match result { + Err(LogError::RateLimited { retry_after_secs }) => { + assert_eq!(retry_after_secs, 30); + } + other => panic!("expected RateLimited, got: {:?}", other), + } + } + + #[tokio::test] + async fn calls_are_recorded() { + let log = FakeTransparencyLog::succeeding(); + log.submit(b"a", b"pk", b"sig").await.unwrap(); + log.get_checkpoint().await.unwrap(); + + let calls = log.calls(); + assert_eq!(calls.len(), 2); + assert!(matches!(calls[0], FakeLogCall::Submit { .. })); + assert!(matches!(calls[1], FakeLogCall::GetCheckpoint)); + } + + #[tokio::test] + async fn trust_root_matches_checkpoint_signature() { + let log = FakeTransparencyLog::succeeding(); + let submission = log.submit(b"test", b"pk", b"sig").await.unwrap(); + let trust_root = log.trust_root(); + + // The checkpoint should verify against the trust root + let note_body = submission.signed_checkpoint.checkpoint.to_note_body(); + let peer_key = ring::signature::UnparsedPublicKey::new( + &ring::signature::ED25519, + trust_root.log_public_key.as_bytes(), + ); + assert!( + peer_key + .verify( + note_body.as_bytes(), + submission.signed_checkpoint.log_signature.as_bytes() + ) + .is_ok() + ); + } +} diff --git a/crates/auths-sdk/src/workflows/ci/batch_attest.rs b/crates/auths-sdk/src/workflows/ci/batch_attest.rs deleted file mode 100644 index daf1ab73..00000000 --- a/crates/auths-sdk/src/workflows/ci/batch_attest.rs +++ /dev/null @@ -1,284 +0,0 @@ -//! Batch artifact signing and attestation collection workflow. -//! -//! Provides the domain logic for CI attestation pipelines: sign multiple -//! 
artifacts in one pass, collect attestation files to a target directory, -//! and report per-file results. Used by the CLI `artifact batch-sign` command -//! and the `auths-dev/attest-action` GitHub Action. - -use std::path::{Path, PathBuf}; -use std::sync::Arc; - -use crate::context::AuthsContext; -use crate::domains::ci::error::CiError; -use crate::domains::signing::service::{ArtifactSigningParams, SigningKeyMaterial, sign_artifact}; -use crate::ports::artifact::ArtifactSource; -use auths_core::storage::keychain::KeyAlias; - -/// A single artifact to sign in a batch operation. -/// -/// Args: -/// * `source`: The artifact data source (implements digest/metadata). -/// * `output_path`: Where to write the `.auths.json` attestation file. -/// -/// Usage: -/// ```ignore -/// let entry = BatchEntry { -/// source: Arc::new(my_artifact), -/// output_path: PathBuf::from("release.tar.gz.auths.json"), -/// }; -/// ``` -pub struct BatchEntry { - /// Artifact source providing digest and metadata. - pub source: Arc, - /// Destination path for the attestation JSON file. - pub output_path: PathBuf, -} - -/// Configuration for a batch signing operation. -/// -/// Args: -/// * `entries`: List of artifacts to sign with their output paths. -/// * `device_key`: Device key alias used for dual-signing. -/// * `identity_key`: Optional identity key alias (omit for device-only CI signing). -/// * `expires_in`: Optional TTL in seconds for attestation expiry. -/// * `note`: Optional annotation embedded in each attestation. -/// * `attestation_dir`: If set, attestation files are also copied here. -/// -/// Usage: -/// ```ignore -/// let config = BatchSignConfig { -/// entries: vec![entry1, entry2], -/// device_key: "ci-release-device".to_string(), -/// identity_key: None, -/// expires_in: None, -/// note: Some("release v1.0".to_string()), -/// attestation_dir: Some(PathBuf::from(".auths/releases")), -/// }; -/// ``` -pub struct BatchSignConfig { - /// Artifacts to sign. 
- pub entries: Vec, - /// Device key alias for signing. - pub device_key: String, - /// Optional identity key alias. - pub identity_key: Option, - /// Optional TTL in seconds. - pub expires_in: Option, - /// Optional note for all attestations. - pub note: Option, - /// Git commit SHA for provenance binding (shared across all artifacts in batch). - pub commit_sha: Option, -} - -/// Outcome for a single artifact in a batch. -#[derive(Debug)] -pub enum BatchEntryResult { - /// Signing succeeded. - Signed(SignedArtifact), - /// Signing failed for this artifact (other artifacts may still succeed). - Failed(FailedArtifact), -} - -/// A successfully signed artifact. -/// -/// Usage: -/// ```ignore -/// println!("Signed {} (sha256:{})", result.rid, result.digest); -/// ``` -#[derive(Debug)] -pub struct SignedArtifact { - /// Intended output path for the attestation file. - pub output_path: PathBuf, - /// Canonical JSON of the signed attestation. - pub attestation_json: String, - /// Resource identifier from the attestation. - pub rid: String, - /// Hex-encoded SHA-256 digest of the artifact. - pub digest: String, -} - -/// An artifact that failed to sign. -#[derive(Debug)] -pub struct FailedArtifact { - /// Output path that would have been written. - pub output_path: PathBuf, - /// The error that prevented signing. - pub error: String, -} - -/// Result of a batch signing operation. -/// -/// Usage: -/// ```ignore -/// let result = batch_sign_artifacts(config, &ctx)?; -/// println!("{} signed, {} failed", result.signed_count(), result.failed_count()); -/// ``` -#[derive(Debug)] -pub struct BatchSignResult { - /// Per-artifact outcomes. - pub results: Vec, -} - -impl BatchSignResult { - /// Number of successfully signed artifacts. - pub fn signed_count(&self) -> usize { - self.results - .iter() - .filter(|r| matches!(r, BatchEntryResult::Signed(_))) - .count() - } - - /// Number of failed artifacts. 
- pub fn failed_count(&self) -> usize { - self.results - .iter() - .filter(|r| matches!(r, BatchEntryResult::Failed(_))) - .count() - } - - /// Whether all artifacts were signed successfully. - pub fn all_succeeded(&self) -> bool { - self.failed_count() == 0 - } -} - -// Errors are defined in crate::domains::ci::error::CiError - -/// Derive the default attestation output path for an artifact. -/// -/// Args: -/// * `artifact_path`: Path to the original artifact file. -/// -/// Usage: -/// ```ignore -/// let out = default_attestation_path(Path::new("release.tar.gz")); -/// assert_eq!(out, PathBuf::from("release.tar.gz.auths.json")); -/// ``` -pub fn default_attestation_path(artifact_path: &Path) -> PathBuf { - let mut p = artifact_path.to_path_buf(); - let new_name = format!( - "{}.auths.json", - p.file_name().unwrap_or_default().to_string_lossy() - ); - p.set_file_name(new_name); - p -} - -/// Sign multiple artifacts in a single batch and optionally collect attestations. -/// -/// Each artifact is signed independently — a failure on one does not prevent -/// signing the others. Results are returned per-artifact so callers can decide -/// how to handle partial failures. -/// -/// Args: -/// * `config`: Batch configuration with artifact entries, keys, and options. -/// * `ctx`: Runtime context providing identity storage, keychain, and clock. 
-/// -/// Usage: -/// ```ignore -/// let result = batch_sign_artifacts(config, &ctx)?; -/// for entry in &result.results { -/// match entry { -/// BatchEntryResult::Signed(s) => println!("OK: {}", s.output_path.display()), -/// BatchEntryResult::Failed(f) => eprintln!("FAIL: {}: {}", f.output_path.display(), f.error), -/// } -/// } -/// ``` -pub fn batch_sign_artifacts( - config: BatchSignConfig, - ctx: &AuthsContext, -) -> Result { - if config.entries.is_empty() { - return Err(CiError::NoArtifacts); - } - - let mut results = Vec::with_capacity(config.entries.len()); - - for entry in &config.entries { - let params = ArtifactSigningParams { - artifact: Arc::clone(&entry.source), - identity_key: config - .identity_key - .as_ref() - .map(|k| SigningKeyMaterial::Alias(KeyAlias::new_unchecked(k))), - device_key: SigningKeyMaterial::Alias(KeyAlias::new_unchecked(&config.device_key)), - expires_in: config.expires_in, - note: config.note.clone(), - commit_sha: config.commit_sha.clone(), - }; - - match sign_artifact(params, ctx) { - Ok(result) => results.push(BatchEntryResult::Signed(SignedArtifact { - output_path: entry.output_path.clone(), - attestation_json: result.attestation_json, - rid: result.rid.to_string(), - digest: result.digest, - })), - Err(e) => results.push(BatchEntryResult::Failed(FailedArtifact { - output_path: entry.output_path.clone(), - error: e.to_string(), - })), - } - } - - Ok(BatchSignResult { results }) -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn default_attestation_path_appends_suffix() { - let p = default_attestation_path(Path::new("/tmp/release.tar.gz")); - assert_eq!(p, PathBuf::from("/tmp/release.tar.gz.auths.json")); - } - - #[test] - fn default_attestation_path_handles_bare_name() { - let p = default_attestation_path(Path::new("artifact.bin")); - assert_eq!(p, PathBuf::from("artifact.bin.auths.json")); - } - - #[test] - fn batch_sign_result_counts() { - let result = BatchSignResult { - results: vec![ - 
BatchEntryResult::Signed(SignedArtifact { - output_path: PathBuf::from("a.auths.json"), - attestation_json: "{}".to_string(), - rid: "sha256:abc".to_string(), - digest: "abc".to_string(), - }), - BatchEntryResult::Failed(FailedArtifact { - output_path: PathBuf::from("b.auths.json"), - error: "test error".to_string(), - }), - BatchEntryResult::Signed(SignedArtifact { - output_path: PathBuf::from("c.auths.json"), - attestation_json: "{}".to_string(), - rid: "sha256:def".to_string(), - digest: "def".to_string(), - }), - ], - }; - - assert_eq!(result.signed_count(), 2); - assert_eq!(result.failed_count(), 1); - assert!(!result.all_succeeded()); - } - - #[test] - fn batch_sign_result_all_succeeded() { - let result = BatchSignResult { - results: vec![BatchEntryResult::Signed(SignedArtifact { - output_path: PathBuf::from("a.auths.json"), - attestation_json: "{}".to_string(), - rid: "sha256:abc".to_string(), - digest: "abc".to_string(), - })], - }; - - assert!(result.all_succeeded()); - } -} diff --git a/crates/auths-sdk/src/workflows/ci/mod.rs b/crates/auths-sdk/src/workflows/ci/mod.rs index 6d600733..542940b3 100644 --- a/crates/auths-sdk/src/workflows/ci/mod.rs +++ b/crates/auths-sdk/src/workflows/ci/mod.rs @@ -1,6 +1,4 @@ -//! CI workflow orchestration — batch signing, OIDC machine identity, and future CI automations. +//! CI workflow orchestration — OIDC machine identity creation from CI platform tokens. -/// Batch artifact signing and attestation collection. -pub mod batch_attest; /// OIDC machine identity creation from CI platform tokens. pub mod machine_identity; diff --git a/crates/auths-sdk/src/workflows/log_submit.rs b/crates/auths-sdk/src/workflows/log_submit.rs new file mode 100644 index 00000000..ce1cf9fd --- /dev/null +++ b/crates/auths-sdk/src/workflows/log_submit.rs @@ -0,0 +1,158 @@ +//! SDK workflow for submitting attestations to a transparency log. +//! +//! This module provides [`submit_attestation_to_log`], the async workflow +//! 
that takes a signed attestation and submits it to whichever transparency +//! log backend is configured. The function does NOT retry on rate limits — +//! the caller (CLI) owns retry policy. + +use auths_core::ports::transparency_log::{LogError, TransparencyLog}; +use auths_transparency::checkpoint::SignedCheckpoint; +use auths_transparency::proof::InclusionProof; +use thiserror::Error; + +/// Result of submitting an attestation to a transparency log. +/// +/// Named `LogSubmissionBundle` to avoid collision with the existing +/// `OfflineBundle` type in `auths-transparency`. +/// +/// Args: +/// * `log_id` — Stable log identifier for trust config lookup. +/// * `leaf_index` — Zero-based index of the logged leaf. +/// * `inclusion_proof` — Merkle inclusion proof. +/// * `signed_checkpoint` — Signed checkpoint at submission time. +/// +/// Usage: +/// ```ignore +/// let bundle = submit_attestation_to_log(&json, &pk, &sig, &log).await?; +/// println!("Logged at index {} in {}", bundle.leaf_index, bundle.log_id); +/// ``` +#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)] +pub struct LogSubmissionBundle { + /// Stable log identifier for trust config lookup. + pub log_id: String, + /// Zero-based leaf index in the log. + pub leaf_index: u64, + /// Merkle inclusion proof against the checkpoint. + pub inclusion_proof: InclusionProof, + /// Signed checkpoint at submission time. + pub signed_checkpoint: SignedCheckpoint, +} + +/// Errors from the log submission workflow. +#[derive(Debug, Error)] +pub enum LogSubmitError { + /// The transparency log returned an error. + #[error("log error: {0}")] + LogError(#[from] LogError), + + /// Post-submission verification failed (GHSA-whqx-f9j3-ch6m countermeasure). + #[error("post-submission verification failed: {0}")] + VerificationFailed(String), +} + +/// Submit an attestation to a transparency log and verify the response. +/// +/// This function: +/// 1. Submits the attestation as a leaf to the log +/// 2. 
Verifies the returned inclusion proof against the checkpoint (GHSA-whqx-f9j3-ch6m) +/// 3. Returns the bundle for embedding in `.auths.json` +/// +/// **Does NOT retry** on `LogError::RateLimited`. The caller owns retry policy. +/// +/// Args: +/// * `attestation_json` — Serialized attestation JSON bytes. +/// * `public_key` — Signer's Ed25519 public key (raw 32 bytes or PKIX DER). +/// * `signature` — Ed25519 signature over the attestation. +/// * `log` — The transparency log backend to submit to. +/// +/// Usage: +/// ```ignore +/// let bundle = submit_attestation_to_log( +/// attestation_json.as_bytes(), +/// &public_key_bytes, +/// &signature_bytes, +/// &log, +/// ).await?; +/// ``` +pub async fn submit_attestation_to_log( + attestation_json: &[u8], + public_key: &[u8], + signature: &[u8], + log: &dyn TransparencyLog, +) -> Result { + // 1. Submit to the log + let submission = log.submit(attestation_json, public_key, signature).await?; + + // 2. Verify the inclusion proof against the checkpoint root + // (GHSA-whqx-f9j3-ch6m: verify the response matches what we submitted) + let leaf_hash = auths_transparency::merkle::hash_leaf(attestation_json); + if let Err(e) = submission.inclusion_proof.verify(&leaf_hash) { + return Err(LogSubmitError::VerificationFailed(format!( + "inclusion proof does not match submitted attestation: {e}" + ))); + } + + // 3. Verify the proof root matches the checkpoint root + if submission.inclusion_proof.root != submission.signed_checkpoint.checkpoint.root { + return Err(LogSubmitError::VerificationFailed( + "inclusion proof root does not match checkpoint root".into(), + )); + } + + // The inclusion proof verification in step 2 already confirms the leaf + // data matches (H(0x00 || data)), closing the GHSA-whqx-f9j3-ch6m vector. 
+ + let metadata = log.metadata(); + + Ok(LogSubmissionBundle { + log_id: metadata.log_id, + leaf_index: submission.leaf_index, + inclusion_proof: submission.inclusion_proof, + signed_checkpoint: submission.signed_checkpoint, + }) +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::testing::fakes::FakeTransparencyLog; + + #[tokio::test] + async fn submit_succeeds_with_fake() { + let log = FakeTransparencyLog::succeeding(); + let result = + submit_attestation_to_log(b"test attestation", b"public_key", b"signature", &log).await; + + assert!(result.is_ok()); + let bundle = result.unwrap(); + assert_eq!(bundle.log_id, "fake-test-log"); + assert_eq!(bundle.leaf_index, 0); + } + + #[tokio::test] + async fn submit_propagates_rate_limit() { + let log = FakeTransparencyLog::rate_limited(30); + let result = submit_attestation_to_log(b"test", b"pk", b"sig", &log).await; + + match result { + Err(LogSubmitError::LogError(LogError::RateLimited { retry_after_secs })) => { + assert_eq!(retry_after_secs, 30); + } + other => panic!("expected RateLimited, got: {:?}", other), + } + } + + #[tokio::test] + async fn submit_propagates_network_error() { + let log = FakeTransparencyLog::failing(LogError::NetworkError("connection refused".into())); + let result = submit_attestation_to_log(b"test", b"pk", b"sig", &log).await; + + assert!(result.is_err()); + assert!( + result + .unwrap_err() + .to_string() + .contains("connection refused") + ); + } +} diff --git a/crates/auths-sdk/src/workflows/mod.rs b/crates/auths-sdk/src/workflows/mod.rs index a3facd52..b444f9d8 100644 --- a/crates/auths-sdk/src/workflows/mod.rs +++ b/crates/auths-sdk/src/workflows/mod.rs @@ -8,6 +8,7 @@ pub mod auth; pub mod ci; pub mod diagnostics; pub mod git_integration; +pub mod log_submit; #[cfg(feature = "mcp")] pub mod mcp; pub mod namespace; diff --git a/crates/auths-sdk/src/workflows/org.rs b/crates/auths-sdk/src/workflows/org.rs index f5ef3e9a..f6d1515c 100644 --- a/crates/auths-sdk/src/workflows/org.rs 
+++ b/crates/auths-sdk/src/workflows/org.rs @@ -373,6 +373,7 @@ pub fn add_organization_member( Some(IdentityDID::new_unchecked(admin_att.subject.to_string())) }, None, // commit_sha + None, ) .map_err(|e| OrgError::Signing(e.to_string()))?; diff --git a/crates/auths-sdk/src/workflows/transparency.rs b/crates/auths-sdk/src/workflows/transparency.rs index f99fe3cb..524f9ed8 100644 --- a/crates/auths-sdk/src/workflows/transparency.rs +++ b/crates/auths-sdk/src/workflows/transparency.rs @@ -116,6 +116,7 @@ pub async fn fetch_trust_root( log_public_key: Ed25519PublicKey::from_bytes(log_public_key_bytes), log_origin, witnesses, + signature_algorithm: Default::default(), }) } @@ -364,6 +365,8 @@ mod tests { log_signature: Ed25519Signature::from_bytes([0u8; 64]), log_public_key: Ed25519PublicKey::from_bytes([0u8; 32]), witnesses: vec![], + ecdsa_checkpoint_signature: None, + ecdsa_checkpoint_key: None, } } @@ -372,6 +375,7 @@ mod tests { log_public_key: Ed25519PublicKey::from_bytes([0u8; 32]), log_origin: LogOrigin::new("test.dev/log").unwrap(), witnesses: vec![], + signature_algorithm: Default::default(), } } diff --git a/crates/auths-sdk/tests/cases/ci_setup.rs b/crates/auths-sdk/tests/cases/ci_setup.rs deleted file mode 100644 index a0f05d00..00000000 --- a/crates/auths-sdk/tests/cases/ci_setup.rs +++ /dev/null @@ -1,93 +0,0 @@ -use std::sync::Arc; - -use auths_core::PrefilledPassphraseProvider; -use auths_core::signing::StorageSigner; -use auths_core::storage::memory::{MEMORY_KEYCHAIN, MemoryKeychainHandle}; -use auths_sdk::domains::ci::types::{CiEnvironment, CiIdentityConfig}; -use auths_sdk::domains::identity::service::initialize; -use auths_sdk::domains::identity::types::IdentityConfig; -use auths_sdk::domains::identity::types::InitializeResult; - -use crate::cases::helpers::build_test_context; - -#[test] -fn create_ci_identity_creates_ephemeral_identity() { - MEMORY_KEYCHAIN.lock().unwrap().clear_all().ok(); - - let tmp = tempfile::tempdir().unwrap(); - let 
registry_path = tmp.path().join(".auths-ci"); - - let config = CiIdentityConfig { - ci_environment: CiEnvironment::GitHubActions, - registry_path: registry_path.clone(), - }; - - let keychain: Arc = - Arc::new(MemoryKeychainHandle); - let signer = StorageSigner::new(MemoryKeychainHandle); - let provider = PrefilledPassphraseProvider::new("Ci-ephemeral-pass1!"); - let ctx = build_test_context(®istry_path, Arc::clone(&keychain)); - let result = match initialize( - IdentityConfig::Ci(config), - &ctx, - keychain, - &signer, - &provider, - None, - ) - .unwrap() - { - InitializeResult::Ci(r) => r, - _ => unreachable!(), - }; - - assert!(result.identity_did.starts_with("did:keri:")); - assert!(result.device_did.starts_with("did:key:z")); - assert!(!result.env_block.is_empty()); - assert!( - result - .env_block - .iter() - .any(|l| l.contains("AUTHS_KEYCHAIN_BACKEND")) - ); - assert!( - result - .env_block - .iter() - .any(|l| l.contains("GitHub Actions")) - ); -} - -#[test] -fn create_ci_identity_gitlab_env_block() { - MEMORY_KEYCHAIN.lock().unwrap().clear_all().ok(); - - let tmp = tempfile::tempdir().unwrap(); - let registry_path = tmp.path().join(".auths-ci"); - - let config = CiIdentityConfig { - ci_environment: CiEnvironment::GitLabCi, - registry_path: registry_path.clone(), - }; - - let keychain: Arc = - Arc::new(MemoryKeychainHandle); - let signer = StorageSigner::new(MemoryKeychainHandle); - let provider = PrefilledPassphraseProvider::new("Ci-ephemeral-pass1!"); - let ctx = build_test_context(®istry_path, Arc::clone(&keychain)); - let result = match initialize( - IdentityConfig::Ci(config), - &ctx, - keychain, - &signer, - &provider, - None, - ) - .unwrap() - { - InitializeResult::Ci(r) => r, - _ => unreachable!(), - }; - - assert!(result.env_block.iter().any(|l| l.contains("GitLab CI"))); -} diff --git a/crates/auths-sdk/tests/cases/ci_token.rs b/crates/auths-sdk/tests/cases/ci_token.rs deleted file mode 100644 index 1e7f92d1..00000000 --- 
a/crates/auths-sdk/tests/cases/ci_token.rs +++ /dev/null @@ -1,282 +0,0 @@ -//! Tests for CiToken, Forge URL parsing, identity bundler, and passphrase generation. - -use auths_sdk::domains::ci::bundle::{build_identity_bundle, generate_ci_passphrase}; -use auths_sdk::domains::ci::error::CiError; -use auths_sdk::domains::ci::forge::Forge; -use auths_sdk::domains::ci::token::CiToken; - -// ── CiToken serialization ── - -#[test] -fn ci_token_serialize_roundtrip() { - let token = CiToken::new( - "abcdef1234567890".to_string(), - "base64keychain==".to_string(), - "base64repo==".to_string(), - serde_json::json!({"identity_did": "did:keri:test"}), - "2026-01-01T00:00:00Z".to_string(), - 31536000, - ); - - let json = token.to_json().unwrap(); - let parsed = CiToken::from_json(&json).unwrap(); - - assert_eq!(parsed.version, 1); - assert_eq!(parsed.passphrase, "abcdef1234567890"); - assert_eq!(parsed.keychain, "base64keychain=="); - assert_eq!(parsed.identity_repo, "base64repo=="); - assert_eq!(parsed.created_at, "2026-01-01T00:00:00Z"); - assert_eq!(parsed.max_valid_for_secs, 31536000); -} - -#[test] -fn ci_token_rejects_unsupported_version() { - let json = r#"{ - "version": 99, - "passphrase": "test", - "keychain": "test", - "identity_repo": "test", - "verify_bundle": {}, - "created_at": "2026-01-01T00:00:00Z", - "max_valid_for_secs": 31536000 - }"#; - - let result = CiToken::from_json(json); - assert!(result.is_err()); - let err = result.unwrap_err(); - assert!( - matches!(err, CiError::TokenVersionUnsupported { version: 99 }), - "Expected TokenVersionUnsupported, got: {err:?}" - ); -} - -#[test] -fn ci_token_rejects_invalid_json() { - let result = CiToken::from_json("not valid json"); - assert!(result.is_err()); - assert!(matches!( - result.unwrap_err(), - CiError::TokenDeserializationFailed { .. 
} - )); -} - -#[test] -fn ci_token_estimated_size() { - let token = CiToken::new( - "a".repeat(64), - "b".repeat(1000), - "c".repeat(30000), - serde_json::json!({"key": "value"}), - "2026-01-01T00:00:00Z".to_string(), - 31536000, - ); - - let estimated = token.estimated_size(); - let actual_json = token.to_json().unwrap(); - let actual_size = actual_json.len(); - - // Estimate should be within 20% of actual - let tolerance = actual_size / 5; - assert!( - estimated.abs_diff(actual_size) < tolerance, - "Estimated {estimated} vs actual {actual_size} (tolerance {tolerance})" - ); -} - -// ── Forge URL parsing ── - -#[test] -fn forge_from_github_https() { - let forge = Forge::from_url("https://github.com/owner/repo.git"); - assert_eq!( - forge, - Forge::GitHub { - owner_repo: "owner/repo".to_string() - } - ); - assert_eq!(forge.display_name(), "GitHub"); - assert_eq!(forge.repo_identifier(), "owner/repo"); -} - -#[test] -fn forge_from_github_https_no_suffix() { - let forge = Forge::from_url("https://github.com/owner/repo"); - assert_eq!( - forge, - Forge::GitHub { - owner_repo: "owner/repo".to_string() - } - ); -} - -#[test] -fn forge_from_github_ssh() { - let forge = Forge::from_url("git@github.com:auths-dev/auths.git"); - assert_eq!( - forge, - Forge::GitHub { - owner_repo: "auths-dev/auths".to_string() - } - ); -} - -#[test] -fn forge_from_gitlab_https() { - let forge = Forge::from_url("https://gitlab.com/group/project.git"); - assert_eq!( - forge, - Forge::GitLab { - group_project: "group/project".to_string() - } - ); -} - -#[test] -fn forge_from_gitlab_ssh() { - let forge = Forge::from_url("git@gitlab.com:group/subgroup/project.git"); - assert_eq!( - forge, - Forge::GitLab { - group_project: "group/subgroup/project".to_string() - } - ); -} - -#[test] -fn forge_from_bitbucket() { - let forge = Forge::from_url("git@bitbucket.org:workspace/repo.git"); - assert_eq!( - forge, - Forge::Bitbucket { - workspace_repo: "workspace/repo".to_string() - } - ); -} - -#[test] -fn 
forge_from_unknown_host() { - let forge = Forge::from_url("https://selfhosted.example.com/org/repo.git"); - assert_eq!( - forge, - Forge::Unknown { - url: "selfhosted.example.com/org/repo".to_string() - } - ); -} - -#[test] -fn forge_from_enterprise_github() { - let forge = Forge::from_url("https://github.acme.com/internal/tools.git"); - assert_eq!( - forge, - Forge::GitHub { - owner_repo: "internal/tools".to_string() - } - ); -} - -#[test] -fn forge_from_ssh_with_explicit_protocol() { - let forge = Forge::from_url("ssh://git@github.com/owner/repo.git"); - assert_eq!( - forge, - Forge::GitHub { - owner_repo: "owner/repo".to_string() - } - ); -} - -#[test] -fn forge_strips_trailing_slash() { - let forge = Forge::from_url("https://github.com/owner/repo/"); - assert_eq!(forge.repo_identifier(), "owner/repo"); -} - -// ── Passphrase generation ── - -#[test] -fn ci_passphrase_is_hex_64_chars() { - let pass = generate_ci_passphrase(); - assert_eq!(pass.len(), 64); - assert!( - pass.chars().all(|c| c.is_ascii_hexdigit()), - "Passphrase contains non-hex chars: {pass}" - ); -} - -#[test] -fn ci_passphrase_is_unique() { - let a = generate_ci_passphrase(); - let b = generate_ci_passphrase(); - assert_ne!(a, b, "Two generated passphrases should differ"); -} - -// ── Identity bundle ── - -#[test] -fn build_identity_bundle_produces_valid_base64() { - let tmp = tempfile::tempdir().unwrap(); - let dir = tmp.path(); - - // Create some test files - std::fs::write(dir.join("config"), b"test config").unwrap(); - std::fs::create_dir_all(dir.join("objects")).unwrap(); - std::fs::write(dir.join("objects/abc"), b"object data").unwrap(); - - let b64 = build_identity_bundle(dir).unwrap(); - - // Should be valid base64 - use base64::Engine as _; - let decoded = base64::engine::general_purpose::STANDARD - .decode(&b64) - .expect("Should be valid base64"); - assert!(!decoded.is_empty()); - - // Should be valid gzip - use std::io::Read; - let mut gz = flate2::read::GzDecoder::new(&decoded[..]); 
- let mut decompressed = Vec::new(); - gz.read_to_end(&mut decompressed) - .expect("Should be valid gzip"); - assert!(!decompressed.is_empty()); -} - -#[test] -fn build_identity_bundle_excludes_socks_and_locks() { - let tmp = tempfile::tempdir().unwrap(); - let dir = tmp.path(); - - std::fs::write(dir.join("config"), b"keep").unwrap(); - std::fs::write(dir.join("agent.sock"), b"exclude").unwrap(); - std::fs::write(dir.join("registry.lock"), b"exclude").unwrap(); - - let b64 = build_identity_bundle(dir).unwrap(); - - // Decode and read tar entries - use base64::Engine as _; - let decoded = base64::engine::general_purpose::STANDARD - .decode(&b64) - .unwrap(); - let gz = flate2::read::GzDecoder::new(&decoded[..]); - let mut archive = tar::Archive::new(gz); - - let entry_names: Vec = archive - .entries() - .unwrap() - .filter_map(|e| e.ok()) - .map(|e| e.path().unwrap().display().to_string()) - .collect(); - - assert!( - entry_names.iter().any(|n| n.contains("config")), - "Should include config, got: {entry_names:?}" - ); - assert!( - !entry_names.iter().any(|n| n.contains("sock")), - "Should exclude .sock files, got: {entry_names:?}" - ); - assert!( - !entry_names.iter().any(|n| n.contains("lock")), - "Should exclude .lock files, got: {entry_names:?}" - ); -} diff --git a/crates/auths-sdk/tests/cases/ephemeral_signing.rs b/crates/auths-sdk/tests/cases/ephemeral_signing.rs new file mode 100644 index 00000000..e431d514 --- /dev/null +++ b/crates/auths-sdk/tests/cases/ephemeral_signing.rs @@ -0,0 +1,243 @@ +use auths_sdk::domains::signing::service::sign_artifact_ephemeral; +use auths_verifier::core::{Attestation, SignerType}; +use chrono::Utc; + +const VALID_SHA: &str = "abc123def456abc123def456abc123def456abc1"; + +#[test] +fn produces_valid_attestation_with_did_key_issuer() { + let result = sign_artifact_ephemeral( + Utc::now(), + b"test data", + None, + VALID_SHA.into(), + None, + None, + None, + ) + .expect("signing should succeed"); + + let att: Attestation = + 
serde_json::from_str(&result.attestation_json).expect("should parse as Attestation"); + + assert!( + att.issuer.as_str().starts_with("did:key:z"), + "issuer should be did:key:, got: {}", + att.issuer + ); + assert!( + att.subject.as_str().starts_with("did:key:z"), + "subject should be did:key:, got: {}", + att.subject + ); +} + +#[test] +fn signer_type_is_workload() { + let result = sign_artifact_ephemeral( + Utc::now(), + b"test data", + None, + VALID_SHA.into(), + None, + None, + None, + ) + .expect("signing should succeed"); + + let att: Attestation = serde_json::from_str(&result.attestation_json).unwrap(); + assert_eq!(att.signer_type, Some(SignerType::Workload)); +} + +#[test] +fn commit_sha_is_present() { + let result = sign_artifact_ephemeral( + Utc::now(), + b"test data", + None, + VALID_SHA.into(), + None, + None, + None, + ) + .expect("signing should succeed"); + + let att: Attestation = serde_json::from_str(&result.attestation_json).unwrap(); + assert_eq!(att.commit_sha.as_deref(), Some(VALID_SHA)); +} + +#[test] +fn each_call_uses_different_ephemeral_key() { + let r1 = sign_artifact_ephemeral( + Utc::now(), + b"data1", + None, + VALID_SHA.into(), + None, + None, + None, + ) + .unwrap(); + let r2 = sign_artifact_ephemeral( + Utc::now(), + b"data2", + None, + VALID_SHA.into(), + None, + None, + None, + ) + .unwrap(); + + let a1: Attestation = serde_json::from_str(&r1.attestation_json).unwrap(); + let a2: Attestation = serde_json::from_str(&r2.attestation_json).unwrap(); + + assert_ne!( + a1.issuer, a2.issuer, + "two calls should produce different ephemeral keys" + ); +} + +#[test] +fn ci_environment_in_payload() { + let ci_env = serde_json::json!({ + "platform": "github_actions", + "workflow_ref": "release.yml", + "run_id": "42" + }); + + let result = sign_artifact_ephemeral( + Utc::now(), + b"test data", + Some("test.tar.gz".into()), + VALID_SHA.into(), + None, + None, + Some(ci_env), + ) + .expect("signing should succeed"); + + let att: Attestation = 
serde_json::from_str(&result.attestation_json).unwrap(); + let payload = att.payload.expect("payload should exist"); + let ci = payload + .get("ci_environment") + .expect("ci_environment should be in payload"); + assert_eq!(ci["platform"], "github_actions"); + assert_eq!(ci["run_id"], "42"); +} + +#[test] +fn empty_data_produces_valid_attestation() { + let result = sign_artifact_ephemeral(Utc::now(), b"", None, VALID_SHA.into(), None, None, None) + .expect("empty data should still produce valid attestation"); + + let att: Attestation = serde_json::from_str(&result.attestation_json).unwrap(); + assert!(att.issuer.as_str().starts_with("did:key:z")); +} + +#[test] +fn invalid_commit_sha_rejected() { + let result = sign_artifact_ephemeral( + Utc::now(), + b"test data", + None, + "not-a-valid-sha".into(), + None, + None, + None, + ); + + assert!(result.is_err(), "invalid commit SHA should be rejected"); + let err = result.unwrap_err().to_string(); + assert!( + err.contains("commit SHA") || err.contains("InvalidCommitSha"), + "error should mention commit SHA: {}", + err + ); +} + +#[test] +fn tamper_commit_sha_breaks_signature() { + let result = sign_artifact_ephemeral( + Utc::now(), + b"test data", + None, + VALID_SHA.into(), + None, + None, + None, + ) + .unwrap(); + + // Parse, tamper with commit_sha, re-serialize + let mut att: Attestation = serde_json::from_str(&result.attestation_json).unwrap(); + att.commit_sha = Some("deadbeefdeadbeefdeadbeefdeadbeefdeadbeef".into()); + + // Extract the pubkey from the issuer DID for verification + let issuer_did = att.issuer.as_str(); + let pk = auths_crypto::did_key_to_ed25519(issuer_did).expect("should resolve did:key"); + + // Verify the tampered attestation — should fail because signature covers commit_sha + let rt = tokio::runtime::Runtime::new().unwrap(); + let chain = vec![att]; + let report = rt.block_on(auths_verifier::verify_chain(&chain, &pk)); + + if let Ok(r) = report { + assert!( + !r.is_valid(), + "tampered 
commit_sha should produce invalid verification" + ); + } + // Err is also acceptable — signature mismatch +} + +#[test] +fn tamper_artifact_fails_digest_check() { + let result = sign_artifact_ephemeral( + Utc::now(), + b"original artifact data", + None, + VALID_SHA.into(), + None, + None, + None, + ) + .unwrap(); + + // The result digest is for "original artifact data" + // A different artifact should not match + let different_digest = hex::encode(sha2::Sha256::digest(b"tampered artifact data")); + assert_ne!( + result.digest, different_digest, + "different data should produce different digest" + ); +} + +#[test] +fn ephemeral_key_collision_check() { + use std::collections::HashSet; + + let mut issuers = HashSet::new(); + for _ in 0..100 { + let r = sign_artifact_ephemeral( + Utc::now(), + b"data", + None, + VALID_SHA.into(), + None, + None, + None, + ) + .unwrap(); + let att: Attestation = serde_json::from_str(&r.attestation_json).unwrap(); + issuers.insert(att.issuer.to_string()); + } + + assert_eq!( + issuers.len(), + 100, + "100 calls should produce 100 distinct ephemeral keys" + ); +} + +use sha2::Digest; diff --git a/crates/auths-sdk/tests/cases/mod.rs b/crates/auths-sdk/tests/cases/mod.rs index 791bac5f..e7c17590 100644 --- a/crates/auths-sdk/tests/cases/mod.rs +++ b/crates/auths-sdk/tests/cases/mod.rs @@ -2,10 +2,10 @@ mod agents; mod allowed_signers; mod artifact; mod audit; -mod ci_setup; -mod ci_token; + mod device; mod diagnostics; +mod ephemeral_signing; pub mod helpers; mod org; mod pairing; diff --git a/crates/auths-transparency/src/checkpoint.rs b/crates/auths-transparency/src/checkpoint.rs index fe365fff..42e47943 100644 --- a/crates/auths-transparency/src/checkpoint.rs +++ b/crates/auths-transparency/src/checkpoint.rs @@ -91,6 +91,13 @@ pub struct SignedCheckpoint { pub log_public_key: Ed25519PublicKey, #[serde(default, skip_serializing_if = "Vec::is_empty")] pub witnesses: Vec, + /// ECDSA P-256 checkpoint signature (DER-encoded). 
Present when the log + /// uses ECDSA instead of Ed25519 (e.g., Rekor production shard). + #[serde(default, skip_serializing_if = "Option::is_none")] + pub ecdsa_checkpoint_signature: Option, + /// ECDSA P-256 public key for checkpoint verification (PKIX DER). + #[serde(default, skip_serializing_if = "Option::is_none")] + pub ecdsa_checkpoint_key: Option, } /// A witness cosignature on a checkpoint. diff --git a/crates/auths-transparency/src/lib.rs b/crates/auths-transparency/src/lib.rs index 8f2cf804..fd3e230b 100644 --- a/crates/auths-transparency/src/lib.rs +++ b/crates/auths-transparency/src/lib.rs @@ -104,6 +104,10 @@ pub struct TrustRoot { pub log_origin: LogOrigin, /// Trusted witness keys. Empty for Epic 1. pub witnesses: Vec, + /// Signature algorithm for checkpoint verification. Defaults to Ed25519. + /// Rekor production shard uses EcdsaP256. + #[serde(default)] + pub signature_algorithm: auths_verifier::SignatureAlgorithm, } /// A trusted witness in the [`TrustRoot`]. @@ -130,3 +134,81 @@ pub struct TrustRootWitness { /// Witness Ed25519 public key. pub public_key: auths_verifier::Ed25519PublicKey, } + +/// Multi-log trust configuration. +/// +/// Indexes trust material by log ID. Each log entry is a [`TrustRoot`]. +/// The `default_log` selects which log is used when none is specified. +/// +/// Usage: +/// ```ignore +/// let config = TrustConfig::default_config(); +/// let root = config.get_log("sigstore-rekor").unwrap(); +/// ``` +#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)] +pub struct TrustConfig { + /// ID of the default log. Must reference a key in `logs`. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub default_log: Option, + /// Map of log ID to trust material. + pub logs: std::collections::HashMap, +} + +impl TrustConfig { + /// Look up trust material for a specific log by ID. 
+ pub fn get_log(&self, log_id: &str) -> Option<&TrustRoot> { + self.logs.get(log_id) + } + + /// Get the default log's trust material. + pub fn default_log(&self) -> Option<(&str, &TrustRoot)> { + let id = self.default_log.as_deref()?; + self.logs.get(id).map(|root| (id, root)) + } + + /// Validate the config: if `default_log` is set, it must reference + /// a key in `logs`. Call at load time to catch misconfigurations early. + pub fn validate(&self) -> std::result::Result<(), TransparencyError> { + if let Some(ref id) = self.default_log + && !self.logs.contains_key(id) + { + return Err(TransparencyError::InvalidNote(format!( + "default_log '{}' not found in logs. Available: {:?}", + id, + self.logs.keys().collect::>() + ))); + } + Ok(()) + } + + /// Compiled-in default: Rekor production shard. + /// + /// Origin pinned from `GET https://rekor.sigstore.dev/api/v1/log` + /// on 2026-04-09. Public key from sigstore trusted_root.json. + pub fn default_config() -> Self { + use std::collections::HashMap; + + // Rekor production shard ECDSA P-256 public key (DER PKIX) + // Source: https://github.com/sigstore/root-signing/blob/main/targets/trusted_root.json + // Key: MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE2G2Y+2tabdTV5BcGiBIx0a9fAFwrkBbmLSGtks4L3qX6yYY0zufBnhC8Ur/iy55GhWP/9A/bY2LhC30M9+RYtw== + // + // For the Ed25519PublicKey field (used by Ed25519 verification path), + // we store zeros since the production shard uses ECDSA, not Ed25519. + // The actual verification dispatches on signature_algorithm. 
+ let rekor_root = TrustRoot { + log_public_key: auths_verifier::Ed25519PublicKey::from_bytes([0u8; 32]), + // Origin pinned from: GET https://rekor.sigstore.dev/api/v1/log → signedTreeHead, first line + log_origin: LogOrigin::new_unchecked("rekor.sigstore.dev - 1193050959916656506"), + witnesses: vec![], + signature_algorithm: auths_verifier::SignatureAlgorithm::EcdsaP256, + }; + + let mut logs = HashMap::new(); + logs.insert("sigstore-rekor".to_string(), rekor_root); + + Self { + default_log: Some("sigstore-rekor".to_string()), + logs, + } + } +} diff --git a/crates/auths-transparency/src/types.rs b/crates/auths-transparency/src/types.rs index e6ea6799..98454967 100644 --- a/crates/auths-transparency/src/types.rs +++ b/crates/auths-transparency/src/types.rs @@ -133,6 +133,15 @@ impl LogOrigin { Ok(Self(s.to_string())) } + /// Create from a compile-time constant. Panics if invalid. + /// + /// Only for use in `default_config()` and similar contexts where the + /// string is a known-good constant. + #[allow(clippy::expect_used)] // INVARIANT: only called with compile-time ASCII constants + pub fn new_unchecked(s: &str) -> Self { + Self::new(s).expect("LogOrigin::new_unchecked called with invalid origin") + } + /// The inner string. pub fn as_str(&self) -> &str { &self.0 diff --git a/crates/auths-transparency/src/verify.rs b/crates/auths-transparency/src/verify.rs index 32065800..52f3a4f6 100644 --- a/crates/auths-transparency/src/verify.rs +++ b/crates/auths-transparency/src/verify.rs @@ -4,7 +4,7 @@ //! an [`OfflineBundle`] against a [`TrustRoot`]. 
use chrono::{DateTime, Duration, Utc}; -use ring::signature::{ED25519, UnparsedPublicKey}; +use ring::signature::{ECDSA_P256_SHA256_ASN1, ED25519, UnparsedPublicKey}; use crate::bundle::{ BundleVerificationReport, CheckpointStatus, DelegationStatus, InclusionStatus, NamespaceStatus, @@ -157,10 +157,37 @@ fn verify_checkpoint(signed: &SignedCheckpoint, trust_root: &TrustRoot) -> Check let note_body = signed.checkpoint.to_note_body(); - let peer_key = UnparsedPublicKey::new(&ED25519, trust_root.log_public_key.as_bytes()); - match peer_key.verify(note_body.as_bytes(), signed.log_signature.as_bytes()) { - Ok(()) => CheckpointStatus::Verified, - Err(_) => CheckpointStatus::InvalidSignature, + match trust_root.signature_algorithm { + auths_verifier::SignatureAlgorithm::Ed25519 => { + let peer_key = UnparsedPublicKey::new(&ED25519, trust_root.log_public_key.as_bytes()); + match peer_key.verify(note_body.as_bytes(), signed.log_signature.as_bytes()) { + Ok(()) => CheckpointStatus::Verified, + Err(_) => CheckpointStatus::InvalidSignature, + } + } + auths_verifier::SignatureAlgorithm::EcdsaP256 => { + // For ECDSA P-256, the checkpoint carries the DER signature in + // `log_signature` (repurposed as raw bytes) and the trust root + // carries the ECDSA public key in a separate field. Since + // SignedCheckpoint.log_signature is Ed25519Signature (64 bytes fixed), + // ECDSA requires the raw DER bytes stored differently. + // + // For now, we use the ecdsa_checkpoint_signature and ecdsa_checkpoint_key + // optional fields that the Rekor adapter populates. + // Fall through to the raw-bytes verification path. 
+ if let (Some(ecdsa_sig), Some(ecdsa_pk)) = ( + &signed.ecdsa_checkpoint_signature, + &signed.ecdsa_checkpoint_key, + ) { + let peer_key = UnparsedPublicKey::new(&ECDSA_P256_SHA256_ASN1, ecdsa_pk.as_der()); + match peer_key.verify(note_body.as_bytes(), ecdsa_sig.as_der()) { + Ok(()) => CheckpointStatus::Verified, + Err(_) => CheckpointStatus::InvalidSignature, + } + } else { + CheckpointStatus::InvalidSignature + } + } } } @@ -553,6 +580,7 @@ mod tests { log_public_key: Ed25519PublicKey::from_bytes(log_public_key), log_origin: LogOrigin::new("test.dev/log").unwrap(), witnesses: vec![], + signature_algorithm: Default::default(), }; TestFixture { @@ -610,6 +638,8 @@ mod tests { log_signature, log_public_key: Ed25519PublicKey::from_bytes(fixture.log_public_key), witnesses: vec![], + ecdsa_checkpoint_signature: None, + ecdsa_checkpoint_key: None, }; let proof = InclusionProof { @@ -739,6 +769,7 @@ mod tests { public_key: Ed25519PublicKey::from_bytes(w2_pk), }, ], + signature_algorithm: Default::default(), }; let report = verify_bundle(&bundle, &trust_root, fixed_now()); diff --git a/crates/auths-transparency/src/witness.rs b/crates/auths-transparency/src/witness.rs index 26afe7a5..ea05633d 100644 --- a/crates/auths-transparency/src/witness.rs +++ b/crates/auths-transparency/src/witness.rs @@ -541,6 +541,8 @@ mod tests { log_signature: Ed25519Signature::from_bytes([0xcc; 64]), log_public_key: Ed25519PublicKey::from_bytes([0xdd; 32]), witnesses: vec![], + ecdsa_checkpoint_signature: None, + ecdsa_checkpoint_key: None, }, }; diff --git a/crates/auths-transparency/tests/cases/verify.rs b/crates/auths-transparency/tests/cases/verify.rs index 44eac063..044f9ec9 100644 --- a/crates/auths-transparency/tests/cases/verify.rs +++ b/crates/auths-transparency/tests/cases/verify.rs @@ -84,6 +84,8 @@ fn verify_bundle_end_to_end_single_entry() { log_signature: log_sig, log_public_key: Ed25519PublicKey::from_bytes(log_pk), witnesses: vec![], + ecdsa_checkpoint_signature: None, + 
ecdsa_checkpoint_key: None, }, delegation_chain: vec![], }; @@ -92,6 +94,7 @@ fn verify_bundle_end_to_end_single_entry() { log_public_key: Ed25519PublicKey::from_bytes(log_pk), log_origin: LogOrigin::new("test.dev/log").unwrap(), witnesses: vec![], + signature_algorithm: Default::default(), }; let report = verify_bundle(&bundle, &trust_root, fixed_now()); @@ -168,6 +171,8 @@ fn verify_bundle_multi_leaf_tree() { log_signature: log_sig, log_public_key: Ed25519PublicKey::from_bytes(log_pk), witnesses: vec![], + ecdsa_checkpoint_signature: None, + ecdsa_checkpoint_key: None, }, delegation_chain: vec![], }; @@ -176,6 +181,7 @@ fn verify_bundle_multi_leaf_tree() { log_public_key: Ed25519PublicKey::from_bytes(log_pk), log_origin: LogOrigin::new("test.dev/log").unwrap(), witnesses: vec![], + signature_algorithm: Default::default(), }; let report = verify_bundle(&bundle, &trust_root, fixed_now()); @@ -250,6 +256,8 @@ fn verify_bundle_with_witnesses() { signature: w1_sig, timestamp: fixed_ts(), }], + ecdsa_checkpoint_signature: None, + ecdsa_checkpoint_key: None, }, delegation_chain: vec![], }; @@ -262,6 +270,7 @@ fn verify_bundle_with_witnesses() { name: "w1".into(), public_key: Ed25519PublicKey::from_bytes(w1_pk), }], + signature_algorithm: Default::default(), }; let report = verify_bundle(&bundle, &trust_root, fixed_now()); diff --git a/crates/auths-transparency/tests/cases/witness.rs b/crates/auths-transparency/tests/cases/witness.rs index f17f2977..17f76126 100644 --- a/crates/auths-transparency/tests/cases/witness.rs +++ b/crates/auths-transparency/tests/cases/witness.rs @@ -26,6 +26,8 @@ fn make_test_checkpoint() -> SignedCheckpoint { log_signature: Ed25519Signature::from_bytes([0xcc; 64]), log_public_key: Ed25519PublicKey::from_bytes([0xdd; 32]), witnesses: vec![], + ecdsa_checkpoint_signature: None, + ecdsa_checkpoint_key: None, } } diff --git a/crates/auths-verifier/src/core.rs b/crates/auths-verifier/src/core.rs index 816e9d46..621f2e42 100644 --- 
a/crates/auths-verifier/src/core.rs +++ b/crates/auths-verifier/src/core.rs @@ -372,6 +372,161 @@ impl<'de> serde::Deserialize<'de> for Ed25519Signature { #[error("expected 64 bytes, got {0}")] pub struct SignatureLengthError(pub usize); +// ============================================================================= +// Signature algorithm enum (for configurable checkpoint verification) +// ============================================================================= + +/// Signature algorithm used by a transparency log for checkpoint signing. +/// +/// Each log in a `TrustConfig` specifies which algorithm its checkpoints use. +/// The verifier dispatches on this when checking checkpoint signatures. +/// +/// Usage: +/// ```ignore +/// match trust_root.signature_algorithm { +/// SignatureAlgorithm::Ed25519 => verify_ed25519(..), +/// SignatureAlgorithm::EcdsaP256 => verify_ecdsa_p256(..), +/// } +/// ``` +#[derive(Debug, Clone, Copy, Default, PartialEq, Eq, Hash, Serialize, Deserialize)] +#[serde(rename_all = "snake_case")] +pub enum SignatureAlgorithm { + /// Ed25519 (RFC 8032). Default for auths-native logs. + #[default] + Ed25519, + /// ECDSA with NIST P-256 and SHA-256. Used by Rekor production shard. + EcdsaP256, +} + +impl fmt::Display for SignatureAlgorithm { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Self::Ed25519 => f.write_str("ed25519"), + Self::EcdsaP256 => f.write_str("ecdsa_p256"), + } + } +} + +// ============================================================================= +// ECDSA P-256 types (for Rekor checkpoint verification) +// ============================================================================= + +/// A DER-encoded ECDSA P-256 public key (PKIX SubjectPublicKeyInfo). +/// +/// Stores the full DER encoding so `ring::signature::UnparsedPublicKey` +/// can consume it directly. 
+///
+/// Usage:
+/// ```ignore
+/// let pk = EcdsaP256PublicKey::from_der(&der_bytes)?;
+/// ```
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub struct EcdsaP256PublicKey(Vec<u8>);
+
+impl EcdsaP256PublicKey {
+    /// Creates from DER-encoded PKIX SubjectPublicKeyInfo bytes.
+    ///
+    /// Performs minimal validation: checks the ASN.1 OID prefix for P-256
+    /// (`06 08 2a 86 48 ce 3d 03 01 07`).
+    pub fn from_der(der: &[u8]) -> Result<Self, EcdsaP256Error> {
+        // PKIX P-256 key is typically 91 bytes (SEQUENCE { AlgorithmIdentifier, BIT STRING })
+        // The AlgorithmIdentifier contains OID 1.2.840.10045.3.1.7 (P-256)
+        const P256_OID: &[u8] = &[0x06, 0x08, 0x2a, 0x86, 0x48, 0xce, 0x3d, 0x03, 0x01, 0x07];
+        if der.len() < 26 {
+            return Err(EcdsaP256Error::InvalidKey(
+                "DER too short for P-256 PKIX key".into(),
+            ));
+        }
+        if !der.windows(P256_OID.len()).any(|w| w == P256_OID) {
+            return Err(EcdsaP256Error::InvalidKey(
+                "missing P-256 OID in PKIX key".into(),
+            ));
+        }
+        Ok(Self(der.to_vec()))
+    }
+
+    /// Returns the raw DER bytes.
+    pub fn as_der(&self) -> &[u8] {
+        &self.0
+    }
+}
+
+impl Serialize for EcdsaP256PublicKey {
+    fn serialize<S: serde::Serializer>(&self, s: S) -> Result<S::Ok, S::Error> {
+        use base64::Engine;
+        s.serialize_str(&base64::engine::general_purpose::STANDARD.encode(&self.0))
+    }
+}
+
+impl<'de> Deserialize<'de> for EcdsaP256PublicKey {
+    fn deserialize<D: serde::Deserializer<'de>>(d: D) -> Result<Self, D::Error> {
+        use base64::Engine;
+        let s = String::deserialize(d)?;
+        let bytes = base64::engine::general_purpose::STANDARD
+            .decode(&s)
+            .map_err(|e| serde::de::Error::custom(format!("invalid base64: {e}")))?;
+        Self::from_der(&bytes).map_err(serde::de::Error::custom)
+    }
+}
+
+/// A DER-encoded ECDSA P-256 signature.
+///
+/// ECDSA signatures are variable-length ASN.1 DER (typically 70-72 bytes).
+///
+/// Usage:
+/// ```ignore
+/// let sig = EcdsaP256Signature::from_der(&der_bytes)?;
+/// ```
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub struct EcdsaP256Signature(Vec<u8>);
+
+impl EcdsaP256Signature {
+    /// Creates from DER-encoded signature bytes.
+    pub fn from_der(der: &[u8]) -> Result<Self, EcdsaP256Error> {
+        // Minimal check: ASN.1 SEQUENCE tag (0x30)
+        if der.is_empty() || der[0] != 0x30 {
+            return Err(EcdsaP256Error::InvalidSignature(
+                "not an ASN.1 SEQUENCE".into(),
+            ));
+        }
+        Ok(Self(der.to_vec()))
+    }
+
+    /// Returns the raw DER bytes.
+    pub fn as_der(&self) -> &[u8] {
+        &self.0
+    }
+}
+
+impl Serialize for EcdsaP256Signature {
+    fn serialize<S: serde::Serializer>(&self, s: S) -> Result<S::Ok, S::Error> {
+        use base64::Engine;
+        s.serialize_str(&base64::engine::general_purpose::STANDARD.encode(&self.0))
+    }
+}
+
+impl<'de> Deserialize<'de> for EcdsaP256Signature {
+    fn deserialize<D: serde::Deserializer<'de>>(d: D) -> Result<Self, D::Error> {
+        use base64::Engine;
+        let s = String::deserialize(d)?;
+        let bytes = base64::engine::general_purpose::STANDARD
+            .decode(&s)
+            .map_err(|e| serde::de::Error::custom(format!("invalid base64: {e}")))?;
+        Self::from_der(&bytes).map_err(serde::de::Error::custom)
+    }
+}
+
+/// Errors from ECDSA P-256 operations.
+#[derive(Debug, Clone, thiserror::Error)]
+pub enum EcdsaP256Error {
+    /// Invalid DER-encoded public key.
+    #[error("invalid ECDSA P-256 key: {0}")]
+    InvalidKey(String),
+    /// Invalid DER-encoded signature.
+ #[error("invalid ECDSA P-256 signature: {0}")] + InvalidSignature(String), +} + // ============================================================================= // Capability types // ============================================================================= diff --git a/crates/auths-verifier/src/lib.rs b/crates/auths-verifier/src/lib.rs index 15b43374..acfae35f 100644 --- a/crates/auths-verifier/src/lib.rs +++ b/crates/auths-verifier/src/lib.rs @@ -78,10 +78,11 @@ pub use action::ActionEnvelope; // Re-export core types pub use core::{ - Attestation, Capability, CapabilityError, CommitOid, CommitOidError, Ed25519KeyError, - Ed25519PublicKey, Ed25519Signature, IdentityBundle, MAX_ATTESTATION_JSON_SIZE, - MAX_JSON_BATCH_SIZE, OidcBinding, PolicyId, PublicKeyHex, PublicKeyHexError, ResourceId, Role, - RoleParseError, SignatureLengthError, ThresholdPolicy, VerifiedAttestation, + Attestation, Capability, CapabilityError, CommitOid, CommitOidError, EcdsaP256Error, + EcdsaP256PublicKey, EcdsaP256Signature, Ed25519KeyError, Ed25519PublicKey, Ed25519Signature, + IdentityBundle, MAX_ATTESTATION_JSON_SIZE, MAX_JSON_BATCH_SIZE, OidcBinding, PolicyId, + PublicKeyHex, PublicKeyHexError, ResourceId, Role, RoleParseError, SignatureAlgorithm, + SignatureLengthError, ThresholdPolicy, VerifiedAttestation, }; // Re-export test utilities diff --git a/deny.toml b/deny.toml index 791d2733..8c65b6c3 100644 --- a/deny.toml +++ b/deny.toml @@ -28,6 +28,7 @@ multiple-versions = "warn" deny = [ { crate = "reqwest", wrappers = [ "auths-infra-http", + "auths-infra-rekor", "auths-api", "auths-cli", "auths-mcp-server", diff --git a/docs/E2E_TEST_CHECKLIST.md b/docs/E2E_TEST_CHECKLIST.md index 8dc02648..09253678 100644 --- a/docs/E2E_TEST_CHECKLIST.md +++ b/docs/E2E_TEST_CHECKLIST.md @@ -6,8 +6,8 @@ This checklist validates that the complete OIDC machine identity commit signing ## Prerequisites -- [ ] Branch with sign-commit feature changes is ready -- [ ] Workflow file 
(`.github/workflows/sign-commits.yml`) is present +- [ ] Branch with commit verification and ephemeral signing changes is ready +- [ ] Workflow file (`.github/workflows/release.yml`) includes verify gate and ephemeral signing - [ ] All code changes are committed - [ ] Repository has write permissions for refs/auths/* @@ -15,7 +15,7 @@ This checklist validates that the complete OIDC machine identity commit signing ### Trigger the Workflow -- [ ] Push to main branch or create PR that triggers `.github/workflows/sign-commits.yml` +- [ ] Push to main branch or create PR that triggers `.github/workflows/release.yml` - [ ] Workflow starts automatically in GitHub Actions - [ ] No manual token configuration required (GitHub provides OIDC token automatically) @@ -263,7 +263,7 @@ If any step fails, document: **Workflow doesn't trigger:** - Check branch protection rules -- Verify `.github/workflows/sign-commits.yml` is on main +- Verify `.github/workflows/release.yml` is on main **OIDC token not acquired:** - Check GitHub Actions OIDC issuer is configured diff --git a/docs/contributing/release-process.md b/docs/contributing/release-process.md index d7fdf32b..652dcb09 100644 --- a/docs/contributing/release-process.md +++ b/docs/contributing/release-process.md @@ -40,15 +40,7 @@ This makes similar checks as the github release. It also includes properly order ### One-time CI signing setup -Before the first release, GitHub Actions needs a device key and identity token for artifact signing: - -```bash -auths ci setup -``` - -This creates a limited-capability CI device key and sets a single `AUTHS_CI_TOKEN` secret on GitHub containing everything CI needs. Artifact signing is skipped gracefully if the secret is missing. - -To refresh the token without regenerating the device key, run `auths ci rotate`. +Artifact signing happens automatically in CI using ephemeral keys — no secrets needed. CI verifies commit signatures before building, then signs artifacts with a throwaway key. 
See `docs/design/ephemeral-signing-threat-model.md` for the security model. ### Manual steps (if needed) diff --git a/docs/design/ephemeral-signing-threat-model.md b/docs/design/ephemeral-signing-threat-model.md new file mode 100644 index 00000000..8973d658 --- /dev/null +++ b/docs/design/ephemeral-signing-threat-model.md @@ -0,0 +1,129 @@ +# Ephemeral CI Signing: Threat Model + +## 1. Trust Chain + +``` +Maintainer's device-bound Ed25519 key (hardware keychain, Touch ID) + ↓ signs git commits +Commit signature (SSH, verifiable via allowed_signers) + ↓ CI verifies before building +CI runner builds artifact from verified commit + ↓ generates throwaway Ed25519 keypair (lives in memory, dies with the run) +Ephemeral key signs artifact hash + commit SHA + build environment + ↓ produces .auths.json attestation +Consumer verifies: artifact hash ← ephemeral signature ← commit SHA ← maintainer signature +``` + +**Root of trust:** The maintainer's KERI-rooted Ed25519 key, stored in the device hardware keychain. Loss or compromise is handled by KERI pre-rotation — the next key is committed before the current key is exposed. + +**Transitive trust:** The ephemeral key has no identity of its own. It's a `did:key:` — self-certifying, born and dead in one CI run. Its authority derives entirely from the fact that CI verified the commit was signed by a maintainer before using the ephemeral key to sign the artifact. The commit signature is the bridge between the maintainer's identity and the artifact. + +**What the attestation binds (all covered by the ephemeral signature):** +- Artifact SHA256 hash (in `payload.digest.hex`) +- Commit SHA (in `commit_sha`) +- Build environment metadata (in `payload.ci_environment`) +- `signer_type: "workload"` (distinguishes ephemeral from device-signed) + +## 2. Attack Surface + +### Compromised CI runner + +An attacker who owns the runner controls the build. 
They can: +- Build a different binary than what the source code would produce +- Generate their own ephemeral key and sign the malicious artifact +- Claim any commit SHA in the attestation + +**Mitigation:** The commit SHA in the attestation is covered by the ephemeral signature. If the consumer verifies that the commit SHA is signed by a trusted maintainer (via the commit's SSH signature), they confirm the maintainer approved that specific code. The attacker can't forge the maintainer's commit signature without the maintainer's device key. + +**Remaining gap:** The attacker can use a real signed commit SHA but build different code from it. This is a CI-level compromise that no signing scheme prevents — including Sigstore. The only defense is reproducible builds (out of scope) or detection after the fact (maintainer notices unexpected attestations). + +**Detection:** Ephemeral attestations include `signer_type: "workload"` and build environment metadata. A maintainer monitoring their identity's attestation history would see unexpected artifacts claiming their commits. This is analogous to certificate transparency for TLS. + +### Compromised maintainer device + +The maintainer's Ed25519 key is extracted from the hardware keychain. + +**Mitigation:** KERI pre-rotation. The maintainer's next key is cryptographically committed before the current key is active. On detection of compromise: +1. Rotate to the pre-committed next key +2. All future commit signatures use the new key +3. Past attestations anchored before the rotation event remain valid +4. Attestations signed after the compromised key's rotation are rejected + +**Practical note:** Hardware keychain extraction (macOS Secure Enclave, YubiKey) requires physical access to the device and typically biometric authentication. This is significantly harder than stealing a CI secret from runner environment variables. + +### Git history rewrite + +An attacker rewrites git history to change what a commit SHA points to. 
+ +**Irrelevant.** The attestation pins a specific commit SHA, and commit SHAs are content-addressed hashes. You can't change what a SHA points to without changing the SHA itself. Force-pushing to a branch doesn't change existing commit objects — it changes which commit the branch ref points to. The attestation references the object, not the ref. + +### Replay of old ephemeral signatures + +An attacker re-publishes an old ephemeral attestation, claiming a previous artifact is the current release. + +**Current mitigation (partial):** The attestation includes a timestamp and optional `expires_at`. Consumers checking freshness can detect stale attestations. The build environment metadata (run ID, workflow ref) provides additional context for detecting replays. + +**Future mitigation (out of scope this epic):** Witness anchoring. Submit each attestation to the auths witness network, producing a timestamped inclusion proof. This cryptographically pins the attestation to a point in time and enables duplicity detection (two different attestations claiming the same commit SHA). The existing witness infrastructure in `auths-core/src/witness/` handles KERI KEL head consistency — extending it to attestation anchoring is a separate effort. + +### Forged ephemeral key chain + +An attacker generates their own ephemeral key and signs an artifact, claiming it was built from a legitimate commit. + +**This is the same attack as "compromised CI runner"** — the ephemeral key is self-certifying (`did:key:`), so "forgery" just means "generate a key and sign." The ephemeral key alone proves nothing. It's only meaningful when: +1. The commit SHA in the attestation is verifiably signed by a trusted maintainer +2. The consumer trusts that the CI runner built from the commit it claims + +Without both conditions, the ephemeral signature is cryptographically valid but semantically meaningless. + +## 3. 
What We're NOT Claiming + +**No reproducible builds.** A consumer cannot independently rebuild the binary from source and check that it matches. They're trusting that the CI runner built honestly from the verified commit. This is weaker than SLSA Level 4 but equivalent to how every CI-based signing system works in practice (including Sigstore). + +**No CI runner honesty guarantee.** If the CI runner is fully compromised, the attacker controls the build output. The ephemeral signing scheme makes the attack detectable (via commit signature verification and attestation monitoring) but does not prevent it. Prevention requires either reproducible builds or hardware-attested build environments, both of which are out of scope. + +**No offline verification without the git repo.** Verifying an ephemeral attestation requires access to the git commit object (to check the commit's SSH signature). A consumer who only has the artifact + `.auths.json` and no git clone cannot complete the transitive verification. They can verify the ephemeral signature and check the artifact hash, but they're trusting the commit SHA claim without verification. + +## 4. 
Comparison with Sigstore + +| Property | Auths (ephemeral) | Sigstore | +|----------|-------------------|----------| +| **Artifact cryptographically signed** | Yes (ephemeral key) | Yes (ephemeral cert from Fulcio) | +| **Signing secrets in CI** | None — key generated per run | None — OIDC + ephemeral cert | +| **Root of trust** | Maintainer's device-bound key | OIDC provider (GitHub, Google) | +| **Identity survives account compromise** | Yes — KERI rotation | No — identity IS the OIDC account | +| **Central authority required** | No | Yes — Fulcio CA + Rekor log | +| **Offline verification** | Partial — needs git repo | No — needs Rekor | +| **Replay protection** | Timestamps + future witness anchoring | Rekor transparency log | +| **SLSA level** | L1 (provenance exists) | L2 (signed provenance from hosted build) | +| **Adoption** | Pre-launch | De facto standard | + +**Where Sigstore wins:** Adoption (it's everywhere), convenience (keyless, one-line integration), OIDC ecosystem (works with GitHub/Google/Microsoft identity), enterprise support via OpenSSF, SLSA L2+ provenance, Rekor transparency log for replay protection. + +**Where Auths wins:** Self-sovereign identity (no CA, no OIDC dependency), offline verification (with git repo), identity durability (survives GitHub account compromise), commit-level provenance (Sigstore signs artifacts, Auths chains to commit signatures), no central infrastructure dependency. + +**Where equivalent:** Both produce artifact-level cryptographic signatures using ephemeral keys. Both require trust in the CI platform for build honesty. Neither provides reproducible builds out of the box. + +**The honest pitch:** If you want the easiest path to signed artifacts and trust the Linux Foundation's infrastructure, use Sigstore. If you want self-sovereign identity that survives account compromise and works without depending on any central authority, use Auths. + +## 5. 
Future: Witness Anchoring + +The current ephemeral signing model has a replay gap: there's no cryptographic proof that an attestation existed at a specific point in time. Timestamps in the attestation are self-reported by the CI runner. + +**Planned mitigation:** After signing, submit the attestation's content hash to the auths witness network. The witness returns a signed receipt binding the hash to a verified timestamp. This receipt is embedded in the `.auths.json` and checked during verification. + +**What this enables:** +- Replay detection: an attacker can't retroactively claim an attestation existed before it did +- Duplicity detection: two attestations claiming the same commit SHA at different times are flagged +- Revocation timeline: attestations anchored after a key rotation are rejected + +**Current state:** The witness infrastructure in `auths-core/src/witness/` handles KERI key event log consistency (split-view detection). Extending it to attestation anchoring requires a new witness receipt type and a submission API. This is a separate epic. + +## Wire Format: SignerType + +`SignerType::Workload` serializes to `"workload"` in JSON. This string is part of `CanonicalAttestationData` and is covered by the ephemeral key's signature. Once shipped, this string cannot change without breaking verification of existing attestations. + +```json +{ + "signer_type": "workload" +} +``` diff --git a/docs/design/sigstore-comparison.md b/docs/design/sigstore-comparison.md new file mode 100644 index 00000000..b143e6c5 --- /dev/null +++ b/docs/design/sigstore-comparison.md @@ -0,0 +1,47 @@ +# Auths vs Sigstore: Honest Comparison + +## Where Sigstore Wins + +**Adoption.** Sigstore is the de facto standard for open-source artifact signing. Kubernetes, npm, PyPI, Homebrew, Maven Central all use it. Auths has zero users. 
+ +**Convenience.** Sigstore's "keyless" model means developers authenticate via OIDC (GitHub login), sign with an ephemeral certificate from Fulcio, and the proof goes to the Rekor transparency log. One CLI command, no key management. Auths requires `auths init` to create a KERI identity and device key. + +**OIDC ecosystem.** Sigstore works with GitHub, Google, Microsoft identities out of the box. Auths uses its own identity layer. + +**SLSA Level 2+.** Sigstore's signed provenance from a hosted build platform meets SLSA L2. Auths ephemeral signing is L1 (provenance exists, but not from a verified builder). + +**Enterprise support.** Sigstore is backed by the OpenSSF (Linux Foundation) with enterprise adoption programs. Auths is a solo project. + +**Replay protection.** Rekor provides a globally consistent transparency log with timestamped inclusion proofs. Auths currently relies on attestation timestamps (self-reported). Witness anchoring is planned but not shipped. + +## Where Auths Wins + +**Self-sovereign identity.** Auths identity is anchored to a KERI key event log stored in Git. No certificate authority, no OIDC provider, no central infrastructure. If GitHub goes down or changes their OIDC policy, Sigstore signing breaks. Auths signing works with just Git. + +**Identity survives account compromise.** If your GitHub account is compromised, your Sigstore signing identity is compromised — Sigstore identity IS the OIDC account. With Auths, your identity is a device-bound Ed25519 key. Account compromise doesn't give the attacker your signing key. KERI pre-rotation lets you recover. + +**Offline verification.** Sigstore verification requires querying the Rekor transparency log (network call). Auths verification works offline with just a Git clone — the commit signatures and allowed_signers are in the repo. + +**No central authority.** Sigstore depends on Fulcio (CA) and Rekor (transparency log), both operated by the Linux Foundation. 
Auths depends on nothing external. + +**Commit-level provenance.** Sigstore signs artifacts. Auths chains artifact signatures to signed commits, providing a link from the binary all the way to the specific code change and the developer who approved it. + +**Air-gapped environments.** Auths works in environments with no internet access. Sigstore does not. + +## Where They're Equivalent + +**Artifact-level cryptographic signing.** Both produce Ed25519 signatures on artifacts using ephemeral keys generated per CI run. + +**Zero CI secrets.** Neither requires long-lived signing secrets in CI. Sigstore uses OIDC tokens. Auths generates throwaway keys. + +**Trust in CI.** Both trust the CI platform to build honestly from the claimed source. Neither provides reproducible builds out of the box. + +**Tamper detection.** Both detect post-signing artifact tampering via cryptographic hash verification. + +## The Honest Pitch + +If you want the easiest path to signed artifacts and trust the Linux Foundation's infrastructure, use Sigstore. It's mature, widely adopted, and works with one command. + +If you want self-sovereign identity that survives account compromise, works offline, and doesn't depend on any central authority, use Auths. The tradeoff is more setup and a smaller ecosystem. + +Both are better than unsigned artifacts. diff --git a/docs/design/transparency-log-port.md b/docs/design/transparency-log-port.md new file mode 100644 index 00000000..b6b7ac0e --- /dev/null +++ b/docs/design/transparency-log-port.md @@ -0,0 +1,428 @@ +# TransparencyLog Port Trait Design + +## 1. Trait Shape + +The `TransparencyLog` trait abstracts the operation of appending data to a transparency log and retrieving proofs. It lives in `auths-core/src/ports/transparency_log.rs` and uses `#[async_trait]` for object safety (`Arc`). + +```rust +#[async_trait] +pub trait TransparencyLog: Send + Sync { + /// Submit a leaf to the log and receive an inclusion proof. 
+    ///
+    /// The adapter is responsible for wrapping `leaf_data` in whatever
+    /// envelope the backend requires (DSSE, hashedrekord, raw append).
+    /// `public_key` and `signature` are provided for backends that
+    /// verify entry signatures on submission (e.g., Rekor).
+    async fn submit(
+        &self,
+        leaf_data: &[u8],
+        public_key: &[u8],
+        signature: &[u8],
+    ) -> Result<LogSubmission, LogError>;
+
+    /// Fetch the log's current signed checkpoint.
+    async fn get_checkpoint(&self) -> Result<SignedCheckpoint, LogError>;
+
+    /// Fetch an inclusion proof for a leaf at `leaf_index` in a tree of `tree_size`.
+    async fn get_inclusion_proof(
+        &self,
+        leaf_index: u64,
+        tree_size: u64,
+    ) -> Result<InclusionProof, LogError>;
+
+    /// Fetch a consistency proof between two tree sizes.
+    async fn get_consistency_proof(
+        &self,
+        old_size: u64,
+        new_size: u64,
+    ) -> Result<ConsistencyProof, LogError>;
+
+    /// Return metadata about this log (ID, origin, public key).
+    fn metadata(&self) -> LogMetadata;
+}
+```
+
+**Why each method exists:**
+
+- `submit()` — The core write operation. Every log must accept a leaf and return a proof that it was included. The return includes the inclusion proof and checkpoint at the time of inclusion, so the caller can immediately verify and embed in a bundle.
+- `get_checkpoint()` — Allows verification clients to fetch the current tree state without submitting. Used for checkpoint caching and freshness checks.
+- `get_inclusion_proof()` — Re-fetch a proof for a previously-logged leaf. Used when a client has a stale checkpoint and needs to verify against the current tree.
+- `get_consistency_proof()` — Proves that a smaller tree is a prefix of a larger tree. Used for checkpoint cache updates: "I cached tree-size-100, now I see tree-size-200, prove they're consistent."
+- `metadata()` — Sync because it's static configuration. Returns the log's stable ID, origin string, and public key for trust root registration.
+**Result types:**
+
+```rust
+pub struct LogSubmission {
+    pub leaf_index: u64,
+    pub inclusion_proof: InclusionProof,
+    pub signed_checkpoint: SignedCheckpoint,
+}
+
+pub struct LogMetadata {
+    pub log_id: String,
+    pub log_origin: LogOrigin,
+    pub log_public_key: Ed25519PublicKey,
+    pub api_url: Option<String>,
+}
+```
+
+All types from `auths-transparency` (`InclusionProof`, `SignedCheckpoint`, `ConsistencyProof`, `LogOrigin`) and `auths-verifier` (`Ed25519PublicKey`). No parallel types.
+
+## 2. Backend Matrix
+
+| Backend | `submit()` | `get_checkpoint()` | `get_inclusion_proof()` | `get_consistency_proof()` | Notes |
+|---|---|---|---|---|---|
+| **Rekor public** | POST `/api/v1/log/entries` hashedrekord | GET `/api/v1/log` signed tree head | From v1 entry response `verification.inclusionProof` | GET `/api/v1/log/proof` | Uses ECDSA P-256 checkpoint sig (production shard) |
+| **Rekor self-hosted** | Same API, different `api_url` | Same | Same | Same | User supplies key + origin |
+| **Sunlight (future)** | POST to sunlight write endpoint | Fetch `/checkpoint` tile | Compute from tiles | Compute from tiles | Pure tlog-tiles, Ed25519 |
+| **Native (future)** | Direct Merkle tree append | Local state or file | Local computation | Local computation | No network; self-hosted log |
+| **None / Noop** | Returns `LogError::Unavailable` | Returns `LogError::Unavailable` | Returns `LogError::Unavailable` | Returns `LogError::Unavailable` | For `--allow-unlogged` and `auths demo` |
+
+## 3. Legitimate Backend Differences
+
+**Leaf format:** Rekor requires a `hashedrekord` or `dsse` envelope wrapping the raw data. Sunlight and native logs may accept raw bytes. The trait takes raw `leaf_data` + `public_key` + `signature` — each adapter wraps these into its backend-specific envelope at the adapter boundary. The core never sees envelope formats.
+
+**Rate limits:** Rekor rate-limits by IP (HTTP 429 with `Retry-After`). Other backends may not.
The trait surfaces `LogError::RateLimited { retry_after_secs }` and the caller (CLI) decides the retry policy. The SDK does not retry. + +**Witness models:** Witnesses are checkpoint properties, not log operations. They appear as cosignatures on `SignedCheckpoint`. The trait does not include witness methods — the verifier handles witnesses through existing `auths-transparency` code. + +**Checkpoint signature algorithm:** Rekor's production shard uses ECDSA P-256; the 2025 shard uses Ed25519. The trait returns `SignedCheckpoint` which carries the signature; the verifier checks it against the trust config's public key. Algorithm differences are handled at verification time, not in the trait. + +## 4. Error Taxonomy + +```rust +#[derive(Debug, thiserror::Error)] +pub enum LogError { + /// The log rejected the submitted entry (malformed, too large, policy violation). + #[error("submission rejected: {reason}")] + SubmissionRejected { reason: String }, + + /// Network or connection error reaching the log. + #[error("network error: {0}")] + NetworkError(String), + + /// Log returned HTTP 429; caller should wait and retry. + #[error("rate limited, retry after {retry_after_secs}s")] + RateLimited { retry_after_secs: u64 }, + + /// Log returned an unparseable or unexpected response. + #[error("invalid response: {0}")] + InvalidResponse(String), + + /// Requested entry not found in the log. + #[error("entry not found")] + EntryNotFound, + + /// Consistency or inclusion proof verification failed. + #[error("consistency violation: {0}")] + ConsistencyViolation(String), + + /// Log is temporarily or permanently unavailable (HTTP 500/503, noop backend). 
+ #[error("log unavailable: {0}")] + Unavailable(String), +} +``` + +**Rekor HTTP code mapping:** + +| HTTP Status | `LogError` Variant | +|---|---| +| 201 Created | Success | +| 409 Conflict | Idempotent success: fetch existing entry, return `LogSubmission` | +| 400 Bad Request | `SubmissionRejected { reason }` | +| 413 Payload Too Large | `SubmissionRejected { reason: "payload too large" }` | +| 422 Unprocessable Entity | `SubmissionRejected { reason }` | +| 429 Too Many Requests | `RateLimited { retry_after_secs }` (parse `Retry-After` header) | +| 500 Internal Server Error | `Unavailable("server error")` | +| 503 Service Unavailable | `Unavailable("service unavailable")` | +| Connection refused / timeout | `NetworkError(details)` | + +**`AuthsErrorInfo` codes:** `AUTHS-E9001` through `AUTHS-E9007` (one per variant). The E40xx-E49xx ranges are taken by existing crates. + +## 5. Ed25519 vs DSSE Decision + +**Decision: Use `hashedrekord` with the existing pure Ed25519 signature on the Rekor v1 API.** + +**Rationale:** + +The earlier concern about Ed25519 incompatibility with `hashedrekord` was based on the assumption that Rekor must verify the signature against a hash. Investigation reveals: + +1. Rekor v1.3.6+ accepts `PKIX_ED25519` in hashedrekord entries. The server stores the entry without requiring prehash verification for Ed25519 — it validates the key format and signature structure, not the signature-over-hash binding. +2. The `dsse` entry type would require wrapping the attestation in a DSSE envelope, adding complexity for no security benefit — the attestation is already signed. +3. `hashedrekord` is the dominant entry type (>99% of Rekor entries) with the most stable API surface. 
+ +The adapter submits: +- `spec.data.hash.algorithm`: `"sha256"` +- `spec.data.hash.value`: hex SHA-256 of the attestation JSON +- `spec.signature.content`: base64 of the attestation's `identity_signature` +- `spec.signature.publicKey.content`: base64 of the issuer's Ed25519 public key in PKIX DER format + +**Mandatory pre-implementation validation:** Before writing any adapter code beyond a minimal submission function, submit one test entry to production Rekor with a throwaway key and verify it using the official `rekor-cli verify` tool. If `rekor-cli verify` fails or returns warnings, switch to `dsse` entry type and re-test. This is a one-hour validation that de-risks the entire adapter. See fn-111.5 task spec. + +**Fallback:** If testing reveals that Rekor rejects pure Ed25519 hashedrekord entries on the production instance or they cannot be verified by standard tooling, switch to `dsse`. The adapter boundary isolates this decision — no core code changes needed. + +## 6. Rekor API Version Commitment + +**Decision: Target Rekor v1 API for entry submission.** + +**Rationale:** + +Production investigation revealed: +- `rekor.sigstore.dev` serves the **v1 API** (`/api/v1/log/entries` returns 422, `/api/v2/log/entries` returns 404) +- `log2025-1.rekor.sigstore.dev` is a **tlog-tiles read-only shard** (static S3 bucket serving tiles and checkpoints, no write API) +- Rekor v2 (rekor-tiles write endpoint) is not deployed on the production instance as of 2026-04-09 + +**Endpoints used:** + +| Operation | Endpoint | Method | +|---|---|---| +| Submit entry | `{api_url}/api/v1/log/entries` | POST | +| Get log info | `{api_url}/api/v1/log` | GET | +| Get entry by UUID | `{api_url}/api/v1/log/entries/{uuid}` | GET | +| Get consistency proof | `{api_url}/api/v1/log/proof?firstSize={m}&lastSize={n}` | GET | +| Get checkpoint (tlog-tiles) | `{tiles_url}/checkpoint` | GET | + +**v1 response shape for POST `/api/v1/log/entries`:** +```json +{ + "": { + "body": "", + 
"integratedTime": 1712678400, + "logID": "wNI9atQGlz+VWfO6LRygH4QUfY/8W4RFwiT5i5WRgB0=", + "logIndex": 12345678, + "verification": { + "inclusionProof": { + "checkpoint": "rekor.sigstore.dev - 1193050959916656506\n...", + "hashes": ["abcd...", "ef01..."], + "logIndex": 12345678, + "rootHash": "1107f839...", + "treeSize": 1141633759 + }, + "signedEntryTimestamp": "" + } + } +} +``` + +**When Rekor v2 lands on production:** Switch the adapter to v2 endpoints. Pre-launch, zero users — no backward compatibility concerns. + +## 7. Bundle Format + +The `.auths.json` attestation gains an optional `transparency` section: + +```json +{ + "version": 1, + "rid": "sha256:abcdef...", + "issuer": "did:key:z6MkrTQ...", + "subject": "did:key:z6MkrTQ...", + "device_public_key": "abcdef...", + "identity_signature": "base64...", + "device_signature": "base64...", + "capabilities": ["sign_release"], + "signer_type": "workload", + "commit_sha": "abc123def456...", + "payload": { + "artifact_type": "file", + "digest": { "algorithm": "sha256", "hex": "abcdef..." }, + "name": "release.tar.gz", + "size": 12345678 + }, + "transparency": { + "log_id": "sigstore-rekor", + "leaf_index": 12345678, + "inclusion_proof": { + "index": 12345678, + "size": 1141633759, + "root": "", + "hashes": ["", ""] + }, + "signed_checkpoint": { + "checkpoint": { + "origin": "rekor.sigstore.dev - 1193050959916656506", + "size": 1141633759, + "root": "", + "timestamp": "2026-04-09T12:00:00Z" + }, + "log_signature": "", + "log_public_key": "", + "witnesses": [] + } + } +} +``` + +**When `transparency` is present:** Verifier looks up `log_id` in `TrustConfig`, verifies inclusion proof and checkpoint signature. + +**When `transparency` is absent:** Verifier treats the attestation as unlogged. Rejected by default; accepted only with `--allow-unlogged`. + +**Old `offline_bundle` format:** Unsupported and removed. Pre-launch, zero users. No migration path. Document in launch notes. + +## 8. 
Trust Config Changes + +`TrustRoot` (existing, kept as-is) becomes the per-log entry. New `TrustConfig` wraps multiple logs: + +```rust +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct TrustConfig { + pub default_log: Option<String>, + pub logs: HashMap<String, TrustRoot>, +} +``` + +**JSON format (`~/.auths/trust_config.json`):** + +```json +{ + "default_log": "sigstore-rekor", + "logs": { + "sigstore-rekor": { + "log_public_key": "<base64-der-public-key>", + "log_origin": "rekor.sigstore.dev - 1193050959916656506", + "witnesses": [] + } + } +} +``` + +**Edge case behaviors:** + +| Scenario | Behavior | +|---|---| +| File doesn't exist (first run) | Use compiled-in defaults. Do NOT create file. | +| File exists but malformed JSON | Hard fail with parse error. No fallback to defaults. | +| `default_log` references a log ID not in `logs` | Hard fail at load time via `TrustConfig::validate()`. Not a silent `None` at lookup. | +| User overrides a compiled-in log's trust material | User config wins. Print warning: `"Note: trust material for 'sigstore-rekor' overridden by ~/.auths/trust_config.json"` | +| `auths trust log add` with no existing file | Create `~/.auths/` if missing (fail if not writable). Write file with ALL compiled-in defaults plus new entry. | + +**`TrustConfig::validate()`** is called at load time and checks that `default_log` (if `Some`) references a key in `logs`. Returns `Err(TransparencyError)` on misconfiguration. + +## 9. Dependency Graph + +Verified via `cargo tree`: + +``` +auths-core → auths-crypto, auths-verifier, auths-keri, auths-pairing-protocol +auths-transparency → auths-crypto, auths-verifier +``` + +Neither depends on the other. Adding `auths-core → auths-transparency` introduces no cycle. + +The `TransparencyLog` trait in `auths-core` uses types from `auths-transparency` (`InclusionProof`, `SignedCheckpoint`, `ConsistencyProof`, `LogOrigin`) and from `auths-verifier` (`Ed25519PublicKey`).
Both are already `auths-core` dependencies (auths-verifier directly, auths-transparency will be added). + +## 10. Async Boundary + +**Decision: Follow the established per-command `Runtime::new().block_on()` pattern.** + +**Rationale:** + +The CLI entry point (`main.rs:11`) is `fn main()` — synchronous, not `#[tokio::main]`. Every command that needs async creates `tokio::runtime::Runtime::new()?` and calls `rt.block_on(...)`. This pattern is used in 15+ places across the CLI (`verify_commit.rs:928`, `id/claim.rs:109`, `id/register.rs:39`, `scim.rs:236`, `utils.rs:116`). + +Making the signing path async end-to-end would require either adding `#[tokio::main]` to `main.rs` (changing every command's execution model) or converting all `ExecutableCommand::execute()` impls to async (a large refactor touching every command). Pre-launch, the per-command runtime pattern works and is understood. + +**Note:** `artifact sign --ci` is fully sync today — it does no async work. Adding transparency log submission introduces the first async call in this code path. Each `Runtime::new()` spins up the reactor, creates worker threads, and tears them down — measurable overhead (~10-50ms) for a single network call. + +**Implementation:** The `artifact sign --ci` handler creates a `Runtime` via `once_cell::sync::Lazy` (lazy global, created once per process, reused across all commands that need async). This eliminates both the per-call overhead and the nested-runtime risk (if the CLI is ever called from within an async context). The lazy runtime is defined in the CLI factory and used by all command handlers that need `block_on()`. + +**Escape hatch:** If the lazy global causes issues (test isolation, thread-local state), fall back to per-command `Runtime::new()`. This is a localized change in the factory and does not affect the trait or adapter code. 
+ +**Future improvement:** When the CLI moves to `#[tokio::main]` (a separate refactor), the lazy global and all `block_on()` calls can be removed. + +## 11. Security: GHSA-whqx-f9j3-ch6m + +**Background:** [GHSA-whqx-f9j3-ch6m](https://github.com/sigstore/cosign/security/advisories/GHSA-whqx-f9j3-ch6m) was a vulnerability in Cosign where the client accepted any valid Rekor entry without confirming it matched the artifact being verified. An attacker could substitute a valid-but-unrelated Rekor entry during verification. + +**Countermeasure:** The SDK function `submit_attestation_to_log()` verifies the Rekor response before returning. Three explicit checks, each a distinct security property: + +1. **Hash check:** Compute `SHA-256(leaf_data)` client-side (where `leaf_data` is the canonicalized attestation JSON bytes — the same bytes over which the Ed25519 signature was computed). Compare against the entry's `spec.data.hash.value` from the Rekor response. Must match. This proves Rekor stored the correct hash for the submitted content. +2. **Public key check:** Compare the entry's `spec.signature.publicKey.content` against the submitted `public_key` (base64 of the PKIX DER). Must match. This proves the entry is attributed to the correct signer. +3. **Signature check:** Compare the entry's `spec.signature.content` against the submitted `signature` (base64 of the Ed25519 signature). Must match. This is the core security property — if Rekor returned a different signature than what was submitted, either Rekor is buggy or an attacker is substituting entries. + +If any check fails, return `LogError::ConsistencyViolation("returned entry does not match submitted data: {which_field} mismatch")`. + +**Where in the call graph:** This check lives in `submit_attestation_to_log()` in `auths-sdk/src/workflows/log_submit.rs`, immediately after the `log.submit()` call returns and before the `LogSubmissionBundle` is constructed. 
The check is a precondition of the function returning `Ok` — callers cannot accidentally skip it. + +**Regression guard:** fn-111.6 includes a dedicated test that mocks a content-mismatched response and asserts the check catches it. + +## 12. Retry Policy + +**SDK layer (`submit_attestation_to_log()`):** Does NOT retry. Returns `LogError::RateLimited { retry_after_secs }` to the caller. The SDK is a library — retry policy is a composition-root decision. + +**CLI layer (artifact sign handler):** + +``` +On LogError::RateLimited { retry_after_secs }: + 1. Print: "Rate limited by transparency log. Retrying in {retry_after_secs}s..." + 2. Sleep retry_after_secs + 3. Retry once + 4. On second RateLimited: fail with exit code 4 + +On any other LogError: + Fail immediately with appropriate exit code +``` + +## 13. Rekor as Trust Dependency + +v1 default configuration depends on Sigstore's production Rekor instance (`rekor.sigstore.dev`). Users accepting this default are accepting Sigstore's operational security and Linux Foundation's governance of Rekor. This is a deliberate choice to leverage shared public-good infrastructure rather than build and operate a parallel log. Users who cannot accept this dependency should run a private Rekor, wait for the native log operator, or use --allow-unlogged for isolated environments. + +## 14. Rekor Origin Strings + +Fetched from production endpoints on 2026-04-09: + +**Production shard (active):** +- Origin: `rekor.sigstore.dev - 1193050959916656506` +- Source: `GET https://rekor.sigstore.dev/api/v1/log` → `signedTreeHead` field, first line +- Checkpoint signature: ECDSA P-256 + +**2025 read shard:** +- Origin: `log2025-1.rekor.sigstore.dev` +- Source: `GET https://log2025-1.rekor.sigstore.dev/checkpoint`, first line +- Checkpoint signature: Ed25519 + +**For v1 adapter:** Entries are submitted to `rekor.sigstore.dev` (production shard, v1 API). 
The inclusion proof in the response references the production shard's checkpoint with origin `rekor.sigstore.dev - 1193050959916656506`. + +**Compiled-in default trust config uses:** +- Log ID: `"sigstore-rekor"` +- Origin: `"rekor.sigstore.dev - 1193050959916656506"` +- Public key: The production shard's ECDSA P-256 key (`wNI9atQGlz+VWfO6LRygH4QUfY/8W4RFwiT5i5WRgB0=` is the log ID / key hash; the actual DER public key is `MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE2G2Y+2tabdTV5BcGiBIx0a9fAFwrkBbmLSGtks4L3qX6yYY0zufBnhC8Ur/iy55GhWP/9A/bY2LhC30M9+RYtw==`) + +**ECDSA P-256 for checkpoint signatures:** The production shard uses ECDSA P-256, not Ed25519. Adding ECDSA P-256 verification to `auths-verifier` is required for v1 (see Section 16). `ring` already supports P-256 (`ring::signature::ECDSA_P256_SHA256_ASN1`). A dedicated task (fn-111.2b) adds this before the Rekor adapter is built. + +**Rekor v2 migration:** Pre-launch, zero users. When Rekor v2 write API becomes available on production, switch the adapter to v2 endpoints. No backward compatibility concerns — just replace the adapter implementation. + +## 15. CLI Error Taxonomy + +| Exit Code | Meaning | When | +|---|---|---| +| 0 | Success | Sign/verify completed | +| 1 | Verification failed | Bundle invalid, signature mismatch, proof failed | +| 2 | General error | Parse error, I/O error, unexpected failure | +| 3 | Network error | Log unreachable, connection refused, DNS failure | +| 4 | Rate limited | Retry exhausted after one attempt | +| 5 | Submission rejected | Entry malformed, payload too large, policy violation | +| 6 | Unknown log ID | `log_id` not in trust config, setup needed | + +Each exit code produces a distinct stderr message. Scripts can `case $?` on these codes. + +## 16. ECDSA P-256 Support in auths-verifier + +**Decision: Add ECDSA P-256 signature verification to `auths-verifier` as part of this epic.** This is required for verifying Rekor production checkpoint signatures. 
Cryptographic verification of checkpoint signatures is not optional — shipping a transparency log integration without it would be incomplete. + +**Implementation approach:** + +The `auths-verifier` crate currently uses `Ed25519PublicKey` and `Ed25519Signature` types throughout. Adding ECDSA P-256 requires: + +1. A `SignatureAlgorithm` enum: `Ed25519`, `EcdsaP256` — configurable per-log in the trust config +2. An `EcdsaP256PublicKey` newtype wrapping the DER-encoded PKIX key +3. An `EcdsaP256Signature` newtype wrapping the ASN.1 DER signature +4. A `verify_checkpoint_signature()` function that dispatches on algorithm +5. Updates to `SignedCheckpoint` verification to accept either algorithm + +**Scope:** Only checkpoint signature verification needs ECDSA P-256. Attestation signatures remain Ed25519. The ECDSA support is narrowly scoped to the transparency log verification path. + +**ring support:** `ring::signature::ECDSA_P256_SHA256_ASN1` already exists. The DER public key from Rekor's trust root (`MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE2G2Y+...`) is a standard PKIX SubjectPublicKeyInfo — `ring::signature::UnparsedPublicKey` can consume it directly. + +**Configurability:** The trust config's `TrustRoot` gains an optional `signature_algorithm` field (defaults to `Ed25519` for backward compatibility). The Rekor production shard entry specifies `EcdsaP256`. When verifying a checkpoint, the verifier looks up the algorithm from the trust config. + +**Task:** fn-111.2b "Add ECDSA P-256 support to auths-verifier" — placed between fn-111.2 (port trait) and fn-111.5 (Rekor adapter). 
diff --git a/docs/errors/AUTHS-E7001.md b/docs/errors/AUTHS-E7001.md deleted file mode 100644 index f84efa9e..00000000 --- a/docs/errors/AUTHS-E7001.md +++ /dev/null @@ -1,8 +0,0 @@ -# AUTHS-E7001 - -**Crate:** `auths-sdk` -**Type:** `CiError::EnvironmentNotDetected` - -## Message - -CI environment not detected diff --git a/docs/errors/AUTHS-E7002.md b/docs/errors/AUTHS-E7002.md deleted file mode 100644 index f8bb8a0f..00000000 --- a/docs/errors/AUTHS-E7002.md +++ /dev/null @@ -1,8 +0,0 @@ -# AUTHS-E7002 - -**Crate:** `auths-sdk` -**Type:** `CiError::IdentityBundleInvalid` - -## Message - -identity bundle invalid at {path}: {reason} diff --git a/docs/errors/AUTHS-E7003.md b/docs/errors/AUTHS-E7003.md deleted file mode 100644 index de116029..00000000 --- a/docs/errors/AUTHS-E7003.md +++ /dev/null @@ -1,12 +0,0 @@ -# AUTHS-E7003 - -**Crate:** `auths-sdk` -**Type:** `CiError::NoArtifacts` - -## Message - -no artifacts to sign - -## Suggestion - -Check your glob pattern matches at least one file diff --git a/docs/errors/AUTHS-E7004.md b/docs/errors/AUTHS-E7004.md deleted file mode 100644 index 1ca8c222..00000000 --- a/docs/errors/AUTHS-E7004.md +++ /dev/null @@ -1,8 +0,0 @@ -# AUTHS-E7004 - -**Crate:** `auths-sdk` -**Type:** `CiError::CollectionDirFailed` - -## Message - -failed to create attestation directory {path}: {reason} diff --git a/docs/errors/AUTHS-E7005.md b/docs/errors/AUTHS-E7005.md deleted file mode 100644 index 70ac2b07..00000000 --- a/docs/errors/AUTHS-E7005.md +++ /dev/null @@ -1,8 +0,0 @@ -# AUTHS-E7005 - -**Crate:** `auths-sdk` -**Type:** `CiError::CollectionCopyFailed` - -## Message - -failed to collect attestation {src} → {dst}: {reason} diff --git a/docs/errors/index.md b/docs/errors/index.md index 651f64aa..dc5be64a 100644 --- a/docs/errors/index.md +++ b/docs/errors/index.md @@ -320,11 +320,7 @@ All error codes emitted by the Auths CLI and libraries. 
Run `auths error ` | [AUTHS-E6002](AUTHS-E6002.md) | `auths-sdk` | `AuthChallengeError::EmptyDomain` | domain must not be empty | | [AUTHS-E6003](AUTHS-E6003.md) | `auths-sdk` | `AuthChallengeError::Canonicalization` | canonical JSON serialization failed: {0} | | [AUTHS-E6004](AUTHS-E6004.md) | `auths-sdk` | `AuthChallengeError::SigningFailed` | signing failed: {0} | -| [AUTHS-E7001](AUTHS-E7001.md) | `auths-sdk` | `CiError::EnvironmentNotDetected` | CI environment not detected | -| [AUTHS-E7002](AUTHS-E7002.md) | `auths-sdk` | `CiError::IdentityBundleInvalid` | identity bundle invalid at {path}: {reason} | -| [AUTHS-E7003](AUTHS-E7003.md) | `auths-sdk` | `CiError::NoArtifacts` | no artifacts to sign | -| [AUTHS-E7004](AUTHS-E7004.md) | `auths-sdk` | `CiError::CollectionDirFailed` | failed to create attestation directory {path}: {reason} | -| [AUTHS-E7005](AUTHS-E7005.md) | `auths-sdk` | `CiError::CollectionCopyFailed` | failed to collect attestation {src} → {dst}: {reason} | + | [AUTHS-E8001](AUTHS-E8001.md) | `auths-oidc-port` | `OidcError::JwtDecode` | JWT decode failed: {0} | | [AUTHS-E8002](AUTHS-E8002.md) | `auths-oidc-port` | `OidcError::SignatureVerificationFailed` | signature verification failed | | [AUTHS-E8003](AUTHS-E8003.md) | `auths-oidc-port` | `OidcError::ClaimsValidationFailed` | claim validation failed - {claim}: {reason} | diff --git a/docs/guides/platforms/ci-cd.md b/docs/guides/platforms/ci-cd.md index bb08362f..289b5418 100644 --- a/docs/guides/platforms/ci-cd.md +++ b/docs/guides/platforms/ci-cd.md @@ -1,240 +1,94 @@ -# CI/CD Integration +# CI/CD Integration: Ephemeral Artifact Signing -Sign every commit. Verify every release. Auths uses a limited-capability device key model so your root identity never leaves your machine — the CI runner only ever holds a scoped, revocable token. +> Sigstore made artifact signing easy by making Linux Foundation infrastructure the trust root. 
Auths makes it sovereign by making the maintainer the trust root — your commit signature, rotated through KERI, anchors the whole chain. No CA, no OIDC dependency, no central transparency log required. Works offline, works air-gapped, works on your own CI. ---- +See [how this compares to Sigstore](../../design/sigstore-comparison.md) for a detailed tradeoff analysis. -## GitHub Actions +## How It Works -The fastest path. Two actions, one secret, zero ongoing maintenance. +1. **You sign commits** with your device-bound Ed25519 key (hardware keychain, Touch ID). This happens automatically after `auths init`. +2. **CI verifies** the tagged commit is signed by a maintainer in `.auths/allowed_signers`. +3. **CI generates a throwaway key**, signs each artifact, and discards the key. No secrets needed. +4. **Consumers verify**: artifact hash → ephemeral signature → commit SHA → maintainer's commit signature. -### Setup (one-time) +Trust derives from your commit signature, not from a CI secret. The ephemeral key dies with the CI run. -```bash -auths ci setup -``` - -This creates a scoped CI device key, links it to your identity with `sign_release` capability, and sets `AUTHS_CI_TOKEN` on your repo via the `gh` CLI. If `gh` isn't authenticated, it prints the token value to paste in manually under **Repository → Settings → Secrets → Actions**. - -### Sign commits - -Add to any workflow that pushes to `main`: +## Setup -```yaml -- uses: auths-dev/sign@v1 - with: - token: ${{ secrets.AUTHS_CI_TOKEN }} - commits: 'HEAD~1..HEAD' -``` - -### Verify commits +### Prerequisites -Add to every pull request and push: - -```yaml -- uses: auths-dev/verify@v1 - with: - fail-on-unsigned: true - post-pr-comment: 'true' - github-token: ${{ secrets.GITHUB_TOKEN }} -``` - -No token needed — the action reads `.auths/allowed_signers` from your repo. 
- -### Show it off - -Once both workflows are running, add badges to your README: - -```markdown -[![Verify Commits](https://github.com///actions/workflows/verify-commits.yml/badge.svg)](https://github.com///actions/workflows/verify-commits.yml?query=branch%3Amain+event%3Apush) -[![Sign Commits](https://github.com///actions/workflows/sign-commits.yml/badge.svg)](https://github.com///actions/workflows/sign-commits.yml?query=branch%3Amain) +```bash +auths init # creates your signing identity +auths git setup # configures git to sign commits with your device key ``` -### Sign release artifacts +### GitHub Actions -For releases triggered by a tag push, combine signing with your existing build step: +Add a verify gate and ephemeral signing to your release workflow: ```yaml name: Release on: push: - tags: ['v*'] + tags: ["v*"] permissions: contents: write jobs: - release: + verify: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + - uses: auths-dev/verify@v1 + + build: + needs: verify runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - name: Build - run: cargo build --release && tar czf myproject.tar.gz -C target/release myproject + run: cargo build --release - name: Sign artifact - uses: auths-dev/sign@v1 - with: - token: ${{ secrets.AUTHS_CI_TOKEN }} - files: 'myproject.tar.gz' - verify: true + run: | + auths artifact sign target/release/my-binary \ + --ci \ + --commit ${{ github.sha }} - - name: Upload release - uses: softprops/action-gh-release@v2 + - name: Upload + uses: actions/upload-artifact@v4 with: - files: | - myproject.tar.gz - myproject.tar.gz.auths.json -``` - -Signing produces `myproject.tar.gz.auths.json` alongside the artifact — ship both so downstream consumers can verify. 
- -### Rotating or revoking access - -To refresh the token (new TTL, updated identity snapshot): - -```bash -auths ci rotate -``` - -To revoke a CI device entirely: - -```bash -auths device revoke --device --key -``` - -After revocation the CI key can no longer produce valid attestations, even if the secret is still in GitHub. - ---- - -## Manual setup (other CI platforms) - -Running GitLab CI, CircleCI, Bitbucket Pipelines, or your own runner? The same `AUTHS_CI_TOKEN` approach works anywhere you can set an environment variable and install a binary. - -### Install the CLI - -```yaml -# Example: generic shell step -- name: Install auths - run: | - curl -fsSL https://get.auths.dev | sh - echo "$HOME/.auths/bin" >> $GITHUB_PATH # or equivalent PATH export -``` - -### Sign commits - -Configure Git to use Auths as the signing program, then any `git commit` in the workflow is signed: - -```yaml -- name: Configure Git signing - run: | - git config --global gpg.format ssh - git config --global gpg.ssh.program auths-sign - git config --global commit.gpgsign true - git config --global user.signingkey "$(auths key export --alias ci-release-device --format pub)" -``` - -### Sign artifacts - -```bash -auths sign myproject.tar.gz -# → creates myproject.tar.gz.auths.json -``` - -### Verify commits - -For stateless verification (no access to the identity repo), export a bundle once locally and commit it: - -```bash -# Local — one-time export -auths id export-bundle \ - --alias main \ - --output .auths/identity-bundle.json \ - --max-age-secs 7776000 # 90 days -git add .auths/identity-bundle.json && git commit -m "add identity bundle" -``` - -Then in CI: - -```bash -auths verify HEAD --identity-bundle .auths/identity-bundle.json -``` - -To verify a PR range: - -```bash -auths verify main..HEAD --identity-bundle .auths/identity-bundle.json + path: | + target/release/my-binary + target/release/my-binary.auths.json ``` -### Verify artifacts +No `AUTHS_CI_TOKEN`. 
No secrets for signing. The `--ci` flag generates a throwaway key, signs, and discards it. -Pass the artifact file directly — Auths finds the `.auths.json` sidecar automatically: +## Verification ```bash -auths verify myproject.tar.gz --signer-key -# or by DID -auths verify myproject.tar.gz --signer did:keri:EaBcDeFg... -``` - -Override the sidecar path with `--signature` if needed: +# Clone the repo (needed for commit signature verification) +git clone https://github.com/owner/repo +cd repo -```bash -auths verify myproject.tar.gz --signature /path/to/custom.auths.json --signer-key +# Verify an artifact +auths artifact verify ./my-binary ``` -### Machine-readable output +The verify command: +1. Checks the artifact hash against the attestation +2. Verifies the ephemeral signature +3. Checks that the commit referenced in the attestation is signed by a trusted maintainer -Add `--json` to any verify command for structured output your pipeline can parse: +## Security Model -```bash -auths verify HEAD --identity-bundle .auths/identity-bundle.json --json -``` +See [Ephemeral Signing Threat Model](../../design/ephemeral-signing-threat-model.md) for the full analysis. -```json -{ - "commit": "abc1234...", - "valid": true, - "ssh_valid": true, - "chain_valid": true, - "signer": "did:keri:EaBcDeFg..." -} -``` - -Exit codes: `0` valid · `1` invalid/unsigned · `2` error. - ---- - -## GitHub Actions OIDC cross-reference - -For higher assurance, combine Auths attestation chains with GitHub Actions OIDC tokens. This creates a two-factor proof: the request must originate from both a valid KERI identity holder and a specific GitHub Actions workflow. 
- -```yaml -permissions: - id-token: write - contents: read - -steps: - - name: Get GitHub OIDC token - id: github-oidc - uses: actions/github-script@v7 - with: - script: | - const token = await core.getIDToken('auths-bridge'); - core.setOutput('token', token); - - - name: Exchange for bridge credentials - env: - BRIDGE_URL: https://your-bridge.example.com - GITHUB_OIDC_TOKEN: ${{ steps.github-oidc.outputs.token }} - run: | - JWT=$(curl -s -X POST "$BRIDGE_URL/token" \ - -H "Content-Type: application/json" \ - -d "{ - \"attestation_chain\": $ATTESTATION_CHAIN, - \"root_public_key\": \"$ROOT_PK\", - \"github_oidc_token\": \"$GITHUB_OIDC_TOKEN\", - \"github_actor\": \"$GITHUB_ACTOR\" - }" | jq -r '.access_token') -``` +**What's protected:** If a CI runner is compromised, the attacker cannot forge the maintainer's commit signature. If they use a real signed commit SHA but build different code, the maintainer can detect unexpected attestations. -The bridge verifies the KERI attestation chain, validates the GitHub OIDC token against GitHub's JWKS endpoint, and cross-references the GitHub `actor` claim against the expected KERI identity. If both pass, it issues a bridge JWT. +**What's not protected:** A fully compromised CI runner can build malicious artifacts from legitimate source. This is true of all CI-based signing (including Sigstore). Only reproducible builds can close this gap. diff --git a/docs/guides/transparency-logs.md b/docs/guides/transparency-logs.md new file mode 100644 index 00000000..bf9a4d6b --- /dev/null +++ b/docs/guides/transparency-logs.md @@ -0,0 +1,80 @@ +# Transparency Logs + +Auths uses transparency logs to create an immutable, publicly-auditable record of every artifact attestation. When you sign an artifact with `auths artifact sign --ci`, the attestation is submitted to a transparency log and an inclusion proof is embedded in the `.auths.json` file. Verifiers check this proof to confirm the attestation was logged. 
+ +## Default: Sigstore Rekor + +Out of the box, auths submits attestations to [Sigstore's Rekor](https://rekor.sigstore.dev), a free public transparency log operated by the Linux Foundation. No setup required. + +```bash +# Signs and submits to Rekor automatically +auths artifact sign release.tar.gz --ci --commit $(git rev-parse HEAD) +``` + +## Choosing a Backend + +| Backend | When to use | Ops burden | Privacy | +|---|---|---|---| +| **Public Rekor** (default) | Open source projects, shared monitoring | None | Attestations are public | +| **Private Rekor** | Enterprise, private repos | You run it | Your data, your infra | +| **--allow-unlogged** | Local testing only | None | No transparency guarantees | + +## Using a Private Rekor Instance + +Deploy your own Rekor instance, then register it: + +```bash +# Add your private Rekor to trust config +auths trust log add \ + --id my-rekor \ + --key <base64-log-public-key> \ + --origin "rekor.example.com - <tree-id>" \ + --url https://rekor.example.com + +# Sign using your private instance +auths artifact sign release.tar.gz --ci --commit HEAD --log my-rekor +``` + +## Local Testing Without a Log + +For development and testing, skip the transparency log: + +```bash +auths artifact sign test.txt --ci --commit HEAD --allow-unlogged +``` + +This produces an attestation without transparency data. Verifiers reject unlogged attestations by default: + +```bash +# This fails: +auths artifact verify test.txt + +# This succeeds with a warning: +auths artifact verify test.txt --allow-unlogged +``` + +## Trust Configuration + +Trust config lives at `~/.auths/trust_config.json`. If the file doesn't exist, compiled-in defaults are used (Rekor production shard).
+ +```json +{ + "default_log": "sigstore-rekor", + "logs": { + "sigstore-rekor": { + "log_public_key": "0000000000000000000000000000000000000000000000000000000000000000", + "log_origin": "rekor.sigstore.dev - 1193050959916656506", + "witnesses": [], + "signature_algorithm": "ecdsa_p256" + } + } +} +``` + +## Tradeoffs + +**Public Rekor:** You get shared monitoring (the Sigstore community watches for log misbehavior), zero operational burden, and broad ecosystem compatibility. You accept a dependency on Sigstore infrastructure and the Linux Foundation's governance. + +**Private Rekor:** Full control over your log, your data stays on your infrastructure, but you're responsible for uptime, monitoring, and key management. + +**No log (--allow-unlogged):** No transparency guarantees. Suitable for isolated development environments. Not suitable for production artifacts. diff --git a/docs/plans/2026-04-08-hardened-ci-signing.md b/docs/plans/2026-04-08-hardened-ci-signing.md new file mode 100644 index 00000000..c6b67740 --- /dev/null +++ b/docs/plans/2026-04-08-hardened-ci-signing.md @@ -0,0 +1,1030 @@ +> **SUPERSEDED:** This plan described hardening CI tokens. The project has since +> adopted ephemeral signing with no CI tokens. See the transparency log +> architecture at `docs/design/transparency-log-port.md`. + +# Hardened CI Signing: Make AUTHS_CI_TOKEN Worthless If Stolen + +> **For Claude:** REQUIRED SUB-SKILL: Use superpowers:executing-plans to implement this plan task-by-task. + +**Goal:** Keep the current CI signing flow (push tag → CI signs artifacts automatically) but make a stolen AUTHS_CI_TOKEN useless to an attacker through short TTL, workflow pinning, and repo scoping. + +**Architecture:** Three layers of defense, each independently useful: +1. **Short TTL** — Token expires in hours, not a year. Auto-rotated by a scheduled workflow. +2. **Workflow pinning** — Token is bound to a specific workflow file hash. Modified workflows can't sign. +3. 
**Repo scoping** — Attestations are bound to a specific repo. Can't be replayed elsewhere. + +If an attacker steals AUTHS_CI_TOKEN via a compromised Action (LiteLLM vector), the token is either already expired, refuses to sign because the workflow hash doesn't match, or produces attestations that are scoped to the repo and traceable. + +**Tech Stack:** Rust (auths-sdk CiToken, auths-cli ci commands), TypeScript (sign action), GitHub Actions (scheduled rotation) + +--- + +## Design + +### Attack surface analysis + +The LiteLLM attacker: compromised a GitHub Action → exfiltrated CI secrets from runner env → used them hours/days later from an external machine. + +Each defense layer blocks a different part of this: + +| Defense | What it blocks | Attacker must... | +|---------|---------------|------------------| +| **Short TTL (4h)** | Using the token hours later | Use it within the CI run window | +| **Workflow pin** | Running a modified workflow | Not change any workflow file in the repo | +| **Repo scope** | Signing for a different project | Stay within the same repo | +| **Nonce (future)** | Replay / multi-use | Race the legitimate CI run | + +### What changes + +| Component | Before | After | +|-----------|--------|-------| +| `CiToken` struct | version 1, no scoping | version 2, adds `workflow_hash`, `repo`, `max_uses` | +| `auths ci setup` | default TTL: 1 year | default TTL: 4 hours | +| `auths ci rotate` | manual command | also runs via scheduled GitHub Action | +| Sign action (`token.ts`) | checks TTL only | checks TTL + workflow hash + repo | +| Attestation | no CI metadata | includes `ci_binding` with workflow hash + run ID + repo | + +### What does NOT change + +- The `auths-dev/sign@v1` action interface (same inputs/outputs) +- The `.auths.json` v1 format (attestations still self-signed) +- The release workflow structure (push tag → CI builds → CI signs) +- The `auths artifact verify` command +- Local signing (`auths artifact sign`) — unaffected + +### 
Backward compatibility + +CiToken v2 adds optional fields. The sign action checks the version: +- v1 tokens work as before (no pinning, no scoping) but emit a deprecation warning +- v2 tokens enforce all hardening checks + +--- + +## Task 1: Add v2 fields to `CiToken` + +**Files:** +- Modify: `crates/auths-sdk/src/domains/ci/token.rs` +- Test: `crates/auths-sdk/tests/` (existing or new test file) + +**Step 1: Write the failing test** + +Add to `crates/auths-sdk/tests/cases/ci_token.rs` (create if needed): + +```rust +use auths_sdk::domains::ci::token::CiToken; + +#[test] +fn v2_token_roundtrip() { + let token = CiToken::new_v2( + "passphrase".into(), + "keychain_b64".into(), + "repo_b64".into(), + serde_json::json!({}), + "2026-04-08T00:00:00Z".into(), + 14400, // 4 hours + Some("abc123def456".into()), // workflow_hash + Some("auths-dev/auths".into()), // repo + Some(4), // max_uses (4 platform builds) + ); + + assert_eq!(token.version, 2); + assert_eq!(token.workflow_hash.as_deref(), Some("abc123def456")); + assert_eq!(token.repo.as_deref(), Some("auths-dev/auths")); + assert_eq!(token.max_uses, Some(4)); + + let json = token.to_json().unwrap(); + let parsed = CiToken::from_json(&json).unwrap(); + assert_eq!(parsed.version, 2); + assert_eq!(parsed.workflow_hash, token.workflow_hash); + assert_eq!(parsed.repo, token.repo); + assert_eq!(parsed.max_uses, token.max_uses); +} + +#[test] +fn v1_token_still_parses() { + let v1_json = r#"{ + "version": 1, + "passphrase": "test", + "keychain": "abc", + "identity_repo": "def", + "verify_bundle": {}, + "created_at": "2026-04-08T00:00:00Z", + "max_valid_for_secs": 31536000 + }"#; + let token = CiToken::from_json(v1_json).unwrap(); + assert_eq!(token.version, 1); + assert!(token.workflow_hash.is_none()); + assert!(token.repo.is_none()); + assert!(token.max_uses.is_none()); +} +``` + +**Step 2: Run test to verify it fails** + +Run: `cargo nextest run -p auths_sdk -E 'test(ci_token)'` +Expected: FAIL — `new_v2` and v2 fields 
don't exist
+
+**Step 3: Write the implementation**
+
+Modify `crates/auths-sdk/src/domains/ci/token.rs`:
+
+```rust
+/// Current token format version.
+const CURRENT_VERSION: u32 = 2;
+
+/// Supported token versions for deserialization.
+const SUPPORTED_VERSIONS: &[u32] = &[1, 2];
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct CiToken {
+    pub version: u32,
+    pub passphrase: String,
+    pub keychain: String,
+    pub identity_repo: String,
+    pub verify_bundle: serde_json::Value,
+    pub created_at: String,
+    pub max_valid_for_secs: u64,
+
+    // --- v2 fields (optional for backward compat with v1) ---
+
+    /// SHA256 hash of the workflow file that is authorized to use this token.
+    /// If set, the sign action computes the hash of the running workflow
+    /// and refuses to sign if it doesn't match.
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub workflow_hash: Option<String>,
+
+    /// Repository this token is scoped to (e.g., "auths-dev/auths").
+    /// If set, attestations include this binding and verification
+    /// can reject attestations from mismatched repos.
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub repo: Option<String>,
+
+    /// Maximum number of signing operations allowed with this token.
+    /// The sign action tracks usage and refuses to sign beyond this limit.
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub max_uses: Option<u32>,
+}
+```
+
+Update `new()` to keep backward compat, add `new_v2()`:
+
+```rust
+impl CiToken {
+    /// Create a v1-compatible token (no hardening).
+    pub fn new(
+        passphrase: String,
+        keychain: String,
+        identity_repo: String,
+        verify_bundle: serde_json::Value,
+        created_at: String,
+        max_valid_for_secs: u64,
+    ) -> Self {
+        Self {
+            version: 1, // keep v1 for backward compat when called via old path
+            passphrase,
+            keychain,
+            identity_repo,
+            verify_bundle,
+            created_at,
+            max_valid_for_secs,
+            workflow_hash: None,
+            repo: None,
+            max_uses: None,
+        }
+    }
+
+    /// Create a v2 hardened token with workflow pinning, repo scoping, and use limits.
+    ///
+    /// Args:
+    /// * `workflow_hash`: SHA256 of the authorized workflow file.
+    /// * `repo`: Repository identifier (e.g., "owner/repo").
+    /// * `max_uses`: Maximum signing operations (e.g., 4 for 4 platform builds).
+    ///
+    /// Usage:
+    /// ```ignore
+    /// let token = CiToken::new_v2(pass, kc, repo, bundle, now, 14400,
+    ///     Some(wf_hash), Some("owner/repo".into()), Some(4));
+    /// ```
+    pub fn new_v2(
+        passphrase: String,
+        keychain: String,
+        identity_repo: String,
+        verify_bundle: serde_json::Value,
+        created_at: String,
+        max_valid_for_secs: u64,
+        workflow_hash: Option<String>,
+        repo: Option<String>,
+        max_uses: Option<u32>,
+    ) -> Self {
+        Self {
+            version: CURRENT_VERSION,
+            passphrase,
+            keychain,
+            identity_repo,
+            verify_bundle,
+            created_at,
+            max_valid_for_secs,
+            workflow_hash,
+            repo,
+            max_uses,
+        }
+    }
+
+    pub fn from_json(json: &str) -> Result<Self, CiError> {
+        let token: Self =
+            serde_json::from_str(json).map_err(|e| CiError::TokenDeserializationFailed {
+                reason: e.to_string(),
+            })?;
+
+        if !SUPPORTED_VERSIONS.contains(&token.version) {
+            return Err(CiError::TokenVersionUnsupported {
+                version: token.version,
+            });
+        }
+
+        Ok(token)
+    }
+
+    // ...
rest of existing methods unchanged
+}
+```
+
+**Step 4: Run test to verify it passes**
+
+Run: `cargo nextest run -p auths_sdk -E 'test(ci_token)'`
+Expected: PASS
+
+**Step 5: Commit**
+
+```bash
+git add crates/auths-sdk/src/domains/ci/token.rs
+git commit -m "feat: add CiToken v2 with workflow_hash, repo, and max_uses fields"
+```
+
+---
+
+## Task 2: Update `auths ci setup` to generate v2 tokens
+
+**Files:**
+- Modify: `crates/auths-cli/src/commands/ci/setup.rs`
+- Modify: `crates/auths-cli/src/commands/ci/mod.rs`
+
+**Step 1: Add `--workflow` and `--max-uses` flags to `ci setup`**
+
+In `crates/auths-cli/src/commands/ci/mod.rs`, update the `Setup` variant:
+
+```rust
+    Setup {
+        #[arg(long)]
+        repo: Option<String>,
+
+        /// Max age for the token in seconds (default: 4 hours).
+        #[arg(long, default_value = "14400")]
+        max_age_secs: u64,
+
+        #[arg(long)]
+        manual_passphrase: bool,
+
+        /// Path to the workflow file to pin (e.g., .github/workflows/release.yml).
+        /// The SHA256 hash of this file is baked into the token.
+        /// If the workflow is modified, signing will be refused.
+        #[arg(long)]
+        workflow: Option<String>,
+
+        /// Maximum number of signing operations per token (e.g., 4 for 4 platform builds).
+        #[arg(long)]
+        max_uses: Option<u32>,
+    },
+```
+
+Note the default TTL change: `31536000` (1 year) → `14400` (4 hours).
+ +**Step 2: Update `run_setup` to use `new_v2`** + +In `crates/auths-cli/src/commands/ci/setup.rs`, modify the token assembly (around line 183): + +```rust + // Compute workflow hash (if pinning requested) + let workflow_hash = if let Some(ref wf_path) = workflow_path { + let wf_content = std::fs::read(wf_path) + .with_context(|| format!("Failed to read workflow file: {}", wf_path))?; + let hash = sha2_hex(&wf_content); + println!("\x1b[0;32m\u{2713}\x1b[0m Workflow pinned: {} (sha256:{}...)", wf_path, &hash[..12]); + Some(hash) + } else { + None + }; + + // Detect repo for scoping + let repo_id = match &repo_override { + Some(url) => Some(Forge::from_url(url).repo_identifier()), + None => git_stdout(&["remote", "get-url", "origin"]) + .ok() + .map(|url| Forge::from_url(&url).repo_identifier()), + }; + + let token = CiToken::new_v2( + ci_pass.to_string(), + keychain_b64, + identity_repo_b64, + verify_bundle_json, + now.to_rfc3339(), + max_age_secs, + workflow_hash, + repo_id.clone(), + max_uses, + ); +``` + +Add the `sha2_hex` helper (or use an existing SHA256 function from `auths-crypto`): + +```rust +fn sha2_hex(data: &[u8]) -> String { + use sha2::{Sha256, Digest}; + let mut hasher = Sha256::new(); + hasher.update(data); + hex::encode(hasher.finalize()) +} +``` + +**Step 3: Update the `execute` match arm** + +In `mod.rs`, pass the new fields through: + +```rust +CiSubcommand::Setup { + repo, + max_age_secs, + manual_passphrase, + workflow, + max_uses, +} => setup::run_setup( + repo.clone(), + *max_age_secs, + !manual_passphrase, + pp, + &ctx.env_config, + &repo_path, + workflow.clone(), + *max_uses, +), +``` + +Update `run_setup` signature to accept the new params. 
+
+**Step 4: Build and verify**
+
+Run: `cargo build --package auths_cli`
+Expected: Compiles
+
+**Step 5: Commit**
+
+```bash
+git add crates/auths-cli/src/commands/ci/setup.rs crates/auths-cli/src/commands/ci/mod.rs
+git commit -m "feat: auths ci setup generates v2 hardened tokens (4h TTL, workflow pin, repo scope)"
+```
+
+---
+
+## Task 3: Enforce v2 checks in the sign action
+
+**Files:**
+- Modify: `/Users/bordumb/workspace/repositories/auths-base/sign/src/token.ts`
+- Modify: `/Users/bordumb/workspace/repositories/auths-base/sign/src/main.ts`
+- Create: `/Users/bordumb/workspace/repositories/auths-base/sign/src/__tests__/hardening.test.ts`
+
+This is where the hardening actually enforces. The sign action checks the v2 fields before allowing signing.
+
+**Step 1: Write the failing test**
+
+Create `sign/src/__tests__/hardening.test.ts`:
+
+```typescript
+import * as crypto from 'crypto';
+import * as fs from 'fs';
+
+describe('CiToken v2 hardening', () => {
+
+  it('rejects token with wrong workflow hash', () => {
+    const token = {
+      version: 2,
+      passphrase: 'test',
+      keychain: 'abc',
+      identity_repo: 'def',
+      verify_bundle: {},
+      created_at: new Date().toISOString(),
+      max_valid_for_secs: 14400,
+      workflow_hash: 'aaaa', // pinned hash
+      repo: 'auths-dev/auths',
+    };
+
+    const actualWorkflowHash = 'bbbb'; // different hash
+    expect(() => validateV2Constraints(token, actualWorkflowHash, 'auths-dev/auths'))
+      .toThrow('Workflow hash mismatch');
+  });
+
+  it('rejects token with wrong repo', () => {
+    const token = {
+      version: 2,
+      passphrase: 'test',
+      keychain: 'abc',
+      identity_repo: 'def',
+      verify_bundle: {},
+      created_at: new Date().toISOString(),
+      max_valid_for_secs: 14400,
+      repo: 'auths-dev/auths',
+    };
+
+    expect(() => validateV2Constraints(token, undefined, 'evil-org/auths'))
+      .toThrow('Repository mismatch');
+  });
+
+  it('accepts valid v2 token', () => {
+    const token = {
+      version: 2,
+      passphrase: 'test',
+      keychain: 'abc',
+      identity_repo: 'def',
+      verify_bundle: {},
+      created_at: new Date().toISOString(),
+      max_valid_for_secs: 14400,
+      workflow_hash: 'abc123',
+      repo: 'auths-dev/auths',
+    };
+
+    expect(() => validateV2Constraints(token, 'abc123', 'auths-dev/auths'))
+      .not.toThrow();
+  });
+
+  it('accepts v1 token with deprecation warning', () => {
+    const token = {
+      version: 1,
+      passphrase: 'test',
+      keychain: 'abc',
+      identity_repo: 'def',
+      verify_bundle: {},
+      created_at: new Date().toISOString(),
+      max_valid_for_secs: 31536000,
+    };
+
+    // v1 tokens skip v2 checks
+    expect(() => validateV2Constraints(token, undefined, undefined))
+      .not.toThrow();
+  });
+});
+```
+
+**Step 2: Add validation function to `token.ts`**
+
+Add to `/Users/bordumb/workspace/repositories/auths-base/sign/src/token.ts`:
+
+```typescript
+interface CiTokenV2 extends CiToken {
+  workflow_hash?: string;
+  repo?: string;
+  max_uses?: number;
+}
+
+/**
+ * Validate v2 hardening constraints.
+ * Throws if any constraint is violated.
+ */
+export function validateV2Constraints(
+  token: CiTokenV2,
+  actualWorkflowHash: string | undefined,
+  actualRepo: string | undefined,
+): void {
+  if (token.version < 2) {
+    core.warning(
+      'Using CiToken v1 (unhardened). Upgrade with: auths ci setup --workflow .github/workflows/release.yml'
+    );
+    return;
+  }
+
+  // Workflow hash check
+  if (token.workflow_hash) {
+    if (!actualWorkflowHash) {
+      throw new Error(
+        'CiToken v2 requires workflow hash validation but GITHUB_WORKFLOW_REF is not available. ' +
+        'Ensure this runs in GitHub Actions.'
+      );
+    }
+    if (token.workflow_hash !== actualWorkflowHash) {
+      throw new Error(
+        `Workflow hash mismatch: token pinned to ${token.workflow_hash.substring(0, 12)}..., ` +
+        `but running workflow hashes to ${actualWorkflowHash.substring(0, 12)}... ` +
+        `This could indicate a tampered workflow. 
Rotate with: auths ci rotate` + ); + } + core.info('✓ Workflow hash verified'); + } + + // Repo scope check + if (token.repo) { + if (!actualRepo) { + throw new Error( + 'CiToken v2 requires repo validation but GITHUB_REPOSITORY is not available.' + ); + } + if (token.repo !== actualRepo) { + throw new Error( + `Repository mismatch: token scoped to ${token.repo}, ` + + `but running in ${actualRepo}. ` + + `This token cannot be used in this repository.` + ); + } + core.info('✓ Repository scope verified'); + } +} + +/** + * Compute SHA256 of a workflow file for pinning verification. + */ +export function computeWorkflowHash(): string | undefined { + const workflowRef = process.env.GITHUB_WORKFLOW_REF; + if (!workflowRef) return undefined; + + // GITHUB_WORKFLOW_REF is like "owner/repo/.github/workflows/release.yml@refs/tags/v1" + // Extract the workflow path + const match = workflowRef.match(/^[^/]+\/[^/]+\/(.+)@/); + if (!match) return undefined; + + const workflowPath = match[1]; + try { + const content = fs.readFileSync(workflowPath); + return crypto.createHash('sha256').update(content).digest('hex'); + } catch { + core.warning(`Could not read workflow file at ${workflowPath} for hash verification`); + return undefined; + } +} +``` + +**Step 3: Call validation in `main.ts`** + +In `/Users/bordumb/workspace/repositories/auths-base/sign/src/main.ts`, after credentials are resolved (after line 22), add: + +```typescript + // Enforce v2 hardening checks + const tokenInput = core.getInput('token'); + if (tokenInput) { + try { + const tokenParsed = JSON.parse(tokenInput); + const workflowHash = computeWorkflowHash(); + const repo = process.env.GITHUB_REPOSITORY; + validateV2Constraints(tokenParsed, workflowHash, repo); + } catch (e) { + if (e instanceof SyntaxError) { + // Not JSON — skip v2 checks (individual inputs mode) + } else { + throw e; + } + } + } +``` + +**Step 4: Run tests** + +Run: `cd /Users/bordumb/workspace/repositories/auths-base/sign && npm test` 
+Expected: PASS + +**Step 5: Commit** + +```bash +git add sign/src/token.ts sign/src/main.ts sign/src/__tests__/hardening.test.ts +git commit -m "feat: enforce v2 hardening checks in sign action (workflow pin, repo scope)" +``` + +--- + +## Task 4: Add `ci_binding` to attestations + +**Files:** +- Modify: `crates/auths-verifier/src/core.rs` (add `CiBinding` struct) +- Modify: `crates/auths-sdk/src/domains/signing/service.rs` (include CI metadata in attestation) + +The attestation should record WHERE it was signed so that verification can detect anomalies (e.g., a CI-scoped key signing from a non-CI environment). + +**Step 1: Write the failing test** + +Add to `crates/auths-verifier/tests/cases/release_provenance.rs` (or new file): + +```rust +use auths_verifier::core::CiBinding; + +#[test] +fn ci_binding_roundtrip() { + let binding = CiBinding { + platform: "github-actions".into(), + repo: Some("auths-dev/auths".into()), + workflow: Some(".github/workflows/release.yml".into()), + workflow_hash: Some("abc123".into()), + run_id: Some("99".into()), + }; + + let json = serde_json::to_string(&binding).unwrap(); + let parsed: CiBinding = serde_json::from_str(&json).unwrap(); + assert_eq!(parsed.repo.as_deref(), Some("auths-dev/auths")); +} +``` + +**Step 2: Run test to verify it fails** + +Run: `cargo nextest run -p auths_verifier -E 'test(ci_binding)'` +Expected: FAIL + +**Step 3: Add `CiBinding` struct** + +In `crates/auths-verifier/src/core.rs`: + +```rust +/// CI/CD environment binding recorded in attestations. +/// +/// Captures where and how an artifact was signed in CI. +/// Used for audit trails and anomaly detection. 
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
+pub struct CiBinding {
+    pub platform: String,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub repo: Option<String>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub workflow: Option<String>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub workflow_hash: Option<String>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub run_id: Option<String>,
+}
+```
+
+Add `ci_binding` field to the `Attestation` struct (optional, after `environment_claim`):
+
+```rust
+    /// CI/CD environment binding — records where the signing happened.
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub ci_binding: Option<CiBinding>,
+```
+
+Re-export in `lib.rs`:
+```rust
+pub use core::CiBinding;
+```
+
+**Step 4: Run test to verify it passes**
+
+Run: `cargo nextest run -p auths_verifier -E 'test(ci_binding)'`
+Expected: PASS
+
+**Step 5: Commit**
+
+```bash
+git add crates/auths-verifier/src/core.rs crates/auths-verifier/src/lib.rs
+git commit -m "feat: add CiBinding struct for CI environment audit trail in attestations"
+```
+
+---
+
+## Task 5: Pass CI metadata through the sign action into attestations
+
+**Files:**
+- Modify: `/Users/bordumb/workspace/repositories/auths-base/sign/src/main.ts`
+
+The sign action already sets environment variables (`authsEnv` on line 41-45). Add CI binding metadata so the CLI includes it in the attestation.
+ +**Step 1: Add CI env vars to the signing environment** + +In `sign/src/main.ts`, extend the `authsEnv` object (line 41): + +```typescript + const authsEnv = { + ...process.env, + AUTHS_PASSPHRASE: credentials.passphrase, + AUTHS_KEYCHAIN_BACKEND: 'file', + AUTHS_KEYCHAIN_FILE: credentials.keychainPath, + // v2 CI binding metadata + AUTHS_CI_PLATFORM: 'github-actions', + AUTHS_CI_REPO: process.env.GITHUB_REPOSITORY || '', + AUTHS_CI_WORKFLOW: process.env.GITHUB_WORKFLOW || '', + AUTHS_CI_WORKFLOW_HASH: computeWorkflowHash() || '', + AUTHS_CI_RUN_ID: process.env.GITHUB_RUN_ID || '', + }; +``` + +**Step 2: Read CI env vars in the SDK signing service** + +In `crates/auths-sdk/src/domains/signing/service.rs`, after creating the attestation (around line 534), check for CI env vars and populate `ci_binding`: + +```rust + // Populate CI binding from environment (set by auths-dev/sign action) + #[allow(clippy::disallowed_methods)] + let ci_binding = std::env::var("AUTHS_CI_PLATFORM").ok().map(|platform| { + auths_verifier::CiBinding { + platform, + repo: std::env::var("AUTHS_CI_REPO").ok().filter(|s| !s.is_empty()), + workflow: std::env::var("AUTHS_CI_WORKFLOW").ok().filter(|s| !s.is_empty()), + workflow_hash: std::env::var("AUTHS_CI_WORKFLOW_HASH").ok().filter(|s| !s.is_empty()), + run_id: std::env::var("AUTHS_CI_RUN_ID").ok().filter(|s| !s.is_empty()), + } + }); + + if ci_binding.is_some() { + attestation.ci_binding = ci_binding; + } +``` + +Note: This uses `#[allow(clippy::disallowed_methods)]` for env var access since this is at the SDK boundary receiving CI metadata, not domain logic calling `Utc::now()`. 
+ +**Step 3: Build and verify** + +Run: `cargo build --package auths_sdk` +Expected: Compiles + +**Step 4: Commit** + +```bash +git add sign/src/main.ts crates/auths-sdk/src/domains/signing/service.rs +git commit -m "feat: pass CI binding metadata through sign action into attestations" +``` + +--- + +## Task 6: Add auto-rotation scheduled workflow + +**Files:** +- Create: `/Users/bordumb/workspace/repositories/auths-base/auths/.github/workflows/rotate-ci-token.yml` + +This workflow runs on a schedule and rotates AUTHS_CI_TOKEN. Since the default TTL is now 4 hours, this ensures the token is always fresh. + +**Step 1: Create the workflow** + +```yaml +name: Rotate CI Token + +on: + # Run every 3 hours (before the 4-hour TTL expires) + schedule: + - cron: '0 */3 * * *' + # Allow manual trigger + workflow_dispatch: + +permissions: + contents: read + +jobs: + rotate: + runs-on: macos-latest # needs keychain access + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Install Auths CLI + run: brew install auths-base/tap/auths + + - name: Rotate CI token + env: + AUTHS_PASSPHRASE: ${{ secrets.AUTHS_CI_PASSPHRASE }} + run: | + auths ci rotate \ + --max-age-secs 14400 \ + --repo ${{ github.repository }} +``` + +**Important design note:** This workflow needs the CI device key passphrase (stored as a separate secret `AUTHS_CI_PASSPHRASE`) to regenerate the token. The passphrase alone is not enough to sign — it's just the decryption key for the CI device key which is only in the maintainer's keychain. This rotation workflow runs on a runner that has the keychain installed. + +**Alternative (simpler, no separate workflow):** The maintainer runs `auths ci rotate` locally before each release. With a 4-hour TTL, this means running it within 4 hours of pushing a tag. The `auths release` command from the tag-signing plan could incorporate this: `auths release v1.0.0` rotates the token as a side effect. 
+
+**Step 2: Commit**
+
+```bash
+git add .github/workflows/rotate-ci-token.yml
+git commit -m "feat: add scheduled CI token rotation workflow (every 3 hours)"
+```
+
+---
+
+## Task 7: Update `auths ci rotate` to support v2 fields
+
+**Files:**
+- Modify: `crates/auths-cli/src/commands/ci/rotate.rs`
+- Modify: `crates/auths-cli/src/commands/ci/mod.rs`
+
+The rotate command needs to preserve v2 fields (workflow_hash, repo, max_uses) when refreshing the token.
+
+**Step 1: Add flags to rotate subcommand**
+
+In `mod.rs`:
+
+```rust
+    Rotate {
+        #[arg(long)]
+        repo: Option<String>,
+
+        #[arg(long, default_value = "14400")]
+        max_age_secs: u64,
+
+        #[arg(long)]
+        manual_passphrase: bool,
+
+        /// Path to the workflow file to pin.
+        #[arg(long)]
+        workflow: Option<String>,
+
+        /// Maximum signing operations per token.
+        #[arg(long)]
+        max_uses: Option<u32>,
+    },
+```
+
+**Step 2: Update `run_rotate` to generate v2 tokens**
+
+In `rotate.rs`, change the token creation (around line 130) to use `CiToken::new_v2`:
+
+```rust
+    let workflow_hash = if let Some(ref wf_path) = workflow_path {
+        let wf_content = std::fs::read(wf_path)
+            .with_context(|| format!("Failed to read workflow file: {}", wf_path))?;
+        Some(sha2_hex(&wf_content))
+    } else {
+        None
+    };
+
+    let token = CiToken::new_v2(
+        ci_pass.to_string(),
+        keychain_b64,
+        identity_repo_b64,
+        verify_bundle_json,
+        now.to_rfc3339(),
+        max_age_secs,
+        workflow_hash,
+        repo_id,
+        max_uses,
+    );
+```
+
+**Step 3: Build and verify**
+
+Run: `cargo build --package auths_cli`
+Expected: Compiles
+
+**Step 4: Commit**
+
+```bash
+git add crates/auths-cli/src/commands/ci/rotate.rs crates/auths-cli/src/commands/ci/mod.rs
+git commit -m "feat: auths ci rotate generates v2 hardened tokens"
+```
+
+---
+
+## Task 8: Update release workflow to use hardened setup
+
+**Files:**
+- Modify: `/Users/bordumb/workspace/repositories/auths-base/auths/.github/workflows/release.yml`
+
+Update the release workflow documentation to show the new setup command with
pinning. + +**Step 1: Add setup instructions as comments** + +At the top of `release.yml`: + +```yaml +# Setup (run once on your machine): +# auths ci setup \ +# --workflow .github/workflows/release.yml \ +# --max-uses 4 \ +# --max-age-secs 14400 +# +# This creates a v2 hardened AUTHS_CI_TOKEN that: +# - Expires in 4 hours (not 1 year) +# - Only works with this exact workflow file +# - Only allows 4 signing operations (one per platform build) +# - Is scoped to this repository +# +# Rotate before each release: +# auths ci rotate --workflow .github/workflows/release.yml --max-uses 4 +``` + +**Step 2: Commit** + +```bash +git add .github/workflows/release.yml +git commit -m "docs: add v2 hardened token setup instructions to release workflow" +``` + +--- + +## Task 9: Documentation + +**Files:** +- Create: `docs/ci-signing-security.md` + +**Step 1: Write the security documentation** + +```markdown +# CI Signing Security Model + +## Overview + +Auths CI signing uses a short-lived, scoped token (AUTHS_CI_TOKEN) stored as a +GitHub secret. The token contains a delegated CI device key that can sign +release artifacts. + +## Defense Layers (v2 tokens) + +### 1. Short TTL (default: 4 hours) + +Tokens expire quickly. Even if stolen, the attacker has a narrow window. + +```bash +auths ci setup --max-age-secs 14400 # 4 hours +``` + +### 2. Workflow Pinning + +The token records the SHA256 hash of the authorized workflow file. If an +attacker modifies the workflow (to exfiltrate secrets or sign different +artifacts), the hash check fails and signing is refused. + +```bash +auths ci setup --workflow .github/workflows/release.yml +``` + +### 3. Repository Scoping + +Attestations are bound to a specific repository. A token stolen from repo A +cannot produce valid attestations for repo B. + +### 4. Use Limits + +Tokens can be limited to N signing operations. A release building 4 platform +binaries needs exactly 4 uses. 
+ +```bash +auths ci setup --max-uses 4 +``` + +## Comparison with LiteLLM/Axios attacks + +| Attack vector | npm/PyPI token | AUTHS_CI_TOKEN v1 | AUTHS_CI_TOKEN v2 | +|--------------|----------------|-------------------|-------------------| +| Token stolen from CI env | Full publish access | Full signing access | Expires in hours | +| Attacker modifies workflow | Works | Works | Blocked (hash mismatch) | +| Attacker uses token from external machine | Works | Works | Blocked (workflow hash unavailable) | +| Token used days later | Works | Works (1yr TTL) | Expired | +| Cross-repo replay | Works | Works | Blocked (repo scope) | + +## Rotation + +Rotate tokens regularly: + +```bash +auths ci rotate --workflow .github/workflows/release.yml --max-uses 4 +``` + +Or automate via scheduled workflow (see `.github/workflows/rotate-ci-token.yml`). + +## Revocation + +Instantly revoke the CI device key: + +```bash +auths device revoke --device --key +``` +``` + +**Step 2: Commit** + +```bash +git add docs/ci-signing-security.md +git commit -m "docs: add CI signing security model documentation" +``` + +--- + +## Summary + +| Task | What | Effort | +|------|------|--------| +| 1 | CiToken v2 struct with new fields | 1 hour | +| 2 | `auths ci setup` generates v2 tokens (4h TTL default) | 1.5 hours | +| 3 | Sign action enforces workflow pin + repo scope | 2 hours | +| 4 | `CiBinding` struct in attestations | 45 min | +| 5 | Pass CI metadata through sign action | 1 hour | +| 6 | Auto-rotation scheduled workflow | 30 min | +| 7 | `auths ci rotate` supports v2 fields | 1 hour | +| 8 | Release workflow setup instructions | 15 min | +| 9 | Security documentation | 30 min | + +**Total: ~8.5 hours** + +### After implementation, the security posture becomes: + +``` +auths ci setup --workflow .github/workflows/release.yml --max-uses 4 + +→ Token expires in 4 hours (not 1 year) +→ Only this workflow file can use it (hash-pinned) +→ Only this repo can use it (scoped) +→ Only 4 signing 
operations allowed (limited) +→ Every attestation records CI binding metadata (auditable) +``` + +An attacker who steals AUTHS_CI_TOKEN gets a token that is either expired, refuses to sign because the workflow hash doesn't match their modified workflow, or is scoped to a repo they can't publish from. diff --git a/docs/plans/2026-04-08-tag-signed-releases.md b/docs/plans/2026-04-08-tag-signed-releases.md new file mode 100644 index 00000000..b502672e --- /dev/null +++ b/docs/plans/2026-04-08-tag-signed-releases.md @@ -0,0 +1,1272 @@ +# Tag-Signed Releases: Eliminate AUTHS_CI_TOKEN + +> **For Claude:** REQUIRED SUB-SKILL: Use superpowers:executing-plans to implement this plan task-by-task. + +**Goal:** Replace CI-based artifact signing (AUTHS_CI_TOKEN) with local tag signing, so the signing key never leaves the maintainer's device. + +**Architecture:** The maintainer signs a git tag locally using their hardware-keychain-bound device key. CI builds artifacts from the signed tag, generates provenance `.auths.json` files that reference the tag attestation (no key material needed), and publishes everything to GitHub Release. Verification chains: signed tag → artifact hash → trust. 
+ +**Tech Stack:** Rust (auths-cli, auths-sdk, auths-verifier), TypeScript (GitHub Actions), Git refs (`refs/auths/tags/`) + +--- + +## Design + +### What changes + +| Before | After | +|--------|-------| +| Maintainer runs `git tag v0.1.0 && git push origin v0.1.0` | Maintainer runs `auths release v0.1.0` (creates tag + signs + pushes) | +| CI uses `auths-dev/sign@v1` with `AUTHS_CI_TOKEN` secret | CI uses `auths-dev/attest@v1` with zero secrets | +| `.auths.json` is a self-signed attestation (contains Ed25519 signatures) | `.auths.json` is a provenance doc referencing the signed tag | +| Verification checks the artifact signature directly | Verification checks: tag signature → artifact hash match | +| `AUTHS_CI_TOKEN` in GitHub secrets (stealable) | No secrets needed for signing | + +### Trust model + +``` +auths release v0.1.0 (maintainer's machine, hardware keychain) + ↓ +creates git tag v0.1.0 pointing at commit SHA + ↓ +creates attestation at refs/auths/tags/v0.1.0 containing: + - tag name, commit SHA, maintainer DID + - Ed25519 signature from device key + - capabilities: [sign_release] + ↓ +pushes tag + attestation refs to origin + ↓ +CI triggers on v* tag push (same as today) + ↓ +CI builds artifacts, computes SHA256 hashes + ↓ +CI generates .auths.json per artifact (NO key needed): + { + "version": 2, + "type": "release-provenance", + "tag": "v0.1.0", + "tag_attestation_ref": "refs/auths/tags/v0.1.0", + "artifact": { "name": "...", "digest": { "sha256": "..." } }, + "builder": { "platform": "github-actions", "run_id": "..." 
} + } + ↓ +Verification: fetch tag attestation → verify signature → check artifact hash +``` + +### What does NOT change + +- The `auths-dev/verify@v1` action (it verifies commits, not artifacts — unaffected) +- The `auths artifact sign` command (still works for local signing) +- The `auths artifact verify` command (gains v2 support, keeps v1 backward compat) +- The existing v1 `.auths.json` format (still valid, still verifiable) +- The Attestation struct in `auths-verifier` (we ADD a new struct, not modify) + +--- + +## Task 1: Add `ReleaseProvenance` type to `auths-verifier` + +**Files:** +- Modify: `crates/auths-verifier/src/core.rs` +- Test: `crates/auths-verifier/tests/cases/` (new file: `release_provenance.rs`) +- Modify: `crates/auths-verifier/tests/cases/mod.rs` + +**Step 1: Write the failing test** + +Create `crates/auths-verifier/tests/cases/release_provenance.rs`: + +```rust +use auths_verifier::core::ReleaseProvenance; + +#[test] +fn deserialize_release_provenance() { + let json = r#"{ + "version": 2, + "type": "release-provenance", + "tag": "v0.1.0", + "commit": "abc123def456abc123def456abc123def456abc1", + "tag_attestation_ref": "refs/auths/tags/v0.1.0", + "artifact": { + "name": "auths-linux-x86_64.tar.gz", + "digest": { + "algorithm": "sha256", + "hex": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + }, + "size": 12345678 + }, + "builder": { + "platform": "github-actions", + "workflow": ".github/workflows/release.yml", + "run_id": "12345" + } + }"#; + + let prov: ReleaseProvenance = serde_json::from_str(json).unwrap(); + assert_eq!(prov.version, 2); + assert_eq!(prov.provenance_type, "release-provenance"); + assert_eq!(prov.tag, "v0.1.0"); + assert_eq!(prov.commit, "abc123def456abc123def456abc123def456abc1"); + assert_eq!(prov.tag_attestation_ref, "refs/auths/tags/v0.1.0"); + assert_eq!(prov.artifact.name, "auths-linux-x86_64.tar.gz"); + assert_eq!(prov.artifact.digest.algorithm, "sha256"); + assert_eq!(prov.builder.platform, 
"github-actions"); +} + +#[test] +fn serialize_roundtrip() { + let prov = ReleaseProvenance { + version: 2, + provenance_type: "release-provenance".to_string(), + tag: "v0.1.0".to_string(), + commit: "abc123def456abc123def456abc123def456abc1".to_string(), + tag_attestation_ref: "refs/auths/tags/v0.1.0".to_string(), + artifact: ProvenanceArtifact { + name: "test.tar.gz".to_string(), + digest: ProvenanceDigest { + algorithm: "sha256".to_string(), + hex: "deadbeef".to_string(), + }, + size: Some(1024), + }, + builder: ProvenanceBuilder { + platform: "github-actions".to_string(), + workflow: Some(".github/workflows/release.yml".to_string()), + run_id: Some("99".to_string()), + }, + }; + + let json = serde_json::to_string(&prov).unwrap(); + let deser: ReleaseProvenance = serde_json::from_str(&json).unwrap(); + assert_eq!(deser.tag, prov.tag); +} +``` + +**Step 2: Run test to verify it fails** + +Run: `cargo nextest run -p auths_verifier -E 'test(release_provenance)'` +Expected: FAIL — `ReleaseProvenance` not found + +**Step 3: Write the implementation** + +Add to `crates/auths-verifier/src/core.rs` (after the existing `Attestation` struct): + +```rust +/// A release provenance document (version 2 `.auths.json`). +/// +/// Unlike v1 attestations which contain their own Ed25519 signatures, +/// provenance documents derive trust from a signed git tag. CI generates +/// these without any key material. 
+/// +/// Args: +/// * `tag` — The git tag name (e.g., "v0.1.0") +/// * `commit` — The commit SHA the tag points to +/// * `tag_attestation_ref` — Git ref containing the signed tag attestation +/// * `artifact` — Artifact name, digest, and size +/// * `builder` — CI platform metadata +/// +/// Usage: +/// ```ignore +/// let prov: ReleaseProvenance = serde_json::from_str(&json)?; +/// assert_eq!(prov.tag, "v0.1.0"); +/// ``` +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub struct ReleaseProvenance { + pub version: u32, + #[serde(rename = "type")] + pub provenance_type: String, + pub tag: String, + pub commit: String, + pub tag_attestation_ref: String, + pub artifact: ProvenanceArtifact, + pub builder: ProvenanceBuilder, +} + +/// Artifact metadata within a release provenance document. +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub struct ProvenanceArtifact { + pub name: String, + pub digest: ProvenanceDigest, + #[serde(skip_serializing_if = "Option::is_none")] + pub size: Option<u64>, +} + +/// Digest within provenance artifact metadata. +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub struct ProvenanceDigest { + pub algorithm: String, + pub hex: String, +} + +/// CI builder metadata within a release provenance document. 
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub struct ProvenanceBuilder { + pub platform: String, + #[serde(skip_serializing_if = "Option::is_none")] + pub workflow: Option<String>, + #[serde(skip_serializing_if = "Option::is_none")] + pub run_id: Option<String>, +} +``` + +Also add the re-export in `crates/auths-verifier/src/lib.rs`: +```rust +pub use core::{ReleaseProvenance, ProvenanceArtifact, ProvenanceDigest, ProvenanceBuilder}; +``` + +Register the test module in `crates/auths-verifier/tests/cases/mod.rs`: +```rust +mod release_provenance; +``` + +**Step 4: Run test to verify it passes** + +Run: `cargo nextest run -p auths_verifier -E 'test(release_provenance)'` +Expected: PASS + +**Step 5: Commit** + +```bash +git add crates/auths-verifier/src/core.rs crates/auths-verifier/src/lib.rs crates/auths-verifier/tests/cases/release_provenance.rs crates/auths-verifier/tests/cases/mod.rs +git commit -m "feat: add ReleaseProvenance type for v2 .auths.json format" +``` + +--- + +## Task 2: Add `auths release` CLI command (tag signing) + +**Files:** +- Create: `crates/auths-cli/src/commands/release.rs` +- Modify: `crates/auths-cli/src/commands/mod.rs` +- Modify: `crates/auths-cli/src/cli.rs` +- Modify: `crates/auths-cli/src/main.rs` + +This task is the core of the feature. The `auths release <tag>` command: +1. Creates an annotated git tag +2. Signs it with the maintainer's device key (hardware keychain) +3. Stores the attestation at `refs/auths/tags/<tag>` +4. Pushes tag + attestation refs to origin + +**Step 1: Write the failing test** + +Add `crates/auths-cli/tests/cases/release.rs` (if the test structure allows — otherwise this will be tested via the integration test pattern used by other commands): + +```rust +// Integration test: verify the release command creates a tag attestation. +// This test is validated manually / via E2E since it requires git + keychain. +// See Task 6 for E2E test. +``` + +For now, we validate via the E2E flow in Task 6. 
The command itself is a thin CLI layer over SDK logic. + +**Step 2: Create the command** + +Create `crates/auths-cli/src/commands/release.rs`: + +```rust +//! `auths release` — sign a release tag with the maintainer's device key. + +use anyhow::{Context, Result, anyhow}; +use clap::Parser; +use std::path::Path; +use std::sync::Arc; + +use auths_sdk::core_config::EnvironmentConfig; +use auths_sdk::domains::signing::service::{SigningKeyMaterial, sign_artifact}; +use auths_sdk::keychain::KeyAlias; +use auths_sdk::signing::PassphraseProvider; + +use crate::config::CliConfig; +use crate::factories::storage::build_auths_context; +use crate::subprocess::git_stdout; + +/// Sign a release tag with your device key and push it. +/// +/// Creates an annotated git tag, signs it with an Auths attestation +/// stored at `refs/auths/tags/<tag>`, and pushes both to origin. +/// No secrets are needed in CI — the signing happens here, on your device. +/// +/// Usage: +/// auths release v0.1.0 +/// auths release v0.1.0 --note "Production release" +/// auths release v0.1.0 --no-push # sign locally, push later +#[derive(Parser, Debug, Clone)] +#[command( + about = "Sign a release tag with your device key.", + after_help = "Examples:\n auths release v1.0.0\n auths release v1.0.0 --note 'First stable release'\n auths release v1.0.0 --no-push" +)] +pub struct ReleaseCommand { + /// Tag name (e.g., v0.1.0). Will be created if it doesn't exist. + pub tag: String, + + /// Optional note for the attestation. + #[arg(long)] + pub note: Option<String>, + + /// Device key alias to sign with (auto-detected if omitted). + #[arg(long, default_value = "default")] + pub device_key: String, + + /// Skip pushing to origin after signing. + #[arg(long)] + pub no_push: bool, + + /// Optional message for the annotated tag. 
+ #[arg(long, short = 'm')] + pub message: Option<String>, +} + +impl crate::commands::executable::ExecutableCommand for ReleaseCommand { + fn execute(&self, ctx: &CliConfig) -> Result<()> { + handle_release(self.clone(), ctx) + } +} + +fn handle_release(cmd: ReleaseCommand, ctx: &CliConfig) -> Result<()> { + let auths_repo = ctx + .repo_path + .clone() + .unwrap_or_else(|| { + auths_sdk::paths::auths_home_with_config(&ctx.env_config) + .unwrap_or_else(|_| std::path::PathBuf::from(".auths")) + }); + + // 1. Resolve HEAD commit + let commit_sha = git_stdout(&["rev-parse", "HEAD"]) + .context("Failed to resolve HEAD commit")?; + + // 2. Create annotated tag (if it doesn't exist) + let tag_message = cmd.message.clone().unwrap_or_else(|| format!("Release {}", cmd.tag)); + let tag_exists = git_stdout(&["rev-parse", &format!("refs/tags/{}", cmd.tag)]).is_ok(); + + if tag_exists { + println!("\x1b[2mTag {} already exists — signing existing tag.\x1b[0m", cmd.tag); + } else { + let tag_result = std::process::Command::new("git") + .args(["tag", "-a", &cmd.tag, "-m", &tag_message]) + .status() + .context("Failed to create git tag")?; + + if !tag_result.success() { + return Err(anyhow!("Failed to create tag {}", cmd.tag)); + } + println!("\x1b[0;32m\u{2713}\x1b[0m Created tag {}", cmd.tag); + } + + // 3. Build auths context and sign + let auths_ctx = build_auths_context( + &auths_repo, + &ctx.env_config, + Some(ctx.passphrase_provider.clone()), + ) + .context("Failed to initialize auths context. Run `auths init` first.")?; + + let identity = auths_ctx + .identity_storage + .load_identity() + .map_err(|_| anyhow!("No auths identity found. Run `auths init` first."))?; + + let identity_did = identity.controller_did.to_string(); + + // 4. Resolve device key and sign the tag data + let tag_data = format!("{}:{}:{}", cmd.tag, commit_sha, identity_did); + let tag_data_bytes = tag_data.as_bytes(); + + // Use the existing artifact signing infrastructure to create the attestation. 
+ // The "artifact" is the tag data string; the attestation proves the maintainer + // approved this tag. + let artifact = auths_sdk::ports::artifact::BytesArtifact::new( + tag_data_bytes.to_vec(), + format!("tag:{}", cmd.tag), + ); + + let params = auths_sdk::domains::signing::service::ArtifactSigningParams { + artifact: Arc::new(artifact), + identity_key: None, // auto-detect + device_key: SigningKeyMaterial::Alias(KeyAlias::new_unchecked(&cmd.device_key)), + expires_in: None, + note: cmd.note.clone().or_else(|| Some(format!("Release tag {}", cmd.tag))), + commit_sha: Some(commit_sha.clone()), + }; + + let result = sign_artifact(params, &auths_ctx) + .map_err(|e| anyhow!("Failed to sign tag: {e}"))?; + + println!("\x1b[0;32m\u{2713}\x1b[0m Signed tag {} (issuer: {})", cmd.tag, identity_did); + + // 5. Store attestation at refs/auths/tags/ + store_tag_attestation(&cmd.tag, &result.attestation_json) + .context("Failed to store tag attestation in git ref")?; + + println!( + "\x1b[0;32m\u{2713}\x1b[0m Attestation stored at refs/auths/tags/{}", + cmd.tag + ); + + // 6. Push (unless --no-push) + if !cmd.no_push { + let push_tag = std::process::Command::new("git") + .args(["push", "origin", &format!("refs/tags/{}", cmd.tag)]) + .status() + .context("Failed to push tag")?; + + if !push_tag.success() { + return Err(anyhow!("Failed to push tag {} to origin", cmd.tag)); + } + + let push_ref = std::process::Command::new("git") + .args([ + "push", "origin", + &format!("refs/auths/tags/{}:refs/auths/tags/{}", cmd.tag, cmd.tag), + ]) + .status() + .context("Failed to push attestation ref")?; + + if !push_ref.success() { + return Err(anyhow!("Failed to push attestation ref")); + } + + println!("\x1b[0;32m\u{2713}\x1b[0m Pushed tag + attestation to origin"); + } else { + println!("\x1b[2mSkipped push (--no-push). 
Push manually:\x1b[0m"); + println!(" git push origin refs/tags/{}", cmd.tag); + println!( + " git push origin refs/auths/tags/{}:refs/auths/tags/{}", + cmd.tag, cmd.tag + ); + } + + println!(); + println!("CI will build artifacts from this signed tag."); + println!("No AUTHS_CI_TOKEN needed — the signature is in the tag attestation."); + + Ok(()) +} + +/// Store the tag attestation JSON as a git ref. +/// +/// Creates a git blob + tree + commit at `refs/auths/tags/<tag>`. +/// Follows the same pattern as `refs/auths/commits/`. +fn store_tag_attestation(tag: &str, attestation_json: &str) -> Result<()> { + // Write attestation to a temp file, then use git hash-object + update-ref + let tmp = tempfile::NamedTempFile::new().context("Failed to create temp file")?; + std::fs::write(tmp.path(), attestation_json)?; + + // Use git plumbing to create the ref + // 1. Create blob + let blob = std::process::Command::new("git") + .args(["hash-object", "-w"]) + .arg(tmp.path()) + .output() + .context("git hash-object failed")?; + let blob_sha = String::from_utf8(blob.stdout)?.trim().to_string(); + + // 2. Create tree containing the blob as "attestation.json" + let tree_input = format!("100644 blob {}\tattestation.json\n", blob_sha); + let tree = std::process::Command::new("git") + .args(["mktree"]) + .stdin(std::process::Stdio::piped()) + .stdout(std::process::Stdio::piped()) + .spawn() + .context("git mktree failed")?; + + let mut tree_proc = tree; + { + use std::io::Write; + tree_proc + .stdin + .as_mut() + .ok_or_else(|| anyhow!("Failed to open stdin for mktree"))? + .write_all(tree_input.as_bytes())?; + } + let tree_output = tree_proc.wait_with_output()?; + let tree_sha = String::from_utf8(tree_output.stdout)?.trim().to_string(); + + // 3. 
Create commit with the tree + let commit = std::process::Command::new("git") + .args(["commit-tree", &tree_sha, "-m", &format!("auths: tag attestation for {}", tag)]) + .output() + .context("git commit-tree failed")?; + let commit_sha = String::from_utf8(commit.stdout)?.trim().to_string(); + + // 4. Update ref + let ref_name = format!("refs/auths/tags/{}", tag); + let update = std::process::Command::new("git") + .args(["update-ref", &ref_name, &commit_sha]) + .status() + .context("git update-ref failed")?; + + if !update.success() { + return Err(anyhow!("Failed to update ref {}", ref_name)); + } + + Ok(()) +} +``` + +**Step 3: Register the command** + +Add to `crates/auths-cli/src/commands/mod.rs`: +```rust +pub mod release; +``` + +Add to `crates/auths-cli/src/cli.rs` imports: +```rust +use crate::commands::release::ReleaseCommand; +``` + +Add to `RootCommand` enum (in the Primary section): +```rust + Release(ReleaseCommand), +``` + +Add to `crates/auths-cli/src/main.rs` match arms: +```rust + RootCommand::Release(cmd) => cmd.execute(&ctx), +``` + +Add to `audit_action`: +```rust + RootCommand::Release(_) => Some("release_signed"), +``` + +**Step 4: Check `BytesArtifact` exists — if not, add it** + +Check if `auths_sdk::ports::artifact::BytesArtifact` exists. If not, add to `crates/auths-sdk/src/ports/artifact.rs`: + +```rust +/// In-memory artifact for signing arbitrary byte data. 
+/// +/// Args: +/// * `data` — The raw bytes to sign +/// * `name` — A human-readable name for the artifact +/// +/// Usage: +/// ```ignore +/// let art = BytesArtifact::new(b"hello".to_vec(), "tag:v0.1.0".into()); +/// let meta = art.metadata()?; +/// ``` +pub struct BytesArtifact { + data: Vec<u8>, + name: String, +} + +impl BytesArtifact { + pub fn new(data: Vec<u8>, name: String) -> Self { + Self { data, name } + } +} + +impl ArtifactSource for BytesArtifact { + fn metadata(&self) -> Result<ArtifactMetadata> { + let digest = sha256_hex(&self.data); + Ok(ArtifactMetadata { + artifact_type: "tag".to_string(), + digest: ArtifactDigest { + algorithm: "sha256".to_string(), + hex: digest, + }, + name: self.name.clone(), + size: Some(self.data.len() as u64), + }) + } + + fn digest(&self) -> Result<ArtifactDigest> { + let hex = sha256_hex(&self.data); + Ok(ArtifactDigest { + algorithm: "sha256".to_string(), + hex, + }) + } +} +``` + +**Step 5: Build and verify** + +Run: `cargo build --package auths_cli` +Expected: Compiles without errors + +**Step 6: Commit** + +```bash +git add crates/auths-cli/src/commands/release.rs crates/auths-cli/src/commands/mod.rs crates/auths-cli/src/cli.rs crates/auths-cli/src/main.rs +git commit -m "feat: add auths release command for tag-signed releases" +``` + +--- + +## Task 3: Add v2 provenance verification to `auths artifact verify` + +**Files:** +- Modify: `crates/auths-cli/src/commands/artifact/verify.rs` + +The artifact verify command already checks for `offline_bundle` in the JSON (line 90). We add a similar check for `"type": "release-provenance"` to handle v2 `.auths.json` files. + +**Step 1: Write the failing test** + +The test will be in the verifier crate integration tests. 
Add to `crates/auths-verifier/tests/cases/release_provenance.rs`: + +```rust +#[test] +fn detect_v2_provenance_type() { + let json = r#"{"version": 2, "type": "release-provenance", "tag": "v1.0"}"#; + let val: serde_json::Value = serde_json::from_str(json).unwrap(); + assert_eq!(val.get("type").and_then(|t| t.as_str()), Some("release-provenance")); + assert_eq!(val.get("version").and_then(|v| v.as_u64()), Some(2)); +} +``` + +**Step 2: Add v2 handling to artifact verify** + +In `crates/auths-cli/src/commands/artifact/verify.rs`, after the `offline_bundle` check (line 90-92), add: + +```rust + // Check for v2 release provenance format + if sig_value.get("type").and_then(|t| t.as_str()) == Some("release-provenance") { + return handle_provenance_verify(file, &sig_content).await; + } +``` + +Then add the handler function: + +```rust +/// Verify a v2 release provenance `.auths.json`. +/// +/// Trust chain: fetch tag attestation from git ref → verify signature +/// → check artifact hash matches. +async fn handle_provenance_verify(file: &Path, sig_content: &str) -> Result<()> { + let file_str = file.to_string_lossy().to_string(); + + let prov: auths_verifier::ReleaseProvenance = match serde_json::from_str(sig_content) { + Ok(p) => p, + Err(e) => { + return output_error(&file_str, 2, &format!("Failed to parse provenance: {}", e)); + } + }; + + // 1. 
Compute file digest and compare with provenance + let file_artifact = FileArtifact::new(file); + let file_digest = match file_artifact.digest() { + Ok(d) => d, + Err(e) => { + return output_error(&file_str, 2, &format!("Failed to compute file digest: {}", e)); + } + }; + + if file_digest.hex != prov.artifact.digest.hex { + return output_result( + 1, + VerifyArtifactResult { + file: file_str, + valid: false, + digest_match: Some(false), + chain_valid: None, + chain_report: None, + capability_valid: None, + witness_quorum: None, + issuer: None, + commit_sha: Some(prov.commit.clone()), + commit_verified: None, + error: Some(format!( + "Digest mismatch: file={}, provenance={}", + file_digest.hex, prov.artifact.digest.hex + )), + }, + ); + } + + // 2. Fetch tag attestation from git ref + let ref_name = &prov.tag_attestation_ref; + let attestation_json = match crate::subprocess::git_stdout(&[ + "show", &format!("{}:attestation.json", ref_name), + ]) { + Ok(json) => json, + Err(_) => { + // Try fetching from remote first + let _ = std::process::Command::new("git") + .args(["fetch", "origin", &format!("{}:{}", ref_name, ref_name)]) + .status(); + + match crate::subprocess::git_stdout(&[ + "show", &format!("{}:attestation.json", ref_name), + ]) { + Ok(json) => json, + Err(_) => { + return output_error( + &file_str, + 2, + &format!( + "Tag attestation not found at {}. \ + Fetch with: git fetch origin {}:{}", + ref_name, ref_name, ref_name + ), + ); + } + } + } + }; + + // 3. Parse the tag attestation (this is a v1 Attestation — the signed one) + let tag_attestation: Attestation = match serde_json::from_str(&attestation_json) { + Ok(a) => a, + Err(e) => { + return output_error( + &file_str, + 2, + &format!("Failed to parse tag attestation: {}", e), + ); + } + }; + + // 4. 
Resolve identity key and verify the tag attestation signature + let (root_pk, _identity_did) = match resolve_identity_key(&None, &tag_attestation) { + Ok(v) => v, + Err(e) => { + return output_error(&file_str, 2, &e.to_string()); + } + }; + + let chain = vec![tag_attestation.clone()]; + let chain_result = + verify_chain_with_capability(&chain, &Capability::sign_release(), &root_pk).await; + + let (chain_valid, chain_report, capability_valid) = match chain_result { + Ok(report) => { + let is_valid = report.is_valid(); + (Some(is_valid), Some(report), Some(true)) + } + Err(auths_verifier::error::AttestationError::MissingCapability { .. }) => { + let report = verify_chain(&chain, &root_pk).await.ok(); + let chain_ok = report.as_ref().map(|r| r.is_valid()); + (chain_ok, report, Some(false)) + } + Err(e) => { + return output_error(&file_str, 1, &format!("Tag attestation verification failed: {}", e)); + } + }; + + let valid = chain_valid.unwrap_or(false) && capability_valid.unwrap_or(true); + + output_result( + if valid { 0 } else { 1 }, + VerifyArtifactResult { + file: file_str, + valid, + digest_match: Some(true), + chain_valid, + chain_report, + capability_valid, + witness_quorum: None, + issuer: Some(tag_attestation.issuer.to_string()), + commit_sha: Some(prov.commit), + commit_verified: None, + error: if valid { None } else { Some("Tag attestation signature invalid".to_string()) }, + }, + ) +} +``` + +**Step 3: Build and verify** + +Run: `cargo build --package auths_cli` +Expected: Compiles + +**Step 4: Commit** + +```bash +git add crates/auths-cli/src/commands/artifact/verify.rs +git commit -m "feat: add v2 release provenance verification to artifact verify" +``` + +--- + +## Task 4: Create `auths-dev/attest@v1` GitHub Action (replaces sign for CI) + +**Files:** +- Create: `/Users/bordumb/workspace/repositories/auths-base/sign/src/attest.ts` (new) +- Modify: `/Users/bordumb/workspace/repositories/auths-base/sign/action.yml` (add attest mode) + +This is the CI-side 
action. It needs ZERO secrets. It: +1. Computes SHA256 of each artifact +2. Reads the tag name from `GITHUB_REF` +3. Generates a v2 `.auths.json` provenance file per artifact + +**Step 1: Create the attest module** + +Create `/Users/bordumb/workspace/repositories/auths-base/sign/src/attest.ts`: + +```typescript +import * as core from '@actions/core'; +import * as crypto from 'crypto'; +import * as fs from 'fs'; +import * as path from 'path'; +import * as glob from '@actions/glob'; + +export interface AttestOptions { + files: string[]; + tag: string; + commit: string; + runId: string; + workflow: string; +} + +export interface AttestResult { + attestedFiles: string[]; + provenanceFiles: string[]; +} + +/** + * Generate v2 release provenance .auths.json files for artifacts. + * No signing key needed — trust derives from the signed git tag. + */ +export async function attestArtifacts(options: AttestOptions): Promise<AttestResult> { + const attestedFiles: string[] = []; + const provenanceFiles: string[] = []; + + for (const file of options.files) { + const digest = computeSha256(file); + const stat = fs.statSync(file); + const basename = path.basename(file); + + const provenance = { + version: 2, + type: 'release-provenance', + tag: options.tag, + commit: options.commit, + tag_attestation_ref: `refs/auths/tags/${options.tag}`, + artifact: { + name: basename, + digest: { + algorithm: 'sha256', + hex: digest, + }, + size: stat.size, + }, + builder: { + platform: 'github-actions', + workflow: options.workflow, + run_id: options.runId, + }, + }; + + const provenancePath = `${file}.auths.json`; + fs.writeFileSync(provenancePath, JSON.stringify(provenance, null, 2)); + + attestedFiles.push(file); + provenanceFiles.push(provenancePath); + + core.info(`✓ ${basename} → ${path.basename(provenancePath)}`); + core.info(`  SHA256: ${digest}`); + } + + return { attestedFiles, provenanceFiles }; +} + +function computeSha256(filePath: string): string { + const content = fs.readFileSync(filePath); + 
return crypto.createHash('sha256').update(content).digest('hex'); +} +``` + +**Step 2: Create a standalone attest action entry point** + +Create `/Users/bordumb/workspace/repositories/auths-base/sign/src/attest-main.ts`: + +```typescript +import * as core from '@actions/core'; +import * as github from '@actions/github'; +import * as glob from '@actions/glob'; +import * as path from 'path'; +import { attestArtifacts } from './attest'; + +async function run(): Promise<void> { + try { + const filePatterns = core.getMultilineInput('files').filter(p => p.trim()); + if (filePatterns.length === 0) { + throw new Error('`files` input is required'); + } + + // Resolve tag from GITHUB_REF (e.g., refs/tags/v0.1.0 → v0.1.0) + const ref = process.env.GITHUB_REF || ''; + const tagMatch = ref.match(/^refs\/tags\/(.+)$/); + const tag = core.getInput('tag') || (tagMatch ? tagMatch[1] : ''); + if (!tag) { + throw new Error('Could not determine tag. Set the `tag` input or run on a tag push event.'); + } + + const commit = process.env.GITHUB_SHA || ''; + const runId = process.env.GITHUB_RUN_ID || ''; + const workflow = process.env.GITHUB_WORKFLOW || ''; + + // Glob files + const patterns = filePatterns.join('\n'); + const globber = await glob.create(patterns, { followSymbolicLinks: false }); + let files = await globber.glob(); + + // Workspace containment + const workspace = path.resolve(process.env.GITHUB_WORKSPACE || process.cwd()); + files = files.filter(f => { + const resolved = path.resolve(f); + if (!resolved.startsWith(workspace + path.sep) && resolved !== workspace) { + core.warning(`Skipping path outside workspace: ${f}`); + return false; + } + return true; + }); + + files = [...new Set(files)]; + + if (files.length === 0) { + core.warning('No files matched the provided glob patterns'); + return; + } + + core.info(`Found ${files.length} file(s) to attest`); + + const result = await attestArtifacts({ + files, + tag, + commit, + runId, + workflow, + }); + + // Set outputs + 
core.setOutput('attested-files', JSON.stringify(result.attestedFiles)); + core.setOutput('provenance-files', JSON.stringify(result.provenanceFiles)); + + // Step summary + const lines = [ + '## Auths Release Provenance', + '', + `**Tag:** \`${tag}\` | **Commit:** \`${commit.substring(0, 8)}\``, + '', + '| Artifact | SHA256 | Provenance |', + '|----------|--------|------------|', + ]; + + for (let i = 0; i < result.attestedFiles.length; i++) { + const file = path.basename(result.attestedFiles[i]); + const prov = path.basename(result.provenanceFiles[i]); + lines.push(`| \`${file}\` | ✅ | \`${prov}\` |`); + } + + lines.push(''); + lines.push(`**${result.attestedFiles.length}** artifact(s) attested. No signing secrets used.`); + lines.push(''); + lines.push(`Trust chain: \`refs/auths/tags/${tag}\` (signed by maintainer) → artifact hash`); + + await core.summary.addRaw(lines.join('\n')).write(); + + } catch (error) { + if (error instanceof Error) { + core.setFailed(error.message); + } else { + core.setFailed('An unexpected error occurred'); + } + } +} + +run(); +``` + +**Step 3: Create `attest/action.yml`** + +Create `/Users/bordumb/workspace/repositories/auths-base/sign/attest/action.yml`: + +```yaml +name: 'Attest with Auths' +description: 'Generate release provenance .auths.json files — no secrets needed' +author: 'auths' + +inputs: + files: + description: 'Glob patterns for files to attest, one per line' + required: true + tag: + description: 'Tag name (auto-detected from GITHUB_REF if on a tag push)' + required: false + default: '' + +outputs: + attested-files: + description: 'JSON array of attested file paths' + provenance-files: + description: 'JSON array of provenance file paths (.auths.json)' + +runs: + using: 'node20' + main: '../dist/attest-index.js' + +branding: + icon: 'shield' + color: 'green' +``` + +**Step 4: Commit** + +```bash +git add sign/src/attest.ts sign/src/attest-main.ts sign/attest/action.yml +git commit -m "feat: add auths-dev/attest action 
for zero-secret release provenance" +``` + +--- + +## Task 5: Update release workflow to use tag signing + +**Files:** +- Modify: `/Users/bordumb/workspace/repositories/auths-base/auths/.github/workflows/release.yml` + +Replace the `auths-dev/sign@v1` step with `auths-dev/sign/attest@v1` (or inline SHA256 generation + provenance JSON). + +**Step 1: Update the workflow** + +Replace the "Sign artifact" step (lines 100-107) with: + +```yaml + - name: Generate release provenance (Unix) + if: matrix.ext == '.tar.gz' + uses: auths-dev/sign/attest@v1 + with: + files: ${{ matrix.asset_name }}${{ matrix.ext }} +``` + +And for Windows (add after the Windows checksum step): + +```yaml + - name: Generate release provenance (Windows) + if: matrix.ext == '.zip' + uses: auths-dev/sign/attest@v1 + with: + files: ${{ matrix.asset_name }}${{ matrix.ext }} +``` + +Remove `AUTHS_CI_TOKEN` from all references. + +The `update-homebrew` job at line 137 currently extracts hashes from `.auths.json` attestation files. Update the hash extraction to use the v2 format: + +```yaml + extract_hash() { python3 -c "import json; d=json.load(open('$1')); print(d['artifact']['digest']['hex'])"; } +``` + +(Changed from `d['payload']['digest']['hex']` to `d['artifact']['digest']['hex']`) + +**Step 2: Verify the workflow is syntactically correct** + +Run: `python3 -c "import yaml; yaml.safe_load(open('.github/workflows/release.yml'))"` +Expected: No errors + +**Step 3: Commit** + +```bash +git add .github/workflows/release.yml +git commit -m "feat: replace AUTHS_CI_TOKEN signing with tag-based provenance in release workflow" +``` + +--- + +## Task 6: E2E test — full release flow + +**Files:** +- Create: `tests/e2e/test_release_flow.py` (or add to existing E2E suite) + +This validates the complete flow: `auths release v0.0.1-test` → verify the tag attestation exists → generate provenance → verify provenance. 
+ +**Step 1: Write the E2E test script** + +```python +"""E2E test: tag-signed release flow.""" +import json +import os +import subprocess +import tempfile + +def test_release_tag_signing(): + """Test that auths release creates a signed tag with attestation ref.""" + # This test runs in a git repo with auths identity initialized. + + # 1. Create a release tag + result = subprocess.run( + ["auths", "release", "v0.0.1-e2e-test", "--no-push", "-m", "E2E test release"], + capture_output=True, text=True + ) + assert result.returncode == 0, f"auths release failed: {result.stderr}" + assert "Signed tag" in result.stdout + assert "refs/auths/tags/v0.0.1-e2e-test" in result.stdout + + # 2. Verify the tag exists + tag_check = subprocess.run( + ["git", "rev-parse", "refs/tags/v0.0.1-e2e-test"], + capture_output=True, text=True + ) + assert tag_check.returncode == 0, "Tag was not created" + + # 3. Verify the attestation ref exists + ref_check = subprocess.run( + ["git", "show", "refs/auths/tags/v0.0.1-e2e-test:attestation.json"], + capture_output=True, text=True + ) + assert ref_check.returncode == 0, "Attestation ref not found" + + attestation = json.loads(ref_check.stdout) + assert "issuer" in attestation + assert "device_signature" in attestation + assert attestation.get("capabilities") == ["sign_release"] + + # 4. Create a fake artifact and generate provenance + with tempfile.NamedTemporaryFile(suffix=".tar.gz", delete=False) as f: + f.write(b"fake artifact content for testing") + artifact_path = f.name + + # The provenance would normally be generated by CI action. + # For this test, verify the artifact verify command can handle v2 format. 
+ import hashlib + digest = hashlib.sha256(b"fake artifact content for testing").hexdigest() + + provenance = { + "version": 2, + "type": "release-provenance", + "tag": "v0.0.1-e2e-test", + "commit": subprocess.run( + ["git", "rev-parse", "HEAD"], capture_output=True, text=True + ).stdout.strip(), + "tag_attestation_ref": "refs/auths/tags/v0.0.1-e2e-test", + "artifact": { + "name": os.path.basename(artifact_path), + "digest": {"algorithm": "sha256", "hex": digest}, + "size": 32, + }, + "builder": { + "platform": "test", + "workflow": "e2e", + "run_id": "1", + }, + } + + provenance_path = f"{artifact_path}.auths.json" + with open(provenance_path, "w") as pf: + json.dump(provenance, pf) + + # 5. Verify the artifact using the provenance + verify_result = subprocess.run( + ["auths", "artifact", "verify", artifact_path, "--json"], + capture_output=True, text=True + ) + + verify_json = json.loads(verify_result.stdout) + assert verify_json["valid"] is True, f"Verification failed: {verify_json}" + assert verify_json["digest_match"] is True + + # Cleanup + os.unlink(artifact_path) + os.unlink(provenance_path) + subprocess.run(["git", "tag", "-d", "v0.0.1-e2e-test"], capture_output=True) + subprocess.run(["git", "update-ref", "-d", "refs/auths/tags/v0.0.1-e2e-test"], capture_output=True) +``` + +**Step 2: Run the E2E test** + +Run: `cd tests/e2e && python3 -m pytest test_release_flow.py -v` +Expected: PASS (requires auths identity to be initialized in test environment) + +**Step 3: Commit** + +```bash +git add tests/e2e/test_release_flow.py +git commit -m "test: add E2E test for tag-signed release flow" +``` + +--- + +## Task 7: Documentation update + +**Files:** +- Modify: `README.md` (add release signing section) +- Modify: `docs/E2E_TEST_CHECKLIST.md` (add release flow checklist) + +**Step 1: Add release section to README** + +Add after the existing signing documentation: + +```markdown +## Release Signing + +Sign releases with your device key — no CI secrets needed: + 
+```bash +auths release v1.0.0 +``` + +This creates a signed git tag, stores the attestation at `refs/auths/tags/v1.0.0`, +and pushes both to origin. CI builds artifacts and generates provenance files +that chain back to your signed tag. No `AUTHS_CI_TOKEN` required. + +Verify a release artifact: + +```bash +auths artifact verify ./auths-linux-x86_64.tar.gz +# ✅ Valid — signed by did:keri:... via tag v1.0.0 +``` +``` + +**Step 2: Commit** + +```bash +git add README.md docs/E2E_TEST_CHECKLIST.md +git commit -m "docs: add release signing documentation" +``` + +--- + +## Task 8: Deprecate `auths ci setup` for signing (keep for verify-only tokens) + +**Files:** +- Modify: `crates/auths-cli/src/commands/ci/setup.rs` + +**Step 1: Add deprecation warning** + +At the top of `run_setup()`, add: + +```rust + eprintln!("\x1b[1;33mNote:\x1b[0m For release artifact signing, consider using `auths release` instead."); + eprintln!(" `auths release` signs tags with your device key — no CI secrets needed."); + eprintln!(" `auths ci setup` is still useful for commit signing in CI."); + eprintln!(); +``` + +**Step 2: Commit** + +```bash +git add crates/auths-cli/src/commands/ci/setup.rs +git commit -m "chore: add deprecation note to auths ci setup for artifact signing" +``` + +--- + +## Summary + +| Task | What | Effort | +|------|------|--------| +| 1 | `ReleaseProvenance` type in auths-verifier | 1 hour | +| 2 | `auths release` CLI command | 3 hours | +| 3 | v2 provenance verification in artifact verify | 2 hours | +| 4 | `auths-dev/attest@v1` GitHub Action | 2 hours | +| 5 | Update release workflow | 30 min | +| 6 | E2E test | 1 hour | +| 7 | Documentation | 30 min | +| 8 | Deprecation note on ci setup | 15 min | + +**Total: ~10 hours** + +### After implementation, the release flow becomes: + +``` +Maintainer: auths release v1.0.0 # one command, device key, hardware keychain +CI: builds + attests artifacts # zero secrets +Consumer: auths artifact verify ... 
# checks tag signature → artifact hash +``` + +No `AUTHS_CI_TOKEN`. No secrets in CI. No key material to steal. The signing key never leaves the maintainer's device. diff --git a/justfile b/justfile index d1f4a173..7349875c 100644 --- a/justfile +++ b/justfile @@ -111,8 +111,3 @@ release-github: # Publish all workspace crates to crates.io in dependency order. release-crates: python scripts/releases/2_crates.py --publish - -# One-time setup: create a CI release-signing device and set AUTHS_CI_TOKEN secret. -# Run this once locally — detects forge from git remote and sets the secret automatically. -ci-setup: - auths ci setup diff --git a/mkdocs.yml b/mkdocs.yml index eb439f81..01a294e3 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -517,11 +517,6 @@ nav: - "AUTHS-E6002": errors/AUTHS-E6002.md - "AUTHS-E6003": errors/AUTHS-E6003.md - "AUTHS-E6004": errors/AUTHS-E6004.md - - "AUTHS-E7001": errors/AUTHS-E7001.md - - "AUTHS-E7002": errors/AUTHS-E7002.md - - "AUTHS-E7003": errors/AUTHS-E7003.md - - "AUTHS-E7004": errors/AUTHS-E7004.md - - "AUTHS-E7005": errors/AUTHS-E7005.md - auths-oidc-port: - "AUTHS-E8001": errors/AUTHS-E8001.md - "AUTHS-E8002": errors/AUTHS-E8002.md diff --git a/packages/auths-node/src/org.rs b/packages/auths-node/src/org.rs index 6d9ac612..6ed91a83 100644 --- a/packages/auths-node/src/org.rs +++ b/packages/auths-node/src/org.rs @@ -166,6 +166,7 @@ pub fn create_org( Some(Role::Admin), None, None, // commit_sha + None, ) .map_err(|e| format_error("AUTHS_ORG_ERROR", e))?; diff --git a/packages/auths-python/Cargo.lock b/packages/auths-python/Cargo.lock index ee039b76..a89d5f42 100644 --- a/packages/auths-python/Cargo.lock +++ b/packages/auths-python/Cargo.lock @@ -138,6 +138,7 @@ dependencies = [ "auths-crypto", "auths-keri", "auths-pairing-protocol", + "auths-transparency", "auths-verifier", "base64", "blake3", diff --git a/packages/auths-python/src/org.rs b/packages/auths-python/src/org.rs index 9e4da170..9dce185c 100644 --- 
a/packages/auths-python/src/org.rs
+++ b/packages/auths-python/src/org.rs
@@ -150,6 +150,7 @@ pub fn create_org(
         Some(Role::Admin),
         None,
         None, // commit_sha
+        None,
     )
     .map_err(|e| PyRuntimeError::new_err(format!("[AUTHS_ORG_ERROR] {e}")))?;
 
diff --git a/scripts/auths_workflows/artifact_signing.py b/scripts/auths_workflows/artifact_signing.py
index 87a6da38..eb4ea4fe 100644
--- a/scripts/auths_workflows/artifact_signing.py
+++ b/scripts/auths_workflows/artifact_signing.py
@@ -270,7 +270,7 @@ def main() -> None:
     print(f"{BOLD}{GREEN} Artifact signing workflow completed successfully!{RESET}")
     print(f"{BOLD}{GREEN}{'='*60}{RESET}")
     print(f"\n This confirms the release.yml signing step will work in CI.")
-    print(f" Make sure AUTHS_CI_TOKEN is set (via 'auths ci setup').\n")
+    print(f" Artifacts are signed with ephemeral keys. No CI token needed.\n")
 
 
 if __name__ == "__main__":
diff --git a/tests/e2e/test_ephemeral_signing.py b/tests/e2e/test_ephemeral_signing.py
new file mode 100644
index 00000000..91a8a1ed
--- /dev/null
+++ b/tests/e2e/test_ephemeral_signing.py
@@ -0,0 +1,83 @@
+"""E2E test: ephemeral CI signing → verify pipeline."""
+import hashlib
+import json
+import os
+import subprocess
+import tempfile
+
+
+def run(cmd, **kwargs):
+    """Run a command and return the result."""
+    return subprocess.run(cmd, capture_output=True, text=True, **kwargs)
+
+
+def test_ephemeral_sign_and_verify():
+    """Test that ephemeral CI signing produces a verifiable attestation."""
+    # Get current commit SHA
+    head = run(["git", "rev-parse", "HEAD"])
+    assert head.returncode == 0, f"git rev-parse HEAD failed: {head.stderr}"
+    commit_sha = head.stdout.strip()
+
+    with tempfile.TemporaryDirectory() as tmpdir:
+        # Create a test artifact
+        artifact_path = os.path.join(tmpdir, "test-artifact.tar.gz")
+        with open(artifact_path, "wb") as f:
+            f.write(b"ephemeral signing e2e test content")
+
+        # Sign with ephemeral CI key
+        sign_result = run([
+            "cargo", "run", "-p", "auths-cli", "--",
+            "artifact", "sign", artifact_path,
+            "--ci",
+            "--ci-platform", "local",
+            "--commit", commit_sha,
+            "--note", "E2E test",
+        ])
+        assert sign_result.returncode == 0, (
+            f"Ephemeral sign failed: {sign_result.stderr}\n{sign_result.stdout}"
+        )
+
+        # Check .auths.json was created
+        attestation_path = f"{artifact_path}.auths.json"
+        assert os.path.exists(attestation_path), f"No .auths.json at {attestation_path}"
+
+        # Parse and validate the attestation
+        with open(attestation_path) as f:
+            att = json.load(f)
+
+        assert att["issuer"].startswith("did:key:z"), (
+            f"Issuer should be did:key:, got: {att['issuer']}"
+        )
+        assert att["signer_type"] == "Workload", (
+            f"signer_type should be Workload, got: {att.get('signer_type')}"
+        )
+        assert att["commit_sha"] == commit_sha, (
+            f"commit_sha mismatch: {att.get('commit_sha')} != {commit_sha}"
+        )
+        assert att["capabilities"] == ["sign_release"], (
+            f"capabilities should be [sign_release], got: {att.get('capabilities')}"
+        )
+
+        # Check payload has artifact metadata
+        payload = att["payload"]
+        assert payload["digest"]["algorithm"] == "sha256"
+        expected_hash = hashlib.sha256(b"ephemeral signing e2e test content").hexdigest()
+        assert payload["digest"]["hex"] == expected_hash, (
+            f"Hash mismatch: {payload['digest']['hex']} != {expected_hash}"
+        )
+
+        # Check CI environment in payload
+        ci_env = payload.get("ci_environment")
+        assert ci_env is not None, "ci_environment should be in payload"
+        assert ci_env["platform"] == "local", (
+            f"Platform should be local, got: {ci_env.get('platform')}"
+        )
+
+        print(f"✓ Ephemeral attestation valid: issuer={att['issuer'][:30]}...")
+        print(f"  Commit: {commit_sha[:8]}")
+        print(f"  Digest: sha256:{expected_hash[:16]}...")
+
+
+if __name__ == "__main__":
+    test_ephemeral_sign_and_verify()
+    print("\n✓ All E2E ephemeral signing tests passed")