diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index ec5be61..59b6c96 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -17,6 +17,8 @@ jobs: build-and-test: name: Build & Test (features=${{ matrix.features }}) runs-on: ubuntu-latest + env: + CONFIG_FIXTURES_TOKEN: ${{ secrets.CONFIG_FIXTURES_TOKEN }} strategy: fail-fast: false matrix: @@ -25,6 +27,30 @@ jobs: steps: - name: Checkout repository uses: actions/checkout@v4 + - name: Checkout shared pipe fixtures repo + id: checkout_shared_fixtures + if: ${{ env.CONFIG_FIXTURES_TOKEN != '' }} + continue-on-error: true + uses: actions/checkout@v4 + with: + repository: trydirect/config + ref: main + token: ${{ env.CONFIG_FIXTURES_TOKEN }} + path: config-fixtures-repo + fetch-depth: 1 + persist-credentials: false + sparse-checkout: | + shared-fixtures + - name: Shared pipe fixtures unavailable + if: ${{ env.CONFIG_FIXTURES_TOKEN == '' || steps.checkout_shared_fixtures.outcome != 'success' }} + run: | + echo "::notice::Shared pipe fixtures are unavailable for this workflow run; shared-fixture tests will be skipped." 
+ - name: Link shared pipe fixtures + if: ${{ env.CONFIG_FIXTURES_TOKEN != '' && steps.checkout_shared_fixtures.outcome == 'success' }} + run: | + rm -rf "${GITHUB_WORKSPACE}/../config" "${GITHUB_WORKSPACE}/../shared-fixtures" + ln -sfn "${GITHUB_WORKSPACE}/config-fixtures-repo/shared-fixtures" "${GITHUB_WORKSPACE}/../shared-fixtures" + test -d "${GITHUB_WORKSPACE}/../shared-fixtures/pipe-contract" - name: Setup Rust toolchain (${{ matrix.rust }}) uses: dtolnay/rust-toolchain@stable diff --git a/.gitignore b/.gitignore index 1f8702f..57cc9e1 100644 --- a/.gitignore +++ b/.gitignore @@ -4,4 +4,5 @@ __pycache__ .DS_Store .ai target -.env \ No newline at end of file +.env +.claude \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index c07b7e3..68f2025 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Changelog +## 0.1.8 — 2026-04-21 +### Added +- `status --version` now includes the git short hash (for example `0.1.8 (abc1234)`) so production builds can be identified instantly. + +### Changed +- Docker builds now include the protobuf build inputs required for gRPC client code generation in musl/release images. +- Pipe-contract fixtures remain sourced from `trydirect/config`, while fork PRs and unauthorized CI runs now skip only the shared-fixture tests instead of failing the entire workflow. + ## 0.1.7 — 2026-04-10 ### Security — OWASP Top 10 Hardening @@ -207,4 +215,3 @@ This is a **security release** addressing 6 Critical and 5 High severity finding - Planned: align build and runtime images to avoid glibc drift; keep the musl-based build variant as the default container target. - Planned: update CI to build and test using the production base image so linker/runtime errors are caught early. - Planned: add a container startup smoke check to surface missing runtime dependencies before release. 
- diff --git a/CLAUDE.md b/CLAUDE.md new file mode 100644 index 0000000..0062556 --- /dev/null +++ b/CLAUDE.md @@ -0,0 +1,70 @@ +# Status Panel + +On-server status panel agent. Runs on deployed servers to report health metrics, manage containers, handle self-updates, and provide a WebSocket interface for real-time monitoring. + +## Tech Stack +- **Language**: Rust (2021 edition) +- **Framework**: Axum 0.8 (with WebSocket support) +- **Async**: Tokio (full features) +- **Docker**: Bollard 0.19 (Docker API via Unix socket, optional) +- **HTTP Client**: reqwest 0.12 (rustls-tls) +- **System Metrics**: sysinfo 0.30 +- **Security**: HMAC-SHA256, ring 0.17 +- **Daemonization**: daemonize 0.5 +- **Testing**: assert_cmd, tokio-test, mockito, tower + +## Project Structure +``` +src/ + main.rs # Binary entry point + lib.rs # Library root (core logic) + test_utils.rs # Shared test utilities +tests/ + http_routes.rs # HTTP route integration tests + security_integration.rs # Security/auth integration tests + self_update_integration.rs # Self-update mechanism tests +examples/ + command_execution.rs # Command execution example +``` + +## Features +- `default = ["docker"]` — includes Docker management via Bollard +- `docker` — Docker container management (Unix socket) +- `minimal` — builds without Docker support + +## Commands +```bash +# Build +cargo build + +# Build minimal (without Docker) +cargo build --no-default-features --features minimal + +# Run tests +cargo test + +# Run tests without Docker feature +cargo test --no-default-features --features minimal + +# Format & lint +cargo fmt +cargo clippy -- -D warnings + +# Run +cargo run --bin status +``` + +## Critical Rules +- NEVER expose system commands without HMAC authentication +- NEVER trust incoming WebSocket data without signature verification +- ALWAYS validate self-update binary integrity before replacing +- Docker socket access is privileged — validate all container operations +- System metrics collection must not 
block the async runtime +- Test both `docker` and `minimal` feature configurations +- Test with `cargo test` after every change +- DO NOT yet add to repo .claude CLAUDE.md .copilot directories and files + +## Agents +- Use `planner` before any feature work +- Use `tester` after every code change (must run cargo test) +- Use `code-reviewer` before commits — focus on security and system safety \ No newline at end of file diff --git a/Cargo.lock b/Cargo.lock index b72d982..e939bc3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2,6 +2,17 @@ # It is not intended for manual editing. version = 4 +[[package]] +name = "aes" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b169f7a6d4742236a0a00c541b845991d0ac43e546831af1249753ab4c3aa3a0" +dependencies = [ + "cfg-if", + "cipher", + "cpufeatures", +] + [[package]] name = "aho-corasick" version = "1.1.4" @@ -11,6 +22,54 @@ dependencies = [ "memchr", ] +[[package]] +name = "amq-protocol" +version = "7.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "587d313f3a8b4a40f866cc84b6059fe83133bf172165ac3b583129dd211d8e1c" +dependencies = [ + "amq-protocol-tcp", + "amq-protocol-types", + "amq-protocol-uri", + "cookie-factory", + "nom", + "serde", +] + +[[package]] +name = "amq-protocol-tcp" +version = "7.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc707ab9aa964a85d9fc25908a3fdc486d2e619406883b3105b48bf304a8d606" +dependencies = [ + "amq-protocol-uri", + "tcp-stream", + "tracing", +] + +[[package]] +name = "amq-protocol-types" +version = "7.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf99351d92a161c61ec6ecb213bc7057f5b837dd4e64ba6cb6491358efd770c4" +dependencies = [ + "cookie-factory", + "nom", + "serde", + "serde_json", +] + +[[package]] +name = "amq-protocol-uri" +version = "7.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"f89f8273826a676282208e5af38461a07fe939def57396af6ad5997fcf56577d" +dependencies = [ + "amq-protocol-types", + "percent-encoding", + "url", +] + [[package]] name = "android_system_properties" version = "0.1.5" @@ -76,6 +135,45 @@ version = "1.0.100" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a23eb6b1614318a8071c9b2521f36b424b2c83db5eb3a0fead4a6c0809af6e61" +[[package]] +name = "asn1-rs" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56624a96882bb8c26d61312ae18cb45868e5a9992ea73c58e45c3101e56a1e60" +dependencies = [ + "asn1-rs-derive", + "asn1-rs-impl", + "displaydoc", + "nom", + "num-traits", + "rusticata-macros", + "thiserror", + "time", +] + +[[package]] +name = "asn1-rs-derive" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3109e49b1e4909e9db6515a30c633684d68cdeaa252f215214cb4fa1a5bfee2c" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "synstructure", +] + +[[package]] +name = "asn1-rs-impl" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b18050c2cd6fe86c3a76584ef5e0baf286d038cda203eb6223df2cc413565f7" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "assert-json-diff" version = "2.0.2" @@ -101,6 +199,127 @@ dependencies = [ "wait-timeout", ] +[[package]] +name = "async-channel" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "924ed96dd52d1b75e9c1a3e6275715fd320f5f9439fb5a4a11fa51f4221158d2" +dependencies = [ + "concurrent-queue", + "event-listener-strategy", + "futures-core", + "pin-project-lite", +] + +[[package]] +name = "async-executor" +version = "1.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c96bf972d85afc50bf5ab8fe2d54d1586b4e0b46c97c50a0c9e71e2f7bcd812a" +dependencies = [ + "async-task", + "concurrent-queue", + "fastrand 2.3.0", + "futures-lite 
2.6.1", + "pin-project-lite", + "slab", +] + +[[package]] +name = "async-global-executor" +version = "3.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13f937e26114b93193065fd44f507aa2e9169ad0cdabbb996920b1fe1ddea7ba" +dependencies = [ + "async-channel", + "async-executor", + "async-io 2.6.0", + "async-lock 3.4.2", + "blocking", + "futures-lite 2.6.1", +] + +[[package]] +name = "async-global-executor-trait" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9af57045d58eeb1f7060e7025a1631cbc6399e0a1d10ad6735b3d0ea7f8346ce" +dependencies = [ + "async-global-executor", + "async-trait", + "executor-trait", +] + +[[package]] +name = "async-io" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fc5b45d93ef0529756f812ca52e44c221b35341892d3dcc34132ac02f3dd2af" +dependencies = [ + "async-lock 2.8.0", + "autocfg", + "cfg-if", + "concurrent-queue", + "futures-lite 1.13.0", + "log", + "parking", + "polling 2.8.0", + "rustix 0.37.28", + "slab", + "socket2 0.4.10", + "waker-fn", +] + +[[package]] +name = "async-io" +version = "2.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "456b8a8feb6f42d237746d4b3e9a178494627745c3c56c6ea55d92ba50d026fc" +dependencies = [ + "autocfg", + "cfg-if", + "concurrent-queue", + "futures-io", + "futures-lite 2.6.1", + "parking", + "polling 3.11.0", + "rustix 1.1.2", + "slab", + "windows-sys 0.61.2", +] + +[[package]] +name = "async-lock" +version = "2.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "287272293e9d8c41773cec55e365490fe034813a2f172f502d6ddcf75b2f582b" +dependencies = [ + "event-listener 2.5.3", +] + +[[package]] +name = "async-lock" +version = "3.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "290f7f2596bd5b78a9fec8088ccd89180d7f9f55b94b0576823bbbdc72ee8311" +dependencies = [ + "event-listener 5.4.1", + 
"event-listener-strategy", + "pin-project-lite", +] + +[[package]] +name = "async-reactor-trait" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a6012d170ad00de56c9ee354aef2e358359deb1ec504254e0e5a3774771de0e" +dependencies = [ + "async-io 1.13.0", + "async-trait", + "futures-core", + "reactor-trait", +] + [[package]] name = "async-stream" version = "0.3.6" @@ -123,6 +342,12 @@ dependencies = [ "syn", ] +[[package]] +name = "async-task" +version = "4.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b75356056920673b02621b35afd0f7dda9306d03c79a30f5c56c44cf256e3de" + [[package]] name = "async-trait" version = "0.1.89" @@ -146,13 +371,40 @@ version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" +[[package]] +name = "axum" +version = "0.7.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "edca88bc138befd0323b20752846e6587272d3b03b0343c8ea28a6f819e6e71f" +dependencies = [ + "async-trait", + "axum-core 0.4.5", + "bytes", + "futures-util", + "http", + "http-body", + "http-body-util", + "itoa", + "matchit 0.7.3", + "memchr", + "mime", + "percent-encoding", + "pin-project-lite", + "rustversion", + "serde", + "sync_wrapper", + "tower 0.5.2", + "tower-layer", + "tower-service", +] + [[package]] name = "axum" version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b098575ebe77cb6d14fc7f32749631a6e44edbef6b796f89b020e99ba20d425" dependencies = [ - "axum-core", + "axum-core 0.5.5", "base64", "bytes", "form_urlencoded", @@ -163,7 +415,7 @@ dependencies = [ "hyper", "hyper-util", "itoa", - "matchit", + "matchit 0.8.4", "memchr", "mime", "percent-encoding", @@ -176,12 +428,32 @@ dependencies = [ "sync_wrapper", "tokio", "tokio-tungstenite", - "tower", + "tower 0.5.2", "tower-layer", "tower-service", "tracing", ] 
+[[package]] +name = "axum-core" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09f2bd6146b97ae3359fa0cc6d6b376d9539582c7b4220f041a33ec24c226199" +dependencies = [ + "async-trait", + "bytes", + "futures-util", + "http", + "http-body", + "http-body-util", + "mime", + "pin-project-lite", + "rustversion", + "sync_wrapper", + "tower-layer", + "tower-service", +] + [[package]] name = "axum-core" version = "0.5.5" @@ -207,6 +479,18 @@ version = "0.22.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" +[[package]] +name = "base64ct" +version = "1.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2af50177e190e07a26ab74f8b1efbfe2ef87da2116221318cb1c2e82baf7de06" + +[[package]] +name = "bitflags" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" + [[package]] name = "bitflags" version = "2.10.0" @@ -222,6 +506,28 @@ dependencies = [ "generic-array", ] +[[package]] +name = "block-padding" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8894febbff9f758034a5b8e12d87918f56dfc64a8e1fe757d65e29041538d93" +dependencies = [ + "generic-array", +] + +[[package]] +name = "blocking" +version = "1.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e83f8d02be6967315521be875afa792a316e28d57b5a2d401897e2a7921b7f21" +dependencies = [ + "async-channel", + "async-task", + "futures-io", + "futures-lite 2.6.1", + "piper", +] + [[package]] name = "bollard" version = "0.19.4" @@ -292,6 +598,15 @@ version = "1.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e748733b7cbc798e1434b6ac524f0c1ff2ab456fe201501e6497c8417a4fc33" +[[package]] +name = "cbc" +version = "0.1.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "26b52a9543ae338f279b96b0b9fed9c8093744685043739079ce85cd58f289a6" +dependencies = [ + "cipher", +] + [[package]] name = "cc" version = "1.2.49" @@ -350,6 +665,16 @@ dependencies = [ "phf_codegen", ] +[[package]] +name = "cipher" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "773f3b9af64447d2ce9850330c473515014aa235e6a783b02db81ff39e4a3dad" +dependencies = [ + "crypto-common", + "inout", +] + [[package]] name = "clap" version = "4.5.53" @@ -390,6 +715,18 @@ version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1d728cc89cf3aee9ff92b05e62b19ee65a02b5702cff7d5a377e32c6ae29d8d" +[[package]] +name = "cms" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b77c319abfd5219629c45c34c89ba945ed3c5e49fcde9d16b6c3885f118a730" +dependencies = [ + "const-oid", + "der", + "spki", + "x509-cert", +] + [[package]] name = "colorchoice" version = "1.0.4" @@ -405,6 +742,37 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "concurrent-queue" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ca0197aee26d1ae37445ee532fefce43251d24cc7c166799f4d46817f1d3973" +dependencies = [ + "crossbeam-utils", +] + +[[package]] +name = "const-oid" +version = "0.9.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" + +[[package]] +name = "cookie-factory" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9885fa71e26b8ab7855e2ec7cae6e9b380edff76cd052e07c683a0319d51b3a2" + +[[package]] +name = "core-foundation" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f" +dependencies = [ + 
"core-foundation-sys", + "libc", +] + [[package]] name = "core-foundation-sys" version = "0.8.7" @@ -470,6 +838,44 @@ version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2a2330da5de22e8a3cb63252ce2abb30116bf5265e89c0e01bc17015ce30a476" +[[package]] +name = "der" +version = "0.7.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7c1832837b905bbfb5101e07cc24c8deddf52f93225eee6ead5f4d63d53ddcb" +dependencies = [ + "const-oid", + "der_derive", + "flagset", + "pem-rfc7468", + "zeroize", +] + +[[package]] +name = "der-parser" +version = "10.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07da5016415d5a3c4dd39b11ed26f915f52fc4e0dc197d87908bc916e51bc1a6" +dependencies = [ + "asn1-rs", + "displaydoc", + "nom", + "num-bigint", + "num-traits", + "rusticata-macros", +] + +[[package]] +name = "der_derive" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8034092389675178f570469e6c3b0465d3d30b4505c294a6550db47f3c17ad18" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "deranged" version = "0.5.5" @@ -480,6 +886,15 @@ dependencies = [ "serde_core", ] +[[package]] +name = "des" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffdd80ce8ce993de27e9f063a444a4d53ce8e8db4c1f00cc03af5ad5a9867a1e" +dependencies = [ + "cipher", +] + [[package]] name = "deunicode" version = "1.6.2" @@ -514,6 +929,12 @@ dependencies = [ "syn", ] +[[package]] +name = "doc-comment" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "780955b8b195a21ab8e4ac6b60dd1dbdcec1dc6c51c0617964b08c81785e12c9" + [[package]] name = "dotenvy" version = "0.15.7" @@ -548,6 +969,51 @@ dependencies = [ "windows-sys 0.61.2", ] +[[package]] +name = "event-listener" +version = "2.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" + +[[package]] +name = "event-listener" +version = "5.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e13b66accf52311f30a0db42147dadea9850cb48cd070028831ae5f5d4b856ab" +dependencies = [ + "concurrent-queue", + "parking", + "pin-project-lite", +] + +[[package]] +name = "event-listener-strategy" +version = "0.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8be9f3dfaaffdae2972880079a491a1a8bb7cbed0b8dd7a347f668b4150a3b93" +dependencies = [ + "event-listener 5.4.1", + "pin-project-lite", +] + +[[package]] +name = "executor-trait" +version = "2.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13c39dff9342e4e0e16ce96be751eb21a94e94a87bb2f6e63ad1961c2ce109bf" +dependencies = [ + "async-trait", +] + +[[package]] +name = "fastrand" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e51093e27b0797c359783294ca4f0a911c270184cb10f85783b118614a1501be" +dependencies = [ + "instant", +] + [[package]] name = "fastrand" version = "2.3.0" @@ -560,6 +1026,29 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3a3076410a55c90011c298b04d0cfa770b00fa04e1e3c97d3f6c9de105a03844" +[[package]] +name = "fixedbitset" +version = "0.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d674e81391d1e1ab681a28d99df07927c6d4aa5b027d7da16ba32d1d21ecd99" + +[[package]] +name = "flagset" +version = "0.4.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b7ac824320a75a52197e8f2d787f6a38b6718bb6897a35142d749af3c0e8f4fe" + +[[package]] +name = "flume" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da0e4dd2a88388a1f4ccc7c9ce104604dab68d9f408dc34cd45823d5a9069095" +dependencies = [ + "futures-core", + "futures-sink", + 
"spin", +] + [[package]] name = "fnv" version = "1.0.7" @@ -590,6 +1079,40 @@ version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" +[[package]] +name = "futures-io" +version = "0.3.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cecba35d7ad927e23624b22ad55235f2239cfa44fd10428eecbeba6d6a717718" + +[[package]] +name = "futures-lite" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49a9d51ce47660b1e808d3c990b4709f2f415d928835a17dfd16991515c46bce" +dependencies = [ + "fastrand 1.9.0", + "futures-core", + "futures-io", + "memchr", + "parking", + "pin-project-lite", + "waker-fn", +] + +[[package]] +name = "futures-lite" +version = "2.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f78e10609fe0e0b3f4157ffab1876319b5b0db102a2c60dc4626306dc46b44ad" +dependencies = [ + "fastrand 2.3.0", + "futures-core", + "futures-io", + "parking", + "pin-project-lite", +] + [[package]] name = "futures-macro" version = "0.3.31" @@ -684,7 +1207,7 @@ version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0bf760ebf69878d9fd8f110c89703d90ce35095324d1f1edcb595c63945ee757" dependencies = [ - "bitflags", + "bitflags 2.10.0", "ignore", "walkdir", ] @@ -726,6 +1249,18 @@ version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" +[[package]] +name = "hermit-abi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" + +[[package]] +name = "hermit-abi" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc0fef456e4baa96da950455cd02c081ca953b141298e41db3fc7e36b1da849c" + [[package]] 
name = "hex" version = "0.4.3" @@ -851,9 +1386,22 @@ dependencies = [ "rustls", "rustls-pki-types", "tokio", - "tokio-rustls", + "tokio-rustls", + "tower-service", + "webpki-roots 1.0.4", +] + +[[package]] +name = "hyper-timeout" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b90d566bffbce6a75bd8b09a05aa8c2cb1fabb6cb348f8840c9e4c90a0d83b0" +dependencies = [ + "hyper", + "hyper-util", + "pin-project-lite", + "tokio", "tower-service", - "webpki-roots", ] [[package]] @@ -874,7 +1422,7 @@ dependencies = [ "libc", "percent-encoding", "pin-project-lite", - "socket2", + "socket2 0.6.1", "tokio", "tower-service", "tracing", @@ -1060,6 +1608,36 @@ dependencies = [ "serde_core", ] +[[package]] +name = "inout" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "879f10e63c20629ecabbb64a8010319738c66a5cd0c29b02d63d272b03751d01" +dependencies = [ + "block-padding", + "generic-array", +] + +[[package]] +name = "instant" +version = "0.1.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e0242819d153cba4b4b05a5a8f2a7e9bbf97b6055b2a002b395c96b5ff3c0222" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "io-lifetimes" +version = "1.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2" +dependencies = [ + "hermit-abi 0.3.9", + "libc", + "windows-sys 0.48.0", +] + [[package]] name = "ipnet" version = "2.11.0" @@ -1082,6 +1660,15 @@ version = "1.70.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a6cb138bb79a146c1bd460005623e142ef0181e3d0219cb493e02f7d08a35695" +[[package]] +name = "itertools" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b192c782037fadd9cfa75548310488aabdbf3d2da73885b31bd0abd03351285" +dependencies = [ + "either", +] + [[package]] name = "itoa" version = 
"1.0.15" @@ -1098,6 +1685,28 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "lapin" +version = "2.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "02d2aa4725b9607915fa1a73e940710a3be6af508ce700e56897cbe8847fbb07" +dependencies = [ + "amq-protocol", + "async-global-executor-trait", + "async-reactor-trait", + "async-trait", + "executor-trait", + "flume", + "futures-core", + "futures-io", + "parking_lot", + "pinky-swear", + "reactor-trait", + "serde", + "tracing", + "waker-fn", +] + [[package]] name = "lazy_static" version = "1.5.0" @@ -1116,6 +1725,12 @@ version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f9fbbcab51052fe104eb5e5d351cf728d30a5be1fe14d9be8a3b097481fb97de" +[[package]] +name = "linux-raw-sys" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" + [[package]] name = "linux-raw-sys" version = "0.11.0" @@ -1158,6 +1773,12 @@ dependencies = [ "regex-automata", ] +[[package]] +name = "matchit" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94" + [[package]] name = "matchit" version = "0.8.4" @@ -1186,6 +1807,12 @@ dependencies = [ "unicase", ] +[[package]] +name = "minimal-lexical" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" + [[package]] name = "mio" version = "1.1.1" @@ -1222,18 +1849,34 @@ dependencies = [ "tokio", ] +[[package]] +name = "multimap" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d87ecb2933e8aeadb3e3a02b828fed80a7528047e68b4f424523a0981a3a084" + [[package]] name = "nix" version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum 
= "71e2746dc3a24dd78b3cfcb7be93368c6de9963d30f43a6a73998a9cf4b17b46" dependencies = [ - "bitflags", + "bitflags 2.10.0", "cfg-if", "cfg_aliases", "libc", ] +[[package]] +name = "nom" +version = "7.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" +dependencies = [ + "memchr", + "minimal-lexical", +] + [[package]] name = "ntapi" version = "0.4.1" @@ -1252,12 +1895,31 @@ dependencies = [ "windows-sys 0.61.2", ] +[[package]] +name = "num-bigint" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a5e44f723f1133c9deac646763579fdb3ac745e418f2a7af9cd0c431da1f20b9" +dependencies = [ + "num-integer", + "num-traits", +] + [[package]] name = "num-conv" version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cf97ec579c3c42f953ef76dbf8d55ac91fb219dde70e49aa4a6b7d74e9919050" +[[package]] +name = "num-integer" +version = "0.1.46" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f" +dependencies = [ + "num-traits", +] + [[package]] name = "num-traits" version = "0.2.19" @@ -1267,6 +1929,15 @@ dependencies = [ "autocfg", ] +[[package]] +name = "oid-registry" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "12f40cff3dde1b6087cc5d5f5d4d65712f34016a03ed60e9c08dcc392736b5b7" +dependencies = [ + "asn1-rs", +] + [[package]] name = "once_cell" version = "1.21.3" @@ -1279,6 +1950,40 @@ version = "1.70.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "384b8ab6d37215f3c5301a95a4accb5d64aa607f1fcb26a11b5303878451b4fe" +[[package]] +name = "openssl-probe" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" + +[[package]] +name = 
"p12-keystore" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3cae83056e7cb770211494a0ecf66d9fa7eba7d00977e5bb91f0e925b40b937f" +dependencies = [ + "cbc", + "cms", + "der", + "des", + "hex", + "hmac", + "pkcs12", + "pkcs5", + "rand 0.9.2", + "rc2", + "sha1", + "sha2", + "thiserror", + "x509-parser", +] + +[[package]] +name = "parking" +version = "2.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f38d5652c16fde515bb1ecef450ab0f6a219d619a7274976324d5e377f7dceba" + [[package]] name = "parking_lot" version = "0.12.5" @@ -1311,6 +2016,25 @@ dependencies = [ "regex", ] +[[package]] +name = "pbkdf2" +version = "0.12.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8ed6a7761f76e3b9f92dfb0a60a6a6477c61024b775147ff0973a02653abaf2" +dependencies = [ + "digest", + "hmac", +] + +[[package]] +name = "pem-rfc7468" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "88b39c9bfcfc231068454382784bb460aae594343fb030d46e9f50a645418412" +dependencies = [ + "base64ct", +] + [[package]] name = "percent-encoding" version = "2.3.2" @@ -1360,6 +2084,16 @@ dependencies = [ "sha2", ] +[[package]] +name = "petgraph" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3672b37090dbd86368a4145bc067582552b29c27377cad4e0a306c97f9bd7772" +dependencies = [ + "fixedbitset", + "indexmap 2.12.1", +] + [[package]] name = "phf" version = "0.11.3" @@ -1398,6 +2132,26 @@ dependencies = [ "siphasher", ] +[[package]] +name = "pin-project" +version = "1.1.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1749c7ed4bcaf4c3d0a3efc28538844fb29bcdd7d2b67b2be7e20ba861ff517" +dependencies = [ + "pin-project-internal", +] + +[[package]] +name = "pin-project-internal" +version = "1.1.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"d9b20ed30f105399776b9c883e68e536ef602a16ae6f596d2c473591d6ad64c6" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "pin-project-lite" version = "0.2.16" @@ -1410,6 +2164,89 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" +[[package]] +name = "pinky-swear" +version = "6.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1ea6e230dd3a64d61bcb8b79e597d3ab6b4c94ec7a234ce687dd718b4f2e657" +dependencies = [ + "doc-comment", + "flume", + "parking_lot", + "tracing", +] + +[[package]] +name = "piper" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c835479a4443ded371d6c535cbfd8d31ad92c5d23ae9770a61bc155e4992a3c1" +dependencies = [ + "atomic-waker", + "fastrand 2.3.0", + "futures-io", +] + +[[package]] +name = "pkcs12" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "695b3df3d3cc1015f12d70235e35b6b79befc5fa7a9b95b951eab1dd07c9efc2" +dependencies = [ + "cms", + "const-oid", + "der", + "digest", + "spki", + "x509-cert", + "zeroize", +] + +[[package]] +name = "pkcs5" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e847e2c91a18bfa887dd028ec33f2fe6f25db77db3619024764914affe8b69a6" +dependencies = [ + "aes", + "cbc", + "der", + "pbkdf2", + "scrypt", + "sha2", + "spki", +] + +[[package]] +name = "polling" +version = "2.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4b2d323e8ca7996b3e23126511a523f7e62924d93ecd5ae73b333815b0eb3dce" +dependencies = [ + "autocfg", + "bitflags 1.3.2", + "cfg-if", + "concurrent-queue", + "libc", + "log", + "pin-project-lite", + "windows-sys 0.48.0", +] + +[[package]] +name = "polling" +version = "3.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"5d0e4f59085d47d8241c88ead0f274e8a0cb551f3625263c05eb8dd897c34218" +dependencies = [ + "cfg-if", + "concurrent-queue", + "hermit-abi 0.5.2", + "pin-project-lite", + "rustix 1.1.2", + "windows-sys 0.61.2", +] + [[package]] name = "potential_utf" version = "0.1.4" @@ -1446,29 +2283,155 @@ dependencies = [ ] [[package]] -name = "predicates-core" -version = "1.0.9" +name = "predicates-core" +version = "1.0.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "727e462b119fe9c93fd0eb1429a5f7647394014cf3c04ab2c0350eeb09095ffa" + +[[package]] +name = "predicates-tree" +version = "1.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72dd2d6d381dfb73a193c7fca536518d7caee39fc8503f74e7dc0be0531b425c" +dependencies = [ + "predicates-core", + "termtree", +] + +[[package]] +name = "prettyplease" +version = "0.2.37" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "479ca8adacdd7ce8f1fb39ce9ecccbfe93a3f1344b3d0d97f20bc0196208f62b" +dependencies = [ + "proc-macro2", + "syn", +] + +[[package]] +name = "proc-macro2" +version = "1.0.103" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5ee95bc4ef87b8d5ba32e8b7714ccc834865276eab0aed5c9958d00ec45f49e8" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "prost" +version = "0.13.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2796faa41db3ec313a31f7624d9286acf277b52de526150b7e69f3debf891ee5" +dependencies = [ + "bytes", + "prost-derive", +] + +[[package]] +name = "prost-build" +version = "0.13.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be769465445e8c1474e9c5dac2018218498557af32d9ed057325ec9a41ae81bf" +dependencies = [ + "heck", + "itertools", + "log", + "multimap", + "once_cell", + "petgraph", + "prettyplease", + "prost", + "prost-types", + "regex", + "syn", + "tempfile", +] + +[[package]] +name = "prost-derive" +version = "0.13.5" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a56d757972c98b346a9b766e3f02746cde6dd1cd1d1d563472929fdd74bec4d" +dependencies = [ + "anyhow", + "itertools", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "prost-types" +version = "0.13.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "52c2c1bf36ddb1a1c396b3601a3cec27c2462e45f07c386894ec3ccf5332bd16" +dependencies = [ + "prost", +] + +[[package]] +name = "protoc-bin-vendored" +version = "3.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d1c381df33c98266b5f08186583660090a4ffa0889e76c7e9a5e175f645a67fa" +dependencies = [ + "protoc-bin-vendored-linux-aarch_64", + "protoc-bin-vendored-linux-ppcle_64", + "protoc-bin-vendored-linux-s390_64", + "protoc-bin-vendored-linux-x86_32", + "protoc-bin-vendored-linux-x86_64", + "protoc-bin-vendored-macos-aarch_64", + "protoc-bin-vendored-macos-x86_64", + "protoc-bin-vendored-win32", +] + +[[package]] +name = "protoc-bin-vendored-linux-aarch_64" +version = "3.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c350df4d49b5b9e3ca79f7e646fde2377b199e13cfa87320308397e1f37e1a4c" + +[[package]] +name = "protoc-bin-vendored-linux-ppcle_64" +version = "3.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a55a63e6c7244f19b5c6393f025017eb5d793fd5467823a099740a7a4222440c" + +[[package]] +name = "protoc-bin-vendored-linux-s390_64" +version = "3.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1dba5565db4288e935d5330a07c264a4ee8e4a5b4a4e6f4e83fad824cc32f3b0" + +[[package]] +name = "protoc-bin-vendored-linux-x86_32" +version = "3.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8854774b24ee28b7868cd71dccaae8e02a2365e67a4a87a6cd11ee6cdbdf9cf5" + +[[package]] +name = "protoc-bin-vendored-linux-x86_64" +version = "3.2.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "b38b07546580df720fa464ce124c4b03630a6fb83e05c336fea2a241df7e5d78" + +[[package]] +name = "protoc-bin-vendored-macos-aarch_64" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "727e462b119fe9c93fd0eb1429a5f7647394014cf3c04ab2c0350eeb09095ffa" +checksum = "89278a9926ce312e51f1d999fee8825d324d603213344a9a706daa009f1d8092" [[package]] -name = "predicates-tree" -version = "1.0.12" +name = "protoc-bin-vendored-macos-x86_64" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72dd2d6d381dfb73a193c7fca536518d7caee39fc8503f74e7dc0be0531b425c" -dependencies = [ - "predicates-core", - "termtree", -] +checksum = "81745feda7ccfb9471d7a4de888f0652e806d5795b61480605d4943176299756" [[package]] -name = "proc-macro2" -version = "1.0.103" +name = "protoc-bin-vendored-win32" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ee95bc4ef87b8d5ba32e8b7714ccc834865276eab0aed5c9958d00ec45f49e8" -dependencies = [ - "unicode-ident", -] +checksum = "95067976aca6421a523e491fce939a3e65249bac4b977adee0ee9771568e8aa3" [[package]] name = "quinn" @@ -1483,7 +2446,7 @@ dependencies = [ "quinn-udp", "rustc-hash", "rustls", - "socket2", + "socket2 0.6.1", "thiserror", "tokio", "tracing", @@ -1520,7 +2483,7 @@ dependencies = [ "cfg_aliases", "libc", "once_cell", - "socket2", + "socket2 0.6.1", "tracing", "windows-sys 0.60.2", ] @@ -1619,13 +2582,33 @@ dependencies = [ "crossbeam-utils", ] +[[package]] +name = "rc2" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62c64daa8e9438b84aaae55010a93f396f8e60e3911590fcba770d04643fc1dd" +dependencies = [ + "cipher", +] + +[[package]] +name = "reactor-trait" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"438a4293e4d097556730f4711998189416232f009c137389e0f961d2bc0ddc58" +dependencies = [ + "async-trait", + "futures-core", + "futures-io", +] + [[package]] name = "redox_syscall" version = "0.5.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ed2bf2547551a7053d6fdfafda3f938979645c44812fbfcda098faae3f1a362d" dependencies = [ - "bitflags", + "bitflags 2.10.0", ] [[package]] @@ -1705,14 +2688,14 @@ dependencies = [ "sync_wrapper", "tokio", "tokio-rustls", - "tower", + "tower 0.5.2", "tower-http", "tower-service", "url", "wasm-bindgen", "wasm-bindgen-futures", "web-sys", - "webpki-roots", + "webpki-roots 1.0.4", ] [[package]] @@ -1754,16 +2737,39 @@ dependencies = [ "semver", ] +[[package]] +name = "rusticata-macros" +version = "4.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "faf0c4a6ece9950b9abdb62b1cfcf2a68b3b67a10ba445b3bb85be2a293d0632" +dependencies = [ + "nom", +] + +[[package]] +name = "rustix" +version = "0.37.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "519165d378b97752ca44bbe15047d5d3409e875f39327546b42ac81d7e18c1b6" +dependencies = [ + "bitflags 1.3.2", + "errno", + "io-lifetimes", + "libc", + "linux-raw-sys 0.3.8", + "windows-sys 0.48.0", +] + [[package]] name = "rustix" version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cd15f8a2c5551a84d56efdc1cd049089e409ac19a3072d5037a17fd70719ff3e" dependencies = [ - "bitflags", + "bitflags 2.10.0", "errno", "libc", - "linux-raw-sys", + "linux-raw-sys 0.11.0", "windows-sys 0.61.2", ] @@ -1773,6 +2779,7 @@ version = "0.23.35" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "533f54bc6a7d4f647e46ad909549eda97bf5afc1585190ef692b4286b198bd8f" dependencies = [ + "log", "once_cell", "ring", "rustls-pki-types", @@ -1781,6 +2788,41 @@ dependencies = [ "zeroize", ] +[[package]] +name = "rustls-connector" +version = "0.20.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "70cc376c6ba1823ae229bacf8ad93c136d93524eab0e4e5e0e4f96b9c4e5b212" +dependencies = [ + "log", + "rustls", + "rustls-native-certs", + "rustls-pki-types", + "rustls-webpki", +] + +[[package]] +name = "rustls-native-certs" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5bfb394eeed242e909609f56089eecfe5fda225042e8b171791b9c95f5931e5" +dependencies = [ + "openssl-probe", + "rustls-pemfile", + "rustls-pki-types", + "schannel", + "security-framework", +] + +[[package]] +name = "rustls-pemfile" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dce314e5fee3f39953d46bb63bb8a46d40c2f8fb7cc5a3b6cab2bde9721d6e50" +dependencies = [ + "rustls-pki-types", +] + [[package]] name = "rustls-pki-types" version = "1.13.1" @@ -1814,6 +2856,15 @@ version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" +[[package]] +name = "salsa20" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97a22f5af31f73a954c10289c93e8a50cc23d971e80ee446f1f6f7137a088213" +dependencies = [ + "cipher", +] + [[package]] name = "same-file" version = "1.0.6" @@ -1823,6 +2874,15 @@ dependencies = [ "winapi-util", ] +[[package]] +name = "schannel" +version = "0.1.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91c1b7e4904c873ef0710c1f407dde2e6287de2bebc1bbbf7d430bb7cbffd939" +dependencies = [ + "windows-sys 0.61.2", +] + [[package]] name = "schemars" version = "0.9.0" @@ -1853,6 +2913,40 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" +[[package]] +name = "scrypt" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "0516a385866c09368f0b5bcd1caff3366aace790fcd46e2bb032697bb172fd1f" +dependencies = [ + "pbkdf2", + "salsa20", + "sha2", +] + +[[package]] +name = "security-framework" +version = "2.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" +dependencies = [ + "bitflags 2.10.0", + "core-foundation", + "core-foundation-sys", + "libc", + "security-framework-sys", +] + +[[package]] +name = "security-framework-sys" +version = "2.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ce2691df843ecc5d231c0b14ece2acc3efb62c0a398c7e1d875f3983ce020e3" +dependencies = [ + "core-foundation-sys", + "libc", +] + [[package]] name = "semver" version = "1.0.27" @@ -2047,6 +3141,26 @@ version = "1.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03" +[[package]] +name = "socket2" +version = "0.4.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f7916fc008ca5542385b89a3d3ce689953c143e9304a9bf8beec1de48994c0d" +dependencies = [ + "libc", + "winapi", +] + +[[package]] +name = "socket2" +version = "0.5.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e22376abed350d73dd1cd119b57ffccad95b4e585a7cda43e286245ce23c0678" +dependencies = [ + "libc", + "windows-sys 0.52.0", +] + [[package]] name = "socket2" version = "0.6.1" @@ -2057,6 +3171,25 @@ dependencies = [ "windows-sys 0.60.2", ] +[[package]] +name = "spin" +version = "0.9.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" +dependencies = [ + "lock_api", +] + +[[package]] +name = "spki" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d91ed6c858b01f942cd56b37a94b3e0a1798290327d1236e4d9cf4eaca44d29d" 
+dependencies = [ + "base64ct", + "der", +] + [[package]] name = "stable_deref_trait" version = "1.2.1" @@ -2065,10 +3198,11 @@ checksum = "6ce2be8dc25455e1f91df71bfa12ad37d7af1092ae736f3a6cd0e37bc7810596" [[package]] name = "status-panel" -version = "0.1.7" +version = "0.1.8" dependencies = [ "anyhow", "assert_cmd", + "axum 0.8.7", "async-trait", "axum", "base64", @@ -2082,8 +3216,12 @@ dependencies = [ "hmac", "http-body-util", "hyper", + "lapin", "mockito", "nix", + "prost", + "prost-types", + "protoc-bin-vendored", "rand 0.8.5", "regex", "reqwest", @@ -2101,7 +3239,10 @@ dependencies = [ "thiserror", "tokio", "tokio-test", - "tower", + "tokio-tungstenite", + "tonic", + "tonic-build", + "tower 0.5.2", "tower-http", "tracing", "tracing-subscriber", @@ -2167,16 +3308,28 @@ dependencies = [ "windows", ] +[[package]] +name = "tcp-stream" +version = "0.28.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "495b0abdce3dc1f8fd27240651c9e68890c14e9d9c61527b1ce44d8a5a7bd3d5" +dependencies = [ + "cfg-if", + "p12-keystore", + "rustls-connector", + "rustls-pemfile", +] + [[package]] name = "tempfile" version = "3.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2d31c77bdf42a745371d260a26ca7163f1e0924b64afa0b688e61b5a9fa02f16" dependencies = [ - "fastrand", + "fastrand 2.3.0", "getrandom 0.3.4", "once_cell", - "rustix", + "rustix 1.1.2", "windows-sys 0.61.2", ] @@ -2305,7 +3458,7 @@ dependencies = [ "parking_lot", "pin-project-lite", "signal-hook-registry", - "socket2", + "socket2 0.6.1", "tokio-macros", "windows-sys 0.61.2", ] @@ -2363,8 +3516,12 @@ checksum = "d25a406cddcc431a75d3d9afc6a7c0f7428d4891dd973e4d54c56b46127bf857" dependencies = [ "futures-util", "log", + "rustls", + "rustls-pki-types", "tokio", + "tokio-rustls", "tungstenite", + "webpki-roots 0.26.11", ] [[package]] @@ -2380,6 +3537,72 @@ dependencies = [ "tokio", ] +[[package]] +name = "tonic" +version = "0.12.3" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "877c5b330756d856ffcc4553ab34a5684481ade925ecc54bcd1bf02b1d0d4d52" +dependencies = [ + "async-stream", + "async-trait", + "axum 0.7.9", + "base64", + "bytes", + "h2", + "http", + "http-body", + "http-body-util", + "hyper", + "hyper-timeout", + "hyper-util", + "percent-encoding", + "pin-project", + "prost", + "rustls-pemfile", + "socket2 0.5.10", + "tokio", + "tokio-rustls", + "tokio-stream", + "tower 0.4.13", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tonic-build" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9557ce109ea773b399c9b9e5dca39294110b74f1f342cb347a80d1fce8c26a11" +dependencies = [ + "prettyplease", + "proc-macro2", + "prost-build", + "prost-types", + "quote", + "syn", +] + +[[package]] +name = "tower" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" +dependencies = [ + "futures-core", + "futures-util", + "indexmap 1.9.3", + "pin-project", + "pin-project-lite", + "rand 0.8.5", + "slab", + "tokio", + "tokio-util", + "tower-layer", + "tower-service", + "tracing", +] + [[package]] name = "tower" version = "0.5.2" @@ -2402,7 +3625,7 @@ version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d4e6559d53cc268e5031cd8429d05415bc4cb4aefc4aa5d6cc35fbf5b924a1f8" dependencies = [ - "bitflags", + "bitflags 2.10.0", "bytes", "futures-core", "futures-util", @@ -2418,7 +3641,7 @@ dependencies = [ "pin-project-lite", "tokio", "tokio-util", - "tower", + "tower 0.5.2", "tower-layer", "tower-service", "tracing", @@ -2529,6 +3752,8 @@ dependencies = [ "httparse", "log", "rand 0.9.2", + "rustls", + "rustls-pki-types", "sha1", "thiserror", "utf-8", @@ -2638,6 +3863,12 @@ dependencies = [ "libc", ] +[[package]] +name = "waker-fn" +version = "1.2.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "317211a0dc0ceedd78fb2ca9a44aed3d7b9b26f81870d485c07122b4350673b7" + [[package]] name = "walkdir" version = "2.5.0" @@ -2750,6 +3981,15 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "webpki-roots" +version = "0.26.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "521bc38abb08001b01866da9f51eb7c5d647a19260e00054a8c7fd5f9e57f7a9" +dependencies = [ + "webpki-roots 1.0.4", +] + [[package]] name = "webpki-roots" version = "1.0.4" @@ -2868,6 +4108,15 @@ dependencies = [ "windows-link", ] +[[package]] +name = "windows-sys" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" +dependencies = [ + "windows-targets 0.48.5", +] + [[package]] name = "windows-sys" version = "0.52.0" @@ -2895,6 +4144,21 @@ dependencies = [ "windows-link", ] +[[package]] +name = "windows-targets" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" +dependencies = [ + "windows_aarch64_gnullvm 0.48.5", + "windows_aarch64_msvc 0.48.5", + "windows_i686_gnu 0.48.5", + "windows_i686_msvc 0.48.5", + "windows_x86_64_gnu 0.48.5", + "windows_x86_64_gnullvm 0.48.5", + "windows_x86_64_msvc 0.48.5", +] + [[package]] name = "windows-targets" version = "0.52.6" @@ -2928,6 +4192,12 @@ dependencies = [ "windows_x86_64_msvc 0.53.1", ] +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" + [[package]] name = "windows_aarch64_gnullvm" version = "0.52.6" @@ -2940,6 +4210,12 @@ version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"a9d8416fa8b42f5c947f8482c43e7d89e73a173cead56d044f6a56104a6d1b53" +[[package]] +name = "windows_aarch64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" + [[package]] name = "windows_aarch64_msvc" version = "0.52.6" @@ -2952,6 +4228,12 @@ version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b9d782e804c2f632e395708e99a94275910eb9100b2114651e04744e9b125006" +[[package]] +name = "windows_i686_gnu" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" + [[package]] name = "windows_i686_gnu" version = "0.52.6" @@ -2976,6 +4258,12 @@ version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fa7359d10048f68ab8b09fa71c3daccfb0e9b559aed648a8f95469c27057180c" +[[package]] +name = "windows_i686_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" + [[package]] name = "windows_i686_msvc" version = "0.52.6" @@ -2988,6 +4276,12 @@ version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e7ac75179f18232fe9c285163565a57ef8d3c89254a30685b57d83a38d326c2" +[[package]] +name = "windows_x86_64_gnu" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" + [[package]] name = "windows_x86_64_gnu" version = "0.52.6" @@ -3000,6 +4294,12 @@ version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c3842cdd74a865a8066ab39c8a7a473c0778a3f29370b5fd6b4b9aa7df4a499" +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.48.5" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" + [[package]] name = "windows_x86_64_gnullvm" version = "0.52.6" @@ -3012,6 +4312,12 @@ version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0ffa179e2d07eee8ad8f57493436566c7cc30ac536a3379fdf008f47f6bb7ae1" +[[package]] +name = "windows_x86_64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" + [[package]] name = "windows_x86_64_msvc" version = "0.52.6" @@ -3036,6 +4342,34 @@ version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9edde0db4769d2dc68579893f2306b26c6ecfbe0ef499b013d731b7b9247e0b9" +[[package]] +name = "x509-cert" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1301e935010a701ae5f8655edc0ad17c44bad3ac5ce8c39185f75453b720ae94" +dependencies = [ + "const-oid", + "der", + "spki", +] + +[[package]] +name = "x509-parser" +version = "0.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4569f339c0c402346d4a75a9e39cf8dad310e287eef1ff56d4c68e5067f53460" +dependencies = [ + "asn1-rs", + "data-encoding", + "der-parser", + "lazy_static", + "nom", + "oid-registry", + "rusticata-macros", + "thiserror", + "time", +] + [[package]] name = "yoke" version = "0.8.1" diff --git a/Cargo.toml b/Cargo.toml index b21dd1f..c43fc6e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "status-panel" -version = "0.1.7" +version = "0.1.8" edition = "2021" [features] @@ -28,6 +28,11 @@ uuid = { version = "1", features = ["v4"] } chrono = { version = "0.4", features = ["serde"] } serde_yaml = "0.9" futures-util = "0.3" +tokio-tungstenite = { version = "0.28", features = ["rustls-tls-webpki-roots"] } +tonic = { version = "0.12", features = ["tls"] } 
+prost = "0.13" +prost-types = "0.13" +lapin = "2" tera = "1" tower-http = { version = "0.6", features = ["fs"] } base64 = "0.22" @@ -56,6 +61,10 @@ nix = { version = "0.29", features = ["signal"] } name = "status" path = "src/main.rs" +[build-dependencies] +tonic-build = "0.12" +protoc-bin-vendored = "3" + [dev-dependencies] assert_cmd = "2.0" tokio-test = "0.4" diff --git a/Dockerfile b/Dockerfile index 7481a78..f020fa4 100644 --- a/Dockerfile +++ b/Dockerfile @@ -2,6 +2,8 @@ FROM clux/muslrust:stable AS builder WORKDIR /app COPY Cargo.toml Cargo.lock* ./ +COPY build.rs build.rs +COPY proto proto COPY src src COPY templates templates COPY static static @@ -44,4 +46,4 @@ ENV MODE="serve-ui" # CMD ["/usr/local/bin/status", "serve", "--port", "5000", "--with-ui"] ENTRYPOINT ["/usr/local/bin/status"] -CMD ["serve", "--port", "5000", "--with-ui"] \ No newline at end of file +CMD ["serve", "--port", "5000", "--with-ui"] diff --git a/Dockerfile.compose-agent b/Dockerfile.compose-agent index 2e0e329..69aec62 100644 --- a/Dockerfile.compose-agent +++ b/Dockerfile.compose-agent @@ -2,6 +2,8 @@ FROM clux/muslrust:stable AS builder WORKDIR /app COPY Cargo.toml Cargo.lock* ./ +COPY build.rs build.rs +COPY proto proto COPY src src COPY templates templates COPY static static diff --git a/Dockerfile.prod b/Dockerfile.prod index 3f9ee9c..fd567d1 100644 --- a/Dockerfile.prod +++ b/Dockerfile.prod @@ -1,7 +1,10 @@ +# syntax=docker/dockerfile:1.4 FROM clux/muslrust:stable AS builder WORKDIR /app COPY Cargo.toml Cargo.lock* ./ +COPY build.rs build.rs +COPY proto proto COPY src src COPY templates templates COPY static static @@ -21,4 +24,4 @@ ENV RUST_LOG=info EXPOSE 5000 USER 0 ENTRYPOINT ["/status"] -CMD ["serve", "--port", "5000", "--with-ui"] \ No newline at end of file +CMD ["serve", "--port", "5000", "--with-ui"] diff --git a/README.md b/README.md index 3b37314..5c59e88 100644 --- a/README.md +++ b/README.md @@ -37,7 +37,7 @@ curl -sSfL 
https://raw.githubusercontent.com/trydirect/status/master/install.sh Pin a specific version or choose a custom directory: ```bash -VERSION=v0.1.7 curl -sSfL https://raw.githubusercontent.com/trydirect/status/master/install.sh | sh +VERSION=v0.1.8 curl -sSfL https://raw.githubusercontent.com/trydirect/status/master/install.sh | sh INSTALL_DIR=~/.local/bin curl -sSfL https://raw.githubusercontent.com/trydirect/status/master/install.sh | sh ``` @@ -111,6 +111,15 @@ status serve --port 5000 # JSON API only status serve --port 5000 --with-ui # API + web dashboard ``` +## Command Transport Split + +Status Panel uses **two different command transport paths**: + +1. **Normal Status Panel commands** use the dashboard DB queue plus HTTP long-polling. The agent waits on `/api/v1/agent/commands/wait/{deployment_hash}`, executes the command locally, then reports back to `/api/v1/agent/commands/report`. +2. **Agent-executor pipe steps** are a separate path. AMQP/RabbitMQ support belongs to that executor flow, not to the normal Status Panel command queue. + +This means RabbitMQ is **not** the transport for regular `health`, `logs`, `deploy_app`, or other Status Panel commands. Pipe operations such as `activate_pipe`, `deactivate_pipe`, and `trigger_pipe` run inside the agent runtime, but the normal command delivery path is still DB queue + long-polling. + ## Build from Source ```bash @@ -151,6 +160,8 @@ Or use Docker Compose with the included `docker-compose.yml` for a full setup wi | `POST` | `/api/v1/commands/enqueue` | Enqueue a command | | `POST` | `/api/v1/commands/report` | Report execution result | +The local `/api/v1/commands/*` endpoints are the agent's own Axum API surface. When connected to the remote dashboard, the daemon uses the `/api/v1/agent/commands/*` contract instead. AMQP-backed executor traffic is separate from both of these HTTP command paths. 
+ ### Self-Update | Method | Path | Description | @@ -179,6 +190,7 @@ The agent accepts signed commands from the Stacker dashboard covering the full l | `config_diff` | Detect configuration drift | | `configure_proxy` | Nginx proxy management | | `configure_firewall` | iptables policy management | +| `activate_pipe` / `deactivate_pipe` / `trigger_pipe` | Agent-side pipe registration and runtime execution | ## Security diff --git a/TODO.md b/TODO.md index 14726ae..d4767c1 100644 --- a/TODO.md +++ b/TODO.md @@ -3,13 +3,13 @@ ## Marketplace Integration: Agent Registration & Local Deploy ### Agent Self-Registration (for curl one-liner and manual install entry points) -- [ ] **`POST /api/v1/register`** (local endpoint on Status Panel) — Triggered after install.sh completes +- [x] **`POST /api/v1/register`** (local endpoint on Status Panel) — Triggered after install.sh completes - Accept `{ purchase_token, stack_id }` from install script - Collect server fingerprint (hostname, IP, OS, CPU, RAM, disk) - Call Stacker Server: `POST /api/v1/agents/register { purchase_token, server_fingerprint, stack_id }` - Store returned `agent_id`, `deployment_hash`, `dashboard_url` locally - Begin heartbeat loop to Stacker Server -- [ ] **Local `stacker deploy` trigger** — After registration, Status Panel invokes Stacker CLI locally +- [x] **Local `stacker deploy` trigger** — After registration, Status Panel invokes Stacker CLI locally - `stacker deploy --from /opt/stacker/stacks/{stack_id}/` (the downloaded archive) - Monitor deploy progress, report status back to Stacker Server via existing agent report endpoint - No Install Service involved — fully local execution @@ -17,31 +17,63 @@ ### Dashboard Linking (optional, user-initiated) - [x] Provide web UI page at `http://localhost:{STATUS_PORT}/link` to connect Status Panel to TryDirect dashboard - [x] Support unlinking from dashboard (agent continues to work standalone) -- [ ] **Login-based linking flow (Entry Point C):** +- [x] 
**Login-based linking flow (Entry Point C):** - User logs in with TryDirect email + password from Status Panel UI - Status Panel calls Stacker: `POST /api/v1/agent/login { email, password }` → returns `session_token` + user's deployments - User selects a deployment from the list → Status Panel calls Stacker: `POST /api/v1/agent/link { session_token, deployment_id, server_fingerprint, capabilities }` - Stacker validates session, checks user owns the deployment, issues `agent_id` + `agent_token` - No purchase_token needed — user's identity is the trust anchor - `purchase_token` flow retained only for headless Entry Point B (curl one-liner) -- [ ] Add "Use Standalone" option for users without TryDirect account (skip linking entirely) +- [x] Add "Use Standalone" option for users without TryDirect account (skip linking entirely) ### Standalone Status Panel Entry Point (Phase 2) -- [ ] **"Deploy a Stack" page** in Status Panel web UI +- [x] **"Deploy a Stack" page** in Status Panel web UI - Browse available stacks from marketplace API: `GET /api/v1/marketplace/stacks` - User selects stack → Status Panel downloads archive + calls `stacker deploy` locally - This enables Entry Point C: user installs Status Panel first, then deploys stacks from its UI ### Notifications Relay -- [ ] Forward marketplace notifications (stack published, update available) from Stacker Server to Status Panel UI -- [ ] Show "Update Available" badge when a newer version of the deployed stack exists +- [x] Forward marketplace notifications (stack published, update available) from Stacker Server to Status Panel UI +- [x] Show "Update Available" badge when a newer version of the deployed stack exists --- -- Align build and runtime images so the compiled `status` binary links against the same glibc version (or older) as production. -- Add a musl-based build target and image variant to provide a statically linked binary that avoids glibc drift. 
+- ~~Align build and runtime images so the compiled `status` binary links against the same glibc version (or older) as production.~~ ✅ Done — Dockerfiles use `clux/muslrust:stable` → `gcr.io/distroless/cc`, musl avoids glibc drift. +- ~~Add a musl-based build target and image variant to provide a statically linked binary that avoids glibc drift.~~ ✅ Done — CI builds `x86_64-unknown-linux-musl` target, releases musl binary. - Update CI to build/test using the production base image to prevent future GLIBC_x.y.z mismatches. - Add a simple container start-up check that surfaces linker/runtime errors early in the pipeline. +## Missing Features Implementation Plan (2026-04) + +### Phase 1 - Reliability and Production Readiness +- [x] **[status-auth-refresh]** Refresh agent auth immediately on 401/403 and retry polling/report calls with backoff. + - Wire the retry path into the polling loop instead of waiting for the periodic refresh task. + - Define the Vault path/role contract for `status_panel_token` and document failure handling. +- [x] **[status-alerting]** Add outbound alert delivery for unhealthy containers, command failures, and host-level incidents. + - Webhook delivery with env-configured thresholds (`ALERT_WEBHOOK_URL`, CPU/memory/disk thresholds). + - Includes alert deduplication, severity escalation, and recovery notifications. +- [x] **[status-command-provenance]** Surface which control plane executed each action (`status_panel` vs `compose_agent`). + - Expose provenance in command reports, health metrics, and `/capabilities`-driven diagnostics. + - Publish and implement the separate token/cache schema for `compose_agent_token`. +- [ ] **[status-ssl-renewal]** Automate SSL certificate renewal for hosts that enable HTTPS. + - Add renewal scheduling, renewal result logging, and certificate reload without manual intervention. 
+ +### Phase 2 - Data Safety and Day-2 Operations +- [ ] **[status-volume-backups]** Add scheduled backup and restore support for Docker volumes. + - Support policy-driven backups for stateful services, retention, restore validation, and signed metadata. + - Reuse existing backup/security primitives where possible instead of introducing a separate backup path. + +### Phase 3 - Standalone and Dashboard UX +- [x] **[status-login-linking]** Complete the login-based dashboard linking flow and standalone mode. + - Finish the UI + daemon wiring for email/password linking to an owned deployment. + - Add "Use Standalone" so the panel is usable without a TryDirect account. +- [x] **[status-deploy-stack-ui]** Build the local "Deploy a Stack" flow in Status Panel. + - Browse marketplace stacks, download the selected archive, and trigger local `stacker deploy`. + - Show deployment progress, update availability, and compatibility checks in the local UI. + +### Cross-Project Coordination +- [ ] Coordinate `status-deploy-stack-ui` with Stacker marketplace archive/download validation. +- [ ] Coordinate `status-command-provenance` and future pipe execution with the Stacker control-plane roadmap. + ## Status Panel Agent Commands (Pull Model) **Key principle**: Agent polls Stacker; Stacker never pushes to the agent. Agent is responsible for adding HMAC headers on its outbound calls. @@ -51,14 +83,14 @@ - [x] Restart: restart container by app_code, then emit updated state in report payload; include errors array on failure. - [x] Reporting: call Stacker `POST /api/v1/agent/commands/report` with HMAC headers (`X-Agent-Id`, `X-Timestamp`, `X-Request-Id`, `X-Agent-Signature`) signed using Vault token. - [x] Wire agent to poll loop: `GET /api/v1/agent/commands/wait/{deployment_hash}` with HMAC headers. -- [ ] On 401/403, refresh token from Vault and retry with backoff (which Vault path/role should we use for the agent token?). 
+- [x] On 401/403, refresh token from Vault and retry with backoff (TokenProvider with Vault → env fallback, 10s cooldown). - [x] Ensure agent generates HMAC signature for every outbound request (wait + report + app status); no secrets expected from Stacker side. ## Compose Agent Sidecar - [x] Ship a separate `compose-agent` container (Docker Compose + MCP Gateway) deployed alongside the Status Panel container; Service file should ensure it mounts the Docker socket while Status Panel does not. - [x] Implement watchdog to restart only the compose container on failure/glibc mismatch without touching the Status Panel daemon; prove via integration test. -- [ ] Expose health metrics indicating which control plane executed each command (`status_panel` vs `compose_agent`) so ops can track rollout and fallbacks. -- [ ] Publish Vault secret schema: `secret/agent/{hash}/status_panel_token` and `secret/agent/{hash}/compose_agent_token`; refresh + cache them independently. +- [x] Expose health metrics indicating which control plane executed each command (`status_panel` vs `compose_agent`) so ops can track rollout and fallbacks. +- [x] Publish Vault secret schema: `secret/agent/{hash}/status_panel_token` and `secret/agent/{hash}/compose_agent_token`; refresh + cache them independently. - [x] Add config flag to disable compose agent (legacy mode) and emit warning log so Blog receives `compose_agent=false` via `/capabilities`. 
## Kata Containers Support (Stacker Server)

diff --git a/build.rs b/build.rs
new file mode 100644
index 0000000..b14b34a
--- /dev/null
+++ b/build.rs
@@ -0,0 +1,78 @@
+use std::path::{Path, PathBuf};
+use std::process::Command;
+
+fn main() -> Result<(), Box<dyn std::error::Error>> {
+    emit_display_version();
+    println!("cargo:rerun-if-changed=proto/pipe.proto");
+    // Vendor protoc so builds work without a system-installed protoc
+    let _protoc = protoc_bin_vendored::protoc_bin_path().expect("vendored protoc not found");
+    std::env::set_var("PROTOC", &_protoc);
+    tonic_build::configure()
+        .build_server(false)
+        .build_client(true)
+        .compile_protos(&["proto/pipe.proto"], &["proto"])?;
+    Ok(())
+}
+
+fn emit_display_version() {
+    let cargo_version = std::env::var("CARGO_PKG_VERSION").unwrap_or_else(|_| "0.0.0".to_string());
+    let display_version = match git_short_hash() {
+        Some(hash) => format!("{cargo_version} ({hash})"),
+        None => cargo_version,
+    };
+    println!("cargo:rustc-env=STATUS_DISPLAY_VERSION={display_version}");
+
+    if let Some(git_dir) = git_dir() {
+        emit_git_rerun_paths(&git_dir);
+    }
+}
+
+fn git_short_hash() -> Option<String> {
+    git_output(&["rev-parse", "--short=7", "HEAD"])
+}
+
+fn git_dir() -> Option<PathBuf> {
+    let manifest_dir = PathBuf::from(std::env::var("CARGO_MANIFEST_DIR").ok()?);
+    let git_dir = git_output(&["rev-parse", "--git-dir"])?;
+    let path = PathBuf::from(git_dir);
+    Some(if path.is_absolute() {
+        path
+    } else {
+        manifest_dir.join(path)
+    })
+}
+
+fn emit_git_rerun_paths(git_dir: &Path) {
+    let head_path = git_dir.join("HEAD");
+    println!("cargo:rerun-if-changed={}", head_path.display());
+
+    let packed_refs = git_dir.join("packed-refs");
+    println!("cargo:rerun-if-changed={}", packed_refs.display());
+
+    if let Ok(head_contents) = std::fs::read_to_string(&head_path) {
+        if let Some(reference) = head_contents.strip_prefix("ref: ") {
+            let ref_path = git_dir.join(reference.trim());
+            println!("cargo:rerun-if-changed={}", ref_path.display());
+        }
+    }
+}
+
+fn 
git_output(args: &[&str]) -> Option<String> {
+    let manifest_dir = std::env::var("CARGO_MANIFEST_DIR").ok()?;
+    let output = Command::new("git")
+        .args(args)
+        .current_dir(manifest_dir)
+        .output()
+        .ok()?;
+    if !output.status.success() {
+        return None;
+    }
+
+    let value = String::from_utf8(output.stdout).ok()?;
+    let trimmed = value.trim();
+    if trimmed.is_empty() {
+        None
+    } else {
+        Some(trimmed.to_string())
+    }
+}
diff --git a/docs/AGENT_ROTATION_GUIDE.md b/docs/AGENT_ROTATION_GUIDE.md
index 95b5373..4da38dc 100644
--- a/docs/AGENT_ROTATION_GUIDE.md
+++ b/docs/AGENT_ROTATION_GUIDE.md
@@ -143,3 +143,131 @@ spawn(refresh_loop(vault.clone(), deployment_hash.clone(), cache.clone()));
 - Action: check request headers, clock skew, and signature; ensure using current token
 - Symptoms: Vault errors
 - Action: verify `VAULT_ADDRESS`, `VAULT_TOKEN`, network connectivity, and KV path prefix
+
+---
+
+## Auth Refresh on 401/403 — Implementation Details
+
+### Problem
+
+When the agent token expires or is rotated server-side, all outbound requests
+(polling, reporting, notifications) receive 401/403 from Stacker. Previously
+these were treated as generic errors with fixed backoff, causing prolonged
+downtime until manual restart. 
+ +### Solution: `TokenProvider` + Retry Helpers + +Two new modules handle automatic recovery: + +| Module | Path | Purpose | +|--------|------|---------| +| `TokenProvider` | `src/security/token_provider.rs` | Shared mutable token with on-demand refresh | +| `RetryClient` | `src/transport/retry.rs` | HTTP helpers that detect 401/403 and retry | + +### Request Flow + +``` +Daemon / Notification Poller + │ + ▼ +┌───────────────────┐ +│ TokenProvider │ .get() → current token +│ .get() │ +└────────┬──────────┘ + ▼ +┌───────────────────┐ +│ Build signed │ build_signed_headers(agent_id, token, body) +│ HMAC headers │ → Bearer + X-Agent-Signature + X-Timestamp +└────────┬──────────┘ + ▼ +┌───────────────────┐ +│ Send HTTP │ signed_get_with_retry / signed_post_with_retry +│ request │ +└────────┬──────────┘ + ▼ +┌────── Status code? ──────┐ +│ │ │ +200/204 401/403 5xx / network error +│ │ │ +✅ Done ▼ ▼ + ┌──────────────┐ Exponential backoff + │ TokenProvider │ 2s → 4s → 8s → … 60s cap + │ .refresh() │ retry up to 3× + └──────┬───────┘ + │ + ├─ 1. Try Vault: + │ vault_client.fetch_agent_token(deployment_hash) + │ + ├─ 2. If Vault fails or returns same token: + │ re-read AGENT_TOKEN from environment + │ + ├─ 3. 
Cooldown: 10s between refresh attempts
+  │      (prevents hammering Vault on repeated failures)
+  │
+  ▼
+  Retry request once with new token
+       │
+  ┌────┴────┐
+  200       401 again
+  │         │
+  ✅ Done   Propagate error
+            (token truly invalid)
+```
+
+### TokenProvider API
+
+```rust
+use crate::security::token_provider::TokenProvider;
+
+// Create (both daemon and serve mode)
+let tp = TokenProvider::new(initial_token, Some(vault_client), deployment_hash);
+// or
+let tp = TokenProvider::from_env(Some(vault_client));
+
+tp.get().await     // → current token (Arc<RwLock<String>>)
+tp.refresh().await // → Ok(true) if token changed, Ok(false) if unchanged
+tp.swap(new).await // → direct swap (used by background rotation task)
+```
+
+### Wired Consumers
+
+| Consumer | File | Mechanism |
+|----------|------|-----------|
+| Daemon polling (`wait_for_command`) | `src/agent/daemon.rs` | `wait_for_command_with_retry` (auth-only retry) |
+| Daemon reporting (`report_result`) | `src/agent/daemon.rs` | `report_result_with_retry` (full retry) |
+| Daemon app status | `src/agent/daemon.rs` | `update_app_status_with_retry` (full retry) |
+| Notification poller | `src/comms/notifications.rs` | Explicit 401/403 check → `refresh()` → 5s backoff |
+
+### RetryConfig Presets
+
+```rust
+use crate::transport::retry::RetryConfig;
+
+RetryConfig::default()   // 1 auth retry + 3 server retries (2–60s backoff)
+RetryConfig::auth_only() // 1 auth retry + 0 server retries (for long-poll)
+```
+
+### Refresh Strategy
+
+1. **Vault first** — If `VaultClient` is configured, call
+   `fetch_agent_token(deployment_hash)`. If it returns a different token,
+   swap it in and retry.
+2. **Environment fallback** — If Vault is unavailable or returns the same
+   token, re-read `AGENT_TOKEN` from the process environment. This covers
+   cases where an orchestrator (Docker, systemd) injects a new token via
+   env without restarting the process.
+3. 
**Cooldown** — A 10-second minimum gap between refresh attempts prevents + hammering Vault during cascading failures. +4. **Single retry** — After refreshing, the request is retried exactly once. + If it still gets 401/403, the error propagates (the token is truly invalid + and requires operator intervention). + +### Environment Variables + +| Variable | Default | Purpose | +|----------|---------|---------| +| `AGENT_TOKEN` | _(empty)_ | Bearer token for Stacker API auth | +| `DEPLOYMENT_HASH` | `"default"` | Vault path isolation key | +| `VAULT_ADDRESS` | _(none)_ | Vault server URL (enables Vault refresh) | +| `VAULT_TOKEN` | _(none)_ | Vault auth token | +| `VAULT_AGENT_PATH_PREFIX` | `"status_panel"` | Vault KV path prefix | diff --git a/docs/COMPOSE_AGENT_SIDECAR.md b/docs/COMPOSE_AGENT_SIDECAR.md index 8af7d13..0da0cf4 100644 --- a/docs/COMPOSE_AGENT_SIDECAR.md +++ b/docs/COMPOSE_AGENT_SIDECAR.md @@ -35,6 +35,14 @@ The Compose Agent Sidecar is a separate container that handles Docker Compose op 5. **Watchdog Monitoring**: Automatic health checks and restart logic for the compose-agent container. +### Command Transport Boundary + +The sidecar split does **not** change the main Status Panel command transport: + +- **Normal dashboard commands** still move through the Status Panel DB queue and the agent's HTTP long-poll loop (`/api/v1/agent/commands/wait/{deployment_hash}` → execute locally → `/api/v1/agent/commands/report`). +- **AMQP/RabbitMQ** belongs to the separate agent-executor pipe-step path, not to the normal Status Panel command queue. +- Pipe commands such as `activate_pipe`, `deactivate_pipe`, and `trigger_pipe` now execute inside the agent runtime, but their command delivery is still initiated through the regular dashboard command path unless a separate executor flow is used. 
+
 ## Configuration
 
 ### Docker Compose Setup
diff --git a/docs/LONG_POLLING_QUICKSTART.md b/docs/LONG_POLLING_QUICKSTART.md
index 10ee51a..24f943b 100644
--- a/docs/LONG_POLLING_QUICKSTART.md
+++ b/docs/LONG_POLLING_QUICKSTART.md
@@ -13,6 +13,8 @@ COMMAND_TIMEOUT_SECS=300
 ```
 > **Note:** The commands in this quick start target the agent's local Axum API (`/api/v1/commands/*`). When the agent polls the remote Stacker dashboard it calls the `/api/v1/agent/commands/*` endpoints and sends `Authorization: Bearer $AGENT_TOKEN` (for example `/api/v1/agent/commands/wait/{deployment_hash}`).
+>
+> **Transport split:** regular Status Panel commands still use the DB queue + HTTP long-polling path. AMQP/RabbitMQ belongs to the separate agent-executor pipe-step flow and is not the transport used for normal `health`, `logs`, `deploy_app`, or other dashboard commands.
 
 ## 2️⃣ Start the Agent
diff --git a/proto/pipe.proto b/proto/pipe.proto
new file mode 100644
index 0000000..bd068bc
--- /dev/null
+++ b/proto/pipe.proto
@@ -0,0 +1,31 @@
+syntax = "proto3";
+
+package pipe;
+
+import "google/protobuf/struct.proto";
+
+service PipeService {
+  // Send data to a pipe target (unary)
+  rpc Send(PipeMessage) returns (PipeResponse);
+
+  // Subscribe to a pipe source (server-streaming)
+  rpc Subscribe(SubscribeRequest) returns (stream PipeMessage);
+}
+
+message PipeMessage {
+  string pipe_instance_id = 1;
+  string step_id = 2;
+  google.protobuf.Struct payload = 3;
+  int64 timestamp_ms = 4;
+}
+
+message PipeResponse {
+  bool success = 1;
+  string message = 2;
+}
+
+message SubscribeRequest {
+  string pipe_instance_id = 1;
+  string step_id = 2;
+  map<string, string> filters = 3;
+}
diff --git a/src/agent/daemon.rs b/src/agent/daemon.rs
index c492906..da4e930 100644
--- a/src/agent/daemon.rs
+++ b/src/agent/daemon.rs
@@ -12,8 +12,12 @@ use crate::agent::config::Config;
 use crate::commands::executor::CommandExecutor;
 use crate::commands::firewall::FirewallPolicy;
 use 
crate::commands::validator::CommandValidator; -use crate::commands::TimeoutStrategy; -use crate::monitoring::{spawn_heartbeat, MetricsCollector, MetricsSnapshot, MetricsStore}; +use crate::commands::{default_pipe_runtime_state_path, PipeRuntime, TimeoutStrategy}; +use crate::monitoring::{ + spawn_heartbeat, ControlPlane, MetricsCollector, MetricsSnapshot, MetricsStore, +}; +use crate::security::token_provider::TokenProvider; +use crate::security::vault_client::VaultClient; use crate::transport::{http_polling, CommandResult}; use serde_json::{json, Value}; @@ -28,12 +32,14 @@ pub async fn run(config_path: String) -> Result<()> { .or(Some(cfg.compose_agent_enabled)) .unwrap_or(false); - let control_plane = std::env::var("CONTROL_PLANE") - .ok() - .or(cfg.control_plane.clone()) - .unwrap_or_else(|| "status_panel".to_string()); + let control_plane = ControlPlane::from_value( + std::env::var("CONTROL_PLANE") + .ok() + .as_deref() + .or(cfg.control_plane.as_deref()), + ); - if !compose_agent_enabled && control_plane == "status_panel" { + if !compose_agent_enabled && control_plane == ControlPlane::StatusPanel { warn!("compose_agent=false - running in legacy mode (Status Panel handles all operations)"); } else if compose_agent_enabled { info!("compose_agent=true - compose-agent sidecar handling Docker operations"); @@ -51,7 +57,25 @@ pub async fn run(config_path: String) -> Result<()> { .map(Duration::from_secs) .unwrap_or(Duration::from_secs(10)); - let heartbeat_handle = spawn_heartbeat(collector, store, metrics_interval, tx, webhook.clone()); + let alert_manager = { + let cfg = crate::monitoring::alerting::AlertConfig::from_env(); + let mgr = crate::monitoring::alerting::AlertManager::new(cfg); + if mgr.is_enabled() { + info!("outbound alerting enabled"); + Some(std::sync::Arc::new(mgr)) + } else { + None + } + }; + + let heartbeat_handle = spawn_heartbeat( + collector, + store, + metrics_interval, + tx, + webhook.clone(), + alert_manager, + ); info!( interval_secs = 
metrics_interval.as_secs(), webhook = webhook.as_deref().unwrap_or("none"), @@ -81,6 +105,10 @@ pub async fn run(config_path: String) -> Result<()> { warn!("AGENT_TOKEN is not set; authenticated dashboard requests will fail"); } + // Build a shared token provider (Vault → env fallback on 401/403) + let vault_client = VaultClient::from_env().ok().flatten(); + let token_provider = TokenProvider::new(agent_token, vault_client, deployment_hash.clone()); + info!( dashboard_url = %dashboard_url, agent_id = %agent_id, @@ -94,15 +122,31 @@ pub async fn run(config_path: String) -> Result<()> { // Build firewall policy from config (no API port in daemon mode) let firewall_policy = FirewallPolicy::from_config(&cfg, None); + let pipe_runtime = PipeRuntime::new(); + pipe_runtime + .configure_persistence(default_pipe_runtime_state_path(Some(&config_path))) + .await; + match pipe_runtime.restore_from_disk().await { + Ok(restored) if restored > 0 => { + info!(restored, "restored persisted pipe runtime registrations"); + } + Ok(_) => {} + Err(error) => { + warn!(error = %error, "failed to restore persisted pipe runtime registrations"); + } + } + let ctx = PollingContext { dashboard_url, deployment_hash, agent_id, - agent_token, + token_provider, polling_timeout, polling_backoff, command_timeout, firewall_policy, + control_plane, + pipe_runtime, }; // Spawn the long-polling loop @@ -126,11 +170,13 @@ struct PollingContext { dashboard_url: String, deployment_hash: String, agent_id: String, - agent_token: String, + token_provider: TokenProvider, polling_timeout: u64, polling_backoff: u64, command_timeout: u64, firewall_policy: FirewallPolicy, + control_plane: ControlPlane, + pipe_runtime: PipeRuntime, } /// Long-polling loop: continuously waits for commands and executes them @@ -138,11 +184,11 @@ async fn polling_loop(ctx: PollingContext) { let executor = CommandExecutor::new(); loop { - match http_polling::wait_for_command( + match http_polling::wait_for_command_with_retry( 
&ctx.dashboard_url, &ctx.deployment_hash, &ctx.agent_id, - &ctx.agent_token, + &ctx.token_provider, ctx.polling_timeout, None, ) @@ -201,7 +247,14 @@ async fn execute_and_report( command_type = %cmd.name, "executing stacker command" ); - match execute_stacker_command(&cmd, &stacker_cmd, &ctx.firewall_policy).await { + match execute_stacker_command( + &cmd, + &stacker_cmd, + &ctx.firewall_policy, + &ctx.pipe_runtime, + ) + .await + { Ok(result) => result, Err(e) => { error!(command_id = %cmd.command_id, error = %e, "stacker command execution failed"); @@ -211,6 +264,7 @@ async fn execute_and_report( result: None, error: Some(e.to_string()), completed_at: Utc::now().to_rfc3339_opts(SecondsFormat::Secs, true), + executed_by: Some(ctx.control_plane.to_string()), ..CommandResult::default() } } @@ -232,6 +286,7 @@ async fn execute_and_report( result: None, error: Some(format!("Command validation failed: {}", e)), completed_at: Utc::now().to_rfc3339_opts(SecondsFormat::Secs, true), + executed_by: Some(ctx.control_plane.to_string()), ..CommandResult::default() } } else { @@ -254,6 +309,7 @@ async fn execute_and_report( })), error: None, completed_at: Utc::now().to_rfc3339_opts(SecondsFormat::Secs, true), + executed_by: Some(ctx.control_plane.to_string()), ..CommandResult::default() }, Err(e) => CommandResult { @@ -262,6 +318,7 @@ async fn execute_and_report( result: None, error: Some(e.to_string()), completed_at: Utc::now().to_rfc3339_opts(SecondsFormat::Secs, true), + executed_by: Some(ctx.control_plane.to_string()), ..CommandResult::default() }, } @@ -276,6 +333,7 @@ async fn execute_and_report( result: None, error: Some(format!("Invalid command parameters: {}", e)), completed_at: Utc::now().to_rfc3339_opts(SecondsFormat::Secs, true), + executed_by: Some(ctx.control_plane.to_string()), ..CommandResult::default() } } @@ -287,16 +345,17 @@ async fn execute_and_report( status = %cmd_result.status, "reporting command result to stacker" ); - http_polling::report_result( + 
http_polling::report_result_with_retry( &ctx.dashboard_url, &ctx.agent_id, - &ctx.agent_token, + &ctx.token_provider, &cmd_result.command_id, &ctx.deployment_hash, &cmd_result.status, &cmd_result.result, &cmd_result.error, &cmd_result.completed_at, + cmd_result.executed_by.as_deref(), ) .await?; info!( @@ -305,10 +364,10 @@ async fn execute_and_report( ); if let Some(app_status) = build_app_status_update(&cmd_result) { - if let Err(e) = http_polling::update_app_status( + if let Err(e) = http_polling::update_app_status_with_retry( &ctx.dashboard_url, &ctx.agent_id, - &ctx.agent_token, + &ctx.token_provider, &app_status, ) .await diff --git a/src/commands/mod.rs b/src/commands/mod.rs index c34147c..972f14d 100644 --- a/src/commands/mod.rs +++ b/src/commands/mod.rs @@ -16,7 +16,10 @@ pub use deploy::{ pub use docker_executor::execute_docker_operation; pub use docker_ops::DockerOperation; pub use self_update::{get_update_status, start_update_job, UpdateJobs, UpdatePhase, UpdateStatus}; -pub use stacker::{execute_stacker_command, parse_stacker_command, StackerCommand}; +pub use stacker::{ + default_pipe_runtime_state_path, execute_stacker_command, parse_stacker_command, PipeRuntime, + StackerCommand, +}; pub use timeout::{TimeoutPhase, TimeoutStrategy, TimeoutTracker}; pub use validator::{CommandValidator, ValidatorConfig}; pub use version_check::check_remote_version; diff --git a/src/commands/stacker.rs b/src/commands/stacker.rs index 586586c..78cf579 100644 --- a/src/commands/stacker.rs +++ b/src/commands/stacker.rs @@ -1,18 +1,31 @@ use anyhow::{bail, Context, Result}; -#[cfg(feature = "docker")] use chrono::{SecondsFormat, Utc}; +use futures_util::StreamExt; +use lapin::{ + options::{BasicAckOptions, BasicConsumeOptions, QueueBindOptions}, + types::FieldTable, + Connection, ConnectionProperties, +}; #[cfg(feature = "docker")] use regex::Regex; use serde::Deserialize; use serde::Serialize; -#[cfg(any(feature = "docker", test))] use serde_json::json; 
-#[cfg(any(feature = "docker", test))] use serde_json::Value; +use std::collections::HashMap; #[cfg(feature = "docker")] -use std::collections::{HashMap, HashSet}; +use std::collections::HashSet; +#[cfg(unix)] +use std::os::unix::fs::PermissionsExt; +use std::path::PathBuf; +use std::sync::Arc; #[cfg(feature = "docker")] use std::sync::OnceLock; +use tokio::io::AsyncWriteExt; +use tokio::sync::RwLock; +use tokio::task::AbortHandle; +use tokio::time::Duration; +use tracing::{debug, info, warn}; #[cfg(feature = "docker")] use crate::transport::CommandError; @@ -37,6 +50,546 @@ pub enum ContainerRuntime { Kata, } +#[cfg(all(test, feature = "docker"))] +mod trigger_pipe_handler_tests { + use super::*; + use mockito::{Matcher, Server}; + + fn make_trigger_agent_command() -> AgentCommand { + AgentCommand { + id: "cmd-trigger".into(), + command_id: "cmd-trigger".into(), + name: "trigger_pipe".into(), + params: json!({}), + deployment_hash: Some("dep-123".into()), + app_code: None, + } + } + + #[tokio::test] + async fn handle_trigger_pipe_posts_mapped_payload_to_external_target() { + let mut server = Server::new_async().await; + let mock = server + .mock("POST", "/webhook/pipe") + .match_body(Matcher::Exact(r#"{"email":"dev@try.direct"}"#.into())) + .with_status(200) + .with_header("content-type", "application/json") + .with_body(r#"{"accepted":true}"#) + .create_async() + .await; + + let agent_cmd = make_trigger_agent_command(); + let pipe_runtime = PipeRuntime::new(); + let data = TriggerPipeCommand { + deployment_hash: "dep-123".into(), + pipe_instance_id: "11111111-1111-1111-1111-111111111111".into(), + input_data: Some(json!({ "user": { "email": "dev@try.direct" } })), + source_container: None, + source_endpoint: "/".into(), + source_method: "GET".into(), + target_url: Some(server.url()), + target_container: None, + target_endpoint: "/webhook/pipe".into(), + target_method: "POST".into(), + field_mapping: Some(json!({ "email": "$.user.email" })), + trigger_type: 
"manual".into(), + }; + + let result = handle_trigger_pipe(&agent_cmd, &data, &pipe_runtime) + .await + .expect("trigger_pipe should execute"); + + mock.assert_async().await; + assert_eq!(result.status, "success"); + assert!(result.error.is_none()); + + let body = result.result.expect("trigger_pipe result body"); + assert_eq!(body["success"], true); + assert_eq!(body["mapped_data"], json!({ "email": "dev@try.direct" })); + assert_eq!(body["target_response"]["transport"], "http"); + assert_eq!(body["target_response"]["status"], 200); + assert_eq!(body["target_response"]["delivered"], true); + assert_eq!(body["target_response"]["body"], json!({ "accepted": true })); + assert_eq!(body["lifecycle"], Value::Null); + } + + #[tokio::test] + async fn handle_trigger_pipe_requires_external_target_url() { + let agent_cmd = make_trigger_agent_command(); + let pipe_runtime = PipeRuntime::new(); + let data = TriggerPipeCommand { + deployment_hash: "dep-123".into(), + pipe_instance_id: "11111111-1111-1111-1111-111111111111".into(), + input_data: Some(json!({ "user": { "email": "dev@try.direct" } })), + source_container: None, + source_endpoint: "/".into(), + source_method: "GET".into(), + target_url: None, + target_container: None, + target_endpoint: "/webhook/pipe".into(), + target_method: "POST".into(), + field_mapping: Some(json!({ "email": "$.user.email" })), + trigger_type: "manual".into(), + }; + + let result = handle_trigger_pipe(&agent_cmd, &data, &pipe_runtime) + .await + .expect("trigger_pipe should return structured failure"); + + assert_eq!(result.status, "failed"); + assert_eq!( + result.error.as_deref(), + Some("trigger_pipe requires target_url or target_container") + ); + } + + #[test] + fn build_trigger_pipe_container_command_posts_json_payload() { + let command = build_trigger_pipe_container_command( + "/webhook/pipe", + "POST", + &json!({ "email": "dev@try.direct", "name": "O'Reilly" }), + ); + + assert!(command.contains("curl -sS -X POST")); + 
assert!(command.contains("http://127.0.0.1/webhook/pipe")); + assert!(command.contains("\"email\":\"dev@try.direct\"")); + assert!(!command.contains("\"name\":\"O'Reilly\"")); + assert!(command.contains("Reilly")); + assert!(command.contains("%{http_code}")); + } + + #[test] + fn build_trigger_pipe_source_command_fetches_json_payload() { + let command = build_trigger_pipe_source_command("/source/data", "get"); + + assert!(command.contains("curl -sS -X GET")); + assert!(command.contains("http://127.0.0.1/source/data")); + assert!(command.contains("%{http_code}")); + } + + #[test] + fn build_trigger_pipe_container_command_normalizes_invalid_method() { + let command = + build_trigger_pipe_container_command("/webhook/pipe", "POST; rm -rf /", &json!({})); + + assert!(command.contains("curl -sS -X POST ")); + assert!(!command.contains("rm -rf")); + } + + #[test] + fn normalize_trigger_pipe_method_falls_back_to_default() { + assert_eq!(normalize_trigger_pipe_method(" patch ", "POST"), "PATCH"); + assert_eq!( + normalize_trigger_pipe_method("POST;echo nope", "POST"), + "POST" + ); + assert_eq!(normalize_trigger_pipe_method("", "GET"), "GET"); + } + + #[test] + fn given_http_target_response_when_delivery_succeeds_then_report_includes_transport_and_delivery_status( + ) { + let response = + build_trigger_pipe_target_response("http", Some(202), json!({"accepted": true})); + + assert_eq!(response["transport"], "http"); + assert_eq!(response["status"], 202); + assert_eq!(response["delivered"], true); + assert_eq!(response["body"], json!({"accepted": true})); + } + + #[test] + fn given_target_send_error_when_delivery_fails_then_report_preserves_transport_context() { + let response = build_trigger_pipe_target_response("websocket", None, Value::Null); + + assert_eq!(response["transport"], "websocket"); + assert_eq!(response["status"], Value::Null); + assert_eq!(response["delivered"], false); + assert_eq!(response["body"], Value::Null); + } + + #[test] + fn 
pipe_source_worker_kind_recognizes_rabbitmq_sources() { + assert_eq!( + pipe_source_worker_kind("rabbitmq"), + Some(PipeSourceWorkerKind::Amqp) + ); + assert_eq!( + pipe_source_worker_kind("amqp"), + Some(PipeSourceWorkerKind::Amqp) + ); + } + + #[tokio::test] + async fn handle_trigger_pipe_requires_input_or_source_details() { + let agent_cmd = make_trigger_agent_command(); + let pipe_runtime = PipeRuntime::new(); + let data = TriggerPipeCommand { + deployment_hash: "dep-123".into(), + pipe_instance_id: "11111111-1111-1111-1111-111111111111".into(), + input_data: None, + source_container: None, + source_endpoint: "/source/data".into(), + source_method: "GET".into(), + target_url: None, + target_container: Some("target-app".into()), + target_endpoint: "/webhook/pipe".into(), + target_method: "POST".into(), + field_mapping: Some(json!({ "email": "$.user.email" })), + trigger_type: "manual".into(), + }; + + let result = handle_trigger_pipe(&agent_cmd, &data, &pipe_runtime) + .await + .expect("trigger_pipe should return structured failure"); + + assert_eq!(result.status, "failed"); + assert_eq!( + result.error.as_deref(), + Some("trigger_pipe requires input_data or source_container") + ); + } + + #[tokio::test] + async fn handle_trigger_pipe_routes_ws_target() { + // Use a port that is not listening so the WS connection fails fast + let agent_cmd = make_trigger_agent_command(); + let pipe_runtime = PipeRuntime::new(); + let data = TriggerPipeCommand { + deployment_hash: "dep-123".into(), + pipe_instance_id: "pipe-ws-1".into(), + input_data: Some(json!({ "key": "value" })), + source_container: None, + source_endpoint: "/".into(), + source_method: "GET".into(), + target_url: Some("ws://127.0.0.1:19999".into()), + target_container: None, + target_endpoint: "/ws-target".into(), + target_method: "POST".into(), + field_mapping: None, + trigger_type: "manual".into(), + }; + + let result = handle_trigger_pipe(&agent_cmd, &data, &pipe_runtime) + .await + .expect("trigger_pipe 
should return structured result"); + + // WS connection will fail (nothing listening), so we expect a failed status + assert_eq!(result.status, "failed"); + let error = result.error.as_deref().unwrap_or(""); + assert!( + error.contains("WebSocket") || error.contains("Connection refused"), + "expected WS connection error, got: {error}" + ); + let body = result.result.expect("trigger_pipe failure result body"); + assert_eq!(body["target_response"]["transport"], "websocket"); + assert_eq!(body["target_response"]["delivered"], false); + assert_eq!(body["target_response"]["status"], Value::Null); + } + + #[tokio::test] + async fn handle_trigger_pipe_routes_grpc_target() { + let agent_cmd = make_trigger_agent_command(); + let pipe_runtime = PipeRuntime::new(); + let data = TriggerPipeCommand { + deployment_hash: "dep-123".into(), + pipe_instance_id: "pipe-grpc-1".into(), + input_data: Some(json!({ "key": "value" })), + source_container: None, + source_endpoint: "/".into(), + source_method: "GET".into(), + target_url: Some("grpc://127.0.0.1:19998".into()), + target_container: None, + target_endpoint: "/grpc-target".into(), + target_method: "POST".into(), + field_mapping: None, + trigger_type: "manual".into(), + }; + + let result = handle_trigger_pipe(&agent_cmd, &data, &pipe_runtime) + .await + .expect("trigger_pipe should return structured result"); + + // gRPC connection will fail (nothing listening), so we expect a failed status + assert_eq!(result.status, "failed"); + let error = result.error.as_deref().unwrap_or(""); + assert!( + error.contains("gRPC") || error.contains("connection") || error.contains("connect"), + "expected gRPC connection error, got: {error}" + ); + let body = result.result.expect("trigger_pipe failure result body"); + assert_eq!(body["target_response"]["transport"], "grpc"); + assert_eq!(body["target_response"]["delivered"], false); + assert_eq!(body["target_response"]["status"], Value::Null); + } + + #[tokio::test] + async fn 
handle_trigger_pipe_routes_grpcs_target() { + let agent_cmd = make_trigger_agent_command(); + let pipe_runtime = PipeRuntime::new(); + let data = TriggerPipeCommand { + deployment_hash: "dep-123".into(), + pipe_instance_id: "pipe-grpcs-1".into(), + input_data: Some(json!({ "key": "value" })), + source_container: None, + source_endpoint: "/".into(), + source_method: "GET".into(), + target_url: Some("grpcs://127.0.0.1:19997".into()), + target_container: None, + target_endpoint: "/".into(), + target_method: "POST".into(), + field_mapping: None, + trigger_type: "manual".into(), + }; + + let result = handle_trigger_pipe(&agent_cmd, &data, &pipe_runtime) + .await + .expect("trigger_pipe should return structured result"); + + // grpcs:// should be routed to gRPC transport (fails on connect) + assert_eq!(result.status, "failed"); + let error = result.error.as_deref().unwrap_or(""); + assert!( + error.contains("gRPC") || error.contains("connection") || error.contains("connect"), + "expected gRPC connection error for grpcs://, got: {error}" + ); + let body = result.result.expect("trigger_pipe failure result body"); + assert_eq!(body["target_response"]["transport"], "grpc"); + assert_eq!(body["target_response"]["delivered"], false); + assert_eq!(body["target_response"]["status"], Value::Null); + } + + #[tokio::test] + async fn handle_trigger_pipe_grpc_rejects_empty_pipe_instance_id() { + let agent_cmd = make_trigger_agent_command(); + let pipe_runtime = PipeRuntime::new(); + let data = TriggerPipeCommand { + deployment_hash: "dep-123".into(), + pipe_instance_id: "".into(), + input_data: Some(json!({ "key": "value" })), + source_container: None, + source_endpoint: "/".into(), + source_method: "GET".into(), + target_url: Some("grpc://127.0.0.1:19996".into()), + target_container: None, + target_endpoint: "/".into(), + target_method: "POST".into(), + field_mapping: None, + trigger_type: "manual".into(), + }; + + let result = handle_trigger_pipe(&agent_cmd, &data, &pipe_runtime) + 
.await + .expect("trigger_pipe should return structured result"); + + assert_eq!(result.status, "failed"); + let error = result.error.as_deref().unwrap_or(""); + assert!( + error.contains("non-empty"), + "expected empty step_id error, got: {error}" + ); + } + + #[tokio::test] + async fn handle_activate_and_trigger_pipe_uses_registered_runtime_config() { + let mut server = Server::new_async().await; + let mock = server + .mock("POST", "/runtime/pipe") + .match_body(Matcher::Exact(r#"{"email":"runtime@try.direct"}"#.into())) + .with_status(200) + .with_header("content-type", "application/json") + .with_body(r#"{"accepted":true}"#) + .create_async() + .await; + + let agent_cmd = make_trigger_agent_command(); + let pipe_runtime = PipeRuntime::new(); + let activate = ActivatePipeCommand { + deployment_hash: "dep-123".into(), + pipe_instance_id: "pipe-runtime-1".into(), + source_container: None, + source_endpoint: "/".into(), + source_method: "GET".into(), + source_broker_url: None, + source_queue: None, + source_exchange: None, + source_routing_key: None, + target_url: Some(server.url()), + target_container: None, + target_endpoint: "/runtime/pipe".into(), + target_method: "POST".into(), + field_mapping: Some(json!({ "email": "$.user.email" })), + trigger_type: "manual".into(), + }; + + let activate_result = handle_activate_pipe(&agent_cmd, &activate, &pipe_runtime) + .await + .expect("activate_pipe should succeed"); + assert_eq!(activate_result.status, "success"); + assert_eq!( + activate_result + .result + .as_ref() + .and_then(|body| body.get("lifecycle")) + .and_then(|body| body.get("state")), + Some(&json!("active")) + ); + + let trigger = TriggerPipeCommand { + deployment_hash: "dep-123".into(), + pipe_instance_id: "pipe-runtime-1".into(), + input_data: Some(json!({ "user": { "email": "runtime@try.direct" } })), + source_container: None, + source_endpoint: "/".into(), + source_method: "GET".into(), + target_url: None, + target_container: None, + target_endpoint: 
"/".into(), + target_method: "POST".into(), + field_mapping: None, + trigger_type: "manual".into(), + }; + + let trigger_result = handle_trigger_pipe(&agent_cmd, &trigger, &pipe_runtime) + .await + .expect("trigger_pipe should use registered target"); + + mock.assert_async().await; + assert_eq!(trigger_result.status, "success"); + let stored = pipe_runtime.snapshot("dep-123", "pipe-runtime-1").await; + assert_eq!( + stored.and_then(|snapshot| snapshot.last_triggered_at), + trigger_result + .result + .as_ref() + .and_then(|body| body.get("triggered_at")) + .and_then(Value::as_str) + .map(str::to_string) + ); + assert_eq!( + trigger_result + .result + .as_ref() + .and_then(|body| body.get("lifecycle")) + .and_then(|body| body.get("trigger_count")), + Some(&json!(1)) + ); + } + + #[tokio::test] + async fn handle_deactivate_pipe_is_idempotent() { + let agent_cmd = make_trigger_agent_command(); + let pipe_runtime = PipeRuntime::new(); + let activate = ActivatePipeCommand { + deployment_hash: "dep-123".into(), + pipe_instance_id: "pipe-runtime-2".into(), + source_container: None, + source_endpoint: "/".into(), + source_method: "GET".into(), + source_broker_url: None, + source_queue: None, + source_exchange: None, + source_routing_key: None, + target_url: Some("https://example.com".into()), + target_container: None, + target_endpoint: "/runtime/pipe".into(), + target_method: "POST".into(), + field_mapping: None, + trigger_type: "manual".into(), + }; + let deactivate = DeactivatePipeCommand { + deployment_hash: "dep-123".into(), + pipe_instance_id: "pipe-runtime-2".into(), + }; + + handle_activate_pipe(&agent_cmd, &activate, &pipe_runtime) + .await + .expect("activate_pipe should succeed"); + let first = handle_deactivate_pipe(&agent_cmd, &deactivate, &pipe_runtime) + .await + .expect("deactivate_pipe should succeed"); + let second = handle_deactivate_pipe(&agent_cmd, &deactivate, &pipe_runtime) + .await + .expect("deactivate_pipe should stay idempotent"); + + 
assert_eq!(first.status, "success"); + assert_eq!(second.status, "success"); + assert_eq!( + first.result.as_ref().and_then(|body| body.get("removed")), + Some(&json!(true)) + ); + assert_eq!( + second.result.as_ref().and_then(|body| body.get("removed")), + Some(&json!(false)) + ); + assert_eq!( + second + .result + .as_ref() + .and_then(|body| body.get("lifecycle")) + .and_then(|body| body.get("state")), + Some(&json!("inactive")) + ); + } + + #[tokio::test] + async fn trigger_pipe_failure_updates_lifecycle_state() { + let agent_cmd = make_trigger_agent_command(); + let pipe_runtime = PipeRuntime::new(); + let activate = ActivatePipeCommand { + deployment_hash: "dep-123".into(), + pipe_instance_id: "pipe-runtime-3".into(), + source_container: None, + source_endpoint: "/".into(), + source_method: "GET".into(), + source_broker_url: None, + source_queue: None, + source_exchange: None, + source_routing_key: None, + target_url: Some("ws://127.0.0.1:19995".into()), + target_container: None, + target_endpoint: "/runtime/pipe".into(), + target_method: "POST".into(), + field_mapping: None, + trigger_type: "manual".into(), + }; + + handle_activate_pipe(&agent_cmd, &activate, &pipe_runtime) + .await + .expect("activate_pipe should succeed"); + + let trigger = TriggerPipeCommand { + deployment_hash: "dep-123".into(), + pipe_instance_id: "pipe-runtime-3".into(), + input_data: Some(json!({ "user": { "email": "runtime@try.direct" } })), + source_container: None, + source_endpoint: "/".into(), + source_method: "GET".into(), + target_url: None, + target_container: None, + target_endpoint: "/".into(), + target_method: "POST".into(), + field_mapping: None, + trigger_type: "manual".into(), + }; + + let trigger_result = handle_trigger_pipe(&agent_cmd, &trigger, &pipe_runtime) + .await + .expect("trigger_pipe should return structured failure"); + + assert_eq!(trigger_result.status, "failed"); + assert_eq!( + trigger_result + .result + .as_ref() + .and_then(|body| body.get("lifecycle")) 
+ .and_then(|body| body.get("state")), + Some(&json!("failed")) + ); + } +} + impl std::fmt::Display for ContainerRuntime { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { @@ -138,6 +691,9 @@ pub enum StackerCommand { ListContainers(ListContainersCommand), ConfigureFirewall(ConfigureFirewallCommand), ProbeEndpoints(ProbeEndpointsCommand), + ActivatePipe(ActivatePipeCommand), + DeactivatePipe(DeactivatePipeCommand), + TriggerPipe(TriggerPipeCommand), } #[cfg_attr(not(feature = "docker"), allow(dead_code))] @@ -224,364 +780,977 @@ pub struct ErrorSummaryCommand { redact: bool, } -/// Command to fetch app configuration from Vault -#[cfg_attr(not(feature = "docker"), allow(dead_code))] -#[derive(Debug, Clone, Deserialize)] -pub struct FetchConfigCommand { - #[serde(default)] - deployment_hash: String, - #[serde(default)] - app_code: String, - /// If true, also write the config to the destination path - #[serde(default)] - apply: bool, -} - -/// Command to apply configuration from Vault to the filesystem and restart container #[cfg_attr(not(feature = "docker"), allow(dead_code))] #[derive(Debug, Clone, Deserialize)] -pub struct ApplyConfigCommand { +pub struct ActivatePipeCommand { #[serde(default)] deployment_hash: String, + pipe_instance_id: String, #[serde(default)] - app_code: String, - /// Optional: override the config content (instead of fetching from Vault) - #[serde(default)] - config_content: Option, - /// Optional: override the destination path - #[serde(default)] - destination_path: Option, - /// Whether to restart the container after applying config - #[serde(default = "default_true")] - restart_after: bool, -} - -/// Command to deploy a new app container via docker compose -#[cfg_attr(not(feature = "docker"), allow(dead_code))] -#[derive(Debug, Clone, Deserialize)] -pub struct DeployAppCommand { - #[serde(default)] - deployment_hash: String, + source_container: Option, + #[serde(default = "default_pipe_source_endpoint")] + 
source_endpoint: String, + #[serde(default = "default_pipe_source_method")] + source_method: String, #[serde(default)] - app_code: String, - /// Optional: docker-compose.yml content (generated from J2 template) - /// If provided, will be written to disk before deploying + source_broker_url: Option, #[serde(default)] - compose_content: Option, - /// Optional: specific image to use (overrides compose file) + source_queue: Option, #[serde(default)] - image: Option, - /// Optional: environment variables to set + source_exchange: Option, #[serde(default)] - env_vars: Option>, - /// Whether to pull the image before starting (default: true) - #[serde(default = "default_true")] - pull: bool, - /// Whether to remove existing container before deploying + source_routing_key: Option, #[serde(default)] - force_recreate: bool, - /// Optional: config files to write before deploying (uses existing AppConfig struct) + target_url: Option, #[serde(default)] - config_files: Option>, - /// Container runtime to use: "runc" (default) or "kata" for microVM isolation + target_container: Option, + #[serde(default = "default_pipe_target_endpoint")] + target_endpoint: String, + #[serde(default = "default_pipe_target_method")] + target_method: String, #[serde(default)] - runtime: Option, + field_mapping: Option, + #[serde(default = "default_activate_pipe_trigger_type")] + trigger_type: String, } -/// Command to remove an app container and associated config #[cfg_attr(not(feature = "docker"), allow(dead_code))] #[derive(Debug, Clone, Deserialize)] -pub struct RemoveAppCommand { +pub struct DeactivatePipeCommand { #[serde(default)] deployment_hash: String, - #[serde(default)] - app_code: String, - #[serde(default = "default_true")] - delete_config: bool, - #[serde(default)] - remove_volumes: bool, - #[serde(default)] - remove_image: bool, + pipe_instance_id: String, } -/// Command to fetch all app configurations from Vault for a deployment #[cfg_attr(not(feature = "docker"), allow(dead_code))] 
#[derive(Debug, Clone, Deserialize)] -pub struct FetchAllConfigsCommand { +pub struct TriggerPipeCommand { #[serde(default)] deployment_hash: String, - /// Optional: specific app codes to fetch (if empty, fetches all) - #[serde(default)] - app_codes: Vec, - /// Whether to apply configs to disk after fetching + pipe_instance_id: String, #[serde(default)] - apply: bool, - /// Whether to create a ZIP archive of all configs - #[serde(default)] - archive: bool, -} - -/// Command to fetch configs and deploy an app in one operation -#[cfg_attr(not(feature = "docker"), allow(dead_code))] -#[derive(Debug, Clone, Deserialize)] -pub struct DeployWithConfigsCommand { + input_data: Option, #[serde(default)] - deployment_hash: String, + source_container: Option, + #[serde(default = "default_pipe_source_endpoint")] + source_endpoint: String, + #[serde(default = "default_pipe_source_method")] + source_method: String, #[serde(default)] - app_code: String, - /// Whether to pull the image before starting - #[serde(default = "default_true")] - pull: bool, - /// Whether to force recreate the container + target_url: Option, #[serde(default)] - force_recreate: bool, - /// Whether to apply all project configs before deploying - #[serde(default = "default_true")] - apply_configs: bool, - /// Container runtime to use: "runc" (default) or "kata" for microVM isolation + target_container: Option, + #[serde(default = "default_pipe_target_endpoint")] + target_endpoint: String, + #[serde(default = "default_pipe_target_method")] + target_method: String, #[serde(default)] - runtime: Option, + field_mapping: Option, + #[serde(default = "default_pipe_trigger_type")] + trigger_type: String, } -/// Command to detect configuration drift between Vault and deployed files -#[cfg_attr(not(feature = "docker"), allow(dead_code))] -#[derive(Debug, Clone, Deserialize)] -pub struct ConfigDiffCommand { - #[serde(default)] - deployment_hash: String, - /// Optional: specific app codes to check (if empty, checks 
all) - #[serde(default)] - app_codes: Vec, - /// Whether to include full diff content in response - #[serde(default)] - include_diff: bool, +fn default_pipe_source_endpoint() -> String { + "/".to_string() } -/// Command to configure nginx proxy manager for an app -#[cfg_attr(not(feature = "docker"), allow(dead_code))] -#[derive(Debug, Clone, Deserialize)] -pub struct ConfigureProxyCommand { - #[serde(default)] - deployment_hash: String, - #[serde(default)] - app_code: String, - /// Domain name(s) to proxy (e.g., ["komodo.example.com"]) - #[serde(default)] - domain_names: Vec, - /// Container/service name to forward to (defaults to app_code) - #[serde(default)] - forward_host: Option, - /// Port on the container to forward to - forward_port: u16, - /// Enable SSL with Let's Encrypt - #[serde(default = "default_true")] - ssl_enabled: bool, - /// Force HTTPS redirect - #[serde(default = "default_true")] - ssl_forced: bool, - /// HTTP/2 support - #[serde(default = "default_true")] - http2_support: bool, - /// Action: "create", "update", "delete" - #[serde(default = "default_create_action")] - action: String, - /// NPM admin credentials (optional, can use defaults from config) - #[serde(default)] - npm_host: Option, - #[serde(default)] - npm_email: Option, - #[serde(default)] - npm_password: Option, +fn default_pipe_source_method() -> String { + "GET".to_string() } -/// Command to execute a shell command inside a running container -#[cfg_attr(not(feature = "docker"), allow(dead_code))] -#[derive(Debug, Clone, Deserialize)] -pub struct ExecCommand { - #[serde(default)] - deployment_hash: String, - #[serde(default)] - app_code: String, - #[serde(default)] - container: Option, - /// The command to execute inside the container - command: String, - /// Timeout in seconds (default: 30, max: 120) - #[serde(default = "default_exec_timeout")] - timeout: u32, - /// Whether to redact sensitive data from output - #[serde(default = "default_true")] - redact_output: bool, +fn 
default_pipe_target_endpoint() -> String { + "/".to_string() } -/// Command to get server resource metrics (CPU, RAM, disk) -#[cfg_attr(not(feature = "docker"), allow(dead_code))] -#[derive(Debug, Clone, Deserialize)] -pub struct ServerResourcesCommand { - #[serde(default)] - deployment_hash: String, - /// Include disk metrics - #[serde(default = "default_true")] - include_disk: bool, - /// Include network metrics - #[serde(default = "default_true")] - include_network: bool, +fn default_pipe_target_method() -> String { + "POST".to_string() } -/// Command to list all containers in the deployment -#[cfg_attr(not(feature = "docker"), allow(dead_code))] -#[derive(Debug, Clone, Deserialize)] -pub struct ListContainersCommand { - #[serde(default)] - deployment_hash: String, - /// Include container health metrics - #[serde(default = "default_true")] - include_health: bool, - /// Include container logs (last N lines) - #[serde(default)] - include_logs: bool, - /// Number of log lines to include if include_logs is true - #[serde(default = "default_logs_tail")] - log_lines: usize, - /// Optional container mapping for grouping by app_code - #[serde(default)] - app_container_map: Vec, +fn normalize_trigger_pipe_method(method: &str, default_method: &str) -> String { + let normalized = trimmed(method).to_ascii_uppercase(); + match normalized.as_str() { + "GET" | "POST" | "PUT" | "PATCH" | "DELETE" | "HEAD" | "OPTIONS" => normalized, + _ => default_method.to_string(), + } } -#[cfg_attr(not(feature = "docker"), allow(dead_code))] -#[derive(Debug, Clone, Deserialize)] -pub struct ContainerMapEntry { - container_name_pattern: String, - container_role: String, - #[serde(default)] - maps_to_app_code: Option, - #[serde(default)] - display_name: Option, +fn default_activate_pipe_trigger_type() -> String { + "webhook".to_string() } -#[cfg_attr(not(feature = "docker"), allow(dead_code))] -#[derive(Debug, Clone, Deserialize)] -pub struct AppContainerMap { - app_code: String, - 
#[serde(default)] - container_map: Vec, +fn default_pipe_trigger_type() -> String { + "manual".to_string() } -/// Command to probe a containerized app for connectable API endpoints -#[cfg_attr(not(feature = "docker"), allow(dead_code))] -#[derive(Debug, Clone, Deserialize)] -pub struct ProbeEndpointsCommand { - #[serde(default)] +pub fn default_pipe_runtime_state_path(config_path: Option<&str>) -> Option { + if let Ok(path) = std::env::var("PIPE_RUNTIME_STATE_PATH") { + let trimmed = path.trim(); + if trimmed.is_empty() { + return None; + } + return Some(PathBuf::from(trimmed)); + } + + if let Some(config_path) = config_path { + let config_path = PathBuf::from(config_path); + let base_dir = config_path + .parent() + .filter(|path| !path.as_os_str().is_empty()) + .map(|path| path.to_path_buf()) + .or_else(|| std::env::current_dir().ok()); + return base_dir.map(|dir| dir.join(".status").join("pipe-runtime-state.json")); + } + + std::env::current_dir() + .ok() + .map(|dir| dir.join(".status").join("pipe-runtime-state.json")) +} + +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +struct PipeRuntimeKey { deployment_hash: String, - #[serde(default)] - app_code: String, - #[serde(default)] - container: Option, - /// Protocols to probe: "openapi", "html_forms", "graphql", "mcp", "rest" - #[serde(default = "default_probe_protocols")] - protocols: Vec, - /// Timeout per probe request in seconds - #[serde(default = "default_probe_timeout")] - probe_timeout: u32, + pipe_instance_id: String, } -fn default_probe_protocols() -> Vec { - vec!["openapi".to_string(), "rest".to_string()] +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +#[serde(rename_all = "snake_case")] +enum PipeLifecycleState { + Active, + Inactive, + Failed, } -fn default_probe_timeout() -> u32 { - 5 +#[derive(Debug, Clone, Serialize, Deserialize)] +struct PipeLifecycleSnapshot { + state: PipeLifecycleState, + activated_at: String, + #[serde(skip_serializing_if = "Option::is_none")] + deactivated_at: 
Option, + #[serde(skip_serializing_if = "Option::is_none")] + last_triggered_at: Option, + #[serde(skip_serializing_if = "Option::is_none")] + last_error: Option, + trigger_count: u64, + last_updated_at: String, } -fn default_exec_timeout() -> u32 { - 30 +#[derive(Debug, Clone, Serialize, Deserialize)] +struct PipeRegistration { + source_container: Option, + source_endpoint: String, + source_method: String, + source_broker_url: Option, + source_queue: Option, + source_exchange: Option, + source_routing_key: Option, + target_url: Option, + target_container: Option, + target_endpoint: String, + target_method: String, + field_mapping: Option, + trigger_type: String, + lifecycle: PipeLifecycleSnapshot, } -fn default_logs_tail() -> usize { - 10 +#[derive(Debug, Clone, Default)] +pub struct PipeRuntime { + registrations: Arc>>, + lifecycle: Arc>>, + workers: Arc>>, + state_path: Arc>>, } -fn default_create_action() -> String { - "create".to_string() +#[derive(Debug, Clone, Serialize, Deserialize)] +struct PersistedPipeEntry { + deployment_hash: String, + pipe_instance_id: String, + registration: PipeRegistration, } -pub fn parse_stacker_command(cmd: &AgentCommand) -> Result> { - let normalized = cmd.name.trim().to_lowercase(); - match normalized.as_str() { - "health" | "stacker.health" => { - let payload: HealthCommand = serde_json::from_value(unwrap_params(&cmd.params)) - .context("invalid health payload")?; - let payload = payload.normalize().with_command_context(cmd); - payload.validate()?; - Ok(Some(StackerCommand::Health(payload))) +#[derive(Debug, Default, Clone, Serialize, Deserialize)] +struct PersistedPipeRuntime { + entries: Vec, +} + +#[derive(Debug, Clone)] +struct ActivationResult { + replaced: bool, + registration: PipeRegistration, + previous_lifecycle: Option, +} + +#[derive(Debug, Clone)] +struct DeactivationResult { + removed: bool, + lifecycle: PipeLifecycleSnapshot, +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +enum PipeSourceWorkerKind { + 
Poll, + Websocket, + Grpc, + Amqp, +} + +impl PipeRuntime { + pub fn new() -> Self { + Self::default() + } + + pub async fn configure_persistence(&self, path: Option) { + let mut state_path = self.state_path.write().await; + *state_path = path; + } + + pub async fn restore_from_disk(&self) -> Result { + let Some(path) = self.state_path.read().await.clone() else { + return Ok(0); + }; + if !path.exists() { + return Ok(0); } - "logs" | "stacker.logs" => { - let payload: LogsCommand = serde_json::from_value(unwrap_params(&cmd.params)) - .context("invalid logs payload")?; - let payload = payload.normalize().with_command_context(cmd); - payload.validate()?; - Ok(Some(StackerCommand::Logs(payload))) + + let body = tokio::fs::read_to_string(&path) + .await + .with_context(|| format!("reading pipe runtime state from {}", path.display()))?; + if body.trim().is_empty() { + return Ok(0); } - "restart" | "stacker.restart" => { - let payload: RestartCommand = serde_json::from_value(unwrap_params(&cmd.params)) - .context("invalid restart payload")?; - let payload = payload.normalize().with_command_context(cmd); - payload.validate()?; - Ok(Some(StackerCommand::Restart(payload))) + + let persisted: PersistedPipeRuntime = serde_json::from_str(&body) + .with_context(|| format!("parsing pipe runtime state from {}", path.display()))?; + + { + let mut registrations = self.registrations.write().await; + let mut lifecycle = self.lifecycle.write().await; + registrations.clear(); + lifecycle.clear(); + for entry in &persisted.entries { + let key = PipeRuntimeKey { + deployment_hash: entry.deployment_hash.clone(), + pipe_instance_id: entry.pipe_instance_id.clone(), + }; + registrations.insert(key.clone(), entry.registration.clone()); + lifecycle.insert(key, entry.registration.lifecycle.clone()); + } } - "stop" | "stacker.stop" => { - let payload: StopCommand = serde_json::from_value(unwrap_params(&cmd.params)) - .context("invalid stop payload")?; - let payload = 
payload.normalize().with_command_context(cmd); - payload.validate()?; - Ok(Some(StackerCommand::Stop(payload))) + + for entry in &persisted.entries { + self.spawn_source_worker_if_needed( + &entry.deployment_hash, + &entry.pipe_instance_id, + entry.registration.clone(), + ) + .await; } - "start" | "stacker.start" => { - let payload: StartCommand = serde_json::from_value(unwrap_params(&cmd.params)) - .context("invalid start payload")?; - let payload = payload.normalize().with_command_context(cmd); - payload.validate()?; - Ok(Some(StackerCommand::Start(payload))) + + Ok(persisted.entries.len()) + } + + async fn persist_active_registrations(&self) -> Result<()> { + let Some(path) = self.state_path.read().await.clone() else { + return Ok(()); + }; + + let registrations = self.registrations.read().await; + let persisted = PersistedPipeRuntime { + entries: registrations + .iter() + .map(|(key, registration)| PersistedPipeEntry { + deployment_hash: key.deployment_hash.clone(), + pipe_instance_id: key.pipe_instance_id.clone(), + registration: redact_persisted_registration(registration), + }) + .collect(), + }; + drop(registrations); + + if let Some(parent) = path.parent() { + tokio::fs::create_dir_all(parent) + .await + .with_context(|| format!("creating pipe runtime state dir {}", parent.display()))?; } - "error_summary" | "stacker.error_summary" => { - let payload: ErrorSummaryCommand = serde_json::from_value(unwrap_params(&cmd.params)) - .context("invalid error_summary payload")?; - let payload = payload.normalize().with_command_context(cmd); - payload.validate()?; - Ok(Some(StackerCommand::ErrorSummary(payload))) + + let body = serde_json::to_vec_pretty(&persisted).context("serializing pipe runtime")?; + let mut options = tokio::fs::OpenOptions::new(); + options.create(true).write(true).truncate(true); + #[cfg(unix)] + options.mode(0o600); + + let mut file = options + .open(&path) + .await + .with_context(|| format!("opening pipe runtime state {}", path.display()))?; + 
file.write_all(&body) + .await + .with_context(|| format!("writing pipe runtime state to {}", path.display()))?; + file.flush() + .await + .with_context(|| format!("flushing pipe runtime state {}", path.display()))?; + #[cfg(unix)] + tokio::fs::set_permissions(&path, std::fs::Permissions::from_mode(0o600)) + .await + .with_context(|| format!("setting permissions on {}", path.display()))?; + Ok(()) + } + + async fn activate( + &self, + key: PipeRuntimeKey, + registration: PipeRegistration, + ) -> ActivationResult { + let (replaced, previous_lifecycle) = { + let mut registrations = self.registrations.write().await; + let mut lifecycle = self.lifecycle.write().await; + let previous_lifecycle = lifecycle.get(&key).cloned(); + let replaced = registrations + .insert(key.clone(), registration.clone()) + .is_some(); + lifecycle.insert(key, registration.lifecycle.clone()); + (replaced, previous_lifecycle) + }; + if let Err(error) = self.persist_active_registrations().await { + warn!(error = %error, "failed to persist active pipe registrations after activate"); } - "fetch_config" | "stacker.fetch_config" => { - let payload: FetchConfigCommand = serde_json::from_value(unwrap_params(&cmd.params)) - .context("invalid fetch_config payload")?; - let payload = payload.normalize().with_command_context(cmd); - payload.validate()?; - Ok(Some(StackerCommand::FetchConfig(payload))) + ActivationResult { + replaced, + registration, + previous_lifecycle, } - "apply_config" | "stacker.apply_config" => { - let payload: ApplyConfigCommand = serde_json::from_value(unwrap_params(&cmd.params)) - .context("invalid apply_config payload")?; - let payload = payload.normalize().with_command_context(cmd); - payload.validate()?; - Ok(Some(StackerCommand::ApplyConfig(payload))) + } + + async fn deactivate( + &self, + deployment_hash: &str, + pipe_instance_id: &str, + deactivated_at: String, + ) -> DeactivationResult { + let key = PipeRuntimeKey { + deployment_hash: deployment_hash.to_string(), + 
pipe_instance_id: pipe_instance_id.to_string(), + }; + let (removed, snapshot) = { + let mut registrations = self.registrations.write().await; + let mut lifecycle = self.lifecycle.write().await; + let removed = registrations.remove(&key).is_some(); + let mut snapshot = lifecycle + .get(&key) + .cloned() + .unwrap_or(PipeLifecycleSnapshot { + state: PipeLifecycleState::Inactive, + activated_at: deactivated_at.clone(), + deactivated_at: None, + last_triggered_at: None, + last_error: None, + trigger_count: 0, + last_updated_at: deactivated_at.clone(), + }); + snapshot.state = PipeLifecycleState::Inactive; + snapshot.deactivated_at = Some(deactivated_at.clone()); + snapshot.last_updated_at = deactivated_at; + lifecycle.insert(key, snapshot.clone()); + (removed, snapshot) + }; + if let Err(error) = self.persist_active_registrations().await { + warn!(error = %error, "failed to persist active pipe registrations after deactivate"); } - "deploy_app" | "stacker.deploy_app" => { - let payload: DeployAppCommand = serde_json::from_value(unwrap_params(&cmd.params)) - .context("invalid deploy_app payload")?; - let payload = payload.normalize().with_command_context(cmd); - payload.validate()?; - Ok(Some(StackerCommand::DeployApp(payload))) + DeactivationResult { + removed, + lifecycle: snapshot, } - "remove_app" | "stacker.remove_app" => { - let payload: RemoveAppCommand = serde_json::from_value(unwrap_params(&cmd.params)) - .context("invalid remove_app payload")?; - let payload = payload.normalize().with_command_context(cmd); - payload.validate()?; - Ok(Some(StackerCommand::RemoveApp(payload))) + } + + async fn resolve( + &self, + deployment_hash: &str, + pipe_instance_id: &str, + ) -> Option { + let registrations = self.registrations.read().await; + registrations + .get(&PipeRuntimeKey { + deployment_hash: deployment_hash.to_string(), + pipe_instance_id: pipe_instance_id.to_string(), + }) + .cloned() + } + + async fn mark_triggered( + &self, + deployment_hash: &str, + 
pipe_instance_id: &str, + triggered_at: String, + ) { + let mut registrations = self.registrations.write().await; + if let Some(registration) = registrations.get_mut(&PipeRuntimeKey { + deployment_hash: deployment_hash.to_string(), + pipe_instance_id: pipe_instance_id.to_string(), + }) { + registration.lifecycle.state = PipeLifecycleState::Active; + registration.lifecycle.last_triggered_at = Some(triggered_at.clone()); + registration.lifecycle.last_error = None; + registration.lifecycle.trigger_count += 1; + registration.lifecycle.last_updated_at = triggered_at.clone(); + } + drop(registrations); + + let mut lifecycle = self.lifecycle.write().await; + if let Some(snapshot) = lifecycle.get_mut(&PipeRuntimeKey { + deployment_hash: deployment_hash.to_string(), + pipe_instance_id: pipe_instance_id.to_string(), + }) { + snapshot.state = PipeLifecycleState::Active; + snapshot.last_triggered_at = Some(triggered_at.clone()); + snapshot.last_error = None; + snapshot.trigger_count += 1; + snapshot.last_updated_at = triggered_at; + } + } + + async fn mark_failed( + &self, + deployment_hash: &str, + pipe_instance_id: &str, + failed_at: String, + error: String, + ) { + let mut registrations = self.registrations.write().await; + if let Some(registration) = registrations.get_mut(&PipeRuntimeKey { + deployment_hash: deployment_hash.to_string(), + pipe_instance_id: pipe_instance_id.to_string(), + }) { + registration.lifecycle.state = PipeLifecycleState::Failed; + registration.lifecycle.last_error = Some(error.clone()); + registration.lifecycle.last_updated_at = failed_at.clone(); + } + drop(registrations); + + let mut lifecycle = self.lifecycle.write().await; + lifecycle + .entry(PipeRuntimeKey { + deployment_hash: deployment_hash.to_string(), + pipe_instance_id: pipe_instance_id.to_string(), + }) + .and_modify(|snapshot| { + snapshot.state = PipeLifecycleState::Failed; + snapshot.last_error = Some(error.clone()); + snapshot.last_updated_at = failed_at.clone(); + }) + 
.or_insert(PipeLifecycleSnapshot { + state: PipeLifecycleState::Failed, + activated_at: failed_at.clone(), + deactivated_at: None, + last_triggered_at: None, + last_error: Some(error), + trigger_count: 0, + last_updated_at: failed_at, + }); + } + + async fn snapshot( + &self, + deployment_hash: &str, + pipe_instance_id: &str, + ) -> Option { + let lifecycle = self.lifecycle.read().await; + lifecycle + .get(&PipeRuntimeKey { + deployment_hash: deployment_hash.to_string(), + pipe_instance_id: pipe_instance_id.to_string(), + }) + .cloned() + } + + async fn replace_worker(&self, key: PipeRuntimeKey, handle: AbortHandle) { + let mut workers = self.workers.write().await; + if let Some(existing) = workers.insert(key, handle) { + existing.abort(); } - "fetch_all_configs" | "stacker.fetch_all_configs" => { - let payload: FetchAllConfigsCommand = - serde_json::from_value(unwrap_params(&cmd.params)) - .context("invalid fetch_all_configs payload")?; + } + + async fn stop_worker(&self, deployment_hash: &str, pipe_instance_id: &str) { + let mut workers = self.workers.write().await; + if let Some(existing) = workers.remove(&PipeRuntimeKey { + deployment_hash: deployment_hash.to_string(), + pipe_instance_id: pipe_instance_id.to_string(), + }) { + existing.abort(); + } + } + + async fn spawn_source_worker_if_needed( + &self, + deployment_hash: &str, + pipe_instance_id: &str, + registration: PipeRegistration, + ) { + self.stop_worker(deployment_hash, pipe_instance_id).await; + + let Some(kind) = pipe_source_worker_kind(®istration.trigger_type) else { + return; + }; + + let runtime = self.clone(); + let key = PipeRuntimeKey { + deployment_hash: deployment_hash.to_string(), + pipe_instance_id: pipe_instance_id.to_string(), + }; + let key_for_task = key.clone(); + let handle = tokio::spawn(async move { + match kind { + PipeSourceWorkerKind::Poll => { + run_poll_source_worker(runtime, key_for_task, registration).await + } + PipeSourceWorkerKind::Websocket => { + 
run_websocket_source_worker(runtime, key_for_task, registration).await + } + PipeSourceWorkerKind::Grpc => { + run_grpc_source_worker(runtime, key_for_task, registration).await + } + PipeSourceWorkerKind::Amqp => { + run_amqp_source_worker(runtime, key_for_task, registration).await + } + } + }); + + self.replace_worker(key, handle.abort_handle()).await; + } + + pub async fn trigger_registered_payload( + &self, + deployment_hash: &str, + pipe_instance_id: &str, + payload: Value, + trigger_type: &str, + ) -> Result { + let command_id = format!( + "pipe-{}-{}", + pipe_instance_id, + chrono::Utc::now().timestamp_millis() + ); + let agent_cmd = AgentCommand { + id: command_id.clone(), + command_id, + name: "trigger_pipe".to_string(), + params: json!({}), + deployment_hash: Some(deployment_hash.to_string()), + app_code: None, + }; + let trigger = TriggerPipeCommand { + deployment_hash: deployment_hash.to_string(), + pipe_instance_id: pipe_instance_id.to_string(), + input_data: Some(payload), + source_container: None, + source_endpoint: default_pipe_source_endpoint(), + source_method: default_pipe_source_method(), + target_url: None, + target_container: None, + target_endpoint: default_pipe_target_endpoint(), + target_method: default_pipe_target_method(), + field_mapping: None, + trigger_type: trigger_type.to_string(), + }; + + handle_trigger_pipe(&agent_cmd, &trigger, self).await + } +} + +impl PipeLifecycleSnapshot { + fn active(activated_at: String) -> Self { + Self { + state: PipeLifecycleState::Active, + activated_at: activated_at.clone(), + deactivated_at: None, + last_triggered_at: None, + last_error: None, + trigger_count: 0, + last_updated_at: activated_at, + } + } +} + +impl From for PipeRegistration { + fn from(value: ActivatePipeCommand) -> Self { + Self { + source_container: value.source_container, + source_endpoint: value.source_endpoint, + source_method: value.source_method, + source_broker_url: value.source_broker_url, + source_queue: value.source_queue, + 
source_exchange: value.source_exchange, + source_routing_key: value.source_routing_key, + target_url: value.target_url, + target_container: value.target_container, + target_endpoint: value.target_endpoint, + target_method: value.target_method, + field_mapping: value.field_mapping, + trigger_type: value.trigger_type, + lifecycle: PipeLifecycleSnapshot::active(String::new()), + } + } +} + +/// Command to fetch app configuration from Vault +#[cfg_attr(not(feature = "docker"), allow(dead_code))] +#[derive(Debug, Clone, Deserialize)] +pub struct FetchConfigCommand { + #[serde(default)] + deployment_hash: String, + #[serde(default)] + app_code: String, + /// If true, also write the config to the destination path + #[serde(default)] + apply: bool, +} + +/// Command to apply configuration from Vault to the filesystem and restart container +#[cfg_attr(not(feature = "docker"), allow(dead_code))] +#[derive(Debug, Clone, Deserialize)] +pub struct ApplyConfigCommand { + #[serde(default)] + deployment_hash: String, + #[serde(default)] + app_code: String, + /// Optional: override the config content (instead of fetching from Vault) + #[serde(default)] + config_content: Option, + /// Optional: override the destination path + #[serde(default)] + destination_path: Option, + /// Whether to restart the container after applying config + #[serde(default = "default_true")] + restart_after: bool, +} + +/// Command to deploy a new app container via docker compose +#[cfg_attr(not(feature = "docker"), allow(dead_code))] +#[derive(Debug, Clone, Deserialize)] +pub struct DeployAppCommand { + #[serde(default)] + deployment_hash: String, + #[serde(default)] + app_code: String, + /// Optional: docker-compose.yml content (generated from J2 template) + /// If provided, will be written to disk before deploying + #[serde(default)] + compose_content: Option, + /// Optional: specific image to use (overrides compose file) + #[serde(default)] + image: Option, + /// Optional: environment variables to set + 
#[serde(default)] + env_vars: Option>, + /// Whether to pull the image before starting (default: true) + #[serde(default = "default_true")] + pull: bool, + /// Whether to remove existing container before deploying + #[serde(default)] + force_recreate: bool, + /// Optional: config files to write before deploying (uses existing AppConfig struct) + #[serde(default)] + config_files: Option>, + /// Container runtime to use: "runc" (default) or "kata" for microVM isolation + #[serde(default)] + runtime: Option, +} + +/// Command to remove an app container and associated config +#[cfg_attr(not(feature = "docker"), allow(dead_code))] +#[derive(Debug, Clone, Deserialize)] +pub struct RemoveAppCommand { + #[serde(default)] + deployment_hash: String, + #[serde(default)] + app_code: String, + #[serde(default = "default_true")] + delete_config: bool, + #[serde(default)] + remove_volumes: bool, + #[serde(default)] + remove_image: bool, +} + +/// Command to fetch all app configurations from Vault for a deployment +#[cfg_attr(not(feature = "docker"), allow(dead_code))] +#[derive(Debug, Clone, Deserialize)] +pub struct FetchAllConfigsCommand { + #[serde(default)] + deployment_hash: String, + /// Optional: specific app codes to fetch (if empty, fetches all) + #[serde(default)] + app_codes: Vec, + /// Whether to apply configs to disk after fetching + #[serde(default)] + apply: bool, + /// Whether to create a ZIP archive of all configs + #[serde(default)] + archive: bool, +} + +/// Command to fetch configs and deploy an app in one operation +#[cfg_attr(not(feature = "docker"), allow(dead_code))] +#[derive(Debug, Clone, Deserialize)] +pub struct DeployWithConfigsCommand { + #[serde(default)] + deployment_hash: String, + #[serde(default)] + app_code: String, + /// Whether to pull the image before starting + #[serde(default = "default_true")] + pull: bool, + /// Whether to force recreate the container + #[serde(default)] + force_recreate: bool, + /// Whether to apply all project configs 
before deploying + #[serde(default = "default_true")] + apply_configs: bool, + /// Container runtime to use: "runc" (default) or "kata" for microVM isolation + #[serde(default)] + runtime: Option, +} + +/// Command to detect configuration drift between Vault and deployed files +#[cfg_attr(not(feature = "docker"), allow(dead_code))] +#[derive(Debug, Clone, Deserialize)] +pub struct ConfigDiffCommand { + #[serde(default)] + deployment_hash: String, + /// Optional: specific app codes to check (if empty, checks all) + #[serde(default)] + app_codes: Vec, + /// Whether to include full diff content in response + #[serde(default)] + include_diff: bool, +} + +/// Command to configure nginx proxy manager for an app +#[cfg_attr(not(feature = "docker"), allow(dead_code))] +#[derive(Debug, Clone, Deserialize)] +pub struct ConfigureProxyCommand { + #[serde(default)] + deployment_hash: String, + #[serde(default)] + app_code: String, + /// Domain name(s) to proxy (e.g., ["komodo.example.com"]) + #[serde(default)] + domain_names: Vec, + /// Container/service name to forward to (defaults to app_code) + #[serde(default)] + forward_host: Option, + /// Port on the container to forward to + forward_port: u16, + /// Enable SSL with Let's Encrypt + #[serde(default = "default_true")] + ssl_enabled: bool, + /// Force HTTPS redirect + #[serde(default = "default_true")] + ssl_forced: bool, + /// HTTP/2 support + #[serde(default = "default_true")] + http2_support: bool, + /// Action: "create", "update", "delete" + #[serde(default = "default_create_action")] + action: String, + /// NPM admin credentials (optional, can use defaults from config) + #[serde(default)] + npm_host: Option, + #[serde(default)] + npm_email: Option, + #[serde(default)] + npm_password: Option, +} + +/// Command to execute a shell command inside a running container +#[cfg_attr(not(feature = "docker"), allow(dead_code))] +#[derive(Debug, Clone, Deserialize)] +pub struct ExecCommand { + #[serde(default)] + deployment_hash: 
String, + #[serde(default)] + app_code: String, + #[serde(default)] + container: Option, + /// The command to execute inside the container + command: String, + /// Timeout in seconds (default: 30, max: 120) + #[serde(default = "default_exec_timeout")] + timeout: u32, + /// Whether to redact sensitive data from output + #[serde(default = "default_true")] + redact_output: bool, +} + +/// Command to get server resource metrics (CPU, RAM, disk) +#[cfg_attr(not(feature = "docker"), allow(dead_code))] +#[derive(Debug, Clone, Deserialize)] +pub struct ServerResourcesCommand { + #[serde(default)] + deployment_hash: String, + /// Include disk metrics + #[serde(default = "default_true")] + include_disk: bool, + /// Include network metrics + #[serde(default = "default_true")] + include_network: bool, +} + +/// Command to list all containers in the deployment +#[cfg_attr(not(feature = "docker"), allow(dead_code))] +#[derive(Debug, Clone, Deserialize)] +pub struct ListContainersCommand { + #[serde(default)] + deployment_hash: String, + /// Include container health metrics + #[serde(default = "default_true")] + include_health: bool, + /// Include container logs (last N lines) + #[serde(default)] + include_logs: bool, + /// Number of log lines to include if include_logs is true + #[serde(default = "default_logs_tail")] + log_lines: usize, + /// Optional container mapping for grouping by app_code + #[serde(default)] + app_container_map: Vec, +} + +#[cfg_attr(not(feature = "docker"), allow(dead_code))] +#[derive(Debug, Clone, Deserialize)] +pub struct ContainerMapEntry { + container_name_pattern: String, + container_role: String, + #[serde(default)] + maps_to_app_code: Option, + #[serde(default)] + display_name: Option, +} + +#[cfg_attr(not(feature = "docker"), allow(dead_code))] +#[derive(Debug, Clone, Deserialize)] +pub struct AppContainerMap { + app_code: String, + #[serde(default)] + container_map: Vec, +} + +/// Command to probe a containerized app for connectable API 
endpoints +#[cfg_attr(not(feature = "docker"), allow(dead_code))] +#[derive(Debug, Clone, Deserialize)] +pub struct ProbeEndpointsCommand { + #[serde(default)] + deployment_hash: String, + #[serde(default)] + app_code: String, + #[serde(default)] + container: Option, + /// Protocols to probe: "openapi", "html_forms", "graphql", "mcp", "rest" + #[serde(default = "default_probe_protocols")] + protocols: Vec, + /// Timeout per probe request in seconds + #[serde(default = "default_probe_timeout")] + probe_timeout: u32, + /// Whether to capture sample responses from discovered endpoints + #[serde(default)] + capture_samples: bool, +} + +fn default_probe_protocols() -> Vec { + vec!["openapi".to_string(), "rest".to_string()] +} + +fn default_probe_timeout() -> u32 { + 5 +} + +fn default_exec_timeout() -> u32 { + 30 +} + +fn default_logs_tail() -> usize { + 10 +} + +fn default_create_action() -> String { + "create".to_string() +} + +pub fn parse_stacker_command(cmd: &AgentCommand) -> Result> { + let normalized = cmd.name.trim().to_lowercase(); + match normalized.as_str() { + "health" | "stacker.health" => { + let payload: HealthCommand = serde_json::from_value(unwrap_params(&cmd.params)) + .context("invalid health payload")?; + let payload = payload.normalize().with_command_context(cmd); + payload.validate()?; + Ok(Some(StackerCommand::Health(payload))) + } + "logs" | "stacker.logs" => { + let payload: LogsCommand = serde_json::from_value(unwrap_params(&cmd.params)) + .context("invalid logs payload")?; + let payload = payload.normalize().with_command_context(cmd); + payload.validate()?; + Ok(Some(StackerCommand::Logs(payload))) + } + "restart" | "stacker.restart" => { + let payload: RestartCommand = serde_json::from_value(unwrap_params(&cmd.params)) + .context("invalid restart payload")?; + let payload = payload.normalize().with_command_context(cmd); + payload.validate()?; + Ok(Some(StackerCommand::Restart(payload))) + } + "stop" | "stacker.stop" => { + let payload: 
StopCommand = serde_json::from_value(unwrap_params(&cmd.params)) + .context("invalid stop payload")?; + let payload = payload.normalize().with_command_context(cmd); + payload.validate()?; + Ok(Some(StackerCommand::Stop(payload))) + } + "start" | "stacker.start" => { + let payload: StartCommand = serde_json::from_value(unwrap_params(&cmd.params)) + .context("invalid start payload")?; + let payload = payload.normalize().with_command_context(cmd); + payload.validate()?; + Ok(Some(StackerCommand::Start(payload))) + } + "error_summary" | "stacker.error_summary" => { + let payload: ErrorSummaryCommand = serde_json::from_value(unwrap_params(&cmd.params)) + .context("invalid error_summary payload")?; + let payload = payload.normalize().with_command_context(cmd); + payload.validate()?; + Ok(Some(StackerCommand::ErrorSummary(payload))) + } + "fetch_config" | "stacker.fetch_config" => { + let payload: FetchConfigCommand = serde_json::from_value(unwrap_params(&cmd.params)) + .context("invalid fetch_config payload")?; + let payload = payload.normalize().with_command_context(cmd); + payload.validate()?; + Ok(Some(StackerCommand::FetchConfig(payload))) + } + "apply_config" | "stacker.apply_config" => { + let payload: ApplyConfigCommand = serde_json::from_value(unwrap_params(&cmd.params)) + .context("invalid apply_config payload")?; + let payload = payload.normalize().with_command_context(cmd); + payload.validate()?; + Ok(Some(StackerCommand::ApplyConfig(payload))) + } + "deploy_app" | "stacker.deploy_app" => { + let payload: DeployAppCommand = serde_json::from_value(unwrap_params(&cmd.params)) + .context("invalid deploy_app payload")?; + let payload = payload.normalize().with_command_context(cmd); + payload.validate()?; + Ok(Some(StackerCommand::DeployApp(payload))) + } + "remove_app" | "stacker.remove_app" => { + let payload: RemoveAppCommand = serde_json::from_value(unwrap_params(&cmd.params)) + .context("invalid remove_app payload")?; + let payload = 
payload.normalize().with_command_context(cmd); + payload.validate()?; + Ok(Some(StackerCommand::RemoveApp(payload))) + } + "fetch_all_configs" | "stacker.fetch_all_configs" => { + let payload: FetchAllConfigsCommand = + serde_json::from_value(unwrap_params(&cmd.params)) + .context("invalid fetch_all_configs payload")?; let payload = payload.normalize().with_command_context(cmd); payload.validate()?; Ok(Some(StackerCommand::FetchAllConfigs(payload))) @@ -645,6 +1814,27 @@ pub fn parse_stacker_command(cmd: &AgentCommand) -> Result { + let payload: ActivatePipeCommand = serde_json::from_value(unwrap_params(&cmd.params)) + .context("invalid activate_pipe payload")?; + let payload = payload.normalize().with_command_context(cmd); + payload.validate()?; + Ok(Some(StackerCommand::ActivatePipe(payload))) + } + "deactivate_pipe" | "stacker.deactivate_pipe" => { + let payload: DeactivatePipeCommand = serde_json::from_value(unwrap_params(&cmd.params)) + .context("invalid deactivate_pipe payload")?; + let payload = payload.normalize().with_command_context(cmd); + payload.validate()?; + Ok(Some(StackerCommand::DeactivatePipe(payload))) + } + "trigger_pipe" | "stacker.trigger_pipe" => { + let payload: TriggerPipeCommand = serde_json::from_value(unwrap_params(&cmd.params)) + .context("invalid trigger_pipe payload")?; + let payload = payload.normalize().with_command_context(cmd); + payload.validate()?; + Ok(Some(StackerCommand::TriggerPipe(payload))) + } _ => Ok(None), } } @@ -653,6 +1843,7 @@ pub async fn execute_stacker_command( agent_cmd: &AgentCommand, command: &StackerCommand, firewall_policy: &firewall::FirewallPolicy, + pipe_runtime: &PipeRuntime, ) -> Result { // Firewall commands don't require Docker if let StackerCommand::ConfigureFirewall(data) = command { @@ -661,12 +1852,25 @@ pub async fn execute_stacker_command( #[cfg(feature = "docker")] { - execute_with_docker(agent_cmd, command, firewall_policy).await + execute_with_docker(agent_cmd, command, firewall_policy, 
pipe_runtime).await } #[cfg(not(feature = "docker"))] { - let _ = (agent_cmd, command); - bail!("docker feature not enabled for stacker commands") + match command { + StackerCommand::ActivatePipe(data) => { + handle_activate_pipe(agent_cmd, data, pipe_runtime).await + } + StackerCommand::DeactivatePipe(data) => { + handle_deactivate_pipe(agent_cmd, data, pipe_runtime).await + } + StackerCommand::TriggerPipe(data) => { + handle_trigger_pipe(agent_cmd, data, pipe_runtime).await + } + _ => { + let _ = (firewall_policy, pipe_runtime); + bail!("docker feature not enabled for stacker commands") + } + } } } @@ -693,18 +1897,158 @@ fn unwrap_params(params: &serde_json::Value) -> serde_json::Value { .unwrap_or_else(|| params.clone()) } -#[cfg(feature = "docker")] -fn resolve_container_name(app_code: &str, container: &Option) -> String { - if let Some(value) = container.as_ref() { - let trimmed_value = trimmed(value); - if !trimmed_value.is_empty() { - return trimmed_value; +#[cfg(feature = "docker")] +fn resolve_container_name(app_code: &str, container: &Option) -> String { + if let Some(value) = container.as_ref() { + let trimmed_value = trimmed(value); + if !trimmed_value.is_empty() { + return trimmed_value; + } + } + trimmed(app_code) +} + +impl HealthCommand { + fn normalize(mut self) -> Self { + self.deployment_hash = trimmed(&self.deployment_hash); + self.app_code = trimmed(&self.app_code); + if let Some(value) = self.container.take() { + let trimmed_value = trimmed(&value); + if !trimmed_value.is_empty() { + self.container = Some(trimmed_value); + } + } + self + } + + fn with_command_context(mut self, agent_cmd: &AgentCommand) -> Self { + if self.deployment_hash.is_empty() { + if let Some(hash) = &agent_cmd.deployment_hash { + self.deployment_hash = hash.clone(); + } + } + if self.app_code.is_empty() { + if let Some(code) = &agent_cmd.app_code { + self.app_code = code.clone(); + } + } + self + } + + fn validate(&self) -> Result<()> { + if 
self.deployment_hash.is_empty() { + bail!("deployment_hash is required"); + } + if self.app_code.is_empty() { + bail!("app_code is required"); + } + Ok(()) + } +} + +impl LogsCommand { + fn normalize(mut self) -> Self { + self.deployment_hash = trimmed(&self.deployment_hash); + self.app_code = trimmed(&self.app_code); + if let Some(value) = self.container.take() { + let trimmed_value = trimmed(&value); + if !trimmed_value.is_empty() { + self.container = Some(trimmed_value); + } + } + self.limit = self.limit.clamp(1, LOGS_MAX_LIMIT); + if let Some(streams) = &mut self.streams { + let filtered: Vec = streams + .iter() + .filter_map(|s| { + let normalized = s.trim().to_lowercase(); + match normalized.as_str() { + "stdout" | "stderr" => Some(normalized), + _ => None, + } + }) + .collect(); + self.streams = if filtered.is_empty() { + None + } else { + Some(filtered) + }; + } + self + } + + fn with_command_context(mut self, agent_cmd: &AgentCommand) -> Self { + if self.deployment_hash.is_empty() { + if let Some(hash) = &agent_cmd.deployment_hash { + self.deployment_hash = hash.clone(); + } + } + if self.app_code.is_empty() { + if let Some(code) = &agent_cmd.app_code { + self.app_code = code.clone(); + } + } + self + } + + fn validate(&self) -> Result<()> { + if self.deployment_hash.is_empty() { + bail!("deployment_hash is required"); + } + if self.app_code.is_empty() { + bail!("app_code is required"); + } + Ok(()) + } + + #[cfg(feature = "docker")] + fn includes_stream(&self, stream: &str) -> bool { + match &self.streams { + Some(allowed) => allowed.iter().any(|s| s == stream), + None => true, + } + } +} + +impl RestartCommand { + fn normalize(mut self) -> Self { + self.deployment_hash = trimmed(&self.deployment_hash); + self.app_code = trimmed(&self.app_code); + if let Some(value) = self.container.take() { + let trimmed_value = trimmed(&value); + if !trimmed_value.is_empty() { + self.container = Some(trimmed_value); + } + } + self + } + + fn with_command_context(mut 
self, agent_cmd: &AgentCommand) -> Self { + if self.deployment_hash.is_empty() { + if let Some(hash) = &agent_cmd.deployment_hash { + self.deployment_hash = hash.clone(); + } + } + if self.app_code.is_empty() { + if let Some(code) = &agent_cmd.app_code { + self.app_code = code.clone(); + } + } + self + } + + fn validate(&self) -> Result<()> { + if self.deployment_hash.is_empty() { + bail!("deployment_hash is required"); + } + if self.app_code.is_empty() { + bail!("app_code is required"); } + Ok(()) } - trimmed(app_code) } -impl HealthCommand { +impl StopCommand { fn normalize(mut self) -> Self { self.deployment_hash = trimmed(&self.deployment_hash); self.app_code = trimmed(&self.app_code); @@ -714,6 +2058,7 @@ impl HealthCommand { self.container = Some(trimmed_value); } } + self.timeout = self.timeout.clamp(1, 300); // Max 5 minutes self } @@ -742,7 +2087,7 @@ impl HealthCommand { } } -impl LogsCommand { +impl StartCommand { fn normalize(mut self) -> Self { self.deployment_hash = trimmed(&self.deployment_hash); self.app_code = trimmed(&self.app_code); @@ -752,24 +2097,6 @@ impl LogsCommand { self.container = Some(trimmed_value); } } - self.limit = self.limit.clamp(1, LOGS_MAX_LIMIT); - if let Some(streams) = &mut self.streams { - let filtered: Vec = streams - .iter() - .filter_map(|s| { - let normalized = s.trim().to_lowercase(); - match normalized.as_str() { - "stdout" | "stderr" => Some(normalized), - _ => None, - } - }) - .collect(); - self.streams = if filtered.is_empty() { - None - } else { - Some(filtered) - }; - } self } @@ -796,29 +2123,200 @@ impl LogsCommand { } Ok(()) } +} - #[cfg(feature = "docker")] - fn includes_stream(&self, stream: &str) -> bool { - match &self.streams { - Some(allowed) => allowed.iter().any(|s| s == stream), - None => true, +impl ErrorSummaryCommand { + fn normalize(mut self) -> Self { + self.deployment_hash = trimmed(&self.deployment_hash); + self.app_code = trimmed(&self.app_code); + self.hours = self.hours.clamp(1, 168); // Max 
7 days + self + } + + fn with_command_context(mut self, agent_cmd: &AgentCommand) -> Self { + if self.deployment_hash.is_empty() { + if let Some(hash) = &agent_cmd.deployment_hash { + self.deployment_hash = hash.clone(); + } + } + if self.app_code.is_empty() { + if let Some(code) = &agent_cmd.app_code { + self.app_code = code.clone(); + } + } + self + } + + fn validate(&self) -> Result<()> { + if self.deployment_hash.is_empty() { + bail!("deployment_hash is required"); } + // app_code is optional for error_summary - if empty, analyze all containers + Ok(()) } } -impl RestartCommand { +impl ActivatePipeCommand { fn normalize(mut self) -> Self { self.deployment_hash = trimmed(&self.deployment_hash); - self.app_code = trimmed(&self.app_code); - if let Some(value) = self.container.take() { - let trimmed_value = trimmed(&value); - if !trimmed_value.is_empty() { - self.container = Some(trimmed_value); + self.pipe_instance_id = trimmed(&self.pipe_instance_id); + self.source_container = self.source_container.map(|value| trimmed(&value)); + self.source_endpoint = trimmed(&self.source_endpoint); + if self.source_endpoint.is_empty() { + self.source_endpoint = default_pipe_source_endpoint(); + } + self.source_method = + normalize_trigger_pipe_method(&self.source_method, &default_pipe_source_method()); + self.source_broker_url = self.source_broker_url.map(|value| trimmed(&value)); + self.source_queue = self.source_queue.map(|value| trimmed(&value)); + self.source_exchange = self.source_exchange.map(|value| trimmed(&value)); + self.source_routing_key = self.source_routing_key.map(|value| trimmed(&value)); + self.target_url = self.target_url.map(|value| trimmed(&value)); + self.target_container = self.target_container.map(|value| trimmed(&value)); + self.target_endpoint = trimmed(&self.target_endpoint); + if self.target_endpoint.is_empty() { + self.target_endpoint = default_pipe_target_endpoint(); + } + self.target_method = + normalize_trigger_pipe_method(&self.target_method, 
&default_pipe_target_method()); + self.trigger_type = trimmed(&self.trigger_type).to_lowercase(); + if self.trigger_type.is_empty() { + self.trigger_type = default_activate_pipe_trigger_type(); + } + self + } + + fn with_command_context(mut self, agent_cmd: &AgentCommand) -> Self { + if self.deployment_hash.is_empty() { + if let Some(hash) = &agent_cmd.deployment_hash { + self.deployment_hash = hash.clone(); + } + } + self + } + + fn validate(&self) -> Result<()> { + if self.deployment_hash.is_empty() { + bail!("deployment_hash is required"); + } + if self.pipe_instance_id.is_empty() { + bail!("pipe_instance_id is required"); + } + if matches!(self.trigger_type.as_str(), "amqp" | "rabbitmq") { + if self + .source_broker_url + .as_deref() + .filter(|value| !value.is_empty()) + .is_none() + { + bail!("activate_pipe with rabbitmq trigger_type requires source_broker_url"); + } + if self + .source_queue + .as_deref() + .filter(|value| !value.is_empty()) + .is_none() + { + bail!("activate_pipe with rabbitmq trigger_type requires source_queue"); + } + } + if self + .target_url + .as_deref() + .filter(|value| !value.is_empty()) + .is_none() + && self + .target_container + .as_deref() + .filter(|value| !value.is_empty()) + .is_none() + { + bail!("activate_pipe requires target_url or target_container"); + } + Ok(()) + } +} + +impl DeactivatePipeCommand { + fn normalize(mut self) -> Self { + self.deployment_hash = trimmed(&self.deployment_hash); + self.pipe_instance_id = trimmed(&self.pipe_instance_id); + self + } + + fn with_command_context(mut self, agent_cmd: &AgentCommand) -> Self { + if self.deployment_hash.is_empty() { + if let Some(hash) = &agent_cmd.deployment_hash { + self.deployment_hash = hash.clone(); + } + } + self + } + + fn validate(&self) -> Result<()> { + if self.deployment_hash.is_empty() { + bail!("deployment_hash is required"); + } + if self.pipe_instance_id.is_empty() { + bail!("pipe_instance_id is required"); + } + Ok(()) + } +} + +impl 
TriggerPipeCommand { + fn normalize(mut self) -> Self { + self.deployment_hash = trimmed(&self.deployment_hash); + self.pipe_instance_id = trimmed(&self.pipe_instance_id); + self.source_container = self.source_container.map(|value| trimmed(&value)); + self.source_endpoint = trimmed(&self.source_endpoint); + if self.source_endpoint.is_empty() { + self.source_endpoint = default_pipe_source_endpoint(); + } + self.source_method = + normalize_trigger_pipe_method(&self.source_method, &default_pipe_source_method()); + self.target_url = self.target_url.map(|value| trimmed(&value)); + self.target_container = self.target_container.map(|value| trimmed(&value)); + self.target_endpoint = trimmed(&self.target_endpoint); + if self.target_endpoint.is_empty() { + self.target_endpoint = "/".to_string(); + } + self.target_method = + normalize_trigger_pipe_method(&self.target_method, &default_pipe_target_method()); + self.trigger_type = trimmed(&self.trigger_type).to_lowercase(); + if self.trigger_type.is_empty() { + self.trigger_type = default_pipe_trigger_type(); + } + self + } + + fn with_command_context(mut self, agent_cmd: &AgentCommand) -> Self { + if self.deployment_hash.is_empty() { + if let Some(hash) = &agent_cmd.deployment_hash { + self.deployment_hash = hash.clone(); } } self } + fn validate(&self) -> Result<()> { + if self.deployment_hash.is_empty() { + bail!("deployment_hash is required"); + } + if self.pipe_instance_id.is_empty() { + bail!("pipe_instance_id is required"); + } + Ok(()) + } +} + +impl FetchConfigCommand { + fn normalize(mut self) -> Self { + self.deployment_hash = trimmed(&self.deployment_hash); + self.app_code = trimmed(&self.app_code); + self + } + fn with_command_context(mut self, agent_cmd: &AgentCommand) -> Self { if self.deployment_hash.is_empty() { if let Some(hash) = &agent_cmd.deployment_hash { @@ -844,17 +2342,13 @@ impl RestartCommand { } } -impl StopCommand { +impl ApplyConfigCommand { fn normalize(mut self) -> Self { self.deployment_hash = 
trimmed(&self.deployment_hash); self.app_code = trimmed(&self.app_code); - if let Some(value) = self.container.take() { - let trimmed_value = trimmed(&value); - if !trimmed_value.is_empty() { - self.container = Some(trimmed_value); - } + if let Some(path) = &self.destination_path { + self.destination_path = Some(trimmed(path)); } - self.timeout = self.timeout.clamp(1, 300); // Max 5 minutes self } @@ -879,19 +2373,18 @@ impl StopCommand { if self.app_code.is_empty() { bail!("app_code is required"); } + // Either config_content must be provided OR we fetch from Vault + // destination_path is optional if we're fetching from Vault (it has the path) Ok(()) } } -impl StartCommand { +impl DeployAppCommand { fn normalize(mut self) -> Self { self.deployment_hash = trimmed(&self.deployment_hash); self.app_code = trimmed(&self.app_code); - if let Some(value) = self.container.take() { - let trimmed_value = trimmed(&value); - if !trimmed_value.is_empty() { - self.container = Some(trimmed_value); - } + if let Some(img) = &self.image { + self.image = Some(trimmed(img)); } self } @@ -921,11 +2414,10 @@ impl StartCommand { } } -impl ErrorSummaryCommand { +impl RemoveAppCommand { fn normalize(mut self) -> Self { self.deployment_hash = trimmed(&self.deployment_hash); self.app_code = trimmed(&self.app_code); - self.hours = self.hours.clamp(1, 168); // Max 7 days self } @@ -947,12 +2439,43 @@ impl ErrorSummaryCommand { if self.deployment_hash.is_empty() { bail!("deployment_hash is required"); } - // app_code is optional for error_summary - if empty, analyze all containers + if self.app_code.is_empty() { + bail!("app_code is required"); + } Ok(()) } } -impl FetchConfigCommand { +impl FetchAllConfigsCommand { + fn normalize(mut self) -> Self { + self.deployment_hash = trimmed(&self.deployment_hash); + self.app_codes = self + .app_codes + .into_iter() + .map(|s| trimmed(&s)) + .filter(|s| !s.is_empty()) + .collect(); + self + } + + fn with_command_context(mut self, agent_cmd: 
&AgentCommand) -> Self { + if self.deployment_hash.is_empty() { + if let Some(hash) = &agent_cmd.deployment_hash { + self.deployment_hash = hash.clone(); + } + } + self + } + + fn validate(&self) -> Result<()> { + if self.deployment_hash.is_empty() { + bail!("deployment_hash is required"); + } + Ok(()) + } +} + +impl DeployWithConfigsCommand { fn normalize(mut self) -> Self { self.deployment_hash = trimmed(&self.deployment_hash); self.app_code = trimmed(&self.app_code); @@ -984,12 +2507,48 @@ impl FetchConfigCommand { } } -impl ApplyConfigCommand { +impl ConfigDiffCommand { + fn normalize(mut self) -> Self { + self.deployment_hash = trimmed(&self.deployment_hash); + self.app_codes = self + .app_codes + .into_iter() + .map(|s| trimmed(&s)) + .filter(|s| !s.is_empty()) + .collect(); + self + } + + fn with_command_context(mut self, agent_cmd: &AgentCommand) -> Self { + if self.deployment_hash.is_empty() { + if let Some(hash) = &agent_cmd.deployment_hash { + self.deployment_hash = hash.clone(); + } + } + self + } + + fn validate(&self) -> Result<()> { + if self.deployment_hash.is_empty() { + bail!("deployment_hash is required"); + } + Ok(()) + } +} + +impl ConfigureProxyCommand { fn normalize(mut self) -> Self { self.deployment_hash = trimmed(&self.deployment_hash); self.app_code = trimmed(&self.app_code); - if let Some(path) = &self.destination_path { - self.destination_path = Some(trimmed(path)); + self.domain_names = self + .domain_names + .into_iter() + .map(|s| trimmed(&s)) + .filter(|s| !s.is_empty()) + .collect(); + self.action = trimmed(&self.action).to_lowercase(); + if self.action.is_empty() { + self.action = "create".to_string(); } self } @@ -1000,11 +2559,6 @@ impl ApplyConfigCommand { self.deployment_hash = hash.clone(); } } - if self.app_code.is_empty() { - if let Some(code) = &agent_cmd.app_code { - self.app_code = code.clone(); - } - } self } @@ -1015,19 +2569,32 @@ impl ApplyConfigCommand { if self.app_code.is_empty() { bail!("app_code is required"); } 
- // Either config_content must be provided OR we fetch from Vault - // destination_path is optional if we're fetching from Vault (it has the path) + if self.domain_names.is_empty() { + bail!("at least one domain_name is required"); + } + if self.forward_port == 0 { + bail!("forward_port is required and must be > 0"); + } + if !["create", "update", "delete"].contains(&self.action.as_str()) { + bail!("action must be one of: create, update, delete"); + } Ok(()) } } -impl DeployAppCommand { +impl ExecCommand { fn normalize(mut self) -> Self { self.deployment_hash = trimmed(&self.deployment_hash); self.app_code = trimmed(&self.app_code); - if let Some(img) = &self.image { - self.image = Some(trimmed(img)); + if let Some(value) = self.container.take() { + let trimmed_value = trimmed(&value); + if !trimmed_value.is_empty() { + self.container = Some(trimmed_value); + } } + self.command = self.command.trim().to_string(); + // Clamp timeout between 1 and 120 seconds + self.timeout = self.timeout.clamp(1, 120); self } @@ -1037,29 +2604,34 @@ impl DeployAppCommand { self.deployment_hash = hash.clone(); } } - if self.app_code.is_empty() { - if let Some(code) = &agent_cmd.app_code { - self.app_code = code.clone(); - } - } self } fn validate(&self) -> Result<()> { - if self.deployment_hash.is_empty() { - bail!("deployment_hash is required"); - } if self.app_code.is_empty() { - bail!("app_code is required"); + bail!("app_code is required for exec command"); + } + if self.command.is_empty() { + bail!("command is required for exec"); + } + // Block dangerous commands + let blocked_patterns = [ + "rm -rf /", "mkfs", "dd if=", ":(){", "shutdown", "reboot", "halt", "poweroff", + "init 0", "init 6", + ]; + let cmd_lower = self.command.to_lowercase(); + for pattern in &blocked_patterns { + if cmd_lower.contains(pattern) { + bail!("Command '{}' is blocked for security reasons", pattern); + } } Ok(()) } } -impl RemoveAppCommand { +impl ServerResourcesCommand { fn normalize(mut self) -> 
Self { self.deployment_hash = trimmed(&self.deployment_hash); - self.app_code = trimmed(&self.app_code); self } @@ -1069,34 +2641,20 @@ impl RemoveAppCommand { self.deployment_hash = hash.clone(); } } - if self.app_code.is_empty() { - if let Some(code) = &agent_cmd.app_code { - self.app_code = code.clone(); - } - } self } fn validate(&self) -> Result<()> { - if self.deployment_hash.is_empty() { - bail!("deployment_hash is required"); - } - if self.app_code.is_empty() { - bail!("app_code is required"); - } + // No strict validation needed - deployment_hash is optional for this command Ok(()) } } -impl FetchAllConfigsCommand { +impl ListContainersCommand { fn normalize(mut self) -> Self { self.deployment_hash = trimmed(&self.deployment_hash); - self.app_codes = self - .app_codes - .into_iter() - .map(|s| trimmed(&s)) - .filter(|s| !s.is_empty()) - .collect(); + // Clamp log lines + self.log_lines = self.log_lines.clamp(1, 100); self } @@ -1110,17 +2668,31 @@ impl FetchAllConfigsCommand { } fn validate(&self) -> Result<()> { - if self.deployment_hash.is_empty() { - bail!("deployment_hash is required"); - } + // No strict validation needed Ok(()) } } -impl DeployWithConfigsCommand { +impl ProbeEndpointsCommand { fn normalize(mut self) -> Self { self.deployment_hash = trimmed(&self.deployment_hash); self.app_code = trimmed(&self.app_code); + if let Some(value) = self.container.take() { + let trimmed_value = trimmed(&value); + if !trimmed_value.is_empty() { + self.container = Some(trimmed_value); + } + } + // Normalize protocol names to lowercase + self.protocols = self + .protocols + .iter() + .map(|p| p.trim().to_lowercase()) + .filter(|p| !p.is_empty()) + .collect(); + if self.protocols.is_empty() { + self.protocols = default_probe_protocols(); + } self } @@ -1145,535 +2717,1470 @@ impl DeployWithConfigsCommand { if self.app_code.is_empty() { bail!("app_code is required"); } + let valid_protocols = ["openapi", "html_forms", "graphql", "mcp", "rest"]; + for p in 
&self.protocols { + if !valid_protocols.contains(&p.as_str()) { + bail!("unsupported protocol: {}. Valid: {:?}", p, valid_protocols); + } + } Ok(()) } } -impl ConfigDiffCommand { - fn normalize(mut self) -> Self { - self.deployment_hash = trimmed(&self.deployment_hash); - self.app_codes = self - .app_codes - .into_iter() - .map(|s| trimmed(&s)) - .filter(|s| !s.is_empty()) - .collect(); - self +fn trimmed(value: &str) -> String { + value.trim().to_string() +} + +#[cfg(feature = "docker")] +fn resolve_compose_paths(deployment_hash: &str, app_code: &str) -> (String, String) { + use std::path::Path; + + if let Ok(dir) = std::env::var("COMPOSE_PROJECT_DIR") { + let file = format!("{}/docker-compose.yml", dir); + return (dir, file); + } + + let hash_dir = format!("/home/trydirect/{}", deployment_hash); + let hash_file = format!("{}/docker-compose.yml", hash_dir); + if Path::new(&hash_file).exists() { + return (hash_dir, hash_file); + } + + let app_dir = format!("/home/trydirect/{}", app_code); + let app_file = format!("{}/docker-compose.yml", app_dir); + (app_dir, app_file) +} + +/// Represents which compose command variant is available on the system. +#[cfg(feature = "docker")] +#[derive(Debug, Clone, Copy, PartialEq)] +pub enum ComposeVariant { + /// Docker Compose V2 plugin: `docker compose` + Plugin, + /// Standalone docker-compose binary: `docker-compose` + Standalone, +} + +/// Detect which docker compose variant is available on the system. +/// Tries `docker compose version` first (plugin), then `docker-compose version` (standalone). +/// Result is cached for the lifetime of the process. 
+#[cfg(feature = "docker")] +pub async fn detect_compose_variant() -> Option { + use tokio::process::Command; + + static COMPOSE_VARIANT: OnceLock> = OnceLock::new(); + + // Return cached result if available + if let Some(variant) = COMPOSE_VARIANT.get() { + return *variant; + } + + // Try docker compose (plugin) first + let plugin_result = Command::new("docker") + .arg("compose") + .arg("version") + .output() + .await; + + if let Ok(output) = plugin_result { + if output.status.success() { + let _ = COMPOSE_VARIANT.set(Some(ComposeVariant::Plugin)); + return Some(ComposeVariant::Plugin); + } + } + + // Try docker-compose (standalone) as fallback + let standalone_result = Command::new("docker-compose").arg("version").output().await; + + if let Ok(output) = standalone_result { + if output.status.success() { + let _ = COMPOSE_VARIANT.set(Some(ComposeVariant::Standalone)); + return Some(ComposeVariant::Standalone); + } + } + + // Neither is available + let _ = COMPOSE_VARIANT.set(None); + None +} + +/// Build a compose command with the correct binary/syntax based on what's available. +/// Returns (command_program, initial_args) where initial_args should be prepended to actual args. 
+#[cfg(feature = "docker")] +pub fn build_compose_command(variant: ComposeVariant) -> (String, Vec) { + match variant { + ComposeVariant::Plugin => ("docker".to_string(), vec!["compose".to_string()]), + ComposeVariant::Standalone => ("docker-compose".to_string(), vec![]), + } +} + +fn base_result( + agent_cmd: &AgentCommand, + deployment_hash: &str, + app_code: &str, + command_type: &str, +) -> CommandResult { + CommandResult { + command_id: agent_cmd.command_id.clone(), + status: "success".into(), + result: None, + error: None, + completed_at: now_timestamp(), + deployment_hash: Some(deployment_hash.to_string()), + app_code: Some(app_code.to_string()), + command_type: Some(command_type.to_string()), + ..CommandResult::default() + } +} + +fn now_timestamp() -> String { + Utc::now().to_rfc3339_opts(SecondsFormat::Secs, true) +} + +#[cfg(feature = "docker")] +fn errors_value(errors: &[CommandError]) -> Value { + serde_json::to_value(errors).unwrap_or_else(|_| json!([])) +} + +#[cfg(feature = "docker")] +fn redact_message(message: &str, enabled: bool) -> (String, bool) { + if !enabled || message.is_empty() { + return (message.to_string(), false); + } + + static REDACTION_RE: OnceLock = OnceLock::new(); + let regex = REDACTION_RE.get_or_init(|| { + Regex::new(r"(?i)(token|secret|password|key|credential)(\s*[=:]\s*)([^\s]+)").unwrap() + }); + + let mut redacted = false; + let replaced = regex.replace_all(message, |caps: ®ex::Captures| { + redacted = true; + format!("{}{}***", &caps[1], &caps[2]) + }); + (replaced.into_owned(), redacted) +} + +#[cfg(feature = "docker")] +fn make_error(code: &str, message: impl Into, details: Option) -> CommandError { + CommandError { + code: code.to_string(), + message: message.into(), + details, } +} - fn with_command_context(mut self, agent_cmd: &AgentCommand) -> Self { - if self.deployment_hash.is_empty() { - if let Some(hash) = &agent_cmd.deployment_hash { - self.deployment_hash = hash.clone(); - } - } - self +#[cfg(feature = 
"docker")] +fn env_flag_enabled(name: &str, default: bool) -> bool { + match std::env::var(name) { + Ok(value) => match value.trim().to_ascii_lowercase().as_str() { + "0" | "false" | "no" | "off" => false, + "1" | "true" | "yes" | "on" => true, + _ => default, + }, + Err(_) => default, } +} - fn validate(&self) -> Result<()> { - if self.deployment_hash.is_empty() { - bail!("deployment_hash is required"); - } - Ok(()) - } +#[cfg(feature = "docker")] +fn proxy_owner_enabled() -> bool { + env_flag_enabled("STATUS_PANEL_PROXY_OWNER", true) } -impl ConfigureProxyCommand { - fn normalize(mut self) -> Self { - self.deployment_hash = trimmed(&self.deployment_hash); - self.app_code = trimmed(&self.app_code); - self.domain_names = self - .domain_names - .into_iter() - .map(|s| trimmed(&s)) - .filter(|s| !s.is_empty()) - .collect(); - self.action = trimmed(&self.action).to_lowercase(); - if self.action.is_empty() { - self.action = "create".to_string(); - } - self - } +#[cfg(feature = "docker")] +fn npm_env_fallback_enabled() -> bool { + env_flag_enabled("NPM_ALLOW_ENV_FALLBACK", false) +} - fn with_command_context(mut self, agent_cmd: &AgentCommand) -> Self { - if self.deployment_hash.is_empty() { - if let Some(hash) = &agent_cmd.deployment_hash { - self.deployment_hash = hash.clone(); - } - } - self - } +#[cfg(feature = "docker")] +fn resolve_server_id( +) -> std::result::Result { + std::env::var("STACKER_SERVER_ID") + .ok() + .filter(|value| !value.trim().is_empty()) + .ok_or(crate::security::vault_client::NpmCredentialError::MissingServerId) +} - fn validate(&self) -> Result<()> { - if self.deployment_hash.is_empty() { - bail!("deployment_hash is required"); - } - if self.app_code.is_empty() { - bail!("app_code is required"); - } - if self.domain_names.is_empty() { - bail!("at least one domain_name is required"); - } - if self.forward_port == 0 { - bail!("forward_port is required and must be > 0"); +#[cfg(feature = "docker")] +fn configure_proxy_error( + error: 
&crate::security::vault_client::NpmCredentialError, +) -> CommandError { + let details = match error { + crate::security::vault_client::NpmCredentialError::MissingSecret { path } + | crate::security::vault_client::NpmCredentialError::InvalidPayload { path, .. } + | crate::security::vault_client::NpmCredentialError::UnknownAuthMode { path, .. } => { + Some(path.clone()) } - if !["create", "update", "delete"].contains(&self.action.as_str()) { - bail!("action must be one of: create, update, delete"); + crate::security::vault_client::NpmCredentialError::ExistingHostConflict { domain } => { + Some(domain.clone()) } - Ok(()) - } + _ => None, + }; + + make_error(error.code(), error.operator_message(), details) } -impl ExecCommand { - fn normalize(mut self) -> Self { - self.deployment_hash = trimmed(&self.deployment_hash); - self.app_code = trimmed(&self.app_code); - if let Some(value) = self.container.take() { - let trimmed_value = trimmed(&value); - if !trimmed_value.is_empty() { - self.container = Some(trimmed_value); - } - } - self.command = self.command.trim().to_string(); - // Clamp timeout between 1 and 120 seconds - self.timeout = self.timeout.clamp(1, 120); - self - } +#[cfg(feature = "docker")] +async fn resolve_npm_config( + data: &ConfigureProxyCommand, +) -> std::result::Result< + crate::connectors::npm::NpmConfig, + crate::security::vault_client::NpmCredentialError, +> { + use crate::connectors::npm::NpmConfig; + use crate::security::vault_client::{NpmCredentialError, VaultClient}; - fn with_command_context(mut self, agent_cmd: &AgentCommand) -> Self { - if self.deployment_hash.is_empty() { - if let Some(hash) = &agent_cmd.deployment_hash { - self.deployment_hash = hash.clone(); - } + match (&data.npm_host, &data.npm_email, &data.npm_password) { + (Some(host), Some(email), Some(password)) => { + return Ok(NpmConfig::new( + host.clone(), + email.clone(), + password.clone(), + )); } - self + (None, None, None) => {} + _ => return 
Err(NpmCredentialError::InvalidOverride), } - fn validate(&self) -> Result<()> { - if self.app_code.is_empty() { - bail!("app_code is required for exec command"); - } - if self.command.is_empty() { - bail!("command is required for exec"); - } - // Block dangerous commands - let blocked_patterns = [ - "rm -rf /", "mkfs", "dd if=", ":(){", "shutdown", "reboot", "halt", "poweroff", - "init 0", "init 6", - ]; - let cmd_lower = self.command.to_lowercase(); - for pattern in &blocked_patterns { - if cmd_lower.contains(pattern) { - bail!("Command '{}' is blocked for security reasons", pattern); - } - } - Ok(()) + if let Some(vault_client) = VaultClient::from_env().map_err(|error| { + tracing::warn!( + error = %error, + "Failed to initialize Vault client for configure_proxy" + ); + NpmCredentialError::VaultUnavailable + })? { + let server_id = resolve_server_id()?; + let credentials = vault_client.fetch_npm_credentials(&server_id).await?; + return Ok(NpmConfig::from_credentials(&credentials)); } -} -impl ServerResourcesCommand { - fn normalize(mut self) -> Self { - self.deployment_hash = trimmed(&self.deployment_hash); - self + if npm_env_fallback_enabled() { + return NpmConfig::from_env().ok_or_else(|| NpmCredentialError::MissingSecret { + path: "NPM_HOST/NPM_EMAIL/NPM_PASSWORD".to_string(), + }); } - fn with_command_context(mut self, agent_cmd: &AgentCommand) -> Self { - if self.deployment_hash.is_empty() { - if let Some(hash) = &agent_cmd.deployment_hash { - self.deployment_hash = hash.clone(); - } - } - self - } + Err(NpmCredentialError::MissingVaultConfiguration) +} - fn validate(&self) -> Result<()> { - // No strict validation needed - deployment_hash is optional for this command - Ok(()) - } +#[cfg(feature = "docker")] +fn extract_proxy_domains(existing_host: &Value) -> Vec { + let mut domains = existing_host["domain_names"] + .as_array() + .map(|items| { + items + .iter() + .filter_map(|item| item.as_str().map(ToString::to_string)) + .collect::>() + }) + 
.unwrap_or_default(); + domains.sort(); + domains } -impl ListContainersCommand { - fn normalize(mut self) -> Self { - self.deployment_hash = trimmed(&self.deployment_hash); - // Clamp log lines - self.log_lines = self.log_lines.clamp(1, 100); - self - } +#[cfg(feature = "docker")] +fn proxy_host_matches( + existing_host: &Value, + request: &crate::connectors::npm::ProxyHostRequest, +) -> bool { + let mut requested_domains = request.domain_names.clone(); + requested_domains.sort(); - fn with_command_context(mut self, agent_cmd: &AgentCommand) -> Self { - if self.deployment_hash.is_empty() { - if let Some(hash) = &agent_cmd.deployment_hash { - self.deployment_hash = hash.clone(); - } + let existing_ssl_enabled = existing_host + .get("certificate_id") + .map(|value| !value.is_null() && value.as_i64().unwrap_or_default() != 0) + .unwrap_or(false); + + extract_proxy_domains(existing_host) == requested_domains + && existing_host["forward_host"].as_str().unwrap_or_default() == request.forward_host + && existing_host["forward_port"].as_u64().unwrap_or_default() as u16 == request.forward_port + && existing_host["ssl_forced"].as_bool().unwrap_or(false) == request.ssl_forced + && existing_host["http2_support"].as_bool().unwrap_or(false) == request.http2_support + && existing_ssl_enabled == request.ssl_enabled +} + +#[cfg(feature = "docker")] +async fn execute_with_docker( + agent_cmd: &AgentCommand, + command: &StackerCommand, + firewall_policy: &firewall::FirewallPolicy, + pipe_runtime: &PipeRuntime, +) -> Result { + match command { + StackerCommand::Health(data) => handle_health(agent_cmd, data).await, + StackerCommand::Logs(data) => handle_logs(agent_cmd, data).await, + StackerCommand::Restart(data) => handle_restart(agent_cmd, data).await, + StackerCommand::Stop(data) => handle_stop(agent_cmd, data).await, + StackerCommand::Start(data) => handle_start(agent_cmd, data).await, + StackerCommand::ErrorSummary(data) => handle_error_summary(agent_cmd, data).await, + 
StackerCommand::FetchConfig(data) => handle_fetch_config(agent_cmd, data).await, + StackerCommand::ApplyConfig(data) => handle_apply_config(agent_cmd, data).await, + StackerCommand::DeployApp(data) => handle_deploy_app(agent_cmd, data).await, + StackerCommand::RemoveApp(data) => handle_remove_app(agent_cmd, data).await, + StackerCommand::FetchAllConfigs(data) => handle_fetch_all_configs(agent_cmd, data).await, + StackerCommand::DeployWithConfigs(data) => { + handle_deploy_with_configs(agent_cmd, data).await + } + StackerCommand::ConfigDiff(data) => handle_config_diff(agent_cmd, data).await, + StackerCommand::ConfigureProxy(data) => handle_configure_proxy(agent_cmd, data).await, + StackerCommand::Exec(data) => handle_exec(agent_cmd, data).await, + StackerCommand::ServerResources(data) => handle_server_resources(agent_cmd, data).await, + StackerCommand::ListContainers(data) => handle_list_containers(agent_cmd, data).await, + StackerCommand::ProbeEndpoints(data) => handle_probe_endpoints(agent_cmd, data).await, + StackerCommand::ActivatePipe(data) => { + handle_activate_pipe(agent_cmd, data, pipe_runtime).await } - self - } - - fn validate(&self) -> Result<()> { - // No strict validation needed - Ok(()) - } -} - -impl ProbeEndpointsCommand { - fn normalize(mut self) -> Self { - self.deployment_hash = trimmed(&self.deployment_hash); - self.app_code = trimmed(&self.app_code); - if let Some(value) = self.container.take() { - let trimmed_value = trimmed(&value); - if !trimmed_value.is_empty() { - self.container = Some(trimmed_value); - } + StackerCommand::DeactivatePipe(data) => { + handle_deactivate_pipe(agent_cmd, data, pipe_runtime).await } - // Normalize protocol names to lowercase - self.protocols = self - .protocols - .iter() - .map(|p| p.trim().to_lowercase()) - .filter(|p| !p.is_empty()) - .collect(); - if self.protocols.is_empty() { - self.protocols = default_probe_protocols(); + StackerCommand::TriggerPipe(data) => { + handle_trigger_pipe(agent_cmd, data, 
pipe_runtime).await + } + StackerCommand::ConfigureFirewall(data) => { + firewall::handle_configure_firewall(agent_cmd, data, firewall_policy).await } - self } +} - fn with_command_context(mut self, agent_cmd: &AgentCommand) -> Self { - if self.deployment_hash.is_empty() { - if let Some(hash) = &agent_cmd.deployment_hash { - self.deployment_hash = hash.clone(); - } - } - if self.app_code.is_empty() { - if let Some(code) = &agent_cmd.app_code { - self.app_code = code.clone(); - } - } - self +fn extract_json_path_value(source: &Value, path: &str) -> Value { + let trimmed = path.trim(); + if !trimmed.starts_with("$.") { + return Value::Null; } - fn validate(&self) -> Result<()> { - if self.deployment_hash.is_empty() { - bail!("deployment_hash is required"); - } - if self.app_code.is_empty() { - bail!("app_code is required"); + let mut current = source; + for segment in trimmed.trim_start_matches("$.").split('.') { + if segment.is_empty() { + continue; } - let valid_protocols = ["openapi", "html_forms", "graphql", "mcp", "rest"]; - for p in &self.protocols { - if !valid_protocols.contains(&p.as_str()) { - bail!("unsupported protocol: {}. 
Valid: {:?}", p, valid_protocols); - } + match current { + Value::Object(map) => match map.get(segment) { + Some(value) => current = value, + None => return Value::Null, + }, + _ => return Value::Null, } - Ok(()) } -} -fn trimmed(value: &str) -> String { - value.trim().to_string() + current.clone() } -#[cfg(feature = "docker")] -fn resolve_compose_paths(deployment_hash: &str, app_code: &str) -> (String, String) { - use std::path::Path; +fn apply_pipe_field_mapping(source: &Value, field_mapping: Option<&Value>) -> Value { + let Some(Value::Object(mapping)) = field_mapping else { + return source.clone(); + }; - if let Ok(dir) = std::env::var("COMPOSE_PROJECT_DIR") { - let file = format!("{}/docker-compose.yml", dir); - return (dir, file); + if mapping.is_empty() { + return source.clone(); } - let hash_dir = format!("/home/trydirect/{}", deployment_hash); - let hash_file = format!("{}/docker-compose.yml", hash_dir); - if Path::new(&hash_file).exists() { - return (hash_dir, hash_file); + let mut mapped = serde_json::Map::new(); + for (key, rule) in mapping { + let value = match rule { + Value::String(path) if path.starts_with("$.") => extract_json_path_value(source, path), + other => other.clone(), + }; + mapped.insert(key.clone(), value); } + Value::Object(mapped) +} - let app_dir = format!("/home/trydirect/{}", app_code); - let app_file = format!("{}/docker-compose.yml", app_dir); - (app_dir, app_file) +fn build_pipe_target_url(base: &str, endpoint: &str) -> String { + let trimmed_base = base.trim_end_matches('/'); + let trimmed_endpoint = endpoint.trim(); + if trimmed_endpoint.is_empty() || trimmed_endpoint == "/" { + return format!("{}/", trimmed_base); + } + format!( + "{}/{}", + trimmed_base, + trimmed_endpoint.trim_start_matches('/') + ) } -/// Represents which compose command variant is available on the system. 
-#[cfg(feature = "docker")] -#[derive(Debug, Clone, Copy, PartialEq)] -pub enum ComposeVariant { - /// Docker Compose V2 plugin: `docker compose` - Plugin, - /// Standalone docker-compose binary: `docker-compose` - Standalone, +fn pipe_source_worker_kind(trigger_type: &str) -> Option { + match trigger_type { + "poll" => Some(PipeSourceWorkerKind::Poll), + "websocket" | "ws" => Some(PipeSourceWorkerKind::Websocket), + "grpc" => Some(PipeSourceWorkerKind::Grpc), + "amqp" | "rabbitmq" => Some(PipeSourceWorkerKind::Amqp), + _ => None, + } } -/// Detect which docker compose variant is available on the system. -/// Tries `docker compose version` first (plugin), then `docker-compose version` (standalone). -/// Result is cached for the lifetime of the process. -#[cfg(feature = "docker")] -pub async fn detect_compose_variant() -> Option { - use tokio::process::Command; +fn pipe_source_poll_interval() -> Duration { + std::env::var("PIPE_POLL_INTERVAL_SECS") + .ok() + .and_then(|value| value.parse::().ok()) + .map(|secs| secs.max(1)) + .map(Duration::from_secs) + .unwrap_or_else(|| Duration::from_secs(30)) +} - static COMPOSE_VARIANT: OnceLock> = OnceLock::new(); +fn pipe_source_retry_delay() -> Duration { + Duration::from_secs(5) +} - // Return cached result if available - if let Some(variant) = COMPOSE_VARIANT.get() { - return *variant; +fn trigger_pipe_target_transport(target_mode: &str, target_value: &str) -> &'static str { + match target_mode { + "container" => "container_http", + "external" if target_value.starts_with("ws://") || target_value.starts_with("wss://") => { + "websocket" + } + "external" + if target_value.starts_with("grpc://") || target_value.starts_with("grpcs://") => + { + "grpc" + } + _ => "http", } +} - // Try docker compose (plugin) first - let plugin_result = Command::new("docker") - .arg("compose") - .arg("version") - .output() - .await; +fn build_trigger_pipe_target_response(transport: &str, status: Option, body: Value) -> Value { + json!({ + 
"transport": transport, + "status": status, + "delivered": status.map(|value| (200..300).contains(&value)).unwrap_or(false), + "body": body, + }) +} - if let Ok(output) = plugin_result { - if output.status.success() { - let _ = COMPOSE_VARIANT.set(Some(ComposeVariant::Plugin)); - return Some(ComposeVariant::Plugin); - } - } +fn redact_persisted_registration(registration: &PipeRegistration) -> PipeRegistration { + let mut registration = registration.clone(); + registration.source_broker_url = registration + .source_broker_url + .as_deref() + .map(redact_url_credentials); + registration.target_url = registration + .target_url + .as_deref() + .map(redact_url_credentials); + registration +} - // Try docker-compose (standalone) as fallback - let standalone_result = Command::new("docker-compose").arg("version").output().await; +fn redact_url_credentials(raw: &str) -> String { + let Some((scheme, remainder)) = raw.split_once("://") else { + return raw.to_string(); + }; + let authority_end = remainder.find('/').unwrap_or(remainder.len()); + let (authority, rest) = remainder.split_at(authority_end); + let Some((_, host)) = authority.rsplit_once('@') else { + return raw.to_string(); + }; + format!("{scheme}://***@{host}{rest}") +} - if let Ok(output) = standalone_result { - if output.status.success() { - let _ = COMPOSE_VARIANT.set(Some(ComposeVariant::Standalone)); - return Some(ComposeVariant::Standalone); - } +fn registered_pipe_key(deployment_hash: &str, pipe_instance_id: &str) -> PipeRuntimeKey { + PipeRuntimeKey { + deployment_hash: deployment_hash.to_string(), + pipe_instance_id: pipe_instance_id.to_string(), } +} - // Neither is available - let _ = COMPOSE_VARIANT.set(None); - None +fn trigger_has_inline_source(data: &TriggerPipeCommand) -> bool { + data.input_data.is_some() + || data + .source_container + .as_deref() + .filter(|value| !value.is_empty()) + .is_some() } -/// Build a compose command with the correct binary/syntax based on what's available. 
-/// Returns (command_program, initial_args) where initial_args should be prepended to actual args. -#[cfg(feature = "docker")] -pub fn build_compose_command(variant: ComposeVariant) -> (String, Vec) { - match variant { - ComposeVariant::Plugin => ("docker".to_string(), vec!["compose".to_string()]), - ComposeVariant::Standalone => ("docker-compose".to_string(), vec![]), +fn trigger_has_inline_target(data: &TriggerPipeCommand) -> bool { + data.target_url + .as_deref() + .filter(|value| !value.is_empty()) + .is_some() + || data + .target_container + .as_deref() + .filter(|value| !value.is_empty()) + .is_some() +} + +fn merge_trigger_with_registration( + data: &TriggerPipeCommand, + registration: Option<&PipeRegistration>, +) -> TriggerPipeCommand { + let mut merged = data.clone(); + if let Some(registration) = registration { + if merged + .source_container + .as_deref() + .filter(|value| !value.is_empty()) + .is_none() + { + merged.source_container = registration.source_container.clone(); + } + if merged.source_endpoint == default_pipe_source_endpoint() { + merged.source_endpoint = registration.source_endpoint.clone(); + } + if merged.source_method == default_pipe_source_method() { + merged.source_method = registration.source_method.clone(); + } + if merged + .target_url + .as_deref() + .filter(|value| !value.is_empty()) + .is_none() + { + merged.target_url = registration.target_url.clone(); + } + if merged + .target_container + .as_deref() + .filter(|value| !value.is_empty()) + .is_none() + { + merged.target_container = registration.target_container.clone(); + } + if merged.target_endpoint == default_pipe_target_endpoint() { + merged.target_endpoint = registration.target_endpoint.clone(); + } + if merged.target_method == default_pipe_target_method() { + merged.target_method = registration.target_method.clone(); + } + if merged.field_mapping.is_none() { + merged.field_mapping = registration.field_mapping.clone(); + } + if merged.trigger_type == 
default_pipe_trigger_type() { + merged.trigger_type = registration.trigger_type.clone(); + } } + merged } #[cfg(feature = "docker")] -fn base_result( - agent_cmd: &AgentCommand, - deployment_hash: &str, - app_code: &str, - command_type: &str, -) -> CommandResult { - CommandResult { - command_id: agent_cmd.command_id.clone(), - status: "success".into(), - result: None, - error: None, - completed_at: now_timestamp(), - deployment_hash: Some(deployment_hash.to_string()), - app_code: Some(app_code.to_string()), - command_type: Some(command_type.to_string()), - ..CommandResult::default() - } +fn shell_escape_single_quotes(value: &str) -> String { + value.replace('\'', r#"'\"'\"'"#) } #[cfg(feature = "docker")] -fn now_timestamp() -> String { - Utc::now().to_rfc3339_opts(SecondsFormat::Secs, true) +fn build_trigger_pipe_container_command(endpoint: &str, method: &str, payload: &Value) -> String { + let json_payload = serde_json::to_string(payload).unwrap_or_else(|_| "{}".to_string()); + let escaped_payload = shell_escape_single_quotes(&json_payload); + let normalized_method = normalize_trigger_pipe_method(method, "POST"); + let url = build_pipe_target_url("http://127.0.0.1", endpoint); + let escaped_url = shell_escape_single_quotes(&url); + format!( + "curl -sS -X {} -H 'Content-Type: application/json' --data-raw '{}' -w '\\n%{{http_code}}' '{}'", + normalized_method, escaped_payload, escaped_url + ) } #[cfg(feature = "docker")] -fn errors_value(errors: &[CommandError]) -> Value { - serde_json::to_value(errors).unwrap_or_else(|_| json!([])) +fn build_trigger_pipe_source_command(endpoint: &str, method: &str) -> String { + let normalized_method = normalize_trigger_pipe_method(method, "GET"); + let url = build_pipe_target_url("http://127.0.0.1", endpoint); + let escaped_url = shell_escape_single_quotes(&url); + format!( + "curl -sS -X {} -w '\\n%{{http_code}}' '{}'", + normalized_method, escaped_url + ) +} + +async fn send_trigger_pipe_request( + url: &str, + method: &str, + 
payload: &Value, +) -> Result<(u16, Value)> { + let method = reqwest::Method::from_bytes(method.as_bytes()) + .with_context(|| format!("invalid target_method '{}'", method))?; + let client = reqwest::Client::builder() + .timeout(std::time::Duration::from_secs(30)) + .build() + .context("building trigger_pipe http client")?; + + let response = client + .request(method, url) + .json(payload) + .send() + .await + .with_context(|| format!("sending trigger_pipe request to {}", url))?; + + let status = response.status().as_u16(); + let body_text = response + .text() + .await + .context("reading trigger_pipe response body")?; + let body = if body_text.trim().is_empty() { + Value::Null + } else { + serde_json::from_str(&body_text).unwrap_or(Value::String(body_text)) + }; + + Ok((status, body)) +} + +async fn fetch_external_pipe_source_request(url: &str, method: &str) -> Result<(u16, Value)> { + let method = reqwest::Method::from_bytes(method.as_bytes()) + .with_context(|| format!("invalid source_method '{}'", method))?; + let client = reqwest::Client::builder() + .timeout(std::time::Duration::from_secs(30)) + .build() + .context("building trigger_pipe source http client")?; + + let response = client + .request(method, url) + .send() + .await + .with_context(|| format!("fetching trigger_pipe source from {}", url))?; + + let status = response.status().as_u16(); + let body_text = response + .text() + .await + .context("reading trigger_pipe source response body")?; + let body = if body_text.trim().is_empty() { + Value::Null + } else { + serde_json::from_str(&body_text).unwrap_or(Value::String(body_text)) + }; + + Ok((status, body)) } #[cfg(feature = "docker")] -fn redact_message(message: &str, enabled: bool) -> (String, bool) { - if !enabled || message.is_empty() { - return (message.to_string(), false); +async fn fetch_trigger_pipe_source_request( + container: &str, + endpoint: &str, + method: &str, +) -> Result<(u16, Value)> { + let command = 
build_trigger_pipe_source_command(endpoint, method); + let (exit_code, stdout, stderr) = docker::exec_in_container_with_output(container, &command) + .await + .with_context(|| { + format!( + "fetching trigger_pipe source inside container {}", + container + ) + })?; + + if exit_code != 0 { + bail!( + "source container request failed with code {}: {}", + exit_code, + stderr.trim() + ); } - static REDACTION_RE: OnceLock = OnceLock::new(); - let regex = REDACTION_RE.get_or_init(|| { - Regex::new(r"(?i)(token|secret|password|key|credential)(\s*[=:]\s*)([^\s]+)").unwrap() - }); + let mut lines = stdout.lines().collect::>(); + let status_line = lines.pop().unwrap_or("000").trim(); + let status = status_line.parse::().unwrap_or(0); + let body_text = lines.join("\n"); + let body = if body_text.trim().is_empty() { + Value::Null + } else { + serde_json::from_str(&body_text).unwrap_or(Value::String(body_text)) + }; - let mut redacted = false; - let replaced = regex.replace_all(message, |caps: ®ex::Captures| { - redacted = true; - format!("{}{}***", &caps[1], &caps[2]) - }); - (replaced.into_owned(), redacted) + Ok((status, body)) +} + +#[cfg(not(feature = "docker"))] +async fn fetch_trigger_pipe_source_request( + _container: &str, + _endpoint: &str, + _method: &str, +) -> Result<(u16, Value)> { + bail!("source_container requires docker feature") } #[cfg(feature = "docker")] -fn make_error(code: &str, message: impl Into, details: Option) -> CommandError { - CommandError { - code: code.to_string(), - message: message.into(), - details, +async fn send_trigger_pipe_container_request( + container: &str, + endpoint: &str, + method: &str, + payload: &Value, +) -> Result<(u16, Value)> { + let command = build_trigger_pipe_container_command(endpoint, method, payload); + let (exit_code, stdout, stderr) = docker::exec_in_container_with_output(container, &command) + .await + .with_context(|| { + format!( + "sending trigger_pipe request inside container {}", + container + ) + })?; + + if 
exit_code != 0 { + bail!( + "target container request failed with code {}: {}", + exit_code, + stderr.trim() + ); } + + let mut lines = stdout.lines().collect::>(); + let status_line = lines.pop().unwrap_or("000").trim(); + let status = status_line.parse::().unwrap_or(0); + let body_text = lines.join("\n"); + let body = if body_text.trim().is_empty() { + Value::Null + } else { + serde_json::from_str(&body_text).unwrap_or(Value::String(body_text)) + }; + + Ok((status, body)) +} + +#[cfg(not(feature = "docker"))] +async fn send_trigger_pipe_container_request( + _container: &str, + _endpoint: &str, + _method: &str, + _payload: &Value, +) -> Result<(u16, Value)> { + bail!("target_container requires docker feature") } -#[cfg(feature = "docker")] -fn env_flag_enabled(name: &str, default: bool) -> bool { - match std::env::var(name) { - Ok(value) => match value.trim().to_ascii_lowercase().as_str() { - "0" | "false" | "no" | "off" => false, - "1" | "true" | "yes" | "on" => true, - _ => default, - }, - Err(_) => default, +async fn run_poll_source_worker( + runtime: PipeRuntime, + key: PipeRuntimeKey, + registration: PipeRegistration, +) { + let interval = pipe_source_poll_interval(); + info!( + deployment_hash = %key.deployment_hash, + pipe_instance_id = %key.pipe_instance_id, + interval_secs = interval.as_secs(), + "pipe poll source worker started" + ); + + loop { + let fetched = match registration.source_container.as_deref() { + Some(container) if !container.is_empty() => { + fetch_trigger_pipe_source_request( + container, + ®istration.source_endpoint, + ®istration.source_method, + ) + .await + } + _ => { + fetch_external_pipe_source_request( + ®istration.source_endpoint, + ®istration.source_method, + ) + .await + } + }; + + match fetched { + Ok((status_code, payload)) if (200..300).contains(&status_code) => { + if let Err(error) = runtime + .trigger_registered_payload( + &key.deployment_hash, + &key.pipe_instance_id, + payload, + "poll", + ) + .await + { + warn!( + 
deployment_hash = %key.deployment_hash, + pipe_instance_id = %key.pipe_instance_id, + error = %error, + "poll source trigger failed" + ); + } + } + Ok((status_code, _)) => { + runtime + .mark_failed( + &key.deployment_hash, + &key.pipe_instance_id, + now_timestamp(), + format!("poll source failed with status {}", status_code), + ) + .await; + } + Err(error) => { + runtime + .mark_failed( + &key.deployment_hash, + &key.pipe_instance_id, + now_timestamp(), + format!("poll source error: {}", error), + ) + .await; + } + } + + tokio::time::sleep(interval).await; } } -#[cfg(feature = "docker")] -fn proxy_owner_enabled() -> bool { - env_flag_enabled("STATUS_PANEL_PROXY_OWNER", true) -} - -#[cfg(feature = "docker")] -fn npm_env_fallback_enabled() -> bool { - env_flag_enabled("NPM_ALLOW_ENV_FALLBACK", false) -} +async fn run_websocket_source_worker( + runtime: PipeRuntime, + key: PipeRuntimeKey, + registration: PipeRegistration, +) { + info!( + deployment_hash = %key.deployment_hash, + pipe_instance_id = %key.pipe_instance_id, + source = %registration.source_endpoint, + "pipe websocket source worker started" + ); -#[cfg(feature = "docker")] -fn resolve_server_id( -) -> std::result::Result { - std::env::var("STACKER_SERVER_ID") - .ok() - .filter(|value| !value.trim().is_empty()) - .ok_or(crate::security::vault_client::NpmCredentialError::MissingServerId) + loop { + match crate::transport::websocket::ws_fetch_source(®istration.source_endpoint).await { + Ok(payload) => { + if let Err(error) = runtime + .trigger_registered_payload( + &key.deployment_hash, + &key.pipe_instance_id, + payload, + "websocket", + ) + .await + { + warn!( + deployment_hash = %key.deployment_hash, + pipe_instance_id = %key.pipe_instance_id, + error = %error, + "websocket source trigger failed" + ); + } + } + Err(error) => { + runtime + .mark_failed( + &key.deployment_hash, + &key.pipe_instance_id, + now_timestamp(), + format!("websocket source error: {}", error), + ) + .await; + debug!( + 
deployment_hash = %key.deployment_hash, + pipe_instance_id = %key.pipe_instance_id, + error = %error, + "websocket source worker will retry" + ); + tokio::time::sleep(pipe_source_retry_delay()).await; + } + } + } } -#[cfg(feature = "docker")] -fn configure_proxy_error( - error: &crate::security::vault_client::NpmCredentialError, -) -> CommandError { - let details = match error { - crate::security::vault_client::NpmCredentialError::MissingSecret { path } - | crate::security::vault_client::NpmCredentialError::InvalidPayload { path, .. } - | crate::security::vault_client::NpmCredentialError::UnknownAuthMode { path, .. } => { - Some(path.clone()) - } - crate::security::vault_client::NpmCredentialError::ExistingHostConflict { domain } => { - Some(domain.clone()) - } - _ => None, +async fn run_grpc_source_worker( + runtime: PipeRuntime, + key: PipeRuntimeKey, + registration: PipeRegistration, +) { + let grpc_endpoint = if registration.source_endpoint.starts_with("grpcs://") { + registration + .source_endpoint + .replacen("grpcs://", "https://", 1) + } else { + registration + .source_endpoint + .replacen("grpc://", "http://", 1) }; - make_error(error.code(), error.operator_message(), details) + info!( + deployment_hash = %key.deployment_hash, + pipe_instance_id = %key.pipe_instance_id, + endpoint = %grpc_endpoint, + "pipe gRPC source worker started" + ); + + loop { + match crate::transport::grpc_client::grpc_fetch_source( + &grpc_endpoint, + &key.pipe_instance_id, + &key.pipe_instance_id, + ) + .await + { + Ok(payload) => { + if let Err(error) = runtime + .trigger_registered_payload( + &key.deployment_hash, + &key.pipe_instance_id, + payload, + "grpc", + ) + .await + { + warn!( + deployment_hash = %key.deployment_hash, + pipe_instance_id = %key.pipe_instance_id, + error = %error, + "gRPC source trigger failed" + ); + } + } + Err(error) => { + runtime + .mark_failed( + &key.deployment_hash, + &key.pipe_instance_id, + now_timestamp(), + format!("gRPC source error: {}", 
error), + ) + .await; + debug!( + deployment_hash = %key.deployment_hash, + pipe_instance_id = %key.pipe_instance_id, + error = %error, + "gRPC source worker will retry" + ); + tokio::time::sleep(pipe_source_retry_delay()).await; + } + } + } } -#[cfg(feature = "docker")] -async fn resolve_npm_config( - data: &ConfigureProxyCommand, -) -> std::result::Result< - crate::connectors::npm::NpmConfig, - crate::security::vault_client::NpmCredentialError, -> { - use crate::connectors::npm::NpmConfig; - use crate::security::vault_client::{NpmCredentialError, VaultClient}; +async fn run_amqp_source_worker( + runtime: PipeRuntime, + key: PipeRuntimeKey, + registration: PipeRegistration, +) { + let broker_url = registration.source_broker_url.clone().unwrap_or_default(); + let queue = registration.source_queue.clone().unwrap_or_default(); + let exchange = registration.source_exchange.clone().unwrap_or_default(); + let routing_key = registration.source_routing_key.clone().unwrap_or_default(); + + info!( + deployment_hash = %key.deployment_hash, + pipe_instance_id = %key.pipe_instance_id, + queue = %queue, + "pipe AMQP source worker started" + ); - match (&data.npm_host, &data.npm_email, &data.npm_password) { - (Some(host), Some(email), Some(password)) => { - return Ok(NpmConfig::new( - host.clone(), - email.clone(), - password.clone(), - )); + loop { + match Connection::connect(&broker_url, ConnectionProperties::default()).await { + Ok(connection) => match connection.create_channel().await { + Ok(channel) => { + if !exchange.is_empty() { + if let Err(error) = channel + .queue_bind( + &queue, + &exchange, + &routing_key, + QueueBindOptions::default(), + FieldTable::default(), + ) + .await + { + runtime + .mark_failed( + &key.deployment_hash, + &key.pipe_instance_id, + now_timestamp(), + format!("AMQP queue bind failed: {}", error), + ) + .await; + tokio::time::sleep(pipe_source_retry_delay()).await; + continue; + } + } + + match channel + .basic_consume( + &queue, + 
&format!("status-panel-pipe-{}", key.pipe_instance_id), + BasicConsumeOptions::default(), + FieldTable::default(), + ) + .await + { + Ok(mut consumer) => { + while let Some(delivery) = consumer.next().await { + match delivery { + Ok(delivery) => { + let payload = + serde_json::from_slice::(&delivery.data) + .unwrap_or_else(|_| { + Value::String( + String::from_utf8_lossy(&delivery.data) + .to_string(), + ) + }); + if let Err(error) = runtime + .trigger_registered_payload( + &key.deployment_hash, + &key.pipe_instance_id, + payload, + "rabbitmq", + ) + .await + { + warn!( + deployment_hash = %key.deployment_hash, + pipe_instance_id = %key.pipe_instance_id, + error = %error, + "AMQP source trigger failed" + ); + } + if let Err(error) = + delivery.ack(BasicAckOptions::default()).await + { + runtime + .mark_failed( + &key.deployment_hash, + &key.pipe_instance_id, + now_timestamp(), + format!("AMQP ack failed: {}", error), + ) + .await; + break; + } + } + Err(error) => { + runtime + .mark_failed( + &key.deployment_hash, + &key.pipe_instance_id, + now_timestamp(), + format!("AMQP consume failed: {}", error), + ) + .await; + break; + } + } + } + } + Err(error) => { + runtime + .mark_failed( + &key.deployment_hash, + &key.pipe_instance_id, + now_timestamp(), + format!("AMQP consumer setup failed: {}", error), + ) + .await; + } + } + } + Err(error) => { + runtime + .mark_failed( + &key.deployment_hash, + &key.pipe_instance_id, + now_timestamp(), + format!("AMQP channel creation failed: {}", error), + ) + .await; + } + }, + Err(error) => { + runtime + .mark_failed( + &key.deployment_hash, + &key.pipe_instance_id, + now_timestamp(), + format!("AMQP connection failed: {}", error), + ) + .await; + } } - (None, None, None) => {} - _ => return Err(NpmCredentialError::InvalidOverride), - } - if let Some(vault_client) = VaultClient::from_env().map_err(|error| { - tracing::warn!( - error = %error, - "Failed to initialize Vault client for configure_proxy" - ); - 
NpmCredentialError::VaultUnavailable - })? { - let server_id = resolve_server_id()?; - let credentials = vault_client.fetch_npm_credentials(&server_id).await?; - return Ok(NpmConfig::from_credentials(&credentials)); + tokio::time::sleep(pipe_source_retry_delay()).await; } +} - if npm_env_fallback_enabled() { - return NpmConfig::from_env().ok_or_else(|| NpmCredentialError::MissingSecret { - path: "NPM_HOST/NPM_EMAIL/NPM_PASSWORD".to_string(), - }); - } +async fn handle_activate_pipe( + agent_cmd: &AgentCommand, + data: &ActivatePipeCommand, + pipe_runtime: &PipeRuntime, +) -> Result { + let mut result = base_result(agent_cmd, &data.deployment_hash, "", "activate_pipe"); + let activated_at = now_timestamp(); + let mut registration = PipeRegistration::from(data.clone()); + registration.lifecycle = PipeLifecycleSnapshot::active(activated_at); + + let activation = pipe_runtime + .activate( + registered_pipe_key(&data.deployment_hash, &data.pipe_instance_id), + registration, + ) + .await; - Err(NpmCredentialError::MissingVaultConfiguration) -} + result.result = Some(json!({ + "type": "activate_pipe", + "deployment_hash": data.deployment_hash, + "pipe_instance_id": data.pipe_instance_id, + "active": true, + "replaced": activation.replaced, + "reactivated": activation.previous_lifecycle.is_some(), + "trigger_type": data.trigger_type, + "lifecycle": activation.registration.lifecycle, + })); -#[cfg(feature = "docker")] -fn extract_proxy_domains(existing_host: &Value) -> Vec { - let mut domains = existing_host["domain_names"] - .as_array() - .map(|items| { - items - .iter() - .filter_map(|item| item.as_str().map(ToString::to_string)) - .collect::>() - }) - .unwrap_or_default(); - domains.sort(); - domains + pipe_runtime + .spawn_source_worker_if_needed( + &data.deployment_hash, + &data.pipe_instance_id, + activation.registration, + ) + .await; + + Ok(result) } -#[cfg(feature = "docker")] -fn proxy_host_matches( - existing_host: &Value, - request: 
&crate::connectors::npm::ProxyHostRequest, -) -> bool { - let mut requested_domains = request.domain_names.clone(); - requested_domains.sort(); +async fn handle_deactivate_pipe( + agent_cmd: &AgentCommand, + data: &DeactivatePipeCommand, + pipe_runtime: &PipeRuntime, +) -> Result { + let mut result = base_result(agent_cmd, &data.deployment_hash, "", "deactivate_pipe"); + let deactivated_at = now_timestamp(); + let deactivation = pipe_runtime + .deactivate( + &data.deployment_hash, + &data.pipe_instance_id, + deactivated_at, + ) + .await; + pipe_runtime + .stop_worker(&data.deployment_hash, &data.pipe_instance_id) + .await; - let existing_ssl_enabled = existing_host - .get("certificate_id") - .map(|value| !value.is_null() && value.as_i64().unwrap_or_default() != 0) - .unwrap_or(false); + result.result = Some(json!({ + "type": "deactivate_pipe", + "deployment_hash": data.deployment_hash, + "pipe_instance_id": data.pipe_instance_id, + "active": false, + "removed": deactivation.removed, + "lifecycle": deactivation.lifecycle, + })); - extract_proxy_domains(existing_host) == requested_domains - && existing_host["forward_host"].as_str().unwrap_or_default() == request.forward_host - && existing_host["forward_port"].as_u64().unwrap_or_default() as u16 == request.forward_port - && existing_host["ssl_forced"].as_bool().unwrap_or(false) == request.ssl_forced - && existing_host["http2_support"].as_bool().unwrap_or(false) == request.http2_support - && existing_ssl_enabled == request.ssl_enabled + Ok(result) } -#[cfg(feature = "docker")] -async fn execute_with_docker( +async fn handle_trigger_pipe( agent_cmd: &AgentCommand, - command: &StackerCommand, - firewall_policy: &firewall::FirewallPolicy, + data: &TriggerPipeCommand, + pipe_runtime: &PipeRuntime, ) -> Result { - match command { - StackerCommand::Health(data) => handle_health(agent_cmd, data).await, - StackerCommand::Logs(data) => handle_logs(agent_cmd, data).await, - StackerCommand::Restart(data) => 
handle_restart(agent_cmd, data).await, - StackerCommand::Stop(data) => handle_stop(agent_cmd, data).await, - StackerCommand::Start(data) => handle_start(agent_cmd, data).await, - StackerCommand::ErrorSummary(data) => handle_error_summary(agent_cmd, data).await, - StackerCommand::FetchConfig(data) => handle_fetch_config(agent_cmd, data).await, - StackerCommand::ApplyConfig(data) => handle_apply_config(agent_cmd, data).await, - StackerCommand::DeployApp(data) => handle_deploy_app(agent_cmd, data).await, - StackerCommand::RemoveApp(data) => handle_remove_app(agent_cmd, data).await, - StackerCommand::FetchAllConfigs(data) => handle_fetch_all_configs(agent_cmd, data).await, - StackerCommand::DeployWithConfigs(data) => { - handle_deploy_with_configs(agent_cmd, data).await + let mut result = base_result(agent_cmd, &data.deployment_hash, "", "trigger_pipe"); + let registration = pipe_runtime + .resolve(&data.deployment_hash, &data.pipe_instance_id) + .await; + if registration.is_none() + && !trigger_has_inline_source(data) + && !trigger_has_inline_target(data) + { + let error = format!( + "pipe_instance_id {} is not active on this agent", + data.pipe_instance_id + ); + pipe_runtime + .mark_failed( + &data.deployment_hash, + &data.pipe_instance_id, + now_timestamp(), + error.clone(), + ) + .await; + result.status = "failed".into(); + result.result = Some(json!({ + "type": "trigger_pipe", + "deployment_hash": data.deployment_hash, + "pipe_instance_id": data.pipe_instance_id, + "success": false, + "source_data": Value::Null, + "mapped_data": Value::Null, + "target_response": Value::Null, + "error": error, + "triggered_at": now_timestamp(), + "trigger_type": data.trigger_type, + })); + result.error = Some(error); + return Ok(result); + } + + let resolved = merge_trigger_with_registration(data, registration.as_ref()); + let source_data = match resolved.input_data.clone() { + Some(value) => value, + None => match resolved + .source_container + .as_deref() + .filter(|value| 
!value.is_empty()) + { + Some(container) => match fetch_trigger_pipe_source_request( + container, + &resolved.source_endpoint, + &resolved.source_method, + ) + .await + { + Ok((status_code, response_body)) if (200..300).contains(&status_code) => { + response_body + } + Ok((status_code, response_body)) => { + let error = format!("source fetch failed with status {}", status_code); + pipe_runtime + .mark_failed( + &data.deployment_hash, + &data.pipe_instance_id, + now_timestamp(), + error.clone(), + ) + .await; + result.status = "failed".into(); + result.result = Some(json!({ + "type": "trigger_pipe", + "deployment_hash": data.deployment_hash, + "pipe_instance_id": data.pipe_instance_id, + "success": false, + "source_data": response_body, + "mapped_data": Value::Null, + "target_response": Value::Null, + "error": error, + "triggered_at": now_timestamp(), + "trigger_type": resolved.trigger_type, + "lifecycle": pipe_runtime.snapshot(&data.deployment_hash, &data.pipe_instance_id).await, + })); + result.error = Some(error); + return Ok(result); + } + Err(err) => { + let error = format!("failed to fetch trigger_pipe source: {}", err); + pipe_runtime + .mark_failed( + &data.deployment_hash, + &data.pipe_instance_id, + now_timestamp(), + error.clone(), + ) + .await; + result.status = "failed".into(); + result.result = Some(json!({ + "type": "trigger_pipe", + "deployment_hash": data.deployment_hash, + "pipe_instance_id": data.pipe_instance_id, + "success": false, + "source_data": Value::Null, + "mapped_data": Value::Null, + "target_response": Value::Null, + "error": error, + "triggered_at": now_timestamp(), + "trigger_type": resolved.trigger_type, + "lifecycle": pipe_runtime.snapshot(&data.deployment_hash, &data.pipe_instance_id).await, + })); + result.error = Some(error); + return Ok(result); + } + }, + None => { + let error = "trigger_pipe requires input_data or source_container"; + pipe_runtime + .mark_failed( + &data.deployment_hash, + &data.pipe_instance_id, + 
now_timestamp(), + error.to_string(), + ) + .await; + result.status = "failed".into(); + result.result = Some(json!({ + "type": "trigger_pipe", + "deployment_hash": data.deployment_hash, + "pipe_instance_id": data.pipe_instance_id, + "success": false, + "source_data": Value::Null, + "mapped_data": Value::Null, + "target_response": Value::Null, + "error": error, + "triggered_at": now_timestamp(), + "trigger_type": resolved.trigger_type, + "lifecycle": pipe_runtime.snapshot(&data.deployment_hash, &data.pipe_instance_id).await, + })); + result.error = Some(error.into()); + return Ok(result); + } + }, + }; + + let mapped_data = apply_pipe_field_mapping(&source_data, resolved.field_mapping.as_ref()); + let target = match ( + resolved + .target_url + .as_deref() + .filter(|value| !value.is_empty()), + resolved + .target_container + .as_deref() + .filter(|value| !value.is_empty()), + ) { + (Some(value), _) => Ok(( + "external", + build_pipe_target_url(value, &resolved.target_endpoint), + )), + (None, Some(value)) => Ok(("container", value.to_string())), + (None, None) => { + let error = "trigger_pipe requires target_url or target_container"; + pipe_runtime + .mark_failed( + &data.deployment_hash, + &data.pipe_instance_id, + now_timestamp(), + error.to_string(), + ) + .await; + result.status = "failed".into(); + result.result = Some(json!({ + "type": "trigger_pipe", + "deployment_hash": data.deployment_hash, + "pipe_instance_id": data.pipe_instance_id, + "success": false, + "source_data": source_data, + "mapped_data": mapped_data, + "target_response": Value::Null, + "error": error, + "triggered_at": now_timestamp(), + "trigger_type": resolved.trigger_type, + "lifecycle": pipe_runtime.snapshot(&data.deployment_hash, &data.pipe_instance_id).await, + })); + result.error = Some(error.into()); + Err(()) + } + }; + if target.is_err() { + return Ok(result); + } + let (target_mode, target_value) = target.unwrap(); + let target_transport = trigger_pipe_target_transport(target_mode, 
&target_value); + + let send_result = match target_mode { + "external" => { + if target_value.starts_with("ws://") || target_value.starts_with("wss://") { + crate::transport::websocket::ws_send_target(&target_value, &mapped_data) + .await + .map_err(|e| anyhow::anyhow!(e)) + } else if target_value.starts_with("grpc://") || target_value.starts_with("grpcs://") { + let grpc_endpoint = if target_value.starts_with("grpcs://") { + target_value.replacen("grpcs://", "https://", 1) + } else { + target_value.replacen("grpc://", "http://", 1) + }; + let step_id = data.pipe_instance_id.trim(); + if step_id.is_empty() { + Err(anyhow::anyhow!( + "trigger_pipe gRPC target requires a non-empty pipe_instance_id for step_id" + )) + } else { + crate::transport::grpc_client::grpc_send_target( + &grpc_endpoint, + &data.pipe_instance_id, + step_id, + &mapped_data, + ) + .await + .map_err(|e| anyhow::anyhow!(e)) + } + } else { + send_trigger_pipe_request(&target_value, &resolved.target_method, &mapped_data) + .await + } + } + "container" => { + send_trigger_pipe_container_request( + &target_value, + &resolved.target_endpoint, + &resolved.target_method, + &mapped_data, + ) + .await } - StackerCommand::ConfigDiff(data) => handle_config_diff(agent_cmd, data).await, - StackerCommand::ConfigureProxy(data) => handle_configure_proxy(agent_cmd, data).await, - StackerCommand::Exec(data) => handle_exec(agent_cmd, data).await, - StackerCommand::ServerResources(data) => handle_server_resources(agent_cmd, data).await, - StackerCommand::ListContainers(data) => handle_list_containers(agent_cmd, data).await, - StackerCommand::ProbeEndpoints(data) => handle_probe_endpoints(agent_cmd, data).await, - StackerCommand::ConfigureFirewall(data) => { - firewall::handle_configure_firewall(agent_cmd, data, firewall_policy).await + _ => unreachable!(), + }; + + match send_result { + Ok((status_code, response_body)) if (200..300).contains(&status_code) => { + let triggered_at = now_timestamp(); + pipe_runtime + 
.mark_triggered( + &data.deployment_hash, + &data.pipe_instance_id, + triggered_at.clone(), + ) + .await; + result.status = "success".into(); + result.result = Some(json!({ + "type": "trigger_pipe", + "deployment_hash": data.deployment_hash, + "pipe_instance_id": data.pipe_instance_id, + "success": true, + "source_data": source_data, + "mapped_data": mapped_data, + "target_response": build_trigger_pipe_target_response( + target_transport, + Some(status_code), + response_body, + ), + "triggered_at": triggered_at, + "trigger_type": resolved.trigger_type, + "lifecycle": pipe_runtime.snapshot(&data.deployment_hash, &data.pipe_instance_id).await, + })); + } + Ok((status_code, response_body)) => { + let error = format!("target request failed with status {}", status_code); + pipe_runtime + .mark_failed( + &data.deployment_hash, + &data.pipe_instance_id, + now_timestamp(), + error.clone(), + ) + .await; + result.status = "failed".into(); + result.result = Some(json!({ + "type": "trigger_pipe", + "deployment_hash": data.deployment_hash, + "pipe_instance_id": data.pipe_instance_id, + "success": false, + "source_data": source_data, + "mapped_data": mapped_data, + "target_response": build_trigger_pipe_target_response( + target_transport, + Some(status_code), + response_body, + ), + "error": error, + "triggered_at": now_timestamp(), + "trigger_type": resolved.trigger_type, + "lifecycle": pipe_runtime.snapshot(&data.deployment_hash, &data.pipe_instance_id).await, + })); + result.error = Some(error); + } + Err(err) => { + let error = err.to_string(); + pipe_runtime + .mark_failed( + &data.deployment_hash, + &data.pipe_instance_id, + now_timestamp(), + error.clone(), + ) + .await; + result.status = "failed".into(); + result.result = Some(json!({ + "type": "trigger_pipe", + "deployment_hash": data.deployment_hash, + "pipe_instance_id": data.pipe_instance_id, + "success": false, + "source_data": source_data, + "mapped_data": mapped_data, + "target_response": 
build_trigger_pipe_target_response( + target_transport, + None, + Value::Null, + ), + "error": error, + "triggered_at": now_timestamp(), + "trigger_type": resolved.trigger_type, + "lifecycle": pipe_runtime.snapshot(&data.deployment_hash, &data.pipe_instance_id).await, + })); + result.error = Some(error); } } + + Ok(result) } #[cfg(feature = "docker")] @@ -4649,7 +7156,7 @@ async fn get_container_ports(container_name: &str) -> Result> { } #[cfg(any(feature = "docker", test))] -fn extract_openapi_operations(spec: &Value) -> Vec { +fn extract_openapi_operations(spec: &Value, capture_samples: bool) -> Vec { let mut operations = Vec::new(); if let Some(paths) = spec.get("paths").and_then(|p| p.as_object()) { @@ -4673,12 +7180,21 @@ fn extract_openapi_operations(spec: &Value) -> Vec { // Extract field names from request body schema let fields = extract_request_fields(spec, details); - operations.push(json!({ + let mut op = json!({ "path": path, "method": method_upper, "summary": summary, "fields": fields, - })); + }); + + // Extract sample response from OpenAPI spec examples + if capture_samples { + if let Some(sample) = extract_response_example(spec, details) { + op["sample_response"] = sample; + } + } + + operations.push(op); } } } @@ -4687,6 +7203,67 @@ fn extract_openapi_operations(spec: &Value) -> Vec { operations } +/// Extract a sample response from an OpenAPI operation's response schema. 
+/// Looks for: responses -> 200 -> content -> application/json -> example/schema/examples +#[cfg(any(feature = "docker", test))] +fn extract_response_example(spec: &Value, operation: &Value) -> Option { + let responses = operation.get("responses")?; + + // Try 200, 201, then default + let response = responses + .get("200") + .or_else(|| responses.get("201")) + .or_else(|| responses.get("default"))?; + + // OpenAPI 3.x: content -> application/json -> example or schema -> example + if let Some(content) = response.get("content") { + if let Some(json_content) = content.get("application/json") { + // Direct example on the media type + if let Some(example) = json_content.get("example") { + return Some(example.clone()); + } + // Examples (named) — take the first one + if let Some(examples) = json_content.get("examples").and_then(|e| e.as_object()) { + if let Some((_, first)) = examples.iter().next() { + if let Some(value) = first.get("value") { + return Some(value.clone()); + } + } + } + // Schema example + if let Some(schema) = json_content.get("schema") { + if let Some(example) = schema.get("example") { + return Some(example.clone()); + } + // Resolve $ref if present + if let Some(ref_path) = schema.get("$ref").and_then(|r| r.as_str()) { + if let Some(resolved) = resolve_ref(spec, ref_path) { + if let Some(example) = resolved.get("example") { + return Some(example.clone()); + } + } + } + } + } + } + + // Swagger 2.x: examples -> application/json + if let Some(examples) = response.get("examples") { + if let Some(json_example) = examples.get("application/json") { + return Some(json_example.clone()); + } + } + + // Swagger 2.x: schema -> example + if let Some(schema) = response.get("schema") { + if let Some(example) = schema.get("example") { + return Some(example.clone()); + } + } + + None +} + #[cfg(any(feature = "docker", test))] fn extract_request_fields(spec: &Value, operation: &Value) -> Vec { let mut fields = Vec::new(); @@ -4882,7 +7459,8 @@ async fn 
handle_probe_endpoints( if !protocols_detected.contains(&"openapi".to_string()) { protocols_detected.push("openapi".to_string()); } - let operations = extract_openapi_operations(&spec); + let operations = + extract_openapi_operations(&spec, data.capture_samples); endpoints.push(json!({ "protocol": "openapi", "base_url": format!("http://{}:{}", data.app_code, port), @@ -4949,12 +7527,48 @@ async fn handle_probe_endpoints( if !protocols_detected.contains(&"rest".to_string()) { protocols_detected.push("rest".to_string()); } - endpoints.push(json!({ + + // Capture sample response body for REST endpoints + let mut sample_response = None; + if data.capture_samples && code == "200" { + let escaped_url = shell_escape_single_quotes(&format!( + "http://localhost:{}{}", + port, path + )); + let body_cmd = format!( + "curl -sf -m {} '{}' 2>/dev/null || true", + data.probe_timeout, escaped_url + ); + if let Ok(Ok((0, body, _))) = tokio::time::timeout( + std::time::Duration::from_secs((data.probe_timeout + 2) as u64), + docker::exec_in_container_with_output(&target_name, &body_cmd), + ) + .await + { + let body = body.trim(); + if !body.is_empty() { + // Try to parse as JSON; fall back to string + sample_response = Some( + serde_json::from_str::(body) + .unwrap_or_else(|_| json!(body)), + ); + } + } + } + + let mut ep = json!({ "protocol": "rest", "base_url": format!("http://{}:{}", data.app_code, port), "spec_url": path, "operations": [], - })); + }); + + // Attach sample_response at endpoint level for REST heuristic + if let Some(sample) = sample_response { + ep["sample_response"] = sample; + } + + endpoints.push(ep); } } _ => continue, @@ -4963,43 +7577,480 @@ async fn handle_probe_endpoints( } } - result.result = Some(json!({ - "type": "probe_endpoints", - "deployment_hash": data.deployment_hash, - "app_code": data.app_code, - "protocols_detected": protocols_detected, - "endpoints": endpoints, - "forms": forms, - "probed_at": now_timestamp(), - })); + result.result = 
Some(json!({ + "type": "probe_endpoints", + "deployment_hash": data.deployment_hash, + "app_code": data.app_code, + "protocols_detected": protocols_detected, + "endpoints": endpoints, + "forms": forms, + "probed_at": now_timestamp(), + })); + + Ok(result) +} + +#[cfg(test)] +mod tests { + use super::*; + use serde_json::json; + #[cfg(unix)] + use std::os::unix::fs::PermissionsExt; + use tempfile::tempdir; + + fn fixture_path(path: &str) -> PathBuf { + let relative_path = match path { + "activate_pipe.webhook.command.json" => { + "../shared-fixtures/pipe-contract/activate_pipe.webhook.command.json" + } + "activate_pipe.rabbitmq.command.json" => { + "../shared-fixtures/pipe-contract/activate_pipe.rabbitmq.command.json" + } + "deactivate_pipe.command.json" => { + "../shared-fixtures/pipe-contract/deactivate_pipe.command.json" + } + "trigger_pipe.manual.command.json" => { + "../shared-fixtures/pipe-contract/trigger_pipe.manual.command.json" + } + "trigger_pipe.replay.command.json" => { + "../shared-fixtures/pipe-contract/trigger_pipe.replay.command.json" + } + other => panic!("unknown fixture: {}", other), + }; + PathBuf::from(env!("CARGO_MANIFEST_DIR")).join(relative_path) + } + + fn shared_fixtures_available() -> bool { + fixture_path("activate_pipe.webhook.command.json").exists() + } + + fn fixture(path: &str) -> Value { + let fixture_path = fixture_path(path); + let body = std::fs::read_to_string(&fixture_path).unwrap_or_else(|error| { + panic!( + "failed to read fixture {} at {}: {}", + path, + fixture_path.display(), + error + ) + }); + + serde_json::from_str(&body).expect("fixture should be valid json") + } + + struct EnvGuard { + vars: Vec<(String, Option)>, + } + + impl EnvGuard { + fn new(keys: &[&str]) -> Self { + let vars = keys + .iter() + .map(|k| (k.to_string(), std::env::var(k).ok())) + .collect(); + Self { vars } + } + } + + impl Drop for EnvGuard { + fn drop(&mut self) { + for (key, original) in &self.vars { + match original { + Some(value) => 
std::env::set_var(key, value), + None => std::env::remove_var(key), + } + } + } + } + + macro_rules! stacker_test { + ($name:ident, $cmd_name:expr, $payload:expr, $variant:path) => { + #[test] + fn $name() { + let cmd = AgentCommand { + id: "cmd-test".into(), + command_id: "cmd-test".into(), + name: $cmd_name.into(), + params: $payload, + deployment_hash: Some("testhash".into()), + app_code: Some("testapp".into()), + }; + let parsed = parse_stacker_command(&cmd).unwrap(); + match parsed { + Some($variant(_)) => {} + _ => panic!("Did not parse {} command correctly", $cmd_name), + } + } + }; + } + + #[test] + fn parses_activate_pipe_shared_webhook_fixture() { + if !shared_fixtures_available() { + eprintln!("skipping shared fixture test: shared fixtures are unavailable"); + return; + } + let cmd = AgentCommand { + id: "cmd-activate-fixture".into(), + command_id: "cmd-activate-fixture".into(), + name: "activate_pipe".into(), + params: json!({ "params": fixture("activate_pipe.webhook.command.json") }), + deployment_hash: Some("dep-123".into()), + app_code: None, + }; + + let parsed = parse_stacker_command(&cmd).unwrap(); + match parsed { + Some(StackerCommand::ActivatePipe(data)) => { + assert_eq!(data.deployment_hash, "dep-123"); + assert_eq!(data.source_container.as_deref(), Some("source-app")); + assert_eq!(data.trigger_type, "webhook"); + } + other => panic!("Expected ActivatePipe command, got {:?}", other), + } + } + + #[test] + fn parses_activate_pipe_shared_rabbitmq_fixture() { + if !shared_fixtures_available() { + eprintln!("skipping shared fixture test: shared fixtures are unavailable"); + return; + } + let cmd = AgentCommand { + id: "cmd-activate-rabbit-fixture".into(), + command_id: "cmd-activate-rabbit-fixture".into(), + name: "activate_pipe".into(), + params: json!({ "params": fixture("activate_pipe.rabbitmq.command.json") }), + deployment_hash: Some("dep-123".into()), + app_code: None, + }; + + let parsed = parse_stacker_command(&cmd).unwrap(); + match 
parsed { + Some(StackerCommand::ActivatePipe(data)) => { + assert_eq!(data.deployment_hash, "dep-123"); + assert_eq!(data.trigger_type, "rabbitmq"); + assert_eq!(data.source_queue.as_deref(), Some("events.queue")); + } + other => panic!("Expected ActivatePipe command, got {:?}", other), + } + } + + #[test] + fn parses_deactivate_pipe_shared_fixture() { + if !shared_fixtures_available() { + eprintln!("skipping shared fixture test: shared fixtures are unavailable"); + return; + } + let cmd = AgentCommand { + id: "cmd-deactivate-fixture".into(), + command_id: "cmd-deactivate-fixture".into(), + name: "deactivate_pipe".into(), + params: json!({ "params": fixture("deactivate_pipe.command.json") }), + deployment_hash: Some("dep-123".into()), + app_code: None, + }; + + let parsed = parse_stacker_command(&cmd).unwrap(); + match parsed { + Some(StackerCommand::DeactivatePipe(data)) => { + assert_eq!( + data.pipe_instance_id, + "11111111-1111-1111-1111-111111111111" + ); + } + other => panic!("Expected DeactivatePipe command, got {:?}", other), + } + } + + #[test] + fn parses_trigger_pipe_shared_manual_fixture() { + if !shared_fixtures_available() { + eprintln!("skipping shared fixture test: shared fixtures are unavailable"); + return; + } + let cmd = AgentCommand { + id: "cmd-trigger-fixture".into(), + command_id: "cmd-trigger-fixture".into(), + name: "trigger_pipe".into(), + params: json!({ "params": fixture("trigger_pipe.manual.command.json") }), + deployment_hash: Some("dep-123".into()), + app_code: None, + }; + + let parsed = parse_stacker_command(&cmd).unwrap(); + match parsed { + Some(StackerCommand::TriggerPipe(data)) => { + assert_eq!(data.trigger_type, "manual"); + assert_eq!(data.target_url.as_deref(), Some("https://example.com")); + } + other => panic!("Expected TriggerPipe command, got {:?}", other), + } + } + + #[test] + fn parses_trigger_pipe_shared_replay_fixture() { + if !shared_fixtures_available() { + eprintln!("skipping shared fixture test: shared fixtures 
are unavailable"); + return; + } + let cmd = AgentCommand { + id: "cmd-trigger-replay-fixture".into(), + command_id: "cmd-trigger-replay-fixture".into(), + name: "trigger_pipe".into(), + params: json!({ "params": fixture("trigger_pipe.replay.command.json") }), + deployment_hash: Some("dep-123".into()), + app_code: None, + }; + + let parsed = parse_stacker_command(&cmd).unwrap(); + match parsed { + Some(StackerCommand::TriggerPipe(data)) => { + assert_eq!(data.trigger_type, "replay"); + assert_eq!(data.input_data, Some(json!({ "invoice_id": "inv-replay" }))); + } + other => panic!("Expected TriggerPipe command, got {:?}", other), + } + } + + #[tokio::test] + async fn pipe_runtime_persists_and_restores_active_registration() { + let dir = tempdir().unwrap(); + let state_path = dir.path().join("pipe-runtime.json"); + + let runtime = PipeRuntime::new(); + runtime + .configure_persistence(Some(state_path.clone())) + .await; + + let mut registration = PipeRegistration::from(ActivatePipeCommand { + deployment_hash: "dep-restore".into(), + pipe_instance_id: "pipe-restore-1".into(), + source_container: Some("source-app".into()), + source_endpoint: "/source".into(), + source_method: "GET".into(), + source_broker_url: None, + source_queue: None, + source_exchange: None, + source_routing_key: None, + target_url: Some("https://example.com".into()), + target_container: None, + target_endpoint: "/runtime/pipe".into(), + target_method: "POST".into(), + field_mapping: Some(json!({ "email": "$.user.email" })), + trigger_type: "webhook".into(), + }); + registration.lifecycle = PipeLifecycleSnapshot::active("2026-01-01T00:00:00Z".into()); + + runtime + .activate( + PipeRuntimeKey { + deployment_hash: "dep-restore".into(), + pipe_instance_id: "pipe-restore-1".into(), + }, + registration, + ) + .await; + + let restored = PipeRuntime::new(); + restored.configure_persistence(Some(state_path)).await; + let count = restored.restore_from_disk().await.unwrap(); + + assert_eq!(count, 1); + let 
registration = restored + .resolve("dep-restore", "pipe-restore-1") + .await + .expect("registration should restore"); + assert_eq!( + registration.target_url.as_deref(), + Some("https://example.com") + ); + assert_eq!(registration.trigger_type, "webhook"); + + let snapshot = restored + .snapshot("dep-restore", "pipe-restore-1") + .await + .expect("lifecycle should restore"); + assert_eq!(snapshot.state, PipeLifecycleState::Active); + assert_eq!(snapshot.activated_at, "2026-01-01T00:00:00Z"); + } + + #[tokio::test] + async fn pipe_runtime_deactivate_removes_persisted_registration() { + let dir = tempdir().unwrap(); + let state_path = dir.path().join("pipe-runtime.json"); + + let runtime = PipeRuntime::new(); + runtime + .configure_persistence(Some(state_path.clone())) + .await; + + let mut registration = PipeRegistration::from(ActivatePipeCommand { + deployment_hash: "dep-deactivate".into(), + pipe_instance_id: "pipe-deactivate-1".into(), + source_container: Some("source-app".into()), + source_endpoint: "/source".into(), + source_method: "GET".into(), + source_broker_url: None, + source_queue: None, + source_exchange: None, + source_routing_key: None, + target_url: Some("https://example.com".into()), + target_container: None, + target_endpoint: "/runtime/pipe".into(), + target_method: "POST".into(), + field_mapping: None, + trigger_type: "webhook".into(), + }); + registration.lifecycle = PipeLifecycleSnapshot::active("2026-01-01T00:00:00Z".into()); + + runtime + .activate( + PipeRuntimeKey { + deployment_hash: "dep-deactivate".into(), + pipe_instance_id: "pipe-deactivate-1".into(), + }, + registration, + ) + .await; + + runtime + .deactivate( + "dep-deactivate", + "pipe-deactivate-1", + "2026-01-01T00:05:00Z".into(), + ) + .await; + + let restored = PipeRuntime::new(); + restored.configure_persistence(Some(state_path)).await; + let count = restored.restore_from_disk().await.unwrap(); + + assert_eq!(count, 0); + assert!(restored + .resolve("dep-deactivate", 
"pipe-deactivate-1") + .await + .is_none()); + } + + #[tokio::test] + async fn pipe_runtime_restore_restarts_poll_worker() { + let dir = tempdir().unwrap(); + let state_path = dir.path().join("pipe-runtime.json"); + + let runtime = PipeRuntime::new(); + runtime + .configure_persistence(Some(state_path.clone())) + .await; + + let mut registration = PipeRegistration::from(ActivatePipeCommand { + deployment_hash: "dep-poll".into(), + pipe_instance_id: "pipe-poll-1".into(), + source_container: None, + source_endpoint: "http://127.0.0.1:1/source".into(), + source_method: "GET".into(), + source_broker_url: None, + source_queue: None, + source_exchange: None, + source_routing_key: None, + target_url: Some("https://example.com".into()), + target_container: None, + target_endpoint: "/runtime/pipe".into(), + target_method: "POST".into(), + field_mapping: None, + trigger_type: "poll".into(), + }); + registration.lifecycle = PipeLifecycleSnapshot::active("2026-01-01T00:00:00Z".into()); + + runtime + .activate( + PipeRuntimeKey { + deployment_hash: "dep-poll".into(), + pipe_instance_id: "pipe-poll-1".into(), + }, + registration, + ) + .await; + + let restored = PipeRuntime::new(); + restored.configure_persistence(Some(state_path)).await; + let count = restored.restore_from_disk().await.unwrap(); + assert_eq!(count, 1); + + tokio::time::sleep(std::time::Duration::from_millis(25)).await; + let workers = restored.workers.read().await; + assert_eq!(workers.len(), 1); + drop(workers); + restored.stop_worker("dep-poll", "pipe-poll-1").await; + } + + #[test] + fn default_pipe_runtime_state_path_uses_config_directory() { + let path = default_pipe_runtime_state_path(Some("/tmp/status/config.json")).unwrap(); + assert_eq!( + path, + PathBuf::from("/tmp/status/.status/pipe-runtime-state.json") + ); + } + + #[test] + fn pipe_source_poll_interval_clamps_zero_to_one_second() { + let _env = EnvGuard::new(&["PIPE_POLL_INTERVAL_SECS"]); + std::env::set_var("PIPE_POLL_INTERVAL_SECS", "0"); + + 
assert_eq!(pipe_source_poll_interval(), Duration::from_secs(1)); + } + + #[tokio::test] + async fn pipe_runtime_persistence_redacts_credentials() { + let dir = tempdir().unwrap(); + let state_path = dir.path().join("pipe-runtime.json"); + + let runtime = PipeRuntime::new(); + runtime + .configure_persistence(Some(state_path.clone())) + .await; + + let mut registration = PipeRegistration::from(ActivatePipeCommand { + deployment_hash: "dep-secret".into(), + pipe_instance_id: "pipe-secret-1".into(), + source_container: None, + source_endpoint: "/source".into(), + source_method: "GET".into(), + source_broker_url: Some("amqp://guest:guest@localhost:5672/%2f".into()), + source_queue: Some("events.queue".into()), + source_exchange: Some("events.exchange".into()), + source_routing_key: Some("events.created".into()), + target_url: Some("https://user:token@example.com/hooks".into()), + target_container: None, + target_endpoint: "/runtime/pipe".into(), + target_method: "POST".into(), + field_mapping: None, + trigger_type: "rabbitmq".into(), + }); + registration.lifecycle = PipeLifecycleSnapshot::active("2026-01-01T00:00:00Z".into()); - Ok(result) -} + runtime + .activate( + PipeRuntimeKey { + deployment_hash: "dep-secret".into(), + pipe_instance_id: "pipe-secret-1".into(), + }, + registration, + ) + .await; -#[cfg(test)] -mod tests { - use super::*; - use serde_json::json; + let body = tokio::fs::read_to_string(&state_path).await.unwrap(); + assert!(!body.contains("guest:guest")); + assert!(!body.contains("user:token")); + assert!(body.contains("amqp://***@localhost:5672/%2f")); + assert!(body.contains("https://***@example.com/hooks")); - macro_rules! 
stacker_test { - ($name:ident, $cmd_name:expr, $payload:expr, $variant:path) => { - #[test] - fn $name() { - let cmd = AgentCommand { - id: "cmd-test".into(), - command_id: "cmd-test".into(), - name: $cmd_name.into(), - params: $payload, - deployment_hash: Some("testhash".into()), - app_code: Some("testapp".into()), - }; - let parsed = parse_stacker_command(&cmd).unwrap(); - match parsed { - Some($variant(_)) => {} - _ => panic!("Did not parse {} command correctly", $cmd_name), - } - } - }; + #[cfg(unix)] + { + let mode = std::fs::metadata(&state_path).unwrap().permissions().mode() & 0o777; + assert_eq!(mode, 0o600); + } } stacker_test!( @@ -5149,6 +8200,66 @@ mod tests { }), StackerCommand::ListContainers ); + stacker_test!( + parses_activate_pipe_command, + "activate_pipe", + json!({ + "params": { + "pipe_instance_id": "11111111-1111-1111-1111-111111111111", + "target_url": "https://example.com" + } + }), + StackerCommand::ActivatePipe + ); + + #[test] + fn activate_pipe_defaults_trigger_type_to_webhook() { + let cmd = AgentCommand { + id: "cmd-activate-default".into(), + command_id: "cmd-activate-default".into(), + name: "activate_pipe".into(), + params: json!({ + "params": { + "pipe_instance_id": "11111111-1111-1111-1111-111111111111", + "target_url": "https://example.com" + } + }), + deployment_hash: Some("dep-123".into()), + app_code: None, + }; + + let parsed = parse_stacker_command(&cmd).unwrap(); + match parsed { + Some(StackerCommand::ActivatePipe(data)) => { + assert_eq!(data.trigger_type, "webhook"); + } + other => panic!("Expected ActivatePipe command, got {:?}", other), + } + } + + stacker_test!( + parses_deactivate_pipe_command, + "deactivate_pipe", + json!({ + "params": { + "pipe_instance_id": "11111111-1111-1111-1111-111111111111" + } + }), + StackerCommand::DeactivatePipe + ); + stacker_test!( + parses_trigger_pipe_command, + "trigger_pipe", + json!({ + "params": { + "pipe_instance_id": "11111111-1111-1111-1111-111111111111", + "input_data": { + 
"invoice_id": "inv-1" + } + } + }), + StackerCommand::TriggerPipe + ); stacker_test!( parses_stacker_list_containers_command, "stacker.list_containers", @@ -5175,6 +8286,203 @@ mod tests { assert!(parsed.is_none()); } + #[test] + fn parses_trigger_pipe_external_target_fields() { + let cmd = AgentCommand { + id: "cmd-trigger".into(), + command_id: "cmd-trigger".into(), + name: "trigger_pipe".into(), + params: json!({ + "params": { + "pipe_instance_id": "11111111-1111-1111-1111-111111111111", + "target_url": "https://example.com", + "target_endpoint": "/webhook/pipe", + "target_method": "post", + "field_mapping": { "email": "$.user.email" }, + "trigger_type": "manual", + "input_data": { "user": { "email": "dev@try.direct" } } + } + }), + deployment_hash: Some("dep-123".into()), + app_code: None, + }; + + let parsed = parse_stacker_command(&cmd).unwrap(); + match parsed { + Some(StackerCommand::TriggerPipe(data)) => { + assert_eq!(data.deployment_hash, "dep-123"); + assert_eq!(data.target_url.as_deref(), Some("https://example.com")); + assert_eq!(data.target_endpoint, "/webhook/pipe"); + assert_eq!(data.target_method, "POST"); + assert_eq!(data.trigger_type, "manual"); + assert_eq!(data.field_mapping, Some(json!({ "email": "$.user.email" }))); + } + other => panic!("Expected TriggerPipe command, got {:?}", other), + } + } + + #[test] + fn parses_trigger_pipe_internal_target_fields() { + let cmd = AgentCommand { + id: "cmd-trigger".into(), + command_id: "cmd-trigger".into(), + name: "trigger_pipe".into(), + params: json!({ + "params": { + "pipe_instance_id": "11111111-1111-1111-1111-111111111111", + "target_container": "target-app", + "target_endpoint": "/hooks/pipe", + "target_method": "post", + "field_mapping": { "email": "$.user.email" }, + "input_data": { "user": { "email": "dev@try.direct" } } + } + }), + deployment_hash: Some("dep-123".into()), + app_code: None, + }; + + let parsed = parse_stacker_command(&cmd).unwrap(); + match parsed { + 
Some(StackerCommand::TriggerPipe(data)) => { + assert_eq!(data.target_container.as_deref(), Some("target-app")); + assert_eq!(data.target_endpoint, "/hooks/pipe"); + assert_eq!(data.target_method, "POST"); + } + other => panic!("Expected TriggerPipe command, got {:?}", other), + } + } + + #[test] + fn parses_trigger_pipe_source_fetch_fields() { + let cmd = AgentCommand { + id: "cmd-trigger".into(), + command_id: "cmd-trigger".into(), + name: "trigger_pipe".into(), + params: json!({ + "params": { + "pipe_instance_id": "11111111-1111-1111-1111-111111111111", + "source_container": "source-app", + "source_endpoint": "/source/data", + "source_method": "get", + "target_container": "target-app", + "target_endpoint": "/hooks/pipe", + "target_method": "post" + } + }), + deployment_hash: Some("dep-123".into()), + app_code: None, + }; + + let parsed = parse_stacker_command(&cmd).unwrap(); + match parsed { + Some(StackerCommand::TriggerPipe(data)) => { + assert_eq!(data.source_container.as_deref(), Some("source-app")); + assert_eq!(data.source_endpoint, "/source/data"); + assert_eq!(data.source_method, "GET"); + assert_eq!(data.target_container.as_deref(), Some("target-app")); + } + other => panic!("Expected TriggerPipe command, got {:?}", other), + } + } + + #[test] + fn parses_activate_pipe_fields() { + let cmd = AgentCommand { + id: "cmd-activate".into(), + command_id: "cmd-activate".into(), + name: "activate_pipe".into(), + params: json!({ + "params": { + "pipe_instance_id": "11111111-1111-1111-1111-111111111111", + "target_url": "https://example.com", + "target_endpoint": "/runtime/pipe", + "target_method": "post", + "field_mapping": { "email": "$.user.email" }, + "trigger_type": "webhook" + } + }), + deployment_hash: Some("dep-123".into()), + app_code: None, + }; + + let parsed = parse_stacker_command(&cmd).unwrap(); + match parsed { + Some(StackerCommand::ActivatePipe(data)) => { + assert_eq!(data.deployment_hash, "dep-123"); + assert_eq!(data.target_url.as_deref(), 
Some("https://example.com")); + assert_eq!(data.target_endpoint, "/runtime/pipe"); + assert_eq!(data.target_method, "POST"); + assert_eq!(data.trigger_type, "webhook"); + } + other => panic!("Expected ActivatePipe command, got {:?}", other), + } + } + + #[test] + fn parses_activate_pipe_rabbitmq_fields() { + let cmd = AgentCommand { + id: "cmd-activate-amqp".into(), + command_id: "cmd-activate-amqp".into(), + name: "activate_pipe".into(), + params: json!({ + "params": { + "pipe_instance_id": "pipe-amqp-1", + "source_broker_url": "amqp://guest:guest@localhost:5672/%2f", + "source_queue": "events.queue", + "source_exchange": "events.exchange", + "source_routing_key": "events.created", + "target_url": "https://example.com", + "trigger_type": "rabbitmq" + } + }), + deployment_hash: Some("dep-123".into()), + app_code: None, + }; + + let parsed = parse_stacker_command(&cmd).unwrap(); + match parsed { + Some(StackerCommand::ActivatePipe(data)) => { + assert_eq!(data.trigger_type, "rabbitmq"); + assert_eq!( + data.source_broker_url.as_deref(), + Some("amqp://guest:guest@localhost:5672/%2f") + ); + assert_eq!(data.source_queue.as_deref(), Some("events.queue")); + assert_eq!(data.source_exchange.as_deref(), Some("events.exchange")); + assert_eq!(data.source_routing_key.as_deref(), Some("events.created")); + } + other => panic!("Expected ActivatePipe command, got {:?}", other), + } + } + + #[test] + fn parses_deactivate_pipe_fields() { + let cmd = AgentCommand { + id: "cmd-deactivate".into(), + command_id: "cmd-deactivate".into(), + name: "deactivate_pipe".into(), + params: json!({ + "params": { + "pipe_instance_id": "11111111-1111-1111-1111-111111111111" + } + }), + deployment_hash: Some("dep-123".into()), + app_code: None, + }; + + let parsed = parse_stacker_command(&cmd).unwrap(); + match parsed { + Some(StackerCommand::DeactivatePipe(data)) => { + assert_eq!(data.deployment_hash, "dep-123"); + assert_eq!( + data.pipe_instance_id, + "11111111-1111-1111-1111-111111111111" + 
); + } + other => panic!("Expected DeactivatePipe command, got {:?}", other), + } + } + // --- ContainerRuntime tests --- #[test] @@ -6001,6 +9309,7 @@ mod probe_endpoints_command_tests { container: Some(" crm-web ".to_string()), protocols: vec![" OpenAPI ".to_string(), " REST ".to_string()], probe_timeout: 5, + capture_samples: false, }; let normalized = cmd.normalize(); assert_eq!(normalized.deployment_hash, "abc123"); @@ -6017,6 +9326,7 @@ mod probe_endpoints_command_tests { container: None, protocols: vec![], probe_timeout: 5, + capture_samples: false, }; let normalized = cmd.normalize(); assert_eq!(normalized.protocols, vec!["openapi", "rest"]); @@ -6030,6 +9340,7 @@ mod probe_endpoints_command_tests { container: None, protocols: vec!["openapi".to_string(), " ".to_string(), "".to_string()], probe_timeout: 5, + capture_samples: false, }; let normalized = cmd.normalize(); assert_eq!(normalized.protocols, vec!["openapi"]); @@ -6043,6 +9354,7 @@ mod probe_endpoints_command_tests { container: None, protocols: vec![" ".to_string(), "".to_string()], probe_timeout: 5, + capture_samples: false, }; let normalized = cmd.normalize(); assert_eq!(normalized.protocols, vec!["openapi", "rest"]); @@ -6056,6 +9368,7 @@ mod probe_endpoints_command_tests { container: Some(" ".to_string()), protocols: vec!["openapi".to_string()], probe_timeout: 5, + capture_samples: false, }; let normalized = cmd.normalize(); assert!(normalized.container.is_none()); @@ -6071,6 +9384,7 @@ mod probe_endpoints_command_tests { container: None, protocols: vec!["openapi".to_string()], probe_timeout: 5, + capture_samples: false, }; let agent_cmd = AgentCommand { id: "test-id".into(), @@ -6092,6 +9406,7 @@ mod probe_endpoints_command_tests { container: None, protocols: vec!["openapi".to_string()], probe_timeout: 5, + capture_samples: false, }; let agent_cmd = AgentCommand { id: "test-id".into(), @@ -6113,6 +9428,7 @@ mod probe_endpoints_command_tests { container: None, protocols: 
vec!["openapi".to_string()], probe_timeout: 5, + capture_samples: false, }; let agent_cmd = AgentCommand { id: "test-id".into(), @@ -6137,6 +9453,7 @@ mod probe_endpoints_command_tests { container: None, protocols: vec!["openapi".to_string()], probe_timeout: 5, + capture_samples: false, }; let result = cmd.validate(); assert!(result.is_err()); @@ -6151,6 +9468,7 @@ mod probe_endpoints_command_tests { container: None, protocols: vec!["openapi".to_string()], probe_timeout: 5, + capture_samples: false, }; let result = cmd.validate(); assert!(result.is_err()); @@ -6165,6 +9483,7 @@ mod probe_endpoints_command_tests { container: None, protocols: vec!["openapi".to_string(), "invalid_proto".to_string()], probe_timeout: 5, + capture_samples: false, }; let result = cmd.validate(); assert!(result.is_err()); @@ -6188,6 +9507,7 @@ mod probe_endpoints_command_tests { "rest".to_string(), ], probe_timeout: 5, + capture_samples: false, }; assert!(cmd.validate().is_ok()); } @@ -6200,6 +9520,7 @@ mod probe_endpoints_command_tests { container: None, protocols: vec!["openapi".to_string()], probe_timeout: 5, + capture_samples: false, }; assert!(cmd.validate().is_ok()); } @@ -6234,7 +9555,7 @@ mod probe_endpoints_command_tests { } }); - let ops = extract_openapi_operations(&spec); + let ops = extract_openapi_operations(&spec, false); assert_eq!(ops.len(), 2); // Find the GET operation @@ -6270,7 +9591,7 @@ mod probe_endpoints_command_tests { } }); - let ops = extract_openapi_operations(&spec); + let ops = extract_openapi_operations(&spec, false); assert_eq!(ops.len(), 1); assert_eq!(ops[0]["method"], "GET"); } @@ -6282,7 +9603,7 @@ mod probe_endpoints_command_tests { "paths": {} }); - let ops = extract_openapi_operations(&spec); + let ops = extract_openapi_operations(&spec, false); assert!(ops.is_empty()); } @@ -6293,7 +9614,7 @@ mod probe_endpoints_command_tests { "info": { "title": "test" } }); - let ops = extract_openapi_operations(&spec); + let ops = 
extract_openapi_operations(&spec, false); assert!(ops.is_empty()); } @@ -6308,11 +9629,192 @@ mod probe_endpoints_command_tests { } }); - let ops = extract_openapi_operations(&spec); + let ops = extract_openapi_operations(&spec, false); assert_eq!(ops.len(), 1); assert_eq!(ops[0]["summary"], ""); } + #[test] + fn extract_openapi_operations_capture_samples_from_example() { + let spec = json!({ + "openapi": "3.0.0", + "paths": { + "/api/v1/posts": { + "get": { + "summary": "List posts", + "responses": { + "200": { + "content": { + "application/json": { + "example": [ + {"id": 1, "title": "Hello World", "author": 42} + ] + } + } + } + } + } + } + } + }); + + // Without capture_samples + let ops = extract_openapi_operations(&spec, false); + assert_eq!(ops.len(), 1); + assert!(ops[0].get("sample_response").is_none()); + + // With capture_samples + let ops = extract_openapi_operations(&spec, true); + assert_eq!(ops.len(), 1); + let sample = &ops[0]["sample_response"]; + assert!(sample.is_array()); + assert_eq!(sample[0]["id"], 1); + assert_eq!(sample[0]["title"], "Hello World"); + } + + #[test] + fn extract_openapi_operations_capture_samples_from_schema_example() { + let spec = json!({ + "openapi": "3.0.0", + "paths": { + "/api/users": { + "get": { + "summary": "Get users", + "responses": { + "200": { + "content": { + "application/json": { + "schema": { + "type": "object", + "example": {"id": 1, "name": "Alice"} + } + } + } + } + } + } + } + } + }); + + let ops = extract_openapi_operations(&spec, true); + assert_eq!(ops[0]["sample_response"]["name"], "Alice"); + } + + #[test] + fn extract_openapi_operations_capture_samples_swagger2() { + let spec = json!({ + "swagger": "2.0", + "paths": { + "/api/items": { + "get": { + "summary": "List items", + "responses": { + "200": { + "examples": { + "application/json": [ + {"id": 1, "name": "Widget"} + ] + } + } + } + } + } + } + }); + + let ops = extract_openapi_operations(&spec, true); + 
assert_eq!(ops[0]["sample_response"][0]["name"], "Widget"); + } + + #[test] + fn extract_openapi_operations_capture_samples_with_ref() { + let spec = json!({ + "openapi": "3.0.0", + "paths": { + "/api/posts": { + "get": { + "summary": "List posts", + "responses": { + "200": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/PostList" + } + } + } + } + } + } + } + }, + "components": { + "schemas": { + "PostList": { + "type": "array", + "example": [{"id": 1, "title": "First Post"}] + } + } + } + }); + + let ops = extract_openapi_operations(&spec, true); + assert_eq!(ops[0]["sample_response"][0]["title"], "First Post"); + } + + #[test] + fn probe_endpoints_command_capture_samples_defaults_false() { + let cmd: ProbeEndpointsCommand = serde_json::from_value(json!({ + "app_code": "wordpress" + })) + .unwrap(); + assert!(!cmd.capture_samples); + } + + #[test] + fn probe_endpoints_command_capture_samples_true() { + let cmd: ProbeEndpointsCommand = serde_json::from_value(json!({ + "app_code": "wordpress", + "capture_samples": true + })) + .unwrap(); + assert!(cmd.capture_samples); + } + + #[test] + fn extract_response_example_from_direct_example() { + let spec = json!({}); + let operation = json!({ + "responses": { + "200": { + "content": { + "application/json": { + "example": {"id": 1, "name": "Test"} + } + } + } + } + }); + let sample = extract_response_example(&spec, &operation); + assert!(sample.is_some()); + assert_eq!(sample.unwrap()["name"], "Test"); + } + + #[test] + fn extract_response_example_returns_none_when_missing() { + let spec = json!({}); + let operation = json!({ + "responses": { + "200": { + "description": "Success" + } + } + }); + let sample = extract_response_example(&spec, &operation); + assert!(sample.is_none()); + } + // ==================== EXTRACT_REQUEST_FIELDS TESTS ==================== #[test] diff --git a/src/comms/local_api.rs b/src/comms/local_api.rs index fcff03b..69f47bc 100644 --- 
a/src/comms/local_api.rs +++ b/src/comms/local_api.rs @@ -42,11 +42,13 @@ use crate::commands::{ check_remote_version, get_update_status, start_update_job, UpdateJobs, UpdatePhase, }; use crate::commands::{ - execute_stacker_command, parse_stacker_command, CommandValidator, DockerOperation, + execute_stacker_command, parse_stacker_command, CommandValidator, DockerOperation, PipeRuntime, TimeoutStrategy, }; +use crate::comms::notifications::{self, MarkReadRequest, NotificationStore, UnreadCountResponse}; use crate::monitoring::{ - spawn_heartbeat, MetricsCollector, MetricsSnapshot, MetricsStore, MetricsTx, + spawn_heartbeat, CommandExecutionMetrics, CommandMetricsStore, ControlPlane, MetricsCollector, + MetricsSnapshot, MetricsStore, MetricsTx, }; use crate::security::audit_log::AuditLogger; use crate::security::auth::{Credentials, SessionStore, SessionUser}; @@ -108,6 +110,7 @@ pub struct AppState { pub with_ui: bool, pub metrics_collector: Arc, pub metrics_store: MetricsStore, + pub command_metrics: CommandMetricsStore, pub metrics_tx: MetricsTx, pub metrics_webhook: Option, pub backup_path: Option, @@ -123,6 +126,8 @@ pub struct AppState { pub update_jobs: UpdateJobs, pub firewall_policy: FirewallPolicy, pub login_limiter: RateLimiter, + pub notification_store: NotificationStore, + pub pipe_runtime: PipeRuntime, } impl AppState { @@ -160,6 +165,7 @@ impl AppState { with_ui, metrics_collector: Arc::new(MetricsCollector::new()), metrics_store: Arc::new(tokio::sync::RwLock::new(MetricsSnapshot::default())), + command_metrics: Arc::new(tokio::sync::RwLock::new(CommandExecutionMetrics::default())), metrics_tx: broadcast::channel(32).0, metrics_webhook: std::env::var("METRICS_WEBHOOK").ok(), backup_path: std::env::var("BACKUP_PATH").ok(), @@ -187,6 +193,8 @@ impl AppState { update_jobs: Arc::new(tokio::sync::RwLock::new(std::collections::HashMap::new())), firewall_policy, login_limiter: RateLimiter::new_per_minute(5), + notification_store: 
notifications::new_notification_store(), + pipe_runtime: PipeRuntime::new(), } } } @@ -233,6 +241,7 @@ pub struct HealthResponse { pub status: String, pub token_age_seconds: u64, pub last_refresh_ok: Option, + pub command_metrics: CommandExecutionMetrics, } // ---- Marketplace types ---- @@ -301,13 +310,36 @@ async fn health(State(state): State) -> impl IntoResponse { None }; + let command_metrics = state.command_metrics.read().await.clone(); + Json(HealthResponse { status: "ok".to_string(), token_age_seconds, last_refresh_ok, + command_metrics, }) } +async fn command_metrics_handler(State(state): State) -> impl IntoResponse { + Json(state.command_metrics.read().await.clone()) +} + +async fn record_command_execution(state: &SharedState, executed_by: &str) { + let control_plane = ControlPlane::from_value(Some(executed_by)); + let mut metrics = state.command_metrics.write().await; + metrics.record_execution(control_plane); +} + +async fn attach_command_provenance( + state: &SharedState, + mut result: CommandResult, + executed_by: &str, +) -> CommandResult { + record_command_execution(state, executed_by).await; + result.executed_by = Some(executed_by.to_string()); + result +} + // Login form (GET) async fn login_page(State(state): State) -> impl IntoResponse { if state.with_ui { @@ -1091,8 +1123,29 @@ async fn capabilities_handler(State(state): State) -> impl IntoResp .or_else(|| state.config.control_plane.clone()) .unwrap_or_else(|| "status_panel".to_string()); - let features = - crate::agent::registration::collect_capabilities(state.config.compose_agent_enabled).await; + // Basic capability set; extend if docker feature is enabled + let mut features = vec!["monitoring".to_string()]; + if cfg!(feature = "docker") { + features.push("docker".to_string()); + features.push("compose".to_string()); + features.push("logs".to_string()); + features.push("restart".to_string()); + } + features.push("pipes".to_string()); + features.push("activate_pipe".to_string()); + 
features.push("deactivate_pipe".to_string()); + features.push("trigger_pipe".to_string()); + if compose_agent { + features.push("compose_agent".to_string()); + } + + // Detect Kata Containers runtime availability + #[cfg(feature = "docker")] + { + if crate::commands::stacker::detect_kata_runtime().await { + features.push("kata".to_string()); + } + } let resp = CapabilitiesResponse { compose_agent, @@ -1409,12 +1462,35 @@ async fn unlink_handler(State(state): State) -> impl IntoResponse { } } +// ---- Notification API handlers ---- + +async fn notifications_list(State(state): State) -> impl IntoResponse { + let summary = notifications::get_summary(&state.notification_store).await; + Json(summary) +} + +async fn notifications_mark_read( + State(state): State, + Json(req): Json, +) -> impl IntoResponse { + notifications::mark_read(&state.notification_store, &req.ids, req.all).await; + Json(json!({"status": "ok"})) +} + +async fn notifications_unread_count(State(state): State) -> impl IntoResponse { + let count = notifications::get_unread_count(&state.notification_store).await; + Json(UnreadCountResponse { + unread_count: count, + }) +} + pub fn create_router(state: SharedState) -> Router { let mut router = Router::new() .route("/health", get(health)) .route("/capabilities", get(capabilities_handler)) .route("/metrics", get(metrics_handler)) .route("/metrics/stream", get(metrics_ws_handler)) + .route("/api/v1/diagnostics/commands", get(command_metrics_handler)) // Self-update endpoints .route("/api/self/version", get(self_version)) .route("/api/self/update/start", post(self_update_start)) @@ -1433,6 +1509,11 @@ pub fn create_router(state: SharedState) -> Router { .route("/api/v1/commands/enqueue", post(commands_enqueue)) .route("/api/v1/auth/rotate-token", post(rotate_token)); + router = router.route( + "/api/v1/pipes/webhook/{deployment_hash}/{pipe_instance_id}", + post(pipe_webhook_ingest), + ); + // Marketplace & dashboard linking router = router 
.route("/marketplace", get(marketplace_page)) @@ -1442,6 +1523,15 @@ pub fn create_router(state: SharedState) -> Router { .route("/link/select", post(link_select_handler)) .route("/link/unlink", post(unlink_handler)); + // Notifications + router = router + .route("/api/v1/notifications", get(notifications_list)) + .route("/api/v1/notifications/read", post(notifications_mark_read)) + .route( + "/api/v1/notifications/unread-count", + get(notifications_unread_count), + ); + #[cfg(feature = "docker")] { router = router @@ -1847,6 +1937,9 @@ async fn commands_report( .into_response() } }; + if let Some(executed_by) = res.executed_by.as_deref() { + record_command_execution(&state, executed_by).await; + } info!(command_id = %res.command_id, status = %res.status, "command result reported"); (StatusCode::OK, Json(json!({"accepted": true}))).into_response() } @@ -1880,10 +1973,25 @@ async fn commands_execute( .into_response() } }; + let executed_by = ControlPlane::from_value( + std::env::var("CONTROL_PLANE") + .ok() + .as_deref() + .or(state.config.control_plane.as_deref()), + ) + .to_string(); if let Some(stacker_cmd) = parsed_stacker_cmd { - match execute_stacker_command(&cmd, &stacker_cmd, &state.firewall_policy).await { + match execute_stacker_command( + &cmd, + &stacker_cmd, + &state.firewall_policy, + &state.pipe_runtime, + ) + .await + { Ok(result) => { - return Json(result).into_response(); + return Json(attach_command_provenance(&state, result, &executed_by).await) + .into_response(); } Err(e) => { error!( @@ -1920,7 +2028,10 @@ async fn commands_execute( } #[cfg(feature = "docker")] match execute_docker_operation(&cmd.command_id, op).await { - Ok(result) => return Json(result).into_response(), + Ok(result) => { + return Json(attach_command_provenance(&state, result, &executed_by).await) + .into_response() + } Err(e) => { return ( StatusCode::INTERNAL_SERVER_ERROR, @@ -1967,7 +2078,10 @@ async fn commands_execute( let executor = CommandExecutor::new(); match 
executor.execute(&cmd, strategy).await { - Ok(exec) => Json(exec.to_command_result()).into_response(), + Ok(exec) => { + Json(attach_command_provenance(&state, exec.to_command_result(), &executed_by).await) + .into_response() + } Err(e) => ( StatusCode::INTERNAL_SERVER_ERROR, Json(json!({"error": e.to_string()})), @@ -2009,6 +2123,41 @@ async fn commands_enqueue( (StatusCode::ACCEPTED, Json(json!({"queued": true}))).into_response() } +async fn pipe_webhook_ingest( + State(state): State, + Path((deployment_hash, pipe_instance_id)): Path<(String, String)>, + headers: HeaderMap, + body: Bytes, +) -> impl IntoResponse { + if let Err(resp) = verify_stacker_post(&state, &headers, &body, "commands:execute").await { + return resp.into_response(); + } + + let payload: serde_json::Value = match serde_json::from_slice(&body) { + Ok(value) => value, + Err(error) => { + return ( + StatusCode::BAD_REQUEST, + Json(json!({"error": format!("invalid webhook payload: {}", error)})), + ) + .into_response() + } + }; + + match state + .pipe_runtime + .trigger_registered_payload(&deployment_hash, &pipe_instance_id, payload, "webhook") + .await + { + Ok(result) => Json(result).into_response(), + Err(error) => ( + StatusCode::INTERNAL_SERVER_ERROR, + Json(json!({"error": error.to_string()})), + ) + .into_response(), + } +} + #[derive(Deserialize)] struct RotateTokenRequest { new_token: String, @@ -2056,9 +2205,24 @@ pub fn default_bind_address(bind: Option) -> std::net::Ipv4Addr { } } -pub async fn serve(config: Config, port: u16, with_ui: bool) -> Result<()> { +pub async fn serve(config: Config, config_path: &str, port: u16, with_ui: bool) -> Result<()> { let cfg = Arc::new(config); let state = Arc::new(AppState::new(cfg, with_ui, Some(port))); + state + .pipe_runtime + .configure_persistence(crate::commands::default_pipe_runtime_state_path(Some( + config_path, + ))) + .await; + match state.pipe_runtime.restore_from_disk().await { + Ok(restored) if restored > 0 => { + info!(restored, 
"restored persisted pipe runtime registrations"); + } + Ok(_) => {} + Err(error) => { + error!(error = %error, "failed to restore persisted pipe runtime registrations"); + } + } // Spawn token refresh task if Vault is configured if let (Some(vault_client), Some(token_cache)) = (&state.vault_client, &state.token_cache) { @@ -2078,14 +2242,64 @@ pub async fn serve(config: Config, port: u16, with_ui: bool) -> Result<()> { .and_then(|s| s.parse::().ok()) .map(Duration::from_secs) .unwrap_or(Duration::from_secs(30)); + + let alert_manager = { + let cfg = crate::monitoring::alerting::AlertConfig::from_env(); + let mgr = crate::monitoring::alerting::AlertManager::new(cfg); + if mgr.is_enabled() { + tracing::info!("outbound alerting enabled"); + Some(Arc::new(mgr)) + } else { + tracing::debug!("outbound alerting disabled (ALERT_WEBHOOK_URL not set)"); + None + } + }; + spawn_heartbeat( state.metrics_collector.clone(), state.metrics_store.clone(), heartbeat_interval, state.metrics_tx.clone(), state.metrics_webhook.clone(), + alert_manager, ); + // Spawn notification poller if dashboard connection is configured + { + let dashboard_url = + std::env::var("DASHBOARD_URL").unwrap_or_else(|_| "http://localhost:5000".to_string()); + let agent_id = std::env::var("AGENT_ID").unwrap_or_default(); + let agent_token = std::env::var("AGENT_TOKEN").unwrap_or_default(); + + if !agent_token.is_empty() { + // Build a TokenProvider so the poller can refresh on 401/403 + let token_provider = crate::security::token_provider::TokenProvider::from_env( + state.vault_client.clone(), + ); + + let poll_interval = std::env::var("NOTIFICATION_POLL_SECS") + .ok() + .and_then(|s| s.parse::().ok()) + .map(Duration::from_secs) + .unwrap_or(Duration::from_secs(300)); + + let deployment_hash = + std::env::var("DEPLOYMENT_HASH").unwrap_or_else(|_| "default".to_string()); + + notifications::spawn_notification_poller( + dashboard_url, + agent_id, + token_provider, + deployment_hash, + 
state.notification_store.clone(), + poll_interval, + ); + info!("Notification poller spawned"); + } else { + info!("Notification poller skipped (no AGENT_TOKEN configured)"); + } + } + // Periodic cleanup of rate limiter, login limiter, replay protection, and expired sessions { let state_cleanup = state.clone(); @@ -2121,3 +2335,87 @@ pub async fn serve(config: Config, port: u16, with_ui: bool) -> Result<()> { axum::serve(listener, app).into_future().await?; Ok(()) } + +#[cfg(test)] +mod tests { + use super::*; + use axum::body::to_bytes; + use serde_json::Value; + + fn test_state(control_plane: Option<&str>) -> SharedState { + Arc::new(AppState::new( + Arc::new(Config { + domain: None, + subdomains: None, + apps_info: None, + reqdata: crate::agent::config::ReqData { + email: "ops@example.com".to_string(), + }, + ssl: None, + compose_agent_enabled: false, + control_plane: control_plane.map(str::to_string), + firewall: None, + }), + false, + None, + )) + } + + #[tokio::test] + async fn health_includes_command_metrics() { + let state = test_state(Some("compose_agent")); + record_command_execution(&state, "compose_agent").await; + + let response = health(State(state)).await.into_response(); + let body = to_bytes(response.into_body(), usize::MAX) + .await + .expect("health body"); + let payload: Value = serde_json::from_slice(&body).expect("health json"); + + assert_eq!(payload["command_metrics"]["compose_agent_count"], 1); + assert_eq!(payload["command_metrics"]["total_count"], 1); + assert_eq!( + payload["command_metrics"]["last_control_plane"], + Value::String("compose_agent".to_string()) + ); + } + + #[tokio::test] + async fn command_metrics_handler_returns_snapshot() { + let state = test_state(Some("status_panel")); + record_command_execution(&state, "status_panel").await; + + let response = command_metrics_handler(State(state)).await.into_response(); + let body = to_bytes(response.into_body(), usize::MAX) + .await + .expect("metrics body"); + let payload: Value = 
serde_json::from_slice(&body).expect("metrics json"); + + assert_eq!(payload["status_panel_count"], 1); + assert_eq!(payload["compose_agent_count"], 0); + assert_eq!(payload["total_count"], 1); + assert_eq!( + payload["last_control_plane"], + Value::String("status_panel".to_string()) + ); + } + + #[tokio::test] + async fn capabilities_include_pipe_operations() { + let state = test_state(Some("status_panel")); + + let response = capabilities_handler(State(state)).await.into_response(); + let body = to_bytes(response.into_body(), usize::MAX) + .await + .expect("capabilities body"); + let payload: Value = serde_json::from_slice(&body).expect("capabilities json"); + let features = payload["features"] + .as_array() + .expect("features should be an array"); + + assert!(features.contains(&Value::String("pipes".to_string()))); + assert!(features.contains(&Value::String("activate_pipe".to_string()))); + assert!(features.contains(&Value::String("deactivate_pipe".to_string()))); + assert!(features.contains(&Value::String("trigger_pipe".to_string()))); + } +} diff --git a/src/comms/mod.rs b/src/comms/mod.rs index f2536a5..a6a3499 100644 --- a/src/comms/mod.rs +++ b/src/comms/mod.rs @@ -1 +1,2 @@ pub mod local_api; +pub mod notifications; diff --git a/src/comms/notifications.rs b/src/comms/notifications.rs new file mode 100644 index 0000000..357d2d3 --- /dev/null +++ b/src/comms/notifications.rs @@ -0,0 +1,358 @@ +use std::sync::Arc; +use std::time::Duration; + +use reqwest::Client; +use serde::{Deserialize, Serialize}; +use tokio::sync::RwLock; +use tokio::task::JoinHandle; +use tracing::{debug, error, info, warn}; + +use crate::security::token_provider::TokenProvider; +use crate::transport::http_polling::build_signed_headers; + +// ---- Types ---- + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +#[serde(rename_all = "snake_case")] +pub enum NotificationKind { + StackUpdateAvailable, + StackPublished, + SystemNotice, +} + +#[derive(Debug, Clone, Serialize, 
Deserialize)]
+pub struct Notification {
+    pub id: String,
+    pub kind: NotificationKind,
+    pub title: String,
+    pub message: String,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub stack_id: Option<String>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub stack_name: Option<String>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub new_version: Option<String>,
+    pub created_at: String,
+    #[serde(default)]
+    pub read: bool,
+}
+
+#[derive(Debug, Serialize)]
+pub struct NotificationSummary {
+    pub unread_count: usize,
+    pub notifications: Vec<Notification>,
+}
+
+#[derive(Debug, Deserialize)]
+pub struct MarkReadRequest {
+    #[serde(default)]
+    pub ids: Vec<String>,
+    #[serde(default)]
+    pub all: bool,
+}
+
+#[derive(Debug, Serialize)]
+pub struct UnreadCountResponse {
+    pub unread_count: usize,
+}
+
+pub type NotificationStore = Arc<RwLock<Vec<Notification>>>;
+
+pub fn new_notification_store() -> NotificationStore {
+    Arc::new(RwLock::new(Vec::new()))
+}
+
+// ---- Store operations ----
+
+pub async fn get_unread_count(store: &NotificationStore) -> usize {
+    let notifications = store.read().await;
+    notifications.iter().filter(|n| !n.read).count()
+}
+
+pub async fn get_summary(store: &NotificationStore) -> NotificationSummary {
+    let notifications = store.read().await;
+    let unread_count = notifications.iter().filter(|n| !n.read).count();
+    NotificationSummary {
+        unread_count,
+        notifications: notifications.clone(),
+    }
+}
+
+pub async fn mark_read(store: &NotificationStore, ids: &[String], all: bool) {
+    let mut notifications = store.write().await;
+    for n in notifications.iter_mut() {
+        if all || ids.contains(&n.id) {
+            n.read = true;
+        }
+    }
+}
+
+/// Merge incoming notifications into the store, deduplicating by id.
+/// New notifications are prepended (most recent first).
+pub async fn merge_notifications(store: &NotificationStore, incoming: Vec<Notification>) {
+    let mut notifications = store.write().await;
+    for n in incoming {
+        if !notifications.iter().any(|existing| existing.id == n.id) {
+            notifications.insert(0, n);
+        }
+    }
+    // Cap at 100 notifications to prevent unbounded growth
+    notifications.truncate(100);
+}
+
+// ---- Poller ----
+
+#[derive(Debug, Deserialize)]
+struct StackerNotificationsResponse {
+    notifications: Vec<Notification>,
+}
+
+pub fn spawn_notification_poller(
+    dashboard_url: String,
+    agent_id: String,
+    token_provider: TokenProvider,
+    deployment_hash: String,
+    store: NotificationStore,
+    interval: Duration,
+) -> JoinHandle<()> {
+    tokio::spawn(async move {
+        let client = Client::builder()
+            .timeout(Duration::from_secs(15))
+            .build()
+            .expect("failed to build HTTP client for notification poller");
+
+        let mut suppressed_404 = false;
+        let mut backoff_secs = 0u64;
+
+        info!(
+            interval_secs = interval.as_secs(),
+            "notification poller started"
+        );
+
+        loop {
+            tokio::time::sleep(if backoff_secs > 0 {
+                Duration::from_secs(backoff_secs)
+            } else {
+                interval
+            })
+            .await;
+
+            let url = format!(
+                "{}/api/v1/agent/notifications?deployment_hash={}",
+                dashboard_url, deployment_hash
+            );
+
+            let token = token_provider.get().await;
+            let headers = match build_signed_headers(&agent_id, &token, &[]) {
+                Ok(h) => h,
+                Err(e) => {
+                    error!(error = %e, "failed to build HMAC headers for notification poll");
+                    backoff_secs = (backoff_secs * 2).clamp(5, 300);
+                    continue;
+                }
+            };
+
+            match client.get(&url).headers(headers).send().await {
+                Ok(resp) => {
+                    let status = resp.status().as_u16();
+
+                    // Handle 401/403: refresh token and retry on next iteration
+                    if status == 401 || status == 403 {
+                        warn!(
+                            status,
+                            "auth error from notifications endpoint; refreshing token"
+                        );
+                        if let Err(e) = token_provider.refresh().await {
+                            warn!(error = %e, "token refresh failed");
+                        }
+                        backoff_secs = 5; // short backoff before retry with new token
+                        continue;
+                    }
+
+                    backoff_secs = 0;
+                    match status {
+                        200 => {
+                            suppressed_404 = false;
+                            match resp.json::<StackerNotificationsResponse>().await {
+                                Ok(body) => {
+                                    let count = body.notifications.len();
+                                    if count > 0 {
+                                        debug!(count, "received notifications from Stacker");
+                                        merge_notifications(&store, body.notifications).await;
+                                    }
+                                }
+                                Err(e) => {
+                                    warn!(error = %e, "failed to parse notifications response");
+                                }
+                            }
+                        }
+                        204 => {
+                            // No new notifications
+                        }
+                        404 => {
+                            if !suppressed_404 {
+                                info!("Stacker notifications endpoint not available (404), will retry silently");
+                                suppressed_404 = true;
+                            }
+                        }
+                        _ => {
+                            warn!(status, "unexpected status from notifications endpoint");
+                        }
+                    }
+                }
+                Err(e) => {
+                    debug!(error = %e, "notification poll failed (network)");
+                    backoff_secs = (backoff_secs * 2).clamp(5, 300);
+                }
+            }
+        }
+    })
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    fn sample_notification(id: &str, kind: NotificationKind) -> Notification {
+        Notification {
+            id: id.to_string(),
+            kind,
+            title: format!("Test {}", id),
+            message: "Test message".to_string(),
+            stack_id: Some("stack-1".to_string()),
+            stack_name: Some("MyStack".to_string()),
+            new_version: Some("2.0".to_string()),
+            created_at: "2026-04-12T00:00:00Z".to_string(),
+            read: false,
+        }
+    }
+
+    #[test]
+    fn notification_kind_serialization() {
+        let json = serde_json::to_string(&NotificationKind::StackUpdateAvailable).unwrap();
+        assert_eq!(json, r#""stack_update_available""#);
+
+        let json = serde_json::to_string(&NotificationKind::StackPublished).unwrap();
+        assert_eq!(json, r#""stack_published""#);
+
+        let json = serde_json::to_string(&NotificationKind::SystemNotice).unwrap();
+        assert_eq!(json, r#""system_notice""#);
+    }
+
+    #[test]
+    fn notification_kind_deserialization() {
+        let kind: NotificationKind = serde_json::from_str(r#""stack_update_available""#).unwrap();
+        assert_eq!(kind, NotificationKind::StackUpdateAvailable);
+    }
+
+    #[test]
+    fn notification_roundtrip() {
+        let n =
sample_notification("n1", NotificationKind::StackPublished); + let json = serde_json::to_string(&n).unwrap(); + let deserialized: Notification = serde_json::from_str(&json).unwrap(); + assert_eq!(deserialized.id, "n1"); + assert_eq!(deserialized.kind, NotificationKind::StackPublished); + assert!(!deserialized.read); + } + + #[test] + fn notification_read_defaults_false() { + let json = + r#"{"id":"x","kind":"system_notice","title":"t","message":"m","created_at":"now"}"#; + let n: Notification = serde_json::from_str(json).unwrap(); + assert!(!n.read); + } + + #[tokio::test] + async fn store_merge_deduplicates() { + let store = new_notification_store(); + let n1 = sample_notification("n1", NotificationKind::StackUpdateAvailable); + let n2 = sample_notification("n2", NotificationKind::StackPublished); + + merge_notifications(&store, vec![n1.clone(), n2]).await; + assert_eq!(store.read().await.len(), 2); + + // Merge again with duplicate id + let n1_dup = sample_notification("n1", NotificationKind::SystemNotice); + let n3 = sample_notification("n3", NotificationKind::SystemNotice); + merge_notifications(&store, vec![n1_dup, n3]).await; + assert_eq!(store.read().await.len(), 3); + + // Original n1 should still be StackUpdateAvailable (not replaced) + let locked = store.read().await; + let found = locked.iter().find(|n| n.id == "n1").unwrap(); + assert_eq!(found.kind, NotificationKind::StackUpdateAvailable); + } + + #[tokio::test] + async fn store_unread_count() { + let store = new_notification_store(); + let n1 = sample_notification("n1", NotificationKind::StackUpdateAvailable); + let mut n2 = sample_notification("n2", NotificationKind::StackPublished); + n2.read = true; + + merge_notifications(&store, vec![n1, n2]).await; + assert_eq!(get_unread_count(&store).await, 1); + } + + #[tokio::test] + async fn mark_read_by_ids() { + let store = new_notification_store(); + merge_notifications( + &store, + vec![ + sample_notification("n1", 
NotificationKind::StackUpdateAvailable), + sample_notification("n2", NotificationKind::StackPublished), + sample_notification("n3", NotificationKind::SystemNotice), + ], + ) + .await; + + mark_read(&store, &["n1".to_string(), "n3".to_string()], false).await; + + let locked = store.read().await; + assert!(locked.iter().find(|n| n.id == "n1").unwrap().read); + assert!(!locked.iter().find(|n| n.id == "n2").unwrap().read); + assert!(locked.iter().find(|n| n.id == "n3").unwrap().read); + } + + #[tokio::test] + async fn mark_read_all() { + let store = new_notification_store(); + merge_notifications( + &store, + vec![ + sample_notification("n1", NotificationKind::StackUpdateAvailable), + sample_notification("n2", NotificationKind::StackPublished), + ], + ) + .await; + + mark_read(&store, &[], true).await; + assert_eq!(get_unread_count(&store).await, 0); + } + + #[tokio::test] + async fn store_caps_at_100() { + let store = new_notification_store(); + let batch: Vec = (0..120) + .map(|i| sample_notification(&format!("n{}", i), NotificationKind::SystemNotice)) + .collect(); + merge_notifications(&store, batch).await; + assert_eq!(store.read().await.len(), 100); + } + + #[tokio::test] + async fn get_summary_returns_correct_data() { + let store = new_notification_store(); + let mut n1 = sample_notification("n1", NotificationKind::StackUpdateAvailable); + n1.read = true; + let n2 = sample_notification("n2", NotificationKind::StackPublished); + + merge_notifications(&store, vec![n1, n2]).await; + + let summary = get_summary(&store).await; + assert_eq!(summary.unread_count, 1); + assert_eq!(summary.notifications.len(), 2); + } +} diff --git a/src/main.rs b/src/main.rs index 75189e7..bdad5a2 100644 --- a/src/main.rs +++ b/src/main.rs @@ -7,6 +7,7 @@ use tracing::info; /// Application version from Cargo.toml const VERSION: &str = env!("CARGO_PKG_VERSION"); +const DISPLAY_VERSION: &str = env!("STATUS_DISPLAY_VERSION"); const PKG_NAME: &str = env!("CARGO_PKG_NAME"); /// Check that 
`path` points to a readable file. Prints a friendly error and @@ -161,7 +162,7 @@ fn print_banner() { } #[derive(Parser)] -#[command(name = "status", version, about = "")] +#[command(name = "status", version = DISPLAY_VERSION, about = "")] struct AppCli { /// Run in daemon mode (background) #[arg(long)] @@ -180,6 +181,13 @@ struct AppCli { command: Option, } +fn is_direct_version_request() -> bool { + matches!( + std::env::args().skip(1).collect::>().as_slice(), + [flag] if flag == "--version" || flag == "-V" + ) +} + #[derive(Subcommand)] enum Commands { /// Start HTTP server (local API) @@ -281,14 +289,17 @@ fn run_daemon() -> Result<()> { #[tokio::main] async fn main() -> Result<()> { + if is_direct_version_request() { + println!("{}", DISPLAY_VERSION); + return Ok(()); + } + // Load environment variables from .env if present let _ = dotenv(); utils::logging::init(); - // Show startup banner - print_banner(); - let args = AppCli::parse(); + print_banner(); if args.daemon { run_daemon()?; } @@ -302,7 +313,7 @@ async fn main() -> Result<()> { info!("Starting local API server on port {port}"); } let config = agent::config::Config::from_file(&args.config)?; - comms::local_api::serve(config, port, with_ui).await?; + comms::local_api::serve(config, &args.config, port, with_ui).await?; } #[cfg(feature = "docker")] Some(Commands::Containers) => { diff --git a/src/monitoring/alerting.rs b/src/monitoring/alerting.rs new file mode 100644 index 0000000..c6d44c6 --- /dev/null +++ b/src/monitoring/alerting.rs @@ -0,0 +1,573 @@ +use std::collections::HashMap; +use std::sync::Arc; +use std::time::{Duration, SystemTime, UNIX_EPOCH}; + +use reqwest::Client; +use serde::Serialize; +use tokio::sync::RwLock; +use tracing::{info, warn}; + +use crate::monitoring::MetricsSnapshot; + +// ---- Types ---- + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize)] +#[serde(rename_all = "snake_case")] +pub enum AlertSeverity { + Warning, + Critical, +} + +#[derive(Debug, Clone, Copy, 
PartialEq, Eq, Hash, Serialize)]
+#[serde(rename_all = "snake_case")]
+pub enum AlertKind {
+    HighCpu,
+    HighMemory,
+    HighDisk,
+}
+
+impl std::fmt::Display for AlertKind {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        match self {
+            AlertKind::HighCpu => write!(f, "high_cpu"),
+            AlertKind::HighMemory => write!(f, "high_memory"),
+            AlertKind::HighDisk => write!(f, "high_disk"),
+        }
+    }
+}
+
+/// An alert event ready for dispatch.
+#[derive(Debug, Clone, Serialize)]
+pub struct Alert {
+    pub kind: AlertKind,
+    pub severity: AlertSeverity,
+    pub message: String,
+    pub value: f32,
+    pub threshold: f32,
+    pub recovered: bool,
+    pub timestamp_ms: u128,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub agent_id: Option<String>,
+}
+
+/// Threshold configuration for a single metric.
+#[derive(Debug, Clone, Copy)]
+pub struct Threshold {
+    pub warning: f32,
+    pub critical: f32,
+}
+
+impl Threshold {
+    fn evaluate(&self, value: f32) -> Option<AlertSeverity> {
+        if value >= self.critical {
+            Some(AlertSeverity::Critical)
+        } else if value >= self.warning {
+            Some(AlertSeverity::Warning)
+        } else {
+            None
+        }
+    }
+}
+
+/// Alert system configuration.
+#[derive(Debug, Clone)]
+pub struct AlertConfig {
+    pub webhook_url: Option<String>,
+    pub cpu: Threshold,
+    pub memory: Threshold,
+    pub disk: Threshold,
+}
+
+impl AlertConfig {
+    /// Build config from environment variables.
+    ///
+    /// | Variable | Default | Description |
+    /// |----------|---------|-------------|
+    /// | `ALERT_WEBHOOK_URL` | _(none)_ | Webhook endpoint; alerting disabled if unset |
+    /// | `ALERT_CPU_WARNING` | 80 | CPU % warning threshold |
+    /// | `ALERT_CPU_CRITICAL` | 95 | CPU % critical threshold |
+    /// | `ALERT_MEMORY_WARNING` | 80 | Memory % warning threshold |
+    /// | `ALERT_MEMORY_CRITICAL` | 95 | Memory % critical threshold |
+    /// | `ALERT_DISK_WARNING` | 80 | Disk % warning threshold |
+    /// | `ALERT_DISK_CRITICAL` | 95 | Disk % critical threshold |
+    pub fn from_env() -> Self {
+        let parse = |var: &str, default: f32| -> f32 {
+            std::env::var(var)
+                .ok()
+                .and_then(|v| v.parse::<f32>().ok())
+                .unwrap_or(default)
+        };
+
+        Self {
+            webhook_url: std::env::var("ALERT_WEBHOOK_URL").ok(),
+            cpu: Threshold {
+                warning: parse("ALERT_CPU_WARNING", 80.0),
+                critical: parse("ALERT_CPU_CRITICAL", 95.0),
+            },
+            memory: Threshold {
+                warning: parse("ALERT_MEMORY_WARNING", 80.0),
+                critical: parse("ALERT_MEMORY_CRITICAL", 95.0),
+            },
+            disk: Threshold {
+                warning: parse("ALERT_DISK_WARNING", 80.0),
+                critical: parse("ALERT_DISK_CRITICAL", 95.0),
+            },
+        }
+    }
+}
+
+// ---- Alert State Tracker (deduplication + recovery) ----
+
+/// Tracks which alerts are currently active so we avoid duplicates and detect recovery.
+#[derive(Debug, Clone)]
+struct ActiveAlert {
+    severity: AlertSeverity,
+    #[allow(dead_code)]
+    fired_at_ms: u128,
+}
+
+/// Evaluates metrics against thresholds, deduplicates, and detects recovery.
+#[derive(Debug)]
+pub struct AlertManager {
+    config: AlertConfig,
+    active: RwLock<HashMap<AlertKind, ActiveAlert>>,
+    agent_id: Option<String>,
+}
+
+pub type SharedAlertManager = Arc<AlertManager>;
+
+impl AlertManager {
+    pub fn new(config: AlertConfig) -> Self {
+        let agent_id = std::env::var("AGENT_ID").ok();
+        Self {
+            config,
+            active: RwLock::new(HashMap::new()),
+            agent_id,
+        }
+    }
+
+    /// Returns `true` if alerting is enabled (webhook URL configured).
+    pub fn is_enabled(&self) -> bool {
+        self.config
+            .webhook_url
+            .as_ref()
+            .is_some_and(|u| !u.is_empty())
+    }
+
+    /// Read-only access to the alert configuration.
+    pub fn config(&self) -> &AlertConfig {
+        &self.config
+    }
+
+    /// Evaluate a metrics snapshot and return any new, escalated, or recovery alerts.
+    pub async fn evaluate(&self, snapshot: &MetricsSnapshot) -> Vec<Alert> {
+        let checks: [(AlertKind, f32, &Threshold); 3] = [
+            (AlertKind::HighCpu, snapshot.cpu_usage_pct, &self.config.cpu),
+            (
+                AlertKind::HighMemory,
+                snapshot.memory_used_pct,
+                &self.config.memory,
+            ),
+            (
+                AlertKind::HighDisk,
+                snapshot.disk_used_pct,
+                &self.config.disk,
+            ),
+        ];
+
+        let now_ms = SystemTime::now()
+            .duration_since(UNIX_EPOCH)
+            .map(|d| d.as_millis())
+            .unwrap_or_default();
+
+        let mut alerts = Vec::new();
+        let mut active = self.active.write().await;
+
+        for (kind, value, threshold) in checks {
+            match threshold.evaluate(value) {
+                Some(severity) => {
+                    // Check if already firing at this severity (dedup)
+                    let should_fire = match active.get(&kind) {
+                        Some(existing) => existing.severity != severity,
+                        None => true,
+                    };
+
+                    if should_fire {
+                        let label = match kind {
+                            AlertKind::HighCpu => "CPU usage",
+                            AlertKind::HighMemory => "Memory usage",
+                            AlertKind::HighDisk => "Disk usage",
+                        };
+                        let threshold_val = match severity {
+                            AlertSeverity::Warning => threshold.warning,
+                            AlertSeverity::Critical => threshold.critical,
+                        };
+                        alerts.push(Alert {
+                            kind,
+                            severity,
+                            message: format!(
+                                "{} at {:.1}% (threshold: {:.0}%)",
+                                label, value, threshold_val
+                            ),
+                            value,
+                            threshold: threshold_val,
+                            recovered: false,
+                            timestamp_ms: now_ms,
+                            agent_id: self.agent_id.clone(),
+                        });
+
+                        active.insert(
+                            kind,
+                            ActiveAlert {
+                                severity,
+                                fired_at_ms: now_ms,
+                            },
+                        );
+                    }
+                }
+                None => {
+                    // Value dropped below all thresholds — recovery
+                    if active.remove(&kind).is_some() {
+                        let label = match kind {
+                            AlertKind::HighCpu => "CPU usage",
+                            AlertKind::HighMemory => "Memory usage",
+                            AlertKind::HighDisk => "Disk usage",
+                        };
+                        alerts.push(Alert {
+                            kind,
+                            severity: AlertSeverity::Warning,
+                            message: format!("{} recovered to {:.1}%", label, value),
+                            value,
+                            threshold: threshold.warning,
+                            recovered: true,
+                            timestamp_ms: now_ms,
+                            agent_id: self.agent_id.clone(),
+                        });
+                    }
+                }
+            }
+        }
+
+        alerts
+    }
+}
+
+// ---- Webhook Dispatcher ----
+
+/// Payload sent to the alert webhook.
+#[derive(Debug, Serialize)]
+struct WebhookPayload {
+    alerts: Vec<Alert>,
+    agent_id: Option<String>,
+    timestamp_ms: u128,
+}
+
+/// Dispatch alerts to the configured webhook with retry and backoff.
+pub async fn dispatch_alerts(
+    client: &Client,
+    webhook_url: &str,
+    alerts: Vec<Alert>,
+    agent_id: Option<String>,
+) {
+    if alerts.is_empty() {
+        return;
+    }
+
+    let now_ms = SystemTime::now()
+        .duration_since(UNIX_EPOCH)
+        .map(|d| d.as_millis())
+        .unwrap_or_default();
+
+    let payload = WebhookPayload {
+        alerts,
+        agent_id: agent_id.clone(),
+        timestamp_ms: now_ms,
+    };
+
+    let max_retries: u8 = 3;
+    let mut delay = Duration::from_secs(1);
+
+    for attempt in 1..=max_retries {
+        let mut req = client.post(webhook_url).json(&payload);
+        if let Some(aid) = agent_id.as_ref() {
+            req = req.header("X-Agent-Id", aid);
+        }
+
+        match req.send().await {
+            Ok(resp) => {
+                let status = resp.status();
+                if status.is_success() {
+                    info!(count = payload.alerts.len(), "alerts dispatched to webhook");
+                    return;
+                }
+                if status.is_client_error() {
+                    warn!(
+                        attempt,
+                        status = %status,
+                        "alert webhook client error; not retrying"
+                    );
+                    return;
+                }
+                warn!(
+                    attempt,
+                    status = %status,
+                    "alert webhook server error; retrying"
+                );
+            }
+            Err(e) => {
+                warn!(attempt, error = %e, "alert webhook dispatch failed; retrying");
+            }
+        }
+
+        tokio::time::sleep(delay).await;
+        delay = (delay * 2).min(Duration::from_secs(16));
+    }
+
+    warn!("alert dispatch exhausted retries");
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    fn test_config(warning: f32, critical: f32) -> AlertConfig {
+        AlertConfig {
+            webhook_url: Some("http://test/alerts".into()),
+            cpu: Threshold { warning, critical },
+            memory: Threshold { warning, critical },
+            disk: Threshold { warning, critical },
+        }
+    }
+
+    fn snapshot_with(cpu: f32, mem: f32, disk: f32) -> MetricsSnapshot {
+        MetricsSnapshot {
+            timestamp_ms: 1700000000000,
+            cpu_usage_pct: cpu,
+            memory_total_bytes: 16_000_000_000,
+            memory_used_bytes: 8_000_000_000,
+            memory_used_pct: mem,
+            disk_total_bytes: 500_000_000_000,
+            disk_used_bytes: 250_000_000_000,
+            disk_used_pct: disk,
+        }
+    }
+
+    #[test]
+    fn threshold_evaluate_below() {
+        let t = Threshold {
+            warning: 80.0,
+            critical: 95.0,
+        };
+        assert_eq!(t.evaluate(50.0), None);
+    }
+
+    #[test]
+    fn threshold_evaluate_warning() {
+        let t = Threshold {
+            warning: 80.0,
+            critical: 95.0,
+        };
+        assert_eq!(t.evaluate(85.0), Some(AlertSeverity::Warning));
+    }
+
+    #[test]
+    fn threshold_evaluate_critical() {
+        let t = Threshold {
+            warning: 80.0,
+            critical: 95.0,
+        };
+        assert_eq!(t.evaluate(96.0), Some(AlertSeverity::Critical));
+    }
+
+    #[test]
+    fn threshold_evaluate_exact_boundary() {
+        let t = Threshold {
+            warning: 80.0,
+            critical: 95.0,
+        };
+        assert_eq!(t.evaluate(80.0), Some(AlertSeverity::Warning));
+        assert_eq!(t.evaluate(95.0), Some(AlertSeverity::Critical));
+    }
+
+    #[tokio::test]
+    async fn no_alerts_when_all_normal() {
+        let mgr = AlertManager::new(test_config(80.0, 95.0));
+        let snap = snapshot_with(30.0, 40.0, 50.0);
+        let alerts = mgr.evaluate(&snap).await;
+        assert!(alerts.is_empty());
+    }
+
+    #[tokio::test]
+    async fn fires_warning_on_high_cpu() {
+        let mgr = AlertManager::new(test_config(80.0, 95.0));
+        let snap = snapshot_with(85.0, 40.0, 50.0);
+        let alerts = mgr.evaluate(&snap).await;
+        assert_eq!(alerts.len(), 1);
+        assert_eq!(alerts[0].kind, AlertKind::HighCpu);
+        assert_eq!(alerts[0].severity, AlertSeverity::Warning);
+        assert!(!alerts[0].recovered);
+    }
+
+    #[tokio::test]
+    async fn fires_critical_on_high_memory() {
+        let mgr =
AlertManager::new(test_config(80.0, 95.0)); + let snap = snapshot_with(30.0, 96.0, 50.0); + let alerts = mgr.evaluate(&snap).await; + assert_eq!(alerts.len(), 1); + assert_eq!(alerts[0].kind, AlertKind::HighMemory); + assert_eq!(alerts[0].severity, AlertSeverity::Critical); + } + + #[tokio::test] + async fn dedup_same_severity() { + let mgr = AlertManager::new(test_config(80.0, 95.0)); + let snap = snapshot_with(85.0, 40.0, 50.0); + + let first = mgr.evaluate(&snap).await; + assert_eq!(first.len(), 1); + + // Same severity again → deduplicated + let second = mgr.evaluate(&snap).await; + assert!(second.is_empty()); + } + + #[tokio::test] + async fn escalation_fires_new_alert() { + let mgr = AlertManager::new(test_config(80.0, 95.0)); + + // Warning + let alerts = mgr.evaluate(&snapshot_with(85.0, 40.0, 50.0)).await; + assert_eq!(alerts.len(), 1); + assert_eq!(alerts[0].severity, AlertSeverity::Warning); + + // Escalate to critical + let alerts = mgr.evaluate(&snapshot_with(96.0, 40.0, 50.0)).await; + assert_eq!(alerts.len(), 1); + assert_eq!(alerts[0].severity, AlertSeverity::Critical); + } + + #[tokio::test] + async fn recovery_fires_alert() { + let mgr = AlertManager::new(test_config(80.0, 95.0)); + + // Fire + let _ = mgr.evaluate(&snapshot_with(85.0, 40.0, 50.0)).await; + + // Recover + let alerts = mgr.evaluate(&snapshot_with(50.0, 40.0, 50.0)).await; + assert_eq!(alerts.len(), 1); + assert!(alerts[0].recovered); + assert_eq!(alerts[0].kind, AlertKind::HighCpu); + } + + #[tokio::test] + async fn multiple_alerts_at_once() { + let mgr = AlertManager::new(test_config(80.0, 95.0)); + let snap = snapshot_with(96.0, 90.0, 85.0); + let alerts = mgr.evaluate(&snap).await; + assert_eq!(alerts.len(), 3); + } + + #[tokio::test] + async fn recovery_only_fires_once() { + let mgr = AlertManager::new(test_config(80.0, 95.0)); + + let _ = mgr.evaluate(&snapshot_with(85.0, 40.0, 50.0)).await; + let recovery = mgr.evaluate(&snapshot_with(50.0, 40.0, 50.0)).await; + 
assert_eq!(recovery.len(), 1); + + // No more recovery alerts + let again = mgr.evaluate(&snapshot_with(50.0, 40.0, 50.0)).await; + assert!(again.is_empty()); + } + + #[test] + fn alert_kind_display() { + assert_eq!(AlertKind::HighCpu.to_string(), "high_cpu"); + assert_eq!(AlertKind::HighMemory.to_string(), "high_memory"); + assert_eq!(AlertKind::HighDisk.to_string(), "high_disk"); + } + + #[test] + fn alert_serialization() { + let alert = Alert { + kind: AlertKind::HighCpu, + severity: AlertSeverity::Critical, + message: "CPU at 96%".into(), + value: 96.0, + threshold: 95.0, + recovered: false, + timestamp_ms: 1700000000000, + agent_id: Some("agent-1".into()), + }; + let json = serde_json::to_string(&alert).unwrap(); + assert!(json.contains("\"kind\":\"high_cpu\"")); + assert!(json.contains("\"severity\":\"critical\"")); + assert!(json.contains("\"recovered\":false")); + } + + #[test] + fn alert_config_defaults() { + // Don't set env vars — test defaults + let cfg = AlertConfig { + webhook_url: None, + cpu: Threshold { + warning: 80.0, + critical: 95.0, + }, + memory: Threshold { + warning: 80.0, + critical: 95.0, + }, + disk: Threshold { + warning: 80.0, + critical: 95.0, + }, + }; + assert!(cfg.webhook_url.is_none()); + assert_eq!(cfg.cpu.warning, 80.0); + assert_eq!(cfg.disk.critical, 95.0); + } + + #[test] + fn alert_manager_disabled_without_webhook() { + let config = AlertConfig { + webhook_url: None, + cpu: Threshold { + warning: 80.0, + critical: 95.0, + }, + memory: Threshold { + warning: 80.0, + critical: 95.0, + }, + disk: Threshold { + warning: 80.0, + critical: 95.0, + }, + }; + let mgr = AlertManager::new(config); + assert!(!mgr.is_enabled()); + } + + #[test] + fn alert_manager_enabled_with_webhook() { + let config = AlertConfig { + webhook_url: Some("http://hooks.example.com/alerts".into()), + cpu: Threshold { + warning: 80.0, + critical: 95.0, + }, + memory: Threshold { + warning: 80.0, + critical: 95.0, + }, + disk: Threshold { + warning: 80.0, + 
critical: 95.0, + }, + }; + let mgr = AlertManager::new(config); + assert!(mgr.is_enabled()); + } +} diff --git a/src/monitoring/mod.rs b/src/monitoring/mod.rs index 9a41984..5bb1573 100644 --- a/src/monitoring/mod.rs +++ b/src/monitoring/mod.rs @@ -1,3 +1,5 @@ +pub mod alerting; + use reqwest::Client; use serde::Serialize; use std::sync::Arc; @@ -8,6 +10,8 @@ use tokio::sync::{Mutex, RwLock}; use tokio::task::JoinHandle; use tracing::info; +use crate::monitoring::alerting::{dispatch_alerts, SharedAlertManager}; + #[derive(Debug, Clone, Serialize, Default)] pub struct MetricsSnapshot { pub timestamp_ms: u128, @@ -28,6 +32,15 @@ pub enum ControlPlane { ComposeAgent, } +impl ControlPlane { + pub fn from_value(value: Option<&str>) -> Self { + match value { + Some("compose_agent") => ControlPlane::ComposeAgent, + _ => ControlPlane::StatusPanel, + } + } +} + impl std::fmt::Display for ControlPlane { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { @@ -146,8 +159,14 @@ pub fn spawn_heartbeat( interval: Duration, tx: MetricsTx, webhook: Option, + alert_manager: Option, ) -> JoinHandle<()> { - let client = webhook.as_ref().map(|_| Client::new()); + let client = webhook.as_ref().map(|_| Client::new()).or_else(|| { + alert_manager + .as_ref() + .filter(|m| m.is_enabled()) + .map(|_| Client::new()) + }); let agent_id = std::env::var("AGENT_ID").ok(); tokio::spawn(async move { loop { @@ -184,7 +203,6 @@ pub fn spawn_heartbeat( tracing::debug!(attempt, status = %status, "metrics webhook push succeeded"); break; } else if status.is_client_error() { - // Do not retry on client-side errors (e.g., 401/403/404) tracing::warn!(attempt, status = %status, "metrics webhook push client error; not retrying"); break; } else { @@ -196,19 +214,32 @@ pub fn spawn_heartbeat( } } - // Jitter derived from current time to avoid herd effects let nanos = SystemTime::now() .duration_since(UNIX_EPOCH) .map(|d| d.subsec_nanos()) .unwrap_or(0); let jitter = 
Duration::from_millis(50 + (nanos % 200) as u64); tokio::time::sleep(delay + jitter).await; - // Exponential backoff capped at ~8s delay = delay.saturating_mul(2).min(Duration::from_secs(8)); } }); } + // Evaluate alert thresholds and dispatch if needed + if let (Some(mgr), Some(http)) = (alert_manager.as_ref(), client.as_ref()) { + if mgr.is_enabled() { + let alerts = mgr.evaluate(&snapshot).await; + if !alerts.is_empty() { + let http = http.clone(); + let url = mgr.config().webhook_url.clone().unwrap_or_default(); + let agent = agent_id.clone(); + tokio::spawn(async move { + dispatch_alerts(&http, &url, alerts, agent).await; + }); + } + } + } + info!( cpu = snapshot.cpu_usage_pct, mem_used_bytes = snapshot.memory_used_bytes, @@ -277,6 +308,23 @@ mod tests { assert_ne!(ControlPlane::StatusPanel, ControlPlane::ComposeAgent); } + #[test] + fn control_plane_from_value_defaults_to_status_panel() { + assert_eq!( + ControlPlane::from_value(Some("compose_agent")), + ControlPlane::ComposeAgent + ); + assert_eq!( + ControlPlane::from_value(Some("status_panel")), + ControlPlane::StatusPanel + ); + assert_eq!( + ControlPlane::from_value(Some("unexpected")), + ControlPlane::StatusPanel + ); + assert_eq!(ControlPlane::from_value(None), ControlPlane::StatusPanel); + } + #[test] fn command_execution_metrics_default() { let metrics = CommandExecutionMetrics::default(); diff --git a/src/security/mod.rs b/src/security/mod.rs index c2f5e55..86631ab 100644 --- a/src/security/mod.rs +++ b/src/security/mod.rs @@ -10,5 +10,6 @@ pub mod validation; // Vault integration for token rotation pub mod token_cache; +pub mod token_provider; pub mod token_refresh; pub mod vault_client; diff --git a/src/security/token_provider.rs b/src/security/token_provider.rs new file mode 100644 index 0000000..61f8bfc --- /dev/null +++ b/src/security/token_provider.rs @@ -0,0 +1,193 @@ +use std::sync::Arc; + +use anyhow::Result; +use chrono::{DateTime, Utc}; +use tokio::sync::RwLock; +use tracing::{debug, 
info, warn}; + +use super::vault_client::VaultClient; + +/// Minimum seconds between consecutive refresh attempts to prevent hammering. +const REFRESH_COOLDOWN_SECS: i64 = 10; + +/// Provides shared, refreshable access to the agent token. +/// +/// When a 401/403 is received from Stacker, callers invoke `refresh()` which: +/// 1. Checks a cooldown to avoid hammering Vault or env re-reads. +/// 2. Tries Vault (if configured) to get a new token. +/// 3. Falls back to re-reading `AGENT_TOKEN` from the environment. +/// 4. Returns whether the token actually changed. +#[derive(Debug, Clone)] +pub struct TokenProvider { + token: Arc>, + vault_client: Option, + deployment_hash: String, + last_refresh: Arc>>>, +} + +impl TokenProvider { + pub fn new( + initial_token: String, + vault_client: Option, + deployment_hash: String, + ) -> Self { + Self { + token: Arc::new(RwLock::new(initial_token)), + vault_client, + deployment_hash, + last_refresh: Arc::new(RwLock::new(None)), + } + } + + /// Build a provider from environment variables, optionally attaching a Vault client. + pub fn from_env(vault_client: Option) -> Self { + let token = std::env::var("AGENT_TOKEN").unwrap_or_default(); + let deployment_hash = + std::env::var("DEPLOYMENT_HASH").unwrap_or_else(|_| "default".to_string()); + Self::new(token, vault_client, deployment_hash) + } + + /// Get the current token value. + pub async fn get(&self) -> String { + self.token.read().await.clone() + } + + /// Attempt to refresh the token after a 401/403. + /// + /// Returns `Ok(true)` if the token was actually changed, `Ok(false)` if + /// it stayed the same (cooldown, Vault returned same token, no env change). 
+ pub async fn refresh(&self) -> Result { + // Cooldown check + { + let last = self.last_refresh.read().await; + if let Some(t) = *last { + let elapsed = (Utc::now() - t).num_seconds(); + if elapsed < REFRESH_COOLDOWN_SECS { + debug!( + elapsed, + cooldown = REFRESH_COOLDOWN_SECS, + "token refresh skipped (cooldown)" + ); + return Ok(false); + } + } + } + + // Record this attempt + { + let mut last = self.last_refresh.write().await; + *last = Some(Utc::now()); + } + + let old_token = self.token.read().await.clone(); + + // Strategy 1: Vault + if let Some(vault) = &self.vault_client { + match vault.fetch_agent_token(&self.deployment_hash, None).await { + Ok(new_token) if new_token != old_token => { + let mut token = self.token.write().await; + *token = new_token; + info!("Agent token refreshed from Vault after auth error"); + return Ok(true); + } + Ok(_) => { + debug!("Vault returned same token; trying env fallback"); + } + Err(e) => { + warn!(error = %e, "Vault token refresh failed; trying env fallback"); + } + } + } + + // Strategy 2: re-read AGENT_TOKEN from environment + let env_token = std::env::var("AGENT_TOKEN").unwrap_or_default(); + if !env_token.is_empty() && env_token != old_token { + let mut token = self.token.write().await; + *token = env_token; + info!("Agent token refreshed from environment after auth error"); + return Ok(true); + } + + debug!("No new token available after refresh attempt"); + Ok(false) + } + + /// Directly swap the token (used by background rotation tasks). + pub async fn swap(&self, new_token: String) { + let mut token = self.token.write().await; + if *token != new_token { + *token = new_token; + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::test_utils::EnvGuard; + use std::sync::{Mutex, OnceLock}; + + /// Serializes tests that mutate AGENT_TOKEN env var. 
+ fn env_lock() -> &'static Mutex<()> { + static LOCK: OnceLock> = OnceLock::new(); + LOCK.get_or_init(|| Mutex::new(())) + } + + #[tokio::test] + async fn get_returns_initial_token() { + let tp = TokenProvider::new("tok123".into(), None, "hash".into()); + assert_eq!(tp.get().await, "tok123"); + } + + #[tokio::test] + async fn swap_updates_token() { + let tp = TokenProvider::new("old".into(), None, "hash".into()); + tp.swap("new".into()).await; + assert_eq!(tp.get().await, "new"); + } + + #[tokio::test] + async fn refresh_without_vault_reads_env() { + let _guard = env_lock().lock().unwrap(); + let _env = EnvGuard::set("AGENT_TOKEN", "env_refreshed_tp"); + let tp = TokenProvider::new("stale".into(), None, "hash".into()); + + let changed = tp.refresh().await.unwrap(); + assert!(changed); + assert_eq!(tp.get().await, "env_refreshed_tp"); + } + + #[tokio::test] + async fn refresh_respects_cooldown() { + let _guard = env_lock().lock().unwrap(); + let _env = EnvGuard::set("AGENT_TOKEN", "fresh_tp"); + let tp = TokenProvider::new("stale".into(), None, "hash".into()); + + let first = tp.refresh().await.unwrap(); + assert!(first); + + // Second attempt within cooldown should be skipped + std::env::set_var("AGENT_TOKEN", "even_fresher_tp"); + let second = tp.refresh().await.unwrap(); + assert!(!second); + assert_eq!(tp.get().await, "fresh_tp"); + } + + #[tokio::test] + async fn refresh_noop_when_env_same() { + let _guard = env_lock().lock().unwrap(); + let _env = EnvGuard::set("AGENT_TOKEN", "same"); + let tp = TokenProvider::new("same".into(), None, "hash".into()); + + let changed = tp.refresh().await.unwrap(); + assert!(!changed); + } + + #[tokio::test] + async fn clone_shares_state() { + let tp = TokenProvider::new("a".into(), None, "h".into()); + let tp2 = tp.clone(); + tp2.swap("b".into()).await; + assert_eq!(tp.get().await, "b"); + } +} diff --git a/src/transport/grpc_client.rs b/src/transport/grpc_client.rs new file mode 100644 index 0000000..973a9cf --- /dev/null +++ 
b/src/transport/grpc_client.rs @@ -0,0 +1,190 @@ +use anyhow::{Context, Result}; +use serde_json::Value; +use std::time::Duration; +use tracing::info; + +pub mod pipe_proto { + tonic::include_proto!("pipe"); +} + +use pipe_proto::pipe_service_client::PipeServiceClient; +use pipe_proto::{PipeMessage, SubscribeRequest}; + +const GRPC_CONNECT_TIMEOUT: Duration = Duration::from_secs(10); +const GRPC_REQUEST_TIMEOUT: Duration = Duration::from_secs(30); + +/// Subscribe to a gRPC pipe source and read the first message. +pub async fn grpc_fetch_source( + endpoint: &str, + pipe_instance_id: &str, + step_id: &str, +) -> Result { + info!(endpoint, "grpc_fetch_source: connecting"); + let channel = tonic::transport::Endpoint::from_shared(endpoint.to_string()) + .with_context(|| format!("invalid gRPC endpoint: {endpoint}"))? + .connect_timeout(GRPC_CONNECT_TIMEOUT) + .timeout(GRPC_REQUEST_TIMEOUT) + .connect() + .await + .with_context(|| format!("gRPC connection failed: {endpoint}"))?; + + let mut client = PipeServiceClient::new(channel); + + let request = tonic::Request::new(SubscribeRequest { + pipe_instance_id: pipe_instance_id.to_string(), + step_id: step_id.to_string(), + filters: Default::default(), + }); + + let mut stream = client + .subscribe(request) + .await + .with_context(|| "gRPC subscribe failed")? + .into_inner(); + + match stream.message().await { + Ok(Some(msg)) => { + let payload = msg + .payload + .map(|s| struct_to_json(&s)) + .unwrap_or_else(|| serde_json::json!({})); + Ok(payload) + } + Ok(None) => Err(anyhow::anyhow!("gRPC stream closed without data")), + Err(e) => Err(anyhow::anyhow!("gRPC read error: {e}")), + } +} + +/// Send data to a gRPC pipe target via unary RPC. 
+pub async fn grpc_send_target( + endpoint: &str, + pipe_instance_id: &str, + step_id: &str, + data: &Value, +) -> Result<(u16, Value)> { + info!(endpoint, "grpc_send_target: connecting"); + let channel = tonic::transport::Endpoint::from_shared(endpoint.to_string()) + .with_context(|| format!("invalid gRPC endpoint: {endpoint}"))? + .connect_timeout(GRPC_CONNECT_TIMEOUT) + .timeout(GRPC_REQUEST_TIMEOUT) + .connect() + .await + .with_context(|| format!("gRPC connection failed: {endpoint}"))?; + + let mut client = PipeServiceClient::new(channel); + + let payload_struct = + json_to_struct(data).with_context(|| "failed to convert payload to gRPC Struct")?; + + let request = tonic::Request::new(PipeMessage { + pipe_instance_id: pipe_instance_id.to_string(), + step_id: step_id.to_string(), + payload: Some(payload_struct), + timestamp_ms: chrono::Utc::now().timestamp_millis(), + }); + + let response = client + .send(request) + .await + .with_context(|| "gRPC send failed")? + .into_inner(); + + let status = if response.success { 200 } else { 500 }; + Ok(( + status, + serde_json::json!({ + "grpc_delivered": response.success, + "message": response.message, + }), + )) +} + +// ── Conversion helpers: serde_json ↔ prost_types::Struct ── + +fn json_to_struct(value: &Value) -> Result { + let fields = match value.as_object() { + Some(map) => map + .iter() + .map(|(k, v)| Ok((k.clone(), json_to_prost_value(v)?))) + .collect::>()?, + None => { + return Err(anyhow::anyhow!( + "gRPC Struct conversion requires a JSON object, got: {}", + match value { + Value::Array(_) => "array", + Value::String(_) => "string", + Value::Number(_) => "number", + Value::Bool(_) => "bool", + Value::Null => "null", + _ => "unknown", + } + )); + } + }; + Ok(prost_types::Struct { fields }) +} + +fn json_to_prost_value(value: &Value) -> Result { + use prost_types::value::Kind; + let kind = match value { + Value::Null => Kind::NullValue(0), + Value::Bool(b) => Kind::BoolValue(*b), + Value::Number(n) => 
Kind::NumberValue( + n.as_f64() + .ok_or_else(|| anyhow::anyhow!("number {n} cannot be represented as f64"))?, + ), + Value::String(s) => Kind::StringValue(s.clone()), + Value::Array(arr) => Kind::ListValue(prost_types::ListValue { + values: arr.iter().map(json_to_prost_value).collect::>()?, + }), + Value::Object(_) => Kind::StructValue(json_to_struct(value)?), + }; + Ok(prost_types::Value { kind: Some(kind) }) +} + +fn struct_to_json(s: &prost_types::Struct) -> Value { + let map: serde_json::Map = s + .fields + .iter() + .map(|(k, v)| (k.clone(), prost_value_to_json(v))) + .collect(); + Value::Object(map) +} + +fn prost_value_to_json(v: &prost_types::Value) -> Value { + use prost_types::value::Kind; + match &v.kind { + Some(Kind::NullValue(_)) => Value::Null, + Some(Kind::BoolValue(b)) => Value::Bool(*b), + Some(Kind::NumberValue(n)) => serde_json::json!(*n), + Some(Kind::StringValue(s)) => Value::String(s.clone()), + Some(Kind::ListValue(list)) => { + Value::Array(list.values.iter().map(prost_value_to_json).collect()) + } + Some(Kind::StructValue(s)) => struct_to_json(s), + None => Value::Null, + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_json_struct_roundtrip() { + let original = serde_json::json!({"name": "test", "count": 42, "active": true}); + let proto = json_to_struct(&original).unwrap(); + let back = struct_to_json(&proto); + assert_eq!(back["name"], "test"); + assert_eq!(back["count"], 42.0); + assert_eq!(back["active"], true); + } + + #[test] + fn test_json_to_struct_rejects_non_object() { + assert!(json_to_struct(&serde_json::json!("string")).is_err()); + assert!(json_to_struct(&serde_json::json!(42)).is_err()); + assert!(json_to_struct(&serde_json::json!([1, 2])).is_err()); + assert!(json_to_struct(&serde_json::json!(null)).is_err()); + } +} diff --git a/src/transport/http_polling.rs b/src/transport/http_polling.rs index a29b288..4845f50 100644 --- a/src/transport/http_polling.rs +++ b/src/transport/http_polling.rs @@ -11,6 
+11,8 @@ use tracing::{debug, trace}; use uuid::Uuid; use crate::security::request_signer::compute_signature_base64; +use crate::security::token_provider::TokenProvider; +use crate::transport::retry::{signed_get_with_retry, signed_post_with_retry, RetryConfig}; use crate::transport::Command; const TS_OVERRIDE_ENV: &str = "HTTP_POLLING_TS_OVERRIDE"; @@ -34,7 +36,7 @@ fn signing_meta() -> (i64, String) { (ts, request_id) } -fn build_signed_headers(agent_id: &str, agent_token: &str, body: &[u8]) -> Result { +pub fn build_signed_headers(agent_id: &str, agent_token: &str, body: &[u8]) -> Result { let (ts, request_id) = signing_meta(); let sig = compute_signature_base64(agent_token, body); @@ -349,6 +351,7 @@ pub async fn report_result( result: &Option, error: &Option, completed_at: &str, + executed_by: Option<&str>, ) -> Result<()> { let url = format!("{}/api/v1/agent/commands/report", base_url); @@ -369,6 +372,12 @@ pub async fn report_result( "completed_at".to_string(), serde_json::Value::String(completed_at.to_string()), ); + if let Some(executed_by) = executed_by { + body.insert( + "executed_by".to_string(), + serde_json::Value::String(executed_by.to_string()), + ); + } if let Some(res) = result { body.insert("result".to_string(), res.clone()); @@ -424,6 +433,127 @@ pub async fn update_app_status( } } +// ---- Retry-aware variants (use TokenProvider + automatic 401/403 refresh) ---- + +/// Long-poll for a command with automatic token refresh on 401/403. 
+pub async fn wait_for_command_with_retry( + base_url: &str, + deployment_hash: &str, + agent_id: &str, + token_provider: &TokenProvider, + timeout_secs: u64, + priority: Option<&str>, +) -> Result { + let url = build_wait_command_url(base_url, deployment_hash, timeout_secs, priority); + let client = create_http_client()?; + + debug!( + url = %url, + deployment_hash = %deployment_hash, + timeout_secs = %timeout_secs, + "initiating long-poll with retry" + ); + + let config = RetryConfig::auth_only(); + let response = signed_get_with_retry( + &client, + &url, + agent_id, + token_provider, + Duration::from_secs(timeout_secs + 5), + &config, + ) + .await?; + + handle_poll_response(response, &url).await +} + +/// Report command result with automatic token refresh on 401/403. +#[allow(clippy::too_many_arguments)] +pub async fn report_result_with_retry( + base_url: &str, + agent_id: &str, + token_provider: &TokenProvider, + command_id: &str, + deployment_hash: &str, + status: &str, + result: &Option, + error: &Option, + completed_at: &str, + executed_by: Option<&str>, +) -> Result<()> { + let url = format!("{}/api/v1/agent/commands/report", base_url); + + let mut body = serde_json::Map::new(); + body.insert("command_id".into(), Value::String(command_id.into())); + body.insert( + "deployment_hash".into(), + Value::String(deployment_hash.into()), + ); + body.insert("status".into(), Value::String(status.into())); + body.insert("completed_at".into(), Value::String(completed_at.into())); + if let Some(executed_by) = executed_by { + body.insert("executed_by".into(), Value::String(executed_by.to_string())); + } + + if let Some(res) = result { + body.insert("result".into(), res.clone()); + } + body.insert( + "error".into(), + error + .as_ref() + .map(|e| Value::String(e.clone())) + .unwrap_or(Value::Null), + ); + + debug!(url = %url, body = ?body, "reporting result with retry"); + + let client = Client::new(); + let config = RetryConfig::default(); + let resp = + 
signed_post_with_retry(&client, &url, agent_id, token_provider, &body, &config).await?; + let status_code = resp.status(); + + if status_code.is_success() { + debug!(status_code = %status_code.as_u16(), "command result reported successfully"); + Ok(()) + } else { + let error_body = resp + .text() + .await + .unwrap_or_else(|_| "".to_string()); + Err(anyhow!( + "report failed: {} | body: {}", + status_code, + error_body + )) + } +} + +/// Update app status with automatic token refresh on 401/403. +pub async fn update_app_status_with_retry( + base_url: &str, + agent_id: &str, + token_provider: &TokenProvider, + payload: &T, +) -> Result<()> { + let url = format!("{}/api/v1/apps/status", base_url); + let client = Client::new(); + let config = RetryConfig::default(); + let resp = + signed_post_with_retry(&client, &url, agent_id, token_provider, payload, &config).await?; + + if resp.status().is_success() { + Ok(()) + } else { + Err(anyhow::anyhow!( + "app status update failed: {}", + resp.status() + )) + } +} + #[cfg(test)] mod tests { use super::*; @@ -453,6 +583,7 @@ mod tests { let result: Option = None; let error = None; let completed_at = "2023-11-15T10:00:00Z"; + let executed_by = Some("compose_agent"); let mut payload = serde_json::Map::new(); payload.insert( @@ -471,6 +602,10 @@ mod tests { "completed_at".to_string(), serde_json::Value::String(completed_at.to_string()), ); + payload.insert( + "executed_by".to_string(), + serde_json::Value::String(executed_by.unwrap().to_string()), + ); if let Some(value) = result.clone() { payload.insert("result".to_string(), value); } @@ -505,12 +640,92 @@ mod tests { &result, &error, completed_at, + executed_by, ) .await .expect("report_result should succeed"); mock.assert(); } + #[tokio::test] + async fn report_result_with_retry_posts_executed_by() { + let _guard = env_lock().lock().expect("env lock poisoned"); + env::set_var(TS_OVERRIDE_ENV, "1700000000"); + env::set_var(REQUEST_ID_OVERRIDE_ENV, "req-123-retry"); + + let mut 
server = Server::new_async().await; + let base_url = server.url(); + let agent_id = "agent-123"; + let agent_token = "token-abc"; + let token_provider = + TokenProvider::new(agent_token.to_string(), None, "dep-hash-123".into()); + let command_id = "cmd-1"; + let deployment_hash = "dep-hash-123"; + let status = "success"; + let result: Option = None; + let error = None; + let completed_at = "2023-11-15T10:00:00Z"; + let executed_by = Some("compose_agent"); + + let mut payload = serde_json::Map::new(); + payload.insert( + "command_id".into(), + serde_json::Value::String(command_id.to_string()), + ); + payload.insert( + "deployment_hash".into(), + serde_json::Value::String(deployment_hash.to_string()), + ); + payload.insert( + "status".into(), + serde_json::Value::String(status.to_string()), + ); + payload.insert( + "completed_at".into(), + serde_json::Value::String(completed_at.to_string()), + ); + payload.insert( + "executed_by".into(), + serde_json::Value::String(executed_by.unwrap().to_string()), + ); + payload.insert("error".into(), serde_json::Value::Null); + + let body = serde_json::to_vec(&payload).unwrap(); + let signature = compute_signature_base64(agent_token, &body); + let ts = env::var(TS_OVERRIDE_ENV).unwrap(); + let req_id = env::var(REQUEST_ID_OVERRIDE_ENV).unwrap(); + let mock = server + .mock("POST", "/api/v1/agent/commands/report") + .match_header("X-Agent-Id", Matcher::Exact(agent_id.into())) + .match_header( + "Authorization", + Matcher::Exact(format!("Bearer {}", agent_token)), + ) + .match_header("X-Timestamp", Matcher::Exact(ts)) + .match_header("X-Request-Id", Matcher::Exact(req_id)) + .match_header("X-Agent-Signature", Matcher::Exact(signature)) + .match_body(Matcher::Exact(String::from_utf8(body).unwrap())) + .with_status(200) + .create_async() + .await; + + report_result_with_retry( + &base_url, + agent_id, + &token_provider, + command_id, + deployment_hash, + status, + &result, + &error, + completed_at, + executed_by, + ) + .await + 
.expect("report_result_with_retry should succeed"); + mock.assert(); + } + #[tokio::test] async fn update_app_status_posts_payload() { let _guard = env_lock().lock().expect("env lock poisoned"); diff --git a/src/transport/mod.rs b/src/transport/mod.rs index 293213b..fc4271c 100644 --- a/src/transport/mod.rs +++ b/src/transport/mod.rs @@ -1,4 +1,6 @@ +pub mod grpc_client; pub mod http_polling; +pub mod retry; pub mod websocket; use serde::{Deserialize, Serialize}; @@ -35,6 +37,8 @@ pub struct CommandResult { pub truncated: Option, #[serde(skip_serializing_if = "Option::is_none")] pub cursor: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub executed_by: Option, } #[derive(Debug, Clone, Serialize, Deserialize)] @@ -54,4 +58,6 @@ pub struct StackerCommandReport { pub result: Option, #[serde(skip_serializing_if = "Option::is_none")] pub error: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub executed_by: Option, } diff --git a/src/transport/retry.rs b/src/transport/retry.rs new file mode 100644 index 0000000..1d4f8db --- /dev/null +++ b/src/transport/retry.rs @@ -0,0 +1,227 @@ +use std::time::Duration; + +use anyhow::{Context, Result}; +use reqwest::header::CONTENT_TYPE; +use reqwest::{Client, Response}; +use serde::Serialize; +use tracing::warn; + +use crate::security::token_provider::TokenProvider; +use crate::transport::http_polling::build_signed_headers; + +/// Configuration for retry behaviour on outbound Stacker requests. +#[derive(Debug, Clone)] +pub struct RetryConfig { + /// How many times to retry after a 401/403 (each attempt refreshes the token first). + pub max_auth_retries: u32, + /// How many times to retry on 5xx / network errors with exponential backoff. + pub max_server_retries: u32, + /// Starting backoff duration for server/network retries. + pub initial_backoff: Duration, + /// Maximum backoff cap. 
+ pub max_backoff: Duration, +} + +impl Default for RetryConfig { + fn default() -> Self { + Self { + max_auth_retries: 1, + max_server_retries: 3, + initial_backoff: Duration::from_secs(2), + max_backoff: Duration::from_secs(60), + } + } +} + +impl RetryConfig { + /// Suitable for long-poll requests where server retries are handled by the outer loop. + pub fn auth_only() -> Self { + Self { + max_auth_retries: 1, + max_server_retries: 0, + initial_backoff: Duration::from_secs(2), + max_backoff: Duration::from_secs(60), + } + } +} + +/// Returns `true` if the status code indicates an auth failure (401 or 403). +fn is_auth_error(status: u16) -> bool { + status == 401 || status == 403 +} + +/// Send a signed GET request, automatically refreshing the token on 401/403. +pub async fn signed_get_with_retry( + client: &Client, + url: &str, + agent_id: &str, + token_provider: &TokenProvider, + timeout: Duration, + config: &RetryConfig, +) -> Result { + let mut auth_retries = 0u32; + let mut server_retries = 0u32; + let mut backoff = config.initial_backoff; + + loop { + let token = token_provider.get().await; + let headers = build_signed_headers(agent_id, &token, &[])?; + + let result = client + .get(url) + .headers(headers) + .timeout(timeout) + .send() + .await; + + match result { + Ok(resp) => { + let status = resp.status().as_u16(); + + if is_auth_error(status) && auth_retries < config.max_auth_retries { + auth_retries += 1; + warn!( + status, + attempt = auth_retries, + url = %url, + "auth error from Stacker; refreshing token and retrying" + ); + token_provider.refresh().await?; + continue; + } + + if resp.status().is_server_error() && server_retries < config.max_server_retries { + server_retries += 1; + warn!( + status, + attempt = server_retries, + backoff_ms = backoff.as_millis() as u64, + "server error; retrying with backoff" + ); + tokio::time::sleep(backoff).await; + backoff = (backoff * 2).min(config.max_backoff); + continue; + } + + return Ok(resp); + } + Err(e) 
=> { + if server_retries < config.max_server_retries { + server_retries += 1; + warn!( + error = %e, + attempt = server_retries, + "network error; retrying with backoff" + ); + tokio::time::sleep(backoff).await; + backoff = (backoff * 2).min(config.max_backoff); + continue; + } + return Err(e).context("signed GET failed after retries"); + } + } + } +} + +/// Send a signed POST (JSON body) request with 401/403 retry. +pub async fn signed_post_with_retry( + client: &Client, + url: &str, + agent_id: &str, + token_provider: &TokenProvider, + payload: &T, + config: &RetryConfig, +) -> Result { + let body_bytes = serde_json::to_vec(payload).context("serialize JSON body")?; + let mut auth_retries = 0u32; + let mut server_retries = 0u32; + let mut backoff = config.initial_backoff; + + loop { + let token = token_provider.get().await; + let headers = build_signed_headers(agent_id, &token, &body_bytes)?; + + let result = client + .post(url) + .headers(headers) + .header(CONTENT_TYPE, "application/json") + .body(body_bytes.clone()) + .send() + .await; + + match result { + Ok(resp) => { + let status = resp.status().as_u16(); + + if is_auth_error(status) && auth_retries < config.max_auth_retries { + auth_retries += 1; + warn!( + status, + attempt = auth_retries, + url = %url, + "auth error on POST; refreshing token and retrying" + ); + token_provider.refresh().await?; + continue; + } + + if resp.status().is_server_error() && server_retries < config.max_server_retries { + server_retries += 1; + warn!( + status, + attempt = server_retries, + backoff_ms = backoff.as_millis() as u64, + "server error on POST; retrying with backoff" + ); + tokio::time::sleep(backoff).await; + backoff = (backoff * 2).min(config.max_backoff); + continue; + } + + return Ok(resp); + } + Err(e) => { + if server_retries < config.max_server_retries { + server_retries += 1; + warn!( + error = %e, + attempt = server_retries, + "network error on POST; retrying with backoff" + ); + 
tokio::time::sleep(backoff).await; + backoff = (backoff * 2).min(config.max_backoff); + continue; + } + return Err(e).context("signed POST failed after retries"); + } + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn default_config_values() { + let cfg = RetryConfig::default(); + assert_eq!(cfg.max_auth_retries, 1); + assert_eq!(cfg.max_server_retries, 3); + } + + #[test] + fn auth_only_config_no_server_retries() { + let cfg = RetryConfig::auth_only(); + assert_eq!(cfg.max_server_retries, 0); + assert_eq!(cfg.max_auth_retries, 1); + } + + #[test] + fn is_auth_error_detects_401_403() { + assert!(is_auth_error(401)); + assert!(is_auth_error(403)); + assert!(!is_auth_error(200)); + assert!(!is_auth_error(500)); + assert!(!is_auth_error(404)); + } +} diff --git a/src/transport/websocket.rs b/src/transport/websocket.rs index 2810ba3..39a609f 100644 --- a/src/transport/websocket.rs +++ b/src/transport/websocket.rs @@ -1,11 +1,122 @@ -use anyhow::Result; -use tracing::{debug, info}; - -/// Placeholder for WebSocket streaming (logs/metrics/status). -/// This stub will be replaced with a `tokio_tungstenite` client. -pub async fn connect_and_stream(_ws_url: &str) -> Result<()> { - info!("WebSocket stub: connect_and_stream called"); - // TODO: implement ping/pong heartbeat and reconnection - debug!("Streaming stub active"); +use anyhow::{Context, Result}; +use futures_util::{SinkExt, StreamExt}; +use serde_json::Value; +use std::time::Duration; +use tokio_tungstenite::{connect_async, tungstenite::Message}; +use tracing::{debug, info, warn}; + +const WS_READ_TIMEOUT: Duration = Duration::from_secs(30); + +/// Connect to a WebSocket endpoint and read the first message as pipe source data. 
+pub async fn ws_fetch_source(url: &str) -> Result { + info!(url, "ws_fetch_source: connecting"); + let (ws_stream, _) = connect_async(url) + .await + .with_context(|| format!("WebSocket connection failed: {url}"))?; + + let (mut write, mut read) = ws_stream.split(); + + loop { + let msg = tokio::time::timeout(WS_READ_TIMEOUT, read.next()) + .await + .with_context(|| format!("ws_fetch_source: timed out after {WS_READ_TIMEOUT:?}"))?; + + match msg { + Some(Ok(Message::Text(text))) => { + debug!(len = text.len(), "ws_fetch_source: received text"); + return serde_json::from_str::(&text) + .with_context(|| "ws_fetch_source: failed to parse JSON"); + } + Some(Ok(Message::Binary(bin))) => { + debug!(len = bin.len(), "ws_fetch_source: received binary"); + return serde_json::from_slice::(&bin) + .with_context(|| "ws_fetch_source: failed to parse binary JSON"); + } + Some(Ok(Message::Ping(payload))) => { + debug!(len = payload.len(), "ws_fetch_source: received ping"); + write + .send(Message::Pong(payload)) + .await + .with_context(|| "ws_fetch_source: failed to send pong")?; + } + Some(Ok(Message::Pong(_))) => { + debug!("ws_fetch_source: received pong, ignoring"); + } + Some(Ok(Message::Close(frame))) => { + return Err(anyhow::anyhow!( + "ws_fetch_source: stream closed before data: {frame:?}" + )); + } + Some(Ok(other)) => { + debug!(message = %other, "ws_fetch_source: ignoring non-data frame"); + } + Some(Err(e)) => return Err(anyhow::anyhow!("ws_fetch_source read error: {e}")), + None => { + return Err(anyhow::anyhow!( + "ws_fetch_source: stream closed without data" + )) + } + } + } +} + +/// Send JSON data to a WebSocket endpoint (pipe target). 
+pub async fn ws_send_target(url: &str, data: &Value) -> Result<(u16, Value)> { + info!(url, "ws_send_target: connecting"); + let (ws_stream, _) = connect_async(url) + .await + .with_context(|| format!("WebSocket connection failed: {url}"))?; + + let (mut write, _read) = ws_stream.split(); + + let payload = + serde_json::to_string(data).with_context(|| "ws_send_target: failed to serialize")?; + + write + .send(Message::Text(payload.into())) + .await + .with_context(|| "ws_send_target: failed to send")?; + + info!(url, "ws_send_target: data sent"); + Ok((200, serde_json::json!({"ws_delivered": true}))) +} + +/// Connect to a WebSocket endpoint for streaming logs/metrics/status. +/// Reads messages in a loop until the stream closes or an error occurs. +pub async fn connect_and_stream(ws_url: &str) -> Result<()> { + info!(ws_url, "connect_and_stream: connecting"); + let (ws_stream, _) = connect_async(ws_url) + .await + .with_context(|| format!("WebSocket streaming connection failed: {ws_url}"))?; + + let (mut write, mut read) = ws_stream.split(); + + while let Some(msg) = read.next().await { + match msg { + Ok(Message::Text(text)) => { + debug!(len = text.len(), "stream message received"); + } + Ok(Message::Ping(payload)) => { + debug!(len = payload.len(), "stream ping received"); + if let Err(e) = write.send(Message::Pong(payload)).await { + warn!(error = %e, "failed to send pong"); + break; + } + } + Ok(Message::Close(frame)) => { + info!("stream closed by server"); + if let Err(e) = write.send(Message::Close(frame)).await { + warn!(error = %e, "failed to acknowledge close frame"); + } + break; + } + Err(e) => { + warn!(error = %e, "stream error"); + break; + } + _ => {} + } + } + Ok(()) } diff --git a/static/css/style.css b/static/css/style.css index eac406d..4eb10aa 100644 --- a/static/css/style.css +++ b/static/css/style.css @@ -265,6 +265,137 @@ h1, h2, h3, h4 { .badge-warning .badge-dot { background: var(--color-warning); } .badge-error .badge-dot { background: 
var(--color-error); } +/* ===== Notification bell ===== */ +.notification-bell { + position: relative; + cursor: pointer; + background: none; + border: none; + color: var(--color-text-light); + padding: 4px; + border-radius: var(--radius-md); + transition: color 0.15s, background 0.15s; +} +.notification-bell:hover { + color: var(--color-accent); + background: var(--color-accent-light); +} +.notification-bell .material-icons-outlined { + font-size: 24px; + display: block; +} +.notification-bell .bell-badge { + position: absolute; + top: 0; + right: -2px; + min-width: 18px; + height: 18px; + padding: 0 5px; + border-radius: 9px; + background: var(--color-error); + color: #fff; + font-size: 11px; + font-weight: 700; + line-height: 18px; + text-align: center; + display: none; +} +.notification-bell .bell-badge.has-unread { + display: block; +} + +.notification-dropdown { + position: absolute; + top: calc(100% + 8px); + right: 0; + width: 360px; + max-height: 420px; + background: var(--color-bg-card); + border: 1px solid var(--color-border); + border-radius: var(--radius-lg); + box-shadow: var(--shadow-lg); + z-index: 100; + display: none; + overflow: hidden; +} +.notification-dropdown.open { + display: flex; + flex-direction: column; +} +.notification-dropdown-header { + padding: 12px 16px; + border-bottom: 1px solid var(--color-border); + display: flex; + align-items: center; + justify-content: space-between; +} +.notification-dropdown-header h4 { + font-size: 14px; + font-weight: 700; + margin: 0; +} +.notification-dropdown-header button { + font-size: 12px; + color: var(--color-accent); + background: none; + border: none; + cursor: pointer; + padding: 0; +} +.notification-dropdown-header button:hover { + text-decoration: underline; +} +.notification-dropdown-list { + overflow-y: auto; + flex: 1; + max-height: 360px; +} +.notification-item { + padding: 12px 16px; + border-bottom: 1px solid var(--color-border); + display: flex; + gap: 10px; + align-items: flex-start; 
+ transition: background 0.1s; +} +.notification-item:last-child { border-bottom: none; } +.notification-item:hover { background: var(--color-bg); } +.notification-item.unread { background: var(--color-accent-light); } +.notification-item .notif-icon { + font-size: 20px; + color: var(--color-accent); + flex-shrink: 0; + margin-top: 2px; +} +.notification-item .notif-icon.update { color: var(--color-warning); } +.notification-item .notif-icon.publish { color: var(--color-success); } +.notification-item .notif-icon.system { color: var(--color-info); } +.notification-item .notif-body { flex: 1; min-width: 0; } +.notification-item .notif-title { + font-size: 13px; + font-weight: 600; + color: var(--color-text-title); + margin: 0 0 2px; + white-space: nowrap; + overflow: hidden; + text-overflow: ellipsis; +} +.notification-item .notif-message { + font-size: 12px; + color: var(--color-text-light); + margin: 0; + display: -webkit-box; + -webkit-line-clamp: 2; + -webkit-box-orient: vertical; + overflow: hidden; +} +.notification-empty { + padding: 32px 16px; + text-align: center; + color: var(--color-text-muted); + font-size: 13px; +} + /* ===== Buttons ===== */ .btn { display: inline-flex; diff --git a/static/js/app.js b/static/js/app.js index 1a1f612..c4be247 100644 --- a/static/js/app.js +++ b/static/js/app.js @@ -58,4 +58,94 @@ document.addEventListener('DOMContentLoaded', () => { if (e.target === overlay) overlay.classList.remove('open'); }); }); + + // ---- Notification bell ---- + const bell = document.getElementById('notification-bell'); + const bellBadge = document.getElementById('bell-badge'); + const dropdown = document.getElementById('notification-dropdown'); + const notifList = document.getElementById('notification-list'); + const markAllBtn = document.getElementById('mark-all-read'); + + if (bell && dropdown) { + bell.addEventListener('click', (e) => { + e.stopPropagation(); + const isOpen = dropdown.classList.toggle('open'); + if (isOpen) 
fetchNotifications(); + }); + + document.addEventListener('click', (e) => { + if (!dropdown.contains(e.target) && !bell.contains(e.target)) { + dropdown.classList.remove('open'); + } + }); + + if (markAllBtn) { + markAllBtn.addEventListener('click', async () => { + try { + await fetch('/api/v1/notifications/read', { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ all: true }), + }); + fetchNotifications(); + pollUnreadCount(); + } catch (_) {} + }); + } + + function notifIcon(kind) { + switch (kind) { + case 'stack_update_available': + return 'system_update'; + case 'stack_published': + return 'new_releases'; + default: + return 'info'; + } + } + + async function fetchNotifications() { + try { + const resp = await fetch('/api/v1/notifications'); + if (!resp.ok) return; + const data = await resp.json(); + if (!data.notifications || data.notifications.length === 0) { + notifList.innerHTML = '
No notifications
'; + return; + } + notifList.innerHTML = data.notifications.map(n => + `
+ ${notifIcon(n.kind)} +
+

${escapeHtml(n.title)}

+

${escapeHtml(n.message)}

+
+
` + ).join(''); + } catch (_) {} + } + + async function pollUnreadCount() { + try { + const resp = await fetch('/api/v1/notifications/unread-count'); + if (!resp.ok) return; + const data = await resp.json(); + const count = data.unread_count || 0; + if (bellBadge) { + bellBadge.textContent = count > 99 ? '99+' : String(count); + bellBadge.classList.toggle('has-unread', count > 0); + } + } catch (_) {} + } + + function escapeHtml(str) { + const d = document.createElement('div'); + d.textContent = str; + return d.innerHTML; + } + + // Initial poll + periodic refresh + pollUnreadCount(); + setInterval(pollUnreadCount, 60000); + } }); \ No newline at end of file diff --git a/templates/base.html b/templates/base.html index f3e76cb..1549fd8 100644 --- a/templates/base.html +++ b/templates/base.html @@ -52,6 +52,21 @@

{% block page_title %}Dashboard{% endblock %}

+
+ +
+
+

Notifications

+ +
+
+
No notifications
+
+
+
{% block topbar_actions %}{% endblock %}
diff --git a/templates/marketplace.html b/templates/marketplace.html index bee0fb8..9d73193 100644 --- a/templates/marketplace.html +++ b/templates/marketplace.html @@ -32,7 +32,15 @@ {% endif %}
-
{{ stack.name }}
+
+ {{ stack.name }} + +
{{ stack.description }}
{{ stack.author }} @@ -145,5 +153,26 @@

Deploy Stack

document.getElementById('deploy-status').textContent = 'Network error: ' + err.message; } } + +// Show "Update Available" badges from notifications +(async function checkUpdateBadges() { + try { + const resp = await fetch('/api/v1/notifications'); + if (!resp.ok) return; + const data = await resp.json(); + if (!data.notifications) return; + const updateIds = new Set(); + data.notifications.forEach(n => { + if (n.kind === 'stack_update_available' && n.stack_id) { + updateIds.add(n.stack_id); + } + }); + document.querySelectorAll('.update-badge[data-stack-id]').forEach(el => { + if (updateIds.has(el.dataset.stackId)) { + el.style.display = 'inline'; + } + }); + } catch (_) {} +})(); {% endblock %} diff --git a/tests/cli_version.rs b/tests/cli_version.rs new file mode 100644 index 0000000..67e4825 --- /dev/null +++ b/tests/cli_version.rs @@ -0,0 +1,38 @@ +use assert_cmd::Command; + +fn expected_version_output() -> String { + let base = env!("CARGO_PKG_VERSION"); + let git_hash = std::process::Command::new("git") + .args(["rev-parse", "--short=7", "HEAD"]) + .output() + .ok() + .filter(|output| output.status.success()) + .and_then(|output| String::from_utf8(output.stdout).ok()) + .map(|value| value.trim().to_string()) + .filter(|value| !value.is_empty()); + + match git_hash { + Some(hash) => format!("{base} ({hash})"), + None => base.to_string(), + } +} + +#[test] +fn status_version_prints_display_version_only() { + let mut cmd = Command::cargo_bin("status").unwrap(); + cmd.arg("--version") + .assert() + .success() + .stdout(format!("{}\n", expected_version_output())) + .stderr(""); +} + +#[test] +fn status_short_version_flag_prints_display_version_only() { + let mut cmd = Command::cargo_bin("status").unwrap(); + cmd.arg("-V") + .assert() + .success() + .stdout(format!("{}\n", expected_version_output())) + .stderr(""); +} diff --git a/tests/http_routes.rs b/tests/http_routes.rs index 8068dd7..c5b5a51 100644 --- a/tests/http_routes.rs +++ b/tests/http_routes.rs @@ -84,6 
+84,33 @@ async fn test_capabilities_endpoint() { assert!(value.get("features").is_some()); } +#[tokio::test] +async fn given_capabilities_request_when_agent_supports_pipe_runtime_then_pipe_features_are_advertised( +) { + let app = test_router(); + + let response = app + .oneshot( + Request::builder() + .uri("/capabilities") + .body(Body::empty()) + .unwrap(), + ) + .await + .unwrap(); + + assert_eq!(response.status(), StatusCode::OK); + + let body_bytes = response.into_body().collect().await.unwrap().to_bytes(); + let value: Value = serde_json::from_slice(&body_bytes).unwrap(); + let features = value["features"].as_array().expect("features array"); + + assert!(features.contains(&Value::String("pipes".to_string()))); + assert!(features.contains(&Value::String("activate_pipe".to_string()))); + assert!(features.contains(&Value::String("deactivate_pipe".to_string()))); + assert!(features.contains(&Value::String("trigger_pipe".to_string()))); +} + #[tokio::test] async fn test_login_page_get() { let app = test_router(); @@ -479,3 +506,193 @@ async fn test_backup_download_success() { let body = response.into_body().collect().await.unwrap().to_bytes(); assert_eq!(body.as_ref(), b"test backup content"); } + +// ---- Notification endpoint tests ---- + +#[tokio::test] +async fn test_notifications_unread_count_starts_at_zero() { + let app = test_router(); + + let response = app + .oneshot( + Request::builder() + .uri("/api/v1/notifications/unread-count") + .body(Body::empty()) + .unwrap(), + ) + .await + .unwrap(); + + assert_eq!(response.status(), StatusCode::OK); + let body = response.into_body().collect().await.unwrap().to_bytes(); + let json: Value = serde_json::from_slice(&body).unwrap(); + assert_eq!(json["unread_count"], 0); +} + +#[tokio::test] +async fn test_notifications_list_empty() { + let app = test_router(); + + let response = app + .oneshot( + Request::builder() + .uri("/api/v1/notifications") + .body(Body::empty()) + .unwrap(), + ) + .await + .unwrap(); + + 
assert_eq!(response.status(), StatusCode::OK); + let body = response.into_body().collect().await.unwrap().to_bytes(); + let json: Value = serde_json::from_slice(&body).unwrap(); + assert_eq!(json["unread_count"], 0); + assert_eq!(json["notifications"].as_array().unwrap().len(), 0); +} + +#[tokio::test] +async fn test_notifications_mark_read_all_on_empty() { + let app = test_router(); + + let response = app + .oneshot( + Request::builder() + .method("POST") + .uri("/api/v1/notifications/read") + .header("Content-Type", "application/json") + .body(Body::from(r#"{"all": true}"#)) + .unwrap(), + ) + .await + .unwrap(); + + assert_eq!(response.status(), StatusCode::OK); + let body = response.into_body().collect().await.unwrap().to_bytes(); + let json: Value = serde_json::from_slice(&body).unwrap(); + assert_eq!(json["status"], "ok"); +} + +#[tokio::test] +async fn test_notifications_full_lifecycle() { + use status_panel::comms::notifications::{self, Notification, NotificationKind}; + + let state = Arc::new(AppState::new(test_config(), false, None)); + let app = create_router(state.clone()); + + // Seed notifications into the store directly + let notifs = vec![ + Notification { + id: "test-1".to_string(), + kind: NotificationKind::StackUpdateAvailable, + title: "Update for MyStack".to_string(), + message: "Version 2.0 is available".to_string(), + stack_id: Some("stack-1".to_string()), + stack_name: Some("MyStack".to_string()), + new_version: Some("2.0".to_string()), + created_at: "2026-04-12T00:00:00Z".to_string(), + read: false, + }, + Notification { + id: "test-2".to_string(), + kind: NotificationKind::StackPublished, + title: "New stack: CoolApp".to_string(), + message: "CoolApp has been published".to_string(), + stack_id: Some("stack-2".to_string()), + stack_name: Some("CoolApp".to_string()), + new_version: None, + created_at: "2026-04-12T01:00:00Z".to_string(), + read: false, + }, + ]; + notifications::merge_notifications(&state.notification_store, notifs).await; + 
+ // Check unread count = 2 + let response = app + .clone() + .oneshot( + Request::builder() + .uri("/api/v1/notifications/unread-count") + .body(Body::empty()) + .unwrap(), + ) + .await + .unwrap(); + let body = response.into_body().collect().await.unwrap().to_bytes(); + let json: Value = serde_json::from_slice(&body).unwrap(); + assert_eq!(json["unread_count"], 2); + + // List all notifications + let response = app + .clone() + .oneshot( + Request::builder() + .uri("/api/v1/notifications") + .body(Body::empty()) + .unwrap(), + ) + .await + .unwrap(); + let body = response.into_body().collect().await.unwrap().to_bytes(); + let json: Value = serde_json::from_slice(&body).unwrap(); + assert_eq!(json["unread_count"], 2); + assert_eq!(json["notifications"].as_array().unwrap().len(), 2); + + // Mark one as read + let response = app + .clone() + .oneshot( + Request::builder() + .method("POST") + .uri("/api/v1/notifications/read") + .header("Content-Type", "application/json") + .body(Body::from(r#"{"ids": ["test-1"]}"#)) + .unwrap(), + ) + .await + .unwrap(); + assert_eq!(response.status(), StatusCode::OK); + + // Unread count should now be 1 + let response = app + .clone() + .oneshot( + Request::builder() + .uri("/api/v1/notifications/unread-count") + .body(Body::empty()) + .unwrap(), + ) + .await + .unwrap(); + let body = response.into_body().collect().await.unwrap().to_bytes(); + let json: Value = serde_json::from_slice(&body).unwrap(); + assert_eq!(json["unread_count"], 1); + + // Mark all as read + let response = app + .clone() + .oneshot( + Request::builder() + .method("POST") + .uri("/api/v1/notifications/read") + .header("Content-Type", "application/json") + .body(Body::from(r#"{"all": true}"#)) + .unwrap(), + ) + .await + .unwrap(); + assert_eq!(response.status(), StatusCode::OK); + + // Unread count should now be 0 + let response = app + .oneshot( + Request::builder() + .uri("/api/v1/notifications/unread-count") + .body(Body::empty()) + .unwrap(), + ) + .await 
+ .unwrap(); + let body = response.into_body().collect().await.unwrap().to_bytes(); + let json: Value = serde_json::from_slice(&body).unwrap(); + assert_eq!(json["unread_count"], 0); +} diff --git a/tests/security_integration.rs b/tests/security_integration.rs index ea73c18..690b374 100644 --- a/tests/security_integration.rs +++ b/tests/security_integration.rs @@ -1,14 +1,22 @@ +use axum::extract::{Path, State}; use axum::http::{Request, StatusCode}; -use axum::{body::Body, Router}; +use axum::routing::{get, post}; +use axum::{body::Body, Json, Router}; use base64::{engine::general_purpose, Engine}; use hmac::{Hmac, Mac}; use http_body_util::BodyExt; +use mockito::{Matcher, Server}; use serde_json::json; +use serde_json::Value; use sha2::Sha256; use status_panel::agent::config::{Config, ReqData}; use status_panel::comms::local_api::{create_router, AppState}; use std::sync::Arc; use std::sync::{Mutex, OnceLock}; +use tokio::net::TcpListener; +use tokio::sync::Mutex as AsyncMutex; +use tokio::task::JoinHandle; +use tokio::time::{sleep, timeout, Duration}; use tower::ServiceExt; // for Router::oneshot use uuid::Uuid; @@ -20,6 +28,31 @@ fn lock_tests() -> std::sync::MutexGuard<'static, ()> { } } +struct EnvGuard { + vars: Vec<(String, Option)>, +} + +impl EnvGuard { + fn new(keys: &[&str]) -> Self { + let vars = keys + .iter() + .map(|k| (k.to_string(), std::env::var(k).ok())) + .collect(); + Self { vars } + } +} + +impl Drop for EnvGuard { + fn drop(&mut self) { + for (key, original) in &self.vars { + match original { + Some(v) => std::env::set_var(key, v), + None => std::env::remove_var(key), + } + } + } +} + fn test_config() -> Arc { Arc::new(Config { domain: Some("test.example.com".to_string()), @@ -35,17 +68,30 @@ fn test_config() -> Arc { }) } -fn router_with_env(agent_id: &str, token: &str, scopes: &str) -> Router { +fn router_with_env(agent_id: &str, token: &str, scopes: &str) -> (Router, EnvGuard) { + let env = EnvGuard::new(&[ + "AGENT_ID", + "AGENT_TOKEN", + 
"AGENT_SCOPES", + "RATE_LIMIT_PER_MIN", + ]); std::env::set_var("AGENT_ID", agent_id); std::env::set_var("AGENT_TOKEN", token); std::env::set_var("AGENT_SCOPES", scopes); std::env::set_var("RATE_LIMIT_PER_MIN", "1000"); let state = Arc::new(AppState::new(test_config(), false, None)); - create_router(state) + (create_router(state), env) } type HmacSha256 = Hmac; +#[derive(Clone)] +struct TargetCaptureState { + requests: Arc>>, + status: StatusCode, + response_body: Value, +} + fn sign_b64(token: &str, body: &[u8]) -> String { let mut mac = HmacSha256::new_from_slice(token.as_bytes()).unwrap(); mac.update(body); @@ -86,10 +132,114 @@ async fn post_with_sig( (status, body) } +async fn post_raw_with_sig( + app: &Router, + path: &str, + agent_id: &str, + token: &str, + body: &str, + timestamp: Option, + request_id: Option, +) -> (StatusCode, bytes::Bytes) { + let ts = timestamp.unwrap_or_else(|| format!("{}", chrono::Utc::now().timestamp())); + let rid = request_id.unwrap_or_else(|| Uuid::new_v4().to_string()); + let sig = sign_b64(token, body.as_bytes()); + let response = app + .clone() + .oneshot( + Request::builder() + .method("POST") + .uri(path) + .header("content-type", "application/json") + .header("X-Agent-Id", agent_id) + .header("X-Timestamp", ts) + .header("X-Request-Id", rid) + .header("X-Agent-Signature", sig) + .body(Body::from(body.to_string())) + .unwrap(), + ) + .await + .unwrap(); + let status = response.status(); + let body = response.into_body().collect().await.unwrap().to_bytes(); + (status, body) +} + +async fn capture_target_request( + Path(path): Path, + State(state): State, + Json(payload): Json, +) -> (StatusCode, Json) { + state + .requests + .lock() + .await + .push((format!("/{}", path), payload)); + (state.status, Json(state.response_body.clone())) +} + +async fn source_payload_handler(State(payload): State) -> Json { + Json(payload) +} + +async fn spawn_target_capture_server( + status: StatusCode, + response_body: Value, +) -> ( + String, 
+ Arc>>, + JoinHandle<()>, +) { + let requests = Arc::new(AsyncMutex::new(Vec::new())); + let state = TargetCaptureState { + requests: requests.clone(), + status, + response_body, + }; + let app = Router::new() + .route("/{path}", post(capture_target_request)) + .with_state(state); + let listener = TcpListener::bind("127.0.0.1:0").await.unwrap(); + let base_url = format!("http://{}", listener.local_addr().unwrap()); + let handle = tokio::spawn(async move { + axum::serve(listener, app).await.unwrap(); + }); + (base_url, requests, handle) +} + +async fn spawn_source_server(payload: Value) -> (String, JoinHandle<()>) { + let app = Router::new() + .route("/source", get(source_payload_handler)) + .with_state(payload); + let listener = TcpListener::bind("127.0.0.1:0").await.unwrap(); + let base_url = format!("http://{}", listener.local_addr().unwrap()); + let handle = tokio::spawn(async move { + axum::serve(listener, app).await.unwrap(); + }); + (base_url, handle) +} + +async fn wait_for_request_count( + requests: &Arc>>, + expected: usize, +) -> Vec<(String, Value)> { + timeout(Duration::from_secs(5), async { + loop { + let snapshot = requests.lock().await.clone(); + if snapshot.len() >= expected { + return snapshot; + } + sleep(Duration::from_millis(50)).await; + } + }) + .await + .expect("timed out waiting for captured requests") +} + #[tokio::test] async fn execute_requires_signature_and_scope() { let _g = lock_tests(); - let app = router_with_env("agent-1", "secret-token", "commands:execute"); + let (app, _env) = router_with_env("agent-1", "secret-token", "commands:execute"); // Missing signature let response = app @@ -131,7 +281,7 @@ async fn execute_requires_signature_and_scope() { #[tokio::test] async fn replay_detection_returns_409() { let _g = lock_tests(); - let app = router_with_env("agent-1", "secret-token", "commands:execute"); + let (app, _env) = router_with_env("agent-1", "secret-token", "commands:execute"); let rid = Uuid::new_v4().to_string(); let path 
= "/api/v1/commands/execute"; let body = json!({"id": "cmd-3", "command_id": "cmd-exec-3", "name": "echo hi", "params": {}}); @@ -192,7 +342,7 @@ async fn rate_limit_returns_429() { async fn scope_denied_returns_403() { let _g = lock_tests(); // Do not include commands:execute - let app = router_with_env("agent-1", "secret-token", "commands:report"); + let (app, _env) = router_with_env("agent-1", "secret-token", "commands:report"); let (status, body) = post_with_sig( &app, "/api/v1/commands/execute", @@ -210,9 +360,10 @@ async fn scope_denied_returns_403() { #[tokio::test] async fn wait_can_require_signature() { let _g = lock_tests(); + let _env = EnvGuard::new(&["WAIT_REQUIRE_SIGNATURE"]); // Enable signing for GET /wait std::env::set_var("WAIT_REQUIRE_SIGNATURE", "true"); - let app = router_with_env("agent-1", "secret-token", "commands:wait"); + let (app, _env) = router_with_env("agent-1", "secret-token", "commands:wait"); // Missing signature should fail let response = app @@ -251,3 +402,702 @@ async fn wait_can_require_signature() { // No commands queued -> 204 No Content assert_eq!(response.status(), StatusCode::NO_CONTENT); } + +#[tokio::test] +async fn given_signed_local_wait_request_when_queue_is_empty_then_local_wait_returns_no_content() { + let _g = lock_tests(); + let _env = EnvGuard::new(&["WAIT_REQUIRE_SIGNATURE"]); + std::env::set_var("WAIT_REQUIRE_SIGNATURE", "true"); + let (app, _router_env) = router_with_env("agent-1", "secret-token", "commands:wait"); + + let ts = format!("{}", chrono::Utc::now().timestamp()); + let rid = Uuid::new_v4().to_string(); + let sig = sign_b64("secret-token", b""); + let response = app + .clone() + .oneshot( + Request::builder() + .method("GET") + .uri("/api/v1/commands/wait/session?timeout=1") + .header("X-Agent-Id", "agent-1") + .header("X-Timestamp", ts) + .header("X-Request-Id", rid) + .header("X-Agent-Signature", sig) + .body(Body::empty()) + .unwrap(), + ) + .await + .unwrap(); + + assert_eq!(response.status(), 
StatusCode::NO_CONTENT); +} + +#[tokio::test] +async fn given_pipe_command_enqueued_when_agent_waits_and_reports_result_then_transport_path_delivers_and_records_execution( +) { + let _g = lock_tests(); + let _env = EnvGuard::new(&["WAIT_REQUIRE_SIGNATURE"]); + std::env::set_var("WAIT_REQUIRE_SIGNATURE", "true"); + let (app, _router_env) = router_with_env( + "agent-1", + "secret-token", + "commands:enqueue,commands:wait,commands:report", + ); + + let (enqueue_status, enqueue_body) = post_with_sig( + &app, + "/api/v1/commands/enqueue", + "agent-1", + "secret-token", + json!({ + "id": "queued-activate-pipe", + "command_id": "queued-activate-pipe", + "name": "activate_pipe", + "deployment_hash": "dep-daemon", + "params": { + "pipe_instance_id": "pipe-daemon-1", + "target_url": "https://example.com", + "trigger_type": "manual" + } + }), + None, + ) + .await; + assert_eq!(enqueue_status, StatusCode::ACCEPTED); + let enqueue_payload: Value = serde_json::from_slice(&enqueue_body).unwrap(); + assert_eq!(enqueue_payload["queued"], true); + + let ts = format!("{}", chrono::Utc::now().timestamp()); + let rid = Uuid::new_v4().to_string(); + let sig = sign_b64("secret-token", b""); + let wait_response = app + .clone() + .oneshot( + Request::builder() + .method("GET") + .uri("/api/v1/commands/wait/dep-daemon?timeout=1") + .header("X-Agent-Id", "agent-1") + .header("X-Timestamp", ts) + .header("X-Request-Id", rid) + .header("X-Agent-Signature", sig) + .body(Body::empty()) + .unwrap(), + ) + .await + .unwrap(); + assert_eq!(wait_response.status(), StatusCode::OK); + let waited_body = wait_response + .into_body() + .collect() + .await + .unwrap() + .to_bytes(); + let waited_payload: Value = serde_json::from_slice(&waited_body).unwrap(); + assert_eq!(waited_payload["name"], "activate_pipe"); + assert_eq!(waited_payload["deployment_hash"], "dep-daemon"); + assert_eq!( + waited_payload["params"]["pipe_instance_id"], + "pipe-daemon-1" + ); + + let (report_status, report_body) = 
post_with_sig( + &app, + "/api/v1/commands/report", + "agent-1", + "secret-token", + json!({ + "command_id": "queued-activate-pipe", + "status": "success", + "result": { + "type": "activate_pipe", + "pipe_instance_id": "pipe-daemon-1" + }, + "completed_at": chrono::Utc::now().to_rfc3339(), + "deployment_hash": "dep-daemon", + "command_type": "activate_pipe", + "executed_by": "status_panel" + }), + None, + ) + .await; + assert_eq!(report_status, StatusCode::OK); + let report_payload: Value = serde_json::from_slice(&report_body).unwrap(); + assert_eq!(report_payload["accepted"], true); + + let metrics_response = app + .clone() + .oneshot( + Request::builder() + .method("GET") + .uri("/api/v1/diagnostics/commands") + .body(Body::empty()) + .unwrap(), + ) + .await + .unwrap(); + assert_eq!(metrics_response.status(), StatusCode::OK); + let metrics_body = metrics_response + .into_body() + .collect() + .await + .unwrap() + .to_bytes(); + let metrics_payload: Value = serde_json::from_slice(&metrics_body).unwrap(); + assert_eq!(metrics_payload["status_panel_count"], 1); + assert_eq!(metrics_payload["total_count"], 1); + assert_eq!(metrics_payload["last_control_plane"], "status_panel"); +} + +#[tokio::test] +async fn given_registered_webhook_pipe_when_signed_webhook_arrives_then_payload_is_forwarded_to_target( +) { + let _g = lock_tests(); + let mut server = Server::new_async().await; + let target = server + .mock("POST", "/pipe-target") + .match_body(Matcher::Exact(r#"{"email":"webhook@try.direct"}"#.into())) + .with_status(200) + .with_header("content-type", "application/json") + .with_body(r#"{"accepted":true}"#) + .create_async() + .await; + + let (app, _env) = router_with_env("agent-1", "secret-token", "commands:execute"); + + let (activate_status, _) = post_with_sig( + &app, + "/api/v1/commands/execute", + "agent-1", + "secret-token", + json!({ + "id": "cmd-activate-webhook", + "command_id": "cmd-activate-webhook", + "name": "activate_pipe", + "params": { + 
"deployment_hash": "dep-webhook", + "pipe_instance_id": "pipe-webhook-1", + "target_url": server.url(), + "target_endpoint": "/pipe-target", + "target_method": "POST", + "field_mapping": { "email": "$.user.email" }, + "trigger_type": "webhook" + } + }), + None, + ) + .await; + assert_eq!(activate_status, StatusCode::OK); + + let (webhook_status, webhook_body) = post_with_sig( + &app, + "/api/v1/pipes/webhook/dep-webhook/pipe-webhook-1", + "agent-1", + "secret-token", + json!({ + "user": { + "email": "webhook@try.direct" + } + }), + None, + ) + .await; + + assert_eq!(webhook_status, StatusCode::OK); + let payload: serde_json::Value = serde_json::from_slice(&webhook_body).unwrap(); + assert_eq!(payload["status"], "success"); + assert_eq!(payload["result"]["target_response"]["transport"], "http"); + assert_eq!(payload["result"]["target_response"]["delivered"], true); + target.assert_async().await; +} + +#[tokio::test] +async fn given_signed_webhook_request_without_execute_scope_when_pipe_ingest_is_called_then_it_is_rejected( +) { + let _g = lock_tests(); + let (app, _env) = router_with_env("agent-1", "secret-token", "commands:report"); + + let (status, body) = post_with_sig( + &app, + "/api/v1/pipes/webhook/dep-webhook/pipe-webhook-1", + "agent-1", + "secret-token", + json!({"user": {"email": "webhook@try.direct"}}), + None, + ) + .await; + + assert_eq!(status, StatusCode::FORBIDDEN); + let payload: Value = serde_json::from_slice(&body).unwrap(); + assert_eq!(payload["error"], "insufficient scope"); +} + +#[tokio::test] +async fn given_signed_webhook_request_with_invalid_json_when_pipe_ingest_is_called_then_it_returns_bad_request( +) { + let _g = lock_tests(); + let (app, _env) = router_with_env("agent-1", "secret-token", "commands:execute"); + + let (status, body) = post_raw_with_sig( + &app, + "/api/v1/pipes/webhook/dep-webhook/pipe-webhook-1", + "agent-1", + "secret-token", + "{invalid-json", + None, + None, + ) + .await; + + assert_eq!(status, 
StatusCode::BAD_REQUEST); + let payload: Value = serde_json::from_slice(&body).unwrap(); + assert!(payload["error"] + .as_str() + .unwrap_or_default() + .contains("invalid webhook payload")); +} + +#[tokio::test] +async fn given_replayed_signed_webhook_request_when_pipe_ingest_is_called_then_replay_is_blocked() { + let _g = lock_tests(); + let (app, _env) = router_with_env("agent-1", "secret-token", "commands:execute"); + let request_id = Uuid::new_v4().to_string(); + + let (first_status, first_body) = post_with_sig( + &app, + "/api/v1/pipes/webhook/dep-webhook/missing-pipe", + "agent-1", + "secret-token", + json!({"user": {"email": "webhook@try.direct"}}), + Some(request_id.clone()), + ) + .await; + assert_eq!(first_status, StatusCode::OK); + let first_payload: Value = serde_json::from_slice(&first_body).unwrap(); + assert_eq!(first_payload["status"], "failed"); + assert_eq!(first_payload["result"]["success"], false); + + let (second_status, second_body) = post_with_sig( + &app, + "/api/v1/pipes/webhook/dep-webhook/missing-pipe", + "agent-1", + "secret-token", + json!({"user": {"email": "webhook@try.direct"}}), + Some(request_id), + ) + .await; + assert_eq!(second_status, StatusCode::CONFLICT); + let second_payload: Value = serde_json::from_slice(&second_body).unwrap(); + assert_eq!(second_payload["error"], "replay detected"); +} + +#[tokio::test] +async fn given_reactivated_manual_pipe_when_it_is_triggered_then_only_the_latest_target_receives_payload( +) { + let _g = lock_tests(); + let (target_url, requests, target_handle) = + spawn_target_capture_server(StatusCode::OK, json!({"accepted": true})).await; + let (app, _env) = router_with_env("agent-1", "secret-token", "commands:execute"); + + let (first_activate_status, first_activate_body) = post_with_sig( + &app, + "/api/v1/commands/execute", + "agent-1", + "secret-token", + json!({ + "id": "cmd-activate-first", + "command_id": "cmd-activate-first", + "name": "activate_pipe", + "params": { + "deployment_hash": 
"dep-reactivate", + "pipe_instance_id": "pipe-reactivate-1", + "target_url": target_url, + "target_endpoint": "/first", + "target_method": "POST", + "field_mapping": { "email": "$.user.email" }, + "trigger_type": "manual" + } + }), + None, + ) + .await; + assert_eq!(first_activate_status, StatusCode::OK); + let first_activate_payload: Value = serde_json::from_slice(&first_activate_body).unwrap(); + assert_eq!(first_activate_payload["result"]["replaced"], false); + assert_eq!(first_activate_payload["result"]["reactivated"], false); + + let (second_activate_status, second_activate_body) = post_with_sig( + &app, + "/api/v1/commands/execute", + "agent-1", + "secret-token", + json!({ + "id": "cmd-activate-second", + "command_id": "cmd-activate-second", + "name": "activate_pipe", + "params": { + "deployment_hash": "dep-reactivate", + "pipe_instance_id": "pipe-reactivate-1", + "target_url": target_url, + "target_endpoint": "/second", + "target_method": "POST", + "field_mapping": { "email": "$.user.email" }, + "trigger_type": "manual" + } + }), + None, + ) + .await; + assert_eq!(second_activate_status, StatusCode::OK); + let second_activate_payload: Value = serde_json::from_slice(&second_activate_body).unwrap(); + assert_eq!(second_activate_payload["result"]["replaced"], true); + assert_eq!(second_activate_payload["result"]["reactivated"], true); + + let (trigger_status, trigger_body) = post_with_sig( + &app, + "/api/v1/commands/execute", + "agent-1", + "secret-token", + json!({ + "id": "cmd-trigger-reactivate", + "command_id": "cmd-trigger-reactivate", + "name": "trigger_pipe", + "params": { + "deployment_hash": "dep-reactivate", + "pipe_instance_id": "pipe-reactivate-1", + "input_data": { + "user": { + "email": "replace@try.direct" + } + } + } + }), + None, + ) + .await; + assert_eq!(trigger_status, StatusCode::OK); + let trigger_payload: Value = serde_json::from_slice(&trigger_body).unwrap(); + assert_eq!(trigger_payload["status"], "success"); + + let captured = 
wait_for_request_count(&requests, 1).await; + assert_eq!( + captured, + vec![( + "/second".to_string(), + json!({"email": "replace@try.direct"}) + )] + ); + + target_handle.abort(); +} + +#[tokio::test] +async fn given_poll_pipe_when_source_worker_fetches_payload_then_target_receives_it_and_deactivation_stops_future_deliveries( +) { + let _g = lock_tests(); + let _env = EnvGuard::new(&["PIPE_POLL_INTERVAL_SECS"]); + std::env::set_var("PIPE_POLL_INTERVAL_SECS", "1"); + let (source_url, source_handle) = + spawn_source_server(json!({"user": {"email": "poll@try.direct"}})).await; + let (target_url, requests, target_handle) = + spawn_target_capture_server(StatusCode::OK, json!({"accepted": true})).await; + let (app, _router_env) = router_with_env("agent-1", "secret-token", "commands:execute"); + + let (activate_status, activate_body) = post_with_sig( + &app, + "/api/v1/commands/execute", + "agent-1", + "secret-token", + json!({ + "id": "cmd-activate-poll", + "command_id": "cmd-activate-poll", + "name": "activate_pipe", + "params": { + "deployment_hash": "dep-poll", + "pipe_instance_id": "pipe-poll-1", + "source_endpoint": format!("{}/source", source_url), + "source_method": "GET", + "target_url": target_url, + "target_endpoint": "/pipe-target", + "target_method": "POST", + "field_mapping": { "email": "$.user.email" }, + "trigger_type": "poll" + } + }), + None, + ) + .await; + assert_eq!(activate_status, StatusCode::OK); + let activate_payload: Value = serde_json::from_slice(&activate_body).unwrap(); + assert_eq!(activate_payload["status"], "success"); + + let first_delivery = wait_for_request_count(&requests, 1).await; + assert_eq!( + first_delivery, + vec![( + "/pipe-target".to_string(), + json!({"email": "poll@try.direct"}) + )] + ); + + let (deactivate_status, deactivate_body) = post_with_sig( + &app, + "/api/v1/commands/execute", + "agent-1", + "secret-token", + json!({ + "id": "cmd-deactivate-poll", + "command_id": "cmd-deactivate-poll", + "name": 
"deactivate_pipe", + "params": { + "deployment_hash": "dep-poll", + "pipe_instance_id": "pipe-poll-1" + } + }), + None, + ) + .await; + assert_eq!(deactivate_status, StatusCode::OK); + let deactivate_payload: Value = serde_json::from_slice(&deactivate_body).unwrap(); + assert_eq!( + deactivate_payload["result"]["lifecycle"]["state"], + "inactive" + ); + + sleep(Duration::from_millis(1300)).await; + let final_snapshot = requests.lock().await.clone(); + assert_eq!(final_snapshot.len(), 1); + + target_handle.abort(); + source_handle.abort(); +} + +#[tokio::test] +async fn given_registered_manual_pipe_when_target_returns_server_error_then_failed_delivery_shape_and_lifecycle_are_reported( +) { + let _g = lock_tests(); + let (target_url, requests, target_handle) = spawn_target_capture_server( + StatusCode::INTERNAL_SERVER_ERROR, + json!({"error": "downstream unavailable"}), + ) + .await; + let (app, _env) = router_with_env("agent-1", "secret-token", "commands:execute"); + + let (activate_status, _) = post_with_sig( + &app, + "/api/v1/commands/execute", + "agent-1", + "secret-token", + json!({ + "id": "cmd-activate-fail", + "command_id": "cmd-activate-fail", + "name": "activate_pipe", + "params": { + "deployment_hash": "dep-fail", + "pipe_instance_id": "pipe-fail-1", + "target_url": target_url, + "target_endpoint": "/pipe-target", + "target_method": "POST", + "field_mapping": { "email": "$.user.email" }, + "trigger_type": "manual" + } + }), + None, + ) + .await; + assert_eq!(activate_status, StatusCode::OK); + + let (trigger_status, trigger_body) = post_with_sig( + &app, + "/api/v1/commands/execute", + "agent-1", + "secret-token", + json!({ + "id": "cmd-trigger-fail", + "command_id": "cmd-trigger-fail", + "name": "trigger_pipe", + "params": { + "deployment_hash": "dep-fail", + "pipe_instance_id": "pipe-fail-1", + "input_data": { + "user": { + "email": "failure@try.direct" + } + } + } + }), + None, + ) + .await; + assert_eq!(trigger_status, StatusCode::OK); + let 
trigger_payload: Value = serde_json::from_slice(&trigger_body).unwrap(); + assert_eq!(trigger_payload["status"], "failed"); + assert_eq!(trigger_payload["result"]["success"], false); + assert_eq!( + trigger_payload["result"]["target_response"]["transport"], + "http" + ); + assert_eq!(trigger_payload["result"]["target_response"]["status"], 500); + assert_eq!( + trigger_payload["result"]["target_response"]["delivered"], + false + ); + assert_eq!( + trigger_payload["result"]["target_response"]["body"], + json!({"error": "downstream unavailable"}) + ); + assert_eq!(trigger_payload["result"]["lifecycle"]["state"], "failed"); + + let captured = wait_for_request_count(&requests, 1).await; + assert_eq!( + captured, + vec![( + "/pipe-target".to_string(), + json!({"email": "failure@try.direct"}) + )] + ); + + target_handle.abort(); +} + +#[tokio::test] +async fn given_registered_manual_pipe_when_it_is_triggered_and_deactivated_then_follow_up_trigger_fails_cleanly( +) { + let _g = lock_tests(); + let mut server = Server::new_async().await; + let target = server + .mock("POST", "/pipe-target") + .match_body(Matcher::Exact(r#"{"email":"manual@try.direct"}"#.into())) + .with_status(200) + .with_header("content-type", "application/json") + .with_body(r#"{"accepted":true}"#) + .expect(1) + .create_async() + .await; + + let (app, _env) = router_with_env("agent-1", "secret-token", "commands:execute"); + + let (activate_status, activate_body) = post_with_sig( + &app, + "/api/v1/commands/execute", + "agent-1", + "secret-token", + json!({ + "id": "cmd-activate-manual", + "command_id": "cmd-activate-manual", + "name": "activate_pipe", + "params": { + "deployment_hash": "dep-manual", + "pipe_instance_id": "pipe-manual-1", + "target_url": server.url(), + "target_endpoint": "/pipe-target", + "target_method": "POST", + "field_mapping": { "email": "$.user.email" }, + "trigger_type": "manual" + } + }), + None, + ) + .await; + assert_eq!(activate_status, StatusCode::OK); + let 
activate_payload: serde_json::Value = serde_json::from_slice(&activate_body).unwrap(); + assert_eq!(activate_payload["status"], "success"); + assert_eq!(activate_payload["result"]["active"], true); + assert_eq!(activate_payload["result"]["lifecycle"]["state"], "active"); + + let (trigger_status, trigger_body) = post_with_sig( + &app, + "/api/v1/commands/execute", + "agent-1", + "secret-token", + json!({ + "id": "cmd-trigger-manual", + "command_id": "cmd-trigger-manual", + "name": "trigger_pipe", + "params": { + "deployment_hash": "dep-manual", + "pipe_instance_id": "pipe-manual-1", + "input_data": { + "user": { + "email": "manual@try.direct" + } + } + } + }), + None, + ) + .await; + assert_eq!(trigger_status, StatusCode::OK); + let trigger_payload: serde_json::Value = serde_json::from_slice(&trigger_body).unwrap(); + assert_eq!(trigger_payload["status"], "success"); + assert_eq!(trigger_payload["result"]["success"], true); + assert_eq!( + trigger_payload["result"]["target_response"]["transport"], + "http" + ); + assert_eq!( + trigger_payload["result"]["target_response"]["delivered"], + true + ); + assert_eq!(trigger_payload["result"]["lifecycle"]["state"], "active"); + assert_eq!(trigger_payload["result"]["lifecycle"]["trigger_count"], 1); + + let (deactivate_status, deactivate_body) = post_with_sig( + &app, + "/api/v1/commands/execute", + "agent-1", + "secret-token", + json!({ + "id": "cmd-deactivate-manual", + "command_id": "cmd-deactivate-manual", + "name": "deactivate_pipe", + "params": { + "deployment_hash": "dep-manual", + "pipe_instance_id": "pipe-manual-1" + } + }), + None, + ) + .await; + assert_eq!(deactivate_status, StatusCode::OK); + let deactivate_payload: serde_json::Value = serde_json::from_slice(&deactivate_body).unwrap(); + assert_eq!(deactivate_payload["status"], "success"); + assert_eq!(deactivate_payload["result"]["active"], false); + assert_eq!( + deactivate_payload["result"]["lifecycle"]["state"], + "inactive" + ); + + let (follow_up_status, 
follow_up_body) = post_with_sig( + &app, + "/api/v1/commands/execute", + "agent-1", + "secret-token", + json!({ + "id": "cmd-trigger-after-deactivate", + "command_id": "cmd-trigger-after-deactivate", + "name": "trigger_pipe", + "params": { + "deployment_hash": "dep-manual", + "pipe_instance_id": "pipe-manual-1", + "input_data": { + "user": { + "email": "manual@try.direct" + } + } + } + }), + None, + ) + .await; + assert_eq!(follow_up_status, StatusCode::OK); + let follow_up_payload: serde_json::Value = serde_json::from_slice(&follow_up_body).unwrap(); + assert_eq!(follow_up_payload["status"], "failed"); + assert_eq!(follow_up_payload["result"]["success"], false); + assert_eq!( + follow_up_payload["result"]["error"], + "trigger_pipe requires target_url or target_container" + ); + assert_eq!(follow_up_payload["result"]["lifecycle"]["state"], "failed"); + + target.assert_async().await; +}