diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100755 index 0000000000..737f43e1dc --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,96 @@ +# CI workflow for Flynn +# +# Builds all components and runs unit tests on every push and pull request +# to the tuf-rebuild branch. Uses a Debian container with Go 1.13.15 and +# all required system dependencies. +# +# The build uses script/bootstrap-build which compiles all 34 Flynn binaries +# from source without requiring a running Flynn cluster. +# +# Note: Debian Buster (10) is EOL and its repos are offline. We use +# Bookworm (12) for system packages; Go 1.13 is installed separately. + +name: CI + +on: + push: + branches: [tuf-rebuild, master] + pull_request: + branches: [tuf-rebuild, master] + +jobs: + build: + name: Build all components + runs-on: ubuntu-latest + container: + image: debian:bookworm-slim + + steps: + - name: Install system dependencies + run: | + apt-get update && apt-get install -y --no-install-recommends \ + build-essential \ + gcc \ + libseccomp-dev \ + git \ + python3 \ + curl \ + ca-certificates + + - name: Install Go 1.13.15 + run: | + curl -fsSL "https://go.dev/dl/go1.13.15.linux-amd64.tar.gz" \ + | tar -C /usr/local -xz + echo "/usr/local/go/bin" >> $GITHUB_PATH + echo "GOROOT=/usr/local/go" >> $GITHUB_ENV + echo "GOPATH=/go" >> $GITHUB_ENV + echo "/go/bin" >> $GITHUB_PATH + + - name: Checkout code + uses: actions/checkout@v4 + + - name: Build all components + run: script/bootstrap-build --version "ci-$(echo "$GITHUB_SHA" | cut -c1-8)" + + - name: Verify binaries + run: | + echo "Built binaries:" + ls -la build/bin/ + echo "" + echo "Binary count: $(ls -1 build/bin/ | grep -v '.gz$' | grep -v '^flynn$' | wc -l)" + echo "" + echo "flynn-host version:" + build/bin/flynn-host version || true + + test: + name: Unit tests + runs-on: ubuntu-latest + container: + image: debian:bookworm-slim + + steps: + - name: Install system dependencies + run: | + apt-get update 
&& apt-get install -y --no-install-recommends \ + build-essential \ + gcc \ + libseccomp-dev \ + git \ + python3 \ + curl \ + ca-certificates + + - name: Install Go 1.13.15 + run: | + curl -fsSL "https://go.dev/dl/go1.13.15.linux-amd64.tar.gz" \ + | tar -C /usr/local -xz + echo "/usr/local/go/bin" >> $GITHUB_PATH + echo "GOROOT=/usr/local/go" >> $GITHUB_ENV + echo "GOPATH=/go" >> $GITHUB_ENV + echo "/go/bin" >> $GITHUB_PATH + + - name: Checkout code + uses: actions/checkout@v4 + + - name: Run standalone unit tests + run: make test-unit-standalone diff --git a/.gitignore b/.gitignore index f2ce88eb5b..e8d5e30463 100644 --- a/.gitignore +++ b/.gitignore @@ -6,3 +6,5 @@ demo/*.log *.test /tmp /build +._* +*.backup diff --git a/Dockerfile.ci b/Dockerfile.ci new file mode 100755 index 0000000000..420ea390f6 --- /dev/null +++ b/Dockerfile.ci @@ -0,0 +1,53 @@ +# Dockerfile.ci — Reproducible build environment for Flynn CI +# +# This image provides Go 1.13.15 with CGO support and all system dependencies +# needed to build all Flynn components and run unit tests. +# +# Usage: +# docker build -f Dockerfile.ci -t flynn-ci . 
+# docker run --rm -v $(pwd):/go/src/github.com/flynn/flynn -w /go/src/github.com/flynn/flynn flynn-ci script/bootstrap-build --version dev +# +# For unit tests: +# docker run --rm -v $(pwd):/go/src/github.com/flynn/flynn -w /go/src/github.com/flynn/flynn flynn-ci make test-unit-standalone + +FROM debian:buster-slim + +# Avoid interactive prompts during package installation +ENV DEBIAN_FRONTEND=noninteractive + +# Install system dependencies: +# - build-essential, gcc: CGO compilation (required for flynn-host / libcontainer) +# - libseccomp-dev: seccomp support for container runtime +# - git: version detection, go module operations +# - python3: TUF root key extraction from builder/manifest.json +# - squashfs-tools: mksquashfs for building image layers (export-tuf) +# - debootstrap: building Ubuntu base layers (export-tuf) +# - busybox-static: building busybox base layer (export-tuf) +# - curl, ca-certificates: downloading Go toolchain +RUN apt-get update && apt-get install -y --no-install-recommends \ + build-essential \ + gcc \ + libseccomp-dev \ + git \ + python3 \ + squashfs-tools \ + debootstrap \ + busybox-static \ + curl \ + ca-certificates \ + && rm -rf /var/lib/apt/lists/* + +# Install Go 1.13.15 (the required version for this project) +ENV GO_VERSION=1.13.15 +RUN curl -fsSL "https://go.dev/dl/go${GO_VERSION}.linux-amd64.tar.gz" \ + | tar -C /usr/local -xz + +ENV GOROOT=/usr/local/go +ENV GOPATH=/go +ENV PATH="${GOPATH}/bin:${GOROOT}/bin:${PATH}" + +# Set the working directory to the Flynn source tree +WORKDIR /go/src/github.com/flynn/flynn + +# Default: show Go version to verify setup +CMD ["go", "version"] diff --git a/Makefile b/Makefile old mode 100644 new mode 100755 index 7abbce9769..8e22d39470 --- a/Makefile +++ b/Makefile @@ -1,8 +1,13 @@ -GO_ENV=GOROOT=`readlink -f build/_go` +# Portable readlink -f (works on macOS and GNU/Linux) +_readlink_f = $(shell cd $(1) && pwd -P 2>/dev/null) +GO_ENV = GOROOT=$(call _readlink_f,build/_go) build: 
script/build-flynn +bootstrap-build: + script/bootstrap-build + release: script/build-flynn --git-version @@ -11,13 +16,49 @@ clean: test: test-unit test-integration +# test-unit requires a prior 'make build' (uses GOROOT from build output) test-unit: build env $(GO_ENV) PATH=${PWD}/build/bin:${PATH} go test -race -cover ./... test-unit-root: test-unit sudo -E env $(GO_ENV) PATH=${PWD}/build/bin:${PATH} go test -race -cover ./host/volume/... +# test-unit-standalone runs pure Go tests without requiring 'make build'. +# Uses the system Go toolchain directly. Suitable for CI where Go is installed +# via the Dockerfile rather than extracted from a build image. +# +# These packages have no external service dependencies (no PostgreSQL, discoverd, +# Redis, ZFS, etc.) and can run in any Linux environment with Go installed. +TEST_PACKAGES_STANDALONE = \ + ./pkg/attempt/... \ + ./pkg/cors/... \ + ./pkg/ipallocator/... \ + ./pkg/lru/... \ + ./pkg/mauth/compare/... \ + ./pkg/mux/... \ + ./pkg/pinned/... \ + ./pkg/rpcplus/... \ + ./pkg/signal/... \ + ./pkg/stream/... \ + ./pkg/syslog/rfc5424/... \ + ./pkg/sirenia/state/... \ + ./flannel/pkg/ip/... \ + ./flannel/subnet/... \ + ./host/resource/... \ + ./controller/scheduler/... \ + ./discoverd/client/... \ + ./discoverd/health/... \ + ./logaggregator/buffer/... \ + ./logaggregator/snapshot/... \ + ./router/proxyproto/... +# Excluded from standalone tests: +# ./pkg/lockedfile/... — imports internal/testenv (Go stdlib internal, not allowed) +# ./pkg/term/... 
— requires /dev/tty (not available in CI containers) + +test-unit-standalone: + GO111MODULE=on go test -mod=vendor -race -cover $(TEST_PACKAGES_STANDALONE) + test-integration: build script/run-integration-tests -.PHONY: build release clean test test-unit test-unit-root test-integration +.PHONY: build bootstrap-build release clean test test-unit test-unit-root test-unit-standalone test-integration diff --git a/builder/build.go b/builder/build.go index 4f8c0436df..1588957a53 100644 --- a/builder/build.go +++ b/builder/build.go @@ -298,7 +298,7 @@ func newTUFClient(config *TUFConfig, dbPath string) (*tuf.Client, error) { return client, nil } if err == tuf.ErrNoRootKeys { - if err := client.Init(config.RootKeys, len(config.RootKeys)); err != nil { + if err := client.Init(config.RootKeys, 1); err != nil { return nil, err } _, err = client.Update() @@ -1052,7 +1052,8 @@ type fileInput struct { // inputs and computing the SHA512/256 sum of the resulting bytes. // // TODO: consider storing a map of filenames to hashes and cache based -// on the last modified time to avoid unnecessary work. +// +// on the last modified time to avoid unnecessary work. func (b *Builder) generateLayerID(name string, run []string, env map[string]string, artifact *ct.Artifact, inputs ...string) (id string, err error) { start := time.Now() defer func() { diff --git a/builder/img/busybox.sh b/builder/img/busybox.sh old mode 100644 new mode 100755 index 505da33e4e..9d47c6c8b0 --- a/builder/img/busybox.sh +++ b/builder/img/busybox.sh @@ -1,28 +1,38 @@ #!/bin/bash +# +# Build a minimal busybox rootfs as a squashfs layer. 
+# +# Requires busybox-static to be installed on the host system: +# apt-get install -y busybox-static +# -TMP="$(mktemp --directory)" +set -e -URL="http://archive.ubuntu.com/ubuntu/pool/main/b/busybox/busybox-static_1.22.0-19ubuntu2_amd64.deb" -SHA="f5796bf9d10d60850ab379e3d1cfee138ae8d636691dfe62f86854980baa408b" -curl -fSLo "${TMP}/busybox.deb" "${URL}" -echo "${SHA} ${TMP}/busybox.deb" | shasum -a "256" -c - +TMP="$(mktemp --directory)" +trap "rm -rf '${TMP}'" EXIT -dpkg -i "${TMP}/busybox.deb" +# Use the system-installed busybox-static binary +BUSYBOX="$(which busybox)" +if [[ ! -x "${BUSYBOX}" ]]; then + echo "ERROR: busybox-static not found. Install with: apt-get install -y busybox-static" >&2 + exit 1 +fi mkdir "${TMP}/root" cd "${TMP}/root" mkdir bin etc dev dev/pts lib proc sys tmp touch etc/resolv.conf cp /etc/nsswitch.conf etc/nsswitch.conf -echo root:x:0:0:root:/:/bin/sh > etc/passwd -echo root:x:0: > etc/group +echo root:x:0:0:root:/:/bin/sh >etc/passwd +echo root:x:0: >etc/group ln -s lib lib64 ln -s bin sbin -cp /bin/busybox bin +cp "${BUSYBOX}" bin/busybox for name in $(busybox --list); do - ln -s busybox "bin/${name}" + [[ "${name}" = "busybox" ]] && continue + ln -s busybox "bin/${name}" done -cp /lib/x86_64-linux-gnu/lib{c,dl,nsl,nss_*,pthread,resolv}.so.* lib -cp /lib/x86_64-linux-gnu/ld-linux-x86-64.so.2 lib +cp /lib/x86_64-linux-gnu/lib{c,dl,nsl,nss_*,pthread,resolv}.so.* lib 2>/dev/null || true +cp /lib/x86_64-linux-gnu/ld-linux-x86-64.so.2 lib 2>/dev/null || true mksquashfs "${TMP}/root" "/mnt/out/layer.squashfs" -noappend diff --git a/builder/img/ubuntu-bionic.sh b/builder/img/ubuntu-bionic.sh old mode 100644 new mode 100755 index 40ad35db5d..2891430038 --- a/builder/img/ubuntu-bionic.sh +++ b/builder/img/ubuntu-bionic.sh @@ -1,21 +1,55 @@ #!/bin/bash +# +# Build an Ubuntu 18.04 (Bionic) rootfs as a squashfs layer. 
+# +# Requires debootstrap on the host: +# apt-get install -y debootstrap +# + +set -e TMP="$(mktemp --directory)" -URL="https://partner-images.canonical.com/core/bionic/20190621/ubuntu-bionic-core-cloudimg-amd64-root.tar.gz" -SHA="ed1753585d70724010e9ca26cf47337201ecc5c65c7251ca7a97b5d1c0ed6365" -curl -fSLo "${TMP}/ubuntu.tar.gz" "${URL}" -echo "${SHA} ${TMP}/ubuntu.tar.gz" | sha256sum -c - +cleanup() { + # Unmount bind mounts + umount "${TMP}/root/dev/pts" 2>/dev/null || true + umount "${TMP}/root/dev" 2>/dev/null || true + umount "${TMP}/root/proc" 2>/dev/null || true + umount "${TMP}/root/sys" 2>/dev/null || true + # Clear resolv.conf + >"${TMP}/root/etc/resolv.conf" 2>/dev/null || true + rm -rf "${TMP}" +} +trap cleanup EXIT mkdir -p "${TMP}/root" -tar xf "${TMP}/ubuntu.tar.gz" -C "${TMP}/root" + +# Use debootstrap to create a minimal Bionic rootfs +if command -v debootstrap >/dev/null 2>&1; then + echo "Building Ubuntu Bionic rootfs via debootstrap..." + debootstrap --variant=minbase --arch=amd64 bionic "${TMP}/root" http://archive.ubuntu.com/ubuntu +else + # Fallback: download the minimal cloud image root tarball + echo "Building Ubuntu Bionic rootfs via cloud image download..." 
+ URL="https://cloud-images.ubuntu.com/minimal/releases/bionic/release/ubuntu-18.04-minimal-cloudimg-amd64-root.tar.xz" + curl -fSLo "${TMP}/ubuntu.tar.xz" "${URL}" + tar xf "${TMP}/ubuntu.tar.xz" -C "${TMP}/root" +fi + +# Set up bind mounts for chroot +mount --bind /dev "${TMP}/root/dev" +mount --bind /dev/pts "${TMP}/root/dev/pts" +mount -t proc proc "${TMP}/root/proc" +mount -t sysfs sysfs "${TMP}/root/sys" cp "/etc/resolv.conf" "${TMP}/root/etc/resolv.conf" -cleanup() { - >"${TMP}/root/etc/resolv.conf" -} -trap cleanup EXIT -chroot "${TMP}/root" bash -e < "builder/ubuntu-setup.sh" +chroot "${TMP}/root" bash -e <"builder/ubuntu-setup.sh" + +# Unmount before creating squashfs +umount "${TMP}/root/sys" 2>/dev/null || true +umount "${TMP}/root/proc" 2>/dev/null || true +umount "${TMP}/root/dev/pts" 2>/dev/null || true +umount "${TMP}/root/dev" 2>/dev/null || true mksquashfs "${TMP}/root" "/mnt/out/layer.squashfs" -noappend diff --git a/builder/manifest.json b/builder/manifest.json old mode 100644 new mode 100755 index 430fcb2327..9be5f61d8b --- a/builder/manifest.json +++ b/builder/manifest.json @@ -1,99 +1,162 @@ { "tuf": { - "repository": "https://dl.flynn.io/tuf", + "repository": "https://consolving.github.io/flynn-tuf-repo/repository", "root_keys": [ - {"keytype":"ed25519","keyval":{"public":"6cfda23aa48f530aebd5b9c01030d06d02f25876b5508d681675270027af4731"}} + { + "keytype": "ed25519", + "keyval": { + "public": "cddd70123e8303002498fc7f9f8c1fff87cdb321444c67b1ba9190d0394f6134" + } + }, + { + "keytype": "ed25519", + "keyval": { + "public": "22f67c648aaade626bbd8a85aac1e02d77cb476488a967b1ece129c701ed314c" + } + }, + { + "keytype": "ed25519", + "keyval": { + "public": "29e3309c3ed70d4927b2f55adc7ac5f5d547731fb62c5f197c02d0c1c2abac21" + } + }, + { + "keytype": "ed25519", + "keyval": { + "public": "d77ef5acdccc6ffba650edd4bc4d292014e7afbd1f3d5af945395e587c1430b1" + } + } ] }, "base_layer": { - "type": "squashfs", - "id": 
"4b6eb1c283dac64590f32a38e61cc448fb3ca499946d41dba51edab97b46a10e", - "url": "https://dl.flynn.io/tuf/targets/layers/8804e92cba40018baec15a29dae84d210c0fe57da12654aa53928fb4c4a2ff6a65158906d4f3d044a282ec61d59fd340694f48130023bce22243279a6af3d900.4b6eb1c283dac64590f32a38e61cc448fb3ca499946d41dba51edab97b46a10e.squashfs", - "size": 62631936, - "hashes": {"sha512_256": "066e1829132a8571b7a1c59e86b54340b22cfa47427725c322ca21ec63946ec5"} + "type": "squashfs", + "id": "4b6eb1c283dac64590f32a38e61cc448fb3ca499946d41dba51edab97b46a10e", + "url": "https://consolving.github.io/flynn-tuf-repo/repository/targets/layers/4b6eb1c283dac64590f32a38e61cc448fb3ca499946d41dba51edab97b46a10e.squashfs", + "size": 62631936, + "hashes": { + "sha512_256": "066e1829132a8571b7a1c59e86b54340b22cfa47427725c322ca21ec63946ec5" + } }, "images": [ { "id": "ubuntu-trusty", - "layers": [{ - "script": "builder/img/ubuntu-trusty.sh", - "inputs": ["builder/ubuntu-setup.sh"], - "limits": { "temp_disk": "2GB" }, - "linux_capabilities": ["CAP_SYS_ADMIN"] - }] + "layers": [ + { + "script": "builder/img/ubuntu-trusty.sh", + "inputs": [ + "builder/ubuntu-setup.sh" + ], + "limits": { + "temp_disk": "2GB" + }, + "linux_capabilities": [ + "CAP_SYS_ADMIN" + ] + } + ] }, { "id": "ubuntu-xenial", - "layers": [{ - "script": "builder/img/ubuntu-xenial.sh", - "inputs": ["builder/ubuntu-setup.sh"], - "limits": { "temp_disk": "2GB" }, - "linux_capabilities": ["CAP_SYS_ADMIN"] - }] + "layers": [ + { + "script": "builder/img/ubuntu-xenial.sh", + "inputs": [ + "builder/ubuntu-setup.sh" + ], + "limits": { + "temp_disk": "2GB" + }, + "linux_capabilities": [ + "CAP_SYS_ADMIN" + ] + } + ] }, { "id": "ubuntu-bionic", - "layers": [{ - "script": "builder/img/ubuntu-bionic.sh", - "inputs": ["builder/ubuntu-setup.sh"], - "limits": { "temp_disk": "2GB" }, - "linux_capabilities": ["CAP_SYS_ADMIN"] - }] + "layers": [ + { + "script": "builder/img/ubuntu-bionic.sh", + "inputs": [ + "builder/ubuntu-setup.sh" + ], + "limits": { + 
"temp_disk": "2GB" + }, + "linux_capabilities": [ + "CAP_SYS_ADMIN" + ] + } + ] }, { "id": "busybox", - "layers": [{ - "script": "builder/img/busybox.sh" - }] + "layers": [ + { + "script": "builder/img/busybox.sh" + } + ] }, { "id": "go", "base": "ubuntu-bionic", - "layers": [{ - "script": "builder/img/go.sh", - "inputs": ["builder/go-wrapper.sh"] - }] + "layers": [ + { + "script": "builder/img/go.sh", + "inputs": [ + "builder/go-wrapper.sh" + ] + } + ] }, { "id": "protoc", "base": "go", - "layers": [{ - "gobin": [ - "github.com/golang/protobuf/protoc-gen-go" - ], - "script": "builder/img/protoc.sh" - }] + "layers": [ + { + "gobin": [ + "github.com/golang/protobuf/protoc-gen-go" + ], + "script": "builder/img/protoc.sh" + } + ] }, { "id": "builder", "base": "go", - "layers": [{ - "gobuild": { - "builder": "/bin/flynn-builder" + "layers": [ + { + "gobuild": { + "builder": "/bin/flynn-builder" + } } - }] + ] }, { "id": "build-tools", "base": "busybox", - "layers": [{ - "gobin": [ - "github.com/flynn/go-tuf/cmd/tuf", - "github.com/flynn/go-tuf/cmd/tuf-client" - ] - }] + "layers": [ + { + "gobin": [ + "github.com/flynn/go-tuf/cmd/tuf", + "github.com/flynn/go-tuf/cmd/tuf-client" + ] + } + ] }, { "id": "discoverd", "base": "busybox", - "layers": [{ - "gobuild": { - "discoverd": "/bin/discoverd" - }, - "copy": { - "discoverd/start.sh": "/bin/start-discoverd" + "layers": [ + { + "gobuild": { + "discoverd": "/bin/discoverd" + }, + "copy": { + "discoverd/start.sh": "/bin/start-discoverd" + } } - }], + ], "entrypoint": { "args": [ "/bin/start-discoverd" @@ -103,12 +166,14 @@ { "id": "flannel", "base": "busybox", - "layers": [{ - "gobuild": { - "flannel": "/bin/flanneld", - "flannel/wrapper": "/bin/flannel-wrapper" + "layers": [ + { + "gobuild": { + "flannel": "/bin/flanneld", + "flannel/wrapper": "/bin/flannel-wrapper" + } } - }], + ], "entrypoint": { "args": [ "/bin/flannel-wrapper" @@ -122,7 +187,9 @@ { "name": "host-packages", "script": "host/img/packages.sh", - "limits": { 
"temp_disk": "4G" } + "limits": { + "temp_disk": "4G" + } }, { "name": "host-binaries", @@ -138,10 +205,10 @@ }, "copy": { "util/ca-certs/ca-certs.pem": "/etc/ssl/certs/ca-certs.pem", - "host/zfs-mknod.sh": "/usr/local/bin/zfs-mknod", - "host/udev.rules": "/lib/udev/rules.d/10-local.rules", - "host/start.sh": "/usr/local/bin/start-flynn-host.sh", - "host/cleanup.sh": "/usr/local/bin/cleanup-flynn-host.sh" + "host/zfs-mknod.sh": "/usr/local/bin/zfs-mknod", + "host/udev.rules": "/lib/udev/rules.d/10-local.rules", + "host/start.sh": "/usr/local/bin/start-flynn-host.sh", + "host/cleanup.sh": "/usr/local/bin/cleanup-flynn-host.sh" } } ], @@ -162,12 +229,12 @@ }, { "gobuild": { - "controller": "/bin/flynn-controller", + "controller": "/bin/flynn-controller", "controller/scheduler": "/bin/flynn-scheduler", - "controller/worker": "/bin/flynn-worker" + "controller/worker": "/bin/flynn-worker" }, "copy": { - "controller/start.sh": "/bin/start-flynn-controller", + "controller/start.sh": "/bin/start-flynn-controller", "util/ca-certs/ca-certs.pem": "/etc/ssl/certs/ca-certs.pem" }, "inputs": [ @@ -179,7 +246,9 @@ "mkdir -p /etc/flynn-controller", "cp -r schema /etc/flynn-controller/jsonschema" ], - "limits": { "temp_disk": "1G" } + "limits": { + "temp_disk": "1G" + } } ], "entrypoint": { @@ -191,11 +260,13 @@ { "id": "controller-examples", "base": "busybox", - "layers": [{ - "gobuild": { - "controller/examples": "/bin/flynn-controller-examples" + "layers": [ + { + "gobuild": { + "controller/examples": "/bin/flynn-controller-examples" + } } - }], + ], "entrypoint": { "args": [ "/bin/flynn-controller-examples" @@ -209,12 +280,14 @@ { "name": "postgres-packages", "script": "appliance/postgresql/img/packages.sh", - "limits": { "temp_disk": "4G" } + "limits": { + "temp_disk": "4G" + } }, { "name": "postgres-binaries", "gobuild": { - "appliance/postgresql/cmd/flynn-postgres": "/bin/flynn-postgres", + "appliance/postgresql/cmd/flynn-postgres": "/bin/flynn-postgres", 
"appliance/postgresql/cmd/flynn-postgres-api": "/bin/flynn-postgres-api" }, "copy": { @@ -235,12 +308,14 @@ { "name": "mariadb-packages", "script": "appliance/mariadb/img/packages.sh", - "limits": { "temp_disk": "4G" } + "limits": { + "temp_disk": "4G" + } }, { "name": "mariadb-binaries", "gobuild": { - "appliance/mariadb/cmd/flynn-mariadb": "/bin/flynn-mariadb", + "appliance/mariadb/cmd/flynn-mariadb": "/bin/flynn-mariadb", "appliance/mariadb/cmd/flynn-mariadb-api": "/bin/flynn-mariadb-api" }, "copy": { @@ -261,17 +336,19 @@ { "name": "mongodb-packages", "script": "appliance/mongodb/img/packages.sh", - "limits": { "temp_disk": "4G" } + "limits": { + "temp_disk": "4G" + } }, { "name": "mongodb-binaries", "gobuild": { - "appliance/mongodb/cmd/flynn-mongodb": "/bin/flynn-mongodb", + "appliance/mongodb/cmd/flynn-mongodb": "/bin/flynn-mongodb", "appliance/mongodb/cmd/flynn-mongodb-api": "/bin/flynn-mongodb-api" }, "copy": { - "appliance/mongodb/start.sh": "/bin/start-flynn-mongodb", - "appliance/mongodb/dump.sh": "/bin/dump-flynn-mongodb", + "appliance/mongodb/start.sh": "/bin/start-flynn-mongodb", + "appliance/mongodb/dump.sh": "/bin/dump-flynn-mongodb", "appliance/mongodb/restore.sh": "/bin/restore-flynn-mongodb" } } @@ -289,17 +366,19 @@ { "name": "redis-packages", "script": "appliance/redis/img/packages.sh", - "limits": { "temp_disk": "4G" } + "limits": { + "temp_disk": "4G" + } }, { "name": "redis-binaries", "gobuild": { - "appliance/redis/cmd/flynn-redis": "/bin/flynn-redis", + "appliance/redis/cmd/flynn-redis": "/bin/flynn-redis", "appliance/redis/cmd/flynn-redis-api": "/bin/flynn-redis-api" }, "copy": { - "appliance/redis/start.sh": "/bin/start-flynn-redis", - "appliance/redis/dump.sh": "/bin/dump-flynn-redis", + "appliance/redis/start.sh": "/bin/start-flynn-redis", + "appliance/redis/dump.sh": "/bin/dump-flynn-redis", "appliance/redis/restore.sh": "/bin/restore-flynn-redis" } } @@ -313,32 +392,39 @@ { "id": "blobstore", "base": "ubuntu-bionic", - "layers": [{ 
- "gobuild": { - "blobstore": "/bin/flynn-blobstore" - }, - "copy": { - "util/ca-certs/ca-certs.pem": "/etc/ssl/certs/ca-certs.pem" - }, - "limits": { "temp_disk": "2G" } - }], + "layers": [ + { + "gobuild": { + "blobstore": "/bin/flynn-blobstore" + }, + "copy": { + "util/ca-certs/ca-certs.pem": "/etc/ssl/certs/ca-certs.pem" + }, + "limits": { + "temp_disk": "2G" + } + } + ], "entrypoint": { "args": [ - "/bin/flynn-blobstore", "server" + "/bin/flynn-blobstore", + "server" ] } }, { "id": "router", "base": "busybox", - "layers": [{ - "gobuild": { - "router": "/bin/flynn-router" - }, - "copy": { - "util/ca-certs/ca-certs.pem": "/etc/ssl/certs/ca-certs.pem" + "layers": [ + { + "gobuild": { + "router": "/bin/flynn-router" + }, + "copy": { + "util/ca-certs/ca-certs.pem": "/etc/ssl/certs/ca-certs.pem" + } } - }], + ], "entrypoint": { "args": [ "/bin/flynn-router" @@ -352,12 +438,14 @@ { "name": "gitreceive-packages", "script": "gitreceive/img/packages.sh", - "limits": { "temp_disk": "4G" } + "limits": { + "temp_disk": "4G" + } }, { "name": "gitreceive-binaries", "gobuild": { - "gitreceive": "/bin/gitreceived", + "gitreceive": "/bin/gitreceived", "gitreceive/receiver": "/bin/flynn-receiver" }, "copy": { @@ -374,11 +462,13 @@ { "id": "tarreceive", "base": "ubuntu-bionic", - "layers": [{ - "gobuild": { - "tarreceive": "/bin/tarreceive" + "layers": [ + { + "gobuild": { + "tarreceive": "/bin/tarreceive" + } } - }], + ], "entrypoint": { "args": [ "/bin/tarreceive" @@ -388,11 +478,13 @@ { "id": "logaggregator", "base": "busybox", - "layers": [{ - "gobuild": { - "logaggregator": "/bin/logaggregator" + "layers": [ + { + "gobuild": { + "logaggregator": "/bin/logaggregator" + } } - }], + ], "entrypoint": { "args": [ "/bin/logaggregator" @@ -406,12 +498,14 @@ { "name": "taffy-packages", "script": "gitreceive/img/packages.sh", - "limits": { "temp_disk": "4G" } + "limits": { + "temp_disk": "4G" + } }, { "name": "taffy-binaries", "gobuild": { - "taffy": "/bin/taffy", + "taffy": 
"/bin/taffy", "gitreceive/receiver": "/bin/flynn-receiver" } } @@ -425,26 +519,38 @@ { "id": "heroku-18", "base": "ubuntu-bionic", - "layers": [{ - "script": "builder/img/heroku-18.sh", - "limits": { "temp_disk": "2G" } - }] + "layers": [ + { + "script": "builder/img/heroku-18.sh", + "limits": { + "temp_disk": "2G" + } + } + ] }, { "id": "heroku-18-build", "base": "heroku-18", - "layers": [{ - "script": "builder/img/heroku-18-build.sh", - "limits": { "temp_disk": "2G" } - }] + "layers": [ + { + "script": "builder/img/heroku-18-build.sh", + "limits": { + "temp_disk": "2G" + } + } + ] }, { "id": "cedar-14", "base": "ubuntu-trusty", - "layers": [{ - "script": "builder/img/cedar-14.sh", - "limits": { "temp_disk": "2G" } - }] + "layers": [ + { + "script": "builder/img/cedar-14.sh", + "limits": { + "temp_disk": "2G" + } + } + ] }, { "id": "slugbuilder-18", @@ -466,7 +572,7 @@ }, "copy": { "slugbuilder/convert-legacy-slug.sh": "/bin/convert-legacy-slug.sh", - "slugbuilder/builder/build.sh": "/builder/build.sh", + "slugbuilder/builder/build.sh": "/builder/build.sh", "slugbuilder/builder/create-user.sh": "/builder/create-user.sh" } } @@ -497,7 +603,7 @@ }, "copy": { "slugbuilder/convert-legacy-slug.sh": "/bin/convert-legacy-slug.sh", - "slugbuilder/builder/build.sh": "/builder/build.sh", + "slugbuilder/builder/build.sh": "/builder/build.sh", "slugbuilder/builder/create-user.sh": "/builder/create-user.sh" } } @@ -511,11 +617,13 @@ { "id": "slugrunner-18", "base": "heroku-18", - "layers": [{ - "copy": { - "slugrunner/runner/init": "/runner/init" + "layers": [ + { + "copy": { + "slugrunner/runner/init": "/runner/init" + } } - }], + ], "entrypoint": { "args": [ "/runner/init" @@ -525,11 +633,13 @@ { "id": "slugrunner-14", "base": "cedar-14", - "layers": [{ - "copy": { - "slugrunner/runner/init": "/runner/init" + "layers": [ + { + "copy": { + "slugrunner/runner/init": "/runner/init" + } } - }], + ], "entrypoint": { "args": [ "/runner/init" @@ -539,11 +649,13 @@ { "id": "status", 
"base": "busybox", - "layers": [{ - "gobuild": { - "status": "/bin/flynn-status" + "layers": [ + { + "gobuild": { + "status": "/bin/flynn-status" + } } - }], + ], "entrypoint": { "args": [ "/bin/flynn-status" @@ -553,11 +665,13 @@ { "id": "updater", "base": "busybox", - "layers": [{ - "gobuild": { - "updater": "/bin/updater" + "layers": [ + { + "gobuild": { + "updater": "/bin/updater" + } } - }], + ], "entrypoint": { "args": [ "/bin/updater" @@ -567,40 +681,48 @@ { "id": "release", "base": "busybox", - "layers": [{ - "gobuild": { - "util/release": "/bin/flynn-release" + "layers": [ + { + "gobuild": { + "util/release": "/bin/flynn-release" + } } - }] - }, - { - "id": "cli-linux-amd64", - "base": "busybox", - "env": { "GOOS": "linux", "GOARCH": "amd64"}, - "template": "cli" + ] }, { - "id": "cli-linux-386", + "id": "cli-linux-aarch64", "base": "busybox", - "env": { "GOOS": "linux", "GOARCH": "386"}, + "env": { + "GOOS": "linux", + "GOARCH": "arm64" + }, "template": "cli" }, { "id": "cli-darwin-amd64", "base": "busybox", - "env": { "GOOS": "darwin", "GOARCH": "amd64"}, + "env": { + "GOOS": "darwin", + "GOARCH": "amd64" + }, "template": "cli" }, { "id": "cli-windows-amd64", "base": "busybox", - "env": { "GOOS": "windows", "GOARCH": "amd64"}, + "env": { + "GOOS": "windows", + "GOARCH": "amd64" + }, "template": "cli" }, { "id": "cli-windows-386", "base": "busybox", - "env": { "GOOS": "windows", "GOARCH": "386"}, + "env": { + "GOOS": "windows", + "GOARCH": "386" + }, "template": "cli" }, { @@ -610,28 +732,30 @@ { "name": "test-packages", "script": "test/img/packages.sh", - "limits": { "temp_disk": "4G" } + "limits": { + "temp_disk": "4G" + } }, { "name": "test-binaries", "cgobuild": { - "test": "/bin/flynn-test", - "test/runner": "/bin/flynn-test-runner" + "test": "/bin/flynn-test", + "test/runner": "/bin/flynn-test-runner" }, "gobuild": { "test/util/file-server": "/bin/flynn-test-file-server" }, "copy": { - "test/run.sh": "/bin/run-flynn-test.sh", - "test/vm.sh": 
"/bin/run-vm.sh", + "test/run.sh": "/bin/run-flynn-test.sh", + "test/vm.sh": "/bin/run-vm.sh", "test/scripts/start.sh": "/test/bin/start-runner.sh", - "test/rootfs/build.sh": "/test/rootfs/build.sh", - "test/rootfs/setup.sh": "/test/rootfs/setup.sh", + "test/rootfs/build.sh": "/test/rootfs/build.sh", + "test/rootfs/setup.sh": "/test/rootfs/setup.sh", "test/runner/assets/build-log.html": "/test/assets/", - "test/runner/assets/build-log.js": "/test/assets/", - "test/runner/assets/builds.js": "/test/assets/", - "test/runner/assets/index.html": "/test/assets/", - "test/runner/assets/style.css": "/test/assets/", + "test/runner/assets/build-log.js": "/test/assets/", + "test/runner/assets/builds.js": "/test/assets/", + "test/runner/assets/index.html": "/test/assets/", + "test/runner/assets/style.css": "/test/assets/", "test/runner/assets/ansi_up/ansi_up.js": "/test/assets/ansi_up/" } } @@ -654,14 +778,14 @@ { "name": "test-apps-binaries", "gobuild": { - "test/apps/echoer": "/bin/echoer", - "test/apps/ping": "/bin/pingserv", - "test/apps/signal": "/bin/signal", - "test/apps/ish": "/bin/ish", + "test/apps/echoer": "/bin/echoer", + "test/apps/ping": "/bin/pingserv", + "test/apps/signal": "/bin/signal", + "test/apps/ish": "/bin/ish", "test/apps/partial-logger": "/bin/partial-logger", - "test/apps/http-blocker": "/bin/http-blocker", - "test/apps/oom": "/bin/oom", - "test/apps/proxy": "/bin/proxy" + "test/apps/http-blocker": "/bin/http-blocker", + "test/apps/oom": "/bin/oom", + "test/apps/proxy": "/bin/proxy" } } ] @@ -669,20 +793,22 @@ ], "templates": { "cli": { - "layers": [{ - "env": { - "FLYNN_VERSION": "{{ .Version }}", - "TUF_ROOT_KEYS": "{{ .TUFRootKeys }}", - "TUF_REPOSITORY": "{{ .TUFRepository }}" - }, - "gobuild": { - "cli": "/bin/flynn-${GOOS}-${GOARCH}" + "layers": [ + { + "env": { + "FLYNN_VERSION": "{{ .Version }}", + "TUF_ROOT_KEYS": "{{ .TUFRootKeys }}", + "TUF_REPOSITORY": "{{ .TUFRepository }}" + }, + "gobuild": { + "cli": "/bin/flynn-${GOOS}-${GOARCH}" + } } 
- }] + ] } }, "manifests": { - "bootstrap/manifest_template.json": "bootstrap-manifest.json", + "bootstrap/manifest_template.json": "bootstrap-manifest.json", "util/release/images_template.json": "images.json" } -} +} \ No newline at end of file diff --git a/cli/update.go b/cli/update.go index 22a8291ce1..f4ac6789e2 100644 --- a/cli/update.go +++ b/cli/update.go @@ -133,7 +133,7 @@ func (u *Updater) updateTUFClient(client *tuf.Client) error { return nil } if err == tuf.ErrNoRootKeys { - if err := client.Init(tufconfig.RootKeys, len(tufconfig.RootKeys)); err != nil { + if err := client.Init(tufconfig.RootKeys, 1); err != nil { return err } return u.updateTUFClient(client) diff --git a/controller/examples/examples.go b/controller/examples/examples.go index 89587bfca6..4a526394a2 100644 --- a/controller/examples/examples.go +++ b/controller/examples/examples.go @@ -329,7 +329,7 @@ func (e *generator) createArtifact() { RawManifest: manifest.RawManifest(), Hashes: manifest.Hashes(), Size: int64(len(manifest.RawManifest())), - LayerURLTemplate: "https://dl.flynn.io/tuf?target=/layers/{id}.squashfs", + LayerURLTemplate: "https://consolving.github.io/flynn-tuf-repo/repository/targets/layers/{id}.squashfs", } err := e.client.CreateArtifact(artifact) if err != nil { diff --git a/controller/scheduler/telemetry.go b/controller/scheduler/telemetry.go index 17d26ef166..762a23c82b 100644 --- a/controller/scheduler/telemetry.go +++ b/controller/scheduler/telemetry.go @@ -11,7 +11,7 @@ import ( "github.com/flynn/flynn/pkg/version" ) -var telemetryURL = "https://dl.flynn.io/measure/scheduler" +var telemetryURL = "" // original dl.flynn.io endpoint is offline; set TELEMETRY_URL env to re-enable func init() { if u := os.Getenv("TELEMETRY_URL"); u != "" { @@ -35,7 +35,7 @@ func (s *Scheduler) triggerSendTelemetry() { } func (s *Scheduler) SendTelemetry() { - if !s.IsLeader() || os.Getenv("TELEMETRY_DISABLED") == "true" { + if telemetryURL == "" || !s.IsLeader() || 
os.Getenv("TELEMETRY_DISABLED") == "true" { return } diff --git a/host/cli/download.go b/host/cli/download.go index 825a3bc9e9..cc7a5c3340 100644 --- a/host/cli/download.go +++ b/host/cli/download.go @@ -29,7 +29,7 @@ func init() { usage: flynn-host download [--repository=] [--tuf-db=] [--config-dir=] [--bin-dir=] [--volpath=] Options: - -r --repository= TUF repository URI [default: https://dl.flynn.io/tuf] + -r --repository= TUF repository URI [default: https://consolving.github.io/flynn-tuf-repo/repository] -t --tuf-db= local TUF file [default: /etc/flynn/tuf.db] -c --config-dir= config directory [default: /etc/flynn] -b --bin-dir= binary directory [default: /usr/local/bin] @@ -149,7 +149,7 @@ func updateTUFClient(client *tuf.Client) error { return nil } if err == tuf.ErrNoRootKeys { - if err := client.Init(tufconfig.RootKeys, len(tufconfig.RootKeys)); err != nil { + if err := client.Init(tufconfig.RootKeys, 1); err != nil { return err } return updateTUFClient(client) diff --git a/host/cli/update.go b/host/cli/update.go index 1017152d57..2c731c0a12 100644 --- a/host/cli/update.go +++ b/host/cli/update.go @@ -32,7 +32,7 @@ func init() { usage: flynn-host update [options] Options: - -r --repository= image repository URI [default: https://dl.flynn.io/tuf] + -r --repository= image repository URI [default: https://consolving.github.io/flynn-tuf-repo/repository] -t --tuf-db= local TUF file [default: /etc/flynn/tuf.db] -b --bin-dir= directory to download binaries to [default: /usr/local/bin] -c --config-dir= directory to download config files to [default: /etc/flynn] diff --git a/pkg/tufconfig/tufconfig.go b/pkg/tufconfig/tufconfig.go old mode 100644 new mode 100755 index b3558b2e3f..aa5d4b92df --- a/pkg/tufconfig/tufconfig.go +++ b/pkg/tufconfig/tufconfig.go @@ -8,8 +8,8 @@ import ( var ( // these constants are overridden at build time (see builder/go-wrapper.sh) - RootKeysJSON = 
`[{"keytype":"ed25519","keyval":{"public":"6cfda23aa48f530aebd5b9c01030d06d02f25876b5508d681675270027af4731"}}]` - Repository = "https://dl.flynn.io/tuf" + RootKeysJSON = `[{"keytype":"ed25519","keyval":{"public":"cddd70123e8303002498fc7f9f8c1fff87cdb321444c67b1ba9190d0394f6134"}},{"keytype":"ed25519","keyval":{"public":"22f67c648aaade626bbd8a85aac1e02d77cb476488a967b1ece129c701ed314c"}},{"keytype":"ed25519","keyval":{"public":"29e3309c3ed70d4927b2f55adc7ac5f5d547731fb62c5f197c02d0c1c2abac21"}},{"keytype":"ed25519","keyval":{"public":"d77ef5acdccc6ffba650edd4bc4d292014e7afbd1f3d5af945395e587c1430b1"}}]` + Repository = "https://consolving.github.io/flynn-tuf-repo/repository" ) var RootKeys []*data.Key diff --git a/script/bootstrap-build b/script/bootstrap-build new file mode 100755 index 0000000000..2f3e653907 --- /dev/null +++ b/script/bootstrap-build @@ -0,0 +1,289 @@ +#!/bin/bash +# +# bootstrap-build builds all core Flynn components from source without +# requiring a running Flynn cluster or access to the TUF repository. +# +# This script breaks the original chicken-and-egg problem where building +# Flynn required a running Flynn cluster. +# +# Usage: +# script/bootstrap-build [--version VERSION] +# +# Environment: +# FLYNN_TUF_REPOSITORY TUF repository URL [default: https://consolving.github.io/flynn-tuf-repo/repository] +# +# Requirements: +# - Go 1.13+ with CGO_ENABLED=1 +# - libseccomp-dev, build-essential (for flynn-host CGO) +# - Linux x86_64 (flynn-host uses libcontainer) + +set -e + +ROOT="$(cd "$(dirname "$0")/.." && pwd)" +source "${ROOT}/script/lib/ui.sh" + +usage() { + cat <<USAGE >&2 +usage: $0 [options] + +Build core Flynn components from source (no cluster required). 
+ +OPTIONS: + -h, --help Show this message + -x, --version=VERSION Version string to embed [default: dev] + --output-dir=DIR Directory for built binaries [default: build/bin] +USAGE +} + +main() { + local version="dev" + local output_dir="${ROOT}/build/bin" + + while true; do + case "$1" in + -h | --help) + usage + exit 0 + ;; + -x | --version) + if [[ -z "$2" ]]; then + fail "--version flag requires an argument" + fi + version="$2" + shift 2 + ;; + --output-dir) + if [[ -z "$2" ]]; then + fail "--output-dir flag requires an argument" + fi + output_dir="$2" + shift 2 + ;; + *) + break + ;; + esac + done + + local tuf_repo="${FLYNN_TUF_REPOSITORY:-"https://consolving.github.io/flynn-tuf-repo/repository"}" + + # Extract TUF root keys from builder/manifest.json (compact JSON, no spaces) + local tuf_root_keys + tuf_root_keys="$(python3 -c "import json; m=json.load(open('${ROOT}/builder/manifest.json')); print(json.dumps(m['tuf']['root_keys'],separators=(',',':')))")" + if [[ -z "${tuf_root_keys}" ]]; then + fail "unable to extract TUF root keys from builder/manifest.json" + fi + + local go_ldflags="-X github.com/flynn/flynn/pkg/version.version=${version}" + go_ldflags="${go_ldflags} -X github.com/flynn/flynn/pkg/tufconfig.Repository=${tuf_repo}" + go_ldflags="${go_ldflags} -X 'github.com/flynn/flynn/pkg/tufconfig.RootKeysJSON=${tuf_root_keys}'" + + mkdir -p "${output_dir}" + + info "building flynn-host (CGO enabled)" + CGO_ENABLED=1 GO111MODULE=on go build -mod=vendor \ + -ldflags "${go_ldflags}" \ + -o "${output_dir}/flynn-host" ./host + chmod +x "${output_dir}/flynn-host" + + info "building flynn-init" + GO111MODULE=on go build -mod=vendor \ + -ldflags "${go_ldflags}" \ + -o "${output_dir}/flynn-init" ./host/flynn-init + chmod +x "${output_dir}/flynn-init" + + info "building flynn-controller" + GO111MODULE=on go build -mod=vendor \ + -ldflags "${go_ldflags}" \ + -o "${output_dir}/flynn-controller" ./controller + chmod +x "${output_dir}/flynn-controller" + + info 
"building flynn-scheduler" + GO111MODULE=on go build -mod=vendor \ + -ldflags "${go_ldflags}" \ + -o "${output_dir}/flynn-scheduler" ./controller/scheduler + chmod +x "${output_dir}/flynn-scheduler" + + info "building flynn-worker" + GO111MODULE=on go build -mod=vendor \ + -ldflags "${go_ldflags}" \ + -o "${output_dir}/flynn-worker" ./controller/worker + chmod +x "${output_dir}/flynn-worker" + + info "building flynn CLI (linux-amd64)" + GOOS=linux GOARCH=amd64 GO111MODULE=on go build -mod=vendor \ + -ldflags "${go_ldflags}" \ + -o "${output_dir}/flynn-linux-amd64" ./cli + chmod +x "${output_dir}/flynn-linux-amd64" + ln -nfs "flynn-linux-amd64" "${output_dir}/flynn" + + info "building discoverd" + GO111MODULE=on go build -mod=vendor \ + -ldflags "${go_ldflags}" \ + -o "${output_dir}/discoverd" ./discoverd + chmod +x "${output_dir}/discoverd" + + info "building flynn-router" + GO111MODULE=on go build -mod=vendor \ + -ldflags "${go_ldflags}" \ + -o "${output_dir}/flynn-router" ./router + chmod +x "${output_dir}/flynn-router" + + info "building flynn-blobstore" + GO111MODULE=on go build -mod=vendor \ + -ldflags "${go_ldflags}" \ + -o "${output_dir}/flynn-blobstore" ./blobstore + chmod +x "${output_dir}/flynn-blobstore" + + info "building logaggregator" + GO111MODULE=on go build -mod=vendor \ + -ldflags "${go_ldflags}" \ + -o "${output_dir}/logaggregator" ./logaggregator + chmod +x "${output_dir}/logaggregator" + + # --- Additional component binaries for image building --- + + info "building flanneld" + GO111MODULE=on go build -mod=vendor \ + -ldflags "${go_ldflags}" \ + -o "${output_dir}/flanneld" ./flannel + chmod +x "${output_dir}/flanneld" + + info "building flannel-wrapper" + GO111MODULE=on go build -mod=vendor \ + -ldflags "${go_ldflags}" \ + -o "${output_dir}/flannel-wrapper" ./flannel/wrapper + chmod +x "${output_dir}/flannel-wrapper" + + info "building flynn-postgres" + GO111MODULE=on go build -mod=vendor \ + -ldflags "${go_ldflags}" \ + -o 
"${output_dir}/flynn-postgres" ./appliance/postgresql/cmd/flynn-postgres + chmod +x "${output_dir}/flynn-postgres" + + info "building flynn-postgres-api" + GO111MODULE=on go build -mod=vendor \ + -ldflags "${go_ldflags}" \ + -o "${output_dir}/flynn-postgres-api" ./appliance/postgresql/cmd/flynn-postgres-api + chmod +x "${output_dir}/flynn-postgres-api" + + info "building flynn-redis" + GO111MODULE=on go build -mod=vendor \ + -ldflags "${go_ldflags}" \ + -o "${output_dir}/flynn-redis" ./appliance/redis/cmd/flynn-redis + chmod +x "${output_dir}/flynn-redis" + + info "building flynn-redis-api" + GO111MODULE=on go build -mod=vendor \ + -ldflags "${go_ldflags}" \ + -o "${output_dir}/flynn-redis-api" ./appliance/redis/cmd/flynn-redis-api + chmod +x "${output_dir}/flynn-redis-api" + + info "building flynn-mariadb" + GO111MODULE=on go build -mod=vendor \ + -ldflags "${go_ldflags}" \ + -o "${output_dir}/flynn-mariadb" ./appliance/mariadb/cmd/flynn-mariadb + chmod +x "${output_dir}/flynn-mariadb" + + info "building flynn-mariadb-api" + GO111MODULE=on go build -mod=vendor \ + -ldflags "${go_ldflags}" \ + -o "${output_dir}/flynn-mariadb-api" ./appliance/mariadb/cmd/flynn-mariadb-api + chmod +x "${output_dir}/flynn-mariadb-api" + + info "building flynn-mongodb" + GO111MODULE=on go build -mod=vendor \ + -ldflags "${go_ldflags}" \ + -o "${output_dir}/flynn-mongodb" ./appliance/mongodb/cmd/flynn-mongodb + chmod +x "${output_dir}/flynn-mongodb" + + info "building flynn-mongodb-api" + GO111MODULE=on go build -mod=vendor \ + -ldflags "${go_ldflags}" \ + -o "${output_dir}/flynn-mongodb-api" ./appliance/mongodb/cmd/flynn-mongodb-api + chmod +x "${output_dir}/flynn-mongodb-api" + + info "building gitreceived" + GO111MODULE=on go build -mod=vendor \ + -ldflags "${go_ldflags}" \ + -o "${output_dir}/gitreceived" ./gitreceive + chmod +x "${output_dir}/gitreceived" + + info "building flynn-receiver" + GO111MODULE=on go build -mod=vendor \ + -ldflags "${go_ldflags}" \ + -o 
"${output_dir}/flynn-receiver" ./gitreceive/receiver + chmod +x "${output_dir}/flynn-receiver" + + info "building tarreceive" + GO111MODULE=on go build -mod=vendor \ + -ldflags "${go_ldflags}" \ + -o "${output_dir}/tarreceive" ./tarreceive + chmod +x "${output_dir}/tarreceive" + + info "building taffy" + GO111MODULE=on go build -mod=vendor \ + -ldflags "${go_ldflags}" \ + -o "${output_dir}/taffy" ./taffy + chmod +x "${output_dir}/taffy" + + info "building create-artifact (slugbuilder)" + GO111MODULE=on go build -mod=vendor \ + -ldflags "${go_ldflags}" \ + -o "${output_dir}/create-artifact" ./slugbuilder/artifact + chmod +x "${output_dir}/create-artifact" + + info "building slug-migrator (slugbuilder)" + GO111MODULE=on go build -mod=vendor \ + -ldflags "${go_ldflags}" \ + -o "${output_dir}/slug-migrator" ./slugbuilder/migrator + chmod +x "${output_dir}/slug-migrator" + + info "building flynn-status" + GO111MODULE=on go build -mod=vendor \ + -ldflags "${go_ldflags}" \ + -o "${output_dir}/flynn-status" ./status + chmod +x "${output_dir}/flynn-status" + + info "building updater" + GO111MODULE=on go build -mod=vendor \ + -ldflags "${go_ldflags}" \ + -o "${output_dir}/updater" ./updater + chmod +x "${output_dir}/updater" + + info "building flynn-builder" + GO111MODULE=on go build -mod=vendor \ + -ldflags "${go_ldflags}" \ + -o "${output_dir}/flynn-builder" ./builder + chmod +x "${output_dir}/flynn-builder" + + info "building flynn-controller-examples" + GO111MODULE=on go build -mod=vendor \ + -ldflags "${go_ldflags}" \ + -o "${output_dir}/flynn-controller-examples" ./controller/examples + chmod +x "${output_dir}/flynn-controller-examples" + + info "building flynn-release" + GO111MODULE=on go build -mod=vendor \ + -ldflags "${go_ldflags}" \ + -o "${output_dir}/flynn-release" ./util/release + chmod +x "${output_dir}/flynn-release" + + # --- Build the export-tuf tool --- + info "building export-tuf tool" + GO111MODULE=on go build -mod=vendor \ + -o 
"${output_dir}/export-tuf" ./script/export-tuf + chmod +x "${output_dir}/export-tuf" + + # Create gzipped flynn-host for bootstrap compatibility + info "creating gzipped flynn-host archive" + gzip -c "${output_dir}/flynn-host" > "${output_dir}/flynn-host.gz" + + info "" + info "bootstrap build complete!" + info "Total binaries built: $(ls -1 "${output_dir}/" | grep -v '.gz$' | grep -v '^flynn$' | wc -l)" + info "binaries are in ${output_dir}/" + ls -la "${output_dir}/" +} + +main "$@" diff --git a/script/build-flynn b/script/build-flynn index f35675c84c..79ec4313ef 100755 --- a/script/build-flynn +++ b/script/build-flynn @@ -18,6 +18,7 @@ OPTIONS: -f, --force-bootstrap Force bootstrap --host=HOST Host to run the build on --git-version Generate the version using git status + --skip-download Skip TUF download step (use locally built binaries only) USAGE } @@ -26,6 +27,7 @@ main() { local version="dev" local verbose=false local force_bootstrap=false + local skip_download=false while true; do case "$1" in @@ -59,6 +61,10 @@ main() { version="$(git_version)" shift ;; + --skip-download) + skip_download=true + shift + ;; *) break ;; @@ -73,37 +79,66 @@ main() { local flynn_host="${ROOT}/build/bin/flynn-host" local builder_image="${ROOT}/build/image/builder.json" - # if building from clean, download binaries + images - if ! [[ -e "${builder_image}" ]]; then - local sha="de87e0d04d262a134fe7e7471e3c4c20e40ae229f20bf753a54a78656f72737e1a3ef3e8a165e8d4e39201f412173faf9bf4518c24678681ff5449e95fb82374" - local url="https://dl.flynn.io/tuf/targets/${sha}.flynn-host.gz" + # TUF repository configuration (new self-hosted repo) + local tuf_repo="${FLYNN_TUF_REPOSITORY:-"https://consolving.github.io/flynn-tuf-repo/repository"}" - info "downloading flynn-host from ${url}" + # if building from clean, build flynn-host locally instead of downloading + # from the now-offline dl.flynn.io + if ! 
[[ -e "${builder_image}" ]]; then mkdir -p "${ROOT}/build/bin" - curl -fsSLo "${flynn_host}.gz" "${url}" - echo "${sha} ${flynn_host}.gz" | shasum -a "512" -c - - gunzip --force "${flynn_host}.gz" - chmod +x "${flynn_host}" - - # kill existing cluster to unlock the volume database - "${ROOT}/script/kill-flynn" - - info "getting nightly release version" - local dl_version="$(curl -fsSL "https://releases.flynn.io/api/channels" | jq -r '.[] | select(.name == "nightly") | .version')" - if [[ -z "${dl_version}" ]]; then - fail "unable to determine nightly release version" - fi - info "downloading binaries + images (${dl_version})" - mkdir -p "${ROOT}/build/manifests" - sudo FLYNN_VERSION="${dl_version}" "${flynn_host}" download \ - --tuf-db "/tmp/tuf.db" \ - --bin-dir "${ROOT}/build/bin" \ - --config-dir "${ROOT}/build/manifests" \ - --volpath "/var/lib/flynn/volumes-0" + # Build flynn-host from source if not already present + if ! [[ -e "${flynn_host}" ]]; then + info "building flynn-host from source (bootstrap)" + + # Extract TUF root keys from builder/manifest.json (compact JSON, no spaces) + local tuf_root_keys + tuf_root_keys="$(python3 -c "import json; m=json.load(open('${ROOT}/builder/manifest.json')); print(json.dumps(m['tuf']['root_keys'],separators=(',',':')))")" + if [[ -z "${tuf_root_keys}" ]]; then + fail "unable to extract TUF root keys from builder/manifest.json" + fi + + local go_ldflags="-X github.com/flynn/flynn/pkg/version.version=${version}" + go_ldflags="${go_ldflags} -X github.com/flynn/flynn/pkg/tufconfig.Repository=${tuf_repo}" + go_ldflags="${go_ldflags} -X 'github.com/flynn/flynn/pkg/tufconfig.RootKeysJSON=${tuf_root_keys}'" + + CGO_ENABLED=1 GO111MODULE=on go build -mod=vendor \ + -ldflags "${go_ldflags}" \ + -o "${flynn_host}" ./host + chmod +x "${flynn_host}" + + info "building flynn-init from source" + GO111MODULE=on go build -mod=vendor \ + -o "${ROOT}/build/bin/flynn-init" ./host/flynn-init + chmod +x "${ROOT}/build/bin/flynn-init" + + 
info "flynn-host bootstrap build complete" + else + info "using existing flynn-host at ${flynn_host}" + fi - mkdir -p "${ROOT}/build/image" - jq '.builder' "${ROOT}/build/manifests/images.${dl_version}.json" > "${builder_image}" + # Use FLYNN_VERSION env var or default to "dev" — no longer queries + # the now-offline releases.flynn.io channel API + local dl_version="${FLYNN_VERSION:-"${version}"}" + + if $skip_download; then + info "skipping TUF download (--skip-download), using locally built binaries only" + else + # kill existing cluster to unlock the volume database + "${ROOT}/script/kill-flynn" + + info "downloading binaries + images (${dl_version}) from ${tuf_repo}" + mkdir -p "${ROOT}/build/manifests" + sudo FLYNN_VERSION="${dl_version}" "${flynn_host}" download \ + --repository "${tuf_repo}" \ + --tuf-db "/tmp/tuf.db" \ + --bin-dir "${ROOT}/build/bin" \ + --config-dir "${ROOT}/build/manifests" \ + --volpath "/var/lib/flynn/volumes-0" + + mkdir -p "${ROOT}/build/image" + jq '.builder' "${ROOT}/build/manifests/images.${dl_version}.json" > "${builder_image}" + fi fi # bootstrap the cluster if not running @@ -120,7 +155,7 @@ main() { args+=("--tuf-db=/tmp/tuf.db") sudo mkdir -p "/var/lib/flynn/layer-cache" - "${flynn_host}" run \ + sudo "${flynn_host}" run \ --host "${host}" \ --bind "${ROOT}:/src,/var/lib/flynn/layer-cache:/var/lib/flynn/layer-cache" \ --limits "temp_disk=4G,memory=4G" \ diff --git a/script/export-components b/script/export-components index bfa44fa38a..6b70857107 100755 --- a/script/export-components +++ b/script/export-components @@ -67,7 +67,7 @@ main() { info "running flynn-builder export" export DISCOVERD="${DISCOVERD:-"1.localflynn.com:1111"}" - "${ROOT}/build/bin/flynn-host" run \ + sudo "${ROOT}/build/bin/flynn-host" run \ --host "${host}" \ --bind "${ROOT}:${ROOT},${tuf_dir}:${tuf_dir},/var/lib/flynn/layer-cache:/var/lib/flynn/layer-cache" \ --limits "memory=2G,temp_disk=1G" \ diff --git a/script/export-tuf/main.go 
b/script/export-tuf/main.go new file mode 100755 index 0000000000..6940b25e12 --- /dev/null +++ b/script/export-tuf/main.go @@ -0,0 +1,1027 @@ +// export-tuf is a standalone tool for populating a TUF repository with Flynn +// artifacts, without requiring a running Flynn cluster. +// +// It reads pre-built binaries, constructs squashfs layers, builds ImageManifests +// and Artifacts, generates images.json and bootstrap-manifest.json, and stages +// everything as signed TUF targets. +// +// Usage: +// +// export-tuf --tuf-dir=/path/to/flynn-tuf-repo \ +// --build-dir=/path/to/build \ +// --source-dir=/path/to/flynn \ +// --version=v20250412.0 \ +// --layer-cache=/path/to/layer-cache +package main + +import ( + "bytes" + "compress/gzip" + "crypto/sha512" + "encoding/hex" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path" + "path/filepath" + "regexp" + "strings" + + ct "github.com/flynn/flynn/controller/types" + tuf "github.com/flynn/go-tuf" + tufdata "github.com/flynn/go-tuf/data" + "github.com/flynn/go-tuf/util" +) + +// imageSpec defines a Flynn component image: what base layers it inherits, +// what binaries and files it contains, and its entrypoint. 
+type imageSpec struct { + Name string + Base string // base image name for layer inheritance + Binaries map[string]string // source binary name -> dest path in squashfs + ExtraFiles map[string]string // source file (relative to source-dir) -> dest path + Entrypoint *ct.ImageEntrypoint +} + +func main() { + if err := run(); err != nil { + fmt.Fprintf(os.Stderr, "error: %s\n", err) + os.Exit(1) + } +} + +func run() error { + var ( + tufDir string + buildDir string + sourceDir string + version string + layerCache string + tufRepo string + ) + + // Parse flags manually (avoid adding dependencies) + for _, arg := range os.Args[1:] { + if strings.HasPrefix(arg, "--tuf-dir=") { + tufDir = strings.TrimPrefix(arg, "--tuf-dir=") + } else if strings.HasPrefix(arg, "--build-dir=") { + buildDir = strings.TrimPrefix(arg, "--build-dir=") + } else if strings.HasPrefix(arg, "--source-dir=") { + sourceDir = strings.TrimPrefix(arg, "--source-dir=") + } else if strings.HasPrefix(arg, "--version=") { + version = strings.TrimPrefix(arg, "--version=") + } else if strings.HasPrefix(arg, "--layer-cache=") { + layerCache = strings.TrimPrefix(arg, "--layer-cache=") + } else if strings.HasPrefix(arg, "--tuf-repo=") { + tufRepo = strings.TrimPrefix(arg, "--tuf-repo=") + } else if arg == "--help" || arg == "-h" { + printUsage() + return nil + } + } + + if tufDir == "" || buildDir == "" || sourceDir == "" || version == "" || layerCache == "" { + printUsage() + return fmt.Errorf("missing required flags") + } + if tufRepo == "" { + tufRepo = "https://consolving.github.io/flynn-tuf-repo/repository" + } + + binDir := filepath.Join(buildDir, "bin") + + // Verify directories exist + for _, dir := range []string{tufDir, binDir, sourceDir, layerCache} { + if _, err := os.Stat(dir); err != nil { + return fmt.Errorf("directory does not exist: %s", dir) + } + } + + e := &exporter{ + tufDir: tufDir, + buildDir: buildDir, + binDir: binDir, + sourceDir: sourceDir, + version: version, + layerCache: 
layerCache, + tufRepoURL: tufRepo, + layerURLTpl: fmt.Sprintf("%s?target=/layers/{id}.squashfs", tufRepo), + baseLayers: make(map[string][]*ct.ImageLayer), + artifacts: make(map[string]*ct.Artifact), + } + + return e.Run() +} + +func printUsage() { + fmt.Fprintf(os.Stderr, `Usage: export-tuf [options] + +Options: + --tuf-dir=DIR Path to TUF repository (with keys/ and repository/) + --build-dir=DIR Path to build output directory (with bin/) + --source-dir=DIR Path to Flynn source directory + --version=VERSION Version string (e.g., v20250412.0) + --layer-cache=DIR Path to layer cache directory + --tuf-repo=URL TUF repository URL [default: https://consolving.github.io/flynn-tuf-repo/repository] +`) +} + +type exporter struct { + tufDir string + buildDir string + binDir string + sourceDir string + version string + layerCache string + tufRepoURL string + layerURLTpl string + + baseLayers map[string][]*ct.ImageLayer // base image name -> accumulated layers + artifacts map[string]*ct.Artifact // image name -> artifact +} + +func (e *exporter) Run() error { + fmt.Printf("=== Flynn TUF Export ===\n") + fmt.Printf("Version: %s\n", e.version) + fmt.Printf("TUF dir: %s\n", e.tufDir) + fmt.Printf("Build dir: %s\n", e.buildDir) + fmt.Printf("Source dir: %s\n", e.sourceDir) + fmt.Printf("Layer cache: %s\n", e.layerCache) + fmt.Printf("TUF repo: %s\n", e.tufRepoURL) + fmt.Printf("\n") + + // Step 1: Build base OS squashfs layers + fmt.Printf("--- Step 1: Building base OS layers ---\n") + if err := e.buildBaseLayers(); err != nil { + return fmt.Errorf("building base layers: %s", err) + } + + // Step 2: Build component squashfs layers and construct artifacts + fmt.Printf("\n--- Step 2: Building component images ---\n") + if err := e.buildComponentImages(); err != nil { + return fmt.Errorf("building component images: %s", err) + } + + // Step 3: Generate images.json and bootstrap-manifest.json + fmt.Printf("\n--- Step 3: Generating manifests ---\n") + if err := e.generateManifests(); 
err != nil { + return fmt.Errorf("generating manifests: %s", err) + } + + // Step 4: Stage TUF targets and sign + fmt.Printf("\n--- Step 4: Staging TUF targets ---\n") + if err := e.stageTUFTargets(); err != nil { + return fmt.Errorf("staging TUF targets: %s", err) + } + + fmt.Printf("\n=== Export complete! ===\n") + return nil +} + +// ----- Step 1: Build base OS layers ----- + +func (e *exporter) buildBaseLayers() error { + // Build the base OS layers in dependency order. + // Each base layer becomes a squashfs file in the layer cache. + // + // Dependency tree: + // busybox (standalone) + // ubuntu-bionic (standalone) + // ubuntu-xenial (standalone, needed only for host image) + + bases := []struct { + name string + script string + }{ + {"busybox", "builder/img/busybox.sh"}, + {"ubuntu-bionic", "builder/img/ubuntu-bionic.sh"}, + // ubuntu-xenial needed for host image but host image also needs + // kernel packages which require a full apt - skip for now as + // the host image will use ubuntu-bionic in the simplified pipeline + } + + for _, base := range bases { + fmt.Printf(" Building base layer: %s\n", base.name) + layer, err := e.buildBaseLayer(base.name, base.script) + if err != nil { + return fmt.Errorf("building %s: %s", base.name, err) + } + e.baseLayers[base.name] = []*ct.ImageLayer{layer} + fmt.Printf(" -> %s: id=%s size=%d\n", base.name, layer.ID, layer.Length) + } + + return nil +} + +func (e *exporter) buildBaseLayer(name, scriptPath string) (*ct.ImageLayer, error) { + scriptAbs := filepath.Join(e.sourceDir, scriptPath) + + // Create a temporary output directory for the squashfs + outDir, err := ioutil.TempDir("", "flynn-base-"+name) + if err != nil { + return nil, err + } + defer os.RemoveAll(outDir) + + outFile := filepath.Join(outDir, "layer.squashfs") + + // The base image scripts expect to write to /mnt/out/layer.squashfs + // and be run from the flynn source root. We'll create a wrapper that + // sets up the environment. 
+ wrapper := fmt.Sprintf(`#!/bin/bash +set -e +mkdir -p /mnt/out +rm -f /mnt/out/layer.squashfs +cd %q +bash %q +cp /mnt/out/layer.squashfs %q +`, e.sourceDir, scriptAbs, outFile) + + cmd := exec.Command("bash", "-c", wrapper) + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + cmd.Dir = e.sourceDir + if err := cmd.Run(); err != nil { + return nil, fmt.Errorf("running %s: %s", scriptPath, err) + } + + return e.importSquashfs(outFile) +} + +// importSquashfs reads a squashfs file, computes its hash, copies it to the +// layer cache, and returns an ImageLayer. +func (e *exporter) importSquashfs(squashfsPath string) (*ct.ImageLayer, error) { + data, err := ioutil.ReadFile(squashfsPath) + if err != nil { + return nil, err + } + + digest := sha512.Sum512_256(data) + id := hex.EncodeToString(digest[:]) + + // Copy to layer cache + cachePath := filepath.Join(e.layerCache, id+".squashfs") + if err := ioutil.WriteFile(cachePath, data, 0644); err != nil { + return nil, err + } + + layer := &ct.ImageLayer{ + ID: id, + Type: ct.ImageLayerTypeSquashfs, + Length: int64(len(data)), + Hashes: map[string]string{ + "sha512_256": id, + }, + } + + // Write layer config JSON + configPath := filepath.Join(e.layerCache, id+".json") + configData, err := json.Marshal(layer) + if err != nil { + return nil, err + } + if err := ioutil.WriteFile(configPath, configData, 0644); err != nil { + return nil, err + } + + return layer, nil +} + +// ----- Step 2: Build component images ----- + +func (e *exporter) buildComponentImages() error { + specs := e.imageSpecs() + + for _, spec := range specs { + fmt.Printf(" Building image: %s\n", spec.Name) + if err := e.buildComponentImage(spec); err != nil { + return fmt.Errorf("building %s: %s", spec.Name, err) + } + } + + return nil +} + +func (e *exporter) buildComponentImage(spec imageSpec) error { + // Create a temporary directory with the component's file layout + tmpDir, err := ioutil.TempDir("", "flynn-img-"+spec.Name) + if err != nil { + return err 
+ } + defer os.RemoveAll(tmpDir) + + // Copy binaries + for srcName, destPath := range spec.Binaries { + srcPath := filepath.Join(e.binDir, srcName) + dst := filepath.Join(tmpDir, destPath) + if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil { + return err + } + if err := copyFile(srcPath, dst, 0755); err != nil { + return fmt.Errorf("copying binary %s: %s", srcName, err) + } + } + + // Copy extra files + for srcRel, destPath := range spec.ExtraFiles { + srcPath := filepath.Join(e.sourceDir, srcRel) + dst := filepath.Join(tmpDir, destPath) + if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil { + return err + } + if err := copyFile(srcPath, dst, 0755); err != nil { + return fmt.Errorf("copying extra file %s: %s", srcRel, err) + } + } + + // Create squashfs from the directory + squashfsPath := filepath.Join(tmpDir, "layer.squashfs") + cmd := exec.Command("mksquashfs", tmpDir, squashfsPath, "-noappend", + "-e", "layer.squashfs") // exclude the output file itself + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + if err := cmd.Run(); err != nil { + return fmt.Errorf("mksquashfs: %s", err) + } + + // Import the squashfs + componentLayer, err := e.importSquashfs(squashfsPath) + if err != nil { + return err + } + + // Build the ImageManifest with base layers + component layer + var allLayers []*ct.ImageLayer + if baseLayers, ok := e.baseLayers[spec.Base]; ok { + allLayers = append(allLayers, baseLayers...) 
+ } + allLayers = append(allLayers, componentLayer) + + manifest := ct.ImageManifest{ + Type: ct.ImageManifestTypeV1, + Rootfs: []*ct.ImageRootfs{{ + Platform: ct.DefaultImagePlatform, + Layers: allLayers, + }}, + } + if spec.Entrypoint != nil { + manifest.Entrypoints = map[string]*ct.ImageEntrypoint{ + "_default": spec.Entrypoint, + } + } + + imageURL := fmt.Sprintf("%s?name=%s&target=/images/%s.json", e.tufRepoURL, spec.Name, manifest.ID()) + artifact := &ct.Artifact{ + Type: ct.ArtifactTypeFlynn, + URI: imageURL, + RawManifest: manifest.RawManifest(), + Hashes: manifest.Hashes(), + Size: int64(len(manifest.RawManifest())), + LayerURLTemplate: e.layerURLTpl, + Meta: map[string]string{ + "manifest.id": manifest.ID(), + "flynn.component": spec.Name, + "flynn.system-image": "true", + }, + } + + e.artifacts[spec.Name] = artifact + fmt.Printf(" -> %s: manifest=%s layers=%d\n", spec.Name, manifest.ID()[:16], len(allLayers)) + + return nil +} + +// imageSpecs returns the specifications for all component images. 
+func (e *exporter) imageSpecs() []imageSpec { + return []imageSpec{ + // --- busybox-based images --- + { + Name: "discoverd", + Base: "busybox", + Binaries: map[string]string{ + "discoverd": "/bin/discoverd", + }, + ExtraFiles: map[string]string{ + "discoverd/start.sh": "/bin/start-discoverd", + }, + Entrypoint: &ct.ImageEntrypoint{ + Args: []string{"/bin/start-discoverd"}, + }, + }, + { + Name: "flannel", + Base: "busybox", + Binaries: map[string]string{ + "flanneld": "/bin/flanneld", + "flannel-wrapper": "/bin/flannel-wrapper", + }, + Entrypoint: &ct.ImageEntrypoint{ + Args: []string{"/bin/flannel-wrapper"}, + }, + }, + { + Name: "controller", + Base: "busybox", + Binaries: map[string]string{ + "flynn-controller": "/bin/flynn-controller", + "flynn-scheduler": "/bin/flynn-scheduler", + "flynn-worker": "/bin/flynn-worker", + }, + ExtraFiles: map[string]string{ + "controller/start.sh": "/bin/start-flynn-controller", + "util/ca-certs/ca-certs.pem": "/etc/ssl/certs/ca-certs.pem", + }, + Entrypoint: &ct.ImageEntrypoint{ + Args: []string{"/bin/start-flynn-controller"}, + }, + }, + { + Name: "router", + Base: "busybox", + Binaries: map[string]string{ + "flynn-router": "/bin/flynn-router", + }, + ExtraFiles: map[string]string{ + "util/ca-certs/ca-certs.pem": "/etc/ssl/certs/ca-certs.pem", + }, + Entrypoint: &ct.ImageEntrypoint{ + Args: []string{"/bin/flynn-router"}, + }, + }, + { + Name: "logaggregator", + Base: "busybox", + Binaries: map[string]string{ + "logaggregator": "/bin/logaggregator", + }, + Entrypoint: &ct.ImageEntrypoint{ + Args: []string{"/bin/logaggregator"}, + }, + }, + { + Name: "status", + Base: "busybox", + Binaries: map[string]string{ + "flynn-status": "/bin/flynn-status", + }, + Entrypoint: &ct.ImageEntrypoint{ + Args: []string{"/bin/flynn-status"}, + }, + }, + { + Name: "updater", + Base: "busybox", + Binaries: map[string]string{ + "updater": "/bin/updater", + }, + Entrypoint: &ct.ImageEntrypoint{ + Args: []string{"/bin/updater"}, + }, + }, + + // 
--- ubuntu-bionic-based images --- + { + Name: "blobstore", + Base: "ubuntu-bionic", + Binaries: map[string]string{ + "flynn-blobstore": "/bin/flynn-blobstore", + }, + ExtraFiles: map[string]string{ + "util/ca-certs/ca-certs.pem": "/etc/ssl/certs/ca-certs.pem", + }, + Entrypoint: &ct.ImageEntrypoint{ + Args: []string{"/bin/flynn-blobstore", "server"}, + }, + }, + { + Name: "host", + Base: "ubuntu-bionic", // simplified: using bionic instead of xenial + Binaries: map[string]string{ + "flynn-host": "/usr/local/bin/flynn-host", + "flynn-init": "/usr/local/bin/flynn-init", + }, + ExtraFiles: map[string]string{ + "util/ca-certs/ca-certs.pem": "/etc/ssl/certs/ca-certs.pem", + "host/zfs-mknod.sh": "/usr/local/bin/zfs-mknod", + "host/udev.rules": "/lib/udev/rules.d/10-local.rules", + "host/start.sh": "/usr/local/bin/start-flynn-host.sh", + "host/cleanup.sh": "/usr/local/bin/cleanup-flynn-host.sh", + }, + Entrypoint: &ct.ImageEntrypoint{ + Args: []string{"/usr/local/bin/start-flynn-host.sh"}, + }, + }, + { + Name: "postgres", + Base: "ubuntu-bionic", + Binaries: map[string]string{ + "flynn-postgres": "/bin/flynn-postgres", + "flynn-postgres-api": "/bin/flynn-postgres-api", + }, + ExtraFiles: map[string]string{ + "appliance/postgresql/start.sh": "/bin/start-flynn-postgres", + }, + Entrypoint: &ct.ImageEntrypoint{ + Args: []string{"/bin/start-flynn-postgres"}, + }, + }, + { + Name: "redis", + Base: "ubuntu-bionic", + Binaries: map[string]string{ + "flynn-redis": "/bin/flynn-redis", + "flynn-redis-api": "/bin/flynn-redis-api", + }, + ExtraFiles: map[string]string{ + "appliance/redis/start.sh": "/bin/start-flynn-redis", + "appliance/redis/dump.sh": "/bin/dump-flynn-redis", + "appliance/redis/restore.sh": "/bin/restore-flynn-redis", + }, + Entrypoint: &ct.ImageEntrypoint{ + Args: []string{"/bin/start-flynn-redis"}, + }, + }, + { + Name: "mariadb", + Base: "ubuntu-bionic", + Binaries: map[string]string{ + "flynn-mariadb": "/bin/flynn-mariadb", + "flynn-mariadb-api": 
"/bin/flynn-mariadb-api", + }, + ExtraFiles: map[string]string{ + "appliance/mariadb/start.sh": "/bin/start-flynn-mariadb", + }, + Entrypoint: &ct.ImageEntrypoint{ + Args: []string{"/bin/start-flynn-mariadb"}, + }, + }, + { + Name: "mongodb", + Base: "ubuntu-bionic", + Binaries: map[string]string{ + "flynn-mongodb": "/bin/flynn-mongodb", + "flynn-mongodb-api": "/bin/flynn-mongodb-api", + }, + ExtraFiles: map[string]string{ + "appliance/mongodb/start.sh": "/bin/start-flynn-mongodb", + "appliance/mongodb/dump.sh": "/bin/dump-flynn-mongodb", + "appliance/mongodb/restore.sh": "/bin/restore-flynn-mongodb", + }, + Entrypoint: &ct.ImageEntrypoint{ + Args: []string{"/bin/start-flynn-mongodb"}, + }, + }, + { + Name: "gitreceive", + Base: "ubuntu-bionic", + Binaries: map[string]string{ + "gitreceived": "/bin/gitreceived", + "flynn-receiver": "/bin/flynn-receiver", + }, + ExtraFiles: map[string]string{ + "gitreceive/start.sh": "/bin/start-flynn-receiver", + }, + Entrypoint: &ct.ImageEntrypoint{ + Args: []string{"/bin/start-flynn-receiver"}, + }, + }, + { + Name: "tarreceive", + Base: "ubuntu-bionic", + Binaries: map[string]string{ + "tarreceive": "/bin/tarreceive", + }, + Entrypoint: &ct.ImageEntrypoint{ + Args: []string{"/bin/tarreceive"}, + }, + }, + { + Name: "taffy", + Base: "ubuntu-bionic", + Binaries: map[string]string{ + "taffy": "/bin/taffy", + "flynn-receiver": "/bin/flynn-receiver", + }, + Entrypoint: &ct.ImageEntrypoint{ + Args: []string{"/bin/taffy"}, + }, + }, + { + Name: "slugbuilder-18", + Base: "ubuntu-bionic", + Binaries: map[string]string{ + "create-artifact": "/bin/create-artifact", + "slug-migrator": "/bin/slug-migrator", + }, + ExtraFiles: map[string]string{ + "slugbuilder/convert-legacy-slug.sh": "/bin/convert-legacy-slug.sh", + "slugbuilder/builder/build.sh": "/builder/build.sh", + "slugbuilder/builder/create-user.sh": "/builder/create-user.sh", + }, + Entrypoint: &ct.ImageEntrypoint{ + Args: []string{"/builder/build.sh"}, + }, + }, + { + Name: 
"slugbuilder-14", + Base: "ubuntu-bionic", // simplified: using bionic instead of cedar-14/trusty + Binaries: map[string]string{ + "create-artifact": "/bin/create-artifact", + "slug-migrator": "/bin/slug-migrator", + }, + ExtraFiles: map[string]string{ + "slugbuilder/convert-legacy-slug.sh": "/bin/convert-legacy-slug.sh", + "slugbuilder/builder/build.sh": "/builder/build.sh", + "slugbuilder/builder/create-user.sh": "/builder/create-user.sh", + }, + Entrypoint: &ct.ImageEntrypoint{ + Args: []string{"/builder/build.sh"}, + }, + }, + { + Name: "slugrunner-18", + Base: "ubuntu-bionic", + ExtraFiles: map[string]string{ + "slugrunner/runner/init": "/runner/init", + }, + Entrypoint: &ct.ImageEntrypoint{ + Args: []string{"/runner/init"}, + }, + }, + { + Name: "slugrunner-14", + Base: "ubuntu-bionic", // simplified + ExtraFiles: map[string]string{ + "slugrunner/runner/init": "/runner/init", + }, + Entrypoint: &ct.ImageEntrypoint{ + Args: []string{"/runner/init"}, + }, + }, + { + Name: "builder", + Base: "ubuntu-bionic", + Binaries: map[string]string{ + "flynn-builder": "/bin/flynn-builder", + }, + }, + } +} + +// ----- Step 3: Generate manifests ----- + +func (e *exporter) generateManifests() error { + manifestsDir := filepath.Join(e.buildDir, "manifests") + if err := os.MkdirAll(manifestsDir, 0755); err != nil { + return err + } + + // Generate images.json + fmt.Printf(" Generating images.json\n") + imagesJSON, err := e.generateImagesJSON() + if err != nil { + return fmt.Errorf("generating images.json: %s", err) + } + if err := ioutil.WriteFile(filepath.Join(manifestsDir, "images.json"), imagesJSON, 0644); err != nil { + return err + } + + // Generate bootstrap-manifest.json + fmt.Printf(" Generating bootstrap-manifest.json\n") + bootstrapManifest, err := e.generateBootstrapManifest() + if err != nil { + return fmt.Errorf("generating bootstrap-manifest.json: %s", err) + } + if err := ioutil.WriteFile(filepath.Join(manifestsDir, "bootstrap-manifest.json"), 
bootstrapManifest, 0644); err != nil { + return err + } + + return nil +} + +func (e *exporter) generateImagesJSON() ([]byte, error) { + // Read the template + templatePath := filepath.Join(e.sourceDir, "util/release/images_template.json") + template, err := ioutil.ReadFile(templatePath) + if err != nil { + return nil, err + } + + // Replace $image_artifact[name] placeholders with actual artifact JSON + pattern := regexp.MustCompile(`\$image_artifact\[([^\]]+)\]`) + var replaceErr error + result := pattern.ReplaceAllFunc(template, func(match []byte) []byte { + name := string(match[16 : len(match)-1]) + artifact, ok := e.artifacts[name] + if !ok { + replaceErr = fmt.Errorf("unknown image %q", name) + return nil + } + // Set meta for the artifact in the manifest + artifact.Meta = map[string]string{ + "flynn.component": name, + "flynn.system-image": "true", + } + data, err := json.Marshal(artifact) + if err != nil { + replaceErr = err + return nil + } + return data + }) + if replaceErr != nil { + return nil, replaceErr + } + + // Validate the result is valid JSON + var check interface{} + if err := json.Unmarshal(result, &check); err != nil { + return nil, fmt.Errorf("generated images.json is not valid JSON: %s", err) + } + + return result, nil +} + +func (e *exporter) generateBootstrapManifest() ([]byte, error) { + templatePath := filepath.Join(e.sourceDir, "bootstrap/manifest_template.json") + template, err := ioutil.ReadFile(templatePath) + if err != nil { + return nil, err + } + + // Replace $image_artifact[name] placeholders + pattern := regexp.MustCompile(`\$image_artifact\[([^\]]+)\]`) + var replaceErr error + result := pattern.ReplaceAllFunc(template, func(match []byte) []byte { + name := string(match[16 : len(match)-1]) + artifact, ok := e.artifacts[name] + if !ok { + replaceErr = fmt.Errorf("unknown image %q in bootstrap manifest", name) + return nil + } + artifact.Meta = map[string]string{ + "flynn.component": name, + "flynn.system-image": "true", + } + 
data, err := json.Marshal(artifact) + if err != nil { + replaceErr = err + return nil + } + return data + }) + if replaceErr != nil { + return nil, replaceErr + } + + return result, nil +} + +// ----- Step 4: Stage TUF targets ----- + +func (e *exporter) stageTUFTargets() error { + // Open the TUF repository + store := tuf.FileSystemStore(e.tufDir, func(role string, confirm bool) ([]byte, error) { + // Keys are unencrypted, return empty passphrase + return []byte(""), nil + }) + repo, err := tuf.NewRepo(store) + if err != nil { + return fmt.Errorf("opening TUF repo: %s", err) + } + + // Get existing targets (may be empty) + existingTargets, err := repo.Targets() + if err != nil { + return fmt.Errorf("getting existing targets: %s", err) + } + _ = existingTargets + + // Clean staged area + if err := repo.Clean(); err != nil { + return fmt.Errorf("cleaning TUF repo: %s", err) + } + + targetMeta, _ := json.Marshal(map[string]string{"version": e.version}) + + stagedTargetsDir := filepath.Join(e.tufDir, "staged", "targets") + + // 4a: Stage versioned binaries (gzipped) + fmt.Printf(" Staging versioned binaries\n") + for _, bin := range []string{"flynn-host", "flynn-init", "flynn-linux-amd64"} { + target := filepath.Join(e.version, bin+".gz") + srcPath := filepath.Join(e.binDir, bin) + if err := e.stageGzipped(stagedTargetsDir, target, srcPath); err != nil { + return fmt.Errorf("staging %s: %s", bin, err) + } + if err := repo.AddTarget(util.NormalizeTarget(target), targetMeta); err != nil { + return fmt.Errorf("adding target %s: %s", target, err) + } + fmt.Printf(" + %s\n", target) + } + + // 4b: Stage top-level flynn-host binary (for install script) + fmt.Printf(" Staging top-level binaries\n") + { + target := "flynn-host.gz" + srcPath := filepath.Join(e.binDir, "flynn-host") + if err := e.stageGzipped(stagedTargetsDir, target, srcPath); err != nil { + return fmt.Errorf("staging top-level flynn-host: %s", err) + } + if err := repo.AddTarget(util.NormalizeTarget(target), 
targetMeta); err != nil { + return fmt.Errorf("adding target %s: %s", target, err) + } + fmt.Printf(" + %s\n", target) + } + + // Top-level CLI binary + { + target := "flynn-linux-amd64.gz" + srcPath := filepath.Join(e.binDir, "flynn-linux-amd64") + if err := e.stageGzipped(stagedTargetsDir, target, srcPath); err != nil { + return fmt.Errorf("staging top-level CLI: %s", err) + } + if err := repo.AddTarget(util.NormalizeTarget(target), targetMeta); err != nil { + return fmt.Errorf("adding target %s: %s", target, err) + } + fmt.Printf(" + %s\n", target) + } + + // 4c: Stage versioned manifests (gzipped) + fmt.Printf(" Staging versioned manifests\n") + manifestsDir := filepath.Join(e.buildDir, "manifests") + for _, manifest := range []string{"bootstrap-manifest.json", "images.json"} { + target := filepath.Join(e.version, manifest+".gz") + srcPath := filepath.Join(manifestsDir, manifest) + + // Read the manifest and rewrite layer URLs + data, err := ioutil.ReadFile(srcPath) + if err != nil { + return fmt.Errorf("reading %s: %s", manifest, err) + } + + if err := e.stageGzippedData(stagedTargetsDir, target, data); err != nil { + return fmt.Errorf("staging %s: %s", manifest, err) + } + if err := repo.AddTarget(util.NormalizeTarget(target), targetMeta); err != nil { + return fmt.Errorf("adding target %s: %s", target, err) + } + fmt.Printf(" + %s\n", target) + } + + // 4d: Stage image manifests and layers + fmt.Printf(" Staging images and layers\n") + layersStaged := make(map[string]bool) // track deduplicated layers + for name, artifact := range e.artifacts { + manifestID := artifact.Manifest().ID() + imageTarget := util.NormalizeTarget(path.Join("images", manifestID+".json")) + + // Stage image manifest + imagePath := filepath.Join(stagedTargetsDir, imageTarget) + if err := os.MkdirAll(filepath.Dir(imagePath), 0755); err != nil { + return err + } + if err := ioutil.WriteFile(imagePath, artifact.RawManifest, 0644); err != nil { + return err + } + if err := 
repo.AddTarget(imageTarget, targetMeta); err != nil { + return fmt.Errorf("adding image target %s: %s", name, err) + } + fmt.Printf(" + images/%s.json (%s)\n", manifestID[:16], name) + + // Stage layers + for _, rootfs := range artifact.Manifest().Rootfs { + for _, layer := range rootfs.Layers { + if layersStaged[layer.ID] { + continue + } + layersStaged[layer.ID] = true + + // Stage squashfs layer + layerTarget := util.NormalizeTarget(path.Join("layers", layer.ID+".squashfs")) + layerSrc := filepath.Join(e.layerCache, layer.ID+".squashfs") + layerDst := filepath.Join(stagedTargetsDir, layerTarget) + if err := os.MkdirAll(filepath.Dir(layerDst), 0755); err != nil { + return err + } + if err := copyFile(layerSrc, layerDst, 0644); err != nil { + return fmt.Errorf("staging layer %s: %s", layer.ID[:16], err) + } + if err := repo.AddTarget(layerTarget, targetMeta); err != nil { + return fmt.Errorf("adding layer target %s: %s", layer.ID[:16], err) + } + + // Stage layer config JSON + layerConfigTarget := util.NormalizeTarget(path.Join("layers", layer.ID+".json")) + layerConfigData, err := json.Marshal(layer) + if err != nil { + return err + } + layerConfigDst := filepath.Join(stagedTargetsDir, layerConfigTarget) + if err := ioutil.WriteFile(layerConfigDst, layerConfigData, 0644); err != nil { + return err + } + if err := repo.AddTarget(layerConfigTarget, targetMeta); err != nil { + return fmt.Errorf("adding layer config target %s: %s", layer.ID[:16], err) + } + + fmt.Printf(" + layers/%s.squashfs (%s)\n", layer.ID[:16], humanSize(layer.Length)) + } + } + } + + // 4e: Stage channel file + fmt.Printf(" Staging channel file\n") + channelTarget := util.NormalizeTarget(path.Join("channels", "stable")) + channelPath := filepath.Join(stagedTargetsDir, channelTarget) + if err := os.MkdirAll(filepath.Dir(channelPath), 0755); err != nil { + return err + } + if err := ioutil.WriteFile(channelPath, []byte(e.version+"\n"), 0644); err != nil { + return err + } + if err := 
repo.AddTarget(channelTarget, targetMeta); err != nil { + return fmt.Errorf("adding channel target: %s", err) + } + fmt.Printf(" + channels/stable -> %s\n", e.version) + + // 4f: Sign and commit + fmt.Printf(" Signing TUF metadata\n") + if err := repo.Snapshot(tuf.CompressionTypeNone); err != nil { + return fmt.Errorf("TUF snapshot: %s", err) + } + if err := repo.Timestamp(); err != nil { + return fmt.Errorf("TUF timestamp: %s", err) + } + if err := repo.Commit(); err != nil { + return fmt.Errorf("TUF commit: %s", err) + } + fmt.Printf(" TUF metadata signed and committed\n") + + return nil +} + +func (e *exporter) stageGzipped(stagedTargetsDir, target, srcPath string) error { + data, err := ioutil.ReadFile(srcPath) + if err != nil { + return err + } + return e.stageGzippedData(stagedTargetsDir, target, data) +} + +func (e *exporter) stageGzippedData(stagedTargetsDir, target string, data []byte) error { + dstPath := filepath.Join(stagedTargetsDir, target) + if err := os.MkdirAll(filepath.Dir(dstPath), 0755); err != nil { + return err + } + f, err := os.Create(dstPath) + if err != nil { + return err + } + defer f.Close() + gz, err := gzip.NewWriterLevel(f, gzip.BestCompression) + if err != nil { + return err + } + if _, err := io.Copy(gz, bytes.NewReader(data)); err != nil { + gz.Close() + return err + } + return gz.Close() +} + +// ----- Utilities ----- + +func copyFile(src, dst string, perm os.FileMode) error { + data, err := ioutil.ReadFile(src) + if err != nil { + return err + } + return ioutil.WriteFile(dst, data, perm) +} + +func humanSize(bytes int64) string { + const ( + KB = 1024 + MB = 1024 * KB + ) + switch { + case bytes >= MB: + return fmt.Sprintf("%.1fMB", float64(bytes)/float64(MB)) + case bytes >= KB: + return fmt.Sprintf("%.1fKB", float64(bytes)/float64(KB)) + default: + return fmt.Sprintf("%dB", bytes) + } +} + +// Ensure these imports are used +var ( + _ = tufdata.Files{} + _ = sha512.Sum512_256 + _ = hex.EncodeToString + _ = path.Join +) diff 
--git a/script/install-flynn.tmpl b/script/install-flynn.tmpl index ed7accc9a7..b821646b3a 100755 --- a/script/install-flynn.tmpl +++ b/script/install-flynn.tmpl @@ -26,7 +26,7 @@ OPTIONS: --no-ntp Don't install ntp package - -r, --repo URL The TUF repository to download files from [default: https://dl.flynn.io] + -r, --repo URL The TUF repository to download files from [default: https://consolving.github.io/flynn-tuf-repo/repository] --zpool-create-device DEVICE Device to create the flynn-default zpool on @@ -146,7 +146,7 @@ main() { exit fi - repo_url="${repo_url:="https://dl.flynn.io"}" + repo_url="${repo_url:="https://consolving.github.io/flynn-tuf-repo/repository"}" local packages=("iptables" "zfsutils-linux") diff --git a/script/rotate-tuf-keys/main.go b/script/rotate-tuf-keys/main.go new file mode 100755 index 0000000000..b9e4447117 --- /dev/null +++ b/script/rotate-tuf-keys/main.go @@ -0,0 +1,243 @@ +// rotate-tuf-keys performs a complete TUF key rotation. +// +// It generates new ed25519 keys for all 4 TUF roles (root, targets, snapshot, +// timestamp), revokes the old keys, re-signs all metadata, and moves the new +// private keys to a secure location outside the repository. +// +// The rotation follows the TUF specification: the new root.json is signed by +// both old and new root keys so that existing clients can verify the transition. +// +// Usage: +// +// go run ./script/rotate-tuf-keys \ +// --tuf-dir=/path/to/flynn-tuf-repo \ +// --keys-out=/path/to/secure/keys +// +// The --keys-out directory will contain the new private keys and must be kept +// secure (outside any git repository). 
+package main + +import ( + "encoding/json" + "flag" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "time" + + tuf "github.com/flynn/go-tuf" + "github.com/flynn/go-tuf/data" +) + +func main() { + tufDir := flag.String("tuf-dir", "", "Path to the TUF repository directory (containing keys/ and repository/)") + keysOut := flag.String("keys-out", "", "Path to store new private keys (outside the repo)") + flag.Parse() + + if *tufDir == "" || *keysOut == "" { + fmt.Fprintf(os.Stderr, "Usage: rotate-tuf-keys --tuf-dir=DIR --keys-out=DIR\n") + os.Exit(1) + } + + if err := run(*tufDir, *keysOut); err != nil { + fmt.Fprintf(os.Stderr, "Error: %s\n", err) + os.Exit(1) + } +} + +func run(tufDir, keysOut string) error { + // Verify the TUF repo exists + rootPath := filepath.Join(tufDir, "repository", "root.json") + if _, err := os.Stat(rootPath); err != nil { + return fmt.Errorf("TUF repository not found at %s: %s", tufDir, err) + } + + // Create the output directory for new keys + if err := os.MkdirAll(keysOut, 0700); err != nil { + return fmt.Errorf("creating keys output directory: %s", err) + } + + // Open the TUF repository (keys are unencrypted, no passphrase needed) + store := tuf.FileSystemStore(tufDir, nil) + repo, err := tuf.NewRepo(store) + if err != nil { + return fmt.Errorf("opening TUF repo: %s", err) + } + + // Step 1: Read current root metadata to get old key IDs + fmt.Println("=== Step 1: Reading current root metadata ===") + currentRootData, err := ioutil.ReadFile(rootPath) + if err != nil { + return fmt.Errorf("reading root.json: %s", err) + } + var currentSigned data.Signed + if err := json.Unmarshal(currentRootData, ¤tSigned); err != nil { + return fmt.Errorf("parsing root.json: %s", err) + } + var currentRoot data.Root + if err := json.Unmarshal(currentSigned.Signed, ¤tRoot); err != nil { + return fmt.Errorf("parsing root.json signed data: %s", err) + } + + fmt.Printf(" Current root.json version: %d\n", currentRoot.Version) + fmt.Printf(" Current 
root.json expires: %s\n", currentRoot.Expires.Format(time.RFC3339)) + + // Collect old key IDs for each role + oldKeyIDs := make(map[string][]string) + for role, roleData := range currentRoot.Roles { + oldKeyIDs[role] = make([]string, len(roleData.KeyIDs)) + copy(oldKeyIDs[role], roleData.KeyIDs) + fmt.Printf(" Role %s: %d key(s), threshold=%d\n", role, len(roleData.KeyIDs), roleData.Threshold) + for _, kid := range roleData.KeyIDs { + fmt.Printf(" old key: %s\n", kid) + } + } + + // Step 2: Generate new keys for all roles + // Order matters: root first, then the others. + // GenKey adds the new key alongside the old one and bumps root.json version. + fmt.Println("\n=== Step 2: Generating new keys ===") + roles := []string{"root", "targets", "snapshot", "timestamp"} + newKeyIDs := make(map[string]string) + + for _, role := range roles { + expires := data.DefaultExpires("root") // All GenKey calls update root.json expiry + newID, err := repo.GenKeyWithExpires(role, expires) + if err != nil { + return fmt.Errorf("generating new %s key: %s", role, err) + } + newKeyIDs[role] = newID + fmt.Printf(" Generated new %s key: %s\n", role, newID) + } + + // Step 3: Revoke old keys for all roles + // For root: go-tuf automatically signs with both old and new keys + fmt.Println("\n=== Step 3: Revoking old keys ===") + for _, role := range roles { + for _, oldID := range oldKeyIDs[role] { + fmt.Printf(" Revoking %s key: %s\n", role, oldID) + if err := repo.RevokeKey(role, oldID); err != nil { + return fmt.Errorf("revoking %s key %s: %s", role, oldID, err) + } + } + } + + // Step 4: Re-sign targets, snapshot, timestamp metadata + // The targets metadata needs to be re-signed with the new targets key. + // Snapshot and timestamp are always re-generated. 
+ fmt.Println("\n=== Step 4: Re-signing metadata ===") + + // Re-sign targets.json with the new targets key + fmt.Println(" Re-signing targets.json...") + if err := repo.Sign("targets.json"); err != nil { + return fmt.Errorf("re-signing targets.json: %s", err) + } + + // Re-generate snapshot (includes hashes of root.json and targets.json) + fmt.Println(" Re-generating snapshot.json...") + if err := repo.Snapshot(tuf.CompressionTypeNone); err != nil { + return fmt.Errorf("generating snapshot: %s", err) + } + + // Re-generate timestamp (includes hash of snapshot.json) + fmt.Println(" Re-generating timestamp.json...") + if err := repo.Timestamp(); err != nil { + return fmt.Errorf("generating timestamp: %s", err) + } + + // Commit: validates all signatures and copies staged -> repository + fmt.Println(" Committing...") + if err := repo.Commit(); err != nil { + return fmt.Errorf("committing TUF metadata: %s", err) + } + + // Step 5: Copy new private keys to secure location outside the repo + fmt.Printf("\n=== Step 5: Copying keys to %s ===\n", keysOut) + keysDir := filepath.Join(tufDir, "keys") + for _, role := range roles { + src := filepath.Join(keysDir, role+".json") + dst := filepath.Join(keysOut, role+".json") + data, err := ioutil.ReadFile(src) + if err != nil { + return fmt.Errorf("reading key file %s: %s", src, err) + } + if err := ioutil.WriteFile(dst, data, 0600); err != nil { + return fmt.Errorf("writing key file %s: %s", dst, err) + } + fmt.Printf(" Copied %s.json (%d bytes)\n", role, len(data)) + } + + // Step 6: Remove old private keys from the TUF repo directory + fmt.Println("\n=== Step 6: Removing keys from TUF repo ===") + for _, role := range roles { + keyFile := filepath.Join(keysDir, role+".json") + if err := os.Remove(keyFile); err != nil { + return fmt.Errorf("removing key file %s: %s", keyFile, err) + } + fmt.Printf(" Removed %s\n", keyFile) + } + // Remove the keys directory itself if empty + if err := os.Remove(keysDir); err != nil { + 
fmt.Printf(" Warning: could not remove keys/ directory: %s\n", err) + } else { + fmt.Println(" Removed keys/ directory") + } + + // Step 7: Print summary with new public keys for configuration updates + fmt.Println("\n=== Step 7: Summary ===") + + // Read the final root.json to get the new public keys + finalRootData, err := ioutil.ReadFile(filepath.Join(tufDir, "repository", "root.json")) + if err != nil { + return fmt.Errorf("reading final root.json: %s", err) + } + var finalSigned data.Signed + if err := json.Unmarshal(finalRootData, &finalSigned); err != nil { + return fmt.Errorf("parsing final root.json: %s", err) + } + var finalRoot data.Root + if err := json.Unmarshal(finalSigned.Signed, &finalRoot); err != nil { + return fmt.Errorf("parsing final root.json signed data: %s", err) + } + + fmt.Printf("\n New root.json version: %d\n", finalRoot.Version) + fmt.Printf(" New root.json expires: %s\n", finalRoot.Expires.Format(time.RFC3339)) + fmt.Printf(" Consistent snapshots: %v\n", finalRoot.ConsistentSnapshot) + fmt.Println() + + // Print new root public keys in the format needed for configuration + rootRole := finalRoot.Roles["root"] + fmt.Println(" New root public keys (for tup.config, builder/manifest.json, tufconfig.go):") + fmt.Println() + + // Build the JSON array for CONFIG_TUF_ROOT_KEYS + var rootKeys []map[string]interface{} + for _, kid := range rootRole.KeyIDs { + key := finalRoot.Keys[kid] + rootKeys = append(rootKeys, map[string]interface{}{ + "keytype": key.Type, + "keyval": map[string]string{ + "public": fmt.Sprintf("%x", []byte(key.Value.Public)), + }, + }) + } + rootKeysJSON, err := json.MarshalIndent(rootKeys, " ", " ") + if err != nil { + return fmt.Errorf("marshaling root keys: %s", err) + } + fmt.Printf(" %s\n", rootKeysJSON) + + fmt.Println() + fmt.Println(" Key rotation complete!") + fmt.Println() + fmt.Println(" IMPORTANT: You must now update the following files with the new root public keys:") + fmt.Println(" 1. 
flynn/tup.config (CONFIG_TUF_ROOT_KEYS)") + fmt.Println(" 2. flynn/builder/manifest.json (tuf.root_keys)") + fmt.Println(" 3. flynn/pkg/tufconfig/tufconfig.go (RootKeysJSON)") + fmt.Println() + fmt.Printf(" Private keys are stored at: %s\n", keysOut) + fmt.Println(" Keep this directory secure and NEVER commit it to git!") + + return nil +} diff --git a/script/update b/script/update index 23c63a2793..edd300f832 100755 --- a/script/update +++ b/script/update @@ -40,7 +40,7 @@ main() { alias flynn="${ROOT}/cli/bin/flynn" flynn -a "${app}" release show --json | jq -r 'del(.id)' > "${config}" - flynn -a "${app}" release add --file "${config}" "https://dl.flynn.io/tuf?name=${image_name}&id=${image_id}" + flynn -a "${app}" release add --file "${config}" "https://consolving.github.io/flynn-tuf-repo/repository/targets?name=${image_name}&id=${image_id}" } main $@ diff --git a/tup.config b/tup.config old mode 100644 new mode 100755 index 774983a484..b83bb0101b --- a/tup.config +++ b/tup.config @@ -1,2 +1,2 @@ -CONFIG_IMAGE_REPOSITORY=https://dl.flynn.io/tuf -CONFIG_TUF_ROOT_KEYS=[{"keytype":"ed25519","keyval":{"public":"6cfda23aa48f530aebd5b9c01030d06d02f25876b5508d681675270027af4731"}}] +CONFIG_IMAGE_REPOSITORY=https://consolving.github.io/flynn-tuf-repo/repository +CONFIG_TUF_ROOT_KEYS=[{"keytype": "ed25519", "keyval": {"public": "cddd70123e8303002498fc7f9f8c1fff87cdb321444c67b1ba9190d0394f6134"}}, {"keytype": "ed25519", "keyval": {"public": "22f67c648aaade626bbd8a85aac1e02d77cb476488a967b1ece129c701ed314c"}}, {"keytype": "ed25519", "keyval": {"public": "29e3309c3ed70d4927b2f55adc7ac5f5d547731fb62c5f197c02d0c1c2abac21"}}, {"keytype": "ed25519", "keyval": {"public": "d77ef5acdccc6ffba650edd4bc4d292014e7afbd1f3d5af945395e587c1430b1"}}] diff --git a/util/packer/ubuntu.json b/util/packer/ubuntu.json index cd15f43e83..11a82e8f6d 100644 --- a/util/packer/ubuntu.json +++ b/util/packer/ubuntu.json @@ -3,7 +3,7 @@ "variables": { "boot_command_prefix": "", "cpus": "4", - 
"flynn_repository": "https://dl.flynn.io", + "flynn_repository": "https://consolving.github.io/flynn-tuf-repo/repository", "headless": "false", "iso_checksum": "", "iso_checksum_type": "sha256", diff --git a/util/packer/ubuntu/install.sh b/util/packer/ubuntu/install.sh old mode 100644 new mode 100755 index 57aa1f7432..6619721fed --- a/util/packer/ubuntu/install.sh +++ b/util/packer/ubuntu/install.sh @@ -6,205 +6,205 @@ source /etc/lsb-release export DEBIAN_FRONTEND=noninteractive main() { - stop_cron - - if virtualbox_build; then - # run early to speed up subsequent steps - fix_dns_resolution - fi - - if vagrant_build; then - setup_sudo - install_vagrant_ssh_key - install_nfs - package_cleanup - fi - - if virtualbox_build; then - install_vbox_guest_additions - change_hostname - fi - - enable_cgroups - create_groups - add_apt_sources - install_packages - install_flynn - apt_cleanup - packer_cleanup - - if vagrant_build; then - net_cleanup - compress - fi + stop_cron + + if virtualbox_build; then + # run early to speed up subsequent steps + fix_dns_resolution + fi + + if vagrant_build; then + setup_sudo + install_vagrant_ssh_key + install_nfs + package_cleanup + fi + + if virtualbox_build; then + install_vbox_guest_additions + change_hostname + fi + + enable_cgroups + create_groups + add_apt_sources + install_packages + install_flynn + apt_cleanup + packer_cleanup + + if vagrant_build; then + net_cleanup + compress + fi } stop_cron() { - # cron can run apt/dpkg commands that will disrupt our tasks - case "${DISTRIB_RELEASE}" in - 14.04) - service cron stop - ;; - 16.04) - systemctl stop cron - ;; - esac + # cron can run apt/dpkg commands that will disrupt our tasks + case "${DISTRIB_RELEASE}" in + 14.04) + service cron stop + ;; + 16.04) + systemctl stop cron + ;; + esac } virtualbox_build() { - [[ "${PACKER_BUILDER_TYPE}" == "virtualbox-iso" ]] + [[ "${PACKER_BUILDER_TYPE}" == "virtualbox-iso" ]] } vagrant_build() { - virtualbox_build + virtualbox_build } 
setup_sudo() { - if [[ ! -f /etc/sudoers.d/vagrant ]]; then - echo "%vagrant ALL=(ALL) NOPASSWD:ALL" > /etc/sudoers.d/vagrant - chmod 0440 /etc/sudoers.d/vagrant - fi + if [[ ! -f /etc/sudoers.d/vagrant ]]; then + echo "%vagrant ALL=(ALL) NOPASSWD:ALL" >/etc/sudoers.d/vagrant + chmod 0440 /etc/sudoers.d/vagrant + fi } install_vagrant_ssh_key() { - local pub="https://raw.github.com/mitchellh/vagrant/master/keys/vagrant.pub" - if [[ ! -f /home/vagrant/.ssh/authorized_keys ]]; then - mkdir /home/vagrant/.ssh - chmod 700 /home/vagrant/.ssh - wget ${pub} \ - -O /home/vagrant/.ssh/authorized_keys - chmod 600 /home/vagrant/.ssh/authorized_keys - chown -R vagrant /home/vagrant/.ssh - fi + local pub="https://raw.github.com/mitchellh/vagrant/master/keys/vagrant.pub" + if [[ ! -f /home/vagrant/.ssh/authorized_keys ]]; then + mkdir /home/vagrant/.ssh + chmod 700 /home/vagrant/.ssh + wget ${pub} \ + -O /home/vagrant/.ssh/authorized_keys + chmod 600 /home/vagrant/.ssh/authorized_keys + chown -R vagrant /home/vagrant/.ssh + fi } install_nfs() { - apt-get install -y nfs-common + apt-get install -y nfs-common } package_cleanup() { - apt-get purge -y puppet byobu juju ruby + apt-get purge -y puppet byobu juju ruby } install_vbox_guest_additions() { - local vbox_version="$(cat /home/vagrant/.vbox_version)" - local vbox_iso="VBoxGuestAdditions_${vbox_version}.iso" + local vbox_version="$(cat /home/vagrant/.vbox_version)" + local vbox_iso="VBoxGuestAdditions_${vbox_version}.iso" - apt-get install -y dkms - mount -o loop "${vbox_iso}" /mnt - yes | sh /mnt/VBoxLinuxAdditions.run || true - umount /mnt - rm "${vbox_iso}" + apt-get install -y dkms + mount -o loop "${vbox_iso}" /mnt + yes | sh /mnt/VBoxLinuxAdditions.run || true + umount /mnt + rm "${vbox_iso}" } change_hostname() { - local hostname="flynn" + local hostname="flynn" - echo "${hostname}" > /etc/hostname - echo "127.0.1.1 ${hostname}" >> /etc/hosts - hostname -F /etc/hostname + echo "${hostname}" >/etc/hostname + echo 
"127.0.1.1 ${hostname}" >>/etc/hosts + hostname -F /etc/hostname } fix_dns_resolution() { - # Address issues some hosts experience with DNS latency. - # See https://github.com/mitchellh/vagrant/issues/1172 for a detailed discussion of the problem. - echo "options single-request-reopen" >> /etc/resolvconf/resolv.conf.d/base - resolvconf -u + # Address issues some hosts experience with DNS latency. + # See https://github.com/mitchellh/vagrant/issues/1172 for a detailed discussion of the problem. + echo "options single-request-reopen" >>/etc/resolvconf/resolv.conf.d/base + resolvconf -u } enable_cgroups() { - perl -p -i -e \ - 's/GRUB_CMDLINE_LINUX=""/GRUB_CMDLINE_LINUX="cgroup_enable=memory swapaccount=1"/g' \ - /etc/default/grub - /usr/sbin/update-grub + perl -p -i -e \ + 's/GRUB_CMDLINE_LINUX=""/GRUB_CMDLINE_LINUX="cgroup_enable=memory swapaccount=1"/g' \ + /etc/default/grub + /usr/sbin/update-grub } create_groups() { - groupadd fuse || true - usermod -a -G fuse "${SUDO_USER}" + groupadd fuse || true + usermod -a -G fuse "${SUDO_USER}" } add_apt_sources() { - # tup - apt-key adv --keyserver keyserver.ubuntu.com \ - --recv 27947298A222DFA46E207200B34FBCAA90EA7F4E - echo deb http://ppa.launchpad.net/titanous/tup/ubuntu trusty main \ - > /etc/apt/sources.list.d/tup.list + # tup + apt-key adv --keyserver keyserver.ubuntu.com \ + --recv 27947298A222DFA46E207200B34FBCAA90EA7F4E + echo deb http://ppa.launchpad.net/titanous/tup/ubuntu trusty main \ + >/etc/apt/sources.list.d/tup.list - apt-get update + apt-get update } install_packages() { - local packages=( - "curl" - "git" - "iptables" - "make" - "squashfs-tools" - "tup" - "vim-tiny" - "libsasl2-dev" - ) + local packages=( + "curl" + "git" + "iptables" + "make" + "squashfs-tools" + "tup" + "vim-tiny" + "libsasl2-dev" + ) - apt-get install -y ${packages[@]} + apt-get install -y ${packages[@]} - # make tup suid root so that we can build in chroots - chmod ug+s /usr/bin/tup + # make tup suid root so that we can build in 
chroots + chmod ug+s /usr/bin/tup - # give non-root users access to tup fuse mounts - sed 's/#user_allow_other/user_allow_other/' -i /etc/fuse.conf + # give non-root users access to tup fuse mounts + sed 's/#user_allow_other/user_allow_other/' -i /etc/fuse.conf } install_flynn() { - local repo="${FLYNN_REPOSITORY:-"https://dl.flynn.io"}" + local repo="${FLYNN_REPOSITORY:-"https://consolving.github.io/flynn-tuf-repo/repository"}" - local script="install-flynn" - if [[ -n "${FLYNN_VERSION}" ]]; then - script="${script}-${FLYNN_VERSION}" - fi + local script="install-flynn" + if [[ -n "${FLYNN_VERSION}" ]]; then + script="${script}-${FLYNN_VERSION}" + fi - bash -es -- -r "${repo}" < <(curl -sL --fail "${repo}/${script}") + bash -es -- -r "${repo}" < <(curl -sL --fail "${repo}/${script}") - case "${DISTRIB_RELEASE}" in - 14.04) - sed -i 's/start on/#start on/' /etc/init/flynn-host.conf - ;; - 16.04) - systemctl disable flynn-host - ;; - esac + case "${DISTRIB_RELEASE}" in + 14.04) + sed -i 's/start on/#start on/' /etc/init/flynn-host.conf + ;; + 16.04) + systemctl disable flynn-host + ;; + esac } apt_cleanup() { - echo "cleaning apt cache" - apt-get autoremove -y - apt-get clean + echo "cleaning apt cache" + apt-get autoremove -y + apt-get clean - echo "deleting old kernels" - cur_kernel=$(uname -r | sed 's/-*[a-z]//g' | sed 's/-386//g') - kernel_pkg="linux-(image|headers|ubuntu-modules|restricted-modules)" - meta_pkg="${kernel_pkg}-(generic|i386|server|common|rt|xen|ec2|virtual)" - apt-get purge -y $(dpkg -l \ - | egrep ${kernel_pkg} \ - | egrep -v "${cur_kernel}|${meta_pkg}" \ - | awk '{print $2}') + echo "deleting old kernels" + cur_kernel=$(uname -r | sed 's/-*[a-z]//g' | sed 's/-386//g') + kernel_pkg="linux-(image|headers|ubuntu-modules|restricted-modules)" + meta_pkg="${kernel_pkg}-(generic|i386|server|common|rt|xen|ec2|virtual)" + apt-get purge -y $(dpkg -l | + egrep ${kernel_pkg} | + egrep -v "${cur_kernel}|${meta_pkg}" | + awk '{print $2}') } packer_cleanup() { 
- rm -f /home/ubuntu/.ssh/authorized_keys + rm -f /home/ubuntu/.ssh/authorized_keys } net_cleanup() { - # Removing leftover leases and persistent rules - echo "cleaning up dhcp leases" - rm /var/lib/dhcp/* + # Removing leftover leases and persistent rules + echo "cleaning up dhcp leases" + rm /var/lib/dhcp/* } compress() { - # Zero out the free space to save space in the final image: - echo "Zeroing device to make space..." - dd if=/dev/zero of=/EMPTY bs=1M || true - rm -f /EMPTY + # Zero out the free space to save space in the final image: + echo "Zeroing device to make space..." + dd if=/dev/zero of=/EMPTY bs=1M || true + rm -f /EMPTY } main $@