From f1ae04d36da472dc4012f870fd39165bfaa64a85 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marc=20Sch=C3=A4fer?= Date: Sun, 10 May 2026 22:08:27 +0200 Subject: [PATCH 1/5] docs(self-host/kubernetes): add Kubernetes deployment guides with Helm, Kustomize, Helmfile, and GitOps for Newt and Pangolin MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Marc Schäfer --- docs.json | 39 +- self-host/manual/kubernetes/choose-method.mdx | 180 +++ self-host/manual/kubernetes/gitops/argocd.mdx | 468 ++++++++ self-host/manual/kubernetes/gitops/flux.mdx | 559 +++++++++ .../manual/kubernetes/gitops/overview.mdx | 351 ++++++ self-host/manual/kubernetes/helm.mdx | 399 +++++++ self-host/manual/kubernetes/helmfile.mdx | 386 +++++++ self-host/manual/kubernetes/kustomize.mdx | 403 +++++++ .../manual/kubernetes/newt/configuration.mdx | 811 +++++++++++++ self-host/manual/kubernetes/newt/helm.mdx | 437 +++++++ .../manual/kubernetes/newt/kustomize.mdx | 626 ++++++++++ .../kubernetes/newt/troubleshooting.mdx | 752 ++++++++++++ self-host/manual/kubernetes/overview.mdx | 108 ++ .../kubernetes/pangolin/configuration.mdx | 1016 +++++++++++++++++ self-host/manual/kubernetes/pangolin/helm.mdx | 408 +++++++ .../manual/kubernetes/pangolin/kustomize.mdx | 708 ++++++++++++ .../kubernetes/pangolin/troubleshooting.mdx | 888 ++++++++++++++ self-host/manual/kubernetes/prerequisites.mdx | 426 +++++++ 18 files changed, 8964 insertions(+), 1 deletion(-) create mode 100644 self-host/manual/kubernetes/choose-method.mdx create mode 100644 self-host/manual/kubernetes/gitops/argocd.mdx create mode 100644 self-host/manual/kubernetes/gitops/flux.mdx create mode 100644 self-host/manual/kubernetes/gitops/overview.mdx create mode 100644 self-host/manual/kubernetes/helm.mdx create mode 100644 self-host/manual/kubernetes/helmfile.mdx create mode 100644 self-host/manual/kubernetes/kustomize.mdx create mode 100644 self-host/manual/kubernetes/newt/configuration.mdx create 
mode 100644 self-host/manual/kubernetes/newt/helm.mdx create mode 100644 self-host/manual/kubernetes/newt/kustomize.mdx create mode 100644 self-host/manual/kubernetes/newt/troubleshooting.mdx create mode 100644 self-host/manual/kubernetes/overview.mdx create mode 100644 self-host/manual/kubernetes/pangolin/configuration.mdx create mode 100644 self-host/manual/kubernetes/pangolin/helm.mdx create mode 100644 self-host/manual/kubernetes/pangolin/kustomize.mdx create mode 100644 self-host/manual/kubernetes/pangolin/troubleshooting.mdx create mode 100644 self-host/manual/kubernetes/prerequisites.mdx diff --git a/docs.json b/docs.json index 2b1b204..e4088a8 100644 --- a/docs.json +++ b/docs.json @@ -175,7 +175,44 @@ "group": "Manual Installation", "pages": [ "self-host/manual/docker-compose", - "self-host/manual/unraid" + "self-host/manual/unraid", + { + "group": "Kubernetes", + "pages": [ + "self-host/manual/kubernetes/overview", + "self-host/manual/kubernetes/choose-method", + "self-host/manual/kubernetes/prerequisites", + "self-host/manual/kubernetes/helm", + "self-host/manual/kubernetes/kustomize", + "self-host/manual/kubernetes/helmfile", + { + "group": "GitOps", + "pages": [ + "self-host/manual/kubernetes/gitops/overview", + "self-host/manual/kubernetes/gitops/argocd", + "self-host/manual/kubernetes/gitops/flux" + ] + }, + { + "group": "Newt", + "pages": [ + "self-host/manual/kubernetes/newt/helm", + "self-host/manual/kubernetes/newt/kustomize", + "self-host/manual/kubernetes/newt/configuration", + "self-host/manual/kubernetes/newt/troubleshooting" + ] + }, + { + "group": "Pangolin", + "pages": [ + "self-host/manual/kubernetes/pangolin/helm", + "self-host/manual/kubernetes/pangolin/kustomize", + "self-host/manual/kubernetes/pangolin/configuration", + "self-host/manual/kubernetes/pangolin/troubleshooting" + ] + } + ] + } ] }, "self-host/dns-and-networking", diff --git a/self-host/manual/kubernetes/choose-method.mdx b/self-host/manual/kubernetes/choose-method.mdx new 
file mode 100644 index 0000000..ac18077 --- /dev/null +++ b/self-host/manual/kubernetes/choose-method.mdx @@ -0,0 +1,180 @@ +--- +title: "Choose a Method" +description: "Choose the right Kubernetes installation workflow for Pangolin and Newt." +--- + +import PangolinCloudTocCta from "/snippets/pangolin-cloud-toc-cta.mdx"; + + + + +This page helps you choose the right Kubernetes workflow for installing and managing Pangolin and related components. + +## Quick decision table + +| If you... | Use | Why | +| --- | --- | --- | +| Want the recommended Kubernetes install path | **Helm** | Standard chart-based workflow for installing, upgrading, and uninstalling releases | +| Need environment-specific overlays or manifest customization | **Kustomize** | Patch and reuse Kubernetes manifests without a separate templating language | +| Already use Argo CD or want GitOps with a web UI | **Argo CD** | Git-driven reconciliation, sync status, drift detection, and optional auto-sync | +| Already use Flux or want GitOps defined through Kubernetes CRDs | **Flux** | Declarative reconciliation with resources such as `HelmRelease` and `Kustomization` | +| Need to manage several Helm releases together | **Helmfile** | Declarative orchestration for multiple Helm releases and shared values | + +## Detailed method descriptions + +### Use Helm if... + +- You want the recommended Kubernetes install workflow for Pangolin or Newt. +- You want a straightforward chart-based install. +- You manage releases manually or through CI/CD. +- You want normal Helm release operations such as install, upgrade, rollback, and uninstall. +- You are comfortable managing configuration through `values.yaml`. + +Helm is the default choice for most Kubernetes installations. It packages Kubernetes resources into versioned charts and manages releases in the cluster. + +**Get started**: [Helm Quick-Start](/self-host/manual/kubernetes/helm) + +### Use Kustomize if... 
+ +- You need environment-specific overlays for dev, staging, or production. +- You want to patch Kubernetes manifests without using a templating language. +- You prefer a manifest-driven workflow. +- You want to keep rendered or curated manifests in Git. +- You are comfortable with `kubectl apply -k` or `kustomize build`. + +Kustomize works well when you want a shared base with small environment-specific changes. It can be used with curated manifests, generated manifests, or GitOps tools. + +**Common scenario**: Keep a base deployment and apply overlays for each environment. + +**Get started**: [Kustomize Quick-Start](/self-host/manual/kubernetes/kustomize) + +### Use Argo CD if... + +- Your Git repository should be the source of truth. +- You want a web UI for application status, sync state, and troubleshooting. +- You want drift detection when the live cluster state differs from the desired state. +- You want manual sync, automated sync, or self-healing behavior. +- You already use Argo CD for other applications. + +Argo CD reconciles applications from a declared source into the cluster. It can use Helm charts, Kustomize overlays, plain YAML, Jsonnet, or configured plugins as sources. + +When Argo CD deploys a Helm chart, Helm is used to render the manifests. The application lifecycle is then managed by Argo CD, not by the local `helm` CLI. + +**Argo CD can deploy**: + +- Helm charts +- Kustomize overlays +- Plain YAML manifests +- Jsonnet or custom config-management plugin output + +**Get started**: [Argo CD Guide](/self-host/manual/kubernetes/gitops/argocd) + +### Use Flux if... + +- You want GitOps managed through Kubernetes custom resources. +- You already use Flux for other workloads. +- You want Helm releases reconciled by a controller. +- You want Kustomize overlays reconciled from Git. +- You prefer a lightweight workflow without depending on a central web UI. + +Flux defines sources and desired state as Kubernetes resources. 
Typical resources include `GitRepository`, `HelmRepository`, `OCIRepository`, `Kustomization`, and `HelmRelease`. + +**Flux can deploy**: + +- Helm charts with `HelmRepository` and `HelmRelease` +- OCI-based Helm charts with `OCIRepository` and `HelmRelease` +- Kustomize overlays with `GitRepository` and `Kustomization` +- Plain manifests through a Flux `Kustomization` + +**Get started**: [Flux Guide](/self-host/manual/kubernetes/gitops/flux) + +### Use Helmfile if... + +- You need to manage multiple Helm releases as one deployment stack. +- You want to install Pangolin together with supporting components. +- You want one declarative file for releases, values files, and release ordering. +- You prefer running one controlled workflow instead of several manual `helm upgrade --install` commands. + +Helmfile is a declarative wrapper around Helm. It does not replace Helm; it calls Helm to apply the declared releases. + +**Common scenario**: One Helmfile manages supporting components such as an ingress controller, certificate management, database components, Pangolin, and Newt. + +**Get started**: [Helmfile Guide](/self-host/manual/kubernetes/helmfile) + +## Important clarifications + +### Argo CD and Flux are not Helm replacements + +Argo CD and Flux are delivery and reconciliation tools. They do not replace Helm or Kustomize. + +- **Helm** packages and renders Kubernetes resources from charts. +- **Kustomize** customizes Kubernetes manifests through bases, overlays, and patches. +- **Argo CD** reconciles applications from Git, Helm repositories, OCI registries, or other configured sources. +- **Flux** reconciles sources and workloads through Kubernetes custom resources such as `HelmRelease` and `Kustomization`. + +You can use Argo CD or Flux with Helm charts, Kustomize overlays, or plain manifests. + +### OCI is not a separate install method + +OCI (Open Container Initiative) describes a chart distribution format, not a separate deployment workflow. 
+ +For Pangolin and Newt, OCI chart publishing is available in GHCR: + +- Newt: `oci://ghcr.io/fosrl/helm-charts/newt` (for example `1.4.0`) +- Pangolin: `oci://ghcr.io/fosrl/helm-charts/pangolin` (for example `0.1.0-alpha.0`) + +You still choose the same deployment method (Helm directly, or GitOps with Argo CD/Flux). OCI only changes where charts are pulled from. + +Classic Helm repository flow is still valid: + +```bash +helm repo add fossorial https://charts.fossorial.io +helm repo update fossorial +helm install my-newt fossorial/newt +helm install my-pangolin fossorial/pangolin +``` + +OCI workflow with Helm: + +```bash +helm pull oci://ghcr.io/fosrl/helm-charts/newt --version 1.4.0 +helm pull oci://ghcr.io/fosrl/helm-charts/pangolin --version 0.1.0-alpha.0 +``` + +```bash +helm install my-newt oci://ghcr.io/fosrl/helm-charts/newt \ + --version 1.4.0 + +helm install my-pangolin oci://ghcr.io/fosrl/helm-charts/pangolin \ + --version 0.1.0-alpha.0 +``` + +### Raw YAML is not a separate primary workflow + +This documentation does not provide a dedicated raw-YAML installation path. + +Raw manifests are still possible: + +- Render a Helm chart with `helm template`. +- Render Kustomize overlays with `kustomize build` or `kubectl kustomize`. +- Apply generated manifests with `kubectl apply -f`. +- Reconcile plain manifests with Argo CD or Flux. + +For most users, Helm, Kustomize, Argo CD, or Flux is easier to maintain than applying standalone YAML files manually. + +## Next steps + + + + Review cluster, tooling, ingress, DNS, storage, and secret requirements. + + + Install Pangolin or Newt with the recommended chart-based workflow. + + + Use bases, overlays, and patches for manifest-driven deployments. + + + Deploy and reconcile Pangolin or Newt with Argo CD or Flux. 
+ + diff --git a/self-host/manual/kubernetes/gitops/argocd.mdx b/self-host/manual/kubernetes/gitops/argocd.mdx new file mode 100644 index 0000000..14a04b3 --- /dev/null +++ b/self-host/manual/kubernetes/gitops/argocd.mdx @@ -0,0 +1,468 @@ +--- +title: "Argo CD" +description: "Deploy Pangolin and Newt using Argo CD for Git-driven GitOps reconciliation." +--- + +import PangolinCloudTocCta from "/snippets/pangolin-cloud-toc-cta.mdx"; + + + + +Argo CD is a declarative GitOps tool that continuously syncs your cluster state to your Git repository. This guide covers installing Pangolin and Newt using Argo CD. + +## Argo CD overview + +Argo CD watches your Git repository (or Helm chart repository) and automatically reconciles Kubernetes resources to match the desired state defined in Git. + +**Key concepts**: + +- **Application**: Argo CD custom resource that defines what to deploy, where, and how +- **Helm source**: Argo CD uses Helm to render charts; you provide values +- **Kustomize source**: Argo CD uses Kustomize to build manifests +- **Sync**: Process of applying desired state to the cluster +- **Drift**: When cluster state diverges from Git (Argo CD can detect and correct) + +## Prerequisites + +- Argo CD installed in your cluster (in `argocd` namespace, typically) +- Helm repo configured: `helm repo add fossorial https://charts.fossorial.io` +- Git repository with Argo CD configuration (optional, can use chart repo as source) +- Newt auth secret (if installing Newt) + +## Install Argo CD + +If you don't have Argo CD yet: + +```bash +# Create namespace +kubectl create namespace argocd + +# Install Argo CD +helm repo add argo https://argoproj.github.io/argo-helm +helm repo update argo +helm install argocd argo/argo-cd -n argocd +``` + +Access the Argo CD UI: + +```bash +# Port-forward +kubectl port-forward -n argocd svc/argocd-server 8080:443 + +# Visit https://localhost:8080 +# Default username: admin +# Password: kubectl get secret -n argocd 
argocd-initial-admin-secret -o jsonpath="{.data.password}" | base64 -d +``` + +## Install Pangolin with Argo CD using Helm + +### Step 1: Create Pangolin namespace + +```bash +kubectl create namespace pangolin +``` + +### Step 2: Create Application + +Create an Argo CD Application resource that tells Argo CD to deploy Pangolin using the Helm chart: + +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: pangolin + namespace: argocd +spec: + project: default + + source: + repoURL: https://charts.fossorial.io + chart: pangolin + targetRevision: 0.1.0-alpha.0 # or use ~0.1.0 for range + helm: + values: | + deployment: + type: controller + mode: multi + + database: + mode: cloudnativepg + + pangolin: + config: + app: + dashboard_url: https://pangolin.example.com + domains: + domain1: + base_domain: example.com + gerbil: + base_endpoint: vpn.example.com + + ingress: + enabled: true + className: traefik + hosts: + - host: pangolin.example.com + paths: + - path: / + pathType: Prefix + tls: + - secretName: pangolin-tls + hosts: + - pangolin.example.com + + destination: + server: https://kubernetes.default.svc + namespace: pangolin + + syncPolicy: + syncOptions: + - CreateNamespace=true + automated: + prune: true + selfHeal: true +``` + +Apply the Application: + +```bash +kubectl apply -f pangolin-app.yaml +``` + +### Step 3: Monitor in Argo CD + +In the Argo CD UI, you should see the `pangolin` application. Argo CD will: + +1. Fetch the Helm chart from `https://charts.fossorial.io` +2. Render the chart with your inline `values` +3. Create all resources in the `pangolin` namespace +4. 
Continuously monitor for drift + +### Step 4: Verify deployment + +```bash +# Check Argo CD status +kubectl describe app -n argocd pangolin + +# Check pod status +kubectl get pods -n pangolin +``` + +## Install Newt with Argo CD using Helm + +### Step 1: Create Newt auth secret + +```bash +kubectl create secret generic newt-auth \ + -n pangolin \ + --from-literal=PANGOLIN_ENDPOINT=https://pangolin.example.com \ + --from-literal=NEWT_ID= \ + --from-literal=NEWT_SECRET= +``` + +### Step 2: Create Newt Application + +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: newt + namespace: argocd +spec: + project: default + + source: + repoURL: https://charts.fossorial.io + chart: newt + targetRevision: 1.4.0 + helm: + values: | + newtInstances: + - name: main-tunnel + enabled: true + auth: + existingSecretName: newt-auth + + destination: + server: https://kubernetes.default.svc + namespace: pangolin + + syncPolicy: + syncOptions: + - CreateNamespace=true + automated: + prune: true + selfHeal: true +``` + +Apply: + +```bash +kubectl apply -f newt-app.yaml +``` + +## Using Argo CD with Git repository + +Instead of inline values, you can store configuration in Git and have Argo CD deploy from there: + +### Repository structure + +``` +infrastructure/ +├── apps/ +│ ├── pangolin/ +│ │ ├── values-base.yaml +│ │ ├── values-prod.yaml +│ │ └── app.yaml (Argo CD Application CRD) +│ └── newt/ +│ ├── values.yaml +│ └── app.yaml +└── clusters/ + └── production/ + ├── pangolin.yaml (reference to app) + └── newt.yaml +``` + +### Git-based Application + +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: pangolin + namespace: argocd +spec: + project: default + + source: + repoURL: https://github.com/my-org/infrastructure + path: apps/pangolin + targetRevision: main + helm: + valuesObject: + deployment: + type: controller + mode: multi + releaseName: pangolin + + destination: + server: https://kubernetes.default.svc + namespace: 
pangolin + + syncPolicy: + syncOptions: + - CreateNamespace=true + automated: + prune: true + selfHeal: true +``` + +Argo CD will watch the Git repository and auto-sync on changes to `apps/pangolin`. + +## Using Argo CD with Kustomize + +Deploy Pangolin using Kustomize overlays: + +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: pangolin + namespace: argocd +spec: + project: default + + source: + repoURL: https://github.com/my-org/infrastructure + path: overlays/production + targetRevision: main + + destination: + server: https://kubernetes.default.svc + namespace: pangolin + + syncPolicy: + syncOptions: + - CreateNamespace=true + automated: + prune: true + selfHeal: true +``` + +## Sync policies + +### Automated sync + +**prune: true**: Deletes resources in cluster that are no longer in Git + +**selfHeal: true**: Resyncs if cluster drifts from Git (e.g., manual `kubectl apply`) + +```yaml +syncPolicy: + automated: + prune: true + selfHeal: true + allowEmpty: false # prevent accidental deletion of all resources +``` + +### Manual sync + +Sync only when you explicitly trigger it: + +```yaml +syncPolicy: + syncOptions: + - CreateNamespace=true +``` + +Manually sync: + +```bash +argocd app sync pangolin +# or use UI +``` + +## Advanced: ApplicationSet for multi-environment + +Deploy Pangolin and Newt across multiple clusters or environments: + +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: ApplicationSet +metadata: + name: pangolin-multienv + namespace: argocd +spec: + generators: + - list: + elements: + - cluster: production + env: prod + - cluster: staging + env: staging + template: + metadata: + name: pangolin-{{ .cluster }} + spec: + project: default + source: + repoURL: https://github.com/my-org/infrastructure + path: clusters/{{ .cluster }}/pangolin + targetRevision: main + destination: + name: '{{ .cluster }}' + namespace: pangolin + syncPolicy: + automated: + prune: true + selfHeal: true +``` + +## OCI Helm sources (if 
available) + +If the Helm chart is available in an OCI registry: + +```yaml +source: + repoURL: oci://registry.example.com/fossorial + chart: pangolin + targetRevision: 0.1.0-alpha.0 + helm: + values: | + # ... values ... +``` + +OCI chart references work the same as traditional Helm repository references in Argo CD. + +## Troubleshooting Argo CD deployments + +### Check Application status + +```bash +kubectl describe app -n argocd pangolin +kubectl get app -n argocd pangolin -o yaml +``` + +### Check sync status + +```bash +argocd app get pangolin +argocd app logs pangolin +``` + +### Manual sync + +```bash +argocd app sync pangolin --force +``` + +### Refresh from repository + +```bash +argocd app diff pangolin +``` + +### Delete Application + +```bash +kubectl delete app -n argocd pangolin +``` + +## Common patterns + +### Different values per environment + +Use multiple Applications: + +```yaml +# production/pangolin-app.yaml +spec: + source: + helm: + values: | + resources: + limits: + cpu: 2000m + memory: 2Gi + replicas: 3 + +# staging/pangolin-app.yaml +spec: + source: + helm: + values: | + resources: + limits: + cpu: 500m + memory: 512Mi + replicas: 1 +``` + +### Secrets with sealed-secrets + +Use sealed-secrets to safely store secrets in Git: + +```yaml +# In Git +apiVersion: bitnami.com/v1alpha1 +kind: SealedSecret +metadata: + name: newt-auth + namespace: pangolin +spec: + encryptedData: + PANGOLIN_ENDPOINT: AgC4F5qd... + NEWT_ID: AgB9l2pK... + NEWT_SECRET: AgDq3jX... +``` + +Argo CD applies the sealed secret; the cluster decrypts it. + +## Next steps + + + + + + + diff --git a/self-host/manual/kubernetes/gitops/flux.mdx b/self-host/manual/kubernetes/gitops/flux.mdx new file mode 100644 index 0000000..d4c823f --- /dev/null +++ b/self-host/manual/kubernetes/gitops/flux.mdx @@ -0,0 +1,559 @@ +--- +title: "Flux" +description: "Deploy Pangolin and Newt using Flux for Git-driven GitOps reconciliation." 
+--- + +import PangolinCloudTocCta from "/snippets/pangolin-cloud-toc-cta.mdx"; + + + + +Flux is a declarative GitOps tool that uses Kubernetes-native Custom Resources to manage deployments. This guide covers installing Pangolin and Newt using Flux. + +## Flux overview + +Flux watches your Git repository and continuous reconciles cluster state using Kubernetes CRDs: + +- **HelmRepository**: Defines a Helm chart repository +- **HelmRelease**: Declaratively manages a Helm chart deployment +- **GitRepository**: References a Git repository +- **Kustomization**: Reconciles Kustomize overlays +- **OCIRepository**: References an OCI-based container registry (for Helm charts) + +**Key benefits**: + +- Native Kubernetes reconciliation (no separate UI needed, though one exists) +- Lightweight footprint +- Excellent for multi-cluster deployments +- Declarative everything: sources, releases, dependencies + +## Flux prerequisites + +- Kubernetes 1.25+ +- `flux` CLI installed: [Flux install guide](https://fluxcd.io/flux/installation/) +- Git repository for configuration (optional, can use built-in sources) +- GitHub, GitLab, or other Git provider account (optional) + +Install Flux CLI: + +```bash +# macOS/Linux with brew +brew install flux + +# or curl +curl -s https://fluxcd.io/install.sh | sudo bash + +# Verify +flux --version +``` + +## Install Flux on your cluster + +### Option 1: Bootstrap Flux from GitHub + +Flux `bootstrap` automatically installs Flux and configures Git sync: + +```bash +flux bootstrap github \ + --owner=my-org \ + --repo=infrastructure \ + --personal \ + --path=clusters/production +``` + +This creates the Git repository structure and installs Flux components. 
+ +### Option 2: Manual Flux installation + +```bash +# Create flux-system namespace and install Flux +flux install --namespace=flux-system --network-policy=true +``` + +## Install Pangolin with Flux using HelmRelease + +### Step 1: Create HelmRepository + +Define the Fossorial Helm chart repository: + +```yaml +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: HelmRepository +metadata: + name: fossorial + namespace: flux-system +spec: + interval: 5m + url: https://charts.fossorial.io +``` + +Apply: + +```bash +kubectl apply -f helmrepo.yaml + +# Verify +kubectl get helmrepo -n flux-system +``` + +### Step 2: Create Pangolin HelmRelease + +```yaml +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: pangolin + namespace: pangolin +spec: + interval: 10m + chart: + spec: + chart: pangolin + version: 0.1.0-alpha.0 # or use ~0.1.0 for auto-upgrades + sourceRef: + kind: HelmRepository + name: fossorial + namespace: flux-system + + install: + crds: Create + upgrade: + crds: CreateReplace + + values: + deployment: + type: controller + mode: multi + + database: + mode: cloudnativepg + + pangolin: + config: + app: + dashboard_url: https://pangolin.example.com + domains: + domain1: + base_domain: example.com + gerbil: + base_endpoint: vpn.example.com + + ingress: + enabled: true + className: traefik + hosts: + - host: pangolin.example.com + paths: + - path: / + pathType: Prefix + tls: + - secretName: pangolin-tls + hosts: + - pangolin.example.com +``` + +Create namespace: + +```bash +kubectl create namespace pangolin +``` + +Apply: + +```bash +kubectl apply -f pangolin-helmrelease.yaml +``` + +### Step 3: Monitor reconciliation + +```bash +# Check HelmRelease status +kubectl get helmrelease -n pangolin + +# Watch live +kubectl get helmrelease -n pangolin -w + +# Describe for details +kubectl describe helmrelease pangolin -n pangolin + +# Check Flux logs +flux logs --all-namespaces --follow +``` + +## Install Newt with Flux using HelmRelease + +### 
Step 1: Create Newt auth secret + +```bash +kubectl create secret generic newt-auth \ + -n pangolin \ + --from-literal=PANGOLIN_ENDPOINT=https://pangolin.example.com \ + --from-literal=NEWT_ID= \ + --from-literal=NEWT_SECRET= +``` + +### Step 2: Create Newt HelmRelease + +```yaml +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: newt + namespace: pangolin +spec: + interval: 10m + chart: + spec: + chart: newt + version: 1.4.0 + sourceRef: + kind: HelmRepository + name: fossorial + namespace: flux-system + + values: + newtInstances: + - name: main-tunnel + enabled: true + auth: + existingSecretName: newt-auth +``` + +Apply: + +```bash +kubectl apply -f newt-helmrelease.yaml +``` + +### Step 3: Verify + +```bash +kubectl get helmrelease -n pangolin +kubectl describe helmrelease newt -n pangolin +``` + +## Using Flux with Git repository (GitOps) + +Store Flux configuration in Git and have Flux automatically reconcile changes: + +### Repository structure + +``` +infrastructure/ +├── clusters/ +│ └── production/ +│ ├── flux-system/ +│ │ └── gotk-components.yaml (auto-generated) +│ ├── pangolin/ +│ │ ├── helmrepo.yaml +│ │ ├── pangolin-helmrelease.yaml +│ │ └── newt-helmrelease.yaml +│ └── kustomization.yaml +└── apps/ + ├── pangolin/ + │ └── values.yaml + └── newt/ + └── values.yaml +``` + +### GitRepository for configuration + +```yaml +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: GitRepository +metadata: + name: infrastructure + namespace: flux-system +spec: + interval: 1m + url: https://github.com/my-org/infrastructure + ref: + branch: main +``` + +### Kustomization for syncing + +```yaml +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: production + namespace: flux-system +spec: + interval: 10m + sourceRef: + kind: GitRepository + name: infrastructure + path: ./clusters/production + prune: true + wait: true +``` + +Flux watches `clusters/production` in Git and auto-applies all resources. 
+ +## Using Flux with Kustomize overlays + +Manage environment-specific overlays with Flux: + +### Repository structure + +``` +overlays/ +├── dev/ +│ ├── kustomization.yaml +│ └── pangolin-patch.yaml +├── staging/ +│ └── kustomization.yaml +└── prod/ + ├── kustomization.yaml + └── pangolin-patch.yaml +``` + +### Kustomization resource + +```yaml +apiVersion: kustomize.toolkit.fluxcd.io/v1 +kind: Kustomization +metadata: + name: pangolin-prod + namespace: flux-system +spec: + interval: 10m + sourceRef: + kind: GitRepository + name: infrastructure + path: ./overlays/prod + prune: true + wait: true +``` + +Flux builds and applies the Kustomize overlay automatically. + +## Using Flux with OCI Helm charts + +If Helm charts are available in an OCI registry: + +```yaml +apiVersion: source.toolkit.fluxcd.io/v1beta2 +kind: OCIRepository +metadata: + name: fossorial-oci + namespace: flux-system +spec: + interval: 5m + url: oci://registry.example.com/fossorial + +--- +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: pangolin + namespace: pangolin +spec: + interval: 10m + chart: + spec: + chart: pangolin + version: 0.1.0-alpha.0 + sourceRef: + kind: OCIRepository + name: fossorial-oci + namespace: flux-system + values: + # ... values ... +``` + +## Advanced: Dependency ordering + +Order HelmReleases to install dependencies first: + +```yaml +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: cert-manager + namespace: cert-manager +spec: + interval: 10m + chart: + spec: + chart: cert-manager + # ... + +--- +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: pangolin + namespace: pangolin +spec: + interval: 10m + dependsOn: + - name: cert-manager + namespace: cert-manager + chart: + spec: + chart: pangolin + # ... +``` + +Flux ensures `cert-manager` reconciles before `pangolin`. 
+ +## Advanced: valuesFrom ConfigMap/Secret + +Store values in ConfigMaps or Secrets, referenced from HelmRelease: + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: pangolin-values + namespace: pangolin +data: + values.yaml: | + deployment: + type: controller + mode: multi + +--- +apiVersion: helm.toolkit.fluxcd.io/v2 +kind: HelmRelease +metadata: + name: pangolin + namespace: pangolin +spec: + interval: 10m + chart: + spec: + chart: pangolin + # ... + valuesFrom: + - kind: ConfigMap + name: pangolin-values +``` + +Flux extracts values from the ConfigMap and applies them to the HelmRelease. + +## Troubleshooting Flux + +### Check Flux components + +```bash +kubectl get deployments -n flux-system +flux check --all-namespaces +``` + +### Check HelmRelease status + +```bash +kubectl get helmrelease -n pangolin +kubectl describe helmrelease pangolin -n pangolin +kubectl get helmrelease pangolin -n pangolin -o yaml +``` + +### View reconciliation logs + +```bash +flux logs --all-namespaces --follow + +# Specific resource +kubectl logs -n pangolin deployment/helm-operator -f +``` + +### Manual reconciliation + +```bash +flux reconcile helmrelease pangolin -n pangolin +flux reconcile kustomization production -n flux-system +``` + +### Suspend reconciliation + +```bash +flux suspend helmrelease pangolin -n pangolin +``` + +### Resume reconciliation + +```bash +flux resume helmrelease pangolin -n pangolin +``` + +## Multi-environment example + +### Bootstrap multiple clusters + +```bash +# Production cluster +flux bootstrap github \ + --owner=my-org \ + --repo=infrastructure \ + --personal \ + --path=clusters/production + +# Staging cluster (from different checkout) +flux bootstrap github \ + --owner=my-org \ + --repo=infrastructure \ + --personal \ + --path=clusters/staging +``` + +Each cluster reconciles its own `clusters/*/` directory. 
+ +### Repository structure + +``` +clusters/ +├── production/ +│ ├── kustomization.yaml +│ └── pangolin/ +│ ├── helmrepo.yaml +│ └── helmrelease.yaml (prod values) +├── staging/ +│ ├── kustomization.yaml +│ └── pangolin/ +│ ├── helmrepo.yaml +│ └── helmrelease.yaml (staging values) +└── dev/ + ├── kustomization.yaml + └── pangolin/ + └── helmrelease.yaml (dev values) +``` + +Each environment's HelmRelease uses environment-specific values. + +## Important notes + +### CRD management + +When using Flux with Helm charts that include CRDs: + +```yaml +spec: + install: + crds: Create # Create CRDs on first install + upgrade: + crds: CreateReplace # Update CRDs on upgrade +``` + +### Namespace creation + +Flux automatically creates namespaces if they don't exist. Ensure appropriate RBAC. + +### GitOps best practices + +- Use branches for different environments +- Protect production branches with review requirements +- Store secrets using sealed-secrets or external-secrets +- Track all changes in Git +- Use consistent naming conventions + +## Next steps + + + + + + + diff --git a/self-host/manual/kubernetes/gitops/overview.mdx b/self-host/manual/kubernetes/gitops/overview.mdx new file mode 100644 index 0000000..f5cbc57 --- /dev/null +++ b/self-host/manual/kubernetes/gitops/overview.mdx @@ -0,0 +1,351 @@ +--- +title: "GitOps Overview" +description: "Git-driven Kubernetes deployments for Pangolin and Newt using Argo CD or Flux." +--- + +import PangolinCloudTocCta from "/snippets/pangolin-cloud-toc-cta.mdx"; + + + + +GitOps is a declarative approach to infrastructure management where your Git repository is the single source of truth for cluster state. Changes go through Git; the GitOps tool automatically reconciles the cluster to match. + +## What is GitOps? + +GitOps reconciliation loop: + + + + **Git repository** contains desired state (manifests, Helm values, Kustomize overlays). + + + **GitOps tool** (Argo CD or Flux) watches the Git repository. 
+ + + The controller detects new commits or drift between Git and the live cluster. + + + The controller syncs cluster state to match Git state. + + + **Result**: Cluster stays aligned with the declared Git configuration. + + + +## Benefits + +- **Version control**: All infrastructure changes tracked in Git +- **Audit trail**: See who changed what and when +- **Rollback**: Revert to previous state by reverting Git commits +- **Automation**: No manual `kubectl apply` commands needed +- **Drift detection**: Automatic alerts if cluster diverges from Git + +## GitOps tools for Kubernetes + +### Argo CD + +- **UI**: Web-based dashboard for monitoring and manual syncs +- **Approach**: External reconciler (watches Git, applies to cluster) +- **Supports**: Helm charts, Kustomize overlays, raw YAML +- **Best for**: Teams who want GitOps with a UI, hybrid manual/automated workflows + +**Use Argo CD if**: +- You want a visual dashboard +- You need frequent manual sync capabilities +- You're already using Argo CD for other workloads + +See: [Argo CD Install Guide](/self-host/manual/kubernetes/gitops/argocd) + +### Flux + +- **CRDs**: Kubernetes-native Custom Resources (HelmRelease, Kustomization, GitRepository) +- **Approach**: Declarative reconciliation using Kubernetes resources +- **Supports**: Helm charts, Kustomize overlays, raw YAML, OCI registries +- **Best for**: Teams who want declarative Kubernetes-way GitOps, lightweight controllers + +**Use Flux if**: +- You prefer Kubernetes-native CRDs +- You want a lightweight, modern GitOps tool +- You're already using Flux for other workloads + +See: [Flux Install Guide](/self-host/manual/kubernetes/gitops/flux) + +## Recommended repository structure + +For multi-environment Pangolin/Newt deployments, organize your Git repository like this: + +``` +my-org/infrastructure/ +├── clusters/ +│ ├── production/ +│ │ ├── pangolin/ +│ │ │ ├── values.yaml +│ │ │ └── kustomization.yaml (if using Kustomize) +│ │ └── newt/ +│ │ ├── 
values.yaml +│ │ └── kustomization.yaml +│ ├── staging/ +│ │ ├── pangolin/ +│ │ └── newt/ +│ └── dev/ +│ ├── pangolin/ +│ └── newt/ +├── apps/ +│ ├── pangolin/ +│ │ ├── helm/ +│ │ │ ├── values-base.yaml +│ │ │ ├── values-prod.yaml +│ │ │ └── values-staging.yaml +│ │ └── kustomize/ +│ │ ├── base/ +│ │ └── overlays/ +│ └── newt/ +│ ├── helm/ +│ └── kustomize/ +└── .gitignore +``` + +**Pattern**: +- `clusters/` → environment-specific configuration +- `apps/` → shared, reusable application configuration +- Environment overlays layer on top of app definitions + +## Secrets in GitOps + + +**Never commit plaintext secrets to Git.** Use secret management tools instead. + + +Options for managing secrets in GitOps: + +### Sealed Secrets + +- **Tool**: [sealed-secrets](https://github.com/bitnami-labs/sealed-secrets) +- **How**: Encrypt secrets with cluster-specific key; safe to commit encrypted secrets +- **Decrypt**: Only the cluster can decrypt (uses private key) + +```bash +# Encrypt a secret +echo -n mypassword | kubeseal -f - > secret.yaml + +# Git tracks encrypted secret.yaml +# Cluster auto-decrypts on apply +``` + +### External Secrets Operator + +- **Tool**: [external-secrets](https://external-secrets.io/) +- **How**: Reference secrets stored in external vault (AWS Secrets Manager, Azure Key Vault, HashiCorp Vault) +- **Git**: Stores reference only, not secret values + +```yaml +apiVersion: external-secrets.io/v1beta1 +kind: SecretStore +metadata: + name: vault-backend +spec: + provider: + vault: + server: "https://vault.example.com" +``` + +### SOPS (Secrets Operations) + +- **Tool**: [SOPS](https://github.com/mozilla/sops) +- **How**: Encrypt YAML files; decrypt at deploy time +- **Git**: Stores encrypted files + +```bash +sops --encrypt secrets.yaml > secrets.enc.yaml +# Commit secrets.enc.yaml; tool decrypts on apply +``` + +### Cloud Provider Secrets + +- **AWS**: Use AWS Secrets Manager or Parameter Store with IRSA (IAM Roles for Service Accounts) +- **Azure**: 
Use Azure Key Vault with pod identity +- **GCP**: Use Google Secret Manager with workload identity + + +Choose a secret management strategy **before** setting up GitOps. Seal secrets once; keep approach consistent. + + +## GitOps workflow example + +### 1. Set up Git repository + +```bash +git clone https://github.com/my-org/infrastructure.git +cd infrastructure +mkdir -p clusters/production/pangolin +cd clusters/production/pangolin +``` + +### 2. Create configuration + +```bash +# values.yaml with Pangolin config +cat > values.yaml <=0.1.0" # auto-upgrade to latest 0.1.x +``` + +### Drift detection and remediation + +Argo CD and Flux both support continuous drift detection: + +- **Argo CD**: Detects drift on demand or continuously; can auto-sync on drift +- **Flux**: Reconciles on interval; rolls back manual cluster changes + +## Next steps + + + + + + + diff --git a/self-host/manual/kubernetes/helm.mdx b/self-host/manual/kubernetes/helm.mdx new file mode 100644 index 0000000..8bc5069 --- /dev/null +++ b/self-host/manual/kubernetes/helm.mdx @@ -0,0 +1,399 @@ +--- +title: "Helm" +description: "Kubernetes installation using Helm charts for Pangolin and Newt." +--- + +import PangolinCloudTocCta from "/snippets/pangolin-cloud-toc-cta.mdx"; + + + + +Helm is the recommended method for standard Kubernetes installations of Pangolin and Newt. + +Use Helm when you want a chart-based workflow for installing, upgrading, rolling back, and removing releases from your cluster. 
+ +## Helm repository setup + +Add the Fossorial Helm chart repository: + +```bash +helm repo add fossorial https://charts.fossorial.io +helm repo update fossorial +``` + +Search for available charts: + +```bash +helm search repo fossorial +``` + +The classic Helm repository flow is the default path for most installations: + +```bash +helm install my-newt fossorial/newt +helm install my-pangolin fossorial/pangolin +``` + +## Installation overview + +A typical Helm installation flow looks like this: + + + + Create the namespace manually and apply required labels or annotations. + + + Create a `values.yaml` file for each release (`values-pangolin.yaml`, `values-newt.yaml`). + + + Install with `helm upgrade --install` to support first install and future updates with the same command. + + + Confirm Helm release status and Kubernetes resources after deployment. + + + + +It is recommended to create the namespace explicitly before installation. This allows you to apply Pod Security Admission labels, policy labels, annotations, or other cluster-specific metadata before the chart creates workloads. 
+ + +For detailed installation steps, see: + +* [Pangolin Helm Quick-Start](/self-host/manual/kubernetes/pangolin/helm) — Install Pangolin +* [Newt Helm Quick-Start](/self-host/manual/kubernetes/newt/helm) — Install Newt + +## Install command patterns + + +```bash Classic Helm repository +helm upgrade --install pangolin fossorial/pangolin \ + --namespace pangolin \ + --values values-pangolin.yaml + +helm upgrade --install newt fossorial/newt \ + --namespace pangolin \ + --values values-newt.yaml +``` + +```bash OCI (GHCR) +helm upgrade --install pangolin oci://ghcr.io/fosrl/helm-charts/pangolin \ + --version 0.1.0-alpha.0 \ + --namespace pangolin \ + --values values-pangolin.yaml + +helm upgrade --install newt oci://ghcr.io/fosrl/helm-charts/newt \ + --version 1.4.0 \ + --namespace pangolin \ + --values values-newt.yaml +``` + + +## Namespace preparation + +Create the namespace before installing the chart: + +```bash +kubectl create namespace pangolin +``` + +If your cluster uses Pod Security Admission or namespace-based policies, apply the required labels before installation. + +Example: + +```bash +kubectl label namespace pangolin \ + pod-security.kubernetes.io/enforce=baseline \ + pod-security.kubernetes.io/audit=restricted \ + pod-security.kubernetes.io/warn=restricted +``` + + +Pangolin deployments that include Gerbil require permissions that are not compatible with a restricted namespace profile, because Gerbil manages WireGuard and requires capabilities such as `NET_ADMIN`. + + +For more details, see [Prerequisites](/self-host/manual/kubernetes/prerequisites). + +## Install with a values file + +Both charts use values files for configuration. 
+ +Pangolin example: + +```bash +helm upgrade --install pangolin fossorial/pangolin \ + --namespace pangolin \ + --values values-pangolin.yaml +``` + +Newt example: + +```bash +helm upgrade --install newt fossorial/newt \ + --namespace pangolin \ + --values values-newt.yaml +``` + +Using `helm upgrade --install` keeps the command usable for both the first installation and later configuration changes. + + +Do not use `--create-namespace` if you need custom namespace labels or annotations. Create the namespace first and then run Helm against that namespace. + + +## Values and configuration + +Keep reusable configuration in a values file: + +```bash +helm upgrade --install pangolin fossorial/pangolin \ + --namespace pangolin \ + --values values-pangolin.yaml +``` + +Use `--set` only for small tests or temporary overrides: + +```bash +helm upgrade --install pangolin fossorial/pangolin \ + --namespace pangolin \ + --set example.key=value +``` + +Common value sources: + +* `values-pangolin.yaml` for Pangolin. +* `values-newt.yaml` for Newt. +* Kubernetes Secrets for credentials. +* Existing cluster resources such as TLS secrets, StorageClasses, or ingress controllers. + +Full configuration options are documented here: + +* [Pangolin Configuration](/self-host/manual/kubernetes/pangolin/configuration) +* [Newt Configuration](/self-host/manual/kubernetes/newt/configuration) + +## Artifact Hub and chart discovery + +The Fossorial charts can be installed from the Fossorial Helm repository: + +```bash +helm repo add fossorial https://charts.fossorial.io +helm repo update fossorial +helm search repo fossorial +``` + +Artifact Hub can also be used to discover published chart metadata, available versions, install commands, and repository information. + + +Always verify the chart name, chart version, and repository URL before copying install commands into production. + + +## OCI-based charts + +OCI is not a separate installation method. 
It only changes where Helm pulls the chart from. + +For Pangolin and Newt, OCI chart publishing is available in GHCR: + +* Newt: `oci://ghcr.io/fosrl/helm-charts/newt` +* Pangolin: `oci://ghcr.io/fosrl/helm-charts/pangolin` + +You still use Helm in the same way: choose a chart, select a version, provide values, and install the release. + +### Pull OCI charts + +Newt example: + +```bash +helm pull oci://ghcr.io/fosrl/helm-charts/newt \ + --version 1.4.0 +``` + +Pangolin example: + +```bash +helm pull oci://ghcr.io/fosrl/helm-charts/pangolin \ + --version 0.1.0-alpha.0 +``` + +### Install from OCI + +Newt example: + +```bash +helm upgrade --install newt oci://ghcr.io/fosrl/helm-charts/newt \ + --version 1.4.0 \ + --namespace pangolin \ + --values values-newt.yaml +``` + +Pangolin example: + +```bash +helm upgrade --install pangolin oci://ghcr.io/fosrl/helm-charts/pangolin \ + --version 0.1.0-alpha.0 \ + --namespace pangolin \ + --values values-pangolin.yaml +``` + + +Use the classic Helm repository when you want the normal `helm repo add` and `helm search repo` workflow. Use OCI when you want to pull charts directly from GHCR or when your deployment tooling expects OCI chart references. + + +## Upgrade and maintenance + +### Update the classic Helm repository + +```bash +helm repo update fossorial +``` + +This step is only needed when using the classic Helm repository. OCI installs pull the chart by OCI reference and version. 
+ +### Upgrade Pangolin + +Classic Helm repository: + +```bash +helm upgrade pangolin fossorial/pangolin \ + --namespace pangolin \ + --values values-pangolin.yaml +``` + +OCI: + +```bash +helm upgrade pangolin oci://ghcr.io/fosrl/helm-charts/pangolin \ + --version 0.1.0-alpha.0 \ + --namespace pangolin \ + --values values-pangolin.yaml +``` + +### Upgrade Newt + +Classic Helm repository: + +```bash +helm upgrade newt fossorial/newt \ + --namespace pangolin \ + --values values-newt.yaml +``` + +OCI: + +```bash +helm upgrade newt oci://ghcr.io/fosrl/helm-charts/newt \ + --version 1.4.0 \ + --namespace pangolin \ + --values values-newt.yaml +``` + +### Check release status + +```bash +helm status pangolin --namespace pangolin +helm history pangolin --namespace pangolin +``` + +```bash +helm status newt --namespace pangolin +helm history newt --namespace pangolin +``` + +### View rendered manifests + +```bash +helm get manifest pangolin --namespace pangolin +``` + +```bash +helm get manifest newt --namespace pangolin +``` + +### View applied values + +```bash +helm get values pangolin --namespace pangolin +``` + +```bash +helm get values newt --namespace pangolin +``` + +### Roll back a release + +```bash +helm rollback pangolin --namespace pangolin +``` + +```bash +helm rollback newt --namespace pangolin +``` + +### Uninstall a release + +```bash +helm uninstall pangolin --namespace pangolin +``` + +```bash +helm uninstall newt --namespace pangolin +``` + + +Uninstalling a Helm release does not always remove persistent volumes, externally managed secrets, DNS records, certificates, or cloud load balancers. Review the namespace and related cluster resources before deleting data. + + +## Using Helm with GitOps + +Helm charts can also be installed and reconciled through GitOps tools. + +* [Argo CD](/self-host/manual/kubernetes/gitops/argocd) can deploy Helm charts from a Helm repository, Git repository, or OCI source. 
+* [Flux](/self-host/manual/kubernetes/gitops/flux) can reconcile Helm charts through `HelmRepository`, `OCIRepository`, and `HelmRelease`. + +In these workflows, Helm is the chart format. The GitOps controller manages reconciliation. + +## Troubleshooting + +For component-specific troubleshooting, see: + +* [Pangolin Troubleshooting](/self-host/manual/kubernetes/pangolin/troubleshooting) +* [Newt Troubleshooting](/self-host/manual/kubernetes/newt/troubleshooting) + +Useful Helm commands: + +```bash +helm list --all-namespaces +helm status --namespace +helm history --namespace +helm get values --namespace +helm get manifest --namespace +``` + +Useful Kubernetes commands: + +```bash +kubectl get pods -n pangolin +kubectl get events -n pangolin --sort-by=.lastTimestamp +kubectl describe pod -n pangolin +kubectl logs -n pangolin +``` + +## Next steps + + + + Install Pangolin with the Helm chart. + + + Install Newt with the Helm chart. + + + Configure Pangolin chart values for your cluster. + + + Configure Newt chart values and credentials. + + + Deploy the charts with Argo CD. + + + Deploy the charts with Flux. + + diff --git a/self-host/manual/kubernetes/helmfile.mdx b/self-host/manual/kubernetes/helmfile.mdx new file mode 100644 index 0000000..605b2ad --- /dev/null +++ b/self-host/manual/kubernetes/helmfile.mdx @@ -0,0 +1,386 @@ +--- +title: "Helmfile" +description: "Advanced Kubernetes installation using Helmfile for multi-release orchestration." +--- + +import PangolinCloudTocCta from "/snippets/pangolin-cloud-toc-cta.mdx"; + + + + +Helmfile is a declarative way to manage multiple Helm releases in a single workflow. Use Helmfile when you need to install Pangolin and/or Newt alongside other Kubernetes components or manage multiple releases together. + +## When to use Helmfile + +Use Helmfile if you want to: + +- **Orchestrate multiple Helm releases** in a single file (Pangolin + Newt + dependencies). 
+- **Manage dependencies** between releases (e.g., install cert-manager before Pangolin). +- **Keep release definitions** in version control and synchronized. +- **Avoid repeated `helm install` commands** for complex multi-release setups. + +**Not using Helmfile?** If you're installing only Pangolin or only Newt without additional services, [Helm quick-start](/self-host/manual/kubernetes/helm) is simpler. + +## Helm vs. Helmfile + +| Aspect | Helm | Helmfile | +| --- | --- | --- | +| **Purpose** | Install/manage a single Helm chart release | Orchestrate multiple Helm chart releases | +| **Command** | `helm install`, `helm upgrade` | `helmfile sync`, `helmfile apply` | +| **Use case** | Quick install, single app | Multi-release, dependencies, fleet management | +| **Complexity** | Low | Medium | + +## Helmfile prerequisites + +- Helm 3.10+ +- `helmfile` CLI installed: [Helmfile GitHub](https://github.com/helmfile/helmfile) +- Basic knowledge of Helm values and YAML + +Install helmfile: + +```bash +# macOS/Linux with brew +brew install helmfile + +# or download a release binary (replace <version>, <os>, and <arch>) +wget https://github.com/helmfile/helmfile/releases/download/v<version>/helmfile_<version>_<os>_<arch>.tar.gz +tar -xzf helmfile_<version>_<os>_<arch>.tar.gz helmfile +chmod +x helmfile +sudo mv helmfile /usr/local/bin/ +``` + +Verify: + +```bash +helmfile --version +``` + +## Basic Helmfile structure + +A Helmfile is a YAML file (typically named `helmfile.yaml`) that declares multiple releases: + +```yaml +# helmfile.yaml +releases: + - name: cert-manager + namespace: cert-manager + createNamespace: true + chart: jetstack/cert-manager + version: v1.14.0 + + - name: pangolin + namespace: pangolin + createNamespace: true + chart: fossorial/pangolin + version: 0.1.0-alpha.0 + values: + - pangolin-values.yaml + + - name: newt + namespace: pangolin + chart: fossorial/newt + version: 1.4.0 + values: + - newt-values.yaml + needs: + - pangolin/pangolin +``` + +## Helmfile with Pangolin and Newt + +### 1. 
Add Helm repositories + +```bash +helm repo add jetstack https://charts.jetstack.io +helm repo add fossorial https://charts.fossorial.io +helm repo update +``` + +### 2. Create Helmfile + +Create `helmfile.yaml`: + +```yaml +helmDefaults: + atomic: true + cleanupOnFail: true + wait: true + timeout: 600 + recreatePods: true + force: false + +repositories: + - name: jetstack + url: https://charts.jetstack.io + - name: fossorial + url: https://charts.fossorial.io + +releases: + - name: cert-manager + namespace: cert-manager + createNamespace: true + chart: jetstack/cert-manager + version: v1.14.0 + set: + - name: installCRDs + value: true + + - name: pangolin + namespace: pangolin + createNamespace: true + chart: fossorial/pangolin + version: 0.1.0-alpha.0 + values: + - ./values/pangolin.yaml + needs: + - cert-manager/cert-manager + + - name: newt + namespace: pangolin + chart: fossorial/newt + version: 1.4.0 + values: + - ./values/newt.yaml + needs: + - pangolin/pangolin +``` + +### 3. Create values files + +Create `values/pangolin.yaml`: + +```yaml +deployment: + type: controller + mode: multi + +database: + mode: cloudnativepg + +pangolin: + config: + app: + dashboard_url: https://pangolin.example.com + domains: + domain1: + base_domain: example.com + gerbil: + base_endpoint: vpn.example.com + +ingress: + enabled: true + className: traefik + hosts: + - host: pangolin.example.com + paths: + - path: / + pathType: Prefix + tls: + - secretName: pangolin-tls + hosts: + - pangolin.example.com +``` + +Create `values/newt.yaml`: + +```yaml +newtInstances: + - name: main-tunnel + enabled: true + auth: + existingSecretName: newt-auth +``` + +### 4. Create Newt auth secret + +Before applying Helmfile: + +```bash +kubectl create namespace pangolin +kubectl create secret generic newt-auth \ + -n pangolin \ + --from-literal=PANGOLIN_ENDPOINT=https://pangolin.example.com \ + --from-literal=NEWT_ID= \ + --from-literal=NEWT_SECRET= +``` + +### 5. 
Deploy with Helmfile + +```bash +# Preview changes +helmfile diff + +# Apply releases +helmfile sync + +# or +helmfile apply +``` + +### 6. Verify deployment + +```bash +helmfile status + +# Check individual releases +helm status cert-manager -n cert-manager +helm status pangolin -n pangolin +helm status newt -n pangolin + +# Check pods +kubectl get pods -n pangolin +kubectl get pods -n cert-manager +``` + +## Advanced: Helmfile with environments + +For multi-environment setups (dev, staging, prod), use Helmfile environments: + +```yaml +environments: + dev: + values: + environment: dev + domain: dev.example.com + replicaCount: 1 + prod: + values: + environment: prod + domain: pangolin.example.com + replicaCount: 3 + +helmDefaults: + atomic: true + wait: true + +repositories: + - name: fossorial + url: https://charts.fossorial.io + +releases: + - name: pangolin + namespace: pangolin + createNamespace: true + chart: fossorial/pangolin + version: 0.1.0-alpha.0 + values: + - ./values/pangolin-{{ .Environment.Values.environment }}.yaml +``` + +Deploy to specific environment: + +```bash +helmfile -e dev sync +helmfile -e prod sync +``` + +## Helmfile with GitOps + +### Using Helmfile with FluxCD + +FluxCD can reconcile Helmfile declarations using the `helmfile-controller`. This allows Git-driven Helmfile updates: + +1. Commit Helmfile and values to Git +2. Create HelmRelease for each release in your Helmfile +3. Flux reconciles and applies changes + +See [Flux Guide](/self-host/manual/kubernetes/gitops/flux) for details. + +### Using Helmfile with Argo CD + +While Argo CD has native Helm and Kustomize support, you can: + +1. Use Helmfile to render manifests: `helmfile template > manifests.yaml` +2. Commit manifests to Git +3. Have Argo CD manage the raw YAML + +Alternatively, use Helm source in Argo CD (simpler than Helmfile for single releases). 
+ +## Troubleshooting Helmfile + +### Check syntax + +```bash +helmfile lint +``` + +### Debug release dependencies + +```bash +helmfile template +``` + +### See what will be deployed + +```bash +helmfile diff +``` + +### Remove releases + +```bash +helmfile destroy +``` + + +`helmfile destroy` uninstalls all releases and may delete data (e.g., databases). Use with caution in production. + + +## Common patterns + +### Helmfile with local chart overrides + +```yaml +releases: + - name: pangolin + namespace: pangolin + chart: ./charts/pangolin # local path + values: + - values.yaml +``` + +### Helmfile with inline values + +```yaml +releases: + - name: pangolin + namespace: pangolin + chart: fossorial/pangolin + set: + - name: deployment.type + value: controller + - name: deployment.mode + value: multi +``` + +### Helmfile with conditional releases + +```yaml +releases: + - name: cert-manager + namespace: cert-manager + createNamespace: true + chart: jetstack/cert-manager + installed: {{ .Environment.Values.installCertManager | default true }} +``` + +## Important notes + +### Official support + +Helmfile for Pangolin/Newt Kubernetes deployments is **advanced/community-supported**. The primary supported methods are: + +- Helm directly +- Kustomize overlays +- GitOps tools (Argo CD, Flux) + +If you encounter Helmfile-specific issues, refer to the [Helmfile documentation](https://github.com/helmfile/helmfile) and community. + +### Helm chart dependencies + +The Pangolin Helm chart includes optional sub-chart dependencies (e.g., CloudNativePG operator). Helmfile does not manage these—they're handled by Helm. Ensure chart dependencies are available when installing. 
+ +## Next steps + + + + + + + diff --git a/self-host/manual/kubernetes/kustomize.mdx b/self-host/manual/kubernetes/kustomize.mdx new file mode 100644 index 0000000..8651e2d --- /dev/null +++ b/self-host/manual/kubernetes/kustomize.mdx @@ -0,0 +1,403 @@ +--- +title: "Kustomize" +description: "Customize Helm-rendered Kubernetes manifests with Kustomize overlays." +--- + +import PangolinCloudTocCta from "/snippets/pangolin-cloud-toc-cta.mdx"; + + + + +Kustomize can be used to customize Kubernetes manifests with bases, overlays, and patches. + +For Pangolin and Newt, the supported Kustomize workflow is to render the Helm charts into manifests and use those rendered manifests as the Kustomize base. + +Use Kustomize when you need: + +- environment-specific overlays for dev, staging, or production +- explicit manifest patches in Git +- a manifest-driven workflow for GitOps tools +- small changes on top of a shared base without maintaining separate full manifests + +## When to use Kustomize + +Use Kustomize if: + +- you want to manage rendered Pangolin or Newt manifests in Git +- you need different overlays for different environments +- your team prefers reviewing concrete Kubernetes manifests +- you use Argo CD or Flux with Kustomize sources +- you want to patch generated manifests without forking the Helm chart + +For a single environment or a first installation, [Helm](/self-host/manual/kubernetes/helm) is usually simpler. + +## Supported workflow + +The chart repository does not provide native Kustomize bases. Use this workflow instead: + + + + Render the Helm chart with your values file and save the output as base manifests. + + + Commit rendered manifests as the Kustomize base in Git. + + + Create overlays for each environment (for example dev, staging, production). + + + Apply overlays manually or reconcile them with Argo CD or Flux. + + + + +Do not manage the same resources with both a live Helm release and Kustomize. Pick one ownership model per environment. 
+ + +Recommended ownership model: + +- Use Helm only to render manifests. +- Use Kustomize, Argo CD, or Flux to apply and reconcile the rendered manifests. +- Re-render the base when upgrading the chart version. + +## Example repository layout + +```text +my-pangolin-k8s/ +├── base/ +│ ├── kustomization.yaml +│ ├── pangolin.yaml +│ └── newt.yaml +├── overlays/ +│ ├── dev/ +│ │ ├── kustomization.yaml +│ │ └── pangolin-resources.patch.yaml +│ ├── staging/ +│ │ ├── kustomization.yaml +│ │ └── pangolin-resources.patch.yaml +│ └── prod/ +│ ├── kustomization.yaml +│ └── pangolin-resources.patch.yaml +└── values/ + ├── values-pangolin.yaml + └── values-newt.yaml +``` + +## Step 1: Render manifests from Helm + +Create a base directory: + +```bash +mkdir -p base overlays/dev overlays/staging overlays/prod +``` + +Render Pangolin: + + +```bash Classic Helm repository +helm template pangolin fossorial/pangolin \ + --namespace pangolin \ + --values values/values-pangolin.yaml \ + > base/pangolin.yaml +``` + +```bash OCI (GHCR) +helm template pangolin oci://ghcr.io/fosrl/helm-charts/pangolin \ + --version 0.1.0-alpha.0 \ + --namespace pangolin \ + --values values/values-pangolin.yaml \ + > base/pangolin.yaml +``` + + +Render Newt: + + +```bash Classic Helm repository +helm template newt fossorial/newt \ + --namespace pangolin \ + --values values/values-newt.yaml \ + > base/newt.yaml +``` + +```bash OCI (GHCR) +helm template newt oci://ghcr.io/fosrl/helm-charts/newt \ + --version 1.4.0 \ + --namespace pangolin \ + --values values/values-newt.yaml \ + > base/newt.yaml +``` + + +## Step 2: Create the base kustomization + +```yaml +# base/kustomization.yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +resources: + - pangolin.yaml + - newt.yaml +``` + +## Step 3: Create an overlay + +Use `resources` to reference the base. 
+ +```yaml +# overlays/prod/kustomization.yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +resources: + - ../../base + +labels: + - pairs: + app.kubernetes.io/environment: production + app.kubernetes.io/managed-by: kustomize + +patches: + - path: pangolin-resources.patch.yaml + target: + group: apps + version: v1 + kind: Deployment + name: pangolin +``` + + +Avoid `namePrefix` and `nameSuffix` for Helm-rendered bases unless you have verified every generated reference. Renaming chart-generated resources can break service names, selectors, secret references, and workload dependencies. + + +## Step 4: Add patches + +Example Strategic Merge patch for container resources: + +```yaml +# overlays/prod/pangolin-resources.patch.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: pangolin +spec: + template: + spec: + containers: + - name: pangolin + resources: + requests: + cpu: 1000m + memory: 1Gi + limits: + memory: 2Gi +``` + +Example JSON6902-style inline patch: + +```yaml +# overlays/prod/kustomization.yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +resources: + - ../../base + +patches: + - target: + group: apps + version: v1 + kind: Deployment + name: pangolin + patch: |- + - op: replace + path: /spec/template/spec/containers/0/resources/requests/cpu + value: "1000m" +``` + + +Modern Kustomize uses the `patches` field for both Strategic Merge and JSON6902-style patches. Avoid `patchesStrategicMerge`, `patchesJson6902`, and `bases` in new examples. 
+ + +## Apply an overlay + +Preview the rendered output: + +```bash +kustomize build overlays/prod +``` + +Compare with the live cluster: + +```bash +kustomize build overlays/prod | kubectl diff -f - +``` + +Apply the overlay: + +```bash +kubectl apply -k overlays/prod +``` + +Or apply the rendered output: + +```bash +kustomize build overlays/prod | kubectl apply -f - +``` + +## Updating the base + +When upgrading chart versions or changing Helm values, re-render the base and review the diff. + +```bash +helm repo update fossorial +``` + +Render the updated chart output: + +```bash +helm template pangolin fossorial/pangolin \ + --namespace pangolin \ + --values values/values-pangolin.yaml \ + > base/pangolin.yaml +``` + +```bash +helm template newt fossorial/newt \ + --namespace pangolin \ + --values values/values-newt.yaml \ + > base/newt.yaml +``` + +Then validate the overlay: + +```bash +kustomize build overlays/prod +``` + +Review changes before applying: + +```bash +git diff +kustomize build overlays/prod | kubectl diff -f - +``` + +Apply after review: + +```bash +kubectl apply -k overlays/prod +``` + +## Kustomize with GitOps + +Kustomize overlays work well with GitOps tools. + +* [Argo CD](/self-host/manual/kubernetes/gitops/argocd) can reconcile a Kustomize overlay path directly. +* [Flux](/self-host/manual/kubernetes/gitops/flux) can reconcile a Kustomize overlay with a `Kustomization` resource. + +In GitOps workflows, the controller applies the overlay. Do not also apply the same overlay manually unless you are debugging. 
+ +## Important considerations + +### Namespace handling + +Render the charts with the namespace you intend to use: + +```bash +helm template pangolin fossorial/pangolin \ + --namespace pangolin \ + --values values/values-pangolin.yaml \ + > base/pangolin.yaml +``` + +Create the namespace before applying the overlay: + +```bash +kubectl create namespace pangolin +``` + +Apply any required Pod Security Admission labels or cluster-policy labels before workloads are created. + +### Secrets + +Do not commit plaintext secrets into rendered manifests. + +Use one of these approaches instead: + +* reference existing Kubernetes Secrets in the values file before rendering +* create secrets separately with your secret-management workflow +* use Sealed Secrets, External Secrets Operator, SOPS, or another GitOps-safe secret solution + +### Do not mix ownership models + +Avoid this pattern: + +```text +helm upgrade pangolin fossorial/pangolin +kubectl apply -k overlays/prod +``` + +This creates two tools managing the same objects. + +Use one of these models instead: + +| Model | Description | +| ----------------- | ---------------------------------------------------------------------------------------- | +| Helm-managed | Helm installs and upgrades the live release. Kustomize is not used for the same objects. | +| Kustomize-managed | Helm only renders the base. Kustomize applies and owns the live objects. | +| GitOps-managed | Argo CD or Flux applies the Kustomize overlay and owns reconciliation. 
| + +## Troubleshooting + +Validate the overlay: + +```bash +kustomize build overlays/prod +``` + +Check the generated YAML: + +```bash +kustomize build overlays/prod > manifests.yaml +``` + +Run a server-side dry run: + +```bash +kubectl apply -f manifests.yaml --dry-run=server +``` + +Preview live changes: + +```bash +kubectl diff -f manifests.yaml +``` + +Check live resources: + +```bash +kubectl get all -n pangolin +kubectl get events -n pangolin --sort-by=.lastTimestamp +``` + +## Next steps + + + + Install Pangolin with rendered manifests and Kustomize overlays. + + + Install Newt with rendered manifests and Kustomize overlays. + + + Reconcile Kustomize overlays with Argo CD. + + + Reconcile Kustomize overlays with Flux. + + + Troubleshoot Pangolin deployments on Kubernetes. + + +``` diff --git a/self-host/manual/kubernetes/newt/configuration.mdx b/self-host/manual/kubernetes/newt/configuration.mdx new file mode 100644 index 0000000..e376c69 --- /dev/null +++ b/self-host/manual/kubernetes/newt/configuration.mdx @@ -0,0 +1,811 @@ +--- +title: "Newt Configuration" +description: "Configuration reference for Newt Kubernetes deployments." +--- + +import PangolinCloudTocCta from "/snippets/pangolin-cloud-toc-cta.mdx"; + + + + +This page covers the main Newt Kubernetes configuration options for Helm and Kustomize workflows. + +For exhaustive option coverage, refer to the chart resources: + + + + + + + +## Version context + +This page is aligned with the Newt Helm chart `1.4.0`. + +| Item | Value | +| --- | --- | +| Chart version | `1.4.0` | +| App version | `1.12.3` | +| Kubernetes version | `>=1.30.14-0` | +| Default image | `docker.io/fosrl/newt:1.12.3` | + +Chart `1.4.0` also publishes the Newt image metadata for Docker Hub and GHCR and includes Artifact Hub signing metadata. + +## Configuration sections + + + + + +Use `global.image` to control the Newt container image used by all instances. 
+ +```yaml +global: + image: + registry: docker.io + repository: fosrl/newt + tag: "" + digest: "" + imagePullPolicy: IfNotPresent + imagePullSecrets: [] + + logLevel: INFO +``` + +Recommendations: + +- Leave `tag` empty to use the chart `appVersion`. +- Use `digest` when you need immutable image pinning. +- Use `imagePullSecrets` when pulling from a private registry. +- Use per-instance overrides only when `allowGlobalOverride` is enabled for that instance. + + + + + +The chart can render Namespace resources, including Pod Security Admission labels. + +```yaml +namespace: + create: false + name: "" + labels: {} + podSecurity: + enforce: "" + warn: "" + audit: "" +``` + +Recommended production pattern: + +1. Create the namespace manually. +2. Apply required Pod Security Admission labels or policy labels. +3. Install the chart into that namespace. + +```bash +kubectl create namespace pangolin +``` + +Example namespace labels: + +```bash +kubectl label namespace pangolin \ + pod-security.kubernetes.io/enforce=baseline \ + pod-security.kubernetes.io/audit=restricted \ + pod-security.kubernetes.io/warn=restricted +``` + +Per-instance namespace overrides are available when `allowGlobalOverride: true` is set: + +```yaml +newtInstances: + - name: main-tunnel + allowGlobalOverride: true + namespace: + name: pangolin + create: false + labels: {} + podSecurity: + enforce: "" + warn: "" + audit: "" +``` + + +Creating the namespace manually is recommended when your cluster uses Pod Security Admission, policy labels, admission webhooks, or namespace annotations. + + + + + + +For production, use an existing Kubernetes Secret. 
+ +```yaml +newtInstances: + - name: main-tunnel + enabled: true + auth: + existingSecretName: newt-auth +``` + +Create the Secret before installing the chart: + +```bash +kubectl create secret generic newt-auth \ + --namespace pangolin \ + --from-literal=PANGOLIN_ENDPOINT=https://pangolin.example.com \ + --from-literal=NEWT_ID= \ + --from-literal=NEWT_SECRET= +``` + +The default Secret keys are: + +```yaml +PANGOLIN_ENDPOINT +NEWT_ID +NEWT_SECRET +``` + +Use `auth.keys.*` only when your Secret uses different key names: + +```yaml +newtInstances: + - name: main-tunnel + enabled: true + auth: + existingSecretName: newt-auth + keys: + endpointKey: PANGOLIN_ENDPOINT + idKey: NEWT_ID + secretKey: NEWT_SECRET +``` + +`auth.keys.*` are Secret key names, not credential values. + +Inline credentials are supported, but should only be used for local testing: + +```yaml +newtInstances: + - name: main-tunnel + enabled: true + auth: + pangolinEndpoint: "https://pangolin.example.com" + id: "" + secret: "" +``` + + +Inline credentials can appear in rendered manifests and Helm release history. Use `auth.existingSecretName` for production. + + + +Do not commit plaintext credentials to Git. For GitOps workflows, use encrypted or external secret backends such as SOPS, Sealed Secrets, External Secrets Operator, Vault, or Infisical. + + +Chart `1.4.0` also includes `auth.createSecret` and `auth.envVarsDirect` modes for generated Secret and direct environment-variable workflows. Use these only when they match your operational model. + + + + + +Newt 1.11+ supports provisioning-based installs. + +Use provisioning when Newt should bootstrap credentials from a provisioning key instead of using a static `NEWT_ID` and `NEWT_SECRET`. 
+ +```yaml +newtInstances: + - name: main-tunnel + enabled: true + auth: + pangolinEndpoint: https://pangolin.example.com + provisioningKey: "" + newtName: "my-site" + configPersistence: + enabled: true + type: emptyDir + mountPath: /var/lib/newt + fileName: config.json +``` + +Provisioning requires writable config persistence so Newt can store the generated configuration. + +For durable storage, use an existing PVC: + +```yaml +newtInstances: + - name: main-tunnel + enabled: true + auth: + pangolinEndpoint: https://pangolin.example.com + provisioningKey: "" + newtName: "my-site" + configPersistence: + enabled: true + type: persistentVolumeClaim + existingClaim: my-newt-config + mountPath: /var/lib/newt + fileName: config.json +``` + +You can also provide a provisioning blueprint: + +```yaml +newtInstances: + - name: main-tunnel + enabled: true + auth: + pangolinEndpoint: https://pangolin.example.com + provisioningKey: "" + newtName: "my-site" + configPersistence: + enabled: true + type: emptyDir + provisioningBlueprintFile: /etc/newt/provisioning-blueprint.yaml + provisioningBlueprintData: | + version: 1 + routes: [] +``` + + + + + +Each Newt instance is configured under `newtInstances[]`. 
+ +```yaml +newtInstances: + - name: main-tunnel + enabled: true + replicas: 1 + logLevel: INFO + mtu: 1280 + dns: "" + pingInterval: "" + pingTimeout: "" + acceptClients: false + useNativeInterface: false + interface: newt + keepInterface: false + noCloud: false + disableClients: false +``` + +Key settings: + +| Setting | Purpose | +| ------------------------------ | ----------------------------------------------------------- | +| `replicas` | Number of replicas for this Newt instance | +| `mtu` | WireGuard interface MTU | +| `dns` | Optional DNS server address pushed to the client | +| `pingInterval` / `pingTimeout` | Optional Newt ping timing overrides | +| `acceptClients` | Allows client connections at runtime | +| `useNativeInterface` | Uses native WireGuard interface when native mode is enabled | +| `noCloud` | Disables cloud connectivity | +| `disableClients` | Disables client connections | + + +Newt 1.11 changed upstream ping defaults. Set `pingInterval` and `pingTimeout` explicitly if you need older timing behavior. + + + + + + +Service exposure is controlled separately from `acceptClients`. + +```yaml +newtInstances: + - name: main-tunnel + enabled: true + service: + enabled: false + type: ClusterIP + port: 51820 + testerPort: "" + externalTrafficPolicy: "" + loadBalancerSourceRanges: [] +``` + +Important behavior: + +- `acceptClients` does not create a Service. +- `newtInstances[].service.enabled` controls whether a Service is created. +- Tester port exposure is disabled by default unless enabled through test settings or explicit legacy tester-port configuration. + +Common Service types: + +| Type | Use case | +| -------------- | --------------------------------------------- | +| `ClusterIP` | Internal cluster access | +| `LoadBalancer` | External exposure through cloud load balancer | +| `NodePort` | Node-level port exposure | + + + + + +Use `configPersistence` when Newt needs writable configuration storage. 
+ +```yaml +newtInstances: + - name: main-tunnel + configPersistence: + enabled: false + type: emptyDir + mountPath: /var/lib/newt + fileName: config.json + existingClaim: "" +``` + +Storage types: + +| Type | Behavior | +| ----------------------- | ----------------------------------------- | +| `emptyDir` | Ephemeral storage, recreated with the pod | +| `persistentVolumeClaim` | Durable storage using an existing PVC | + +Provisioning-based installs should enable config persistence. For production provisioning, prefer a PVC over `emptyDir`. + + +`emptyDir` is recreated when a pod is replaced. Newt can require a reconnect and handshake after restart, which may briefly interrupt active traffic. + + + +For production, prefer an existing PersistentVolumeClaim to keep writable Newt configuration across restarts and rescheduling. + + + + + + +The chart supports blueprints, provisioning blueprints, mTLS certificate mounts, Docker socket mounts, and up/down scripts. + +Blueprint example: + +```yaml +newtInstances: + - name: main-tunnel + blueprintFile: /etc/newt/blueprint.yaml + blueprintData: | + version: 1 + routes: [] +``` + +Provisioning blueprint example: + +```yaml +newtInstances: + - name: main-tunnel + provisioningBlueprintFile: /etc/newt/provisioning-blueprint.yaml + provisioningBlueprintData: | + version: 1 + routes: [] +``` + +mTLS using an existing PEM Secret: + +```yaml +newtInstances: + - name: main-tunnel + mtls: + enabled: true + mode: pem + pem: + secretName: newt-mtls + clientCertPath: /certs/client.crt + clientKeyPath: /certs/client.key + caPath: /certs/ca.crt +``` + +Up/down scripts: + +```yaml +global: + updownScripts: + route.sh: | + #!/bin/sh + echo "Newt interface changed" + +newtInstances: + - name: main-tunnel + updown: + enabled: true + mountPath: /opt/newt/updown +``` + + +Use Secrets for certificates and sensitive script inputs. Avoid inline private keys or credentials in values files. 
+ + + + + + +By default, Newt runs without native WireGuard mode. + +```yaml +global: + nativeMode: + enabled: false + +newtInstances: + - name: main-tunnel + useNativeInterface: false +``` + +Native mode requires elevated privileges. + +When native mode or `useNativeInterface` is enabled, Newt runs as root with privileged settings and capabilities such as `NET_ADMIN` and `SYS_MODULE`. + + +Only enable native WireGuard mode if your cluster policy allows privileged workloads and you understand the security impact. + + + + + + +ServiceAccount creation is enabled by default. + +```yaml +serviceAccount: + create: true + name: "" + automountServiceAccountToken: false +``` + +RBAC is disabled by default in chart `1.4.0`: + +```yaml +rbac: + create: false + clusterRole: false +``` + +Enable RBAC only when your selected configuration needs Kubernetes API permissions: + +```yaml +rbac: + create: true + clusterRole: false +``` + +Per-instance ServiceAccount overrides are available when `allowGlobalOverride: true` is set: + +```yaml +newtInstances: + - name: main-tunnel + allowGlobalOverride: true + serviceAccount: + create: true + name: newt-main-tunnel + automountServiceAccountToken: false +``` + + +Chart `1.4.0` changed the RBAC default to `rbac.create=false`. Existing installations that relied on auto-created RBAC must opt in explicitly during upgrade. + + + + + + +Global resource requests and limits apply to Newt workloads. 
+ +```yaml +global: + resources: + requests: + cpu: 100m + memory: 128Mi + ephemeral-storage: 128Mi + limits: + cpu: 200m + memory: 256Mi + ephemeral-storage: 256Mi +``` + +Scheduling defaults: + +```yaml +global: + priorityClassName: "" + nodeSelector: {} + tolerations: [] + affinity: + nodeAffinity: {} + podAffinity: {} + podAntiAffinity: {} + topologySpreadConstraints: [] +``` + +Pod Disruption Budget: + +```yaml +global: + podDisruptionBudget: + enabled: false + minAvailable: 1 + maxUnavailable: "" +``` + +Recommendations: + +- Start with the chart defaults. +- Increase requests and limits based on traffic volume. +- Use node selectors, tolerations, affinity, or topology spread constraints when you need placement control. +- Enable a PodDisruptionBudget only when your replica count and maintenance policy support it. + + +Avoid CPU limits unless you explicitly need hard caps. CPU limits can trigger throttling even when spare node CPU exists. For most deployments, use CPU requests and memory limits as the starting point. + + + + + + +Health probes are disabled by default. + +```yaml +global: + health: + enabled: false + path: /tmp/healthy + readinessFailureThreshold: 3 +``` + +Per-instance health options: + +```yaml +newtInstances: + - name: main-tunnel + healthFile: /tmp/healthy + enforceHcCert: false +``` + +Helm test jobs are disabled by default: + +```yaml +global: + tests: + enabled: false + image: + repository: registry.k8s.io/kubectl + tag: "1.30.14" + pullPolicy: IfNotPresent +``` + +Enable tests only when you want chart test jobs and tester-port related resources. + + + + + +Metrics are disabled by default. + +```yaml +global: + metrics: + enabled: false + port: 9090 + path: /metrics + adminAddr: ":2112" + asyncBytes: false + region: "" + otlpEnabled: false + pprofEnabled: false +``` + +The default `adminAddr` is `:2112`, which listens on all interfaces and allows in-cluster scraping. 
Use `127.0.0.1:2112` only when scraping from other pods is not required. + +Metrics Service: + +```yaml +global: + metrics: + service: + enabled: false + type: ClusterIP + port: 2112 + portName: metrics +``` + +Prometheus Operator resources: + +```yaml +global: + metrics: + podMonitor: + enabled: false + serviceMonitor: + enabled: false + prometheusRule: + enabled: false +``` + +Example with ServiceMonitor: + +```yaml +global: + metrics: + enabled: true + service: + enabled: true + serviceMonitor: + enabled: true +``` + +Optional pprof endpoint: + +```yaml +global: + metrics: + pprofEnabled: true +``` + + + + + +NetworkPolicy rendering is disabled by default. + +```yaml +global: + networkPolicy: + enabled: false + defaultMode: merge + components: + defaultApp: + enabled: true + dns: + enabled: false + kubeApi: + enabled: false + custom: + enabled: false + ruleSets: {} +``` + +Per-instance NetworkPolicy overrides: + +```yaml +newtInstances: + - name: main-tunnel + networkPolicy: + enabled: null + mode: merge + useGlobalComponents: + defaultApp: true + dns: false + kubeApi: false + custom: true + components: + dns: + enabled: false + custom: + enabled: false + includeRuleSets: [] +``` + +Modes: + +| Mode | Behavior | +| --------- | ------------------------------------------------- | +| `inherit` | Use global components and rule sets only | +| `merge` | Combine global and instance-level policy settings | +| `replace` | Use only the instance-level policy settings | + +Enable DNS egress rules if your default network policy blocks DNS. 
+ + + + + +## Configuration by install method + +### Helm + +Use a values file: + +```bash +helm upgrade --install newt fossorial/newt \ + --namespace pangolin \ + --values values-newt.yaml +``` + +Use inline values only for small tests: + +```bash +helm upgrade --install newt fossorial/newt \ + --namespace pangolin \ + --set 'newtInstances[0].name=main-tunnel' \ + --set 'newtInstances[0].auth.existingSecretName=newt-auth' +``` + +See [Newt Helm](/self-host/manual/kubernetes/newt/helm) for the installation flow. + +### Kustomize + +Render the chart with Helm, then use Kustomize overlays: + +```bash +helm template newt fossorial/newt \ + --namespace pangolin \ + --values values-newt.yaml \ + > base/newt.yaml +``` + +Then apply an overlay: + +```bash +kubectl apply -k overlays/site-a +``` + +See [Newt Kustomize](/self-host/manual/kubernetes/newt/kustomize) for the Kustomize workflow. + +### GitOps + +Store Helm values or Kustomize overlays in Git. Argo CD or Flux reconciles the desired state. + +Argo CD Helm example: + +```yaml +spec: + source: + helm: + values: | + newtInstances: + - name: main-tunnel + enabled: true + auth: + existingSecretName: newt-auth +``` + +Flux HelmRelease example: + +```yaml +spec: + values: + newtInstances: + - name: main-tunnel + enabled: true + auth: + existingSecretName: newt-auth +``` + +See [GitOps](/self-host/manual/kubernetes/gitops/overview) for GitOps guidance. + +## Production checklist + +Before deploying Newt to production: + +- [ ] Create the namespace before installation and apply required labels or annotations. +- [ ] Store credentials in Kubernetes Secrets. +- [ ] Avoid inline plaintext credentials. +- [ ] Use provisioning only with writable config persistence. +- [ ] Use a PVC for durable provisioning state. +- [ ] Keep native WireGuard mode disabled unless privileged workloads are allowed. +- [ ] Confirm the Pangolin endpoint is reachable from the Newt pod. 
+- [ ] Confirm TLS certificates are valid for the Pangolin endpoint. +- [ ] Set resources based on expected traffic. +- [ ] Configure NetworkPolicy rules if your cluster enforces network isolation. +- [ ] Enable metrics only when you have a scraping path. +- [ ] Review RBAC settings before upgrading from older chart versions. + +## Next steps + + + + Install Newt with Helm. + + + Install Newt with rendered manifests and Kustomize overlays. + + + Debug Newt deployment and connection issues. + + + Deploy Newt with Argo CD or Flux. + + diff --git a/self-host/manual/kubernetes/newt/helm.mdx b/self-host/manual/kubernetes/newt/helm.mdx new file mode 100644 index 0000000..e715a37 --- /dev/null +++ b/self-host/manual/kubernetes/newt/helm.mdx @@ -0,0 +1,437 @@ +--- +title: "Newt Helm" +description: "Quick-start guide for installing Newt on Kubernetes using Helm." +--- + +import PangolinCloudTocCta from "/snippets/pangolin-cloud-toc-cta.mdx"; + + + + +Newt is the site connector used to expose private resources through Pangolin. It runs close to the resources you want to publish and connects back to Pangolin. + +Newt is a user-space WireGuard tunnel client and TCP/UDP proxy. It does not require users to manage WireGuard tunnels or NAT rules manually. + +## Version matrix + +| Item | Value | +| --- | --- | +| Chart version | `1.4.0` | +| App version | `1.12.3` | +| Kubernetes version | `>=1.30.14-0` | +| Default image tag | `1.12.3` | + +Newt chart `1.4.0` includes Newt `1.12.3` and supports Kubernetes `>=1.30.14-0`. + +## What the chart supports + +The Newt chart can deploy one or more Newt instances through `newtInstances[]`.
+ +Newt chart `1.4.0` includes support for: + +- Newt 1.11+ provisioning with `NEWT_PROVISIONING_KEY` and `NEWT_NAME` +- legacy credential installs with `NEWT_ID` and `NEWT_SECRET` +- existing Kubernetes Secrets for production credentials +- writable config persistence with `emptyDir` or an existing PVC +- optional metrics, PodMonitor, ServiceMonitor, and PrometheusRule +- optional NetworkPolicy +- optional native WireGuard mode +- multi-instance deployments with per-instance overrides + +The chart README lists these features for version `1.4.0`. + +## Prerequisites + +Before installing Newt, you need: + +- Kubernetes `1.30.14` or newer +- Helm 3.x +- `kubectl` access to the target cluster +- a reachable Pangolin instance +- either: + - Newt credentials from Pangolin: `NEWT_ID` and `NEWT_SECRET` + - or a provisioning key for Newt 1.11+ provisioning + +The chart quickstart lists Kubernetes `>=1.30.14`, Helm 3.x, configured `kubectl`, and Newt credentials from Pangolin as prerequisites. + +See [Prerequisites](/self-host/manual/kubernetes/prerequisites) for cluster, namespace, storage, networking, and security planning. + +## Authentication options + +Newt chart `1.4.0` supports three credential patterns: + +| Method | Recommended for | Notes | +| --- | --- | --- | +| Existing Secret | Production | Credentials are stored in a Kubernetes Secret created outside Helm | +| Provisioning key | Newt 1.11+ provisioning | Requires writable config persistence | +| Inline values | Local testing only | Credentials may be stored in Helm release history | + +For production, use `auth.existingSecretName` or a GitOps-safe secret workflow. The chart values explicitly warn that inline credentials can be stored in Helm release history and recommend existing Secrets for production. + +## Quick install with existing Secret + +This is the recommended simple production pattern.
+ +### Step 1: Create the namespace + +Create the namespace before installing the chart: + +```bash +kubectl create namespace pangolin +``` + +If your cluster uses Pod Security Admission labels, namespace labels, or policy annotations, apply them before installing Newt. + +Example: + +```bash +kubectl label namespace pangolin \ + pod-security.kubernetes.io/enforce=baseline \ + pod-security.kubernetes.io/audit=restricted \ + pod-security.kubernetes.io/warn=restricted +``` + + +The chart can create namespaces through `namespace.create`, but creating the namespace explicitly is recommended when your cluster uses Pod Security Admission, namespace labels, or policy annotations. + + +### Step 2: Create the Newt Secret + +Create a Secret with the credentials from Pangolin: + +```bash +kubectl create secret generic newt-auth \ + --namespace pangolin \ + --from-literal=PANGOLIN_ENDPOINT=https://pangolin.example.com \ + --from-literal=NEWT_ID= \ + --from-literal=NEWT_SECRET= +``` + + +Get the Newt credentials from the Pangolin dashboard for the site you want this Newt instance to connect to. + + +### Step 3: Create a values file + +Create `values-newt.yaml`: + +```yaml +newtInstances: + - name: main-tunnel + enabled: true + auth: + existingSecretName: newt-auth + replicas: 1 +``` + +The default Secret keys are: + +```yaml +PANGOLIN_ENDPOINT +NEWT_ID +NEWT_SECRET +``` + +You only need to set `auth.keys.*` if your Secret uses different key names. + +Example with custom Secret keys: + +```yaml +newtInstances: + - name: main-tunnel + enabled: true + auth: + existingSecretName: newt-auth + keys: + endpointKey: PANGOLIN_ENDPOINT + idKey: NEWT_ID + secretKey: NEWT_SECRET + replicas: 1 +``` + +`auth.keys.*` are key names inside the Kubernetes Secret, not the credential values themselves. 
+ +### Step 4: Install Newt + +Add the Helm repository: + +```bash +helm repo add fossorial https://charts.fossorial.io +helm repo update fossorial +``` + +Install Newt: + +```bash +helm upgrade --install newt fossorial/newt \ + --namespace pangolin \ + --values values-newt.yaml +``` + +Do not use `--create-namespace` here if you created and labeled the namespace manually. + +### Step 5: Verify the deployment + +Check the Helm release: + +```bash +helm status newt --namespace pangolin +``` + +Check the pods: + +```bash +kubectl get pods --namespace pangolin \ + -l app.kubernetes.io/name=newt +``` + +Check the logs: + +```bash +kubectl logs --namespace pangolin \ + -l app.kubernetes.io/name=newt \ + --tail=50 +``` + +Wait for the Newt pod to become ready: + +```bash +kubectl wait --for=condition=ready pod \ + -l app.kubernetes.io/name=newt \ + --namespace pangolin \ + --timeout=60s +``` + +## Quick install with provisioning key + +Newt 1.11+ supports provisioning-based installs. Use this when you want Newt to bootstrap credentials from a provisioning key. + +Provisioning requires writable config persistence so Newt can store the generated configuration. The chart quickstart explicitly notes that provisioning requires a writable `CONFIG_FILE` target and that the chart provides this through `newtInstances[x].configPersistence`. + +Create `values-newt.yaml`: + +```yaml +newtInstances: + - name: main-tunnel + enabled: true + pangolinEndpoint: https://pangolin.example.com + provisioningKey: "" + newtName: "my-site" + configPersistence: + enabled: true + type: emptyDir + mountPath: /var/lib/newt + fileName: config.json +``` + +Install Newt: + +```bash +helm upgrade --install newt fossorial/newt \ + --namespace pangolin \ + --values values-newt.yaml +``` + + +`emptyDir` is enough for testing, but it is ephemeral. For durable provisioning state, use `type: persistentVolumeClaim` with an existing PVC.
+ + +Example with an existing PVC: + +```yaml +newtInstances: + - name: main-tunnel + enabled: true + pangolinEndpoint: https://pangolin.example.com + provisioningKey: "" + newtName: "my-site" + configPersistence: + enabled: true + type: persistentVolumeClaim + existingClaim: my-newt-config + mountPath: /var/lib/newt + fileName: config.json +``` + +The Newt README includes both `emptyDir` and existing PVC provisioning examples. + +## Verifying connectivity + +Follow the Newt logs: + +```bash +kubectl logs --namespace pangolin \ + -l app.kubernetes.io/name=newt \ + --follow +``` + +In the Pangolin dashboard, verify that the site connected by this Newt instance is online. + +If the pod is running but the site does not connect, check: + +* `PANGOLIN_ENDPOINT` +* Newt credentials or provisioning key +* DNS resolution from inside the cluster +* outbound network access from the Newt pod +* TLS validity for the Pangolin endpoint + +## Upgrade + +Update the Helm repository: + +```bash +helm repo update fossorial +``` + +Upgrade the release: + +```bash +helm upgrade newt fossorial/newt \ + --namespace pangolin \ + --values values-newt.yaml +``` + +Check upgrade status: + +```bash +helm status newt --namespace pangolin +helm history newt --namespace pangolin +``` + +Rollback to a previous revision if needed: + +```bash +helm rollback newt --namespace pangolin +``` + +## Multiple Newt instances + +You can deploy multiple Newt instances with one chart release.
+ +Example: + +```yaml +newtInstances: + - name: site-a + enabled: true + auth: + existingSecretName: newt-auth-site-a + replicas: 1 + + - name: site-b + enabled: true + auth: + existingSecretName: newt-auth-site-b + replicas: 1 +``` + +Create a separate Secret for each site: + +```bash +kubectl create secret generic newt-auth-site-a \ + --namespace pangolin \ + --from-literal=PANGOLIN_ENDPOINT=https://pangolin.example.com \ + --from-literal=NEWT_ID= \ + --from-literal=NEWT_SECRET= + +kubectl create secret generic newt-auth-site-b \ + --namespace pangolin \ + --from-literal=PANGOLIN_ENDPOINT=https://pangolin.example.com \ + --from-literal=NEWT_ID= \ + --from-literal=NEWT_SECRET= +``` + +## Architecture notes + +### Instance-based deployment + +* `newtInstances[]` defines the Newt instances rendered by the chart. +* Each enabled instance creates its own workload. +* Each instance can use its own Secret, provisioning settings, resources, service settings, and network policy settings. +* Per-instance namespace and service account overrides require `allowGlobalOverride: true`. + +The chart values include `newtInstances[]`, per-instance namespace settings, and per-instance service account overrides. + +### Security defaults + +By default, Newt runs without native WireGuard mode. + +The chart values describe the default non-native mode as non-root with privilege escalation disabled, read-only root filesystem, and dropped capabilities. Native WireGuard mode requires a privileged container with capabilities such as `NET_ADMIN` and `SYS_MODULE`. + + +Only enable native WireGuard mode if you understand the required privileges and your cluster policy allows them. + + +### RBAC + +Newt chart `1.4.0` defaults `rbac.create` to `false`. Enable RBAC only when your selected Newt configuration requires Kubernetes API permissions.
+ +```yaml +rbac: + create: true +``` + +The chart changelog for `1.4.0` marks this as a breaking change: installations that relied on auto-created RBAC must explicitly enable `rbac.create=true` during upgrade. + +### Helm tests + +Helm test Jobs are disabled by default. + +Enable them only when you want to run chart test jobs: + +```yaml +global: + tests: + enabled: true +``` + +The chart quickstart notes that test Jobs are gated behind `global.tests.enabled`, which defaults to `false`. + +## OCI install + +The Newt chart is also published as an OCI chart in GHCR. + +Pull the chart: + +```bash +helm pull oci://ghcr.io/fosrl/helm-charts/newt \ + --version 1.4.0 +``` + +Install from OCI: + +```bash +helm upgrade --install newt oci://ghcr.io/fosrl/helm-charts/newt \ + --version 1.4.0 \ + --namespace pangolin \ + --values values-newt.yaml +``` + +OCI changes where Helm pulls the chart from. It does not change the values file or the release behavior. + +## References + + + + + + + + + +## Next steps + + + + Review all Newt chart options. + + + Debug Newt deployment and connection issues. + + + Install Newt with rendered manifests and Kustomize overlays. + + + Install the Pangolin control plane. + + diff --git a/self-host/manual/kubernetes/newt/kustomize.mdx b/self-host/manual/kubernetes/newt/kustomize.mdx new file mode 100644 index 0000000..d031f14 --- /dev/null +++ b/self-host/manual/kubernetes/newt/kustomize.mdx @@ -0,0 +1,626 @@ +--- +title: "Newt Kustomize" +description: "Deploy Newt on Kubernetes using Helm-rendered manifests and Kustomize overlays." +--- + +import PangolinCloudTocCta from "/snippets/pangolin-cloud-toc-cta.mdx"; + + + + +Use Kustomize when you want to manage Newt with rendered manifests, environment-specific overlays, and explicit patches in Git. + +For Newt, the supported Kustomize workflow is: + +1. Render the Newt Helm chart to manifests. +2. Use the rendered output as the Kustomize base. +3.
Create overlays per site, cluster, or environment. +4. Apply the overlay with `kubectl apply -k` or reconcile it with Argo CD or Flux. + +## When to use Kustomize for Newt + +Use Kustomize if you: + +- want site-specific or environment-specific overlays +- need explicit patches committed to Git +- prefer reviewing rendered Kubernetes manifests before applying them +- use Argo CD or Flux with Kustomize sources +- want to customize Helm-rendered output without forking the chart + +For a simpler single-site setup, use [Newt Helm](/self-host/manual/kubernetes/newt/helm). + +## Supported approach + +The Newt chart does not provide native Kustomize bases. Render the Helm chart first, then use Kustomize on the rendered manifests. + + +Do not manage the same Newt resources with both a live Helm release and Kustomize. Pick one ownership model per environment. + + +Recommended ownership model: + +- Use Helm only to render the Newt chart. +- Use Kustomize, Argo CD, or Flux to apply and reconcile the rendered manifests. +- Re-render the base when upgrading the chart or changing Helm values. + +## Example directory structure + +```text +newt-deployment/ +├── base/ +│ ├── kustomization.yaml +│ └── newt.yaml +├── overlays/ +│ ├── site-a/ +│ │ ├── kustomization.yaml +│ │ └── patches/ +│ │ └── deployment-resources.patch.yaml +│ └── site-b/ +│ ├── kustomization.yaml +│ └── patches/ +│ └── deployment-resources.patch.yaml +└── values/ + ├── values-base.yaml + ├── values-site-a.yaml + └── values-site-b.yaml +``` + +## Step 1: Create the namespace + +Create the namespace before applying rendered manifests: + +```bash +kubectl create namespace pangolin +``` + +If your cluster uses Pod Security Admission, namespace labels, or other policy labels, apply them before creating workloads. 
+ +Example: + +```bash +kubectl label namespace pangolin \ + pod-security.kubernetes.io/enforce=baseline \ + pod-security.kubernetes.io/audit=restricted \ + pod-security.kubernetes.io/warn=restricted +``` + +## Step 2: Create Newt credentials + +Create a Kubernetes Secret for each Newt site or instance. + +```bash +kubectl create secret generic newt-auth-site-a \ + --namespace pangolin \ + --from-literal=PANGOLIN_ENDPOINT=https://pangolin.example.com \ + --from-literal=NEWT_ID= \ + --from-literal=NEWT_SECRET= +``` + +For a second site: + +```bash +kubectl create secret generic newt-auth-site-b \ + --namespace pangolin \ + --from-literal=PANGOLIN_ENDPOINT=https://pangolin.example.com \ + --from-literal=NEWT_ID= \ + --from-literal=NEWT_SECRET= +``` + + +Use existing Kubernetes Secrets for production. Do not commit Newt credentials into Helm values, rendered manifests, or Kustomize patches. + + +## Step 3: Create base values + +Create `values/values-base.yaml`: + +```yaml +newtInstances: + - name: main-tunnel + enabled: true + replicas: 1 + auth: + existingSecretName: newt-auth-site-a +``` + +This values file uses an existing Secret. The default Secret keys are: + +```text +PANGOLIN_ENDPOINT +NEWT_ID +NEWT_SECRET +``` + +Use `auth.keys.*` only when your Secret uses different key names. 
+ +## Step 4: Render Newt to the base + +Add and update the Helm repository: + +```bash +helm repo add fossorial https://charts.fossorial.io +helm repo update fossorial +``` + +Render the Newt chart: + +```bash +mkdir -p base overlays/site-a/patches overlays/site-b/patches values + +helm template newt fossorial/newt \ + --namespace pangolin \ + --values values/values-base.yaml \ + > base/newt.yaml +``` + +You can also render from the GHCR OCI chart: + +```bash +helm template newt oci://ghcr.io/fosrl/helm-charts/newt \ + --version 1.4.0 \ + --namespace pangolin \ + --values values/values-base.yaml \ + > base/newt.yaml +``` + +## Step 5: Create the base kustomization + +```yaml +# base/kustomization.yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +resources: + - newt.yaml +``` + + +The namespace is already rendered by Helm through `--namespace pangolin`. You can also set `namespace: pangolin` in Kustomize, but avoid changing namespaces in overlays unless you have verified all rendered resources and references. + + +## Step 6: Inspect the rendered resource names + +Before writing patches, check the generated names: + +```bash +kustomize build base | grep -E "^(kind:| name:)" +``` + +Or list the deployments: + +```bash +kustomize build base | yq '. | select(.kind == "Deployment") | .metadata.name' +``` + +Use the actual rendered Deployment name in your patch targets. + + +Do not assume the rendered Deployment name without checking the generated manifests. Helm naming can change with release name, chart name, `nameOverride`, or `fullnameOverride`. 
+ + +## Step 7: Create site-specific overlays + +Example overlay for Site A: + +```yaml +# overlays/site-a/kustomization.yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +resources: + - ../../base + +labels: + - pairs: + app.kubernetes.io/site: site-a + app.kubernetes.io/environment: production + +patches: + - path: patches/deployment-resources.patch.yaml + target: + group: apps + version: v1 + kind: Deployment + name: newt-main-tunnel +``` + +Example resource patch: + +```yaml +# overlays/site-a/patches/deployment-resources.patch.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: newt-main-tunnel +spec: + replicas: 1 + template: + spec: + containers: + - name: newt + resources: + requests: + cpu: 100m + memory: 128Mi + limits: + memory: 256Mi +``` + + +Replace `newt-main-tunnel` with the actual Deployment name from your rendered manifests. + + +Example overlay for Site B with a different Secret is usually better handled by rendering a second base with a different values file. + +Create `values/values-site-b.yaml`: + +```yaml +newtInstances: + - name: main-tunnel + enabled: true + replicas: 1 + auth: + existingSecretName: newt-auth-site-b +``` + +Then render a separate base for Site B: + +```bash +mkdir -p site-b/base + +helm template newt-site-b fossorial/newt \ + --namespace pangolin \ + --values values/values-site-b.yaml \ + > site-b/base/newt.yaml +``` + + +For different credentials, endpoints, provisioning keys, or instance names, prefer separate Helm-rendered bases. Use Kustomize patches for environment-level changes such as labels, annotations, resources, scheduling, or NetworkPolicy adjustments. 
+ + +## Common Kustomize patches for Newt + +### Patch resource requests and limits + +```yaml +# overlays/site-a/kustomization.yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +resources: + - ../../base + +patches: + - path: patches/resources.patch.yaml + target: + group: apps + version: v1 + kind: Deployment + name: newt-main-tunnel +``` + +```yaml +# overlays/site-a/patches/resources.patch.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: newt-main-tunnel +spec: + template: + spec: + containers: + - name: newt + resources: + requests: + cpu: 200m + memory: 256Mi + limits: + memory: 512Mi +``` + +### Patch log level + +Prefer configuring log level through Helm values before rendering. If you still need a manifest patch, patch the generated environment variable carefully after inspecting the rendered Deployment. + +Example JSON6902-style patch: + +```yaml +# overlays/site-a/kustomization.yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +resources: + - ../../base + +patches: + - target: + group: apps + version: v1 + kind: Deployment + name: newt-main-tunnel + patch: |- + - op: add + path: /spec/template/spec/containers/0/env/- + value: + name: LOG_LEVEL + value: DEBUG +``` + + +Only use index-based JSON patches after checking the rendered manifest. Container order and environment variable layout can change between chart versions. 
+ + +### Add node affinity + +```yaml +# overlays/site-a/patches/node-affinity.patch.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: newt-main-tunnel +spec: + template: + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: site + operator: In + values: + - site-a +``` + +Reference the patch: + +```yaml +# overlays/site-a/kustomization.yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +resources: + - ../../base + +patches: + - path: patches/node-affinity.patch.yaml + target: + group: apps + version: v1 + kind: Deployment + name: newt-main-tunnel +``` + +### Add annotations + +```yaml +# overlays/site-a/kustomization.yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +resources: + - ../../base + +patches: + - target: + group: apps + version: v1 + kind: Deployment + name: newt-main-tunnel + patch: |- + - op: add + path: /metadata/annotations + value: + example.com/owner: platform +``` + +## Do not rename rendered Helm resources by default + +Avoid Kustomize options such as `namePrefix` and `nameSuffix` for Helm-rendered bases unless you have validated every generated reference. + +Renaming rendered resources can break: + +* Service selectors +* Secret references +* ConfigMap references +* ServiceAccount references +* NetworkPolicy selectors +* Prometheus monitor selectors + +If you need different resource names, prefer changing the Helm release name or chart naming values before rendering. 
+ +## Apply the overlay + +Preview the rendered output: + +```bash +kustomize build overlays/site-a +``` + +Compare with the live cluster: + +```bash +kustomize build overlays/site-a | kubectl diff -f - +``` + +Apply the overlay: + +```bash +kubectl apply -k overlays/site-a +``` + +Verify the deployment: + +```bash +kubectl get pods --namespace pangolin \ + -l app.kubernetes.io/name=newt + +kubectl logs --namespace pangolin \ + -l app.kubernetes.io/name=newt \ + --tail=50 +``` + +## Updating the rendered base + +When upgrading the Newt chart, re-render the base and review the changes. + +```bash +helm repo update fossorial +``` + +Render the updated chart output: + +```bash +helm template newt fossorial/newt \ + --namespace pangolin \ + --values values/values-base.yaml \ + > base/newt.yaml +``` + +Or with OCI: + +```bash +helm template newt oci://ghcr.io/fosrl/helm-charts/newt \ + --version 1.4.0 \ + --namespace pangolin \ + --values values/values-base.yaml \ + > base/newt.yaml +``` + +Validate the overlay: + +```bash +kustomize build overlays/site-a +``` + +Review the diff: + +```bash +git diff +kustomize build overlays/site-a | kubectl diff -f - +``` + +Commit the updated base and overlays: + +```bash +git add base/ overlays/ values/ +git commit -m "Update Newt rendered manifests" +``` + +Apply after review: + +```bash +kubectl apply -k overlays/site-a +``` + +## Ownership model + +Do not run `helm upgrade` against a release that is managed by Kustomize. + +Avoid this pattern: + +```bash +helm upgrade newt fossorial/newt --namespace pangolin +kubectl apply -k overlays/site-a +``` + +Use one of these models instead: + +| Model | Description | +| ----------------- | ------------------------------------------------------------------------------------------ | +| Helm-managed | Helm installs and upgrades the live release. Kustomize is not used for the same resources. | +| Kustomize-managed | Helm renders manifests only. Kustomize applies and owns the live resources. 
| +| GitOps-managed | Argo CD or Flux applies the Kustomize overlay and owns reconciliation. | + +## Validation + +Validate Kustomize output: + +```bash +kustomize build overlays/site-a +``` + +Run a server-side dry run: + +```bash +kustomize build overlays/site-a | kubectl apply -f - --dry-run=server +``` + +Preview live changes: + +```bash +kustomize build overlays/site-a | kubectl diff -f - +``` + +Check live resources: + +```bash +kubectl get all --namespace pangolin +kubectl get events --namespace pangolin --sort-by=.lastTimestamp +``` + +## Kustomize with GitOps + +Kustomize overlays work well with GitOps tools. + +* [Argo CD](/self-host/manual/kubernetes/gitops/argocd) can reconcile a Kustomize overlay path directly. +* [Flux](/self-host/manual/kubernetes/gitops/flux) can reconcile a Kustomize overlay with a `Kustomization` resource. + +In GitOps workflows, the controller owns the apply operation. Do not also apply the same overlay manually unless you are debugging. + +## Troubleshooting + +### The patch does not apply + +Check the rendered resource name and kind: + +```bash +kustomize build base | grep -E "^(kind:| name:)" +``` + +Then verify the patch target in your overlay. + +### The pod does not start + +Check pod status and events: + +```bash +kubectl get pods --namespace pangolin +kubectl describe pod --namespace pangolin +kubectl get events --namespace pangolin --sort-by=.lastTimestamp +``` + +### Newt does not connect + +Check logs: + +```bash +kubectl logs --namespace pangolin \ + -l app.kubernetes.io/name=newt \ + --tail=100 +``` + +Verify: + +* the Secret exists in the same namespace +* `PANGOLIN_ENDPOINT` is reachable from the pod +* `NEWT_ID` and `NEWT_SECRET` are correct +* outbound DNS and HTTPS are allowed +* TLS certificates for the Pangolin endpoint are valid + +## Next steps + + + + Install Newt with Helm. + + + Review Newt chart options. + + + Debug Newt deployment and connection issues. 
+ + + Deploy Kustomize overlays with Argo CD or Flux. + + diff --git a/self-host/manual/kubernetes/newt/troubleshooting.mdx b/self-host/manual/kubernetes/newt/troubleshooting.mdx new file mode 100644 index 0000000..0d377e9 --- /dev/null +++ b/self-host/manual/kubernetes/newt/troubleshooting.mdx @@ -0,0 +1,752 @@ +--- +title: "Newt Troubleshooting" +description: "Diagnose and resolve common Newt Kubernetes deployment issues." +--- + +import PangolinCloudTocCta from "/snippets/pangolin-cloud-toc-cta.mdx"; + + + + +Use this guide to troubleshoot Newt Kubernetes deployments installed with Helm, Kustomize, Argo CD, or Flux. + +Start with the basic checks, then move to the section that matches the symptom. + +## Quick checks + +Set the namespace and release name used by your installation: + +```bash +export NEWT_NAMESPACE=pangolin +export NEWT_RELEASE=newt +``` + +Check the Helm release: + +```bash +helm status "$NEWT_RELEASE" --namespace "$NEWT_NAMESPACE" +helm history "$NEWT_RELEASE" --namespace "$NEWT_NAMESPACE" +``` + +Check Newt pods: + +```bash +kubectl get pods --namespace "$NEWT_NAMESPACE" \ + -l app.kubernetes.io/name=newt +``` + +Check recent events: + +```bash +kubectl get events --namespace "$NEWT_NAMESPACE" \ + --sort-by=.lastTimestamp +``` + +Check logs: + +```bash +kubectl logs --namespace "$NEWT_NAMESPACE" \ + -l app.kubernetes.io/name=newt \ + --tail=100 +``` + +Check the applied Helm values: + +```bash +helm get values "$NEWT_RELEASE" --namespace "$NEWT_NAMESPACE" +``` + + +Do not assume the pod or Deployment name. Chart-generated names can change with the Helm release name, instance name, `nameOverride`, or `fullnameOverride`. 
+ + +## Get the generated resource names + +List Newt resources: + +```bash +kubectl get deploy,sts,svc,secret,cm --namespace "$NEWT_NAMESPACE" \ + -l app.kubernetes.io/name=newt +``` + +List pods with labels: + +```bash +kubectl get pods --namespace "$NEWT_NAMESPACE" \ + -l app.kubernetes.io/name=newt \ + --show-labels +``` + +Store the first Newt pod name: + +```bash +export NEWT_POD="$(kubectl get pod --namespace "$NEWT_NAMESPACE" \ + -l app.kubernetes.io/name=newt \ + -o jsonpath='{.items[0].metadata.name}')" +``` + +Then use: + +```bash +echo "$NEWT_POD" +``` + +## Pod fails to start + +### Symptoms + +```text +STATUS RESTARTS +CrashLoopBackOff 5 +Error 3 +CreateContainerConfigError +ImagePullBackOff +``` + +### Check pod details + +```bash +kubectl describe pod "$NEWT_POD" --namespace "$NEWT_NAMESPACE" +``` + +Check logs: + +```bash +kubectl logs "$NEWT_POD" --namespace "$NEWT_NAMESPACE" --tail=100 +``` + +If the container restarts quickly, check the previous logs: + +```bash +kubectl logs "$NEWT_POD" --namespace "$NEWT_NAMESPACE" --previous --tail=100 +``` + +### Common causes + +| Symptom | Likely cause | Check | +| ----------------------------------- | ------------------------------------------------------ | ---------------------------------------------------------- | +| `Secret "..." 
not found` | Secret name does not match `auth.existingSecretName` | `kubectl get secret -n "$NEWT_NAMESPACE"` | +| Missing env var or empty credential | Secret exists but key names do not match `auth.keys.*` | `kubectl describe secret -n "$NEWT_NAMESPACE"` | +| Authentication failure | Wrong `NEWT_ID`, `NEWT_SECRET`, or provisioning key | Check credentials in Pangolin | +| Endpoint connection errors | `PANGOLIN_ENDPOINT` is wrong or unreachable | Test DNS and HTTPS from the pod | +| Image pull failure | Registry or image settings are wrong | `kubectl describe pod` | +| Permission error with native mode | Native WireGuard mode requires privileged settings | Check `global.nativeMode.enabled` and `useNativeInterface` | + +## Secret issues + +### Verify the Secret exists + +```bash +kubectl get secret newt-auth --namespace "$NEWT_NAMESPACE" +``` + +### Check Secret keys + +```bash +kubectl describe secret newt-auth --namespace "$NEWT_NAMESPACE" +``` + +The default keys are: + +```text +PANGOLIN_ENDPOINT +NEWT_ID +NEWT_SECRET +``` + +If your Secret uses different key names, map them in values: + +```yaml +newtInstances: + - name: main-tunnel + enabled: true + auth: + existingSecretName: newt-auth + keys: + endpointKey: PANGOLIN_ENDPOINT + idKey: NEWT_ID + secretKey: NEWT_SECRET +``` + + +Do not paste decoded secrets into issue reports, logs, screenshots, or public repositories. 
+ + +### Check which Secret the pod uses + +```bash +kubectl get pod "$NEWT_POD" --namespace "$NEWT_NAMESPACE" \ + -o jsonpath='{range .spec.containers[*].envFrom[*]}{.secretRef.name}{"\n"}{end}' +``` + +Also inspect explicit Secret references: + +```bash +kubectl get pod "$NEWT_POD" --namespace "$NEWT_NAMESPACE" -o yaml | grep -A5 -B2 secretKeyRef +``` + +## Newt cannot reach Pangolin + +### Test DNS from the Newt pod + +```bash +kubectl exec "$NEWT_POD" --namespace "$NEWT_NAMESPACE" -- \ + nslookup pangolin.example.com +``` + +### Test HTTPS from the Newt pod + +```bash +kubectl exec "$NEWT_POD" --namespace "$NEWT_NAMESPACE" -- \ + wget -S -O- https://pangolin.example.com 2>&1 | head -40 +``` + +Depending on the image, `curl`, `wget`, `nc`, or `nslookup` may not be available. If needed, run a temporary debug pod in the same namespace: + +```bash +kubectl run net-debug \ + --namespace "$NEWT_NAMESPACE" \ + --rm -it \ + --image=curlimages/curl:latest \ + --restart=Never \ + -- sh +``` + +Then test: + +```bash +curl -vk https://pangolin.example.com +``` + +### Common causes + +| Problem | What to check | +| -------------------------------- | --------------------------------------------------------- | +| DNS fails | CoreDNS, NetworkPolicy egress to DNS, wrong hostname | +| HTTPS fails | ingress, TLS certificate, firewall, proxy, wrong endpoint | +| TLS verification fails | certificate chain, hostname mismatch, private CA | +| Works locally but not in cluster | egress policies, proxy settings, DNS split-horizon | + +## Newt pod is running but site is offline + +Check logs: + +```bash +kubectl logs "$NEWT_POD" --namespace "$NEWT_NAMESPACE" --tail=200 +``` + +Check the site in the Pangolin dashboard. 
+ +Verify: + +* the site credentials belong to the same site +* the site was not deleted or regenerated in Pangolin +* `PANGOLIN_ENDPOINT` points to the correct Pangolin URL +* the cluster can resolve and reach the Pangolin endpoint +* outbound HTTPS is allowed from the Newt namespace +* the Secret is in the same namespace as the Newt workload + +If you use provisioning, also verify: + +* `provisioningKey` is valid +* `newtName` is set as expected +* `configPersistence.enabled=true` +* the configured `CONFIG_FILE` path is writable + +## Provisioning issues + +Newt 1.11+ provisioning requires writable config persistence. + +### Symptoms + +* Newt starts but does not keep generated credentials after restart. +* Newt provisions repeatedly. +* Logs mention config file or write errors. +* Pod restarts cause the site to appear as a new or unconfigured instance. + +### Check values + +```bash +helm get values "$NEWT_RELEASE" --namespace "$NEWT_NAMESPACE" +``` + +Provisioning example: + +```yaml +newtInstances: + - name: main-tunnel + enabled: true + auth: + pangolinEndpoint: https://pangolin.example.com + provisioningKey: "" + newtName: "my-site" + configPersistence: + enabled: true + type: emptyDir + mountPath: /var/lib/newt + fileName: config.json +``` + +For durable state, use an existing PVC: + +```yaml +newtInstances: + - name: main-tunnel + enabled: true + auth: + pangolinEndpoint: https://pangolin.example.com + provisioningKey: "" + newtName: "my-site" + configPersistence: + enabled: true + type: persistentVolumeClaim + existingClaim: my-newt-config + mountPath: /var/lib/newt + fileName: config.json +``` + + +`emptyDir` is recreated when the pod is recreated. Use a PVC if the generated configuration must survive pod replacement. + + +## Native WireGuard permission issues + +By default, Newt does not require native WireGuard privileges. 
+ +Only check this section if you enabled native WireGuard mode: + +```yaml +global: + nativeMode: + enabled: true + +newtInstances: + - name: main-tunnel + useNativeInterface: true +``` + +### Symptoms + +```text +operation not permitted +cannot create interface +permission denied +``` + +### Check security context + +```bash +kubectl get pod "$NEWT_POD" --namespace "$NEWT_NAMESPACE" -o yaml | grep -A30 securityContext +``` + +Native mode requires privileged workload settings and capabilities such as `NET_ADMIN` and `SYS_MODULE`. + + +Only enable native WireGuard mode if your cluster policy allows privileged workloads. Do not add `NET_ADMIN` to the default non-native deployment unless you know it is required. + + +### Check namespace policy + +```bash +kubectl get namespace "$NEWT_NAMESPACE" --show-labels +``` + +A namespace with a restricted Pod Security Admission profile may block native mode. + +## Service not created or not reachable + +### Important behavior + +`acceptClients` does not create a Service. + +A Service is created through: + +```yaml +newtInstances: + - name: main-tunnel + service: + enabled: true +``` + +The chart also has `service.enabledWhenAcceptClients`, but runtime client behavior and Service rendering should still be verified in the rendered manifests. + +### Check Services + +```bash +kubectl get svc --namespace "$NEWT_NAMESPACE" \ + -l app.kubernetes.io/name=newt +``` + +Describe the Service: + +```bash +kubectl describe svc --namespace "$NEWT_NAMESPACE" +``` + +### LoadBalancer stuck in pending + +```text +EXTERNAL-IP +``` + +Common causes: + +* the cluster has no cloud load balancer integration +* bare-metal cluster without MetalLB or equivalent +* cloud provider quota or permission issue +* invalid `loadBalancerClass` +* invalid `loadBalancerSourceRanges` + +For bare-metal clusters, use MetalLB or another load balancer implementation, or use `NodePort` if appropriate. 
+ +## Metrics scraping does not work + +Metrics are disabled by default. + +Enable metrics: + +```yaml +global: + metrics: + enabled: true +``` + +The chart default admin address is: + +```yaml +global: + metrics: + adminAddr: ":2112" +``` + +This listens on all interfaces and allows in-cluster scraping. Do not set it to `127.0.0.1:2112` if Prometheus scrapes from another pod. + +### Metrics Service + +Enable the metrics Service: + +```yaml +global: + metrics: + enabled: true + service: + enabled: true + port: 2112 +``` + +### ServiceMonitor + +If you use Prometheus Operator: + +```yaml +global: + metrics: + enabled: true + service: + enabled: true + serviceMonitor: + enabled: true +``` + +Check resources: + +```bash +kubectl get svc,podmonitor,servicemonitor,prometheusrule \ + --namespace "$NEWT_NAMESPACE" \ + -l app.kubernetes.io/name=newt +``` + + +The chart has separate metrics values for container port, admin address, and metrics Service port. Check the rendered manifest when changing these values. + + +## NetworkPolicy blocks traffic + +If NetworkPolicy is enabled, check that the policy allows required egress. + +Newt usually needs egress to: + +* DNS +* Pangolin endpoint over HTTPS +* any tunnel or connectivity endpoints used by your deployment + +Check policies: + +```bash +kubectl get networkpolicy --namespace "$NEWT_NAMESPACE" +kubectl describe networkpolicy --namespace "$NEWT_NAMESPACE" +``` + +If DNS is blocked, enable or add DNS egress rules. + +Example: + +```yaml +global: + networkPolicy: + enabled: true + components: + dns: + enabled: true +``` + +If HTTPS egress is blocked, add an appropriate custom egress rule for your environment. + +## Multiple Newt instances conflict + +### Symptoms + +* Multiple pods run, but only one site connects. +* Both instances use the same credentials. +* A site appears to flap between instances. +* Logs show authentication or registration conflicts. 
+ +### Check values + +```bash +helm get values "$NEWT_RELEASE" --namespace "$NEWT_NAMESPACE" +``` + +Each instance should use its own credentials or provisioning identity: + +```yaml +newtInstances: + - name: site-a + enabled: true + auth: + existingSecretName: newt-auth-site-a + + - name: site-b + enabled: true + auth: + existingSecretName: newt-auth-site-b +``` + +Create separate Secrets: + +```bash +kubectl create secret generic newt-auth-site-a \ + --namespace "$NEWT_NAMESPACE" \ + --from-literal=PANGOLIN_ENDPOINT=https://pangolin.example.com \ + --from-literal=NEWT_ID= \ + --from-literal=NEWT_SECRET= + +kubectl create secret generic newt-auth-site-b \ + --namespace "$NEWT_NAMESPACE" \ + --from-literal=PANGOLIN_ENDPOINT=https://pangolin.example.com \ + --from-literal=NEWT_ID= \ + --from-literal=NEWT_SECRET= +``` + +## RBAC or service account issues + +Chart `1.4.0` disables RBAC creation by default. + +Check service account and RBAC: + +```bash +kubectl get serviceaccount,role,rolebinding \ + --namespace "$NEWT_NAMESPACE" \ + -l app.kubernetes.io/name=newt +``` + +If your configuration requires Kubernetes API access, enable RBAC: + +```yaml +rbac: + create: true + clusterRole: false +``` + +For most Newt deployments, RBAC is not required. 
+ +## High CPU or memory usage + +Check resource usage: + +```bash +kubectl top pod --namespace "$NEWT_NAMESPACE" \ + -l app.kubernetes.io/name=newt +``` + +Check current resource settings: + +```bash +kubectl get pod "$NEWT_POD" --namespace "$NEWT_NAMESPACE" \ + -o jsonpath='{.spec.containers[0].resources}' +``` + +Tune resources in values: + +```yaml +newtInstances: + - name: main-tunnel + resources: + requests: + cpu: 200m + memory: 256Mi + limits: + cpu: 1000m + memory: 512Mi +``` + +Then upgrade: + +```bash +helm upgrade "$NEWT_RELEASE" fossorial/newt \ + --namespace "$NEWT_NAMESPACE" \ + --values values-newt.yaml +``` + +Common causes of high usage: + +* high tunnel traffic +* too low resource limits +* repeated reconnect loops +* excessive debug logging +* MTU or network path issues + +## MTU issues + +### Symptoms + +* Connections establish but large transfers fail. +* Some websites or services work, others hang. +* Logs show repeated reconnects. +* Throughput is much lower than expected. + +Newt defaults to MTU `1280`. 
+ +Try another MTU only after confirming basic connectivity: + +```yaml +newtInstances: + - name: main-tunnel + mtu: 1280 +``` + +Upgrade after changing values: + +```bash +helm upgrade "$NEWT_RELEASE" fossorial/newt \ + --namespace "$NEWT_NAMESPACE" \ + --values values-newt.yaml +``` + +## Helm debugging + +Preview an upgrade: + +```bash +helm upgrade "$NEWT_RELEASE" fossorial/newt \ + --namespace "$NEWT_NAMESPACE" \ + --values values-newt.yaml \ + --dry-run +``` + +Render the chart locally: + +```bash +helm template "$NEWT_RELEASE" fossorial/newt \ + --namespace "$NEWT_NAMESPACE" \ + --values values-newt.yaml +``` + +Show rendered manifests from the live release: + +```bash +helm get manifest "$NEWT_RELEASE" --namespace "$NEWT_NAMESPACE" +``` + +Show values from the live release: + +```bash +helm get values "$NEWT_RELEASE" --namespace "$NEWT_NAMESPACE" +``` + +Rollback: + +```bash +helm rollback "$NEWT_RELEASE" --namespace "$NEWT_NAMESPACE" +``` + +## Kustomize debugging + +Validate the overlay: + +```bash +kustomize build overlays/site-a +``` + +Run a server-side dry run: + +```bash +kustomize build overlays/site-a | kubectl apply -f - --dry-run=server +``` + +Preview live changes: + +```bash +kustomize build overlays/site-a | kubectl diff -f - +``` + +If a patch does not apply, inspect generated resource names: + +```bash +kustomize build base | grep -E "^(kind:| name:)" +``` + +## Collect diagnostics + +Collect logs and resource information: + +```bash +kubectl logs --namespace "$NEWT_NAMESPACE" \ + -l app.kubernetes.io/name=newt \ + --tail=200 > newt-logs.txt + +kubectl get pods --namespace "$NEWT_NAMESPACE" \ + -l app.kubernetes.io/name=newt \ + -o yaml > newt-pods.yaml + +kubectl get events --namespace "$NEWT_NAMESPACE" \ + --sort-by=.lastTimestamp > newt-events.txt + +helm get values "$NEWT_RELEASE" \ + --namespace "$NEWT_NAMESPACE" > newt-helm-values.yaml + +helm get manifest "$NEWT_RELEASE" \ + --namespace "$NEWT_NAMESPACE" > newt-helm-manifest.yaml +``` 
+ +If using Kustomize: + +```bash +kustomize build overlays/site-a > newt-kustomize-output.yaml +``` + +Before sharing diagnostics, remove: + +* Newt credentials +* provisioning keys +* TLS private keys +* tokens +* passwords +* internal hostnames if sensitive + +## Next steps + + + + Review Newt chart options. + + + Install Newt with Helm. + + + Install Newt with rendered manifests and Kustomize overlays. + + + Deploy Newt with Argo CD or Flux. + + diff --git a/self-host/manual/kubernetes/overview.mdx b/self-host/manual/kubernetes/overview.mdx new file mode 100644 index 0000000..68cdd82 --- /dev/null +++ b/self-host/manual/kubernetes/overview.mdx @@ -0,0 +1,108 @@ +--- +title: "Overview" +description: "Deploy Pangolin, Newt, and related components on Kubernetes with Helm, Kustomize, GitOps, or Helmfile." +--- + +import PangolinCloudTocCta from "/snippets/pangolin-cloud-toc-cta.mdx"; + + + + +## Kubernetes deployment options + +Kubernetes is a good fit for running Pangolin-related components when you need repeatable deployments, workload isolation, rolling updates, and integration with existing cluster services such as ingress, storage, monitoring, and network policy. + +This section covers the main Kubernetes workflows: + +- **Helm** for the recommended chart-based installation and upgrade workflow. +- **Kustomize** for overlay-based customization and manifest-driven deployments. +- **GitOps** with Argo CD or Flux for reconciling Helm charts, Kustomize overlays, or manifests from Git. +- **Helmfile** for advanced setups that manage multiple Helm releases together. + +## What this section covers + +- Kubernetes prerequisites and cluster requirements. +- Installation workflows for Helm, Kustomize, GitOps, and Helmfile. +- Pangolin installation, configuration, and troubleshooting. +- Newt installation, configuration, and troubleshooting. +- How Pangolin, Gerbil, Traefik, Newt and Pangolin-Kube-Controller fit together in tunneled deployments. 
+ +## Components + +| Component | Role | +| --- | --- | +| Pangolin | Main application and control plane for the dashboard, API, authentication, configuration, and database-backed state. | +| Gerbil | WireGuard interface management service used as part of the Pangolin tunnel stack. | +| Newt | Site connector used to expose private resources through Pangolin. Newt runs as a user-space WireGuard tunnel client and TCP/UDP proxy. | +| Traefik | Reverse proxy and router for ingress traffic. | +| PostgreSQL / SQLite | Database options for Pangolin deployments, depending on the selected installation workflow and chart configuration. | +| Controller | Kubernetes controller for integration with Traefik cluster resources, replacing single Traefik instances with Traefik ingress controllers. | + + +For local reverse proxy deployments, the full tunnel stack may not be required. Tunneled sites require the components needed for Newt and WireGuard-based connectivity. + + +```mermaid +flowchart LR + U[Users] --> T[Traefik] + T --> P[Pangolin] + P --> G[Gerbil] + N[Newt] --> G + P --> D[(Database)] +``` + +## Method comparison + +Choose the workflow that matches how you already manage Kubernetes applications: + +| Method | Best for | Complexity | GitOps fit | +| --- | --- | --- | --- | +| **Helm** | Standard Kubernetes installs and upgrades | Low | Works with Argo CD and Flux | +| **Kustomize** | Environment-specific overlays and manifest customization | Medium | Works with Argo CD and Flux | +| **Argo CD** | Git-driven reconciliation with a web UI and sync status | Medium | Native GitOps workflow | +| **Flux** | Declarative GitOps using Kubernetes custom resources | Medium | Native GitOps workflow | +| **Helmfile** | Managing multiple Helm releases as one deployment stack | Medium | Usually used from CI/CD or a controlled automation workflow | + + +Argo CD and Flux are delivery and reconciliation tools. They do not replace Helm or Kustomize. 
They can deploy Helm charts, Kustomize overlays, and other Kubernetes manifests. + + +## Recommended starting points + + + + Compare Helm, Kustomize, GitOps, and Helmfile before choosing a workflow. + + + Review the cluster, ingress, storage, DNS, and tooling requirements. + + + Start with the recommended chart-based Kubernetes workflow. + + + Use overlays and patches for manifest-based deployments. + + + Deploy with Argo CD or Flux from Git. + + + Manage Pangolin, Newt, and supporting Helm releases together. + + + +## Component quick links + + + + Install Pangolin with the Helm chart. + + + Configure Pangolin for your Kubernetes environment. + + + Install Newt in a Kubernetes cluster. + + + Configure Newt credentials, endpoints, resources, and runtime settings. + + diff --git a/self-host/manual/kubernetes/pangolin/configuration.mdx b/self-host/manual/kubernetes/pangolin/configuration.mdx new file mode 100644 index 0000000..0e30081 --- /dev/null +++ b/self-host/manual/kubernetes/pangolin/configuration.mdx @@ -0,0 +1,1016 @@ +--- +title: "Pangolin Configuration" +description: "Configuration reference for Pangolin Kubernetes deployments." +--- + +import PangolinCloudTocCta from "/snippets/pangolin-cloud-toc-cta.mdx"; + + + + +This page covers the main Pangolin Kubernetes configuration options for Helm and Kustomize workflows. + +For exhaustive option coverage, refer to the chart resources: + + + + + + + +## Version context + +This page is aligned with the Pangolin Helm chart `0.1.0-alpha.0`. + +| Item | Value | +| --- | --- | +| Chart version | `0.1.0-alpha.0` | +| Pangolin app version | `1.18.2` | +| Kubernetes version | `>=1.30.14-0` | +| Gerbil image tag | `1.3.1` | +| pangolin-kube-controller image tag | `0.1.0-alpha.1` | +| Traefik image tag | `v3.6.15` | + +## Configuration sections + + + + +Control how Pangolin components are deployed and integrated with Kubernetes. 
+ +```yaml +deployment: + type: controller + mode: multi + installTraefikController: false + traefikNamespace: "" +``` + +Recommended production topology: + +```yaml +deployment: + type: controller + mode: multi +``` + +| Setting | Description | +| ------------------------------------------ | --------------------------------------------------------------------------------------------------------------- | +| `deployment.type=controller` | Uses `pangolin-kube-controller` and Traefik CRDs. Recommended for Kubernetes deployments. | +| `deployment.type=standalone` | Runs an internal Traefik workload managed by this chart. Mainly useful for labs and self-contained deployments. | +| `deployment.mode=multi` | Runs Pangolin, Gerbil, and controller/Traefik components as separate workloads. Recommended for production. | +| `deployment.mode=single` | Runs multiple components in one shared Pod. Useful only when you explicitly need a compact topology. | +| `deployment.installTraefikController=true` | Installs the bundled Traefik dependency in controller mode. | +| `deployment.traefikNamespace` | Namespace where Traefik controller resources live. Defaults to the release namespace when empty. | + + +In controller mode, Traefik CRDs and a Traefik controller must be available. You can install Traefik separately or enable the bundled Traefik dependency with `deployment.installTraefikController=true`. + + +If you enable the bundled Traefik dependency, put Traefik chart overrides under the `traefikController` key. + + + + + +Namespace creation is controlled by the `namespace` block. + +```yaml +namespace: + create: false + name: "" + labels: {} + podSecurity: + enforce: "" + warn: "" + audit: "" +``` + +Recommended pattern: + +1. Create the namespace manually. +2. Apply the required labels and annotations. +3. Install the chart into that namespace. + +```bash +kubectl create namespace pangolin +``` + +Gerbil requires `NET_ADMIN` for WireGuard interface management. 
If your cluster enforces Pod Security Admission, the namespace must allow that capability. + +Example: + +```bash +kubectl label namespace pangolin \ + pod-security.kubernetes.io/enforce=privileged \ + --overwrite +``` + +If you let the chart create the namespace, configure the Pod Security labels through values: + +```yaml +namespace: + create: true + name: pangolin + podSecurity: + enforce: privileged + warn: baseline + audit: restricted +``` + + +Do not apply a restricted Pod Security profile to a namespace running Gerbil unless you have validated WireGuard functionality. Gerbil requires `NET_ADMIN`; removing it breaks tunnel management. + + + + + + +Choose the database backend for Pangolin. + +```yaml +database: + mode: cloudnativepg + name: pangolin + username: pangolin +``` + +Supported modes: + +| Mode | Use case | +| --------------- | --------------------------------------------------------------------- | +| `cloudnativepg` | Recommended production mode using CloudNativePG. This is the default. | +| `external` | Production mode with an externally managed PostgreSQL database. | +| `embedded` | Chart-managed PostgreSQL for labs and test environments. | +| `sqlite` | Development or CI only. Not recommended for production. | + +### CloudNativePG + +The default database mode is `cloudnativepg`. 
+ +```yaml +database: + mode: cloudnativepg + cloudnativepg: + cluster: + name: pangolin-db + connection: + database: pangolin + username: pangolin + sslMode: disable + +cnpg-operator: + enabled: false + +cnpg-cluster: + enabled: false + fullnameOverride: pangolin-db +``` + +CloudNativePG can be used in four common ways: + +| Mode | Values | +| -------------------------------------- | ----------------------------------------------------------- | +| Existing operator and existing cluster | `cnpg-operator.enabled=false`, `cnpg-cluster.enabled=false` | +| Chart installs operator only | `cnpg-operator.enabled=true`, `cnpg-cluster.enabled=false` | +| Chart installs cluster only | `cnpg-operator.enabled=false`, `cnpg-cluster.enabled=true` | +| Chart installs operator and cluster | `cnpg-operator.enabled=true`, `cnpg-cluster.enabled=true` | + +When `cnpg-cluster.enabled=true`, keep the CNPG cluster name consistent: + +```yaml +database: + cloudnativepg: + cluster: + name: pangolin-db + +cnpg-cluster: + enabled: true + fullnameOverride: pangolin-db +``` + +For the default CNPG cluster name `pangolin-db`, CloudNativePG creates an application Secret named `pangolin-db-app` with the key `uri`. The chart can automatically use this default Secret when no explicit `database.connection.existingSecretName` is set. + +Explicit Secret reference: + +```yaml +database: + connection: + existingSecretName: pangolin-db-app + existingSecretKey: uri +``` + +### External PostgreSQL + +For an external PostgreSQL database, prefer a Kubernetes Secret containing the final connection string. 
+ +```yaml +database: + mode: external + connection: + existingSecretName: pangolin-db-connection + existingSecretKey: connectionString +``` + +The Secret should contain a PostgreSQL connection string: + +```bash +kubectl create secret generic pangolin-db-connection \ + --namespace pangolin \ + --from-literal=connectionString='postgresql://pangolin:password@postgres.example.com:5432/pangolin?sslmode=require' +``` + +You can also let the chart create a connection Secret from values: + +```yaml +database: + mode: external + external: + generatedSecret: + create: true + host: postgres.example.com + port: 5432 + database: pangolin + username: pangolin + password: "" + sslMode: require +``` + + +Avoid storing database passwords directly in values files for production. Use an existing Secret or your normal secret-management workflow. + + +### Embedded PostgreSQL + +Embedded PostgreSQL is intended for labs and tests. + +```yaml +database: + mode: embedded + embedded: + persistence: + enabled: true + size: 8Gi +``` + +### SQLite + +SQLite is only suitable for development, CI, or very small test deployments. + +```yaml +database: + mode: sqlite + sqlite: + persistence: + enabled: true + size: 1Gi +``` + + + + + +The `pangolin.config` block renders `/app/config/config.yml`. + +```yaml +pangolin: + config: + app: + dashboard_url: "https://pangolin.example.com" + log_level: info + domains: + domain1: + base_domain: "example.com" + cert_resolver: "letsencrypt" + gerbil: + start_port: 51820 + clients_start_port: 21820 + base_endpoint: "pangolin.example.com" + use_subdomain: false + traefik: + enabled: true + http_entrypoint: web + https_entrypoint: websecure + cert_resolver: letsencrypt +``` + +Important settings: + +| Setting | Description | +| ------------------------------------------- | ------------------------------------------------------------------------------------------- | +| `pangolin.config.app.dashboard_url` | Public dashboard URL. 
Set this to the real user-facing URL. | +| `pangolin.config.domains` | Domain map used by Pangolin. Replace the default `example.com` entry before production use. | +| `pangolin.config.gerbil.base_endpoint` | Public hostname or IP where Gerbil is reachable. | +| `pangolin.config.gerbil.start_port` | First WireGuard site port. Keep this aligned with `gerbil.ports.wg1`. | +| `pangolin.config.gerbil.clients_start_port` | Client WireGuard port. Keep this aligned with `gerbil.ports.wg2`. | +| `pangolin.config.traefik.enabled` | Includes Pangolin's Traefik config section. This does not install Traefik. | +| `pangolin.config.traefik.cert_resolver` | ACME resolver name used in Pangolin-generated Traefik configuration. | + + +`pangolin.config.traefik` controls the Traefik configuration generated by Pangolin. Traefik installation is controlled separately through controller mode, the bundled Traefik dependency, or standalone Traefik mode. + + +### Pangolin app secret + +Pangolin requires `SERVER_SECRET`. + +Use an existing Secret for production: + +```yaml +pangolin: + secret: + existingSecretName: pangolin-app-secret + existingSecretKey: SERVER_SECRET +``` + +Create the Secret: + +```bash +kubectl create secret generic pangolin-app-secret \ + --namespace pangolin \ + --from-literal=SERVER_SECRET='' +``` + +If no existing Secret is provided, the chart can generate one: + +```yaml +pangolin: + secret: + generated: + create: true + key: SERVER_SECRET + length: 64 +``` + + +Do not commit plaintext secrets to Git. For GitOps workflows, use SOPS, Sealed Secrets, External Secrets Operator, Vault, Infisical, or a cloud secret manager. + + + + + + +In controller mode, the chart can render a Traefik `IngressRoute` for the Pangolin dashboard and API. 
+ +```yaml +pangolin: + ingressRoute: + dashboard: + enabled: true + host: "" + ingressClassName: "" + traefikSelectorLabels: {} + entryPoints: + - websecure + routes: + api: + enabled: true + pathPrefix: /api/v1 + priority: 100 + dashboard: + enabled: true + priority: 10 + tls: + enabled: true + certResolver: "" + secretName: "" +``` + +Default routing behavior: + +| Route | Match | Backend port | +| --------- | ---------------------------------- | ------------------------------------------------- | +| API | `Host(...) && PathPrefix(/api/v1)` | `pangolin.service.ports.external`, default `3000` | +| Dashboard | `Host(...)` | `pangolin.service.ports.next`, default `3002` | + +The host defaults to the hostname from `pangolin.config.app.dashboard_url`. You can override it with: + +```yaml +pangolin: + ingressRoute: + dashboard: + host: pangolin.example.com +``` + +### TLS with certResolver + +```yaml +pangolin: + config: + traefik: + cert_resolver: letsencrypt + ingressRoute: + dashboard: + tls: + enabled: true + certResolver: letsencrypt + secretName: "" +``` + +### TLS with existing Secret + +```yaml +pangolin: + ingressRoute: + dashboard: + tls: + enabled: true + certResolver: "" + secretName: pangolin-dashboard-tls +``` + + +`tls.certResolver` and `tls.secretName` are mutually exclusive. Use one or the other. + + +### Multi-Traefik setups + +Use labels to target a specific Traefik CRD provider when multiple Traefik instances watch different label selectors: + +```yaml +pangolin: + ingressRoute: + dashboard: + traefikSelectorLabels: + traefik-instance: public +``` + +You can also set an ingress class annotation: + +```yaml +pangolin: + ingressRoute: + dashboard: + ingressClassName: traefik-public +``` + + + + + +Gerbil manages WireGuard tunnel connectivity for Pangolin. 
+ +```yaml +gerbil: + enabled: true + startupMode: normal + ports: + wg1: 51820 + wg2: 21820 + internalApi: 3004 + service: + enabled: true + type: ClusterIP + persistence: + enabled: true + size: 1Gi +``` + +Important settings: + +| Setting | Description | +| ---------------------------- | ----------------------------------------------------------------------------------------- | +| `gerbil.enabled` | Enables the Gerbil component. | +| `gerbil.startupMode` | Controls first-run and normal startup behavior. | +| `gerbil.ports.wg1` | First WireGuard UDP port. Keep aligned with `pangolin.config.gerbil.start_port`. | +| `gerbil.ports.wg2` | Second WireGuard UDP port. Keep aligned with `pangolin.config.gerbil.clients_start_port`. | +| `gerbil.ports.internalApi` | Internal Gerbil API/listener port. | +| `gerbil.service.enabled` | Creates a Service for Gerbil UDP traffic. | +| `gerbil.persistence.enabled` | Persists Gerbil key/config data. Recommended for production. | + +### Startup mode + +```yaml +gerbil: + startupMode: delayed +``` + +| Mode | Behavior | +| -------------------- | ---------------------------------------------------------------------------------------------------------------------------- | +| `normal` | Starts Gerbil immediately. Use after Pangolin setup is complete. | +| `delayed` | Renders Gerbil resources but keeps the Deployment at `replicas: 0` in multi mode. Useful for first installs and smoke tests. | +| `disabledUntilSetup` | Does not render Gerbil resources until switched back to `normal` or `delayed`. | + +For first installs, `delayed` can help when Gerbil would otherwise fail before the initial Pangolin setup is complete. + +Switch back after setup: + +```bash +helm upgrade pangolin fossorial/pangolin \ + --namespace pangolin \ + --reuse-values \ + --set gerbil.startupMode=normal +``` + +### Security + +Gerbil requires `NET_ADMIN`. 
+ +```yaml +gerbil: + securityContext: + runAsNonRoot: false + allowPrivilegeEscalation: false + readOnlyRootFilesystem: false + capabilities: + add: + - NET_ADMIN + drop: + - ALL +``` + + +Do not remove `NET_ADMIN` from Gerbil. Without it, Gerbil cannot create or manage WireGuard interfaces. `SYS_MODULE` is not added by default and should only be added when your node kernel requires module loading from inside the container. + + + + + + +NetworkPolicy rendering is enabled by default. + + +The chart-managed NetworkPolicies are intended to allow required Pangolin, Gerbil, database, DNS, and controller traffic for standard deployments. + + +```yaml +networkPolicy: + enabled: true + allowExternalIngress: true + allowExternalEgressHttps: false + dns: + enabled: true + database: + enabled: true + port: 5432 + controller: + egress: + enabled: true + kubernetesApi: + enabled: true + cidr: "" + port: 443 + metrics: + enabled: false + gerbil: + allowWireguardUdpEgress: true + wireguardUdpCIDRs: + - 0.0.0.0/0 +``` + +Important defaults: + +| Setting | Default | Notes | +| ------------------------------------------------------- | ------- | -------------------------------------------------------------------------------- | +| `networkPolicy.enabled` | `true` | Renders NetworkPolicy resources. | +| `networkPolicy.allowExternalIngress` | `true` | Allows public ingress to exposed services controlled by the chart. | +| `networkPolicy.allowExternalEgressHttps` | `false` | Broad HTTPS egress is not allowed by default. Prefer scoped `extraEgress` rules. | +| `networkPolicy.dns.enabled` | `true` | Allows DNS egress. | +| `networkPolicy.database.enabled` | `true` | Adds database egress rules for Pangolin. | +| `networkPolicy.controller.egress.kubernetesApi.enabled` | `true` | Allows controller API-server access when configured. | +| `networkPolicy.gerbil.allowWireguardUdpEgress` | `true` | Allows Gerbil UDP egress for WireGuard peer traffic. 
| + +When tightening policies, verify these paths: + +* DNS egress +* Pangolin to database +* controller to Kubernetes API +* ingress controller to Pangolin service +* Gerbil UDP traffic +* outbound access for SMTP, OIDC, webhooks, or other external integrations + +Use component-scoped rules where possible: + +```yaml +networkPolicy: + pangolin: + extraEgress: [] + controller: + extraEgress: [] + gerbil: + extraEgress: [] +``` + + +If you disable or replace chart-managed NetworkPolicies, ensure your custom policies still allow all required traffic paths. + + + + + + +The chart has chart-level monitoring settings for Pangolin and controller-specific monitoring settings for `pangolin-kube-controller`. + +### Pangolin monitoring + +```yaml +monitoring: + enabled: false + service: + enabled: false + type: ClusterIP + port: 9090 + portName: metrics + metrics: + targetPortName: metrics + targetPort: 9090 + path: /metrics +``` + +### Controller monitoring + +```yaml +controller: + service: + enabled: true + port: 9090 + portName: metrics + monitoring: + serviceMonitor: + enabled: false + podMonitor: + enabled: false + prometheusRule: + enabled: false +``` + +Enable controller ServiceMonitor when Prometheus Operator is available: + +```yaml +controller: + monitoring: + serviceMonitor: + enabled: true +``` + +Enable chart-level metrics Service when the Pangolin app exposes metrics in your selected configuration: + +```yaml +monitoring: + enabled: true + service: + enabled: true +``` + + +Only enable ServiceMonitor, PodMonitor, or PrometheusRule resources when the matching CRDs are installed in the cluster. + + + + + + +The chart uses separate ServiceAccounts for Pangolin, Gerbil, and the controller in multi mode. 
+ +```yaml +serviceAccount: + pangolin: + create: true + automountServiceAccountToken: false + gerbil: + create: true + automountServiceAccountToken: false + controller: + create: true + automountServiceAccountToken: true + +rbac: + create: true +``` + +Default behavior: + +| Component | API token mounted by default | Reason | +| ---------- | ---------------------------- | ----------------------------------------------------------------------- | +| Pangolin | No | The app does not need Kubernetes API access. | +| Gerbil | No | Gerbil manages WireGuard and does not need Kubernetes API access. | +| Controller | Yes | The controller reconciles Traefik CRDs and needs Kubernetes API access. | + + +In `deployment.mode=single` with `deployment.type=controller`, Kubernetes ServiceAccount selection is Pod-level. The shared Pod uses the controller ServiceAccount and token. + + + + + + +Global scheduling defaults: + +```yaml +global: + storageClass: "" + image: + registry: docker.io + imagePullPolicy: IfNotPresent + imagePullSecrets: [] + nodeSelector: {} + tolerations: [] + affinity: {} + topologySpreadConstraints: [] + priorityClassName: "" +``` + +Resource rendering policy: + +```yaml +resourcesPolicy: + cpuLimits: + enabled: true + ephemeralStorage: + enabled: false +``` + + +CPU limits can cause throttling even when spare CPU exists on the node. For most deployments, start with CPU requests and memory limits, then add CPU limits only when explicitly required. 
+ + +Pangolin resources: + +```yaml +pangolin: + resources: + requests: + cpu: 200m + memory: 256Mi + ephemeral-storage: 32Mi + limits: + cpu: 1000m + memory: 1Gi + ephemeral-storage: 256Mi +``` + +Gerbil resources: + +```yaml +gerbil: + resources: + requests: + cpu: 100m + memory: 128Mi + ephemeral-storage: 16Mi + limits: + cpu: 500m + memory: 512Mi + ephemeral-storage: 128Mi +``` + +Controller resources: + +```yaml +controller: + resources: + requests: + cpu: 100m + memory: 128Mi + ephemeral-storage: 16Mi + limits: + cpu: 500m + memory: 512Mi + ephemeral-storage: 128Mi +``` + +Image configuration: + +```yaml +images: + pangolin: + registry: docker.io + repository: fosrl/pangolin + tag: "" + digest: "" + pangolinPostgresql: + registry: docker.io + repository: fosrl/pangolin + tag: "" + digest: "" + gerbil: + registry: docker.io + repository: fosrl/gerbil + tag: "1.3.1" + digest: "" + controller: + registry: ghcr.io + repository: fosrl/pangolin-kube-controller + tag: "0.1.0-alpha.1" + digest: "" + traefik: + registry: docker.io + repository: traefik + tag: v3.6.15 + digest: "" +``` + +The chart automatically selects the PostgreSQL-capable Pangolin image variant for non-SQLite database modes unless you override the Pangolin tag or digest. + + +Ephemeral-storage requests and limits are only rendered when `resourcesPolicy.ephemeralStorage.enabled=true`. + + + + + + +Standalone Traefik is used mainly when `deployment.type=standalone`. + +```yaml +traefik: + enabled: false + service: + enabled: true + type: LoadBalancer + config: + dashboard: false + httpEntrypoint: web + httpsEntrypoint: websecure + certResolver: letsencrypt + letsencryptEmail: "" + persistence: + enabled: false +``` + +Important notes: + +- `traefik.enabled=true` runs an internal Traefik workload managed by this chart. +- `traefik.config.letsencryptEmail` is required when standalone Traefik is enabled. 
- If you use an ACME cert resolver such as Let's Encrypt, enable `traefik.persistence.enabled` so ACME state survives restarts.
+ +### Kustomize + +Render the chart with Helm, then apply Kustomize overlays: + +```bash +helm template pangolin fossorial/pangolin \ + --namespace pangolin \ + --values values-pangolin.yaml \ + > base/pangolin.yaml +``` + +Apply the overlay: + +```bash +kubectl apply -k overlays/prod +``` + +See [Pangolin Kustomize](/self-host/manual/kubernetes/pangolin/kustomize) for the Kustomize workflow. + +### GitOps + +Store Helm values or Kustomize overlays in Git. Argo CD or Flux reconciles the desired state. + +Argo CD Helm example: + +```yaml +spec: + source: + helm: + values: | + deployment: + type: controller + mode: multi + database: + mode: cloudnativepg +``` + +Flux HelmRelease example: + +```yaml +spec: + values: + deployment: + type: controller + mode: multi + database: + mode: cloudnativepg +``` + +See [GitOps](/self-host/manual/kubernetes/gitops/overview) for GitOps guidance. + +## Production checklist + +Before deploying to production: + +* [ ] Use `deployment.type=controller`, unless you have a specific reason to use standalone mode. +* [ ] Use `deployment.mode=multi`. +* [ ] Use `database.mode=cloudnativepg` or `database.mode=external`. +* [ ] Avoid SQLite for production. +* [ ] Configure a real `pangolin.config.app.dashboard_url`. +* [ ] Replace the default `example.com` domain entry. +* [ ] Configure `pangolin.config.gerbil.base_endpoint`. +* [ ] Keep `pangolin.config.gerbil.start_port` aligned with `gerbil.ports.wg1`. +* [ ] Keep `pangolin.config.gerbil.clients_start_port` aligned with `gerbil.ports.wg2`. +* [ ] Configure TLS with either a Traefik cert resolver or an existing TLS Secret. +* [ ] Create or label the namespace so Gerbil can use `NET_ADMIN`. +* [ ] Keep Gerbil persistence enabled. +* [ ] Store the Pangolin app secret in a Kubernetes Secret. +* [ ] Use an existing database connection Secret for external PostgreSQL. +* [ ] Review NetworkPolicy egress requirements. +* [ ] Avoid broad `0.0.0.0/0` egress unless required. 
+* [ ] Enable monitoring resources only when the required CRDs exist. +* [ ] Set resource requests and limits based on expected traffic. +* [ ] Define a database backup strategy. +* [ ] Test upgrades in a staging environment before production. + +## Next steps + + + + Install Pangolin with Helm. + + + Install Pangolin with rendered manifests and Kustomize overlays. + + + Debug Pangolin deployments on Kubernetes. + + + Deploy Pangolin with Argo CD or Flux. + + diff --git a/self-host/manual/kubernetes/pangolin/helm.mdx b/self-host/manual/kubernetes/pangolin/helm.mdx new file mode 100644 index 0000000..4e74445 --- /dev/null +++ b/self-host/manual/kubernetes/pangolin/helm.mdx @@ -0,0 +1,408 @@ +--- +title: "Pangolin Helm" +description: "Quick-start guide for installing Pangolin on Kubernetes using Helm." +--- + +import PangolinCloudTocCta from "/snippets/pangolin-cloud-toc-cta.mdx"; + + + + + +The Pangolin Helm chart is currently alpha (`0.1.0-alpha.0`). Test installs and upgrades in a non-production environment before using the chart for production traffic. + + +## What Pangolin deploys + +The Pangolin Helm chart deploys the Pangolin control plane and related Kubernetes components. + +Depending on the selected values, the chart can deploy: + +- **Pangolin application**: dashboard, API, authentication, configuration, and application state. +- **pangolin-kube-controller**: Kubernetes controller used in controller mode. +- **Gerbil**: WireGuard tunnel manager used by the Pangolin tunnel stack. +- **Traefik integration**: Traefik CRD-based routing in controller mode, bundled Traefik controller when enabled, or standalone Traefik mode. +- **Database backend**: CloudNativePG, external PostgreSQL, embedded PostgreSQL, or SQLite. 
+ +## Version matrix + +| Item | Value | +| --- | --- | +| Chart version | `0.1.0-alpha.0` | +| Kubernetes version | `>=1.30.14-0` | +| Pangolin appVersion | `1.18.2` | +| Pangolin default image tag | `1.18.2` | +| Pangolin PostgreSQL image tag | `postgresql-1.18.2` | +| pangolin-kube-controller tag | `0.1.0-alpha.1` | +| Gerbil tag | `1.3.1` | +| Traefik tag | `v3.6.15` | + +The current chart metadata defines chart version `0.1.0-alpha.0`, app version `1.18.2`, Kubernetes `>=1.30.14-0`, and the component image metadata listed above. :contentReference[oaicite:0]{index=0} + +## Prerequisites + +Before installing Pangolin, you need: + +- Kubernetes `1.30.14` or newer. +- Helm 3.x. +- `kubectl` access to the target cluster. +- A namespace prepared for the install. +- A StorageClass if you use chart-managed persistent storage. +- DNS records for the Pangolin dashboard and tunnel endpoint. +- Traefik CRDs and a Traefik controller when using `deployment.type=controller`. +- A database plan: CloudNativePG, external PostgreSQL, embedded PostgreSQL, or SQLite. + +See [Prerequisites](/self-host/manual/kubernetes/prerequisites) for detailed cluster, namespace, storage, networking, and security requirements. + +## Recommended quick install + +This quick install uses: + +- `deployment.type=controller` +- `deployment.mode=multi` +- `database.mode=cloudnativepg` +- chart-managed CloudNativePG operator and cluster +- chart-managed dashboard `IngressRoute` +- Traefik cert resolver for TLS + + +This example assumes a Traefik controller is available and can process the chart-managed `IngressRoute`. If you want the chart to install the bundled Traefik controller, set `deployment.installTraefikController=true`. + + +### Step 1: Create the namespace + +Create the namespace before installing the chart: + +```bash +kubectl create namespace pangolin +``` + +Gerbil requires `NET_ADMIN` for WireGuard interface management. 
If your cluster enforces Pod Security Admission, label the namespace accordingly: + +```bash +kubectl label namespace pangolin \ + pod-security.kubernetes.io/enforce=privileged \ + pod-security.kubernetes.io/warn=baseline \ + pod-security.kubernetes.io/audit=restricted \ + --overwrite +``` + + +Do not use a restricted Pod Security profile for a namespace running Gerbil unless you have validated the selected chart mode. Gerbil requires `NET_ADMIN` for WireGuard. + + +### Step 2: Create a Pangolin app secret + +Create a Secret for `SERVER_SECRET`: + +```bash +kubectl create secret generic pangolin-app-secret \ + --namespace pangolin \ + --from-literal=SERVER_SECRET='' +``` + +Use a long random value. Do not commit this secret to Git. + +### Step 3: Create a values file + +Create `values-pangolin.yaml`: + +```yaml +deployment: + type: controller + mode: multi + installTraefikController: false + +database: + mode: cloudnativepg + cloudnativepg: + cluster: + name: pangolin-db + +cnpg-operator: + enabled: true + +cnpg-cluster: + enabled: true + fullnameOverride: pangolin-db + cluster: + instances: 1 + storage: + size: 8Gi + +pangolin: + secret: + existingSecretName: pangolin-app-secret + existingSecretKey: SERVER_SECRET + + config: + app: + dashboard_url: https://pangolin.example.com + domains: + domain1: + base_domain: example.com + cert_resolver: letsencrypt + gerbil: + base_endpoint: vpn.example.com + start_port: 51820 + clients_start_port: 21820 + traefik: + enabled: true + http_entrypoint: web + https_entrypoint: websecure + cert_resolver: letsencrypt + + ingressRoute: + dashboard: + enabled: true + host: pangolin.example.com + entryPoints: + - websecure + tls: + enabled: true + certResolver: letsencrypt + secretName: "" + +gerbil: + enabled: true + startupMode: delayed + persistence: + enabled: true + size: 1Gi +``` + +Important points: + +* Replace `pangolin.example.com`, `example.com`, and `vpn.example.com`. 
+* Keep `pangolin.config.gerbil.start_port` aligned with `gerbil.ports.wg1`. +* Keep `pangolin.config.gerbil.clients_start_port` aligned with `gerbil.ports.wg2`. +* Use `gerbil.startupMode=delayed` for the first install if Gerbil should not start before the initial Pangolin setup is complete. + +The chart defaults to `deployment.type=controller`, `deployment.mode=multi`, `database.mode=cloudnativepg`, and NetworkPolicy rendering enabled. Gerbil `startupMode` supports `normal`, `delayed`, and `disabledUntilSetup`. ([GitHub][1]) + +### Step 4: Install Pangolin + +Add the Helm repository: + +```bash +helm repo add fossorial https://charts.fossorial.io +helm repo update fossorial +``` + +Install Pangolin: + +```bash +helm upgrade --install pangolin fossorial/pangolin \ + --namespace pangolin \ + --values values-pangolin.yaml +``` + +Do not use `--create-namespace` here. The namespace was created and labeled before installation. + +### Step 5: Verify the deployment + +Check Helm release status: + +```bash +helm status pangolin --namespace pangolin +helm history pangolin --namespace pangolin +``` + +Check workloads: + +```bash +kubectl get pods --namespace pangolin +kubectl get deploy,statefulset --namespace pangolin +``` + +Check Services: + +```bash +kubectl get svc --namespace pangolin +``` + +Check Traefik `IngressRoute` resources: + +```bash +kubectl get ingressroute --namespace pangolin +``` + +If Traefik CRDs are not installed, this command will fail. In that case, install Traefik CRDs or enable/install the Traefik controller path required by your selected deployment mode. 
+ +Wait for the Pangolin pod to become ready: + +```bash +kubectl wait --for=condition=ready pod \ + -l app.kubernetes.io/name=pangolin \ + --namespace pangolin \ + --timeout=300s +``` + +## Accessing the dashboard + +After DNS and Traefik routing are configured, access Pangolin through the dashboard URL: + +```text +https://pangolin.example.com +``` + +The API route is exposed under: + +```text +https://pangolin.example.com/api/v1 +``` + + +For a temporary local check, port-forward the dashboard/UI port: + +```bash +kubectl port-forward --namespace pangolin svc/pangolin 8080:3002 +``` + +Then open: + +```text +http://localhost:8080 +``` + + + +The chart routes `/api/v1` to the Pangolin external/API port and the dashboard route to the Next/UI port. The default service ports are `3000` for external/API and `3002` for the dashboard/UI. ([GitHub][1]) + +## Switch Gerbil to normal startup + +If you installed with `gerbil.startupMode=delayed`, switch Gerbil to normal mode after the initial setup is complete: + +```bash +helm upgrade pangolin fossorial/pangolin \ + --namespace pangolin \ + --reuse-values \ + --set gerbil.startupMode=normal +``` + +Check Gerbil resources: + +```bash +kubectl get pods,svc,pvc --namespace pangolin \ + -l app.kubernetes.io/name=gerbil +``` + +## Upgrade + +Update the Helm repository: + +```bash +helm repo update fossorial +``` + +Upgrade the release: + +```bash +helm upgrade pangolin fossorial/pangolin \ + --namespace pangolin \ + --values values-pangolin.yaml +``` + +Check upgrade status: + +```bash +helm status pangolin --namespace pangolin +helm history pangolin --namespace pangolin +``` + +Rollback if needed: + +```bash +helm rollback pangolin --namespace pangolin +``` + +## OCI install + +The Pangolin chart is also published as an OCI chart in GHCR. 
+ +Pull the chart: + +```bash +helm pull oci://ghcr.io/fosrl/helm-charts/pangolin \ + --version 0.1.0-alpha.0 +``` + +Install from OCI: + +```bash +helm upgrade --install pangolin oci://ghcr.io/fosrl/helm-charts/pangolin \ + --version 0.1.0-alpha.0 \ + --namespace pangolin \ + --values values-pangolin.yaml +``` + +OCI changes where Helm pulls the chart from. It does not change the values file or the release behavior. + +## Architecture overview + +Recommended deployment mode: + +```yaml +deployment: + type: controller + mode: multi +``` + +In this topology: + +| Component | Role | +| -------------------------- | -------------------------------------------------------------------- | +| Pangolin | Main application, dashboard, API, authentication, and configuration. | +| pangolin-kube-controller | Reconciles dynamic Kubernetes and Traefik CRD configuration. | +| Gerbil | WireGuard tunnel manager for Pangolin sites. | +| Traefik | Routes dashboard, API, and site traffic. | +| CloudNativePG / PostgreSQL | Stores Pangolin application state. | + +Database modes: + +| Mode | Use case | +| --------------- | --------------------------------------------------- | +| `cloudnativepg` | Recommended Kubernetes production path. | +| `external` | Production path with externally managed PostgreSQL. | +| `embedded` | Lab or test setups. | +| `sqlite` | Development or CI only. | + +The chart supports `cloudnativepg`, `external`, `embedded`, and `sqlite` database modes. The chart comments mark `cloudnativepg` as the preferred production mode and SQLite as development/test only. ([GitHub][1]) + +## Chart signing + +The chart metadata includes Artifact Hub signing information: + +```text +Fingerprint: 48E7F670FCC13645FC48B08D587294B228C2EC2C +Public key: https://charts.fossorial.io/pgp_keys.asc +``` + +Use this metadata when verifying signed chart releases. The signing key and fingerprint are published in the chart annotations. 
([GitHub][2]) + +## References + + + + + + + + + +## Next steps + + + + Review Pangolin chart options. + + + Debug Pangolin deployment and routing issues. + + + Install Pangolin with rendered manifests and Kustomize overlays. + + + Deploy Pangolin with Argo CD or Flux. + + diff --git a/self-host/manual/kubernetes/pangolin/kustomize.mdx b/self-host/manual/kubernetes/pangolin/kustomize.mdx new file mode 100644 index 0000000..c9f1bf5 --- /dev/null +++ b/self-host/manual/kubernetes/pangolin/kustomize.mdx @@ -0,0 +1,708 @@ +--- +title: "Pangolin Kustomize" +description: "Deploy Pangolin on Kubernetes using Helm-rendered manifests and Kustomize overlays." +--- + +import PangolinCloudTocCta from "/snippets/pangolin-cloud-toc-cta.mdx"; + + + + +Use Kustomize when you want to manage Pangolin with rendered manifests, environment-specific overlays, and explicit patches in Git. + +For Pangolin, the supported Kustomize workflow is: + +1. Render the Pangolin Helm chart to manifests. +2. Use the rendered output as the Kustomize base. +3. Create overlays per environment. +4. Apply the overlay with `kubectl apply -k` or reconcile it with Argo CD or Flux. + +## When to use Kustomize for Pangolin + +Use Kustomize if you: + +- want environment-specific overlays for dev, staging, or production +- need explicit patches committed to Git +- prefer reviewing rendered Kubernetes manifests before applying them +- use Argo CD or Flux with Kustomize sources +- want to customize Helm-rendered output without forking the chart + +For a simpler single-environment setup, use [Pangolin Helm](/self-host/manual/kubernetes/pangolin/helm). + +## Version context + +This page is aligned with the Pangolin Helm chart `0.1.0-alpha.0`. 
+ +| Item | Value | +| --- | --- | +| Chart version | `0.1.0-alpha.0` | +| Pangolin app version | `1.18.2` | +| Kubernetes version | `>=1.30.14-0` | +| Gerbil image tag | `1.3.1` | +| pangolin-kube-controller image tag | `0.1.0-alpha.1` | +| Traefik image tag | `v3.6.15` | + +## Supported approach + +The Pangolin chart does not provide native Kustomize bases. Render the Helm chart first, then use Kustomize on the rendered manifests. + + +Do not manage the same Pangolin resources with both a live Helm release and Kustomize. Pick one ownership model per environment. + + +Recommended ownership model: + +- Use Helm only to render the Pangolin chart. +- Use Kustomize, Argo CD, or Flux to apply and reconcile the rendered manifests. +- Re-render the base when upgrading the chart or changing Helm values. + +## Example directory structure + +```text +pangolin-deployment/ +├── base/ +│ ├── kustomization.yaml +│ └── pangolin.yaml +├── overlays/ +│ ├── dev/ +│ │ ├── kustomization.yaml +│ │ └── patches/ +│ │ └── pangolin-resources.patch.yaml +│ ├── staging/ +│ │ ├── kustomization.yaml +│ │ └── patches/ +│ │ └── pangolin-resources.patch.yaml +│ └── prod/ +│ ├── kustomization.yaml +│ └── patches/ +│ ├── pangolin-resources.patch.yaml +│ └── ingressroute-host.patch.yaml +└── values/ + ├── values-base.yaml + ├── values-dev.yaml + ├── values-staging.yaml + └── values-prod.yaml +``` + +## Step 1: Create the namespace + +Create the namespace before applying rendered manifests: + +```bash +kubectl create namespace pangolin +``` + +Gerbil requires `NET_ADMIN` for WireGuard interface management. 
If your cluster enforces Pod Security Admission, label the namespace before creating workloads: + +```bash +kubectl label namespace pangolin \ + pod-security.kubernetes.io/enforce=privileged \ + pod-security.kubernetes.io/warn=baseline \ + pod-security.kubernetes.io/audit=restricted \ + --overwrite +``` + + +Do not use a restricted Pod Security profile for a namespace running Gerbil unless you have validated the selected chart mode. Gerbil requires `NET_ADMIN`. + + +## Step 2: Create the Pangolin app Secret + +Create a Secret for `SERVER_SECRET` with a randomly generated value: + +```bash +kubectl create secret generic pangolin-app-secret \ + --namespace pangolin \ + --from-literal=SERVER_SECRET="$(openssl rand -base64 48)" +``` + +Do not commit this Secret to Git. + +## Step 3: Create base values + +Create `values/values-base.yaml`: + +```yaml +deployment: + type: controller + mode: multi + installTraefikController: false + +database: + mode: cloudnativepg + cloudnativepg: + cluster: + name: pangolin-db + +cnpg-operator: + enabled: true + +cnpg-cluster: + enabled: true + fullnameOverride: pangolin-db + cluster: + instances: 1 + storage: + size: 8Gi + +pangolin: + secret: + existingSecretName: pangolin-app-secret + existingSecretKey: SERVER_SECRET + + config: + app: + dashboard_url: https://pangolin.example.com + domains: + domain1: + base_domain: example.com + cert_resolver: letsencrypt + gerbil: + base_endpoint: vpn.example.com + start_port: 51820 + clients_start_port: 21820 + traefik: + enabled: true + http_entrypoint: web + https_entrypoint: websecure + cert_resolver: letsencrypt + + ingressRoute: + dashboard: + enabled: true + host: pangolin.example.com + entryPoints: + - websecure + tls: + enabled: true + certResolver: letsencrypt + secretName: "" + +gerbil: + enabled: true + startupMode: delayed + persistence: + enabled: true + size: 1Gi +``` + +Replace: + +* `pangolin.example.com` +* `example.com` +* `vpn.example.com` +* TLS resolver names +* storage settings + + +Use `gerbil.startupMode=delayed` for the first 
install if Gerbil should not start before the initial Pangolin setup is complete. Switch it to `normal` after setup. + + +## Step 4: Render Pangolin to the base + +Add and update the Helm repository: + +```bash +helm repo add fossorial https://charts.fossorial.io +helm repo update fossorial +``` + +Create directories: + +```bash +mkdir -p base overlays/dev/patches overlays/staging/patches overlays/prod/patches values +``` + +Render the Pangolin chart: + +```bash +helm template pangolin fossorial/pangolin \ + --namespace pangolin \ + --values values/values-base.yaml \ + > base/pangolin.yaml +``` + +You can also render from the GHCR OCI chart: + +```bash +helm template pangolin oci://ghcr.io/fosrl/helm-charts/pangolin \ + --version 0.1.0-alpha.0 \ + --namespace pangolin \ + --values values/values-base.yaml \ + > base/pangolin.yaml +``` + +## Step 5: Create the base kustomization + +```yaml +# base/kustomization.yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +resources: + - pangolin.yaml +``` + + +The namespace is already rendered by Helm through `--namespace pangolin`. You can also set `namespace: pangolin` in Kustomize, but avoid changing namespaces in overlays unless you have verified all rendered resources and references. + + +## Step 6: Inspect rendered resource names + +Before writing patches, inspect the generated resource names: + +```bash +kustomize build base | grep -E "^(kind:| name:)" +``` + +Or list the main resource names with `yq`: + +```bash +kustomize build base | yq '. | select(.kind == "Deployment" or .kind == "StatefulSet" or .kind == "IngressRoute" or .kind == "Service") | .kind + " " + .metadata.name' +``` + + +Do not assume generated resource names. Helm names can change with the release name, chart name, `nameOverride`, or `fullnameOverride`. + + +Use the actual rendered names in your patch targets. 
+ +## Step 7: Create a production overlay + +Example `overlays/prod/kustomization.yaml`: + +```yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +resources: + - ../../base + +labels: + - pairs: + app.kubernetes.io/environment: production + app.kubernetes.io/managed-by: kustomize + +patches: + - path: patches/pangolin-resources.patch.yaml + target: + group: apps + version: v1 + kind: Deployment + name: pangolin + + - path: patches/ingressroute-host.patch.yaml + target: + group: traefik.io + version: v1alpha1 + kind: IngressRoute + name: pangolin-dashboard +``` + + +Replace `pangolin` and `pangolin-dashboard` with the actual names from your rendered manifests. + + +## Step 8: Add patches + +### Patch Pangolin resources + +```yaml +# overlays/prod/patches/pangolin-resources.patch.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: pangolin +spec: + template: + spec: + containers: + - name: pangolin + resources: + requests: + cpu: 500m + memory: 512Mi + limits: + memory: 1Gi +``` + + +CPU limits are rendered by default through the chart's `resourcesPolicy.cpuLimits.enabled=true`. If you disable CPU limits in chart values, keep your Kustomize patches consistent with that policy. + + +### Patch dashboard IngressRoute host + +The Pangolin chart uses Traefik `IngressRoute` for the dashboard and API in controller mode, not a standard Kubernetes `Ingress`. + +```yaml +# overlays/prod/patches/ingressroute-host.patch.yaml +apiVersion: traefik.io/v1alpha1 +kind: IngressRoute +metadata: + name: pangolin-dashboard +spec: + routes: + - kind: Rule + match: Host(`pangolin-prod.example.com`) && PathPrefix(`/api/v1`) + - kind: Rule + match: Host(`pangolin-prod.example.com`) +``` + + +Patch the rendered `IngressRoute` only after checking the route order and match rules. The API route and dashboard route target different service ports. 
+ + +### Patch node affinity + +```yaml +# overlays/prod/patches/pangolin-node-affinity.patch.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: pangolin +spec: + template: + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-type + operator: In + values: + - production +``` + +Reference it in `overlays/prod/kustomization.yaml`: + +```yaml +patches: + - path: patches/pangolin-node-affinity.patch.yaml + target: + group: apps + version: v1 + kind: Deployment + name: pangolin +``` + +### Patch Gerbil startup mode + +For first install, this should usually be handled in Helm values before rendering. If you still need to patch rendered manifests, inspect the generated Deployment first. + +To switch Gerbil from delayed to normal mode, prefer updating values and re-rendering: + +```yaml +gerbil: + startupMode: normal +``` + +Then re-render: + +```bash +helm template pangolin fossorial/pangolin \ + --namespace pangolin \ + --values values/values-base.yaml \ + > base/pangolin.yaml +``` + +## Do not rename rendered Helm resources by default + +Avoid Kustomize options such as `namePrefix` and `nameSuffix` for Helm-rendered bases unless you have verified every generated reference. + +Renaming rendered resources can break: + +* Service selectors +* Secret references +* ConfigMap references +* ServiceAccount references +* NetworkPolicy selectors +* Traefik `IngressRoute` service references +* Prometheus monitor selectors +* CloudNativePG references + +If you need different resource names, prefer changing the Helm release name or chart naming values before rendering. 
+ +## Apply the overlay + +Preview the rendered output: + +```bash +kustomize build overlays/prod +``` + +Compare with the live cluster: + +```bash +kustomize build overlays/prod | kubectl diff -f - +``` + +Apply the overlay: + +```bash +kubectl apply -k overlays/prod +``` + +Verify workloads: + +```bash +kubectl get pods --namespace pangolin +kubectl get deploy,statefulset --namespace pangolin +kubectl get svc --namespace pangolin +``` + +Verify Traefik resources: + +```bash +kubectl get ingressroute --namespace pangolin +``` + +Check events: + +```bash +kubectl get events --namespace pangolin --sort-by=.lastTimestamp +``` + +## Updating the rendered base + +When upgrading the Pangolin chart or changing Helm values, re-render the base and review the changes. + +Update the Helm repository: + +```bash +helm repo update fossorial +``` + +Render the updated chart output: + +```bash +helm template pangolin fossorial/pangolin \ + --namespace pangolin \ + --values values/values-base.yaml \ + > base/pangolin.yaml +``` + +Or with OCI: + +```bash +helm template pangolin oci://ghcr.io/fosrl/helm-charts/pangolin \ + --version 0.1.0-alpha.0 \ + --namespace pangolin \ + --values values/values-base.yaml \ + > base/pangolin.yaml +``` + +Validate the overlay: + +```bash +kustomize build overlays/prod +``` + +Review the diff: + +```bash +git diff +kustomize build overlays/prod | kubectl diff -f - +``` + +Commit the updated base and overlays: + +```bash +git add base/ overlays/ values/ +git commit -m "Update Pangolin rendered manifests" +``` + +Apply after review: + +```bash +kubectl apply -k overlays/prod +``` + +## Ownership model + +Do not run `helm upgrade` against a release that is managed by Kustomize. 
+ +Avoid this pattern: + +```bash +helm upgrade pangolin fossorial/pangolin --namespace pangolin +kubectl apply -k overlays/prod +``` + +Use one of these models instead: + +| Model | Description | +| ----------------- | ------------------------------------------------------------------------------------------ | +| Helm-managed | Helm installs and upgrades the live release. Kustomize is not used for the same resources. | +| Kustomize-managed | Helm renders manifests only. Kustomize applies and owns the live resources. | +| GitOps-managed | Argo CD or Flux applies the Kustomize overlay and owns reconciliation. | + +## Common Kustomize patches for Pangolin + +### Patch resource requests and limits + +```yaml +patches: + - path: patches/pangolin-resources.patch.yaml + target: + group: apps + version: v1 + kind: Deployment + name: pangolin +``` + +### Patch IngressRoute host + +```yaml +patches: + - path: patches/ingressroute-host.patch.yaml + target: + group: traefik.io + version: v1alpha1 + kind: IngressRoute + name: pangolin-dashboard +``` + +### Add annotations + +```yaml +patches: + - target: + group: apps + version: v1 + kind: Deployment + name: pangolin + patch: |- + - op: add + path: /metadata/annotations + value: + example.com/owner: platform +``` + +### Patch Gerbil Service type + +Patch the Gerbil Service only after checking the rendered Service name. + +```yaml +patches: + - target: + version: v1 + kind: Service + name: pangolin-gerbil + patch: |- + - op: replace + path: /spec/type + value: LoadBalancer +``` + + +For important topology settings such as database mode, Gerbil ports, `startupMode`, Traefik mode, and CloudNativePG settings, prefer changing Helm values and re-rendering instead of patching rendered YAML. 
+ + +## Validation + +Validate Kustomize output: + +```bash +kustomize build overlays/prod +``` + +Run a server-side dry run: + +```bash +kustomize build overlays/prod | kubectl apply -f - --dry-run=server +``` + +Preview live changes: + +```bash +kustomize build overlays/prod | kubectl diff -f - +``` + +If a patch does not apply, inspect generated resource names: + +```bash +kustomize build base | grep -E "^(kind:| name:)" +``` + +## Kustomize with GitOps + +Kustomize overlays work well with GitOps tools. + +* [Argo CD](/self-host/manual/kubernetes/gitops/argocd) can reconcile a Kustomize overlay path directly. +* [Flux](/self-host/manual/kubernetes/gitops/flux) can reconcile a Kustomize overlay with a `Kustomization` resource. + +In GitOps workflows, the controller owns the apply operation. Do not also apply the same overlay manually unless you are debugging. + +## Troubleshooting + +### The patch does not apply + +Check the rendered resource name and kind: + +```bash +kustomize build base | grep -E "^(kind:| name:)" +``` + +Then verify the patch target in your overlay. + +### The pod does not start + +Check pod status and events: + +```bash +kubectl get pods --namespace pangolin +kubectl describe pod --namespace pangolin +kubectl get events --namespace pangolin --sort-by=.lastTimestamp +``` + +### Dashboard routing does not work + +Check the rendered and applied `IngressRoute`: + +```bash +kubectl get ingressroute --namespace pangolin +kubectl describe ingressroute --namespace pangolin +``` + +Verify: + +* Traefik CRDs are installed. +* A Traefik controller is watching the namespace and labels. +* `pangolin.ingressRoute.dashboard.host` or the patched host matches DNS. +* The API route still contains `PathPrefix(/api/v1)`. +* TLS settings match your Traefik setup. 
+ +### Gerbil does not start + +Check Gerbil resources: + +```bash +kubectl get pods,svc,pvc --namespace pangolin \ + -l app.kubernetes.io/name=gerbil +``` + +Verify: + +* namespace allows `NET_ADMIN` +* `gerbil.startupMode` is set correctly +* Gerbil persistence is enabled or intentionally disabled +* `pangolin.config.gerbil.start_port` matches `gerbil.ports.wg1` +* `pangolin.config.gerbil.clients_start_port` matches `gerbil.ports.wg2` + +## Next steps + + + + Install Pangolin with Helm. + + + Review Pangolin chart options. + + + Debug Pangolin deployment and routing issues. + + + Deploy Pangolin with Argo CD or Flux. + + diff --git a/self-host/manual/kubernetes/pangolin/troubleshooting.mdx b/self-host/manual/kubernetes/pangolin/troubleshooting.mdx new file mode 100644 index 0000000..91eb0be --- /dev/null +++ b/self-host/manual/kubernetes/pangolin/troubleshooting.mdx @@ -0,0 +1,888 @@ +--- +title: "Pangolin Troubleshooting" +description: "Diagnose and resolve Pangolin Kubernetes deployment issues." +--- + +import PangolinCloudTocCta from "/snippets/pangolin-cloud-toc-cta.mdx"; + + + + +This page covers troubleshooting Pangolin Kubernetes deployments across Helm, Kustomize, Argo CD, and Flux workflows. + +Start with the core checks, then use the section that matches the symptom. 
+ +## Core diagnostics + +Set the namespace and release name used by your installation: + +```bash +export PANGOLIN_NAMESPACE=pangolin +export PANGOLIN_RELEASE=pangolin +``` + +### Helm diagnostics + +Check the release: + +```bash +helm status "$PANGOLIN_RELEASE" --namespace "$PANGOLIN_NAMESPACE" +helm history "$PANGOLIN_RELEASE" --namespace "$PANGOLIN_NAMESPACE" +helm get values "$PANGOLIN_RELEASE" --namespace "$PANGOLIN_NAMESPACE" --all +``` + +Render the chart locally with your values file: + +```bash +helm repo update fossorial + +helm template "$PANGOLIN_RELEASE" fossorial/pangolin \ + --namespace "$PANGOLIN_NAMESPACE" \ + --values values-pangolin.yaml +``` + +Preview an upgrade: + +```bash +helm upgrade "$PANGOLIN_RELEASE" fossorial/pangolin \ + --namespace "$PANGOLIN_NAMESPACE" \ + --values values-pangolin.yaml \ + --dry-run +``` + + +`helm lint charts/pangolin` is only useful when you are working inside the Helm chart repository. For normal installs, use `helm template` and `helm upgrade --dry-run`. + + +### Kubernetes diagnostics + +Check workloads and events: + +```bash +kubectl get pods --namespace "$PANGOLIN_NAMESPACE" +kubectl get deploy,statefulset,job,cronjob --namespace "$PANGOLIN_NAMESPACE" +kubectl get events --namespace "$PANGOLIN_NAMESPACE" --sort-by=.lastTimestamp +``` + +Inspect a pod: + +```bash +kubectl describe pod --namespace "$PANGOLIN_NAMESPACE" +kubectl logs --namespace "$PANGOLIN_NAMESPACE" -l app.kubernetes.io/name=pangolin --all-containers --tail=200 +``` + +Check services, PVCs, and policies: + +```bash +kubectl get svc,pvc,secret,configmap --namespace "$PANGOLIN_NAMESPACE" +kubectl get networkpolicy --namespace "$PANGOLIN_NAMESPACE" +``` + +### Traefik diagnostics + +In controller mode, the chart uses Traefik CRDs such as `IngressRoute`. 
+ +Check whether Traefik CRDs are installed: + +```bash +kubectl get crd | grep traefik +``` + +Check rendered or applied Traefik resources: + +```bash +kubectl get ingressroute --namespace "$PANGOLIN_NAMESPACE" +kubectl describe ingressroute --namespace "$PANGOLIN_NAMESPACE" +``` + +Depending on your Traefik setup, also check: + +```bash +kubectl get middleware,tlsoption,traefikservice --namespace "$PANGOLIN_NAMESPACE" +``` + + +`kubectl get ingress` is only useful if your selected deployment mode renders standard Kubernetes Ingress resources. In controller mode, use `IngressRoute`. + + +### Database diagnostics + +If you use CloudNativePG, first check that the CRD exists: + +```bash +kubectl get crd | grep postgresql.cnpg.io +``` + +Then check CNPG resources: + +```bash +kubectl get cluster --namespace "$PANGOLIN_NAMESPACE" +kubectl describe cluster --namespace "$PANGOLIN_NAMESPACE" +kubectl get pods --namespace "$PANGOLIN_NAMESPACE" | grep -E 'pangolin-db|postgres' +kubectl get secret --namespace "$PANGOLIN_NAMESPACE" | grep -E 'pangolin-db|postgres' +``` + +If you use external PostgreSQL, verify the connection Secret: + +```bash +kubectl get secret --namespace "$PANGOLIN_NAMESPACE" +kubectl describe secret --namespace "$PANGOLIN_NAMESPACE" +``` + +Do not decode and paste database credentials into logs, screenshots, or issue reports. + +## Common issues and solutions + + + + + +**Symptoms** + +* Gerbil pod crashes during a fresh install. +* Logs mention missing setup data, missing exit node, or tunnel configuration not being ready. +* Pangolin itself is not initialized yet. + +**Cause** + +On first install, Gerbil may start before Pangolin has completed the initial setup. The chart supports `gerbil.startupMode` for this case. 
+ +**Resolution** + +Use delayed startup for the first install: + +```yaml +gerbil: + startupMode: delayed +``` + +Install or upgrade with the values file: + +```bash +helm upgrade --install "$PANGOLIN_RELEASE" fossorial/pangolin \ + --namespace "$PANGOLIN_NAMESPACE" \ + --values values-pangolin.yaml +``` + +After Pangolin setup is complete, switch Gerbil to normal startup: + +```bash +helm upgrade "$PANGOLIN_RELEASE" fossorial/pangolin \ + --namespace "$PANGOLIN_NAMESPACE" \ + --reuse-values \ + --set gerbil.startupMode=normal +``` + +Check Gerbil resources: + +```bash +kubectl get pods,svc,pvc --namespace "$PANGOLIN_NAMESPACE" \ + -l app.kubernetes.io/name=gerbil +``` + + + + + +**Symptoms** + +* Gerbil pod does not start. +* Events mention Pod Security Admission. +* Events mention forbidden capabilities. +* Logs or events mention `NET_ADMIN`. + +**Cause** + +Gerbil requires the `NET_ADMIN` Linux capability for WireGuard interface management. A namespace using a restricted Pod Security profile can block this. + +**Resolution** + +Check namespace labels: + +```bash +kubectl get namespace "$PANGOLIN_NAMESPACE" --show-labels +``` + +For a namespace running Gerbil, use a policy profile that allows the required capability. Example: + +```bash +kubectl label namespace "$PANGOLIN_NAMESPACE" \ + pod-security.kubernetes.io/enforce=privileged \ + pod-security.kubernetes.io/warn=baseline \ + pod-security.kubernetes.io/audit=restricted \ + --overwrite +``` + +Then restart the affected pods: + +```bash +kubectl rollout restart deploy --namespace "$PANGOLIN_NAMESPACE" +``` + + +Do not use a restricted Pod Security profile for Gerbil unless you have validated the selected chart mode and security context. Removing `NET_ADMIN` breaks WireGuard management. + + + + + + +**Symptoms** + +* The dashboard URL does not load. +* Browser shows timeout, bad gateway, 404, or TLS error. +* API path `/api/v1` fails while the dashboard path works, or the reverse. 
+ +**Common causes** + +* DNS points to the wrong load balancer or ingress endpoint. +* Traefik CRDs are missing. +* Traefik controller is not watching the namespace or selector labels. +* `IngressRoute` host does not match the dashboard URL. +* API route was changed and no longer matches `PathPrefix(/api/v1)`. +* TLS resolver or TLS Secret is misconfigured. + +**Checks** + +Check DNS: + +```bash +nslookup pangolin.example.com +``` + +Check Traefik CRDs: + +```bash +kubectl get crd | grep traefik +``` + +Check IngressRoute resources: + +```bash +kubectl get ingressroute --namespace "$PANGOLIN_NAMESPACE" +kubectl describe ingressroute --namespace "$PANGOLIN_NAMESPACE" +``` + +Check the rendered values: + +```bash +helm get values "$PANGOLIN_RELEASE" --namespace "$PANGOLIN_NAMESPACE" --all | grep -A30 ingressRoute +``` + +Check Traefik logs. Adjust the namespace and label selector to your Traefik installation: + +```bash +kubectl logs --namespace traefik -l app.kubernetes.io/name=traefik --tail=100 +``` + +Temporary local check for the dashboard/UI service port: + +```bash +kubectl port-forward --namespace "$PANGOLIN_NAMESPACE" svc/pangolin 8080:3002 +``` + +Then open: + +```text +http://localhost:8080 +``` + + +The dashboard/UI port is `3002`. The API/external port is `3000`. Port-forward `3002` when checking the dashboard locally. + + + + + + +**Symptoms** + +* `IngressRoute` is created but TLS does not work. +* Traefik logs mention TLS configuration problems. +* Certificate is not issued or the TLS Secret is not found. + +**Cause** + +The dashboard `IngressRoute` TLS configuration should use either a Traefik certificate resolver or an existing TLS Secret. 
+ +**Resolution** + +Use Traefik ACME certificate resolver: + +```yaml +pangolin: + ingressRoute: + dashboard: + tls: + enabled: true + certResolver: letsencrypt + secretName: "" +``` + +Or use an existing TLS Secret: + +```yaml +pangolin: + ingressRoute: + dashboard: + tls: + enabled: true + certResolver: "" + secretName: pangolin-dashboard-tls +``` + +Verify the Secret if using `secretName`: + +```bash +kubectl get secret pangolin-dashboard-tls --namespace "$PANGOLIN_NAMESPACE" +``` + + +`certResolver` is a Traefik ACME resolver setting. It is not a cert-manager issuer reference. + + + + + + +**Symptoms** + +* Newt shows repeated connection or tunnel errors. +* Tunnel traffic does not pass. +* WireGuard UDP ports are unreachable from the Newt location. + +**Common causes** + +* `pangolin.config.gerbil.base_endpoint` points to the wrong host. +* Gerbil Service is not exposed as expected. +* External firewall blocks UDP traffic. +* NetworkPolicy blocks the required traffic. +* `pangolin.config.gerbil.start_port` and `gerbil.ports.wg1` are not aligned. +* `pangolin.config.gerbil.clients_start_port` and `gerbil.ports.wg2` are not aligned. + +**Checks** + +Check Gerbil Service: + +```bash +kubectl get svc --namespace "$PANGOLIN_NAMESPACE" \ + -l app.kubernetes.io/name=gerbil + +kubectl describe svc --namespace "$PANGOLIN_NAMESPACE" +``` + +Check Gerbil values: + +```bash +helm get values "$PANGOLIN_RELEASE" --namespace "$PANGOLIN_NAMESPACE" --all | grep -A30 gerbil +``` + +Check NetworkPolicies: + +```bash +kubectl get networkpolicy --namespace "$PANGOLIN_NAMESPACE" +kubectl describe networkpolicy --namespace "$PANGOLIN_NAMESPACE" +``` + +Verify external firewall rules for the configured UDP ports. + + + + + +**Symptoms** + +* Pangolin pod crashes. +* Logs mention database connection errors. +* Events mention missing Secret or missing Secret key. 
+ +**Cause** + +`database.mode=external` needs a valid database connection Secret unless the chart is configured to generate one from values. + +**Resolution** + +Create a connection Secret: + +```bash +kubectl create secret generic pangolin-db-connection \ + --namespace "$PANGOLIN_NAMESPACE" \ + --from-literal=connectionString='postgresql://pangolin:password@postgres.example.com:5432/pangolin?sslmode=require' +``` + +Reference it in values: + +```yaml +database: + mode: external + connection: + existingSecretName: pangolin-db-connection + existingSecretKey: connectionString +``` + +Check the Secret: + +```bash +kubectl describe secret pangolin-db-connection --namespace "$PANGOLIN_NAMESPACE" +``` + + +Do not put database passwords directly in values files for production. Use an existing Secret or your normal secret-management workflow. + + + + + + +**Symptoms** + +* CNPG Cluster resource is missing. +* CNPG pods do not start. +* Pangolin cannot connect to the generated CNPG database. +* Secret such as `pangolin-db-app` is missing. + +**Common causes** + +* CloudNativePG CRDs/operator are not installed. +* `cnpg-cluster.enabled` is false when you expected the chart to create a cluster. +* `cnpg-operator.enabled` is false and no operator exists. +* `database.cloudnativepg.cluster.name` does not match the CNPG cluster name. +* StorageClass or PVC provisioning fails. 
+ +**Checks** + +Check CRDs: + +```bash +kubectl get crd | grep postgresql.cnpg.io +``` + +Check CNPG operator pods: + +```bash +kubectl get pods --all-namespaces | grep -i cnpg +``` + +Check CNPG Cluster: + +```bash +kubectl get cluster --namespace "$PANGOLIN_NAMESPACE" +kubectl describe cluster pangolin-db --namespace "$PANGOLIN_NAMESPACE" +``` + +Check PVCs and Secrets: + +```bash +kubectl get pvc --namespace "$PANGOLIN_NAMESPACE" +kubectl get secret --namespace "$PANGOLIN_NAMESPACE" | grep pangolin-db +``` + +Expected naming when using the default example: + +```yaml +database: + cloudnativepg: + cluster: + name: pangolin-db + +cnpg-cluster: + enabled: true + fullnameOverride: pangolin-db +``` + + + + + +**Symptoms** + +* DNS lookups fail. +* Pangolin cannot connect to the database. +* Controller cannot reach the Kubernetes API. +* Gerbil or Newt traffic does not work. +* External services such as SMTP, OIDC, or webhooks time out. + +**Cause** + +The chart can render NetworkPolicies. If your CNI enforces them, missing egress or ingress rules can break required paths. + +**Checks** + +```bash +kubectl get networkpolicy --namespace "$PANGOLIN_NAMESPACE" +kubectl describe networkpolicy --namespace "$PANGOLIN_NAMESPACE" +``` + +Check whether DNS is allowed: + +```yaml +networkPolicy: + dns: + enabled: true +``` + +Check database egress: + +```yaml +networkPolicy: + database: + enabled: true + port: 5432 +``` + +Check controller API access: + +```yaml +networkPolicy: + controller: + egress: + enabled: true + kubernetesApi: + enabled: true + port: 443 +``` + +For external integrations, add scoped egress rules for the required services instead of allowing broad egress. + +For a temporary isolation test, disable NetworkPolicy and re-apply: + +```yaml +networkPolicy: + enabled: false +``` + +If this fixes the issue, re-enable policies and add the missing rules. + + + + + +**Symptoms** + +* Pangolin pod restarts repeatedly. +* Pod stays Pending. 
+* Readiness never becomes true. + +**Checks** + +Find the pod: + +```bash +kubectl get pods --namespace "$PANGOLIN_NAMESPACE" \ + -l app.kubernetes.io/name=pangolin +``` + +Inspect it: + +```bash +kubectl describe pod --namespace "$PANGOLIN_NAMESPACE" +kubectl logs --namespace "$PANGOLIN_NAMESPACE" -l app.kubernetes.io/name=pangolin --tail=200 +kubectl logs --namespace "$PANGOLIN_NAMESPACE" -l app.kubernetes.io/name=pangolin --previous --tail=200 +``` + +Check PVCs: + +```bash +kubectl get pvc --namespace "$PANGOLIN_NAMESPACE" +kubectl describe pvc --namespace "$PANGOLIN_NAMESPACE" +``` + +Common causes: + +| Status | Common causes | +| ------------------ | ----------------------------------------------------------------------------------------------------- | +| `CrashLoopBackOff` | Database connection issue, missing Secret, invalid config, startup dependency not ready | +| `Pending` | PVC not bound, insufficient resources, node selector/affinity mismatch, Pod Security policy rejection | +| `ImagePullBackOff` | Wrong image override, registry access issue, missing imagePullSecret | + + +Do not assume tools such as `psql`, `curl`, or `dig` are available inside the Pangolin container. Use logs, Events, or a temporary debug pod when needed. + + +Run a temporary debug pod for network tests: + +```bash +kubectl run net-debug \ + --namespace "$PANGOLIN_NAMESPACE" \ + --rm -it \ + --image=curlimages/curl:latest \ + --restart=Never \ + -- sh +``` + + + + + +**Symptoms** + +* Helm template or install succeeds, but Traefik resources are not reconciled. +* `kubectl get ingressroute` fails with unknown resource type. +* Argo CD or Flux reports missing kind `IngressRoute`. + +**Cause** + +Controller mode expects Traefik CRDs and a Traefik controller. They must be installed separately or through the bundled dependency when enabled. 
+ +**Checks** + +```bash +kubectl get crd | grep traefik +kubectl get pods --all-namespaces | grep -i traefik +``` + +If you want the chart to install the bundled Traefik controller, enable it: + +```yaml +deployment: + type: controller + installTraefikController: true +``` + +If Traefik is already installed elsewhere, keep it disabled and make sure the controller watches the namespace and labels used by the Pangolin `IngressRoute`. + + + + + +**Symptoms** + +* `helm upgrade` fails. +* Rendered resources changed unexpectedly. +* Existing resources conflict with chart-managed resources. +* GitOps reports immutable field changes or ownership conflicts. + +**Checks** + +Render before upgrading: + +```bash +helm template "$PANGOLIN_RELEASE" fossorial/pangolin \ + --namespace "$PANGOLIN_NAMESPACE" \ + --values values-pangolin.yaml > rendered.yaml +``` + +Run a server-side dry run: + +```bash +kubectl apply -f rendered.yaml --dry-run=server +``` + +Compare the current live release: + +```bash +helm get manifest "$PANGOLIN_RELEASE" --namespace "$PANGOLIN_NAMESPACE" > live-release.yaml +diff -u live-release.yaml rendered.yaml +``` + +Check ownership conflicts: + +```bash +kubectl get all --namespace "$PANGOLIN_NAMESPACE" -o yaml | grep -E "meta.helm.sh|app.kubernetes.io/managed-by" +``` + +Avoid `--force` unless you understand which resources will be recreated. + + +`helm upgrade --force` can delete and recreate resources. That can interrupt traffic and may affect persistent workloads depending on the resource type. + + + + + + +**Symptoms** + +* Kustomize build succeeds but changes are missing. +* Patch target does not match any resource. +* Patch breaks after chart upgrade. 
+ +**Checks** + +List generated resource names: + +```bash +kustomize build base | grep -E "^(kind:| name:)" +``` + +Validate the overlay: + +```bash +kustomize build overlays/prod +``` + +Run a server-side dry run: + +```bash +kustomize build overlays/prod | kubectl apply -f - --dry-run=server +``` + +Preview live changes: + +```bash +kustomize build overlays/prod | kubectl diff -f - +``` + +Use modern Kustomize `patches` syntax: + +```yaml +patches: + - path: patches/pangolin-resources.patch.yaml + target: + group: apps + version: v1 + kind: Deployment + name: pangolin +``` + + +For Helm-rendered bases, do not assume resource names. Check the rendered manifests after each chart upgrade. + + + + + + +**Symptoms** + +* Argo CD Application is OutOfSync or Degraded. +* Flux HelmRelease or Kustomization is not Ready. +* Resources are missing or constantly reverted. + +**Argo CD checks** + +```bash +kubectl describe application pangolin --namespace argocd +kubectl logs --namespace argocd deployment/argocd-application-controller --tail=100 +argocd app diff pangolin +argocd app sync pangolin +``` + +**Flux checks** + +```bash +flux get sources all --all-namespaces +flux get helmreleases --all-namespaces +flux get kustomizations --all-namespaces +flux logs --all-namespaces --follow +``` + +Reconcile manually: + +```bash +flux reconcile helmrelease pangolin --namespace "$PANGOLIN_NAMESPACE" +flux reconcile kustomization pangolin --namespace flux-system +``` + +Common causes: + +- chart repository or OCI source not reachable +- wrong chart version +- missing CRDs +- invalid values +- rendered resource ownership conflict +- Secret not available in the expected namespace + + + + + +## Routing issues to the right repository + +Use the repository that matches the failing area: + +| Area | Repository | +| ----------------------------------------------------- | ------------------- | +| Chart templates, values, examples, rendered manifests | `fosrl/helm-charts` | +| Pangolin 
runtime, API, UI, auth, application behavior | `fosrl/pangolin` | +| Newt client behavior or connectivity | `fosrl/newt` | +| Documentation | `fosrl/docs-v2` | + +## Before opening an issue, collect + +Collect this information before opening an issue: + +* chart version +* Pangolin app version +* Kubernetes version +* Helm version +* deployment method: Helm, Kustomize, Argo CD, or Flux +* sanitized values file +* pod logs +* namespace events +* Traefik logs, if routing is involved +* rendered manifests from `helm template` or `kustomize build` +* Helm release status or GitOps sync status +* reproduction steps + +Collect basic diagnostics: + +```bash +kubectl version +helm version + +helm status "$PANGOLIN_RELEASE" --namespace "$PANGOLIN_NAMESPACE" +helm get values "$PANGOLIN_RELEASE" --namespace "$PANGOLIN_NAMESPACE" --all > pangolin-values.yaml +helm get manifest "$PANGOLIN_RELEASE" --namespace "$PANGOLIN_NAMESPACE" > pangolin-manifest.yaml + +kubectl get pods --namespace "$PANGOLIN_NAMESPACE" -o wide > pangolin-pods.txt +kubectl get events --namespace "$PANGOLIN_NAMESPACE" --sort-by=.lastTimestamp > pangolin-events.txt +``` + +Before sharing diagnostics, remove: + +* database passwords +* `SERVER_SECRET` +* API keys +* OAuth/OIDC client secrets +* TLS private keys +* internal hostnames, if sensitive + +## Useful command reference + +```bash +# General cluster info +kubectl cluster-info +kubectl version + +# Namespace overview +kubectl get all --namespace "$PANGOLIN_NAMESPACE" +kubectl get pvc,secret,configmap --namespace "$PANGOLIN_NAMESPACE" +kubectl get events --namespace "$PANGOLIN_NAMESPACE" --sort-by=.lastTimestamp + +# Logs +kubectl logs --namespace "$PANGOLIN_NAMESPACE" \ + -l app.kubernetes.io/name=pangolin \ + --tail=200 + +kubectl logs --namespace "$PANGOLIN_NAMESPACE" \ + -l app.kubernetes.io/name=gerbil \ + --tail=200 + +# Dashboard local test +kubectl port-forward --namespace "$PANGOLIN_NAMESPACE" svc/pangolin 8080:3002 + +# Traefik resources 
+kubectl get ingressroute --namespace "$PANGOLIN_NAMESPACE" + +# Resource usage +kubectl top pod --namespace "$PANGOLIN_NAMESPACE" +kubectl top node +``` + +## Next steps + + + + Review Pangolin chart options. + + + Install Pangolin with Helm. + + + Install Pangolin with rendered manifests and Kustomize overlays. + + + Deploy Pangolin with Argo CD or Flux. + + diff --git a/self-host/manual/kubernetes/prerequisites.mdx b/self-host/manual/kubernetes/prerequisites.mdx new file mode 100644 index 0000000..77edf3c --- /dev/null +++ b/self-host/manual/kubernetes/prerequisites.mdx @@ -0,0 +1,426 @@ +--- +title: "Prerequisites" +description: "Cluster requirements, tools, and setup needed to deploy Pangolin and Newt on Kubernetes." +--- + +import PangolinCloudTocCta from "/snippets/pangolin-cloud-toc-cta.mdx"; + + + + +Before installing Pangolin or Newt on Kubernetes, check that your cluster, local tools, networking, and storage setup are ready. + +## Cluster requirements + +### Kubernetes version + +The Pangolin and Newt Helm charts currently require Kubernetes **1.30.14 or newer**. + +Use a Kubernetes version that is both: + +- supported by your Kubernetes provider or distribution +- compatible with the chart requirement + +Kubernetes 1.30 satisfies the chart minimum, but it is no longer a supported upstream Kubernetes release. For production, use a currently supported Kubernetes minor release whenever possible. + +Check your cluster version: + +```bash +kubectl version +``` + + +For the exact version support, refer to the Pangolin and Newt Helm chart READMEs before installing or upgrading. + + +### Cluster access + +You need `kubectl` access to the target cluster. + +The user or service account used for installation must be able to create and manage the resources required by the selected install method. 
+ +For a standard Helm install, this usually includes: + +* namespaces +* deployments and statefulsets +* services +* configmaps +* secrets +* persistent volume claims +* service accounts +* roles and role bindings +* network policies, if enabled + +For Pangolin controller mode, additional permissions are required for the controller resources managed by the chart. + +Check cluster access: + +```bash +kubectl cluster-info +kubectl auth can-i create namespace +kubectl auth can-i create deployments --namespace pangolin +kubectl auth can-i create secrets --namespace pangolin +``` + + +The exact permissions depend on the selected chart options. If you install cluster-scoped components or CRDs, cluster-level permissions may be required. + + +## Required tools + +### kubectl + +`kubectl` is required for checking cluster access, inspecting resources, and troubleshooting. + +Install a `kubectl` version that is compatible with your cluster version. As a general rule, keep `kubectl` close to the Kubernetes API server version used by your cluster. + +Verify: + +```bash +kubectl version --client +``` + +### Helm + +Helm is required for Helm-based installs and for rendering Helm charts. +- **Minimum version**: 3.10 or later +- **Installation**: [Helm official install guide](https://helm.sh/docs/intro/install/) + +Verify: + +```bash +helm version +``` + +Install Helm from the official Helm installation guide if it is not already available. + +### Kustomize + +Kustomize is required for Kustomize-based installs. + +You can use either: + +* `kubectl apply -k` +* `kubectl kustomize` +* the standalone `kustomize` CLI + +Verify: + +```bash +kubectl kustomize --help +``` + +Or, when using the standalone CLI: + +```bash +kustomize version +``` + +### Git + +Git is required for GitOps workflows with Argo CD or Flux. 
+ +Verify: + +```bash +git --version +``` + +## Storage requirements + +### StorageClass + +Pangolin requires persistent storage depending on the selected database mode and chart configuration. + +Check available StorageClasses: + +```bash +kubectl get storageclasses +``` + +If your cluster has a default StorageClass, Kubernetes can usually provision persistent volumes automatically. + +If no default StorageClass exists, configure the StorageClass explicitly in your values file. + +### Pangolin storage + +Pangolin deployments use persistent storage for database-backed setups, CloudNativePG, embedded PostgreSQL, or other chart-managed persistent components. + +For production, prefer one of these database modes: + +* CloudNativePG +* external PostgreSQL + +Avoid SQLite for production deployments. + +### Newt storage + +Newt is lightweight, but persistent storage is still recommended for stable Kubernetes operation. +The Newt chart supports writable configuration persistence through either `emptyDir` or an existing PVC. While `emptyDir` can be used for simple test deployments, it is ephemeral and is recreated when the Pod is replaced. In that case, Newt may lose its writable runtime configuration and has to re-establish its connection state after the Pod starts again. During this reconnect and handshake phase, existing tunnels or proxied connections can be interrupted temporarily. + + +`emptyDir` is suitable for short-lived tests, but pod replacement can interrupt active Newt traffic while connection state is rebuilt. + + + +For production Newt deployments, use a PersistentVolumeClaim so writable runtime configuration survives restarts, upgrades, and rescheduling. + + +For production or any setup where short interruptions after Pod restarts should be avoided, use a PersistentVolumeClaim. This allows Newt to keep its writable configuration across Pod restarts, node drains, upgrades, and rescheduling events. 
+ +## Networking requirements + +### Ingress or Traefik routing + +Pangolin needs an external entrypoint for the dashboard, API, and site traffic. + +The Pangolin chart supports different deployment modes: + +* `controller` mode with a Traefik ingress controller and Traefik CRDs integration +* `standalone` mode with chart-managed Traefik workload +* optional bundled Traefik controller installation in controller mode + +Controller mode is the recommended production mode. + + +In controller mode, Traefik CRDs must be available. And it may add additional version requirements based on the Pangolin/Controller version. Check the chart README for the exact requirements. + + +Check ingress-related resources: + +```bash +kubectl get ingress -A +kubectl get ingressroute.traefik.io -A +``` + +The `IngressRoute.traefik.io` command only works when Traefik CRDs are installed. + +### DNS + +Configure DNS records before exposing Pangolin publicly. + +At minimum, the Pangolin dashboard domain should resolve to the ingress controller, load balancer, or public endpoint used by your deployment. + +Example: + +```bash +nslookup pangolin.example.com +``` + +For tunneled site deployments, also verify the DNS records used by the tunnel entrypoint and Newt connection settings. + +### TLS + +Use HTTPS for the Pangolin dashboard and API. + +Common TLS options are: + +* Traefik ACME / Let's Encrypt +* cert-manager, Infisical or similar with an existing ingress or certificate workflow +* a pre-created Kubernetes TLS secret +* TLS termination at an external load balancer or ingress controller + +The Pangolin chart supports cert-manager, Infisical, Traefik-related TLS configuration and custom Kubernetes TLS secrets. Make sure the configured entrypoints, certificate resolver names, or TLS secret names match your actual Traefik setup. 
+ +If you use cert-manager, verify that cert-manager is already installed: + + +cert-manager is useful for many Kubernetes TLS setups, but it is not required for every Pangolin deployment. Use the TLS method that matches your ingress controller and cluster. + + +### Network policies + +The Pangolin and Newt charts include NetworkPolicy configuration for the required application traffic. When enabled, the chart-managed policies are designed to allow the necessary communication between Pangolin, Newt, Gerbil, Traefik, the database, DNS, and other required components. + + +Keep chart-managed NetworkPolicies enabled by default. Add custom policies mainly when your security model requires stricter controls. + + +You usually do not need to create additional NetworkPolicies for a standard installation. However, if you disable the chart-managed policies or replace them with your own policies because of custom security or other requirements, make sure your policies still allow the required traffic between: + +* Traefik (IngressController or Standalone), Pangolin and Pangolin-Kube-Controller, if used +* Pangolin and its database +* Pangolin and Gerbil, when the tunnel stack is enabled +* Newt and the Pangolin endpoint +* workloads and DNS +* workloads and external identity providers or APIs, if used + +For most deployments, it is recommended to keep the chart-managed NetworkPolicies enabled and only customize them when your cluster has specific network security requirements. + +## Namespace and RBAC + +### Namespace + +Choose a namespace for the installation. 
+ +Example: + +```bash +kubectl create namespace pangolin +``` + +When installing with Helm, you can also let Helm create the namespace: + +```bash +helm upgrade --install pangolin fossorial/pangolin \ + --namespace pangolin \ + --create-namespace +``` + +It's recommended to create the namespace explicitly before installation, so you can apply any required labels or annotations for Pod Security Admission or other cluster policies. + +### Pod Security Admission + +Some clusters enforce Pod Security Admission labels at the namespace level. + +Pangolin deployments that include Gerbil may require permissions that are not compatible with a restricted namespace profile, because Gerbil manages WireGuard and requires capabilities such as `NET_ADMIN`. + +If the chart creates the namespace, it can apply the labels required by its configuration. If you manage the namespace yourself, apply the required labels manually based on the chart values and your selected deployment mode. + +### Service accounts and RBAC + +The Helm charts can create the required service accounts and RBAC resources. + +Controller mode requires Kubernetes API access for the Pangolin controller. The main Pangolin application and Gerbil do not need the same Kubernetes API permissions in the default multi-workload topology. + +## Secrets and configuration + +### Secret management + +Pangolin and Newt require secrets for different parts of the deployment. + +Common examples: + +* Pangolin application secrets +* database credentials or connection strings +* Newt credentials or provisioning keys +* TLS certificates, if not managed by the ingress layer +* identity provider client secrets, if used + +Do not commit plaintext secrets to Git. + + +Do not commit plaintext secrets to Git. Use encrypted or external secret backends for GitOps workflows. 
+ + +If you deploy with GitOps tools such as Argo CD or Flux, use a secret management approach that allows secrets to be stored safely in Git or injected from an external secret backend. Common options include encrypted secrets with SOPS, Sealed Secrets, External Secrets Operator, HashiCorp Vault, Infisical, or cloud provider secret managers. These approaches allow you to keep the deployment declarative without storing plaintext credentials in the repository. + +Use your existing secret management workflow, for example: + +* SOPS-encrypted secrets for GitOps workflows +* Sealed Secrets +* External Secrets Operator +* HashiCorp Vault +* Infisical +* cloud provider secret managers +* manually created Kubernetes Secrets for small test environments + +### ConfigMaps + +Configuration that is not sensitive can be stored in ConfigMaps or provided through Helm values. + +Do not put passwords, private keys, API tokens, provisioning secrets, or other sensitive values into ConfigMaps. ConfigMaps are not designed for secret data and should only be used for non-sensitive configuration. +## Database requirements + +### Pangolin + +Pangolin supports several database modes through the Helm chart. + +For production, use: + +* CloudNativePG +* external PostgreSQL + +The default Pangolin chart database mode is `cloudnativepg`. + +When using CloudNativePG, make sure either: + +* the chart installs the required CloudNativePG components, or +* an existing CloudNativePG operator and cluster are already available, depending on your selected values + +When using an external PostgreSQL database, provide the connection details through a Kubernetes Secret or a chart-supported secret generation method. + +## Resource planning + +Set resource requests and limits according to your expected workload and cluster sizing. 
+ +At minimum, plan resources for: + +* Pangolin +* Gerbil, when the tunnel stack is enabled +* pangolin-kube-controller, when controller mode is used +* Traefik, if installed or managed as part of the deployment +* PostgreSQL or CloudNativePG components, if used +* Newt instances + +Resource usage depends on traffic volume, number of sites, number of users, database mode, enabled metrics, and ingress/tunnel configuration. + +Configure resources in your Helm values, for example: + +```yaml +resources: + requests: + cpu: 500m + memory: 512Mi + limits: + memory: 1Gi +``` + + +Use the chart defaults as the starting point, then adjust requests and limits based on actual usage in your cluster. + + + +Avoid setting CPU limits unless you have a specific reason to enforce them. CPU limits can cause throttling when a workload temporarily needs more CPU, even if spare CPU capacity is available on the node. This can negatively affect latency-sensitive components such as ingress, tunnel, proxy, or controller workloads. +For most deployments, set CPU requests to reserve an appropriate baseline and set memory limits to protect the node from excessive memory usage. Add CPU limits only when your cluster policy requires them or when you intentionally want to cap a component's maximum CPU usage. + + +## Optional tools + +### Argo CD + +Use Argo CD if you want GitOps reconciliation with a web UI, sync status, and drift detection. + +### Flux + +Use Flux if you want GitOps reconciliation through Kubernetes custom resources such as `HelmRelease` and `Kustomization`. + +### Helmfile + +Use Helmfile if you want to manage multiple Helm releases together, for example supporting components plus Pangolin and Newt. + +## Verification checklist + +Before proceeding with installation: + +* [ ] The cluster runs a supported Kubernetes version that satisfies the chart requirement. +* [ ] `kubectl` can access the cluster. +* [ ] Helm is installed, if using the Helm workflow. 
+* [ ] Kustomize is available, if using the Kustomize workflow. +* [ ] A namespace is created or planned. +* [ ] A StorageClass is available if persistent storage is required. +* [ ] The ingress or Traefik routing strategy is defined. +* [ ] DNS records are configured. +* [ ] The TLS strategy is defined. +* [ ] Database mode is selected for Pangolin. +* [ ] Secret management is planned. +* [ ] Pod Security Admission requirements are understood. +* [ ] NetworkPolicy requirements are understood, if policies are enabled. +* [ ] Resource requests and limits are reviewed. + +## Next steps + + + + Install Pangolin or Newt with Helm. + + + Use Kustomize overlays and patches. + + + Deploy with Argo CD or Flux. + + + Start with the Pangolin Helm installation guide. + + From 1140b63e7f3fcbbfd386cee84840229143846768 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marc=20Sch=C3=A4fer?= Date: Sun, 10 May 2026 22:27:47 +0200 Subject: [PATCH 2/5] docs(self-host/kubernetes): fix stale links, citations, and version support wording MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Marc Schäfer --- self-host/manual/kubernetes/helmfile.mdx | 4 ++-- self-host/manual/kubernetes/newt/helm.mdx | 10 +++++----- self-host/manual/kubernetes/overview.mdx | 2 +- self-host/manual/kubernetes/pangolin/helm.mdx | 2 +- self-host/manual/kubernetes/prerequisites.mdx | 2 +- 5 files changed, 10 insertions(+), 10 deletions(-) diff --git a/self-host/manual/kubernetes/helmfile.mdx b/self-host/manual/kubernetes/helmfile.mdx index 605b2ad..27ae5e3 100644 --- a/self-host/manual/kubernetes/helmfile.mdx +++ b/self-host/manual/kubernetes/helmfile.mdx @@ -33,7 +33,7 @@ Use Helmfile if you want to: ## Helmfile prerequisites - Helm 3.10+ -- `helmfile` CLI installed: [Helmfile GitHub](https://github.com/roboll/helmfile) +- `helmfile` CLI installed: [Helmfile GitHub](https://github.com/helmfile/helmfile) - Basic knowledge of Helm values and YAML Install helmfile: @@ 
-43,7 +43,7 @@ Install helmfile: brew install helmfile # or download from releases -wget https://github.com/roboll/helmfile/releases/download/v/helmfile__ +wget https://github.com/helmfile/helmfile/releases/download/v/helmfile__ chmod +x helmfile sudo mv helmfile /usr/local/bin/ ``` diff --git a/self-host/manual/kubernetes/newt/helm.mdx b/self-host/manual/kubernetes/newt/helm.mdx index e715a37..fa6197d 100644 --- a/self-host/manual/kubernetes/newt/helm.mdx +++ b/self-host/manual/kubernetes/newt/helm.mdx @@ -10,7 +10,7 @@ import PangolinCloudTocCta from "/snippets/pangolin-cloud-toc-cta.mdx"; Newt is the site connector used to expose private resources through Pangolin. It runs close to the resources you want to publish and connects back to Pangolin. -Newt is a user-space WireGuard tunnel client and TCP/UDP proxy. It does not require users to manage WireGuard tunnels or NAT rules manually. :contentReference[oaicite:1]{index=1} +Newt is a user-space WireGuard tunnel client and TCP/UDP proxy. It does not require users to manage WireGuard tunnels or NAT rules manually. ## Version matrix @@ -21,7 +21,7 @@ Newt is a user-space WireGuard tunnel client and TCP/UDP proxy. It does not requ | Kubernetes version | `>=1.30.14-0` | | Default image tag | `1.12.3` | -Newt chart `1.4.0` includes Newt `1.12.3` and supports Kubernetes `>=1.30.14-0`. :contentReference[oaicite:2]{index=2} +Newt chart `1.4.0` includes Newt `1.12.3` and supports Kubernetes `>=1.30.14-0`. ## What the chart supports @@ -38,7 +38,7 @@ Newt chart `1.4.0` includes support for: - optional native WireGuard mode - multi-instance deployments with per-instance overrides -The chart README lists these features for version `1.4.0`. :contentReference[oaicite:3]{index=3} +The chart README lists these features for version `1.4.0`. 
## Prerequisites @@ -52,7 +52,7 @@ Before installing Newt, you need: - Newt credentials from Pangolin: `NEWT_ID` and `NEWT_SECRET` - or a provisioning key for Newt 1.11+ provisioning -The chart quickstart lists Kubernetes `>=1.30.14`, Helm 3.x, configured `kubectl`, and Newt credentials from Pangolin as prerequisites. :contentReference[oaicite:4]{index=4} +The chart quickstart lists Kubernetes `>=1.30.14`, Helm 3.x, configured `kubectl`, and Newt credentials from Pangolin as prerequisites. See [Prerequisites](/self-host/manual/kubernetes/prerequisites) for cluster, namespace, storage, networking, and security planning. @@ -66,7 +66,7 @@ Newt chart `1.4.0` supports three credential patterns: | Provisioning key | Newt 1.11+ provisioning | Requires writable config persistence | | Inline values | Local testing only | Credentials may be stored in Helm release history | -For production, use `auth.existingSecretName` or a GitOps-safe secret workflow. The chart values explicitly warn that inline credentials can be stored in Helm release history and recommend existing Secrets for production. :contentReference[oaicite:5]{index=5} +For production, use `auth.existingSecretName` or a GitOps-safe secret workflow. The chart values explicitly warn that inline credentials can be stored in Helm release history and recommend existing Secrets for production. ## Quick install with existing Secret diff --git a/self-host/manual/kubernetes/overview.mdx b/self-host/manual/kubernetes/overview.mdx index 68cdd82..5cf2af3 100644 --- a/self-host/manual/kubernetes/overview.mdx +++ b/self-host/manual/kubernetes/overview.mdx @@ -35,7 +35,7 @@ This section covers the main Kubernetes workflows: | Gerbil | WireGuard interface management service used as part of the Pangolin tunnel stack. | | Newt | Site connector used to expose private resources through Pangolin. Newt runs as a user-space WireGuard tunnel client and TCP/UDP proxy. | | Traefik | Reverse proxy and router for ingress traffic. 
| -| PostgreSQL / SQlite | Database options for Pangolin deployments, depending on the selected installation workflow and chart configuration. | +| PostgreSQL / SQLite | Database options for Pangolin deployments, depending on the selected installation workflow and chart configuration. | | Controller | Kubernetes controller for integration with Traefik cluster resources, replacing single Traefik instances with Traefik ingress controllers. | diff --git a/self-host/manual/kubernetes/pangolin/helm.mdx b/self-host/manual/kubernetes/pangolin/helm.mdx index 4e74445..185691f 100644 --- a/self-host/manual/kubernetes/pangolin/helm.mdx +++ b/self-host/manual/kubernetes/pangolin/helm.mdx @@ -37,7 +37,7 @@ Depending on the selected values, the chart can deploy: | Gerbil tag | `1.3.1` | | Traefik tag | `v3.6.15` | -The current chart metadata defines chart version `0.1.0-alpha.0`, app version `1.18.2`, Kubernetes `>=1.30.14-0`, and the component image metadata listed above. :contentReference[oaicite:0]{index=0} +The current chart metadata defines chart version `0.1.0-alpha.0`, app version `1.18.2`, Kubernetes `>=1.30.14-0`, and the component image metadata listed above. ## Prerequisites diff --git a/self-host/manual/kubernetes/prerequisites.mdx b/self-host/manual/kubernetes/prerequisites.mdx index 77edf3c..594a439 100644 --- a/self-host/manual/kubernetes/prerequisites.mdx +++ b/self-host/manual/kubernetes/prerequisites.mdx @@ -21,7 +21,7 @@ Use a Kubernetes version that is both: - supported by your Kubernetes provider or distribution - compatible with the chart requirement -Kubernetes 1.30 satisfies the chart minimum, but it is no longer a supported upstream Kubernetes release. For production, use a currently supported Kubernetes minor release whenever possible. +Kubernetes 1.30 satisfies the chart minimum, but it is no longer a supported upstream Kubernetes release. 
See the [Kubernetes version skew policy](https://kubernetes.io/releases/version-skew-policy/) for current support details. For production, use a currently supported Kubernetes minor release whenever possible. Check your cluster version: From 792c5d8a230095b1bcb252e2a620b7acd248c18b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marc=20Sch=C3=A4fer?= Date: Sun, 10 May 2026 22:42:54 +0200 Subject: [PATCH 3/5] docs(manage/sites): remove Newt Kubernetes installation guide from documentation. Covered now under the Kubernetes section MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Marc Schäfer --- docs.json | 1 - manage/sites/install-kubernetes.mdx | 100 ---------------------------- 2 files changed, 101 deletions(-) delete mode 100644 manage/sites/install-kubernetes.mdx diff --git a/docs.json b/docs.json index e4088a8..1b6084f 100644 --- a/docs.json +++ b/docs.json @@ -38,7 +38,6 @@ "pages": [ "manage/sites/understanding-sites", "manage/sites/install-site", - "manage/sites/install-kubernetes", "manage/sites/configure-site", "manage/sites/update-site", "manage/sites/credentials", diff --git a/manage/sites/install-kubernetes.mdx b/manage/sites/install-kubernetes.mdx deleted file mode 100644 index 1955242..0000000 --- a/manage/sites/install-kubernetes.mdx +++ /dev/null @@ -1,100 +0,0 @@ ---- -title: "Kubernetes" -description: "How to deploy a Newt Site on Kubernetes" ---- - -import PangolinCloudTocCta from "/snippets/pangolin-cloud-toc-cta.mdx"; - - - - - -This guide walks you through setting up Newt on Kubernetes using Helm. - -This guide assumes you already are familiar with Kubernetes concepts and you fulfill the following Global prerequisites: - -## Global Prerequisites - -- Kubernetes Cluster (v1.28.15+) -- Access to the Kubernetes Cluster -- Helm (v3.0+) installed, see Helm install docs - -## Helm Installation - -All Fossorial Helm charts are available on Artifact Hub. See Fossorial Charts. 
- - - - ```bash - helm repo add fossorial https://charts.fossorial.io - helm repo update fossorial - helm search repo fossorial - ``` - - - - Prepare your Newt credentials: - ```env title="newt-cred.env" - PANGOLIN_ENDPOINT= - NEWT_ID= - NEWT_SECRET= - ``` - - Prepare a values file with your desired configuration. - - See Newt chart values configuration options. - - ```yaml title="values-newt.yaml" - newtInstances: - - name: main - enabled: true - auth: - existingSecretName: newt-cred - keys: - endpointKey: PANGOLIN_ENDPOINT - idKey: NEWT_ID - secretKey: NEWT_SECRET - ``` - - - - Create a Kubernetes Secret from the env file created earlier: - ```bash -kubectl create secret generic newt-cred -n newt --from-env-file=newt-cred.env - ``` - - Install Newt with Helm: - ```bash - helm install my-newt fossorial/newt \ - -n newt --create-namespace \ - -f values-newt.yaml - ``` - - Change the release name (`my-newt`), namespace (`newt`), and values filename as needed. - - - ```bash - # Update repo to get latest charts - helm repo update fossorial - # Upgrade Newt (after editing values) - helm upgrade my-newt fossorial/newt -n newt -f values-newt.yaml - ``` - ```bash - # Roll back to a previous revision - helm rollback my-newt 1 -n newt - ``` - - - -## Customizing Your Values - -All configuration options are documented in the respective repositories: - -- Newt Helm chart values - -## References - - - All Fossorial Helm Charts repo - - All Fossorial Kubernetes resources - - Pangolin Kubernetes Controller - - Helm documentation From e3d14a6cb0549f48ad855f919f8bf6522f30c44f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marc=20Sch=C3=A4fer?= Date: Thu, 14 May 2026 18:06:01 +0200 Subject: [PATCH 4/5] chore(self-host/manual/kubernetes): Refactor documentation for Pangolin and Newt Kubernetes deployment MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Updated titles and descriptions for clarity and consistency across Helm, Kustomize, and 
troubleshooting guides. - Enhanced the overview section to better describe deployment options and components. - Revised prerequisites to streamline requirements for deploying Pangolin and Sites (Newt). - Improved clarity on storage, networking, and security requirements, including detailed RBAC and NetworkPolicy considerations. - Removed deprecated sections and added new information regarding Gerbil and proxy protocol handling. - Adjusted resource planning guidelines to reflect best practices for Kubernetes deployments. Signed-off-by: Marc Schäfer --- docs.json | 2 +- self-host/manual/kubernetes/choose-method.mdx | 181 ++------ self-host/manual/kubernetes/gitops/argocd.mdx | 44 -- self-host/manual/kubernetes/gitops/flux.mdx | 42 -- .../manual/kubernetes/gitops/overview.mdx | 365 ++-------------- self-host/manual/kubernetes/helm.mdx | 15 +- self-host/manual/kubernetes/kustomize.mdx | 22 - .../manual/kubernetes/newt/configuration.mdx | 47 +- self-host/manual/kubernetes/newt/helm.mdx | 38 +- .../manual/kubernetes/newt/kustomize.mdx | 11 +- .../kubernetes/newt/troubleshooting.mdx | 49 +-- self-host/manual/kubernetes/overview.mdx | 93 ++-- .../kubernetes/pangolin/configuration.mdx | 36 +- self-host/manual/kubernetes/pangolin/helm.mdx | 21 +- .../manual/kubernetes/pangolin/kustomize.mdx | 11 +- .../kubernetes/pangolin/troubleshooting.mdx | 43 +- self-host/manual/kubernetes/prerequisites.mdx | 401 +++++------------- 17 files changed, 284 insertions(+), 1137 deletions(-) diff --git a/docs.json b/docs.json index 1b6084f..697182c 100644 --- a/docs.json +++ b/docs.json @@ -193,7 +193,7 @@ ] }, { - "group": "Newt", + "group": "Site (newt)", "pages": [ "self-host/manual/kubernetes/newt/helm", "self-host/manual/kubernetes/newt/kustomize", diff --git a/self-host/manual/kubernetes/choose-method.mdx b/self-host/manual/kubernetes/choose-method.mdx index ac18077..a736062 100644 --- a/self-host/manual/kubernetes/choose-method.mdx +++ b/self-host/manual/kubernetes/choose-method.mdx @@ 
-1,180 +1,53 @@ --- -title: "Choose a Method" -description: "Choose the right Kubernetes installation workflow for Pangolin and Newt." +title: "Choose an Installation Path" +description: "Choose the Kubernetes deployment workflow for Pangolin and Sites (Newt)." --- import PangolinCloudTocCta from "/snippets/pangolin-cloud-toc-cta.mdx"; +Use this page to pick the right Kubernetes guide for your deployment workflow. -This page helps you choose the right Kubernetes workflow for installing and managing Pangolin and related components. +These guides assume you are already familiar with Kubernetes and the deployment tools listed below. -## Quick decision table +If you are new to Kubernetes, start with the [official Kubernetes learning resources](https://kubernetes.io/docs/tutorials/kubernetes-basics/) first. Then review the [Prerequisites](/self-host/manual/kubernetes/prerequisites) guide to check your cluster, tools, and setup. -| If you... | Use | Why | -| --- | --- | --- | -| Want the recommended Kubernetes install path | **Helm** | Standard chart-based workflow for installing, upgrading, and uninstalling releases | -| Need environment-specific overlays or manifest customization | **Kustomize** | Patch and reuse Kubernetes manifests without a separate templating language | -| Already use Argo CD or want GitOps with a web UI | **Argo CD** | Git-driven reconciliation, sync status, drift detection, and optional auto-sync | -| Already use Flux or want GitOps defined through Kubernetes CRDs | **Flux** | Declarative reconciliation with resources such as `HelmRelease` and `Kustomization` | -| Need to manage several Helm releases together | **Helmfile** | Declarative orchestration for multiple Helm releases and shared values | - -## Detailed method descriptions - -### Use Helm if... - -- You want the recommended Kubernetes install workflow for Pangolin or Newt. -- You want a straightforward chart-based install. -- You manage releases manually or through CI/CD. 
-- You want normal Helm release operations such as install, upgrade, rollback, and uninstall. -- You are comfortable managing configuration through `values.yaml`. - -Helm is the default choice for most Kubernetes installations. It packages Kubernetes resources into versioned charts and manages releases in the cluster. - -**Get started**: [Helm Quick-Start](/self-host/manual/kubernetes/helm) - -### Use Kustomize if... - -- You need environment-specific overlays for dev, staging, or production. -- You want to patch Kubernetes manifests without using a templating language. -- You prefer a manifest-driven workflow. -- You want to keep rendered or curated manifests in Git. -- You are comfortable with `kubectl apply -k` or `kustomize build`. - -Kustomize works well when you want a shared base with small environment-specific changes. It can be used with curated manifests, generated manifests, or GitOps tools. - -**Common scenario**: Keep a base deployment and apply overlays for each environment. - -**Get started**: [Kustomize Quick-Start](/self-host/manual/kubernetes/kustomize) - -### Use Argo CD if... - -- Your Git repository should be the source of truth. -- You want a web UI for application status, sync state, and troubleshooting. -- You want drift detection when the live cluster state differs from the desired state. -- You want manual sync, automated sync, or self-healing behavior. -- You already use Argo CD for other applications. - -Argo CD reconciles applications from a declared source into the cluster. It can use Helm charts, Kustomize overlays, plain YAML, Jsonnet, or configured plugins as sources. - -When Argo CD deploys a Helm chart, Helm is used to render the manifests. The application lifecycle is then managed by Argo CD, not by the local `helm` CLI. 
- -**Argo CD can deploy**: - -- Helm charts -- Kustomize overlays -- Plain YAML manifests -- Jsonnet or custom config-management plugin output - -**Get started**: [Argo CD Guide](/self-host/manual/kubernetes/gitops/argocd) - -### Use Flux if... - -- You want GitOps managed through Kubernetes custom resources. -- You already use Flux for other workloads. -- You want Helm releases reconciled by a controller. -- You want Kustomize overlays reconciled from Git. -- You prefer a lightweight workflow without depending on a central web UI. - -Flux defines sources and desired state as Kubernetes resources. Typical resources include `GitRepository`, `HelmRepository`, `OCIRepository`, `Kustomization`, and `HelmRelease`. - -**Flux can deploy**: - -- Helm charts with `HelmRepository` and `HelmRelease` -- OCI-based Helm charts with `OCIRepository` and `HelmRelease` -- Kustomize overlays with `GitRepository` and `Kustomization` -- Plain manifests through a Flux `Kustomization` - -**Get started**: [Flux Guide](/self-host/manual/kubernetes/gitops/flux) - -### Use Helmfile if... +## Installation paths -- You need to manage multiple Helm releases as one deployment stack. -- You want to install Pangolin together with supporting components. -- You want one declarative file for releases, values files, and release ordering. -- You prefer running one controlled workflow instead of several manual `helm upgrade --install` commands. - -Helmfile is a declarative wrapper around Helm. It does not replace Helm; it calls Helm to apply the declared releases. - -**Common scenario**: One Helmfile manages supporting components such as an ingress controller, certificate management, database components, Pangolin, and Newt. - -**Get started**: [Helmfile Guide](/self-host/manual/kubernetes/helmfile) - -## Important clarifications - -### Argo CD and Flux are not Helm replacements - -Argo CD and Flux are delivery and reconciliation tools. They do not replace Helm or Kustomize. 
- -- **Helm** packages and renders Kubernetes resources from charts. -- **Kustomize** customizes Kubernetes manifests through bases, overlays, and patches. -- **Argo CD** reconciles applications from Git, Helm repositories, OCI registries, or other configured sources. -- **Flux** reconciles sources and workloads through Kubernetes custom resources such as `HelmRelease` and `Kustomization`. - -You can use Argo CD or Flux with Helm charts, Kustomize overlays, or plain manifests. - -### OCI is not a separate install method - -OCI (Open Container Initiative) describes a chart distribution format, not a separate deployment workflow. - -For Pangolin and Newt, OCI chart publishing is available in GHCR: - -- Newt: `oci://ghcr.io/fosrl/helm-charts/newt` (for example `1.4.0`) -- Pangolin: `oci://ghcr.io/fosrl/helm-charts/pangolin` (for example `0.1.0-alpha.0`) - -You still choose the same deployment method (Helm directly, or GitOps with Argo CD/Flux). OCI only changes where charts are pulled from. - -Classic Helm repository flow is still valid: - -```bash -helm repo add fossorial https://charts.fossorial.io -helm repo update fossorial -helm install my-newt fossorial/newt -helm install my-pangolin fossorial/pangolin -``` - -OCI workflow with Helm: - -```bash -helm pull oci://ghcr.io/fosrl/helm-charts/newt --version 1.4.0 -helm pull oci://ghcr.io/fosrl/helm-charts/pangolin --version 0.1.0-alpha.0 -``` - -```bash -helm install my-newt oci://ghcr.io/fosrl/helm-charts/newt \ - --version 1.4.0 - -helm install my-pangolin oci://ghcr.io/fosrl/helm-charts/pangolin \ - --version 0.1.0-alpha.0 -``` - -### Raw YAML is not a separate primary workflow - -This documentation does not provide a dedicated raw-YAML installation path. +| Path | Use when | Start here | +| --- | --- | --- | +| Helm | You want the standard chart-based installation path for Pangolin or Sites (Newt). 
| [Helm Quick-Start](/self-host/manual/kubernetes/helm) | +| Kustomize | You want manifest overlays, for example for environment-specific configuration, patches, or rendered manifests that can be reviewed before applying. | [Kustomize Quick-Start](/self-host/manual/kubernetes/kustomize) | +| Argo CD | You already use Argo CD and want to deploy Pangolin or Sites (Newt) through a Kubernetes-native GitOps workflow. | [Argo CD Guide](/self-host/manual/kubernetes/gitops/argocd) | +| Flux | You already use Flux and want to manage Pangolin or Sites (Newt) through `HelmRelease` or `Kustomization` resources. | [Flux Guide](/self-host/manual/kubernetes/gitops/flux) | +| Helmfile | You want to manage multiple related Helm releases as one stack. | [Helmfile Guide](/self-host/manual/kubernetes/helmfile) | -Raw manifests are still possible: +## Recommended starting point -- Render a Helm chart with `helm template`. -- Render Kustomize overlays with `kustomize build` or `kubectl kustomize`. -- Apply generated manifests with `kubectl apply -f`. -- Reconcile plain manifests with Argo CD or Flux. +For most Kubernetes deployments, start with Helm. Use the GitOps guides only if Argo CD or Flux is already part of your deployment workflow. -For most users, Helm, Kustomize, Argo CD, or Flux is easier to maintain than applying standalone YAML files manually. +Kustomize and Helmfile are useful when you need more control over manifests, overlays, or multiple coordinated releases. ## Next steps - Review cluster, tooling, ingress, DNS, storage, and secret requirements. + Review the required cluster, ingress, DNS, storage, and secret setup. - Install Pangolin or Newt with the recommended chart-based workflow. + Install Pangolin or Sites (Newt) with the standard chart-based workflow. - Use bases, overlays, and patches for manifest-driven deployments. + Use overlays and patches for manifest-based deployments. + + + Deploy Pangolin or Sites (Newt) with Argo CD. 
+ + + Deploy Pangolin or Sites (Newt) with Flux. - - Deploy and reconcile Pangolin or Newt with Argo CD or Flux. + + Manage multiple Helm releases together. diff --git a/self-host/manual/kubernetes/gitops/argocd.mdx b/self-host/manual/kubernetes/gitops/argocd.mdx index 14a04b3..59a4867 100644 --- a/self-host/manual/kubernetes/gitops/argocd.mdx +++ b/self-host/manual/kubernetes/gitops/argocd.mdx @@ -10,50 +10,6 @@ import PangolinCloudTocCta from "/snippets/pangolin-cloud-toc-cta.mdx"; Argo CD is a declarative GitOps tool that continuously syncs your cluster state to your Git repository. This guide covers installing Pangolin and Newt using Argo CD. -## Argo CD overview - -Argo CD watches your Git repository (or Helm chart repository) and automatically reconciles Kubernetes resources to match the desired state defined in Git. - -**Key concepts**: - -- **Application**: Argo CD custom resource that defines what to deploy, where, and how -- **Helm source**: Argo CD uses Helm to render charts; you provide values -- **Kustomize source**: Argo CD uses Kustomize to build manifests -- **Sync**: Process of applying desired state to the cluster -- **Drift**: When cluster state diverges from Git (Argo CD can detect and correct) - -## Prerequisites - -- Argo CD installed in your cluster (in `argocd` namespace, typically) -- Helm repo configured: `helm repo add fossorial https://charts.fossorial.io` -- Git repository with Argo CD configuration (optional, can use chart repo as source) -- Newt auth secret (if installing Newt) - -## Install Argo CD - -If you don't have Argo CD yet: - -```bash -# Create namespace -kubectl create namespace argocd - -# Install Argo CD -helm repo add argo https://argoproj.github.io/argo-helm -helm repo update argo -helm install argocd argo/argo-cd -n argocd -``` - -Access the Argo CD UI: - -```bash -# Port-forward -kubectl port-forward -n argocd svc/argocd-server 8080:443 - -# Visit https://localhost:8080 -# Default username: admin -# Password: kubectl 
get secret -n argocd argocd-initial-admin-secret -o jsonpath="{.data.password}" | base64 -d -``` - ## Install Pangolin with Argo CD using Helm ### Step 1: Create Pangolin namespace diff --git a/self-host/manual/kubernetes/gitops/flux.mdx b/self-host/manual/kubernetes/gitops/flux.mdx index d4c823f..637659f 100644 --- a/self-host/manual/kubernetes/gitops/flux.mdx +++ b/self-host/manual/kubernetes/gitops/flux.mdx @@ -10,22 +10,6 @@ import PangolinCloudTocCta from "/snippets/pangolin-cloud-toc-cta.mdx"; Flux is a declarative GitOps tool that uses Kubernetes-native Custom Resources to manage deployments. This guide covers installing Pangolin and Newt using Flux. -## Flux overview - -Flux watches your Git repository and continuous reconciles cluster state using Kubernetes CRDs: - -- **HelmRepository**: Defines a Helm chart repository -- **HelmRelease**: Declaratively manages a Helm chart deployment -- **GitRepository**: References a Git repository -- **Kustomization**: Reconciles Kustomize overlays -- **OCIRepository**: References an OCI-based container registry (for Helm charts) - -**Key benefits**: - -- Native Kubernetes reconciliation (no separate UI needed, though one exists) -- Lightweight footprint -- Excellent for multi-cluster deployments -- Declarative everything: sources, releases, dependencies ## Flux prerequisites @@ -523,32 +507,6 @@ clusters/ Each environment's HelmRelease uses environment-specific values. -## Important notes - -### CRD management - -When using Flux with Helm charts that include CRDs: - -```yaml -spec: - install: - crds: Create # Create CRDs on first install - upgrade: - crds: CreateReplace # Update CRDs on upgrade -``` - -### Namespace creation - -Flux automatically creates namespaces if they don't exist. Ensure appropriate RBAC. 
- -### GitOps best practices - -- Use branches for different environments -- Protect production branches with review requirements -- Store secrets using sealed-secrets or external-secrets -- Track all changes in Git -- Use consistent naming conventions - ## Next steps diff --git a/self-host/manual/kubernetes/gitops/overview.mdx b/self-host/manual/kubernetes/gitops/overview.mdx index f5cbc57..6c3a547 100644 --- a/self-host/manual/kubernetes/gitops/overview.mdx +++ b/self-host/manual/kubernetes/gitops/overview.mdx @@ -1,351 +1,76 @@ --- title: "GitOps Overview" -description: "Git-driven Kubernetes deployments for Pangolin and Newt using Argo CD or Flux." +description: "Deploy Pangolin and Sites (Newt) with GitOps workflows such as Argo CD or Flux." --- import PangolinCloudTocCta from "/snippets/pangolin-cloud-toc-cta.mdx"; +Use GitOps when Pangolin and Sites (Newt) should be reconciled from Git instead of being installed manually from a local shell. +GitOps can be used together with Blueprints — see [Blueprint config reference](/self-host/advanced/config-file) for details. -GitOps is a declarative approach to infrastructure management where your Git repository is the single source of truth for cluster state. Changes go through Git; the GitOps tool automatically reconciles the cluster to match. +These guides assume you already use, or plan to use, a GitOps controller such as Argo CD or Flux. +General GitOps concepts such as reconciliation, desired state, and Git-driven workflows are outside the scope of this documentation. Refer to your GitOps controller's documentation for those concepts. -## What is GitOps? +## Supported GitOps paths -GitOps reconciliation loop: - - - - **Git repository** contains desired state (manifests, Helm values, Kustomize overlays). - - - **GitOps tool** (Argo CD or Flux) watches the Git repository. - - - The controller detects new commits or drift between Git and the live cluster. - - - The controller syncs cluster state to match Git state.
- - - **Result**: Cluster stays aligned with the declared Git configuration. - - - -## Benefits - -- **Version control**: All infrastructure changes tracked in Git -- **Audit trail**: See who changed what and when -- **Rollback**: Revert to previous state by reverting Git commits -- **Automation**: No manual `kubectl apply` commands needed -- **Drift detection**: Automatic alerts if cluster diverges from Git - -## GitOps tools for Kubernetes - -### Argo CD - -- **UI**: Web-based dashboard for monitoring and manual syncs -- **Approach**: External reconciler (watches Git, applies to cluster) -- **Supports**: Helm charts, Kustomize overlays, raw YAML -- **Best for**: Teams who want GitOps with a UI, hybrid manual/automated workflows - -**Use Argo CD if**: -- You want a visual dashboard -- You need frequent manual sync capabilities -- You're already using Argo CD for other workloads - -See: [Argo CD Install Guide](/self-host/manual/kubernetes/gitops/argocd) - -### Flux + + + Deploy Pangolin or Sites (Newt) with Argo CD Applications. + + + Deploy Pangolin or Sites (Newt) with Flux HelmRelease or Kustomization resources. + + -- **CRDs**: Kubernetes-native Custom Resources (HelmRelease, Kustomization, GitRepository) -- **Approach**: Declarative reconciliation using Kubernetes resources -- **Supports**: Helm charts, Kustomize overlays, raw YAML, OCI registries -- **Best for**: Teams who want declarative Kubernetes-way GitOps, lightweight controllers +## What GitOps manages -**Use Flux if**: -- You prefer Kubernetes-native CRDs -- You want a lightweight, modern GitOps tool -- You're already using Flux for other workloads +A GitOps workflow can reconcile the same deployment inputs used by the other Kubernetes guides: -See: [Flux Install Guide](/self-host/manual/kubernetes/gitops/flux) +| Input | Used for | +| --- | --- | +| Helm chart values | Configure Pangolin, controller mode, database mode, ingress, Sites, and related components. 
| +| Kustomize overlays | Patch or compose rendered manifests for environment-specific deployments. | +| Kubernetes Secrets | Provide credentials, TLS material, database connection details, or Site connector credentials. | +| Custom resources | Manage Argo CD Applications, Flux HelmReleases, Flux Kustomizations, or related controller resources. | -## Recommended repository structure +## Recommended layout -For multi-environment Pangolin/Newt deployments, organize your Git repository like this: +Keep the Pangolin and Site configuration close to the cluster or environment that owns it. -``` -my-org/infrastructure/ +```text +infrastructure/ ├── clusters/ │ ├── production/ │ │ ├── pangolin/ -│ │ │ ├── values.yaml -│ │ │ └── kustomization.yaml (if using Kustomize) -│ │ └── newt/ -│ │ ├── values.yaml -│ │ └── kustomization.yaml +│ │ └── sites/ │ ├── staging/ │ │ ├── pangolin/ -│ │ └── newt/ +│ │ └── sites/ │ └── dev/ │ ├── pangolin/ -│ └── newt/ -├── apps/ -│ ├── pangolin/ -│ │ ├── helm/ -│ │ │ ├── values-base.yaml -│ │ │ ├── values-prod.yaml -│ │ │ └── values-staging.yaml -│ │ └── kustomize/ -│ │ ├── base/ -│ │ └── overlays/ -│ └── newt/ -│ ├── helm/ -│ └── kustomize/ -└── .gitignore +│ └── sites/ +└── shared/ + ├── pangolin/ + └── sites/ ``` -**Pattern**: -- `clusters/` → environment-specific configuration -- `apps/` → shared, reusable application configuration -- Environment overlays layer on top of app definitions - -## Secrets in GitOps - - -**Never commit plaintext secrets to Git.** Use secret management tools instead. 
- - -Options for managing secrets in GitOps: - -### Sealed Secrets - -- **Tool**: [sealed-secrets](https://github.com/bitnami-labs/sealed-secrets) -- **How**: Encrypt secrets with cluster-specific key; safe to commit encrypted secrets -- **Decrypt**: Only the cluster can decrypt (uses private key) - -```bash -# Encrypt a secret -echo -n mypassword | kubeseal -f - > secret.yaml - -# Git tracks encrypted secret.yaml -# Cluster auto-decrypts on apply -``` - -### External Secrets Operator - -- **Tool**: [external-secrets](https://external-secrets.io/) -- **How**: Reference secrets stored in external vault (AWS Secrets Manager, Azure Key Vault, HashiCorp Vault) -- **Git**: Stores reference only, not secret values - -```yaml -apiVersion: external-secrets.io/v1beta1 -kind: SecretStore -metadata: - name: vault-backend -spec: - provider: - vault: - server: "https://vault.example.com" -``` - -### SOPS (Secrets Operations) - -- **Tool**: [SOPS](https://github.com/mozilla/sops) -- **How**: Encrypt YAML files; decrypt at deploy time -- **Git**: Stores encrypted files - -```bash -sops --encrypt secrets.yaml > secrets.enc.yaml -# Commit secrets.enc.yaml; tool decrypts on apply -``` - -### Cloud Provider Secrets - -- **AWS**: Use AWS Secrets Manager or Parameter Store with IRSA (IAM Roles for Service Accounts) -- **Azure**: Use Azure Key Vault with pod identity -- **GCP**: Use Google Secret Manager with workload identity - - -Choose a secret management strategy **before** setting up GitOps. Seal secrets once; keep approach consistent. - - -## GitOps workflow example - -### 1. Set up Git repository - -```bash -git clone https://github.com/my-org/infrastructure.git -cd infrastructure -mkdir -p clusters/production/pangolin -cd clusters/production/pangolin -``` - -### 2. 
Create configuration - -```bash -# values.yaml with Pangolin config -cat > values.yaml <=0.1.0" # auto-upgrade to latest 0.1.x -``` - -### Drift detection and remediation - -Argo CD and Flux both support continuous drift detection: - -- **Argo CD**: Detects drift on demand or continuously; can auto-sync on drift -- **Flux**: Reconciles on interval; rolls back manual cluster changes +Use environment-specific directories for values, patches, and secrets that differ between clusters. Use shared directories only for reusable configuration that should stay the same across environments. ## Next steps - - - - + + Create Argo CD Applications for Pangolin and Sites (Newt). + + + Create Flux sources, HelmReleases, or Kustomizations for Pangolin and Sites (Newt). + + + Compare the supported Kubernetes deployment paths. + + + Review cluster, networking, storage, RBAC, and resource requirements. + diff --git a/self-host/manual/kubernetes/helm.mdx b/self-host/manual/kubernetes/helm.mdx index 8bc5069..cb16438 100644 --- a/self-host/manual/kubernetes/helm.mdx +++ b/self-host/manual/kubernetes/helm.mdx @@ -60,7 +60,7 @@ It is recommended to create the namespace explicitly before installation. This a For detailed installation steps, see: * [Pangolin Helm Quick-Start](/self-host/manual/kubernetes/pangolin/helm) — Install Pangolin -* [Newt Helm Quick-Start](/self-host/manual/kubernetes/newt/helm) — Install Newt +* [Site (Newt) Helm Quick-Start](/self-host/manual/kubernetes/newt/helm) — Install Site (Newt) ## Install command patterns @@ -340,15 +340,6 @@ helm uninstall newt --namespace pangolin Uninstalling a Helm release does not always remove persistent volumes, externally managed secrets, DNS records, certificates, or cloud load balancers. Review the namespace and related cluster resources before deleting data. -## Using Helm with GitOps - -Helm charts can also be installed and reconciled through GitOps tools. 
- -* [Argo CD](/self-host/manual/kubernetes/gitops/argocd) can deploy Helm charts from a Helm repository, Git repository, or OCI source. -* [Flux](/self-host/manual/kubernetes/gitops/flux) can reconcile Helm charts through `HelmRepository`, `OCIRepository`, and `HelmRelease`. - -In these workflows, Helm is the chart format. The GitOps controller manages reconciliation. - ## Troubleshooting For component-specific troubleshooting, see: @@ -381,8 +372,8 @@ kubectl logs -n pangolin Install Pangolin with the Helm chart. - - Install Newt with the Helm chart. + + Install Site (Newt) with the Helm chart. Configure Pangolin chart values for your cluster. diff --git a/self-host/manual/kubernetes/kustomize.mdx b/self-host/manual/kubernetes/kustomize.mdx index 8651e2d..f094e2b 100644 --- a/self-host/manual/kubernetes/kustomize.mdx +++ b/self-host/manual/kubernetes/kustomize.mdx @@ -19,18 +19,6 @@ Use Kustomize when you need: - a manifest-driven workflow for GitOps tools - small changes on top of a shared base without maintaining separate full manifests -## When to use Kustomize - -Use Kustomize if: - -- you want to manage rendered Pangolin or Newt manifests in Git -- you need different overlays for different environments -- your team prefers reviewing concrete Kubernetes manifests -- you use Argo CD or Flux with Kustomize sources -- you want to patch generated manifests without forking the Helm chart - -For a single environment or a first installation, [Helm](/self-host/manual/kubernetes/helm) is usually simpler. - ## Supported workflow The chart repository does not provide native Kustomize bases. Use this workflow instead: @@ -289,15 +277,6 @@ Apply after review: kubectl apply -k overlays/prod ``` -## Kustomize with GitOps - -Kustomize overlays work well with GitOps tools. - -* [Argo CD](/self-host/manual/kubernetes/gitops/argocd) can reconcile a Kustomize overlay path directly. 
-* [Flux](/self-host/manual/kubernetes/gitops/flux) can reconcile a Kustomize overlay with a `Kustomization` resource. - -In GitOps workflows, the controller applies the overlay. Do not also apply the same overlay manually unless you are debugging. - ## Important considerations ### Namespace handling @@ -400,4 +379,3 @@ kubectl get events -n pangolin --sort-by=.lastTimestamp Troubleshoot Pangolin deployments on Kubernetes. -``` diff --git a/self-host/manual/kubernetes/newt/configuration.mdx b/self-host/manual/kubernetes/newt/configuration.mdx index e376c69..ee5eb2a 100644 --- a/self-host/manual/kubernetes/newt/configuration.mdx +++ b/self-host/manual/kubernetes/newt/configuration.mdx @@ -1,5 +1,5 @@ --- -title: "Newt Configuration" +title: "Configuration" description: "Configuration reference for Newt Kubernetes deployments." --- @@ -189,7 +189,7 @@ Chart `1.4.0` also includes `auth.createSecret` and `auth.envVarsDirect` modes f -Newt 1.11+ supports provisioning-based installs. +Provisioning supports installs where Newt bootstraps credentials from a provisioning key. Use provisioning when Newt should bootstrap credentials from a provisioning key instead of using a static `NEWT_ID` and `NEWT_SECRET`. @@ -418,30 +418,6 @@ newtInstances: Use Secrets for certificates and sensitive script inputs. Avoid inline private keys or credentials in values files. - - - - -By default, Newt runs without native WireGuard mode. - -```yaml -global: - nativeMode: - enabled: false - -newtInstances: - - name: main-tunnel - useNativeInterface: false -``` - -Native mode requires elevated privileges. - -When native mode or `useNativeInterface` is enabled, Newt runs as root with privileged settings and capabilities such as `NET_ADMIN` and `SYS_MODULE`. - - -Only enable native WireGuard mode if your cluster policy allows privileged workloads and you understand the security impact. 
- - @@ -723,7 +699,7 @@ helm upgrade --install newt fossorial/newt \ --set 'newtInstances[0].auth.existingSecretName=newt-auth' ``` -See [Newt Helm](/self-host/manual/kubernetes/newt/helm) for the installation flow. +See [Site (newt) Helm](/self-host/manual/kubernetes/newt/helm) for the installation flow. ### Kustomize @@ -776,23 +752,6 @@ spec: See [GitOps](/self-host/manual/kubernetes/gitops/overview) for GitOps guidance. -## Production checklist - -Before deploying Newt to production: - -- [ ] Create the namespace before installation and apply required labels or annotations. -- [ ] Store credentials in Kubernetes Secrets. -- [ ] Avoid inline plaintext credentials. -- [ ] Use provisioning only with writable config persistence. -- [ ] Use a PVC for durable provisioning state. -- [ ] Keep native WireGuard mode disabled unless privileged workloads are allowed. -- [ ] Confirm the Pangolin endpoint is reachable from the Newt pod. -- [ ] Confirm TLS certificates are valid for the Pangolin endpoint. -- [ ] Set resources based on expected traffic. -- [ ] Configure NetworkPolicy rules if your cluster enforces network isolation. -- [ ] Enable metrics only when you have a scraping path. -- [ ] Review RBAC settings before upgrading from older chart versions. - ## Next steps diff --git a/self-host/manual/kubernetes/newt/helm.mdx b/self-host/manual/kubernetes/newt/helm.mdx index fa6197d..329d808 100644 --- a/self-host/manual/kubernetes/newt/helm.mdx +++ b/self-host/manual/kubernetes/newt/helm.mdx @@ -1,6 +1,6 @@ --- -title: "Newt Helm" -description: "Quick-start guide for installing Newt on Kubernetes using Helm." +title: "Helm" +description: "Quick-start guide for installing Site (newt) on Kubernetes using Helm." --- import PangolinCloudTocCta from "/snippets/pangolin-cloud-toc-cta.mdx"; @@ -8,20 +8,9 @@ import PangolinCloudTocCta from "/snippets/pangolin-cloud-toc-cta.mdx"; -Newt is the site connector used to expose private resources through Pangolin. 
It runs close to the resources you want to publish and connects back to Pangolin. +This guide installs and manages Site (newt) in Kubernetes using Helm. -Newt is a user-space WireGuard tunnel client and TCP/UDP proxy. It does not require users to manage WireGuard tunnels or NAT rules manually. - -## Version matrix - -| Item | Value | -| --- | --- | -| Chart version | `1.4.0` | -| App version | `1.12.3` | -| Kubernetes version | `>=1.30.14-0` | -| Default image tag | `1.12.3` | - -Newt chart `1.4.0` includes Newt `1.12.3` and supports Kubernetes `>=1.30.14-0`. +See [Version Matrix](https://github.com/fosrl/helm-charts/blob/main/VERSION_MATRIX.md) for chart and default app version references. ## What the chart supports @@ -29,13 +18,12 @@ The Newt chart can deploy one or more Newt instances through `newtInstances[]`. Newt chart `1.4.0` includes support for: -- Newt 1.11+ provisioning with `NEWT_PROVISIONING_KEY` and `NEWT_NAME` +- provisioning with `NEWT_PROVISIONING_KEY` and `NEWT_NAME` - legacy credential installs with `NEWT_ID` and `NEWT_SECRET` - existing Kubernetes Secrets for production credentials - writable config persistence with `emptyDir` or an existing PVC - optional metrics, PodMonitor, ServiceMonitor, and PrometheusRule - optional NetworkPolicy -- optional native WireGuard mode - multi-instance deployments with per-instance overrides The chart README lists these features for version `1.4.0`. @@ -50,7 +38,7 @@ Before installing Newt, you need: - a reachable Pangolin instance - either: - Newt credentials from Pangolin: `NEWT_ID` and `NEWT_SECRET` - - or a provisioning key for Newt 1.11+ provisioning + - or a provisioning key for provisioning installs The chart quickstart lists Kubernetes `>=1.30.14`, Helm 3.x, configured `kubectl`, and Newt credentials from Pangolin as prerequisites. 
@@ -63,7 +51,7 @@ Newt chart `1.4.0` supports three credential patterns: | Method | Recommended for | Notes | | --- | --- | --- | | Existing Secret | Production | Credentials are stored in a Kubernetes Secret created outside Helm | -| Provisioning key | Newt 1.11+ provisioning | Requires writable config persistence | +| Provisioning key | Provisioning installs | Requires writable config persistence | | Inline values | Local testing only | Credentials may be stored in Helm release history | For production, use `auth.existingSecretName` or a GitOps-safe secret workflow. The chart values explicitly warn that inline credentials can be stored in Helm release history and recommend existing Secrets for production. @@ -204,7 +192,7 @@ kubectl wait --for=condition=ready pod \ ## Quick install with provisioning key -Newt 1.11+ supports provisioning-based installs. Use this when you want Newt to bootstrap credentials from a provisioning key. +Provisioning-based installs bootstrap credentials from a provisioning key. Provisioning requires writable config persistence so Newt can store the generated configuration. The chart quickstart explicitly notes that provisioning requires a writable `CONFIG_FILE` target and that the chart provides this through `newtInstances[x].configPersistence`. ([GitHub][3]) @@ -352,16 +340,6 @@ kubectl create secret generic newt-auth-site-b \ The chart values include `newtInstances[]`, per-instance namespace settings, and per-instance service account overrides. ([GitHub][2]) -### Security defaults - -By default, Newt runs without native WireGuard mode. - -The chart values describe the default non-native mode as non-root with privilege escalation disabled, read-only root filesystem, and dropped capabilities. Native WireGuard mode requires a privileged container with capabilities such as `NET_ADMIN` and `SYS_MODULE`. ([GitHub][2]) - - -Only enable native WireGuard mode if you understand the required privileges and your cluster policy allows them. 
- - ### RBAC Newt chart `1.4.0` defaults `rbac.create` to `false`. Enable RBAC only when your selected Newt configuration requires Kubernetes API permissions. diff --git a/self-host/manual/kubernetes/newt/kustomize.mdx b/self-host/manual/kubernetes/newt/kustomize.mdx index d031f14..92a9e0d 100644 --- a/self-host/manual/kubernetes/newt/kustomize.mdx +++ b/self-host/manual/kubernetes/newt/kustomize.mdx @@ -1,5 +1,5 @@ --- -title: "Newt Kustomize" +title: "Kustomize" description: "Deploy Newt on Kubernetes using Helm-rendered manifests and Kustomize overlays." --- @@ -559,15 +559,6 @@ kubectl get all --namespace pangolin kubectl get events --namespace pangolin --sort-by=.lastTimestamp ``` -## Kustomize with GitOps - -Kustomize overlays work well with GitOps tools. - -* [Argo CD](/self-host/manual/kubernetes/gitops/argocd) can reconcile a Kustomize overlay path directly. -* [Flux](/self-host/manual/kubernetes/gitops/flux) can reconcile a Kustomize overlay with a `Kustomization` resource. - -In GitOps workflows, the controller owns the apply operation. Do not also apply the same overlay manually unless you are debugging. - ## Troubleshooting ### The patch does not apply diff --git a/self-host/manual/kubernetes/newt/troubleshooting.mdx b/self-host/manual/kubernetes/newt/troubleshooting.mdx index 0d377e9..3f23f74 100644 --- a/self-host/manual/kubernetes/newt/troubleshooting.mdx +++ b/self-host/manual/kubernetes/newt/troubleshooting.mdx @@ -1,5 +1,5 @@ --- -title: "Newt Troubleshooting" +title: "Troubleshooting" description: "Diagnose and resolve common Newt Kubernetes deployment issues." 
--- @@ -130,7 +130,6 @@ kubectl logs "$NEWT_POD" --namespace "$NEWT_NAMESPACE" --previous --tail=100 | Authentication failure | Wrong `NEWT_ID`, `NEWT_SECRET`, or provisioning key | Check credentials in Pangolin | | Endpoint connection errors | `PANGOLIN_ENDPOINT` is wrong or unreachable | Test DNS and HTTPS from the pod | | Image pull failure | Registry or image settings are wrong | `kubectl describe pod` | -| Permission error with native mode | Native WireGuard mode requires privileged settings | Check `global.nativeMode.enabled` and `useNativeInterface` | ## Secret issues @@ -255,7 +254,7 @@ If you use provisioning, also verify: ## Provisioning issues -Newt 1.11+ provisioning requires writable config persistence. +Provisioning requires writable config persistence. ### Symptoms @@ -309,50 +308,6 @@ newtInstances: `emptyDir` is recreated when the pod is recreated. Use a PVC if the generated configuration must survive pod replacement. -## Native WireGuard permission issues - -By default, Newt does not require native WireGuard privileges. - -Only check this section if you enabled native WireGuard mode: - -```yaml -global: - nativeMode: - enabled: true - -newtInstances: - - name: main-tunnel - useNativeInterface: true -``` - -### Symptoms - -```text -operation not permitted -cannot create interface -permission denied -``` - -### Check security context - -```bash -kubectl get pod "$NEWT_POD" --namespace "$NEWT_NAMESPACE" -o yaml | grep -A30 securityContext -``` - -Native mode requires privileged workload settings and capabilities such as `NET_ADMIN` and `SYS_MODULE`. - - -Only enable native WireGuard mode if your cluster policy allows privileged workloads. Do not add `NET_ADMIN` to the default non-native deployment unless you know it is required. - - -### Check namespace policy - -```bash -kubectl get namespace "$NEWT_NAMESPACE" --show-labels -``` - -A namespace with a restricted Pod Security Admission profile may block native mode. 
- ## Service not created or not reachable ### Important behavior diff --git a/self-host/manual/kubernetes/overview.mdx b/self-host/manual/kubernetes/overview.mdx index 5cf2af3..ae76fd0 100644 --- a/self-host/manual/kubernetes/overview.mdx +++ b/self-host/manual/kubernetes/overview.mdx @@ -1,45 +1,25 @@ --- title: "Overview" -description: "Deploy Pangolin, Newt, and related components on Kubernetes with Helm, Kustomize, GitOps, or Helmfile." +description: "Deploy Pangolin, Sites (Newt), and related components on Kubernetes." --- import PangolinCloudTocCta from "/snippets/pangolin-cloud-toc-cta.mdx"; - -## Kubernetes deployment options - -Kubernetes is a good fit for running Pangolin-related components when you need repeatable deployments, workload isolation, rolling updates, and integration with existing cluster services such as ingress, storage, monitoring, and network policy. - -This section covers the main Kubernetes workflows: - -- **Helm** for the recommended chart-based installation and upgrade workflow. -- **Kustomize** for overlay-based customization and manifest-driven deployments. -- **GitOps** with Argo CD or Flux for reconciling Helm charts, Kustomize overlays, or manifests from Git. -- **Helmfile** for advanced setups that manage multiple Helm releases together. - -## What this section covers - -- Kubernetes prerequisites and cluster requirements. -- Installation workflows for Helm, Kustomize, GitOps, and Helmfile. -- Pangolin installation, configuration, and troubleshooting. -- Newt installation, configuration, and troubleshooting. -- How Pangolin, Gerbil, Traefik, Newt and Pangolin-Kube-Controller fit together in tunneled deployments. - ## Components | Component | Role | | --- | --- | -| Pangolin | Main application and control plane for the dashboard, API, authentication, configuration, and database-backed state. | -| Gerbil | WireGuard interface management service used as part of the Pangolin tunnel stack. 
| -| Newt | Site connector used to expose private resources through Pangolin. Newt runs as a user-space WireGuard tunnel client and TCP/UDP proxy. | +| Pangolin | Main application for the dashboard, API, authentication, configuration, and database-backed state. | +| Gerbil | Tunnel stack component used by Pangolin for site connectivity. | +| Site (Newt) | Site connector used to connect private resources to Pangolin. | | Traefik | Reverse proxy and router for ingress traffic. | -| PostgreSQL / SQLite | Database options for Pangolin deployments, depending on the selected installation workflow and chart configuration. | -| Controller | Kubernetes controller for integration with Traefik cluster resources, replacing single Traefik instances with Traefik ingress controllers. | +| PostgreSQL / SQLite | Database options for Pangolin deployments, depending on the selected chart configuration. | +| Pangolin Kube Controller | Kubernetes controller for integrating Pangolin with Kubernetes and Traefik resources. | -For local reverse proxy deployments, the full tunnel stack may not be required. Tunneled sites require the components needed for Newt and WireGuard-based connectivity. +Depending on your deployment mode, not every component is required. Local reverse proxy deployments and tunneled site deployments can have different component requirements. ```mermaid @@ -47,50 +27,37 @@ flowchart LR U[Users] --> T[Traefik] T --> P[Pangolin] P --> G[Gerbil] - N[Newt] --> G + S[Site connector
Newt] --> G P --> D[(Database)] ``` -## Method comparison - -Choose the workflow that matches how you already manage Kubernetes applications: - -| Method | Best for | Complexity | GitOps fit | -| --- | --- | --- | --- | -| **Helm** | Standard Kubernetes installs and upgrades | Low | Works with Argo CD and Flux | -| **Kustomize** | Environment-specific overlays and manifest customization | Medium | Works with Argo CD and Flux | -| **Argo CD** | Git-driven reconciliation with a web UI and sync status | Medium | Native GitOps workflow | -| **Flux** | Declarative GitOps using Kubernetes custom resources | Medium | Native GitOps workflow | -| **Helmfile** | Managing multiple Helm releases as one deployment stack | Medium | Usually used from CI/CD or a controlled automation workflow | - - -Argo CD and Flux are delivery and reconciliation tools. They do not replace Helm or Kustomize. They can deploy Helm charts, Kustomize overlays, and other Kubernetes manifests. - - -## Recommended starting points +## Installation paths - - Compare Helm, Kustomize, GitOps, and Helmfile before choosing a workflow. + + Pick the Kubernetes workflow that matches how you deploy applications. - Review the cluster, ingress, storage, DNS, and tooling requirements. + Review the required cluster, ingress, DNS, storage, and secret setup. - - Start with the recommended chart-based Kubernetes workflow. + + Install Pangolin or Sites (Newt) with the standard chart-based workflow. - + Use overlays and patches for manifest-based deployments. - - Deploy with Argo CD or Flux from Git. + + Deploy Pangolin or Sites (Newt) with Argo CD. - - Manage Pangolin, Newt, and supporting Helm releases together. + + Deploy Pangolin or Sites (Newt) with Flux. + + + Manage multiple Helm releases together. -## Component quick links +## Component guides @@ -99,10 +66,16 @@ Argo CD and Flux are delivery and reconciliation tools. They do not replace Helm Configure Pangolin for your Kubernetes environment. 
- - Install Newt in a Kubernetes cluster. + + Diagnose and resolve Pangolin deployment issues. + + + Install a Site connector with the Newt Helm chart. + + + Configure Site connector credentials and runtime settings. - - Configure Newt credentials, endpoints, resources, and runtime settings. + + Diagnose and resolve Site connector deployment issues. diff --git a/self-host/manual/kubernetes/pangolin/configuration.mdx b/self-host/manual/kubernetes/pangolin/configuration.mdx index 0e30081..7eed350 100644 --- a/self-host/manual/kubernetes/pangolin/configuration.mdx +++ b/self-host/manual/kubernetes/pangolin/configuration.mdx @@ -1,5 +1,5 @@ --- -title: "Pangolin Configuration" +title: "Configuration" description: "Configuration reference for Pangolin Kubernetes deployments." --- @@ -478,6 +478,10 @@ Important settings: | `gerbil.service.enabled` | Creates a Service for Gerbil UDP traffic. | | `gerbil.persistence.enabled` | Persists Gerbil key/config data. Recommended for production. | + +If Gerbil is exposed through a reverse proxy or UDP gateway, keep proxy protocol settings aligned end-to-end. Do not enable proxy protocol on the upstream hop unless Gerbil is configured to accept it. + + ### Startup mode ```yaml @@ -922,6 +926,11 @@ helm upgrade --install pangolin fossorial/pangolin \ See [Pangolin Helm](/self-host/manual/kubernetes/pangolin/helm) for the installation flow. +For complete application configuration keys and examples, see: + +- [Public config file reference](/self-host/advanced/config-file) +- [Private config file reference](/self-host/advanced/private-config-file) + ### Kustomize Render the chart with Helm, then apply Kustomize overlays: @@ -973,31 +982,6 @@ spec: See [GitOps](/self-host/manual/kubernetes/gitops/overview) for GitOps guidance. -## Production checklist - -Before deploying to production: - -* [ ] Use `deployment.type=controller`, unless you have a specific reason to use standalone mode. -* [ ] Use `deployment.mode=multi`. 
-* [ ] Use `database.mode=cloudnativepg` or `database.mode=external`. -* [ ] Avoid SQLite for production. -* [ ] Configure a real `pangolin.config.app.dashboard_url`. -* [ ] Replace the default `example.com` domain entry. -* [ ] Configure `pangolin.config.gerbil.base_endpoint`. -* [ ] Keep `pangolin.config.gerbil.start_port` aligned with `gerbil.ports.wg1`. -* [ ] Keep `pangolin.config.gerbil.clients_start_port` aligned with `gerbil.ports.wg2`. -* [ ] Configure TLS with either a Traefik cert resolver or an existing TLS Secret. -* [ ] Create or label the namespace so Gerbil can use `NET_ADMIN`. -* [ ] Keep Gerbil persistence enabled. -* [ ] Store the Pangolin app secret in a Kubernetes Secret. -* [ ] Use an existing database connection Secret for external PostgreSQL. -* [ ] Review NetworkPolicy egress requirements. -* [ ] Avoid broad `0.0.0.0/0` egress unless required. -* [ ] Enable monitoring resources only when the required CRDs exist. -* [ ] Set resource requests and limits based on expected traffic. -* [ ] Define a database backup strategy. -* [ ] Test upgrades in a staging environment before production. - ## Next steps diff --git a/self-host/manual/kubernetes/pangolin/helm.mdx b/self-host/manual/kubernetes/pangolin/helm.mdx index 185691f..2fb5ca8 100644 --- a/self-host/manual/kubernetes/pangolin/helm.mdx +++ b/self-host/manual/kubernetes/pangolin/helm.mdx @@ -1,5 +1,5 @@ --- -title: "Pangolin Helm" +title: "Helm" description: "Quick-start guide for installing Pangolin on Kubernetes using Helm." --- @@ -24,20 +24,15 @@ Depending on the selected values, the chart can deploy: - **Traefik integration**: Traefik CRD-based routing in controller mode, bundled Traefik controller when enabled, or standalone Traefik mode. - **Database backend**: CloudNativePG, external PostgreSQL, embedded PostgreSQL, or SQLite. -## Version matrix +See [Version Matrix](https://github.com/fosrl/helm-charts/blob/main/VERSION_MATRIX.md) for chart and default app version references. 
-| Item | Value | -| --- | --- | -| Chart version | `0.1.0-alpha.0` | -| Kubernetes version | `>=1.30.14-0` | -| Pangolin appVersion | `1.18.2` | -| Pangolin default image tag | `1.18.2` | -| Pangolin PostgreSQL image tag | `postgresql-1.18.2` | -| pangolin-kube-controller tag | `0.1.0-alpha.1` | -| Gerbil tag | `1.3.1` | -| Traefik tag | `v3.6.15` | +## Gerbil setup in the Pangolin chart -The current chart metadata defines chart version `0.1.0-alpha.0`, app version `1.18.2`, Kubernetes `>=1.30.14-0`, and the component image metadata listed above. +This chart deploys Gerbil when `gerbil.enabled=true`. This is the default when using `deployment.type=controller` and recommended. + + +If Gerbil is exposed through a reverse proxy or UDP gateway, keep proxy protocol settings aligned end-to-end. Do not enable proxy protocol on the upstream hop unless Gerbil is configured to accept it. + ## Prerequisites diff --git a/self-host/manual/kubernetes/pangolin/kustomize.mdx b/self-host/manual/kubernetes/pangolin/kustomize.mdx index c9f1bf5..69410bd 100644 --- a/self-host/manual/kubernetes/pangolin/kustomize.mdx +++ b/self-host/manual/kubernetes/pangolin/kustomize.mdx @@ -1,5 +1,5 @@ --- -title: "Pangolin Kustomize" +title: "Kustomize" description: "Deploy Pangolin on Kubernetes using Helm-rendered manifests and Kustomize overlays." --- @@ -625,15 +625,6 @@ If a patch does not apply, inspect generated resource names: kustomize build base | grep -E "^(kind:| name:)" ``` -## Kustomize with GitOps - -Kustomize overlays work well with GitOps tools. - -* [Argo CD](/self-host/manual/kubernetes/gitops/argocd) can reconcile a Kustomize overlay path directly. -* [Flux](/self-host/manual/kubernetes/gitops/flux) can reconcile a Kustomize overlay with a `Kustomization` resource. - -In GitOps workflows, the controller owns the apply operation. Do not also apply the same overlay manually unless you are debugging. 
- ## Troubleshooting ### The patch does not apply diff --git a/self-host/manual/kubernetes/pangolin/troubleshooting.mdx b/self-host/manual/kubernetes/pangolin/troubleshooting.mdx index 91eb0be..54480f2 100644 --- a/self-host/manual/kubernetes/pangolin/troubleshooting.mdx +++ b/self-host/manual/kubernetes/pangolin/troubleshooting.mdx @@ -1,5 +1,5 @@ --- -title: "Pangolin Troubleshooting" +title: "Troubleshooting" description: "Diagnose and resolve Pangolin Kubernetes deployment issues." --- @@ -386,6 +386,47 @@ kubectl describe networkpolicy --namespace "$PANGOLIN_NAMESPACE" Verify external firewall rules for the configured UDP ports. +
+ + + +**Symptoms** + +* Newt peers do not establish stable handshakes. +* Tunnel traffic drops even though Gerbil pods are healthy. +* Logs show connection resets or malformed upstream traffic. + +**Cause** + +Proxy protocol handling is inconsistent between the upstream hop and Gerbil. + + +If Gerbil is exposed through a reverse proxy or UDP gateway, keep proxy protocol settings aligned end-to-end. Do not enable proxy protocol on the upstream hop unless Gerbil is configured to accept it. + + +**Checks** + +Check endpoint and port alignment: + +```bash +helm get values "$PANGOLIN_RELEASE" --namespace "$PANGOLIN_NAMESPACE" --all | grep -A40 gerbil +``` + +Check Gerbil logs: + +```bash +kubectl logs --namespace "$PANGOLIN_NAMESPACE" \ + -l app.kubernetes.io/name=gerbil \ + --tail=200 +``` + +Check Service exposure: + +```bash +kubectl get svc --namespace "$PANGOLIN_NAMESPACE" \ + -l app.kubernetes.io/name=gerbil -o wide +``` + diff --git a/self-host/manual/kubernetes/prerequisites.mdx b/self-host/manual/kubernetes/prerequisites.mdx index 594a439..2c349ec 100644 --- a/self-host/manual/kubernetes/prerequisites.mdx +++ b/self-host/manual/kubernetes/prerequisites.mdx @@ -1,136 +1,67 @@ --- title: "Prerequisites" -description: "Cluster requirements, tools, and setup needed to deploy Pangolin and Newt on Kubernetes." +description: "Cluster, tooling, networking, and storage requirements for deploying Pangolin and Sites (Newt) on Kubernetes." --- import PangolinCloudTocCta from "/snippets/pangolin-cloud-toc-cta.mdx"; +Before installing Pangolin or Sites (Newt) on Kubernetes, check that your cluster, tools, networking, and storage setup match the deployment path you want to use. -Before installing Pangolin or Newt on Kubernetes, check that your cluster, local tools, networking, and storage setup are ready. +## Kubernetes cluster -## Cluster requirements - -### Kubernetes version - -The Pangolin and Newt Helm charts currently require Kubernetes **1.30.14 or newer**. 
- -Use a Kubernetes version that is both: - -- supported by your Kubernetes provider or distribution -- compatible with the chart requirement - -Kubernetes 1.30 satisfies the chart minimum, but it is no longer a supported upstream Kubernetes release. See the [Kubernetes version skew policy](https://kubernetes.io/releases/version-skew-policy/) for current support details. For production, use a currently supported Kubernetes minor release whenever possible. +Use a Kubernetes version that satisfies the Helm chart `kubeVersion` requirement and is supported by your Kubernetes provider or distribution. Check your cluster version: ```bash kubectl version -``` +``` -For the exact version support, refer to the Pangolin and Newt Helm chart READMEs before installing or upgrading. +See the [Version Matrix](https://github.com/fosrl/helm-charts/blob/main/VERSION_MATRIX.md) for the supported Kubernetes versions of the Pangolin and Newt Helm charts. -### Cluster access - -You need `kubectl` access to the target cluster. - -The user or service account used for installation must be able to create and manage the resources required by the selected install method. - -For a standard Helm install, this usually includes: - -* namespaces -* deployments and statefulsets -* services -* configmaps -* secrets -* persistent volume claims -* service accounts -* roles and role bindings -* network policies, if enabled - -For Pangolin controller mode, additional permissions are required for the controller resources managed by the chart. - -Check cluster access: - -```bash -kubectl cluster-info -kubectl auth can-i create namespace -kubectl auth can-i create deployments --namespace pangolin -kubectl auth can-i create secrets --namespace pangolin -``` - - -The exact permissions depend on the selected chart options. If you install cluster-scoped components or CRDs, cluster-level permissions may be required. 
- - -## Required tools - -### kubectl - -`kubectl` is required for checking cluster access, inspecting resources, and troubleshooting. - -Install a `kubectl` version that is compatible with your cluster version. As a general rule, keep `kubectl` close to the Kubernetes API server version used by your cluster. - -Verify: - -```bash -kubectl version --client -``` - -### Helm - -Helm is required for Helm-based installs and for rendering Helm charts. -- **Minimum version**: 3.10 or later -- **Installation**: [Helm official install guide](https://helm.sh/docs/intro/install/) - -Verify: +## Controller access and RBAC -```bash -helm version -``` - -Install Helm from the official Helm installation guide if it is not already available. - -### Kustomize +Controller mode is the default and recommended Kubernetes deployment mode for Pangolin. -Kustomize is required for Kustomize-based installs. +When controller mode is enabled, the Pangolin Kube Controller runs with its own ServiceAccount and needs permission to watch and manage the Kubernetes and Traefik resources it reconciles. The chart creates the required RBAC resources for you, unless RBAC creation is disabled. -You can use either: +By default, the controller is scoped to the namespace of a single Pangolin deployment. It can also be configured for a broader scope when one controller should reconcile resources for multiple Pangolin deployments. 
-* `kubectl apply -k` -* `kubectl kustomize` -* the standalone `kustomize` CLI - -Verify: - -```bash -kubectl kustomize --help -``` +Depending on the configured controller scope, the controller needs namespace-scoped or cluster-scoped access to the resources it reconciles: -Or, when using the standalone CLI: +| API group | Resources | Verbs | +| --- | --- | --- | +| `""` | `events` | `create`, `patch`, `update` | +| `""` | `services`, `endpoints` | `get`, `list`, `watch`, `create`, `update`, `patch`, `delete` | +| `discovery.k8s.io` | `endpointslices` | `get`, `list`, `watch`, `create`, `update`, `patch`, `delete` | +| `traefik.io` | `ingressroutes`, `ingressroutetcps`, `ingressrouteudps`, `middlewares`, `middlewaretcps`, `traefikservices`, `serverstransports`, `serverstransporttcps`, `tlsoptions`, `tlsstores` | `get`, `list`, `watch`, `create`, `update`, `patch`, `delete` | -```bash -kustomize version -``` +If leader election is enabled, the controller also needs access to: -### Git +| API group | Resources | Verbs | +| --- | --- | --- | +| `coordination.k8s.io` | `leases` | `get`, `list`, `watch`, `create`, `update`, `patch` | -Git is required for GitOps workflows with Argo CD or Flux. +The controller also needs cluster-wide read access to Kubernetes discovery resources: -Verify: +| API group | Resources | Verbs | +| --- | --- | --- | +| `networking.k8s.io` | `ingressclasses` | `get`, `list`, `watch` | +| `apiextensions.k8s.io` | `customresourcedefinitions` | `get`, `list`, `watch` | -```bash -git --version -``` + +For namespace-scoped deployments, the chart creates namespaced RBAC for the controller namespace and, if configured, the target namespace. For broader controller scopes, the chart creates the required cluster-scoped RBAC. + -## Storage requirements +## Database and storage -### StorageClass +Pangolin requires a database backend. 
The Helm chart supports multiple database modes, including CloudNativePG, external PostgreSQL, embedded PostgreSQL, and SQLite. -Pangolin requires persistent storage depending on the selected database mode and chart configuration. +For persistent database-backed deployments, make sure your cluster has a usable StorageClass or configure the StorageClass explicitly in your chart values. Check available StorageClasses: @@ -138,66 +69,45 @@ Check available StorageClasses: kubectl get storageclasses ``` -If your cluster has a default StorageClass, Kubernetes can usually provision persistent volumes automatically. +For long-running/production deployments, prefer PostgreSQL-based modes such as CloudNativePG or external PostgreSQL. -If no default StorageClass exists, configure the StorageClass explicitly in your values file. - -### Pangolin storage - -Pangolin deployments use persistent storage for database-backed setups, CloudNativePG, embedded PostgreSQL, or other chart-managed persistent components. - -For production, prefer one of these database modes: - -* CloudNativePG -* external PostgreSQL - -Avoid SQLite for production deployments. - -### Newt storage - -Newt is lightweight, but persistent storage is still recommended for stable Kubernetes operation. -The Newt chart supports writable configuration persistence through either `emptyDir` or an existing PVC. While `emptyDir` can be used for simple test deployments, it is ephemeral and is recreated when the Pod is replaced. In that case, Newt may lose its writable runtime configuration and has to re-establish its connection state after the Pod starts again. During this reconnect and handshake phase, existing tunnels or proxied connections can be interrupted temporarily. + +SQLite can be useful for simple or test deployments, but PostgreSQL-based modes are the better fit for long-running/production Kubernetes deployments. 
+ - -`emptyDir` is suitable for short-lived tests, but pod replacement can interrupt active Newt traffic while connection state is rebuilt. - +### Site connector storage - -For production Newt deployments, use a PersistentVolumeClaim so writable runtime configuration survives restarts, upgrades, and rescheduling. - +A Site (Newt) deployment does not require persistent storage by default. -For production or any setup where short interruptions after Pod restarts should be avoided, use a PersistentVolumeClaim. This allows Newt to keep its writable configuration across Pod restarts, node drains, upgrades, and rescheduling events. +Use writable configuration persistence only if your deployment needs runtime configuration to survive pod replacement, upgrades, node drains, or rescheduling. For simple deployments, no PVC is required. -## Networking requirements +## Networking -### Ingress or Traefik routing +### Ingress and routing Pangolin needs an external entrypoint for the dashboard, API, and site traffic. -The Pangolin chart supports different deployment modes: +Depending on your chart values, this can use: -* `controller` mode with a Traefik ingress controller and Traefik CRDs integration -* `standalone` mode with chart-managed Traefik workload -* optional bundled Traefik controller installation in controller mode +* controller mode with a Traefik ingress controller +* standalone mode with chart-managed Traefik components +* an existing ingress or load balancer setup -Controller mode is the recommended production mode. +If you use controller mode with Traefik CRDs, verify that the required Traefik API resources are available: - -In controller mode, Traefik CRDs must be available. And it may add additional version requirements based on the Pangolin/Controller version. Check the chart README for the exact requirements. 
- +```bash +kubectl api-resources --api-group=traefik.io +``` -Check ingress-related resources: +You can also check existing ingress resources: ```bash kubectl get ingress -A -kubectl get ingressroute.traefik.io -A ``` -The `IngressRoute.traefik.io` command only works when Traefik CRDs are installed. - ### DNS -Configure DNS records before exposing Pangolin publicly. +Configure DNS records for the domains used by Pangolin before exposing it publicly. At minimum, the Pangolin dashboard domain should resolve to the ingress controller, load balancer, or public endpoint used by your deployment. @@ -207,51 +117,30 @@ Example: nslookup pangolin.example.com ``` -For tunneled site deployments, also verify the DNS records used by the tunnel entrypoint and Newt connection settings. +For tunneled site deployments, also verify the DNS name used by the site connector endpoint. ### TLS Use HTTPS for the Pangolin dashboard and API. -Common TLS options are: +Common TLS options include: * Traefik ACME / Let's Encrypt -* cert-manager, Infisical or similar with an existing ingress or certificate workflow -* a pre-created Kubernetes TLS secret +* cert-manager +* a pre-created Kubernetes TLS Secret * TLS termination at an external load balancer or ingress controller -The Pangolin chart supports cert-manager, Infisical, Traefik-related TLS configuration and custom Kubernetes TLS secrets. Make sure the configured entrypoints, certificate resolver names, or TLS secret names match your actual Traefik setup. - -If you use cert-manager, verify that cert-manager is already installed: - - -cert-manager is useful for many Kubernetes TLS setups, but it is not required for every Pangolin deployment. Use the TLS method that matches your ingress controller and cluster. - - -### Network policies - -The Pangolin and Newt charts include NetworkPolicy configuration for the required application traffic. 
When enabled, the chart-managed policies are designed to allow the necessary communication between Pangolin, Newt, Gerbil, Traefik, the database, DNS, and other required components. +Use the TLS method that matches your ingress and cluster setup. - -Keep chart-managed NetworkPolicies enabled by default. Add custom policies mainly when your security model requires stricter controls. - +If you use cert-manager, verify that the certificate CRDs are available: -You usually do not need to create additional NetworkPolicies for a standard installation. However, if you disable the chart-managed policies or replace them with your own policies because of custom security or other requirements, make sure your policies still allow the required traffic between: - -* Traefik (IngressController or Standalone), Pangolin and Pangolin-Kube-Controller, if used -* Pangolin and its database -* Pangolin and Gerbil, when the tunnel stack is enabled -* Newt and the Pangolin endpoint -* workloads and DNS -* workloads and external identity providers or APIs, if used - -For most deployments, it is recommended to keep the chart-managed NetworkPolicies enabled and only customize them when your cluster has specific network security requirements. - -## Namespace and RBAC +```bash +kubectl get crd certificates.cert-manager.io +``` -### Namespace +## Namespace and security -Choose a namespace for the installation. +Choose the namespace where Pangolin and related components should run. 
Example: @@ -259,7 +148,7 @@ Example: kubectl create namespace pangolin ``` -When installing with Helm, you can also let Helm create the namespace: +When using Helm, you can also let Helm create the namespace: ```bash helm upgrade --install pangolin fossorial/pangolin \ @@ -267,158 +156,68 @@ helm upgrade --install pangolin fossorial/pangolin \ --create-namespace ``` -It's recommended to create the namespace explicitly before installation, so you can apply any required labels or annotations for Pod Security Admission or other cluster policies. - -### Pod Security Admission - -Some clusters enforce Pod Security Admission labels at the namespace level. +If your cluster enforces Pod Security Admission, make sure the namespace labels match the selected deployment mode. Deployments that include tunnel components may require permissions that are not compatible with a fully restricted namespace profile. -Pangolin deployments that include Gerbil may require permissions that are not compatible with a restricted namespace profile, because Gerbil manages WireGuard and requires capabilities such as `NET_ADMIN`. - -If the chart creates the namespace, it can apply the labels required by its configuration. If you manage the namespace yourself, apply the required labels manually based on the chart values and your selected deployment mode. - -### Service accounts and RBAC - -The Helm charts can create the required service accounts and RBAC resources. - -Controller mode requires Kubernetes API access for the Pangolin controller. The main Pangolin application and Gerbil do not need the same Kubernetes API permissions in the default multi-workload topology. - -## Secrets and configuration - -### Secret management - -Pangolin and Newt require secrets for different parts of the deployment. 
- -Common examples: - -* Pangolin application secrets -* database credentials or connection strings -* Newt credentials or provisioning keys -* TLS certificates, if not managed by the ingress layer -* identity provider client secrets, if used - -Do not commit plaintext secrets to Git. - - -Do not commit plaintext secrets to Git. Use encrypted or external secret backends for GitOps workflows. - +## NetworkPolicy -If you deploy with GitOps tools such as Argo CD or Flux, use a secret management approach that allows secrets to be stored safely in Git or injected from an external secret backend. Common options include encrypted secrets with SOPS, Sealed Secrets, External Secrets Operator, HashiCorp Vault, Infisical, or cloud provider secret managers. These approaches allow you to keep the deployment declarative without storing plaintext credentials in the repository. +The Pangolin and Newt charts can manage NetworkPolicies for the required application traffic. -Use your existing secret management workflow, for example: - -* SOPS-encrypted secrets for GitOps workflows -* Sealed Secrets -* External Secrets Operator -* HashiCorp Vault -* Infisical -* cloud provider secret managers -* manually created Kubernetes Secrets for small test environments - -### ConfigMaps - -Configuration that is not sensitive can be stored in ConfigMaps or provided through Helm values. - -Do not put passwords, private keys, API tokens, provisioning secrets, or other sensitive values into ConfigMaps. ConfigMaps are not designed for secret data and should only be used for non-sensitive configuration. -## Database requirements - -### Pangolin - -Pangolin supports several database modes through the Helm chart. - -For production, use: - -* CloudNativePG -* external PostgreSQL - -The default Pangolin chart database mode is `cloudnativepg`. 
- -When using CloudNativePG, make sure either: - -* the chart installs the required CloudNativePG components, or -* an existing CloudNativePG operator and cluster are already available, depending on your selected values - -When using an external PostgreSQL database, provide the connection details through a Kubernetes Secret or a chart-supported secret generation method. +If you enable chart-managed NetworkPolicies, review the generated policies before adding custom deny rules. If you replace them with your own policies, allow the required traffic between the components you deploy, such as Pangolin, Traefik, Gerbil, the database, DNS, and Site connectors. ## Resource planning -Set resource requests and limits according to your expected workload and cluster sizing. +Pangolin and Site (Newt) Kubernetes deployments include predefined resource profiles for the supported deployment methods. These profiles set CPU and memory requests and limits for the components used by the selected deployment mode. -At minimum, plan resources for: +The available profiles are: -* Pangolin -* Gerbil, when the tunnel stack is enabled -* pangolin-kube-controller, when controller mode is used -* Traefik, if installed or managed as part of the deployment -* PostgreSQL or CloudNativePG components, if used -* Newt instances +| Profile | Intended use | +| --- | --- | +| Small | Small deployments, or clusters with very limited available resources. | +| Standard | Default profile for most normal deployments. | +| Large | Larger environments with more Sites, more users, higher traffic, or stricter availability expectations. | -Resource usage depends on traffic volume, number of sites, number of users, database mode, enabled metrics, and ingress/tunnel configuration. 
+The selected profile applies to the workloads that are part of your deployment, for example: -Configure resources in your Helm values, for example: +| Component | Resource considerations | +| --- | --- | +| Pangolin | Main application workload. Size according to dashboard/API usage, users, and traffic. | +| Pangolin Kube Controller | Required in controller mode. Size according to the number of reconciled Kubernetes and Traefik resources. | +| Traefik | Size according to ingress and proxy traffic. | +| Gerbil | Required when the tunnel stack is enabled. Size according to tunnel traffic and number of connected Sites. | +| PostgreSQL / CloudNativePG | Size according to database mode, stored state, and expected write/read activity. | +| Site connectors (Newt) | Each Site connector adds its own resource usage. Size according to the traffic handled by that Site. | -```yaml -resources: - requests: - cpu: 500m - memory: 512Mi - limits: - memory: 1Gi -``` + +The Standard profile is intended to be enough for most standard deployments. Use Small for very limited lab or test environments, and Large for higher traffic, more Sites, more users, or larger production environments. + - -Use the chart defaults as the starting point, then adjust requests and limits based on actual usage in your cluster. - +After installation, monitor CPU and memory usage and adjust the selected profile or individual resource overrides if needed. -Avoid setting CPU limits unless you have a specific reason to enforce them. CPU limits can cause throttling when a workload temporarily needs more CPU, even if spare CPU capacity is available on the node. This can negatively affect latency-sensitive components such as ingress, tunnel, proxy, or controller workloads. -For most deployments, set CPU requests to reserve an appropriate baseline and set memory limits to protect the node from excessive memory usage. 
Add CPU limits only when your cluster policy requires them or when you intentionally want to cap a component's maximum CPU usage. - - -## Optional tools - -### Argo CD - -Use Argo CD if you want GitOps reconciliation with a web UI, sync status, and drift detection. - -### Flux - -Use Flux if you want GitOps reconciliation through Kubernetes custom resources such as `HelmRelease` and `Kustomization`. +Avoid setting CPU limits on latency-sensitive Pangolin components unless your cluster policy requires them or you intentionally want to cap CPU usage. -### Helmfile +CPU limits can cause throttling when a workload temporarily needs more CPU, even if spare CPU capacity is available on the node. This can negatively affect ingress, tunnel, proxy, database, and controller workloads. -Use Helmfile if you want to manage multiple Helm releases together, for example supporting components plus Pangolin and Newt. - -## Verification checklist - -Before proceeding with installation: - -* [ ] The cluster runs a supported Kubernetes version that satisfies the chart requirement. -* [ ] `kubectl` can access the cluster. -* [ ] Helm is installed, if using the Helm workflow. -* [ ] Kustomize is available, if using the Kustomize workflow. -* [ ] A namespace is created or planned. -* [ ] A StorageClass is available if persistent storage is required. -* [ ] The ingress or Traefik routing strategy is defined. -* [ ] DNS records are configured. -* [ ] The TLS strategy is defined. -* [ ] Database mode is selected for Pangolin. -* [ ] Secret management is planned. -* [ ] Pod Security Admission requirements are understood. -* [ ] NetworkPolicy requirements are understood, if policies are enabled. -* [ ] Resource requests and limits are reviewed. +For most deployments, use CPU requests to reserve baseline capacity and memory limits to protect the node from excessive memory usage. + ## Next steps + + Pick the Kubernetes workflow that matches how you deploy applications. 
+ - Install Pangolin or Newt with Helm. + Install Pangolin or Sites (Newt) with Helm. Use Kustomize overlays and patches. - - Deploy with Argo CD or Flux. + + Deploy Pangolin or Sites (Newt) with Argo CD. + + + Deploy Pangolin or Sites (Newt) with Flux. Start with the Pangolin Helm installation guide. From a93c49137c9804ef2108441c9b10db219a429444 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marc=20Sch=C3=A4fer?= Date: Thu, 14 May 2026 18:07:06 +0200 Subject: [PATCH 5/5] docs(docs.json): add redirect for Newt Kubernetes installation to Helm guide MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Marc Schäfer --- docs.json | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/docs.json b/docs.json index 697182c..89ecfab 100644 --- a/docs.json +++ b/docs.json @@ -443,6 +443,11 @@ { "source": "/manage/resources/private/icmp-access", "destination": "/manage/resources/private/port-restrictions" + }, + { + "source": "/manage/sites/install-kubernetes", + "destination": "/self-host/manual/kubernetes/newt/helm", + "permanent": true } ], "seo": {