diff --git a/docs.json b/docs.json
index 2b1b204..89ecfab 100644
--- a/docs.json
+++ b/docs.json
@@ -38,7 +38,6 @@
"pages": [
"manage/sites/understanding-sites",
"manage/sites/install-site",
- "manage/sites/install-kubernetes",
"manage/sites/configure-site",
"manage/sites/update-site",
"manage/sites/credentials",
@@ -175,7 +174,44 @@
"group": "Manual Installation",
"pages": [
"self-host/manual/docker-compose",
- "self-host/manual/unraid"
+ "self-host/manual/unraid",
+ {
+ "group": "Kubernetes",
+ "pages": [
+ "self-host/manual/kubernetes/overview",
+ "self-host/manual/kubernetes/choose-method",
+ "self-host/manual/kubernetes/prerequisites",
+ "self-host/manual/kubernetes/helm",
+ "self-host/manual/kubernetes/kustomize",
+ "self-host/manual/kubernetes/helmfile",
+ {
+ "group": "GitOps",
+ "pages": [
+ "self-host/manual/kubernetes/gitops/overview",
+ "self-host/manual/kubernetes/gitops/argocd",
+ "self-host/manual/kubernetes/gitops/flux"
+ ]
+ },
+ {
+ "group": "Site (newt)",
+ "pages": [
+ "self-host/manual/kubernetes/newt/helm",
+ "self-host/manual/kubernetes/newt/kustomize",
+ "self-host/manual/kubernetes/newt/configuration",
+ "self-host/manual/kubernetes/newt/troubleshooting"
+ ]
+ },
+ {
+ "group": "Pangolin",
+ "pages": [
+ "self-host/manual/kubernetes/pangolin/helm",
+ "self-host/manual/kubernetes/pangolin/kustomize",
+ "self-host/manual/kubernetes/pangolin/configuration",
+ "self-host/manual/kubernetes/pangolin/troubleshooting"
+ ]
+ }
+ ]
+ }
]
},
"self-host/dns-and-networking",
@@ -407,6 +443,11 @@
{
"source": "/manage/resources/private/icmp-access",
"destination": "/manage/resources/private/port-restrictions"
+ },
+ {
+ "source": "/manage/sites/install-kubernetes",
+ "destination": "/self-host/manual/kubernetes/newt/helm",
+ "permanent": true
}
],
"seo": {
diff --git a/manage/sites/install-kubernetes.mdx b/manage/sites/install-kubernetes.mdx
deleted file mode 100644
index 1955242..0000000
--- a/manage/sites/install-kubernetes.mdx
+++ /dev/null
@@ -1,100 +0,0 @@
----
-title: "Kubernetes"
-description: "How to deploy a Newt Site on Kubernetes"
----
-
-import PangolinCloudTocCta from "/snippets/pangolin-cloud-toc-cta.mdx";
-
-
-
-
-
-This guide walks you through setting up Newt on Kubernetes using Helm.
-
-This guide assumes you already are familiar with Kubernetes concepts and you fulfill the following Global prerequisites:
-
-## Global Prerequisites
-
-- Kubernetes Cluster (v1.28.15+)
-- Access to the Kubernetes Cluster
-- Helm (v3.0+) installed, see Helm install docs
-
-## Helm Installation
-
-All Fossorial Helm charts are available on Artifact Hub. See Fossorial Charts.
-
-
-
- ```bash
- helm repo add fossorial https://charts.fossorial.io
- helm repo update fossorial
- helm search repo fossorial
- ```
-
-
-
- Prepare your Newt credentials:
- ```env title="newt-cred.env"
- PANGOLIN_ENDPOINT=
- NEWT_ID=
- NEWT_SECRET=
- ```
-
- Prepare a values file with your desired configuration.
-
- See Newt chart values configuration options.
-
- ```yaml title="values-newt.yaml"
- newtInstances:
- - name: main
- enabled: true
- auth:
- existingSecretName: newt-cred
- keys:
- endpointKey: PANGOLIN_ENDPOINT
- idKey: NEWT_ID
- secretKey: NEWT_SECRET
- ```
-
-
-
- Create a Kubernetes Secret from the env file created earlier:
- ```bash
-kubectl create secret generic newt-cred -n newt --from-env-file=newt-cred.env
- ```
-
- Install Newt with Helm:
- ```bash
- helm install my-newt fossorial/newt \
- -n newt --create-namespace \
- -f values-newt.yaml
- ```
-
- Change the release name (`my-newt`), namespace (`newt`), and values filename as needed.
-
-
- ```bash
- # Update repo to get latest charts
- helm repo update fossorial
- # Upgrade Newt (after editing values)
- helm upgrade my-newt fossorial/newt -n newt -f values-newt.yaml
- ```
- ```bash
- # Roll back to a previous revision
- helm rollback my-newt 1 -n newt
- ```
-
-
-
-## Customizing Your Values
-
-All configuration options are documented in the respective repositories:
-
-- Newt Helm chart values
-
-## References
-
- - All Fossorial Helm Charts repo
- - All Fossorial Kubernetes resources
- - Pangolin Kubernetes Controller
- - Helm documentation
diff --git a/self-host/manual/kubernetes/choose-method.mdx b/self-host/manual/kubernetes/choose-method.mdx
new file mode 100644
index 0000000..a736062
--- /dev/null
+++ b/self-host/manual/kubernetes/choose-method.mdx
@@ -0,0 +1,53 @@
+---
+title: "Choose an Installation Path"
+description: "Choose the Kubernetes deployment workflow for Pangolin and Sites (Newt)."
+---
+
+import PangolinCloudTocCta from "/snippets/pangolin-cloud-toc-cta.mdx";
+
+
+
+Use this page to pick the right Kubernetes guide for your deployment workflow.
+
+These guides assume you are already familiar with Kubernetes and the deployment tools listed below.
+
+If you are new to Kubernetes, start with the [official Kubernetes learning resources](https://kubernetes.io/docs/tutorials/kubernetes-basics/) first. Then review the [Prerequisites](/self-host/manual/kubernetes/prerequisites) guide to check your cluster, tools, and setup.
+
+## Installation paths
+
+| Path | Use when | Start here |
+| --- | --- | --- |
+| Helm | You want the standard chart-based installation path for Pangolin or Sites (Newt). | [Helm Quick-Start](/self-host/manual/kubernetes/helm) |
+| Kustomize | You want manifest overlays, for example for environment-specific configuration, patches, or rendered manifests that can be reviewed before applying. | [Kustomize Quick-Start](/self-host/manual/kubernetes/kustomize) |
+| Argo CD | You already use Argo CD and want to deploy Pangolin or Sites (Newt) through a Kubernetes-native GitOps workflow. | [Argo CD Guide](/self-host/manual/kubernetes/gitops/argocd) |
+| Flux | You already use Flux and want to manage Pangolin or Sites (Newt) through `HelmRelease` or `Kustomization` resources. | [Flux Guide](/self-host/manual/kubernetes/gitops/flux) |
+| Helmfile | You want to manage multiple related Helm releases as one stack. | [Helmfile Guide](/self-host/manual/kubernetes/helmfile) |
+
+## Recommended starting point
+
+For most Kubernetes deployments, start with Helm. Use the GitOps guides only if Argo CD or Flux is already part of your deployment workflow.
+
+Kustomize and Helmfile are useful when you need more control over manifests, overlays, or multiple coordinated releases.
+
+## Next steps
+
+
+
+ Review the required cluster, ingress, DNS, storage, and secret setup.
+
+
+ Install Pangolin or Sites (Newt) with the standard chart-based workflow.
+
+
+ Use overlays and patches for manifest-based deployments.
+
+
+ Deploy Pangolin or Sites (Newt) with Argo CD.
+
+
+ Deploy Pangolin or Sites (Newt) with Flux.
+
+
+ Manage multiple Helm releases together.
+
+
diff --git a/self-host/manual/kubernetes/gitops/argocd.mdx b/self-host/manual/kubernetes/gitops/argocd.mdx
new file mode 100644
index 0000000..59a4867
--- /dev/null
+++ b/self-host/manual/kubernetes/gitops/argocd.mdx
@@ -0,0 +1,424 @@
+---
+title: "Argo CD"
+description: "Deploy Pangolin and Newt using Argo CD for Git-driven GitOps reconciliation."
+---
+
+import PangolinCloudTocCta from "/snippets/pangolin-cloud-toc-cta.mdx";
+
+
+
+
+Argo CD is a declarative GitOps tool that continuously syncs your cluster state to your Git repository. This guide covers installing Pangolin and Newt using Argo CD.
+
+## Install Pangolin with Argo CD using Helm
+
+### Step 1: Create Pangolin namespace
+
+```bash
+kubectl create namespace pangolin
+```
+
+### Step 2: Create Application
+
+Create an Argo CD Application resource that tells Argo CD to deploy Pangolin using the Helm chart:
+
+```yaml
+apiVersion: argoproj.io/v1alpha1
+kind: Application
+metadata:
+ name: pangolin
+ namespace: argocd
+spec:
+ project: default
+
+ source:
+ repoURL: https://charts.fossorial.io
+ chart: pangolin
+ targetRevision: 0.1.0-alpha.0 # or use ~0.1.0 for range
+ helm:
+ values: |
+ deployment:
+ type: controller
+ mode: multi
+
+ database:
+ mode: cloudnativepg
+
+ pangolin:
+ config:
+ app:
+ dashboard_url: https://pangolin.example.com
+ domains:
+ domain1:
+ base_domain: example.com
+ gerbil:
+ base_endpoint: vpn.example.com
+
+ ingress:
+ enabled: true
+ className: traefik
+ hosts:
+ - host: pangolin.example.com
+ paths:
+ - path: /
+ pathType: Prefix
+ tls:
+ - secretName: pangolin-tls
+ hosts:
+ - pangolin.example.com
+
+ destination:
+ server: https://kubernetes.default.svc
+ namespace: pangolin
+
+ syncPolicy:
+ syncOptions:
+ - CreateNamespace=true
+ automated:
+ prune: true
+ selfHeal: true
+```
+
+Apply the Application:
+
+```bash
+kubectl apply -f pangolin-app.yaml
+```
+
+### Step 3: Monitor in Argo CD
+
+In the Argo CD UI, you should see the `pangolin` application. Argo CD will:
+
+1. Fetch the Helm chart from `https://charts.fossorial.io`
+2. Render the chart with your inline `values`
+3. Create all resources in the `pangolin` namespace
+4. Continuously monitor for drift
+
+### Step 4: Verify deployment
+
+```bash
+# Check Argo CD status
+kubectl describe app -n argocd pangolin
+
+# Check pod status
+kubectl get pods -n pangolin
+```
+
+## Install Newt with Argo CD using Helm
+
+### Step 1: Create Newt auth secret
+
+```bash
+kubectl create secret generic newt-auth \
+ -n pangolin \
+ --from-literal=PANGOLIN_ENDPOINT=https://pangolin.example.com \
+ --from-literal=NEWT_ID= \
+ --from-literal=NEWT_SECRET=
+```
+
+### Step 2: Create Newt Application
+
+```yaml
+apiVersion: argoproj.io/v1alpha1
+kind: Application
+metadata:
+ name: newt
+ namespace: argocd
+spec:
+ project: default
+
+ source:
+ repoURL: https://charts.fossorial.io
+ chart: newt
+ targetRevision: 1.4.0
+ helm:
+ values: |
+ newtInstances:
+ - name: main-tunnel
+ enabled: true
+ auth:
+ existingSecretName: newt-auth
+
+ destination:
+ server: https://kubernetes.default.svc
+ namespace: pangolin
+
+ syncPolicy:
+ syncOptions:
+ - CreateNamespace=true
+ automated:
+ prune: true
+ selfHeal: true
+```
+
+Apply:
+
+```bash
+kubectl apply -f newt-app.yaml
+```
+
+## Using Argo CD with Git repository
+
+Instead of inline values, you can store configuration in Git and have Argo CD deploy from there:
+
+### Repository structure
+
+```
+infrastructure/
+├── apps/
+│ ├── pangolin/
+│ │ ├── values-base.yaml
+│ │ ├── values-prod.yaml
+│ │ └── app.yaml (Argo CD Application CRD)
+│ └── newt/
+│ ├── values.yaml
+│ └── app.yaml
+└── clusters/
+ └── production/
+ ├── pangolin.yaml (reference to app)
+ └── newt.yaml
+```
+
+### Git-based Application
+
+```yaml
+apiVersion: argoproj.io/v1alpha1
+kind: Application
+metadata:
+ name: pangolin
+ namespace: argocd
+spec:
+ project: default
+
+ source:
+ repoURL: https://github.com/my-org/infrastructure
+ path: apps/pangolin
+ targetRevision: main
+ helm:
+ valuesObject:
+ deployment:
+ type: controller
+ mode: multi
+ releaseName: pangolin
+
+ destination:
+ server: https://kubernetes.default.svc
+ namespace: pangolin
+
+ syncPolicy:
+ syncOptions:
+ - CreateNamespace=true
+ automated:
+ prune: true
+ selfHeal: true
+```
+
+Argo CD will watch the Git repository and auto-sync on changes to `apps/pangolin`.
+
+## Using Argo CD with Kustomize
+
+Deploy Pangolin using Kustomize overlays:
+
+```yaml
+apiVersion: argoproj.io/v1alpha1
+kind: Application
+metadata:
+ name: pangolin
+ namespace: argocd
+spec:
+ project: default
+
+ source:
+ repoURL: https://github.com/my-org/infrastructure
+ path: overlays/production
+ targetRevision: main
+
+ destination:
+ server: https://kubernetes.default.svc
+ namespace: pangolin
+
+ syncPolicy:
+ syncOptions:
+ - CreateNamespace=true
+ automated:
+ prune: true
+ selfHeal: true
+```
+
+## Sync policies
+
+### Automated sync
+
+**prune: true**: Deletes resources in cluster that are no longer in Git
+
+**selfHeal: true**: Resyncs if cluster drifts from Git (e.g., manual `kubectl apply`)
+
+```yaml
+syncPolicy:
+ automated:
+ prune: true
+ selfHeal: true
+ allowEmpty: false # prevent accidental deletion of all resources
+```
+
+### Manual sync
+
+Sync only when you explicitly trigger it:
+
+```yaml
+syncPolicy:
+ syncOptions:
+ - CreateNamespace=true
+```
+
+Manually sync:
+
+```bash
+argocd app sync pangolin
+# or use UI
+```
+
+## Advanced: ApplicationSet for multi-environment
+
+Deploy Pangolin and Newt across multiple clusters or environments:
+
+```yaml
+apiVersion: argoproj.io/v1alpha1
+kind: ApplicationSet
+metadata:
+ name: pangolin-multienv
+ namespace: argocd
+spec:
+ generators:
+ - list:
+ elements:
+ - cluster: production
+ env: prod
+ - cluster: staging
+ env: staging
+  template:
+    metadata:
+      name: pangolin-{{cluster}}
+    spec:
+      project: default
+      source:
+        repoURL: https://github.com/my-org/infrastructure
+        path: clusters/{{cluster}}/pangolin
+        targetRevision: main
+      destination:
+        name: '{{cluster}}'
+        namespace: pangolin
+ syncPolicy:
+ automated:
+ prune: true
+ selfHeal: true
+```
+
+## OCI Helm sources (if available)
+
+If the Helm chart is available in an OCI registry:
+
+```yaml
+source:
+ repoURL: oci://registry.example.com/fossorial
+ chart: pangolin
+ targetRevision: 0.1.0-alpha.0
+ helm:
+ values: |
+ # ... values ...
+```
+
+Depending on your Argo CD version, the OCI registry may first need to be registered as a repository with `enableOCI: true`; see the Argo CD documentation on Helm OCI support.
+
+## Troubleshooting Argo CD deployments
+
+### Check Application status
+
+```bash
+kubectl describe app -n argocd pangolin
+kubectl get app -n argocd pangolin -o yaml
+```
+
+### Check sync status
+
+```bash
+argocd app get pangolin
+argocd app logs pangolin
+```
+
+### Manual sync
+
+```bash
+argocd app sync pangolin --force
+```
+
+### Refresh from repository
+
+```bash
+argocd app get pangolin --refresh
+```
+
+### Delete Application
+
+```bash
+kubectl delete app -n argocd pangolin
+```
+
+## Common patterns
+
+### Different values per environment
+
+Use multiple Applications:
+
+```yaml
+# production/pangolin-app.yaml
+spec:
+ source:
+ helm:
+ values: |
+ resources:
+ limits:
+ cpu: 2000m
+ memory: 2Gi
+ replicas: 3
+
+# staging/pangolin-app.yaml
+spec:
+ source:
+ helm:
+ values: |
+ resources:
+ limits:
+ cpu: 500m
+ memory: 512Mi
+ replicas: 1
+```
+
+### Secrets with sealed-secrets
+
+Use sealed-secrets to safely store secrets in Git:
+
+```yaml
+# In Git
+apiVersion: bitnami.com/v1alpha1
+kind: SealedSecret
+metadata:
+ name: newt-auth
+ namespace: pangolin
+spec:
+ encryptedData:
+ PANGOLIN_ENDPOINT: AgC4F5qd...
+ NEWT_ID: AgB9l2pK...
+ NEWT_SECRET: AgDq3jX...
+```
+
+Argo CD applies the sealed secret; the cluster decrypts it.
+
+## Next steps
+
+
+
+
+
+
+
diff --git a/self-host/manual/kubernetes/gitops/flux.mdx b/self-host/manual/kubernetes/gitops/flux.mdx
new file mode 100644
index 0000000..637659f
--- /dev/null
+++ b/self-host/manual/kubernetes/gitops/flux.mdx
@@ -0,0 +1,517 @@
+---
+title: "Flux"
+description: "Deploy Pangolin and Newt using Flux for Git-driven GitOps reconciliation."
+---
+
+import PangolinCloudTocCta from "/snippets/pangolin-cloud-toc-cta.mdx";
+
+
+
+
+Flux is a declarative GitOps tool that uses Kubernetes-native Custom Resources to manage deployments. This guide covers installing Pangolin and Newt using Flux.
+
+
+## Flux prerequisites
+
+- Kubernetes 1.25+
+- `flux` CLI installed: [Flux install guide](https://fluxcd.io/flux/installation/)
+- Git repository for configuration (optional, can use built-in sources)
+- GitHub, GitLab, or other Git provider account (optional)
+
+Install Flux CLI:
+
+```bash
+# macOS/Linux with brew
+brew install flux
+
+# or curl
+curl -s https://fluxcd.io/install.sh | sudo bash
+
+# Verify
+flux --version
+```
+
+## Install Flux on your cluster
+
+### Option 1: Bootstrap Flux from GitHub
+
+Flux `bootstrap` automatically installs Flux and configures Git sync:
+
+```bash
+flux bootstrap github \
+ --owner=my-org \
+ --repo=infrastructure \
+ --personal \
+ --path=clusters/production
+```
+
+This creates the Git repository structure and installs Flux components.
+
+### Option 2: Manual Flux installation
+
+```bash
+# Create flux-system namespace and install Flux
+flux install --namespace=flux-system --network-policy=true
+```
+
+## Install Pangolin with Flux using HelmRelease
+
+### Step 1: Create HelmRepository
+
+Define the Fossorial Helm chart repository:
+
+```yaml
+apiVersion: source.toolkit.fluxcd.io/v1
+kind: HelmRepository
+metadata:
+ name: fossorial
+ namespace: flux-system
+spec:
+ interval: 5m
+ url: https://charts.fossorial.io
+```
+
+Apply:
+
+```bash
+kubectl apply -f helmrepo.yaml
+
+# Verify
+kubectl get helmrepo -n flux-system
+```
+
+### Step 2: Create Pangolin HelmRelease
+
+```yaml
+apiVersion: helm.toolkit.fluxcd.io/v2
+kind: HelmRelease
+metadata:
+ name: pangolin
+ namespace: pangolin
+spec:
+ interval: 10m
+ chart:
+ spec:
+ chart: pangolin
+ version: 0.1.0-alpha.0 # or use ~0.1.0 for auto-upgrades
+ sourceRef:
+ kind: HelmRepository
+ name: fossorial
+ namespace: flux-system
+
+ install:
+ crds: Create
+ upgrade:
+ crds: CreateReplace
+
+ values:
+ deployment:
+ type: controller
+ mode: multi
+
+ database:
+ mode: cloudnativepg
+
+ pangolin:
+ config:
+ app:
+ dashboard_url: https://pangolin.example.com
+ domains:
+ domain1:
+ base_domain: example.com
+ gerbil:
+ base_endpoint: vpn.example.com
+
+ ingress:
+ enabled: true
+ className: traefik
+ hosts:
+ - host: pangolin.example.com
+ paths:
+ - path: /
+ pathType: Prefix
+ tls:
+ - secretName: pangolin-tls
+ hosts:
+ - pangolin.example.com
+```
+
+Create namespace:
+
+```bash
+kubectl create namespace pangolin
+```
+
+Apply:
+
+```bash
+kubectl apply -f pangolin-helmrelease.yaml
+```
+
+### Step 3: Monitor reconciliation
+
+```bash
+# Check HelmRelease status
+kubectl get helmrelease -n pangolin
+
+# Watch live
+kubectl get helmrelease -n pangolin -w
+
+# Describe for details
+kubectl describe helmrelease pangolin -n pangolin
+
+# Check Flux logs
+flux logs --all-namespaces --follow
+```
+
+## Install Newt with Flux using HelmRelease
+
+### Step 1: Create Newt auth secret
+
+```bash
+kubectl create secret generic newt-auth \
+ -n pangolin \
+ --from-literal=PANGOLIN_ENDPOINT=https://pangolin.example.com \
+ --from-literal=NEWT_ID= \
+ --from-literal=NEWT_SECRET=
+```
+
+### Step 2: Create Newt HelmRelease
+
+```yaml
+apiVersion: helm.toolkit.fluxcd.io/v2
+kind: HelmRelease
+metadata:
+ name: newt
+ namespace: pangolin
+spec:
+ interval: 10m
+ chart:
+ spec:
+ chart: newt
+ version: 1.4.0
+ sourceRef:
+ kind: HelmRepository
+ name: fossorial
+ namespace: flux-system
+
+ values:
+ newtInstances:
+ - name: main-tunnel
+ enabled: true
+ auth:
+ existingSecretName: newt-auth
+```
+
+Apply:
+
+```bash
+kubectl apply -f newt-helmrelease.yaml
+```
+
+### Step 3: Verify
+
+```bash
+kubectl get helmrelease -n pangolin
+kubectl describe helmrelease newt -n pangolin
+```
+
+## Using Flux with Git repository (GitOps)
+
+Store Flux configuration in Git and have Flux automatically reconcile changes:
+
+### Repository structure
+
+```
+infrastructure/
+├── clusters/
+│ └── production/
+│ ├── flux-system/
+│ │ └── gotk-components.yaml (auto-generated)
+│ ├── pangolin/
+│ │ ├── helmrepo.yaml
+│ │ ├── pangolin-helmrelease.yaml
+│ │ └── newt-helmrelease.yaml
+│ └── kustomization.yaml
+└── apps/
+ ├── pangolin/
+ │ └── values.yaml
+ └── newt/
+ └── values.yaml
+```
+
+### GitRepository for configuration
+
+```yaml
+apiVersion: source.toolkit.fluxcd.io/v1
+kind: GitRepository
+metadata:
+ name: infrastructure
+ namespace: flux-system
+spec:
+ interval: 1m
+ url: https://github.com/my-org/infrastructure
+ ref:
+ branch: main
+```
+
+### Kustomization for syncing
+
+```yaml
+apiVersion: kustomize.toolkit.fluxcd.io/v1
+kind: Kustomization
+metadata:
+ name: production
+ namespace: flux-system
+spec:
+ interval: 10m
+ sourceRef:
+ kind: GitRepository
+ name: infrastructure
+ path: ./clusters/production
+ prune: true
+ wait: true
+```
+
+Flux watches `clusters/production` in Git and auto-applies all resources.
+
+## Using Flux with Kustomize overlays
+
+Manage environment-specific overlays with Flux:
+
+### Repository structure
+
+```
+overlays/
+├── dev/
+│ ├── kustomization.yaml
+│ └── pangolin-patch.yaml
+├── staging/
+│ └── kustomization.yaml
+└── prod/
+ ├── kustomization.yaml
+ └── pangolin-patch.yaml
+```
+
+### Kustomization resource
+
+```yaml
+apiVersion: kustomize.toolkit.fluxcd.io/v1
+kind: Kustomization
+metadata:
+ name: pangolin-prod
+ namespace: flux-system
+spec:
+ interval: 10m
+ sourceRef:
+ kind: GitRepository
+ name: infrastructure
+ path: ./overlays/prod
+ prune: true
+ wait: true
+```
+
+Flux builds and applies the Kustomize overlay automatically.
+
+## Using Flux with OCI Helm charts
+
+If Helm charts are available in an OCI registry:
+
+```yaml
+apiVersion: source.toolkit.fluxcd.io/v1beta2
+kind: OCIRepository
+metadata:
+ name: fossorial-oci
+ namespace: flux-system
+spec:
+ interval: 5m
+ url: oci://registry.example.com/fossorial
+
+---
+apiVersion: helm.toolkit.fluxcd.io/v2
+kind: HelmRelease
+metadata:
+ name: pangolin
+ namespace: pangolin
+spec:
+ interval: 10m
+ chart:
+ spec:
+ chart: pangolin
+ version: 0.1.0-alpha.0
+ sourceRef:
+ kind: OCIRepository
+ name: fossorial-oci
+ namespace: flux-system
+ values:
+ # ... values ...
+```
+
+## Advanced: Dependency ordering
+
+Order HelmReleases to install dependencies first:
+
+```yaml
+apiVersion: helm.toolkit.fluxcd.io/v2
+kind: HelmRelease
+metadata:
+ name: cert-manager
+ namespace: cert-manager
+spec:
+ interval: 10m
+ chart:
+ spec:
+ chart: cert-manager
+ # ...
+
+---
+apiVersion: helm.toolkit.fluxcd.io/v2
+kind: HelmRelease
+metadata:
+ name: pangolin
+ namespace: pangolin
+spec:
+ interval: 10m
+ dependsOn:
+ - name: cert-manager
+ namespace: cert-manager
+ chart:
+ spec:
+ chart: pangolin
+ # ...
+```
+
+Flux ensures `cert-manager` reconciles before `pangolin`.
+
+## Advanced: valuesFrom ConfigMap/Secret
+
+Store values in ConfigMaps or Secrets, referenced from HelmRelease:
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: pangolin-values
+ namespace: pangolin
+data:
+ values.yaml: |
+ deployment:
+ type: controller
+ mode: multi
+
+---
+apiVersion: helm.toolkit.fluxcd.io/v2
+kind: HelmRelease
+metadata:
+ name: pangolin
+ namespace: pangolin
+spec:
+ interval: 10m
+ chart:
+ spec:
+ chart: pangolin
+ # ...
+ valuesFrom:
+ - kind: ConfigMap
+ name: pangolin-values
+```
+
+Flux extracts values from the ConfigMap and applies them to the HelmRelease.
+
+## Troubleshooting Flux
+
+### Check Flux components
+
+```bash
+kubectl get deployments -n flux-system
+flux check
+```
+
+### Check HelmRelease status
+
+```bash
+kubectl get helmrelease -n pangolin
+kubectl describe helmrelease pangolin -n pangolin
+kubectl get helmrelease pangolin -n pangolin -o yaml
+```
+
+### View reconciliation logs
+
+```bash
+flux logs --all-namespaces --follow
+
+# Specific resource
+kubectl logs -n flux-system deployment/helm-controller -f
+```
+
+### Manual reconciliation
+
+```bash
+flux reconcile helmrelease pangolin -n pangolin
+flux reconcile kustomization production -n flux-system
+```
+
+### Suspend reconciliation
+
+```bash
+flux suspend helmrelease pangolin -n pangolin
+```
+
+### Resume reconciliation
+
+```bash
+flux resume helmrelease pangolin -n pangolin
+```
+
+## Multi-environment example
+
+### Bootstrap multiple clusters
+
+```bash
+# Production cluster
+flux bootstrap github \
+ --owner=my-org \
+ --repo=infrastructure \
+ --personal \
+ --path=clusters/production
+
+# Staging cluster (from different checkout)
+flux bootstrap github \
+ --owner=my-org \
+ --repo=infrastructure \
+ --personal \
+ --path=clusters/staging
+```
+
+Each cluster reconciles its own `clusters/*/` directory.
+
+### Repository structure
+
+```
+clusters/
+├── production/
+│ ├── kustomization.yaml
+│ └── pangolin/
+│ ├── helmrepo.yaml
+│ └── helmrelease.yaml (prod values)
+├── staging/
+│ ├── kustomization.yaml
+│ └── pangolin/
+│ ├── helmrepo.yaml
+│ └── helmrelease.yaml (staging values)
+└── dev/
+ ├── kustomization.yaml
+ └── pangolin/
+ └── helmrelease.yaml (dev values)
+```
+
+Each environment's HelmRelease uses environment-specific values.
+
+## Next steps
+
+
+
+
+
+
+
diff --git a/self-host/manual/kubernetes/gitops/overview.mdx b/self-host/manual/kubernetes/gitops/overview.mdx
new file mode 100644
index 0000000..6c3a547
--- /dev/null
+++ b/self-host/manual/kubernetes/gitops/overview.mdx
@@ -0,0 +1,76 @@
+---
+title: "GitOps Overview"
+description: "Deploy Pangolin and Sites (Newt) with GitOps workflows such as Argo CD or Flux."
+---
+
+import PangolinCloudTocCta from "/snippets/pangolin-cloud-toc-cta.mdx";
+
+
+
+Use GitOps when Pangolin and Sites (Newt) should be reconciled from Git instead of being installed manually from a local shell.
+GitOps can be used together with Blueprints — see [Blueprint config reference](/self-host/advanced/config-file) for details.
+
+These guides assume you already use, or plan to use, a GitOps controller such as Argo CD or Flux.
+General GitOps concepts such as reconciliation, desired state, and Git-driven workflows are outside the scope of this documentation. Refer to your GitOps controller's documentation for those concepts.
+
+## Supported GitOps paths
+
+
+
+ Deploy Pangolin or Sites (Newt) with Argo CD Applications.
+
+
+ Deploy Pangolin or Sites (Newt) with Flux HelmRelease or Kustomization resources.
+
+
+
+## What GitOps manages
+
+A GitOps workflow can reconcile the same deployment inputs used by the other Kubernetes guides:
+
+| Input | Used for |
+| --- | --- |
+| Helm chart values | Configure Pangolin, controller mode, database mode, ingress, Sites, and related components. |
+| Kustomize overlays | Patch or compose rendered manifests for environment-specific deployments. |
+| Kubernetes Secrets | Provide credentials, TLS material, database connection details, or Site connector credentials. |
+| Custom resources | Manage Argo CD Applications, Flux HelmReleases, Flux Kustomizations, or related controller resources. |
+
+## Recommended layout
+
+Keep the Pangolin and Site configuration close to the cluster or environment that owns it.
+
+```text
+infrastructure/
+├── clusters/
+│ ├── production/
+│ │ ├── pangolin/
+│ │ └── sites/
+│ ├── staging/
+│ │ ├── pangolin/
+│ │ └── sites/
+│ └── dev/
+│ ├── pangolin/
+│ └── sites/
+└── shared/
+ ├── pangolin/
+ └── sites/
+```
+
+Use environment-specific directories for values, patches, and secrets that differ between clusters. Use shared directories only for reusable configuration that should stay the same across environments.
+
+## Next steps
+
+
+
+ Create Argo CD Applications for Pangolin and Sites (Newt).
+
+
+ Create Flux sources, HelmReleases, or Kustomizations for Pangolin and Sites (Newt).
+
+
+ Compare the supported Kubernetes deployment paths.
+
+
+ Review cluster, networking, storage, RBAC, and resource requirements.
+
+
diff --git a/self-host/manual/kubernetes/helm.mdx b/self-host/manual/kubernetes/helm.mdx
new file mode 100644
index 0000000..cb16438
--- /dev/null
+++ b/self-host/manual/kubernetes/helm.mdx
@@ -0,0 +1,390 @@
+---
+title: "Helm"
+description: "Kubernetes installation using Helm charts for Pangolin and Newt."
+---
+
+import PangolinCloudTocCta from "/snippets/pangolin-cloud-toc-cta.mdx";
+
+
+
+
+Helm is the recommended method for standard Kubernetes installations of Pangolin and Newt.
+
+Use Helm when you want a chart-based workflow for installing, upgrading, rolling back, and removing releases from your cluster.
+
+## Helm repository setup
+
+Add the Fossorial Helm chart repository:
+
+```bash
+helm repo add fossorial https://charts.fossorial.io
+helm repo update fossorial
+```
+
+Search for available charts:
+
+```bash
+helm search repo fossorial
+```
+
+The classic Helm repository flow is the default path for most installations:
+
+```bash
+helm install my-newt fossorial/newt
+helm install my-pangolin fossorial/pangolin
+```
+
+## Installation overview
+
+A typical Helm installation flow looks like this:
+
+
+
+ Create the namespace manually and apply required labels or annotations.
+
+
+ Create a `values.yaml` file for each release (`values-pangolin.yaml`, `values-newt.yaml`).
+
+
+ Install with `helm upgrade --install` to support first install and future updates with the same command.
+
+
+ Confirm Helm release status and Kubernetes resources after deployment.
+
+
+
+
+It is recommended to create the namespace explicitly before installation. This allows you to apply Pod Security Admission labels, policy labels, annotations, or other cluster-specific metadata before the chart creates workloads.
+
+
+For detailed installation steps, see:
+
+* [Pangolin Helm Quick-Start](/self-host/manual/kubernetes/pangolin/helm) — Install Pangolin
+* [Site (Newt) Helm Quick-Start](/self-host/manual/kubernetes/newt/helm) — Install Site (Newt)
+
+## Install command patterns
+
+
+```bash Classic Helm repository
+helm upgrade --install pangolin fossorial/pangolin \
+ --namespace pangolin \
+ --values values-pangolin.yaml
+
+helm upgrade --install newt fossorial/newt \
+ --namespace pangolin \
+ --values values-newt.yaml
+```
+
+```bash OCI (GHCR)
+helm upgrade --install pangolin oci://ghcr.io/fosrl/helm-charts/pangolin \
+ --version 0.1.0-alpha.0 \
+ --namespace pangolin \
+ --values values-pangolin.yaml
+
+helm upgrade --install newt oci://ghcr.io/fosrl/helm-charts/newt \
+ --version 1.4.0 \
+ --namespace pangolin \
+ --values values-newt.yaml
+```
+
+
+## Namespace preparation
+
+Create the namespace before installing the chart:
+
+```bash
+kubectl create namespace pangolin
+```
+
+If your cluster uses Pod Security Admission or namespace-based policies, apply the required labels before installation.
+
+Example:
+
+```bash
+kubectl label namespace pangolin \
+ pod-security.kubernetes.io/enforce=baseline \
+ pod-security.kubernetes.io/audit=restricted \
+ pod-security.kubernetes.io/warn=restricted
+```
+
+
+Pangolin deployments that include Gerbil require permissions that are not compatible with a restricted namespace profile, because Gerbil manages WireGuard and requires capabilities such as `NET_ADMIN`.
+
+
+For more details, see [Prerequisites](/self-host/manual/kubernetes/prerequisites).
+
+## Install with a values file
+
+Both charts use values files for configuration.
+
+Pangolin example:
+
+```bash
+helm upgrade --install pangolin fossorial/pangolin \
+ --namespace pangolin \
+ --values values-pangolin.yaml
+```
+
+Newt example:
+
+```bash
+helm upgrade --install newt fossorial/newt \
+ --namespace pangolin \
+ --values values-newt.yaml
+```
+
+Using `helm upgrade --install` keeps the command usable for both the first installation and later configuration changes.
+
+
+Do not use `--create-namespace` if you need custom namespace labels or annotations. Create the namespace first and then run Helm against that namespace.
+
+
+## Values and configuration
+
+Keep reusable configuration in a values file:
+
+```bash
+helm upgrade --install pangolin fossorial/pangolin \
+ --namespace pangolin \
+ --values values-pangolin.yaml
+```
+
+Use `--set` only for small tests or temporary overrides:
+
+```bash
+helm upgrade --install pangolin fossorial/pangolin \
+ --namespace pangolin \
+ --set example.key=value
+```
+
+Common value sources:
+
+* `values-pangolin.yaml` for Pangolin.
+* `values-newt.yaml` for Newt.
+* Kubernetes Secrets for credentials.
+* Existing cluster resources such as TLS secrets, StorageClasses, or ingress controllers.
+
+Full configuration options are documented here:
+
+* [Pangolin Configuration](/self-host/manual/kubernetes/pangolin/configuration)
+* [Newt Configuration](/self-host/manual/kubernetes/newt/configuration)
+
+## Artifact Hub and chart discovery
+
+The Fossorial charts can be installed from the Fossorial Helm repository:
+
+```bash
+helm repo add fossorial https://charts.fossorial.io
+helm repo update fossorial
+helm search repo fossorial
+```
+
+Artifact Hub can also be used to discover published chart metadata, available versions, install commands, and repository information.
+
+
+Always verify the chart name, chart version, and repository URL before copying install commands into production.
+
+
+## OCI-based charts
+
+OCI is not a separate installation method. It only changes where Helm pulls the chart from.
+
+For Pangolin and Newt, OCI chart publishing is available in GHCR:
+
+* Newt: `oci://ghcr.io/fosrl/helm-charts/newt`
+* Pangolin: `oci://ghcr.io/fosrl/helm-charts/pangolin`
+
+You still use Helm in the same way: choose a chart, select a version, provide values, and install the release.
+
+### Pull OCI charts
+
+Newt example:
+
+```bash
+helm pull oci://ghcr.io/fosrl/helm-charts/newt \
+ --version 1.4.0
+```
+
+Pangolin example:
+
+```bash
+helm pull oci://ghcr.io/fosrl/helm-charts/pangolin \
+ --version 0.1.0-alpha.0
+```
+
+### Install from OCI
+
+Newt example:
+
+```bash
+helm upgrade --install newt oci://ghcr.io/fosrl/helm-charts/newt \
+ --version 1.4.0 \
+ --namespace pangolin \
+ --values values-newt.yaml
+```
+
+Pangolin example:
+
+```bash
+helm upgrade --install pangolin oci://ghcr.io/fosrl/helm-charts/pangolin \
+ --version 0.1.0-alpha.0 \
+ --namespace pangolin \
+ --values values-pangolin.yaml
+```
+
+
+Use the classic Helm repository when you want the normal `helm repo add` and `helm search repo` workflow. Use OCI when you want to pull charts directly from GHCR or when your deployment tooling expects OCI chart references.
+
+
+## Upgrade and maintenance
+
+### Update the classic Helm repository
+
+```bash
+helm repo update fossorial
+```
+
+This step is only needed when using the classic Helm repository. OCI installs pull the chart by OCI reference and version.
+
+### Upgrade Pangolin
+
+Classic Helm repository:
+
+```bash
+helm upgrade pangolin fossorial/pangolin \
+ --namespace pangolin \
+ --values values-pangolin.yaml
+```
+
+OCI:
+
+```bash
+helm upgrade pangolin oci://ghcr.io/fosrl/helm-charts/pangolin \
+ --version 0.1.0-alpha.0 \
+ --namespace pangolin \
+ --values values-pangolin.yaml
+```
+
+### Upgrade Newt
+
+Classic Helm repository:
+
+```bash
+helm upgrade newt fossorial/newt \
+ --namespace pangolin \
+ --values values-newt.yaml
+```
+
+OCI:
+
+```bash
+helm upgrade newt oci://ghcr.io/fosrl/helm-charts/newt \
+ --version 1.4.0 \
+ --namespace pangolin \
+ --values values-newt.yaml
+```
+
+### Check release status
+
+```bash
+helm status pangolin --namespace pangolin
+helm history pangolin --namespace pangolin
+```
+
+```bash
+helm status newt --namespace pangolin
+helm history newt --namespace pangolin
+```
+
+### View rendered manifests
+
+```bash
+helm get manifest pangolin --namespace pangolin
+```
+
+```bash
+helm get manifest newt --namespace pangolin
+```
+
+### View applied values
+
+```bash
+helm get values pangolin --namespace pangolin
+```
+
+```bash
+helm get values newt --namespace pangolin
+```
+
+### Roll back a release
+
+```bash
+helm rollback pangolin --namespace pangolin
+```
+
+```bash
+helm rollback newt --namespace pangolin
+```
+
+### Uninstall a release
+
+```bash
+helm uninstall pangolin --namespace pangolin
+```
+
+```bash
+helm uninstall newt --namespace pangolin
+```
+
+
+Uninstalling a Helm release does not always remove persistent volumes, externally managed secrets, DNS records, certificates, or cloud load balancers. Review the namespace and related cluster resources before deleting data.
+
+
+## Troubleshooting
+
+For component-specific troubleshooting, see:
+
+* [Pangolin Troubleshooting](/self-host/manual/kubernetes/pangolin/troubleshooting)
+* [Newt Troubleshooting](/self-host/manual/kubernetes/newt/troubleshooting)
+
+Useful Helm commands:
+
+```bash
+helm list --all-namespaces
+helm status <release-name> --namespace <namespace>
+helm history <release-name> --namespace <namespace>
+helm get values <release-name> --namespace <namespace>
+helm get manifest <release-name> --namespace <namespace>
+```
+
+Useful Kubernetes commands:
+
+```bash
+kubectl get pods -n pangolin
+kubectl get events -n pangolin --sort-by=.lastTimestamp
+kubectl describe pod <pod-name> -n pangolin
+kubectl logs <pod-name> -n pangolin
+```
+
+## Next steps
+
+
+
+ Install Pangolin with the Helm chart.
+
+
+ Install Site (Newt) with the Helm chart.
+
+
+ Configure Pangolin chart values for your cluster.
+
+
+ Configure Newt chart values and credentials.
+
+
+ Deploy the charts with Argo CD.
+
+
+ Deploy the charts with Flux.
+
+
diff --git a/self-host/manual/kubernetes/helmfile.mdx b/self-host/manual/kubernetes/helmfile.mdx
new file mode 100644
index 0000000..27ae5e3
--- /dev/null
+++ b/self-host/manual/kubernetes/helmfile.mdx
@@ -0,0 +1,386 @@
+---
+title: "Helmfile"
+description: "Advanced Kubernetes installation using Helmfile for multi-release orchestration."
+---
+
+import PangolinCloudTocCta from "/snippets/pangolin-cloud-toc-cta.mdx";
+
+
+
+
+Helmfile is a declarative way to manage multiple Helm releases in a single workflow. Use Helmfile when you need to install Pangolin and/or Newt alongside other Kubernetes components or manage multiple releases together.
+
+## When to use Helmfile
+
+Use Helmfile if you want to:
+
+- **Orchestrate multiple Helm releases** in a single file (Pangolin + Newt + dependencies).
+- **Manage dependencies** between releases (e.g., install cert-manager before Pangolin).
+- **Keep release definitions** in version control and synchronized.
+- **Avoid repeated `helm install` commands** for complex multi-release setups.
+
+**Not using Helmfile?** If you're installing only Pangolin or only Newt without additional services, [Helm quick-start](/self-host/manual/kubernetes/helm) is simpler.
+
+## Helm vs. Helmfile
+
+| Aspect | Helm | Helmfile |
+| --- | --- | --- |
+| **Purpose** | Install/manage a single Helm chart release | Orchestrate multiple Helm chart releases |
+| **Command** | `helm install`, `helm upgrade` | `helmfile sync`, `helmfile apply` |
+| **Use case** | Quick install, single app | Multi-release, dependencies, fleet management |
+| **Complexity** | Low | Medium |
+
+## Helmfile prerequisites
+
+- Helm 3.10+
+- `helmfile` CLI installed: [Helmfile GitHub](https://github.com/helmfile/helmfile)
+- Basic knowledge of Helm values and YAML
+
+Install helmfile:
+
+```bash
+# macOS/Linux with brew
+brew install helmfile
+
+# or download from releases
+wget https://github.com/helmfile/helmfile/releases/download/v<version>/helmfile_<version>_linux_amd64.tar.gz
+chmod +x helmfile
+sudo mv helmfile /usr/local/bin/
+```
+
+Verify:
+
+```bash
+helmfile --version
+```
+
+## Basic Helmfile structure
+
+A Helmfile is a YAML file (typically named `helmfile.yaml`) that declares multiple releases:
+
+```yaml
+# helmfile.yaml
+releases:
+ - name: cert-manager
+ namespace: cert-manager
+ createNamespace: true
+ chart: jetstack/cert-manager
+ version: v1.14.0
+
+ - name: pangolin
+ namespace: pangolin
+ createNamespace: true
+ chart: fossorial/pangolin
+ version: 0.1.0-alpha.0
+ values:
+ - pangolin-values.yaml
+
+ - name: newt
+ namespace: pangolin
+ chart: fossorial/newt
+ version: 1.4.0
+ values:
+ - newt-values.yaml
+ dependsOn:
+ - pangolin
+```
+
+## Helmfile with Pangolin and Newt
+
+### 1. Add Helm repositories
+
+```bash
+helm repo add jetstack https://charts.jetstack.io
+helm repo add fossorial https://charts.fossorial.io
+helm repo update
+```
+
+### 2. Create Helmfile
+
+Create `helmfile.yaml`:
+
+```yaml
+helmDefaults:
+ atomic: true
+ cleanupOnFail: true
+ wait: true
+ timeout: 600
+ recreatePods: true
+ force: false
+
+repositories:
+ - name: jetstack
+ url: https://charts.jetstack.io
+ - name: fossorial
+ url: https://charts.fossorial.io
+
+releases:
+ - name: cert-manager
+ namespace: cert-manager
+ createNamespace: true
+ chart: jetstack/cert-manager
+ version: v1.14.0
+ set:
+ installCRDs: true
+
+ - name: pangolin
+ namespace: pangolin
+ createNamespace: true
+ chart: fossorial/pangolin
+ version: 0.1.0-alpha.0
+ values:
+ - ./values/pangolin.yaml
+ dependsOn:
+ - cert-manager
+
+ - name: newt
+ namespace: pangolin
+ chart: fossorial/newt
+ version: 1.4.0
+ values:
+ - ./values/newt.yaml
+ dependsOn:
+ - pangolin
+```
+
+### 3. Create values files
+
+Create `values/pangolin.yaml`:
+
+```yaml
+deployment:
+ type: controller
+ mode: multi
+
+database:
+ mode: cloudnativepg
+
+pangolin:
+ config:
+ app:
+ dashboard_url: https://pangolin.example.com
+ domains:
+ domain1:
+ base_domain: example.com
+ gerbil:
+ base_endpoint: vpn.example.com
+
+ingress:
+ enabled: true
+ className: traefik
+ hosts:
+ - host: pangolin.example.com
+ paths:
+ - path: /
+ pathType: Prefix
+ tls:
+ - secretName: pangolin-tls
+ hosts:
+ - pangolin.example.com
+```
+
+Create `values/newt.yaml`:
+
+```yaml
+newtInstances:
+ - name: main-tunnel
+ enabled: true
+ auth:
+ existingSecretName: newt-auth
+```
+
+### 4. Create Newt auth secret
+
+Before applying Helmfile:
+
+```bash
+kubectl create namespace pangolin
+kubectl create secret generic newt-auth \
+ -n pangolin \
+ --from-literal=PANGOLIN_ENDPOINT=https://pangolin.example.com \
+ --from-literal=NEWT_ID=<newt-id> \
+ --from-literal=NEWT_SECRET=<newt-secret>
+```
+
+### 5. Deploy with Helmfile
+
+```bash
+# Preview changes
+helmfile diff
+
+# Apply releases
+helmfile sync
+
+# or
+helmfile apply
+```
+
+### 6. Verify deployment
+
+```bash
+helmfile status
+
+# Check individual releases
+helm status cert-manager -n cert-manager
+helm status pangolin -n pangolin
+helm status newt -n pangolin
+
+# Check pods
+kubectl get pods -n pangolin
+kubectl get pods -n cert-manager
+```
+
+## Advanced: Helmfile with environments
+
+For multi-environment setups (dev, staging, prod), use Helmfile environments:
+
+```yaml
+environments:
+ dev:
+ values:
+ environment: dev
+ domain: dev.example.com
+ replicaCount: 1
+ prod:
+ values:
+ environment: prod
+ domain: pangolin.example.com
+ replicaCount: 3
+
+helmDefaults:
+ atomic: true
+ wait: true
+
+repositories:
+ - name: fossorial
+ url: https://charts.fossorial.io
+
+releases:
+ - name: pangolin
+ namespace: pangolin
+ createNamespace: true
+ chart: fossorial/pangolin
+ version: 0.1.0-alpha.0
+ values:
+ - ./values/pangolin-{{ .Environment.Values.environment }}.yaml
+```
+
+Deploy to specific environment:
+
+```bash
+helmfile -e dev sync
+helmfile -e prod sync
+```
+
+## Helmfile with GitOps
+
+### Using Helmfile with FluxCD
+
+FluxCD can reconcile Helmfile declarations using the `helmfile-controller`. This allows Git-driven Helmfile updates:
+
+1. Commit Helmfile and values to Git
+2. Create HelmRelease for each release in your Helmfile
+3. Flux reconciles and applies changes
+
+See [Flux Guide](/self-host/manual/kubernetes/gitops/flux) for details.
+
+### Using Helmfile with Argo CD
+
+While Argo CD has native Helm and Kustomize support, you can:
+
+1. Use Helmfile to render manifests: `helmfile template > manifests.yaml`
+2. Commit manifests to Git
+3. Have Argo CD manage the raw YAML
+
+Alternatively, use Helm source in Argo CD (simpler than Helmfile for single releases).
+
+## Troubleshooting Helmfile
+
+### Check syntax
+
+```bash
+helmfile lint
+```
+
+### Debug release dependencies
+
+```bash
+helmfile template
+```
+
+### See what will be deployed
+
+```bash
+helmfile diff
+```
+
+### Remove releases
+
+```bash
+helmfile destroy
+```
+
+
+`helmfile destroy` uninstalls all releases and may delete data (e.g., databases). Use with caution in production.
+
+
+## Common patterns
+
+### Helmfile with local chart overrides
+
+```yaml
+releases:
+ - name: pangolin
+ namespace: pangolin
+ chart: ./charts/pangolin # local path
+ values:
+ - values.yaml
+```
+
+### Helmfile with inline values
+
+```yaml
+releases:
+ - name: pangolin
+ namespace: pangolin
+ chart: fossorial/pangolin
+ set:
+ deployment.type: controller
+ deployment.mode: multi
+```
+
+### Helmfile with conditional releases
+
+```yaml
+releases:
+ - name: cert-manager
+ namespace: cert-manager
+ createNamespace: true
+ chart: jetstack/cert-manager
+ installed: {{ .Environment.Values.installCertManager | default true }}
+```
+
+## Important notes
+
+### Official support
+
+Helmfile for Pangolin/Newt Kubernetes deployments is **advanced/community-supported**. The primary supported methods are:
+
+- Helm directly
+- Kustomize overlays
+- GitOps tools (Argo CD, Flux)
+
+If you encounter Helmfile-specific issues, refer to the [Helmfile documentation](https://github.com/helmfile/helmfile) and community.
+
+### Helm chart dependencies
+
+The Pangolin Helm chart includes optional sub-chart dependencies (e.g., CloudNativePG operator). Helmfile does not manage these—they're handled by Helm. Ensure chart dependencies are available when installing.
+
+## Next steps
+
+
+
+
+
+
+
diff --git a/self-host/manual/kubernetes/kustomize.mdx b/self-host/manual/kubernetes/kustomize.mdx
new file mode 100644
index 0000000..f094e2b
--- /dev/null
+++ b/self-host/manual/kubernetes/kustomize.mdx
@@ -0,0 +1,381 @@
+---
+title: "Kustomize"
+description: "Customize Helm-rendered Kubernetes manifests with Kustomize overlays."
+---
+
+import PangolinCloudTocCta from "/snippets/pangolin-cloud-toc-cta.mdx";
+
+
+
+
+Kustomize can be used to customize Kubernetes manifests with bases, overlays, and patches.
+
+For Pangolin and Newt, the supported Kustomize workflow is to render the Helm charts into manifests and use those rendered manifests as the Kustomize base.
+
+Use Kustomize when you need:
+
+- environment-specific overlays for dev, staging, or production
+- explicit manifest patches in Git
+- a manifest-driven workflow for GitOps tools
+- small changes on top of a shared base without maintaining separate full manifests
+
+## Supported workflow
+
+The chart repository does not provide native Kustomize bases. Use this workflow instead:
+
+
+
+ Render the Helm chart with your values file and save the output as base manifests.
+
+
+ Commit rendered manifests as the Kustomize base in Git.
+
+
+ Create overlays for each environment (for example dev, staging, production).
+
+
+ Apply overlays manually or reconcile them with Argo CD or Flux.
+
+
+
+
+Do not manage the same resources with both a live Helm release and Kustomize. Pick one ownership model per environment.
+
+
+Recommended ownership model:
+
+- Use Helm only to render manifests.
+- Use Kustomize, Argo CD, or Flux to apply and reconcile the rendered manifests.
+- Re-render the base when upgrading the chart version.
+
+## Example repository layout
+
+```text
+my-pangolin-k8s/
+├── base/
+│ ├── kustomization.yaml
+│ ├── pangolin.yaml
+│ └── newt.yaml
+├── overlays/
+│ ├── dev/
+│ │ ├── kustomization.yaml
+│ │ └── pangolin-resources.patch.yaml
+│ ├── staging/
+│ │ ├── kustomization.yaml
+│ │ └── pangolin-resources.patch.yaml
+│ └── prod/
+│ ├── kustomization.yaml
+│ └── pangolin-resources.patch.yaml
+└── values/
+ ├── values-pangolin.yaml
+ └── values-newt.yaml
+```
+
+## Step 1: Render manifests from Helm
+
+Create a base directory:
+
+```bash
+mkdir -p base overlays/dev overlays/staging overlays/prod
+```
+
+Render Pangolin:
+
+
+```bash Classic Helm repository
+helm template pangolin fossorial/pangolin \
+ --namespace pangolin \
+ --values values/values-pangolin.yaml \
+ > base/pangolin.yaml
+```
+
+```bash OCI (GHCR)
+helm template pangolin oci://ghcr.io/fosrl/helm-charts/pangolin \
+ --version 0.1.0-alpha.0 \
+ --namespace pangolin \
+ --values values/values-pangolin.yaml \
+ > base/pangolin.yaml
+```
+
+
+Render Newt:
+
+
+```bash Classic Helm repository
+helm template newt fossorial/newt \
+ --namespace pangolin \
+ --values values/values-newt.yaml \
+ > base/newt.yaml
+```
+
+```bash OCI (GHCR)
+helm template newt oci://ghcr.io/fosrl/helm-charts/newt \
+ --version 1.4.0 \
+ --namespace pangolin \
+ --values values/values-newt.yaml \
+ > base/newt.yaml
+```
+
+
+## Step 2: Create the base kustomization
+
+```yaml
+# base/kustomization.yaml
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+resources:
+ - pangolin.yaml
+ - newt.yaml
+```
+
+## Step 3: Create an overlay
+
+Use `resources` to reference the base.
+
+```yaml
+# overlays/prod/kustomization.yaml
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+resources:
+ - ../../base
+
+labels:
+ - pairs:
+ app.kubernetes.io/environment: production
+ app.kubernetes.io/managed-by: kustomize
+
+patches:
+ - path: pangolin-resources.patch.yaml
+ target:
+ group: apps
+ version: v1
+ kind: Deployment
+ name: pangolin
+```
+
+
+Avoid `namePrefix` and `nameSuffix` for Helm-rendered bases unless you have verified every generated reference. Renaming chart-generated resources can break service names, selectors, secret references, and workload dependencies.
+
+
+## Step 4: Add patches
+
+Example Strategic Merge patch for container resources:
+
+```yaml
+# overlays/prod/pangolin-resources.patch.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: pangolin
+spec:
+ template:
+ spec:
+ containers:
+ - name: pangolin
+ resources:
+ requests:
+ cpu: 1000m
+ memory: 1Gi
+ limits:
+ memory: 2Gi
+```
+
+Example JSON6902-style inline patch:
+
+```yaml
+# overlays/prod/kustomization.yaml
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+resources:
+ - ../../base
+
+patches:
+ - target:
+ group: apps
+ version: v1
+ kind: Deployment
+ name: pangolin
+ patch: |-
+ - op: replace
+ path: /spec/template/spec/containers/0/resources/requests/cpu
+ value: "1000m"
+```
+
+
+Modern Kustomize uses the `patches` field for both Strategic Merge and JSON6902-style patches. Avoid `patchesStrategicMerge`, `patchesJson6902`, and `bases` in new examples.
+
+
+## Apply an overlay
+
+Preview the rendered output:
+
+```bash
+kustomize build overlays/prod
+```
+
+Compare with the live cluster:
+
+```bash
+kustomize build overlays/prod | kubectl diff -f -
+```
+
+Apply the overlay:
+
+```bash
+kubectl apply -k overlays/prod
+```
+
+Or apply the rendered output:
+
+```bash
+kustomize build overlays/prod | kubectl apply -f -
+```
+
+## Updating the base
+
+When upgrading chart versions or changing Helm values, re-render the base and review the diff.
+
+```bash
+helm repo update fossorial
+```
+
+Render the updated chart output:
+
+```bash
+helm template pangolin fossorial/pangolin \
+ --namespace pangolin \
+ --values values/values-pangolin.yaml \
+ > base/pangolin.yaml
+```
+
+```bash
+helm template newt fossorial/newt \
+ --namespace pangolin \
+ --values values/values-newt.yaml \
+ > base/newt.yaml
+```
+
+Then validate the overlay:
+
+```bash
+kustomize build overlays/prod
+```
+
+Review changes before applying:
+
+```bash
+git diff
+kustomize build overlays/prod | kubectl diff -f -
+```
+
+Apply after review:
+
+```bash
+kubectl apply -k overlays/prod
+```
+
+## Important considerations
+
+### Namespace handling
+
+Render the charts with the namespace you intend to use:
+
+```bash
+helm template pangolin fossorial/pangolin \
+ --namespace pangolin \
+ --values values/values-pangolin.yaml \
+ > base/pangolin.yaml
+```
+
+Create the namespace before applying the overlay:
+
+```bash
+kubectl create namespace pangolin
+```
+
+Apply any required Pod Security Admission labels or cluster-policy labels before workloads are created.
+
+### Secrets
+
+Do not commit plaintext secrets into rendered manifests.
+
+Use one of these approaches instead:
+
+* reference existing Kubernetes Secrets in the values file before rendering
+* create secrets separately with your secret-management workflow
+* use Sealed Secrets, External Secrets Operator, SOPS, or another GitOps-safe secret solution
+
+### Do not mix ownership models
+
+Avoid this pattern:
+
+```text
+helm upgrade pangolin fossorial/pangolin
+kubectl apply -k overlays/prod
+```
+
+This creates two tools managing the same objects.
+
+Use one of these models instead:
+
+| Model | Description |
+| ----------------- | ---------------------------------------------------------------------------------------- |
+| Helm-managed | Helm installs and upgrades the live release. Kustomize is not used for the same objects. |
+| Kustomize-managed | Helm only renders the base. Kustomize applies and owns the live objects. |
+| GitOps-managed | Argo CD or Flux applies the Kustomize overlay and owns reconciliation. |
+
+## Troubleshooting
+
+Validate the overlay:
+
+```bash
+kustomize build overlays/prod
+```
+
+Check the generated YAML:
+
+```bash
+kustomize build overlays/prod > manifests.yaml
+```
+
+Run a server-side dry run:
+
+```bash
+kubectl apply -f manifests.yaml --dry-run=server
+```
+
+Preview live changes:
+
+```bash
+kubectl diff -f manifests.yaml
+```
+
+Check live resources:
+
+```bash
+kubectl get all -n pangolin
+kubectl get events -n pangolin --sort-by=.lastTimestamp
+```
+
+## Next steps
+
+
+
+ Install Pangolin with rendered manifests and Kustomize overlays.
+
+
+ Install Newt with rendered manifests and Kustomize overlays.
+
+
+ Reconcile Kustomize overlays with Argo CD.
+
+
+ Reconcile Kustomize overlays with Flux.
+
+
+ Troubleshoot Pangolin deployments on Kubernetes.
+
+
diff --git a/self-host/manual/kubernetes/newt/configuration.mdx b/self-host/manual/kubernetes/newt/configuration.mdx
new file mode 100644
index 0000000..ee5eb2a
--- /dev/null
+++ b/self-host/manual/kubernetes/newt/configuration.mdx
@@ -0,0 +1,770 @@
+---
+title: "Configuration"
+description: "Configuration reference for Newt Kubernetes deployments."
+---
+
+import PangolinCloudTocCta from "/snippets/pangolin-cloud-toc-cta.mdx";
+
+
+
+
+This page covers the main Newt Kubernetes configuration options for Helm and Kustomize workflows.
+
+For exhaustive option coverage, refer to the chart resources:
+
+
+
+
+
+
+
+## Version context
+
+This page is aligned with the Newt Helm chart `1.4.0`.
+
+| Item | Value |
+| --- | --- |
+| Chart version | `1.4.0` |
+| App version | `1.12.3` |
+| Kubernetes version | `>=1.30.14-0` |
+| Default image | `docker.io/fosrl/newt:1.12.3` |
+
+Chart `1.4.0` also publishes the Newt image metadata for Docker Hub and GHCR and includes Artifact Hub signing metadata.
+
+## Configuration sections
+
+
+
+
+
+Use `global.image` to control the Newt container image used by all instances.
+
+```yaml
+global:
+ image:
+ registry: docker.io
+ repository: fosrl/newt
+ tag: ""
+ digest: ""
+ imagePullPolicy: IfNotPresent
+ imagePullSecrets: []
+
+ logLevel: INFO
+```
+
+Recommendations:
+
+- Leave `tag` empty to use the chart `appVersion`.
+- Use `digest` when you need immutable image pinning.
+- Use `imagePullSecrets` when pulling from a private registry.
+- Use per-instance overrides only when `allowGlobalOverride` is enabled for that instance.
+
+
+
+
+
+The chart can render Namespace resources, including Pod Security Admission labels.
+
+```yaml
+namespace:
+ create: false
+ name: ""
+ labels: {}
+ podSecurity:
+ enforce: ""
+ warn: ""
+ audit: ""
+```
+
+Recommended production pattern:
+
+1. Create the namespace manually.
+2. Apply required Pod Security Admission labels or policy labels.
+3. Install the chart into that namespace.
+
+```bash
+kubectl create namespace pangolin
+```
+
+Example namespace labels:
+
+```bash
+kubectl label namespace pangolin \
+ pod-security.kubernetes.io/enforce=baseline \
+ pod-security.kubernetes.io/audit=restricted \
+ pod-security.kubernetes.io/warn=restricted
+```
+
+Per-instance namespace overrides are available when `allowGlobalOverride: true` is set:
+
+```yaml
+newtInstances:
+ - name: main-tunnel
+ allowGlobalOverride: true
+ namespace:
+ name: pangolin
+ create: false
+ labels: {}
+ podSecurity:
+ enforce: ""
+ warn: ""
+ audit: ""
+```
+
+
+Creating the namespace manually is recommended when your cluster uses Pod Security Admission, policy labels, admission webhooks, or namespace annotations.
+
+
+
+
+
+
+For production, use an existing Kubernetes Secret.
+
+```yaml
+newtInstances:
+ - name: main-tunnel
+ enabled: true
+ auth:
+ existingSecretName: newt-auth
+```
+
+Create the Secret before installing the chart:
+
+```bash
+kubectl create secret generic newt-auth \
+ --namespace pangolin \
+ --from-literal=PANGOLIN_ENDPOINT=https://pangolin.example.com \
+ --from-literal=NEWT_ID= \
+ --from-literal=NEWT_SECRET=
+```
+
+The default Secret keys are:
+
+```yaml
+PANGOLIN_ENDPOINT
+NEWT_ID
+NEWT_SECRET
+```
+
+Use `auth.keys.*` only when your Secret uses different key names:
+
+```yaml
+newtInstances:
+ - name: main-tunnel
+ enabled: true
+ auth:
+ existingSecretName: newt-auth
+ keys:
+ endpointKey: PANGOLIN_ENDPOINT
+ idKey: NEWT_ID
+ secretKey: NEWT_SECRET
+```
+
+`auth.keys.*` are Secret key names, not credential values.
+
+Inline credentials are supported, but should only be used for local testing:
+
+```yaml
+newtInstances:
+ - name: main-tunnel
+ enabled: true
+ auth:
+ pangolinEndpoint: "https://pangolin.example.com"
+ id: ""
+ secret: ""
+```
+
+
+Inline credentials can appear in rendered manifests and Helm release history. Use `auth.existingSecretName` for production.
+
+
+
+Do not commit plaintext credentials to Git. For GitOps workflows, use encrypted or external secret backends such as SOPS, Sealed Secrets, External Secrets Operator, Vault, or Infisical.
+
+
+Chart `1.4.0` also includes `auth.createSecret` and `auth.envVarsDirect` modes for generated Secret and direct environment-variable workflows. Use these only when they match your operational model.
+
+
+
+
+
+Use provisioning when Newt should bootstrap its credentials from a provisioning key instead of using a static `NEWT_ID` and `NEWT_SECRET`.
+
+```yaml
+newtInstances:
+ - name: main-tunnel
+ enabled: true
+ auth:
+ pangolinEndpoint: https://pangolin.example.com
+ provisioningKey: ""
+ newtName: "my-site"
+ configPersistence:
+ enabled: true
+ type: emptyDir
+ mountPath: /var/lib/newt
+ fileName: config.json
+```
+
+Provisioning requires writable config persistence so Newt can store the generated configuration.
+
+For durable storage, use an existing PVC:
+
+```yaml
+newtInstances:
+ - name: main-tunnel
+ enabled: true
+ auth:
+ pangolinEndpoint: https://pangolin.example.com
+ provisioningKey: ""
+ newtName: "my-site"
+ configPersistence:
+ enabled: true
+ type: persistentVolumeClaim
+ existingClaim: my-newt-config
+ mountPath: /var/lib/newt
+ fileName: config.json
+```
+
+You can also provide a provisioning blueprint:
+
+```yaml
+newtInstances:
+ - name: main-tunnel
+ enabled: true
+ auth:
+ pangolinEndpoint: https://pangolin.example.com
+ provisioningKey: ""
+ newtName: "my-site"
+ configPersistence:
+ enabled: true
+ type: emptyDir
+ provisioningBlueprintFile: /etc/newt/provisioning-blueprint.yaml
+ provisioningBlueprintData: |
+ version: 1
+ routes: []
+```
+
+
+
+
+
+Each Newt instance is configured under `newtInstances[]`.
+
+```yaml
+newtInstances:
+ - name: main-tunnel
+ enabled: true
+ replicas: 1
+ logLevel: INFO
+ mtu: 1280
+ dns: ""
+ pingInterval: ""
+ pingTimeout: ""
+ acceptClients: false
+ useNativeInterface: false
+ interface: newt
+ keepInterface: false
+ noCloud: false
+ disableClients: false
+```
+
+Key settings:
+
+| Setting | Purpose |
+| ------------------------------ | ----------------------------------------------------------- |
+| `replicas` | Number of replicas for this Newt instance |
+| `mtu` | WireGuard interface MTU |
+| `dns` | Optional DNS server address pushed to the client |
+| `pingInterval` / `pingTimeout` | Optional Newt ping timing overrides |
+| `acceptClients` | Allows client connections at runtime |
+| `useNativeInterface` | Uses native WireGuard interface when native mode is enabled |
+| `noCloud` | Disables cloud connectivity |
+| `disableClients` | Disables client connections |
+
+
+Newt 1.11 changed upstream ping defaults. Set `pingInterval` and `pingTimeout` explicitly if you need older timing behavior.
+
+
+
+
+
+
+Service exposure is controlled separately from `acceptClients`.
+
+```yaml
+newtInstances:
+ - name: main-tunnel
+ enabled: true
+ service:
+ enabled: false
+ type: ClusterIP
+ port: 51820
+ testerPort: ""
+ externalTrafficPolicy: ""
+ loadBalancerSourceRanges: []
+```
+
+Important behavior:
+
+- `acceptClients` does not create a Service.
+- `newtInstances[].service.enabled` controls whether a Service is created.
+- Tester port exposure is disabled by default unless enabled through test settings or explicit legacy tester-port configuration.
+
+Common Service types:
+
+| Type | Use case |
+| -------------- | --------------------------------------------- |
+| `ClusterIP` | Internal cluster access |
+| `LoadBalancer` | External exposure through cloud load balancer |
+| `NodePort` | Node-level port exposure |
+
+
+
+
+
+Use `configPersistence` when Newt needs writable configuration storage.
+
+```yaml
+newtInstances:
+ - name: main-tunnel
+ configPersistence:
+ enabled: false
+ type: emptyDir
+ mountPath: /var/lib/newt
+ fileName: config.json
+ existingClaim: ""
+```
+
+Storage types:
+
+| Type | Behavior |
+| ----------------------- | ----------------------------------------- |
+| `emptyDir` | Ephemeral storage, recreated with the pod |
+| `persistentVolumeClaim` | Durable storage using an existing PVC |
+
+Provisioning-based installs should enable config persistence. For production provisioning, prefer a PVC over `emptyDir`.
+
+
+`emptyDir` is recreated when a pod is replaced. Newt can require a reconnect and handshake after restart, which may briefly interrupt active traffic.
+
+
+
+For production, prefer an existing PersistentVolumeClaim to keep writable Newt configuration across restarts and rescheduling.
+
+
+
+
+
+
+The chart supports blueprints, provisioning blueprints, mTLS certificate mounts, Docker socket mounts, and up/down scripts.
+
+Blueprint example:
+
+```yaml
+newtInstances:
+ - name: main-tunnel
+ blueprintFile: /etc/newt/blueprint.yaml
+ blueprintData: |
+ version: 1
+ routes: []
+```
+
+Provisioning blueprint example:
+
+```yaml
+newtInstances:
+ - name: main-tunnel
+ provisioningBlueprintFile: /etc/newt/provisioning-blueprint.yaml
+ provisioningBlueprintData: |
+ version: 1
+ routes: []
+```
+
+mTLS using an existing PEM Secret:
+
+```yaml
+newtInstances:
+ - name: main-tunnel
+ mtls:
+ enabled: true
+ mode: pem
+ pem:
+ secretName: newt-mtls
+ clientCertPath: /certs/client.crt
+ clientKeyPath: /certs/client.key
+ caPath: /certs/ca.crt
+```
+
+Up/down scripts:
+
+```yaml
+global:
+ updownScripts:
+ route.sh: |
+ #!/bin/sh
+ echo "Newt interface changed"
+
+newtInstances:
+ - name: main-tunnel
+ updown:
+ enabled: true
+ mountPath: /opt/newt/updown
+```
+
+
+Use Secrets for certificates and sensitive script inputs. Avoid inline private keys or credentials in values files.
+
+
+
+
+
+
+ServiceAccount creation is enabled by default.
+
+```yaml
+serviceAccount:
+ create: true
+ name: ""
+ automountServiceAccountToken: false
+```
+
+RBAC is disabled by default in chart `1.4.0`:
+
+```yaml
+rbac:
+ create: false
+ clusterRole: false
+```
+
+Enable RBAC only when your selected configuration needs Kubernetes API permissions:
+
+```yaml
+rbac:
+ create: true
+ clusterRole: false
+```
+
+Per-instance ServiceAccount overrides are available when `allowGlobalOverride: true` is set:
+
+```yaml
+newtInstances:
+ - name: main-tunnel
+ allowGlobalOverride: true
+ serviceAccount:
+ create: true
+ name: newt-main-tunnel
+ automountServiceAccountToken: false
+```
+
+
+Chart `1.4.0` changed the RBAC default to `rbac.create=false`. Existing installations that relied on auto-created RBAC must opt in explicitly during upgrade.
+
+
+
+
+
+
+Global resource requests and limits apply to Newt workloads.
+
+```yaml
+global:
+ resources:
+ requests:
+ cpu: 100m
+ memory: 128Mi
+ ephemeral-storage: 128Mi
+ limits:
+ cpu: 200m
+ memory: 256Mi
+ ephemeral-storage: 256Mi
+```
+
+Scheduling defaults:
+
+```yaml
+global:
+ priorityClassName: ""
+ nodeSelector: {}
+ tolerations: []
+ affinity:
+ nodeAffinity: {}
+ podAffinity: {}
+ podAntiAffinity: {}
+ topologySpreadConstraints: []
+```
+
+Pod Disruption Budget:
+
+```yaml
+global:
+ podDisruptionBudget:
+ enabled: false
+ minAvailable: 1
+ maxUnavailable: ""
+```
+
+Recommendations:
+
+- Start with the chart defaults.
+- Increase requests and limits based on traffic volume.
+- Use node selectors, tolerations, affinity, or topology spread constraints when you need placement control.
+- Enable a PodDisruptionBudget only when your replica count and maintenance policy support it.
+
+
+Avoid CPU limits unless you explicitly need hard caps. CPU limits can trigger throttling even when spare node CPU exists. For most deployments, use CPU requests and memory limits as the starting point.
+
+
+
+
+
+
+Health probes are disabled by default.
+
+```yaml
+global:
+ health:
+ enabled: false
+ path: /tmp/healthy
+ readinessFailureThreshold: 3
+```
+
+Per-instance health options:
+
+```yaml
+newtInstances:
+ - name: main-tunnel
+ healthFile: /tmp/healthy
+ enforceHcCert: false
+```
+
+Helm test jobs are disabled by default:
+
+```yaml
+global:
+ tests:
+ enabled: false
+ image:
+ repository: registry.k8s.io/kubectl
+ tag: "1.30.14"
+ pullPolicy: IfNotPresent
+```
+
+Enable tests only when you want chart test jobs and tester-port related resources.
+
+
+
+
+
+Metrics are disabled by default.
+
+```yaml
+global:
+ metrics:
+ enabled: false
+ port: 9090
+ path: /metrics
+ adminAddr: ":2112"
+ asyncBytes: false
+ region: ""
+ otlpEnabled: false
+ pprofEnabled: false
+```
+
+The default `adminAddr` is `:2112`, which listens on all interfaces and allows in-cluster scraping. Use `127.0.0.1:2112` only when scraping from other pods is not required.
+
+Metrics Service:
+
+```yaml
+global:
+ metrics:
+ service:
+ enabled: false
+ type: ClusterIP
+ port: 2112
+ portName: metrics
+```
+
+Prometheus Operator resources:
+
+```yaml
+global:
+ metrics:
+ podMonitor:
+ enabled: false
+ serviceMonitor:
+ enabled: false
+ prometheusRule:
+ enabled: false
+```
+
+Example with ServiceMonitor:
+
+```yaml
+global:
+ metrics:
+ enabled: true
+ service:
+ enabled: true
+ serviceMonitor:
+ enabled: true
+```
+
+Optional pprof endpoint:
+
+```yaml
+global:
+ metrics:
+ pprofEnabled: true
+```
+
+
+
+
+
+NetworkPolicy rendering is disabled by default.
+
+```yaml
+global:
+ networkPolicy:
+ enabled: false
+ defaultMode: merge
+ components:
+ defaultApp:
+ enabled: true
+ dns:
+ enabled: false
+ kubeApi:
+ enabled: false
+ custom:
+ enabled: false
+ ruleSets: {}
+```
+
+Per-instance NetworkPolicy overrides:
+
+```yaml
+newtInstances:
+ - name: main-tunnel
+ networkPolicy:
+ enabled: null
+ mode: merge
+ useGlobalComponents:
+ defaultApp: true
+ dns: false
+ kubeApi: false
+ custom: true
+ components:
+ dns:
+ enabled: false
+ custom:
+ enabled: false
+ includeRuleSets: []
+```
+
+Modes:
+
+| Mode | Behavior |
+| --------- | ------------------------------------------------- |
+| `inherit` | Use global components and rule sets only |
+| `merge` | Combine global and instance-level policy settings |
+| `replace` | Use only the instance-level policy settings |
+
+Enable DNS egress rules if your default network policy blocks DNS.
+
+
+
+
+
+## Configuration by install method
+
+### Helm
+
+Use a values file:
+
+```bash
+helm upgrade --install newt fossorial/newt \
+ --namespace pangolin \
+ --values values-newt.yaml
+```
+
+Use inline values only for small tests:
+
+```bash
+helm upgrade --install newt fossorial/newt \
+ --namespace pangolin \
+ --set 'newtInstances[0].name=main-tunnel' \
+ --set 'newtInstances[0].auth.existingSecretName=newt-auth'
+```
+
+See [Site (newt) Helm](/self-host/manual/kubernetes/newt/helm) for the installation flow.
+
+### Kustomize
+
+Render the chart with Helm, then use Kustomize overlays:
+
+```bash
+helm template newt fossorial/newt \
+ --namespace pangolin \
+ --values values-newt.yaml \
+ > base/newt.yaml
+```
+
+Then apply an overlay:
+
+```bash
+kubectl apply -k overlays/site-a
+```
+
+See [Newt Kustomize](/self-host/manual/kubernetes/newt/kustomize) for the Kustomize workflow.
+
+### GitOps
+
+Store Helm values or Kustomize overlays in Git. Argo CD or Flux reconciles the desired state.
+
+Argo CD Helm example:
+
+```yaml
+spec:
+ source:
+ helm:
+ values: |
+ newtInstances:
+ - name: main-tunnel
+ enabled: true
+ auth:
+ existingSecretName: newt-auth
+```
+
+Flux HelmRelease example:
+
+```yaml
+spec:
+ values:
+ newtInstances:
+ - name: main-tunnel
+ enabled: true
+ auth:
+ existingSecretName: newt-auth
+```
+
+See [GitOps](/self-host/manual/kubernetes/gitops/overview) for GitOps guidance.
+
+## Next steps
+
+
+
+ Install Newt with Helm.
+
+
+ Install Newt with rendered manifests and Kustomize overlays.
+
+
+ Debug Newt deployment and connection issues.
+
+
+ Deploy Newt with Argo CD or Flux.
+
+
diff --git a/self-host/manual/kubernetes/newt/helm.mdx b/self-host/manual/kubernetes/newt/helm.mdx
new file mode 100644
index 0000000..329d808
--- /dev/null
+++ b/self-host/manual/kubernetes/newt/helm.mdx
@@ -0,0 +1,415 @@
+---
+title: "Helm"
+description: "Quick-start guide for installing Site (newt) on Kubernetes using Helm."
+---
+
+import PangolinCloudTocCta from "/snippets/pangolin-cloud-toc-cta.mdx";
+
+
+
+
+This guide installs and manages Site (newt) in Kubernetes using Helm.
+
+See the [Version Matrix](https://github.com/fosrl/helm-charts/blob/main/VERSION_MATRIX.md) for chart and default app version references.
+
+## What the chart supports
+
+The Newt chart can deploy one or more Newt instances through `newtInstances[]`.
+
+Newt chart `1.4.0` includes support for:
+
+- provisioning with `NEWT_PROVISIONING_KEY` and `NEWT_NAME`
+- legacy credential installs with `NEWT_ID` and `NEWT_SECRET`
+- existing Kubernetes Secrets for production credentials
+- writable config persistence with `emptyDir` or an existing PVC
+- optional metrics, PodMonitor, ServiceMonitor, and PrometheusRule
+- optional NetworkPolicy
+- multi-instance deployments with per-instance overrides
+
+The chart README lists these features for version `1.4.0`.
+
+## Prerequisites
+
+Before installing Newt, you need:
+
+- Kubernetes `1.30.14` or newer
+- Helm 3.x
+- `kubectl` access to the target cluster
+- a reachable Pangolin instance
+- either:
+ - Newt credentials from Pangolin: `NEWT_ID` and `NEWT_SECRET`
+ - or a provisioning key for provisioning installs
+
+The chart quickstart lists Kubernetes `>=1.30.14`, Helm 3.x, configured `kubectl`, and Newt credentials from Pangolin as prerequisites.
+
+See [Prerequisites](/self-host/manual/kubernetes/prerequisites) for cluster, namespace, storage, networking, and security planning.
+
+## Authentication options
+
+Newt chart `1.4.0` supports three credential patterns:
+
+| Method | Recommended for | Notes |
+| --- | --- | --- |
+| Existing Secret | Production | Credentials are stored in a Kubernetes Secret created outside Helm |
+| Provisioning key | Provisioning installs | Requires writable config persistence |
+| Inline values | Local testing only | Credentials may be stored in Helm release history |
+
+For production, use `auth.existingSecretName` or a GitOps-safe secret workflow. The chart values explicitly warn that inline credentials can be stored in Helm release history and recommend existing Secrets for production.
+
+## Quick install with existing Secret
+
+This is the recommended simple production pattern.
+
+### Step 1: Create the namespace
+
+Create the namespace before installing the chart:
+
+```bash
+kubectl create namespace pangolin
+```
+
+If your cluster uses Pod Security Admission labels, namespace labels, or policy annotations, apply them before installing Newt.
+
+Example:
+
+```bash
+kubectl label namespace pangolin \
+ pod-security.kubernetes.io/enforce=baseline \
+ pod-security.kubernetes.io/audit=restricted \
+ pod-security.kubernetes.io/warn=restricted
+```
+
+
+The chart can create namespaces through `namespace.create`, but creating the namespace explicitly is recommended when your cluster uses Pod Security Admission, namespace labels, or policy annotations.
+
+
+### Step 2: Create the Newt Secret
+
+Create a Secret with the credentials from Pangolin:
+
+```bash
+kubectl create secret generic newt-auth \
+ --namespace pangolin \
+ --from-literal=PANGOLIN_ENDPOINT=https://pangolin.example.com \
+ --from-literal=NEWT_ID= \
+ --from-literal=NEWT_SECRET=
+```
+
+
+Get the Newt credentials from the Pangolin dashboard for the site you want this Newt instance to connect to.
+
+
+### Step 3: Create a values file
+
+Create `values-newt.yaml`:
+
+```yaml
+newtInstances:
+ - name: main-tunnel
+ enabled: true
+ auth:
+ existingSecretName: newt-auth
+ replicas: 1
+```
+
+The default Secret keys are:
+
+```yaml
+PANGOLIN_ENDPOINT
+NEWT_ID
+NEWT_SECRET
+```
+
+You only need to set `auth.keys.*` if your Secret uses different key names.
+
+Example with custom Secret keys:
+
+```yaml
+newtInstances:
+ - name: main-tunnel
+ enabled: true
+ auth:
+ existingSecretName: newt-auth
+ keys:
+ endpointKey: PANGOLIN_ENDPOINT
+ idKey: NEWT_ID
+ secretKey: NEWT_SECRET
+ replicas: 1
+```
+
+`auth.keys.*` are key names inside the Kubernetes Secret, not the credential values themselves.
+
+### Step 4: Install Newt
+
+Add the Helm repository:
+
+```bash
+helm repo add fossorial https://charts.fossorial.io
+helm repo update fossorial
+```
+
+Install Newt:
+
+```bash
+helm upgrade --install newt fossorial/newt \
+ --namespace pangolin \
+ --values values-newt.yaml
+```
+
+Do not use `--create-namespace` here if you created and labeled the namespace manually.
+
+### Step 5: Verify the deployment
+
+Check the Helm release:
+
+```bash
+helm status newt --namespace pangolin
+```
+
+Check the pods:
+
+```bash
+kubectl get pods --namespace pangolin \
+ -l app.kubernetes.io/name=newt
+```
+
+Check the logs:
+
+```bash
+kubectl logs --namespace pangolin \
+ -l app.kubernetes.io/name=newt \
+ --tail=50
+```
+
+Wait for the Newt pod to become ready:
+
+```bash
+kubectl wait --for=condition=ready pod \
+ -l app.kubernetes.io/name=newt \
+ --namespace pangolin \
+ --timeout=60s
+```
+
+## Quick install with provisioning key
+
+Provisioning-based installs bootstrap credentials from a provisioning key.
+
+Provisioning requires writable config persistence so Newt can store the generated configuration. The chart quickstart explicitly notes that provisioning requires a writable `CONFIG_FILE` target and that the chart provides this through `newtInstances[].configPersistence`.
+
+Create `values-newt.yaml`:
+
+```yaml
+newtInstances:
+ - name: main-tunnel
+ enabled: true
+    auth:
+      pangolinEndpoint: https://pangolin.example.com
+      provisioningKey: ""
+      newtName: "my-site"
+ configPersistence:
+ enabled: true
+ type: emptyDir
+ mountPath: /var/lib/newt
+ fileName: config.json
+```
+
+Install Newt:
+
+```bash
+helm upgrade --install newt fossorial/newt \
+ --namespace pangolin \
+ --values values-newt.yaml
+```
+
+
+`emptyDir` is enough for testing, but it is ephemeral. For durable provisioning state, use `type: persistentVolumeClaim` with an existing PVC.
+
+
+Example with an existing PVC:
+
+```yaml
+newtInstances:
+ - name: main-tunnel
+ enabled: true
+    auth:
+      pangolinEndpoint: https://pangolin.example.com
+      provisioningKey: ""
+      newtName: "my-site"
+ configPersistence:
+ enabled: true
+ type: persistentVolumeClaim
+ existingClaim: my-newt-config
+ mountPath: /var/lib/newt
+ fileName: config.json
+```
+
+The Newt README includes both `emptyDir` and existing PVC provisioning examples.
+
+## Verifying connectivity
+
+Follow the Newt logs:
+
+```bash
+kubectl logs --namespace pangolin \
+ -l app.kubernetes.io/name=newt \
+ --follow
+```
+
+In the Pangolin dashboard, verify that the site connected by this Newt instance is online.
+
+If the pod is running but the site does not connect, check:
+
+* `PANGOLIN_ENDPOINT`
+* Newt credentials or provisioning key
+* DNS resolution from inside the cluster
+* outbound network access from the Newt pod
+* TLS validity for the Pangolin endpoint
+
+## Upgrade
+
+Update the Helm repository:
+
+```bash
+helm repo update fossorial
+```
+
+Upgrade the release:
+
+```bash
+helm upgrade newt fossorial/newt \
+ --namespace pangolin \
+ --values values-newt.yaml
+```
+
+Check upgrade status:
+
+```bash
+helm status newt --namespace pangolin
+helm history newt --namespace pangolin
+```
+
+Rollback to a previous revision if needed:
+
+```bash
+helm rollback newt --namespace pangolin
+```
+
+## Multiple Newt instances
+
+You can deploy multiple Newt instances with one chart release.
+
+Example:
+
+```yaml
+newtInstances:
+ - name: site-a
+ enabled: true
+ auth:
+ existingSecretName: newt-auth-site-a
+ replicas: 1
+
+ - name: site-b
+ enabled: true
+ auth:
+ existingSecretName: newt-auth-site-b
+ replicas: 1
+```
+
+Create a separate Secret for each site:
+
+```bash
+kubectl create secret generic newt-auth-site-a \
+ --namespace pangolin \
+ --from-literal=PANGOLIN_ENDPOINT=https://pangolin.example.com \
+ --from-literal=NEWT_ID= \
+ --from-literal=NEWT_SECRET=
+
+kubectl create secret generic newt-auth-site-b \
+ --namespace pangolin \
+ --from-literal=PANGOLIN_ENDPOINT=https://pangolin.example.com \
+ --from-literal=NEWT_ID= \
+ --from-literal=NEWT_SECRET=
+```
+
+## Architecture notes
+
+### Instance-based deployment
+
+* `newtInstances[]` defines the Newt instances rendered by the chart.
+* Each enabled instance creates its own workload.
+* Each instance can use its own Secret, provisioning settings, resources, service settings, and network policy settings.
+* Per-instance namespace and service account overrides require `allowGlobalOverride: true`.
+
+The chart values include `newtInstances[]`, per-instance namespace settings, and per-instance service account overrides.
+
+### RBAC
+
+Newt chart `1.4.0` defaults `rbac.create` to `false`. Enable RBAC only when your selected Newt configuration requires Kubernetes API permissions.
+
+```yaml
+rbac:
+ create: true
+```
+
+The chart changelog for `1.4.0` marks this as a breaking change: installations that relied on auto-created RBAC must explicitly enable `rbac.create=true` during upgrade.
+
+### Helm tests
+
+Helm test Jobs are disabled by default.
+
+Enable them only when you want to run chart test jobs:
+
+```yaml
+global:
+ tests:
+ enabled: true
+```
+
+The chart quickstart notes that test Jobs are gated behind `global.tests.enabled`, which defaults to `false`.
+
+## OCI install
+
+The Newt chart is also published as an OCI chart in GHCR.
+
+Pull the chart:
+
+```bash
+helm pull oci://ghcr.io/fosrl/helm-charts/newt \
+ --version 1.4.0
+```
+
+Install from OCI:
+
+```bash
+helm upgrade --install newt oci://ghcr.io/fosrl/helm-charts/newt \
+ --version 1.4.0 \
+ --namespace pangolin \
+ --values values-newt.yaml
+```
+
+OCI changes where Helm pulls the chart from. It does not change the values file or the release behavior.
+
+## References
+
+
+
+
+
+
+
+
+
+## Next steps
+
+
+
+ Review all Newt chart options.
+
+
+ Debug Newt deployment and connection issues.
+
+
+ Install Newt with rendered manifests and Kustomize overlays.
+
+
+ Install the Pangolin control plane.
+
+
diff --git a/self-host/manual/kubernetes/newt/kustomize.mdx b/self-host/manual/kubernetes/newt/kustomize.mdx
new file mode 100644
index 0000000..92a9e0d
--- /dev/null
+++ b/self-host/manual/kubernetes/newt/kustomize.mdx
@@ -0,0 +1,617 @@
+---
+title: "Kustomize"
+description: "Deploy Newt on Kubernetes using Helm-rendered manifests and Kustomize overlays."
+---
+
+import PangolinCloudTocCta from "/snippets/pangolin-cloud-toc-cta.mdx";
+
+
+
+
+Use Kustomize when you want to manage Newt with rendered manifests, environment-specific overlays, and explicit patches in Git.
+
+For Newt, the supported Kustomize workflow is:
+
+1. Render the Newt Helm chart to manifests.
+2. Use the rendered output as the Kustomize base.
+3. Create overlays per site, cluster, or environment.
+4. Apply the overlay with `kubectl apply -k` or reconcile it with Argo CD or Flux.
+
+## When to use Kustomize for Newt
+
+Use Kustomize if you:
+
+- want site-specific or environment-specific overlays
+- need explicit patches committed to Git
+- prefer reviewing rendered Kubernetes manifests before applying them
+- use Argo CD or Flux with Kustomize sources
+- want to customize Helm-rendered output without forking the chart
+
+For a simpler single-site setup, use [Newt Helm](/self-host/manual/kubernetes/newt/helm).
+
+## Supported approach
+
+The Newt chart does not provide native Kustomize bases. Render the Helm chart first, then use Kustomize on the rendered manifests.
+
+
+Do not manage the same Newt resources with both a live Helm release and Kustomize. Pick one ownership model per environment.
+
+
+Recommended ownership model:
+
+- Use Helm only to render the Newt chart.
+- Use Kustomize, Argo CD, or Flux to apply and reconcile the rendered manifests.
+- Re-render the base when upgrading the chart or changing Helm values.
+
+## Example directory structure
+
+```text
+newt-deployment/
+├── base/
+│ ├── kustomization.yaml
+│ └── newt.yaml
+├── overlays/
+│ ├── site-a/
+│ │ ├── kustomization.yaml
+│ │ └── patches/
+│ │ └── deployment-resources.patch.yaml
+│ └── site-b/
+│ ├── kustomization.yaml
+│ └── patches/
+│ └── deployment-resources.patch.yaml
+└── values/
+ ├── values-base.yaml
+ ├── values-site-a.yaml
+ └── values-site-b.yaml
+```
+
+## Step 1: Create the namespace
+
+Create the namespace before applying rendered manifests:
+
+```bash
+kubectl create namespace pangolin
+```
+
+If your cluster uses Pod Security Admission, namespace labels, or other policy labels, apply them before creating workloads.
+
+Example:
+
+```bash
+kubectl label namespace pangolin \
+ pod-security.kubernetes.io/enforce=baseline \
+ pod-security.kubernetes.io/audit=restricted \
+ pod-security.kubernetes.io/warn=restricted
+```
+
+## Step 2: Create Newt credentials
+
+Create a Kubernetes Secret for each Newt site or instance.
+
+```bash
+kubectl create secret generic newt-auth-site-a \
+ --namespace pangolin \
+ --from-literal=PANGOLIN_ENDPOINT=https://pangolin.example.com \
+ --from-literal=NEWT_ID= \
+ --from-literal=NEWT_SECRET=
+```
+
+For a second site:
+
+```bash
+kubectl create secret generic newt-auth-site-b \
+ --namespace pangolin \
+ --from-literal=PANGOLIN_ENDPOINT=https://pangolin.example.com \
+ --from-literal=NEWT_ID= \
+ --from-literal=NEWT_SECRET=
+```
+
+
+Use existing Kubernetes Secrets for production. Do not commit Newt credentials into Helm values, rendered manifests, or Kustomize patches.
+
+
+## Step 3: Create base values
+
+Create `values/values-base.yaml`:
+
+```yaml
+newtInstances:
+ - name: main-tunnel
+ enabled: true
+ replicas: 1
+ auth:
+ existingSecretName: newt-auth-site-a
+```
+
+This values file uses an existing Secret. The default Secret keys are:
+
+```text
+PANGOLIN_ENDPOINT
+NEWT_ID
+NEWT_SECRET
+```
+
+Use `auth.keys.*` only when your Secret uses different key names.
+
+## Step 4: Render Newt to the base
+
+Add and update the Helm repository:
+
+```bash
+helm repo add fossorial https://charts.fossorial.io
+helm repo update fossorial
+```
+
+Render the Newt chart:
+
+```bash
+mkdir -p base overlays/site-a/patches overlays/site-b/patches values
+
+helm template newt fossorial/newt \
+ --namespace pangolin \
+ --values values/values-base.yaml \
+ > base/newt.yaml
+```
+
+You can also render from the GHCR OCI chart:
+
+```bash
+helm template newt oci://ghcr.io/fosrl/helm-charts/newt \
+ --version 1.4.0 \
+ --namespace pangolin \
+ --values values/values-base.yaml \
+ > base/newt.yaml
+```
+
+## Step 5: Create the base kustomization
+
+```yaml
+# base/kustomization.yaml
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+resources:
+ - newt.yaml
+```
+
+
+The namespace is already rendered by Helm through `--namespace pangolin`. You can also set `namespace: pangolin` in Kustomize, but avoid changing namespaces in overlays unless you have verified all rendered resources and references.
+
+
+## Step 6: Inspect the rendered resource names
+
+Before writing patches, check the generated names:
+
+```bash
+kustomize build base | grep -E "^(kind:|  name:)"
+```
+
+Or list the deployments:
+
+```bash
+kustomize build base | yq '. | select(.kind == "Deployment") | .metadata.name'
+```
+
+Use the actual rendered Deployment name in your patch targets.
+
+
+Do not assume the rendered Deployment name without checking the generated manifests. Helm naming can change with release name, chart name, `nameOverride`, or `fullnameOverride`.
+
+
+## Step 7: Create site-specific overlays
+
+Example overlay for Site A:
+
+```yaml
+# overlays/site-a/kustomization.yaml
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+resources:
+ - ../../base
+
+labels:
+ - pairs:
+ app.kubernetes.io/site: site-a
+ app.kubernetes.io/environment: production
+
+patches:
+ - path: patches/deployment-resources.patch.yaml
+ target:
+ group: apps
+ version: v1
+ kind: Deployment
+ name: newt-main-tunnel
+```
+
+Example resource patch:
+
+```yaml
+# overlays/site-a/patches/deployment-resources.patch.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: newt-main-tunnel
+spec:
+ replicas: 1
+ template:
+ spec:
+ containers:
+ - name: newt
+ resources:
+ requests:
+ cpu: 100m
+ memory: 128Mi
+ limits:
+ memory: 256Mi
+```
+
+
+Replace `newt-main-tunnel` with the actual Deployment name from your rendered manifests.
+
+
+An overlay for Site B that needs a different Secret is usually better handled by rendering a second base with a different values file.
+
+Create `values/values-site-b.yaml`:
+
+```yaml
+newtInstances:
+ - name: main-tunnel
+ enabled: true
+ replicas: 1
+ auth:
+ existingSecretName: newt-auth-site-b
+```
+
+Then render a separate base for Site B:
+
+```bash
+mkdir -p site-b/base
+
+helm template newt-site-b fossorial/newt \
+ --namespace pangolin \
+ --values values/values-site-b.yaml \
+ > site-b/base/newt.yaml
+```
+
+
+For different credentials, endpoints, provisioning keys, or instance names, prefer separate Helm-rendered bases. Use Kustomize patches for environment-level changes such as labels, annotations, resources, scheduling, or NetworkPolicy adjustments.
+
+
+## Common Kustomize patches for Newt
+
+### Patch resource requests and limits
+
+```yaml
+# overlays/site-a/kustomization.yaml
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+resources:
+ - ../../base
+
+patches:
+ - path: patches/resources.patch.yaml
+ target:
+ group: apps
+ version: v1
+ kind: Deployment
+ name: newt-main-tunnel
+```
+
+```yaml
+# overlays/site-a/patches/resources.patch.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: newt-main-tunnel
+spec:
+ template:
+ spec:
+ containers:
+ - name: newt
+ resources:
+ requests:
+ cpu: 200m
+ memory: 256Mi
+ limits:
+ memory: 512Mi
+```
+
+### Patch log level
+
+Prefer configuring log level through Helm values before rendering. If you still need a manifest patch, patch the generated environment variable carefully after inspecting the rendered Deployment.
+
+Example JSON6902-style patch:
+
+```yaml
+# overlays/site-a/kustomization.yaml
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+resources:
+ - ../../base
+
+patches:
+ - target:
+ group: apps
+ version: v1
+ kind: Deployment
+ name: newt-main-tunnel
+ patch: |-
+ - op: add
+ path: /spec/template/spec/containers/0/env/-
+ value:
+ name: LOG_LEVEL
+ value: DEBUG
+```
+
+
+Only use index-based JSON patches after checking the rendered manifest. Container order and environment variable layout can change between chart versions.
+
+
+### Add node affinity
+
+```yaml
+# overlays/site-a/patches/node-affinity.patch.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: newt-main-tunnel
+spec:
+ template:
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: site
+ operator: In
+ values:
+ - site-a
+```
+
+Reference the patch:
+
+```yaml
+# overlays/site-a/kustomization.yaml
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+resources:
+ - ../../base
+
+patches:
+ - path: patches/node-affinity.patch.yaml
+ target:
+ group: apps
+ version: v1
+ kind: Deployment
+ name: newt-main-tunnel
+```
+
+### Add annotations
+
+```yaml
+# overlays/site-a/kustomization.yaml
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+resources:
+ - ../../base
+
+patches:
+ - target:
+ group: apps
+ version: v1
+ kind: Deployment
+ name: newt-main-tunnel
+ patch: |-
+ - op: add
+ path: /metadata/annotations
+ value:
+ example.com/owner: platform
+```
+
+## Do not rename rendered Helm resources by default
+
+Avoid Kustomize options such as `namePrefix` and `nameSuffix` for Helm-rendered bases unless you have validated every generated reference.
+
+Renaming rendered resources can break:
+
+* Service selectors
+* Secret references
+* ConfigMap references
+* ServiceAccount references
+* NetworkPolicy selectors
+* Prometheus monitor selectors
+
+If you need different resource names, prefer changing the Helm release name or chart naming values before rendering.
+
+## Apply the overlay
+
+Preview the rendered output:
+
+```bash
+kustomize build overlays/site-a
+```
+
+Compare with the live cluster:
+
+```bash
+kustomize build overlays/site-a | kubectl diff -f -
+```
+
+Apply the overlay:
+
+```bash
+kubectl apply -k overlays/site-a
+```
+
+Verify the deployment:
+
+```bash
+kubectl get pods --namespace pangolin \
+ -l app.kubernetes.io/name=newt
+
+kubectl logs --namespace pangolin \
+ -l app.kubernetes.io/name=newt \
+ --tail=50
+```
+
+## Updating the rendered base
+
+When upgrading the Newt chart, re-render the base and review the changes.
+
+```bash
+helm repo update fossorial
+```
+
+Render the updated chart output:
+
+```bash
+helm template newt fossorial/newt \
+ --namespace pangolin \
+ --values values/values-base.yaml \
+ > base/newt.yaml
+```
+
+Or with OCI:
+
+```bash
+helm template newt oci://ghcr.io/fosrl/helm-charts/newt \
+ --version 1.4.0 \
+ --namespace pangolin \
+ --values values/values-base.yaml \
+ > base/newt.yaml
+```
+
+Validate the overlay:
+
+```bash
+kustomize build overlays/site-a
+```
+
+Review the diff:
+
+```bash
+git diff
+kustomize build overlays/site-a | kubectl diff -f -
+```
+
+Commit the updated base and overlays:
+
+```bash
+git add base/ overlays/ values/
+git commit -m "Update Newt rendered manifests"
+```
+
+Apply after review:
+
+```bash
+kubectl apply -k overlays/site-a
+```
+
+## Ownership model
+
+Do not run `helm upgrade` against a release that is managed by Kustomize.
+
+Avoid this pattern:
+
+```bash
+helm upgrade newt fossorial/newt --namespace pangolin
+kubectl apply -k overlays/site-a
+```
+
+Use one of these models instead:
+
+| Model | Description |
+| ----------------- | ------------------------------------------------------------------------------------------ |
+| Helm-managed | Helm installs and upgrades the live release. Kustomize is not used for the same resources. |
+| Kustomize-managed | Helm renders manifests only. Kustomize applies and owns the live resources. |
+| GitOps-managed | Argo CD or Flux applies the Kustomize overlay and owns reconciliation. |
+
+## Validation
+
+Validate Kustomize output:
+
+```bash
+kustomize build overlays/site-a
+```
+
+Run a server-side dry run:
+
+```bash
+kustomize build overlays/site-a | kubectl apply -f - --dry-run=server
+```
+
+Preview live changes:
+
+```bash
+kustomize build overlays/site-a | kubectl diff -f -
+```
+
+Check live resources:
+
+```bash
+kubectl get all --namespace pangolin
+kubectl get events --namespace pangolin --sort-by=.lastTimestamp
+```
+
+## Troubleshooting
+
+### The patch does not apply
+
+Check the rendered resource name and kind:
+
+```bash
+kustomize build base | grep -E "^(kind:|  name:)"
+```
+
+Then verify the patch target in your overlay.
+
+### The pod does not start
+
+Check pod status and events:
+
+```bash
+kubectl get pods --namespace pangolin
+kubectl describe pod --namespace pangolin
+kubectl get events --namespace pangolin --sort-by=.lastTimestamp
+```
+
+### Newt does not connect
+
+Check logs:
+
+```bash
+kubectl logs --namespace pangolin \
+ -l app.kubernetes.io/name=newt \
+ --tail=100
+```
+
+Verify:
+
+* the Secret exists in the same namespace
+* `PANGOLIN_ENDPOINT` is reachable from the pod
+* `NEWT_ID` and `NEWT_SECRET` are correct
+* outbound DNS and HTTPS are allowed
+* TLS certificates for the Pangolin endpoint are valid
+
+## Next steps
+
+
+
+ Install Newt with Helm.
+
+
+ Review Newt chart options.
+
+
+ Debug Newt deployment and connection issues.
+
+
+ Deploy Kustomize overlays with Argo CD or Flux.
+
+
diff --git a/self-host/manual/kubernetes/newt/troubleshooting.mdx b/self-host/manual/kubernetes/newt/troubleshooting.mdx
new file mode 100644
index 0000000..3f23f74
--- /dev/null
+++ b/self-host/manual/kubernetes/newt/troubleshooting.mdx
@@ -0,0 +1,707 @@
+---
+title: "Troubleshooting"
+description: "Diagnose and resolve common Newt Kubernetes deployment issues."
+---
+
+import PangolinCloudTocCta from "/snippets/pangolin-cloud-toc-cta.mdx";
+
+
+
+
+Use this guide to troubleshoot Newt Kubernetes deployments installed with Helm, Kustomize, Argo CD, or Flux.
+
+Start with the basic checks, then move to the section that matches the symptom.
+
+## Quick checks
+
+Set the namespace and release name used by your installation:
+
+```bash
+export NEWT_NAMESPACE=pangolin
+export NEWT_RELEASE=newt
+```
+
+Check the Helm release:
+
+```bash
+helm status "$NEWT_RELEASE" --namespace "$NEWT_NAMESPACE"
+helm history "$NEWT_RELEASE" --namespace "$NEWT_NAMESPACE"
+```
+
+Check Newt pods:
+
+```bash
+kubectl get pods --namespace "$NEWT_NAMESPACE" \
+ -l app.kubernetes.io/name=newt
+```
+
+Check recent events:
+
+```bash
+kubectl get events --namespace "$NEWT_NAMESPACE" \
+ --sort-by=.lastTimestamp
+```
+
+Check logs:
+
+```bash
+kubectl logs --namespace "$NEWT_NAMESPACE" \
+ -l app.kubernetes.io/name=newt \
+ --tail=100
+```
+
+Check the applied Helm values:
+
+```bash
+helm get values "$NEWT_RELEASE" --namespace "$NEWT_NAMESPACE"
+```
+
+
+Do not assume the pod or Deployment name. Chart-generated names can change with the Helm release name, instance name, `nameOverride`, or `fullnameOverride`.
+
+
+## Get the generated resource names
+
+List Newt resources:
+
+```bash
+kubectl get deploy,sts,svc,secret,cm --namespace "$NEWT_NAMESPACE" \
+ -l app.kubernetes.io/name=newt
+```
+
+List pods with labels:
+
+```bash
+kubectl get pods --namespace "$NEWT_NAMESPACE" \
+ -l app.kubernetes.io/name=newt \
+ --show-labels
+```
+
+Store the first Newt pod name:
+
+```bash
+export NEWT_POD="$(kubectl get pod --namespace "$NEWT_NAMESPACE" \
+ -l app.kubernetes.io/name=newt \
+ -o jsonpath='{.items[0].metadata.name}')"
+```
+
+Then use:
+
+```bash
+echo "$NEWT_POD"
+```
+
+## Pod fails to start
+
+### Symptoms
+
+```text
+STATUS RESTARTS
+CrashLoopBackOff 5
+Error 3
+CreateContainerConfigError
+ImagePullBackOff
+```
+
+### Check pod details
+
+```bash
+kubectl describe pod "$NEWT_POD" --namespace "$NEWT_NAMESPACE"
+```
+
+Check logs:
+
+```bash
+kubectl logs "$NEWT_POD" --namespace "$NEWT_NAMESPACE" --tail=100
+```
+
+If the container restarts quickly, check the previous logs:
+
+```bash
+kubectl logs "$NEWT_POD" --namespace "$NEWT_NAMESPACE" --previous --tail=100
+```
+
+### Common causes
+
+| Symptom | Likely cause | Check |
+| ----------------------------------- | ------------------------------------------------------ | ---------------------------------------------------------- |
+| `Secret "..." not found` | Secret name does not match `auth.existingSecretName` | `kubectl get secret -n "$NEWT_NAMESPACE"` |
+| Missing env var or empty credential | Secret exists but key names do not match `auth.keys.*` | `kubectl describe secret -n "$NEWT_NAMESPACE"` |
+| Authentication failure | Wrong `NEWT_ID`, `NEWT_SECRET`, or provisioning key | Check credentials in Pangolin |
+| Endpoint connection errors | `PANGOLIN_ENDPOINT` is wrong or unreachable | Test DNS and HTTPS from the pod |
+| Image pull failure | Registry or image settings are wrong | `kubectl describe pod` |
+
+## Secret issues
+
+### Verify the Secret exists
+
+```bash
+kubectl get secret newt-auth --namespace "$NEWT_NAMESPACE"
+```
+
+### Check Secret keys
+
+```bash
+kubectl describe secret newt-auth --namespace "$NEWT_NAMESPACE"
+```
+
+The default keys are:
+
+```text
+PANGOLIN_ENDPOINT
+NEWT_ID
+NEWT_SECRET
+```
+
+If your Secret uses different key names, map them in values:
+
+```yaml
+newtInstances:
+ - name: main-tunnel
+ enabled: true
+ auth:
+ existingSecretName: newt-auth
+ keys:
+ endpointKey: PANGOLIN_ENDPOINT
+ idKey: NEWT_ID
+ secretKey: NEWT_SECRET
+```
+
+
+Do not paste decoded secrets into issue reports, logs, screenshots, or public repositories.
+
+
+### Check which Secret the pod uses
+
+```bash
+kubectl get pod "$NEWT_POD" --namespace "$NEWT_NAMESPACE" \
+ -o jsonpath='{range .spec.containers[*].envFrom[*]}{.secretRef.name}{"\n"}{end}'
+```
+
+Also inspect explicit Secret references:
+
+```bash
+kubectl get pod "$NEWT_POD" --namespace "$NEWT_NAMESPACE" -o yaml | grep -A5 -B2 secretKeyRef
+```
+
+## Newt cannot reach Pangolin
+
+### Test DNS from the Newt pod
+
+```bash
+kubectl exec "$NEWT_POD" --namespace "$NEWT_NAMESPACE" -- \
+ nslookup pangolin.example.com
+```
+
+### Test HTTPS from the Newt pod
+
+```bash
+kubectl exec "$NEWT_POD" --namespace "$NEWT_NAMESPACE" -- \
+ wget -S -O- https://pangolin.example.com 2>&1 | head -40
+```
+
+Depending on the image, `curl`, `wget`, `nc`, or `nslookup` may not be available. If needed, run a temporary debug pod in the same namespace:
+
+```bash
+kubectl run net-debug \
+ --namespace "$NEWT_NAMESPACE" \
+ --rm -it \
+ --image=curlimages/curl:latest \
+ --restart=Never \
+ -- sh
+```
+
+Then test:
+
+```bash
+curl -vk https://pangolin.example.com
+```
+
+### Common causes
+
+| Problem | What to check |
+| -------------------------------- | --------------------------------------------------------- |
+| DNS fails | CoreDNS, NetworkPolicy egress to DNS, wrong hostname |
+| HTTPS fails | ingress, TLS certificate, firewall, proxy, wrong endpoint |
+| TLS verification fails | certificate chain, hostname mismatch, private CA |
+| Works locally but not in cluster | egress policies, proxy settings, DNS split-horizon |
+
+## Newt pod is running but site is offline
+
+Check logs:
+
+```bash
+kubectl logs "$NEWT_POD" --namespace "$NEWT_NAMESPACE" --tail=200
+```
+
+Check the site in the Pangolin dashboard.
+
+Verify:
+
+* the site credentials belong to the same site
+* the site was not deleted or regenerated in Pangolin
+* `PANGOLIN_ENDPOINT` points to the correct Pangolin URL
+* the cluster can resolve and reach the Pangolin endpoint
+* outbound HTTPS is allowed from the Newt namespace
+* the Secret is in the same namespace as the Newt workload
+
+If you use provisioning, also verify:
+
+* `provisioningKey` is valid
+* `newtName` is set as expected
+* `configPersistence.enabled=true`
+* the configured `CONFIG_FILE` path is writable
+
+## Provisioning issues
+
+Provisioning requires writable config persistence.
+
+### Symptoms
+
+* Newt starts but does not keep generated credentials after restart.
+* Newt provisions repeatedly.
+* Logs mention config file or write errors.
+* Pod restarts cause the site to appear as a new or unconfigured instance.
+
+### Check values
+
+```bash
+helm get values "$NEWT_RELEASE" --namespace "$NEWT_NAMESPACE"
+```
+
+Provisioning example:
+
+```yaml
+newtInstances:
+ - name: main-tunnel
+ enabled: true
+ auth:
+ pangolinEndpoint: https://pangolin.example.com
+ provisioningKey: ""
+ newtName: "my-site"
+ configPersistence:
+ enabled: true
+ type: emptyDir
+ mountPath: /var/lib/newt
+ fileName: config.json
+```
+
+For durable state, use an existing PVC:
+
+```yaml
+newtInstances:
+ - name: main-tunnel
+ enabled: true
+ auth:
+ pangolinEndpoint: https://pangolin.example.com
+ provisioningKey: ""
+ newtName: "my-site"
+ configPersistence:
+ enabled: true
+ type: persistentVolumeClaim
+ existingClaim: my-newt-config
+ mountPath: /var/lib/newt
+ fileName: config.json
+```
+
+
+`emptyDir` is recreated when the pod is recreated. Use a PVC if the generated configuration must survive pod replacement.
+
+
+## Service not created or not reachable
+
+### Important behavior
+
+`acceptClients` does not create a Service.
+
+A Service is created through:
+
+```yaml
+newtInstances:
+ - name: main-tunnel
+ service:
+ enabled: true
+```
+
+The chart also has `service.enabledWhenAcceptClients`, but runtime client behavior and Service rendering should still be verified in the rendered manifests.
+
+### Check Services
+
+```bash
+kubectl get svc --namespace "$NEWT_NAMESPACE" \
+ -l app.kubernetes.io/name=newt
+```
+
+Describe the Service:
+
+```bash
+kubectl describe svc --namespace "$NEWT_NAMESPACE"
+```
+
+### LoadBalancer stuck in pending
+
+```text
+EXTERNAL-IP
+<pending>
+```
+
+Common causes:
+
+* the cluster has no cloud load balancer integration
+* bare-metal cluster without MetalLB or equivalent
+* cloud provider quota or permission issue
+* invalid `loadBalancerClass`
+* invalid `loadBalancerSourceRanges`
+
+For bare-metal clusters, use MetalLB or another load balancer implementation, or use `NodePort` if appropriate.
+
+## Metrics scraping does not work
+
+Metrics are disabled by default.
+
+Enable metrics:
+
+```yaml
+global:
+ metrics:
+ enabled: true
+```
+
+The chart default admin address is:
+
+```yaml
+global:
+ metrics:
+ adminAddr: ":2112"
+```
+
+This listens on all interfaces and allows in-cluster scraping. Do not set it to `127.0.0.1:2112` if Prometheus scrapes from another pod.
+
+### Metrics Service
+
+Enable the metrics Service:
+
+```yaml
+global:
+ metrics:
+ enabled: true
+ service:
+ enabled: true
+ port: 2112
+```
+
+### ServiceMonitor
+
+If you use Prometheus Operator:
+
+```yaml
+global:
+ metrics:
+ enabled: true
+ service:
+ enabled: true
+ serviceMonitor:
+ enabled: true
+```
+
+Check resources:
+
+```bash
+kubectl get svc,podmonitor,servicemonitor,prometheusrule \
+ --namespace "$NEWT_NAMESPACE" \
+ -l app.kubernetes.io/name=newt
+```
+
+
+The chart has separate metrics values for container port, admin address, and metrics Service port. Check the rendered manifest when changing these values.
+
+
+## NetworkPolicy blocks traffic
+
+If NetworkPolicy is enabled, check that the policy allows required egress.
+
+Newt usually needs egress to:
+
+* DNS
+* Pangolin endpoint over HTTPS
+* any tunnel or connectivity endpoints used by your deployment
+
+Check policies:
+
+```bash
+kubectl get networkpolicy --namespace "$NEWT_NAMESPACE"
+kubectl describe networkpolicy --namespace "$NEWT_NAMESPACE"
+```
+
+If DNS is blocked, enable or add DNS egress rules.
+
+Example:
+
+```yaml
+global:
+ networkPolicy:
+ enabled: true
+ components:
+ dns:
+ enabled: true
+```
+
+If HTTPS egress is blocked, add an appropriate custom egress rule for your environment.
+
+## Multiple Newt instances conflict
+
+### Symptoms
+
+* Multiple pods run, but only one site connects.
+* Both instances use the same credentials.
+* A site appears to flap between instances.
+* Logs show authentication or registration conflicts.
+
+### Check values
+
+```bash
+helm get values "$NEWT_RELEASE" --namespace "$NEWT_NAMESPACE"
+```
+
+Each instance should use its own credentials or provisioning identity:
+
+```yaml
+newtInstances:
+ - name: site-a
+ enabled: true
+ auth:
+ existingSecretName: newt-auth-site-a
+
+ - name: site-b
+ enabled: true
+ auth:
+ existingSecretName: newt-auth-site-b
+```
+
+Create separate Secrets:
+
+```bash
+kubectl create secret generic newt-auth-site-a \
+ --namespace "$NEWT_NAMESPACE" \
+ --from-literal=PANGOLIN_ENDPOINT=https://pangolin.example.com \
+ --from-literal=NEWT_ID= \
+ --from-literal=NEWT_SECRET=
+
+kubectl create secret generic newt-auth-site-b \
+ --namespace "$NEWT_NAMESPACE" \
+ --from-literal=PANGOLIN_ENDPOINT=https://pangolin.example.com \
+ --from-literal=NEWT_ID= \
+ --from-literal=NEWT_SECRET=
+```
+
+## RBAC or service account issues
+
+Chart `1.4.0` disables RBAC creation by default.
+
+Check service account and RBAC:
+
+```bash
+kubectl get serviceaccount,role,rolebinding \
+ --namespace "$NEWT_NAMESPACE" \
+ -l app.kubernetes.io/name=newt
+```
+
+If your configuration requires Kubernetes API access, enable RBAC:
+
+```yaml
+rbac:
+ create: true
+ clusterRole: false
+```
+
+For most Newt deployments, RBAC is not required.
+
+## High CPU or memory usage
+
+Check resource usage:
+
+```bash
+kubectl top pod --namespace "$NEWT_NAMESPACE" \
+ -l app.kubernetes.io/name=newt
+```
+
+Check current resource settings:
+
+```bash
+kubectl get pod "$NEWT_POD" --namespace "$NEWT_NAMESPACE" \
+ -o jsonpath='{.spec.containers[0].resources}'
+```
+
+Tune resources in values:
+
+```yaml
+newtInstances:
+ - name: main-tunnel
+ resources:
+ requests:
+ cpu: 200m
+ memory: 256Mi
+ limits:
+ cpu: 1000m
+ memory: 512Mi
+```
+
+Then upgrade:
+
+```bash
+helm upgrade "$NEWT_RELEASE" fossorial/newt \
+ --namespace "$NEWT_NAMESPACE" \
+ --values values-newt.yaml
+```
+
+Common causes of high usage:
+
+* high tunnel traffic
+* too low resource limits
+* repeated reconnect loops
+* excessive debug logging
+* MTU or network path issues
+
+## MTU issues
+
+### Symptoms
+
+* Connections establish but large transfers fail.
+* Some websites or services work, others hang.
+* Logs show repeated reconnects.
+* Throughput is much lower than expected.
+
+Newt defaults to MTU `1280`.
+
+Try another MTU only after confirming basic connectivity:
+
+```yaml
+newtInstances:
+ - name: main-tunnel
+ mtu: 1280
+```
+
+Upgrade after changing values:
+
+```bash
+helm upgrade "$NEWT_RELEASE" fossorial/newt \
+ --namespace "$NEWT_NAMESPACE" \
+ --values values-newt.yaml
+```
+
+## Helm debugging
+
+Preview an upgrade:
+
+```bash
+helm upgrade "$NEWT_RELEASE" fossorial/newt \
+ --namespace "$NEWT_NAMESPACE" \
+ --values values-newt.yaml \
+ --dry-run
+```
+
+Render the chart locally:
+
+```bash
+helm template "$NEWT_RELEASE" fossorial/newt \
+ --namespace "$NEWT_NAMESPACE" \
+ --values values-newt.yaml
+```
+
+Show rendered manifests from the live release:
+
+```bash
+helm get manifest "$NEWT_RELEASE" --namespace "$NEWT_NAMESPACE"
+```
+
+Show values from the live release:
+
+```bash
+helm get values "$NEWT_RELEASE" --namespace "$NEWT_NAMESPACE"
+```
+
+Rollback:
+
+```bash
+helm rollback "$NEWT_RELEASE" --namespace "$NEWT_NAMESPACE"
+```
+
+## Kustomize debugging
+
+Validate the overlay:
+
+```bash
+kustomize build overlays/site-a
+```
+
+Run a server-side dry run:
+
+```bash
+kustomize build overlays/site-a | kubectl apply -f - --dry-run=server
+```
+
+Preview live changes:
+
+```bash
+kustomize build overlays/site-a | kubectl diff -f -
+```
+
+If a patch does not apply, inspect generated resource names:
+
+```bash
+kustomize build base | grep -E "^(kind:|  name:)"
+```
+
+## Collect diagnostics
+
+Collect logs and resource information:
+
+```bash
+kubectl logs --namespace "$NEWT_NAMESPACE" \
+ -l app.kubernetes.io/name=newt \
+ --tail=200 > newt-logs.txt
+
+kubectl get pods --namespace "$NEWT_NAMESPACE" \
+ -l app.kubernetes.io/name=newt \
+ -o yaml > newt-pods.yaml
+
+kubectl get events --namespace "$NEWT_NAMESPACE" \
+ --sort-by=.lastTimestamp > newt-events.txt
+
+helm get values "$NEWT_RELEASE" \
+ --namespace "$NEWT_NAMESPACE" > newt-helm-values.yaml
+
+helm get manifest "$NEWT_RELEASE" \
+ --namespace "$NEWT_NAMESPACE" > newt-helm-manifest.yaml
+```
+
+If using Kustomize:
+
+```bash
+kustomize build overlays/site-a > newt-kustomize-output.yaml
+```
+
+Before sharing diagnostics, remove:
+
+* Newt credentials
+* provisioning keys
+* TLS private keys
+* tokens
+* passwords
+* internal hostnames if sensitive
+
+## Next steps
+
+
+
+ Review Newt chart options.
+
+
+ Install Newt with Helm.
+
+
+ Install Newt with rendered manifests and Kustomize overlays.
+
+
+ Deploy Newt with Argo CD or Flux.
+
+
diff --git a/self-host/manual/kubernetes/overview.mdx b/self-host/manual/kubernetes/overview.mdx
new file mode 100644
index 0000000..ae76fd0
--- /dev/null
+++ b/self-host/manual/kubernetes/overview.mdx
@@ -0,0 +1,81 @@
+---
+title: "Overview"
+description: "Deploy Pangolin, Sites (Newt), and related components on Kubernetes."
+---
+
+import PangolinCloudTocCta from "/snippets/pangolin-cloud-toc-cta.mdx";
+
+
+
+## Components
+
+| Component | Role |
+| --- | --- |
+| Pangolin | Main application for the dashboard, API, authentication, configuration, and database-backed state. |
+| Gerbil | Tunnel stack component used by Pangolin for site connectivity. |
+| Site (Newt) | Site connector used to connect private resources to Pangolin. |
+| Traefik | Reverse proxy and router for ingress traffic. |
+| PostgreSQL / SQLite | Database options for Pangolin deployments, depending on the selected chart configuration. |
+| Pangolin Kube Controller | Kubernetes controller for integrating Pangolin with Kubernetes and Traefik resources. |
+
+
+Depending on your deployment mode, not every component is required. Local reverse proxy deployments and tunneled site deployments can have different component requirements.
+
+
+```mermaid
+flowchart LR
+ U[Users] --> T[Traefik]
+ T --> P[Pangolin]
+ P --> G[Gerbil]
+ S[Site connector<br/>Newt] --> G
+ P --> D[(Database)]
+```
+
+## Installation paths
+
+
+
+ Pick the Kubernetes workflow that matches how you deploy applications.
+
+
+ Review the required cluster, ingress, DNS, storage, and secret setup.
+
+
+ Install Pangolin or Sites (Newt) with the standard chart-based workflow.
+
+
+ Use overlays and patches for manifest-based deployments.
+
+
+ Deploy Pangolin or Sites (Newt) with Argo CD.
+
+
+ Deploy Pangolin or Sites (Newt) with Flux.
+
+
+ Manage multiple Helm releases together.
+
+
+
+## Component guides
+
+
+
+ Install Pangolin with the Helm chart.
+
+
+ Configure Pangolin for your Kubernetes environment.
+
+
+ Diagnose and resolve Pangolin deployment issues.
+
+
+ Install a Site connector with the Newt Helm chart.
+
+
+ Configure Site connector credentials and runtime settings.
+
+
+ Diagnose and resolve Site connector deployment issues.
+
+
diff --git a/self-host/manual/kubernetes/pangolin/configuration.mdx b/self-host/manual/kubernetes/pangolin/configuration.mdx
new file mode 100644
index 0000000..7eed350
--- /dev/null
+++ b/self-host/manual/kubernetes/pangolin/configuration.mdx
@@ -0,0 +1,1000 @@
+---
+title: "Configuration"
+description: "Configuration reference for Pangolin Kubernetes deployments."
+---
+
+import PangolinCloudTocCta from "/snippets/pangolin-cloud-toc-cta.mdx";
+
+
+
+
+This page covers the main Pangolin Kubernetes configuration options for Helm and Kustomize workflows.
+
+For exhaustive option coverage, refer to the chart resources:
+
+
+
+
+
+
+
+## Version context
+
+This page is aligned with the Pangolin Helm chart `0.1.0-alpha.0`.
+
+| Item | Value |
+| --- | --- |
+| Chart version | `0.1.0-alpha.0` |
+| Pangolin app version | `1.18.2` |
+| Kubernetes version | `>=1.30.14-0` |
+| Gerbil image tag | `1.3.1` |
+| pangolin-kube-controller image tag | `0.1.0-alpha.1` |
+| Traefik image tag | `v3.6.15` |
+
+## Configuration sections
+
+
+
+
+Control how Pangolin components are deployed and integrated with Kubernetes.
+
+```yaml
+deployment:
+ type: controller
+ mode: multi
+ installTraefikController: false
+ traefikNamespace: ""
+```
+
+Recommended production topology:
+
+```yaml
+deployment:
+ type: controller
+ mode: multi
+```
+
+| Setting | Description |
+| ------------------------------------------ | --------------------------------------------------------------------------------------------------------------- |
+| `deployment.type=controller` | Uses `pangolin-kube-controller` and Traefik CRDs. Recommended for Kubernetes deployments. |
+| `deployment.type=standalone` | Runs an internal Traefik workload managed by this chart. Mainly useful for labs and self-contained deployments. |
+| `deployment.mode=multi` | Runs Pangolin, Gerbil, and controller/Traefik components as separate workloads. Recommended for production. |
+| `deployment.mode=single` | Runs multiple components in one shared Pod. Useful only when you explicitly need a compact topology. |
+| `deployment.installTraefikController=true` | Installs the bundled Traefik dependency in controller mode. |
+| `deployment.traefikNamespace` | Namespace where Traefik controller resources live. Defaults to the release namespace when empty. |
+
+
+In controller mode, Traefik CRDs and a Traefik controller must be available. You can install Traefik separately or enable the bundled Traefik dependency with `deployment.installTraefikController=true`.
+
+
+If you enable the bundled Traefik dependency, put Traefik chart overrides under the `traefikController` key.
+
+
+
+
+
+Namespace creation is controlled by the `namespace` block.
+
+```yaml
+namespace:
+ create: false
+ name: ""
+ labels: {}
+ podSecurity:
+ enforce: ""
+ warn: ""
+ audit: ""
+```
+
+Recommended pattern:
+
+1. Create the namespace manually.
+2. Apply the required labels and annotations.
+3. Install the chart into that namespace.
+
+```bash
+kubectl create namespace pangolin
+```
+
+Gerbil requires `NET_ADMIN` for WireGuard interface management. If your cluster enforces Pod Security Admission, the namespace must allow that capability.
+
+Example:
+
+```bash
+kubectl label namespace pangolin \
+ pod-security.kubernetes.io/enforce=privileged \
+ --overwrite
+```
+
+If you let the chart create the namespace, configure the Pod Security labels through values:
+
+```yaml
+namespace:
+ create: true
+ name: pangolin
+ podSecurity:
+ enforce: privileged
+ warn: baseline
+ audit: restricted
+```
+
+
+Do not apply a restricted Pod Security profile to a namespace running Gerbil unless you have validated WireGuard functionality. Gerbil requires `NET_ADMIN`; removing it breaks tunnel management.
+
+
+
+
+
+
+Choose the database backend for Pangolin.
+
+```yaml
+database:
+ mode: cloudnativepg
+ name: pangolin
+ username: pangolin
+```
+
+Supported modes:
+
+| Mode | Use case |
+| --------------- | --------------------------------------------------------------------- |
+| `cloudnativepg` | Recommended production mode using CloudNativePG. This is the default. |
+| `external` | Production mode with an externally managed PostgreSQL database. |
+| `embedded` | Chart-managed PostgreSQL for labs and test environments. |
+| `sqlite` | Development or CI only. Not recommended for production. |
+
+### CloudNativePG
+
+The default database mode is `cloudnativepg`.
+
+```yaml
+database:
+ mode: cloudnativepg
+ cloudnativepg:
+ cluster:
+ name: pangolin-db
+ connection:
+ database: pangolin
+ username: pangolin
+ sslMode: disable
+
+cnpg-operator:
+ enabled: false
+
+cnpg-cluster:
+ enabled: false
+ fullnameOverride: pangolin-db
+```
+
+CloudNativePG can be used in four common ways:
+
+| Mode | Values |
+| -------------------------------------- | ----------------------------------------------------------- |
+| Existing operator and existing cluster | `cnpg-operator.enabled=false`, `cnpg-cluster.enabled=false` |
+| Chart installs operator only | `cnpg-operator.enabled=true`, `cnpg-cluster.enabled=false` |
+| Chart installs cluster only | `cnpg-operator.enabled=false`, `cnpg-cluster.enabled=true` |
+| Chart installs operator and cluster | `cnpg-operator.enabled=true`, `cnpg-cluster.enabled=true` |
+
+When `cnpg-cluster.enabled=true`, keep the CNPG cluster name consistent:
+
+```yaml
+database:
+ cloudnativepg:
+ cluster:
+ name: pangolin-db
+
+cnpg-cluster:
+ enabled: true
+ fullnameOverride: pangolin-db
+```
+
+For the default CNPG cluster name `pangolin-db`, CloudNativePG creates an application Secret named `pangolin-db-app` with the key `uri`. The chart can automatically use this default Secret when no explicit `database.connection.existingSecretName` is set.
+
+Explicit Secret reference:
+
+```yaml
+database:
+ connection:
+ existingSecretName: pangolin-db-app
+ existingSecretKey: uri
+```
+
+### External PostgreSQL
+
+For an external PostgreSQL database, prefer a Kubernetes Secret containing the final connection string.
+
+```yaml
+database:
+ mode: external
+ connection:
+ existingSecretName: pangolin-db-connection
+ existingSecretKey: connectionString
+```
+
+The Secret should contain a PostgreSQL connection string:
+
+```bash
+kubectl create secret generic pangolin-db-connection \
+ --namespace pangolin \
+ --from-literal=connectionString='postgresql://pangolin:password@postgres.example.com:5432/pangolin?sslmode=require'
+```
+
+You can also let the chart create a connection Secret from values:
+
+```yaml
+database:
+ mode: external
+ external:
+ generatedSecret:
+ create: true
+ host: postgres.example.com
+ port: 5432
+ database: pangolin
+ username: pangolin
+ password: ""
+ sslMode: require
+```
+
+
+Avoid storing database passwords directly in values files for production. Use an existing Secret or your normal secret-management workflow.
+
+
+### Embedded PostgreSQL
+
+Embedded PostgreSQL is intended for labs and tests.
+
+```yaml
+database:
+ mode: embedded
+ embedded:
+ persistence:
+ enabled: true
+ size: 8Gi
+```
+
+### SQLite
+
+SQLite is only suitable for development, CI, or very small test deployments.
+
+```yaml
+database:
+ mode: sqlite
+ sqlite:
+ persistence:
+ enabled: true
+ size: 1Gi
+```
+
+
+
+
+
+The `pangolin.config` block renders `/app/config/config.yml`.
+
+```yaml
+pangolin:
+ config:
+ app:
+ dashboard_url: "https://pangolin.example.com"
+ log_level: info
+ domains:
+ domain1:
+ base_domain: "example.com"
+ cert_resolver: "letsencrypt"
+ gerbil:
+ start_port: 51820
+ clients_start_port: 21820
+ base_endpoint: "pangolin.example.com"
+ use_subdomain: false
+ traefik:
+ enabled: true
+ http_entrypoint: web
+ https_entrypoint: websecure
+ cert_resolver: letsencrypt
+```
+
+Important settings:
+
+| Setting | Description |
+| ------------------------------------------- | ------------------------------------------------------------------------------------------- |
+| `pangolin.config.app.dashboard_url` | Public dashboard URL. Set this to the real user-facing URL. |
+| `pangolin.config.domains` | Domain map used by Pangolin. Replace the default `example.com` entry before production use. |
+| `pangolin.config.gerbil.base_endpoint` | Public hostname or IP where Gerbil is reachable. |
+| `pangolin.config.gerbil.start_port` | First WireGuard site port. Keep this aligned with `gerbil.ports.wg1`. |
+| `pangolin.config.gerbil.clients_start_port` | Client WireGuard port. Keep this aligned with `gerbil.ports.wg2`. |
+| `pangolin.config.traefik.enabled` | Includes Pangolin's Traefik config section. This does not install Traefik. |
+| `pangolin.config.traefik.cert_resolver` | ACME resolver name used in Pangolin-generated Traefik configuration. |
+
+
+`pangolin.config.traefik` controls the Traefik configuration generated by Pangolin. Traefik installation is controlled separately through controller mode, the bundled Traefik dependency, or standalone Traefik mode.
+
+
+### Pangolin app secret
+
+Pangolin requires `SERVER_SECRET`.
+
+Use an existing Secret for production:
+
+```yaml
+pangolin:
+ secret:
+ existingSecretName: pangolin-app-secret
+ existingSecretKey: SERVER_SECRET
+```
+
+Create the Secret:
+
+```bash
+kubectl create secret generic pangolin-app-secret \
+ --namespace pangolin \
+ --from-literal=SERVER_SECRET=''
+```
+
+If no existing Secret is provided, the chart can generate one:
+
+```yaml
+pangolin:
+ secret:
+ generated:
+ create: true
+ key: SERVER_SECRET
+ length: 64
+```
+
+
+Do not commit plaintext secrets to Git. For GitOps workflows, use SOPS, Sealed Secrets, External Secrets Operator, Vault, Infisical, or a cloud secret manager.
+
+
+
+
+
+
+In controller mode, the chart can render a Traefik `IngressRoute` for the Pangolin dashboard and API.
+
+```yaml
+pangolin:
+ ingressRoute:
+ dashboard:
+ enabled: true
+ host: ""
+ ingressClassName: ""
+ traefikSelectorLabels: {}
+ entryPoints:
+ - websecure
+ routes:
+ api:
+ enabled: true
+ pathPrefix: /api/v1
+ priority: 100
+ dashboard:
+ enabled: true
+ priority: 10
+ tls:
+ enabled: true
+ certResolver: ""
+ secretName: ""
+```
+
+Default routing behavior:
+
+| Route | Match | Backend port |
+| --------- | ---------------------------------- | ------------------------------------------------- |
+| API | `Host(...) && PathPrefix(/api/v1)` | `pangolin.service.ports.external`, default `3000` |
+| Dashboard | `Host(...)` | `pangolin.service.ports.next`, default `3002` |
+
+The host defaults to the hostname from `pangolin.config.app.dashboard_url`. You can override it with:
+
+```yaml
+pangolin:
+ ingressRoute:
+ dashboard:
+ host: pangolin.example.com
+```
+
+### TLS with certResolver
+
+```yaml
+pangolin:
+ config:
+ traefik:
+ cert_resolver: letsencrypt
+ ingressRoute:
+ dashboard:
+ tls:
+ enabled: true
+ certResolver: letsencrypt
+ secretName: ""
+```
+
+### TLS with existing Secret
+
+```yaml
+pangolin:
+ ingressRoute:
+ dashboard:
+ tls:
+ enabled: true
+ certResolver: ""
+ secretName: pangolin-dashboard-tls
+```
+
+
+`tls.certResolver` and `tls.secretName` are mutually exclusive. Use one or the other.
+
+
+### Multi-Traefik setups
+
+Use labels to target a specific Traefik CRD provider when multiple Traefik instances watch different label selectors:
+
+```yaml
+pangolin:
+ ingressRoute:
+ dashboard:
+ traefikSelectorLabels:
+ traefik-instance: public
+```
+
+You can also set an ingress class annotation:
+
+```yaml
+pangolin:
+ ingressRoute:
+ dashboard:
+ ingressClassName: traefik-public
+```
+
+
+
+
+
+Gerbil manages WireGuard tunnel connectivity for Pangolin.
+
+```yaml
+gerbil:
+ enabled: true
+ startupMode: normal
+ ports:
+ wg1: 51820
+ wg2: 21820
+ internalApi: 3004
+ service:
+ enabled: true
+ type: ClusterIP
+ persistence:
+ enabled: true
+ size: 1Gi
+```
+
+Important settings:
+
+| Setting | Description |
+| ---------------------------- | ----------------------------------------------------------------------------------------- |
+| `gerbil.enabled` | Enables the Gerbil component. |
+| `gerbil.startupMode` | Controls first-run and normal startup behavior. |
+| `gerbil.ports.wg1` | First WireGuard UDP port. Keep aligned with `pangolin.config.gerbil.start_port`. |
+| `gerbil.ports.wg2` | Second WireGuard UDP port. Keep aligned with `pangolin.config.gerbil.clients_start_port`. |
+| `gerbil.ports.internalApi` | Internal Gerbil API/listener port. |
+| `gerbil.service.enabled` | Creates a Service for Gerbil UDP traffic. |
+| `gerbil.persistence.enabled` | Persists Gerbil key/config data. Recommended for production. |
+
+
+If Gerbil is exposed through a reverse proxy or UDP gateway, keep proxy protocol settings aligned end-to-end. Do not enable proxy protocol on the upstream hop unless Gerbil is configured to accept it.
+
+
+### Startup mode
+
+```yaml
+gerbil:
+ startupMode: delayed
+```
+
+| Mode | Behavior |
+| -------------------- | ---------------------------------------------------------------------------------------------------------------------------- |
+| `normal` | Starts Gerbil immediately. Use after Pangolin setup is complete. |
+| `delayed` | Renders Gerbil resources but keeps the Deployment at `replicas: 0` in multi mode. Useful for first installs and smoke tests. |
+| `disabledUntilSetup` | Does not render Gerbil resources until switched back to `normal` or `delayed`. |
+
+For first installs, `delayed` can help when Gerbil would otherwise fail before the initial Pangolin setup is complete.
+
+Switch back after setup:
+
+```bash
+helm upgrade pangolin fossorial/pangolin \
+ --namespace pangolin \
+ --reuse-values \
+ --set gerbil.startupMode=normal
+```
+
+### Security
+
+Gerbil requires `NET_ADMIN`.
+
+```yaml
+gerbil:
+ securityContext:
+ runAsNonRoot: false
+ allowPrivilegeEscalation: false
+ readOnlyRootFilesystem: false
+ capabilities:
+ add:
+ - NET_ADMIN
+ drop:
+ - ALL
+```
+
+
+Do not remove `NET_ADMIN` from Gerbil. Without it, Gerbil cannot create or manage WireGuard interfaces. `SYS_MODULE` is not added by default and should only be added when your node kernel requires module loading from inside the container.
+
+
+
+
+
+
+NetworkPolicy rendering is enabled by default.
+
+
+The chart-managed NetworkPolicies are intended to allow required Pangolin, Gerbil, database, DNS, and controller traffic for standard deployments.
+
+
+```yaml
+networkPolicy:
+ enabled: true
+ allowExternalIngress: true
+ allowExternalEgressHttps: false
+ dns:
+ enabled: true
+ database:
+ enabled: true
+ port: 5432
+ controller:
+ egress:
+ enabled: true
+ kubernetesApi:
+ enabled: true
+ cidr: ""
+ port: 443
+ metrics:
+ enabled: false
+ gerbil:
+ allowWireguardUdpEgress: true
+ wireguardUdpCIDRs:
+ - 0.0.0.0/0
+```
+
+Important defaults:
+
+| Setting | Default | Notes |
+| ------------------------------------------------------- | ------- | -------------------------------------------------------------------------------- |
+| `networkPolicy.enabled` | `true` | Renders NetworkPolicy resources. |
+| `networkPolicy.allowExternalIngress` | `true` | Allows public ingress to exposed services controlled by the chart. |
+| `networkPolicy.allowExternalEgressHttps` | `false` | Broad HTTPS egress is not allowed by default. Prefer scoped `extraEgress` rules. |
+| `networkPolicy.dns.enabled` | `true` | Allows DNS egress. |
+| `networkPolicy.database.enabled` | `true` | Adds database egress rules for Pangolin. |
+| `networkPolicy.controller.egress.kubernetesApi.enabled` | `true` | Allows controller API-server access when configured. |
+| `networkPolicy.gerbil.allowWireguardUdpEgress` | `true` | Allows Gerbil UDP egress for WireGuard peer traffic. |
+
+When tightening policies, verify these paths:
+
+* DNS egress
+* Pangolin to database
+* controller to Kubernetes API
+* ingress controller to Pangolin service
+* Gerbil UDP traffic
+* outbound access for SMTP, OIDC, webhooks, or other external integrations
+
+Use component-scoped rules where possible:
+
+```yaml
+networkPolicy:
+ pangolin:
+ extraEgress: []
+ controller:
+ extraEgress: []
+ gerbil:
+ extraEgress: []
+```
+
+
+If you disable or replace chart-managed NetworkPolicies, ensure your custom policies still allow all required traffic paths.
+
+
+
+
+
+
+The chart has chart-level monitoring settings for Pangolin and controller-specific monitoring settings for `pangolin-kube-controller`.
+
+### Pangolin monitoring
+
+```yaml
+monitoring:
+ enabled: false
+ service:
+ enabled: false
+ type: ClusterIP
+ port: 9090
+ portName: metrics
+ metrics:
+ targetPortName: metrics
+ targetPort: 9090
+ path: /metrics
+```
+
+### Controller monitoring
+
+```yaml
+controller:
+ service:
+ enabled: true
+ port: 9090
+ portName: metrics
+ monitoring:
+ serviceMonitor:
+ enabled: false
+ podMonitor:
+ enabled: false
+ prometheusRule:
+ enabled: false
+```
+
+Enable controller ServiceMonitor when Prometheus Operator is available:
+
+```yaml
+controller:
+ monitoring:
+ serviceMonitor:
+ enabled: true
+```
+
+Enable chart-level metrics Service when the Pangolin app exposes metrics in your selected configuration:
+
+```yaml
+monitoring:
+ enabled: true
+ service:
+ enabled: true
+```
+
+
+Only enable ServiceMonitor, PodMonitor, or PrometheusRule resources when the matching CRDs are installed in the cluster.
+
+
+
+
+
+
+The chart uses separate ServiceAccounts for Pangolin, Gerbil, and the controller in multi mode.
+
+```yaml
+serviceAccount:
+ pangolin:
+ create: true
+ automountServiceAccountToken: false
+ gerbil:
+ create: true
+ automountServiceAccountToken: false
+ controller:
+ create: true
+ automountServiceAccountToken: true
+
+rbac:
+ create: true
+```
+
+Default behavior:
+
+| Component | API token mounted by default | Reason |
+| ---------- | ---------------------------- | ----------------------------------------------------------------------- |
+| Pangolin | No | The app does not need Kubernetes API access. |
+| Gerbil | No | Gerbil manages WireGuard and does not need Kubernetes API access. |
+| Controller | Yes | The controller reconciles Traefik CRDs and needs Kubernetes API access. |
+
+
+In `deployment.mode=single` with `deployment.type=controller`, Kubernetes ServiceAccount selection is Pod-level. The shared Pod uses the controller ServiceAccount and token.
+
+
+
+
+
+
+Global scheduling defaults:
+
+```yaml
+global:
+ storageClass: ""
+ image:
+ registry: docker.io
+ imagePullPolicy: IfNotPresent
+ imagePullSecrets: []
+ nodeSelector: {}
+ tolerations: []
+ affinity: {}
+ topologySpreadConstraints: []
+ priorityClassName: ""
+```
+
+Resource rendering policy:
+
+```yaml
+resourcesPolicy:
+ cpuLimits:
+ enabled: true
+ ephemeralStorage:
+ enabled: false
+```
+
+
+CPU limits can cause throttling even when spare CPU exists on the node. For most deployments, start with CPU requests and memory limits, then add CPU limits only when explicitly required.
+
+
+Pangolin resources:
+
+```yaml
+pangolin:
+ resources:
+ requests:
+ cpu: 200m
+ memory: 256Mi
+ ephemeral-storage: 32Mi
+ limits:
+ cpu: 1000m
+ memory: 1Gi
+ ephemeral-storage: 256Mi
+```
+
+Gerbil resources:
+
+```yaml
+gerbil:
+ resources:
+ requests:
+ cpu: 100m
+ memory: 128Mi
+ ephemeral-storage: 16Mi
+ limits:
+ cpu: 500m
+ memory: 512Mi
+ ephemeral-storage: 128Mi
+```
+
+Controller resources:
+
+```yaml
+controller:
+ resources:
+ requests:
+ cpu: 100m
+ memory: 128Mi
+ ephemeral-storage: 16Mi
+ limits:
+ cpu: 500m
+ memory: 512Mi
+ ephemeral-storage: 128Mi
+```
+
+Image configuration:
+
+```yaml
+images:
+ pangolin:
+ registry: docker.io
+ repository: fosrl/pangolin
+ tag: ""
+ digest: ""
+ pangolinPostgresql:
+ registry: docker.io
+ repository: fosrl/pangolin
+ tag: ""
+ digest: ""
+ gerbil:
+ registry: docker.io
+ repository: fosrl/gerbil
+ tag: "1.3.1"
+ digest: ""
+ controller:
+ registry: ghcr.io
+ repository: fosrl/pangolin-kube-controller
+ tag: "0.1.0-alpha.1"
+ digest: ""
+ traefik:
+ registry: docker.io
+ repository: traefik
+ tag: v3.6.15
+ digest: ""
+```
+
+The chart automatically selects the PostgreSQL-capable Pangolin image variant for non-SQLite database modes unless you override the Pangolin tag or digest.
+
+
+Ephemeral-storage requests and limits are only rendered when `resourcesPolicy.ephemeralStorage.enabled=true`.
+
+
+
+
+
+
+Standalone Traefik is used mainly when `deployment.type=standalone`.
+
+```yaml
+traefik:
+ enabled: false
+ service:
+ enabled: true
+ type: LoadBalancer
+ config:
+ dashboard: false
+ httpEntrypoint: web
+ httpsEntrypoint: websecure
+ certResolver: letsencrypt
+ letsencryptEmail: ""
+ persistence:
+ enabled: false
+```
+
+Important notes:
+
+- `traefik.enabled=true` runs an internal Traefik workload managed by this chart.
+- `traefik.config.letsencryptEmail` is required when standalone Traefik is enabled.
+- If you use an ACME certificate resolver such as Let's Encrypt, enable `traefik.persistence.enabled` so ACME state survives restarts.
+- In controller mode, prefer using an existing or bundled Traefik controller instead of standalone Traefik.
+
+
+
+
+
+The chart can store Pangolin Blueprint YAML files as Kubernetes ConfigMaps and Secrets.
+
+```yaml
+pangolin:
+ blueprints:
+ enabled: false
+ configMap:
+ create: true
+ files: {}
+ environmentSecret:
+ create: true
+ existingConfigMap: ""
+ existingEnvironmentSecret: ""
+```
+
+Example:
+
+```yaml
+pangolin:
+ blueprints:
+ enabled: true
+ configMap:
+ create: true
+ files:
+ site-blueprint.yaml: |
+ sites:
+ my-site:
+ name: My Site
+ public-resources:
+ web-app:
+ name: Web Application
+ protocol: http
+ full-domain: "app.example.com"
+ targets:
+ - site: my-site
+ hostname: app
+ port: 8080
+ method: http
+```
+
+Sensitive blueprint environment values should come from a Secret:
+
+```yaml
+pangolin:
+ blueprints:
+ enabled: true
+ existingConfigMap: my-blueprint-configmap
+ existingEnvironmentSecret: my-blueprint-env
+```
+
+
+The Pangolin server does not apply Blueprint files directly. Blueprints are applied by Newt through the Pangolin API using `--blueprint-file` or `--provisioning-blueprint-file`.
+
+
+
+
+
+
+## Configuration by install method
+
+### Helm
+
+Use a values file:
+
+```bash
+helm upgrade --install pangolin fossorial/pangolin \
+ --namespace pangolin \
+ --values values-pangolin.yaml
+```
+
+Use inline values only for small tests:
+
+```bash
+helm upgrade --install pangolin fossorial/pangolin \
+ --namespace pangolin \
+ --set deployment.type=controller \
+ --set deployment.mode=multi \
+ --set database.mode=cloudnativepg \
+ --set pangolin.config.app.dashboard_url=https://pangolin.example.com
+```
+
+See [Pangolin Helm](/self-host/manual/kubernetes/pangolin/helm) for the installation flow.
+
+For complete application configuration keys and examples, see:
+
+- [Public config file reference](/self-host/advanced/config-file)
+- [Private config file reference](/self-host/advanced/private-config-file)
+
+### Kustomize
+
+Render the chart with Helm, then apply Kustomize overlays:
+
+```bash
+helm template pangolin fossorial/pangolin \
+ --namespace pangolin \
+ --values values-pangolin.yaml \
+ > base/pangolin.yaml
+```
+
+Apply the overlay:
+
+```bash
+kubectl apply -k overlays/prod
+```
+
+See [Pangolin Kustomize](/self-host/manual/kubernetes/pangolin/kustomize) for the Kustomize workflow.
+
+### GitOps
+
+Store Helm values or Kustomize overlays in Git. Argo CD or Flux reconciles the desired state.
+
+Argo CD Helm example:
+
+```yaml
+spec:
+ source:
+ helm:
+ values: |
+ deployment:
+ type: controller
+ mode: multi
+ database:
+ mode: cloudnativepg
+```
+
+Flux HelmRelease example:
+
+```yaml
+spec:
+ values:
+ deployment:
+ type: controller
+ mode: multi
+ database:
+ mode: cloudnativepg
+```
+
+See [GitOps](/self-host/manual/kubernetes/gitops/overview) for GitOps guidance.
+
+## Next steps
+
+
+
+ Install Pangolin with Helm.
+
+
+ Install Pangolin with rendered manifests and Kustomize overlays.
+
+
+ Debug Pangolin deployments on Kubernetes.
+
+
+ Deploy Pangolin with Argo CD or Flux.
+
+
diff --git a/self-host/manual/kubernetes/pangolin/helm.mdx b/self-host/manual/kubernetes/pangolin/helm.mdx
new file mode 100644
index 0000000..2fb5ca8
--- /dev/null
+++ b/self-host/manual/kubernetes/pangolin/helm.mdx
@@ -0,0 +1,403 @@
+---
+title: "Helm"
+description: "Quick-start guide for installing Pangolin on Kubernetes using Helm."
+---
+
+import PangolinCloudTocCta from "/snippets/pangolin-cloud-toc-cta.mdx";
+
+
+
+
+
+The Pangolin Helm chart is currently alpha (`0.1.0-alpha.0`). Test installs and upgrades in a non-production environment before using the chart for production traffic.
+
+
+## What Pangolin deploys
+
+The Pangolin Helm chart deploys the Pangolin control plane and related Kubernetes components.
+
+Depending on the selected values, the chart can deploy:
+
+- **Pangolin application**: dashboard, API, authentication, configuration, and application state.
+- **pangolin-kube-controller**: Kubernetes controller used in controller mode.
+- **Gerbil**: WireGuard tunnel manager used by the Pangolin tunnel stack.
+- **Traefik integration**: Traefik CRD-based routing in controller mode, bundled Traefik controller when enabled, or standalone Traefik mode.
+- **Database backend**: CloudNativePG, external PostgreSQL, embedded PostgreSQL, or SQLite.
+
+See [Version Matrix](https://github.com/fosrl/helm-charts/blob/main/VERSION_MATRIX.md) for chart and default app version references.
+
+## Gerbil setup in the Pangolin chart
+
+This chart deploys Gerbil when `gerbil.enabled=true`. This is the default when using `deployment.type=controller` and recommended.
+
+
+If Gerbil is exposed through a reverse proxy or UDP gateway, keep proxy protocol settings aligned end-to-end. Do not enable proxy protocol on the upstream hop unless Gerbil is configured to accept it.
+
+
+## Prerequisites
+
+Before installing Pangolin, you need:
+
+- Kubernetes `1.30.14` or newer.
+- Helm 3.x.
+- `kubectl` access to the target cluster.
+- A namespace prepared for the install.
+- A StorageClass if you use chart-managed persistent storage.
+- DNS records for the Pangolin dashboard and tunnel endpoint.
+- Traefik CRDs and a Traefik controller when using `deployment.type=controller`.
+- A database plan: CloudNativePG, external PostgreSQL, embedded PostgreSQL, or SQLite.
+
+See [Prerequisites](/self-host/manual/kubernetes/prerequisites) for detailed cluster, namespace, storage, networking, and security requirements.
+
+## Recommended quick install
+
+This quick install uses:
+
+- `deployment.type=controller`
+- `deployment.mode=multi`
+- `database.mode=cloudnativepg`
+- chart-managed CloudNativePG operator and cluster
+- chart-managed dashboard `IngressRoute`
+- Traefik cert resolver for TLS
+
+
+This example assumes a Traefik controller is available and can process the chart-managed `IngressRoute`. If you want the chart to install the bundled Traefik controller, set `deployment.installTraefikController=true`.
+
+
+### Step 1: Create the namespace
+
+Create the namespace before installing the chart:
+
+```bash
+kubectl create namespace pangolin
+```
+
+Gerbil requires `NET_ADMIN` for WireGuard interface management. If your cluster enforces Pod Security Admission, label the namespace accordingly:
+
+```bash
+kubectl label namespace pangolin \
+ pod-security.kubernetes.io/enforce=privileged \
+ pod-security.kubernetes.io/warn=baseline \
+ pod-security.kubernetes.io/audit=restricted \
+ --overwrite
+```
+
+
+Do not use a restricted Pod Security profile for a namespace running Gerbil unless you have validated the selected chart mode. Gerbil requires `NET_ADMIN` for WireGuard.
+
+
+### Step 2: Create a Pangolin app secret
+
+Create a Secret for `SERVER_SECRET`:
+
+```bash
+kubectl create secret generic pangolin-app-secret \
+ --namespace pangolin \
+ --from-literal=SERVER_SECRET=''
+```
+
+Use a long random value. Do not commit this secret to Git.
+
+### Step 3: Create a values file
+
+Create `values-pangolin.yaml`:
+
+```yaml
+deployment:
+ type: controller
+ mode: multi
+ installTraefikController: false
+
+database:
+ mode: cloudnativepg
+ cloudnativepg:
+ cluster:
+ name: pangolin-db
+
+cnpg-operator:
+ enabled: true
+
+cnpg-cluster:
+ enabled: true
+ fullnameOverride: pangolin-db
+ cluster:
+ instances: 1
+ storage:
+ size: 8Gi
+
+pangolin:
+ secret:
+ existingSecretName: pangolin-app-secret
+ existingSecretKey: SERVER_SECRET
+
+ config:
+ app:
+ dashboard_url: https://pangolin.example.com
+ domains:
+ domain1:
+ base_domain: example.com
+ cert_resolver: letsencrypt
+ gerbil:
+ base_endpoint: vpn.example.com
+ start_port: 51820
+ clients_start_port: 21820
+ traefik:
+ enabled: true
+ http_entrypoint: web
+ https_entrypoint: websecure
+ cert_resolver: letsencrypt
+
+ ingressRoute:
+ dashboard:
+ enabled: true
+ host: pangolin.example.com
+ entryPoints:
+ - websecure
+ tls:
+ enabled: true
+ certResolver: letsencrypt
+ secretName: ""
+
+gerbil:
+ enabled: true
+ startupMode: delayed
+ persistence:
+ enabled: true
+ size: 1Gi
+```
+
+Important points:
+
+* Replace `pangolin.example.com`, `example.com`, and `vpn.example.com`.
+* Keep `pangolin.config.gerbil.start_port` aligned with `gerbil.ports.wg1`.
+* Keep `pangolin.config.gerbil.clients_start_port` aligned with `gerbil.ports.wg2`.
+* Use `gerbil.startupMode=delayed` for the first install if Gerbil should not start before the initial Pangolin setup is complete.
+
+The chart defaults to `deployment.type=controller`, `deployment.mode=multi`, `database.mode=cloudnativepg`, and NetworkPolicy rendering enabled. Gerbil `startupMode` supports `normal`, `delayed`, and `disabledUntilSetup`.
+
+### Step 4: Install Pangolin
+
+Add the Helm repository:
+
+```bash
+helm repo add fossorial https://charts.fossorial.io
+helm repo update fossorial
+```
+
+Install Pangolin:
+
+```bash
+helm upgrade --install pangolin fossorial/pangolin \
+ --namespace pangolin \
+ --values values-pangolin.yaml
+```
+
+Do not use `--create-namespace` here. The namespace was created and labeled before installation.
+
+### Step 5: Verify the deployment
+
+Check Helm release status:
+
+```bash
+helm status pangolin --namespace pangolin
+helm history pangolin --namespace pangolin
+```
+
+Check workloads:
+
+```bash
+kubectl get pods --namespace pangolin
+kubectl get deploy,statefulset --namespace pangolin
+```
+
+Check Services:
+
+```bash
+kubectl get svc --namespace pangolin
+```
+
+Check Traefik `IngressRoute` resources:
+
+```bash
+kubectl get ingressroute --namespace pangolin
+```
+
+If Traefik CRDs are not installed, this command will fail. In that case, install Traefik CRDs or enable/install the Traefik controller path required by your selected deployment mode.
+
+Wait for the Pangolin pod to become ready:
+
+```bash
+kubectl wait --for=condition=ready pod \
+ -l app.kubernetes.io/name=pangolin \
+ --namespace pangolin \
+ --timeout=300s
+```
+
+## Accessing the dashboard
+
+After DNS and Traefik routing are configured, access Pangolin through the dashboard URL:
+
+```text
+https://pangolin.example.com
+```
+
+The API route is exposed under:
+
+```text
+https://pangolin.example.com/api/v1
+```
+
+
+For a temporary local check, port-forward the dashboard/UI port:
+
+```bash
+kubectl port-forward --namespace pangolin svc/pangolin 8080:3002
+```
+
+Then open:
+
+```text
+http://localhost:8080
+```
+
+
+
+The chart routes `/api/v1` to the Pangolin external/API port and the dashboard route to the Next/UI port. The default service ports are `3000` for external/API and `3002` for the dashboard/UI.
+
+## Switch Gerbil to normal startup
+
+If you installed with `gerbil.startupMode=delayed`, switch Gerbil to normal mode after the initial setup is complete:
+
+```bash
+helm upgrade pangolin fossorial/pangolin \
+ --namespace pangolin \
+ --reuse-values \
+ --set gerbil.startupMode=normal
+```
+
+Check Gerbil resources:
+
+```bash
+kubectl get pods,svc,pvc --namespace pangolin \
+ -l app.kubernetes.io/name=gerbil
+```
+
+## Upgrade
+
+Update the Helm repository:
+
+```bash
+helm repo update fossorial
+```
+
+Upgrade the release:
+
+```bash
+helm upgrade pangolin fossorial/pangolin \
+ --namespace pangolin \
+ --values values-pangolin.yaml
+```
+
+Check upgrade status:
+
+```bash
+helm status pangolin --namespace pangolin
+helm history pangolin --namespace pangolin
+```
+
+Rollback if needed:
+
+```bash
+helm rollback pangolin --namespace pangolin
+```
+
+## OCI install
+
+The Pangolin chart is also published as an OCI chart in GHCR.
+
+Pull the chart:
+
+```bash
+helm pull oci://ghcr.io/fosrl/helm-charts/pangolin \
+ --version 0.1.0-alpha.0
+```
+
+Install from OCI:
+
+```bash
+helm upgrade --install pangolin oci://ghcr.io/fosrl/helm-charts/pangolin \
+ --version 0.1.0-alpha.0 \
+ --namespace pangolin \
+ --values values-pangolin.yaml
+```
+
+OCI changes where Helm pulls the chart from. It does not change the values file or the release behavior.
+
+## Architecture overview
+
+Recommended deployment mode:
+
+```yaml
+deployment:
+ type: controller
+ mode: multi
+```
+
+In this topology:
+
+| Component | Role |
+| -------------------------- | -------------------------------------------------------------------- |
+| Pangolin | Main application, dashboard, API, authentication, and configuration. |
+| pangolin-kube-controller | Reconciles dynamic Kubernetes and Traefik CRD configuration. |
+| Gerbil | WireGuard tunnel manager for Pangolin sites. |
+| Traefik | Routes dashboard, API, and site traffic. |
+| CloudNativePG / PostgreSQL | Stores Pangolin application state. |
+
+Database modes:
+
+| Mode | Use case |
+| --------------- | --------------------------------------------------- |
+| `cloudnativepg` | Recommended Kubernetes production path. |
+| `external` | Production path with externally managed PostgreSQL. |
+| `embedded` | Lab or test setups. |
+| `sqlite` | Development or CI only. |
+
+The chart supports `cloudnativepg`, `external`, `embedded`, and `sqlite` database modes. The chart comments mark `cloudnativepg` as the preferred production mode and SQLite as development/test only.
+
+## Chart signing
+
+The chart metadata includes Artifact Hub signing information:
+
+```text
+Fingerprint: 48E7F670FCC13645FC48B08D587294B228C2EC2C
+Public key: https://charts.fossorial.io/pgp_keys.asc
+```
+
+Use this metadata when verifying signed chart releases. The signing key and fingerprint are published in the chart annotations.
+
+## References
+
+
+
+
+
+
+
+
+
+## Next steps
+
+
+
+ Review Pangolin chart options.
+
+
+ Debug Pangolin deployment and routing issues.
+
+
+ Install Pangolin with rendered manifests and Kustomize overlays.
+
+
+ Deploy Pangolin with Argo CD or Flux.
+
+
diff --git a/self-host/manual/kubernetes/pangolin/kustomize.mdx b/self-host/manual/kubernetes/pangolin/kustomize.mdx
new file mode 100644
index 0000000..69410bd
--- /dev/null
+++ b/self-host/manual/kubernetes/pangolin/kustomize.mdx
@@ -0,0 +1,699 @@
+---
+title: "Kustomize"
+description: "Deploy Pangolin on Kubernetes using Helm-rendered manifests and Kustomize overlays."
+---
+
+import PangolinCloudTocCta from "/snippets/pangolin-cloud-toc-cta.mdx";
+
+
+
+
+Use Kustomize when you want to manage Pangolin with rendered manifests, environment-specific overlays, and explicit patches in Git.
+
+For Pangolin, the supported Kustomize workflow is:
+
+1. Render the Pangolin Helm chart to manifests.
+2. Use the rendered output as the Kustomize base.
+3. Create overlays per environment.
+4. Apply the overlay with `kubectl apply -k` or reconcile it with Argo CD or Flux.
+
+## When to use Kustomize for Pangolin
+
+Use Kustomize if you:
+
+- want environment-specific overlays for dev, staging, or production
+- need explicit patches committed to Git
+- prefer reviewing rendered Kubernetes manifests before applying them
+- use Argo CD or Flux with Kustomize sources
+- want to customize Helm-rendered output without forking the chart
+
+For a simpler single-environment setup, use [Pangolin Helm](/self-host/manual/kubernetes/pangolin/helm).
+
+## Version context
+
+This page is aligned with the Pangolin Helm chart `0.1.0-alpha.0`.
+
+| Item | Value |
+| --- | --- |
+| Chart version | `0.1.0-alpha.0` |
+| Pangolin app version | `1.18.2` |
+| Kubernetes version | `>=1.30.14-0` |
+| Gerbil image tag | `1.3.1` |
+| pangolin-kube-controller image tag | `0.1.0-alpha.1` |
+| Traefik image tag | `v3.6.15` |
+
+## Supported approach
+
+The Pangolin chart does not provide native Kustomize bases. Render the Helm chart first, then use Kustomize on the rendered manifests.
+
+
+Do not manage the same Pangolin resources with both a live Helm release and Kustomize. Pick one ownership model per environment.
+
+
+Recommended ownership model:
+
+- Use Helm only to render the Pangolin chart.
+- Use Kustomize, Argo CD, or Flux to apply and reconcile the rendered manifests.
+- Re-render the base when upgrading the chart or changing Helm values.
+
+## Example directory structure
+
+```text
+pangolin-deployment/
+├── base/
+│ ├── kustomization.yaml
+│ └── pangolin.yaml
+├── overlays/
+│ ├── dev/
+│ │ ├── kustomization.yaml
+│ │ └── patches/
+│ │ └── pangolin-resources.patch.yaml
+│ ├── staging/
+│ │ ├── kustomization.yaml
+│ │ └── patches/
+│ │ └── pangolin-resources.patch.yaml
+│ └── prod/
+│ ├── kustomization.yaml
+│ └── patches/
+│ ├── pangolin-resources.patch.yaml
+│ └── ingressroute-host.patch.yaml
+└── values/
+ ├── values-base.yaml
+ ├── values-dev.yaml
+ ├── values-staging.yaml
+ └── values-prod.yaml
+```
+
+## Step 1: Create the namespace
+
+Create the namespace before applying rendered manifests:
+
+```bash
+kubectl create namespace pangolin
+```
+
+Gerbil requires `NET_ADMIN` for WireGuard interface management. If your cluster enforces Pod Security Admission, label the namespace before creating workloads:
+
+```bash
+kubectl label namespace pangolin \
+ pod-security.kubernetes.io/enforce=privileged \
+ pod-security.kubernetes.io/warn=baseline \
+ pod-security.kubernetes.io/audit=restricted \
+ --overwrite
+```
+
+
+Do not use a restricted Pod Security profile for a namespace running Gerbil unless you have validated the selected chart mode. Gerbil requires `NET_ADMIN`.
+
+
+## Step 2: Create the Pangolin app Secret
+
+Create a Secret for `SERVER_SECRET`:
+
+```bash
+kubectl create secret generic pangolin-app-secret \
+ --namespace pangolin \
+ --from-literal=SERVER_SECRET="$(openssl rand -base64 48)"
+```
+
+Do not commit this Secret to Git.
+
+## Step 3: Create base values
+
+Create `values/values-base.yaml`:
+
+```yaml
+deployment:
+ type: controller
+ mode: multi
+ installTraefikController: false
+
+database:
+ mode: cloudnativepg
+ cloudnativepg:
+ cluster:
+ name: pangolin-db
+
+cnpg-operator:
+ enabled: true
+
+cnpg-cluster:
+ enabled: true
+ fullnameOverride: pangolin-db
+ cluster:
+ instances: 1
+ storage:
+ size: 8Gi
+
+pangolin:
+ secret:
+ existingSecretName: pangolin-app-secret
+ existingSecretKey: SERVER_SECRET
+
+ config:
+ app:
+ dashboard_url: https://pangolin.example.com
+ domains:
+ domain1:
+ base_domain: example.com
+ cert_resolver: letsencrypt
+ gerbil:
+ base_endpoint: vpn.example.com
+ start_port: 51820
+ clients_start_port: 21820
+ traefik:
+ enabled: true
+ http_entrypoint: web
+ https_entrypoint: websecure
+ cert_resolver: letsencrypt
+
+ ingressRoute:
+ dashboard:
+ enabled: true
+ host: pangolin.example.com
+ entryPoints:
+ - websecure
+ tls:
+ enabled: true
+ certResolver: letsencrypt
+ secretName: ""
+
+gerbil:
+ enabled: true
+ startupMode: delayed
+ persistence:
+ enabled: true
+ size: 1Gi
+```
+
+Replace:
+
+* `pangolin.example.com`
+* `example.com`
+* `vpn.example.com`
+* TLS resolver names
+* storage settings
+
+
+Use `gerbil.startupMode=delayed` for the first install if Gerbil should not start before the initial Pangolin setup is complete. Switch it to `normal` after setup.
+
+
+## Step 4: Render Pangolin to the base
+
+Add and update the Helm repository:
+
+```bash
+helm repo add fossorial https://charts.fossorial.io
+helm repo update fossorial
+```
+
+Create directories:
+
+```bash
+mkdir -p base overlays/dev/patches overlays/staging/patches overlays/prod/patches values
+```
+
+Render the Pangolin chart:
+
+```bash
+helm template pangolin fossorial/pangolin \
+ --namespace pangolin \
+ --values values/values-base.yaml \
+ > base/pangolin.yaml
+```
+
+You can also render from the GHCR OCI chart:
+
+```bash
+helm template pangolin oci://ghcr.io/fosrl/helm-charts/pangolin \
+ --version 0.1.0-alpha.0 \
+ --namespace pangolin \
+ --values values/values-base.yaml \
+ > base/pangolin.yaml
+```
+
+## Step 5: Create the base kustomization
+
+```yaml
+# base/kustomization.yaml
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+resources:
+ - pangolin.yaml
+```
+
+
+The namespace is already rendered by Helm through `--namespace pangolin`. You can also set `namespace: pangolin` in Kustomize, but avoid changing namespaces in overlays unless you have verified all rendered resources and references.
+
+
+## Step 6: Inspect rendered resource names
+
+Before writing patches, inspect the generated resource names:
+
+```bash
+kustomize build base | grep -E "^(kind:|  name:)"
+```
+
+Or list the main resource names with `yq`:
+
+```bash
+kustomize build base | yq '. | select(.kind == "Deployment" or .kind == "StatefulSet" or .kind == "IngressRoute" or .kind == "Service") | .kind + " " + .metadata.name'
+```
+
+
+Do not assume generated resource names. Helm names can change with the release name, chart name, `nameOverride`, or `fullnameOverride`.
+
+
+Use the actual rendered names in your patch targets.
+
+## Step 7: Create a production overlay
+
+Example `overlays/prod/kustomization.yaml`:
+
+```yaml
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+resources:
+ - ../../base
+
+labels:
+ - pairs:
+ app.kubernetes.io/environment: production
+ app.kubernetes.io/managed-by: kustomize
+
+patches:
+ - path: patches/pangolin-resources.patch.yaml
+ target:
+ group: apps
+ version: v1
+ kind: Deployment
+ name: pangolin
+
+ - path: patches/ingressroute-host.patch.yaml
+ target:
+ group: traefik.io
+ version: v1alpha1
+ kind: IngressRoute
+ name: pangolin-dashboard
+```
+
+
+Replace `pangolin` and `pangolin-dashboard` with the actual names from your rendered manifests.
+
+
+## Step 8: Add patches
+
+### Patch Pangolin resources
+
+```yaml
+# overlays/prod/patches/pangolin-resources.patch.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: pangolin
+spec:
+ template:
+ spec:
+ containers:
+ - name: pangolin
+ resources:
+ requests:
+ cpu: 500m
+ memory: 512Mi
+ limits:
+ memory: 1Gi
+```
+
+
+CPU limits are rendered by default through the chart's `resourcesPolicy.cpuLimits.enabled=true`. If you disable CPU limits in chart values, keep your Kustomize patches consistent with that policy.
+
+
+### Patch dashboard IngressRoute host
+
+The Pangolin chart uses Traefik `IngressRoute` for the dashboard and API in controller mode, not a standard Kubernetes `Ingress`.
+
+```yaml
+# overlays/prod/patches/ingressroute-host.patch.yaml
+apiVersion: traefik.io/v1alpha1
+kind: IngressRoute
+metadata:
+ name: pangolin-dashboard
+spec:
+ routes:
+ - kind: Rule
+ match: Host(`pangolin-prod.example.com`) && PathPrefix(`/api/v1`)
+ - kind: Rule
+ match: Host(`pangolin-prod.example.com`)
+```
+
+
+Patch the rendered `IngressRoute` only after checking the route order and match rules. The API route and dashboard route target different service ports.
+
+
+### Patch node affinity
+
+```yaml
+# overlays/prod/patches/pangolin-node-affinity.patch.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: pangolin
+spec:
+ template:
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: node-type
+ operator: In
+ values:
+ - production
+```
+
+Reference it in `overlays/prod/kustomization.yaml`:
+
+```yaml
+patches:
+ - path: patches/pangolin-node-affinity.patch.yaml
+ target:
+ group: apps
+ version: v1
+ kind: Deployment
+ name: pangolin
+```
+
+### Patch Gerbil startup mode
+
+For first install, this should usually be handled in Helm values before rendering. If you still need to patch rendered manifests, inspect the generated Deployment first.
+
+To switch Gerbil from delayed to normal mode, prefer updating values and re-rendering:
+
+```yaml
+gerbil:
+ startupMode: normal
+```
+
+Then re-render:
+
+```bash
+helm template pangolin fossorial/pangolin \
+ --namespace pangolin \
+ --values values/values-base.yaml \
+ > base/pangolin.yaml
+```
+
+## Do not rename rendered Helm resources by default
+
+Avoid Kustomize options such as `namePrefix` and `nameSuffix` for Helm-rendered bases unless you have verified every generated reference.
+
+Renaming rendered resources can break:
+
+* Service selectors
+* Secret references
+* ConfigMap references
+* ServiceAccount references
+* NetworkPolicy selectors
+* Traefik `IngressRoute` service references
+* Prometheus monitor selectors
+* CloudNativePG references
+
+If you need different resource names, prefer changing the Helm release name or chart naming values before rendering.
+
+## Apply the overlay
+
+Preview the rendered output:
+
+```bash
+kustomize build overlays/prod
+```
+
+Compare with the live cluster:
+
+```bash
+kustomize build overlays/prod | kubectl diff -f -
+```
+
+Apply the overlay:
+
+```bash
+kubectl apply -k overlays/prod
+```
+
+Verify workloads:
+
+```bash
+kubectl get pods --namespace pangolin
+kubectl get deploy,statefulset --namespace pangolin
+kubectl get svc --namespace pangolin
+```
+
+Verify Traefik resources:
+
+```bash
+kubectl get ingressroute --namespace pangolin
+```
+
+Check events:
+
+```bash
+kubectl get events --namespace pangolin --sort-by=.lastTimestamp
+```
+
+## Updating the rendered base
+
+When upgrading the Pangolin chart or changing Helm values, re-render the base and review the changes.
+
+Update the Helm repository:
+
+```bash
+helm repo update fossorial
+```
+
+Render the updated chart output:
+
+```bash
+helm template pangolin fossorial/pangolin \
+ --namespace pangolin \
+ --values values/values-base.yaml \
+ > base/pangolin.yaml
+```
+
+Or with OCI:
+
+```bash
+helm template pangolin oci://ghcr.io/fosrl/helm-charts/pangolin \
+ --version 0.1.0-alpha.0 \
+ --namespace pangolin \
+ --values values/values-base.yaml \
+ > base/pangolin.yaml
+```
+
+Validate the overlay:
+
+```bash
+kustomize build overlays/prod
+```
+
+Review the diff:
+
+```bash
+git diff
+kustomize build overlays/prod | kubectl diff -f -
+```
+
+Commit the updated base and overlays:
+
+```bash
+git add base/ overlays/ values/
+git commit -m "Update Pangolin rendered manifests"
+```
+
+Apply after review:
+
+```bash
+kubectl apply -k overlays/prod
+```
+
+## Ownership model
+
+Do not run `helm upgrade` against a release that is managed by Kustomize.
+
+Avoid this pattern:
+
+```bash
+helm upgrade pangolin fossorial/pangolin --namespace pangolin
+kubectl apply -k overlays/prod
+```
+
+Use one of these models instead:
+
+| Model | Description |
+| ----------------- | ------------------------------------------------------------------------------------------ |
+| Helm-managed | Helm installs and upgrades the live release. Kustomize is not used for the same resources. |
+| Kustomize-managed | Helm renders manifests only. Kustomize applies and owns the live resources. |
+| GitOps-managed | Argo CD or Flux applies the Kustomize overlay and owns reconciliation. |
+
+## Common Kustomize patches for Pangolin
+
+### Patch resource requests and limits
+
+```yaml
+patches:
+ - path: patches/pangolin-resources.patch.yaml
+ target:
+ group: apps
+ version: v1
+ kind: Deployment
+ name: pangolin
+```
+
+### Patch IngressRoute host
+
+```yaml
+patches:
+ - path: patches/ingressroute-host.patch.yaml
+ target:
+ group: traefik.io
+ version: v1alpha1
+ kind: IngressRoute
+ name: pangolin-dashboard
+```
+
+### Add annotations
+
+```yaml
+patches:
+ - target:
+ group: apps
+ version: v1
+ kind: Deployment
+ name: pangolin
+ patch: |-
+ - op: add
+ path: /metadata/annotations
+ value:
+ example.com/owner: platform
+```
+
+### Patch Gerbil Service type
+
+Patch the Gerbil Service only after checking the rendered Service name.
+
+```yaml
+patches:
+ - target:
+ version: v1
+ kind: Service
+ name: pangolin-gerbil
+ patch: |-
+ - op: replace
+ path: /spec/type
+ value: LoadBalancer
+```
+
+
+For important topology settings such as database mode, Gerbil ports, `startupMode`, Traefik mode, and CloudNativePG settings, prefer changing Helm values and re-rendering instead of patching rendered YAML.
+
+
+## Validation
+
+Validate Kustomize output:
+
+```bash
+kustomize build overlays/prod
+```
+
+Run a server-side dry run:
+
+```bash
+kustomize build overlays/prod | kubectl apply -f - --dry-run=server
+```
+
+Preview live changes:
+
+```bash
+kustomize build overlays/prod | kubectl diff -f -
+```
+
+If a patch does not apply, inspect generated resource names:
+
+```bash
+kustomize build base | grep -E "^(kind:|  name:)"
+```
+
+## Troubleshooting
+
+### The patch does not apply
+
+Check the rendered resource name and kind:
+
+```bash
+kustomize build base | grep -E "^(kind:|  name:)"
+```
+
+Then verify the patch target in your overlay.
+
+### The pod does not start
+
+Check pod status and events:
+
+```bash
+kubectl get pods --namespace pangolin
+kubectl describe pod --namespace pangolin
+kubectl get events --namespace pangolin --sort-by=.lastTimestamp
+```
+
+### Dashboard routing does not work
+
+Check the rendered and applied `IngressRoute`:
+
+```bash
+kubectl get ingressroute --namespace pangolin
+kubectl describe ingressroute --namespace pangolin
+```
+
+Verify:
+
+* Traefik CRDs are installed.
+* A Traefik controller is watching the namespace and labels.
+* `pangolin.ingressRoute.dashboard.host` or the patched host matches DNS.
+* The API route still contains ``PathPrefix(`/api/v1`)``.
+* TLS settings match your Traefik setup.
+
+### Gerbil does not start
+
+Check Gerbil resources:
+
+```bash
+kubectl get pods,svc,pvc --namespace pangolin \
+ -l app.kubernetes.io/name=gerbil
+```
+
+Verify:
+
+* namespace allows `NET_ADMIN`
+* `gerbil.startupMode` is set correctly
+* Gerbil persistence is enabled or intentionally disabled
+* `pangolin.config.gerbil.start_port` matches `gerbil.ports.wg1`
+* `pangolin.config.gerbil.clients_start_port` matches `gerbil.ports.wg2`
+
+## Next steps
+
+
+
+ Install Pangolin with Helm.
+
+
+ Review Pangolin chart options.
+
+
+ Debug Pangolin deployment and routing issues.
+
+
+ Deploy Pangolin with Argo CD or Flux.
+
+
diff --git a/self-host/manual/kubernetes/pangolin/troubleshooting.mdx b/self-host/manual/kubernetes/pangolin/troubleshooting.mdx
new file mode 100644
index 0000000..54480f2
--- /dev/null
+++ b/self-host/manual/kubernetes/pangolin/troubleshooting.mdx
@@ -0,0 +1,929 @@
+---
+title: "Troubleshooting"
+description: "Diagnose and resolve Pangolin Kubernetes deployment issues."
+---
+
+import PangolinCloudTocCta from "/snippets/pangolin-cloud-toc-cta.mdx";
+
+
+
+
+This page covers troubleshooting Pangolin Kubernetes deployments across Helm, Kustomize, Argo CD, and Flux workflows.
+
+Start with the core checks, then use the section that matches the symptom.
+
+## Core diagnostics
+
+Set the namespace and release name used by your installation:
+
+```bash
+export PANGOLIN_NAMESPACE=pangolin
+export PANGOLIN_RELEASE=pangolin
+```
+
+### Helm diagnostics
+
+Check the release:
+
+```bash
+helm status "$PANGOLIN_RELEASE" --namespace "$PANGOLIN_NAMESPACE"
+helm history "$PANGOLIN_RELEASE" --namespace "$PANGOLIN_NAMESPACE"
+helm get values "$PANGOLIN_RELEASE" --namespace "$PANGOLIN_NAMESPACE" --all
+```
+
+Render the chart locally with your values file:
+
+```bash
+helm repo update fossorial
+
+helm template "$PANGOLIN_RELEASE" fossorial/pangolin \
+ --namespace "$PANGOLIN_NAMESPACE" \
+ --values values-pangolin.yaml
+```
+
+Preview an upgrade:
+
+```bash
+helm upgrade "$PANGOLIN_RELEASE" fossorial/pangolin \
+ --namespace "$PANGOLIN_NAMESPACE" \
+ --values values-pangolin.yaml \
+ --dry-run
+```
+
+
+`helm lint charts/pangolin` is only useful when you are working inside the Helm chart repository. For normal installs, use `helm template` and `helm upgrade --dry-run`.
+
+
+### Kubernetes diagnostics
+
+Check workloads and events:
+
+```bash
+kubectl get pods --namespace "$PANGOLIN_NAMESPACE"
+kubectl get deploy,statefulset,job,cronjob --namespace "$PANGOLIN_NAMESPACE"
+kubectl get events --namespace "$PANGOLIN_NAMESPACE" --sort-by=.lastTimestamp
+```
+
+Inspect a pod:
+
+```bash
+kubectl describe pod <pod-name> --namespace "$PANGOLIN_NAMESPACE"
+kubectl logs <pod-name> --namespace "$PANGOLIN_NAMESPACE" --all-containers --tail=200
+```
+
+Check services, PVCs, and policies:
+
+```bash
+kubectl get svc,pvc,secret,configmap --namespace "$PANGOLIN_NAMESPACE"
+kubectl get networkpolicy --namespace "$PANGOLIN_NAMESPACE"
+```
+
+### Traefik diagnostics
+
+In controller mode, the chart uses Traefik CRDs such as `IngressRoute`.
+
+Check whether Traefik CRDs are installed:
+
+```bash
+kubectl get crd | grep traefik
+```
+
+Check rendered or applied Traefik resources:
+
+```bash
+kubectl get ingressroute --namespace "$PANGOLIN_NAMESPACE"
+kubectl describe ingressroute --namespace "$PANGOLIN_NAMESPACE"
+```
+
+Depending on your Traefik setup, also check:
+
+```bash
+kubectl get middleware,tlsoption,traefikservice --namespace "$PANGOLIN_NAMESPACE"
+```
+
+
+`kubectl get ingress` is only useful if your selected deployment mode renders standard Kubernetes Ingress resources. In controller mode, use `IngressRoute`.
+
+
+### Database diagnostics
+
+If you use CloudNativePG, first check that the CRD exists:
+
+```bash
+kubectl get crd | grep postgresql.cnpg.io
+```
+
+Then check CNPG resources:
+
+```bash
+kubectl get cluster --namespace "$PANGOLIN_NAMESPACE"
+kubectl describe cluster --namespace "$PANGOLIN_NAMESPACE"
+kubectl get pods --namespace "$PANGOLIN_NAMESPACE" | grep -E 'pangolin-db|postgres'
+kubectl get secret --namespace "$PANGOLIN_NAMESPACE" | grep -E 'pangolin-db|postgres'
+```
+
+If you use external PostgreSQL, verify the connection Secret:
+
+```bash
+kubectl get secret --namespace "$PANGOLIN_NAMESPACE"
+kubectl describe secret --namespace "$PANGOLIN_NAMESPACE"
+```
+
+Do not decode and paste database credentials into logs, screenshots, or issue reports.
+
+## Common issues and solutions
+
+
+
+
+
+**Symptoms**
+
+* Gerbil pod crashes during a fresh install.
+* Logs mention missing setup data, missing exit node, or tunnel configuration not being ready.
+* Pangolin itself is not initialized yet.
+
+**Cause**
+
+On first install, Gerbil may start before Pangolin has completed the initial setup. The chart supports `gerbil.startupMode` for this case.
+
+**Resolution**
+
+Use delayed startup for the first install:
+
+```yaml
+gerbil:
+ startupMode: delayed
+```
+
+Install or upgrade with the values file:
+
+```bash
+helm upgrade --install "$PANGOLIN_RELEASE" fossorial/pangolin \
+ --namespace "$PANGOLIN_NAMESPACE" \
+ --values values-pangolin.yaml
+```
+
+After Pangolin setup is complete, switch Gerbil to normal startup:
+
+```bash
+helm upgrade "$PANGOLIN_RELEASE" fossorial/pangolin \
+ --namespace "$PANGOLIN_NAMESPACE" \
+ --reuse-values \
+ --set gerbil.startupMode=normal
+```
+
+Check Gerbil resources:
+
+```bash
+kubectl get pods,svc,pvc --namespace "$PANGOLIN_NAMESPACE" \
+ -l app.kubernetes.io/name=gerbil
+```
+
+
+
+
+
+**Symptoms**
+
+* Gerbil pod does not start.
+* Events mention Pod Security Admission.
+* Events mention forbidden capabilities.
+* Logs or events mention `NET_ADMIN`.
+
+**Cause**
+
+Gerbil requires the `NET_ADMIN` Linux capability for WireGuard interface management. A namespace using a restricted Pod Security profile can block this.
+
+**Resolution**
+
+Check namespace labels:
+
+```bash
+kubectl get namespace "$PANGOLIN_NAMESPACE" --show-labels
+```
+
+For a namespace running Gerbil, use a policy profile that allows the required capability. Example:
+
+```bash
+kubectl label namespace "$PANGOLIN_NAMESPACE" \
+ pod-security.kubernetes.io/enforce=privileged \
+ pod-security.kubernetes.io/warn=baseline \
+ pod-security.kubernetes.io/audit=restricted \
+ --overwrite
+```
+
+Then restart the affected pods:
+
+```bash
+kubectl rollout restart deploy --namespace "$PANGOLIN_NAMESPACE"
+```
+
+
+Do not use a restricted Pod Security profile for Gerbil unless you have validated the selected chart mode and security context. Removing `NET_ADMIN` breaks WireGuard management.
+
+
+
+
+
+
+**Symptoms**
+
+* The dashboard URL does not load.
+* Browser shows timeout, bad gateway, 404, or TLS error.
+* API path `/api/v1` fails while the dashboard path works, or the reverse.
+
+**Common causes**
+
+* DNS points to the wrong load balancer or ingress endpoint.
+* Traefik CRDs are missing.
+* Traefik controller is not watching the namespace or selector labels.
+* `IngressRoute` host does not match the dashboard URL.
+* API route was changed and no longer matches ``PathPrefix(`/api/v1`)``.
+* TLS resolver or TLS Secret is misconfigured.
+
+**Checks**
+
+Check DNS:
+
+```bash
+nslookup pangolin.example.com
+```
+
+Check Traefik CRDs:
+
+```bash
+kubectl get crd | grep traefik
+```
+
+Check IngressRoute resources:
+
+```bash
+kubectl get ingressroute --namespace "$PANGOLIN_NAMESPACE"
+kubectl describe ingressroute --namespace "$PANGOLIN_NAMESPACE"
+```
+
+Check the rendered values:
+
+```bash
+helm get values "$PANGOLIN_RELEASE" --namespace "$PANGOLIN_NAMESPACE" --all | grep -A30 ingressRoute
+```
+
+Check Traefik logs. Adjust the namespace and label selector to your Traefik installation:
+
+```bash
+kubectl logs --namespace traefik -l app.kubernetes.io/name=traefik --tail=100
+```
+
+Temporary local check for the dashboard/UI service port:
+
+```bash
+kubectl port-forward --namespace "$PANGOLIN_NAMESPACE" svc/pangolin 8080:3002
+```
+
+Then open:
+
+```text
+http://localhost:8080
+```
+
+
+The dashboard/UI port is `3002`. The API/external port is `3000`. Port-forward `3002` when checking the dashboard locally.
+
+
+
+
+
+
+**Symptoms**
+
+* `IngressRoute` is created but TLS does not work.
+* Traefik logs mention TLS configuration problems.
+* Certificate is not issued or the TLS Secret is not found.
+
+**Cause**
+
+The dashboard `IngressRoute` TLS configuration should use either a Traefik certificate resolver or an existing TLS Secret.
+
+**Resolution**
+
+Use Traefik ACME certificate resolver:
+
+```yaml
+pangolin:
+ ingressRoute:
+ dashboard:
+ tls:
+ enabled: true
+ certResolver: letsencrypt
+ secretName: ""
+```
+
+Or use an existing TLS Secret:
+
+```yaml
+pangolin:
+ ingressRoute:
+ dashboard:
+ tls:
+ enabled: true
+ certResolver: ""
+ secretName: pangolin-dashboard-tls
+```
+
+Verify the Secret if using `secretName`:
+
+```bash
+kubectl get secret pangolin-dashboard-tls --namespace "$PANGOLIN_NAMESPACE"
+```
+
+
+`certResolver` is a Traefik ACME resolver setting. It is not a cert-manager issuer reference.
+
+
+
+
+
+
+**Symptoms**
+
+* Newt shows repeated connection or tunnel errors.
+* Tunnel traffic does not pass.
+* WireGuard UDP ports are unreachable from the Newt location.
+
+**Common causes**
+
+* `pangolin.config.gerbil.base_endpoint` points to the wrong host.
+* Gerbil Service is not exposed as expected.
+* External firewall blocks UDP traffic.
+* NetworkPolicy blocks the required traffic.
+* `pangolin.config.gerbil.start_port` and `gerbil.ports.wg1` are not aligned.
+* `pangolin.config.gerbil.clients_start_port` and `gerbil.ports.wg2` are not aligned.
+
+**Checks**
+
+Check Gerbil Service:
+
+```bash
+kubectl get svc --namespace "$PANGOLIN_NAMESPACE" \
+ -l app.kubernetes.io/name=gerbil
+
+kubectl describe svc --namespace "$PANGOLIN_NAMESPACE"
+```
+
+Check Gerbil values:
+
+```bash
+helm get values "$PANGOLIN_RELEASE" --namespace "$PANGOLIN_NAMESPACE" --all | grep -A30 gerbil
+```
+
+Check NetworkPolicies:
+
+```bash
+kubectl get networkpolicy --namespace "$PANGOLIN_NAMESPACE"
+kubectl describe networkpolicy --namespace "$PANGOLIN_NAMESPACE"
+```
+
+Verify external firewall rules for the configured UDP ports.
+
+
+
+
+
+**Symptoms**
+
+* Newt peers do not establish stable handshakes.
+* Tunnel traffic drops even though Gerbil pods are healthy.
+* Logs show connection resets or malformed upstream traffic.
+
+**Cause**
+
+Proxy protocol handling is inconsistent between the upstream hop and Gerbil.
+
+
+If Gerbil is exposed through a reverse proxy or UDP gateway, keep proxy protocol settings aligned end-to-end. Do not enable proxy protocol on the upstream hop unless Gerbil is configured to accept it.
+
+
+**Checks**
+
+Check endpoint and port alignment:
+
+```bash
+helm get values "$PANGOLIN_RELEASE" --namespace "$PANGOLIN_NAMESPACE" --all | grep -A40 gerbil
+```
+
+Check Gerbil logs:
+
+```bash
+kubectl logs --namespace "$PANGOLIN_NAMESPACE" \
+ -l app.kubernetes.io/name=gerbil \
+ --tail=200
+```
+
+Check Service exposure:
+
+```bash
+kubectl get svc --namespace "$PANGOLIN_NAMESPACE" \
+ -l app.kubernetes.io/name=gerbil -o wide
+```
+
+
+
+
+
+**Symptoms**
+
+* Pangolin pod crashes.
+* Logs mention database connection errors.
+* Events mention missing Secret or missing Secret key.
+
+**Cause**
+
+`database.mode=external` needs a valid database connection Secret unless the chart is configured to generate one from values.
+
+**Resolution**
+
+Create a connection Secret:
+
+```bash
+kubectl create secret generic pangolin-db-connection \
+ --namespace "$PANGOLIN_NAMESPACE" \
+ --from-literal=connectionString='postgresql://pangolin:password@postgres.example.com:5432/pangolin?sslmode=require'
+```
+
+Reference it in values:
+
+```yaml
+database:
+ mode: external
+ connection:
+ existingSecretName: pangolin-db-connection
+ existingSecretKey: connectionString
+```
+
+Check the Secret:
+
+```bash
+kubectl describe secret pangolin-db-connection --namespace "$PANGOLIN_NAMESPACE"
+```
+
+
+Do not put database passwords directly in values files for production. Use an existing Secret or your normal secret-management workflow.
+
+
+
+
+
+
+**Symptoms**
+
+* CNPG Cluster resource is missing.
+* CNPG pods do not start.
+* Pangolin cannot connect to the generated CNPG database.
+* Secret such as `pangolin-db-app` is missing.
+
+**Common causes**
+
+* CloudNativePG CRDs/operator are not installed.
+* `cnpg-cluster.enabled` is false when you expected the chart to create a cluster.
+* `cnpg-operator.enabled` is false and no operator exists.
+* `database.cloudnativepg.cluster.name` does not match the CNPG cluster name.
+* StorageClass or PVC provisioning fails.
+
+**Checks**
+
+Check CRDs:
+
+```bash
+kubectl get crd | grep postgresql.cnpg.io
+```
+
+Check CNPG operator pods:
+
+```bash
+kubectl get pods --all-namespaces | grep -i cnpg
+```
+
+Check CNPG Cluster:
+
+```bash
+kubectl get cluster --namespace "$PANGOLIN_NAMESPACE"
+kubectl describe cluster pangolin-db --namespace "$PANGOLIN_NAMESPACE"
+```
+
+Check PVCs and Secrets:
+
+```bash
+kubectl get pvc --namespace "$PANGOLIN_NAMESPACE"
+kubectl get secret --namespace "$PANGOLIN_NAMESPACE" | grep pangolin-db
+```
+
+Expected naming when using the default example:
+
+```yaml
+database:
+ cloudnativepg:
+ cluster:
+ name: pangolin-db
+
+cnpg-cluster:
+ enabled: true
+ fullnameOverride: pangolin-db
+```
+
+
+
+
+
+**Symptoms**
+
+* DNS lookups fail.
+* Pangolin cannot connect to the database.
+* Controller cannot reach the Kubernetes API.
+* Gerbil or Newt traffic does not work.
+* External services such as SMTP, OIDC, or webhooks time out.
+
+**Cause**
+
+The chart can render NetworkPolicies. If your CNI enforces them, missing egress or ingress rules can break required paths.
+
+**Checks**
+
+```bash
+kubectl get networkpolicy --namespace "$PANGOLIN_NAMESPACE"
+kubectl describe networkpolicy --namespace "$PANGOLIN_NAMESPACE"
+```
+
+Check whether DNS is allowed:
+
+```yaml
+networkPolicy:
+ dns:
+ enabled: true
+```
+
+Check database egress:
+
+```yaml
+networkPolicy:
+ database:
+ enabled: true
+ port: 5432
+```
+
+Check controller API access:
+
+```yaml
+networkPolicy:
+ controller:
+ egress:
+ enabled: true
+ kubernetesApi:
+ enabled: true
+ port: 443
+```
+
+For external integrations, add scoped egress rules for the required services instead of allowing broad egress.
+
+For a temporary isolation test, disable NetworkPolicy and re-apply:
+
+```yaml
+networkPolicy:
+ enabled: false
+```
+
+If this fixes the issue, re-enable policies and add the missing rules.
+
+
+
+
+
+**Symptoms**
+
+* Pangolin pod restarts repeatedly.
+* Pod stays Pending.
+* Readiness never becomes true.
+
+**Checks**
+
+Find the pod:
+
+```bash
+kubectl get pods --namespace "$PANGOLIN_NAMESPACE" \
+ -l app.kubernetes.io/name=pangolin
+```
+
+Inspect it:
+
+```bash
+kubectl describe pod --namespace "$PANGOLIN_NAMESPACE" -l app.kubernetes.io/name=pangolin
+kubectl logs --namespace "$PANGOLIN_NAMESPACE" -l app.kubernetes.io/name=pangolin --tail=200
+kubectl logs --namespace "$PANGOLIN_NAMESPACE" -l app.kubernetes.io/name=pangolin --previous --tail=200
+```
+
+Check PVCs:
+
+```bash
+kubectl get pvc --namespace "$PANGOLIN_NAMESPACE"
+kubectl describe pvc --namespace "$PANGOLIN_NAMESPACE"
+```
+
+Common causes:
+
+| Status | Common causes |
+| ------------------ | ----------------------------------------------------------------------------------------------------- |
+| `CrashLoopBackOff` | Database connection issue, missing Secret, invalid config, startup dependency not ready |
+| `Pending` | PVC not bound, insufficient resources, node selector/affinity mismatch, Pod Security policy rejection |
+| `ImagePullBackOff` | Wrong image override, registry access issue, missing imagePullSecret |
+
+
+Do not assume tools such as `psql`, `curl`, or `dig` are available inside the Pangolin container. Use logs, Events, or a temporary debug pod when needed.
+
+
+Run a temporary debug pod for network tests:
+
+```bash
+kubectl run net-debug \
+ --namespace "$PANGOLIN_NAMESPACE" \
+ --rm -it \
+ --image=curlimages/curl:latest \
+ --restart=Never \
+ -- sh
+```
+
+
+
+
+
+**Symptoms**
+
+* Helm template or install succeeds, but Traefik resources are not reconciled.
+* `kubectl get ingressroute` fails with unknown resource type.
+* Argo CD or Flux reports missing kind `IngressRoute`.
+
+**Cause**
+
+Controller mode expects Traefik CRDs and a Traefik controller. They must be installed separately or through the bundled dependency when enabled.
+
+**Checks**
+
+```bash
+kubectl get crd | grep traefik
+kubectl get pods --all-namespaces | grep -i traefik
+```
+
+If you want the chart to install the bundled Traefik controller, enable it:
+
+```yaml
+deployment:
+ type: controller
+ installTraefikController: true
+```
+
+If Traefik is already installed elsewhere, keep it disabled and make sure the controller watches the namespace and labels used by the Pangolin `IngressRoute`.
+
+
+
+
+
+**Symptoms**
+
+* `helm upgrade` fails.
+* Rendered resources changed unexpectedly.
+* Existing resources conflict with chart-managed resources.
+* GitOps reports immutable field changes or ownership conflicts.
+
+**Checks**
+
+Render before upgrading:
+
+```bash
+helm template "$PANGOLIN_RELEASE" fossorial/pangolin \
+ --namespace "$PANGOLIN_NAMESPACE" \
+ --values values-pangolin.yaml > rendered.yaml
+```
+
+Run a server-side dry run:
+
+```bash
+kubectl apply -f rendered.yaml --dry-run=server
+```
+
+Compare the current live release:
+
+```bash
+helm get manifest "$PANGOLIN_RELEASE" --namespace "$PANGOLIN_NAMESPACE" > live-release.yaml
+diff -u live-release.yaml rendered.yaml
+```
+
+Check ownership conflicts:
+
+```bash
+kubectl get all --namespace "$PANGOLIN_NAMESPACE" -o yaml | grep -E "meta.helm.sh|app.kubernetes.io/managed-by"
+```
+
+Avoid `--force` unless you understand which resources will be recreated.
+
+
+`helm upgrade --force` can delete and recreate resources. That can interrupt traffic and may affect persistent workloads depending on the resource type.
+
+
+
+
+
+
+**Symptoms**
+
+* Kustomize build succeeds but changes are missing.
+* Patch target does not match any resource.
+* Patch breaks after chart upgrade.
+
+**Checks**
+
+List generated resource names:
+
+```bash
+kustomize build base | grep -E "^(kind:| +name:)"
+```
+
+Validate the overlay:
+
+```bash
+kustomize build overlays/prod
+```
+
+Run a server-side dry run:
+
+```bash
+kustomize build overlays/prod | kubectl apply -f - --dry-run=server
+```
+
+Preview live changes:
+
+```bash
+kustomize build overlays/prod | kubectl diff -f -
+```
+
+Use modern Kustomize `patches` syntax:
+
+```yaml
+patches:
+ - path: patches/pangolin-resources.patch.yaml
+ target:
+ group: apps
+ version: v1
+ kind: Deployment
+ name: pangolin
+```
+
+
+For Helm-rendered bases, do not assume resource names. Check the rendered manifests after each chart upgrade.
+
+
+
+
+
+
+**Symptoms**
+
+* Argo CD Application is OutOfSync or Degraded.
+* Flux HelmRelease or Kustomization is not Ready.
+* Resources are missing or constantly reverted.
+
+**Argo CD checks**
+
+```bash
+kubectl describe application pangolin --namespace argocd
+kubectl logs --namespace argocd deployment/argocd-application-controller --tail=100
+argocd app diff pangolin
+argocd app sync pangolin
+```
+
+**Flux checks**
+
+```bash
+flux get sources all --all-namespaces
+flux get helmreleases --all-namespaces
+flux get kustomizations --all-namespaces
+flux logs --all-namespaces --follow
+```
+
+Reconcile manually:
+
+```bash
+flux reconcile helmrelease pangolin --namespace "$PANGOLIN_NAMESPACE"
+flux reconcile kustomization pangolin --namespace flux-system
+```
+
+Common causes:
+
+- chart repository or OCI source not reachable
+- wrong chart version
+- missing CRDs
+- invalid values
+- rendered resource ownership conflict
+- Secret not available in the expected namespace
+
+
+
+
+
+## Routing issues to the right repository
+
+Use the repository that matches the failing area:
+
+| Area | Repository |
+| ----------------------------------------------------- | ------------------- |
+| Chart templates, values, examples, rendered manifests | `fosrl/helm-charts` |
+| Pangolin runtime, API, UI, auth, application behavior | `fosrl/pangolin` |
+| Newt client behavior or connectivity | `fosrl/newt` |
+| Documentation | `fosrl/docs-v2` |
+
+## Before opening an issue, collect
+
+Collect this information before opening an issue:
+
+* chart version
+* Pangolin app version
+* Kubernetes version
+* Helm version
+* deployment method: Helm, Kustomize, Argo CD, or Flux
+* sanitized values file
+* pod logs
+* namespace events
+* Traefik logs, if routing is involved
+* rendered manifests from `helm template` or `kustomize build`
+* Helm release status or GitOps sync status
+* reproduction steps
+
+Collect basic diagnostics:
+
+```bash
+kubectl version
+helm version
+
+helm status "$PANGOLIN_RELEASE" --namespace "$PANGOLIN_NAMESPACE"
+helm get values "$PANGOLIN_RELEASE" --namespace "$PANGOLIN_NAMESPACE" --all > pangolin-values.yaml
+helm get manifest "$PANGOLIN_RELEASE" --namespace "$PANGOLIN_NAMESPACE" > pangolin-manifest.yaml
+
+kubectl get pods --namespace "$PANGOLIN_NAMESPACE" -o wide > pangolin-pods.txt
+kubectl get events --namespace "$PANGOLIN_NAMESPACE" --sort-by=.lastTimestamp > pangolin-events.txt
+```
+
+Before sharing diagnostics, remove:
+
+* database passwords
+* `SERVER_SECRET`
+* API keys
+* OAuth/OIDC client secrets
+* TLS private keys
+* internal hostnames, if sensitive
+
+## Useful command reference
+
+```bash
+# General cluster info
+kubectl cluster-info
+kubectl version
+
+# Namespace overview
+kubectl get all --namespace "$PANGOLIN_NAMESPACE"
+kubectl get pvc,secret,configmap --namespace "$PANGOLIN_NAMESPACE"
+kubectl get events --namespace "$PANGOLIN_NAMESPACE" --sort-by=.lastTimestamp
+
+# Logs
+kubectl logs --namespace "$PANGOLIN_NAMESPACE" \
+ -l app.kubernetes.io/name=pangolin \
+ --tail=200
+
+kubectl logs --namespace "$PANGOLIN_NAMESPACE" \
+ -l app.kubernetes.io/name=gerbil \
+ --tail=200
+
+# Dashboard local test
+kubectl port-forward --namespace "$PANGOLIN_NAMESPACE" svc/pangolin 8080:3002
+
+# Traefik resources
+kubectl get ingressroute --namespace "$PANGOLIN_NAMESPACE"
+
+# Resource usage
+kubectl top pod --namespace "$PANGOLIN_NAMESPACE"
+kubectl top node
+```
+
+## Next steps
+
+
+
+ Review Pangolin chart options.
+
+
+ Install Pangolin with Helm.
+
+
+ Install Pangolin with rendered manifests and Kustomize overlays.
+
+
+ Deploy Pangolin with Argo CD or Flux.
+
+
diff --git a/self-host/manual/kubernetes/prerequisites.mdx b/self-host/manual/kubernetes/prerequisites.mdx
new file mode 100644
index 0000000..2c349ec
--- /dev/null
+++ b/self-host/manual/kubernetes/prerequisites.mdx
@@ -0,0 +1,225 @@
+---
+title: "Prerequisites"
+description: "Cluster, tooling, networking, and storage requirements for deploying Pangolin and Sites (Newt) on Kubernetes."
+---
+
+import PangolinCloudTocCta from "/snippets/pangolin-cloud-toc-cta.mdx";
+
+
+
+Before installing Pangolin or Sites (Newt) on Kubernetes, check that your cluster, tools, networking, and storage setup match the deployment path you want to use.
+
+## Kubernetes cluster
+
+Use a Kubernetes version that satisfies the Helm chart `kubeVersion` requirement and is supported by your Kubernetes provider or distribution.
+
+Check your cluster version:
+
+```bash
+kubectl version
+```
+
+
+See the [Version Matrix](https://github.com/fosrl/helm-charts/blob/main/VERSION_MATRIX.md) for the supported Kubernetes versions of the Pangolin and Newt Helm charts.
+
+
+## Controller access and RBAC
+
+Controller mode is the default and recommended Kubernetes deployment mode for Pangolin.
+
+When controller mode is enabled, the Pangolin Kube Controller runs with its own ServiceAccount and needs permission to watch and manage the Kubernetes and Traefik resources it reconciles. The chart creates the required RBAC resources for you, unless RBAC creation is disabled.
+
+By default, the controller is scoped to the namespace of a single Pangolin deployment. It can also be configured for a broader scope when one controller should reconcile resources for multiple Pangolin deployments.
+
+Depending on the configured controller scope, the controller needs namespace-scoped or cluster-scoped access to the resources it reconciles:
+
+| API group | Resources | Verbs |
+| --- | --- | --- |
+| `""` | `events` | `create`, `patch`, `update` |
+| `""` | `services`, `endpoints` | `get`, `list`, `watch`, `create`, `update`, `patch`, `delete` |
+| `discovery.k8s.io` | `endpointslices` | `get`, `list`, `watch`, `create`, `update`, `patch`, `delete` |
+| `traefik.io` | `ingressroutes`, `ingressroutetcps`, `ingressrouteudps`, `middlewares`, `middlewaretcps`, `traefikservices`, `serverstransports`, `serverstransporttcps`, `tlsoptions`, `tlsstores` | `get`, `list`, `watch`, `create`, `update`, `patch`, `delete` |
+
+If leader election is enabled, the controller also needs access to:
+
+| API group | Resources | Verbs |
+| --- | --- | --- |
+| `coordination.k8s.io` | `leases` | `get`, `list`, `watch`, `create`, `update`, `patch` |
+
+The controller also needs cluster-wide read access to Kubernetes discovery resources:
+
+| API group | Resources | Verbs |
+| --- | --- | --- |
+| `networking.k8s.io` | `ingressclasses` | `get`, `list`, `watch` |
+| `apiextensions.k8s.io` | `customresourcedefinitions` | `get`, `list`, `watch` |
+
+
+For namespace-scoped deployments, the chart creates namespaced RBAC for the controller namespace and, if configured, the target namespace. For broader controller scopes, the chart creates the required cluster-scoped RBAC.
+
+
+## Database and storage
+
+Pangolin requires a database backend. The Helm chart supports multiple database modes, including CloudNativePG, external PostgreSQL, embedded PostgreSQL, and SQLite.
+
+For persistent database-backed deployments, make sure your cluster has a usable StorageClass or configure the StorageClass explicitly in your chart values.
+
+Check available StorageClasses:
+
+```bash
+kubectl get storageclasses
+```
+
+For long-running/production deployments, prefer PostgreSQL-based modes such as CloudNativePG or external PostgreSQL.
+
+
+SQLite can be useful for simple or test deployments, but PostgreSQL-based modes are the better fit for long-running/production Kubernetes deployments.
+
+
+### Site connector storage
+
+A Site (Newt) deployment does not require persistent storage by default.
+
+Use writable configuration persistence only if your deployment needs runtime configuration to survive pod replacement, upgrades, node drains, or rescheduling. For simple deployments, no PVC is required.
+
+## Networking
+
+### Ingress and routing
+
+Pangolin needs an external entrypoint for the dashboard, API, and site traffic.
+
+Depending on your chart values, this can use:
+
+* controller mode with a Traefik ingress controller
+* standalone mode with chart-managed Traefik components
+* an existing ingress or load balancer setup
+
+If you use controller mode with Traefik CRDs, verify that the required Traefik API resources are available:
+
+```bash
+kubectl api-resources --api-group=traefik.io
+```
+
+You can also check existing ingress resources:
+
+```bash
+kubectl get ingress -A
+```
+
+### DNS
+
+Configure DNS records for the domains used by Pangolin before exposing it publicly.
+
+At minimum, the Pangolin dashboard domain should resolve to the ingress controller, load balancer, or public endpoint used by your deployment.
+
+Example:
+
+```bash
+nslookup pangolin.example.com
+```
+
+For tunneled site deployments, also verify the DNS name used by the site connector endpoint.
+
+### TLS
+
+Use HTTPS for the Pangolin dashboard and API.
+
+Common TLS options include:
+
+* Traefik ACME / Let's Encrypt
+* cert-manager
+* a pre-created Kubernetes TLS Secret
+* TLS termination at an external load balancer or ingress controller
+
+Use the TLS method that matches your ingress and cluster setup.
+
+If you use cert-manager, verify that the certificate CRDs are available:
+
+```bash
+kubectl get crd certificates.cert-manager.io
+```
+
+## Namespace and security
+
+Choose the namespace where Pangolin and related components should run.
+
+Example:
+
+```bash
+kubectl create namespace pangolin
+```
+
+When using Helm, you can also let Helm create the namespace:
+
+```bash
+helm upgrade --install pangolin fossorial/pangolin \
+ --namespace pangolin \
+ --create-namespace
+```
+
+If your cluster enforces Pod Security Admission, make sure the namespace labels match the selected deployment mode. Deployments that include tunnel components may require permissions that are not compatible with a fully restricted namespace profile.
+
+## NetworkPolicy
+
+The Pangolin and Newt charts can manage NetworkPolicies for the required application traffic.
+
+If you enable chart-managed NetworkPolicies, review the generated policies before adding custom deny rules. If you replace them with your own policies, allow the required traffic between the components you deploy, such as Pangolin, Traefik, Gerbil, the database, DNS, and Site connectors.
+
+## Resource planning
+
+Pangolin and Site (Newt) Kubernetes deployments include predefined resource profiles for the supported deployment methods. These profiles set CPU and memory requests and limits for the components used by the selected deployment mode.
+
+The available profiles are:
+
+| Profile | Intended use |
+| --- | --- |
+| Small | Small deployments, or clusters with very limited available resources. |
+| Standard | Default profile for most normal deployments. |
+| Large | Larger environments with more Sites, more users, higher traffic, or stricter availability expectations. |
+
+The selected profile applies to the workloads that are part of your deployment, for example:
+
+| Component | Resource considerations |
+| --- | --- |
+| Pangolin | Main application workload. Size according to dashboard/API usage, users, and traffic. |
+| Pangolin Kube Controller | Required in controller mode. Size according to the number of reconciled Kubernetes and Traefik resources. |
+| Traefik | Size according to ingress and proxy traffic. |
+| Gerbil | Required when the tunnel stack is enabled. Size according to tunnel traffic and number of connected Sites. |
+| PostgreSQL / CloudNativePG | Size according to database mode, stored state, and expected write/read activity. |
+| Site connectors (Newt) | Each Site connector adds its own resource usage. Size according to the traffic handled by that Site. |
+
+
+The Standard profile is intended to be enough for most standard deployments. Use Small for very limited lab or test environments, and Large for higher traffic, more Sites, more users, or larger production environments.
+
+
+After installation, monitor CPU and memory usage and adjust the selected profile or individual resource overrides if needed.
+
+
+Avoid setting CPU limits on latency-sensitive Pangolin components unless your cluster policy requires them or you intentionally want to cap CPU usage.
+
+CPU limits can cause throttling when a workload temporarily needs more CPU, even if spare CPU capacity is available on the node. This can negatively affect ingress, tunnel, proxy, database, and controller workloads.
+
+For most deployments, use CPU requests to reserve baseline capacity and memory limits to protect the node from excessive memory usage.
+
+
+## Next steps
+
+
+
+ Pick the Kubernetes workflow that matches how you deploy applications.
+
+
+ Install Pangolin or Sites (Newt) with Helm.
+
+
+ Use Kustomize overlays and patches.
+
+
+ Deploy Pangolin or Sites (Newt) with Argo CD.
+
+
+ Deploy Pangolin or Sites (Newt) with Flux.
+
+
+ Start with the Pangolin Helm installation guide.
+
+