From fb338992d5424c6b265902b5dfca876d533dd730 Mon Sep 17 00:00:00 2001 From: Alex J Date: Thu, 9 Apr 2026 09:24:30 +0100 Subject: [PATCH 01/13] chore: making new branch (#1478) merging personal branch to a new branch. --- helm/blueapi/Chart.yaml | 2 +- helm/blueapi/templates/configmap.yaml | 27 ++++++- helm/blueapi/templates/cronjob.yaml | 72 +++++++++++++++++++ .../blueapi/templates/tests/test-cronjob.yaml | 19 +++++ 4 files changed, 118 insertions(+), 2 deletions(-) create mode 100644 helm/blueapi/templates/cronjob.yaml create mode 100644 helm/blueapi/templates/tests/test-cronjob.yaml diff --git a/helm/blueapi/Chart.yaml b/helm/blueapi/Chart.yaml index 08097921fb..385f69f9d1 100644 --- a/helm/blueapi/Chart.yaml +++ b/helm/blueapi/Chart.yaml @@ -15,7 +15,7 @@ type: application # This is the chart version. This version number is incremented by the release process. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.0 +version: 1.13.0 # This is the version number of the application being deployed. This version number is incremented by the release process. # Versions are not expected to follow Semantic Versioning. They should reflect the version the application is using. diff --git a/helm/blueapi/templates/configmap.yaml b/helm/blueapi/templates/configmap.yaml index aa813e6485..15cfbc4d67 100644 --- a/helm/blueapi/templates/configmap.yaml +++ b/helm/blueapi/templates/configmap.yaml @@ -31,6 +31,31 @@ data: init_config.yaml: |- scratch: {{- toYaml .Values.worker.scratch | nindent 6 }} + +--- {{- end }} ---- +apiVersion: v1 +kind: ConfigMap +metadata: + name : {{include "blueapi.fullname" . 
}}-pvc-stamper-script +data: + time-stamper.sh: | + #!/bin/sh + # Get PVCs belonging to this blueapi release + ALL_PVC=$(kubectl get pvc -n {{ .Release.Namespace }} \ + -o jsonpath='{.items[*].metadata.name}' | tr ' ' '\n' | \ + grep "^{{ .Release.Name }}-scratch-") + # Get all PVCs currently mounted by running pods + MOUNTED_PVCS=$(kubectl get pods -n {{ .Release.Namespace }} \ + -o=jsonpath='{.items[*].spec.volumes[*].persistentVolumeClaim.claimName}' | tr ' ' '\n' | sort -u) + NOW=$(date +%s) + #loop through all the pvcs annotating ones thare are mounted or lack a last-used stamp + for pvc in $ALL_PVC; do + ANNOTATION=$(kubectl get pvc "$pvc" -n {{ .Release.Namespace }} -o=jsonpath='{.metadata.annotations.last-used}') + if [ -z "$ANNOTATION" ]; then + kubectl annotate --overwrite pvc "$pvc" -n {{ .Release.Namespace }} last-used="$NOW" + elif echo "$MOUNTED_PVCS" | grep -qx "$pvc"; then + kubectl annotate --overwrite pvc "$pvc" -n {{ .Release.Namespace }} last-used="$NOW" + fi + done diff --git a/helm/blueapi/templates/cronjob.yaml b/helm/blueapi/templates/cronjob.yaml new file mode 100644 index 0000000000..0860467d73 --- /dev/null +++ b/helm/blueapi/templates/cronjob.yaml @@ -0,0 +1,72 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "blueapi.fullname" . }}-last-used-stamper + namespace: {{ .Release.Namespace }} +automountServiceAccountToken: true +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ include "blueapi.fullname" . }}-last-used-stamper + namespace: {{ .Release.Namespace }} +rules: +- apiGroups: [""] + resources: ["pods", "persistentvolumeclaims"] + verbs: ["get", "list", "patch"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ include "blueapi.fullname" . }}-last-used-stamper + namespace: {{ .Release.Namespace }} +subjects: +- kind: ServiceAccount + name: {{ include "blueapi.fullname" . 
}}-last-used-stamper + namespace: {{ .Release.Namespace }} +roleRef: + kind: Role + name: {{ include "blueapi.fullname" . }}-last-used-stamper + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: {{ include "blueapi.fullname" . }}-last-used-stamper + namespace: {{ .Release.Namespace }} +spec: + concurrencyPolicy: Forbid + successfulJobsHistoryLimit: 3 + failedJobsHistoryLimit: 1 + schedule: "*/5 * * * *" + + jobTemplate: + spec: + # amount of attempts of labeling a pvc + backoffLimit: 0 + # job stops after 60 secounds + activeDeadlineSeconds: 60 + template: + spec: + serviceAccountName: {{ include "blueapi.fullname" . }}-last-used-stamper + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 12 }} + {{- end }} + + volumes: + - name : {{include "blueapi.fullname" . }}-pvc-stamper-script + configMap: + name: {{include "blueapi.fullname" . }}-pvc-stamper-script + defaultMode: 0555 + + + containers: + - name: last-used-stamper + volumeMounts: + - name: {{include "blueapi.fullname" . }}-pvc-stamper-script + mountPath: /scripts + image: bitnami/kubectl:latest + imagePullPolicy: IfNotPresent + command: ["/scripts/time-stamper.sh"] + restartPolicy: OnFailure diff --git a/helm/blueapi/templates/tests/test-cronjob.yaml b/helm/blueapi/templates/tests/test-cronjob.yaml new file mode 100644 index 0000000000..52df70cba5 --- /dev/null +++ b/helm/blueapi/templates/tests/test-cronjob.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +kind: Pod +metadata: + name: {{ include "blueapi.fullname" . }}-last-used-stamper + annotations: + "helm.sh/hook": test +spec: + serviceAccountName: {{ include "blueapi.fullname" . 
}}-last-used-stamper + containers: + - name: test + image: bitnami/kubectl:1.34.5 + command: ["/bin/sh", "-c"] + args: + - | + # Get a pod belonging to this release and annotate it, then verify + kubectl annotate --overwrite pod "$HOSTNAME" -n "{{ .Release.Namespace }}" last-used="$(date +%s)" + kubectl get pod "$HOSTNAME" -n "{{ .Release.Namespace }}" \ + -o jsonpath='{.metadata.annotations.last-used}' | grep -q . + restartPolicy: Never From 079426b5bd44bf20f3c77a8320df4ca0b960bf1b Mon Sep 17 00:00:00 2001 From: Alex J Date: Thu, 9 Apr 2026 09:35:18 +0100 Subject: [PATCH 02/13] Add conditional cronjob and increase backoff limit --- helm/blueapi/templates/cronjob.yaml | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/helm/blueapi/templates/cronjob.yaml b/helm/blueapi/templates/cronjob.yaml index 0860467d73..52f5849414 100644 --- a/helm/blueapi/templates/cronjob.yaml +++ b/helm/blueapi/templates/cronjob.yaml @@ -1,3 +1,4 @@ +{{- if .Values.timeStampCron.enabled }} apiVersion: v1 kind: ServiceAccount metadata: @@ -43,7 +44,7 @@ spec: jobTemplate: spec: # amount of attempts of labeling a pvc - backoffLimit: 0 + backoffLimit: 3 # job stops after 60 secounds activeDeadlineSeconds: 60 template: @@ -63,6 +64,11 @@ spec: containers: - name: last-used-stamper + env: + - name: RELEASE_NAME + value: {{ .Release.Name }} + - name: RELEASE_NAMESPACE + value: {{ .Release.Namespace }} volumeMounts: - name: {{include "blueapi.fullname" . 
}}-pvc-stamper-script mountPath: /scripts @@ -70,3 +76,4 @@ spec: imagePullPolicy: IfNotPresent command: ["/scripts/time-stamper.sh"] restartPolicy: OnFailure +{{- end }} From 33726a6cbcfb055d55cb293ef9a418764b4747b3 Mon Sep 17 00:00:00 2001 From: Alex J Date: Thu, 9 Apr 2026 09:35:42 +0100 Subject: [PATCH 03/13] Refactor time-stamper.sh script inclusion in ConfigMap --- helm/blueapi/templates/configmap.yaml | 25 ++++++------------------- 1 file changed, 6 insertions(+), 19 deletions(-) diff --git a/helm/blueapi/templates/configmap.yaml b/helm/blueapi/templates/configmap.yaml index 15cfbc4d67..2c834bb31a 100644 --- a/helm/blueapi/templates/configmap.yaml +++ b/helm/blueapi/templates/configmap.yaml @@ -35,27 +35,14 @@ data: --- {{- end }} +--- +{{- if .Values.timeStampCron.enabled }} apiVersion: v1 kind: ConfigMap metadata: name : {{include "blueapi.fullname" . }}-pvc-stamper-script data: - time-stamper.sh: | - #!/bin/sh - # Get PVCs belonging to this blueapi release - ALL_PVC=$(kubectl get pvc -n {{ .Release.Namespace }} \ - -o jsonpath='{.items[*].metadata.name}' | tr ' ' '\n' | \ - grep "^{{ .Release.Name }}-scratch-") - # Get all PVCs currently mounted by running pods - MOUNTED_PVCS=$(kubectl get pods -n {{ .Release.Namespace }} \ - -o=jsonpath='{.items[*].spec.volumes[*].persistentVolumeClaim.claimName}' | tr ' ' '\n' | sort -u) - NOW=$(date +%s) - #loop through all the pvcs annotating ones thare are mounted or lack a last-used stamp - for pvc in $ALL_PVC; do - ANNOTATION=$(kubectl get pvc "$pvc" -n {{ .Release.Namespace }} -o=jsonpath='{.metadata.annotations.last-used}') - if [ -z "$ANNOTATION" ]; then - kubectl annotate --overwrite pvc "$pvc" -n {{ .Release.Namespace }} last-used="$NOW" - elif echo "$MOUNTED_PVCS" | grep -qx "$pvc"; then - kubectl annotate --overwrite pvc "$pvc" -n {{ .Release.Namespace }} last-used="$NOW" - fi - done + {{- $files := .Files }} + time-stamper.sh: |- +{{ $files.Get "files/scripts/time-stamper.sh" | indent 4 }} +{{- end }} From 
a88fa5dad36f1ee6e15d6fa93661adf38c552138 Mon Sep 17 00:00:00 2001 From: Alex J Date: Thu, 9 Apr 2026 10:56:23 +0100 Subject: [PATCH 04/13] Time stamper cron (#1479) --- helm/blueapi/README.md | 1 + helm/blueapi/files/scripts/time-stamper.sh | 20 ++++++++++++++++++++ helm/blueapi/values.schema.json | 8 ++++++++ helm/blueapi/values.yaml | 3 +++ 4 files changed, 32 insertions(+) create mode 100644 helm/blueapi/files/scripts/time-stamper.sh diff --git a/helm/blueapi/README.md b/helm/blueapi/README.md index 3862290fb8..41ccb8827b 100644 --- a/helm/blueapi/README.md +++ b/helm/blueapi/README.md @@ -44,6 +44,7 @@ A Helm chart deploying a worker pod that runs Bluesky plans | serviceAccount.create | bool | `false` | | | serviceAccount.name | string | `""` | | | startupProbe | object | `{"failureThreshold":5,"httpGet":{"path":"/healthz","port":"http"},"periodSeconds":10}` | A more lenient livenessProbe to allow the service to start fully. This is automatically disabled when in debug mode. | +| timeStampCron.enabled | bool | `true` | | | tolerations | list | `[]` | May be required to run on specific nodes (e.g. the control machine) | | tracing | object | `{"fastapi":{"excludedURLs":"/healthz"},"otlp":{"enabled":false,"protocol":"http/protobuf","server":{"host":"http://opentelemetry-collector.tracing","port":4318}}}` | Exclude health probe requests from tracing by default to prevent spamming | | volumeMounts | list | `[{"mountPath":"/config","name":"worker-config","readOnly":true}]` | Additional volumeMounts on the output StatefulSet definition. Define how volumes are mounted to the container referenced by using the same name. 
| diff --git a/helm/blueapi/files/scripts/time-stamper.sh b/helm/blueapi/files/scripts/time-stamper.sh new file mode 100644 index 0000000000..027e879fd2 --- /dev/null +++ b/helm/blueapi/files/scripts/time-stamper.sh @@ -0,0 +1,20 @@ +#!/bin/sh +# Get PVCs belonging to this blueapi release +ALL_PVC=$(kubectl get pvc -n $RELEASE_NAMESPACE \ + -o jsonpath='{.items[*].metadata.name}' | tr ' ' '\n' | \ + grep "^$RELEASE_NAME-scratch-") +# Get all PVCs currently mounted by running pods +MOUNTED_PVCS=$(kubectl get pods -n $RELEASE_NAMESPACE \ + -o=jsonpath='{.items[*].spec.volumes[*].persistentVolumeClaim.claimName}' | tr ' ' '\n' | sort -u) +NOW=$(date +%s) +#loop through all the pvcs annotating ones thare are mounted or lack a last-used stamp +for pvc in $ALL_PVC; do + # Checks if Annotation for last-used is empty + ANNOTATION=$(kubectl get pvc "$pvc" -n $RELEASE_NAMESPACE -o=jsonpath='{.metadata.annotations.last-used}') + # -z checks if ANNOTATION is empty, if its empty or mounted to updates last-used else it ignores it + if [ -z "$ANNOTATION" ]; then + kubectl annotate --overwrite pvc "$pvc" -n $RELEASE_NAMESPACE last-used="$NOW" + elif echo "$MOUNTED_PVCS" | grep -qx "$pvc"; then + kubectl annotate --overwrite pvc "$pvc" -n $RELEASE_NAMESPACE last-used="$NOW" + fi +done diff --git a/helm/blueapi/values.schema.json b/helm/blueapi/values.schema.json index 3159f6713e..5bd7d0735d 100644 --- a/helm/blueapi/values.schema.json +++ b/helm/blueapi/values.schema.json @@ -292,6 +292,14 @@ } } }, + "timeStampCron": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + } + } + }, "tolerations": { "description": "May be required to run on specific nodes (e.g. 
the control machine)", "type": "array" diff --git a/helm/blueapi/values.yaml b/helm/blueapi/values.yaml index 876b37a989..9634d08596 100644 --- a/helm/blueapi/values.yaml +++ b/helm/blueapi/values.yaml @@ -224,6 +224,9 @@ initContainer: # -- Size of persistent volume size: "1Gi" +timeStampCron: + enabled: true + debug: # -- If enabled, runs debugpy, allowing port-forwarding to expose port 5678 or attached vscode instance enabled: false From 6bbaf6dc94ad831d67ccdda523e8d2c768ef447a Mon Sep 17 00:00:00 2001 From: Alex J Date: Thu, 9 Apr 2026 10:57:34 +0100 Subject: [PATCH 05/13] Change chart version from 1.13.0 to 0.1.0 --- helm/blueapi/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/helm/blueapi/Chart.yaml b/helm/blueapi/Chart.yaml index 385f69f9d1..08097921fb 100644 --- a/helm/blueapi/Chart.yaml +++ b/helm/blueapi/Chart.yaml @@ -15,7 +15,7 @@ type: application # This is the chart version. This version number is incremented by the release process. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 1.13.0 +version: 0.1.0 # This is the version number of the application being deployed. This version number is incremented by the release process. # Versions are not expected to follow Semantic Versioning. They should reflect the version the application is using. 
From 09cc9c2ad24c4fc65543e91d7c44da1f13a56149 Mon Sep 17 00:00:00 2001 From: alexj9837 Date: Thu, 9 Apr 2026 14:32:05 +0000 Subject: [PATCH 06/13] removing not fit for purpose test --- .../blueapi/templates/tests/test-cronjob.yaml | 19 ------------------- 1 file changed, 19 deletions(-) delete mode 100644 helm/blueapi/templates/tests/test-cronjob.yaml diff --git a/helm/blueapi/templates/tests/test-cronjob.yaml b/helm/blueapi/templates/tests/test-cronjob.yaml deleted file mode 100644 index 52df70cba5..0000000000 --- a/helm/blueapi/templates/tests/test-cronjob.yaml +++ /dev/null @@ -1,19 +0,0 @@ -apiVersion: v1 -kind: Pod -metadata: - name: {{ include "blueapi.fullname" . }}-last-used-stamper - annotations: - "helm.sh/hook": test -spec: - serviceAccountName: {{ include "blueapi.fullname" . }}-last-used-stamper - containers: - - name: test - image: bitnami/kubectl:1.34.5 - command: ["/bin/sh", "-c"] - args: - - | - # Get a pod belonging to this release and annotate it, then verify - kubectl annotate --overwrite pod "$HOSTNAME" -n "{{ .Release.Namespace }}" last-used="$(date +%s)" - kubectl get pod "$HOSTNAME" -n "{{ .Release.Namespace }}" \ - -o jsonpath='{.metadata.annotations.last-used}' | grep -q . - restartPolicy: Never From c9c6917d1e563a7039249692efe867a141de385e Mon Sep 17 00:00:00 2001 From: Alex J Date: Wed, 15 Apr 2026 14:52:14 +0100 Subject: [PATCH 07/13] Update time-stamper.sh removing the logic for if the pvc wasn't mounted. now only annotates mounted pvcs. 
--- helm/blueapi/files/scripts/time-stamper.sh | 15 ++------------- 1 file changed, 2 insertions(+), 13 deletions(-) diff --git a/helm/blueapi/files/scripts/time-stamper.sh b/helm/blueapi/files/scripts/time-stamper.sh index 027e879fd2..514cd01d23 100644 --- a/helm/blueapi/files/scripts/time-stamper.sh +++ b/helm/blueapi/files/scripts/time-stamper.sh @@ -1,20 +1,9 @@ #!/bin/sh -# Get PVCs belonging to this blueapi release -ALL_PVC=$(kubectl get pvc -n $RELEASE_NAMESPACE \ - -o jsonpath='{.items[*].metadata.name}' | tr ' ' '\n' | \ - grep "^$RELEASE_NAME-scratch-") # Get all PVCs currently mounted by running pods MOUNTED_PVCS=$(kubectl get pods -n $RELEASE_NAMESPACE \ -o=jsonpath='{.items[*].spec.volumes[*].persistentVolumeClaim.claimName}' | tr ' ' '\n' | sort -u) NOW=$(date +%s) -#loop through all the pvcs annotating ones thare are mounted or lack a last-used stamp -for pvc in $ALL_PVC; do - # Checks if Annotation for last-used is empty - ANNOTATION=$(kubectl get pvc "$pvc" -n $RELEASE_NAMESPACE -o=jsonpath='{.metadata.annotations.last-used}') - # -z checks if ANNOTATION is empty, if its empty or mounted to updates last-used else it ignores it - if [ -z "$ANNOTATION" ]; then +#loop through all the pvcs annotating ones thare are mounted +for pvc in $MOUNTED_PVCS; do kubectl annotate --overwrite pvc "$pvc" -n $RELEASE_NAMESPACE last-used="$NOW" - elif echo "$MOUNTED_PVCS" | grep -qx "$pvc"; then - kubectl annotate --overwrite pvc "$pvc" -n $RELEASE_NAMESPACE last-used="$NOW" - fi done From 0142922d15d2b7ff02a89d8da2dfb3b9ed1771a3 Mon Sep 17 00:00:00 2001 From: alexj9837 <52531949+Alexj9837@users.noreply.github.com> Date: Fri, 24 Apr 2026 13:37:42 +0000 Subject: [PATCH 08/13] Base logic for pvc auto deletion --- helm/blueapi/files/scripts/pvc-deletion.sh | 24 ++++++++++++++++++++++ 1 file changed, 24 insertions(+) create mode 100644 helm/blueapi/files/scripts/pvc-deletion.sh diff --git a/helm/blueapi/files/scripts/pvc-deletion.sh 
b/helm/blueapi/files/scripts/pvc-deletion.sh new file mode 100644 index 0000000000..4c82f69c90 --- /dev/null +++ b/helm/blueapi/files/scripts/pvc-deletion.sh @@ -0,0 +1,24 @@ +#!/bin/sh +# Get all PVCs by running pods +ALL_PVCS=$(kubectl get pvc -n $RELEASE_NAMESPACE -o=jsonpath='{.items[*].metadata.name}' | tr ' ' '\n' | sort -u) +Now=$(date +%s) +#loop through all pvcs. +for pvc in $ALL_PVCS; do + #check if pvc has last-used annotation + if get $pvc last-used -n $RELEASE_NAMESPACE -o=jsonpath='{.metadata.annotations.last-used}' + then + #get last used annotation and check if it is more than three months ago (2628000 seconds) + LAST_USED=$(get $pvc last-used -n $RELEASE_NAMESPACE -o=jsonpath='{.metadata.annotations.last-used}') + if [ $((NOW - LAST_USED)) -gt 2628000 ]; then + #checking if the pvc is protected, if it is protected skip deletion + if [ get $pvc protected -n $RELEASE_NAMESPACE -o=jsonpath='{.metadata.annotations.protected}' = "true" ]; then + echo "PVC $pvc is protected, skipping deletion" + continue + fi + #PVC has not been used for more than three months, delete it + kubectl delete pvc "$pvc" -n $RELEASE_NAMESPACE + fi + else + echo "PVC $pvc does not have last-used annotation, skipping deletion" + fi +done From 3e8f562820b8ca9e07c03351cee4aa3fcf3597fb Mon Sep 17 00:00:00 2001 From: alexj9837 <52531949+Alexj9837@users.noreply.github.com> Date: Fri, 24 Apr 2026 14:24:35 +0000 Subject: [PATCH 09/13] Sorting out the weekly cronjob for pvc auto deletion, also adding someting to value yaml to turn it off --- helm/blueapi/README.md | 1 + helm/blueapi/templates/configmap.yaml | 12 ++++ helm/blueapi/templates/cronjob.yaml | 82 ++++++++++++++++++++++++++- helm/blueapi/values.schema.json | 8 +++ helm/blueapi/values.yaml | 3 + 5 files changed, 105 insertions(+), 1 deletion(-) diff --git a/helm/blueapi/README.md b/helm/blueapi/README.md index 41ccb8827b..1081a9bbde 100644 --- a/helm/blueapi/README.md +++ b/helm/blueapi/README.md @@ -32,6 +32,7 @@ A Helm chart 
deploying a worker pod that runs Bluesky plans | podAnnotations | object | `{}` | | | podLabels | object | `{}` | | | podSecurityContext | object | `{}` | | +| pvcautodeletion.enabled | bool | `true` | | | readinessProbe | object | `{"failureThreshold":2,"httpGet":{"path":"/healthz","port":"http"},"periodSeconds":10}` | Readiness probe, if configured kubernetes will not route traffic to this pod if failed consecutively. This could allow the service time to recover if it is being overwhelmed by traffic, but without the to ability to load balance or scale up/outwards, upstream services will need to know to back off. This is automatically disabled when in debug mode. | | resources | object | `{"limits":{"cpu":"2000m","memory":"4000Mi"},"requests":{"cpu":"200m","memory":"400Mi"}}` | Sets the compute resources available to the pod. These defaults are appropriate when using debug mode or an internal PVC and therefore running VS Code server in the pod. In the Diamond cluster, requests must be >= 0.1*limits When not using either of the above, the limits may be lowered. When idle but connected, blueapi consumes ~400MB of memory and 1% cpu and may struggle when allocated less. | | restartOnConfigChange | bool | `true` | If enabled the blueapi pod will restart on changes to `worker` | diff --git a/helm/blueapi/templates/configmap.yaml b/helm/blueapi/templates/configmap.yaml index 2c834bb31a..9ec730a9d1 100644 --- a/helm/blueapi/templates/configmap.yaml +++ b/helm/blueapi/templates/configmap.yaml @@ -46,3 +46,15 @@ data: time-stamper.sh: |- {{ $files.Get "files/scripts/time-stamper.sh" | indent 4 }} {{- end }} + +--- +{{- if .Values.pvcautodeletion.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + name : {{include "blueapi.fullname" . 
}}-pvc-autodeletion-script +data: + {{- $files := .Files }} + pvc-deletion.sh: |- +{{ $files.Get "files/scripts/pvc-deletion.sh" | indent 4 }} +{{- end }} diff --git a/helm/blueapi/templates/cronjob.yaml b/helm/blueapi/templates/cronjob.yaml index 52f5849414..4be93ee4bf 100644 --- a/helm/blueapi/templates/cronjob.yaml +++ b/helm/blueapi/templates/cronjob.yaml @@ -45,7 +45,7 @@ spec: spec: # amount of attempts of labeling a pvc backoffLimit: 3 - # job stops after 60 secounds + # job stops after 60 seconds activeDeadlineSeconds: 60 template: spec: @@ -77,3 +77,83 @@ spec: command: ["/scripts/time-stamper.sh"] restartPolicy: OnFailure {{- end }} + +{{- if .Values.pvcautodeletion.enabled }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "blueapi.fullname" . }}-pvcautodeletion + namespace: {{ .Release.Namespace }} +automountServiceAccountToken: true +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ include "blueapi.fullname" . }}-pvcautodeletion + namespace: {{ .Release.Namespace }} +rules: +- apiGroups: [""] + resources: ["pods", "persistentvolumeclaims"] + verbs: ["get", "list", "patch"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ include "blueapi.fullname" . }}-pvcautodeletion + namespace: {{ .Release.Namespace }} +subjects: +- kind: ServiceAccount + name: {{ include "blueapi.fullname" . }}-pvcautodeletion + namespace: {{ .Release.Namespace }} +roleRef: + kind: Role + name: {{ include "blueapi.fullname" . }}-pvcautodeletion + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: {{ include "blueapi.fullname" . 
}}-pvcautodeletion + namespace: {{ .Release.Namespace }} +spec: + concurrencyPolicy: Forbid + successfulJobsHistoryLimit: 3 + failedJobsHistoryLimit: 1 + schedule: "@weekly" + + jobTemplate: + spec: + # amount of attempts of labeling a pvc + backoffLimit: 3 + # job stops after 300 seconds + activeDeadlineSeconds: 300 + template: + spec: + serviceAccountName: {{ include "blueapi.fullname" . }}-pvcautodeletion + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 12 }} + {{- end }} + + volumes: + - name : {{include "blueapi.fullname" . }}-pvc-autodeletion-script + configMap: + name: {{include "blueapi.fullname" . }}-pvc-autodeletion-script + defaultMode: 0555 + + + containers: + - name: pvcautodeletion + env: + - name: RELEASE_NAME + value: {{ .Release.Name }} + - name: RELEASE_NAMESPACE + value: {{ .Release.Namespace }} + volumeMounts: + - name: {{include "blueapi.fullname" . }}-pvc-autodeletion-script + mountPath: /scripts + image: bitnami/kubectl:latest + imagePullPolicy: IfNotPresent + command: ["/scripts/pvc-deletion.sh"] + restartPolicy: OnFailure +{{- end }} diff --git a/helm/blueapi/values.schema.json b/helm/blueapi/values.schema.json index 5bd7d0735d..c5d41d36dc 100644 --- a/helm/blueapi/values.schema.json +++ b/helm/blueapi/values.schema.json @@ -174,6 +174,14 @@ "podSecurityContext": { "type": "object" }, + "pvcautodeletion": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + } + } + }, "readinessProbe": { "description": "Readiness probe, if configured kubernetes will not route traffic to this pod if failed consecutively. This could allow the service time to recover if it is being overwhelmed by traffic, but without the to ability to load balance or scale up/outwards, upstream services will need to know to back off. 
This is automatically disabled when in debug mode.", "type": "object", diff --git a/helm/blueapi/values.yaml b/helm/blueapi/values.yaml index 9634d08596..f2cde2a972 100644 --- a/helm/blueapi/values.yaml +++ b/helm/blueapi/values.yaml @@ -227,6 +227,9 @@ initContainer: timeStampCron: enabled: true +pvcautodeletion: + enabled: true + debug: # -- If enabled, runs debugpy, allowing port-forwarding to expose port 5678 or attached vscode instance enabled: false From caa5a4a8eb695051597439571df402752db986eb Mon Sep 17 00:00:00 2001 From: alexj9837 <52531949+Alexj9837@users.noreply.github.com> Date: Mon, 27 Apr 2026 14:02:52 +0000 Subject: [PATCH 10/13] Added Del perm --- helm/blueapi/templates/cronjob.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/helm/blueapi/templates/cronjob.yaml b/helm/blueapi/templates/cronjob.yaml index 4be93ee4bf..dd67a67d56 100644 --- a/helm/blueapi/templates/cronjob.yaml +++ b/helm/blueapi/templates/cronjob.yaml @@ -94,7 +94,7 @@ metadata: rules: - apiGroups: [""] resources: ["pods", "persistentvolumeclaims"] - verbs: ["get", "list", "patch"] + verbs: ["get", "list", "patch","delete"] --- apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding From d167f3d154f13ed7bded8791e11ac218ba602295 Mon Sep 17 00:00:00 2001 From: alexj9837 <52531949+Alexj9837@users.noreply.github.com> Date: Mon, 27 Apr 2026 15:18:39 +0000 Subject: [PATCH 11/13] del s --- helm/blueapi/files/scripts/pvc-deletion.sh | 10 +++++----- helm/blueapi/templates/cronjob.yaml | 2 +- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/helm/blueapi/files/scripts/pvc-deletion.sh b/helm/blueapi/files/scripts/pvc-deletion.sh index 4c82f69c90..74e70f8602 100644 --- a/helm/blueapi/files/scripts/pvc-deletion.sh +++ b/helm/blueapi/files/scripts/pvc-deletion.sh @@ -1,17 +1,17 @@ #!/bin/sh # Get all PVCs by running pods ALL_PVCS=$(kubectl get pvc -n $RELEASE_NAMESPACE -o=jsonpath='{.items[*].metadata.name}' | tr ' ' '\n' | sort -u) -Now=$(date +%s) +NOW=$(date 
+%s) #loop through all pvcs. for pvc in $ALL_PVCS; do #check if pvc has last-used annotation - if get $pvc last-used -n $RELEASE_NAMESPACE -o=jsonpath='{.metadata.annotations.last-used}' + if kubectl get pvc $pvc -n $RELEASE_NAMESPACE -o=jsonpath='{.metadata.annotations.last-used}' then #get last used annotation and check if it is more than three months ago (2628000 seconds) - LAST_USED=$(get $pvc last-used -n $RELEASE_NAMESPACE -o=jsonpath='{.metadata.annotations.last-used}') - if [ $((NOW - LAST_USED)) -gt 2628000 ]; then + LAST_USED=$(kubectl get pvc $pvc -n $RELEASE_NAMESPACE -o=jsonpath='{.metadata.annotations.last-used}') + if [ $(($NOW - LAST_USED)) -gt 2628000 ]; then #checking if the pvc is protected, if it is protected skip deletion - if [ get $pvc protected -n $RELEASE_NAMESPACE -o=jsonpath='{.metadata.annotations.protected}' = "true" ]; then + if [ "$(kubectl get pvc $pvc -n $RELEASE_NAMESPACE -o=jsonpath='{.metadata.annotations.protected}')" = "true" ]; then echo "PVC $pvc is protected, skipping deletion" continue fi diff --git a/helm/blueapi/templates/cronjob.yaml b/helm/blueapi/templates/cronjob.yaml index dd67a67d56..b37d5cde44 100644 --- a/helm/blueapi/templates/cronjob.yaml +++ b/helm/blueapi/templates/cronjob.yaml @@ -77,8 +77,8 @@ spec: command: ["/scripts/time-stamper.sh"] restartPolicy: OnFailure {{- end }} - {{- if .Values.pvcautodeletion.enabled }} +--- apiVersion: v1 kind: ServiceAccount metadata: From ef874bae91da0214d81ec1a55bfc182533a2c41f Mon Sep 17 00:00:00 2001 From: alexj9837 <52531949+Alexj9837@users.noreply.github.com> Date: Tue, 28 Apr 2026 09:51:30 +0000 Subject: [PATCH 12/13] changing name to be more readable --- helm/blueapi/README.md | 8 +++++--- helm/blueapi/templates/configmap.yaml | 4 ++-- helm/blueapi/templates/cronjob.yaml | 28 +++++++++++++-------------- helm/blueapi/values.schema.json | 4 +--- helm/blueapi/values.yaml | 23 +++++++++++----------- 5 files changed, 33 insertions(+), 34 deletions(-) diff --git 
a/helm/blueapi/README.md b/helm/blueapi/README.md index 1081a9bbde..17e5066071 100644 --- a/helm/blueapi/README.md +++ b/helm/blueapi/README.md @@ -32,9 +32,12 @@ A Helm chart deploying a worker pod that runs Bluesky plans | podAnnotations | object | `{}` | | | podLabels | object | `{}` | | | podSecurityContext | object | `{}` | | -| pvcautodeletion.enabled | bool | `true` | | +| pvcAutoDeletion.enabled | bool | `true` | | | readinessProbe | object | `{"failureThreshold":2,"httpGet":{"path":"/healthz","port":"http"},"periodSeconds":10}` | Readiness probe, if configured kubernetes will not route traffic to this pod if failed consecutively. This could allow the service time to recover if it is being overwhelmed by traffic, but without the to ability to load balance or scale up/outwards, upstream services will need to know to back off. This is automatically disabled when in debug mode. | -| resources | object | `{"limits":{"cpu":"2000m","memory":"4000Mi"},"requests":{"cpu":"200m","memory":"400Mi"}}` | Sets the compute resources available to the pod. These defaults are appropriate when using debug mode or an internal PVC and therefore running VS Code server in the pod. In the Diamond cluster, requests must be >= 0.1*limits When not using either of the above, the limits may be lowered. When idle but connected, blueapi consumes ~400MB of memory and 1% cpu and may struggle when allocated less. 
| +| resources.limits.cpu | string | `"2000m"` | | +| resources.limits.memory | string | `"4000Mi"` | | +| resources.requests.cpu | string | `"200m"` | | +| resources.requests.memory | string | `"400Mi"` | | | restartOnConfigChange | bool | `true` | If enabled the blueapi pod will restart on changes to `worker` | | securityContext.runAsNonRoot | bool | `true` | | | securityContext.runAsUser | int | `1000` | | @@ -53,6 +56,5 @@ A Helm chart deploying a worker pod that runs Bluesky plans | worker | object | `{"api":{"url":"http://0.0.0.0:8000/"},"env":{"sources":[{"kind":"planFunctions","module":"dodal.plans"},{"kind":"planFunctions","module":"dodal.plan_stubs.wrapped"}]},"logging":{"graylog":{"enabled":false,"url":"tcp://graylog-log-target.diamond.ac.uk:12231/"},"level":"INFO"},"scratch":{"repositories":[],"root":"/workspace"},"stomp":{"auth":{"password":"guest","username":"guest"},"enabled":false,"url":"tcp://rabbitmq:61613/"}}` | Config for the worker goes here, will be mounted into a config file | | worker.api.url | string | `"http://0.0.0.0:8000/"` | 0.0.0.0 required to allow non-loopback traffic If using hostNetwork, the port must be free on the host | | worker.env.sources | list | `[{"kind":"planFunctions","module":"dodal.plans"},{"kind":"planFunctions","module":"dodal.plan_stubs.wrapped"}]` | modules (must be installed in the venv) to fetch devices/plans from | -| worker.logging | object | `{"graylog":{"enabled":false,"url":"tcp://graylog-log-target.diamond.ac.uk:12231/"},"level":"INFO"}` | Configures logging. 
Port 12231 is the `dodal` input on graylog which will be renamed `blueapi` | | worker.scratch | object | `{"repositories":[],"root":"/workspace"}` | If initContainer is enabled the default branch of python projects in this section are installed into the venv *without their dependencies* | | worker.stomp | object | `{"auth":{"password":"guest","username":"guest"},"enabled":false,"url":"tcp://rabbitmq:61613/"}` | Message bus configuration for returning status to GDA/forwarding documents downstream Password may be in the form ${ENV_VAR} to be fetched from an environment variable e.g. mounted from a SealedSecret | diff --git a/helm/blueapi/templates/configmap.yaml b/helm/blueapi/templates/configmap.yaml index 9ec730a9d1..3286f16ff7 100644 --- a/helm/blueapi/templates/configmap.yaml +++ b/helm/blueapi/templates/configmap.yaml @@ -48,11 +48,11 @@ data: {{- end }} --- -{{- if .Values.pvcautodeletion.enabled }} +{{- if .Values.pvcAutoDeletion.enabled }} apiVersion: v1 kind: ConfigMap metadata: - name : {{include "blueapi.fullname" . }}-pvc-autodeletion-script + name : {{include "blueapi.fullname" . }}-pvc-auto-deletion-script data: {{- $files := .Files }} pvc-deletion.sh: |- diff --git a/helm/blueapi/templates/cronjob.yaml b/helm/blueapi/templates/cronjob.yaml index b37d5cde44..717fa6bf48 100644 --- a/helm/blueapi/templates/cronjob.yaml +++ b/helm/blueapi/templates/cronjob.yaml @@ -45,8 +45,8 @@ spec: spec: # amount of attempts of labeling a pvc backoffLimit: 3 - # job stops after 60 seconds - activeDeadlineSeconds: 60 + # job stops after 180 seconds + activeDeadlineSeconds: 180 template: spec: serviceAccountName: {{ include "blueapi.fullname" . }}-last-used-stamper @@ -77,19 +77,19 @@ spec: command: ["/scripts/time-stamper.sh"] restartPolicy: OnFailure {{- end }} -{{- if .Values.pvcautodeletion.enabled }} +{{- if .Values.pvcAutoDeletion.enabled }} --- apiVersion: v1 kind: ServiceAccount metadata: - name: {{ include "blueapi.fullname" . 
}}-pvcautodeletion + name: {{ include "blueapi.fullname" . }}-pvc-auto-deletion namespace: {{ .Release.Namespace }} automountServiceAccountToken: true --- apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: - name: {{ include "blueapi.fullname" . }}-pvcautodeletion + name: {{ include "blueapi.fullname" . }}-pvc-auto-deletion namespace: {{ .Release.Namespace }} rules: - apiGroups: [""] @@ -99,21 +99,21 @@ rules: apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: - name: {{ include "blueapi.fullname" . }}-pvcautodeletion + name: {{ include "blueapi.fullname" . }}-pvc-auto-deletion namespace: {{ .Release.Namespace }} subjects: - kind: ServiceAccount - name: {{ include "blueapi.fullname" . }}-pvcautodeletion + name: {{ include "blueapi.fullname" . }}-pvc-auto-deletion namespace: {{ .Release.Namespace }} roleRef: kind: Role - name: {{ include "blueapi.fullname" . }}-pvcautodeletion + name: {{ include "blueapi.fullname" . }}-pvc-auto-deletion apiGroup: rbac.authorization.k8s.io --- apiVersion: batch/v1 kind: CronJob metadata: - name: {{ include "blueapi.fullname" . }}-pvcautodeletion + name: {{ include "blueapi.fullname" . }}-pvc-auto-deletion namespace: {{ .Release.Namespace }} spec: concurrencyPolicy: Forbid @@ -129,28 +129,28 @@ spec: activeDeadlineSeconds: 300 template: spec: - serviceAccountName: {{ include "blueapi.fullname" . }}-pvcautodeletion + serviceAccountName: {{ include "blueapi.fullname" . }}-pvc-auto-deletion {{- with .Values.tolerations }} tolerations: {{- toYaml . | nindent 12 }} {{- end }} volumes: - - name : {{include "blueapi.fullname" . }}-pvc-autodeletion-script + - name : {{include "blueapi.fullname" . }}-pvc-auto-deletion-script configMap: - name: {{include "blueapi.fullname" . }}-pvc-autodeletion-script + name: {{include "blueapi.fullname" . 
}}-pvc-auto-deletion-script defaultMode: 0555 containers: - - name: pvcautodeletion + - name: pvc-auto-deletion env: - name: RELEASE_NAME value: {{ .Release.Name }} - name: RELEASE_NAMESPACE value: {{ .Release.Namespace }} volumeMounts: - - name: {{include "blueapi.fullname" . }}-pvc-autodeletion-script + - name: {{include "blueapi.fullname" . }}-pvc-auto-deletion-script mountPath: /scripts image: bitnami/kubectl:latest imagePullPolicy: IfNotPresent diff --git a/helm/blueapi/values.schema.json b/helm/blueapi/values.schema.json index c5d41d36dc..654e1178d4 100644 --- a/helm/blueapi/values.schema.json +++ b/helm/blueapi/values.schema.json @@ -174,7 +174,7 @@ "podSecurityContext": { "type": "object" }, - "pvcautodeletion": { + "pvcAutoDeletion": { "type": "object", "properties": { "enabled": { @@ -206,7 +206,6 @@ } }, "resources": { - "description": "Sets the compute resources available to the pod. These defaults are appropriate when using debug mode or an internal PVC and therefore running VS Code server in the pod. In the Diamond cluster, requests must be \u003e= 0.1*limits When not using either of the above, the limits may be lowered. When idle but connected, blueapi consumes ~400MB of memory and 1% cpu and may struggle when allocated less.", "type": "object", "properties": { "limits": { @@ -405,7 +404,6 @@ } }, "logging": { - "description": "Configures logging. 
Port 12231 is the `dodal` input on graylog which will be renamed `blueapi`", "type": "object", "properties": { "graylog": { diff --git a/helm/blueapi/values.yaml b/helm/blueapi/values.yaml index f2cde2a972..c7e6e2fa1e 100644 --- a/helm/blueapi/values.yaml +++ b/helm/blueapi/values.yaml @@ -36,8 +36,7 @@ podAnnotations: {} # For more information checkout: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ podLabels: {} -podSecurityContext: {} - # fsGroup: 2000 +podSecurityContext: {} # fsGroup: 2000 securityContext: # https://github.com/DiamondLightSource/blueapi/issues/1096 @@ -48,7 +47,7 @@ securityContext: # drop: # - ALL -# This is for setting up a service more information can be found here: https://kubernetes.io/docs/concepts/services-networking/service/ + # This is for setting up a service more information can be found here: https://kubernetes.io/docs/concepts/services-networking/service/ service: # This sets the service type more information can be found here: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types # -- To make blueapi available on an IP outside of the cluster prior to an Ingress being created, change this to LoadBalancer @@ -76,13 +75,13 @@ ingress: # hosts: # - chart-example.local -# -- Sets the compute resources available to the pod. -# These defaults are appropriate when using debug mode or an internal PVC and therefore -# running VS Code server in the pod. -# In the Diamond cluster, requests must be >= 0.1*limits -# When not using either of the above, the limits may be lowered. -# When idle but connected, blueapi consumes ~400MB of memory and 1% cpu -# and may struggle when allocated less. + # -- Sets the compute resources available to the pod. + # These defaults are appropriate when using debug mode or an internal PVC and therefore + # running VS Code server in the pod. 
+ # In the Diamond cluster, requests must be >= 0.1*limits + # When not using either of the above, the limits may be lowered. + # When idle but connected, blueapi consumes ~400MB of memory and 1% cpu + # and may struggle when allocated less. resources: # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little @@ -205,7 +204,7 @@ worker: repositories: [] # - name: "dodal" # remote_url: https://github.com/DiamondLightSource/dodal.git - # -- Configures logging. Port 12231 is the `dodal` input on graylog which will be renamed `blueapi` + # -- Configures logging. Port 12231 is the `dodal` input on graylog which will be renamed `blueapi` logging: level: "INFO" graylog: @@ -227,7 +226,7 @@ initContainer: timeStampCron: enabled: true -pvcautodeletion: +pvcAutoDeletion: enabled: true debug: From 1c69150a1d510f567de5f6456226b0ac7082da82 Mon Sep 17 00:00:00 2001 From: alexj9837 <52531949+Alexj9837@users.noreply.github.com> Date: Tue, 28 Apr 2026 10:11:18 +0000 Subject: [PATCH 13/13] fix: if there isn't a last_used check if not null --- helm/blueapi/files/scripts/pvc-deletion.sh | 22 ++++++++++------- helm/blueapi/templates/configmap.yaml | 24 ------------------- .../blueapi/templates/cronjob-configmaps.yaml | 22 +++++++++++++++++ 3 files changed, 35 insertions(+), 33 deletions(-) create mode 100644 helm/blueapi/templates/cronjob-configmaps.yaml diff --git a/helm/blueapi/files/scripts/pvc-deletion.sh b/helm/blueapi/files/scripts/pvc-deletion.sh index 74e70f8602..fc4f2f9415 100644 --- a/helm/blueapi/files/scripts/pvc-deletion.sh +++ b/helm/blueapi/files/scripts/pvc-deletion.sh @@ -7,18 +7,22 @@ for pvc in $ALL_PVCS; do #check if pvc has last-used annotation if kubectl get pvc $pvc -n $RELEASE_NAMESPACE -o=jsonpath='{.metadata.annotations.last-used}' then - #get last used annotation and check if it is more than three months ago (2628000 seconds) + #get last 
used annotation
         LAST_USED=$(kubectl get pvc $pvc -n $RELEASE_NAMESPACE -o=jsonpath='{.metadata.annotations.last-used}')
-    if [ $(($NOW - LAST_USED)) -gt 2628000 ]; then
-        #checking if the pvc is protected, if it is protected skip deletion
-        if [ "$(kubectl get pvc $pvc -n $RELEASE_NAMESPACE -o=jsonpath='{.metadata.annotations.protected}')" = "true" ]; then
-            echo "PVC $pvc is protected, skipping deletion"
-            continue
+    #checking if it's not null
+    if [ -n "$LAST_USED" ]; then
+        #check if last_used is older than 3 months
+        if [ $(($NOW - LAST_USED)) -gt 2628000 ]; then
+            #checking if the pvc is protected, if it is protected skip deletion
+            if [ "$(kubectl get pvc $pvc -n $RELEASE_NAMESPACE -o=jsonpath='{.metadata.annotations.protected}')" = "true" ]; then
+                echo "PVC $pvc is protected, skipping deletion"
+                continue
+            fi
+            #PVC has not been used for more than three months, delete it
+            kubectl delete pvc "$pvc" -n $RELEASE_NAMESPACE
         fi
-        #PVC has not been used for more than three months, delete it
-        kubectl delete pvc "$pvc" -n $RELEASE_NAMESPACE
     fi
 else
-    echo "PVC $pvc does not have last-used annotation, skipping deletion"
+        echo "PVC $pvc does not have last-used annotation, skipping deletion"
 fi
 done
diff --git a/helm/blueapi/templates/configmap.yaml b/helm/blueapi/templates/configmap.yaml
index 3286f16ff7..93ba1447ea 100644
--- a/helm/blueapi/templates/configmap.yaml
+++ b/helm/blueapi/templates/configmap.yaml
@@ -34,27 +34,3 @@ data:
 
 ---
 {{- end }}
-
----
-{{- if .Values.timeStampCron.enabled }}
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name : {{include "blueapi.fullname" . }}-pvc-stamper-script
-data:
-  {{- $files := .Files }}
-  time-stamper.sh: |-
-{{ $files.Get "files/scripts/time-stamper.sh" | indent 4 }}
-{{- end }}
-
----
-{{- if .Values.pvcAutoDeletion.enabled }}
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name : {{include "blueapi.fullname" . 
}}-pvc-auto-deletion-script -data: - {{- $files := .Files }} - pvc-deletion.sh: |- -{{ $files.Get "files/scripts/pvc-deletion.sh" | indent 4 }} -{{- end }} diff --git a/helm/blueapi/templates/cronjob-configmaps.yaml b/helm/blueapi/templates/cronjob-configmaps.yaml new file mode 100644 index 0000000000..188bb1a5f7 --- /dev/null +++ b/helm/blueapi/templates/cronjob-configmaps.yaml @@ -0,0 +1,22 @@ +{{- if .Values.timeStampCron.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + name : {{include "blueapi.fullname" . }}-pvc-stamper-script +data: + {{- $files := .Files }} + time-stamper.sh: |- +{{ $files.Get "files/scripts/time-stamper.sh" | indent 4 }} +--- +{{- end }} + +{{- if .Values.pvcAutoDeletion.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + name : {{include "blueapi.fullname" . }}-pvc-auto-deletion-script +data: + {{- $files := .Files }} + pvc-deletion.sh: |- +{{ $files.Get "files/scripts/pvc-deletion.sh" | indent 4 }} +{{- end }}