From e5974e1ef50bf579172cb387f9b395d0df5af42d Mon Sep 17 00:00:00 2001
From: Steven Kreitzer
Date: Thu, 2 Jan 2025 13:54:53 -0600
Subject: [PATCH] feat(kyverno): volsync policy

---
 .envrc | 7 +-
 .minijinja.toml | 5 +
 .taskfiles/bootstrap/Taskfile.yaml | 66 +----
 .../resources/rook-data-job.tmpl.yaml | 28 --
 .../resources/rook-disk-job.tmpl.yaml | 32 ---
 .../bootstrap/resources/wait-for-job.sh | 13 -
 .../bootstrap/resources/wipe-rook.yaml.j2 | 59 +++++
 .taskfiles/kubernetes/Taskfile.yaml | 7 +-
 ...d-pod.tmpl.yaml => privileged-pod.yaml.j2} | 2 +-
 .taskfiles/volsync/Taskfile.yaml | 247 +++++-------------
 .taskfiles/volsync/resources/list.tmpl.yaml | 20 --
 .../replicationdestination.tmpl.yaml | 30 ---
 .../resources/replicationdestination.yaml.j2 | 23 ++
 .../{unlock.tmpl.yaml => unlock.yaml.j2} | 8 +-
 .taskfiles/volsync/resources/wait-for-job.sh | 13 -
 .../volsync/resources/which-controller.sh | 21 --
 .taskfiles/volsync/resources/wipe.tmpl.yaml | 26 --
 .../home-assistant/app/kustomization.yaml | 2 +-
 .../apps/home/home-assistant/app/pvc.yaml | 15 ++
 .../apps/home/home-assistant/app/volsync.yaml | 86 ------
 .../home/zigbee2mqtt/app/kustomization.yaml | 2 +-
 kubernetes/apps/home/zigbee2mqtt/app/pvc.yaml | 15 ++
 .../apps/home/zigbee2mqtt/app/volsync.yaml | 86 ------
 .../apps/kyverno/kyverno/app/helmrelease.yaml | 37 ++-
 .../apps/kyverno/kyverno/policies/gatus.yaml | 10 +-
 .../kyverno/policies/kustomization.yaml | 1 +
 .../kyverno/kyverno/policies/volsync.yaml | 129 +++++++++
 .../apps/media/bazarr/app/kustomization.yaml | 2 +-
 kubernetes/apps/media/bazarr/app/pvc.yaml | 15 ++
 kubernetes/apps/media/bazarr/app/volsync.yaml | 86 ------
 .../media/overseerr/app/kustomization.yaml | 1 -
 kubernetes/apps/media/overseerr/app/pvc.yaml | 15 ++
 .../apps/media/overseerr/app/volsync.yaml | 86 ------
 .../apps/media/plex/app/kustomization.yaml | 1 -
 kubernetes/apps/media/plex/app/pvc.yaml | 15 ++
 kubernetes/apps/media/plex/app/volsync.yaml | 86 ------
 .../media/qbittorrent/app/kustomization.yaml | 2 +-
 .../apps/media/qbittorrent/app/pvc.yaml | 15 ++
 .../apps/media/qbittorrent/app/volsync.yaml | 86 ------
 .../media/recyclarr/app/kustomization.yaml | 2 +-
 kubernetes/apps/media/recyclarr/app/pvc.yaml | 15 ++
 .../apps/media/recyclarr/app/volsync.yaml | 86 ------
 .../apps/media/sabnzbd/app/kustomization.yaml | 2 +-
 kubernetes/apps/media/sabnzbd/app/pvc.yaml | 15 ++
 .../apps/media/sabnzbd/app/volsync.yaml | 86 ------
 .../media/tautulli/app/kustomization.yaml | 1 -
 kubernetes/apps/media/tautulli/app/pvc.yaml | 15 ++
 .../apps/media/tautulli/app/volsync.yaml | 86 ------
 48 files changed, 473 insertions(+), 1235 deletions(-)
 create mode 100644 .minijinja.toml
 delete mode 100755 .taskfiles/bootstrap/resources/rook-data-job.tmpl.yaml
 delete mode 100755 .taskfiles/bootstrap/resources/rook-disk-job.tmpl.yaml
 delete mode 100755 .taskfiles/bootstrap/resources/wait-for-job.sh
 create mode 100644 .taskfiles/bootstrap/resources/wipe-rook.yaml.j2
 rename .taskfiles/kubernetes/resources/{privileged-pod.tmpl.yaml => privileged-pod.yaml.j2} (96%)
 delete mode 100755 .taskfiles/volsync/resources/list.tmpl.yaml
 delete mode 100755 .taskfiles/volsync/resources/replicationdestination.tmpl.yaml
 create mode 100644 .taskfiles/volsync/resources/replicationdestination.yaml.j2
 rename .taskfiles/volsync/resources/{unlock.tmpl.yaml => unlock.yaml.j2} (72%) mode change 100755 => 100644
 delete mode 100755 .taskfiles/volsync/resources/wait-for-job.sh
 delete mode 100755 .taskfiles/volsync/resources/which-controller.sh
 delete mode 100755
.taskfiles/volsync/resources/wipe.tmpl.yaml
 create mode 100644 kubernetes/apps/home/home-assistant/app/pvc.yaml
 delete mode 100644 kubernetes/apps/home/home-assistant/app/volsync.yaml
 create mode 100644 kubernetes/apps/home/zigbee2mqtt/app/pvc.yaml
 delete mode 100644 kubernetes/apps/home/zigbee2mqtt/app/volsync.yaml
 create mode 100644 kubernetes/apps/kyverno/kyverno/policies/volsync.yaml
 create mode 100644 kubernetes/apps/media/bazarr/app/pvc.yaml
 delete mode 100644 kubernetes/apps/media/bazarr/app/volsync.yaml
 delete mode 100644 kubernetes/apps/media/overseerr/app/volsync.yaml
 delete mode 100644 kubernetes/apps/media/plex/app/volsync.yaml
 create mode 100644 kubernetes/apps/media/qbittorrent/app/pvc.yaml
 delete mode 100644 kubernetes/apps/media/qbittorrent/app/volsync.yaml
 create mode 100644 kubernetes/apps/media/recyclarr/app/pvc.yaml
 delete mode 100644 kubernetes/apps/media/recyclarr/app/volsync.yaml
 create mode 100644 kubernetes/apps/media/sabnzbd/app/pvc.yaml
 delete mode 100644 kubernetes/apps/media/sabnzbd/app/volsync.yaml
 delete mode 100644 kubernetes/apps/media/tautulli/app/volsync.yaml

diff --git a/.envrc b/.envrc
index 5a86bcc687..3b8d77fc38 100644
--- a/.envrc
+++ b/.envrc
@@ -1,5 +1,6 @@
 #shellcheck disable=SC2148,SC2155
-export KUBECONFIG="$(PWD)/kubernetes/kubeconfig"
-export SOPS_AGE_KEY_FILE="$(PWD)/age.key"
-export TALOSCONFIG="$(PWD)/talos/clusterconfig/talosconfig"
+export MINIJINJA_CONFIG_FILE="$(expand_path ./.minijinja.toml)"
+export KUBECONFIG="$(expand_path ./kubernetes/kubeconfig)"
+export SOPS_AGE_KEY_FILE="$(expand_path ./age.key)"
+export TALOSCONFIG="$(expand_path ./talos/clusterconfig/talosconfig)"
 export TASK_X_MAP_VARIABLES=0

diff --git a/.minijinja.toml b/.minijinja.toml
new file mode 100644
index 0000000000..05c2a7b6a0
--- /dev/null
+++ b/.minijinja.toml
@@ -0,0 +1,5 @@
+autoescape = "none"
+newline = true
+trim-blocks = true
+lstrip-blocks = true
+env = true

diff --git a/.taskfiles/bootstrap/Taskfile.yaml b/.taskfiles/bootstrap/Taskfile.yaml
index acf1558d03..8725c17f3c 100644
--- a/.taskfiles/bootstrap/Taskfile.yaml
+++ b/.taskfiles/bootstrap/Taskfile.yaml
@@ -41,65 +41,17 @@ tasks:
   rook:
     internal: true
     cmds:
-      - for: { var: nodes }
-        task: rook-data
-        vars:
-          node: '{{.ITEM}}'
-      - for: { var: m0 }
-        task: rook-disk
-        vars:
-          node: m0
-          serial: '{{.ITEM}}'
-      - for: { var: m1 }
-        task: rook-disk
-        vars:
-          node: m1
-          serial: '{{.ITEM}}'
-      - for: { var: m2 }
-        task: rook-disk
-        vars:
-          node: m2
-          serial: '{{.ITEM}}'
-    vars:
-      nodes: m0 m1 m2
-      m0: S72ANJ0TC02334R
-      m1: S72ANJ0TC01288Z
-      m2: S72ANJ0TC02325Y
-
-  rook-data:
-    internal: true
-    cmds:
-      - envsubst < <(cat {{.BOOTSTRAP_RESOURCES_DIR}}/rook-data-job.tmpl.yaml) | kubectl apply -f -
-      - bash {{.BOOTSTRAP_RESOURCES_DIR}}/wait-for-job.sh {{.job}} default
-      - kubectl --namespace default wait job/{{.job}} --for condition=complete --timeout=1m
-      - kubectl --namespace default logs job/{{.job}}
-      - kubectl --namespace default delete job {{.job}}
-    env:
-      job: '{{.job}}'
-      node: '{{.node}}'
-    vars:
-      job: wipe-data-{{.node}}
-    preconditions:
-      - test -f {{.BOOTSTRAP_RESOURCES_DIR}}/wait-for-job.sh
-      - test -f {{.BOOTSTRAP_RESOURCES_DIR}}/rook-data-job.tmpl.yaml
-
-  rook-disk:
-    internal: true
-    cmds:
-      - envsubst < <(cat {{.BOOTSTRAP_RESOURCES_DIR}}/rook-disk-job.tmpl.yaml) | kubectl apply -f -
-      - bash {{.BOOTSTRAP_RESOURCES_DIR}}/wait-for-job.sh {{.job}} default
-      - kubectl --namespace default wait job/{{.job}} --for condition=complete --timeout=1m
-      - kubectl --namespace default logs job/{{.job}}
-      -
kubectl --namespace default delete job {{.job}} + - minijinja-cli {{.BOOTSTRAP_RESOURCES_DIR}}/wipe-rook.yaml.j2 | kubectl apply --server-side --filename - + - until kubectl --namespace default get job/wipe-rook &>/dev/null; do sleep 5; done + - kubectl --namespace default wait job/wipe-rook --for=condition=complete --timeout=5m + - stern --namespace default job/wipe-rook --no-follow + - kubectl --namespace default delete job wipe-rook env: - disk: /dev/disk/by-id/nvme-SAMSUNG_MZQL23T8HCLS-00A07_{{.serial}} - job: '{{.job}}' - node: '{{.node}}' - vars: - job: wipe-disk-{{.node}}-{{.serial | lower}} + MODEL: SAMSUNG_MZQL23T8HCLS-00A07 + NODE_COUNT: + sh: talosctl config info --output json | jq --raw-output '.nodes | length' preconditions: - - test -f {{.BOOTSTRAP_RESOURCES_DIR}}/wait-for-job.sh - - test -f {{.BOOTSTRAP_RESOURCES_DIR}}/rook-disk-job.tmpl.yaml + - test -f {{.BOOTSTRAP_RESOURCES_DIR}}/wipe-rook.yaml.j2 flux: internal: true diff --git a/.taskfiles/bootstrap/resources/rook-data-job.tmpl.yaml b/.taskfiles/bootstrap/resources/rook-data-job.tmpl.yaml deleted file mode 100755 index 2bdccc321a..0000000000 --- a/.taskfiles/bootstrap/resources/rook-data-job.tmpl.yaml +++ /dev/null @@ -1,28 +0,0 @@ ---- -apiVersion: batch/v1 -kind: Job -metadata: - name: ${job} - namespace: default -spec: - ttlSecondsAfterFinished: 3600 - template: - spec: - automountServiceAccountToken: false - restartPolicy: Never - nodeName: ${node} - containers: - - name: main - image: docker.io/library/alpine:latest - command: ["/bin/sh", "-c"] - args: ["rm -rf /mnt/host_var/lib/rook"] - volumeMounts: - - mountPath: /mnt/host_var - name: host-var - securityContext: - privileged: true - resources: {} - volumes: - - name: host-var - hostPath: - path: /var diff --git a/.taskfiles/bootstrap/resources/rook-disk-job.tmpl.yaml b/.taskfiles/bootstrap/resources/rook-disk-job.tmpl.yaml deleted file mode 100755 index cf3ec452e5..0000000000 --- a/.taskfiles/bootstrap/resources/rook-disk-job.tmpl.yaml +++ /dev/null @@ -1,32 +0,0 @@ ---- -apiVersion: batch/v1 -kind: Job -metadata: - name: ${job} - namespace: default -spec: - ttlSecondsAfterFinished: 3600 - template: - spec: - automountServiceAccountToken: false - restartPolicy: Never - nodeName: ${node} - containers: - - name: main - image: docker.io/library/alpine:latest - command: ["/bin/sh", "-c"] - args: - - apk add --no-cache sgdisk util-linux util-linux-misc parted device-mapper; - sgdisk --zap-all ${disk}; - dd if=/dev/zero of=${disk} bs=1M count=100 oflag=direct; - blkdiscard ${disk}; - partprobe ${disk}; - volumeMounts: - - mountPath: ${disk} - name: disk - securityContext: - privileged: true - volumes: - - name: disk - hostPath: - path: ${disk} diff --git a/.taskfiles/bootstrap/resources/wait-for-job.sh b/.taskfiles/bootstrap/resources/wait-for-job.sh deleted file mode 100755 index dcb7a8c7c3..0000000000 --- a/.taskfiles/bootstrap/resources/wait-for-job.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/usr/bin/env bash - -JOB=$1 -NAMESPACE="${2:-default}" - -[[ -z "${JOB}" ]] && echo "Job name not specified" && exit 1 -while true; do - STATUS="$(kubectl --namespace "${NAMESPACE}" get pod -l job-name="${JOB}" -o jsonpath='{.items[*].status.phase}')" - if [ "${STATUS}" == "Pending" ]; then - break - fi - sleep 1 -done diff --git a/.taskfiles/bootstrap/resources/wipe-rook.yaml.j2 b/.taskfiles/bootstrap/resources/wipe-rook.yaml.j2 new file mode 100644 index 0000000000..b93ee2baa8 --- /dev/null +++ b/.taskfiles/bootstrap/resources/wipe-rook.yaml.j2 @@ -0,0 +1,59 @@ +--- +apiVersion: 
batch/v1 +kind: Job +metadata: + name: &app wipe-rook + namespace: default + labels: + app.kubernetes.io/name: *app +spec: + parallelism: {{ ENV.NODE_COUNT }} + template: + metadata: + labels: + app.kubernetes.io/name: *app + spec: + restartPolicy: Never + initContainers: + - name: data + image: docker.io/library/alpine:latest + command: ["/bin/sh", "-c"] + args: ["rm -rf /mnt/host_var/lib/rook"] + volumeMounts: + - mountPath: /mnt/host_var + name: host-var + securityContext: + privileged: true + resources: {} + containers: + - name: disk + image: docker.io/library/alpine:latest + command: ["/bin/sh", "-c"] + args: + - | + apk add --no-cache findutils nvme-cli; + DISK=$(find /dev/disk/by-id/ -iname "*{{ ENV.MODEL }}*" -not -name "*-part[0-9+]"); + echo "=== Wiping $DISK ==="; + nvme format --lbaf=1 $DISK --force; + nvme format --block-size=4096 $DISK --force; + securityContext: + privileged: true + volumeMounts: + - name: host-dev + mountPath: /dev/disk/by-id + resources: {} + volumes: + - name: host-var + hostPath: + path: /var + - name: host-dev + hostPath: + path: /dev/disk/by-id + type: Directory + topologySpreadConstraints: + - maxSkew: 1 + labelSelector: + matchLabels: + app.kubernetes.io/name: *app + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: DoNotSchedule diff --git a/.taskfiles/kubernetes/Taskfile.yaml b/.taskfiles/kubernetes/Taskfile.yaml index 1b94612d5e..7fb5dbdc71 100644 --- a/.taskfiles/kubernetes/Taskfile.yaml +++ b/.taskfiles/kubernetes/Taskfile.yaml @@ -30,8 +30,9 @@ tasks: desc: Run a privileged pod cmd: | kubectl run privileged-{{.node}} -i --rm --image=null \ - --overrides="$(yq {{.KUBERNETES_RESOURCES_DIR}}/privileged-pod.tmpl.yaml -o=json | envsubst)" + --overrides="$(yq {{.KUBERNETES_RESOURCES_DIR}}/privileged-pod.yaml.j2 -o json | minijinja-cli)" env: - node: '{{.node}}' + NODE: '{{.node}}' preconditions: - - test -f {{.KUBERNETES_RESOURCES_DIR}}/privileged-pod.tmpl.yaml + - test -f {{.KUBERNETES_RESOURCES_DIR}}/privileged-pod.yaml.j2 + - which kubectl minijinja-cli diff --git a/.taskfiles/kubernetes/resources/privileged-pod.tmpl.yaml b/.taskfiles/kubernetes/resources/privileged-pod.yaml.j2 similarity index 96% rename from .taskfiles/kubernetes/resources/privileged-pod.tmpl.yaml rename to .taskfiles/kubernetes/resources/privileged-pod.yaml.j2 index 220ef2021b..effe4ea13f 100644 --- a/.taskfiles/kubernetes/resources/privileged-pod.tmpl.yaml +++ b/.taskfiles/kubernetes/resources/privileged-pod.yaml.j2 @@ -22,7 +22,7 @@ spec: hostIPC: true hostNetwork: true hostPID: true - nodeName: ${node} + nodeName: "{{ ENV.NODE }}" restartPolicy: Never volumes: - name: rootfs diff --git a/.taskfiles/volsync/Taskfile.yaml b/.taskfiles/volsync/Taskfile.yaml index ad051acd7e..73354eca09 100644 --- a/.taskfiles/volsync/Taskfile.yaml +++ b/.taskfiles/volsync/Taskfile.yaml @@ -1,23 +1,11 @@ --- +# yaml-language-server: $schema=https://taskfile.dev/schema.json version: "3" -# This taskfile is used to manage certain VolSync tasks for a given application, limitations are described below. +# Taskfile used to manage certain VolSync tasks for a given application, limitations are as followed. # 1. Fluxtomization, HelmRelease, PVC, ReplicationSource all have the same name (e.g. plex) # 2. ReplicationSource and ReplicationDestination are a Restic repository -# 3. Applications are deployed as either a Kubernetes Deployment or StatefulSet -# 4. 
Each application only has one PVC that is being replicated - -x-env-vars: &env-vars - am: "{{.am}}" - app: "{{.app}}" - claim: "{{.claim}}" - controller: "{{.controller}}" - job: "{{.job}}" - ns: "{{.ns}}" - pgid: "{{.pgid}}" - previous: "{{.previous}}" - puid: "{{.puid}}" - sc: "{{.sc}}" +# 3. Each application only has one PVC that is being replicated vars: VOLSYNC_RESOURCES_DIR: "{{.ROOT_DIR}}/.taskfiles/volsync/resources" @@ -25,191 +13,96 @@ vars: tasks: state-*: - desc: Suspend or Resume Volsync - summary: | - state: resume or suspend (required) + desc: Suspend or resume Volsync cmds: - - flux {{.state}} kustomization volsync - - flux -n {{.ns}} {{.state}} helmrelease volsync - - kubectl -n {{.ns}} scale deployment volsync --replicas {{if eq "suspend" .state}}0{{else}}1{{end}} - env: *env-vars + - flux --namespace flux-system {{.state}} kustomization volsync + - flux --namespace volsync-system {{.state}} helmrelease volsync + - kubectl --namespace volsync-system scale deployment volsync --replicas {{if eq .state "suspend"}}0{{else}}1{{end}} vars: - ns: '{{.ns | default "volsync-system"}}' state: '{{index .MATCH 0}}' - - list: - desc: List snapshots for an application - summary: | - ns: Namespace the PVC is in (default: default) - app: Application to list snapshots for (required) - cmds: - - envsubst < <(cat {{.VOLSYNC_RESOURCES_DIR}}/list.tmpl.yaml) | kubectl apply -f - - - bash {{.VOLSYNC_RESOURCES_DIR}}/wait-for-job.sh {{.job}} {{.ns}} - - kubectl -n {{.ns}} wait job/{{.job}} --for condition=complete --timeout=1m - - kubectl -n {{.ns}} logs job/{{.job}} --container main - - kubectl -n {{.ns}} delete job {{.job}} - env: *env-vars - requires: - vars: ["app"] - vars: - ns: '{{.ns | default "default"}}' - job: volsync-list-{{.app}} preconditions: - - test -f {{.VOLSYNC_RESOURCES_DIR}}/wait-for-job.sh - - test -f {{.VOLSYNC_RESOURCES_DIR}}/list.tmpl.yaml - silent: true + - '[[ "{{.state}}" == "suspend" || "{{.state}}" == "resume" ]]' unlock: - desc: Unlock a Restic repository for an application - summary: | - ns: Namespace the PVC is in (default: default) - app: Application to unlock (required) + desc: Unlock all restic source repos cmds: - - envsubst < <(cat {{.VOLSYNC_RESOURCES_DIR}}/unlock.tmpl.yaml) | kubectl apply -f - - - bash {{.VOLSYNC_RESOURCES_DIR}}/wait-for-job.sh {{.job}} {{.ns}} - - kubectl -n {{.ns}} wait job/{{.job}} --for condition=complete --timeout=1m - - kubectl -n {{.ns}} logs job/{{.job}} --container unlock - - kubectl -n {{.ns}} delete job {{.job}} - env: *env-vars - requires: - vars: ["app"] + - for: { var: SOURCES, split: "\n" } + cmd: kubectl --namespace {{splitList "," .ITEM | first}} patch --field-manager=flux-client-side-apply replicationsources {{splitList "," .ITEM | last}} --type merge --patch "{\"spec\":{\"restic\":{\"unlock\":\"{{now | unixEpoch}}\"}}}" vars: - ns: '{{.ns | default "default"}}' - job: volsync-unlock-{{.app}} - preconditions: - - test -f {{.VOLSYNC_RESOURCES_DIR}}/wait-for-job.sh - - test -f {{.VOLSYNC_RESOURCES_DIR}}/unlock.tmpl.yaml - silent: true + SOURCES: + sh: kubectl get replicationsources --all-namespaces --no-headers --output=jsonpath='{range .items[*]}{.metadata.namespace},{.metadata.name}{"\n"}{end}' - # To run backup jobs in parallel for all replicationsources: - # - kubectl get replicationsources --all-namespaces --no-headers | awk '{print $2, $1}' | xargs --max-procs=4 -l bash -c 'task volsync:snapshot app=$0 ns=$1' snapshot: - desc: Snapshot a PVC for an application - summary: | - ns: Namespace the PVC is in (default: 
default) - app: Application to snapshot (required) + desc: Snapshot an app [ns=default] [app=required] cmds: - - kubectl -n {{.ns}} patch replicationsources {{.app}} --type merge -p '{"spec":{"trigger":{"manual":"{{.now}}"}}}' - - bash {{.VOLSYNC_RESOURCES_DIR}}/wait-for-job.sh {{.job}} {{.ns}} - - kubectl -n {{.ns}} wait job/{{.job}} --for condition=complete --timeout=120m - env: *env-vars - requires: - vars: ["app"] + - kubectl patch clusterpolicy volsync --type merge -p '{"spec":{"useServerSideApply":true}}' + - kubectl --namespace {{.ns}} patch replicationsources {{.app}} --type merge -p '{"spec":{"trigger":{"manual":"{{now | unixEpoch}}"}}}' + - until kubectl --namespace {{.ns}} get job/{{.job}} &>/dev/null; do sleep 5; done + - kubectl --namespace {{.ns}} wait job/{{.job}} --for=condition=complete --timeout=120m + - kubectl patch clusterpolicy volsync --type merge -p '{"spec":{"useServerSideApply":null}}' vars: - now: '{{now | date "150405"}}' ns: '{{.ns | default "default"}}' job: volsync-src-{{.app}} - controller: - sh: true && {{.VOLSYNC_RESOURCES_DIR}}/which-controller.sh {{.app}} {{.ns}} + requires: + vars: [app] preconditions: - - test -f {{.VOLSYNC_RESOURCES_DIR}}/which-controller.sh - - test -f {{.VOLSYNC_RESOURCES_DIR}}/wait-for-job.sh - - kubectl -n {{.ns}} get replicationsources {{.app}} + - kubectl --namespace {{.ns}} get replicationsources {{.app}} - # To run restore jobs in parallel for all replicationdestinations: - # - kubectl get replicationsources --all-namespaces --no-headers | awk '{print $2, $1}' | xargs --max-procs=4 -l bash -c 'task volsync:restore app=$0 ns=$1' restore: - desc: Restore a PVC for an application - summary: | - ns: Namespace the PVC is in (default: default) - app: Application to restore (required) - previous: Previous number of snapshots to restore (default: 2) + desc: Restore an app [ns=default] [app=required] [previous=required] cmds: - - { task: .suspend, vars: *env-vars } - - { task: .wipe, vars: *env-vars } - - { task: .restore, vars: *env-vars } - - { task: .resume, vars: *env-vars } - env: *env-vars - requires: - vars: ["app"] + # Suspend + - flux --namespace flux-system suspend kustomization {{.app}} + - flux --namespace {{.ns}} suspend helmrelease {{.app}} + - kubectl --namespace {{.ns}} scale {{.controller}}/{{.app}} --replicas 0 + - kubectl --namespace {{.ns}} wait pod --for=delete --selector="app.kubernetes.io/name={{.app}}" --timeout=5m + # Restore + - minijinja-cli {{.VOLSYNC_RESOURCES_DIR}}/replicationdestination.yaml.j2 | kubectl apply --server-side --filename - + - until kubectl --namespace {{.ns}} get job/volsync-dst-{{.app}}-manual &>/dev/null; do sleep 5; done + - kubectl --namespace {{.ns}} wait job/volsync-dst-{{.app}}-manual --for=condition=complete --timeout=120m + - kubectl --namespace {{.ns}} delete replicationdestination {{.app}}-manual + # Resume + - flux --namespace flux-system resume kustomization {{.app}} + - flux --namespace {{.ns}} resume helmrelease {{.app}} + - flux --namespace {{.ns}} reconcile helmrelease {{.app}} --force + - kubectl --namespace {{.ns}} wait pod --for=condition=ready --selector="app.kubernetes.io/name={{.app}}" --timeout=5m vars: ns: '{{.ns | default "default"}}' - previous: '{{.previous | default 2}}' - am: - sh: kubectl -n {{.ns}} get replicationsources/{{.app}} -o jsonpath="{.spec.restic.accessModes}" - claim: - sh: kubectl -n {{.ns}} get replicationsources/{{.app}} -o jsonpath="{.spec.sourcePVC}" controller: - sh: "{{.VOLSYNC_RESOURCES_DIR}}/which-controller.sh {{.app}} {{.ns}}" - pgid: - 
sh: kubectl -n {{.ns}} get replicationsources/{{.app}} -o jsonpath="{.spec.restic.moverSecurityContext.runAsGroup}" - puid: - sh: kubectl -n {{.ns}} get replicationsources/{{.app}} -o jsonpath="{.spec.restic.moverSecurityContext.runAsUser}" - sc: - sh: kubectl -n {{.ns}} get replicationsources/{{.app}} -o jsonpath="{.spec.restic.storageClassName}" - preconditions: - - test -f {{.VOLSYNC_RESOURCES_DIR}}/which-controller.sh - - cleanup: - desc: Delete volume populator PVCs in all namespaces - cmds: - - for: { var: dest } - cmd: | - {{- $items := (split "/" .ITEM) }} - kubectl delete pvc -n {{ $items._0 }} {{ $items._1 }} - - for: { var: cache } - cmd: | - {{- $items := (split "/" .ITEM) }} - kubectl delete pvc -n {{ $items._0 }} {{ $items._1 }} - - for: { var: snaps } - cmd: | - {{- $items := (split "/" .ITEM) }} - kubectl delete volumesnapshot -n {{ $items._0 }} {{ $items._1 }} - env: *env-vars - vars: - dest: - sh: kubectl get pvc --all-namespaces --no-headers | grep "volsync.*-dest" | awk '{print $1 "/" $2}' - cache: - sh: kubectl get pvc --all-namespaces --no-headers | grep "volsync.*-cache" | awk '{print $1 "/" $2}' - snaps: - sh: kubectl get volumesnapshot --all-namespaces --no-headers | grep "volsync.*" | awk '{print $1 "/" $2}' - - # Suspend the Flux ks and hr - .suspend: - internal: true - cmds: - - flux -n flux-system suspend kustomization {{.app}} - - flux -n {{.ns}} suspend helmrelease {{.app}} - - kubectl -n {{.ns}} scale {{.controller}} --replicas 0 - - kubectl -n {{.ns}} wait pod --for delete --selector="app.kubernetes.io/name={{.app}}" --timeout=2m - env: *env-vars - - # Wipe the PVC of all data - .wipe: - internal: true - cmds: - - envsubst < <(cat {{.VOLSYNC_RESOURCES_DIR}}/wipe.tmpl.yaml) | kubectl apply -f - - - bash {{.VOLSYNC_RESOURCES_DIR}}/wait-for-job.sh {{.job}} {{.ns}} - - kubectl -n {{.ns}} wait job/{{.job}} --for condition=complete --timeout=120m - - kubectl -n {{.ns}} logs job/{{.job}} --container main - - kubectl -n {{.ns}} delete job {{.job}} - env: *env-vars - vars: - job: volsync-wipe-{{.app}} + sh: kubectl --namespace {{.ns}} get deployment {{.app}} &>/dev/null && echo deployment || echo statefulset + env: + NS: '{{.ns}}' + APP: '{{.app}}' + PREVIOUS: '{{.previous}}' + CLAIM: + sh: kubectl --namespace {{.ns}} get replicationsources/{{.app}} --output=jsonpath="{.spec.sourcePVC}" + ACCESS_MODES: + sh: kubectl --namespace {{.ns}} get replicationsources/{{.app}} --output=jsonpath="{.spec.restic.accessModes}" + STORAGE_CLASS_NAME: + sh: kubectl --namespace {{.ns}} get replicationsources/{{.app}} --output=jsonpath="{.spec.restic.storageClassName}" + PUID: + sh: kubectl --namespace {{.ns}} get replicationsources/{{.app}} --output=jsonpath="{.spec.restic.moverSecurityContext.runAsUser}" + PGID: + sh: kubectl --namespace {{.ns}} get replicationsources/{{.app}} --output=jsonpath="{.spec.restic.moverSecurityContext.runAsGroup}" + requires: + vars: [app, previous] preconditions: - - test -f {{.VOLSYNC_RESOURCES_DIR}}/wipe.tmpl.yaml - - test -f {{.VOLSYNC_RESOURCES_DIR}}/wait-for-job.sh + - test -f {{.VOLSYNC_RESOURCES_DIR}}/replicationdestination.yaml.j2 - # Create VolSync replicationdestination CR to restore data - .restore: - internal: true + unlock-local: + desc: Unlock a restic source repo from local machine [ns=default] [app=required] cmds: - - envsubst < <(cat {{.VOLSYNC_RESOURCES_DIR}}/replicationdestination.tmpl.yaml) | kubectl apply -f - - - bash {{.VOLSYNC_RESOURCES_DIR}}/wait-for-job.sh volsync-dst-{{.job}} {{.ns}} - - kubectl -n {{.ns}} wait 
job/volsync-dst-{{.job}} --for condition=complete --timeout=120m - - kubectl -n {{.ns}} delete replicationdestination {{.job}} - env: *env-vars + - minijinja-cli {{.VOLSYNC_RESOURCES_DIR}}/unlock.yaml.j2 | kubectl apply --server-side --filename - + - until kubectl --namespace {{.ns}} get job/volsync-unlock-{{.app}} &>/dev/null; do sleep 5; done + - kubectl --namespace {{.ns}} wait job/volsync-unlock-{{.app}} --for condition=complete --timeout=5m + - stern --namespace {{.ns}} job/volsync-unlock-{{.app}} --no-follow + - kubectl --namespace {{.ns}} delete job volsync-unlock-{{.app}} vars: - job: volsync-restore-{{.app}} + ns: '{{.ns | default "default"}}' + env: + NS: '{{.ns}}' + APP: '{{.app}}' + requires: + vars: [app] preconditions: - - test -f {{.VOLSYNC_RESOURCES_DIR}}/replicationdestination.tmpl.yaml - - test -f {{.VOLSYNC_RESOURCES_DIR}}/wait-for-job.sh - - # Resume Flux ks and hr - .resume: - internal: true - cmds: - - flux -n {{.ns}} resume helmrelease {{.app}} - - flux -n flux-system resume kustomization {{.app}} - - flux -n {{.ns}} reconcile helmrelease {{.app}} --force - env: *env-vars + - test -f {{.VOLSYNC_RESOURCES_DIR}}/unlock.yaml.j2 diff --git a/.taskfiles/volsync/resources/list.tmpl.yaml b/.taskfiles/volsync/resources/list.tmpl.yaml deleted file mode 100755 index 30c329b2fe..0000000000 --- a/.taskfiles/volsync/resources/list.tmpl.yaml +++ /dev/null @@ -1,20 +0,0 @@ ---- -apiVersion: batch/v1 -kind: Job -metadata: - name: ${job} - namespace: ${ns} -spec: - ttlSecondsAfterFinished: 3600 - template: - spec: - automountServiceAccountToken: false - restartPolicy: OnFailure - containers: - - name: main - image: docker.io/restic/restic:latest - args: ["snapshots"] - envFrom: - - secretRef: - name: ${app}-restic-secret - resources: {} diff --git a/.taskfiles/volsync/resources/replicationdestination.tmpl.yaml b/.taskfiles/volsync/resources/replicationdestination.tmpl.yaml deleted file mode 100755 index 2fb0c334cf..0000000000 --- a/.taskfiles/volsync/resources/replicationdestination.tmpl.yaml +++ /dev/null @@ -1,30 +0,0 @@ ---- -apiVersion: volsync.backube/v1alpha1 -kind: ReplicationDestination -metadata: - name: ${job} - namespace: ${ns} -spec: - trigger: - manual: restore-once - restic: - repository: ${app}-restic-secret - destinationPVC: ${claim} - copyMethod: Direct - storageClassName: ${sc} - accessModes: ${am} - # IMPORTANT NOTE: - # Set to the last X number of snapshots to restore from - previous: ${previous} - # OR; - # IMPORTANT NOTE: - # On bootstrap set `restoreAsOf` to the time the old cluster was destroyed. - # This will essentially prevent volsync from trying to restore a backup - # from a application that started with default data in the PVC. - # Do not restore snapshots made after the following RFC3339 Timestamp. 
- # date --rfc-3339=seconds (--utc) - # restoreAsOf: "2022-12-10T16:00:00-05:00" - moverSecurityContext: - runAsUser: ${puid} - runAsGroup: ${pgid} - fsGroup: ${pgid} diff --git a/.taskfiles/volsync/resources/replicationdestination.yaml.j2 b/.taskfiles/volsync/resources/replicationdestination.yaml.j2 new file mode 100644 index 0000000000..4ce4813f26 --- /dev/null +++ b/.taskfiles/volsync/resources/replicationdestination.yaml.j2 @@ -0,0 +1,23 @@ +--- +apiVersion: volsync.backube/v1alpha1 +kind: ReplicationDestination +metadata: + name: {{ ENV.APP }}-manual + namespace: {{ ENV.NS }} +spec: + trigger: + manual: restore-once + restic: + repository: {{ ENV.APP }}-restic-secret + destinationPVC: {{ ENV.CLAIM }} + copyMethod: Direct + storageClassName: {{ ENV.STORAGE_CLASS_NAME }} + accessModes: {{ ENV.ACCESS_MODES }} + previous: {{ ENV.PREVIOUS }} + moverSecurityContext: + runAsUser: {{ ENV.PUID }} + runAsGroup: {{ ENV.PGID }} + fsGroup: {{ ENV.PGID }} + enableFileDeletion: true + cleanupCachePVC: true + cleanupTempPVC: true diff --git a/.taskfiles/volsync/resources/unlock.tmpl.yaml b/.taskfiles/volsync/resources/unlock.yaml.j2 old mode 100755 new mode 100644 similarity index 72% rename from .taskfiles/volsync/resources/unlock.tmpl.yaml rename to .taskfiles/volsync/resources/unlock.yaml.j2 index e081397363..00963e4f5b --- a/.taskfiles/volsync/resources/unlock.tmpl.yaml +++ b/.taskfiles/volsync/resources/unlock.yaml.j2 @@ -2,8 +2,8 @@ apiVersion: batch/v1 kind: Job metadata: - name: ${job} - namespace: ${ns} + name: volsync-unlock-{{ ENV.APP }} + namespace: {{ ENV.NS }} spec: ttlSecondsAfterFinished: 3600 template: @@ -11,10 +11,10 @@ spec: automountServiceAccountToken: false restartPolicy: OnFailure containers: - - name: unlock + - name: restic image: docker.io/restic/restic:latest args: ["unlock", "--remove-all"] envFrom: - secretRef: - name: ${app}-restic-secret + name: {{ ENV.APP }}-restic-secret resources: {} diff --git a/.taskfiles/volsync/resources/wait-for-job.sh b/.taskfiles/volsync/resources/wait-for-job.sh deleted file mode 100755 index 8cb3fbe190..0000000000 --- a/.taskfiles/volsync/resources/wait-for-job.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/usr/bin/env bash - -JOB=$1 -NAMESPACE="${2:-default}" - -[[ -z "${JOB}" ]] && echo "Job name not specified" && exit 1 -while true; do - STATUS="$(kubectl -n "${NAMESPACE}" get pod -l job-name="${JOB}" -o jsonpath='{.items[*].status.phase}')" - if [ "${STATUS}" == "Pending" ]; then - break - fi - sleep 1 -done diff --git a/.taskfiles/volsync/resources/which-controller.sh b/.taskfiles/volsync/resources/which-controller.sh deleted file mode 100755 index 311760ca30..0000000000 --- a/.taskfiles/volsync/resources/which-controller.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/usr/bin/env bash - -APP=$1 -NAMESPACE="${2:-default}" - -is_deployment() { - kubectl -n "${NAMESPACE}" get deployment "${APP}" >/dev/null 2>&1 -} - -is_statefulset() { - kubectl -n "${NAMESPACE}" get statefulset "${APP}" >/dev/null 2>&1 -} - -if is_deployment; then - echo "deployment.apps/${APP}" -elif is_statefulset; then - echo "statefulset.apps/${APP}" -else - echo "No deployment or statefulset found for ${APP}" - exit 1 -fi diff --git a/.taskfiles/volsync/resources/wipe.tmpl.yaml b/.taskfiles/volsync/resources/wipe.tmpl.yaml deleted file mode 100755 index ffc1cc75a7..0000000000 --- a/.taskfiles/volsync/resources/wipe.tmpl.yaml +++ /dev/null @@ -1,26 +0,0 @@ ---- -apiVersion: batch/v1 -kind: Job -metadata: - name: ${job} - namespace: ${ns} -spec: - ttlSecondsAfterFinished: 3600 - 
template: - spec: - automountServiceAccountToken: false - restartPolicy: OnFailure - containers: - - name: main - image: docker.io/library/alpine:latest - command: ["/bin/sh", "-c", "cd /config; find . -delete"] - volumeMounts: - - name: config - mountPath: /config - securityContext: - privileged: true - resources: {} - volumes: - - name: config - persistentVolumeClaim: - claimName: ${claim} diff --git a/kubernetes/apps/home/home-assistant/app/kustomization.yaml b/kubernetes/apps/home/home-assistant/app/kustomization.yaml index 7df2f9e464..bad014b1eb 100644 --- a/kubernetes/apps/home/home-assistant/app/kustomization.yaml +++ b/kubernetes/apps/home/home-assistant/app/kustomization.yaml @@ -4,4 +4,4 @@ kind: Kustomization resources: - ./externalsecret.yaml - ./helmrelease.yaml - - ./volsync.yaml + - ./pvc.yaml diff --git a/kubernetes/apps/home/home-assistant/app/pvc.yaml b/kubernetes/apps/home/home-assistant/app/pvc.yaml new file mode 100644 index 0000000000..03d198eacc --- /dev/null +++ b/kubernetes/apps/home/home-assistant/app/pvc.yaml @@ -0,0 +1,15 @@ +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: &name home-assistant +spec: + accessModes: ["ReadWriteOnce"] + dataSourceRef: + kind: ReplicationDestination + apiGroup: volsync.backube + name: *name + resources: + requests: + storage: 5Gi + storageClassName: ceph-block diff --git a/kubernetes/apps/home/home-assistant/app/volsync.yaml b/kubernetes/apps/home/home-assistant/app/volsync.yaml deleted file mode 100644 index 065f25714c..0000000000 --- a/kubernetes/apps/home/home-assistant/app/volsync.yaml +++ /dev/null @@ -1,86 +0,0 @@ ---- -apiVersion: external-secrets.io/v1beta1 -kind: ExternalSecret -metadata: - name: home-assistant-restic -spec: - secretStoreRef: - kind: ClusterSecretStore - name: onepassword-connect - target: - name: home-assistant-restic-secret - creationPolicy: Owner - template: - engineVersion: v2 - data: - RESTIC_REPOSITORY: "{{ .REPOSITORY_TEMPLATE }}/home-assistant" - RESTIC_PASSWORD: "{{ .RESTIC_PASSWORD }}" - AWS_ACCESS_KEY_ID: "{{ .AWS_ACCESS_KEY_ID }}" - AWS_SECRET_ACCESS_KEY: "{{ .AWS_SECRET_ACCESS_KEY }}" - dataFrom: - - extract: - key: volsync-restic-template ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: home-assistant -spec: - accessModes: ["ReadWriteOnce"] - dataSourceRef: - kind: ReplicationDestination - apiGroup: volsync.backube - name: home-assistant - resources: - requests: - storage: 5Gi - storageClassName: ceph-block ---- -apiVersion: volsync.backube/v1alpha1 -kind: ReplicationDestination -metadata: - name: home-assistant -spec: - trigger: - manual: restore-once - restic: - repository: home-assistant-restic-secret - copyMethod: Snapshot - accessModes: ["ReadWriteOnce"] - storageClassName: ceph-block - volumeSnapshotClassName: csi-ceph-block - cacheAccessModes: ["ReadWriteOnce"] - cacheCapacity: 8Gi - cacheStorageClassName: openebs-hostpath - moverSecurityContext: - runAsUser: 568 - runAsGroup: 568 - fsGroup: 568 - capacity: 5Gi # must match the PersistentVolumeClaim `.resources.requests.storage` size above ---- -apiVersion: volsync.backube/v1alpha1 -kind: ReplicationSource -metadata: - name: home-assistant -spec: - sourcePVC: home-assistant - trigger: - schedule: "15 */8 * * *" - restic: - pruneIntervalDays: 7 - repository: home-assistant-restic-secret - copyMethod: Snapshot - accessModes: ["ReadWriteOnce"] - storageClassName: ceph-block - volumeSnapshotClassName: csi-ceph-block - cacheAccessModes: ["ReadWriteOnce"] - cacheCapacity: 8Gi - cacheStorageClassName: 
openebs-hostpath - moverSecurityContext: - runAsUser: 568 - runAsGroup: 568 - fsGroup: 568 - retain: - hourly: 24 - daily: 7 - weekly: 5 diff --git a/kubernetes/apps/home/zigbee2mqtt/app/kustomization.yaml b/kubernetes/apps/home/zigbee2mqtt/app/kustomization.yaml index 016c51ad6d..c0bda12381 100644 --- a/kubernetes/apps/home/zigbee2mqtt/app/kustomization.yaml +++ b/kubernetes/apps/home/zigbee2mqtt/app/kustomization.yaml @@ -4,7 +4,7 @@ kind: Kustomization resources: - ./externalsecret.yaml - ./helmrelease.yaml - - ./volsync.yaml + - ./pvc.yaml configMapGenerator: - name: zigbee2mqtt-loki-rules files: diff --git a/kubernetes/apps/home/zigbee2mqtt/app/pvc.yaml b/kubernetes/apps/home/zigbee2mqtt/app/pvc.yaml new file mode 100644 index 0000000000..93ad83c49d --- /dev/null +++ b/kubernetes/apps/home/zigbee2mqtt/app/pvc.yaml @@ -0,0 +1,15 @@ +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: &name zigbee2mqtt +spec: + accessModes: ["ReadWriteOnce"] + dataSourceRef: + kind: ReplicationDestination + apiGroup: volsync.backube + name: *name + resources: + requests: + storage: 1Gi + storageClassName: ceph-block diff --git a/kubernetes/apps/home/zigbee2mqtt/app/volsync.yaml b/kubernetes/apps/home/zigbee2mqtt/app/volsync.yaml deleted file mode 100644 index 164d763a41..0000000000 --- a/kubernetes/apps/home/zigbee2mqtt/app/volsync.yaml +++ /dev/null @@ -1,86 +0,0 @@ ---- -apiVersion: external-secrets.io/v1beta1 -kind: ExternalSecret -metadata: - name: zigbee2mqtt-restic -spec: - secretStoreRef: - kind: ClusterSecretStore - name: onepassword-connect - target: - name: zigbee2mqtt-restic-secret - creationPolicy: Owner - template: - engineVersion: v2 - data: - RESTIC_REPOSITORY: "{{ .REPOSITORY_TEMPLATE }}/zigbee2mqtt" - RESTIC_PASSWORD: "{{ .RESTIC_PASSWORD }}" - AWS_ACCESS_KEY_ID: "{{ .AWS_ACCESS_KEY_ID }}" - AWS_SECRET_ACCESS_KEY: "{{ .AWS_SECRET_ACCESS_KEY }}" - dataFrom: - - extract: - key: volsync-restic-template ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: zigbee2mqtt -spec: - accessModes: ["ReadWriteOnce"] - dataSourceRef: - kind: ReplicationDestination - apiGroup: volsync.backube - name: zigbee2mqtt - resources: - requests: - storage: 1Gi - storageClassName: ceph-block ---- -apiVersion: volsync.backube/v1alpha1 -kind: ReplicationDestination -metadata: - name: zigbee2mqtt -spec: - trigger: - manual: restore-once - restic: - repository: zigbee2mqtt-restic-secret - copyMethod: Snapshot - accessModes: ["ReadWriteOnce"] - storageClassName: ceph-block - volumeSnapshotClassName: csi-ceph-block - cacheAccessModes: ["ReadWriteOnce"] - cacheCapacity: 8Gi - cacheStorageClassName: openebs-hostpath - moverSecurityContext: - runAsUser: 568 - runAsGroup: 568 - fsGroup: 568 - capacity: 1Gi # must match the PersistentVolumeClaim `.resources.requests.storage` size above ---- -apiVersion: volsync.backube/v1alpha1 -kind: ReplicationSource -metadata: - name: zigbee2mqtt -spec: - sourcePVC: zigbee2mqtt - trigger: - schedule: "15 */8 * * *" - restic: - pruneIntervalDays: 7 - repository: zigbee2mqtt-restic-secret - copyMethod: Snapshot - accessModes: ["ReadWriteOnce"] - storageClassName: ceph-block - volumeSnapshotClassName: csi-ceph-block - cacheAccessModes: ["ReadWriteOnce"] - cacheCapacity: 8Gi - cacheStorageClassName: openebs-hostpath - moverSecurityContext: - runAsUser: 568 - runAsGroup: 568 - fsGroup: 568 - retain: - hourly: 24 - daily: 7 - weekly: 5 diff --git a/kubernetes/apps/kyverno/kyverno/app/helmrelease.yaml b/kubernetes/apps/kyverno/kyverno/app/helmrelease.yaml index 
91aa568710..b434479c8d 100644 --- a/kubernetes/apps/kyverno/kyverno/app/helmrelease.yaml +++ b/kubernetes/apps/kyverno/kyverno/app/helmrelease.yaml @@ -31,31 +31,30 @@ spec: rbac: clusterRole: extraResources: - - apiGroups: - - "" - resources: - - pods - verbs: - - create - - update - - delete + - apiGroups: [""] + resources: ["pods"] + verbs: ["create", "update", "delete"] + - apiGroups: ["external-secrets.io"] + resources: ["externalsecrets"] + verbs: ["create", "update", "patch", "delete", "get", "list"] + - apiGroups: ["volsync.backube"] + resources: ["replicationsources", "replicationdestinations"] + verbs: ["create", "update", "patch", "delete", "get", "list"] serviceMonitor: enabled: true backgroundController: rbac: clusterRole: extraResources: - - apiGroups: - - "" - resources: - - pods - verbs: - - create - - update - - patch - - delete - - get - - list + - apiGroups: [""] + resources: ["pods"] + verbs: ["create", "update", "patch", "delete", "get", "list"] + - apiGroups: ["external-secrets.io"] + resources: ["externalsecrets"] + verbs: ["create", "update", "patch", "delete", "get", "list"] + - apiGroups: ["volsync.backube"] + resources: ["replicationsources", "replicationdestinations"] + verbs: ["create", "update", "patch", "delete", "get", "list"] resources: requests: cpu: 100m diff --git a/kubernetes/apps/kyverno/kyverno/policies/gatus.yaml b/kubernetes/apps/kyverno/kyverno/policies/gatus.yaml index b54aa6c7c6..9494348466 100644 --- a/kubernetes/apps/kyverno/kyverno/policies/gatus.yaml +++ b/kubernetes/apps/kyverno/kyverno/policies/gatus.yaml @@ -13,7 +13,6 @@ metadata: all Ingresses with the ingressClassName set to external. pod-policies.kyverno.io/autogen-controllers: none spec: - generateExisting: true rules: - name: *name match: @@ -36,21 +35,22 @@ spec: context: - name: GATUS_HOST variable: - value: '{{ request.object.metadata.annotations."gatus.io/host" || request.object.spec.rules[0].host }}' + value: "{{ request.object.metadata.annotations.\"gatus.io/host\" || request.object.spec.rules[0].host }}" jmesPath: "to_string(@)" - name: GATUS_NAME variable: - value: '{{ request.object.metadata.annotations."gatus.io/name" || request.object.metadata.name }}' + value: "{{ request.object.metadata.annotations.\"gatus.io/name\" || request.object.metadata.name }}" jmesPath: "to_string(@)" - name: GATUS_PATH variable: - value: '{{ request.object.metadata.annotations."gatus.io/path" || request.object.spec.rules[0].http.paths[0].path }}' + value: "{{ request.object.metadata.annotations.\"gatus.io/path\" || request.object.spec.rules[0].http.paths[0].path }}" jmesPath: "to_string(@)" - name: GATUS_STATUS_CODE variable: - value: '{{ request.object.metadata.annotations."gatus.io/status-code" || `200` }}' + value: "{{ request.object.metadata.annotations.\"gatus.io/status-code\" || '200' }}" jmesPath: "to_string(@)" generate: + generateExisting: true apiVersion: v1 kind: ConfigMap name: "{{ request.object.metadata.name }}-gatus-ep" diff --git a/kubernetes/apps/kyverno/kyverno/policies/kustomization.yaml b/kubernetes/apps/kyverno/kyverno/policies/kustomization.yaml index 15d774b918..a5a86b1c94 100644 --- a/kubernetes/apps/kyverno/kyverno/policies/kustomization.yaml +++ b/kubernetes/apps/kyverno/kyverno/policies/kustomization.yaml @@ -5,3 +5,4 @@ resources: - ./gatus.yaml - ./limits.yaml - ./ndots.yaml + - ./volsync.yaml diff --git a/kubernetes/apps/kyverno/kyverno/policies/volsync.yaml b/kubernetes/apps/kyverno/kyverno/policies/volsync.yaml new file mode 100644 index 0000000000..8db8356ac4 --- 
/dev/null +++ b/kubernetes/apps/kyverno/kyverno/policies/volsync.yaml @@ -0,0 +1,129 @@ +--- +apiVersion: kyverno.io/v1 +kind: ClusterPolicy +metadata: + name: volsync + annotations: + policies.kyverno.io/title: Volume Synchronization + policies.kyverno.io/category: Storage + policies.kyverno.io/severity: low + policies.kyverno.io/subject: Pod + policies.kyverno.io/description: >- + This policy will automatically synchronize volumes for all Pods with + the volumeSynchronization set to true. + pod-policies.kyverno.io/autogen-controllers: none +spec: + rules: + - &rule + name: volsync-external-secret + match: + resources: + kinds: + - PersistentVolumeClaim + preconditions: + all: + - key: "{{ request.object.spec.dataSourceRef.kind }}" + operator: Equals + value: ReplicationDestination + - key: "{{ request.object.spec.dataSourceRef.apiGroup }}" + operator: Equals + value: volsync.backube + - key: "{{ request.object.spec.dataSourceRef.name }}" + operator: Equals + value: "{{ request.object.metadata.name }}" + context: + - name: VOLSYNC_USER + variable: + value: "{{ request.object.metadata.annotations.\"volsync.io/user\" || '568' }}" + jmesPath: "to_number(@)" + - name: VOLSYNC_GROUP + variable: + value: "{{ request.object.metadata.annotations.\"volsync.io/group\" || '568' }}" + jmesPath: "to_number(@)" + - name: VOLSYNC_CACHE + variable: + value: "{{ request.object.metadata.annotations.\"volsync.io/cache\" || '8Gi' }}" + jmesPath: "to_string(@)" + generate: + generateExisting: true + apiVersion: external-secrets.io/v1beta1 + kind: ExternalSecret + name: "{{ request.object.metadata.name }}-restic" + namespace: "{{ request.object.metadata.namespace }}" + synchronize: true + data: + spec: + secretStoreRef: + kind: ClusterSecretStore + name: onepassword-connect + target: + name: "{{ request.object.metadata.name }}-restic-secret" + creationPolicy: Owner + template: + engineVersion: v2 + data: + RESTIC_REPOSITORY: "\\{{ .REPOSITORY_TEMPLATE }}/{{ request.object.metadata.name }}" + RESTIC_PASSWORD: "\\{{ .RESTIC_PASSWORD }}" + AWS_ACCESS_KEY_ID: "\\{{ .AWS_ACCESS_KEY_ID }}" + AWS_SECRET_ACCESS_KEY: "\\{{ .AWS_SECRET_ACCESS_KEY }}" + dataFrom: + - extract: + key: volsync-restic-template + - <<: *rule + name: volsync-replication-destination + generate: + generateExisting: true + apiVersion: volsync.backube/v1alpha1 + kind: ReplicationDestination + name: "{{ request.object.metadata.name }}" + namespace: "{{ request.object.metadata.namespace }}" + synchronize: true + data: + spec: + trigger: + manual: restore-once + restic: + repository: "{{ request.object.metadata.name }}-restic-secret" + copyMethod: Snapshot + accessModes: "{{ request.object.spec.accessModes }}" + storageClassName: "{{ request.object.spec.storageClassName }}" + volumeSnapshotClassName: "csi-{{ request.object.spec.storageClassName }}" + cacheAccessModes: ["ReadWriteOnce"] + cacheCapacity: "{{ VOLSYNC_CACHE }}" + cacheStorageClassName: openebs-hostpath + moverSecurityContext: + runAsUser: "{{ VOLSYNC_USER }}" + runAsGroup: "{{ VOLSYNC_GROUP }}" + fsGroup: "{{ VOLSYNC_GROUP }}" + capacity: "{{ request.object.spec.resources.requests.storage }}" + - <<: *rule + name: volsync-replication-source + generate: + generateExisting: true + apiVersion: volsync.backube/v1alpha1 + kind: ReplicationSource + name: "{{ request.object.metadata.name }}" + namespace: "{{ request.object.metadata.namespace }}" + synchronize: true + data: + spec: + sourcePVC: "{{ request.object.metadata.name }}" + trigger: + schedule: "0 * * * *" + restic: + pruneIntervalDays: 
14 + repository: "{{ request.object.metadata.name }}-restic-secret" + copyMethod: Snapshot + accessModes: "{{ request.object.spec.accessModes }}" + storageClassName: "{{ request.object.spec.storageClassName }}" + volumeSnapshotClassName: "csi-{{ request.object.spec.storageClassName }}" + cacheAccessModes: ["ReadWriteOnce"] + cacheCapacity: "{{ VOLSYNC_CACHE }}" + cacheStorageClassName: openebs-hostpath + moverSecurityContext: + runAsUser: "{{ VOLSYNC_USER }}" + runAsGroup: "{{ VOLSYNC_GROUP }}" + fsGroup: "{{ VOLSYNC_GROUP }}" + retain: + hourly: 24 + daily: 7 diff --git a/kubernetes/apps/media/bazarr/app/kustomization.yaml b/kubernetes/apps/media/bazarr/app/kustomization.yaml index 97c37fd959..a1c661c70b 100644 --- a/kubernetes/apps/media/bazarr/app/kustomization.yaml +++ b/kubernetes/apps/media/bazarr/app/kustomization.yaml @@ -4,7 +4,7 @@ kind: Kustomization resources: - ./externalsecret.yaml - ./helmrelease.yaml - - ./volsync.yaml + - ./pvc.yaml configMapGenerator: - name: bazarr-scripts files: diff --git a/kubernetes/apps/media/bazarr/app/pvc.yaml b/kubernetes/apps/media/bazarr/app/pvc.yaml new file mode 100644 index 0000000000..6d35c95b40 --- /dev/null +++ b/kubernetes/apps/media/bazarr/app/pvc.yaml @@ -0,0 +1,15 @@ +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: &name bazarr +spec: + accessModes: ["ReadWriteOnce"] + dataSourceRef: + kind: ReplicationDestination + apiGroup: volsync.backube + name: *name + resources: + requests: + storage: 2Gi + storageClassName: ceph-block diff --git a/kubernetes/apps/media/bazarr/app/volsync.yaml b/kubernetes/apps/media/bazarr/app/volsync.yaml deleted file mode 100644 index 81482b3434..0000000000 --- a/kubernetes/apps/media/bazarr/app/volsync.yaml +++ /dev/null @@ -1,86 +0,0 @@ ---- -apiVersion: external-secrets.io/v1beta1 -kind: ExternalSecret -metadata: - name: bazarr-restic -spec: - secretStoreRef: - kind: ClusterSecretStore - name: onepassword-connect - target: - name: bazarr-restic-secret - creationPolicy: Owner - template: - engineVersion: v2 - data: - RESTIC_REPOSITORY: "{{ .REPOSITORY_TEMPLATE }}/bazarr" - RESTIC_PASSWORD: "{{ .RESTIC_PASSWORD }}" - AWS_ACCESS_KEY_ID: "{{ .AWS_ACCESS_KEY_ID }}" - AWS_SECRET_ACCESS_KEY: "{{ .AWS_SECRET_ACCESS_KEY }}" - dataFrom: - - extract: - key: volsync-restic-template ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: bazarr -spec: - accessModes: ["ReadWriteOnce"] - dataSourceRef: - kind: ReplicationDestination - apiGroup: volsync.backube - name: bazarr - resources: - requests: - storage: 2Gi - storageClassName: ceph-block ---- -apiVersion: volsync.backube/v1alpha1 -kind: ReplicationDestination -metadata: - name: bazarr -spec: - trigger: - manual: restore-once - restic: - repository: bazarr-restic-secret - copyMethod: Snapshot - accessModes: ["ReadWriteOnce"] - storageClassName: ceph-block - volumeSnapshotClassName: csi-ceph-block - cacheAccessModes: ["ReadWriteOnce"] - cacheCapacity: 8Gi - cacheStorageClassName: openebs-hostpath - moverSecurityContext: - runAsUser: 568 - runAsGroup: 568 - fsGroup: 568 - capacity: 2Gi # must match the PersistentVolumeClaim `.resources.requests.storage` size above ---- -apiVersion: volsync.backube/v1alpha1 -kind: ReplicationSource -metadata: - name: bazarr -spec: - sourcePVC: bazarr - trigger: - schedule: "15 */8 * * *" - restic: - pruneIntervalDays: 7 - repository: bazarr-restic-secret - copyMethod: Snapshot - accessModes: ["ReadWriteOnce"] - storageClassName: ceph-block - volumeSnapshotClassName: csi-ceph-block - cacheAccessModes: 
["ReadWriteOnce"] - cacheCapacity: 8Gi - cacheStorageClassName: openebs-hostpath - moverSecurityContext: - runAsUser: 568 - runAsGroup: 568 - fsGroup: 568 - retain: - hourly: 24 - daily: 7 - weekly: 5 diff --git a/kubernetes/apps/media/overseerr/app/kustomization.yaml b/kubernetes/apps/media/overseerr/app/kustomization.yaml index 2920d15c8b..c82ceb8bc3 100644 --- a/kubernetes/apps/media/overseerr/app/kustomization.yaml +++ b/kubernetes/apps/media/overseerr/app/kustomization.yaml @@ -4,4 +4,3 @@ kind: Kustomization resources: - ./helmrelease.yaml - ./pvc.yaml - - ./volsync.yaml diff --git a/kubernetes/apps/media/overseerr/app/pvc.yaml b/kubernetes/apps/media/overseerr/app/pvc.yaml index 30adee028e..f62b3bdcd1 100644 --- a/kubernetes/apps/media/overseerr/app/pvc.yaml +++ b/kubernetes/apps/media/overseerr/app/pvc.yaml @@ -1,6 +1,21 @@ --- apiVersion: v1 kind: PersistentVolumeClaim +metadata: + name: &name overseerr +spec: + accessModes: ["ReadWriteOnce"] + dataSourceRef: + kind: ReplicationDestination + apiGroup: volsync.backube + name: *name + resources: + requests: + storage: 2Gi + storageClassName: ceph-block +--- +apiVersion: v1 +kind: PersistentVolumeClaim metadata: name: overseerr-cache spec: diff --git a/kubernetes/apps/media/overseerr/app/volsync.yaml b/kubernetes/apps/media/overseerr/app/volsync.yaml deleted file mode 100644 index 2629511cff..0000000000 --- a/kubernetes/apps/media/overseerr/app/volsync.yaml +++ /dev/null @@ -1,86 +0,0 @@ ---- -apiVersion: external-secrets.io/v1beta1 -kind: ExternalSecret -metadata: - name: overseerr-restic -spec: - secretStoreRef: - kind: ClusterSecretStore - name: onepassword-connect - target: - name: overseerr-restic-secret - creationPolicy: Owner - template: - engineVersion: v2 - data: - RESTIC_REPOSITORY: "{{ .REPOSITORY_TEMPLATE }}/overseerr" - RESTIC_PASSWORD: "{{ .RESTIC_PASSWORD }}" - AWS_ACCESS_KEY_ID: "{{ .AWS_ACCESS_KEY_ID }}" - AWS_SECRET_ACCESS_KEY: "{{ .AWS_SECRET_ACCESS_KEY }}" - dataFrom: - - extract: - key: volsync-restic-template ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: overseerr -spec: - accessModes: ["ReadWriteOnce"] - dataSourceRef: - kind: ReplicationDestination - apiGroup: volsync.backube - name: overseerr - resources: - requests: - storage: 2Gi - storageClassName: ceph-block ---- -apiVersion: volsync.backube/v1alpha1 -kind: ReplicationDestination -metadata: - name: overseerr -spec: - trigger: - manual: restore-once - restic: - repository: overseerr-restic-secret - copyMethod: Snapshot - accessModes: ["ReadWriteOnce"] - storageClassName: ceph-block - volumeSnapshotClassName: csi-ceph-block - cacheAccessModes: ["ReadWriteOnce"] - cacheCapacity: 8Gi - cacheStorageClassName: openebs-hostpath - moverSecurityContext: - runAsUser: 568 - runAsGroup: 568 - fsGroup: 568 - capacity: 2Gi # must match the PersistentVolumeClaim `.resources.requests.storage` size above ---- -apiVersion: volsync.backube/v1alpha1 -kind: ReplicationSource -metadata: - name: overseerr -spec: - sourcePVC: overseerr - trigger: - schedule: "15 */8 * * *" - restic: - pruneIntervalDays: 7 - repository: overseerr-restic-secret - copyMethod: Snapshot - accessModes: ["ReadWriteOnce"] - storageClassName: ceph-block - volumeSnapshotClassName: csi-ceph-block - cacheAccessModes: ["ReadWriteOnce"] - cacheCapacity: 8Gi - cacheStorageClassName: openebs-hostpath - moverSecurityContext: - runAsUser: 568 - runAsGroup: 568 - fsGroup: 568 - retain: - hourly: 24 - daily: 7 - weekly: 5 diff --git a/kubernetes/apps/media/plex/app/kustomization.yaml 
b/kubernetes/apps/media/plex/app/kustomization.yaml
index 87a40e3247..d55e221bbd 100644
--- a/kubernetes/apps/media/plex/app/kustomization.yaml
+++ b/kubernetes/apps/media/plex/app/kustomization.yaml
@@ -4,7 +4,6 @@ kind: Kustomization
 resources:
   - ./helmrelease.yaml
   - ./pvc.yaml
-  - ./volsync.yaml
 configMapGenerator:
   - name: plex-loki-rules
     files:
diff --git a/kubernetes/apps/media/plex/app/pvc.yaml b/kubernetes/apps/media/plex/app/pvc.yaml
index 9398813ba2..e7dd4c4df2 100644
--- a/kubernetes/apps/media/plex/app/pvc.yaml
+++ b/kubernetes/apps/media/plex/app/pvc.yaml
@@ -1,6 +1,21 @@
 ---
 apiVersion: v1
 kind: PersistentVolumeClaim
+metadata:
+  name: &name plex
+spec:
+  accessModes: ["ReadWriteOnce"]
+  dataSourceRef:
+    kind: ReplicationDestination
+    apiGroup: volsync.backube
+    name: *name
+  resources:
+    requests:
+      storage: 50Gi
+  storageClassName: ceph-block
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
 metadata:
   name: plex-cache
 spec:
diff --git a/kubernetes/apps/media/plex/app/volsync.yaml b/kubernetes/apps/media/plex/app/volsync.yaml
deleted file mode 100644
index aaf7ec2262..0000000000
--- a/kubernetes/apps/media/plex/app/volsync.yaml
+++ /dev/null
@@ -1,86 +0,0 @@
----
-apiVersion: external-secrets.io/v1beta1
-kind: ExternalSecret
-metadata:
-  name: plex-restic
-spec:
-  secretStoreRef:
-    kind: ClusterSecretStore
-    name: onepassword-connect
-  target:
-    name: plex-restic-secret
-    creationPolicy: Owner
-    template:
-      engineVersion: v2
-      data:
-        RESTIC_REPOSITORY: "{{ .REPOSITORY_TEMPLATE }}/plex"
-        RESTIC_PASSWORD: "{{ .RESTIC_PASSWORD }}"
-        AWS_ACCESS_KEY_ID: "{{ .AWS_ACCESS_KEY_ID }}"
-        AWS_SECRET_ACCESS_KEY: "{{ .AWS_SECRET_ACCESS_KEY }}"
-  dataFrom:
-    - extract:
-        key: volsync-restic-template
----
-apiVersion: v1
-kind: PersistentVolumeClaim
-metadata:
-  name: plex
-spec:
-  accessModes: ["ReadWriteOnce"]
-  dataSourceRef:
-    kind: ReplicationDestination
-    apiGroup: volsync.backube
-    name: plex
-  resources:
-    requests:
-      storage: 50Gi
-  storageClassName: ceph-block
----
-apiVersion: volsync.backube/v1alpha1
-kind: ReplicationDestination
-metadata:
-  name: plex
-spec:
-  trigger:
-    manual: restore-once
-  restic:
-    repository: plex-restic-secret
-    copyMethod: Snapshot
-    accessModes: ["ReadWriteOnce"]
-    storageClassName: ceph-block
-    volumeSnapshotClassName: csi-ceph-block
-    cacheAccessModes: ["ReadWriteOnce"]
-    cacheCapacity: 8Gi
-    cacheStorageClassName: openebs-hostpath
-    moverSecurityContext:
-      runAsUser: 568
-      runAsGroup: 568
-      fsGroup: 568
-    capacity: 50Gi # must match the PersistentVolumeClaim `.resources.requests.storage` size above
----
-apiVersion: volsync.backube/v1alpha1
-kind: ReplicationSource
-metadata:
-  name: plex
-spec:
-  sourcePVC: plex
-  trigger:
-    schedule: "15 */8 * * *"
-  restic:
-    pruneIntervalDays: 7
-    repository: plex-restic-secret
-    copyMethod: Snapshot
-    accessModes: ["ReadWriteOnce"]
-    storageClassName: ceph-block
-    volumeSnapshotClassName: csi-ceph-block
-    cacheAccessModes: ["ReadWriteOnce"]
-    cacheCapacity: 8Gi
-    cacheStorageClassName: openebs-hostpath
-    moverSecurityContext:
-      runAsUser: 568
-      runAsGroup: 568
-      fsGroup: 568
-  retain:
-    hourly: 24
-    daily: 7
-    weekly: 5
diff --git a/kubernetes/apps/media/qbittorrent/app/kustomization.yaml b/kubernetes/apps/media/qbittorrent/app/kustomization.yaml
index 5f9a766628..f8aa167af9 100644
--- a/kubernetes/apps/media/qbittorrent/app/kustomization.yaml
+++ b/kubernetes/apps/media/qbittorrent/app/kustomization.yaml
@@ -3,7 +3,7 @@ apiVersion: kustomize.config.k8s.io/v1beta1
 kind: Kustomization
 resources:
   - ./helmrelease.yaml
-  - ./volsync.yaml
+  - ./pvc.yaml
 configMapGenerator:
   - name: qbittorrent-loki-rules
     files:
diff --git a/kubernetes/apps/media/qbittorrent/app/pvc.yaml b/kubernetes/apps/media/qbittorrent/app/pvc.yaml
new file mode 100644
index 0000000000..ca50d1543d
--- /dev/null
+++ b/kubernetes/apps/media/qbittorrent/app/pvc.yaml
@@ -0,0 +1,15 @@
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: &name qbittorrent
+spec:
+  accessModes: ["ReadWriteMany"]
+  dataSourceRef:
+    kind: ReplicationDestination
+    apiGroup: volsync.backube
+    name: *name
+  resources:
+    requests:
+      storage: 2Gi
+  storageClassName: ceph-filesystem
diff --git a/kubernetes/apps/media/qbittorrent/app/volsync.yaml b/kubernetes/apps/media/qbittorrent/app/volsync.yaml
deleted file mode 100644
index b8494df4a6..0000000000
--- a/kubernetes/apps/media/qbittorrent/app/volsync.yaml
+++ /dev/null
@@ -1,86 +0,0 @@
----
-apiVersion: external-secrets.io/v1beta1
-kind: ExternalSecret
-metadata:
-  name: qbittorrent-restic
-spec:
-  secretStoreRef:
-    kind: ClusterSecretStore
-    name: onepassword-connect
-  target:
-    name: qbittorrent-restic-secret
-    creationPolicy: Owner
-    template:
-      engineVersion: v2
-      data:
-        RESTIC_REPOSITORY: "{{ .REPOSITORY_TEMPLATE }}/qbittorrent"
-        RESTIC_PASSWORD: "{{ .RESTIC_PASSWORD }}"
-        AWS_ACCESS_KEY_ID: "{{ .AWS_ACCESS_KEY_ID }}"
-        AWS_SECRET_ACCESS_KEY: "{{ .AWS_SECRET_ACCESS_KEY }}"
-  dataFrom:
-    - extract:
-        key: volsync-restic-template
----
-apiVersion: v1
-kind: PersistentVolumeClaim
-metadata:
-  name: qbittorrent
-spec:
-  accessModes: ["ReadWriteMany"]
-  dataSourceRef:
-    kind: ReplicationDestination
-    apiGroup: volsync.backube
-    name: qbittorrent
-  resources:
-    requests:
-      storage: 2Gi
-  storageClassName: ceph-filesystem
----
-apiVersion: volsync.backube/v1alpha1
-kind: ReplicationDestination
-metadata:
-  name: qbittorrent
-spec:
-  trigger:
-    manual: restore-once
-  restic:
-    repository: qbittorrent-restic-secret
-    copyMethod: Snapshot
-    accessModes: ["ReadWriteMany"]
-    storageClassName: ceph-filesystem
-    volumeSnapshotClassName: csi-ceph-filesystem
-    cacheAccessModes: ["ReadWriteOnce"]
-    cacheCapacity: 8Gi
-    cacheStorageClassName: openebs-hostpath
-    moverSecurityContext:
-      runAsUser: 568
-      runAsGroup: 568
-      fsGroup: 568
-    capacity: 2Gi # must match the PersistentVolumeClaim `.resources.requests.storage` size above
----
-apiVersion: volsync.backube/v1alpha1
-kind: ReplicationSource
-metadata:
-  name: qbittorrent
-spec:
-  sourcePVC: qbittorrent
-  trigger:
-    schedule: "15 */8 * * *"
-  restic:
-    pruneIntervalDays: 7
-    repository: qbittorrent-restic-secret
-    copyMethod: Snapshot
-    accessModes: ["ReadWriteMany"]
-    storageClassName: ceph-filesystem
-    volumeSnapshotClassName: csi-ceph-filesystem
-    cacheAccessModes: ["ReadWriteOnce"]
-    cacheCapacity: 8Gi
-    cacheStorageClassName: openebs-hostpath
-    moverSecurityContext:
-      runAsUser: 568
-      runAsGroup: 568
-      fsGroup: 568
-  retain:
-    hourly: 24
-    daily: 7
-    weekly: 5
diff --git a/kubernetes/apps/media/recyclarr/app/kustomization.yaml b/kubernetes/apps/media/recyclarr/app/kustomization.yaml
index 020d6f4586..7dbbcc71ad 100644
--- a/kubernetes/apps/media/recyclarr/app/kustomization.yaml
+++ b/kubernetes/apps/media/recyclarr/app/kustomization.yaml
@@ -4,7 +4,7 @@ kind: Kustomization
 resources:
   - ./externalsecret.yaml
   - ./helmrelease.yaml
-  - ./volsync.yaml
+  - ./pvc.yaml
 configMapGenerator:
   - name: recyclarr-configmap
     files:
diff --git a/kubernetes/apps/media/recyclarr/app/pvc.yaml b/kubernetes/apps/media/recyclarr/app/pvc.yaml
new file mode 100644
index 0000000000..dd933b6a67
--- /dev/null
+++ b/kubernetes/apps/media/recyclarr/app/pvc.yaml
@@ -0,0 +1,15 @@
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: &name recyclarr
+spec:
+  accessModes: ["ReadWriteOnce"]
+  dataSourceRef:
+    kind: ReplicationDestination
+    apiGroup: volsync.backube
+    name: *name
+  resources:
+    requests:
+      storage: 2Gi
+  storageClassName: ceph-block
diff --git a/kubernetes/apps/media/recyclarr/app/volsync.yaml b/kubernetes/apps/media/recyclarr/app/volsync.yaml
deleted file mode 100644
index b8ee19255a..0000000000
--- a/kubernetes/apps/media/recyclarr/app/volsync.yaml
+++ /dev/null
@@ -1,86 +0,0 @@
----
-apiVersion: external-secrets.io/v1beta1
-kind: ExternalSecret
-metadata:
-  name: recyclarr-restic
-spec:
-  secretStoreRef:
-    kind: ClusterSecretStore
-    name: onepassword-connect
-  target:
-    name: recyclarr-restic-secret
-    creationPolicy: Owner
-    template:
-      engineVersion: v2
-      data:
-        RESTIC_REPOSITORY: "{{ .REPOSITORY_TEMPLATE }}/recyclarr"
-        RESTIC_PASSWORD: "{{ .RESTIC_PASSWORD }}"
-        AWS_ACCESS_KEY_ID: "{{ .AWS_ACCESS_KEY_ID }}"
-        AWS_SECRET_ACCESS_KEY: "{{ .AWS_SECRET_ACCESS_KEY }}"
-  dataFrom:
-    - extract:
-        key: volsync-restic-template
----
-apiVersion: v1
-kind: PersistentVolumeClaim
-metadata:
-  name: recyclarr
-spec:
-  accessModes: ["ReadWriteOnce"]
-  dataSourceRef:
-    kind: ReplicationDestination
-    apiGroup: volsync.backube
-    name: recyclarr
-  resources:
-    requests:
-      storage: 2Gi
-  storageClassName: ceph-block
----
-apiVersion: volsync.backube/v1alpha1
-kind: ReplicationDestination
-metadata:
-  name: recyclarr
-spec:
-  trigger:
-    manual: restore-once
-  restic:
-    repository: recyclarr-restic-secret
-    copyMethod: Snapshot
-    accessModes: ["ReadWriteOnce"]
-    storageClassName: ceph-block
-    volumeSnapshotClassName: csi-ceph-block
-    cacheAccessModes: ["ReadWriteOnce"]
-    cacheCapacity: 8Gi
-    cacheStorageClassName: openebs-hostpath
-    moverSecurityContext:
-      runAsUser: 568
-      runAsGroup: 568
-      fsGroup: 568
-    capacity: 2Gi # must match the PersistentVolumeClaim `.resources.requests.storage` size above
----
-apiVersion: volsync.backube/v1alpha1
-kind: ReplicationSource
-metadata:
-  name: recyclarr
-spec:
-  sourcePVC: recyclarr
-  trigger:
-    schedule: "15 */8 * * *"
-  restic:
-    pruneIntervalDays: 7
-    repository: recyclarr-restic-secret
-    copyMethod: Snapshot
-    accessModes: ["ReadWriteOnce"]
-    storageClassName: ceph-block
-    volumeSnapshotClassName: csi-ceph-block
-    cacheAccessModes: ["ReadWriteOnce"]
-    cacheCapacity: 8Gi
-    cacheStorageClassName: openebs-hostpath
-    moverSecurityContext:
-      runAsUser: 568
-      runAsGroup: 568
-      fsGroup: 568
-  retain:
-    hourly: 24
-    daily: 7
-    weekly: 5
diff --git a/kubernetes/apps/media/sabnzbd/app/kustomization.yaml b/kubernetes/apps/media/sabnzbd/app/kustomization.yaml
index 7df2f9e464..bad014b1eb 100644
--- a/kubernetes/apps/media/sabnzbd/app/kustomization.yaml
+++ b/kubernetes/apps/media/sabnzbd/app/kustomization.yaml
@@ -4,4 +4,4 @@ kind: Kustomization
 resources:
   - ./externalsecret.yaml
   - ./helmrelease.yaml
-  - ./volsync.yaml
+  - ./pvc.yaml
diff --git a/kubernetes/apps/media/sabnzbd/app/pvc.yaml b/kubernetes/apps/media/sabnzbd/app/pvc.yaml
new file mode 100644
index 0000000000..1ec355db91
--- /dev/null
+++ b/kubernetes/apps/media/sabnzbd/app/pvc.yaml
@@ -0,0 +1,15 @@
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: &name sabnzbd
+spec:
+  accessModes: ["ReadWriteOnce"]
+  dataSourceRef:
+    kind: ReplicationDestination
+    apiGroup: volsync.backube
+    name: *name
+  resources:
+    requests:
+      storage: 2Gi
+  storageClassName: ceph-block
diff --git a/kubernetes/apps/media/sabnzbd/app/volsync.yaml b/kubernetes/apps/media/sabnzbd/app/volsync.yaml
deleted file mode 100644
index a09fa9c3a1..0000000000
--- a/kubernetes/apps/media/sabnzbd/app/volsync.yaml
+++ /dev/null
@@ -1,86 +0,0 @@
----
-apiVersion: external-secrets.io/v1beta1
-kind: ExternalSecret
-metadata:
-  name: sabnzbd-restic
-spec:
-  secretStoreRef:
-    kind: ClusterSecretStore
-    name: onepassword-connect
-  target:
-    name: sabnzbd-restic-secret
-    creationPolicy: Owner
-    template:
-      engineVersion: v2
-      data:
-        RESTIC_REPOSITORY: "{{ .REPOSITORY_TEMPLATE }}/sabnzbd"
-        RESTIC_PASSWORD: "{{ .RESTIC_PASSWORD }}"
-        AWS_ACCESS_KEY_ID: "{{ .AWS_ACCESS_KEY_ID }}"
-        AWS_SECRET_ACCESS_KEY: "{{ .AWS_SECRET_ACCESS_KEY }}"
-  dataFrom:
-    - extract:
-        key: volsync-restic-template
----
-apiVersion: v1
-kind: PersistentVolumeClaim
-metadata:
-  name: sabnzbd
-spec:
-  accessModes: ["ReadWriteOnce"]
-  dataSourceRef:
-    kind: ReplicationDestination
-    apiGroup: volsync.backube
-    name: sabnzbd
-  resources:
-    requests:
-      storage: 2Gi
-  storageClassName: ceph-block
----
-apiVersion: volsync.backube/v1alpha1
-kind: ReplicationDestination
-metadata:
-  name: sabnzbd
-spec:
-  trigger:
-    manual: restore-once
-  restic:
-    repository: sabnzbd-restic-secret
-    copyMethod: Snapshot
-    accessModes: ["ReadWriteOnce"]
-    storageClassName: ceph-block
-    volumeSnapshotClassName: csi-ceph-block
-    cacheAccessModes: ["ReadWriteOnce"]
-    cacheCapacity: 8Gi
-    cacheStorageClassName: openebs-hostpath
-    moverSecurityContext:
-      runAsUser: 568
-      runAsGroup: 568
-      fsGroup: 568
-    capacity: 2Gi # must match the PersistentVolumeClaim `.resources.requests.storage` size above
----
-apiVersion: volsync.backube/v1alpha1
-kind: ReplicationSource
-metadata:
-  name: sabnzbd
-spec:
-  sourcePVC: sabnzbd
-  trigger:
-    schedule: "15 */8 * * *"
-  restic:
-    pruneIntervalDays: 7
-    repository: sabnzbd-restic-secret
-    copyMethod: Snapshot
-    accessModes: ["ReadWriteOnce"]
-    storageClassName: ceph-block
-    volumeSnapshotClassName: csi-ceph-block
-    cacheAccessModes: ["ReadWriteOnce"]
-    cacheCapacity: 8Gi
-    cacheStorageClassName: openebs-hostpath
-    moverSecurityContext:
-      runAsUser: 568
-      runAsGroup: 568
-      fsGroup: 568
-  retain:
-    hourly: 24
-    daily: 7
-    weekly: 5
diff --git a/kubernetes/apps/media/tautulli/app/kustomization.yaml b/kubernetes/apps/media/tautulli/app/kustomization.yaml
index 2920d15c8b..c82ceb8bc3 100644
--- a/kubernetes/apps/media/tautulli/app/kustomization.yaml
+++ b/kubernetes/apps/media/tautulli/app/kustomization.yaml
@@ -4,4 +4,3 @@ kind: Kustomization
 resources:
   - ./helmrelease.yaml
   - ./pvc.yaml
-  - ./volsync.yaml
diff --git a/kubernetes/apps/media/tautulli/app/pvc.yaml b/kubernetes/apps/media/tautulli/app/pvc.yaml
index d2931fb4b0..b79518acaa 100644
--- a/kubernetes/apps/media/tautulli/app/pvc.yaml
+++ b/kubernetes/apps/media/tautulli/app/pvc.yaml
@@ -1,6 +1,21 @@
 ---
 apiVersion: v1
 kind: PersistentVolumeClaim
+metadata:
+  name: &name tautulli
+spec:
+  accessModes: ["ReadWriteOnce"]
+  dataSourceRef:
+    kind: ReplicationDestination
+    apiGroup: volsync.backube
+    name: *name
+  resources:
+    requests:
+      storage: 5Gi
+  storageClassName: ceph-block
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
 metadata:
   name: tautulli-cache
 spec:
diff --git a/kubernetes/apps/media/tautulli/app/volsync.yaml b/kubernetes/apps/media/tautulli/app/volsync.yaml
deleted file mode 100644
index 01ff04c60a..0000000000
--- a/kubernetes/apps/media/tautulli/app/volsync.yaml
+++ /dev/null
@@ -1,86 +0,0 @@
----
-apiVersion: external-secrets.io/v1beta1
-kind: ExternalSecret
-metadata:
-  name: tautulli-restic
-spec:
-  secretStoreRef:
-    kind: ClusterSecretStore
-    name: onepassword-connect
-  target:
-    name: tautulli-restic-secret
-    creationPolicy: Owner
-    template:
-      engineVersion: v2
-      data:
-        RESTIC_REPOSITORY: "{{ .REPOSITORY_TEMPLATE }}/tautulli"
-        RESTIC_PASSWORD: "{{ .RESTIC_PASSWORD }}"
-        AWS_ACCESS_KEY_ID: "{{ .AWS_ACCESS_KEY_ID }}"
-        AWS_SECRET_ACCESS_KEY: "{{ .AWS_SECRET_ACCESS_KEY }}"
-  dataFrom:
-    - extract:
-        key: volsync-restic-template
----
-apiVersion: v1
-kind: PersistentVolumeClaim
-metadata:
-  name: tautulli
-spec:
-  accessModes: ["ReadWriteOnce"]
-  dataSourceRef:
-    kind: ReplicationDestination
-    apiGroup: volsync.backube
-    name: tautulli
-  resources:
-    requests:
-      storage: 5Gi
-  storageClassName: ceph-block
----
-apiVersion: volsync.backube/v1alpha1
-kind: ReplicationDestination
-metadata:
-  name: tautulli
-spec:
-  trigger:
-    manual: restore-once
-  restic:
-    repository: tautulli-restic-secret
-    copyMethod: Snapshot
-    accessModes: ["ReadWriteOnce"]
-    storageClassName: ceph-block
-    volumeSnapshotClassName: csi-ceph-block
-    cacheAccessModes: ["ReadWriteOnce"]
-    cacheCapacity: 8Gi
-    cacheStorageClassName: openebs-hostpath
-    moverSecurityContext:
-      runAsUser: 568
-      runAsGroup: 568
-      fsGroup: 568
-    capacity: 5Gi # must match the PersistentVolumeClaim `.resources.requests.storage` size above
----
-apiVersion: volsync.backube/v1alpha1
-kind: ReplicationSource
-metadata:
-  name: tautulli
-spec:
-  sourcePVC: tautulli
-  trigger:
-    schedule: "15 */8 * * *"
-  restic:
-    pruneIntervalDays: 7
-    repository: tautulli-restic-secret
-    copyMethod: Snapshot
-    accessModes: ["ReadWriteOnce"]
-    storageClassName: ceph-block
-    volumeSnapshotClassName: csi-ceph-block
-    cacheAccessModes: ["ReadWriteOnce"]
-    cacheCapacity: 8Gi
-    cacheStorageClassName: openebs-hostpath
-    moverSecurityContext:
-      runAsUser: 568
-      runAsGroup: 568
-      fsGroup: 568
-  retain:
-    hourly: 24
-    daily: 7
-    weekly: 5
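
The pattern applied across all of the apps above: each app keeps only a PersistentVolumeClaim whose dataSourceRef points at a ReplicationDestination of the same name, and the per-app ExternalSecret/ReplicationDestination/ReplicationSource manifests are dropped in favor of the new Kyverno volsync policy. A minimal sketch of the pairing follows, using a hypothetical app name "example" and field values mirrored from the deleted volsync.yaml files; it assumes the Kyverno policy generates the ReplicationDestination (and its companions) for the PVC rather than those objects being committed per app.

---
# Sketch only: a PVC populated from a VolSync ReplicationDestination
# via the dataSourceRef volume populator, as in the pvc.yaml files above.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: &name example   # hypothetical app name
spec:
  accessModes: ["ReadWriteOnce"]
  dataSourceRef:
    kind: ReplicationDestination
    apiGroup: volsync.backube
    name: *name
  resources:
    requests:
      storage: 2Gi
  storageClassName: ceph-block
---
# Assumed to be generated by the Kyverno volsync policy; values mirror
# the ReplicationDestination specs removed in this patch.
apiVersion: volsync.backube/v1alpha1
kind: ReplicationDestination
metadata:
  name: example
spec:
  trigger:
    manual: restore-once
  restic:
    repository: example-restic-secret
    copyMethod: Snapshot
    accessModes: ["ReadWriteOnce"]
    storageClassName: ceph-block
    volumeSnapshotClassName: csi-ceph-block
    cacheAccessModes: ["ReadWriteOnce"]
    cacheCapacity: 8Gi
    cacheStorageClassName: openebs-hostpath
    capacity: 2Gi # must match the PVC `.resources.requests.storage` above

If a restored PVC stays Pending, `kubectl -n <namespace> describe replicationdestination <app>` should show whether the restore snapshot the populator needs has been produced.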