diff --git a/.envrc b/.envrc
index 5a86bcc687..3b8d77fc38 100644
--- a/.envrc
+++ b/.envrc
@@ -1,5 +1,6 @@
 #shellcheck disable=SC2148,SC2155
-export KUBECONFIG="$(PWD)/kubernetes/kubeconfig"
-export SOPS_AGE_KEY_FILE="$(PWD)/age.key"
-export TALOSCONFIG="$(PWD)/talos/clusterconfig/talosconfig"
+export MINIJINJA_CONFIG_FILE="$(expand_path ./.minijinja.toml)"
+export KUBECONFIG="$(expand_path ./kubernetes/kubeconfig)"
+export SOPS_AGE_KEY_FILE="$(expand_path ./age.key)"
+export TALOSCONFIG="$(expand_path ./talos/clusterconfig/talosconfig)"
 export TASK_X_MAP_VARIABLES=0
diff --git a/.minijinja.toml b/.minijinja.toml
new file mode 100644
index 0000000000..05c2a7b6a0
--- /dev/null
+++ b/.minijinja.toml
@@ -0,0 +1,5 @@
+autoescape = "none"
+newline = true
+trim-blocks = true
+lstrip-blocks = true
+env = true
diff --git a/.taskfiles/volsync/Taskfile.yaml b/.taskfiles/volsync/Taskfile.yaml
index ad051acd7e..3a414b2741 100644
--- a/.taskfiles/volsync/Taskfile.yaml
+++ b/.taskfiles/volsync/Taskfile.yaml
@@ -1,215 +1,114 @@
 ---
-version: "3"
+# yaml-language-server: $schema=https://taskfile.dev/schema.json
+version: '3'
 
-# This taskfile is used to manage certain VolSync tasks for a given application, limitations are described below.
+# Taskfile used to manage certain VolSync tasks for a given application, limitations are as follows.
 # 1. Fluxtomization, HelmRelease, PVC, ReplicationSource all have the same name (e.g. plex)
 # 2. ReplicationSource and ReplicationDestination are a Restic repository
-# 3. Applications are deployed as either a Kubernetes Deployment or StatefulSet
-# 4. Each application only has one PVC that is being replicated
-
-x-env-vars: &env-vars
-  am: "{{.am}}"
-  app: "{{.app}}"
-  claim: "{{.claim}}"
-  controller: "{{.controller}}"
-  job: "{{.job}}"
-  ns: "{{.ns}}"
-  pgid: "{{.pgid}}"
-  previous: "{{.previous}}"
-  puid: "{{.puid}}"
-  sc: "{{.sc}}"
+# 3. Each application only has one PVC that is being replicated
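+#
+# Backup jobs can still be run in parallel for all ReplicationSources, e.g.
+# (one-liner carried from the previous revision of this file; adjust --max-procs to taste):
+#   kubectl get replicationsources --all-namespaces --no-headers | awk '{print $2, $1}' | xargs --max-procs=4 -l bash -c 'task volsync:snapshot app=$0 ns=$1'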
 
 vars:
-  VOLSYNC_RESOURCES_DIR: "{{.ROOT_DIR}}/.taskfiles/volsync/resources"
+  VOLSYNC_RESOURCES_DIR: '{{.ROOT_DIR}}/.taskfiles/volsync/resources'
 
 tasks:
   state-*:
-    desc: Suspend or Resume Volsync
-    summary: |
-      state: resume or suspend (required)
+    desc: Suspend or resume Volsync
     cmds:
-      - flux {{.state}} kustomization volsync
-      - flux -n {{.ns}} {{.state}} helmrelease volsync
-      - kubectl -n {{.ns}} scale deployment volsync --replicas {{if eq "suspend" .state}}0{{else}}1{{end}}
-    env: *env-vars
+      - flux --namespace flux-system {{.state}} kustomization volsync
+      - flux --namespace volsync-system {{.state}} helmrelease volsync
+      - kubectl --namespace volsync-system scale deployment volsync --replicas {{if eq .state "suspend"}}0{{else}}1{{end}}
     vars:
-      ns: '{{.ns | default "volsync-system"}}'
       state: '{{index .MATCH 0}}'
-
-  list:
-    desc: List snapshots for an application
-    summary: |
-      ns: Namespace the PVC is in (default: default)
-      app: Application to list snapshots for (required)
-    cmds:
-      - envsubst < <(cat {{.VOLSYNC_RESOURCES_DIR}}/list.tmpl.yaml) | kubectl apply -f -
-      - bash {{.VOLSYNC_RESOURCES_DIR}}/wait-for-job.sh {{.job}} {{.ns}}
-      - kubectl -n {{.ns}} wait job/{{.job}} --for condition=complete --timeout=1m
-      - kubectl -n {{.ns}} logs job/{{.job}} --container main
-      - kubectl -n {{.ns}} delete job {{.job}}
-    env: *env-vars
-    requires:
-      vars: ["app"]
-    vars:
-      ns: '{{.ns | default "default"}}'
-      job: volsync-list-{{.app}}
     preconditions:
-      - test -f {{.VOLSYNC_RESOURCES_DIR}}/wait-for-job.sh
-      - test -f {{.VOLSYNC_RESOURCES_DIR}}/list.tmpl.yaml
-    silent: true
+      - '[[ "{{.state}}" == "suspend" || "{{.state}}" == "resume" ]]'
+      - which flux kubectl
 
   unlock:
-    desc: Unlock a Restic repository for an application
-    summary: |
-      ns: Namespace the PVC is in (default: default)
-      app: Application to unlock (required)
+    desc: Unlock all restic source repos
     cmds:
-      - envsubst < <(cat {{.VOLSYNC_RESOURCES_DIR}}/unlock.tmpl.yaml) | kubectl apply -f -
-      - bash {{.VOLSYNC_RESOURCES_DIR}}/wait-for-job.sh {{.job}} {{.ns}}
-      - kubectl -n {{.ns}} wait job/{{.job}} --for condition=complete --timeout=1m
-      - kubectl -n {{.ns}} logs job/{{.job}} --container unlock
-      - kubectl -n {{.ns}} delete job {{.job}}
-    env: *env-vars
-    requires:
-      vars: ["app"]
+      - for: { var: SOURCES, split: "\n" }
+        cmd: kubectl --namespace {{splitList "," .ITEM | first}} patch --field-manager=flux-client-side-apply replicationsources {{splitList "," .ITEM | last}} --type merge --patch "{\"spec\":{\"restic\":{\"unlock\":\"{{now | unixEpoch}}\"}}}"
     vars:
-      ns: '{{.ns | default "default"}}'
-      job: volsync-unlock-{{.app}}
+      SOURCES:
+        sh: kubectl get replicationsources --all-namespaces --no-headers --output=jsonpath='{range .items[*]}{.metadata.namespace},{.metadata.name}{"\n"}{end}'
     preconditions:
-      - test -f {{.VOLSYNC_RESOURCES_DIR}}/wait-for-job.sh
-      - test -f {{.VOLSYNC_RESOURCES_DIR}}/unlock.tmpl.yaml
-    silent: true
+      - which kubectl
 
-  # To run backup jobs in parallel for all replicationsources:
-  #   - kubectl get replicationsources --all-namespaces --no-headers | awk '{print $2, $1}' | xargs --max-procs=4 -l bash -c 'task volsync:snapshot app=$0 ns=$1'
   snapshot:
-    desc: Snapshot a PVC for an application
-    summary: |
-      ns: Namespace the PVC is in (default: default)
-      app: Application to snapshot (required)
+    desc: Snapshot an app [ns=default] [app=required]
     cmds:
-      - kubectl -n {{.ns}} patch replicationsources {{.app}} --type merge -p '{"spec":{"trigger":{"manual":"{{.now}}"}}}'
-      - bash {{.VOLSYNC_RESOURCES_DIR}}/wait-for-job.sh {{.job}} {{.ns}}
-      - kubectl -n {{.ns}} wait job/{{.job}} --for condition=complete --timeout=120m
-    env: *env-vars
-    requires:
-      vars: ["app"]
+      - kubectl patch clusterpolicy volsync --type merge -p '{"spec":{"useServerSideApply":true}}'
+      - kubectl --namespace {{.ns}} patch replicationsources {{.app}} --type merge -p '{"spec":{"trigger":{"manual":"{{now | unixEpoch}}"}}}'
+      - until kubectl --namespace {{.ns}} get job/{{.job}} &>/dev/null; do sleep 5; done
+      - kubectl --namespace {{.ns}} wait job/{{.job}} --for=condition=complete --timeout=120m
+      - kubectl patch clusterpolicy volsync --type merge -p '{"spec":{"useServerSideApply":null}}'
     vars:
-      now: '{{now | date "150405"}}'
       ns: '{{.ns | default "default"}}'
       job: volsync-src-{{.app}}
-      controller:
-        sh: true && {{.VOLSYNC_RESOURCES_DIR}}/which-controller.sh {{.app}} {{.ns}}
+    requires:
+      vars: [app]
    preconditions:
-      - test -f {{.VOLSYNC_RESOURCES_DIR}}/which-controller.sh
-      - test -f {{.VOLSYNC_RESOURCES_DIR}}/wait-for-job.sh
-      - kubectl -n {{.ns}} get replicationsources {{.app}}
+      - kubectl --namespace {{.ns}} get replicationsources {{.app}}
+      - which kubectl
 
-  # To run restore jobs in parallel for all replicationdestinations:
-  #   - kubectl get replicationsources --all-namespaces --no-headers | awk '{print $2, $1}' | xargs --max-procs=4 -l bash -c 'task volsync:restore app=$0 ns=$1'
   restore:
-    desc: Restore a PVC for an application
-    summary: |
-      ns: Namespace the PVC is in (default: default)
-      app: Application to restore (required)
-      previous: Previous number of snapshots to restore (default: 2)
+    desc: Restore an app [ns=default] [app=required] [previous=required]
     cmds:
-      - { task: .suspend, vars: *env-vars }
-      - { task: .wipe, vars: *env-vars }
-      - { task: .restore, vars: *env-vars }
-      - { task: .resume, vars: *env-vars }
-    env: *env-vars
-    requires:
-      vars: ["app"]
+      # Suspend
+      - flux --namespace flux-system suspend kustomization {{.app}}
+      - flux --namespace {{.ns}} suspend helmrelease {{.app}}
+      # - kubectl --namespace {{.ns}} scale {{.controller}}/{{.app}} --replicas 0
+      # - kubectl --namespace {{.ns}} wait pod --for=delete --selector="app.kubernetes.io/name={{.app}}" --timeout=5m
+      # Restore
+      - minijinja-cli {{.VOLSYNC_RESOURCES_DIR}}/replicationdestination.yaml.j2 | kubectl apply --server-side --filename -
+      - until kubectl --namespace {{.ns}} get job/volsync-dst-{{.app}}-manual &>/dev/null; do sleep 5; done
+      - kubectl --namespace {{.ns}} wait job/volsync-dst-{{.app}}-manual --for=condition=complete --timeout=120m
+      - kubectl --namespace {{.ns}} delete replicationdestination {{.app}}-manual
+      # Resume
+      - flux --namespace flux-system resume kustomization {{.app}}
+      - flux --namespace {{.ns}} resume helmrelease {{.app}}
+      - flux --namespace {{.ns}} reconcile helmrelease {{.app}} --force
+      # - kubectl --namespace {{.ns}} wait pod --for=condition=ready --selector="app.kubernetes.io/name={{.app}}" --timeout=5m
     vars:
       ns: '{{.ns | default "default"}}'
-      previous: '{{.previous | default 2}}'
-      am:
-        sh: kubectl -n {{.ns}} get replicationsources/{{.app}} -o jsonpath="{.spec.restic.accessModes}"
-      claim:
-        sh: kubectl -n {{.ns}} get replicationsources/{{.app}} -o jsonpath="{.spec.sourcePVC}"
       controller:
-        sh: "{{.VOLSYNC_RESOURCES_DIR}}/which-controller.sh {{.app}} {{.ns}}"
-      pgid:
-        sh: kubectl -n {{.ns}} get replicationsources/{{.app}} -o jsonpath="{.spec.restic.moverSecurityContext.runAsGroup}"
-      puid:
-        sh: kubectl -n {{.ns}} get replicationsources/{{.app}} -o jsonpath="{.spec.restic.moverSecurityContext.runAsUser}"
-      sc:
-        sh: kubectl -n {{.ns}} get replicationsources/{{.app}} -o jsonpath="{.spec.restic.storageClassName}"
-    preconditions:
-      - test -f {{.VOLSYNC_RESOURCES_DIR}}/which-controller.sh
-
-  cleanup:
-    desc: Delete volume populator PVCs in all namespaces
-    cmds:
-      - for: { var: dest }
-        cmd: |
-          {{- $items := (split "/" .ITEM) }}
-          kubectl delete pvc -n {{ $items._0 }} {{ $items._1 }}
-      - for: { var: cache }
-        cmd: |
-          {{- $items := (split "/" .ITEM) }}
-          kubectl delete pvc -n {{ $items._0 }} {{ $items._1 }}
-      - for: { var: snaps }
-        cmd: |
-          {{- $items := (split "/" .ITEM) }}
-          kubectl delete volumesnapshot -n {{ $items._0 }} {{ $items._1 }}
-    env: *env-vars
-    vars:
-      dest:
-        sh: kubectl get pvc --all-namespaces --no-headers | grep "volsync.*-dest" | awk '{print $1 "/" $2}'
-      cache:
-        sh: kubectl get pvc --all-namespaces --no-headers | grep "volsync.*-cache" | awk '{print $1 "/" $2}'
-      snaps:
-        sh: kubectl get volumesnapshot --all-namespaces --no-headers | grep "volsync.*" | awk '{print $1 "/" $2}'
-
-  # Suspend the Flux ks and hr
-  .suspend:
-    internal: true
-    cmds:
-      - flux -n flux-system suspend kustomization {{.app}}
-      - flux -n {{.ns}} suspend helmrelease {{.app}}
-      - kubectl -n {{.ns}} scale {{.controller}} --replicas 0
-      - kubectl -n {{.ns}} wait pod --for delete --selector="app.kubernetes.io/name={{.app}}" --timeout=2m
-    env: *env-vars
-
-  # Wipe the PVC of all data
-  .wipe:
-    internal: true
-    cmds:
-      - envsubst < <(cat {{.VOLSYNC_RESOURCES_DIR}}/wipe.tmpl.yaml) | kubectl apply -f -
-      - bash {{.VOLSYNC_RESOURCES_DIR}}/wait-for-job.sh {{.job}} {{.ns}}
-      - kubectl -n {{.ns}} wait job/{{.job}} --for condition=complete --timeout=120m
-      - kubectl -n {{.ns}} logs job/{{.job}} --container main
-      - kubectl -n {{.ns}} delete job {{.job}}
-    env: *env-vars
-    vars:
-      job: volsync-wipe-{{.app}}
+        sh: kubectl --namespace {{.ns}} get deployment {{.app}} &>/dev/null && echo deployment || echo statefulset
+    env:
+      NS: '{{.ns}}'
+      APP: '{{.app}}'
+      PREVIOUS: '{{.previous}}'
+      CLAIM:
+        sh: kubectl --namespace {{.ns}} get replicationsources/{{.app}} --output=jsonpath="{.spec.sourcePVC}"
+      ACCESS_MODES:
+        sh: kubectl --namespace {{.ns}} get replicationsources/{{.app}} --output=jsonpath="{.spec.restic.accessModes}"
+      STORAGE_CLASS_NAME:
+        sh: kubectl --namespace {{.ns}} get replicationsources/{{.app}} --output=jsonpath="{.spec.restic.storageClassName}"
+      PUID:
+        sh: kubectl --namespace {{.ns}} get replicationsources/{{.app}} --output=jsonpath="{.spec.restic.moverSecurityContext.runAsUser}"
+      PGID:
+        sh: kubectl --namespace {{.ns}} get replicationsources/{{.app}} --output=jsonpath="{.spec.restic.moverSecurityContext.runAsGroup}"
+    requires:
+      vars: [app, previous]
     preconditions:
-      - test -f {{.VOLSYNC_RESOURCES_DIR}}/wipe.tmpl.yaml
-      - test -f {{.VOLSYNC_RESOURCES_DIR}}/wait-for-job.sh
+      - test -f {{.VOLSYNC_RESOURCES_DIR}}/replicationdestination.yaml.j2
+      - which flux kubectl minijinja-cli
 
-  # Create VolSync replicationdestination CR to restore data
-  .restore:
-    internal: true
+  unlock-local:
+    desc: Unlock a restic source repo from local machine [ns=default] [app=required]
     cmds:
-      - envsubst < <(cat {{.VOLSYNC_RESOURCES_DIR}}/replicationdestination.tmpl.yaml) | kubectl apply -f -
-      - bash {{.VOLSYNC_RESOURCES_DIR}}/wait-for-job.sh volsync-dst-{{.job}} {{.ns}}
-      - kubectl -n {{.ns}} wait job/volsync-dst-{{.job}} --for condition=complete --timeout=120m
-      - kubectl -n {{.ns}} delete replicationdestination {{.job}}
-    env: *env-vars
+      - minijinja-cli {{.VOLSYNC_RESOURCES_DIR}}/unlock.yaml.j2 | kubectl apply --server-side --filename -
+      - until kubectl --namespace {{.ns}} get job/volsync-unlock-{{.app}} &>/dev/null; do sleep 5; done
+      - kubectl --namespace {{.ns}} wait job/volsync-unlock-{{.app}} --for condition=complete --timeout=5m
+      - stern --namespace {{.ns}} job/volsync-unlock-{{.app}} --no-follow
+      - kubectl --namespace {{.ns}} delete job volsync-unlock-{{.app}}
     vars:
-      job: volsync-restore-{{.app}}
+      ns: '{{.ns | default "default"}}'
+    env:
+      NS: '{{.ns}}'
+      APP: '{{.app}}'
+    requires:
+      vars: [app]
     preconditions:
-      - test -f {{.VOLSYNC_RESOURCES_DIR}}/replicationdestination.tmpl.yaml
-      - test -f {{.VOLSYNC_RESOURCES_DIR}}/wait-for-job.sh
-
-  # Resume Flux ks and hr
-  .resume:
-    internal: true
-    cmds:
-      - flux -n {{.ns}} resume helmrelease {{.app}}
-      - flux -n flux-system resume kustomization {{.app}}
-      - flux -n {{.ns}} reconcile helmrelease {{.app}} --force
-    env: *env-vars
+      - test -f {{.VOLSYNC_RESOURCES_DIR}}/unlock.yaml.j2
+      - which kubectl minijinja-cli stern
diff --git a/.taskfiles/volsync/resources/list.tmpl.yaml b/.taskfiles/volsync/resources/list.tmpl.yaml
deleted file mode 100755
index 30c329b2fe..0000000000
--- a/.taskfiles/volsync/resources/list.tmpl.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
----
-apiVersion: batch/v1
-kind: Job
-metadata:
-  name: ${job}
-  namespace: ${ns}
-spec:
-  ttlSecondsAfterFinished: 3600
-  template:
-    spec:
-      automountServiceAccountToken: false
-      restartPolicy: OnFailure
-      containers:
-        - name: main
-          image: docker.io/restic/restic:latest
-          args: ["snapshots"]
-          envFrom:
-            - secretRef:
-                name: ${app}-restic-secret
-          resources: {}
diff --git a/.taskfiles/volsync/resources/replicationdestination.tmpl.yaml b/.taskfiles/volsync/resources/replicationdestination.tmpl.yaml
deleted file mode 100755
index 2fb0c334cf..0000000000
--- a/.taskfiles/volsync/resources/replicationdestination.tmpl.yaml
+++ /dev/null
@@ -1,30 +0,0 @@
----
-apiVersion: volsync.backube/v1alpha1
-kind: ReplicationDestination
-metadata:
-  name: ${job}
-  namespace: ${ns}
-spec:
-  trigger:
-    manual: restore-once
-  restic:
-    repository: ${app}-restic-secret
-    destinationPVC: ${claim}
-    copyMethod: Direct
-    storageClassName: ${sc}
-    accessModes: ${am}
-    # IMPORTANT NOTE:
-    #   Set to the last X number of snapshots to restore from
-    previous: ${previous}
-    # OR;
-    # IMPORTANT NOTE:
-    #   On bootstrap set `restoreAsOf` to the time the old cluster was destroyed.
-    #   This will essentially prevent volsync from trying to restore a backup
-    #   from a application that started with default data in the PVC.
-    #   Do not restore snapshots made after the following RFC3339 Timestamp.
-    #   date --rfc-3339=seconds (--utc)
-    # restoreAsOf: "2022-12-10T16:00:00-05:00"
-    moverSecurityContext:
-      runAsUser: ${puid}
-      runAsGroup: ${pgid}
-      fsGroup: ${pgid}
diff --git a/.taskfiles/volsync/resources/replicationdestination.yaml.j2 b/.taskfiles/volsync/resources/replicationdestination.yaml.j2
new file mode 100644
index 0000000000..4ce4813f26
--- /dev/null
+++ b/.taskfiles/volsync/resources/replicationdestination.yaml.j2
@@ -0,0 +1,23 @@
+---
+apiVersion: volsync.backube/v1alpha1
+kind: ReplicationDestination
+metadata:
+  name: {{ ENV.APP }}-manual
+  namespace: {{ ENV.NS }}
+spec:
+  trigger:
+    manual: restore-once
+  restic:
+    repository: {{ ENV.APP }}-restic-secret
+    destinationPVC: {{ ENV.CLAIM }}
+    copyMethod: Direct
+    storageClassName: {{ ENV.STORAGE_CLASS_NAME }}
+    accessModes: {{ ENV.ACCESS_MODES }}
+    previous: {{ ENV.PREVIOUS }}
+    moverSecurityContext:
+      runAsUser: {{ ENV.PUID }}
+      runAsGroup: {{ ENV.PGID }}
+      fsGroup: {{ ENV.PGID }}
+    enableFileDeletion: true
+    cleanupCachePVC: true
+    cleanupTempPVC: true
diff --git a/.taskfiles/volsync/resources/unlock.tmpl.yaml b/.taskfiles/volsync/resources/unlock.yaml.j2
old mode 100755
new mode 100644
similarity index 72%
rename from .taskfiles/volsync/resources/unlock.tmpl.yaml
rename to .taskfiles/volsync/resources/unlock.yaml.j2
index e081397363..00963e4f5b
--- a/.taskfiles/volsync/resources/unlock.tmpl.yaml
+++ b/.taskfiles/volsync/resources/unlock.yaml.j2
@@ -2,8 +2,8 @@
 apiVersion: batch/v1
 kind: Job
 metadata:
-  name: ${job}
-  namespace: ${ns}
+  name: volsync-unlock-{{ ENV.APP }}
+  namespace: {{ ENV.NS }}
 spec:
   ttlSecondsAfterFinished: 3600
   template:
@@ -11,10 +11,10 @@ spec:
       automountServiceAccountToken: false
       restartPolicy: OnFailure
      containers:
-        - name: unlock
+        - name: restic
          image: docker.io/restic/restic:latest
          args: ["unlock", "--remove-all"]
          envFrom:
            - secretRef:
-                name: ${app}-restic-secret
+                name: {{ ENV.APP }}-restic-secret
          resources: {}
diff --git a/.taskfiles/volsync/resources/wait-for-job.sh b/.taskfiles/volsync/resources/wait-for-job.sh
deleted file mode 100755
index 8cb3fbe190..0000000000
--- a/.taskfiles/volsync/resources/wait-for-job.sh
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/usr/bin/env bash
-
-JOB=$1
-NAMESPACE="${2:-default}"
-
-[[ -z "${JOB}" ]] && echo "Job name not specified" && exit 1
-while true; do
-    STATUS="$(kubectl -n "${NAMESPACE}" get pod -l job-name="${JOB}" -o jsonpath='{.items[*].status.phase}')"
-    if [ "${STATUS}" == "Pending" ]; then
-        break
-    fi
-    sleep 1
-done
diff --git a/.taskfiles/volsync/resources/which-controller.sh b/.taskfiles/volsync/resources/which-controller.sh
deleted file mode 100755
index 311760ca30..0000000000
--- a/.taskfiles/volsync/resources/which-controller.sh
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/usr/bin/env bash
-
-APP=$1
-NAMESPACE="${2:-default}"
-
-is_deployment() {
-    kubectl -n "${NAMESPACE}" get deployment "${APP}" >/dev/null 2>&1
-}
-
-is_statefulset() {
-    kubectl -n "${NAMESPACE}" get statefulset "${APP}" >/dev/null 2>&1
-}
-
-if is_deployment; then
-    echo "deployment.apps/${APP}"
-elif is_statefulset; then
-    echo "statefulset.apps/${APP}"
-else
-    echo "No deployment or statefulset found for ${APP}"
-    exit 1
-fi
diff --git a/.taskfiles/volsync/resources/wipe.tmpl.yaml b/.taskfiles/volsync/resources/wipe.tmpl.yaml
deleted file mode 100755
index ffc1cc75a7..0000000000
--- a/.taskfiles/volsync/resources/wipe.tmpl.yaml
+++ /dev/null
@@ -1,26 +0,0 @@
----
-apiVersion: batch/v1
-kind: Job
-metadata:
-  name: ${job}
-  namespace: ${ns}
-spec:
-  ttlSecondsAfterFinished: 3600
-  template:
-    spec:
-      automountServiceAccountToken: false
-      restartPolicy: OnFailure
-      containers:
-        - name: main
-          image: docker.io/library/alpine:latest
-          command: ["/bin/sh", "-c", "cd /config; find . -delete"]
-          volumeMounts:
-            - name: config
-              mountPath: /config
-          securityContext:
-            privileged: true
-          resources: {}
-      volumes:
-        - name: config
-          persistentVolumeClaim:
-            claimName: ${claim}
diff --git a/kubernetes/apps/home/home-assistant/app/kustomization.yaml b/kubernetes/apps/home/home-assistant/app/kustomization.yaml
index 7df2f9e464..bad014b1eb 100644
--- a/kubernetes/apps/home/home-assistant/app/kustomization.yaml
+++ b/kubernetes/apps/home/home-assistant/app/kustomization.yaml
@@ -4,4 +4,4 @@ kind: Kustomization
 resources:
   - ./externalsecret.yaml
   - ./helmrelease.yaml
-  - ./volsync.yaml
+  - ./pvc.yaml
diff --git a/kubernetes/apps/home/home-assistant/app/pvc.yaml b/kubernetes/apps/home/home-assistant/app/pvc.yaml
new file mode 100644
index 0000000000..374e046ca2
--- /dev/null
+++ b/kubernetes/apps/home/home-assistant/app/pvc.yaml
@@ -0,0 +1,13 @@
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: home-assistant
+  annotations:
+    volsync.io/enabled: "true"
+spec:
+  accessModes: ["ReadWriteOnce"]
+  resources:
+    requests:
+      storage: 5Gi
+  storageClassName: ceph-block
diff --git a/kubernetes/apps/home/home-assistant/app/volsync.yaml b/kubernetes/apps/home/home-assistant/app/volsync.yaml
deleted file mode 100644
index 065f25714c..0000000000
--- a/kubernetes/apps/home/home-assistant/app/volsync.yaml
+++ /dev/null
@@ -1,86 +0,0 @@
----
-apiVersion: external-secrets.io/v1beta1
-kind: ExternalSecret
-metadata:
-  name: home-assistant-restic
-spec:
-  secretStoreRef:
-    kind: ClusterSecretStore
-    name: onepassword-connect
-  target:
-    name: home-assistant-restic-secret
-    creationPolicy: Owner
-    template:
-      engineVersion: v2
-      data:
-        RESTIC_REPOSITORY: "{{ .REPOSITORY_TEMPLATE }}/home-assistant"
-        RESTIC_PASSWORD: "{{ .RESTIC_PASSWORD }}"
-        AWS_ACCESS_KEY_ID: "{{ .AWS_ACCESS_KEY_ID }}"
-        AWS_SECRET_ACCESS_KEY: "{{ .AWS_SECRET_ACCESS_KEY }}"
-  dataFrom:
-    - extract:
-        key: volsync-restic-template
----
-apiVersion: v1
-kind: PersistentVolumeClaim
-metadata:
-  name: home-assistant
-spec:
-  accessModes: ["ReadWriteOnce"]
-  dataSourceRef:
-    kind: ReplicationDestination
-    apiGroup: volsync.backube
-    name: home-assistant
-  resources:
-    requests:
-      storage: 5Gi
-  storageClassName: ceph-block
----
-apiVersion: volsync.backube/v1alpha1
-kind: ReplicationDestination
-metadata:
-  name: home-assistant
-spec:
-  trigger:
-    manual: restore-once
-  restic:
-    repository: home-assistant-restic-secret
-    copyMethod: Snapshot
-    accessModes: ["ReadWriteOnce"]
-    storageClassName: ceph-block
-    volumeSnapshotClassName: csi-ceph-block
-    cacheAccessModes: ["ReadWriteOnce"]
-    cacheCapacity: 8Gi
-    cacheStorageClassName: openebs-hostpath
-    moverSecurityContext:
-      runAsUser: 568
-      runAsGroup: 568
-      fsGroup: 568
-    capacity: 5Gi # must match the PersistentVolumeClaim `.resources.requests.storage` size above
----
-apiVersion: volsync.backube/v1alpha1
-kind: ReplicationSource
-metadata:
-  name: home-assistant
-spec:
-  sourcePVC: home-assistant
-  trigger:
-    schedule: "15 */8 * * *"
-  restic:
-    pruneIntervalDays: 7
-    repository: home-assistant-restic-secret
-    copyMethod: Snapshot
-    accessModes: ["ReadWriteOnce"]
-    storageClassName: ceph-block
-    volumeSnapshotClassName: csi-ceph-block
-    cacheAccessModes: ["ReadWriteOnce"]
-    cacheCapacity: 8Gi
-    cacheStorageClassName: openebs-hostpath
-    moverSecurityContext:
-      runAsUser: 568
-      runAsGroup: 568
-      fsGroup: 568
-    retain:
-      hourly: 24
-      daily: 7
-      weekly: 5
diff --git a/kubernetes/apps/home/zigbee2mqtt/app/kustomization.yaml b/kubernetes/apps/home/zigbee2mqtt/app/kustomization.yaml
index 016c51ad6d..c0bda12381 100644
--- a/kubernetes/apps/home/zigbee2mqtt/app/kustomization.yaml
+++ b/kubernetes/apps/home/zigbee2mqtt/app/kustomization.yaml
@@ -4,7 +4,7 @@ kind: Kustomization
 resources:
   - ./externalsecret.yaml
   - ./helmrelease.yaml
-  - ./volsync.yaml
+  - ./pvc.yaml
 configMapGenerator:
   - name: zigbee2mqtt-loki-rules
     files:
diff --git a/kubernetes/apps/home/zigbee2mqtt/app/pvc.yaml b/kubernetes/apps/home/zigbee2mqtt/app/pvc.yaml
new file mode 100644
index 0000000000..a3490ec42e
--- /dev/null
+++ b/kubernetes/apps/home/zigbee2mqtt/app/pvc.yaml
@@ -0,0 +1,13 @@
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: zigbee2mqtt
+  annotations:
+    volsync.io/enabled: "true"
+spec:
+  accessModes: ["ReadWriteOnce"]
+  resources:
+    requests:
+      storage: 1Gi
+  storageClassName: ceph-block
diff --git a/kubernetes/apps/home/zigbee2mqtt/app/volsync.yaml b/kubernetes/apps/home/zigbee2mqtt/app/volsync.yaml
deleted file mode 100644
index 164d763a41..0000000000
--- a/kubernetes/apps/home/zigbee2mqtt/app/volsync.yaml
+++ /dev/null
@@ -1,86 +0,0 @@
----
-apiVersion: external-secrets.io/v1beta1
-kind: ExternalSecret
-metadata:
-  name: zigbee2mqtt-restic
-spec:
-  secretStoreRef:
-    kind: ClusterSecretStore
-    name: onepassword-connect
-  target:
-    name: zigbee2mqtt-restic-secret
-    creationPolicy: Owner
-    template:
-      engineVersion: v2
-      data:
-        RESTIC_REPOSITORY: "{{ .REPOSITORY_TEMPLATE }}/zigbee2mqtt"
-        RESTIC_PASSWORD: "{{ .RESTIC_PASSWORD }}"
-        AWS_ACCESS_KEY_ID: "{{ .AWS_ACCESS_KEY_ID }}"
-        AWS_SECRET_ACCESS_KEY: "{{ .AWS_SECRET_ACCESS_KEY }}"
-  dataFrom:
-    - extract:
-        key: volsync-restic-template
----
-apiVersion: v1
-kind: PersistentVolumeClaim
-metadata:
-  name: zigbee2mqtt
-spec:
-  accessModes: ["ReadWriteOnce"]
-  dataSourceRef:
-    kind: ReplicationDestination
-    apiGroup: volsync.backube
-    name: zigbee2mqtt
-  resources:
-    requests:
-      storage: 1Gi
-  storageClassName: ceph-block
----
-apiVersion: volsync.backube/v1alpha1
-kind: ReplicationDestination
-metadata:
-  name: zigbee2mqtt
-spec:
-  trigger:
-    manual: restore-once
-  restic:
-    repository: zigbee2mqtt-restic-secret
-    copyMethod: Snapshot
-    accessModes: ["ReadWriteOnce"]
-    storageClassName: ceph-block
-    volumeSnapshotClassName: csi-ceph-block
-    cacheAccessModes: ["ReadWriteOnce"]
-    cacheCapacity: 8Gi
-    cacheStorageClassName: openebs-hostpath
-    moverSecurityContext:
-      runAsUser: 568
-      runAsGroup: 568
-      fsGroup: 568
-    capacity: 1Gi # must match the PersistentVolumeClaim `.resources.requests.storage` size above
----
-apiVersion: volsync.backube/v1alpha1
-kind: ReplicationSource
-metadata:
-  name: zigbee2mqtt
-spec:
-  sourcePVC: zigbee2mqtt
-  trigger:
-    schedule: "15 */8 * * *"
-  restic:
-    pruneIntervalDays: 7
-    repository: zigbee2mqtt-restic-secret
-    copyMethod: Snapshot
-    accessModes: ["ReadWriteOnce"]
-    storageClassName: ceph-block
-    volumeSnapshotClassName: csi-ceph-block
-    cacheAccessModes: ["ReadWriteOnce"]
-    cacheCapacity: 8Gi
-    cacheStorageClassName: openebs-hostpath
-    moverSecurityContext:
-      runAsUser: 568
-      runAsGroup: 568
-      fsGroup: 568
-    retain:
-      hourly: 24
-      daily: 7
-      weekly: 5
diff --git a/kubernetes/apps/kyverno/kyverno/app/helmrelease.yaml b/kubernetes/apps/kyverno/kyverno/app/helmrelease.yaml
index 91aa568710..b434479c8d 100644
--- a/kubernetes/apps/kyverno/kyverno/app/helmrelease.yaml
+++ b/kubernetes/apps/kyverno/kyverno/app/helmrelease.yaml
@@ -31,31 +31,30 @@ spec:
         rbac:
           clusterRole:
             extraResources:
-              - apiGroups:
-                  - ""
-                resources:
-                  - pods
-                verbs:
-                  - create
-                  - update
-                  - delete
+              - apiGroups: [""]
+                resources: ["pods"]
+                verbs: ["create", "update", "delete"]
+              - apiGroups: ["external-secrets.io"]
+                resources: ["externalsecrets"]
+                verbs: ["create", "update", "patch", "delete", "get", "list"]
+              - apiGroups: ["volsync.backube"]
+                resources: ["replicationsources", "replicationdestinations"]
+                verbs: ["create", "update", "patch", "delete", "get", "list"]
         serviceMonitor:
           enabled: true
       backgroundController:
         rbac:
          clusterRole:
            extraResources:
-              - apiGroups:
-                  - ""
-                resources:
-                  - pods
-                verbs:
-                  - create
-                  - update
-                  - patch
-                  - delete
-                  - get
-                  - list
+              - apiGroups: [""]
+                resources: ["pods"]
+                verbs: ["create", "update", "patch", "delete", "get", "list"]
+              - apiGroups: ["external-secrets.io"]
+                resources: ["externalsecrets"]
+                verbs: ["create", "update", "patch", "delete", "get", "list"]
+              - apiGroups: ["volsync.backube"]
+                resources: ["replicationsources", "replicationdestinations"]
+                verbs: ["create", "update", "patch", "delete", "get", "list"]
        resources:
          requests:
            cpu: 100m
diff --git a/kubernetes/apps/kyverno/kyverno/policies/gatus.yaml b/kubernetes/apps/kyverno/kyverno/policies/gatus.yaml
index b54aa6c7c6..9494348466 100644
--- a/kubernetes/apps/kyverno/kyverno/policies/gatus.yaml
+++ b/kubernetes/apps/kyverno/kyverno/policies/gatus.yaml
@@ -13,7 +13,6 @@ metadata:
       all Ingresses with the ingressClassName set to external.
     pod-policies.kyverno.io/autogen-controllers: none
 spec:
-  generateExisting: true
   rules:
     - name: *name
       match:
@@ -36,21 +35,22 @@ spec:
       context:
         - name: GATUS_HOST
          variable:
-            value: '{{ request.object.metadata.annotations."gatus.io/host" || request.object.spec.rules[0].host }}'
+            value: "{{ request.object.metadata.annotations.\"gatus.io/host\" || request.object.spec.rules[0].host }}"
            jmesPath: "to_string(@)"
        - name: GATUS_NAME
          variable:
-            value: '{{ request.object.metadata.annotations."gatus.io/name" || request.object.metadata.name }}'
+            value: "{{ request.object.metadata.annotations.\"gatus.io/name\" || request.object.metadata.name }}"
            jmesPath: "to_string(@)"
        - name: GATUS_PATH
          variable:
-            value: '{{ request.object.metadata.annotations."gatus.io/path" || request.object.spec.rules[0].http.paths[0].path }}'
+            value: "{{ request.object.metadata.annotations.\"gatus.io/path\" || request.object.spec.rules[0].http.paths[0].path }}"
            jmesPath: "to_string(@)"
        - name: GATUS_STATUS_CODE
          variable:
-            value: '{{ request.object.metadata.annotations."gatus.io/status-code" || `200` }}'
+            value: "{{ request.object.metadata.annotations.\"gatus.io/status-code\" || '200' }}"
            jmesPath: "to_string(@)"
      generate:
+        generateExisting: true
        apiVersion: v1
        kind: ConfigMap
        name: "{{ request.object.metadata.name }}-gatus-ep"
diff --git a/kubernetes/apps/kyverno/kyverno/policies/kustomization.yaml b/kubernetes/apps/kyverno/kyverno/policies/kustomization.yaml
index 15d774b918..a5a86b1c94 100644
--- a/kubernetes/apps/kyverno/kyverno/policies/kustomization.yaml
+++ b/kubernetes/apps/kyverno/kyverno/policies/kustomization.yaml
@@ -5,3 +5,4 @@ resources:
   - ./gatus.yaml
   - ./limits.yaml
   - ./ndots.yaml
+  - ./volsync.yaml
diff --git a/kubernetes/apps/kyverno/kyverno/policies/volsync.yaml b/kubernetes/apps/kyverno/kyverno/policies/volsync.yaml
new file mode 100644
index 0000000000..617592eb01
--- /dev/null
+++ b/kubernetes/apps/kyverno/kyverno/policies/volsync.yaml
@@ -0,0 +1,129 @@
+---
+apiVersion: kyverno.io/v1
+kind: ClusterPolicy
+metadata:
+  name: volsync
+  annotations:
+    policies.kyverno.io/title: Volume Synchronization
+    policies.kyverno.io/category: Storage
+    policies.kyverno.io/severity: low
+    policies.kyverno.io/subject: Pod
+    policies.kyverno.io/description: >-
+      This policy will automatically synchronize volumes for all Pods with
+      the volumeSynchronization set to true.
+    pod-policies.kyverno.io/autogen-controllers: none
+spec:
+  rules:
+    - name: volsync-mutate-pvc
+      match: &match
+        resources:
+          kinds:
+            - PersistentVolumeClaim
+          annotations:
+            volsync.io/enabled: "true"
+      mutate:
+        patchStrategicMerge:
+          spec:
+            dataSourceRef:
+              kind: ReplicationDestination
+              apiGroup: volsync.backube
+              name: "{{ request.object.metadata.name }}"
+    - name: volsync-external-secret
+      match: *match
+      generate:
+        generateExisting: true
+        apiVersion: external-secrets.io/v1beta1
+        kind: ExternalSecret
+        name: "{{ request.object.metadata.name }}-restic"
+        namespace: "{{ request.object.metadata.namespace }}"
+        synchronize: true
+        data:
+          spec:
+            secretStoreRef:
+              kind: ClusterSecretStore
+              name: onepassword-connect
+            target:
+              name: "{{ request.object.metadata.name }}-restic-secret"
+              creationPolicy: Owner
+              template:
+                engineVersion: v2
+                data:
+                  RESTIC_REPOSITORY: "\\{{ .REPOSITORY_TEMPLATE }}/{{ request.object.metadata.name }}"
+                  RESTIC_PASSWORD: "\\{{ .RESTIC_PASSWORD }}"
+                  AWS_ACCESS_KEY_ID: "\\{{ .AWS_ACCESS_KEY_ID }}"
+                  AWS_SECRET_ACCESS_KEY: "\\{{ .AWS_SECRET_ACCESS_KEY }}"
+            dataFrom:
+              - extract:
+                  key: volsync-restic-template
+    - name: volsync-replication-destination
+      match: *match
+      context: &context
+        - name: VOLSYNC_USER
+          variable:
+            value: "{{ request.object.metadata.annotations.\"volsync.io/user\" || '568' }}"
+            jmesPath: "to_number(@)"
+        - name: VOLSYNC_GROUP
+          variable:
+            value: "{{ request.object.metadata.annotations.\"volsync.io/group\" || '568' }}"
+            jmesPath: "to_number(@)"
+        - name: VOLSYNC_CACHE
+          variable:
+            value: "{{ request.object.metadata.annotations.\"volsync.io/cache\" || '8Gi' }}"
+            jmesPath: "to_string(@)"
+      generate:
+        generateExisting: true
+        apiVersion: volsync.backube/v1alpha1
+        kind: ReplicationDestination
+        name: "{{ request.object.metadata.name }}"
+        namespace: "{{ request.object.metadata.namespace }}"
+        synchronize: true
+        data:
+          spec:
+            trigger:
+              manual: restore-once
+            restic:
+              repository: "{{ request.object.metadata.name }}-restic-secret"
+              copyMethod: Snapshot
+              accessModes: "{{ request.object.spec.accessModes }}"
+              storageClassName: "{{ request.object.spec.storageClassName }}"
+              volumeSnapshotClassName: "csi-{{ request.object.spec.storageClassName }}"
+              cacheAccessModes: ["ReadWriteOnce"]
+              cacheCapacity: "{{ VOLSYNC_CACHE }}"
+              cacheStorageClassName: openebs-hostpath
+              moverSecurityContext:
+                runAsUser: "{{ VOLSYNC_USER }}"
+                runAsGroup: "{{ VOLSYNC_GROUP }}"
+                fsGroup: "{{ VOLSYNC_GROUP }}"
+              capacity: "{{ request.object.spec.resources.requests.storage }}"
+    - name: volsync-replication-source
+      match: *match
+      context: *context
+      generate:
+        generateExisting: true
+        apiVersion: volsync.backube/v1alpha1
+        kind: ReplicationSource
+        name: "{{ request.object.metadata.name }}"
+        namespace: "{{ request.object.metadata.namespace }}"
+        synchronize: true
+        data:
+          spec:
+            sourcePVC: "{{ request.object.metadata.name }}"
+            trigger:
+              schedule: "0 * * * *"
+            restic:
+              pruneIntervalDays: 14
+              repository: "{{ request.object.metadata.name }}-restic-secret"
+              copyMethod: Snapshot
+              accessModes: "{{ request.object.spec.accessModes }}"
+              storageClassName: "{{ request.object.spec.storageClassName }}"
+              volumeSnapshotClassName: "csi-{{ request.object.spec.storageClassName }}"
+              cacheAccessModes: ["ReadWriteOnce"]
+              cacheCapacity: "{{ VOLSYNC_CACHE }}"
+              cacheStorageClassName: openebs-hostpath
+              moverSecurityContext:
+                runAsUser: "{{ VOLSYNC_USER }}"
+                runAsGroup: "{{ VOLSYNC_GROUP }}"
+                fsGroup: "{{ VOLSYNC_GROUP }}"
+              retain:
+                hourly: 24
+                daily: 7
diff --git a/kubernetes/apps/media/bazarr/app/kustomization.yaml b/kubernetes/apps/media/bazarr/app/kustomization.yaml
index 97c37fd959..a1c661c70b 100644
--- a/kubernetes/apps/media/bazarr/app/kustomization.yaml
+++ b/kubernetes/apps/media/bazarr/app/kustomization.yaml
@@ -4,7 +4,7 @@ kind: Kustomization
 resources:
   - ./externalsecret.yaml
   - ./helmrelease.yaml
-  - ./volsync.yaml
+  - ./pvc.yaml
 configMapGenerator:
   - name: bazarr-scripts
     files:
diff --git a/kubernetes/apps/media/bazarr/app/pvc.yaml b/kubernetes/apps/media/bazarr/app/pvc.yaml
new file mode 100644
index 0000000000..4adad82c2f
--- /dev/null
+++ b/kubernetes/apps/media/bazarr/app/pvc.yaml
@@ -0,0 +1,13 @@
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: bazarr
+  annotations:
+    volsync.io/enabled: "true"
+spec:
+  accessModes: ["ReadWriteOnce"]
+  resources:
+    requests:
+      storage: 2Gi
+  storageClassName: ceph-block
diff --git a/kubernetes/apps/media/bazarr/app/volsync.yaml b/kubernetes/apps/media/bazarr/app/volsync.yaml
deleted file mode 100644
index 81482b3434..0000000000
--- a/kubernetes/apps/media/bazarr/app/volsync.yaml
+++ /dev/null
@@ -1,86 +0,0 @@
----
-apiVersion: external-secrets.io/v1beta1
-kind: ExternalSecret
-metadata:
-  name: bazarr-restic
-spec:
-  secretStoreRef:
-    kind: ClusterSecretStore
-    name: onepassword-connect
-  target:
-    name: bazarr-restic-secret
-    creationPolicy: Owner
-    template:
-      engineVersion: v2
-      data:
-        RESTIC_REPOSITORY: "{{ .REPOSITORY_TEMPLATE }}/bazarr"
-        RESTIC_PASSWORD: "{{ .RESTIC_PASSWORD }}"
-        AWS_ACCESS_KEY_ID: "{{ .AWS_ACCESS_KEY_ID }}"
-        AWS_SECRET_ACCESS_KEY: "{{ .AWS_SECRET_ACCESS_KEY }}"
-  dataFrom:
-    - extract:
-        key: volsync-restic-template
----
-apiVersion: v1
-kind: PersistentVolumeClaim
-metadata:
-  name: bazarr
-spec:
-  accessModes: ["ReadWriteOnce"]
-  dataSourceRef:
-    kind: ReplicationDestination
-    apiGroup: volsync.backube
-    name: bazarr
-  resources:
-    requests:
-      storage: 2Gi
-  storageClassName: ceph-block
----
-apiVersion: volsync.backube/v1alpha1
-kind: ReplicationDestination
-metadata:
-  name: bazarr
-spec:
-  trigger:
-    manual: restore-once
-  restic:
-    repository: bazarr-restic-secret
-    copyMethod: Snapshot
-    accessModes: ["ReadWriteOnce"]
-    storageClassName: ceph-block
-    volumeSnapshotClassName: csi-ceph-block
-    cacheAccessModes: ["ReadWriteOnce"]
-    cacheCapacity: 8Gi
-    cacheStorageClassName: openebs-hostpath
-    moverSecurityContext:
-      runAsUser: 568
-      runAsGroup: 568
-      fsGroup: 568
-    capacity: 2Gi # must match the PersistentVolumeClaim `.resources.requests.storage` size above
----
-apiVersion: volsync.backube/v1alpha1
-kind: ReplicationSource
-metadata:
-  name: bazarr
-spec:
-  sourcePVC: bazarr
-  trigger:
-    schedule: "15 */8 * * *"
-  restic:
-    pruneIntervalDays: 7
-    repository: bazarr-restic-secret
-    copyMethod: Snapshot
-    accessModes: ["ReadWriteOnce"]
-    storageClassName: ceph-block
-    volumeSnapshotClassName: csi-ceph-block
-    cacheAccessModes: ["ReadWriteOnce"]
-    cacheCapacity: 8Gi
-    cacheStorageClassName: openebs-hostpath
-    moverSecurityContext:
-      runAsUser: 568
-      runAsGroup: 568
-      fsGroup: 568
-    retain:
-      hourly: 24
-      daily: 7
-      weekly: 5
diff --git a/kubernetes/apps/media/overseerr/app/kustomization.yaml b/kubernetes/apps/media/overseerr/app/kustomization.yaml
index 2920d15c8b..c82ceb8bc3 100644
--- a/kubernetes/apps/media/overseerr/app/kustomization.yaml
+++ b/kubernetes/apps/media/overseerr/app/kustomization.yaml
@@ -4,4 +4,3 @@ kind: Kustomization
 resources:
   - ./helmrelease.yaml
   - ./pvc.yaml
-  - ./volsync.yaml
diff --git a/kubernetes/apps/media/overseerr/app/pvc.yaml b/kubernetes/apps/media/overseerr/app/pvc.yaml
index 30adee028e..d332e4fa4b 100644
--- a/kubernetes/apps/media/overseerr/app/pvc.yaml
+++ b/kubernetes/apps/media/overseerr/app/pvc.yaml
@@ -1,6 +1,19 @@
 ---
 apiVersion: v1
 kind: PersistentVolumeClaim
+metadata:
+  name: overseerr
+  annotations:
+    volsync.io/enabled: "true"
+spec:
+  accessModes: ["ReadWriteOnce"]
+  resources:
+    requests:
+      storage: 2Gi
+  storageClassName: ceph-block
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
 metadata:
   name: overseerr-cache
 spec:
diff --git a/kubernetes/apps/media/overseerr/app/volsync.yaml b/kubernetes/apps/media/overseerr/app/volsync.yaml
deleted file mode 100644
index 2629511cff..0000000000
--- a/kubernetes/apps/media/overseerr/app/volsync.yaml
+++ /dev/null
@@ -1,86 +0,0 @@
----
-apiVersion: external-secrets.io/v1beta1
-kind: ExternalSecret
-metadata:
-  name: overseerr-restic
-spec:
-  secretStoreRef:
-    kind: ClusterSecretStore
-    name: onepassword-connect
-  target:
-    name: overseerr-restic-secret
-    creationPolicy: Owner
-    template:
-      engineVersion: v2
-      data:
-        RESTIC_REPOSITORY: "{{ .REPOSITORY_TEMPLATE }}/overseerr"
-        RESTIC_PASSWORD: "{{ .RESTIC_PASSWORD }}"
-        AWS_ACCESS_KEY_ID: "{{ .AWS_ACCESS_KEY_ID }}"
-        AWS_SECRET_ACCESS_KEY: "{{ .AWS_SECRET_ACCESS_KEY }}"
-  dataFrom:
-    - extract:
-        key: volsync-restic-template
----
-apiVersion: v1
-kind: PersistentVolumeClaim
-metadata:
-  name: overseerr
-spec:
-  accessModes: ["ReadWriteOnce"]
-  dataSourceRef:
-    kind: ReplicationDestination
-    apiGroup: volsync.backube
-    name: overseerr
-  resources:
-    requests:
-      storage: 2Gi
-  storageClassName: ceph-block
----
-apiVersion: volsync.backube/v1alpha1
-kind: ReplicationDestination
-metadata:
-  name: overseerr
-spec:
-  trigger:
-    manual: restore-once
-  restic:
-    repository: overseerr-restic-secret
-    copyMethod: Snapshot
-    accessModes: ["ReadWriteOnce"]
-    storageClassName: ceph-block
-    volumeSnapshotClassName: csi-ceph-block
-    cacheAccessModes: ["ReadWriteOnce"]
-    cacheCapacity: 8Gi
-    cacheStorageClassName: openebs-hostpath
-    moverSecurityContext:
-      runAsUser: 568
-      runAsGroup: 568
-      fsGroup: 568
-    capacity: 2Gi # must match the PersistentVolumeClaim `.resources.requests.storage` size above
----
-apiVersion: volsync.backube/v1alpha1
-kind: ReplicationSource
-metadata:
-  name: overseerr
-spec:
-  sourcePVC: overseerr
-  trigger:
-    schedule: "15 */8 * * *"
-  restic:
-    pruneIntervalDays: 7
-    repository: overseerr-restic-secret
-    copyMethod: Snapshot
-    accessModes: ["ReadWriteOnce"]
-    storageClassName: ceph-block
-    volumeSnapshotClassName: csi-ceph-block
-    cacheAccessModes: ["ReadWriteOnce"]
-    cacheCapacity: 8Gi
-    cacheStorageClassName: openebs-hostpath
-    moverSecurityContext:
-      runAsUser: 568
-      runAsGroup: 568
-      fsGroup: 568
-    retain:
-      hourly: 24
-      daily: 7
-      weekly: 5
diff --git a/kubernetes/apps/media/plex/app/kustomization.yaml b/kubernetes/apps/media/plex/app/kustomization.yaml
index 87a40e3247..d55e221bbd 100644
--- a/kubernetes/apps/media/plex/app/kustomization.yaml
+++ b/kubernetes/apps/media/plex/app/kustomization.yaml
@@ -4,7 +4,6 @@ kind: Kustomization
 resources:
   - ./helmrelease.yaml
   - ./pvc.yaml
-  - ./volsync.yaml
 configMapGenerator:
   - name: plex-loki-rules
     files:
diff --git a/kubernetes/apps/media/plex/app/pvc.yaml b/kubernetes/apps/media/plex/app/pvc.yaml
index 9398813ba2..74b8861e40 100644
--- a/kubernetes/apps/media/plex/app/pvc.yaml
+++ b/kubernetes/apps/media/plex/app/pvc.yaml
@@ -1,6 +1,19 @@
 ---
 apiVersion: v1
 kind: PersistentVolumeClaim
+metadata:
+  name: plex
+  annotations:
+    volsync.io/enabled: "true"
+spec:
+  accessModes: ["ReadWriteOnce"]
+  resources:
+    requests:
+      storage: 50Gi
+  storageClassName: ceph-block
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
 metadata:
   name: plex-cache
 spec:
diff --git a/kubernetes/apps/media/plex/app/volsync.yaml b/kubernetes/apps/media/plex/app/volsync.yaml
deleted file mode 100644
index aaf7ec2262..0000000000
--- a/kubernetes/apps/media/plex/app/volsync.yaml
+++ /dev/null
@@ -1,86 +0,0 @@
----
-apiVersion: external-secrets.io/v1beta1
-kind: ExternalSecret
-metadata:
-  name: plex-restic
-spec:
-  secretStoreRef:
-    kind: ClusterSecretStore
-    name: onepassword-connect
-  target:
-    name: plex-restic-secret
-    creationPolicy: Owner
-    template:
-      engineVersion: v2
-      data:
-        RESTIC_REPOSITORY: "{{ .REPOSITORY_TEMPLATE }}/plex"
-        RESTIC_PASSWORD: "{{ .RESTIC_PASSWORD }}"
-        AWS_ACCESS_KEY_ID: "{{ .AWS_ACCESS_KEY_ID }}"
-        AWS_SECRET_ACCESS_KEY: "{{ .AWS_SECRET_ACCESS_KEY }}"
-  dataFrom:
-    - extract:
-        key: volsync-restic-template
----
-apiVersion: v1
-kind: PersistentVolumeClaim
-metadata:
-  name: plex
-spec:
-  accessModes: ["ReadWriteOnce"]
-  dataSourceRef:
-    kind: ReplicationDestination
-    apiGroup: volsync.backube
-    name: plex
-  resources:
-    requests:
-      storage: 50Gi
-  storageClassName: ceph-block
----
-apiVersion: volsync.backube/v1alpha1
-kind: ReplicationDestination
-metadata:
-  name: plex
-spec:
-  trigger:
-    manual: restore-once
-  restic:
-    repository: plex-restic-secret
-    copyMethod: Snapshot
-    accessModes: ["ReadWriteOnce"]
-    storageClassName: ceph-block
-    volumeSnapshotClassName: csi-ceph-block
-    cacheAccessModes: ["ReadWriteOnce"]
-    cacheCapacity: 8Gi
-    cacheStorageClassName: openebs-hostpath
-    moverSecurityContext:
-      runAsUser: 568
-      runAsGroup: 568
-      fsGroup: 568
-    capacity: 50Gi # must match the PersistentVolumeClaim `.resources.requests.storage` size above
----
-apiVersion: volsync.backube/v1alpha1
-kind: ReplicationSource
-metadata:
-  name: plex
-spec:
-  sourcePVC: plex
-  trigger:
-    schedule: "15 */8 * * *"
-  restic:
-    pruneIntervalDays: 7
-    repository: plex-restic-secret
-    copyMethod: Snapshot
-    accessModes: ["ReadWriteOnce"]
-    storageClassName: ceph-block
-    volumeSnapshotClassName: csi-ceph-block
-    cacheAccessModes: ["ReadWriteOnce"]
-    cacheCapacity: 8Gi
-    cacheStorageClassName: openebs-hostpath
-    moverSecurityContext:
-      runAsUser: 568
-      runAsGroup: 568
-      fsGroup: 568
-    retain:
-      hourly: 24
-      daily: 7
-      weekly: 5
diff --git a/kubernetes/apps/media/qbittorrent/app/kustomization.yaml b/kubernetes/apps/media/qbittorrent/app/kustomization.yaml
index 5f9a766628..f8aa167af9 100644
--- a/kubernetes/apps/media/qbittorrent/app/kustomization.yaml
+++ b/kubernetes/apps/media/qbittorrent/app/kustomization.yaml
@@ -3,7 +3,7 @@ apiVersion: kustomize.config.k8s.io/v1beta1
 kind: Kustomization
 resources:
   - ./helmrelease.yaml
-  - ./volsync.yaml
+  - ./pvc.yaml
 configMapGenerator:
   - name: qbittorrent-loki-rules
     files:
diff --git a/kubernetes/apps/media/qbittorrent/app/pvc.yaml b/kubernetes/apps/media/qbittorrent/app/pvc.yaml
new file mode 100644
index 0000000000..32fe431f40
--- /dev/null
+++ b/kubernetes/apps/media/qbittorrent/app/pvc.yaml
@@ -0,0 +1,13 @@
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: qbittorrent
+  annotations:
+    volsync.io/enabled: "true"
+spec:
+  accessModes: ["ReadWriteMany"]
+  resources:
+    requests:
+      storage: 2Gi
+  storageClassName: ceph-filesystem
diff --git a/kubernetes/apps/media/qbittorrent/app/volsync.yaml b/kubernetes/apps/media/qbittorrent/app/volsync.yaml
deleted file mode 100644
index b8494df4a6..0000000000
--- a/kubernetes/apps/media/qbittorrent/app/volsync.yaml
+++ /dev/null
@@ -1,86 +0,0 @@
----
-apiVersion: external-secrets.io/v1beta1
-kind: ExternalSecret
-metadata:
-  name: qbittorrent-restic
-spec:
-  secretStoreRef:
-    kind: ClusterSecretStore
-    name: onepassword-connect
-  target:
-    name: qbittorrent-restic-secret
-    creationPolicy: Owner
-    template:
-      engineVersion: v2
-      data:
-        RESTIC_REPOSITORY: "{{ .REPOSITORY_TEMPLATE }}/qbittorrent"
-        RESTIC_PASSWORD: "{{ .RESTIC_PASSWORD }}"
-        AWS_ACCESS_KEY_ID: "{{ .AWS_ACCESS_KEY_ID }}"
-        AWS_SECRET_ACCESS_KEY: "{{ .AWS_SECRET_ACCESS_KEY }}"
-  dataFrom:
-    - extract:
-        key: volsync-restic-template
----
-apiVersion: v1
-kind: PersistentVolumeClaim
-metadata:
-  name: qbittorrent
-spec:
-  accessModes: ["ReadWriteMany"]
-  dataSourceRef:
-    kind: ReplicationDestination
-    apiGroup: volsync.backube
-    name: qbittorrent
-  resources:
-    requests:
-      storage: 2Gi
-  storageClassName: ceph-filesystem
----
-apiVersion: volsync.backube/v1alpha1
-kind: ReplicationDestination
-metadata:
-  name: qbittorrent
-spec:
-  trigger:
-    manual: restore-once
-  restic:
-    repository: qbittorrent-restic-secret
-    copyMethod: Snapshot
-    accessModes: ["ReadWriteMany"]
-    storageClassName: ceph-filesystem
-    volumeSnapshotClassName: csi-ceph-filesystem
-    cacheAccessModes: ["ReadWriteOnce"]
-    cacheCapacity: 8Gi
-    cacheStorageClassName: openebs-hostpath
-    moverSecurityContext:
-      runAsUser: 568
-      runAsGroup: 568
-      fsGroup: 568
-    capacity: 2Gi # must match the PersistentVolumeClaim `.resources.requests.storage` size above
----
-apiVersion: volsync.backube/v1alpha1
-kind: ReplicationSource
-metadata:
-  name: qbittorrent
-spec:
-  sourcePVC: qbittorrent
-  trigger:
-    schedule: "15 */8 * * *"
-  restic:
-    pruneIntervalDays: 7
-    repository: qbittorrent-restic-secret
-    copyMethod: Snapshot
-    accessModes: ["ReadWriteMany"]
-    storageClassName: ceph-filesystem
-    volumeSnapshotClassName: csi-ceph-filesystem
-    cacheAccessModes: ["ReadWriteOnce"]
-    cacheCapacity: 8Gi
-    cacheStorageClassName: openebs-hostpath
-    moverSecurityContext:
-      runAsUser: 568
-      runAsGroup: 568
-      fsGroup: 568
-    retain:
-      hourly: 24
-      daily: 7
-      weekly: 5
diff --git a/kubernetes/apps/media/recyclarr/app/kustomization.yaml b/kubernetes/apps/media/recyclarr/app/kustomization.yaml
index 020d6f4586..7dbbcc71ad 100644
--- a/kubernetes/apps/media/recyclarr/app/kustomization.yaml
+++ b/kubernetes/apps/media/recyclarr/app/kustomization.yaml
@@ -4,7 +4,7 @@ kind: Kustomization
 resources:
   - ./externalsecret.yaml
   - ./helmrelease.yaml
-  - ./volsync.yaml
+  - ./pvc.yaml
 configMapGenerator:
   - name: recyclarr-configmap
     files:
diff --git a/kubernetes/apps/media/recyclarr/app/pvc.yaml b/kubernetes/apps/media/recyclarr/app/pvc.yaml
new file mode 100644
index 0000000000..6f4744a2c7
--- /dev/null
+++ b/kubernetes/apps/media/recyclarr/app/pvc.yaml
@@ -0,0 +1,13 @@
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: recyclarr
+  annotations:
+    volsync.io/enabled: "true"
+spec:
+  accessModes: ["ReadWriteOnce"]
+  resources:
+    requests:
+      storage: 2Gi
+  storageClassName: ceph-block
diff --git a/kubernetes/apps/media/recyclarr/app/volsync.yaml b/kubernetes/apps/media/recyclarr/app/volsync.yaml
deleted file mode 100644
index b8ee19255a..0000000000
--- a/kubernetes/apps/media/recyclarr/app/volsync.yaml
+++ /dev/null
@@ -1,86 +0,0 @@
----
-apiVersion: external-secrets.io/v1beta1
-kind: ExternalSecret
-metadata:
-  name: recyclarr-restic
-spec:
-  secretStoreRef:
-    kind: ClusterSecretStore
-    name: onepassword-connect
-  target:
-    name: recyclarr-restic-secret
-    creationPolicy: Owner
-    template:
-      engineVersion: v2
-      data:
-        RESTIC_REPOSITORY: "{{ .REPOSITORY_TEMPLATE }}/recyclarr"
-        RESTIC_PASSWORD: "{{ .RESTIC_PASSWORD }}"
-        AWS_ACCESS_KEY_ID: "{{ .AWS_ACCESS_KEY_ID }}"
-        AWS_SECRET_ACCESS_KEY: "{{ .AWS_SECRET_ACCESS_KEY }}"
-  dataFrom:
-    - extract:
-        key: volsync-restic-template
----
-apiVersion: v1
-kind: PersistentVolumeClaim
-metadata:
-  name: recyclarr
-spec:
-  accessModes: ["ReadWriteOnce"]
-  dataSourceRef:
-    kind: ReplicationDestination
-    apiGroup: volsync.backube
-    name: recyclarr
-  resources:
-    requests:
-      storage: 2Gi
-  storageClassName: ceph-block
----
-apiVersion: volsync.backube/v1alpha1
-kind: ReplicationDestination
-metadata:
-  name: recyclarr
-spec:
-  trigger:
-    manual: restore-once
-  restic:
-    repository: recyclarr-restic-secret
-    copyMethod: Snapshot
-    accessModes: ["ReadWriteOnce"]
-    storageClassName: ceph-block
-    volumeSnapshotClassName: csi-ceph-block
-    cacheAccessModes: ["ReadWriteOnce"]
-    cacheCapacity: 8Gi
-    cacheStorageClassName: openebs-hostpath
-    moverSecurityContext:
-      runAsUser: 568
-      runAsGroup: 568
-      fsGroup: 568
-    capacity: 2Gi # must match the PersistentVolumeClaim `.resources.requests.storage` size above
----
-apiVersion: volsync.backube/v1alpha1
-kind: ReplicationSource
-metadata:
-  name: recyclarr
-spec:
-  sourcePVC: recyclarr
-  trigger:
-    schedule: "15 */8 * * *"
-  restic:
-    pruneIntervalDays: 7
-    repository: recyclarr-restic-secret
-    copyMethod: Snapshot
-    accessModes: ["ReadWriteOnce"]
-    storageClassName: ceph-block
-    volumeSnapshotClassName: csi-ceph-block
-    cacheAccessModes: ["ReadWriteOnce"]
-    cacheCapacity: 8Gi
-    cacheStorageClassName: openebs-hostpath
-    moverSecurityContext:
-      runAsUser: 568
-      runAsGroup: 568
-      fsGroup: 568
-    retain:
-      hourly: 24
-      daily: 7
-      weekly: 5
diff --git a/kubernetes/apps/media/sabnzbd/app/kustomization.yaml b/kubernetes/apps/media/sabnzbd/app/kustomization.yaml
index 7df2f9e464..bad014b1eb 100644
--- a/kubernetes/apps/media/sabnzbd/app/kustomization.yaml
+++ b/kubernetes/apps/media/sabnzbd/app/kustomization.yaml
@@ -4,4 +4,4 @@ kind: Kustomization
 resources:
   - ./externalsecret.yaml
   - ./helmrelease.yaml
-  - ./volsync.yaml
+  - ./pvc.yaml
diff --git a/kubernetes/apps/media/sabnzbd/app/pvc.yaml b/kubernetes/apps/media/sabnzbd/app/pvc.yaml
new file mode 100644
index 0000000000..cf6bb0bb9d
--- /dev/null
+++ b/kubernetes/apps/media/sabnzbd/app/pvc.yaml
@@ -0,0 +1,13 @@
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: sabnzbd
+  annotations:
+    volsync.io/enabled: "true"
+spec:
+  accessModes: ["ReadWriteOnce"]
+  resources:
+    requests:
+      storage: 2Gi
+  storageClassName: ceph-block
diff --git a/kubernetes/apps/media/sabnzbd/app/volsync.yaml b/kubernetes/apps/media/sabnzbd/app/volsync.yaml
deleted file mode 100644
index a09fa9c3a1..0000000000
--- a/kubernetes/apps/media/sabnzbd/app/volsync.yaml
+++ /dev/null
@@ -1,86 +0,0 @@
----
-apiVersion: external-secrets.io/v1beta1
-kind: ExternalSecret
-metadata:
-  name: sabnzbd-restic
-spec:
-  secretStoreRef:
-    kind: ClusterSecretStore
-    name: onepassword-connect
-  target:
-    name: sabnzbd-restic-secret
-    creationPolicy: Owner
-    template:
-      engineVersion: v2
-      data:
-        RESTIC_REPOSITORY: "{{ .REPOSITORY_TEMPLATE }}/sabnzbd"
-        RESTIC_PASSWORD: "{{ .RESTIC_PASSWORD }}"
-        AWS_ACCESS_KEY_ID: "{{ .AWS_ACCESS_KEY_ID }}"
-        AWS_SECRET_ACCESS_KEY: "{{ .AWS_SECRET_ACCESS_KEY }}"
-  dataFrom:
-    - extract:
-        key: volsync-restic-template
----
-apiVersion: v1
-kind: PersistentVolumeClaim
-metadata:
-  name: sabnzbd
-spec:
-  accessModes: ["ReadWriteOnce"]
-  dataSourceRef:
-    kind: ReplicationDestination
-    apiGroup: volsync.backube
-    name: sabnzbd
-  resources:
-    requests:
-      storage: 2Gi
-  storageClassName: ceph-block
----
-apiVersion: volsync.backube/v1alpha1
-kind: ReplicationDestination
-metadata:
-  name: sabnzbd
-spec:
-  trigger:
-    manual: restore-once
-  restic:
-    repository: sabnzbd-restic-secret
-    copyMethod: Snapshot
-    accessModes: ["ReadWriteOnce"]
-    storageClassName: ceph-block
-    volumeSnapshotClassName: csi-ceph-block
-    cacheAccessModes: ["ReadWriteOnce"]
-    cacheCapacity: 8Gi
-    cacheStorageClassName: openebs-hostpath
-    moverSecurityContext:
-      runAsUser: 568
-      runAsGroup: 568
-      fsGroup: 568
-    capacity: 2Gi # must match the PersistentVolumeClaim `.resources.requests.storage` size above
----
-apiVersion: volsync.backube/v1alpha1
-kind: ReplicationSource
-metadata:
-  name: sabnzbd
-spec:
-  sourcePVC: sabnzbd
-  trigger:
-    schedule: "15 */8 * * *"
-  restic:
-    pruneIntervalDays: 7
-    repository: sabnzbd-restic-secret
-    copyMethod: Snapshot
-    accessModes: ["ReadWriteOnce"]
-    storageClassName: ceph-block
-    volumeSnapshotClassName: csi-ceph-block
-    cacheAccessModes: ["ReadWriteOnce"]
-    cacheCapacity: 8Gi
-    cacheStorageClassName: openebs-hostpath
-    moverSecurityContext:
-      runAsUser: 568
-      runAsGroup: 568
-      fsGroup: 568
-    retain:
-      hourly: 24
-      daily: 7
-      weekly: 5
diff --git a/kubernetes/apps/media/tautulli/app/kustomization.yaml b/kubernetes/apps/media/tautulli/app/kustomization.yaml
index 2920d15c8b..c82ceb8bc3 100644
--- a/kubernetes/apps/media/tautulli/app/kustomization.yaml
+++ b/kubernetes/apps/media/tautulli/app/kustomization.yaml
@@ -4,4 +4,3 @@ kind: Kustomization
 resources:
   - ./helmrelease.yaml
   - ./pvc.yaml
-  - ./volsync.yaml
diff --git a/kubernetes/apps/media/tautulli/app/pvc.yaml b/kubernetes/apps/media/tautulli/app/pvc.yaml
index d2931fb4b0..e5101ca7b8 100644
--- a/kubernetes/apps/media/tautulli/app/pvc.yaml
+++ b/kubernetes/apps/media/tautulli/app/pvc.yaml
@@ -1,6 +1,19 @@
 ---
 apiVersion: v1
 kind: PersistentVolumeClaim
+metadata:
+  name: tautulli
+  annotations:
+    volsync.io/enabled: "true"
+spec:
+  accessModes: ["ReadWriteOnce"]
+  resources:
+    requests:
+      storage: 5Gi
+  storageClassName: ceph-block
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
 metadata:
   name: tautulli-cache
 spec:
diff --git a/kubernetes/apps/media/tautulli/app/volsync.yaml b/kubernetes/apps/media/tautulli/app/volsync.yaml
deleted file mode 100644
index 01ff04c60a..0000000000
--- a/kubernetes/apps/media/tautulli/app/volsync.yaml
+++ /dev/null
@@ -1,86 +0,0 @@
----
-apiVersion: external-secrets.io/v1beta1
-kind: ExternalSecret
-metadata:
-  name: tautulli-restic
-spec:
-  secretStoreRef:
-    kind: ClusterSecretStore
-    name: onepassword-connect
-  target:
-    name: tautulli-restic-secret
-    creationPolicy: Owner
-    template:
-      engineVersion: v2
-      data:
-        RESTIC_REPOSITORY: "{{ .REPOSITORY_TEMPLATE }}/tautulli"
-        RESTIC_PASSWORD: "{{ .RESTIC_PASSWORD }}"
-        AWS_ACCESS_KEY_ID: "{{ .AWS_ACCESS_KEY_ID }}"
-        AWS_SECRET_ACCESS_KEY: "{{ .AWS_SECRET_ACCESS_KEY }}"
-  dataFrom:
-    - extract:
-        key: volsync-restic-template
----
-apiVersion: v1
-kind: PersistentVolumeClaim
-metadata:
-  name: tautulli
-spec:
-  accessModes: ["ReadWriteOnce"]
-  dataSourceRef:
-    kind: ReplicationDestination
-    apiGroup: volsync.backube
-    name: tautulli
-  resources:
-    requests:
-      storage: 5Gi
-  storageClassName: ceph-block
----
-apiVersion: volsync.backube/v1alpha1
-kind: ReplicationDestination
-metadata:
-  name: tautulli
-spec:
-  trigger:
-    manual: restore-once
-  restic:
-    repository: tautulli-restic-secret
-    copyMethod: Snapshot
-    accessModes: ["ReadWriteOnce"]
-    storageClassName: ceph-block
-    volumeSnapshotClassName: csi-ceph-block
-    cacheAccessModes: ["ReadWriteOnce"]
-    cacheCapacity: 8Gi
-    cacheStorageClassName: openebs-hostpath
-    moverSecurityContext:
-      runAsUser: 568
-      runAsGroup: 568
-      fsGroup: 568
-    capacity: 5Gi # must match the PersistentVolumeClaim `.resources.requests.storage` size above
----
-apiVersion: volsync.backube/v1alpha1
-kind: ReplicationSource
-metadata:
-  name: tautulli
-spec:
-  sourcePVC: tautulli
-  trigger:
-    schedule: "15 */8 * * *"
-  restic:
-    pruneIntervalDays: 7
-    repository: tautulli-restic-secret
-    copyMethod: Snapshot
-    accessModes: ["ReadWriteOnce"]
-    storageClassName: ceph-block
-    volumeSnapshotClassName: csi-ceph-block
-    cacheAccessModes: ["ReadWriteOnce"]
-    cacheCapacity: 8Gi
-    cacheStorageClassName: openebs-hostpath
-    moverSecurityContext:
-      runAsUser: 568
-      runAsGroup: 568
-      fsGroup: 568
-    retain:
-      hourly: 24
-      daily: 7
-      weekly: 5
diff --git a/kubernetes/apps/volsync-system/volsync/ks.yaml b/kubernetes/apps/volsync-system/volsync/ks.yaml
index 5eae3d7d6e..ae9658b1e4 100644
--- a/kubernetes/apps/volsync-system/volsync/ks.yaml
+++ b/kubernetes/apps/volsync-system/volsync/ks.yaml
@@ -10,6 +10,7 @@ spec:
     labels:
       app.kubernetes.io/name: *app
   dependsOn:
+    - name: kyverno-policies
    - name: snapshot-controller
  path: ./kubernetes/apps/volsync-system/volsync/app
  prune: true
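--
Usage notes (app and namespace values below are illustrative, not prescribed by the patch):

The new templates read their inputs from the environment: .envrc exports
MINIJINJA_CONFIG_FILE, and .minijinja.toml sets `env = true`, which exposes
environment variables to templates as `ENV`. Assuming direnv has loaded
.envrc, a template can therefore be previewed outside of Task, e.g.:

    NS=default APP=plex minijinja-cli .taskfiles/volsync/resources/unlock.yaml.j2

(replicationdestination.yaml.j2 additionally expects CLAIM, ACCESS_MODES,
STORAGE_CLASS_NAME, PREVIOUS, PUID, and PGID.)

Typical invocations of the reworked tasks:

    task volsync:state-suspend
    task volsync:state-resume
    task volsync:unlock
    task volsync:unlock-local app=plex ns=media
    task volsync:snapshot app=plex ns=media
    task volsync:restore app=plex ns=media previous=2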