$ oc -n openshift-kube-apiserver-operator get secret kube-apiserver-to-kubelet-signer -o jsonpath='{.metadata.annotations.auth\.openshift\.io/certificate-not-after}'
2022-08-05T14:37:50Z
$ for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}'); do echo ${node} ; oc adm cordon ${node} ; done
ci-ln-mgdnf4b-72292-n547t-master-0
node/ci-ln-mgdnf4b-72292-n547t-master-0 cordoned
ci-ln-mgdnf4b-72292-n547t-master-1
node/ci-ln-mgdnf4b-72292-n547t-master-1 cordoned
ci-ln-mgdnf4b-72292-n547t-master-2
node/ci-ln-mgdnf4b-72292-n547t-master-2 cordoned
ci-ln-mgdnf4b-72292-n547t-worker-a-s7ntl
node/ci-ln-mgdnf4b-72292-n547t-worker-a-s7ntl cordoned
ci-ln-mgdnf4b-72292-n547t-worker-b-cmc9k
node/ci-ln-mgdnf4b-72292-n547t-worker-b-cmc9k cordoned
ci-ln-mgdnf4b-72292-n547t-worker-c-vcmtn
node/ci-ln-mgdnf4b-72292-n547t-worker-c-vcmtn cordoned
$ for node in $(oc get nodes -l node-role.kubernetes.io/worker -o jsonpath='{.items[*].metadata.name}'); do echo ${node} ; oc adm drain ${node} --delete-emptydir-data --ignore-daemonsets=true --timeout=15s --force ; done
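Before shutting down the nodes, you can optionally verify that a drained node no longer runs any non-daemonset pods; an illustrative check, where <node_name> is a placeholder:

$ oc get pods --all-namespaces -o wide --field-selector spec.nodeName=<node_name>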
$ for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}'); do oc debug node/${node} -- chroot /host shutdown -h 1; done 
Starting pod/ip-10-0-130-169us-east-2computeinternal-debug ...
To use host binaries, run `chroot /host`
Shutdown scheduled for Mon 2021-09-13 09:36:17 UTC, use 'shutdown -c' to cancel.
Removing debug pod ...
Starting pod/ip-10-0-150-116us-east-2computeinternal-debug ...
To use host binaries, run `chroot /host`
Shutdown scheduled for Mon 2021-09-13 09:36:29 UTC, use 'shutdown -c' to cancel.
$ oc adm uncordon <node>
$ oc get nodes -l node-role.kubernetes.io/master
NAME                           STATUS   ROLES                  AGE   VERSION
ip-10-0-168-251.ec2.internal   Ready    control-plane,master   75m   v1.32.3
ip-10-0-170-223.ec2.internal   Ready    control-plane,master   75m   v1.32.3
ip-10-0-211-16.ec2.internal    Ready    control-plane,master   75m   v1.32.3
$ oc get csr
$ oc describe csr <csr_name> 
$ oc adm certificate approve <csr_name>
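If many CSRs are pending, the following sketch approves all of them in one pass; review each CSR individually first in production environments:

$ oc get csr -o go-template='{{range .items}}{{if not .status}}{{.metadata.name}}{{"\n"}}{{end}}{{end}}' | xargs --no-run-if-empty oc adm certificate approve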
$ oc get nodes -l node-role.kubernetes.io/worker
NAME                           STATUS   ROLES    AGE   VERSION
ip-10-0-179-95.ec2.internal    Ready    worker   64m   v1.32.3
ip-10-0-182-134.ec2.internal   Ready    worker   64m   v1.32.3
ip-10-0-250-100.ec2.internal   Ready    worker   64m   v1.32.3
$ oc get csr
$ oc describe csr <csr_name> 
$ oc adm certificate approve <csr_name>
$ for node in $(oc get nodes -o jsonpath='{.items[*].metadata.name}'); do echo ${node} ; oc adm uncordon ${node} ; done
$ oc get clusteroperators
NAME                                       VERSION   AVAILABLE   PROGRESSING   DEGRADED   SINCE
authentication                             4.19.0    True        False         False      59m
cloud-credential                           4.19.0    True        False         False      85m
cluster-autoscaler                         4.19.0    True        False         False      73m
config-operator                            4.19.0    True        False         False      73m
console                                    4.19.0    True        False         False      62m
csi-snapshot-controller                    4.19.0    True        False         False      66m
dns                                        4.19.0    True        False         False      76m
etcd                                       4.19.0    True        False         False      76m
...
$ oc get nodes
NAME                           STATUS   ROLES                  AGE   VERSION
ip-10-0-168-251.ec2.internal   Ready    control-plane,master   82m   v1.32.3
ip-10-0-170-223.ec2.internal   Ready    control-plane,master   82m   v1.32.3
ip-10-0-179-95.ec2.internal    Ready    worker                 70m   v1.32.3
ip-10-0-182-134.ec2.internal   Ready    worker                 70m   v1.32.3
ip-10-0-211-16.ec2.internal    Ready    control-plane,master   82m   v1.32.3
ip-10-0-250-100.ec2.internal   Ready    worker                 69m   v1.32.3
$ oc get nodes
NAME                                       STATUS   ROLES                  AGE   VERSION
ci-ln-812tb4k-72292-8bcj7-master-0         Ready    control-plane,master   32m   v1.32.3
ci-ln-812tb4k-72292-8bcj7-master-1         Ready    control-plane,master   32m   v1.32.3
ci-ln-812tb4k-72292-8bcj7-master-2         Ready    control-plane,master   32m   v1.32.3
ci-ln-812tb4k-72292-8bcj7-worker-a-zhdvk   Ready    worker                 19m   v1.32.3
ci-ln-812tb4k-72292-8bcj7-worker-b-9hrmv   Ready    worker                 19m   v1.32.3
ci-ln-812tb4k-72292-8bcj7-worker-c-q8mw2   Ready    worker                 19m   v1.32.3
$ oc get clusteroperators
NAME                      VERSION   AVAILABLE  PROGRESSING  DEGRADED  SINCE   MESSAGE
authentication            4.19.0-0  True       False        False     51m
baremetal                 4.19.0-0  True       False        False     72m
cloud-controller-manager  4.19.0-0  True       False        False     75m
cloud-credential          4.19.0-0  True       False        False     77m
cluster-api               4.19.0-0  True       False        False     42m
cluster-autoscaler        4.19.0-0  True       False        False     72m
config-operator           4.19.0-0  True       False        False     72m
console                   4.19.0-0  True       False        False     55m
...
$ oc get mcp
NAME    CONFIG                                            UPDATED  UPDATING  DEGRADED  MACHINECOUNT  READYMACHINECOUNT  UPDATEDMACHINECOUNT  DEGRADEDMACHINECOUNT  AGE
master  rendered-master-87871f187930e67233c837e1d07f49c7  True     False     False     3             3                  3                    0                     96m
worker  rendered-worker-3c4c459dc5d90017983d7e72928b8aed  True     False     False     3             3                  3                    0                     96m
$ oc get csr
NAME       AGE  SIGNERNAME                                   REQUESTOR                                                                  REQUESTEDDURATION  CONDITION
csr-4dwsd  37m  kubernetes.io/kube-apiserver-client          system:node:ci-ln-812tb4k-72292-8bcj7-worker-c-q8mw2                       24h                Pending
csr-4vrbr  49m  kubernetes.io/kube-apiserver-client          system:node:ci-ln-812tb4k-72292-8bcj7-master-1                             24h                Pending
csr-4wk5x  51m  kubernetes.io/kubelet-serving                system:node:ci-ln-812tb4k-72292-8bcj7-master-1                             <none>             Pending
csr-84vb6  51m  kubernetes.io/kube-apiserver-client-kubelet  system:serviceaccount:openshift-machine-config-operator:node-bootstrapper  <none>             Pending
$ oc adm certificate approve <csr_name>
$ oc get csr
NAME       AGE  SIGNERNAME                                   REQUESTOR                                                                  REQUESTEDDURATION  CONDITION
csr-4dwsd  37m  kubernetes.io/kube-apiserver-client          system:node:ci-ln-812tb4k-72292-8bcj7-worker-c-q8mw2                       24h                Approved,Issued
csr-4vrbr  49m  kubernetes.io/kube-apiserver-client          system:node:ci-ln-812tb4k-72292-8bcj7-master-1                             24h                Approved,Issued
csr-4wk5x  51m  kubernetes.io/kubelet-serving                system:node:ci-ln-812tb4k-72292-8bcj7-master-1                             <none>             Approved,Issued
csr-84vb6  51m  kubernetes.io/kube-apiserver-client-kubelet  system:serviceaccount:openshift-machine-config-operator:node-bootstrapper  <none>             Approved,Issued
$ oc get nodes
NAME                                       STATUS   ROLES                  AGE   VERSION
ci-ln-812tb4k-72292-8bcj7-master-0         Ready    control-plane,master   32m   v1.32.3
ci-ln-812tb4k-72292-8bcj7-master-1         Ready    control-plane,master   32m   v1.32.3
ci-ln-812tb4k-72292-8bcj7-master-2         Ready    control-plane,master   32m   v1.32.3
ci-ln-812tb4k-72292-8bcj7-worker-a-zhdvk   Ready    worker                 19m   v1.32.3
ci-ln-812tb4k-72292-8bcj7-worker-b-9hrmv   Ready    worker                 19m   v1.32.3
ci-ln-812tb4k-72292-8bcj7-worker-c-q8mw2   Ready    worker                 19m   v1.32.3
$ oc get clusteroperators
NAME                      VERSION   AVAILABLE  PROGRESSING  DEGRADED  SINCE   MESSAGE
authentication            4.19.0-0  True       False        False     51m
baremetal                 4.19.0-0  True       False        False     72m
cloud-controller-manager  4.19.0-0  True       False        False     75m
cloud-credential          4.19.0-0  True       False        False     77m
cluster-api               4.19.0-0  True       False        False     42m
cluster-autoscaler        4.19.0-0  True       False        False     72m
config-operator           4.19.0-0  True       False        False     72m
console                   4.19.0-0  True       False        False     55m
...
$ oc get dpa -n openshift-adp
NAME            RECONCILED   AGE
velero-sample   True         2m51s
metadata:
  annotations:
    velero.io/restore-status: "true"
failureReason: found a backup with status "InProgress" during the server starting,
mark it as "Failed"
ERROR unable to determine if bucket exists. {"error": "open /tmp/aws-shared-credentials1211864681: read-only file system"}
$ oc get dpa -n openshift-adp -o yaml > dpa.orig.backup
...
 spec:
   configuration:
     nodeAgent:
       enable: true
       uploaderType: kopia
...
...
 spec:
   configuration:
     nodeAgent:
       backupPVC:
         ...
       loadConcurrency:
         ...
       podResources:
         ...
       restorePVC:
          ...
...
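The same nodeAgent fields can also be set without editing the full manifest by applying a merge patch; a minimal sketch, assuming the DPA is named velero-sample:

$ oc -n openshift-adp patch dpa velero-sample --type merge \
  -p '{"spec":{"configuration":{"nodeAgent":{"enable":true,"uploaderType":"kopia"}}}}'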
$ oc get dpa dpa-sample -n openshift-adp
NAME            RECONCILED   AGE
dpa-sample      True         2m51s
$ oc get all -n openshift-adp
NAME                                                    READY   STATUS    RESTARTS   AGE
pod/node-agent-9pjz9                                    1/1     Running   0          3d17h
pod/node-agent-fmn84                                    1/1     Running   0          3d17h
pod/node-agent-xw2dg                                    1/1     Running   0          3d17h
pod/openshift-adp-controller-manager-76b8bc8d7b-kgkcw   1/1     Running   0          3d17h
pod/velero-64475b8c5b-nh2qc                             1/1     Running   0          3d17h

NAME                                                       TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)    AGE
service/openshift-adp-controller-manager-metrics-service   ClusterIP   172.30.194.192   <none>        8443/TCP   3d17h
service/openshift-adp-velero-metrics-svc                   ClusterIP   172.30.190.174   <none>        8085/TCP   3d17h

NAME                        DESIRED   CURRENT   READY   UP-TO-DATE   AVAILABLE   NODE SELECTOR   AGE
daemonset.apps/node-agent   3         3         3       3            3           <none>          3d17h

NAME                                               READY   UP-TO-DATE   AVAILABLE   AGE
deployment.apps/openshift-adp-controller-manager   1/1     1            1           3d17h
deployment.apps/velero                             1/1     1            1           3d17h

NAME                                                          DESIRED   CURRENT   READY   AGE
replicaset.apps/openshift-adp-controller-manager-76b8bc8d7b   1         1         1       3d17h
replicaset.apps/openshift-adp-controller-manager-85fff975b8   0         0         0       3d17h
replicaset.apps/velero-64475b8c5b                             1         1         1       3d17h
replicaset.apps/velero-8b5bc54fd                              0         0         0       3d17h
replicaset.apps/velero-f5c9ffb66                              0         0         0       3d17h
$ oc get backupstoragelocations.velero.io -n openshift-adp
NAME           PHASE       LAST VALIDATED   AGE     DEFAULT
dpa-sample-1   Available   1s               3d16h   true
Requests specifying Server Side Encryption with Customer provided keys must provide the client calculated MD5 of the secret key.
found a podvolumebackup with status "InProgress" during the server starting,
mark it as "Failed".
data path restore failed: Failed to run kopia restore: Unable to load
    snapshot : snapshot not found
The generated label name is too long.
$ velero restore create <RESTORE_NAME> \
  --from-backup <BACKUP_NAME> \
  --exclude-resources=deployment.apps
$ velero restore create <RESTORE_NAME> \
  --from-backup <BACKUP_NAME> \
  --include-resources=deployment.apps
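In either case, you can follow the progress of the restore with the Velero CLI, for example:

$ velero restore describe <RESTORE_NAME>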
$ oc get dpa -n openshift-adp -o yaml > dpa.orig.backup
$ oc get all -n openshift-adp
NAME                                                     READY   STATUS    RESTARTS   AGE
pod/oadp-operator-controller-manager-67d9494d47-6l8z8    2/2     Running   0          2m8s
pod/restic-9cq4q                                         1/1     Running   0          94s
pod/restic-m4lts                                         1/1     Running   0          94s
pod/restic-pv4kr                                         1/1     Running   0          95s
pod/velero-588db7f655-n842v                              1/1     Running   0          95s

NAME                                                       TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)    AGE
service/oadp-operator-controller-manager-metrics-service   ClusterIP   172.30.70.140    <none>        8443/TCP   2m8s

NAME                    DESIRED   CURRENT   READY   UP-TO-DATE   AVAILABLE   NODE SELECTOR   AGE
daemonset.apps/restic   3         3         3       3            3           <none>          96s

NAME                                                READY   UP-TO-DATE   AVAILABLE   AGE
deployment.apps/oadp-operator-controller-manager    1/1     1            1           2m9s
deployment.apps/velero                              1/1     1            1           96s

NAME                                                           DESIRED   CURRENT   READY   AGE
replicaset.apps/oadp-operator-controller-manager-67d9494d47    1         1         1       2m9s
replicaset.apps/velero-588db7f655                              1         1         1       96s
$ oc get dpa dpa-sample -n openshift-adp -o jsonpath='{.status}'
{"conditions":[{"lastTransitionTime":"2023-10-27T01:23:57Z","message":"Reconcile complete","reason":"Complete","status":"True","type":"Reconciled"}]}
$ oc get backupstoragelocations.velero.io -n openshift-adp
NAME           PHASE       LAST VALIDATED   AGE     DEFAULT
dpa-sample-1   Available   1s               3d16h   true
 apiVersion: oadp.openshift.io/v1alpha1
 kind: DataProtectionApplication
 metadata:
   name: dpa-sample
 spec:
   configuration:
     velero:
       defaultPlugins:
       - openshift
       - aws
       - azure
       - gcp
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionApplication
metadata:
 name: dpa-sample
spec:
 configuration:
   velero:
     defaultPlugins:
     - openshift
     - azure
     - gcp
     customPlugins:
     - name: custom-plugin-example
       image: quay.io/example-repo/custom-velero-plugin
2024-02-27T10:46:50.028951744Z time="2024-02-27T10:46:50Z" level=error msg="Error backing up item"
backup=openshift-adp/<backup name> error="error executing custom action (groupResource=imagestreams.image.openshift.io,
namespace=<BSL Name>, name=postgres): rpc error: code = Aborted desc = plugin panicked:
runtime error: index out of range with length 1, stack trace: goroutine 94…
$ oc label backupstoragelocations.velero.io <bsl_name> app.kubernetes.io/component=bsl
$ oc -n openshift-adp get secret/oadp-<bsl_name>-<bsl_provider>-registry-secret -o json | jq -r '.data'
kind: ObjectBucketClaim metadata: name: test-obc namespace: openshift-adp spec: storageClassName: openshift-storage.noobaa.io generateBucketName: test-backup-bucket
apiVersion: objectbucket.io/v1alpha1
kind: ObjectBucketClaim
metadata:
  name: test-obc 
  namespace: openshift-adp
spec:
  storageClassName: openshift-storage.noobaa.io
  generateBucketName: test-backup-bucket 
$ oc create -f <obc_file_name> 
$ oc extract --to=- cm/test-obc 
# BUCKET_NAME
backup-c20...41fd
# BUCKET_PORT
443
# BUCKET_REGION

# BUCKET_SUBREGION

# BUCKET_HOST
s3.openshift-storage.svc
$ oc extract --to=- secret/test-obc
# AWS_ACCESS_KEY_ID
ebYR....xLNMc
# AWS_SECRET_ACCESS_KEY
YXf...+NaCkdyC3QPym
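For later steps, the extracted values can be captured into shell variables instead of copying them by hand; an illustrative sketch using the test-obc names from above:

$ BUCKET_HOST=$(oc get cm test-obc -n openshift-adp -o jsonpath='{.data.BUCKET_HOST}')
$ AWS_ACCESS_KEY_ID=$(oc get secret test-obc -n openshift-adp -o jsonpath='{.data.AWS_ACCESS_KEY_ID}' | base64 -d)
$ AWS_SECRET_ACCESS_KEY=$(oc get secret test-obc -n openshift-adp -o jsonpath='{.data.AWS_SECRET_ACCESS_KEY}' | base64 -d)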
$ oc get route s3 -n openshift-storage
[default]
aws_access_key_id=<AWS_ACCESS_KEY_ID>
aws_secret_access_key=<AWS_SECRET_ACCESS_KEY>
$ oc create secret generic \
  cloud-credentials \
  -n openshift-adp \
  --from-file cloud=cloud-credentials
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionApplication
metadata:
  name: oadp-backup
  namespace: openshift-adp
spec:
  configuration:
    nodeAgent:
      enable: true
      uploaderType: kopia
    velero:
      defaultPlugins:
        - aws
        - openshift
        - csi
      defaultSnapshotMoveData: true 
  backupLocations:
    - velero:
        config:
          profile: "default"
          region: noobaa
          s3Url: https://s3.openshift-storage.svc 
          s3ForcePathStyle: "true"
          insecureSkipTLSVerify: "true"
        provider: aws
        default: true
        credential:
          key: cloud
          name: cloud-credentials
        objectStorage:
          bucket: <bucket_name> 
          prefix: oadp
$ oc apply -f <dpa_filename>
$ oc get dpa -o yaml
apiVersion: v1
items:
- apiVersion: oadp.openshift.io/v1alpha1
  kind: DataProtectionApplication
  metadata:
    namespace: openshift-adp
    #...#
  spec:
    backupLocations:
    - velero:
        config:
          #...#
  status:
    conditions:
    - lastTransitionTime: "20....9:54:02Z"
      message: Reconcile complete
      reason: Complete
      status: "True"
      type: Reconciled
kind: List
metadata:
  resourceVersion: ""
$ oc get backupstoragelocations.velero.io -n openshift-adp
NAME           PHASE       LAST VALIDATED   AGE   DEFAULT
dpa-sample-1   Available   3s               15s   true
apiVersion: velero.io/v1
kind: Backup
metadata:
  name: test-backup
  namespace: openshift-adp
spec:
  includedNamespaces:
  - <application_namespace> 
$ oc apply -f <backup_cr_filename>
$ oc describe backup test-backup -n openshift-adp
Name:         test-backup
Namespace:    openshift-adp
# ....#
Status:
  Backup Item Operations Attempted:  1
  Backup Item Operations Completed:  1
  Completion Timestamp:              2024-09-25T10:17:01Z
  Expiration:                        2024-10-25T10:16:31Z
  Format Version:                    1.1.0
  Hook Status:
  Phase:  Completed
  Progress:
    Items Backed Up:  34
    Total Items:      34
  Start Timestamp:    2024-09-25T10:16:31Z
  Version:            1
Events:               <none>
apiVersion: velero.io/v1
kind: Restore
metadata:
  name: test-restore 
  namespace: openshift-adp
spec:
  backupName: <backup_name> 
  restorePVs: true
  namespaceMapping:
    <application_namespace>: test-restore-application 
$ oc apply -f <restore_cr_filename>
$ oc describe restores.velero.io <restore_name> -n openshift-adp
$ oc project test-restore-application
$ oc get pvc,svc,deployment,secret,configmap
NAME                          STATUS   VOLUME
persistentvolumeclaim/mysql   Bound    pvc-9b3583db-...-14b86

NAME               TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)    AGE
service/mysql      ClusterIP   172....157     <none>        3306/TCP   2m56s
service/todolist   ClusterIP   172.....15     <none>        8000/TCP   2m56s

NAME                    READY   UP-TO-DATE   AVAILABLE   AGE
deployment.apps/mysql   0/1     1            0           2m55s

NAME                                         TYPE                      DATA   AGE
secret/builder-dockercfg-6bfmd               kubernetes.io/dockercfg   1      2m57s
secret/default-dockercfg-hz9kz               kubernetes.io/dockercfg   1      2m57s
secret/deployer-dockercfg-86cvd              kubernetes.io/dockercfg   1      2m57s
secret/mysql-persistent-sa-dockercfg-rgp9b   kubernetes.io/dockercfg   1      2m57s

NAME                                 DATA   AGE
configmap/kube-root-ca.crt           1      2m57s
configmap/openshift-service-ca.crt   1      2m57s
apiVersion: objectbucket.io/v1alpha1
kind: ObjectBucketClaim
metadata:
  name: test-obc 
  namespace: openshift-adp
spec:
  storageClassName: openshift-storage.noobaa.io
  generateBucketName: test-backup-bucket 
$ oc create -f <obc_file_name>
$ oc extract --to=- cm/test-obc 
# BUCKET_NAME
backup-c20...41fd
# BUCKET_PORT
443
# BUCKET_REGION

# BUCKET_SUBREGION

# BUCKET_HOST
s3.openshift-storage.svc
$ oc extract --to=- secret/test-obc
# AWS_ACCESS_KEY_ID
ebYR....xLNMc
# AWS_SECRET_ACCESS_KEY
YXf...+NaCkdyC3QPym
[default]
aws_access_key_id=<AWS_ACCESS_KEY_ID>
aws_secret_access_key=<AWS_SECRET_ACCESS_KEY>
$ oc create secret generic \
  cloud-credentials \
  -n openshift-adp \
  --from-file cloud=cloud-credentials
$ oc get cm/openshift-service-ca.crt \
  -o jsonpath='{.data.service-ca\.crt}' | base64 -w0; echo
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0...
....gpwOHMwaG9CRmk5a3....FLS0tLS0K
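To reuse the encoded bundle in the caCert field of the following manifest, you can capture it in a variable (illustrative only):

$ CA_CERT=$(oc get cm/openshift-service-ca.crt -o jsonpath='{.data.service-ca\.crt}' | base64 -w0)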
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionApplication
metadata:
  name: oadp-backup
  namespace: openshift-adp
spec:
  configuration:
    nodeAgent:
      enable: true
      uploaderType: kopia
    velero:
      defaultPlugins:
        - aws
        - openshift
        - csi
      defaultSnapshotMoveData: true
  backupLocations:
    - velero:
        config:
          profile: "default"
          region: noobaa
          s3Url: https://s3.openshift-storage.svc
          s3ForcePathStyle: "true"
          insecureSkipTLSVerify: "false" 
        provider: aws
        default: true
        credential:
          key: cloud
          name: cloud-credentials
        objectStorage:
          bucket: <bucket_name> 
          prefix: oadp
          caCert: <ca_cert> 
$ oc apply -f <dpa_filename>
$ oc get dpa -o yaml
apiVersion: v1
items:
- apiVersion: oadp.openshift.io/v1alpha1
  kind: DataProtectionApplication
  metadata:
    namespace: openshift-adp
    #...#
  spec:
    backupLocations:
    - velero:
        config:
          #...#
  status:
    conditions:
    - lastTransitionTime: "20....9:54:02Z"
      message: Reconcile complete
      reason: Complete
      status: "True"
      type: Reconciled
kind: List
metadata:
  resourceVersion: ""
$ oc get backupstoragelocations.velero.io -n openshift-adp
NAME           PHASE       LAST VALIDATED   AGE   DEFAULT
dpa-sample-1   Available   3s               15s   true
apiVersion: velero.io/v1
kind: Backup
metadata:
  name: test-backup
  namespace: openshift-adp
spec:
  includedNamespaces:
  - <application_namespace> 
$ oc apply -f <backup_cr_filename>
$ oc describe backup test-backup -n openshift-adp
Name:         test-backup
Namespace:    openshift-adp
# ....#
Status:
  Backup Item Operations Attempted:  1
  Backup Item Operations Completed:  1
  Completion Timestamp:              2024-09-25T10:17:01Z
  Expiration:                        2024-10-25T10:16:31Z
  Format Version:                    1.1.0
  Hook Status:
  Phase:  Completed
  Progress:
    Items Backed Up:  34
    Total Items:      34
  Start Timestamp:    2024-09-25T10:16:31Z
  Version:            1
Events:               <none>
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionApplication
metadata:
  name: oadp-backup
  namespace: openshift-adp
spec:
  configuration:
    nodeAgent:
      enable: true
      uploaderType: kopia
    velero:
      defaultPlugins:
        - legacy-aws 
        - openshift
        - csi
      defaultSnapshotMoveData: true
  backupLocations:
    - velero:
        config:
          profile: "default"
          region: noobaa
          s3Url: https://s3.openshift-storage.svc
          s3ForcePathStyle: "true"
          insecureSkipTLSVerify: "true"
        provider: aws
        default: true
        credential:
          key: cloud
          name: cloud-credentials
        objectStorage:
          bucket: <bucket_name> 
          prefix: oadp
$ oc apply -f <dpa_filename>
$ oc get dpa -o yaml
apiVersion: v1
items:
- apiVersion: oadp.openshift.io/v1alpha1
  kind: DataProtectionApplication
  metadata:
    namespace: openshift-adp
    #...#
  spec:
    backupLocations:
    - velero:
        config:
          #...#
  status:
    conditions:
    - lastTransitionTime: "20....9:54:02Z"
      message: Reconcile complete
      reason: Complete
      status: "True"
      type: Reconciled
kind: List
metadata:
  resourceVersion: ""
$ oc get backupstoragelocations.velero.io -n openshift-adp
NAME           PHASE       LAST VALIDATED   AGE   DEFAULT
dpa-sample-1   Available   3s               15s   true
apiVersion: velero.io/v1
kind: Backup
metadata:
  name: test-backup
  namespace: openshift-adp
spec:
  includedNamespaces:
  - <application_namespace> 
$ oc apply -f <backup_cr_filename>
$ oc describe backups.velero.io test-backup -n openshift-adp
Name:         test-backup
Namespace:    openshift-adp
# ....#
Status:
  Backup Item Operations Attempted:  1
  Backup Item Operations Completed:  1
  Completion Timestamp:              2024-09-25T10:17:01Z
  Expiration:                        2024-10-25T10:16:31Z
  Format Version:                    1.1.0
  Hook Status:
  Phase:  Completed
  Progress:
    Items Backed Up:  34
    Total Items:      34
  Start Timestamp:    2024-09-25T10:16:31Z
  Version:            1
Events:               <none>
$ oc create namespace hello-world
$ oc new-app -n hello-world --image=docker.io/openshift/hello-openshift
$ oc expose service/hello-openshift -n hello-world
$ curl `oc get route/hello-openshift -n hello-world -o jsonpath='{.spec.host}'`
Hello OpenShift!
$ cat << EOF | oc create -f -
  apiVersion: velero.io/v1
  kind: Backup
  metadata:
    name: hello-world
    namespace: openshift-adp
  spec:
    includedNamespaces:
    - hello-world
    storageLocation: ${CLUSTER_NAME}-dpa-1
    ttl: 720h0m0s
EOF
$ watch "oc -n openshift-adp get backup hello-world -o json | jq .status"
"completionTimestamp": "2022-09-07T22:20:44Z", "expiration": "2022-10-07T22:20:22Z", "formatVersion": "1.1.0", "phase": "Completed", "progress": { "itemsBackedUp": 58, "totalItems": 58 }, "startTimestamp": "2022-09-07T22:20:22Z", "version": 1 }
{
  "completionTimestamp": "2022-09-07T22:20:44Z",
  "expiration": "2022-10-07T22:20:22Z",
  "formatVersion": "1.1.0",
  "phase": "Completed",
  "progress": {
    "itemsBackedUp": 58,
    "totalItems": 58
  },
  "startTimestamp": "2022-09-07T22:20:22Z",
  "version": 1
}
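The phase alone can also be read directly from the Backup CR, for example:

$ oc -n openshift-adp get backup hello-world -o jsonpath='{.status.phase}'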
$ oc delete ns hello-world
$ cat << EOF | oc create -f -
  apiVersion: velero.io/v1
  kind: Restore
  metadata:
    name: hello-world
    namespace: openshift-adp
  spec:
    backupName: hello-world
EOF
$ watch "oc -n openshift-adp get restore hello-world -o json | jq .status"
"completionTimestamp": "2022-09-07T22:25:47Z", "phase": "Completed", "progress": { "itemsRestored": 38, "totalItems": 38 }, "startTimestamp": "2022-09-07T22:25:28Z", "warnings": 9 }
{
  "completionTimestamp": "2022-09-07T22:25:47Z",
  "phase": "Completed",
  "progress": {
    "itemsRestored": 38,
    "totalItems": 38
  },
  "startTimestamp": "2022-09-07T22:25:28Z",
  "warnings": 9
}
$ oc -n hello-world get pods
NAME                              READY   STATUS    RESTARTS   AGE
hello-openshift-9f885f7c6-kdjpj   1/1     Running   0          90s
$ curl `oc get route/hello-openshift -n hello-world -o jsonpath='{.spec.host}'`
Hello OpenShift!
$ oc delete ns hello-world
$ oc -n openshift-adp delete dpa ${CLUSTER_NAME}-dpa
$ oc -n openshift-adp delete cloudstorage ${CLUSTER_NAME}-oadp
$ oc -n openshift-adp patch cloudstorage ${CLUSTER_NAME}-oadp -p '{"metadata":{"finalizers":null}}' --type=merge
$ oc -n openshift-adp delete subscription oadp-operator
$ oc delete ns openshift-adp
$ oc delete backups.velero.io hello-world
$ velero backup delete hello-world
$ for CRD in `oc get crds | grep velero | awk '{print $1}'`; do oc delete crd $CRD; done
$ aws s3 rm s3://${CLUSTER_NAME}-oadp --recursive
$ aws s3api delete-bucket --bucket ${CLUSTER_NAME}-oadp
$ aws iam detach-role-policy --role-name "${ROLE_NAME}" --policy-arn "${POLICY_ARN}"
$ aws iam delete-role --role-name "${ROLE_NAME}"
$ BUCKET=<your_bucket>
$ REGION=<your_region>
$ aws s3api create-bucket \
    --bucket $BUCKET \
    --region $REGION \
    --create-bucket-configuration LocationConstraint=$REGION 
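Note that the us-east-1 region does not support a LocationConstraint; if your bucket is in us-east-1, omit the bucket configuration:

$ aws s3api create-bucket --bucket $BUCKET --region us-east-1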
$ aws iam create-user --user-name velero 
{ "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "ec2:DescribeVolumes", "ec2:DescribeSnapshots", "ec2:CreateTags", "ec2:CreateVolume", "ec2:CreateSnapshot", "ec2:DeleteSnapshot" ], "Resource": "*" }, { "Effect": "Allow", "Action": [ "s3:GetObject", "s3:DeleteObject", "s3:PutObject", "s3:AbortMultipartUpload", "s3:ListMultipartUploadParts" ], "Resource": [ "arn:aws:s3:::${BUCKET}/*" ] }, { "Effect": "Allow", "Action": [ "s3:ListBucket", "s3:GetBucketLocation", "s3:ListBucketMultipartUploads" ], "Resource": [ "arn:aws:s3:::${BUCKET}" ] } ] } EOF
$ cat > velero-policy.json <<EOF
{
    "Version": "2012-10-17",
    "Statement": [
        {
            "Effect": "Allow",
            "Action": [
                "ec2:DescribeVolumes",
                "ec2:DescribeSnapshots",
                "ec2:CreateTags",
                "ec2:CreateVolume",
                "ec2:CreateSnapshot",
                "ec2:DeleteSnapshot"
            ],
            "Resource": "*"
        },
        {
            "Effect": "Allow",
            "Action": [
                "s3:GetObject",
                "s3:DeleteObject",
                "s3:PutObject",
                "s3:AbortMultipartUpload",
                "s3:ListMultipartUploadParts"
            ],
            "Resource": [
                "arn:aws:s3:::${BUCKET}/*"
            ]
        },
        {
            "Effect": "Allow",
            "Action": [
                "s3:ListBucket",
                "s3:GetBucketLocation",
                "s3:ListBucketMultipartUploads"
            ],
            "Resource": [
                "arn:aws:s3:::${BUCKET}"
            ]
        }
    ]
}
EOF
$ aws iam put-user-policy \
  --user-name velero \
  --policy-name velero \
  --policy-document file://velero-policy.json
$ aws iam create-access-key --user-name velero
"AccessKey": { "UserName": "velero", "Status": "Active", "CreateDate": "2017-07-31T22:24:41.576Z", "SecretAccessKey": <AWS_SECRET_ACCESS_KEY>, "AccessKeyId": <AWS_ACCESS_KEY_ID> } }
{
  "AccessKey": {
        "UserName": "velero",
        "Status": "Active",
        "CreateDate": "2017-07-31T22:24:41.576Z",
        "SecretAccessKey": <AWS_SECRET_ACCESS_KEY>,
        "AccessKeyId": <AWS_ACCESS_KEY_ID>
  }
}
$ cat << EOF > ./credentials-velero
[default]
aws_access_key_id=<AWS_ACCESS_KEY_ID>
aws_secret_access_key=<AWS_SECRET_ACCESS_KEY>
EOF
[default]
aws_access_key_id=<AWS_ACCESS_KEY_ID>
aws_secret_access_key=<AWS_SECRET_ACCESS_KEY>
$ oc create secret generic cloud-credentials -n openshift-adp --from-file cloud=credentials-velero
[backupStorage]
aws_access_key_id=<AWS_ACCESS_KEY_ID>
aws_secret_access_key=<AWS_SECRET_ACCESS_KEY>

[volumeSnapshot]
aws_access_key_id=<AWS_ACCESS_KEY_ID>
aws_secret_access_key=<AWS_SECRET_ACCESS_KEY>
$ oc create secret generic cloud-credentials -n openshift-adp --from-file cloud=credentials-velero 
kind: DataProtectionApplication metadata: name: <dpa_sample> namespace: openshift-adp spec: ... backupLocations: - name: default velero: provider: aws default: true objectStorage: bucket: <bucket_name> prefix: <prefix> config: region: us-east-1 profile: "backupStorage" credential: key: cloud name: cloud-credentials snapshotLocations: - velero: provider: aws config: region: us-west-2 profile: "volumeSnapshot"
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionApplication
metadata:
  name: <dpa_sample>
  namespace: openshift-adp
spec:
...
  backupLocations:
    - name: default
      velero:
        provider: aws
        default: true
        objectStorage:
          bucket: <bucket_name>
          prefix: <prefix>
        config:
          region: us-east-1
          profile: "backupStorage"
        credential:
          key: cloud
          name: cloud-credentials
  snapshotLocations:
    - velero:
        provider: aws
        config:
          region: us-west-2
          profile: "volumeSnapshot"
apiVersion: oadp.openshift.io/v1alpha1
kind: BackupStorageLocation
metadata:
  name: default
  namespace: openshift-adp
spec:
  provider: aws
  objectStorage:
    bucket: <bucket_name>
    prefix: <bucket_prefix>
  credential:
    key: cloud
    name: cloud-credentials
  config:
    region: <bucket_region>
    s3ForcePathStyle: "true"
    s3Url: <s3_url>
    publicUrl: <public_s3_url>
    serverSideEncryption: AES256
    kmsKeyId: "50..c-4da1-419f-a16e-ei...49f"
    customerKeyEncryptionFile: "/credentials/customer-key"
    signatureVersion: "1"
    profile: "default"
    insecureSkipTLSVerify: "true"
    enableSharedConfig: "true"
    tagging: ""
    checksumAlgorithm: "CRC32"
 snapshotLocations:
  - velero:
      config:
        profile: default
        region: <region>
      provider: aws
# ...
$ dd if=/dev/urandom bs=1 count=32 > sse.key
$ oc create secret generic cloud-credentials --namespace openshift-adp --from-file cloud=<path>/openshift_aws_credentials,customer-key=<path>/sse.key
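As an optional sanity check, confirm that the stored key decodes back to exactly 32 bytes:

$ oc get secret cloud-credentials -n openshift-adp -o jsonpath='{.data.customer-key}' | base64 -d | wc -c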
apiVersion: v1
data:
  cloud: W2Rfa2V5X2lkPSJBS0lBVkJRWUIyRkQ0TlFHRFFPQiIKYXdzX3NlY3JldF9hY2Nlc3Nfa2V5P<snip>rUE1mNWVSbTN5K2FpeWhUTUQyQk1WZHBOIgo=
  customer-key: v+<snip>TFIiq6aaXPbj8dhos=
kind: Secret
# ...
spec:
  backupLocations:
    - velero:
        config:
          customerKeyEncryptionFile: /credentials/customer-key
          profile: default
# ...
$ echo "encrypt me please" > test.txt
--bucket <bucket> \ --key test.txt \ --body test.txt \ --sse-customer-key fileb://sse.key \ --sse-customer-algorithm AES256
$ aws s3api put-object \
  --bucket <bucket> \
  --key test.txt \
  --body test.txt \
  --sse-customer-key fileb://sse.key \
  --sse-customer-algorithm AES256
$ s3cmd get s3://<bucket>/test.txt test.txt
--bucket <bucket> \ --key test.txt \ --sse-customer-key fileb://sse.key \ --sse-customer-algorithm AES256 \ downloaded.txt
$ aws s3api get-object \
    --bucket <bucket> \
    --key test.txt \
    --sse-customer-key fileb://sse.key \
    --sse-customer-algorithm AES256 \
    downloaded.txt
$ cat downloaded.txt
encrypt me please
--bucket <bucket> \ --key velero/backups/mysql-persistent-customerkeyencryptionfile4/mysql-persistent-customerkeyencryptionfile4.tar.gz \ --sse-customer-key fileb://sse.key \ --sse-customer-algorithm AES256 \ --debug \ velero_download.tar.gz
$ aws s3api get-object \
  --bucket <bucket> \
  --key velero/backups/mysql-persistent-customerkeyencryptionfile4/mysql-persistent-customerkeyencryptionfile4.tar.gz \
  --sse-customer-key fileb://sse.key \
  --sse-customer-algorithm AES256 \
  --debug \
  velero_download.tar.gz
kind: DataProtectionApplication metadata: name: <dpa_sample> spec: # ... configuration: velero: podConfig: nodeSelector: <node_selector> resourceAllocations: limits: cpu: "1" memory: 1024Mi requests: cpu: 200m memory: 256Mi
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionApplication
metadata:
  name: <dpa_sample>
spec:
# ...
  configuration:
    velero:
      podConfig:
        nodeSelector: <node_selector> 
        resourceAllocations: 
          limits:
            cpu: "1"
            memory: 1024Mi
          requests:
            cpu: 200m
            memory: 256Mi
kind: DataProtectionApplication metadata: name: <dpa_sample> spec: # ... backupLocations: - name: default velero: provider: aws default: true objectStorage: bucket: <bucket> prefix: <prefix> caCert: <base64_encoded_cert_string> config: insecureSkipTLSVerify: "false" # ...
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionApplication
metadata:
  name: <dpa_sample>
spec:
# ...
  backupLocations:
    - name: default
      velero:
        provider: aws
        default: true
        objectStorage:
          bucket: <bucket>
          prefix: <prefix>
          caCert: <base64_encoded_cert_string> 
        config:
          insecureSkipTLSVerify: "false" 
# ...
$ alias velero='oc -n openshift-adp exec deployment/velero -c velero -it -- ./velero'
$ velero version
Client:
	Version: v1.12.1-OADP
	Git commit: -
Server:
	Version: v1.12.1-OADP
$ CA_CERT=$(oc -n openshift-adp get dataprotectionapplications.oadp.openshift.io <dpa-name> -o jsonpath='{.spec.backupLocations[0].velero.objectStorage.caCert}')

$ [[ -n $CA_CERT ]] && echo "$CA_CERT" | base64 -d | oc exec -n openshift-adp -i deploy/velero -c velero -- bash -c "cat > /tmp/your-cacert.txt" || echo "DPA BSL has no caCert"
$ velero describe backup <backup_name> --details --cacert /tmp/<your_cacert>.txt
$ velero backup logs <backup_name> --cacert /tmp/<your_cacert>.txt
$ oc exec -n openshift-adp -i deploy/velero -c velero -- bash -c "ls /tmp/your-cacert.txt"
/tmp/your-cacert.txt
kind: DataProtectionApplication metadata: name: <dpa_sample> namespace: openshift-adp spec: configuration: velero: defaultPlugins: - openshift - aws resourceTimeout: 10m nodeAgent: enable: true uploaderType: kopia podConfig: nodeSelector: <node_selector> backupLocations: - name: default velero: provider: aws default: true objectStorage: bucket: <bucket_name> prefix: <prefix> config: region: <region> profile: "default" s3ForcePathStyle: "true" s3Url: <s3_url> credential: key: cloud name: cloud-credentials snapshotLocations: - name: default velero: provider: aws config: region: <region> profile: "default" credential: key: cloud name: cloud-credentials
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionApplication
metadata:
  name: <dpa_sample>
  namespace: openshift-adp
spec:
  configuration:
    velero:
      defaultPlugins:
        - openshift
        - aws
      resourceTimeout: 10m
    nodeAgent:
      enable: true
      uploaderType: kopia
      podConfig:
        nodeSelector: <node_selector>
  backupLocations:
    - name: default
      velero:
        provider: aws
        default: true
        objectStorage:
          bucket: <bucket_name>
          prefix: <prefix>
        config:
          region: <region>
          profile: "default"
          s3ForcePathStyle: "true"
          s3Url: <s3_url>
        credential:
          key: cloud
          name: cloud-credentials
  snapshotLocations:
    - name: default
      velero:
        provider: aws
        config:
          region: <region>
          profile: "default"
        credential:
          key: cloud
          name: cloud-credentials
$ oc get all -n openshift-adp
NAME                                                     READY   STATUS    RESTARTS   AGE
pod/oadp-operator-controller-manager-67d9494d47-6l8z8    2/2     Running   0          2m8s
pod/node-agent-9cq4q                                     1/1     Running   0          94s
pod/node-agent-m4lts                                     1/1     Running   0          94s
pod/node-agent-pv4kr                                     1/1     Running   0          95s
pod/velero-588db7f655-n842v                              1/1     Running   0          95s

NAME                                                       TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)    AGE
service/oadp-operator-controller-manager-metrics-service   ClusterIP   172.30.70.140    <none>        8443/TCP   2m8s
service/openshift-adp-velero-metrics-svc                   ClusterIP   172.30.10.0      <none>        8085/TCP   8h

NAME                        DESIRED   CURRENT   READY   UP-TO-DATE   AVAILABLE   NODE SELECTOR   AGE
daemonset.apps/node-agent   3         3         3       3            3           <none>          96s

NAME                                                READY   UP-TO-DATE   AVAILABLE   AGE
deployment.apps/oadp-operator-controller-manager    1/1     1            1           2m9s
deployment.apps/velero                              1/1     1            1           96s

NAME                                                           DESIRED   CURRENT   READY   AGE
replicaset.apps/oadp-operator-controller-manager-67d9494d47    1         1         1       2m9s
replicaset.apps/velero-588db7f655                              1         1         1       96s
$ oc get dpa dpa-sample -n openshift-adp -o jsonpath='{.status}'
{"conditions":[{"lastTransitionTime":"2023-10-27T01:23:57Z","message":"Reconcile complete","reason":"Complete","status":"True","type":"Reconciled"}]}
$ oc get backupstoragelocations.velero.io -n openshift-adp
NAME           PHASE       LAST VALIDATED   AGE     DEFAULT
dpa-sample-1   Available   1s               3d16h   true
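If the PHASE column shows Unavailable rather than Available, describing the backup storage location usually surfaces the underlying error, such as a credential or bucket problem. This is an optional extra check, not part of the original procedure:
$ oc describe backupstoragelocations.velero.io dpa-sample-1 -n openshift-adp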
$ oc label node/<node_name> node-role.kubernetes.io/nodeAgent=""
configuration:
  nodeAgent:
    enable: true
    podConfig:
      nodeSelector:
        node-role.kubernetes.io/nodeAgent: ""
    configuration:
      nodeAgent:
        enable: true
        podConfig:
          nodeSelector:
            node-role.kubernetes.io/infra: ""
            node-role.kubernetes.io/worker: ""
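As an optional sanity check before the node-agent pods reschedule, you can list the nodes that match the configured selectors; an empty label value matches any node that carries the label:
$ oc get nodes -l node-role.kubernetes.io/nodeAgent=
$ oc get nodes -l node-role.kubernetes.io/infra=,node-role.kubernetes.io/worker=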
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionApplication
metadata:
  name: test-dpa
  namespace: openshift-adp
spec:
  backupLocations:
  - name: default
    velero:
      config:
        checksumAlgorithm: "" 
        insecureSkipTLSVerify: "true"
        profile: "default"
        region: <bucket_region>
        s3ForcePathStyle: "true"
        s3Url: <bucket_url>
      credential:
        key: cloud
        name: cloud-credentials
      default: true
      objectStorage:
        bucket: <bucket_name>
        prefix: velero
      provider: aws
  configuration:
    velero:
      defaultPlugins:
      - openshift
      - aws
      - csi
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionApplication
metadata:
  name: test-dpa
  namespace: openshift-adp
spec:
  backupLocations:
    - name: default
      velero:
        config:
          insecureSkipTLSVerify: "true"
          profile: "default"
          region: <bucket_region>
          s3ForcePathStyle: "true"
          s3Url: <bucket_url>
        credential:
          key: cloud
          name: cloud-credentials
        default: true
        objectStorage:
          bucket: <bucket_name>
          prefix: velero
        provider: aws
  configuration:
    nodeAgent:
      enable: true
      uploaderType: restic
    velero:
      client-burst: 500 
      client-qps: 300 
      defaultPlugins:
        - openshift
        - aws
        - kubevirt
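To confirm that the client-burst and client-qps values reached the Velero server, you can dump the container arguments of the velero deployment; this assumes the operator passes the fields through as server flags:
$ oc -n openshift-adp get deployment velero -o jsonpath='{.spec.template.spec.containers[0].args}'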
...
spec:
  configuration:
    nodeAgent:
      enable: true
      uploaderType: kopia
      podConfig:
        nodeSelector:
          label.io/role: cpu-1
          other-label.io/other-role: cpu-2
        ...
...
spec:
  configuration:
    nodeAgent:
      enable: true
      loadAffinity: 
        - nodeSelector:
            matchLabels:
              label.io/role: cpu-1
            matchExpressions: 
              - key: label.io/hostname
                operator: In
                values:
                  - node1
                  - node2
                  ...
...
spec:
  configuration:
    nodeAgent:
      enable: true
      uploaderType: kopia
      loadAffinity:
        - nodeSelector:
            matchLabels:
              label.io/location: 'US'
              label.io/gpu: 'no'
      podConfig:
        nodeSelector:
          label.io/gpu: 'no'
$ oc label node/<node_name> label.io/instance-type='large'
  configuration:
    nodeAgent:
      enable: true
      uploaderType: kopia
      loadConcurrency:
        globalConfig: 1 
        perNodeConfig:
        - nodeSelector:
              matchLabels:
                 label.io/instance-type: large 
          number: 3 
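An optional check: list the nodes that carry the label referenced by perNodeConfig to see which nodes run up to 3 concurrent loads while the remaining nodes fall back to the globalConfig value of 1:
$ oc get nodes -l label.io/instance-type=large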
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionApplication
metadata:
  name: ts-dpa
  namespace: openshift-adp
spec:
  backupLocations:
  - velero:
      credential:
        key: cloud
        name: cloud-credentials
      default: true
      objectStorage:
        bucket: <bucket_name>
        prefix: velero
      provider: gcp
  configuration:
    nodeAgent: 
      enable: true
      uploaderType: kopia
    velero:
      defaultPlugins:
      - csi
      - gcp
      - openshift
      disableFsBackup: true 
$ oc get daemonset node-agent -o yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
  ...
  name: node-agent
  namespace: openshift-adp
  ...
spec:
  ...
  template:
    metadata:
      ...
    spec:
      containers:
      ...
        securityContext:
          allowPrivilegeEscalation: false 
          capabilities:
            drop:
            - ALL
          privileged: false 
          readOnlyRootFilesystem: true 
        ...
      nodeSelector:
        kubernetes.io/os: linux
      os:
        name: linux
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext:
        runAsNonRoot: true 
        seccompProfile:
          type: RuntimeDefault
      serviceAccount: velero
      serviceAccountName: velero
      ...
...
spec:
  configuration:
    repositoryMaintenance: 
      global: 
        podResources:
          cpuRequest: "100m"
          cpuLimit: "200m"
          memoryRequest: "100Mi"
          memoryLimit: "200Mi"
        loadAffinity:
          - nodeSelector:
              matchLabels:
                label.io/gpu: 'no'
              matchExpressions:
                - key: label.io/location
                  operator: In
                  values:
                    - US
                    - EU
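Repository maintenance runs outside the node-agent pods; in recent Velero releases it is scheduled as Kubernetes Jobs (verify this against your OADP version), so an optional way to watch the affinity take effect is to inspect the maintenance jobs and their pods:
$ oc get jobs,pods -n openshift-adp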
...
spec:
  configuration:
    repositoryMaintenance:
      myrepositoryname: 
        loadAffinity:
          - nodeSelector:
              matchLabels:
                label.io/cpu: 'yes'
...
spec:
  configuration:
    velero:
      podConfig:
        nodeSelector:
          some-label.io/custom-node-role: backup-core
...
spec:
  configuration:
    velero:
      loadAffinity:
        - nodeSelector:
            matchLabels:
              label.io/gpu: 'no'
            matchExpressions:
              - key: label.io/location
                operator: In
                values:
                  - US
                  - EU
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionApplication
metadata:
  name: test-dpa
  namespace: openshift-adp
spec:
  backupLocations:
    - name: default
      velero:
        config:
          insecureSkipTLSVerify: "true"
          profile: "default"
          region: <bucket_region>
          s3ForcePathStyle: "true"
          s3Url: <bucket_url>
        credential:
          key: cloud
          name: cloud-credentials
        default: true
        objectStorage:
          bucket: <bucket_name>
          prefix: velero
        provider: aws
  configuration:
    nodeAgent:
      enable: true
      uploaderType: kopia
    velero:
      defaultPlugins:
        - openshift
        - aws
        - kubevirt
        - csi
  imagePullPolicy: Never 
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionApplication
#...
backupLocations:
  - name: aws 
    velero:
      provider: aws
      default: true 
      objectStorage:
        bucket: <bucket_name> 
        prefix: <prefix> 
      config:
        region: <region_name> 
        profile: "default"
      credential:
        key: cloud
        name: cloud-credentials 
  - name: odf 
    velero:
      provider: aws
      default: false
      objectStorage:
        bucket: <bucket_name>
        prefix: <prefix>
      config:
        profile: "default"
        region: <region_name>
        s3Url: <url> 
        insecureSkipTLSVerify: "true"
        s3ForcePathStyle: "true"
      credential:
        key: cloud
        name: <custom_secret_name_odf> 
#...
apiVersion: velero.io/v1
kind: Backup
# ...
spec:
  includedNamespaces:
  - <namespace> 
  storageLocation: <backup_storage_location> 
  defaultVolumesToFsBackup: true
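After saving the manifest, you would create the Backup resource and watch its phase; backup.yaml is a placeholder file name:
$ oc create -f backup.yaml
$ oc get backups.velero.io -n openshift-adp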
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionApplication
...
spec:
  configuration:
    velero:
      defaultPlugins:
      - openshift
      - csi 
# ...
configuration:
  nodeAgent:
    enable: false  
    uploaderType: kopia
# ...
# ...
configuration:
  nodeAgent:
    enable: true  
    uploaderType: kopia
# ...
$ ibmcloud plugin install cos -f
$ BUCKET=<bucket_name>
$ REGION=<bucket_region> 
$ ibmcloud resource group-create <resource_group_name>
$ ibmcloud target -g <resource_group_name>
$ ibmcloud target
API endpoint:     https://cloud.ibm.com
Region:
User:             test-user
Account:          Test Account (fb6......e95) <-> 2...122
Resource group:   Default
$ RESOURCE_GROUP=<resource_group> 
$ ibmcloud resource service-instance-create \
<service_instance_name> \
<service_name> \
<service_plan> \
<region_name> 
$ ibmcloud resource service-instance-create test-service-instance cloud-object-storage \
standard \
global \
-d premium-global-deployment 
$ SERVICE_INSTANCE_ID=$(ibmcloud resource service-instance test-service-instance --output json | jq -r '.[0].id')
$ ibmcloud cos bucket-create \
--bucket $BUCKET \
--ibm-service-instance-id $SERVICE_INSTANCE_ID \
--region $REGION
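As an optional check, list the buckets in the service instance to confirm that the bucket was created; the --ibm-service-instance-id flag on the buckets subcommand is assumed here:
$ ibmcloud cos buckets --ibm-service-instance-id $SERVICE_INSTANCE_ID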
$ ibmcloud resource service-key-create test-key Writer --instance-name test-service-instance --parameters {\"HMAC\":true}
$ cat > credentials-velero << __EOF__
[default]
aws_access_key_id=$(ibmcloud resource service-key test-key -o json  | jq -r '.[0].credentials.cos_hmac_keys.access_key_id')
aws_secret_access_key=$(ibmcloud resource service-key test-key -o json  | jq -r '.[0].credentials.cos_hmac_keys.secret_access_key')
__EOF__
$ oc create secret generic cloud-credentials -n openshift-adp --from-file cloud=credentials-velero
$ oc create secret generic cloud-credentials -n openshift-adp --from-file cloud=credentials-velero
$ oc create secret generic <custom_secret> -n openshift-adp --from-file cloud=credentials-velero
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionApplication
metadata:
  name: <dpa_sample>
  namespace: openshift-adp
spec:
...
  backupLocations:
    - velero:
        provider: <provider>
        default: true
        credential:
          key: cloud
          name: <custom_secret> 
        objectStorage:
          bucket: <bucket_name>
          prefix: <prefix>
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionApplication
metadata:
  namespace: openshift-adp
  name: <dpa_name>
spec:
  configuration:
    velero:
      defaultPlugins:
      - openshift
      - aws
      - csi
  backupLocations:
    - velero:
        provider: aws 
        default: true
        objectStorage:
          bucket: <bucket_name> 
          prefix: velero
        config:
          insecureSkipTLSVerify: 'true'
          profile: default
          region: <region_name> 
          s3ForcePathStyle: 'true'
          s3Url: <s3_url> 
        credential:
          key: cloud
          name: cloud-credentials 
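With the manifest saved, for example as dpa-ibm.yaml (a placeholder name), create the resource before running the verification steps that follow:
$ oc apply -f dpa-ibm.yaml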
$ oc get all -n openshift-adp
NAME                                                     READY   STATUS    RESTARTS   AGE
pod/oadp-operator-controller-manager-67d9494d47-6l8z8    2/2     Running   0          2m8s
pod/node-agent-9cq4q                                     1/1     Running   0          94s
pod/node-agent-m4lts                                     1/1     Running   0          94s
pod/node-agent-pv4kr                                     1/1     Running   0          95s
pod/velero-588db7f655-n842v                              1/1     Running   0          95s

NAME                                                       TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)    AGE
service/oadp-operator-controller-manager-metrics-service   ClusterIP   172.30.70.140    <none>        8443/TCP   2m8s
service/openshift-adp-velero-metrics-svc                   ClusterIP   172.30.10.0      <none>        8085/TCP   8h

NAME                        DESIRED   CURRENT   READY   UP-TO-DATE   AVAILABLE   NODE SELECTOR   AGE
daemonset.apps/node-agent    3         3         3       3            3           <none>          96s

NAME                                                READY   UP-TO-DATE   AVAILABLE   AGE
deployment.apps/oadp-operator-controller-manager    1/1     1            1           2m9s
deployment.apps/velero                              1/1     1            1           96s

NAME                                                           DESIRED   CURRENT   READY   AGE
replicaset.apps/oadp-operator-controller-manager-67d9494d47    1         1         1       2m9s
replicaset.apps/velero-588db7f655                              1         1         1       96s
$ oc get dpa dpa-sample -n openshift-adp -o jsonpath='{.status}'
{"conditions":[{"lastTransitionTime":"2023-10-27T01:23:57Z","message":"Reconcile complete","reason":"Complete","status":"True","type":"Reconciled"}]}
$ oc get backupstoragelocations.velero.io -n openshift-adp
NAME           PHASE       LAST VALIDATED   AGE     DEFAULT
dpa-sample-1   Available   1s               3d16h   true
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionApplication
metadata:
  name: <dpa_sample>
spec:
# ...
  configuration:
    velero:
      podConfig:
        nodeSelector: <node_selector> 
        resourceAllocations: 
          limits:
            cpu: "1"
            memory: 1024Mi
          requests:
            cpu: 200m
            memory: 256Mi
$ oc label node/<node_name> node-role.kubernetes.io/nodeAgent=""
configuration:
  nodeAgent:
    enable: true
    podConfig:
      nodeSelector:
        node-role.kubernetes.io/nodeAgent: ""
    configuration:
      nodeAgent:
        enable: true
        podConfig:
          nodeSelector:
            node-role.kubernetes.io/infra: ""
            node-role.kubernetes.io/worker: ""
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionApplication
metadata:
  name: test-dpa
  namespace: openshift-adp
spec:
  backupLocations:
    - name: default
      velero:
        config:
          insecureSkipTLSVerify: "true"
          profile: "default"
          region: <bucket_region>
          s3ForcePathStyle: "true"
          s3Url: <bucket_url>
        credential:
          key: cloud
          name: cloud-credentials
        default: true
        objectStorage:
          bucket: <bucket_name>
          prefix: velero
        provider: aws
  configuration:
    nodeAgent:
      enable: true
      uploaderType: restic
    velero:
      client-burst: 500 
      client-qps: 300 
      defaultPlugins:
        - openshift
        - aws
        - kubevirt
...
spec:
  configuration:
    nodeAgent:
      enable: true
      uploaderType: kopia
      podConfig:
        nodeSelector:
          label.io/role: cpu-1
          other-label.io/other-role: cpu-2
        ...
...
spec:
  configuration:
    nodeAgent:
      enable: true
      loadAffinity: 
        - nodeSelector:
            matchLabels:
              label.io/role: cpu-1
            matchExpressions: 
              - key: label.io/hostname
                operator: In
                values:
                  - node1
                  - node2
                  ...
...
spec:
  configuration:
    nodeAgent:
      enable: true
      uploaderType: kopia
      loadAffinity:
        - nodeSelector:
            matchLabels:
              label.io/location: 'US'
              label.io/gpu: 'no'
      podConfig:
        nodeSelector:
          label.io/gpu: 'no'
$ oc label node/<node_name> label.io/instance-type='large'
  configuration:
    nodeAgent:
      enable: true
      uploaderType: kopia
      loadConcurrency:
        globalConfig: 1 
        perNodeConfig:
        - nodeSelector:
              matchLabels:
                 label.io/instance-type: large 
          number: 3 
...
spec:
  configuration:
    repositoryMaintenance: 
      global: 
        podResources:
          cpuRequest: "100m"
          cpuLimit: "200m"
          memoryRequest: "100Mi"
          memoryLimit: "200Mi"
        loadAffinity:
          - nodeSelector:
              matchLabels:
                label.io/gpu: 'no'
              matchExpressions:
                - key: label.io/location
                  operator: In
                  values:
                    - US
                    - EU
...
spec:
  configuration:
    repositoryMaintenance:
      myrepositoryname: 
        loadAffinity:
          - nodeSelector:
              matchLabels:
                label.io/cpu: 'yes'
...
spec:
  configuration:
    velero:
      podConfig:
        nodeSelector:
          some-label.io/custom-node-role: backup-core
...
spec:
  configuration:
    velero:
      loadAffinity:
        - nodeSelector:
            matchLabels:
              label.io/gpu: 'no'
            matchExpressions:
              - key: label.io/location
                operator: In
                values:
                  - US
                  - EU
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionApplication
metadata:
  name: test-dpa
  namespace: openshift-adp
spec:
  backupLocations:
    - name: default
      velero:
        config:
          insecureSkipTLSVerify: "true"
          profile: "default"
          region: <bucket_region>
          s3ForcePathStyle: "true"
          s3Url: <bucket_url>
        credential:
          key: cloud
          name: cloud-credentials
        default: true
        objectStorage:
          bucket: <bucket_name>
          prefix: velero
        provider: aws
  configuration:
    nodeAgent:
      enable: true
      uploaderType: kopia
    velero:
      defaultPlugins:
        - openshift
        - aws
        - kubevirt
        - csi
  imagePullPolicy: Never 
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionApplication
#...
backupLocations:
  - name: aws 
    velero:
      provider: aws
      default: true 
      objectStorage:
        bucket: <bucket_name> 
        prefix: <prefix> 
      config:
        region: <region_name> 
        profile: "default"
      credential:
        key: cloud
        name: cloud-credentials 
  - name: odf 
    velero:
      provider: aws
      default: false
      objectStorage:
        bucket: <bucket_name>
        prefix: <prefix>
      config:
        profile: "default"
        region: <region_name>
        s3Url: <url> 
        insecureSkipTLSVerify: "true"
        s3ForcePathStyle: "true"
      credential:
        key: cloud
        name: <custom_secret_name_odf> 
#...
apiVersion: velero.io/v1
kind: Backup
# ...
spec:
  includedNamespaces:
  - <namespace> 
  storageLocation: <backup_storage_location> 
  defaultVolumesToFsBackup: true
# ...
configuration:
  nodeAgent:
    enable: false  
    uploaderType: kopia
# ...
# ...
configuration:
  nodeAgent:
    enable: true  
    uploaderType: kopia
# ...
AZURE_SUBSCRIPTION_ID=<azure_subscription_id>
AZURE_TENANT_ID=<azure_tenant_id>
AZURE_CLIENT_ID=<azure_client_id>
AZURE_CLIENT_SECRET=<azure_client_secret>
AZURE_RESOURCE_GROUP=<azure_resource_group>
AZURE_CLOUD_NAME=<azure_cloud_name>
AZURE_STORAGE_ACCOUNT_ACCESS_KEY=<azure_storage_account_access_key>
AZURE_SUBSCRIPTION_ID=<azure_subscription_id>
AZURE_RESOURCE_GROUP=<azure_resource_group>
AZURE_CLOUD_NAME=<azure_cloud_name>
$ oc create secret generic cloud-credentials-azure -n openshift-adp --from-file cloud=credentials-velero
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionApplication
metadata:
  name: <dpa_sample>
  namespace: openshift-adp
spec:
...
  backupLocations:
    - velero:
        config:
          resourceGroup: <azure_resource_group>
          storageAccount: <azure_storage_account_id>
          subscriptionId: <azure_subscription_id>
        credential:
          key: cloud
          name: <custom_secret> 
        provider: azure
        default: true
        objectStorage:
          bucket: <bucket_name>
          prefix: <prefix>
  snapshotLocations:
    - velero:
        config:
          resourceGroup: <azure_resource_group>
          subscriptionId: <azure_subscription_id>
          incremental: "true"
        provider: azure
# Get cluster information
export API_URL=$(oc whoami --show-server)
export CLUSTER_NAME=$(echo "$API_URL" | sed 's|https://api\.||' | sed 's|\..*||')
export CLUSTER_RESOURCE_GROUP="${CLUSTER_NAME}-rg"

# Get Azure information
export AZURE_SUBSCRIPTION_ID=$(az account show --query id -o tsv)
export AZURE_TENANT_ID=$(az account show --query tenantId -o tsv)

# Set names for resources
export IDENTITY_NAME="velero"
export APP_NAME="velero-${CLUSTER_NAME}"
export STORAGE_ACCOUNT_NAME=$(echo "velero${CLUSTER_NAME}" | tr -d '-' | tr '[:upper:]' '[:lower:]' | cut -c1-24)
export CONTAINER_NAME="velero"
# Create managed identity
az identity create \
    --subscription "$AZURE_SUBSCRIPTION_ID" \
    --resource-group "$CLUSTER_RESOURCE_GROUP" \
    --name "$IDENTITY_NAME"

# Get identity details
export IDENTITY_CLIENT_ID=$(az identity show -g "$CLUSTER_RESOURCE_GROUP" -n "$IDENTITY_NAME" --query clientId -o tsv)
export IDENTITY_PRINCIPAL_ID=$(az identity show -g "$CLUSTER_RESOURCE_GROUP" -n "$IDENTITY_NAME" --query principalId -o tsv)
# Get subscription ID for role assignments
export SUBSCRIPTION_ID=$(az account show --query id -o tsv)

# Required roles for OADP operations
REQUIRED_ROLES=(
    "Contributor"
    "Storage Blob Data Contributor"
    "Disk Snapshot Contributor"
)

for role in "${REQUIRED_ROLES[@]}"; do
    echo "Assigning role: $role"
    az role assignment create \
        --assignee "$IDENTITY_PRINCIPAL_ID" \
        --role "$role" \
        --scope "/subscriptions/$SUBSCRIPTION_ID"
done
# Create storage account
az storage account create \
    --name "$STORAGE_ACCOUNT_NAME" \
    --resource-group "$CLUSTER_RESOURCE_GROUP" \
    --location "$(az group show -n $CLUSTER_RESOURCE_GROUP --query location -o tsv)" \
    --sku Standard_LRS \
    --kind StorageV2
echo "OIDC Issuer: $SERVICE_ACCOUNT_ISSUER"
export SERVICE_ACCOUNT_ISSUER=$(oc get authentication.config.openshift.io cluster -o json | jq -r .spec.serviceAccountIssuer)
echo "OIDC Issuer: $SERVICE_ACCOUNT_ISSUER"
# Create federated identity credential for Velero service account
az identity federated-credential create \
    --name "velero-federated-credential" \
    --identity-name "$IDENTITY_NAME" \
    --resource-group "$CLUSTER_RESOURCE_GROUP" \
    --issuer "$SERVICE_ACCOUNT_ISSUER" \
    --subject "system:serviceaccount:openshift-adp:velero" \
    --audiences "openshift"

# Create federated identity credential for OADP controller manager
az identity federated-credential create \
    --name "oadp-controller-federated-credential" \
    --identity-name "$IDENTITY_NAME" \
    --resource-group "$CLUSTER_RESOURCE_GROUP" \
    --issuer "$SERVICE_ACCOUNT_ISSUER" \
    --subject "system:serviceaccount:openshift-adp:openshift-adp-controller-manager" \
    --audiences "openshift"
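You can optionally list the federated credentials on the identity to confirm that both entries were created:
az identity federated-credential list \
    --identity-name "$IDENTITY_NAME" \
    --resource-group "$CLUSTER_RESOURCE_GROUP"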
$ oc create namespace openshift-adp
cat <<EOF | oc apply -f -
apiVersion: oadp.openshift.io/v1alpha1
kind: CloudStorage
metadata:
  name: azure-backup-storage
  namespace: openshift-adp
spec:
  name: ${CONTAINER_NAME}
  provider: azure
  creationSecret:
    name: cloud-credentials-azure
    key: azurekey
  config:
    storageAccount: ${STORAGE_ACCOUNT_NAME}
EOF
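To confirm that the CloudStorage resource reconciled and the container exists, list the resource; the plural form cloudstorages is assumed from the CRD naming convention:
$ oc get cloudstorages.oadp.openshift.io -n openshift-adp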
cat <<EOF | oc apply -f -
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionApplication
metadata:
  name: dpa-azure-workload-id-cloudstorage
  namespace: openshift-adp
spec:
  backupLocations:
  - bucket:
      cloudStorageRef:
        name: <cloud_storage_cr> 
      config:
        storageAccount: <storage_account_name> 
        useAAD: "true"
      credential:
        key: azurekey
        name: cloud-credentials-azure
      default: true
      prefix: velero
    name: default
  configuration:
    velero:
      defaultPlugins:
      - azure
      - openshift
      - csi
      disableFsBackup: false
  logFormat: text
  snapshotLocations:
  - name: default
    velero:
      config:
        resourceGroup: <resource_group> 
        subscriptionId: <subscription_ID> 
      credential:
        key: azurekey
        name: cloud-credentials-azure
      provider: azure
EOF
$ oc get pods -n openshift-adp
$ az role assignment list --assignee ${IDENTITY_PRINCIPAL_ID} --all --query "[].roleDefinitionName" -o tsv
# Check Velero pod environment variables
$ VELERO_POD=$(oc get pods -n openshift-adp -l app.kubernetes.io/name=velero -o jsonpath='{.items[0].metadata.name}')

# Check AZURE_CLIENT_ID environment variable
$ oc get pod ${VELERO_POD} -n openshift-adp -o jsonpath='{.spec.containers[0].env[?(@.name=="AZURE_CLIENT_ID")]}'

# Check AZURE_FEDERATED_TOKEN_FILE environment variable
$ oc get pod ${VELERO_POD} -n openshift-adp -o jsonpath='{.spec.containers[0].env[?(@.name=="AZURE_FEDERATED_TOKEN_FILE")]}'
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionApplication
metadata:
  name: <dpa_sample>
spec:
# ...
  configuration:
    velero:
      podConfig:
        nodeSelector: <node_selector> 
        resourceAllocations: 
          limits:
            cpu: "1"
            memory: 1024Mi
          requests:
            cpu: 200m
            memory: 256Mi
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionApplication
metadata:
  name: <dpa_sample>
spec:
# ...
  backupLocations:
    - name: default
      velero:
        provider: aws
        default: true
        objectStorage:
          bucket: <bucket>
          prefix: <prefix>
          caCert: <base64_encoded_cert_string> 
        config:
          insecureSkipTLSVerify: "false" 
# ...
$ alias velero='oc -n openshift-adp exec deployment/velero -c velero -it -- ./velero'
$ velero version
Client:
	Version: v1.12.1-OADP
	Git commit: -
Server:
	Version: v1.12.1-OADP
$ CA_CERT=$(oc -n openshift-adp get dataprotectionapplications.oadp.openshift.io <dpa-name> -o jsonpath='{.spec.backupLocations[0].velero.objectStorage.caCert}')

$ [[ -n $CA_CERT ]] && echo "$CA_CERT" | base64 -d | oc exec -n openshift-adp -i deploy/velero -c velero -- bash -c "cat > /tmp/your-cacert.txt" || echo "DPA BSL has no caCert"
$ velero describe backup <backup_name> --details --cacert /tmp/<your_cacert>.txt
$ velero backup logs <backup_name> --cacert /tmp/<your_cacert>.txt
$ oc exec -n openshift-adp -i deploy/velero -c velero -- bash -c "ls /tmp/your-cacert.txt"
/tmp/your-cacert.txt
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionApplication
metadata:
  name: <dpa_sample>
  namespace: openshift-adp 
spec:
  configuration:
    velero:
      defaultPlugins:
        - azure
        - openshift 
      resourceTimeout: 10m 
    nodeAgent: 
      enable: true 
      uploaderType: kopia 
      podConfig:
        nodeSelector: <node_selector> 
  backupLocations:
    - velero:
        config:
          resourceGroup: <azure_resource_group> 
          storageAccount: <azure_storage_account_id> 
          subscriptionId: <azure_subscription_id> 
        credential:
          key: cloud
          name: cloud-credentials-azure  
        provider: azure
        default: true
        objectStorage:
          bucket: <bucket_name> 
          prefix: <prefix> 
  snapshotLocations: 
    - velero:
        config:
          resourceGroup: <azure_resource_group>
          subscriptionId: <azure_subscription_id>
          incremental: "true"
        name: default
        provider: azure
        credential:
          key: cloud
          name: cloud-credentials-azure 
$ oc get all -n openshift-adp
NAME                                                     READY   STATUS    RESTARTS   AGE
pod/oadp-operator-controller-manager-67d9494d47-6l8z8    2/2     Running   0          2m8s
pod/node-agent-9cq4q                                     1/1     Running   0          94s
pod/node-agent-m4lts                                     1/1     Running   0          94s
pod/node-agent-pv4kr                                     1/1     Running   0          95s
pod/velero-588db7f655-n842v                              1/1     Running   0          95s

NAME                                                       TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)    AGE
service/oadp-operator-controller-manager-metrics-service   ClusterIP   172.30.70.140    <none>        8443/TCP   2m8s
service/openshift-adp-velero-metrics-svc                   ClusterIP   172.30.10.0      <none>        8085/TCP   8h

NAME                        DESIRED   CURRENT   READY   UP-TO-DATE   AVAILABLE   NODE SELECTOR   AGE
daemonset.apps/node-agent    3         3         3       3            3           <none>          96s

NAME                                                READY   UP-TO-DATE   AVAILABLE   AGE
deployment.apps/oadp-operator-controller-manager    1/1     1            1           2m9s
deployment.apps/velero                              1/1     1            1           96s

NAME                                                           DESIRED   CURRENT   READY   AGE
replicaset.apps/oadp-operator-controller-manager-67d9494d47    1         1         1       2m9s
replicaset.apps/velero-588db7f655                              1         1         1       96s
$ oc get dpa dpa-sample -n openshift-adp -o jsonpath='{.status}'
{"conditions":[{"lastTransitionTime":"2023-10-27T01:23:57Z","message":"Reconcile complete","reason":"Complete","status":"True","type":"Reconciled"}]}
$ oc get backupstoragelocations.velero.io -n openshift-adp
NAME           PHASE       LAST VALIDATED   AGE     DEFAULT
dpa-sample-1   Available   1s               3d16h   true
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionApplication
metadata:
  name: test-dpa
  namespace: openshift-adp
spec:
  backupLocations:
    - name: default
      velero:
        config:
          insecureSkipTLSVerify: "true"
          profile: "default"
          region: <bucket_region>
          s3ForcePathStyle: "true"
          s3Url: <bucket_url>
        credential:
          key: cloud
          name: cloud-credentials
        default: true
        objectStorage:
          bucket: <bucket_name>
          prefix: velero
        provider: aws
  configuration:
    nodeAgent:
      enable: true
      uploaderType: restic
    velero:
      client-burst: 500 
      client-qps: 300 
      defaultPlugins:
        - openshift
        - aws
        - kubevirt
$ oc label node/<node_name> node-role.kubernetes.io/nodeAgent=""
configuration:
  nodeAgent:
    enable: true
    podConfig:
      nodeSelector:
        node-role.kubernetes.io/nodeAgent: ""
    configuration:
      nodeAgent:
        enable: true
        podConfig:
          nodeSelector:
            node-role.kubernetes.io/infra: ""
            node-role.kubernetes.io/worker: ""
...
spec:
  configuration:
    nodeAgent:
      enable: true
      uploaderType: kopia
      podConfig:
        nodeSelector:
          label.io/role: cpu-1
          other-label.io/other-role: cpu-2
        ...
...
spec:
  configuration:
    nodeAgent:
      enable: true
      loadAffinity: 
        - nodeSelector:
            matchLabels:
              label.io/role: cpu-1
            matchExpressions: 
              - key: label.io/hostname
                operator: In
                values:
                  - node1
                  - node2
                  ...
...
spec:
  configuration:
    nodeAgent:
      enable: true
      uploaderType: kopia
      loadAffinity:
        - nodeSelector:
            matchLabels:
              label.io/location: 'US'
              label.io/gpu: 'no'
      podConfig:
        nodeSelector:
          label.io/gpu: 'no'
$ oc label node/<node_name> label.io/instance-type='large'
  configuration:
    nodeAgent:
      enable: true
      uploaderType: kopia
      loadConcurrency:
        globalConfig: 1 
        perNodeConfig:
        - nodeSelector:
              matchLabels:
                 label.io/instance-type: large 
          number: 3 
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionApplication
metadata:
  name: ts-dpa
  namespace: openshift-adp
spec:
  backupLocations:
  - velero:
      credential:
        key: cloud
        name: cloud-credentials
      default: true
      objectStorage:
        bucket: <bucket_name>
        prefix: velero
      provider: gcp
  configuration:
    nodeAgent: 
      enable: true
      uploaderType: kopia
    velero:
      defaultPlugins:
      - csi
      - gcp
      - openshift
      disableFsBackup: true 
$ oc get daemonset node-agent -o yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
  ...
  name: node-agent
  namespace: openshift-adp
  ...
spec:
  ...
  template:
    metadata:
      ...
    spec:
      containers:
      ...
        securityContext:
          allowPrivilegeEscalation: false 
          capabilities:
            drop:
            - ALL
          privileged: false 
          readOnlyRootFilesystem: true 
        ...
      nodeSelector:
        kubernetes.io/os: linux
      os:
        name: linux
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext:
        runAsNonRoot: true 
        seccompProfile:
          type: RuntimeDefault
      serviceAccount: velero
      serviceAccountName: velero
      ...
...
spec:
  configuration:
    repositoryMaintenance: 
      global: 
        podResources:
          cpuRequest: "100m"
          cpuLimit: "200m"
          memoryRequest: "100Mi"
          memoryLimit: "200Mi"
        loadAffinity:
          - nodeSelector:
              matchLabels:
                label.io/gpu: 'no'
              matchExpressions:
                - key: label.io/location
                  operator: In
                  values:
                    - US
                    - EU
...
spec:
  configuration:
    repositoryMaintenance:
      myrepositoryname: 
        loadAffinity:
          - nodeSelector:
              matchLabels:
                label.io/cpu: 'yes'
...
spec:
  configuration:
    velero:
      podConfig:
        nodeSelector:
          some-label.io/custom-node-role: backup-core
...
spec:
  configuration:
    velero:
      loadAffinity:
        - nodeSelector:
            matchLabels:
              label.io/gpu: 'no'
            matchExpressions:
              - key: label.io/location
                operator: In
                values:
                  - US
                  - EU
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionApplication
metadata:
  name: test-dpa
  namespace: openshift-adp
spec:
  backupLocations:
    - name: default
      velero:
        config:
          insecureSkipTLSVerify: "true"
          profile: "default"
          region: <bucket_region>
          s3ForcePathStyle: "true"
          s3Url: <bucket_url>
        credential:
          key: cloud
          name: cloud-credentials
        default: true
        objectStorage:
          bucket: <bucket_name>
          prefix: velero
        provider: aws
  configuration:
    nodeAgent:
      enable: true
      uploaderType: kopia
    velero:
      defaultPlugins:
        - openshift
        - aws
        - kubevirt
        - csi
  imagePullPolicy: Never 
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionApplication
...
spec:
  configuration:
    velero:
      defaultPlugins:
      - openshift
      - csi 
# ...
configuration:
  nodeAgent:
    enable: false  
    uploaderType: kopia
# ...
# ...
configuration:
  nodeAgent:
    enable: true  
    uploaderType: kopia
# ...
$ gcloud auth login
$ BUCKET=<bucket> 
$ gsutil mb gs://$BUCKET/
$ PROJECT_ID=$(gcloud config get-value project)
$ gcloud iam service-accounts create velero \
    --display-name "Velero service account"
$ gcloud iam service-accounts list
$ SERVICE_ACCOUNT_EMAIL=$(gcloud iam service-accounts list \
    --filter="displayName:Velero service account" \
    --format 'value(email)')
$ ROLE_PERMISSIONS=(
    compute.disks.get
    compute.disks.create
    compute.disks.createSnapshot
    compute.snapshots.get
    compute.snapshots.create
    compute.snapshots.useReadOnly
    compute.snapshots.delete
    compute.zones.get
    storage.objects.create
    storage.objects.delete
    storage.objects.get
    storage.objects.list
    iam.serviceAccounts.signBlob
)
$ gcloud iam roles create velero.server \
    --project $PROJECT_ID \
    --title "Velero Server" \
    --permissions "$(IFS=","; echo "${ROLE_PERMISSIONS[*]}")"
$ gcloud projects add-iam-policy-binding $PROJECT_ID \
    --member serviceAccount:$SERVICE_ACCOUNT_EMAIL \
    --role projects/$PROJECT_ID/roles/velero.server
$ gsutil iam ch serviceAccount:$SERVICE_ACCOUNT_EMAIL:objectAdmin gs://${BUCKET}
$ gcloud iam service-accounts keys create credentials-velero \
    --iam-account $SERVICE_ACCOUNT_EMAIL
$ oc create secret generic cloud-credentials-gcp -n openshift-adp --from-file cloud=credentials-velero
$ oc create secret generic cloud-credentials-gcp -n openshift-adp --from-file cloud=credentials-velero
$ oc create secret generic <custom_secret> -n openshift-adp --from-file cloud=credentials-velero
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionApplication
metadata:
  name: <dpa_sample>
  namespace: openshift-adp
spec:
...
  backupLocations:
    - velero:
        provider: gcp
        default: true
        credential:
          key: cloud
          name: <custom_secret> 
        objectStorage:
          bucket: <bucket_name>
          prefix: <prefix>
  snapshotLocations:
    - velero:
        provider: gcp
        default: true
        config:
          project: <project>
          snapshotLocation: us-west1
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionApplication
metadata:
  name: <dpa_sample>
spec:
# ...
  configuration:
    velero:
      podConfig:
        nodeSelector: <node_selector> 
        resourceAllocations:
          limits:
            cpu: "1"
            memory: 1024Mi
          requests:
            cpu: 200m
            memory: 256Mi
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionApplication
metadata:
  name: <dpa_sample>
spec:
# ...
  backupLocations:
    - name: default
      velero:
        provider: aws
        default: true
        objectStorage:
          bucket: <bucket>
          prefix: <prefix>
          caCert: <base64_encoded_cert_string> 
        config:
          insecureSkipTLSVerify: "false"
# ...
$ alias velero='oc -n openshift-adp exec deployment/velero -c velero -it -- ./velero'
$ velero version
Client:
	Version: v1.12.1-OADP
	Git commit: -
Server:
	Version: v1.12.1-OADP
$ CA_CERT=$(oc -n openshift-adp get dataprotectionapplications.oadp.openshift.io <dpa-name> -o jsonpath='{.spec.backupLocations[0].velero.objectStorage.caCert}')

$ [[ -n $CA_CERT ]] && echo "$CA_CERT" | base64 -d | oc exec -n openshift-adp -i deploy/velero -c velero -- bash -c "cat > /tmp/your-cacert.txt" || echo "DPA BSL has no caCert"
$ velero describe backup <backup_name> --details --cacert /tmp/<your_cacert>.txt
$ velero backup logs <backup_name> --cacert /tmp/<your_cacert>.txt
$ oc exec -n openshift-adp -i deploy/velero -c velero -- bash -c "ls /tmp/your-cacert.txt"
/tmp/your-cacert.txt
$ mkdir -p oadp-credrequest
$ echo 'apiVersion: cloudcredential.openshift.io/v1
kind: CredentialsRequest
metadata:
  name: oadp-operator-credentials
  namespace: openshift-cloud-credential-operator
spec:
  providerSpec:
    apiVersion: cloudcredential.openshift.io/v1
    kind: GCPProviderSpec
    permissions:
    - compute.disks.get
    - compute.disks.create
    - compute.disks.createSnapshot
    - compute.snapshots.get
    - compute.snapshots.create
    - compute.snapshots.useReadOnly
    - compute.snapshots.delete
    - compute.zones.get
    - storage.objects.create
    - storage.objects.delete
    - storage.objects.get
    - storage.objects.list
    - iam.serviceAccounts.signBlob
    skipServiceCheck: true
  secretRef:
    name: cloud-credentials-gcp
    namespace: <OPERATOR_INSTALL_NS>
  serviceAccountNames:
  - velero
' > oadp-credrequest/credrequest.yaml
$ ccoctl gcp create-service-accounts \
    --name=<name> \
    --project=<gcp_project_id> \
    --credentials-requests-dir=oadp-credrequest \
    --workload-identity-pool=<pool_id> \
    --workload-identity-provider=<provider_id>
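The ccoctl command writes the generated Secret manifests into a local manifests directory; listing it should show the credentials file that is applied in a later step (the file name below is taken from that apply command, not from a real run):
$ ls manifests/
openshift-adp-cloud-credentials-gcp-credentials.yaml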
$ oc create namespace <OPERATOR_INSTALL_NS>
$ oc apply -f manifests/openshift-adp-cloud-credentials-gcp-credentials.yaml
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionApplication
metadata:
  name: <dpa_sample>
  namespace: <OPERATOR_INSTALL_NS>
spec:
  configuration:
    velero:
      defaultPlugins:
        - gcp
        - openshift
      resourceTimeout: 10m
    nodeAgent:
      enable: true
      uploaderType: kopia
      podConfig:
        nodeSelector: <node_selector>
  backupLocations:
    - velero:
        provider: gcp
        default: true
        credential:
          key: cloud
          name: cloud-credentials-gcp
        objectStorage:
          bucket: <bucket_name>
          prefix: <prefix>
  snapshotLocations:
    - velero:
        provider: gcp
        default: true
        config:
          project: <project>
          snapshotLocation: us-west1
        credential:
          key: cloud
          name: cloud-credentials-gcp
  backupImages: true
$ oc get all -n openshift-adp
NAME                                                     READY   STATUS    RESTARTS   AGE
pod/oadp-operator-controller-manager-67d9494d47-6l8z8    2/2     Running   0          2m8s
pod/node-agent-9cq4q                                     1/1     Running   0          94s
pod/node-agent-m4lts                                     1/1     Running   0          94s
pod/node-agent-pv4kr                                     1/1     Running   0          95s
pod/velero-588db7f655-n842v                              1/1     Running   0          95s

NAME                                                       TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)    AGE
service/oadp-operator-controller-manager-metrics-service   ClusterIP   172.30.70.140    <none>        8443/TCP   2m8s
service/openshift-adp-velero-metrics-svc                   ClusterIP   172.30.10.0      <none>        8085/TCP   8h

NAME                        DESIRED   CURRENT   READY   UP-TO-DATE   AVAILABLE   NODE SELECTOR   AGE
daemonset.apps/node-agent    3         3         3       3            3           <none>          96s

NAME                                                READY   UP-TO-DATE   AVAILABLE   AGE
deployment.apps/oadp-operator-controller-manager    1/1     1            1           2m9s
deployment.apps/velero                              1/1     1            1           96s

NAME                                                           DESIRED   CURRENT   READY   AGE
replicaset.apps/oadp-operator-controller-manager-67d9494d47    1         1         1       2m9s
replicaset.apps/velero-588db7f655                              1         1         1       96s
$ oc get dpa dpa-sample -n openshift-adp -o jsonpath='{.status}'
{"conditions":[{"lastTransitionTime":"2023-10-27T01:23:57Z","message":"Reconcile complete","reason":"Complete","status":"True","type":"Reconciled"}]}
$ oc get backupstoragelocations.velero.io -n openshift-adp
NAME           PHASE       LAST VALIDATED   AGE     DEFAULT
dpa-sample-1   Available   1s               3d16h   true
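If the PHASE column does not report Available, inspecting the backup storage location with standard oc describe output is one way to surface validation errors (resource name taken from the sample output above):
$ oc describe backupstoragelocations.velero.io dpa-sample-1 -n openshift-adp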
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionApplication
metadata:
  name: test-dpa
  namespace: openshift-adp
spec:
  backupLocations:
    - name: default
      velero:
        config:
          insecureSkipTLSVerify: "true"
          profile: "default"
          region: <bucket_region>
          s3ForcePathStyle: "true"
          s3Url: <bucket_url>
        credential:
          key: cloud
          name: cloud-credentials
        default: true
        objectStorage:
          bucket: <bucket_name>
          prefix: velero
        provider: aws
  configuration:
    nodeAgent:
      enable: true
      uploaderType: restic
    velero:
      client-burst: 500 
      client-qps: 300
      defaultPlugins:
        - openshift
        - aws
        - kubevirt
$ oc label node/<node_name> node-role.kubernetes.io/nodeAgent=""
configuration:
  nodeAgent:
    enable: true
    podConfig:
      nodeSelector:
        node-role.kubernetes.io/nodeAgent: ""
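To confirm that the selector was propagated, you can query the node-agent daemon set's pod template with a standard jsonpath expression (a quick check, assuming the daemon set name node-agent shown elsewhere in this document):
$ oc get daemonset node-agent -n openshift-adp -o jsonpath='{.spec.template.spec.nodeSelector}'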
    configuration:
      nodeAgent:
        enable: true
        podConfig:
          nodeSelector:
            node-role.kubernetes.io/infra: ""
            node-role.kubernetes.io/worker: ""
...
spec:
  configuration:
    nodeAgent:
      enable: true
      uploaderType: kopia
      podConfig:
        nodeSelector:
          label.io/role: cpu-1
          other-label.io/other-role: cpu-2
        ...
...
spec:
  configuration:
    nodeAgent:
      enable: true
      loadAffinity: 
        - nodeSelector:
            matchLabels:
              label.io/role: cpu-1
            matchExpressions:
              - key: label.io/hostname
                operator: In
                values:
                  - node1
                  - node2
                  ...
...
spec:
  configuration:
    nodeAgent:
      enable: true
      uploaderType: kopia
      loadAffinity:
        - nodeSelector:
            matchLabels:
              label.io/location: 'US'
              label.io/gpu: 'no'
      podConfig:
        nodeSelector:
          label.io/gpu: 'no'
$ oc label node/<node_name> label.io/instance-type='large'
  configuration:
    nodeAgent:
      enable: true
      uploaderType: kopia
      loadConcurrency:
        globalConfig: 1 
        perNodeConfig:
          - nodeSelector:
              matchLabels:
                label.io/instance-type: large
            number: 3
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionApplication
metadata:
  name: ts-dpa
  namespace: openshift-adp
spec:
  backupLocations:
  - velero:
      credential:
        key: cloud
        name: cloud-credentials
      default: true
      objectStorage:
        bucket: <bucket_name>
        prefix: velero
      provider: gcp
  configuration:
    nodeAgent: 
      enable: true
      uploaderType: kopia
    velero:
      defaultPlugins:
      - csi
      - gcp
      - openshift
      disableFsBackup: true 
$ oc get daemonset node-agent -o yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
  ...
  name: node-agent
  namespace: openshift-adp
  ...
spec:
  ...
  template:
    metadata:
      ...
    spec:
      containers:
      ...
        securityContext:
          allowPrivilegeEscalation: false 
          capabilities:
            drop:
            - ALL
          privileged: false 
          readOnlyRootFilesystem: true
        ...
      nodeSelector:
        kubernetes.io/os: linux
      os:
        name: linux
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext:
        runAsNonRoot: true 
        seccompProfile:
          type: RuntimeDefault
      serviceAccount: velero
      serviceAccountName: velero
      ....
...
spec:
  configuration:
    repositoryMaintenance: 
      global:
        podResources:
          cpuRequest: "100m"
          cpuLimit: "200m"
          memoryRequest: "100Mi"
          memoryLimit: "200Mi"
        loadAffinity:
          - nodeSelector:
              matchLabels:
                label.io/gpu: 'no'
              matchExpressions:
                - key: label.io/location
                  operator: In
                  values:
                    - US
                    - EU
...
spec:
  configuration:
    repositoryMaintenance:
      myrepositoryname: 
        loadAffinity:
          - nodeSelector:
              matchLabels:
                label.io/cpu: 'yes'
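A per-repository key such as myrepositoryname must match the name of an existing backup repository. One way to list repository names, using the standard Velero BackupRepository custom resource:
$ oc get backuprepositories.velero.io -n openshift-adp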
...
spec:
  configuration:
    velero:
      podConfig:
        nodeSelector:
          some-label.io/custom-node-role: backup-core
...
spec:
  configuration:
    velero:
      loadAffinity:
        - nodeSelector:
            matchLabels:
              label.io/gpu: 'no'
            matchExpressions:
              - key: label.io/location
                operator: In
                values:
                  - US
                  - EU
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionApplication
metadata:
  name: test-dpa
  namespace: openshift-adp
spec:
  backupLocations:
    - name: default
      velero:
        config:
          insecureSkipTLSVerify: "true"
          profile: "default"
          region: <bucket_region>
          s3ForcePathStyle: "true"
          s3Url: <bucket_url>
        credential:
          key: cloud
          name: cloud-credentials
        default: true
        objectStorage:
          bucket: <bucket_name>
          prefix: velero
        provider: aws
  configuration:
    nodeAgent:
      enable: true
      uploaderType: kopia
    velero:
      defaultPlugins:
        - openshift
        - aws
        - kubevirt
        - csi
  imagePullPolicy: Never 
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionApplication
...
spec:
  configuration:
    velero:
      defaultPlugins:
      - openshift
      - csi 
# ...
configuration:
  nodeAgent:
    enable: false  
    uploaderType: kopia
# ...
# ...
configuration:
  nodeAgent:
    enable: true  
    uploaderType: kopia
# ...
$ cat << EOF > ./credentials-velero
[default]
aws_access_key_id=<AWS_ACCESS_KEY_ID>
aws_secret_access_key=<AWS_SECRET_ACCESS_KEY>
EOF
[default]
aws_access_key_id=<AWS_ACCESS_KEY_ID>
aws_secret_access_key=<AWS_SECRET_ACCESS_KEY>
$ oc create secret generic cloud-credentials -n openshift-adp --from-file cloud=credentials-velero
$ oc create secret generic cloud-credentials -n openshift-adp --from-file cloud=credentials-velero
$ oc create secret generic <custom_secret> -n openshift-adp --from-file cloud=credentials-velero
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionApplication
metadata:
  name: <dpa_sample>
  namespace: openshift-adp
spec:
...
  backupLocations:
    - velero:
        config:
          profile: "default"
          region: <region_name> 
          s3Url: <url>
          insecureSkipTLSVerify: "true"
          s3ForcePathStyle: "true"
        provider: aws
        default: true
        credential:
          key: cloud
          name: <custom_secret>
        objectStorage:
          bucket: <bucket_name>
          prefix: <prefix>
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionApplication
metadata:
  name: <dpa_sample>
spec:
# ...
  configuration:
    velero:
      podConfig:
        nodeSelector: <node_selector> 
        resourceAllocations:
          limits:
            cpu: "1"
            memory: 1024Mi
          requests:
            cpu: 200m
            memory: 256Mi
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionApplication
metadata:
  name: <dpa_sample>
spec:
# ...
  backupLocations:
    - name: default
      velero:
        provider: aws
        default: true
        objectStorage:
          bucket: <bucket>
          prefix: <prefix>
          caCert: <base64_encoded_cert_string> 
        config:
          insecureSkipTLSVerify: "false"
# ...
$ alias velero='oc -n openshift-adp exec deployment/velero -c velero -it -- ./velero'
$ velero version
Client:
	Version: v1.12.1-OADP
	Git commit: -
Server:
	Version: v1.12.1-OADP
$ CA_CERT=$(oc -n openshift-adp get dataprotectionapplications.oadp.openshift.io <dpa-name> -o jsonpath='{.spec.backupLocations[0].velero.objectStorage.caCert}')

$ [[ -n $CA_CERT ]] && echo "$CA_CERT" | base64 -d | oc exec -n openshift-adp -i deploy/velero -c velero -- bash -c "cat > /tmp/your-cacert.txt" || echo "DPA BSL has no caCert"
$ velero describe backup <backup_name> --details --cacert /tmp/<your_cacert>.txt
$ velero backup logs <backup_name> --cacert /tmp/<your_cacert>.txt
$ oc exec -n openshift-adp -i deploy/velero -c velero -- bash -c "ls /tmp/your-cacert.txt"
/tmp/your-cacert.txt
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionApplication
metadata:
  name: <dpa_sample>
  namespace: openshift-adp
spec:
  configuration:
    velero:
      defaultPlugins:
        - aws
        - openshift
      resourceTimeout: 10m
    nodeAgent:
      enable: true
      uploaderType: kopia
      podConfig:
        nodeSelector: <node_selector>
  backupLocations:
    - velero:
        config:
          profile: "default"
          region: <region_name>
          s3Url: <url>
          insecureSkipTLSVerify: "true"
          s3ForcePathStyle: "true"
        provider: aws
        default: true
        credential:
          key: cloud
          name: cloud-credentials
        objectStorage:
          bucket: <bucket_name>
          prefix: <prefix>
$ oc get all -n openshift-adp
NAME                                                     READY   STATUS    RESTARTS   AGE
pod/oadp-operator-controller-manager-67d9494d47-6l8z8    2/2     Running   0          2m8s
pod/node-agent-9cq4q                                     1/1     Running   0          94s
pod/node-agent-m4lts                                     1/1     Running   0          94s
pod/node-agent-pv4kr                                     1/1     Running   0          95s
pod/velero-588db7f655-n842v                              1/1     Running   0          95s

NAME                                                       TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)    AGE
service/oadp-operator-controller-manager-metrics-service   ClusterIP   172.30.70.140    <none>        8443/TCP   2m8s
service/openshift-adp-velero-metrics-svc                   ClusterIP   172.30.10.0      <none>        8085/TCP   8h

NAME                        DESIRED   CURRENT   READY   UP-TO-DATE   AVAILABLE   NODE SELECTOR   AGE
daemonset.apps/node-agent    3         3         3       3            3           <none>          96s

NAME                                                READY   UP-TO-DATE   AVAILABLE   AGE
deployment.apps/oadp-operator-controller-manager    1/1     1            1           2m9s
deployment.apps/velero                              1/1     1            1           96s

NAME                                                           DESIRED   CURRENT   READY   AGE
replicaset.apps/oadp-operator-controller-manager-67d9494d47    1         1         1       2m9s
replicaset.apps/velero-588db7f655                              1         1         1       96s
$ oc get dpa dpa-sample -n openshift-adp -o jsonpath='{.status}'
{"conditions":[{"lastTransitionTime":"2023-10-27T01:23:57Z","message":"Reconcile complete","reason":"Complete","status":"True","type":"Reconciled"}]}
$ oc get backupstoragelocations.velero.io -n openshift-adp
NAME           PHASE       LAST VALIDATED   AGE     DEFAULT
dpa-sample-1   Available   1s               3d16h   true
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionApplication
metadata:
  name: test-dpa
  namespace: openshift-adp
spec:
  backupLocations:
    - name: default
      velero:
        config:
          insecureSkipTLSVerify: "true"
          profile: "default"
          region: <bucket_region>
          s3ForcePathStyle: "true"
          s3Url: <bucket_url>
        credential:
          key: cloud
          name: cloud-credentials
        default: true
        objectStorage:
          bucket: <bucket_name>
          prefix: velero
        provider: aws
  configuration:
    nodeAgent:
      enable: true
      uploaderType: restic
    velero:
      client-burst: 500 
      client-qps: 300
      defaultPlugins:
        - openshift
        - aws
        - kubevirt
$ oc label node/<node_name> node-role.kubernetes.io/nodeAgent=""
configuration:
  nodeAgent:
    enable: true
    podConfig:
      nodeSelector:
        node-role.kubernetes.io/nodeAgent: ""
    configuration:
      nodeAgent:
        enable: true
        podConfig:
          nodeSelector:
            node-role.kubernetes.io/infra: ""
            node-role.kubernetes.io/worker: ""
...
spec:
  configuration:
    nodeAgent:
      enable: true
      uploaderType: kopia
      podConfig:
        nodeSelector:
          label.io/role: cpu-1
          other-label.io/other-role: cpu-2
        ...
...
spec:
  configuration:
    nodeAgent:
      enable: true
      loadAffinity: 
        - nodeSelector:
            matchLabels:
              label.io/role: cpu-1
            matchExpressions:
              - key: label.io/hostname
                operator: In
                values:
                  - node1
                  - node2
                  ...
...
spec:
  configuration:
    nodeAgent:
      enable: true
      uploaderType: kopia
      loadAffinity:
        - nodeSelector:
            matchLabels:
              label.io/location: 'US'
              label.io/gpu: 'no'
      podConfig:
        nodeSelector:
          label.io/gpu: 'no'
$ oc label node/<node_name> label.io/instance-type='large'
  configuration:
    nodeAgent:
      enable: true
      uploaderType: kopia
      loadConcurrency:
        globalConfig: 1 
        perNodeConfig:
          - nodeSelector:
              matchLabels:
                label.io/instance-type: large
            number: 3
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionApplication
metadata:
  name: ts-dpa
  namespace: openshift-adp
spec:
  backupLocations:
  - velero:
      credential:
        key: cloud
        name: cloud-credentials
      default: true
      objectStorage:
        bucket: <bucket_name>
        prefix: velero
      provider: gcp
  configuration:
    nodeAgent: 
      enable: true
      uploaderType: kopia
    velero:
      defaultPlugins:
      - csi
      - gcp
      - openshift
      disableFsBackup: true 
$ oc get daemonset node-agent -o yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
  ...
  name: node-agent
  namespace: openshift-adp
  ...
spec:
  ...
  template:
    metadata:
      ...
    spec:
      containers:
      ...
        securityContext:
          allowPrivilegeEscalation: false 
          capabilities:
            drop:
            - ALL
          privileged: false 
          readOnlyRootFilesystem: true
        ...
      nodeSelector:
        kubernetes.io/os: linux
      os:
        name: linux
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext:
        runAsNonRoot: true 
        seccompProfile:
          type: RuntimeDefault
      serviceAccount: velero
      serviceAccountName: velero
      ....
...
spec:
  configuration:
    repositoryMaintenance: 
      global:
        podResources:
          cpuRequest: "100m"
          cpuLimit: "200m"
          memoryRequest: "100Mi"
          memoryLimit: "200Mi"
        loadAffinity:
          - nodeSelector:
              matchLabels:
                label.io/gpu: 'no'
              matchExpressions:
                - key: label.io/location
                  operator: In
                  values:
                    - US
                    - EU
...
spec:
  configuration:
    repositoryMaintenance:
      myrepositoryname: 
        loadAffinity:
          - nodeSelector:
              matchLabels:
                label.io/cpu: 'yes'
...
spec:
  configuration:
    velero:
      podConfig:
        nodeSelector:
          some-label.io/custom-node-role: backup-core
...
spec:
  configuration:
    velero:
      loadAffinity:
        - nodeSelector:
            matchLabels:
              label.io/gpu: 'no'
            matchExpressions:
              - key: label.io/location
                operator: In
                values:
                  - US
                  - EU
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionApplication
metadata:
  name: test-dpa
  namespace: openshift-adp
spec:
  backupLocations:
    - name: default
      velero:
        config:
          insecureSkipTLSVerify: "true"
          profile: "default"
          region: <bucket_region>
          s3ForcePathStyle: "true"
          s3Url: <bucket_url>
        credential:
          key: cloud
          name: cloud-credentials
        default: true
        objectStorage:
          bucket: <bucket_name>
          prefix: velero
        provider: aws
  configuration:
    nodeAgent:
      enable: true
      uploaderType: kopia
    velero:
      defaultPlugins:
        - openshift
        - aws
        - kubevirt
        - csi
  imagePullPolicy: Never 
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionApplication
...
spec:
  configuration:
    velero:
      defaultPlugins:
      - openshift
      - csi 
# ...
configuration:
  nodeAgent:
    enable: false  
    uploaderType: kopia
# ...
# ...
configuration:
  nodeAgent:
    enable: true  
    uploaderType: kopia
# ...
[default]
aws_access_key_id=<AWS_ACCESS_KEY_ID>
aws_secret_access_key=<AWS_SECRET_ACCESS_KEY>
$ oc create secret generic cloud-credentials -n openshift-adp --from-file cloud=credentials-velero
$ oc create secret generic cloud-credentials -n openshift-adp --from-file cloud=credentials-velero
$ oc create secret generic <custom_secret> -n openshift-adp --from-file cloud=credentials-velero
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionApplication
metadata:
  name: <dpa_sample>
  namespace: openshift-adp
spec:
...
  backupLocations:
    - velero:
        provider: <provider>
        default: true
        credential:
          key: cloud
          name: <custom_secret> 
        objectStorage:
          bucket: <bucket_name>
          prefix: <prefix>
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionApplication
metadata:
  name: <dpa_sample>
spec:
# ...
  configuration:
    velero:
      podConfig:
        nodeSelector: <node_selector> 
        resourceAllocations:
          limits:
            cpu: "1"
            memory: 1024Mi
          requests:
            cpu: 200m
            memory: 256Mi
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionApplication
metadata:
  name: <dpa_sample>
spec:
# ...
  backupLocations:
    - name: default
      velero:
        provider: aws
        default: true
        objectStorage:
          bucket: <bucket>
          prefix: <prefix>
          caCert: <base64_encoded_cert_string> 
        config:
          insecureSkipTLSVerify: "false"
# ...
$ alias velero='oc -n openshift-adp exec deployment/velero -c velero -it -- ./velero'
$ velero version
Client:
	Version: v1.12.1-OADP
	Git commit: -
Server:
	Version: v1.12.1-OADP
$ CA_CERT=$(oc -n openshift-adp get dataprotectionapplications.oadp.openshift.io <dpa-name> -o jsonpath='{.spec.backupLocations[0].velero.objectStorage.caCert}')

$ [[ -n $CA_CERT ]] && echo "$CA_CERT" | base64 -d | oc exec -n openshift-adp -i deploy/velero -c velero -- bash -c "cat > /tmp/your-cacert.txt" || echo "DPA BSL has no caCert"
$ velero describe backup <backup_name> --details --cacert /tmp/<your_cacert>.txt
$ velero backup logs <backup_name> --cacert /tmp/<your_cacert>.txt
$ oc exec -n openshift-adp -i deploy/velero -c velero -- bash -c "ls /tmp/your-cacert.txt"
/tmp/your-cacert.txt
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionApplication
metadata:
  name: <dpa_sample>
  namespace: openshift-adp
spec:
  configuration:
    velero:
      defaultPlugins:
        - aws
        - kubevirt
        - csi
        - openshift
      resourceTimeout: 10m
    nodeAgent:
      enable: true
      uploaderType: kopia
      podConfig:
        nodeSelector: <node_selector>
  backupLocations:
    - velero:
        provider: gcp
        default: true
        credential:
          key: cloud
          name: <default_secret>
        objectStorage:
          bucket: <bucket_name>
          prefix: <prefix>
$ oc get all -n openshift-adp
NAME                                                     READY   STATUS    RESTARTS   AGE
pod/oadp-operator-controller-manager-67d9494d47-6l8z8    2/2     Running   0          2m8s
pod/node-agent-9cq4q                                     1/1     Running   0          94s
pod/node-agent-m4lts                                     1/1     Running   0          94s
pod/node-agent-pv4kr                                     1/1     Running   0          95s
pod/velero-588db7f655-n842v                              1/1     Running   0          95s

NAME                                                       TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)    AGE
service/oadp-operator-controller-manager-metrics-service   ClusterIP   172.30.70.140    <none>        8443/TCP   2m8s
service/openshift-adp-velero-metrics-svc                   ClusterIP   172.30.10.0      <none>        8085/TCP   8h

NAME                        DESIRED   CURRENT   READY   UP-TO-DATE   AVAILABLE   NODE SELECTOR   AGE
daemonset.apps/node-agent    3         3         3       3            3           <none>          96s

NAME                                                READY   UP-TO-DATE   AVAILABLE   AGE
deployment.apps/oadp-operator-controller-manager    1/1     1            1           2m9s
deployment.apps/velero                              1/1     1            1           96s

NAME                                                           DESIRED   CURRENT   READY   AGE
replicaset.apps/oadp-operator-controller-manager-67d9494d47    1         1         1       2m9s
replicaset.apps/velero-588db7f655                              1         1         1       96s
$ oc get dpa dpa-sample -n openshift-adp -o jsonpath='{.status}'
{"conditions":[{"lastTransitionTime":"2023-10-27T01:23:57Z","message":"Reconcile complete","reason":"Complete","status":"True","type":"Reconciled"}]}
$ oc get backupstoragelocations.velero.io -n openshift-adp
NAME           PHASE       LAST VALIDATED   AGE     DEFAULT
dpa-sample-1   Available   1s               3d16h   true
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionApplication
metadata:
  name: test-dpa
  namespace: openshift-adp
spec:
  backupLocations:
    - name: default
      velero:
        config:
          insecureSkipTLSVerify: "true"
          profile: "default"
          region: <bucket_region>
          s3ForcePathStyle: "true"
          s3Url: <bucket_url>
        credential:
          key: cloud
          name: cloud-credentials
        default: true
        objectStorage:
          bucket: <bucket_name>
          prefix: velero
        provider: aws
  configuration:
    nodeAgent:
      enable: true
      uploaderType: restic
    velero:
      client-burst: 500 
      client-qps: 300
      defaultPlugins:
        - openshift
        - aws
        - kubevirt
$ oc label node/<node_name> node-role.kubernetes.io/nodeAgent=""
configuration:
  nodeAgent:
    enable: true
    podConfig:
      nodeSelector:
        node-role.kubernetes.io/nodeAgent: ""
    configuration:
      nodeAgent:
        enable: true
        podConfig:
          nodeSelector:
            node-role.kubernetes.io/infra: ""
            node-role.kubernetes.io/worker: ""
...
spec:
  configuration:
    nodeAgent:
      enable: true
      uploaderType: kopia
      podConfig:
        nodeSelector:
          label.io/role: cpu-1
          other-label.io/other-role: cpu-2
        ...
...
spec:
  configuration:
    nodeAgent:
      enable: true
      loadAffinity: 
        - nodeSelector:
            matchLabels:
              label.io/role: cpu-1
            matchExpressions:
              - key: label.io/hostname
                operator: In
                values:
                  - node1
                  - node2
                  ...
...
spec:
  configuration:
    nodeAgent:
      enable: true
      uploaderType: kopia
      loadAffinity:
        - nodeSelector:
            matchLabels:
              label.io/location: 'US'
              label.io/gpu: 'no'
      podConfig:
        nodeSelector:
          label.io/gpu: 'no'
$ oc label node/<node_name> label.io/instance-type='large'
  configuration:
    nodeAgent:
      enable: true
      uploaderType: kopia
      loadConcurrency:
        globalConfig: 1 
        perNodeConfig:
          - nodeSelector:
              matchLabels:
                label.io/instance-type: large
            number: 3
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionApplication
metadata:
  name: ts-dpa
  namespace: openshift-adp
spec:
  backupLocations:
  - velero:
      credential:
        key: cloud
        name: cloud-credentials
      default: true
      objectStorage:
        bucket: <bucket_name>
        prefix: velero
      provider: gcp
  configuration:
    nodeAgent: 
      enable: true
      uploaderType: kopia
    velero:
      defaultPlugins:
      - csi
      - gcp
      - openshift
      disableFsBackup: true 
$ oc get daemonset node-agent -o yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
  ...
  name: node-agent
  namespace: openshift-adp
  ...
spec:
  ...
  template:
    metadata:
      ...
    spec:
      containers:
      ...
        securityContext:
          allowPrivilegeEscalation: false 
          capabilities:
            drop:
            - ALL
          privileged: false 
          readOnlyRootFilesystem: true
        ...
      nodeSelector:
        kubernetes.io/os: linux
      os:
        name: linux
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext:
        runAsNonRoot: true 
        seccompProfile:
          type: RuntimeDefault
      serviceAccount: velero
      serviceAccountName: velero
      ....
...
spec:
  configuration:
    repositoryMaintenance: 
      global:
        podResources:
          cpuRequest: "100m"
          cpuLimit: "200m"
          memoryRequest: "100Mi"
          memoryLimit: "200Mi"
        loadAffinity:
          - nodeSelector:
              matchLabels:
                label.io/gpu: 'no'
              matchExpressions:
                - key: label.io/location
                  operator: In
                  values:
                    - US
                    - EU
...
spec:
  configuration:
    repositoryMaintenance:
      myrepositoryname: 
        loadAffinity:
          - nodeSelector:
              matchLabels:
                label.io/cpu: 'yes'
...
spec:
  configuration:
    velero:
      podConfig:
        nodeSelector:
          some-label.io/custom-node-role: backup-core
...
spec:
  configuration:
    velero:
      loadAffinity:
        - nodeSelector:
            matchLabels:
              label.io/gpu: 'no'
            matchExpressions:
              - key: label.io/location
                operator: In
                values:
                  - US
                  - EU
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionApplication
metadata:
  name: test-dpa
  namespace: openshift-adp
spec:
  backupLocations:
    - name: default
      velero:
        config:
          insecureSkipTLSVerify: "true"
          profile: "default"
          region: <bucket_region>
          s3ForcePathStyle: "true"
          s3Url: <bucket_url>
        credential:
          key: cloud
          name: cloud-credentials
        default: true
        objectStorage:
          bucket: <bucket_name>
          prefix: velero
        provider: aws
  configuration:
    nodeAgent:
      enable: true
      uploaderType: kopia
    velero:
      defaultPlugins:
        - openshift
        - aws
        - kubevirt
        - csi
  imagePullPolicy: Never 
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionApplication
...
spec:
  configuration:
    velero:
      defaultPlugins:
      - openshift
      - csi 
# ...
configuration:
  nodeAgent:
    enable: false  
    uploaderType: kopia
# ...
# ...
configuration:
  nodeAgent:
    enable: true  
    uploaderType: kopia
# ...
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionApplication
metadata:
  name: <dpa_sample>
  namespace: openshift-adp
spec:
  configuration:
    velero:
      defaultPlugins:
        - kubevirt
        - gcp
        - csi
        - openshift
      resourceTimeout: 10m
    nodeAgent:
      enable: true
      uploaderType: kopia
      podConfig:
        nodeSelector: <node_selector>
  backupLocations:
    - velero:
        provider: gcp
        default: true
        credential:
          key: cloud
          name: <default_secret>
        objectStorage:
          bucket: <bucket_name>
          prefix: <prefix>
$ oc get all -n openshift-adp
NAME                                                     READY   STATUS    RESTARTS   AGE
pod/oadp-operator-controller-manager-67d9494d47-6l8z8    2/2     Running   0          2m8s
pod/node-agent-9cq4q                                     1/1     Running   0          94s
pod/node-agent-m4lts                                     1/1     Running   0          94s
pod/node-agent-pv4kr                                     1/1     Running   0          95s
pod/velero-588db7f655-n842v                              1/1     Running   0          95s

NAME                                                       TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)    AGE
service/oadp-operator-controller-manager-metrics-service   ClusterIP   172.30.70.140    <none>        8443/TCP   2m8s
service/openshift-adp-velero-metrics-svc                   ClusterIP   172.30.10.0      <none>        8085/TCP   8h

NAME                        DESIRED   CURRENT   READY   UP-TO-DATE   AVAILABLE   NODE SELECTOR   AGE
daemonset.apps/node-agent    3         3         3       3            3           <none>          96s

NAME                                                READY   UP-TO-DATE   AVAILABLE   AGE
deployment.apps/oadp-operator-controller-manager    1/1     1            1           2m9s
deployment.apps/velero                              1/1     1            1           96s

NAME                                                           DESIRED   CURRENT   READY   AGE
replicaset.apps/oadp-operator-controller-manager-67d9494d47    1         1         1       2m9s
replicaset.apps/velero-588db7f655                              1         1         1       96s
$ oc get dpa dpa-sample -n openshift-adp -o jsonpath='{.status}'
{"conditions":[{"lastTransitionTime":"2023-10-27T01:23:57Z","message":"Reconcile complete","reason":"Complete","status":"True","type":"Reconciled"}]}
$ oc get backupstoragelocations.velero.io -n openshift-adp
NAME           PHASE       LAST VALIDATED   AGE     DEFAULT
dpa-sample-1   Available   1s               3d16h   true
apiVersion: velero.io/v1
kind: Backup
metadata:
  name: vmbackupsingle
  namespace: openshift-adp
spec:
  snapshotMoveData: true
  includedNamespaces:
  - <vm_namespace> 
  labelSelector:
    matchLabels:
      app: <vm_app_name> 
  storageLocation: <backup_storage_location_name> 
$ oc apply -f <backup_cr_file_name> 
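You can then follow the backup by polling the phase of the Backup CR, for example:

$ oc get backups.velero.io vmbackupsingle -n openshift-adp -o jsonpath='{.status.phase}'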
apiVersion: velero.io/v1
kind: Restore
metadata:
  name: vmrestoresingle
  namespace: openshift-adp
spec:
  backupName: vmbackupsingle 
  restorePVs: true
$ oc apply -f <restore_cr_file_name> 
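As with the backup, you can poll the Restore CR until its phase reports Completed, for example:

$ oc get restores.velero.io vmrestoresingle -n openshift-adp -o jsonpath='{.status.phase}'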
$ oc label vm <vm_name> app=<vm_name> -n openshift-adp
apiVersion: velero.io/v1
kind: Restore
metadata:
  name: singlevmrestore
  namespace: openshift-adp
spec:
  backupName: multiplevmbackup
  restorePVs: true
  orLabelSelectors:
    - matchLabels:
        kubevirt.io/created-by: <datavolume_uid> 
    - matchLabels:
        app: <vm_name> 
$ oc apply -f <restore_cr_file_name> 
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionApplication
metadata:
  name: test-dpa
  namespace: openshift-adp
spec:
  backupLocations:
    - name: default
      velero:
        config:
          insecureSkipTLSVerify: "true"
          profile: "default"
          region: <bucket_region>
          s3ForcePathStyle: "true"
          s3Url: <bucket_url>
        credential:
          key: cloud
          name: cloud-credentials
        default: true
        objectStorage:
          bucket: <bucket_name>
          prefix: velero
        provider: aws
  configuration:
    nodeAgent:
      enable: true
      uploaderType: restic
    velero:
      client-burst: 500 
      client-qps: 300 
      defaultPlugins:
        - openshift
        - aws
        - kubevirt
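The client-burst and client-qps fields raise the rate limits that the Velero client uses when talking to the API server. As a quick spot check, assuming the operator renders these fields as arguments on the Velero deployment, you can search the deployment manifest for them:

$ oc get deployment velero -n openshift-adp -o yaml | grep -E 'client-burst|client-qps'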
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionApplication
metadata:
  name: ts-dpa
  namespace: openshift-adp
spec:
  backupLocations:
  - velero:
      credential:
        key: cloud
        name: cloud-credentials
      default: true
      objectStorage:
        bucket: <bucket_name>
        prefix: velero
      provider: gcp
  configuration:
    nodeAgent: 
      enable: true
      uploaderType: kopia
    velero:
      defaultPlugins:
      - csi
      - gcp
      - openshift
      disableFsBackup: true 
$ oc get daemonset node-agent -o yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
  ...
  name: node-agent
  namespace: openshift-adp
  ...
spec:
  ...
  template:
    metadata:
      ...
    spec:
      containers:
      ...
        securityContext:
          allowPrivilegeEscalation: false 
          capabilities:
            drop:
            - ALL
          privileged: false 
          readOnlyRootFilesystem: true 
        ...
      nodeSelector:
        kubernetes.io/os: linux
      os:
        name: linux
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext:
        runAsNonRoot: true 
        seccompProfile:
          type: RuntimeDefault
      serviceAccount: velero
      serviceAccountName: velero
      ...
$ oc label node/<node_name> node-role.kubernetes.io/nodeAgent=""
configuration:
  nodeAgent:
    enable: true
    podConfig:
      nodeSelector:
        node-role.kubernetes.io/nodeAgent: ""
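Before relying on this selector, you can list the nodes that carry the label applied in the previous command:

$ oc get nodes -l node-role.kubernetes.io/nodeAgent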
    configuration:
      nodeAgent:
        enable: true
        podConfig:
          nodeSelector:
            node-role.kubernetes.io/infra: ""
            node-role.kubernetes.io/worker: ""
...
spec:
  configuration:
    nodeAgent:
      enable: true
      uploaderType: kopia
      podConfig:
        nodeSelector:
          label.io/role: cpu-1
          other-label.io/other-role: cpu-2
        ...
...
spec:
  configuration:
    nodeAgent:
      enable: true
      loadAffinity: 
        - nodeSelector:
            matchLabels:
              label.io/role: cpu-1
            matchExpressions: 
              - key: label.io/hostname
                operator: In
                values:
                  - node1
                  - node2
                  ...
...
spec:
  configuration:
    nodeAgent:
      enable: true
      uploaderType: kopia
      loadAffinity:
        - nodeSelector:
            matchLabels:
              label.io/location: 'US'
              label.io/gpu: 'no'
      podConfig:
        nodeSelector:
          label.io/gpu: 'no'
$ oc label node/<node_name> label.io/instance-type='large'
  configuration:
    nodeAgent:
      enable: true
      uploaderType: kopia
      loadConcurrency:
        globalConfig: 1
        perNodeConfig:
        - nodeSelector:
            matchLabels:
              label.io/instance-type: large
          number: 3
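Because the perNodeConfig entry only takes effect on nodes that carry the matching label, it is worth confirming which nodes were labeled in the previous step:

$ oc get nodes -l label.io/instance-type=large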
...
spec:
  configuration:
    repositoryMaintenance: 
      global: 
        podResources:
          cpuRequest: "100m"
          cpuLimit: "200m"
          memoryRequest: "100Mi"
          memoryLimit: "200Mi"
        loadAffinity:
          - nodeSelector:
              matchLabels:
                label.io/gpu: 'no'
              matchExpressions:
                - key: label.io/location
                  operator: In
                  values:
                    - US
                    - EU
...
spec:
  configuration:
    repositoryMaintenance:
      myrepositoryname: 
        loadAffinity:
          - nodeSelector:
              matchLabels:
                label.io/cpu: 'yes'
...
spec:
  configuration:
    velero:
      podConfig:
        nodeSelector:
          some-label.io/custom-node-role: backup-core
...
spec:
  configuration:
    velero:
      loadAffinity:
        - nodeSelector:
            matchLabels:
              label.io/gpu: 'no'
            matchExpressions:
              - key: label.io/location
                operator: In
                values:
                  - US
                  - EU
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionApplication
metadata:
  name: test-dpa
  namespace: openshift-adp
spec:
  backupLocations:
    - name: default
      velero:
        config:
          insecureSkipTLSVerify: "true"
          profile: "default"
          region: <bucket_region>
          s3ForcePathStyle: "true"
          s3Url: <bucket_url>
        credential:
          key: cloud
          name: cloud-credentials
        default: true
        objectStorage:
          bucket: <bucket_name>
          prefix: velero
        provider: aws
  configuration:
    nodeAgent:
      enable: true
      uploaderType: kopia
    velero:
      defaultPlugins:
        - openshift
        - aws
        - kubevirt
        - csi
  imagePullPolicy: Never 
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionApplication
#...
backupLocations:
  - name: aws 
    velero:
      provider: aws
      default: true 
      objectStorage:
        bucket: <bucket_name> 
        prefix: <prefix> 
      config:
        region: <region_name> 
        profile: "default"
      credential:
        key: cloud
        name: cloud-credentials 
  - name: odf 
    velero:
      provider: aws
      default: false
      objectStorage:
        bucket: <bucket_name>
        prefix: <prefix>
      config:
        profile: "default"
        region: <region_name>
        s3Url: <url> 
        insecureSkipTLSVerify: "true"
        s3ForcePathStyle: "true"
      credential:
        key: cloud
        name: <custom_secret_name_odf> 
#...
apiVersion: velero.io/v1
kind: Backup
# ...
spec:
  includedNamespaces:
  - <namespace> 
  storageLocation: <backup_storage_location> 
  defaultVolumesToFsBackup: true
$ oc create secret generic cloud-credentials -n openshift-adp --from-file cloud=<aws_credentials_file_name> 
$ oc create secret generic mcg-secret -n openshift-adp --from-file cloud=<MCG_credentials_file_name> 
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionApplication
metadata:
  name: two-bsl-dpa
  namespace: openshift-adp
spec:
  backupLocations:
  - name: aws
    velero:
      config:
        profile: default
        region: <region_name> 
      credential:
        key: cloud
        name: cloud-credentials
      default: true
      objectStorage:
        bucket: <bucket_name> 
        prefix: velero
      provider: aws
  - name: mcg
    velero:
      config:
        insecureSkipTLSVerify: "true"
        profile: noobaa
        region: <region_name> 
        s3ForcePathStyle: "true"
        s3Url: <s3_url> 
      credential:
        key: cloud
        name: mcg-secret 
      objectStorage:
        bucket: <bucket_name_mcg> 
        prefix: velero
      provider: aws
  configuration:
    nodeAgent:
      enable: true
      uploaderType: kopia
    velero:
      defaultPlugins:
      - openshift
      - aws
$ oc create -f <dpa_file_name> 
$ oc get dpa -o yaml
$ oc get bsl
NAME   PHASE       LAST VALIDATED   AGE     DEFAULT
aws    Available   5s               3m28s   true
mcg    Available   5s               3m28s
apiVersion: velero.io/v1
kind: Backup
metadata:
  name: test-backup1
  namespace: openshift-adp
spec:
  includedNamespaces:
  - <mysql_namespace> 
  defaultVolumesToFsBackup: true
$ oc apply -f <backup_file_name> 
$ oc get backups.velero.io <backup_name> -o yaml 
apiVersion: velero.io/v1
kind: Backup
metadata:
  name: test-backup1
  namespace: openshift-adp
spec:
  includedNamespaces:
  - <mysql_namespace> 
  storageLocation: mcg 
  defaultVolumesToFsBackup: true
$ oc apply -f <backup_file_name> 
$ oc get backups.velero.io <backup_name> -o yaml 
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionApplication
#...
snapshotLocations:
  - velero:
      config:
        profile: default
        region: <region> 
      credential:
        key: cloud
        name: cloud-credentials
      provider: aws
  - velero:
      config:
        profile: default
        region: <region>
      credential:
        key: cloud
        name: <custom_credential> 
      provider: aws
#...
$ velero backup create <backup-name> --snapshot-volumes false 
$ velero describe backup <backup_name> --details 
$ velero restore create --from-backup <backup-name> 
$ velero describe restore <restore_name> --details 
$ oc get backupstoragelocations.velero.io -n openshift-adp
NAMESPACE       NAME              PHASE       LAST VALIDATED   AGE   DEFAULT
openshift-adp   velero-sample-1   Available   11s              31m
apiVersion: velero.io/v1
kind: Backup
metadata:
  name: <backup>
  labels:
    velero.io/storage-location: default
  namespace: openshift-adp
spec:
  hooks: {}
  includedNamespaces:
  - <namespace>
  includedResources: []
  excludedResources: []
  storageLocation: <velero-sample-1>
  ttl: 720h0m0s
  labelSelector: # selects resources that carry all of the listed labels; keys in one matchLabels map must be unique
    matchLabels:
      app: <label_1>
      env: <label_2> # example second key; repeating the app key is not valid YAML
  orLabelSelectors: # selects resources that match at least one of the listed selectors
  - matchLabels:
      app: <label_1>
  - matchLabels:
      app: <label_2>
  - matchLabels:
      app: <label_3>
$ oc get backups.velero.io -n openshift-adp <backup> -o jsonpath='{.status.phase}'
apiVersion: snapshot.storage.k8s.io/v1
kind: VolumeSnapshotClass
metadata:
  name: <volume_snapshot_class_name>
  labels:
    velero.io/csi-volumesnapshot-class: "true"
  annotations:
    snapshot.storage.kubernetes.io/is-default-class: "true"
driver: <csi_driver>
deletionPolicy: <deletion_policy_type>
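To see which snapshot classes Velero will consider, you can filter on the label set above:

$ oc get volumesnapshotclass -l velero.io/csi-volumesnapshot-class=true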
apiVersion: velero.io/v1
kind: Backup
metadata:
  name: <backup>
  labels:
    velero.io/storage-location: default
  namespace: openshift-adp
spec:
  defaultVolumesToFsBackup: true 
...
apiVersion: velero.io/v1
kind: Backup
metadata:
  name: <backup>
  namespace: openshift-adp
spec:
  hooks:
    resources:
      - name: <hook_name>
        includedNamespaces:
        - <namespace>
        excludedNamespaces:
        - <namespace>
        includedResources:
        - pods
        excludedResources: []
        labelSelector:
          matchLabels:
            app: velero
            component: server
        pre:
          - exec:
              container: <container>
              command:
              - /bin/uname
              - -a
              onError: Fail
              timeout: 30s
        post:
...
$ oc get backupStorageLocations -n openshift-adp
NAMESPACE       NAME              PHASE       LAST VALIDATED   AGE   DEFAULT
openshift-adp   velero-sample-1   Available   11s              31m
$ cat << EOF | oc apply -f -
apiVersion: velero.io/v1
kind: Schedule
metadata:
  name: <schedule>
  namespace: openshift-adp
spec:
  schedule: 0 7 * * * 
  template:
    hooks: {}
    includedNamespaces:
    - <namespace> 
    storageLocation: <velero-sample-1> 
    defaultVolumesToFsBackup: true 
    ttl: 720h0m0s
EOF
  schedule: "*/10 * * * *"
$ oc get schedule -n openshift-adp <schedule> -o jsonpath='{.status.phase}'
apiVersion: velero.io/v1
kind: DeleteBackupRequest
metadata:
  name: deletebackuprequest
  namespace: openshift-adp
spec:
  backupName: <backup_name> 
$ oc apply -f <deletebackuprequest_cr_filename>
$ velero backup delete <backup_name> -n openshift-adp 
pod/repo-maintain-job-173...2527-2nbls                             0/1     Completed   0          168m
pod/repo-maintain-job-173....536-fl9tm                             0/1     Completed   0          108m
pod/repo-maintain-job-173...2545-55ggx                             0/1     Completed   0          48m
not due for full maintenance cycle until 2024-00-00 18:29:4
$ oc get backuprepositories.velero.io -n openshift-adp
$ oc delete backuprepository <backup_repository_name> -n openshift-adp 
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionApplication
metadata:
  name: dpa-sample
spec:
  configuration:
    nodeAgent:
      enable: true
      uploaderType: kopia
# ...
$ velero backup create <backup-name> --snapshot-volumes false 
$ velero describe backup <backup_name> --details 
$ velero restore create --from-backup <backup-name> 
$ velero describe restore <restore_name> --details 
apiVersion: velero.io/v1
kind: Restore
metadata:
  name: <restore>
  namespace: openshift-adp
spec:
  backupName: <backup> 
  includedResources: [] 
  excludedResources:
  - nodes
  - events
  - events.events.k8s.io
  - backups.velero.io
  - restores.velero.io
  - resticrepositories.velero.io
  restorePVs: true 
$ oc get restores.velero.io -n openshift-adp <restore> -o jsonpath='{.status.phase}'
$ oc get all -n <namespace> 
The script was renamed from dc-restic-post-restore.sh to dc-post-restore.sh; run it with the restore name as its only argument:
$ bash dc-post-restore.sh <restore_name>
#!/bin/bash
set -e

# if sha256sum exists, use it to check the integrity of the file
if command -v sha256sum >/dev/null 2>&1; then
  CHECKSUM_CMD="sha256sum"
else
  CHECKSUM_CMD="shasum -a 256"
fi

label_name () {
    if [ "${#1}" -le "63" ]; then
	echo $1
	return
    fi
    sha=$(echo -n $1|$CHECKSUM_CMD)
    echo "${1:0:57}${sha:0:6}"
}

if [[ $# -ne 1 ]]; then
    echo "usage: ${BASH_SOURCE} restore-name"
    exit 1
fi

echo "restore: $1"

label=$(label_name $1)
echo "label:   $label"

echo Deleting disconnected restore pods
oc delete pods --all-namespaces -l oadp.openshift.io/disconnected-from-dc=$label

for dc in $(oc get dc --all-namespaces -l oadp.openshift.io/replicas-modified=$label -o jsonpath='{range .items[*]}{.metadata.namespace}{","}{.metadata.name}{","}{.metadata.annotations.oadp\.openshift\.io/original-replicas}{","}{.metadata.annotations.oadp\.openshift\.io/original-paused}{"\n"}{end}')
do
    IFS=',' read -ra dc_arr <<< "$dc"
    if [ ${#dc_arr[0]} -gt 0 ]; then
	echo Found deployment ${dc_arr[0]}/${dc_arr[1]}, setting replicas: ${dc_arr[2]}, paused: ${dc_arr[3]}
	cat <<EOF | oc patch dc  -n ${dc_arr[0]} ${dc_arr[1]} --patch-file /dev/stdin
spec:
  replicas: ${dc_arr[2]}
  paused: ${dc_arr[3]}
EOF
    fi
done
apiVersion: velero.io/v1
kind: Restore
metadata:
  name: <restore>
  namespace: openshift-adp
spec:
  hooks:
    resources:
      - name: <hook_name>
        includedNamespaces:
        - <namespace> 
        excludedNamespaces:
        - <namespace>
        includedResources:
        - pods 
        excludedResources: []
        labelSelector: 
          matchLabels:
            app: velero
            component: server
        postHooks:
        - init:
            initContainers:
            - name: restore-hook-init
              image: alpine:latest
              volumeMounts:
              - mountPath: /restores/pvc1-vm
                name: pvc1-vm
              command:
              - /bin/ash
              - -c
            timeout: 
        - exec:
            container: <container> 
            command:
            - /bin/bash 
            - -c
            - "psql < /backup/backup.sql"
            waitTimeout: 5m 
            execTimeout: 1m 
            onError: Continue 
$ velero restore create <RESTORE_NAME> \
  --from-backup <BACKUP_NAME> \
  --exclude-resources=deployment.apps
$ velero restore create <RESTORE_NAME> \
  --from-backup <BACKUP_NAME> \
  --include-resources=deployment.apps
...
  - apiGroups:
      - oadp.openshift.io
    resources:
      - nonadminbackups
      - nonadminrestores
      - nonadminbackupstoragelocations
      - nonadmindownloadrequests
    verbs:
      - create
      - delete
      - get
      - list
      - patch
      - update
      - watch
  - apiGroups:
      - oadp.openshift.io
    resources:
      - nonadminbackups/status
      - nonadminrestores/status
    verbs:
      - get
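A cluster administrator grants these permissions to non-admin users. As a minimal sketch, assuming the rules above are packaged in a ClusterRole named nonadmin-user (a hypothetical name), the binding could be created per user namespace:

$ oc adm policy add-role-to-user nonadmin-user <user> -n <user_namespace>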
message: only a single instance of Non-Admin Controller can be installed across the entire cluster. Non-Admin controller is already configured and installed in openshift-adp namespace.
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionApplication
metadata:
  name: oadp-backup
  namespace: openshift-adp
spec:
  configuration:
    nodeAgent:
      enable: true
      uploaderType: kopia
    velero:
      defaultPlugins:
        - aws
        - openshift
        - csi
      defaultSnapshotMoveData: true
  nonAdmin: 
    enable: true 
  backupLocations:
    - velero:
        config:
          profile: "default"
          region: noobaa
          s3Url: https://s3.openshift-storage.svc
          s3ForcePathStyle: "true"
          insecureSkipTLSVerify: "true"
        provider: aws
        default: true
        credential:
          key: cloud
          name: <cloud_credentials>
        objectStorage:
          bucket: <bucket_name>
          prefix: oadp
$ oc get pod -n openshift-adp -l control-plane=non-admin-controller
NAME                                  READY   STATUS    RESTARTS   AGE
non-admin-controller-5d....f5-p..9p   1/1     Running   0          99m
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionApplication
metadata:
  name: oadp-backup
  namespace: openshift-adp
spec:
  configuration:
    nodeAgent:
      enable: true
      uploaderType: kopia
    velero:
      defaultPlugins:
        - aws
        - openshift
        - csi
      noDefaultBackupLocation: true 
  nonAdmin:
    enable: true
    requireApprovalForBSL: true 
$ oc -n openshift-adp get NonAdminBackupStorageLocationRequests
NAME                          REQUEST-PHASE   REQUEST-NAMESPACE     REQUEST-NAME               AGE
non-admin-bsl-test-.....175   Approved        non-admin-bsl-test    incorrect-bucket-nabsl    4m57s
non-admin-bsl-test-.....196   Approved        non-admin-bsl-test    perfect-nabsl             5m26s
non-admin-bsl-test-s....e1a   Rejected        non-admin-bsl-test    suspicious-sample         2m56s
non-admin-bsl-test-.....5e0   Pending         non-admin-bsl-test    waitingapproval-nabsl     4m20s
$ oc patch nabslrequest <nabsl_name> -n openshift-adp --type=merge -p '{"spec": {"approvalDecision": "approve"}}' 
$ oc get backupstoragelocations.velero.io -n openshift-adp
test-nac-test-bsl-cd...930 Available 62s 62s
NAME                         PHASE       LAST VALIDATED   AGE   DEFAULT
test-nac-test-bsl-cd...930   Available   62s              62s
$ oc -n openshift-adp get NonAdminBackupStorageLocationRequests
$ oc get nabslrequest
NAME                          REQUEST-PHASE   REQUEST-NAMESPACE     REQUEST-NAME               AGE
non-admin-bsl-test-.....175   Approved        non-admin-bsl-test    incorrect-bucket-nabsl    4m57s
non-admin-bsl-test-.....196   Approved        non-admin-bsl-test    perfect-nabsl             5m26s
non-admin-bsl-test-s....e1a   Rejected        non-admin-bsl-test    suspicious-sample         2m56s
non-admin-bsl-test-.....5e0   Pending         non-admin-bsl-test    waitingapproval-nabsl     4m20s
$ oc patch nabslrequest <nabsl_name> -n openshift-adp --type=merge -p '{"spec": {"approvalDecision": "reject"}}' 
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionApplication
...
spec:
  nonAdmin:
    enable: true
    enforceBSLSpec: 
      config: 
        checksumAlgorithm: ""
        profile: default
        region: us-west-2
      objectStorage: 
        bucket: my-company-bucket
        prefix: velero
      provider: aws
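If a non-admin user creates a NonAdminBackupStorageLocation whose spec deviates from the enforced values, the request is rejected; the user can check the outcome from their own namespace, for example:

$ oc get nabsl <nabsl_name> -n <user_namespace> -o jsonpath='{.status.phase}'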
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionApplication
...
spec:
  nonAdmin:
    enable: true
    enforceBackupSpec: 
      snapshotMoveData: true 
      ttl: 158h0m0s 
$ oc create secret generic cloud-credentials -n test-nac-ns --from-file <cloud_key_name>=<cloud_credentials_file> 
apiVersion: oadp.openshift.io/v1alpha1
kind: NonAdminBackupStorageLocation
metadata:
  name: test-nabsl
  namespace: test-nac-ns 
spec:
  backupStorageLocationSpec:
    config:
      profile: default
      region: <region_name> 
    credential:
      key: cloud
      name: cloud-credentials
    objectStorage:
      bucket: <bucket_name> 
      prefix: velero
    provider: aws
$ oc apply -f <nabsl_cr_filename> 
$ oc get nabsl test-nabsl -o yaml
apiVersion: oadp.openshift.io/v1alpha1
kind: NonAdminBackupStorageLocation
...
status:
  conditions:
  - lastTransitionTime: "2025-02-26T09:07:15Z"
    message: NonAdminBackupStorageLocation spec validation successful
    reason: BslSpecValidation
    status: "True"
    type: Accepted
  - lastTransitionTime: "2025-02-26T09:07:15Z"
    message: NonAdminBackupStorageLocationRequest approval pending 
    reason: BslSpecApprovalPending
    status: "False"
    type: ClusterAdminApproved
  phase: New 
  veleroBackupStorageLocation:
    nacuuid: test-nac-test-bsl-c...d4389a1930
    name: test-nac-test-bsl-cd....1930
    namespace: openshift-adp
$ oc get nabsl test-nabsl -o yaml
apiVersion: oadp.openshift.io/v1alpha1
kind: NonAdminBackupStorageLocation
metadata:
  creationTimestamp: "2025-02-19T09:30:34Z"
  finalizers:
  - nonadminbackupstoragelocation.oadp.openshift.io/finalizer
  generation: 1
  name: test-nabsl
  namespace: test-nac-ns
  resourceVersion: "159973"
  uid: 4a..80-3260-4ef9-a3..5a-00...d1922
spec:
  backupStorageLocationSpec:
    credential:
      key: cloud
      name: cloud-credentials
    objectStorage:
      bucket: oadp...51rrdqj
      prefix: velero
    provider: aws
status:
  conditions:
  - lastTransitionTime: "2025-02-19T09:30:34Z"
    message: NonAdminBackupStorageLocation spec validation successful 
    reason: BslSpecValidation
    status: "True"
    type: Accepted
  - lastTransitionTime: "2025-02-19T09:30:34Z"
    message: Secret successfully created in the OADP namespace 
    reason: SecretCreated
    status: "True"
    type: SecretSynced
  - lastTransitionTime: "2025-02-19T09:30:34Z"
    message: BackupStorageLocation successfully created in the OADP namespace 
    reason: BackupStorageLocationCreated
    status: "True"
    type: BackupStorageLocationSynced
  phase: Created
  veleroBackupStorageLocation:
    nacuuid: test-nac-..f933a-4ec1-4f6a-8099-ee...b8b26 
    name: test-nac-test-nabsl-36...11ab8b26 
    namespace: openshift-adp
    status:
      lastSyncedTime: "2025-02-19T11:47:10Z"
      lastValidationTime: "2025-02-19T11:47:31Z"
      phase: Available 
apiVersion: oadp.openshift.io/v1alpha1
kind: NonAdminBackup
metadata:
  name: test-nab 
spec:
  backupSpec:
    defaultVolumesToFsBackup: true 
    snapshotMoveData: false 
    storageLocation: test-bsl 
$ oc apply -f <nab_cr_filename> 
$ oc get nab test-nab -o yaml
apiVersion: oadp.openshift.io/v1alpha1
kind: NonAdminBackup
metadata:
  creationTimestamp: "2025-03-06T10:02:56Z"
  finalizers:
  - nonadminbackup.oadp.openshift.io/finalizer
  generation: 2
  name: test-nab
  namespace: test-nac-ns 
  resourceVersion: "134316"
  uid: c5...4c8a8
spec:
  backupSpec:
    csiSnapshotTimeout: 0s
    defaultVolumesToFsBackup: true
    hooks: {}
    itemOperationTimeout: 0s
    metadata: {}
    storageLocation: test-bsl
    ttl: 0s
status:
  conditions:
  - lastTransitionTime: "202...56Z"
    message: backup accepted 
    reason: BackupAccepted
    status: "True"
    type: Accepted
  - lastTransitionTime: "202..T10:02:56Z"
    message: Created Velero Backup object
    reason: BackupScheduled
    status: "True"
    type: Queued
  dataMoverDataUploads: {}
  fileSystemPodVolumeBackups: 
    completed: 2
    total: 2
  phase: Created 
  queueInfo:
    estimatedQueuePosition: 0 
  veleroBackup:
    nacuuid: test-nac-test-nab-d2...a9b14 
    name: test-nac-test-nab-d2...b14 
    namespace: openshift-adp
    spec:
      csiSnapshotTimeout: 10m0s
      defaultVolumesToFsBackup: true
      excludedResources:
      - nonadminbackups
      - nonadminrestores
      - nonadminbackupstoragelocations
      - securitycontextconstraints
      - clusterroles
      - clusterrolebindings
      - priorityclasses
      - customresourcedefinitions
      - virtualmachineclusterinstancetypes
      - virtualmachineclusterpreferences
      hooks: {}
      includedNamespaces:
      - test-nac-ns
      itemOperationTimeout: 4h0m0s
      metadata: {}
      snapshotMoveData: false
      storageLocation: test-nac-test-bsl-bf..02b70a
      ttl: 720h0m0s
    status: 
      completionTimestamp: "2025-0..3:13Z"
      expiration: "2025..2:56Z"
      formatVersion: 1.1.0
      hookStatus: {}
      phase: Completed 
      progress:
        itemsBackedUp: 46
        totalItems: 46
      startTimestamp: "2025-..56Z"
      version: 1
      warnings: 1
apiVersion: oadp.openshift.io/v1alpha1
kind: NonAdminRestore
metadata:
  name: test-nar 
spec:
  restoreSpec:
    backupName: test-nab 
$ oc apply -f <nar_cr_filename> 
$ oc get nar test-nar -o yaml
apiVersion: oadp.openshift.io/v1alpha1
kind: NonAdminRestore
metadata:
  creationTimestamp: "2025-..:15Z"
  finalizers:
  - nonadminrestore.oadp.openshift.io/finalizer
  generation: 2
  name: test-nar
  namespace: test-nac-ns
  resourceVersion: "156517"
  uid: f9f5...63ef34
spec:
  restoreSpec:
    backupName: test-nab
    hooks: {}
    itemOperationTimeout: 0s
status:
  conditions:
  - lastTransitionTime: "2025..15Z"
    message: restore accepted 
    reason: RestoreAccepted
    status: "True"
    type: Accepted
  - lastTransitionTime: "2025-03-06T11:22:15Z"
    message: Created Velero Restore object
    reason: RestoreScheduled
    status: "True"
    type: Queued
  dataMoverDataDownloads: {}
  fileSystemPodVolumeRestores: 
    completed: 2
    total: 2
  phase: Created 
  queueInfo:
    estimatedQueuePosition: 0 
  veleroRestore:
    nacuuid: test-nac-test-nar-c...1ba 
    name: test-nac-test-nar-c7...1ba 
    namespace: openshift-adp
    status:
      completionTimestamp: "2025...22:44Z"
      hookStatus: {}
      phase: Completed 
      progress:
        itemsRestored: 28
        totalItems: 28
      startTimestamp: "2025..15Z"
      warnings: 7
apiVersion: oadp.openshift.io/v1alpha1
kind: NonAdminDownloadRequest
metadata:
  name: test-nadr-backup
spec:
  target:
    kind: BackupLog 
    name: test-nab 
$ oc get nadr test-nadr-backup -o yaml
apiVersion: oadp.openshift.io/v1alpha1
kind: NonAdminDownloadRequest
metadata:
  creationTimestamp: "2025-03-06T10:05:22Z"
  generation: 1
  name: test-nadr-backup
  namespace: test-nac-ns
  resourceVersion: "134866"
  uid: 520...8d9
spec:
  target:
    kind: BackupLog
    name: test-nab
status:
  conditions:
  - lastTransitionTime: "202...5:22Z"
    message: ""
    reason: Success
    status: "True"
    type: Processed
  phase: Created
  velero:
    status:
      downloadURL: https://... 
      expiration: "202...22Z"
      phase: Processed 
apiVersion: oadp.openshift.io/v1alpha1
kind: NonAdminDownloadRequest
metadata:
  name: test-nadr-restore
spec:
  target:
    kind: RestoreLog 
    name: test-nar 
$ oc get nadr test-nadr-restore -o yaml
apiVersion: oadp.openshift.io/v1alpha1
kind: NonAdminDownloadRequest
metadata:
  creationTimestamp: "2025-03-06T11:26:01Z"
  generation: 1
  name: test-nadr-restore
  namespace: test-nac-ns
  resourceVersion: "157842"
  uid: f3e...7862f
spec:
  target:
    kind: RestoreLog
    name: test-nar
status:
  conditions:
  - lastTransitionTime: "202..:01Z"
    message: ""
    reason: Success
    status: "True"
    type: Processed
  phase: Created
  velero:
    status:
      downloadURL: https://... 
      expiration: "202..:01Z"
      phase: Processed 
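The downloadURL value is a time-limited, pre-signed link to a gzip-compressed log. Assuming your workstation can reach the object storage endpoint, you can fetch and unpack it, for example:

$ curl -L "<downloadURL>" -o restore.log.gz
$ gunzip restore.log.gz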
apiVersion: oadp.openshift.io/v1alpha1
kind: NonAdminBackup
...
status:
  conditions:
  - lastTransitionTime: "2025-02-20T10:13:00Z"
    message: 'NonAdminBackupStorageLocation not found in the namespace: NonAdminBackupStorageLocation.oadp.openshift.io
      "nabsl2" not found'
    reason: InvalidBackupSpec
    status: "False"
    type: Accepted
  phase: BackingOff
apiVersion: oadp.openshift.io/v1alpha1
kind: NonAdminBackupStorageLocation
metadata:
  creationTimestamp: "20...:03Z"
  generation: 1
  name: nabsl1
  namespace: test-nac-1
  resourceVersion: "11...9"
  uid: 8d2fc....c9b6c4401
spec:
  backupStorageLocationSpec:
    credential:
      key: cloud
      name: cloud-credentials-gcp
    default: true 
    objectStorage:
      bucket: oad..7l8
      prefix: velero
    provider: gcp
status:
  conditions:
  - lastTransitionTime: "20...:27:03Z"
    message: NonAdminBackupStorageLocation cannot be used as a default BSL 
    reason: BslSpecValidation
    status: "False"
    type: Accepted
  phase: BackingOff
$ export CLUSTER_NAME=my-cluster 
  export ROSA_CLUSTER_ID=$(rosa describe cluster -c ${CLUSTER_NAME} --output json | jq -r .id)
  export REGION=$(rosa describe cluster -c ${CLUSTER_NAME} --output json | jq -r .region.id)
  export OIDC_ENDPOINT=$(oc get authentication.config.openshift.io cluster -o jsonpath='{.spec.serviceAccountIssuer}' | sed 's|^https://||')
  export AWS_ACCOUNT_ID=$(aws sts get-caller-identity --query Account --output text)
  export CLUSTER_VERSION=$(rosa describe cluster -c ${CLUSTER_NAME} -o json | jq -r .version.raw_id | cut -f -2 -d '.')
  export ROLE_NAME="${CLUSTER_NAME}-openshift-oadp-aws-cloud-credentials"
  export SCRATCH="/tmp/${CLUSTER_NAME}/oadp"
  mkdir -p ${SCRATCH}
  echo "Cluster ID: ${ROSA_CLUSTER_ID}, Region: ${REGION}, OIDC Endpoint:
  ${OIDC_ENDPOINT}, AWS Account ID: ${AWS_ACCOUNT_ID}"
$ POLICY_ARN=$(aws iam list-policies --query "Policies[?PolicyName=='RosaOadpVer1'].{ARN:Arn}" --output text) 
$ if [[ -z "${POLICY_ARN}" ]]; then
  cat << EOF > ${SCRATCH}/policy.json 
  {
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": [
        "s3:CreateBucket",
        "s3:DeleteBucket",
        "s3:PutBucketTagging",
        "s3:GetBucketTagging",
        "s3:PutEncryptionConfiguration",
        "s3:GetEncryptionConfiguration",
        "s3:PutLifecycleConfiguration",
        "s3:GetLifecycleConfiguration",
        "s3:GetBucketLocation",
        "s3:ListBucket",
        "s3:GetObject",
        "s3:PutObject",
        "s3:DeleteObject",
        "s3:ListBucketMultipartUploads",
        "s3:AbortMultipartUpload",
        "s3:ListMultipartUploadParts",
        "ec2:DescribeSnapshots",
        "ec2:DescribeVolumes",
        "ec2:DescribeVolumeAttribute",
        "ec2:DescribeVolumesModifications",
        "ec2:DescribeVolumeStatus",
        "ec2:CreateTags",
        "ec2:CreateVolume",
        "ec2:CreateSnapshot",
        "ec2:DeleteSnapshot"
      ],
      "Resource": "*"
    }
  ]}
EOF

  POLICY_ARN=$(aws iam create-policy --policy-name "RosaOadpVer1" \
  --policy-document file://${SCRATCH}/policy.json --query Policy.Arn \
  --tags Key=rosa_openshift_version,Value=${CLUSTER_VERSION} Key=rosa_role_prefix,Value=ManagedOpenShift Key=operator_namespace,Value=openshift-oadp Key=operator_name,Value=openshift-oadp \
  --output text)
  fi
$ echo ${POLICY_ARN}
{ "Version": "2012-10-17", "Statement": [{ "Effect": "Allow", "Principal": { "Federated": "arn:aws:iam::${AWS_ACCOUNT_ID}:oidc-provider/${OIDC_ENDPOINT}" }, "Action": "sts:AssumeRoleWithWebIdentity", "Condition": { "StringEquals": { "${OIDC_ENDPOINT}:sub": [ "system:serviceaccount:openshift-adp:openshift-adp-controller-manager", "system:serviceaccount:openshift-adp:velero"] } } }] } EOF
$ cat <<EOF > ${SCRATCH}/trust-policy.json
  {
      "Version": "2012-10-17",
      "Statement": [{
        "Effect": "Allow",
        "Principal": {
          "Federated": "arn:aws:iam::${AWS_ACCOUNT_ID}:oidc-provider/${OIDC_ENDPOINT}"
        },
        "Action": "sts:AssumeRoleWithWebIdentity",
        "Condition": {
          "StringEquals": {
            "${OIDC_ENDPOINT}:sub": [
              "system:serviceaccount:openshift-adp:openshift-adp-controller-manager",
              "system:serviceaccount:openshift-adp:velero"]
          }
        }
      }]
  }
EOF
"${ROLE_NAME}" \ --assume-role-policy-document file://${SCRATCH}/trust-policy.json \ --tags Key=rosa_cluster_id,Value=${ROSA_CLUSTER_ID} \ Key=rosa_openshift_version,Value=${CLUSTER_VERSION} \ Key=rosa_role_prefix,Value=ManagedOpenShift \ Key=operator_namespace,Value=openshift-adp \ Key=operator_name,Value=openshift-oadp \ --query Role.Arn --output text)
$ ROLE_ARN=$(aws iam create-role --role-name \
  "${ROLE_NAME}" \
  --assume-role-policy-document file://${SCRATCH}/trust-policy.json \
  --tags Key=rosa_cluster_id,Value=${ROSA_CLUSTER_ID} \
         Key=rosa_openshift_version,Value=${CLUSTER_VERSION} \
         Key=rosa_role_prefix,Value=ManagedOpenShift \
         Key=operator_namespace,Value=openshift-adp \
         Key=operator_name,Value=openshift-oadp \
  --query Role.Arn --output text)
$ echo ${ROLE_ARN}
$ aws iam attach-role-policy --role-name "${ROLE_NAME}" \
  --policy-arn ${POLICY_ARN}
$ cat <<EOF > ${SCRATCH}/credentials
  [default]
  role_arn = ${ROLE_ARN}
  web_identity_token_file = /var/run/secrets/openshift/serviceaccount/token
  region = <aws_region> 
EOF
$ oc create namespace openshift-adp
$ oc -n openshift-adp create secret generic cloud-credentials \
  --from-file=${SCRATCH}/credentials
$ cat << EOF | oc create -f -
  apiVersion: oadp.openshift.io/v1alpha1
  kind: CloudStorage
  metadata:
    name: ${CLUSTER_NAME}-oadp
    namespace: openshift-adp
  spec:
    creationSecret:
      key: credentials
      name: cloud-credentials
    enableSharedConfig: true
    name: ${CLUSTER_NAME}-oadp
    provider: aws
    region: $REGION
EOF
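You can verify that the CloudStorage object was created, for example:

$ oc get cloudstorage ${CLUSTER_NAME}-oadp -n openshift-adp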
$ oc get pvc -n <namespace>
NAME     STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
applog   Bound    pvc-351791ae-b6ab-4e8b-88a4-30f73caf5ef8   1Gi        RWO            gp3-csi        4d19h
mysql    Bound    pvc-16b8e009-a20a-4379-accc-bc81fedd0621   1Gi        RWO            gp3-csi        4d19h
$ oc get storageclass
NAME                PROVISIONER             RECLAIMPOLICY   VOLUMEBINDINGMODE      ALLOWVOLUMEEXPANSION   AGE
gp2                 kubernetes.io/aws-ebs   Delete          WaitForFirstConsumer   true                   4d21h
gp2-csi             ebs.csi.aws.com         Delete          WaitForFirstConsumer   true                   4d21h
gp3                 ebs.csi.aws.com         Delete          WaitForFirstConsumer   true                   4d21h
gp3-csi (default)   ebs.csi.aws.com         Delete          WaitForFirstConsumer   true                   4d21h
$ cat << EOF | oc create -f -
  apiVersion: oadp.openshift.io/v1alpha1
  kind: DataProtectionApplication
  metadata:
    name: ${CLUSTER_NAME}-dpa
    namespace: openshift-adp
  spec:
    backupImages: true 
    features:
      dataMover:
        enable: false
    backupLocations:
    - bucket:
        cloudStorageRef:
          name: ${CLUSTER_NAME}-oadp
        credential:
          key: credentials
          name: cloud-credentials
        prefix: velero
        default: true
        config:
          region: ${REGION}
    configuration:
      velero:
        defaultPlugins:
        - openshift
        - aws
        - csi
      nodeAgent:  
        enable: false
        uploaderType: kopia 
EOF
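After creating the DataProtectionApplication, confirm that the operator reconciled it and that the backup storage location reports Available; both checks are shown in detail later in this document:

$ oc get dpa -n openshift-adp -o yaml
$ oc get backupstoragelocations.velero.io -n openshift-adp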
$ cat << EOF | oc create -f -
  apiVersion: oadp.openshift.io/v1alpha1
  kind: DataProtectionApplication
  metadata:
    name: ${CLUSTER_NAME}-dpa
    namespace: openshift-adp
  spec:
    backupImages: true 
    backupLocations:
    - bucket:
        cloudStorageRef:
          name: ${CLUSTER_NAME}-oadp
        credential:
          key: credentials
          name: cloud-credentials
        prefix: velero
        default: true
        config:
          region: ${REGION}
    configuration:
      velero:
        defaultPlugins:
        - openshift
        - aws
      nodeAgent: 
        enable: false
        uploaderType: restic
    snapshotLocations:
      - velero:
          config:
            credentialsFile: /tmp/credentials/openshift-adp/cloud-credentials-credentials 
            enableSharedConfig: "true" 
            profile: default 
            region: ${REGION} 
          provider: aws
EOF
In OADP 1.2 and later, the node agent is configured through the nodeAgent key:

nodeAgent:
  enable: false
  uploaderType: restic

In OADP 1.1, the equivalent setting uses the restic key instead:

restic:
  enable: false
$ oc get sub -o yaml redhat-oadp-operator
apiVersion: operators.coreos.com/v1alpha1
kind: Subscription
metadata:
  annotations:
  creationTimestamp: "2025-01-15T07:18:31Z"
  generation: 1
  labels:
    operators.coreos.com/redhat-oadp-operator.openshift-adp: ""
  name: redhat-oadp-operator
  namespace: openshift-adp
  resourceVersion: "77363"
  uid: 5ba00906-5ad2-4476-ae7b-ffa90986283d
spec:
  channel: stable-1.4
  config:
    env:
    - name: ROLEARN
      value: arn:aws:iam::11111111:role/wrong-role-arn 
  installPlanApproval: Manual
  name: redhat-oadp-operator
  source: prestage-operators
  sourceNamespace: openshift-marketplace
  startingCSV: oadp-operator.v1.4.2
$ oc patch subscription redhat-oadp-operator -p '{"spec": {"config": {"env": [{"name": "ROLEARN", "value": "<role_arn>"}]}}}' --type='merge'
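After patching, an optional way to confirm that the corrected role ARN landed in the Subscription is to read back the env array:

$ oc -n openshift-adp get subscription redhat-oadp-operator -o jsonpath='{.spec.config.env}'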
$ oc get secret cloud-credentials -o jsonpath='{.data.credentials}' | base64 -d
[default]
sts_regional_endpoints = regional
role_arn = arn:aws:iam::160.....6956:role/oadprosa.....8wlf
web_identity_token_file = /var/run/secrets/openshift/serviceaccount/token
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionApplication
metadata:
  name: test-rosa-dpa
  namespace: openshift-adp
spec:
  backupLocations:
  - bucket:
      config:
        region: us-east-1
      cloudStorageRef:
        name: <cloud_storage> 
      credential:
        name: cloud-credentials
        key: credentials
      prefix: velero
      default: true
  configuration:
    velero:
      defaultPlugins:
      - aws
      - openshift
$ oc create -f <dpa_manifest_file>
$ oc get dpa -n openshift-adp -o yaml
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionApplication
...
status:
    conditions:
    - lastTransitionTime: "2023-07-31T04:48:12Z"
      message: Reconcile complete
      reason: Complete
      status: "True"
      type: Reconciled
$ oc get backupstoragelocations.velero.io -n openshift-adp
NAME       PHASE       LAST VALIDATED   AGE   DEFAULT
ts-dpa-1   Available   3s               6s    true
$ oc create namespace hello-world
$ oc new-app -n hello-world --image=docker.io/openshift/hello-openshift
$ oc expose service/hello-openshift -n hello-world
$ curl `oc get route/hello-openshift -n hello-world -o jsonpath='{.spec.host}'`
Hello OpenShift!
$ cat << EOF | oc create -f -
  apiVersion: velero.io/v1
  kind: Backup
  metadata:
    name: hello-world
    namespace: openshift-adp
  spec:
    includedNamespaces:
    - hello-world
    storageLocation: ${CLUSTER_NAME}-dpa-1
    ttl: 720h0m0s
EOF
$ watch "oc -n openshift-adp get backup hello-world -o json | jq .status"
"completionTimestamp": "2022-09-07T22:20:44Z", "expiration": "2022-10-07T22:20:22Z", "formatVersion": "1.1.0", "phase": "Completed", "progress": { "itemsBackedUp": 58, "totalItems": 58 }, "startTimestamp": "2022-09-07T22:20:22Z", "version": 1 }
{
  "completionTimestamp": "2022-09-07T22:20:44Z",
  "expiration": "2022-10-07T22:20:22Z",
  "formatVersion": "1.1.0",
  "phase": "Completed",
  "progress": {
    "itemsBackedUp": 58,
    "totalItems": 58
  },
  "startTimestamp": "2022-09-07T22:20:22Z",
  "version": 1
}
$ oc delete ns hello-world
$ cat << EOF | oc create -f -
  apiVersion: velero.io/v1
  kind: Restore
  metadata:
    name: hello-world
    namespace: openshift-adp
  spec:
    backupName: hello-world
EOF
$ watch "oc -n openshift-adp get restore hello-world -o json | jq .status"
"completionTimestamp": "2022-09-07T22:25:47Z", "phase": "Completed", "progress": { "itemsRestored": 38, "totalItems": 38 }, "startTimestamp": "2022-09-07T22:25:28Z", "warnings": 9 }
{
  "completionTimestamp": "2022-09-07T22:25:47Z",
  "phase": "Completed",
  "progress": {
    "itemsRestored": 38,
    "totalItems": 38
  },
  "startTimestamp": "2022-09-07T22:25:28Z",
  "warnings": 9
}
$ oc -n hello-world get pods
NAME                              READY   STATUS    RESTARTS   AGE
hello-openshift-9f885f7c6-kdjpj   1/1     Running   0          90s
$ curl `oc get route/hello-openshift -n hello-world -o jsonpath='{.spec.host}'`
Hello OpenShift!
$ oc delete ns hello-world
$ oc -n openshift-adp delete dpa ${CLUSTER_NAME}-dpa
$ oc -n openshift-adp delete cloudstorage ${CLUSTER_NAME}-oadp
$ oc -n openshift-adp patch cloudstorage ${CLUSTER_NAME}-oadp -p '{"metadata":{"finalizers":null}}' --type=merge
$ oc -n openshift-adp delete subscription oadp-operator
$ oc delete ns openshift-adp
$ oc delete backups.velero.io hello-world
$ velero backup delete hello-world
$ for CRD in `oc get crds | grep velero | awk '{print $1}'`; do oc delete crd $CRD; done
$ aws s3 rm s3://${CLUSTER_NAME}-oadp --recursive
$ aws s3api delete-bucket --bucket ${CLUSTER_NAME}-oadp
$ aws iam detach-role-policy --role-name "${ROLE_NAME}"  --policy-arn "${POLICY_ARN}"
$ aws iam delete-role --role-name "${ROLE_NAME}"
$ export CLUSTER_NAME=<AWS_cluster_name>
$ export CLUSTER_VERSION=$(oc get clusterversion version -o jsonpath='{.status.desired.version}{"\n"}')
$ export AWS_CLUSTER_ID=$(oc get clusterversion version -o jsonpath='{.spec.clusterID}{"\n"}')
$ export OIDC_ENDPOINT=$(oc get authentication.config.openshift.io cluster -o jsonpath='{.spec.serviceAccountIssuer}' | sed 's|^https://||')
$ export REGION=$(oc get infrastructures cluster -o jsonpath='{.status.platformStatus.aws.region}' --allow-missing-template-keys=false || echo us-east-2)
$ export AWS_ACCOUNT_ID=$(aws sts get-caller-identity --query Account --output text)
$ export ROLE_NAME="${CLUSTER_NAME}-openshift-oadp-aws-cloud-credentials"
$ export SCRATCH="/tmp/${CLUSTER_NAME}/oadp"
$ mkdir -p ${SCRATCH}
$ echo "Cluster ID: ${AWS_CLUSTER_ID}, Region: ${REGION}, OIDC Endpoint:
${OIDC_ENDPOINT}, AWS Account ID: ${AWS_ACCOUNT_ID}"
$ export POLICY_NAME="OadpVer1" 
$ POLICY_ARN=$(aws iam list-policies --query "Policies[?PolicyName=='$POLICY_NAME'].{ARN:Arn}" --output text)
$ if [[ -z "${POLICY_ARN}" ]]; then
cat << EOF > ${SCRATCH}/policy.json
{
"Version": "2012-10-17",
"Statement": [
 {
   "Effect": "Allow",
   "Action": [
     "s3:CreateBucket",
     "s3:DeleteBucket",
     "s3:PutBucketTagging",
     "s3:GetBucketTagging",
     "s3:PutEncryptionConfiguration",
     "s3:GetEncryptionConfiguration",
     "s3:PutLifecycleConfiguration",
     "s3:GetLifecycleConfiguration",
     "s3:GetBucketLocation",
     "s3:ListBucket",
     "s3:GetObject",
     "s3:PutObject",
     "s3:DeleteObject",
     "s3:ListBucketMultipartUploads",
     "s3:AbortMultipartUpload",
     "s3:ListMultipartUploadParts",
     "ec2:DescribeSnapshots",
     "ec2:DescribeVolumes",
     "ec2:DescribeVolumeAttribute",
     "ec2:DescribeVolumesModifications",
     "ec2:DescribeVolumeStatus",
     "ec2:CreateTags",
     "ec2:CreateVolume",
     "ec2:CreateSnapshot",
     "ec2:DeleteSnapshot"
   ],
   "Resource": "*"
 }
]}
EOF

POLICY_ARN=$(aws iam create-policy --policy-name $POLICY_NAME \
--policy-document file:///${SCRATCH}/policy.json --query Policy.Arn \
--tags Key=openshift_version,Value=${CLUSTER_VERSION} Key=operator_namespace,Value=openshift-adp Key=operator_name,Value=oadp \
--output text) 
1

fi
$ echo ${POLICY_ARN}
{ "Version": "2012-10-17", "Statement": [{ "Effect": "Allow", "Principal": { "Federated": "arn:aws:iam::${AWS_ACCOUNT_ID}:oidc-provider/${OIDC_ENDPOINT}" }, "Action": "sts:AssumeRoleWithWebIdentity", "Condition": { "StringEquals": { "${OIDC_ENDPOINT}:sub": [ "system:serviceaccount:openshift-adp:openshift-adp-controller-manager", "system:serviceaccount:openshift-adp:velero"] } } }] } EOF
$ cat <<EOF > ${SCRATCH}/trust-policy.json
{
    "Version": "2012-10-17",
    "Statement": [{
      "Effect": "Allow",
      "Principal": {
        "Federated": "arn:aws:iam::${AWS_ACCOUNT_ID}:oidc-provider/${OIDC_ENDPOINT}"
      },
      "Action": "sts:AssumeRoleWithWebIdentity",
      "Condition": {
        "StringEquals": {
          "${OIDC_ENDPOINT}:sub": [
            "system:serviceaccount:openshift-adp:openshift-adp-controller-manager",
            "system:serviceaccount:openshift-adp:velero"]
        }
      }
    }]
}
EOF
"${ROLE_NAME}" \ --assume-role-policy-document file://${SCRATCH}/trust-policy.json \ --tags Key=cluster_id,Value=${AWS_CLUSTER_ID} Key=openshift_version,Value=${CLUSTER_VERSION} Key=operator_namespace,Value=openshift-adp Key=operator_name,Value=oadp --query Role.Arn --output text)
$ ROLE_ARN=$(aws iam create-role --role-name \
  "${ROLE_NAME}" \
  --assume-role-policy-document file://${SCRATCH}/trust-policy.json \
  --tags Key=cluster_id,Value=${AWS_CLUSTER_ID}  Key=openshift_version,Value=${CLUSTER_VERSION} Key=operator_namespace,Value=openshift-adp Key=operator_name,Value=oadp --query Role.Arn --output text)
$ echo ${ROLE_ARN}
$ aws iam attach-role-policy --role-name "${ROLE_NAME}" --policy-arn ${POLICY_ARN}
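To optionally confirm that the role carries the expected trust policy before the operator uses it, you can read it back with a standard AWS CLI call:

$ aws iam get-role --role-name "${ROLE_NAME}" --query Role.AssumeRolePolicyDocument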
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionApplication
metadata:
  name: <dpa_sample>
spec:
# ...
  configuration:
    velero:
      podConfig:
        nodeSelector: <node_selector> 
        resourceAllocations: 
          limits:
            cpu: "1"
            memory: 1024Mi
          requests:
            cpu: 200m
            memory: 256Mi
$ cat <<EOF > ${SCRATCH}/credentials
[default]
role_arn = ${ROLE_ARN}
web_identity_token_file = /var/run/secrets/openshift/serviceaccount/token
region = <aws_region>
EOF
$ oc create namespace openshift-adp
$ oc -n openshift-adp create secret generic cloud-credentials \
  --from-file=${SCRATCH}/credentials
$ cat << EOF | oc create -f -
  apiVersion: oadp.openshift.io/v1alpha1
  kind: CloudStorage
  metadata:
    name: ${CLUSTER_NAME}-oadp
    namespace: openshift-adp
  spec:
    creationSecret:
      key: credentials
      name: cloud-credentials
    enableSharedConfig: true
    name: ${CLUSTER_NAME}-oadp
    provider: aws
    region: $REGION
EOF
$ oc get pvc -n <namespace>
NAME     STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
applog   Bound    pvc-351791ae-b6ab-4e8b-88a4-30f73caf5ef8   1Gi        RWO            gp3-csi        4d19h
mysql    Bound    pvc-16b8e009-a20a-4379-accc-bc81fedd0621   1Gi        RWO            gp3-csi        4d19h
$ oc get storageclass
NAME                PROVISIONER             RECLAIMPOLICY   VOLUMEBINDINGMODE      ALLOWVOLUMEEXPANSION   AGE
gp2                 kubernetes.io/aws-ebs   Delete          WaitForFirstConsumer   true                   4d21h
gp2-csi             ebs.csi.aws.com         Delete          WaitForFirstConsumer   true                   4d21h
gp3                 ebs.csi.aws.com         Delete          WaitForFirstConsumer   true                   4d21h
gp3-csi (default)   ebs.csi.aws.com         Delete          WaitForFirstConsumer   true                   4d21h
$ cat << EOF | oc create -f -
  apiVersion: oadp.openshift.io/v1alpha1
  kind: DataProtectionApplication
  metadata:
    name: ${CLUSTER_NAME}-dpa
    namespace: openshift-adp
  spec:
    backupImages: true 
    features:
      dataMover:
        enable: false
    backupLocations:
    - bucket:
        cloudStorageRef:
          name: ${CLUSTER_NAME}-oadp
        credential:
          key: credentials
          name: cloud-credentials
        prefix: velero
        default: true
        config:
          region: ${REGION}
    configuration:
      velero:
        defaultPlugins:
        - openshift
        - aws
        - csi
      nodeAgent: 
        enable: false
        uploaderType: kopia 
EOF
$ cat << EOF | oc create -f -
  apiVersion: oadp.openshift.io/v1alpha1
  kind: DataProtectionApplication
  metadata:
    name: ${CLUSTER_NAME}-dpa
    namespace: openshift-adp
  spec:
    backupImages: true 
    features:
      dataMover:
         enable: false
    backupLocations:
    - bucket:
        cloudStorageRef:
          name: ${CLUSTER_NAME}-oadp
        credential:
          key: credentials
          name: cloud-credentials
        prefix: velero
        default: true
        config:
          region: ${REGION}
    configuration:
      velero:
        defaultPlugins:
        - openshift
        - aws
      nodeAgent: 
        enable: false
        uploaderType: restic
    snapshotLocations:
      - velero:
          config:
            credentialsFile: /tmp/credentials/openshift-adp/cloud-credentials-credentials 
            enableSharedConfig: "true" 
            profile: default 
            region: ${REGION} 
          provider: aws
EOF
As above, OADP 1.2 and later configure the node agent with the nodeAgent key:

nodeAgent:
  enable: false
  uploaderType: restic

OADP 1.1 uses the restic key instead:

restic:
  enable: false
$ oc create namespace hello-world
$ oc new-app -n hello-world --image=docker.io/openshift/hello-openshift
$ oc expose service/hello-openshift -n hello-world
$ curl `oc get route/hello-openshift -n hello-world -o jsonpath='{.spec.host}'`
Hello OpenShift!
$ cat << EOF | oc create -f -
  apiVersion: velero.io/v1
  kind: Backup
  metadata:
    name: hello-world
    namespace: openshift-adp
  spec:
    includedNamespaces:
    - hello-world
    storageLocation: ${CLUSTER_NAME}-dpa-1
    ttl: 720h0m0s
EOF
$ watch "oc -n openshift-adp get backup hello-world -o json | jq .status"
"completionTimestamp": "2022-09-07T22:20:44Z", "expiration": "2022-10-07T22:20:22Z", "formatVersion": "1.1.0", "phase": "Completed", "progress": { "itemsBackedUp": 58, "totalItems": 58 }, "startTimestamp": "2022-09-07T22:20:22Z", "version": 1 }
{
  "completionTimestamp": "2022-09-07T22:20:44Z",
  "expiration": "2022-10-07T22:20:22Z",
  "formatVersion": "1.1.0",
  "phase": "Completed",
  "progress": {
    "itemsBackedUp": 58,
    "totalItems": 58
  },
  "startTimestamp": "2022-09-07T22:20:22Z",
  "version": 1
}
$ oc delete ns hello-world
$ cat << EOF | oc create -f -
  apiVersion: velero.io/v1
  kind: Restore
  metadata:
    name: hello-world
    namespace: openshift-adp
  spec:
    backupName: hello-world
EOF
$ watch "oc -n openshift-adp get restore hello-world -o json | jq .status"
"completionTimestamp": "2022-09-07T22:25:47Z", "phase": "Completed", "progress": { "itemsRestored": 38, "totalItems": 38 }, "startTimestamp": "2022-09-07T22:25:28Z", "warnings": 9 }
{
  "completionTimestamp": "2022-09-07T22:25:47Z",
  "phase": "Completed",
  "progress": {
    "itemsRestored": 38,
    "totalItems": 38
  },
  "startTimestamp": "2022-09-07T22:25:28Z",
  "warnings": 9
}
$ oc -n hello-world get pods
NAME                              READY   STATUS    RESTARTS   AGE
hello-openshift-9f885f7c6-kdjpj   1/1     Running   0          90s
$ curl `oc get route/hello-openshift -n hello-world -o jsonpath='{.spec.host}'`
Hello OpenShift!
$ oc delete ns hello-world
$ oc -n openshift-adp delete dpa ${CLUSTER_NAME}-dpa
$ oc -n openshift-adp delete cloudstorage ${CLUSTER_NAME}-oadp
$ oc -n openshift-adp patch cloudstorage ${CLUSTER_NAME}-oadp -p '{"metadata":{"finalizers":null}}' --type=merge
$ oc -n openshift-adp delete subscription oadp-operator
$ oc delete ns openshift-adp
$ oc delete backups.velero.io hello-world
$ velero backup delete hello-world
$ for CRD in `oc get crds | grep velero | awk '{print $1}'`; do oc delete crd $CRD; done
$ aws s3 rm s3://${CLUSTER_NAME}-oadp --recursive
$ aws s3api delete-bucket --bucket ${CLUSTER_NAME}-oadp
$ aws iam detach-role-policy --role-name "${ROLE_NAME}"  --policy-arn "${POLICY_ARN}"
$ aws iam delete-role --role-name "${ROLE_NAME}"
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionApplication
metadata:
  name: dpa-sample
  namespace: openshift-adp
spec:
  configuration:
    velero:
      defaultPlugins:
        - openshift
        - aws
        - csi
      resourceTimeout: 10m
    nodeAgent:
      enable: true
      uploaderType: kopia
  backupLocations:
    - name: default
      velero:
        provider: aws
        default: true
        objectStorage:
          bucket: <bucket_name> 
          prefix: <prefix> 
        config:
          region: <region> 
          profile: "default"
          s3ForcePathStyle: "true"
          s3Url: <s3_url> 
        credential:
          key: cloud
          name: cloud-credentials
$ oc create -f dpa.yaml
apiVersion: velero.io/v1
kind: Backup
metadata:
  name: operator-install-backup 
  namespace: openshift-adp
spec:
  csiSnapshotTimeout: 10m0s
  defaultVolumesToFsBackup: false
  includedNamespaces:
  - threescale   
  includedResources:
  - operatorgroups
  - subscriptions
  - namespaces
  itemOperationTimeout: 1h0m0s
  snapshotMoveData: false
  ttl: 720h0m0s
$ oc create -f backup.yaml
backup.velero.io/operator-install-backup created
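You can track the backup until its phase reports Completed, following the same pattern used for the hello-world backup earlier:

$ watch "oc -n openshift-adp get backup operator-install-backup -o json | jq .status"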
apiVersion: velero.io/v1
kind: Backup
metadata:
  name: operator-resources-secrets 
  namespace: openshift-adp
spec:
  csiSnapshotTimeout: 10m0s
  defaultVolumesToFsBackup: false
  includedNamespaces:
  - threescale
  includedResources:
  - secrets
  itemOperationTimeout: 1h0m0s
  labelSelector:
    matchLabels:
      app: 3scale-api-management
  snapshotMoveData: false
  snapshotVolumes: false
  ttl: 720h0m0s
$ oc create -f backup-secret.yaml
backup.velero.io/operator-resources-secrets created
apiVersion: velero.io/v1
kind: Backup
metadata:
  name: operator-resources-apim 
  namespace: openshift-adp
spec:
  csiSnapshotTimeout: 10m0s
  defaultVolumesToFsBackup: false
  includedNamespaces:
  - threescale
  includedResources:
  - apimanagers
  itemOperationTimeout: 1h0m0s
  snapshotMoveData: false
  snapshotVolumes: false
  storageLocation: ts-dpa-1
  ttl: 720h0m0s
  volumeSnapshotLocations:
  - ts-dpa-1
$ oc create -f backup-apimanager.yaml
backup.velero.io/operator-resources-apim created
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: example-claim
  namespace: threescale
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
  storageClassName: gp3-csi
  volumeMode: Filesystem
$ oc create -f ts_pvc.yml
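Before editing the deployment, it can help to confirm that the new claim exists; note that with a WaitForFirstConsumer storage class it may stay Pending until a pod mounts it:

$ oc get pvc example-claim -n threescale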
$ oc edit deployment system-mysql -n threescale
  volumeMounts:
    - name: example-claim
      mountPath: /var/lib/mysqldump/data
    - name: mysql-storage
      mountPath: /var/lib/mysql/data
    - name: mysql-extra-conf
      mountPath: /etc/my-extra.d
    - name: mysql-main-conf
      mountPath: /etc/my-extra
    ...
      serviceAccount: amp
  volumes:
        - name: example-claim
          persistentVolumeClaim:
            claimName: example-claim 
    ...
apiVersion: velero.io/v1
kind: Backup
metadata:
  name: mysql-backup 
  namespace: openshift-adp
spec:
  csiSnapshotTimeout: 10m0s
  defaultVolumesToFsBackup: true
  hooks:
    resources:
    - name: dumpdb
      pre:
      - exec:
          command:
          - /bin/sh
          - -c
          - mysqldump -u $MYSQL_USER --password=$MYSQL_PASSWORD system --no-tablespaces
            > /var/lib/mysqldump/data/dump.sql 
          container: system-mysql
          onError: Fail
          timeout: 5m
  includedNamespaces:
  - threescale
  includedResources: 
  - deployment
  - pods
  - replicationControllers
  - persistentvolumeclaims
  - persistentvolumes
  itemOperationTimeout: 1h0m0s
  labelSelector:
    matchLabels:
      app: 3scale-api-management
      threescale_component_element: mysql
  snapshotMoveData: false
  ttl: 720h0m0s
$ oc create -f mysql.yaml
backup.velero.io/mysql-backup created
$ oc get backups.velero.io mysql-backup -o yaml
completionTimestamp: "2025-04-17T13:25:19Z" errors: 1 expiration: "2025-05-17T13:25:16Z" formatVersion: 1.1.0 hookStatus: {} phase: Completed progress: {} startTimestamp: "2025-04-17T13:25:16Z" version: 1
status:
  completionTimestamp: "2025-04-17T13:25:19Z"
  errors: 1
  expiration: "2025-05-17T13:25:16Z"
  formatVersion: 1.1.0
  hookStatus: {}
  phase: Completed
  progress: {}
  startTimestamp: "2025-04-17T13:25:16Z"
  version: 1
$ oc edit deployment backend-redis -n threescale
annotations:
  post.hook.backup.velero.io/command: >-
    ["/bin/bash", "-c", "redis-cli CONFIG SET auto-aof-rewrite-percentage 100"]
  pre.hook.backup.velero.io/command: >-
    ["/bin/bash", "-c", "redis-cli CONFIG SET auto-aof-rewrite-percentage 0"]
apiVersion: velero.io/v1
kind: Backup
metadata:
  name: redis-backup 
  namespace: openshift-adp
spec:
  csiSnapshotTimeout: 10m0s
  defaultVolumesToFsBackup: true
  includedNamespaces:
  - threescale
  includedResources:
  - deployment
  - pods
  - replicationcontrollers
  - persistentvolumes
  - persistentvolumeclaims
  itemOperationTimeout: 1h0m0s
  labelSelector:
    matchLabels:
      app: 3scale-api-management
      threescale_component: backend
      threescale_component_element: redis
  snapshotMoveData: false
  snapshotVolumes: false
  ttl: 720h0m0s
$ oc create -f redis-backup.yaml
backup.velero.io/redis-backup created
$ oc get backups.velero.io redis-backup -o yaml
completionTimestamp: "2025-04-17T13:25:19Z" errors: 1 expiration: "2025-05-17T13:25:16Z" formatVersion: 1.1.0 hookStatus: {} phase: Completed progress: {} startTimestamp: "2025-04-17T13:25:16Z" version: 1
status:
  completionTimestamp: "2025-04-17T13:25:19Z"
  errors: 1
  expiration: "2025-05-17T13:25:16Z"
  formatVersion: 1.1.0
  hookStatus: {}
  phase: Completed
  progress: {}
  startTimestamp: "2025-04-17T13:25:16Z"
  version: 1
$ oc delete project threescale
"threescale" project deleted successfully
apiVersion: velero.io/v1
kind: Restore
metadata:
  name: operator-installation-restore
  namespace: openshift-adp
spec:
  backupName: operator-install-backup 
  excludedResources:
  - nodes
  - events
  - events.events.k8s.io
  - backups.velero.io
  - restores.velero.io
  - resticrepositories.velero.io
  - csinodes.storage.k8s.io
  - volumeattachments.storage.k8s.io
  - backuprepositories.velero.io
  itemOperationTimeout: 4h0m0s
$ oc create -f restore.yaml
restore.velero.io/operator-installation-restore created
$ oc apply -f - <<EOF
---
apiVersion: v1
kind: Secret
metadata:
  name: s3-credentials
  namespace: threescale
stringData:
  AWS_ACCESS_KEY_ID: <ID_123456>
  AWS_SECRET_ACCESS_KEY: <ID_98765544>
  AWS_BUCKET: <mybucket.example.com>
  AWS_REGION: <us-east-1>
type: Opaque
EOF
$ oc scale deployment threescale-operator-controller-manager-v2 --replicas=0 -n threescale
deployment.apps/threescale-operator-controller-manager-v2 scaled
apiVersion: velero.io/v1
kind: Restore
metadata:
  name: operator-resources-secrets
  namespace: openshift-adp
spec:
  backupName: operator-resources-secrets 
  excludedResources:
  - nodes
  - events
  - events.events.k8s.io
  - backups.velero.io
  - restores.velero.io
  - resticrepositories.velero.io
  - csinodes.storage.k8s.io
  - volumeattachments.storage.k8s.io
  - backuprepositories.velero.io
  itemOperationTimeout: 4h0m0s
$ oc create -f restore-secrets.yaml
restore.velero.io/operator-resources-secrets created
apiVersion: velero.io/v1
kind: Restore
metadata:
  name: operator-resources-apim
  namespace: openshift-adp
spec:
  backupName: operator-resources-apim 
  excludedResources: 
  - nodes
  - events
  - events.events.k8s.io
  - backups.velero.io
  - restores.velero.io
  - resticrepositories.velero.io
  - csinodes.storage.k8s.io
  - volumeattachments.storage.k8s.io
  - backuprepositories.velero.io
  itemOperationTimeout: 4h0m0s
$ oc create -f restore-apimanager.yaml
restore.velero.io/operator-resources-apim created
$ oc scale deployment threescale-operator-controller-manager-v2 --replicas=1 -n threescale
deployment.apps/threescale-operator-controller-manager-v2 scaled
$ oc scale deployment threescale-operator-controller-manager-v2 --replicas=0 -n threescale
deployment.apps/threescale-operator-controller-manager-v2 scaled
$ vi ./scaledowndeployment.sh
for deployment in apicast-production apicast-staging backend-cron backend-listener backend-redis backend-worker system-app system-memcache system-mysql system-redis system-searchd system-sidekiq zync zync-database zync-que; do
    oc scale deployment/$deployment --replicas=0 -n threescale
done
$ ./scaledowndeployment.sh
deployment.apps.openshift.io/apicast-production scaled
deployment.apps.openshift.io/apicast-staging scaled
deployment.apps.openshift.io/backend-cron scaled
deployment.apps.openshift.io/backend-listener scaled
deployment.apps.openshift.io/backend-redis scaled
deployment.apps.openshift.io/backend-worker scaled
deployment.apps.openshift.io/system-app scaled
deployment.apps.openshift.io/system-memcache scaled
deployment.apps.openshift.io/system-mysql scaled
deployment.apps.openshift.io/system-redis scaled
deployment.apps.openshift.io/system-searchd scaled
deployment.apps.openshift.io/system-sidekiq scaled
deployment.apps.openshift.io/zync scaled
deployment.apps.openshift.io/zync-database scaled
deployment.apps.openshift.io/zync-que scaled
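Once the script finishes, an optional check that every 3scale deployment actually reports zero ready replicas is:

$ oc get deployment -n threescale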
$ oc delete deployment system-mysql -n threescale
deployment.apps.openshift.io "system-mysql" deleted
Warning: apps.openshift.io/v1 deployment is deprecated in v4.14+, unavailable in v4.10000+
deployment.apps.openshift.io "system-mysql" deleted
apiVersion: velero.io/v1
kind: Restore
metadata:
  name: restore-mysql
  namespace: openshift-adp
spec:
  backupName: mysql-backup 
  excludedResources:
    - nodes
    - events
    - events.events.k8s.io
    - backups.velero.io
    - restores.velero.io
    - csinodes.storage.k8s.io
    - volumeattachments.storage.k8s.io
    - backuprepositories.velero.io
    - resticrepositories.velero.io
  hooks:
    resources:
      - name: restoreDB
        postHooks:
          - exec:
              command:
                - /bin/sh
                - '-c'
                - >
                  sleep 30

                  mysql -h 127.0.0.1 -D system -u root
                  --password=$MYSQL_ROOT_PASSWORD <
                  /var/lib/mysqldump/data/dump.sql 
              container: system-mysql
              execTimeout: 80s
              onError: Fail
              waitTimeout: 5m
  itemOperationTimeout: 1h0m0s
  restorePVs: true
$ oc create -f restore-mysql.yaml
restore.velero.io/restore-mysql created
$ oc get podvolumerestores.velero.io -n openshift-adp
NAME                    NAMESPACE    POD                     UPLOADER TYPE   VOLUME                  STATUS      TOTALBYTES   BYTESDONE   AGE
restore-mysql-rbzvm     threescale   system-mysql-2-kjkhl    kopia           mysql-storage           Completed   771879108    771879108   40m
restore-mysql-z7x7l     threescale   system-mysql-2-kjkhl    kopia           example-claim           Completed   380415       380415      40m
$ oc get pvc -n threescale
NAME                    STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   VOLUMEATTRIBUTESCLASS   AGE
backend-redis-storage   Bound    pvc-3dca410d-3b9f-49d4-aebf-75f47152e09d   1Gi        RWO            gp3-csi        <unset>                 68m
example-claim           Bound    pvc-cbaa49b0-06cd-4b1a-9e90-0ef755c67a54   1Gi        RWO            gp3-csi        <unset>                 57m
mysql-storage           Bound    pvc-4549649f-b9ad-44f7-8f67-dd6b9dbb3896   1Gi        RWO            gp3-csi        <unset>                 68m
system-redis-storage    Bound    pvc-04dadafd-8a3e-4d00-8381-6041800a24fc   1Gi        RWO            gp3-csi        <unset>                 68m
system-searchd          Bound    pvc-afbf606c-d4a8-4041-8ec6-54c5baf1a3b9   1Gi        RWO            gp3-csi        <unset>                 68m
$ oc delete deployment backend-redis -n threescale
deployment.apps.openshift.io "backend-redis" deleted
Warning: apps.openshift.io/v1 deployment is deprecated in v4.14+, unavailable in v4.10000+

deployment.apps.openshift.io "backend-redis" deleted
apiVersion: velero.io/v1
kind: Restore
metadata:
  name: restore-backend
  namespace: openshift-adp
spec:
  backupName: redis-backup 
  excludedResources:
    - nodes
    - events
    - events.events.k8s.io
    - backups.velero.io
    - restores.velero.io
    - resticrepositories.velero.io
    - csinodes.storage.k8s.io
    - volumeattachments.storage.k8s.io
    - backuprepositories.velero.io
  itemOperationTimeout: 1h0m0s
  restorePVs: true
$ oc create -f restore-backend.yaml
restore.velero.io/restore-backend created
$ oc get podvolumerestores.velero.io -n openshift-adp
NAME                    NAMESPACE    POD                     UPLOADER TYPE   VOLUME                  STATUS      TOTALBYTES   BYTESDONE   AGE
restore-backend-jmrwx   threescale   backend-redis-1-bsfmv   kopia           backend-redis-storage   Completed   76123        76123       21m
$ oc scale deployment threescale-operator-controller-manager-v2 --replicas=1 -n threescale
deployment.apps/threescale-operator-controller-manager-v2 scaled
$ oc get pods -n threescale
NAME                                                         READY   STATUS    RESTARTS   AGE
threescale-operator-controller-manager-v2-79546bd8c-b4qbh   1/1     Running   0          2m5s
$ vi ./scaledeployment.sh
for deployment in apicast-production apicast-staging backend-cron backend-listener backend-redis backend-worker system-app system-memcache system-mysql system-redis system-searchd system-sidekiq zync zync-database zync-que; do
    oc scale deployment/$deployment --replicas=1 -n threescale
done
$ ./scaledeployment.sh
deployment.apps.openshift.io/apicast-production scaled
deployment.apps.openshift.io/apicast-staging scaled
deployment.apps.openshift.io/backend-cron scaled
deployment.apps.openshift.io/backend-listener scaled
deployment.apps.openshift.io/backend-redis scaled
deployment.apps.openshift.io/backend-worker scaled
deployment.apps.openshift.io/system-app scaled
deployment.apps.openshift.io/system-memcache scaled
deployment.apps.openshift.io/system-mysql scaled
deployment.apps.openshift.io/system-redis scaled
deployment.apps.openshift.io/system-searchd scaled
deployment.apps.openshift.io/system-sidekiq scaled
deployment.apps.openshift.io/zync scaled
deployment.apps.openshift.io/zync-database scaled
deployment.apps.openshift.io/zync-que scaled
$ oc get routes -n threescale
NAME                         HOST/PORT                                                                   PATH   SERVICES             PORT      TERMINATION     WILDCARD
backend                      backend-3scale.apps.custom-cluster-name.openshift.com                         backend-listener     http      edge/Allow      None
zync-3scale-api-b4l4d        api-3scale-apicast-production.apps.custom-cluster-name.openshift.com          apicast-production   gateway   edge/Redirect   None
zync-3scale-api-b6sns        api-3scale-apicast-staging.apps.custom-cluster-name.openshift.com             apicast-staging      gateway   edge/Redirect   None
zync-3scale-master-7sc4j     master.apps.custom-cluster-name.openshift.com                                 system-master        http      edge/Redirect   None
zync-3scale-provider-7r2nm   3scale-admin.apps.custom-cluster-name.openshift.com                           system-provider      http      edge/Redirect   None
zync-3scale-provider-mjxlb   3scale.apps.custom-cluster-name.openshift.com                                 system-developer     http      edge/Redirect   None
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionApplication
metadata:
  name: dpa-sample
spec:
  configuration:
    nodeAgent:
      enable: true 
      uploaderType: kopia 
    velero:
      defaultPlugins:
      - openshift
      - aws
      - csi 
      defaultSnapshotMoveData: true
      defaultVolumesToFSBackup: 
      featureFlags:
      - EnableCSI
# ...
kind: Backup
apiVersion: velero.io/v1
metadata:
  name: backup
  namespace: openshift-adp
spec:
  csiSnapshotTimeout: 10m0s
  defaultVolumesToFsBackup: 
  includedNamespaces:
  - mysql-persistent
  itemOperationTimeout: 4h0m0s
  snapshotMoveData: true 
  storageLocation: default
  ttl: 720h0m0s
  volumeSnapshotLocations:
  - dpa-sample-1
# ...
Error: relabel failed /var/lib/kubelet/pods/3ac..34/volumes/ \
kubernetes.io~csi/pvc-684..12c/mount: lsetxattr /var/lib/kubelet/ \
pods/3ac..34/volumes/kubernetes.io~csi/pvc-68..2c/mount/data-xfs-103: \
no space left on device
$ oc create -f backup.yaml
$ oc get datauploads -A
NAMESPACE       NAME                  STATUS      STARTED   BYTES DONE   TOTAL BYTES   STORAGE LOCATION   AGE     NODE
openshift-adp   backup-test-1-sw76b   Completed   9m47s     108104082    108104082     dpa-sample-1       9m47s   ip-10-0-150-57.us-west-2.compute.internal
openshift-adp   mongo-block-7dtpf     Completed   14m       1073741824   1073741824    dpa-sample-1       14m     ip-10-0-150-57.us-west-2.compute.internal
$ oc get datauploads <dataupload_name> -o yaml
apiVersion: velero.io/v2alpha1
kind: DataUpload
metadata:
  name: backup-test-1-sw76b
  namespace: openshift-adp
spec:
  backupStorageLocation: dpa-sample-1
  csiSnapshot:
    snapshotClass: ""
    storageClass: gp3-csi
    volumeSnapshot: velero-mysql-fq8sl
  operationTimeout: 10m0s
  snapshotType: CSI
  sourceNamespace: mysql-persistent
  sourcePVC: mysql
status:
  completionTimestamp: "2023-11-02T16:57:02Z"
  node: ip-10-0-150-57.us-west-2.compute.internal
  path: /host_pods/15116bac-cc01-4d9b-8ee7-609c3bef6bde/volumes/kubernetes.io~csi/pvc-eead8167-556b-461a-b3ec-441749e291c4/mount
  phase: Completed 
  progress:
    bytesDone: 108104082
    totalBytes: 108104082
  snapshotID: 8da1c5febf25225f4577ada2aeb9f899
  startTimestamp: "2023-11-02T16:56:22Z"
apiVersion: velero.io/v1
kind: Restore
metadata:
  name: restore
  namespace: openshift-adp
spec:
  backupName: <backup>
# ...
$ oc create -f restore.yaml
$ oc get datadownloads -A
NAMESPACE       NAME                   STATUS      STARTED   BYTES DONE   TOTAL BYTES   STORAGE LOCATION   AGE     NODE
openshift-adp   restore-test-1-sk7lg   Completed   7m11s     108104082    108104082     dpa-sample-1       7m11s   ip-10-0-150-57.us-west-2.compute.internal
$ oc get datadownloads <datadownload_name> -o yaml
apiVersion: velero.io/v2alpha1
kind: DataDownload
metadata:
  name: restore-test-1-sk7lg
  namespace: openshift-adp
spec:
  backupStorageLocation: dpa-sample-1
  operationTimeout: 10m0s
  snapshotID: 8da1c5febf25225f4577ada2aeb9f899
  sourceNamespace: mysql-persistent
  targetVolume:
    namespace: mysql-persistent
    pv: ""
    pvc: mysql
status:
  completionTimestamp: "2023-11-02T17:01:24Z"
  node: ip-10-0-150-57.us-west-2.compute.internal
  phase: Completed 
  progress:
    bytesDone: 108104082
    totalBytes: 108104082
  startTimestamp: "2023-11-02T17:00:52Z"
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionApplication
metadata:
  name: ts-dpa
  namespace: openshift-adp
spec:
  backupLocations:
  - velero:
      credential:
        key: cloud
        name: cloud-credentials-gcp
      default: true
      objectStorage:
        bucket: oadp...2jw
        prefix: velero
      provider: gcp
  configuration:
    nodeAgent:
      enable: true
      uploaderType: kopia
      backupPVC: 
        storage-class-1:
          readOnly: true 
          spcNoRelabeling: true 
          storageClass: gp3-csi
        storage-class-2:
          readOnly: false
          spcNoRelabeling: false
          storageClass: gp3-csi
    velero:
      defaultPlugins:
      - gcp
      - openshift
      - csi
apiVersion: velero.io/v1
kind: Backup
metadata:
  name: test-backup
  namespace: openshift-adp
spec:
  includedNamespaces:
  - <application_namespace>
  snapshotMoveData: true 
$ oc get pvc -n openshift-adp -w
test-backup1-l..d   Bound   pvc-1298.....22f8   2Gi        ROX            standard-csi   <unset>                 37s
test-backup1-l..d   Bound   pvc-1298....022f8   2Gi        ROX            standard-csi   <unset>                 37s
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionApplication
metadata:
  name: ts-dpa
  namespace: openshift-adp
spec:
#  ...
  configuration:
    nodeAgent:
      enable: true
      uploaderType: kopia
    restorePVC: 
      ignoreDelayBinding: true 
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionApplication
#...
configuration:
  nodeAgent:
    enable: true 
    uploaderType: kopia 
  velero:
    defaultPlugins:
    - openshift
    - aws
    - csi 
    defaultSnapshotMoveData: true
    podConfig:
      env:
        - name: KOPIA_HASHING_ALGORITHM
          value: <hashing_algorithm_name> 
        - name: KOPIA_ENCRYPTION_ALGORITHM
          value: <encryption_algorithm_name> 
        - name: KOPIA_SPLITTER_ALGORITHM
          value: <splitter_algorithm_name> 
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionApplication
metadata:
  name: <dpa_name>
  namespace: openshift-adp
spec:
  backupLocations:
  - name: aws
    velero:
      config:
        profile: default
        region: <region_name>
      credential:
        key: cloud
        name: cloud-credentials
      default: true
      objectStorage:
        bucket: <bucket_name>
        prefix: velero
      provider: aws
  configuration:
    nodeAgent:
      enable: true
      uploaderType: kopia
    velero:
      defaultPlugins:
      - openshift
      - aws
      - csi
      defaultSnapshotMoveData: true
      podConfig:
        env:
          - name: KOPIA_HASHING_ALGORITHM
            value: BLAKE3-256
          - name: KOPIA_ENCRYPTION_ALGORITHM
            value: CHACHA20-POLY1305-HMAC-SHA256
          - name: KOPIA_SPLITTER_ALGORITHM
            value: DYNAMIC-8M-RABINKARP
$ oc create -f <dpa_file_name> 
$ oc get dpa -o yaml
apiVersion: velero.io/v1
kind: Backup
metadata:
  name: test-backup
  namespace: openshift-adp
spec:
  includedNamespaces:
  - <application_namespace>
  defaultVolumesToFsBackup: true
$ oc apply -f <backup_file_name> 
$ oc get backups.velero.io <backup_name> -o yaml 
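For a quick pass/fail check, the phase field alone is enough; a finished backup reports Completed:

$ oc get backups.velero.io <backup_name> -n openshift-adp -o jsonpath='{.status.phase}'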
$ kopia repository connect s3 \
  --bucket=<bucket_name> \
  --prefix=velero/kopia/<application_namespace> \
  --password=static-passw0rd \
  --access-key="<aws_s3_access_key>" \
  --secret-access-key="<aws_s3_secret_access_key>"
$ kopia repository status
Config file:         /../.config/kopia/repository.config

Description:         Repository in S3: s3.amazonaws.com <bucket_name>
# ...

Storage type:        s3
Storage capacity:    unbounded
Storage config:      {
                       "bucket": <bucket_name>,
                       "prefix": "velero/kopia/<application_namespace>/",
                       "endpoint": "s3.amazonaws.com",
                       "accessKeyID": <access_key>,
                       "secretAccessKey": "****************************************",
                       "sessionToken": ""
                     }

Unique ID:           58....aeb0
Hash:                BLAKE3-256
Encryption:          CHACHA20-POLY1305-HMAC-SHA256
Splitter:            DYNAMIC-8M-RABINKARP
Format version:      3
# ...
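With the repository connected, the snapshots written by the data mover can also be listed with the standard Kopia CLI:

$ kopia snapshot list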
apiVersion: v1
kind: Pod
metadata:
  name: oadp-mustgather-pod
  labels:
    purpose: user-interaction
spec:
  containers:
  - name: oadp-mustgather-container
    image: registry.redhat.io/oadp/oadp-mustgather-rhel9:v1.3
    command: ["sleep"]
    args: ["infinity"]
$ oc apply -f <pod_config_file_name> 
$ oc describe pod/oadp-mustgather-pod | grep scc
openshift.io/scc: anyuid
$ oc -n openshift-adp rsh pod/oadp-mustgather-pod
sh-5.1# kopia repository connect s3 \
  --bucket=<bucket_name> \
  --prefix=velero/kopia/<application_namespace> \
  --password=static-passw0rd \
  --access-key="<access_key>" \
  --secret-access-key="<secret_access_key>" \
  --endpoint=<bucket_endpoint>
sh-5.1# kopia benchmark hashing
Benchmarking hash 'BLAKE2B-256' (100 x 1048576 bytes, parallelism 1)
Benchmarking hash 'BLAKE2B-256-128' (100 x 1048576 bytes, parallelism 1)
Benchmarking hash 'BLAKE2S-128' (100 x 1048576 bytes, parallelism 1)
Benchmarking hash 'BLAKE2S-256' (100 x 1048576 bytes, parallelism 1)
Benchmarking hash 'BLAKE3-256' (100 x 1048576 bytes, parallelism 1)
Benchmarking hash 'BLAKE3-256-128' (100 x 1048576 bytes, parallelism 1)
Benchmarking hash 'HMAC-SHA224' (100 x 1048576 bytes, parallelism 1)
Benchmarking hash 'HMAC-SHA256' (100 x 1048576 bytes, parallelism 1)
Benchmarking hash 'HMAC-SHA256-128' (100 x 1048576 bytes, parallelism 1)
Benchmarking hash 'HMAC-SHA3-224' (100 x 1048576 bytes, parallelism 1)
Benchmarking hash 'HMAC-SHA3-256' (100 x 1048576 bytes, parallelism 1)
     Hash                 Throughput
-----------------------------------------------------------------
  0. BLAKE3-256           15.3 GB / second
  1. BLAKE3-256-128       15.2 GB / second
  2. HMAC-SHA256-128      6.4 GB / second
  3. HMAC-SHA256          6.4 GB / second
  4. HMAC-SHA224          6.4 GB / second
  5. BLAKE2B-256-128      4.2 GB / second
  6. BLAKE2B-256          4.1 GB / second
  7. BLAKE2S-256          2.9 GB / second
  8. BLAKE2S-128          2.9 GB / second
  9. HMAC-SHA3-224        1.6 GB / second
 10. HMAC-SHA3-256        1.5 GB / second
-----------------------------------------------------------------
Fastest option for this machine is: --block-hash=BLAKE3-256
sh-5.1# kopia benchmark encryption
Benchmarking encryption 'AES256-GCM-HMAC-SHA256'... (1000 x 1048576 bytes, parallelism 1)
Benchmarking encryption 'CHACHA20-POLY1305-HMAC-SHA256'... (1000 x 1048576 bytes, parallelism 1)
     Encryption                     Throughput
-----------------------------------------------------------------
  0. AES256-GCM-HMAC-SHA256         2.2 GB / second
  1. CHACHA20-POLY1305-HMAC-SHA256  1.8 GB / second
-----------------------------------------------------------------
Fastest option for this machine is: --encryption=AES256-GCM-HMAC-SHA256
sh-5.1# kopia benchmark splitter
splitting 16 blocks of 32MiB each, parallelism 1
DYNAMIC                     747.6 MB/s count:107 min:9467 10th:2277562 25th:2971794 50th:4747177 75th:7603998 90th:8388608 max:8388608
DYNAMIC-128K-BUZHASH        718.5 MB/s count:3183 min:3076 10th:80896 25th:104312 50th:157621 75th:249115 90th:262144 max:262144
DYNAMIC-128K-RABINKARP      164.4 MB/s count:3160 min:9667 10th:80098 25th:106626 50th:162269 75th:250655 90th:262144 max:262144
# ...
FIXED-512K                  102.9 TB/s count:1024 min:524288 10th:524288 25th:524288 50th:524288 75th:524288 90th:524288 max:524288
FIXED-8M                    566.3 TB/s count:64 min:8388608 10th:8388608 25th:8388608 50th:8388608 75th:8388608 90th:8388608 max:8388608
-----------------------------------------------------------------
  0. FIXED-8M                  566.3 TB/s   count:64 min:8388608 10th:8388608 25th:8388608 50th:8388608 75th:8388608 90th:8388608 max:8388608
  1. FIXED-4M                  425.8 TB/s   count:128 min:4194304 10th:4194304 25th:4194304 50th:4194304 75th:4194304 90th:4194304 max:4194304
  # ...
 22. DYNAMIC-128K-RABINKARP    164.4 MB/s   count:3160 min:9667 10th:80098 25th:106626 50th:162269 75th:250655 90th:262144 max:262144
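The benchmark winners feed back into the DataProtectionApplication through the same podConfig.env block shown earlier. Note that the raw splitter winner above (FIXED-8M) gives up content-defined chunking, and with it much of Kopia's deduplication, so a dynamic splitter such as DYNAMIC-8M-RABINKARP may still be the better choice; the sketch below only illustrates the wiring, not a recommendation:

podConfig:
  env:
    - name: KOPIA_HASHING_ALGORITHM
      value: BLAKE3-256
    - name: KOPIA_ENCRYPTION_ALGORITHM
      value: AES256-GCM-HMAC-SHA256
    - name: KOPIA_SPLITTER_ALGORITHM
      value: DYNAMIC-8M-RABINKARP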
$ oc api-resources
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionApplication
...
spec:
  configuration:
    velero:
      featureFlags:
      - EnableAPIGroupVersions
$ oc -n <your_pod_namespace> annotate pod/<your_pod_name> \
  backup.velero.io/backup-volumes=<your_volume_name_1>,<your_volume_name_2>,...,<your_volume_name_n>
$ oc -n <your_pod_namespace> annotate pod/<your_pod_name> \
  backup.velero.io/backup-volumes-excludes=<your_volume_name_1>,<your_volume_name_2>,...,<your_volume_name_n>
$ velero backup create <backup_name> --default-volumes-to-fs-backup <any_other_options>
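As a concrete (hypothetical) invocation, the same flag combined with a namespace filter:

$ velero backup create fs-backup-1 --default-volumes-to-fs-backup --include-namespaces <application_namespace>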
$ cat change-storageclass.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: change-storage-class-config
  namespace: openshift-adp
  labels:
    velero.io/plugin-config: ""
    velero.io/change-storage-class: RestoreItemAction
data:
  standard-csi: ssd-csi
$ oc create -f change-storageclass.yaml
$ alias velero='oc -n openshift-adp exec deployment/velero -c velero -it -- ./velero'
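With the alias in place, any Velero subcommand runs inside the cluster deployment, for example:

$ velero backup get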
$ oc describe <velero_cr> <cr_name>
$ oc logs pod/<velero>
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionApplication
metadata:
  name: velero-sample
spec:
  configuration:
    velero:
      logLevel: warning
$ oc -n openshift-adp exec deployment/velero -c velero -- ./velero \
  <backup_restore_cr> <command> <cr_name>
$ oc -n openshift-adp exec deployment/velero -c velero -- ./velero \
  backup describe 0e44ae00-5dc3-11eb-9ca8-df7e5254778b-2d8ql
$ oc -n openshift-adp exec deployment/velero -c velero -- ./velero \
  --help
$ oc -n openshift-adp exec deployment/velero -c velero -- ./velero \
  <backup_restore_cr> logs <cr_name>
$ oc -n openshift-adp exec deployment/velero -c velero -- ./velero \
  restore logs ccc7c2d0-6017-11eb-afab-85d0007f5a19-x4lbf
$ oc -n openshift-adp exec deployment/velero -c velero -- ./velero \
  <backup_restore_cr> describe <cr_name>
$ oc -n openshift-adp exec deployment/velero -c velero -- ./velero \
  backup describe 0e44ae00-5dc3-11eb-9ca8-df7e5254778b-2d8ql
requests:
  cpu: 500m
  memory: 128Mi
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionApplication
...
configuration:
  velero:
    podConfig:
      resourceAllocations:
        requests:
          cpu: 200m
          memory: 256Mi
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionApplication
...
configuration:
  restic:
    podConfig:
      resourceAllocations:
        requests:
          cpu: 1000m
          memory: 16Gi
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionApplication
metadata:
  name: ts-dpa
spec:
  backupLocations:
  - velero:
      default: true
      objectStorage:
        bucket: oadp.....njph
        prefix: velero
      credential:
        key: cloud
        name: cloud-credentials-gcp
      provider: gcp
  configuration:
    velero:
      defaultPlugins:
      - gcp
      - openshift
      - csi
    nodeAgent:
      enable: true
      uploaderType: kopia
      podConfig:
        resourceAllocations:
          requests:
            cpu: 1000m
            memory: 16Gi
$ oc create -f nodeAgent.yaml
$ oc get pods
NAME                                                        READY   STATUS      RESTARTS   AGE
node-agent-hbj9l                                            1/1     Running     0          97s
node-agent-wmwgz                                            1/1     Running     0          95s
node-agent-zvc7k                                            1/1     Running     0          98s
openshift-adp-controller-manager-7f9db86d96-4lhgq           1/1     Running     0          137m
velero-7b6c7fb8d7-ppc8m                                     1/1     Running     0          4m2s
$ oc describe pod node-agent-hbj9l | grep -C 5 Requests
      --log-format=text
    State:          Running
      Started:      Mon, 09 Jun 2025 16:22:15 +0530
    Ready:          True
    Restart Count:  0
    Requests:
      cpu:     1
      memory:  1Gi
    Environment:
      NODE_NAME:            (v1:spec.nodeName)
      VELERO_NAMESPACE:    openshift-adp (v1:metadata.namespace)
$ velero restore create <restore_name> \
  --from-backup=<backup_name> --include-resources \
  service.serving.knative.dev
$ oc get mutatingwebhookconfigurations
2024-02-27T10:46:50.028951744Z time="2024-02-27T10:46:50Z" level=error msg="Error backing up item"
backup=openshift-adp/<backup name> error="error executing custom action (groupResource=imagestreams.image.openshift.io,
namespace=<BSL Name>, name=postgres): rpc error: code = Aborted desc = plugin panicked:
runtime error: index out of range with length 1, stack trace: goroutine 94…
$ oc label backupstoragelocations.velero.io <bsl_name> app.kubernetes.io/component=bsl
$ oc -n openshift-adp get secret/oadp-<bsl_name>-<bsl_provider>-registry-secret -o json | jq -r '.data'
Backup storage contains invalid top-level directories.
`InvalidAccessKeyId: The AWS Access Key Id you provided does not exist in our records.`
NoCredentialProviders: no valid providers in chain.
[default]
aws_access_key_id=AKIAIOSFODNN7EXAMPLE
aws_secret_access_key=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
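Assuming the file above is saved locally as credentials-velero, the secret that the backup location references can be recreated from it:

$ oc create secret generic cloud-credentials -n openshift-adp --from-file cloud=credentials-velero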
$ oc get backupstoragelocations.velero.io -A
$ velero backup-location get -n <oadp_operator_namespace>
$ oc get backupstoragelocations.velero.io -n <namespace> -o yaml
apiVersion: v1
items:
- apiVersion: velero.io/v1
  kind: BackupStorageLocation
  metadata:
    creationTimestamp: "2023-11-03T19:49:04Z"
    generation: 9703
    name: example-dpa-1
    namespace: openshift-adp-operator
    ownerReferences:
    - apiVersion: oadp.openshift.io/v1alpha1
      blockOwnerDeletion: true
      controller: true
      kind: DataProtectionApplication
      name: example-dpa
      uid: 0beeeaff-0287-4f32-bcb1-2e3c921b6e82
    resourceVersion: "24273698"
    uid: ba37cd15-cf17-4f7d-bf03-8af8655cea83
  spec:
    config:
      enableSharedConfig: "true"
      region: us-west-2
    credential:
      key: credentials
      name: cloud-credentials
    default: true
    objectStorage:
      bucket: example-oadp-operator
      prefix: example
    provider: aws
  status:
    lastValidationTime: "2023-11-10T22:06:46Z"
    message: "BackupStorageLocation \"example-dpa-1\" is unavailable: rpc
      error: code = Unknown desc = WebIdentityErr: failed to retrieve credentials\ncaused
      by: AccessDenied: Not authorized to perform sts:AssumeRoleWithWebIdentity\n\tstatus
      code: 403, request id: d3f2e099-70a0-467b-997e-ff62345e3b54"
    phase: Unavailable
kind: List
metadata:
  resourceVersion: ""
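To scan every location at once, only the name and phase need to be extracted; an Unavailable phase, as above, usually points at a credentials or permissions problem:

$ oc get backupstoragelocations.velero.io -A -o jsonpath='{range .items[*]}{.metadata.namespace}{"\t"}{.metadata.name}{"\t"}{.status.phase}{"\n"}{end}'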
level=error msg="Error backing up item" backup=velero/monitoring error="timed out waiting for all PodVolumeBackups to complete"
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionApplication
metadata:
  name: <dpa_name>
spec:
  configuration:
    nodeAgent:
      enable: true
      uploaderType: restic
      timeout: 1h
# ...
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionApplication
metadata:
  name: <dpa_name>
spec:
  configuration:
    velero:
      resourceTimeout: 10m
# ...
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionApplication
metadata:
  name: <dpa_name>
spec:
  configuration:
    velero:
      defaultItemOperationTimeout: 1h
# ...
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionApplication
metadata:
  name: <dpa_name>
spec:
  features:
    dataMover:
      timeout: 10m
# ...
apiVersion: velero.io/v1
kind: Backup
metadata:
  name: <backup_name>
spec:
  csiSnapshotTimeout: 10m
# ...
apiVersion: velero.io/v1
kind: Restore
metadata:
  name: <restore_name>
spec:
  itemOperationTimeout: 1h
# ...
apiVersion: velero.io/v1
kind: Backup
metadata:
  name: <backup_name>
spec:
  itemOperationTimeout: 1h
# ...
InvalidVolume.NotFound: The volume 'vol-xxxx' does not exist.
$ oc -n {namespace} exec deployment/velero -c velero -- ./velero \
  backup describe <backup>
$ oc delete backups.velero.io <backup> -n openshift-adp
$ velero backup describe <backup_name> --details
time="2023-02-17T16:33:13Z" level=error msg="Error backing up item" backup=openshift-adp/user1-backup-check5 error="error executing custom action (groupResource=persistentvolumeclaims, namespace=busy1, name=pvc1-user1): rpc error: code = Unknown desc = failed to get volumesnapshotclass for storageclass ocs-storagecluster-ceph-rbd: failed to get volumesnapshotclass for provisioner openshift-storage.rbd.csi.ceph.com, ensure that the desired volumesnapshot class has the velero.io/csi-volumesnapshot-class label" logSource="/remote-source/velero/app/pkg/backup/backup.go:417" name=busybox-79799557b5-vprq
$ oc delete backups.velero.io <backup> -n openshift-adp
$ oc label volumesnapshotclass/<snapclass_name> velero.io/csi-volumesnapshot-class=true
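The label can then be verified; the velero.io/csi-volumesnapshot-class=true pair should appear in the LABELS column:

$ oc get volumesnapshotclass <snapclass_name> --show-labels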
controller=pod-volume-backup error="fork/exec /usr/bin/restic: permission denied".
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionApplication
# ...
spec:
  configuration:
    nodeAgent:
      enable: true
      uploaderType: restic
      supplementalGroups:
      - <group_id>
# ...
stderr=Fatal: unable to open config file: Stat: The specified key does not exist.\nIs there a repository at the following location?
$ oc delete resticrepository -n openshift-adp <name_of_the_restic_repository>
 time="2021-12-29T18:29:14Z" level=info msg="1 errors
 encountered backup up item" backup=velero/backup65
 logSource="pkg/backup/backup.go:431" name=mysql-7d99fc949-qbkds
 time="2021-12-29T18:29:14Z" level=error msg="Error backing up item"
 backup=velero/backup65 error="pod volume backup failed: error running
 restic backup, stderr=Fatal: unable to open config file: Stat: The
 specified key does not exist.\nIs there a repository at the following
 location?\ns3:http://minio-minio.apps.mayap-oadp-
 veleo-1234.qe.devcluster.openshift.com/mayapvelerooadp2/velero1/
 restic/mysql-persistent\n: exit status 1" error.file="/remote-source/
 src/github.com/vmware-tanzu/velero/pkg/restic/backupper.go:184"
 error.function="github.com/vmware-tanzu/velero/
 pkg/restic.(*backupper).BackupPodVolumes"
 logSource="pkg/backup/backup.go:435" name=mysql-7d99fc949-qbkds
\"level=error\" in line#2273: time=\"2023-06-12T06:50:04Z\"
level=error msg=\"error restoring mysql-869f9f44f6-tp5lv: pods\\\
"mysql-869f9f44f6-tp5lv\\\" is forbidden: violates PodSecurity\\\
"restricted:v1.24\\\": privil eged (container \\\"mysql\\\
" must not set securityContext.privileged=true),
allowPrivilegeEscalation != false (containers \\\
"restic-wait\\\", \\\"mysql\\\" must set securityContext.allowPrivilegeEscalation=false), unrestricted capabilities (containers \\\
"restic-wait\\\", \\\"mysql\\\" must set securityContext.capabilities.drop=[\\\"ALL\\\"]), seccompProfile (pod or containers \\\
"restic-wait\\\", \\\"mysql\\\" must set securityContext.seccompProfile.type to \\\
"RuntimeDefault\\\" or \\\"Localhost\\\")\" logSource=\"/remote-source/velero/app/pkg/restore/restore.go:1388\" restore=openshift-adp/todolist-backup-0780518c-08ed-11ee-805c-0a580a80e92c\n
velero container contains \"level=error\" in line#2447: time=\"2023-06-12T06:50:05Z\"
level=error msg=\"Namespace todolist-mariadb,
resource restore error: error restoring pods/todolist-mariadb/mysql-869f9f44f6-tp5lv: pods \\\
"mysql-869f9f44f6-tp5lv\\\" is forbidden: violates PodSecurity \\\"restricted:v1.24\\\": privileged (container \\\
"mysql\\\" must not set securityContext.privileged=true),
allowPrivilegeEscalation != false (containers \\\
"restic-wait\\\",\\\"mysql\\\" must set securityContext.allowPrivilegeEscalation=false), unrestricted capabilities (containers \\\
"restic-wait\\\", \\\"mysql\\\" must set securityContext.capabilities.drop=[\\\"ALL\\\"]), seccompProfile (pod or containers \\\
"restic-wait\\\", \\\"mysql\\\" must set securityContext.seccompProfile.type to \\\
"RuntimeDefault\\\" or \\\"Localhost\\\")\"
logSource=\"/remote-source/velero/app/pkg/controller/restore_controller.go:510\"
restore=openshift-adp/todolist-backup-0780518c-08ed-11ee-805c-0a580a80e92c\n]",
$ oc get dpa -o yaml
# ...
configuration:
  restic:
    enable: true
  velero:
    args:
      restore-resource-priorities: 'securitycontextconstraints,customresourcedefinitions,namespaces,storageclasses,volumesnapshotclass.snapshot.storage.k8s.io,volumesnapshotcontents.snapshot.storage.k8s.io,volumesnapshots.snapshot.storage.k8s.io,datauploads.velero.io,persistentvolumes,persistentvolumeclaims,serviceaccounts,secrets,configmaps,limitranges,pods,replicasets.apps,clusterclasses.cluster.x-k8s.io,endpoints,services,-,clusterbootstraps.run.tanzu.vmware.com,clusters.cluster.x-k8s.io,clusterresourcesets.addons.cluster.x-k8s.io'
    defaultPlugins:
    - gcp
    - openshift
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionTest
metadata:
  name: dpt-sample
  namespace: openshift-adp
spec:
  backupLocationName: <bsl_name>
  csiVolumeSnapshotTestConfigs:
  - snapshotClassName: csi-gce-pd-vsc
    timeout: 90s
    volumeSnapshotSource:
      persistentVolumeClaimName: <pvc1_name>
      persistentVolumeClaimNamespace: <pvc_namespace>
  - snapshotClassName: csi-gce-pd-vsc
    timeout: 120s
    volumeSnapshotSource:
      persistentVolumeClaimName: <pvc2_name>
      persistentVolumeClaimNamespace: <pvc_namespace>
  forceRun: false
  uploadSpeedTestConfig:
    fileSize: 200MB
    timeout: 120s
$ oc create -f <dpt_file_name> 
$ oc get dpt dpt-sample
NAME         PHASE      LASTTESTED   UPLOADSPEED(MBPS)   ENCRYPTION   VERSIONING   SNAPSHOTS    AGE
dpt-sample   Complete   17m          546                 AES256       Enabled      2/2 passed   17m
$ oc get dpt dpt-sample -o yaml
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionTest
....
status:
  bucketMetadata:
    encryptionAlgorithm: AES256
    versioningStatus: Enabled
  lastTested: "202...:47:51Z"
  phase: Complete
  s3Vendor: AWS
  snapshotSummary: 2/2 passed
  snapshotTests:
  - persistentVolumeClaimName: mysql-data
    persistentVolumeClaimNamespace: ocp-mysql
    readyDuration: 24s
    status: Ready
  - persistentVolumeClaimName: mysql-data1
    persistentVolumeClaimNamespace: ocp-mysql
    readyDuration: 40s
    status: Ready
  uploadTest:
    duration: 3.071s
    speedMbps: 546
    success: true
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionTest
metadata:
  name: dpt-sample
  namespace: openshift-adp
spec:
  backupLocationSpec:
    provider: aws
    default: true
    objectStorage:
      bucket: sample-bucket
      prefix: velero
    config:
      region: us-east-1
      profile: "default"
      insecureSkipTLSVerify: "true"
      s3Url: "https://s3.amazonaws.com/sample-bucket"
    credential:
      name: cloud-credentials
      key: cloud
  uploadSpeedTestConfig:
    fileSize: 50MB
    timeout: 120s
  csiVolumeSnapshotTestConfigs:
    - volumeSnapshotSource:
        persistentVolumeClaimName: mongo
        persistentVolumeClaimNamespace: mongo-persistent
      snapshotClassName: csi-snapclass
      timeout: 2m
  forceRun: true
  skipTLSVerify: true
$ oc create -f <dpt_file_name> 
$ oc get dpt dpt-sample
NAME         PHASE      LASTTESTED   UPLOADSPEED(MBPS)   ENCRYPTION   VERSIONING   SNAPSHOTS    AGE
dpt-sample   Complete   17m          546                 AES256       Enabled      2/2 passed   17m
$ az role assignment create \
--assignee "$AZURE_CLIENT_ID" \
--role "Storage Blob Data Contributor" \
--scope "/subscriptions/$AZURE_SUBSCRIPTION_ID/resourceGroups/$AZURE_RESOURCE_GROUP/providers/Microsoft.Storage/storageAccounts/$AZURE_STORAGE_ACCOUNT_ID"
AZURE_SUBSCRIPTION_ID=<subscription-id>
AZURE_TENANT_ID=<tenant-id>
AZURE_CLIENT_ID=<client-id>
AZURE_CLIENT_SECRET=<client-secret>
AZURE_RESOURCE_GROUP=<resource-group>
AZURE_STORAGE_ACCOUNT_ID=<storage-account>
$ oc create secret generic cloud-credentials-azure -n openshift-adp --from-file cloud=<credentials_file_path>
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionApplication
metadata:
  name: ts-dpa
  namespace: openshift-adp
spec:
  configuration:
    velero:
      defaultPlugins:
        - azure
        - openshift
  backupLocations:
    - velero:
        config:
          resourceGroup: oadp-....-b7q4-rg
          storageAccount: oadp...kb7q4
          subscriptionId: 53b8f5...fd54c8a
        credential:
          key: cloud
          name: cloud-credentials-azure
        provider: azure
        default: true
        objectStorage:
          bucket: <bucket_name>
          prefix: velero
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionTest
metadata:
  name: dpt-sample
  namespace: openshift-adp
spec:
  backupLocationName: <bsl_name>
  uploadSpeedTestConfig:
    fileSize: 40MB
    timeout: 120s
  csiVolumeSnapshotTestConfigs:
    - snapshotClassName: csi-azuredisk-vsc
      timeout: 90s
      volumeSnapshotSource:
        persistentVolumeClaimName: mysql-data
        persistentVolumeClaimNamespace: ocp-mysql
    - snapshotClassName: csi-azuredisk-vsc
      timeout: 120s
      volumeSnapshotSource:
        persistentVolumeClaimName: mysql-data1
        persistentVolumeClaimNamespace: ocp-mysql
$ oc adm must-gather --image=registry.redhat.io/oadp/oadp-mustgather-rhel9:v1.5 -- /usr/bin/gather -h
$ oc adm must-gather --image=registry.redhat.io/oadp/oadp-mustgather-rhel9:v1.5
$ oc adm must-gather --image=registry.redhat.io/oadp/oadp-mustgather-rhel9:v1.5 -- /usr/bin/gather --request-timeout 1m 
$ oc adm must-gather --image=registry.redhat.io/oadp/oadp-mustgather-rhel9:v1.5 -- /usr/bin/gather --skip-tls
$ oc adm must-gather --image=registry.redhat.io/oadp/oadp-mustgather-rhel9:v1.5 -- /usr/bin/gather --request-timeout 15s --skip-tls 
$ oc edit configmap cluster-monitoring-config -n openshift-monitoring
apiVersion: v1
kind: ConfigMap
data:
  config.yaml: |
    enableUserWorkload: true
metadata:
# ...
$ oc get pods -n openshift-user-workload-monitoring
NAME                                   READY   STATUS    RESTARTS   AGE
prometheus-operator-6844b4b99c-b57j9   2/2     Running   0          43s
prometheus-user-workload-0             5/5     Running   0          32s
prometheus-user-workload-1             5/5     Running   0          32s
thanos-ruler-user-workload-0           3/3     Running   0          32s
thanos-ruler-user-workload-1           3/3     Running   0          32s
$ oc get configmap user-workload-monitoring-config -n openshift-user-workload-monitoring
Error from server (NotFound): configmaps "user-workload-monitoring-config" not found
apiVersion: v1
kind: ConfigMap
metadata:
  name: user-workload-monitoring-config
  namespace: openshift-user-workload-monitoring
data:
  config.yaml: |
$ oc apply -f 2_configure_user_workload_monitoring.yaml
configmap/user-workload-monitoring-config created
$ oc get svc -n openshift-adp -l app.kubernetes.io/name=velero
NAME                               TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)    AGE
openshift-adp-velero-metrics-svc   ClusterIP   172.30.38.244   <none>        8085/TCP   1h
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  labels:
    app: oadp-service-monitor
  name: oadp-service-monitor
  namespace: openshift-adp
spec:
  endpoints:
  - interval: 30s
    path: /metrics
    targetPort: 8085
    scheme: http
  selector:
    matchLabels:
      app.kubernetes.io/name: "velero"
$ oc apply -f 3_create_oadp_service_monitor.yaml
servicemonitor.monitoring.coreos.com/oadp-service-monitor created
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
  name: sample-oadp-alert
  namespace: openshift-adp
spec:
  groups:
  - name: sample-oadp-backup-alert
    rules:
    - alert: OADPBackupFailing
      annotations:
        description: 'OADP had {{$value | humanize}} backup failures over the last 2 hours.'
        summary: OADP has issues creating backups
      expr: |
        increase(velero_backup_failure_total{job="openshift-adp-velero-metrics-svc"}[2h]) > 0
      for: 5m
      labels:
        severity: warning
$ oc apply -f 4_create_oadp_alert_rule.yaml
prometheusrule.monitoring.coreos.com/sample-oadp-alert created
$ oc debug --as-root node/<node_name>
sh-4.4# chroot /host
$ export HTTP_PROXY=http://<your_proxy.example.com>:8080
$ export HTTPS_PROXY=https://<your_proxy.example.com>:8080
$ export NO_PROXY=<example.com>
sh-4.4# /usr/local/bin/cluster-backup.sh /home/core/assets/backup
found latest kube-apiserver: /etc/kubernetes/static-pod-resources/kube-apiserver-pod-6
found latest kube-controller-manager: /etc/kubernetes/static-pod-resources/kube-controller-manager-pod-7
found latest kube-scheduler: /etc/kubernetes/static-pod-resources/kube-scheduler-pod-6
found latest etcd: /etc/kubernetes/static-pod-resources/etcd-pod-3
ede95fe6b88b87ba86a03c15e669fb4aa5bf0991c180d3c6895ce72eaade54a1
etcdctl version: 3.4.14
API version: 3.4
{"level":"info","ts":1624647639.0188997,"caller":"snapshot/v3_snapshot.go:119","msg":"created temporary db file","path":"/home/core/assets/backup/snapshot_2021-06-25_190035.db.part"}
{"level":"info","ts":"2021-06-25T19:00:39.030Z","caller":"clientv3/maintenance.go:200","msg":"opened snapshot stream; downloading"}
{"level":"info","ts":1624647639.0301006,"caller":"snapshot/v3_snapshot.go:127","msg":"fetching snapshot","endpoint":"https://10.0.0.5:2379"}
{"level":"info","ts":"2021-06-25T19:00:40.215Z","caller":"clientv3/maintenance.go:208","msg":"completed snapshot read; closing"}
{"level":"info","ts":1624647640.6032252,"caller":"snapshot/v3_snapshot.go:142","msg":"fetched snapshot","endpoint":"https://10.0.0.5:2379","size":"114 MB","took":1.584090459}
{"level":"info","ts":1624647640.6047094,"caller":"snapshot/v3_snapshot.go:152","msg":"saved","path":"/home/core/assets/backup/snapshot_2021-06-25_190035.db"}
Snapshot saved at /home/core/assets/backup/snapshot_2021-06-25_190035.db
{"hash":3866667823,"revision":31407,"totalKey":12828,"totalSize":114446336}
snapshot db and kube resources are successfully saved to /home/core/assets/backup
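The target directory should now hold the etcd snapshot plus a tarball of the static pod resources; both names carry a timestamp, so a listing looks roughly like this:

sh-4.4# ls /home/core/assets/backup
snapshot_2021-06-25_190035.db
static_kuberesources_2021-06-25_190035.tar.gz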
apiVersion: config.openshift.io/v1
kind: FeatureGate
metadata:
  name: cluster
spec:
  featureSet: TechPreviewNoUpgrade
$ oc apply -f enable-tech-preview-no-upgrade.yaml
$ oc get crd | grep backup
backups.config.openshift.io 2023-10-25T13:32:43Z
etcdbackups.operator.openshift.io 2023-10-25T13:32:04Z
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: etcd-backup-pvc
  namespace: openshift-etcd
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 200Gi
  volumeMode: Filesystem
$ oc apply -f etcd-backup-pvc.yaml
$ oc get pvc
NAME              STATUS    VOLUME   CAPACITY   ACCESS MODES   STORAGECLASS   AGE
etcd-backup-pvc   Bound                                                       51s
apiVersion: operator.openshift.io/v1alpha1
kind: EtcdBackup
metadata:
  name: etcd-single-backup
  namespace: openshift-etcd
spec:
  pvcName: etcd-backup-pvc
$ oc apply -f etcd-single-backup.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: etcd-backup-local-storage
provisioner: kubernetes.io/no-provisioner
volumeBindingMode: Immediate
$ oc apply -f etcd-backup-local-storage.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: etcd-backup-pv-fs
spec:
  capacity:
    storage: 100Gi
  volumeMode: Filesystem
  accessModes:
  - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  storageClassName: etcd-backup-local-storage
  local:
    path: /mnt
  nodeAffinity:
    required:
      nodeSelectorTerms:
      - matchExpressions:
        - key: kubernetes.io/hostname
          operator: In
          values:
          - <example_master_node>
$ oc get pv
NAME                    CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM   STORAGECLASS                REASON   AGE
etcd-backup-pv-fs       100Gi      RWO            Retain           Available           etcd-backup-local-storage            10s
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: etcd-backup-pvc
  namespace: openshift-etcd
spec:
  accessModes:
  - ReadWriteOnce
  volumeMode: Filesystem
  resources:
    requests:
      storage: 10Gi
$ oc apply -f etcd-backup-pvc.yaml
apiVersion: operator.openshift.io/v1alpha1
kind: EtcdBackup
metadata:
  name: etcd-single-backup
  namespace: openshift-etcd
spec:
  pvcName: etcd-backup-pvc
$ oc apply -f etcd-single-backup.yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: etcd-backup-pvc
  namespace: openshift-etcd
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 200Gi
  volumeMode: Filesystem
  storageClassName: etcd-backup-local-storage
$ oc apply -f etcd-backup-pvc.yaml
$ oc get pvc
NAME              STATUS    VOLUME   CAPACITY   ACCESS MODES   STORAGECLASS   AGE
etcd-backup-pvc   Bound                                                       51s
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: etcd-backup-local-storage
provisioner: kubernetes.io/no-provisioner
volumeBindingMode: Immediate
$ oc apply -f etcd-backup-local-storage.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: etcd-backup-pv-fs
spec:
  capacity:
    storage: 100Gi
  volumeMode: Filesystem
  accessModes:
  - ReadWriteMany
  persistentVolumeReclaimPolicy: Delete
  storageClassName: etcd-backup-local-storage
  local:
    path: /mnt/
  nodeAffinity:
    required:
      nodeSelectorTerms:
      - matchExpressions:
        - key: kubernetes.io/hostname
          operator: In
          values:
          - <example_master_node>
$ oc get nodes
$ oc get pv
NAME                    CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM   STORAGECLASS                REASON   AGE
etcd-backup-pv-fs       100Gi      RWX            Delete           Available           etcd-backup-local-storage            10s
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: etcd-backup-pvc
spec:
  accessModes:
  - ReadWriteMany
  volumeMode: Filesystem
  resources:
    requests:
      storage: 10Gi
  storageClassName: etcd-backup-local-storage
$ oc apply -f etcd-backup-pvc.yaml
apiVersion: config.openshift.io/v1alpha1
kind: Backup
metadata:
  name: etcd-recurring-backup
spec:
  etcd:
    schedule: "20 4 * * *"
    timeZone: "UTC"
    pvcName: etcd-backup-pvc
spec:
  etcd:
    retentionPolicy:
      retentionType: RetentionNumber
      retentionNumber:
        maxNumberOfBackups: 5
spec:
  etcd:
    retentionPolicy:
      retentionType: RetentionSize
      retentionSize:
        maxSizeOfBackupsGb: 20
$ oc create -f etcd-recurring-backup.yaml
$ oc get cronjob -n openshift-etcd
$ oc get etcd -o=jsonpath='{range .items[0].status.conditions[?(@.type=="EtcdMembersAvailable")]}{.message}{"\n"}{end}'
2 of 3 members are available, ip-10-0-131-183.ec2.internal is unhealthy
$ oc get machines -A -ojsonpath='{range .items[*]}{@.status.nodeRef.name}{"\t"}{@.status.providerStatus.instanceState}{"\n"}{end}' | grep -v running
ip-10-0-131-183.ec2.internal  stopped 
$ oc get nodes -o jsonpath='{range .items[*]}{"\n"}{.metadata.name}{"\t"}{range .spec.taints[*]}{.key}{" "}{end}{end}' | grep unreachable
ip-10-0-131-183.ec2.internal	node-role.kubernetes.io/master node.kubernetes.io/unreachable node.kubernetes.io/unreachable 
$ oc get nodes -l node-role.kubernetes.io/master | grep "NotReady"
ip-10-0-131-183.ec2.internal   NotReady   master   122m   v1.32.3 
$ oc get nodes -l node-role.kubernetes.io/master
NAME                           STATUS   ROLES    AGE     VERSION
ip-10-0-131-183.ec2.internal   Ready    master   6h13m   v1.32.3
ip-10-0-164-97.ec2.internal    Ready    master   6h13m   v1.32.3
ip-10-0-154-204.ec2.internal   Ready    master   6h13m   v1.32.3
$ oc -n openshift-etcd get pods -l k8s-app=etcd
NAME                                             READY   STATUS      RESTARTS   AGE
etcd-ip-10-0-131-183.ec2.internal                2/3     Error       7          6h9m
etcd-ip-10-0-164-97.ec2.internal                 3/3     Running     0          6h6m
etcd-ip-10-0-154-204.ec2.internal                3/3     Running     0          6h6m
$ oc -n openshift-etcd get pods -l k8s-app=etcd
NAME                                             READY   STATUS      RESTARTS   AGE
etcd-ip-10-0-131-183.ec2.internal                3/3     Running     0          123m
etcd-ip-10-0-164-97.ec2.internal                 3/3     Running     0          123m
etcd-ip-10-0-154-204.ec2.internal                3/3     Running     0          124m
$ oc rsh -n openshift-etcd etcd-ip-10-0-154-204.ec2.internal
sh-4.2# etcdctl member list -w table
+------------------+---------+------------------------------+---------------------------+---------------------------+
|        ID        | STATUS  |             NAME             |        PEER ADDRS         |       CLIENT ADDRS        |
+------------------+---------+------------------------------+---------------------------+---------------------------+
| 6fc1e7c9db35841d | started | ip-10-0-131-183.ec2.internal | https://10.0.131.183:2380 | https://10.0.131.183:2379 |
| 757b6793e2408b6c | started |  ip-10-0-164-97.ec2.internal |  https://10.0.164.97:2380 |  https://10.0.164.97:2379 |
| ca8c2990a0aa29d1 | started | ip-10-0-154-204.ec2.internal | https://10.0.154.204:2380 | https://10.0.154.204:2379 |
+------------------+---------+------------------------------+---------------------------+---------------------------+
sh-4.2# etcdctl member remove 6fc1e7c9db35841d
Member 6fc1e7c9db35841d removed from cluster ead669ce1fbfb346
sh-4.2# etcdctl member list -w table
+------------------+---------+------------------------------+---------------------------+---------------------------+
|        ID        | STATUS  |             NAME             |        PEER ADDRS         |       CLIENT ADDRS        |
+------------------+---------+------------------------------+---------------------------+---------------------------+
| 757b6793e2408b6c | started |  ip-10-0-164-97.ec2.internal |  https://10.0.164.97:2380 |  https://10.0.164.97:2379 |
| ca8c2990a0aa29d1 | started | ip-10-0-154-204.ec2.internal | https://10.0.154.204:2380 | https://10.0.154.204:2379 |
+------------------+---------+------------------------------+---------------------------+---------------------------+
$ oc patch etcd/cluster --type=merge -p '{"spec": {"unsupportedConfigOverrides": {"useUnsupportedUnsafeNonHANonProductionUnstableEtcd": true}}}'
$ oc delete node <node_name>
$ oc delete node ip-10-0-131-183.ec2.internal
$ oc get secrets -n openshift-etcd | grep ip-10-0-131-183.ec2.internal 
etcd-peer-ip-10-0-131-183.ec2.internal              kubernetes.io/tls                     2      47m
etcd-serving-ip-10-0-131-183.ec2.internal           kubernetes.io/tls                     2      47m
etcd-serving-metrics-ip-10-0-131-183.ec2.internal   kubernetes.io/tls                     2      47m
$ oc delete secret -n openshift-etcd etcd-peer-ip-10-0-131-183.ec2.internal
$ oc delete secret -n openshift-etcd etcd-serving-ip-10-0-131-183.ec2.internal
$ oc delete secret -n openshift-etcd etcd-serving-metrics-ip-10-0-131-183.ec2.internal
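Because the three secrets differ only by prefix, the deletions can also be scripted as one loop, in the style used elsewhere in this document; a sketch, assuming the same node name as above:

$ for prefix in etcd-peer etcd-serving etcd-serving-metrics; do oc delete secret -n openshift-etcd ${prefix}-ip-10-0-131-183.ec2.internal; done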
$ oc -n openshift-machine-api get controlplanemachineset
$ oc get machines -n openshift-machine-api -o wide
NAME                                        PHASE     TYPE        REGION      ZONE         AGE     NODE                           PROVIDERID                              STATE
clustername-8qw5l-master-0                  Running   m4.xlarge   us-east-1   us-east-1a   3h37m   ip-10-0-131-183.ec2.internal   aws:///us-east-1a/i-0ec2782f8287dfb7e   stopped 
clustername-8qw5l-master-1                  Running   m4.xlarge   us-east-1   us-east-1b   3h37m   ip-10-0-154-204.ec2.internal   aws:///us-east-1b/i-096c349b700a19631   running
clustername-8qw5l-master-2                  Running   m4.xlarge   us-east-1   us-east-1c   3h37m   ip-10-0-164-97.ec2.internal    aws:///us-east-1c/i-02626f1dba9ed5bba   running
clustername-8qw5l-worker-us-east-1a-wbtgd   Running   m4.large    us-east-1   us-east-1a   3h28m   ip-10-0-129-226.ec2.internal   aws:///us-east-1a/i-010ef6279b4662ced   running
clustername-8qw5l-worker-us-east-1b-lrdxb   Running   m4.large    us-east-1   us-east-1b   3h28m   ip-10-0-144-248.ec2.internal   aws:///us-east-1b/i-0cb45ac45a166173b   running
clustername-8qw5l-worker-us-east-1c-pkg26   Running   m4.large    us-east-1   us-east-1c   3h28m   ip-10-0-170-181.ec2.internal   aws:///us-east-1c/i-06861c00007751b0a   running
$ oc delete machine -n openshift-machine-api clustername-8qw5l-master-0 
$ oc get machines -n openshift-machine-api -o wide
NAME                                        PHASE          TYPE        REGION      ZONE         AGE     NODE                           PROVIDERID                              STATE
clustername-8qw5l-master-1                  Running        m4.xlarge   us-east-1   us-east-1b   3h37m   ip-10-0-154-204.ec2.internal   aws:///us-east-1b/i-096c349b700a19631   running
clustername-8qw5l-master-2                  Running        m4.xlarge   us-east-1   us-east-1c   3h37m   ip-10-0-164-97.ec2.internal    aws:///us-east-1c/i-02626f1dba9ed5bba   running
clustername-8qw5l-master-3                  Provisioning   m4.xlarge   us-east-1   us-east-1a   85s     ip-10-0-133-53.ec2.internal    aws:///us-east-1a/i-015b0888fe17bc2c8   running 
clustername-8qw5l-worker-us-east-1a-wbtgd   Running        m4.large    us-east-1   us-east-1a   3h28m   ip-10-0-129-226.ec2.internal   aws:///us-east-1a/i-010ef6279b4662ced   running
clustername-8qw5l-worker-us-east-1b-lrdxb   Running        m4.large    us-east-1   us-east-1b   3h28m   ip-10-0-144-248.ec2.internal   aws:///us-east-1b/i-0cb45ac45a166173b   running
clustername-8qw5l-worker-us-east-1c-pkg26   Running        m4.large    us-east-1   us-east-1c   3h28m   ip-10-0-170-181.ec2.internal   aws:///us-east-1c/i-06861c00007751b0a   running
$ oc get machines -n openshift-machine-api -o wide
NAME                                        PHASE     TYPE        REGION      ZONE         AGE     NODE                           PROVIDERID                              STATE
clustername-8qw5l-master-0                  Running   m4.xlarge   us-east-1   us-east-1a   3h37m   ip-10-0-131-183.ec2.internal   aws:///us-east-1a/i-0ec2782f8287dfb7e   stopped 
clustername-8qw5l-master-1                  Running   m4.xlarge   us-east-1   us-east-1b   3h37m   ip-10-0-154-204.ec2.internal   aws:///us-east-1b/i-096c349b700a19631   running
clustername-8qw5l-master-2                  Running   m4.xlarge   us-east-1   us-east-1c   3h37m   ip-10-0-164-97.ec2.internal    aws:///us-east-1c/i-02626f1dba9ed5bba   running
clustername-8qw5l-worker-us-east-1a-wbtgd   Running   m4.large    us-east-1   us-east-1a   3h28m   ip-10-0-129-226.ec2.internal   aws:///us-east-1a/i-010ef6279b4662ced   running
clustername-8qw5l-worker-us-east-1b-lrdxb   Running   m4.large    us-east-1   us-east-1b   3h28m   ip-10-0-144-248.ec2.internal   aws:///us-east-1b/i-0cb45ac45a166173b   running
clustername-8qw5l-worker-us-east-1c-pkg26   Running   m4.large    us-east-1   us-east-1c   3h28m   ip-10-0-170-181.ec2.internal   aws:///us-east-1c/i-06861c00007751b0a   running
$ oc get machine clustername-8qw5l-master-0 \ 
    -n openshift-machine-api \
    -o yaml \
    > new-master-machine.yaml
status:
  addresses:
  - address: 10.0.131.183
    type: InternalIP
  - address: ip-10-0-131-183.ec2.internal
    type: InternalDNS
  - address: ip-10-0-131-183.ec2.internal
    type: Hostname
  lastUpdated: "2020-04-20T17:44:29Z"
  nodeRef:
    kind: Node
    name: ip-10-0-131-183.ec2.internal
    uid: acca4411-af0d-4387-b73e-52b2484295ad
  phase: Running
  providerStatus:
    apiVersion: awsproviderconfig.openshift.io/v1beta1
    conditions:
    - lastProbeTime: "2020-04-20T16:53:50Z"
      lastTransitionTime: "2020-04-20T16:53:50Z"
      message: machine successfully created
      reason: MachineCreationSucceeded
      status: "True"
      type: MachineCreation
    instanceId: i-0fdb85790d76d0c3f
    instanceState: stopped
    kind: AWSMachineProviderStatus
apiVersion: machine.openshift.io/v1beta1
kind: Machine
metadata:
  ...
  name: clustername-8qw5l-master-3
  ...
spec:
  ...
  providerID: aws:///us-east-1a/i-0fdb85790d76d0c3f
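Editing the saved machine definition by hand works, but the same three changes (drop the status stanza, drop spec.providerID, set the new name) can be scripted. A minimal sketch with jq, which is an assumption on our part and not part of the original procedure; the output file name and the new machine name are illustrative, and oc apply accepts JSON as well as YAML:

$ oc get machine clustername-8qw5l-master-0 \
    -n openshift-machine-api \
    -o json \
    | jq 'del(.status)
          | del(.spec.providerID)
          | del(.metadata.annotations, .metadata.generation, .metadata.resourceVersion, .metadata.uid, .metadata.creationTimestamp)
          | .metadata.name = "clustername-8qw5l-master-3"' \
    > new-master-machine.json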
$ oc delete machine -n openshift-machine-api clustername-8qw5l-master-0 
$ oc get machines -n openshift-machine-api -o wide
NAME                                        PHASE     TYPE        REGION      ZONE         AGE     NODE                           PROVIDERID                              STATE
clustername-8qw5l-master-1                  Running   m4.xlarge   us-east-1   us-east-1b   3h37m   ip-10-0-154-204.ec2.internal   aws:///us-east-1b/i-096c349b700a19631   running
clustername-8qw5l-master-2                  Running   m4.xlarge   us-east-1   us-east-1c   3h37m   ip-10-0-164-97.ec2.internal    aws:///us-east-1c/i-02626f1dba9ed5bba   running
clustername-8qw5l-worker-us-east-1a-wbtgd   Running   m4.large    us-east-1   us-east-1a   3h28m   ip-10-0-129-226.ec2.internal   aws:///us-east-1a/i-010ef6279b4662ced   running
clustername-8qw5l-worker-us-east-1b-lrdxb   Running   m4.large    us-east-1   us-east-1b   3h28m   ip-10-0-144-248.ec2.internal   aws:///us-east-1b/i-0cb45ac45a166173b   running
clustername-8qw5l-worker-us-east-1c-pkg26   Running   m4.large    us-east-1   us-east-1c   3h28m   ip-10-0-170-181.ec2.internal   aws:///us-east-1c/i-06861c00007751b0a   running
$ oc apply -f new-master-machine.yaml
$ oc get machines -n openshift-machine-api -o wide
NAME                                        PHASE          TYPE        REGION      ZONE         AGE     NODE                           PROVIDERID                              STATE
clustername-8qw5l-master-1                  Running        m4.xlarge   us-east-1   us-east-1b   3h37m   ip-10-0-154-204.ec2.internal   aws:///us-east-1b/i-096c349b700a19631   running
clustername-8qw5l-master-2                  Running        m4.xlarge   us-east-1   us-east-1c   3h37m   ip-10-0-164-97.ec2.internal    aws:///us-east-1c/i-02626f1dba9ed5bba   running
clustername-8qw5l-master-3                  Provisioning   m4.xlarge   us-east-1   us-east-1a   85s     ip-10-0-133-53.ec2.internal    aws:///us-east-1a/i-015b0888fe17bc2c8   running 
clustername-8qw5l-worker-us-east-1a-wbtgd   Running        m4.large    us-east-1   us-east-1a   3h28m   ip-10-0-129-226.ec2.internal   aws:///us-east-1a/i-010ef6279b4662ced   running
clustername-8qw5l-worker-us-east-1b-lrdxb   Running        m4.large    us-east-1   us-east-1b   3h28m   ip-10-0-144-248.ec2.internal   aws:///us-east-1b/i-0cb45ac45a166173b   running
clustername-8qw5l-worker-us-east-1c-pkg26   Running        m4.large    us-east-1   us-east-1c   3h28m   ip-10-0-170-181.ec2.internal   aws:///us-east-1c/i-06861c00007751b0a   running
$ oc patch etcd/cluster --type=merge -p '{"spec": {"unsupportedConfigOverrides": null}}'
$ oc get etcd/cluster -oyaml
EtcdCertSignerControllerDegraded: [Operation cannot be fulfilled on secrets "etcd-peer-sno-0": the object has been modified; please apply your changes to the latest version and try again, Operation cannot be fulfilled on secrets "etcd-serving-sno-0": the object has been modified; please apply your changes to the latest version and try again, Operation cannot be fulfilled on secrets "etcd-serving-metrics-sno-0": the object has been modified; please apply your changes to the latest version and try again]
$ oc -n openshift-etcd get pods -l k8s-app=etcd
NAME                                             READY   STATUS      RESTARTS   AGE
etcd-ip-10-0-133-53.ec2.internal                 3/3     Running     0          7m49s
etcd-ip-10-0-164-97.ec2.internal                 3/3     Running     0          123m
etcd-ip-10-0-154-204.ec2.internal                3/3     Running     0          124m
$ oc patch etcd cluster -p='{"spec": {"forceRedeploymentReason": "recovery-'"$( date --rfc-3339=ns )"'"}}' --type=merge 
$ oc rsh -n openshift-etcd etcd-ip-10-0-154-204.ec2.internal
sh-4.2# etcdctl member list -w table
+------------------+---------+------------------------------+---------------------------+---------------------------+
|        ID        | STATUS  |             NAME             |        PEER ADDRS         |       CLIENT ADDRS        |
+------------------+---------+------------------------------+---------------------------+---------------------------+
| 5eb0d6b8ca24730c | started |  ip-10-0-133-53.ec2.internal |  https://10.0.133.53:2380 |  https://10.0.133.53:2379 |
| 757b6793e2408b6c | started |  ip-10-0-164-97.ec2.internal |  https://10.0.164.97:2380 |  https://10.0.164.97:2379 |
| ca8c2990a0aa29d1 | started | ip-10-0-154-204.ec2.internal | https://10.0.154.204:2380 | https://10.0.154.204:2379 |
+------------------+---------+------------------------------+---------------------------+---------------------------+
$ oc debug node/ip-10-0-131-183.ec2.internal 
sh-4.2# chroot /host
sh-4.2# mkdir /var/lib/etcd-backup
sh-4.2# mv /etc/kubernetes/manifests/etcd-pod.yaml /var/lib/etcd-backup/
sh-4.2# mv /var/lib/etcd/ /tmp
$ oc -n openshift-etcd get pods -l k8s-app=etcd
NAME                                             READY   STATUS      RESTARTS   AGE
etcd-ip-10-0-131-183.ec2.internal                2/3     Error       7          6h9m
etcd-ip-10-0-164-97.ec2.internal                 3/3     Running     0          6h6m
etcd-ip-10-0-154-204.ec2.internal                3/3     Running     0          6h6m
$ oc rsh -n openshift-etcd etcd-ip-10-0-154-204.ec2.internal
sh-4.2# etcdctl member list -w table
+------------------+---------+------------------------------+---------------------------+---------------------------+
|        ID        | STATUS  |             NAME             |        PEER ADDRS         |       CLIENT ADDRS        |
+------------------+---------+------------------------------+---------------------------+---------------------------+
| 62bcf33650a7170a | started | ip-10-0-131-183.ec2.internal | https://10.0.131.183:2380 | https://10.0.131.183:2379 |
| b78e2856655bc2eb | started |  ip-10-0-164-97.ec2.internal |  https://10.0.164.97:2380 |  https://10.0.164.97:2379 |
| d022e10b498760d5 | started | ip-10-0-154-204.ec2.internal | https://10.0.154.204:2380 | https://10.0.154.204:2379 |
+------------------+---------+------------------------------+---------------------------+---------------------------+
sh-4.2# etcdctl member remove 62bcf33650a7170a
Member 62bcf33650a7170a removed from cluster ead669ce1fbfb346
sh-4.2# etcdctl member list -w table
+------------------+---------+------------------------------+---------------------------+---------------------------+
|        ID        | STATUS  |             NAME             |        PEER ADDRS         |       CLIENT ADDRS        |
+------------------+---------+------------------------------+---------------------------+---------------------------+
| b78e2856655bc2eb | started |  ip-10-0-164-97.ec2.internal |  https://10.0.164.97:2380 |  https://10.0.164.97:2379 |
| d022e10b498760d5 | started | ip-10-0-154-204.ec2.internal | https://10.0.154.204:2380 | https://10.0.154.204:2379 |
+------------------+---------+------------------------------+---------------------------+---------------------------+
$ oc patch etcd/cluster --type=merge -p '{"spec": {"unsupportedConfigOverrides": {"useUnsupportedUnsafeNonHANonProductionUnstableEtcd": true}}}'
$ oc get secrets -n openshift-etcd | grep ip-10-0-131-183.ec2.internal 
etcd-peer-ip-10-0-131-183.ec2.internal              kubernetes.io/tls                     2      47m
etcd-serving-ip-10-0-131-183.ec2.internal           kubernetes.io/tls                     2      47m
etcd-serving-metrics-ip-10-0-131-183.ec2.internal   kubernetes.io/tls                     2      47m
$ oc delete secret -n openshift-etcd etcd-peer-ip-10-0-131-183.ec2.internal
$ oc delete secret -n openshift-etcd etcd-serving-ip-10-0-131-183.ec2.internal
$ oc delete secret -n openshift-etcd etcd-serving-metrics-ip-10-0-131-183.ec2.internal
$ oc patch etcd cluster -p='{"spec": {"forceRedeploymentReason": "single-master-recovery-'"$( date --rfc-3339=ns )"'"}}' --type=merge 
$ oc patch etcd/cluster --type=merge -p '{"spec": {"unsupportedConfigOverrides": null}}'
$ oc get etcd/cluster -oyaml
EtcdCertSignerControllerDegraded: [Operation cannot be fulfilled on secrets "etcd-peer-sno-0": the object has been modified; please apply your changes to the latest version and try again, Operation cannot be fulfilled on secrets "etcd-serving-sno-0": the object has been modified; please apply your changes to the latest version and try again, Operation cannot be fulfilled on secrets "etcd-serving-metrics-sno-0": the object has been modified; please apply your changes to the latest version and try again]
$ oc rsh -n openshift-etcd etcd-ip-10-0-154-204.ec2.internal
sh-4.2# etcdctl endpoint health
https://10.0.131.183:2379 is healthy: successfully committed proposal: took = 16.671434ms
https://10.0.154.204:2379 is healthy: successfully committed proposal: took = 16.698331ms
https://10.0.164.97:2379 is healthy: successfully committed proposal: took = 16.621645ms
$ oc -n openshift-etcd get pods -l k8s-app=etcd -o wide
NAME                             READY   STATUS    RESTARTS   AGE     IP              NODE                        NOMINATED NODE   READINESS GATES
etcd-openshift-control-plane-0   5/5     Running   11         3h56m   192.168.10.9    openshift-control-plane-0   <none>           <none>
etcd-openshift-control-plane-1   5/5     Running   0          3h54m   192.168.10.10   openshift-control-plane-1   <none>           <none>
etcd-openshift-control-plane-2   5/5     Running   0          3h58m   192.168.10.11   openshift-control-plane-2   <none>           <none>
$ oc rsh -n openshift-etcd etcd-openshift-control-plane-0
sh-4.2# etcdctl member list -w table
+------------------+---------+---------------------------+----------------------------+----------------------------+------------+
|        ID        | STATUS  |           NAME            |         PEER ADDRS         |        CLIENT ADDRS        | IS LEARNER |
+------------------+---------+---------------------------+----------------------------+----------------------------+------------+
| 7a8197040a5126c8 | started | openshift-control-plane-2 | https://192.168.10.11:2380 | https://192.168.10.11:2379 |      false |
| 8d5abe9669a39192 | started | openshift-control-plane-1 | https://192.168.10.10:2380 | https://192.168.10.10:2379 |      false |
| cc3830a72fc357f9 | started | openshift-control-plane-0 |  https://192.168.10.9:2380 |  https://192.168.10.9:2379 |      false |
+------------------+---------+---------------------------+----------------------------+----------------------------+------------+
sh-4.2# etcdctl member remove 7a8197040a5126c8
Member 7a8197040a5126c8 removed from cluster b23536c33f2cdd1b
sh-4.2# etcdctl member list -w table
+------------------+---------+---------------------------+----------------------------+----------------------------+------------+
|        ID        | STATUS  |           NAME            |         PEER ADDRS         |        CLIENT ADDRS        | IS LEARNER |
+------------------+---------+---------------------------+----------------------------+----------------------------+------------+
| 8d5abe9669a39192 | started | openshift-control-plane-1 | https://192.168.10.10:2380 | https://192.168.10.10:2379 |      false |
| cc3830a72fc357f9 | started | openshift-control-plane-0 |  https://192.168.10.9:2380 |  https://192.168.10.9:2379 |      false |
+------------------+---------+---------------------------+----------------------------+----------------------------+------------+
$ oc patch etcd/cluster --type=merge -p '{"spec": {"unsupportedConfigOverrides": {"useUnsupportedUnsafeNonHANonProductionUnstableEtcd": true}}}'
$ oc get secrets -n openshift-etcd | grep openshift-control-plane-2
etcd-peer-openshift-control-plane-2             kubernetes.io/tls   2   134m
etcd-serving-metrics-openshift-control-plane-2  kubernetes.io/tls   2   134m
etcd-serving-openshift-control-plane-2          kubernetes.io/tls   2   134m
$ oc delete secret etcd-peer-openshift-control-plane-2 -n openshift-etcd

secret "etcd-peer-openshift-control-plane-2" deleted
$ oc delete secret etcd-serving-metrics-openshift-control-plane-2 -n openshift-etcd

secret "etcd-serving-metrics-openshift-control-plane-2" deleted
$ oc delete secret etcd-serving-openshift-control-plane-2 -n openshift-etcd

secret "etcd-serving-openshift-control-plane-2" deleted
$ oc get machines -n openshift-machine-api -o wide
NAME                              PHASE     TYPE   REGION   ZONE   AGE     NODE                               PROVIDERID                                                                                              STATE
examplecluster-control-plane-0    Running                          3h11m   openshift-control-plane-0   baremetalhost:///openshift-machine-api/openshift-control-plane-0/da1ebe11-3ff2-41c5-b099-0aa41222964e   externally provisioned 
examplecluster-control-plane-1    Running                          3h11m   openshift-control-plane-1   baremetalhost:///openshift-machine-api/openshift-control-plane-1/d9f9acbc-329c-475e-8d81-03b20280a3e1   externally provisioned
examplecluster-control-plane-2    Running                          3h11m   openshift-control-plane-2   baremetalhost:///openshift-machine-api/openshift-control-plane-2/3354bdac-61d8-410f-be5b-6a395b056135   externally provisioned
examplecluster-compute-0          Running                          165m    openshift-compute-0         baremetalhost:///openshift-machine-api/openshift-compute-0/3d685b81-7410-4bb3-80ec-13a31858241f         provisioned
examplecluster-compute-1          Running                          165m    openshift-compute-1         baremetalhost:///openshift-machine-api/openshift-compute-1/0fdae6eb-2066-4241-91dc-e7ea72ab13b9         provisioned
$ oc get clusteroperator baremetal
NAME        VERSION   AVAILABLE   PROGRESSING   DEGRADED   SINCE   MESSAGE
baremetal   4.19.0    True        False         False      3d15h
$ oc delete bmh openshift-control-plane-2 -n openshift-machine-api
baremetalhost.metal3.io "openshift-control-plane-2" deleted
$ oc delete machine -n openshift-machine-api examplecluster-control-plane-2
$ oc edit machine -n openshift-machine-api examplecluster-control-plane-2
finalizers:
- machine.machine.openshift.io
machine.machine.openshift.io/examplecluster-control-plane-2 edited
$ oc get machines -n openshift-machine-api -o wide
NAME                              PHASE     TYPE   REGION   ZONE   AGE     NODE                                 PROVIDERID                                                                                       STATE
examplecluster-control-plane-0    Running                          3h11m   openshift-control-plane-0   baremetalhost:///openshift-machine-api/openshift-control-plane-0/da1ebe11-3ff2-41c5-b099-0aa41222964e   externally provisioned
examplecluster-control-plane-1    Running                          3h11m   openshift-control-plane-1   baremetalhost:///openshift-machine-api/openshift-control-plane-1/d9f9acbc-329c-475e-8d81-03b20280a3e1   externally provisioned
examplecluster-compute-0          Running                          165m    openshift-compute-0         baremetalhost:///openshift-machine-api/openshift-compute-0/3d685b81-7410-4bb3-80ec-13a31858241f         provisioned
examplecluster-compute-1          Running                          165m    openshift-compute-1         baremetalhost:///openshift-machine-api/openshift-compute-1/0fdae6eb-2066-4241-91dc-e7ea72ab13b9         provisioned
$ oc get nodes

NAME                        STATUS   ROLES    AGE     VERSION
openshift-control-plane-0   Ready    master   3h24m   v1.32.3
openshift-control-plane-1   Ready    master   3h24m   v1.32.3
openshift-compute-0         Ready    worker   176m    v1.32.3
openshift-compute-1         Ready    worker   176m    v1.32.3
$ cat <<EOF | oc apply -f -
apiVersion: v1
kind: Secret
metadata:
  name: openshift-control-plane-2-bmc-secret
  namespace: openshift-machine-api
data:
  password: <password>
  username: <username>
type: Opaque
---
apiVersion: metal3.io/v1alpha1
kind: BareMetalHost
metadata:
  name: openshift-control-plane-2
  namespace: openshift-machine-api
spec:
  automatedCleaningMode: disabled
  bmc:
    address: redfish://10.46.61.18:443/redfish/v1/Systems/1
    credentialsName: openshift-control-plane-2-bmc-secret
    disableCertificateVerification: true
  bootMACAddress: 48:df:37:b0:8a:a0
  bootMode: UEFI
  externallyProvisioned: false
  online: true
  rootDeviceHints:
    deviceName: /dev/disk/by-id/scsi-<serial_number>
  userData:
    name: master-user-data-managed
    namespace: openshift-machine-api
EOF
$ oc get bmh -n openshift-machine-api

NAME                        STATE                    CONSUMER                         ONLINE   ERROR   AGE
openshift-control-plane-0   externally provisioned   examplecluster-control-plane-0   true             4h48m
openshift-control-plane-1   externally provisioned   examplecluster-control-plane-1   true             4h48m
openshift-control-plane-2   available                examplecluster-control-plane-3   true             47m
openshift-compute-0         provisioned              examplecluster-compute-0         true             4h48m
openshift-compute-1         provisioned              examplecluster-compute-1         true             4h48m
$ oc get machines -n openshift-machine-api -o wide
NAME                                   PHASE     TYPE   REGION   ZONE   AGE     NODE                              PROVIDERID                                                                                            STATE
examplecluster-control-plane-0         Running                          3h11m   openshift-control-plane-0   baremetalhost:///openshift-machine-api/openshift-control-plane-0/da1ebe11-3ff2-41c5-b099-0aa41222964e   externally provisioned 
examplecluster-control-plane-1         Running                          3h11m   openshift-control-plane-1   baremetalhost:///openshift-machine-api/openshift-control-plane-1/d9f9acbc-329c-475e-8d81-03b20280a3e1   externally provisioned
examplecluster-control-plane-2         Running                          3h11m   openshift-control-plane-2   baremetalhost:///openshift-machine-api/openshift-control-plane-2/3354bdac-61d8-410f-be5b-6a395b056135   externally provisioned
examplecluster-compute-0               Running                          165m    openshift-compute-0         baremetalhost:///openshift-machine-api/openshift-compute-0/3d685b81-7410-4bb3-80ec-13a31858241f         provisioned
examplecluster-compute-1               Running                          165m    openshift-compute-1         baremetalhost:///openshift-machine-api/openshift-compute-1/0fdae6eb-2066-4241-91dc-e7ea72ab13b9         provisioned
$ oc get bmh -n openshift-machine-api
NAME                        STATE                    CONSUMER                         ONLINE   ERROR   AGE
openshift-control-plane-0   externally provisioned   examplecluster-control-plane-0   true             4h48m
openshift-control-plane-1   externally provisioned   examplecluster-control-plane-1   true             4h48m
openshift-control-plane-2   provisioned              examplecluster-control-plane-3   true             47m
openshift-compute-0         provisioned              examplecluster-compute-0         true             4h48m
openshift-compute-1         provisioned              examplecluster-compute-1         true             4h48m
$ oc get nodes
NAME                        STATUS   ROLES    AGE     VERSION
openshift-control-plane-0   Ready    master   4h26m   v1.32.3
openshift-control-plane-1   Ready    master   4h26m   v1.32.3
openshift-control-plane-2   Ready    master   12m     v1.32.3
openshift-compute-0         Ready    worker   3h58m   v1.32.3
openshift-compute-1         Ready    worker   3h58m   v1.32.3
$ oc patch etcd/cluster --type=merge -p '{"spec": {"unsupportedConfigOverrides": null}}'
$ oc get etcd/cluster -oyaml
EtcdCertSignerControllerDegraded: [Operation cannot be fulfilled on secrets "etcd-peer-sno-0": the object has been modified; please apply your changes to the latest version and try again, Operation cannot be fulfilled on secrets "etcd-serving-sno-0": the object has been modified; please apply your changes to the latest version and try again, Operation cannot be fulfilled on secrets "etcd-serving-metrics-sno-0": the object has been modified; please apply your changes to the latest version and try again]
$ oc -n openshift-etcd get pods -l k8s-app=etcd
NAME                                READY   STATUS      RESTARTS   AGE
etcd-openshift-control-plane-0      5/5     Running     0          105m
etcd-openshift-control-plane-1      5/5     Running     0          107m
etcd-openshift-control-plane-2      5/5     Running     0          103m
$ oc patch etcd cluster -p='{"spec": {"forceRedeploymentReason": "recovery-'"$( date --rfc-3339=ns )"'"}}' --type=merge 
$ oc rsh -n openshift-etcd etcd-openshift-control-plane-0
sh-4.2# etcdctl member list -w table
+------------------+---------+---------------------------+----------------------------+----------------------------+------------+
|        ID        | STATUS  |           NAME            |         PEER ADDRS         |        CLIENT ADDRS        | IS LEARNER |
+------------------+---------+---------------------------+----------------------------+----------------------------+------------+
| 7a8197040a5126c8 | started | openshift-control-plane-2 | https://192.168.10.11:2380 | https://192.168.10.11:2379 |      false |
| 8d5abe9669a39192 | started | openshift-control-plane-1 | https://192.168.10.10:2380 | https://192.168.10.10:2379 |      false |
| cc3830a72fc357f9 | started | openshift-control-plane-0 |  https://192.168.10.9:2380 |  https://192.168.10.9:2379 |      false |
+------------------+---------+---------------------------+----------------------------+----------------------------+------------+
# etcdctl endpoint health --cluster
https://192.168.10.10:2379 is healthy: successfully committed proposal: took = 8.973065ms
https://192.168.10.9:2379 is healthy: successfully committed proposal: took = 11.559829ms
https://192.168.10.11:2379 is healthy: successfully committed proposal: took = 11.665203ms
$ oc get etcd -o=jsonpath='{range .items[0].status.conditions[?(@.type=="NodeInstallerProgressing")]}{.reason}{"\n"}{.message}{"\n"}{end}'
AllNodesAtLatestRevision
$ sudo /usr/local/bin/disable-etcd.sh
$ sudo rm -rf /var/lib/etcd
$ sudo systemctl disable kubelet.service
$ oc get nodes
$ sudo systemctl enable kubelet.service
$ oc get nodes
$ oc get pods -n openshift-etcd
$ oc get pods -n openshift-etcd -l app=etcd --field-selector="status.phase==Running"
$ oc exec -n openshift-etcd <etcd-pod> -c etcdctl -- etcdctl endpoint status -w table
$ oc get nodes -o jsonpath='{range .items[*]}[{.metadata.name},{.status.addresses[?(@.type=="InternalIP")].address}]{end}'
$ sudo -E /usr/local/bin/quorum-restore.sh
$ oc get machines -n openshift-machine-api -o wide
NAME                                        PHASE     TYPE        REGION      ZONE         AGE     NODE                           PROVIDERID                              STATE
clustername-8qw5l-master-0                  Running   m4.xlarge   us-east-1   us-east-1a   3h37m   ip-10-0-131-183.ec2.internal   aws:///us-east-1a/i-0ec2782f8287dfb7e   stopped 
clustername-8qw5l-master-1                  Running   m4.xlarge   us-east-1   us-east-1b   3h37m   ip-10-0-143-125.ec2.internal   aws:///us-east-1b/i-096c349b700a19631   running
clustername-8qw5l-master-2                  Running   m4.xlarge   us-east-1   us-east-1c   3h37m   ip-10-0-154-194.ec2.internal   aws:///us-east-1c/i-02626f1dba9ed5bba   running
clustername-8qw5l-worker-us-east-1a-wbtgd   Running   m4.large    us-east-1   us-east-1a   3h28m   ip-10-0-129-226.ec2.internal   aws:///us-east-1a/i-010ef6279b4662ced   running
clustername-8qw5l-worker-us-east-1b-lrdxb   Running   m4.large    us-east-1   us-east-1b   3h28m   ip-10-0-144-248.ec2.internal   aws:///us-east-1b/i-0cb45ac45a166173b   running
clustername-8qw5l-worker-us-east-1c-pkg26   Running   m4.large    us-east-1   us-east-1c   3h28m   ip-10-0-170-181.ec2.internal   aws:///us-east-1c/i-06861c00007751b0a   running
$ oc delete machine -n openshift-machine-api clustername-8qw5l-master-0 
$ oc get machines -n openshift-machine-api -o wide
NAME                                        PHASE          TYPE        REGION      ZONE         AGE     NODE                           PROVIDERID                              STATE
clustername-8qw5l-master-1                  Running        m4.xlarge   us-east-1   us-east-1b   3h37m   ip-10-0-143-125.ec2.internal   aws:///us-east-1b/i-096c349b700a19631   running
clustername-8qw5l-master-2                  Running        m4.xlarge   us-east-1   us-east-1c   3h37m   ip-10-0-154-194.ec2.internal   aws:///us-east-1c/i-02626f1dba9ed5bba   running
clustername-8qw5l-master-3                  Provisioning   m4.xlarge   us-east-1   us-east-1a   85s     ip-10-0-173-171.ec2.internal   aws:///us-east-1a/i-015b0888fe17bc2c8   running
clustername-8qw5l-worker-us-east-1a-wbtgd   Running        m4.large    us-east-1   us-east-1a   3h28m   ip-10-0-129-226.ec2.internal   aws:///us-east-1a/i-010ef6279b4662ced   running
clustername-8qw5l-worker-us-east-1b-lrdxb   Running        m4.large    us-east-1   us-east-1b   3h28m   ip-10-0-144-248.ec2.internal   aws:///us-east-1b/i-0cb45ac45a166173b   running
clustername-8qw5l-worker-us-east-1c-pkg26   Running        m4.large    us-east-1   us-east-1c   3h28m   ip-10-0-170-181.ec2.internal   aws:///us-east-1c/i-06861c00007751b0a   running
$ oc adm wait-for-stable-cluster
$ oc patch etcd cluster -p='{"spec": {"forceRedeploymentReason": "recovery-'"$(date --rfc-3339=ns )"'"}}' --type=merge
$ cp -r <etcd_backup_directory> /home/core
$ sudo -E /usr/local/bin/cluster-restore.sh /home/core/<etcd_backup_directory>
$ oc adm wait-for-stable-cluster
$ sudo -E /usr/local/bin/disable-etcd.sh
$ sudo -E /usr/local/bin/cluster-restore.sh /home/core/<etcd_backup_directory>
$ oc patch etcd/cluster --type=merge -p '{"spec": {"unsupportedConfigOverrides": {"useUnsupportedUnsafeNonHANonProductionUnstableEtcd": true}}}'
$ oc adm wait-for-stable-cluster
$ oc patch etcd/cluster --type=merge -p '{"spec": {"unsupportedConfigOverrides": null}}'
$ oc patch etcd cluster -p='{"spec": {"forceRedeploymentReason": "recovery-'"$(date --rfc-3339=ns )"'"}}' --type=merge
$ mkdir -p /root/manifests-backup
$ mv /etc/kubernetes/manifests/kube-apiserver-pod.yaml /root/manifests-backup/
$ crictl ps | grep kube-apiserver | grep -E -v "operator|guard"
$ crictl stop <container_id>
$ mv /etc/kubernetes/manifests/kube-controller-manager-pod.yaml /root/manifests-backup/
$ crictl ps | grep kube-controller-manager | grep -E -v "operator|guard"
$ mv /etc/kubernetes/manifests/kube-scheduler-pod.yaml /root/manifests-backup/
$ crictl ps | grep kube-scheduler | grep -E -v "operator|guard"
$ mv /etc/kubernetes/manifests/etcd-pod.yaml /root/manifests-backup/
$ crictl ps | grep etcd | grep -E -v "operator|guard"
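The four manifest moves and container checks above follow an identical pattern, so they can be collapsed into one loop. This is only a sketch that reuses the document's own grep filters; crictl stop is invoked only if matching containers are still running after the manifest move (xargs -r skips it otherwise):

for KIND in kube-apiserver kube-controller-manager kube-scheduler etcd
do
  mv /etc/kubernetes/manifests/${KIND}-pod.yaml /root/manifests-backup/
  crictl ps | grep ${KIND} | grep -Ev 'operator|guard' | awk '{print $1}' | xargs -r crictl stop
done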
$ mkdir /home/core/assets/old-member-data
$ mv /var/lib/etcd/member /home/core/assets/old-member-data
RESTORE_ETCD_POD_YAML="/etc/kubernetes/static-pod-resources/etcd-certs/configmaps/restore-etcd-pod/pod.yaml"
cat $RESTORE_ETCD_POD_YAML | \
  grep -A 1 $(cat $RESTORE_ETCD_POD_YAML | grep 'export ETCD_NAME' | grep -Eo 'NODE_.+_ETCD_NAME') | \
  grep -Po '(?<=value: ").+(?=")'
$ uuidgen
https://<IP_CURRENT_HOST>:2380
$ echo <ETCD_NAME> | \
  sed -E 's/[.-]/_/g' | \
  xargs -I {} grep {} /etc/kubernetes/static-pod-resources/etcd-certs/configmaps/etcd-scripts/etcd.env | \
  grep "IP" | grep -Po '(?<=").+(?=")'
<ETCD_NAME_0>=<ETCD_NODE_PEER_URL_0>,<ETCD_NAME_1>=<ETCD_NODE_PEER_URL_1>,<ETCD_NAME_2>=<ETCD_NODE_PEER_URL_2> 
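As a concrete illustration only, built from the three AWS control-plane hosts used earlier in this document, the value would look like:

ip-10-0-131-183.ec2.internal=https://10.0.131.183:2380,ip-10-0-154-204.ec2.internal=https://10.0.154.204:2380,ip-10-0-164-97.ec2.internal=https://10.0.164.97:2380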
$ cp /home/core/assets/backup/<snapshot_yyyy-mm-dd_hhmmss>.db /var/lib/etcd
$ jq -r '.spec.containers[]|select(.name=="etcdctl")|.image' /root/manifests-backup/etcd-pod.yaml
$ podman run --rm -it --entrypoint="/bin/bash" -v /var/lib/etcd:/var/lib/etcd:z <image_hash>
$ etcdctl version
--name "<ETCD_NAME>" \ --initial-cluster="<ETCD_INITIAL_CLUSTER>" \ --initial-cluster-token "openshift-etcd-<UUID>" \ --initial-advertise-peer-urls "<ETCD_NODE_PEER_URL>" \ --data-dir="/var/lib/etcd/restore-<UUID>" \ --skip-hash-check=true
$ ETCDCTL_API=3 /usr/bin/etcdctl snapshot restore /var/lib/etcd/<snapshot_yyyy-mm-dd_hhmmss>.db \
  --name "<ETCD_NAME>" \
  --initial-cluster="<ETCD_INITIAL_CLUSTER>" \
  --initial-cluster-token "openshift-etcd-<UUID>" \
  --initial-advertise-peer-urls "<ETCD_NODE_PEER_URL>" \
  --data-dir="/var/lib/etcd/restore-<UUID>" \
  --skip-hash-check=true
2022-06-28T19:52:43Z    info    membership/cluster.go:421   added member    {"cluster-id": "c5996b7c11c30d6b", "local-member-id": "0", "added-peer-id": "56cd73b614699e7", "added-peer-peer-urls": ["https://10.0.91.5:2380"], "added-peer-is-learner": false}
2022-06-28T19:52:43Z    info    membership/cluster.go:421   added member    {"cluster-id": "c5996b7c11c30d6b", "local-member-id": "0", "added-peer-id": "1f63d01b31bb9a9e", "added-peer-peer-urls": ["https://10.0.90.221:2380"], "added-peer-is-learner": false}
2022-06-28T19:52:43Z    info    membership/cluster.go:421   added member    {"cluster-id": "c5996b7c11c30d6b", "local-member-id": "0", "added-peer-id": "fdc2725b3b70127c", "added-peer-peer-urls": ["https://10.0.94.214:2380"], "added-peer-is-learner": false}
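Before moving the restored data into place, it can be worth confirming that the restore produced a member directory under the new data dir; a quick sanity check, not part of the original procedure:

$ ls /var/lib/etcd/restore-<UUID>
member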
$ mv /var/lib/etcd/restore-<UUID>/member /var/lib/etcd
$ restorecon -vR /var/lib/etcd/
$ rm -rf /var/lib/etcd/restore-<UUID>
$ rm /var/lib/etcd/<snapshot_yyyy-mm-dd_hhmmss>.db
$ mv /tmp/etcd-pod.yaml /etc/kubernetes/manifests
$ crictl ps | grep etcd | grep -v operator
38c814767ad983       f79db5a8799fd2c08960ad9ee22f784b9fbe23babe008e8a3bf68323f004c840                                                         28 seconds ago       Running             etcd-health-monitor                   2                   fe4b9c3d6483c
e1646b15207c6       9d28c15860870e85c91d0e36b45f7a6edd3da757b113ec4abb4507df88b17f06                                                         About a minute ago   Running             etcd-metrics                          0                   fe4b9c3d6483c
08ba29b1f58a7       9d28c15860870e85c91d0e36b45f7a6edd3da757b113ec4abb4507df88b17f06                                                         About a minute ago   Running             etcd                                  0                   fe4b9c3d6483c
2ddc9eda16f53       9d28c15860870e85c91d0e36b45f7a6edd3da757b113ec4abb4507df88b17f06                                                         About a minute ago   Running             etcdctl
$ crictl exec -it $(crictl ps | grep etcdctl | awk '{print $1}') etcdctl endpoint status -w table
+--------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
|         ENDPOINT         |        ID        | VERSION | DB SIZE | IS LEADER | IS LEARNER | RAFT TERM | RAFT INDEX | RAFT APPLIED INDEX | ERRORS |
+--------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
| https://10.0.89.133:2379 | 682e4a83a0cec6c0 |   3.5.0 |   67 MB |      true |      false |         2 |        218 |                218 |        |
|  https://10.0.92.74:2379 | 450bcf6999538512 |   3.5.0 |   67 MB |     false |      false |         2 |        218 |                218 |        |
| https://10.0.93.129:2379 | 358efa9c1d91c3d6 |   3.5.0 |   67 MB |     false |      false |         2 |        218 |                218 |        |
+--------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
$ mv /root/manifests-backup/kube-apiserver-pod.yaml /etc/kubernetes/manifests
$ crictl ps | grep kube-apiserver | grep -v operator
$ systemctl restart kubelet
$ mv /root/manifests-backup/kube-* /etc/kubernetes/manifests/
$ crictl ps | grep -E 'kube-(apiserver|scheduler|controller-manager)' | grep -v -E 'operator|guard'
for NODE in  $(oc get node -o name | sed 's:node/::g')
do
  oc debug node/${NODE} -- chroot /host /bin/bash -c  'rm -f /var/lib/ovn-ic/etc/ovn*.db && systemctl restart ovs-vswitchd ovsdb-server'
  oc -n openshift-ovn-kubernetes delete pod -l app=ovnkube-node --field-selector=spec.nodeName=${NODE} --wait
  oc -n openshift-ovn-kubernetes wait pod -l app=ovnkube-node --field-selector=spec.nodeName=${NODE} --for condition=ContainersReady --timeout=600s
done
$ oc get csr
NAME        AGE    SIGNERNAME                                    REQUESTOR                                                                   CONDITION
csr-2s94x   8m3s   kubernetes.io/kubelet-serving                 system:node:<node_name>                                                     Pending 
csr-4bd6t   8m3s   kubernetes.io/kubelet-serving                 system:node:<node_name>                                                     Pending
csr-4hl85   13m    kubernetes.io/kube-apiserver-client-kubelet   system:serviceaccount:openshift-machine-config-operator:node-bootstrapper   Pending 
csr-zhhhp   3m8s   kubernetes.io/kube-apiserver-client-kubelet   system:serviceaccount:openshift-machine-config-operator:node-bootstrapper   Pending
...
$ oc describe csr <csr_name> 
$ oc adm certificate approve <csr_name>
$ oc adm certificate approve <csr_name>
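When many CSRs are pending, approving them one by one is tedious. The following one-liner approves every CSR that has no status yet; it uses only standard oc output formatting, but treat it as a convenience sketch and review the pending list first:

$ oc get csr -o go-template='{{range .items}}{{if not .status}}{{.metadata.name}}{{"\n"}}{{end}}{{end}}' | xargs --no-run-if-empty oc adm certificate approve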