From 9b65ff6d30097c3ca8658a998c8c915ec202c8c6 Mon Sep 17 00:00:00 2001 From: Khaja Omer Date: Tue, 10 Sep 2024 15:09:55 -0500 Subject: [PATCH] Update e2e test --- .../assert-csi-driver-resources.yaml | 0 .../assert-pvc-pod.yaml | 6 +- .../chainsaw-test.yaml | 279 ++++++++++++++++++ .../create-storage-class-statefulset.yaml | 72 +++++ .../pod-pvc-luks-movVolume/chainsaw-test.yaml | 123 -------- .../create-pvc-pod.yaml | 64 ---- 6 files changed, 354 insertions(+), 190 deletions(-) rename tests/e2e/test/{pod-pvc-luks-movVolume => pod-pvc-luks-mov-volume}/assert-csi-driver-resources.yaml (100%) rename tests/e2e/test/{pod-pvc-luks-movVolume => pod-pvc-luks-mov-volume}/assert-pvc-pod.yaml (74%) create mode 100644 tests/e2e/test/pod-pvc-luks-mov-volume/chainsaw-test.yaml create mode 100644 tests/e2e/test/pod-pvc-luks-mov-volume/create-storage-class-statefulset.yaml delete mode 100644 tests/e2e/test/pod-pvc-luks-movVolume/chainsaw-test.yaml delete mode 100644 tests/e2e/test/pod-pvc-luks-movVolume/create-pvc-pod.yaml diff --git a/tests/e2e/test/pod-pvc-luks-movVolume/assert-csi-driver-resources.yaml b/tests/e2e/test/pod-pvc-luks-mov-volume/assert-csi-driver-resources.yaml similarity index 100% rename from tests/e2e/test/pod-pvc-luks-movVolume/assert-csi-driver-resources.yaml rename to tests/e2e/test/pod-pvc-luks-mov-volume/assert-csi-driver-resources.yaml diff --git a/tests/e2e/test/pod-pvc-luks-movVolume/assert-pvc-pod.yaml b/tests/e2e/test/pod-pvc-luks-mov-volume/assert-pvc-pod.yaml similarity index 74% rename from tests/e2e/test/pod-pvc-luks-movVolume/assert-pvc-pod.yaml rename to tests/e2e/test/pod-pvc-luks-mov-volume/assert-pvc-pod.yaml index 49fa3c81..6dc02a60 100644 --- a/tests/e2e/test/pod-pvc-luks-movVolume/assert-pvc-pod.yaml +++ b/tests/e2e/test/pod-pvc-luks-mov-volume/assert-pvc-pod.yaml @@ -1,10 +1,10 @@ apiVersion: v1 kind: Pod metadata: - name: e2e-pod + name: test-statefulset-0 status: containerStatuses: - - name: e2e-pod + - name: example ready: true 
started: true phase: Running @@ -12,7 +12,7 @@ status: apiVersion: v1 kind: PersistentVolumeClaim metadata: - name: pvc-filesystem-luks + name: data-test-statefulset-0 status: capacity: storage: 10Gi diff --git a/tests/e2e/test/pod-pvc-luks-mov-volume/chainsaw-test.yaml b/tests/e2e/test/pod-pvc-luks-mov-volume/chainsaw-test.yaml new file mode 100644 index 00000000..8c445dd7 --- /dev/null +++ b/tests/e2e/test/pod-pvc-luks-mov-volume/chainsaw-test.yaml @@ -0,0 +1,279 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json + +# This test confirms that moving a LUKS volume around different nodes will not cause any issues. +# The test involves creating a StatefulSet, checking if the Pod is first scheduled on the control plane node, and verifying that the Volume is mounted. +# Then, the Pod is moved to a worker node, and it checks that the Volume remains mounted and the Pod is still ready. +# After that, the Volume is moved back to the control plane node, and it verifies again that the Pod is ready, the Volume is mounted, and the Volume remains a LUKS volume. +# Finally, it cleans up the StatefulSet, detaches the Volume from the node and Linode, deletes the PVC, and verifies that the Volume has been deleted. 
+apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + creationTimestamp: null + name: pod-pvc-luks-mov-volume + labels: + all: +spec: + bindings: + - name: lukskey + value: (env('LUKS_KEY')) + - name: nodes + # number of nodes in cluster + value: ((env('WORKER_NODES') | to_number(@)) + (env('CONTROLPLANE_NODES') | to_number(@))) + steps: + - name: Check if CSI Driver is deployed + try: + - assert: + file: assert-csi-driver-resources.yaml + - name: Create storage class and statefulset (Schedule on control plane node) + try: + - apply: + file: create-storage-class-statefulset.yaml + catch: + - describe: + apiVersion: v1 + kind: Pod + - describe: + apiVersion: v1 + kind: PersistentVolumeClaim + - name: Check if Pod is ready and Volume is mounted (Schedule on control plane node) + try: + - assert: + file: assert-pvc-pod.yaml + catch: + - describe: + apiVersion: v1 + kind: PersistentVolumeClaim + - describe: + apiVersion: v1 + kind: Pod + - name: Check if the pod is scheduled on control plane node + try: + - script: + env: + - name: NAMESPACE + value: ($namespace) + content: | + kubectl get pod -n $NAMESPACE -l app=example -o jsonpath='{.items[0].spec.nodeName}' | grep -q 'control-plane' && echo "true" || echo "false" + check: + ($error): ~ + (contains($stdout, 'true')): true + - name: Check if volume is created + try: + - script: + env: + - name: TARGET_API + value: api.linode.com + - name: TARGET_API_VERSION + value: v4beta + - name: URI + value: volumes + - name: FILTER + value: (to_string({"tags":($namespace)})) + content: | + set -e + curl -s \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + -H "X-Filter: $FILTER" \ + -H "Content-Type: application/json" \ + "https://api.linode.com/v4beta/volumes" + check: + ($error): ~ + (json_parse($stdout)): + results: 1 + - name: Create a file inside the pod and check it was created (on control plane node) + try: + - script: + env: + - name: NAMESPACE + value: ($namespace) + content: | + kubectl exec -n $NAMESPACE 
test-statefulset-0 -- sh -c "cd data && touch testfile" && \
+              kubectl exec -n $NAMESPACE test-statefulset-0 -- sh -c "ls data"
+            check:
+              ($error): ~
+              (contains($stdout, 'testfile')): true
+    - name: Check if the volume is a LUKS volume (on control plane node)
+      try:
+        - script:
+            env:
+              - name: NAMESPACE
+                value: ($namespace)
+            content: |
+              kubectl exec -n $NAMESPACE test-statefulset-0 -- sh -c "blkid | grep crypto_LUKS"
+            check:
+              ($error): ~
+              (contains($stdout, 'TYPE="crypto_LUKS"')): true
+
+    # This section updates the StatefulSet to schedule the Pod on a worker node and verifies that the pod is ready,
+    # the volume is mounted, and that the volume remains a LUKS volume after the move.
+    - name: Schedule the Pod on a worker node
+      try:
+        - patch:
+            resource:
+              apiVersion: apps/v1
+              kind: StatefulSet
+              metadata:
+                name: test-statefulset
+              spec:
+                template:
+                  spec:
+                    affinity:
+                      nodeAffinity:
+                        requiredDuringSchedulingIgnoredDuringExecution:
+                          nodeSelectorTerms:
+                            - matchExpressions:
+                                - key: node-role.kubernetes.io/control-plane
+                                  operator: DoesNotExist
+    - name: Check if Pod is ready and Volume is mounted (On worker node)
+      try:
+        - assert:
+            file: assert-pvc-pod.yaml
+      catch:
+        - describe:
+            apiVersion: v1
+            kind: PersistentVolumeClaim
+        - describe:
+            apiVersion: v1
+            kind: Pod
+    - name: Check if the pod is scheduled on worker node
+      try:
+        - script:
+            env:
+              - name: NAMESPACE
+                value: ($namespace)
+            content: |
+              kubectl get pod -n $NAMESPACE -l app=example -o jsonpath='{.items[0].spec.nodeName}' | grep -q 'control-plane' && echo "true" || echo "false"
+            check:
+              ($error): ~
+              (contains($stdout, 'false')): true
+    - name: Check file still exists (worker node)
+      try:
+        - script:
+            env:
+              - name: NAMESPACE
+                value: ($namespace)
+            content: |
+              kubectl exec -n $NAMESPACE test-statefulset-0 -- sh -c "ls data"
+            check:
+              ($error): ~
+              (contains($stdout, 'testfile')): true
+    - name: Check if the volume is a LUKS volume (worker node)
+      try:
+        - script:
+            env:
+              - name: NAMESPACE
+                value: ($namespace)
+            content: |
+              kubectl exec -n $NAMESPACE test-statefulset-0 -- sh -c "blkid | grep crypto_LUKS"
+            check:
+              ($error): ~
+              (contains($stdout, 'TYPE="crypto_LUKS"')): true
+
+
+    # This section moves the volume back to the control plane node and verifies that the pod is ready,
+    # the volume is mounted, and that the volume remains a LUKS volume after the move.
+    - name: Move the volume back to control plane node
+      try:
+        - patch:
+            resource:
+              apiVersion: apps/v1
+              kind: StatefulSet
+              metadata:
+                name: test-statefulset
+              spec:
+                template:
+                  spec:
+                    affinity:
+                      nodeAffinity:
+                        requiredDuringSchedulingIgnoredDuringExecution:
+                          nodeSelectorTerms:
+                            - matchExpressions:
+                                - key: node-role.kubernetes.io/control-plane
+                                  operator: Exists
+                    tolerations:
+                      - key: "node-role.kubernetes.io/control-plane"
+                        operator: "Exists"
+                        effect: "NoSchedule"
+    - name: Check if Pod is ready and Volume is mounted (after moving back to control plane node)
+      try:
+        - assert:
+            file: assert-pvc-pod.yaml
+      catch:
+        - describe:
+            apiVersion: v1
+            kind: PersistentVolumeClaim
+        - describe:
+            apiVersion: v1
+            kind: Pod
+    - name: Check if the pod is scheduled on control plane node
+      try:
+        - script:
+            env:
+              - name: NAMESPACE
+                value: ($namespace)
+            content: |
+              kubectl get pod -n $NAMESPACE -l app=example -o jsonpath='{.items[0].spec.nodeName}' | grep -q 'control-plane' && echo "true" || echo "false"
+            check:
+              ($error): ~
+              (contains($stdout, 'true')): true
+    - name: Check file still exists (after moving back to control plane node)
+      try:
+        - script:
+            env:
+              - name: NAMESPACE
+                value: ($namespace)
+            content: |
+              kubectl exec -n $NAMESPACE test-statefulset-0 -- sh -c "ls data"
+            check:
+              ($error): ~
+              (contains($stdout, 'testfile')): true
+    - name: Check if the volume is a LUKS volume (after moving back to control plane node)
+      try:
+        - script:
+            env:
+              - name: NAMESPACE
+                value: ($namespace)
+            content: |
+              kubectl exec -n $NAMESPACE test-statefulset-0 -- sh -c "blkid | grep crypto_LUKS"
+            check:
+              ($error): ~
+              (contains($stdout, 'TYPE="crypto_LUKS"')): true
+
+    # This section handles the cleanup process after the test, which includes deleting the StatefulSet,
+    # checking if the associated volume is detached from the node and Linode, deleting the PersistentVolumeClaim (PVC),
+    # and verifying that the volume has been successfully deleted.
+    - name: Delete the statefulset
+      try:
+        - delete:
+            ref:
+              apiVersion: apps/v1
+              kind: StatefulSet
+    - name: Check if the volume is detached on Node resource and in Linode (using API)
+      try:
+        - script:
+            env:
+              - name: FILTER
+                value: (to_string({"tags":($namespace)}))
+            content: |
+              ../check-volume-detached.sh $FILTER
+            check:
+              ($error): ~
+              (contains($stdout, 'Volume was successfully detached')): true
+              (contains($stdout, 'Volume detached in Linode')): true
+    - name: Delete PVC
+      try:
+        - delete:
+            ref:
+              apiVersion: v1
+              kind: PersistentVolumeClaim
+    - name: Check if the Volume was deleted
+      try:
+        - script:
+            env:
+              - name: FILTER
+                value: (to_string({"tags":($namespace)}))
+            content: |
+              ../check-volume-deleted.sh $FILTER
+            check:
+              ($error): ~
+              (contains($stdout, 'Volume deleted in Linode')): true
diff --git a/tests/e2e/test/pod-pvc-luks-mov-volume/create-storage-class-statefulset.yaml b/tests/e2e/test/pod-pvc-luks-mov-volume/create-storage-class-statefulset.yaml
new file mode 100644
index 00000000..0e75e294
--- /dev/null
+++ b/tests/e2e/test/pod-pvc-luks-mov-volume/create-storage-class-statefulset.yaml
@@ -0,0 +1,72 @@
+allowVolumeExpansion: true
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  annotations:
+    storageclass.kubernetes.io/is-default-class: "true"
+  name: (join('-', ['linode-block-storage-luks', $namespace]))
+  namespace: kube-system
+provisioner: linodebs.csi.linode.com
+reclaimPolicy: Delete
+parameters:
+  linodebs.csi.linode.com/luks-encrypted: "true"
+  linodebs.csi.linode.com/luks-cipher: "aes-xts-plain64"
+  linodebs.csi.linode.com/luks-key-size: "512"
+  csi.storage.k8s.io/node-stage-secret-namespace: ($namespace)
+  csi.storage.k8s.io/node-stage-secret-name: csi-encrypt-example-luks-key
+  linodebs.csi.linode.com/volumeTags: (to_string($namespace))
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: csi-encrypt-example-luks-key
+stringData:
+  luksKey: ($lukskey)
+---
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: test-statefulset
+spec:
+  serviceName: "example"
+  replicas: 1
+  selector:
+    matchLabels:
+      app: example
+  template:
+    metadata:
+      labels:
+        app: example
+    spec:
+      affinity:
+        nodeAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+            nodeSelectorTerms:
+              - matchExpressions:
+                  - key: node-role.kubernetes.io/control-plane
+                    operator: Exists
+      tolerations:
+        - key: "node-role.kubernetes.io/control-plane"
+          operator: "Exists"
+          effect: "NoSchedule"
+      containers:
+        - name: example
+          image: nginx
+          securityContext:
+            privileged: true
+            capabilities:
+              add: ["SYS_ADMIN"]
+            allowPrivilegeEscalation: true
+          volumeMounts:
+            - name: data
+              mountPath: /data
+  volumeClaimTemplates:
+    - metadata:
+        name: data
+      spec:
+        accessModes: ["ReadWriteOnce"]
+        storageClassName: (join('-', ['linode-block-storage-luks', $namespace]))
+        resources:
+          requests:
+            storage: 10Gi
+
diff --git a/tests/e2e/test/pod-pvc-luks-movVolume/chainsaw-test.yaml b/tests/e2e/test/pod-pvc-luks-movVolume/chainsaw-test.yaml
deleted file mode 100644
index 20265dc2..00000000
--- a/tests/e2e/test/pod-pvc-luks-movVolume/chainsaw-test.yaml
+++ /dev/null
@@ -1,123 +0,0 @@
-# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json
-apiVersion: chainsaw.kyverno.io/v1alpha1
-kind: Test
-metadata:
-  creationTimestamp: null
-  name: pod-pvc-luks-movVolume
-  labels:
-    all:
-spec:
-  bindings:
-  - name: lukskey
-    value: (env('LUKS_KEY'))
-  - name: nodes
-    # number of nodes in cluster
-    value: ((env('WORKER_NODES') | to_number(@)) + (env('CONTROLPLANE_NODES') | to_number(@)))
-
-  steps:
-  - name: Check if CSI Driver is deployed
-    try:
-    - assert:
-        file: assert-csi-driver-resources.yaml
-  - name: Create PVC and Pod
-    try:
-    - apply:
-        file: create-pvc-pod.yaml
-    catch:
-    - describe:
-        apiVersion: v1
-        kind: Pod
-    - describe:
-        apiVersion: v1
-        kind: PersistentVolumeClaim
-  - name: Check if Pod is ready and Volume is mounted
-    try:
-    - assert:
-        file: assert-pvc-pod.yaml
-    catch:
-    - describe:
-        apiVersion: v1
-        kind: PersistentVolumeClaim
-    - describe:
-        apiVersion: v1
-        kind: Pod
-  - name: Check if volume is created
-    try:
-    - script:
-        env:
-        - name: TARGET_API
-          value: api.linode.com
-        - name: TARGET_API_VERSION
-          value: v4beta
-        - name: URI
-          value: volumes
-        - name: FILTER
-          value: (to_string({"tags":($namespace)}))
-        content: |
-          set -e
-          curl -s \
-            -H "Authorization: Bearer $LINODE_TOKEN" \
-            -H "X-Filter: $FILTER" \
-            -H "Content-Type: application/json" \
-            "https://api.linode.com/v4beta/volumes"
-        check:
-          ($error): ~
-          (json_parse($stdout)):
-            results: 1
-  - name: Create a file inside the pod and check it was created
-    try:
-    - script:
-        env:
-        - name: NAMESPACE
-          value: ($namespace)
-        content: |
-          kubectl exec -n $NAMESPACE e2e-pod -- sh -c "cd data && touch testfile" && \
-          kubectl exec -n $NAMESPACE e2e-pod -- sh -c "ls data"
-        check:
-          ($error): ~
-          (contains($stdout, 'testfile')): true
-  - name: Check if the volume is a LUKS volume
-    try:
-    - script:
-        env:
-        - name: NAMESPACE
-          value: ($namespace)
-        content: |
-          kubectl exec -n $NAMESPACE e2e-pod -- sh -c "blkid | grep crypto_LUKS"
-        check:
-          ($error): ~
-          (contains($stdout, 'TYPE="crypto_LUKS"')): true
-  - name: Delete the Pod
-    try:
-    - delete:
-        ref:
-          apiVersion: v1
-          kind: Pod
-  - name: Check if the volume is detached on Node resource and in Linode (using API)
-    try:
-    - script:
-        env:
-        - name: FILTER
-          value: (to_string({"tags":($namespace)}))
-        content: |
-          ../check-volume-detached.sh $FILTER
-        check:
-          ($error): ~
-          (contains($stdout, 'Volume was successfully detached')): true
-          (contains($stdout, 'Volume detached in Linode')): true
-  - name: Delete PVC
-    try:
-    - delete:
-        ref:
-          apiVersion: v1
-          kind: PersistentVolumeClaim
-  - name: Check if the Volume was deleted
-    try:
-    - script:
-        env:
-        - name: FILTER
-          value: (to_string({"tags":($namespace)}))
-        content: |
-          ../check-volume-deleted.sh $FILTER
-        check:
-          ($error): ~
-          (contains($stdout, 'Volume deleted in Linode')): true
diff --git a/tests/e2e/test/pod-pvc-luks-movVolume/create-pvc-pod.yaml b/tests/e2e/test/pod-pvc-luks-movVolume/create-pvc-pod.yaml
deleted file mode 100644
index aba588e2..00000000
--- a/tests/e2e/test/pod-pvc-luks-movVolume/create-pvc-pod.yaml
+++ /dev/null
@@ -1,64 +0,0 @@
-allowVolumeExpansion: true
-apiVersion: storage.k8s.io/v1
-kind: StorageClass
-metadata:
-  annotations:
-    storageclass.kubernetes.io/is-default-class: "true"
-  name: (join('-', ['linode-block-storage-luks', $namespace]))
-  namespace: kube-system
-provisioner: linodebs.csi.linode.com
-reclaimPolicy: Delete
-parameters:
-  linodebs.csi.linode.com/luks-encrypted: "true"
-  linodebs.csi.linode.com/luks-cipher: "aes-xts-plain64"
-  linodebs.csi.linode.com/luks-key-size: "512"
-  csi.storage.k8s.io/node-stage-secret-namespace: ($namespace)
-  csi.storage.k8s.io/node-stage-secret-name: csi-encrypt-example-luks-key
-  linodebs.csi.linode.com/volumeTags: (to_string($namespace))
----
-apiVersion: v1
-kind: Secret
-metadata:
-  name: csi-encrypt-example-luks-key
-stringData:
-  luksKey: ($lukskey)
----
-apiVersion: v1
-kind: PersistentVolumeClaim
-metadata:
-  name: pvc-filesystem-luks
-spec:
-  accessModes:
-  - ReadWriteOnce
-  resources:
-    requests:
-      storage: 10Gi
-  storageClassName: (join('-', ['linode-block-storage-luks', $namespace]))
----
-apiVersion: v1
-kind: Pod
-metadata:
-  name: e2e-pod
-spec:
-  containers:
-  - name: e2e-pod
-    image: ubuntu
-    command:
-    - sleep
-    - "1000000"
-    volumeMounts:
-    - mountPath: /data
-      name: csi-volume
-    securityContext:
-      privileged: true
-      capabilities:
-        add: ["SYS_ADMIN"]
-      allowPrivilegeEscalation: true
-  tolerations:
-  - key: "node-role.kubernetes.io/control-plane"
-    operator: "Exists"
-    effect: "NoSchedule"
-  volumes:
-  - name: csi-volume
-    persistentVolumeClaim:
-      claimName: pvc-filesystem-luks