diff --git a/charts/generic/experiments.yaml b/charts/generic/experiments.yaml index 1ca110896..3fbf101c6 100644 --- a/charts/generic/experiments.yaml +++ b/charts/generic/experiments.yaml @@ -1,39 +1,33 @@ +--- apiVersion: litmuschaos.io/v1alpha1 description: message: | - Injects cpu consumption on pods belonging to an app deployment + Drain the node where application pod is scheduled kind: ChaosExperiment metadata: - name: pod-cpu-hog + name: node-drain labels: - name: pod-cpu-hog + name: node-drain app.kubernetes.io/part-of: litmus app.kubernetes.io/component: chaosexperiment app.kubernetes.io/version: 1.13.8 spec: definition: - scope: Namespaced + scope: Cluster permissions: - apiGroups: - "" - "batch" - - "apps" - - "apps.openshift.io" - - "argoproj.io" - "litmuschaos.io" + - "apps" resources: - "jobs" - "pods" - - "pods/log" - "events" - - "replicationcontrollers" - - "deployments" - - "statefulsets" - - "daemonsets" - - "replicasets" - - "deploymentconfigs" - - "rollouts" + - "pods/log" - "pods/exec" + - "daemonsets" + - "pods/eviction" - "chaosengines" - "chaosexperiments" - "chaosresults" @@ -45,60 +39,43 @@ spec: - "update" - "delete" - "deletecollection" + - apiGroups: + - "" + resources: + - "nodes" + verbs: + - "get" + - "list" + - "patch" image: "litmuschaos/go-runner:1.13.8" imagePullPolicy: Always args: - -c - - ./experiments -name pod-cpu-hog + - ./experiments -name node-drain command: - /bin/bash env: - - name: TOTAL_CHAOS_DURATION - value: '60' - - ## Number of CPU cores to stress - - name: CPU_CORES - value: '1' - - ## Percentage of total pods to target - - name: PODS_AFFECTED_PERC + + - name: TARGET_NODE value: '' - ## Period to wait before and after injection of chaos in sec - - name: RAMP_TIME + - name: NODE_LABEL value: '' - ## env var that describes the library used to execute the chaos - ## default: litmus. Supported values: litmus, pumba + - name: TOTAL_CHAOS_DURATION + value: '60' + + # Provide the LIB here + # Only litmus supported - name: LIB value: 'litmus' - ## It is used in pumba lib only - - name: LIB_IMAGE - value: 'litmuschaos/go-runner:1.13.8' - - ## It is used in pumba lib only - - name: STRESS_IMAGE - value: 'alexeiled/stress-ng:latest-ubuntu' - - ## provide the cluster runtime - - name: CONTAINER_RUNTIME - value: 'docker' - - # provide the socket file path - - name: SOCKET_PATH - value: '/var/run/docker.sock' - - - name: TARGET_PODS + # Period to wait before and after injection of chaos in sec + - name: RAMP_TIME value: '' - - ## it defines the sequence of chaos execution for multiple target pods - ## supported values: serial, parallel - - name: SEQUENCE - value: 'parallel' labels: - name: pod-cpu-hog + name: node-drain app.kubernetes.io/part-of: litmus app.kubernetes.io/component: experiment-job app.kubernetes.io/version: 1.13.8 @@ -107,12 +84,12 @@ spec: apiVersion: litmuschaos.io/v1alpha1 description: message: | - Detaching a persistent disk from a node/instance. 
Supports only for AWS and GCP + Give a memory hog on a node belonging to a deployment kind: ChaosExperiment metadata: - name: disk-loss + name: node-memory-hog labels: - name: disk-loss + name: node-memory-hog app.kubernetes.io/part-of: litmus app.kubernetes.io/component: chaosexperiment app.kubernetes.io/version: 1.13.8 @@ -127,9 +104,9 @@ spec: resources: - "jobs" - "pods" - - "events" - "pods/log" - - "secrets" + - "pods/exec" + - "events" - "chaosengines" - "chaosexperiments" - "chaosresults" @@ -140,70 +117,84 @@ spec: - "patch" - "update" - "delete" - image: "litmuschaos/ansible-runner:1.13.8" + - "deletecollection" + - apiGroups: + - "" + resources: + - "nodes" + verbs: + - "get" + - "list" + image: "litmuschaos/go-runner:1.13.8" imagePullPolicy: Always args: - -c - - ansible-playbook ./experiments/generic/disk_loss/disk_loss_ansible_logic.yml -i /etc/ansible/hosts -vv; exit 0 + - ./experiments -name node-memory-hog command: - /bin/bash env: - - name: ANSIBLE_STDOUT_CALLBACK - value: 'default' - name: TOTAL_CHAOS_DURATION - value: '15' + value: '120' - # Period to wait before and after injection of chaos in sec - - name: RAMP_TIME + ## Specify the size as percent of total node capacity Ex: '30' + ## NOTE: for selecting this option keep MEMORY_CONSUMPTION_MEBIBYTES empty + - name: MEMORY_CONSUMPTION_PERCENTAGE + value: '' + + ## Specify the amount of memory to be consumed in mebibytes + ## NOTE: for selecting this option keep MEMORY_CONSUMPTION_PERCENTAGE empty + - name: MEMORY_CONSUMPTION_MEBIBYTES value: '' - - name: APP_CHECK - value: 'true' - - # GKE and AWS supported - - name: CLOUD_PLATFORM - value: 'GKE' + - name: NUMBER_OF_WORKERS + value: '1' - - name: PROJECT_ID - value: '' + # ENTER THE COMMA SEPARATED TARGET NODES NAME + - name: TARGET_NODES + value: '' - - name: NODE_NAME - value: '' + - name: NODE_LABEL + value: '' - - name: DISK_NAME - value: '' + # Period to wait before and after injection of chaos in sec + - name: RAMP_TIME + value: '' - # provide the LIB - # only litmus supported + # Provide the LIB here + # Only litmus supported - name: LIB - value: 'litmus' - - - name: ZONE_NAME - value: '' + value: 'litmus' - - name: DEVICE_NAME - value: '' - + # provide lib image + - name: LIB_IMAGE + value: 'litmuschaos/go-runner:1.13.8' + + ## percentage of total nodes to target + - name: NODES_AFFECTED_PERC + value: '' + + ## it defines the sequence of chaos execution for multiple target nodes + ## supported values: serial, parallel + - name: SEQUENCE + value: 'parallel' + labels: - name: disk-loss + name: node-memory-hog app.kubernetes.io/part-of: litmus app.kubernetes.io/component: experiment-job app.kubernetes.io/version: 1.13.8 - secrets: - - name: cloud-secret - mountPath: /tmp/ --- apiVersion: litmuschaos.io/v1alpha1 description: message: | - Deletes a pod belonging to a deployment/statefulset/daemonset + Pod DNS Spoof can spoof particular dns requests in target pod container to desired target hostnames kind: ChaosExperiment metadata: - name: pod-delete + name: pod-dns-spoof labels: - name: pod-delete + name: pod-dns-spoof app.kubernetes.io/part-of: litmus app.kubernetes.io/component: chaosexperiment app.kubernetes.io/version: 1.13.8 @@ -213,13 +204,12 @@ spec: permissions: - apiGroups: - "" + - "batch" - "apps" - "apps.openshift.io" - "argoproj.io" - - "batch" - "litmuschaos.io" resources: - - "deployments" - "jobs" - "pods" - "pods/log" @@ -244,127 +234,174 @@ spec: - "delete" - "deletecollection" image: "litmuschaos/go-runner:1.13.8" - imagePullPolicy: Always args: - 
- -c - - ./experiments -name pod-delete + - -c + - ./experiments -name pod-dns-spoof command: - - /bin/bash + - /bin/bash env: + - name: TARGET_CONTAINER + value: "" - - name: TOTAL_CHAOS_DURATION - value: '15' + # provide lib image + - name: LIB_IMAGE + value: "litmuschaos/go-runner:1.13.8" - # Period to wait before and after injection of chaos in sec - - name: RAMP_TIME - value: '' + - name: TOTAL_CHAOS_DURATION + value: "60" # in seconds - - name: FORCE - value: 'true' + # Time period to wait before and after injection of chaos in sec + - name: RAMP_TIME + value: "" - - name: CHAOS_INTERVAL - value: '5' + ## percentage of total pods to target + - name: PODS_AFFECTED_PERC + value: "" - ## percentage of total pods to target - - name: PODS_AFFECTED_PERC - value: '' + - name: TARGET_PODS + value: "" - - name: LIB - value: 'litmus' + # provide the name of container runtime, it supports docker, containerd, crio + - name: CONTAINER_RUNTIME + value: "docker" - - name: TARGET_PODS - value: '' + # provide the socket file path + - name: SOCKET_PATH + value: "/var/run/docker.sock" + + ## it defines the sequence of chaos execution for multiple target pods + ## supported values: serial, parallel + - name: SEQUENCE + value: "parallel" + + # map of the target hostnames eg. '{"abc.com":"spoofabc.com"}' . If empty no queries will be spoofed + - name: SPOOF_MAP + value: "" - ## it defines the sequence of chaos execution for multiple target pods - ## supported values: serial, parallel - - name: SEQUENCE - value: 'parallel' - labels: - name: pod-delete + experiment: pod-dns-spoof app.kubernetes.io/part-of: litmus app.kubernetes.io/component: experiment-job app.kubernetes.io/version: 1.13.8 ---- --- apiVersion: litmuschaos.io/v1alpha1 description: message: | - Drain the node where application pod is scheduled + Injects network packet loss on pods belonging to an app deployment kind: ChaosExperiment metadata: - name: node-drain + name: pod-network-loss labels: - name: node-drain + name: pod-network-loss app.kubernetes.io/part-of: litmus app.kubernetes.io/component: chaosexperiment app.kubernetes.io/version: 1.13.8 spec: definition: - scope: Cluster + scope: Namespaced permissions: - - apiGroups: - - "" - - "batch" - - "litmuschaos.io" - - "apps" - resources: - - "jobs" - - "pods" - - "events" - - "pods/log" - - "pods/exec" - - "daemonsets" - - "pods/eviction" - - "chaosengines" - - "chaosexperiments" - - "chaosresults" - verbs: - - "create" - - "list" - - "get" - - "patch" - - "update" - - "delete" - - "deletecollection" - - apiGroups: - - "" - resources: - - "nodes" - verbs: - - "get" - - "list" - - "patch" + - apiGroups: + - "" + - "batch" + - "apps" + - "apps.openshift.io" + - "argoproj.io" + - "litmuschaos.io" + resources: + - "jobs" + - "pods" + - "pods/log" + - "replicationcontrollers" + - "deployments" + - "statefulsets" + - "daemonsets" + - "replicasets" + - "deploymentconfigs" + - "rollouts" + - "pods/exec" + - "events" + - "chaosengines" + - "chaosexperiments" + - "chaosresults" + verbs: + - "get" + - "list" + - "patch" + - "create" + - "update" + - "delete" + - "deletecollection" image: "litmuschaos/go-runner:1.13.8" imagePullPolicy: Always args: - -c - - ./experiments -name node-drain + - ./experiments -name pod-network-loss command: - /bin/bash env: - - name: TARGET_NODE + - name: TARGET_CONTAINER value: '' - - name: NODE_LABEL - value: '' + # provide lib image + - name: LIB_IMAGE + value: 'litmuschaos/go-runner:1.13.8' + + - name: NETWORK_INTERFACE + value: 'eth0' + + - name: TC_IMAGE + value: 
'gaiadocker/iproute2' + + - name: NETWORK_PACKET_LOSS_PERCENTAGE + value: '100' #in PERCENTAGE - name: TOTAL_CHAOS_DURATION - value: '60' + value: '60' # in seconds - # Provide the LIB here - # Only litmus supported + # Time period to wait before and after injection of chaos in sec + - name: RAMP_TIME + value: '' + + # it can be litmus or pumba - name: LIB value: 'litmus' - # Period to wait before and after injection of chaos in sec - - name: RAMP_TIME + ## percentage of total pods to target + - name: PODS_AFFECTED_PERC + value: '' + + - name: TARGET_PODS + value: '' + + # provide the name of container runtime + # for litmus LIB, it supports docker, containerd, crio + # for pumba LIB, it supports docker only + - name: CONTAINER_RUNTIME + value: 'docker' + + # provide the destination ips + # chaos injection will be triggered for these destination ips + - name: DESTINATION_IPS + value: '' + + # provide the destination hosts + # chaos injection will be triggered for these destination hosts + - name: DESTINATION_HOSTS value: '' + + # provide the socket file path + - name: SOCKET_PATH + value: '/var/run/docker.sock' + + ## it defines the sequence of chaos execution for multiple target pods + ## supported values: serial, parallel + - name: SEQUENCE + value: 'parallel' labels: - name: node-drain + name: pod-network-loss app.kubernetes.io/part-of: litmus app.kubernetes.io/component: experiment-job app.kubernetes.io/version: 1.13.8 @@ -373,37 +410,27 @@ apiVersion: litmuschaos.io/v1alpha1 description: message: | - Injects network latency on pods belonging to an app deployment + Kills the kubelet service on the application node to check the resiliency. kind: ChaosExperiment metadata: - name: pod-network-latency + name: kubelet-service-kill labels: - name: pod-network-latency + name: kubelet-service-kill app.kubernetes.io/part-of: litmus app.kubernetes.io/component: chaosexperiment app.kubernetes.io/version: 1.13.8 spec: definition: - scope: Namespaced + scope: Cluster permissions: - apiGroups: - "" - "batch" - - "apps" - - "apps.openshift.io" - - "argoproj.io" - "litmuschaos.io" resources: - "jobs" - "pods" - "pods/log" - - "replicationcontrollers" - - "deployments" - - "statefulsets" - - "daemonsets" - - "replicasets" - - "deploymentconfigs" - - "rollouts" - "pods/exec" - "events" - "chaosengines" @@ -417,76 +444,45 @@ - "update" - "delete" - "deletecollection" + - apiGroups: + - "" + resources: + - "nodes" + verbs: + - "get" + - "list" image: "litmuschaos/go-runner:1.13.8" imagePullPolicy: Always args: - -c - - ./experiments -name pod-network-latency + - ./experiments -name kubelet-service-kill command: - /bin/bash env: - - - name: TARGET_CONTAINER - value: '' - - - name: NETWORK_INTERFACE - value: 'eth0' - - # provide lib image - - name: LIB_IMAGE - value: 'litmuschaos/go-runner:1.13.8' - - - name: TC_IMAGE - value: 'gaiadocker/iproute2' - - - name: NETWORK_LATENCY - value: '2000' #in ms - + - name: TOTAL_CHAOS_DURATION value: '60' # in seconds - # Time period to wait before and after injection of chaos in sec + # Period to wait before and after injection of chaos in sec - name: RAMP_TIME value: '' - # lib can be litmus or pumba - name: LIB value: 'litmus' - ## percentage of total pods to target - - name: PODS_AFFECTED_PERC - value: '' - - - name: TARGET_PODS - value: '' - - # provide the name of container runtime - # for litmus LIB, it supports docker, containerd, crio - # for pumba LIB, it supports docker only - - name: CONTAINER_RUNTIME - value: 'docker' - - # provide the
destination ips - # chaos injection will be triggered for these destination ips - - name: DESTINATION_IPS + - name: NODE_LABEL value: '' - # provide the destination hosts - # chaos injection will be triggered for these destination hosts - - name: DESTINATION_HOSTS + # provide lib image + - name: LIB_IMAGE + value: 'ubuntu:16.04' + + # provide the target node name + - name: TARGET_NODE value: '' - # provide the socket file path - - name: SOCKET_PATH - value: '/var/run/docker.sock' - - ## it defines the sequence of chaos execution for multiple target pods - ## supported values: serial, parallel - - name: SEQUENCE - value: 'parallel' - labels: - name: pod-network-latency + name: kubelet-service-kill app.kubernetes.io/part-of: litmus app.kubernetes.io/component: experiment-job app.kubernetes.io/version: 1.13.8 @@ -495,32 +491,33 @@ spec: apiVersion: litmuschaos.io/v1alpha1 description: message: | - Deletes a pod belonging to a deployment/statefulset/daemonset + poweroff node kind: ChaosExperiment metadata: - name: k8-pod-delete + name: node-poweroff labels: - name: k8-pod-delete + name: node-poweroff app.kubernetes.io/part-of: litmus app.kubernetes.io/component: chaosexperiment app.kubernetes.io/version: 1.13.8 spec: definition: - scope: Namespaced + scope: Cluster permissions: - apiGroups: - "" - - "apps" - "batch" - "litmuschaos.io" resources: - - "deployments" - "jobs" - "pods" - - "configmaps" + - "pods/log" + - "pods/exec" + - "events" - "chaosengines" - "chaosexperiments" - "chaosresults" + - "secrets" verbs: - "create" - "list" @@ -528,64 +525,73 @@ spec: - "patch" - "update" - "delete" + - "deletecollection" - apiGroups: - "" resources: - "nodes" - verbs : + verbs: - "get" - "list" - image: "litmuschaos/py-runner:latest" + image: "litmuschaos/go-runner:1.13.8" + imagePullPolicy: Always args: - -c - - python /litmus/byoc/chaostest/chaostest/kubernetes/k8_wrapper.py; exit 0 + - ./experiments -name node-restart command: - /bin/bash env: - - name: CHAOSTOOLKIT_IN_POD - value: 'true' + - name: SSH_USER + value: 'root' - - name: FILE - value: 'pod-app-kill-count.json' + - name: TOTAL_CHAOS_DURATION + value: '60' - - name: NAME_SPACE + - name: REBOOT_COMMAND + value: '-o ServerAliveInterval=1 -o ServerAliveCountMax=1 "sudo systemctl poweroff --force --force" ; true' + + # Period to wait before and after injection of chaos in sec + - name: RAMP_TIME value: '' - - name: LABEL_NAME + # PROVIDE THE LIB HERE + # ONLY LITMUS SUPPORTED + - name: LIB + value: 'litmus' + + # provide lib image + - name: LIB_IMAGE + value: "litmuschaos/go-runner:1.13.8" + + # ENTER THE TARGET NODE NAME + - name: TARGET_NODE value: '' - - name: APP_ENDPOINT + - name: NODE_LABEL value: '' - - name: PERCENTAGE - value: '50' - - - name: REPORT - value: 'true' - - - name: REPORT_ENDPOINT - value: 'none' - - - name: TEST_NAMESPACE - value: 'default' - + # ENTER THE TARGET NODE IP + - name: TARGET_NODE_IP + value: '' labels: - name: k8-pod-delete + name: node-poweroff app.kubernetes.io/part-of: litmus app.kubernetes.io/component: experiment-job app.kubernetes.io/version: 1.13.8 + secrets: + - name: id-rsa + mountPath: /mnt/ --- apiVersion: litmuschaos.io/v1alpha1 description: - message: | - Injects network packet loss on pods belonging to an app deployment + message: "Kills a container belonging to an application pod \n" kind: ChaosExperiment metadata: - name: pod-network-loss + name: container-kill labels: - name: pod-network-loss + name: container-kill app.kubernetes.io/part-of: litmus app.kubernetes.io/component: 
chaosexperiment app.kubernetes.io/version: 1.13.8 @@ -593,107 +599,95 @@ spec: definition: scope: Namespaced permissions: - - apiGroups: - - "" - - "batch" - - "apps" - - "apps.openshift.io" - - "argoproj.io" - - "litmuschaos.io" - resources: - - "jobs" - - "pods" - - "pods/log" - - "replicationcontrollers" - - "deployments" - - "statefulsets" - - "daemonsets" - - "replicasets" - - "deploymentconfigs" - - "rollouts" - - "pods/exec" - - "events" - - "chaosengines" - - "chaosexperiments" - - "chaosresults" - verbs: - - "get" - - "list" - - "patch" - - "create" - - "update" - - "delete" - - "deletecollection" + - apiGroups: + - "" + - "apps" + - "batch" + - "apps.openshift.io" + - "argoproj.io" + - "litmuschaos.io" + resources: + - "jobs" + - "pods" + - "pods/log" + - "events" + - "replicationcontrollers" + - "deployments" + - "statefulsets" + - "daemonsets" + - "replicasets" + - "deploymentconfigs" + - "rollouts" + - "pods/exec" + - "chaosengines" + - "chaosexperiments" + - "chaosresults" + verbs: + - "create" + - "list" + - "get" + - "update" + - "patch" + - "delete" + - "deletecollection" image: "litmuschaos/go-runner:1.13.8" imagePullPolicy: Always args: - -c - - ./experiments -name pod-network-loss + - ./experiments -name container-kill command: - /bin/bash env: - + - name: TARGET_CONTAINER value: '' - # provide lib image - - name: LIB_IMAGE - value: 'litmuschaos/go-runner:1.13.8' - - - name: NETWORK_INTERFACE - value: 'eth0' - - - name: TC_IMAGE - value: 'gaiadocker/iproute2' - - - name: NETWORK_PACKET_LOSS_PERCENTAGE - value: '100' #in PERCENTAGE - - - name: TOTAL_CHAOS_DURATION - value: '60' # in seconds - - # ime period to wait before and after injection of chaos in sec + # Period to wait before and after injection of chaos in sec - name: RAMP_TIME value: '' - # it can be litmus or pumba + # lib can be litmus or pumba - name: LIB value: 'litmus' - - ## percentage of total pods to target - - name: PODS_AFFECTED_PERC - value: '' - + - name: TARGET_PODS value: '' + # provide the chaos interval + - name: CHAOS_INTERVAL + value: '10' + + - name: SIGNAL + value: 'SIGKILL' + + # provide the socket file path + - name: SOCKET_PATH + value: '/var/run/docker.sock' + # provide the name of container runtime # for litmus LIB, it supports docker, containerd, crio # for pumba LIB, it supports docker only - name: CONTAINER_RUNTIME value: 'docker' - # provide the destination ips - # chaos injection will be triggered for these destination ips - - name: DESTINATION_IPS - value: '' - - # provide the destination hosts - # chaos injection will be triggered for these destination hosts - - name: DESTINATION_HOSTS + # provide the total chaos duration + - name: TOTAL_CHAOS_DURATION + value: '20' + + ## percentage of total pods to target + - name: PODS_AFFECTED_PERC value: '' - # provide the socket file path - - name: SOCKET_PATH - value: '/var/run/docker.sock' + - name: LIB_IMAGE + value: 'litmuschaos/go-runner:1.13.8' ## it defines the sequence of chaos execution for multiple target pods ## supported values: serial, parallel - name: SEQUENCE value: 'parallel' - + labels: - name: pod-network-loss + name: container-kill app.kubernetes.io/part-of: litmus app.kubernetes.io/component: experiment-job app.kubernetes.io/version: 1.13.8 @@ -703,39 +697,32 @@ spec: apiVersion: litmuschaos.io/v1alpha1 description: message: | - Injects memory consumption on pods belonging to an app deployment + Taint the node where application pod is scheduled kind: ChaosExperiment metadata: - name: pod-memory-hog + name: node-taint 
labels: - name: pod-memory-hog + name: node-taint app.kubernetes.io/part-of: litmus app.kubernetes.io/component: chaosexperiment app.kubernetes.io/version: 1.13.8 spec: definition: - scope: Namespaced + scope: Cluster permissions: - apiGroups: - "" - "batch" - "apps" - - "apps.openshift.io" - - "argoproj.io" - "litmuschaos.io" resources: - "jobs" - "pods" + - "events" - "pods/log" - - "replicationcontrollers" - - "deployments" - - "statefulsets" - - "daemonsets" - - "replicasets" - - "deploymentconfigs" - - "rollouts" - "pods/exec" - - "events" + - "daemonsets" + - "pods/eviction" - "chaosengines" - "chaosexperiments" - "chaosresults" @@ -747,64 +734,49 @@ spec: - "update" - "delete" - "deletecollection" + - apiGroups: + - "" + resources: + - "nodes" + verbs: + - "get" + - "list" + - "patch" + - "update" image: "litmuschaos/go-runner:1.13.8" imagePullPolicy: Always args: - -c - - ./experiments -name pod-memory-hog + - ./experiments -name node-taint command: - /bin/bash env: - - name: TOTAL_CHAOS_DURATION - value: '60' - - ## enter the amount of memory in megabytes to be consumed by the application pod - - name: MEMORY_CONSUMPTION - value: '500' - - ## Number of workers to perform stress - - name: NUMBER_OF_WORKERS - value: '1' - - ## percentage of total pods to target - - name: PODS_AFFECTED_PERC - value: '' - - ## Period to wait before and after injection of chaos in sec - - name: RAMP_TIME - value: '' - ## env var that describes the library used to execute the chaos - ## default: litmus. Supported values: litmus, pumba - - name: LIB - value: 'litmus' + - name: TARGET_NODE + value: '' - ## It is used in pumba lib only - - name: LIB_IMAGE - value: 'litmuschaos/go-runner:1.13.8' + - name: NODE_LABEL + value: '' - ## It is used in pumba lib only - - name: STRESS_IMAGE - value: 'alexeiled/stress-ng:latest-ubuntu' + - name: TOTAL_CHAOS_DURATION + value: '60' - ## provide the cluster runtime - - name: CONTAINER_RUNTIME - value: 'docker' + # Provide the LIB here + # Only litmus supported + - name: LIB + value: 'litmus' - # provide the socket file path - - name: SOCKET_PATH - value: '/var/run/docker.sock' - - ## it defines the sequence of chaos execution for multiple target pods - ## supported values: serial, parallel - - name: SEQUENCE - value: 'parallel' + # Period to wait before and after injection of chaos in sec + - name: RAMP_TIME + value: '' - - name: TARGET_PODS - value: '' + # set taint label & effect + # key=value:effect or key:effect + - name: TAINTS + value: '' labels: - name: pod-memory-hog + name: node-taint app.kubernetes.io/part-of: litmus app.kubernetes.io/component: experiment-job app.kubernetes.io/version: 1.13.8 @@ -813,29 +785,29 @@ spec: apiVersion: litmuschaos.io/v1alpha1 description: message: | - Kills the docker service on the application node to check the resiliency. 
+ Deletes a pod belonging to a deployment/statefulset/daemonset kind: ChaosExperiment metadata: - name: docker-service-kill + name: k8-pod-delete labels: - name: docker-service-kill + name: k8-pod-delete app.kubernetes.io/part-of: litmus app.kubernetes.io/component: chaosexperiment app.kubernetes.io/version: 1.13.8 spec: definition: - scope: Cluster + scope: Namespaced permissions: - apiGroups: - "" - - "batch" - "apps" + - "batch" - "litmuschaos.io" resources: + - "deployments" - "jobs" - "pods" - - "pods/log" - - "events" + - "configmaps" - "chaosengines" - "chaosexperiments" - "chaosresults" @@ -850,55 +822,60 @@ spec: - "" resources: - "nodes" - verbs: + verbs : - "get" - "list" - image: "litmuschaos/go-runner:1.13.8" - imagePullPolicy: Always + image: "litmuschaos/py-runner:1.13.8" args: - -c - - ./experiments -name docker-service-kill + - python /litmus/byoc/chaostest/chaostest/kubernetes/k8_wrapper.py; exit 0 command: - /bin/bash env: - - - name: TOTAL_CHAOS_DURATION - value: '90' # in seconds + - name: CHAOSTOOLKIT_IN_POD + value: 'true' - # Period to wait before injection of chaos in sec - - name: RAMP_TIME - value: '' + - name: FILE + value: 'pod-app-kill-count.json' - - name: LIB - value: 'litmus' + - name: NAME_SPACE + value: '' - - name: NODE_LABEL + - name: LABEL_NAME value: '' - # provide lib image - - name: LIB_IMAGE - value: 'ubuntu:16.04' - - # provide the target node name - - name: TARGET_NODE + - name: APP_ENDPOINT value: '' + - name: PERCENTAGE + value: '50' + + - name: REPORT + value: 'true' + + - name: REPORT_ENDPOINT + value: 'none' + + - name: TEST_NAMESPACE + value: 'default' + + labels: - name: docker-service-kill + name: k8-pod-delete app.kubernetes.io/part-of: litmus app.kubernetes.io/component: experiment-job app.kubernetes.io/version: 1.13.8 - --- apiVersion: litmuschaos.io/v1alpha1 description: - message: "Kills a container belonging to an application pod \n" + message: | + Injects network latency on pods belonging to an app deployment kind: ChaosExperiment metadata: - name: container-kill + name: pod-network-latency labels: - name: container-kill + name: pod-network-latency app.kubernetes.io/part-of: litmus app.kubernetes.io/component: chaosexperiment app.kubernetes.io/version: 1.13.8 @@ -908,8 +885,8 @@ spec: permissions: - apiGroups: - "" - - "apps" - "batch" + - "apps" - "apps.openshift.io" - "argoproj.io" - "litmuschaos.io" @@ -917,7 +894,6 @@ spec: - "jobs" - "pods" - "pods/log" - - "events" - "replicationcontrollers" - "deployments" - "statefulsets" @@ -926,6 +902,7 @@ spec: - "deploymentconfigs" - "rollouts" - "pods/exec" + - "events" - "chaosengines" - "chaosexperiments" - "chaosresults" @@ -933,43 +910,52 @@ spec: - "create" - "list" - "get" - - "update" - "patch" + - "update" - "delete" - "deletecollection" image: "litmuschaos/go-runner:1.13.8" imagePullPolicy: Always args: - -c - - ./experiments -name container-kill + - ./experiments -name pod-network-latency command: - /bin/bash env: - + - name: TARGET_CONTAINER value: '' - # Period to wait before and after injection of chaos in sec + - name: NETWORK_INTERFACE + value: 'eth0' + + # provide lib image + - name: LIB_IMAGE + value: 'litmuschaos/go-runner:1.13.8' + + - name: TC_IMAGE + value: 'gaiadocker/iproute2' + + - name: NETWORK_LATENCY + value: '2000' #in ms + + - name: TOTAL_CHAOS_DURATION + value: '60' # in seconds + + # Time period to wait before and after injection of chaos in sec - name: RAMP_TIME value: '' # lib can be litmus or pumba - name: LIB value: 'litmus' - - - name: TARGET_PODS - 
value: '' - # provide the chaos interval - - name: CHAOS_INTERVAL - value: '10' - - - name: SIGNAL - value: 'SIGKILL' + ## percentage of total pods to target + - name: PODS_AFFECTED_PERC + value: '' - # provide the socket file path - - name: SOCKET_PATH - value: '/var/run/docker.sock' + - name: TARGET_PODS + value: '' # provide the name of container runtime # for litmus LIB, it supports docker, containerd, crio @@ -977,16 +963,19 @@ spec: - name: CONTAINER_RUNTIME value: 'docker' - # provide the total chaos duration - - name: TOTAL_CHAOS_DURATION - value: '20' - - ## percentage of total pods to target - - name: PODS_AFFECTED_PERC + # provide the destination ips + # chaos injection will be triggered for these destination ips + - name: DESTINATION_IPS value: '' - - name: LIB_IMAGE - value: 'litmuschaos/go-runner:1.13.8' + # provide the destination hosts + # chaos injection will be triggered for these destination hosts + - name: DESTINATION_HOSTS + value: '' + + # provide the socket file path + - name: SOCKET_PATH + value: '/var/run/docker.sock' ## it defines the sequence of chaos execution for multiple target pods ## supported values: serial, parallel @@ -994,7 +983,7 @@ spec: value: 'parallel' labels: - name: container-kill + name: pod-network-latency app.kubernetes.io/part-of: litmus app.kubernetes.io/component: experiment-job app.kubernetes.io/version: 1.13.8 @@ -1003,12 +992,12 @@ spec: apiVersion: litmuschaos.io/v1alpha1 description: message: | - Kills the kubelet service on the application node to check the resiliency. + Scale the application replicas and test the node autoscaling on cluster kind: ChaosExperiment metadata: - name: kubelet-service-kill + name: pod-autoscaler labels: - name: kubelet-service-kill + name: pod-autoscaler app.kubernetes.io/part-of: litmus app.kubernetes.io/component: chaosexperiment app.kubernetes.io/version: 1.13.8 @@ -1019,12 +1008,15 @@ spec: - apiGroups: - "" - "batch" + - "apps" - "litmuschaos.io" resources: - "jobs" - "pods" - "pods/log" - "pods/exec" + - "deployments" + - "statefulsets" - "events" - "chaosengines" - "chaosexperiments" @@ -1044,52 +1036,51 @@ spec: verbs: - "get" - "list" + - "create" + - "patch" + - "update" image: "litmuschaos/go-runner:1.13.8" imagePullPolicy: Always args: - -c - - ./experiments -name kubelet-service-kill + - ./experiments -name pod-autoscaler command: - /bin/bash env: - + - name: TOTAL_CHAOS_DURATION - value: '60' # in seconds + value: '60' # Period to wait before and after injection of chaos in sec - name: RAMP_TIME value: '' + # Number of replicas to scale + - name: REPLICA_COUNT + value: '5' + + # PROVIDE THE LIB HERE + # ONLY LITMUS SUPPORTED - name: LIB value: 'litmus' - - - name: NODE_LABEL - value: '' - - # provide lib image - - name: LIB_IMAGE - value: 'ubuntu:16.04' - - # provide the target node name - - name: TARGET_NODE - value: '' - + labels: - name: kubelet-service-kill + name: pod-autoscaler app.kubernetes.io/part-of: litmus app.kubernetes.io/component: experiment-job app.kubernetes.io/version: 1.13.8 +--- --- apiVersion: litmuschaos.io/v1alpha1 description: message: | - Pod DNS Spoof can spoof particular dns requests in target pod container to desired target hostnames + Injects memory consumption on pods belonging to an app deployment kind: ChaosExperiment metadata: - name: pod-dns-spoof + name: pod-memory-hog-exec labels: - name: pod-dns-spoof + name: pod-memory-hog-exec app.kubernetes.io/part-of: litmus app.kubernetes.io/component: chaosexperiment app.kubernetes.io/version: 1.13.8 @@ -1129,52 
+1120,43 @@ spec: - "delete" - "deletecollection" image: "litmuschaos/go-runner:1.13.8" + imagePullPolicy: Always args: - - -c - - ./experiments -name pod-dns-spoof + - -c + - ./experiments -name pod-memory-hog-exec command: - - /bin/bash + - /bin/bash env: - - name: TARGET_CONTAINER - value: "" - - # provide lib image - - name: LIB_IMAGE - value: "litmuschaos/go-runner:1.13.8" - - name: TOTAL_CHAOS_DURATION - value: "60" # in seconds + value: '60' - # Time period to wait before and after injection of chaos in sec - - name: RAMP_TIME - value: "" + ## enter the amount of memory in megabytes to be consumed by the application pod + - name: MEMORY_CONSUMPTION + value: '500' ## percentage of total pods to target - name: PODS_AFFECTED_PERC - value: "" - - - name: TARGET_PODS - value: "" - - # provide the name of container runtime, it supports docker, containerd, crio - - name: CONTAINER_RUNTIME - value: "docker" + value: '' - # provide the socket file path - - name: SOCKET_PATH - value: "/var/run/docker.sock" + ## Period to wait before and after injection of chaos in sec + - name: RAMP_TIME + value: '' + ## env var that describes the library used to execute the chaos + ## default: litmus. Supported values: litmus + - name: LIB + value: 'litmus' + ## it defines the sequence of chaos execution for multiple target pods ## supported values: serial, parallel - name: SEQUENCE - value: "parallel" + value: 'parallel' - # map of the target hostnames eg. '{"abc.com":"spoofabc.com"}' . If empty no queries will be spoofed - - name: SPOOF_MAP - value: "" + - name: TARGET_PODS + value: '' labels: - experiment: pod-dns-spoof + name: pod-memory-hog-exec app.kubernetes.io/part-of: litmus app.kubernetes.io/component: experiment-job app.kubernetes.io/version: 1.13.8 @@ -1183,12 +1165,12 @@ spec: apiVersion: litmuschaos.io/v1alpha1 description: message: | - Give a memory hog on a node belonging to a deployment + Detaching a persistent disk from a node/instance. 
Supports only for AWS and GCP kind: ChaosExperiment metadata: - name: node-memory-hog + name: disk-loss labels: - name: node-memory-hog + name: disk-loss app.kubernetes.io/part-of: litmus app.kubernetes.io/component: chaosexperiment app.kubernetes.io/version: 1.13.8 @@ -1203,9 +1185,9 @@ spec: resources: - "jobs" - "pods" - - "pods/log" - - "pods/exec" - "events" + - "pods/log" + - "secrets" - "chaosengines" - "chaosexperiments" - "chaosresults" @@ -1216,101 +1198,97 @@ spec: - "patch" - "update" - "delete" - - "deletecollection" - - apiGroups: - - "" - resources: - - "nodes" - verbs: - - "get" - - "list" - image: "litmuschaos/go-runner:1.13.8" + image: "litmuschaos/ansible-runner:1.13.8" imagePullPolicy: Always args: - -c - - ./experiments -name node-memory-hog + - ansible-playbook ./experiments/generic/disk_loss/disk_loss_ansible_logic.yml -i /etc/ansible/hosts -vv; exit 0 command: - /bin/bash env: + - name: ANSIBLE_STDOUT_CALLBACK + value: 'default' - name: TOTAL_CHAOS_DURATION - value: '120' + value: '15' - ## Specify the size as percent of total node capacity Ex: '30' - ## NOTE: for selecting this option keep MEMORY_CONSUMPTION_MEBIBYTES empty - - name: MEMORY_CONSUMPTION_PERCENTAGE - value: '' - - ## Specify the amount of memory to be consumed in mebibytes - ## NOTE: for selecting this option keep MEMORY_CONSUMPTION_PERCENTAGE empty - - name: MEMORY_CONSUMPTION_MEBIBYTES + # Period to wait before and after injection of chaos in sec + - name: RAMP_TIME value: '' - - name: NUMBER_OF_WORKERS - value: '1' + - name: APP_CHECK + value: 'true' + + # GKE and AWS supported + - name: CLOUD_PLATFORM + value: 'GKE' - # ENTER THE COMMA SEPARATED TARGET NODES NAME - - name: TARGET_NODES - value: '' - - - name: NODE_LABEL - value: '' - - # Period to wait before and after injection of chaos in sec - - name: RAMP_TIME + - name: PROJECT_ID value: '' - # Provide the LIB here - # Only litmus supported - - name: LIB - value: 'litmus' + - name: NODE_NAME + value: '' - # provide lib image - - name: LIB_IMAGE - value: 'litmuschaos/go-runner:1.13.8' + - name: DISK_NAME + value: '' - ## percentage of total nodes to target - - name: NODES_AFFECTED_PERC - value: '' + # provide the LIB + # only litmus supported + - name: LIB + value: 'litmus' + + - name: ZONE_NAME + value: '' - ## it defines the sequence of chaos execution for multiple target nodes - ## supported values: serial, parallel - - name: SEQUENCE - value: 'parallel' - + - name: DEVICE_NAME + value: '' + labels: - name: node-memory-hog + name: disk-loss app.kubernetes.io/part-of: litmus app.kubernetes.io/component: experiment-job app.kubernetes.io/version: 1.13.8 + secrets: + - name: cloud-secret + mountPath: /tmp/ --- apiVersion: litmuschaos.io/v1alpha1 description: message: | - Give a cpu spike on a node belonging to a deployment + Injects cpu consumption on pods belonging to an app deployment kind: ChaosExperiment metadata: - name: node-cpu-hog + name: pod-cpu-hog labels: - name: node-cpu-hog + name: pod-cpu-hog app.kubernetes.io/part-of: litmus app.kubernetes.io/component: chaosexperiment app.kubernetes.io/version: 1.13.8 spec: definition: - scope: Cluster + scope: Namespaced permissions: - apiGroups: - "" - "batch" + - "apps" + - "apps.openshift.io" + - "argoproj.io" - "litmuschaos.io" resources: - "jobs" - "pods" - "pods/log" - - "pods/exec" - "events" + - "replicationcontrollers" + - "deployments" + - "statefulsets" + - "daemonsets" + - "replicasets" + - "deploymentconfigs" + - "rollouts" + - "pods/exec" - "chaosengines" - "chaosexperiments" - 
"chaosresults" @@ -1322,94 +1300,101 @@ spec: - "update" - "delete" - "deletecollection" - - apiGroups: - - "" - resources: - - "nodes" - verbs: - - "get" - - "list" image: "litmuschaos/go-runner:1.13.8" imagePullPolicy: Always args: - -c - - ./experiments -name node-cpu-hog + - ./experiments -name pod-cpu-hog command: - /bin/bash env: - - name: TOTAL_CHAOS_DURATION value: '60' - # Period to wait before and after injection of chaos in sec - - name: RAMP_TIME - value: '' - - ## ENTER THE NUMBER OF CORES OF CPU FOR CPU HOGGING - ## OPTIONAL VALUE IN CASE OF EMPTY VALUE IT WILL TAKE NODE CPU CAPACITY - - name: NODE_CPU_CORE - value: '' + ## Number of CPU cores to stress + - name: CPU_CORES + value: '1' - # ENTER THE COMMA SEPARATED TARGET NODES NAME - - name: TARGET_NODES + ## Percentage of total pods to target + - name: PODS_AFFECTED_PERC value: '' - - name: NODE_LABEL + ## Period to wait before and after injection of chaos in sec + - name: RAMP_TIME value: '' - # PROVIDE THE LIB HERE - # ONLY LITMUS SUPPORTED + ## env var that describes the library used to execute the chaos + ## default: litmus. Supported values: litmus, pumba - name: LIB value: 'litmus' - # provide lib image + ## It is used in pumba lib only - name: LIB_IMAGE - value: 'litmuschaos/go-runner:1.13.8' + value: 'litmuschaos/go-runner:1.13.8' - ## percentage of total nodes to target - - name: NODES_AFFECTED_PERC + ## It is used in pumba lib only + - name: STRESS_IMAGE + value: 'alexeiled/stress-ng:latest-ubuntu' + + ## provide the cluster runtime + - name: CONTAINER_RUNTIME + value: 'docker' + + # provide the socket file path + - name: SOCKET_PATH + value: '/var/run/docker.sock' + + - name: TARGET_PODS value: '' - ## it defines the sequence of chaos execution for multiple target nodes + ## it defines the sequence of chaos execution for multiple target pods ## supported values: serial, parallel - name: SEQUENCE value: 'parallel' - + labels: - name: node-cpu-hog + name: pod-cpu-hog app.kubernetes.io/part-of: litmus app.kubernetes.io/component: experiment-job app.kubernetes.io/version: 1.13.8 +--- --- apiVersion: litmuschaos.io/v1alpha1 description: message: | - Scale the application replicas and test the node autoscaling on cluster + IO stress on a app pods belonging to an app deployment kind: ChaosExperiment metadata: - name: pod-autoscaler + name: pod-io-stress labels: - name: pod-autoscaler + name: pod-io-stress app.kubernetes.io/part-of: litmus app.kubernetes.io/component: chaosexperiment app.kubernetes.io/version: 1.13.8 spec: definition: - scope: Cluster + scope: Namespaced permissions: - apiGroups: - "" - "batch" - "apps" + - "apps.openshift.io" + - "argoproj.io" - "litmuschaos.io" resources: - "jobs" - "pods" - "pods/log" - - "pods/exec" + - "replicationcontrollers" - "deployments" - "statefulsets" + - "daemonsets" + - "replicasets" + - "deploymentconfigs" + - "rollouts" + - "pods/exec" - "events" - "chaosengines" - "chaosexperiments" @@ -1422,43 +1407,67 @@ spec: - "update" - "delete" - "deletecollection" - - apiGroups: - - "" - resources: - - "nodes" - verbs: - - "get" - - "list" - - "create" - - "patch" - - "update" image: "litmuschaos/go-runner:1.13.8" imagePullPolicy: Always args: - -c - - ./experiments -name pod-autoscaler + - ./experiments -name pod-io-stress command: - /bin/bash env: + - name: TOTAL_CHAOS_DURATION + value: '120' - - name: TOTAL_CHAOS_DURATION - value: '60' + ## specify the size as percentage of free space on the file system + ## default value 90 (in percentage) + - name: 
FILESYSTEM_UTILIZATION_PERCENTAGE + value: '10' - # Period to wait before and after injection of chaos in sec - - name: RAMP_TIME - value: '' + ## we can specify the size in Gigabyte (Gb) also in place of percentage of free space + ## NOTE: for selecting this option FILESYSTEM_UTILIZATION_PERCENTAGE should be empty + - name: FILESYSTEM_UTILIZATION_BYTES + value: '' - # Number of replicas to scale - - name: REPLICA_COUNT - value: '5' + ## Total number of workers default value is 4 + - name: NUMBER_OF_WORKERS + value: '4' + + ## Percentage of total pods to target + - name: PODS_AFFECTED_PERC + value: '' + + # provide volume mount path + - name: VOLUME_MOUNT_PATH + value: '' + + ## specify the comma separated target pods + - name: TARGET_PODS + value: '' + + # Period to wait before and after injection of chaos in sec + - name: RAMP_TIME + value: '' + + # Provide the LIB here + # support litmus and pumba + - name: LIB + value: 'litmus' + + # provide lib image + - name: LIB_IMAGE + value: 'litmuschaos/go-runner:1.13.8' + + # provide the socket file path + - name: SOCKET_PATH + value: '/var/run/docker.sock' + + ## it defines the sequence of chaos execution for multiple target pods + ## supported values: serial, parallel + - name: SEQUENCE + value: 'parallel' - # PROVIDE THE LIB HERE - # ONLY LITMUS SUPPORTED - - name: LIB - value: 'litmus' - labels: - name: pod-autoscaler + name: pod-io-stress app.kubernetes.io/part-of: litmus app.kubernetes.io/component: experiment-job app.kubernetes.io/version: 1.13.8 @@ -1467,33 +1476,42 @@ spec: apiVersion: litmuschaos.io/v1alpha1 description: message: | - poweroff node + Pod DNS Error injects dns failure/error in target pod containers kind: ChaosExperiment metadata: - name: node-poweroff + name: pod-dns-error labels: - name: node-poweroff + name: pod-dns-error app.kubernetes.io/part-of: litmus app.kubernetes.io/component: chaosexperiment app.kubernetes.io/version: 1.13.8 spec: definition: - scope: Cluster + scope: Namespaced permissions: - apiGroups: - "" - "batch" + - "apps" + - "apps.openshift.io" + - "argoproj.io" - "litmuschaos.io" resources: - "jobs" - "pods" - "pods/log" + - "replicationcontrollers" + - "deployments" + - "statefulsets" + - "daemonsets" + - "replicasets" + - "deploymentconfigs" + - "rollouts" - "pods/exec" - "events" - "chaosengines" - "chaosexperiments" - "chaosresults" - - "secrets" verbs: - "create" - "list" @@ -1502,150 +1520,57 @@ spec: - "update" - "delete" - "deletecollection" - - apiGroups: - - "" - resources: - - "nodes" - verbs: - - "get" - - "list" image: "litmuschaos/go-runner:1.13.8" - imagePullPolicy: Always args: - - -c - - ./experiments -name node-restart + - -c + - ./experiments -name pod-dns-error command: - - /bin/bash + - /bin/bash env: - - name: SSH_USER - value: 'root' + - name: TARGET_CONTAINER + value: "" - - name: TOTAL_CHAOS_DURATION - value: '60' + # provide lib image + - name: LIB_IMAGE + value: "litmuschaos/go-runner:1.13.8" - - name: REBOOT_COMMAND - value: '-o ServerAliveInterval=1 -o ServerAliveCountMax=1 "sudo systemctl poweroff --force --force" ; true' - - # Period to wait before and after injection of chaos in sec - - name: RAMP_TIME - value: '' - - # PROVIDE THE LIB HERE - # ONLY LITMUS SUPPORTED - - name: LIB - value: 'litmus' - - # provide lib image - - name: LIB_IMAGE - value: "litmuschaos/go-runner:1.13.8" - - # ENTER THE TARGET NODE NAME - - name: TARGET_NODE - value: '' - - - name: NODE_LABEL - value: '' - - # ENTER THE TARGET NODE IP - - name: TARGET_NODE_IP - value: '' - - labels: - name: 
node-poweroff - app.kubernetes.io/part-of: litmus - app.kubernetes.io/component: experiment-job - app.kubernetes.io/version: 1.13.8 - secrets: - - name: id-rsa - mountPath: /mnt/ - ---- ---- -apiVersion: litmuschaos.io/v1alpha1 -description: - message: | - Injects memory consumption on pods belonging to an app deployment -kind: ChaosExperiment -metadata: - name: pod-memory-hog-exec - labels: - name: pod-memory-hog-exec - app.kubernetes.io/part-of: litmus - app.kubernetes.io/component: chaosexperiment - app.kubernetes.io/version: 1.13.8 -spec: - definition: - scope: Namespaced - permissions: - - apiGroups: - - "" - - "batch" - - "apps" - - "apps.openshift.io" - - "argoproj.io" - - "litmuschaos.io" - resources: - - "jobs" - - "pods" - - "pods/log" - - "replicationcontrollers" - - "deployments" - - "statefulsets" - - "daemonsets" - - "replicasets" - - "deploymentconfigs" - - "rollouts" - - "pods/exec" - - "events" - - "chaosengines" - - "chaosexperiments" - - "chaosresults" - verbs: - - "create" - - "list" - - "get" - - "patch" - - "update" - - "delete" - - "deletecollection" - image: "litmuschaos/go-runner:1.13.8" - imagePullPolicy: Always - args: - - -c - - ./experiments -name pod-memory-hog-exec - command: - - /bin/bash - env: - name: TOTAL_CHAOS_DURATION - value: '60' + value: "60" # in seconds - ## enter the amount of memory in megabytes to be consumed by the application pod - - name: MEMORY_CONSUMPTION - value: '500' + # Time period to wait before and after injection of chaos in sec + - name: RAMP_TIME + value: "" ## percentage of total pods to target - name: PODS_AFFECTED_PERC - value: '' + value: "" - ## Period to wait before and after injection of chaos in sec - - name: RAMP_TIME - value: '' + - name: TARGET_PODS + value: "" + + # provide the name of container runtime, it supports docker, containerd, crio + - name: CONTAINER_RUNTIME + value: "docker" + + # provide the socket file path + - name: SOCKET_PATH + value: "/var/run/docker.sock" - ## env var that describes the library used to execute the chaos - ## default: litmus. Supported values: litmus - - name: LIB - value: 'litmus' - ## it defines the sequence of chaos execution for multiple target pods ## supported values: serial, parallel - name: SEQUENCE - value: 'parallel' + value: "parallel" - - name: TARGET_PODS - value: '' + # list of the target hostnames or kewywords eg. '["litmuschaos","chaosnative.io"]' . 
If empty all hostnames are targets + - name: TARGET_HOSTNAMES + value: "" + + # can be either exact or substring, determines whether the dns query has to match exactly with one of the targets or can have any of the targets as substring + - name: MATCH_SCHEME + value: "exact" labels: - name: pod-memory-hog-exec + experiment: pod-dns-error app.kubernetes.io/part-of: litmus app.kubernetes.io/component: experiment-job app.kubernetes.io/version: 1.13.8 @@ -1654,12 +1579,12 @@ spec: apiVersion: litmuschaos.io/v1alpha1 description: message: | - Give IO disk stress on a node belonging to a deployment + Give a cpu spike on a node belonging to a deployment kind: ChaosExperiment metadata: - name: node-io-stress + name: node-cpu-hog labels: - name: node-io-stress + name: node-cpu-hog app.kubernetes.io/part-of: litmus app.kubernetes.io/component: chaosexperiment app.kubernetes.io/version: 1.13.8 @@ -1699,49 +1624,32 @@ spec: imagePullPolicy: Always args: - -c - - ./experiments -name node-io-stress + - ./experiments -name node-cpu-hog command: - /bin/bash env: - name: TOTAL_CHAOS_DURATION - value: '120' - - ## specify the size as percentage of free space on the file system - ## default value 90 (in percentage) - - name: FILESYSTEM_UTILIZATION_PERCENTAGE - value: '10' + value: '60' - ## we can specify the size in Gigabyte (Gb) also in place of percentage of free space - ## NOTE: for selecting this option FILESYSTEM_UTILIZATION_PERCENTAGE should be empty - - name: FILESYSTEM_UTILIZATION_BYTES + # Period to wait before and after injection of chaos in sec + - name: RAMP_TIME value: '' - ## Number of core of CPU - - name: CPU - value: '1' - - ## Total number of workers default value is 4 - - name: NUMBER_OF_WORKERS - value: '4' - - ## Total number of vm workers - - name: VM_WORKERS - value: '1' + ## ENTER THE NUMBER OF CORES OF CPU FOR CPU HOGGING + ## OPTIONAL VALUE IN CASE OF EMPTY VALUE IT WILL TAKE NODE CPU CAPACITY + - name: NODE_CPU_CORE + value: '' - ## enter the comma separated target nodes name + # ENTER THE COMMA SEPARATED TARGET NODES NAME - name: TARGET_NODES value: '' - name: NODE_LABEL - value: '' - - # Period to wait before and after injection of chaos in sec - - name: RAMP_TIME value: '' - # Provide the LIB here - # Only litmus supported + # PROVIDE THE LIB HERE + # ONLY LITMUS SUPPORTED - name: LIB value: 'litmus' @@ -1757,24 +1665,23 @@ spec: ## supported values: serial, parallel - name: SEQUENCE value: 'parallel' - + labels: - name: node-io-stress + name: node-cpu-hog app.kubernetes.io/part-of: litmus app.kubernetes.io/component: experiment-job app.kubernetes.io/version: 1.13.8 ---- --- apiVersion: litmuschaos.io/v1alpha1 description: message: | - Taint the node where application pod is scheduled + Kills the docker service on the application node to check the resiliency. 
kind: ChaosExperiment metadata: - name: node-taint + name: docker-service-kill labels: - name: node-taint + name: docker-service-kill app.kubernetes.io/part-of: litmus app.kubernetes.io/component: chaosexperiment app.kubernetes.io/version: 1.13.8 @@ -1790,11 +1697,8 @@ spec: resources: - "jobs" - "pods" - - "events" - "pods/log" - - "pods/exec" - - "daemonsets" - - "pods/eviction" + - "events" - "chaosengines" - "chaosexperiments" - "chaosresults" @@ -1805,94 +1709,81 @@ spec: - "patch" - "update" - "delete" - - "deletecollection" - apiGroups: - "" - resources: + resources: - "nodes" verbs: - "get" - "list" - - "patch" - - "update" image: "litmuschaos/go-runner:1.13.8" imagePullPolicy: Always args: - -c - - ./experiments -name node-taint + - ./experiments -name docker-service-kill command: - /bin/bash env: + + - name: TOTAL_CHAOS_DURATION + value: '90' # in seconds - - name: TARGET_NODE - value: '' - - - name: NODE_LABEL + # Period to wait before injection of chaos in sec + - name: RAMP_TIME value: '' - - name: TOTAL_CHAOS_DURATION - value: '60' - - # Provide the LIB here - # Only litmus supported - name: LIB value: 'litmus' - # Period to wait before and after injection of chaos in sec - - name: RAMP_TIME + - name: NODE_LABEL value: '' - # set taint label & effect - # key=value:effect or key:effect - - name: TAINTS - value: '' + # provide lib image + - name: LIB_IMAGE + value: 'ubuntu:16.04' + + # provide the target node name + - name: TARGET_NODE + value: '' labels: - name: node-taint + name: docker-service-kill app.kubernetes.io/part-of: litmus app.kubernetes.io/component: experiment-job app.kubernetes.io/version: 1.13.8 + --- apiVersion: litmuschaos.io/v1alpha1 description: message: | - Fillup Ephemeral Storage of a Resource + Restart node kind: ChaosExperiment metadata: - name: disk-fill + name: node-restart labels: - name: disk-fill + name: node-restart app.kubernetes.io/part-of: litmus app.kubernetes.io/component: chaosexperiment app.kubernetes.io/version: 1.13.8 spec: definition: - scope: Namespaced - permissions: + scope: Cluster + permissions: - apiGroups: - "" - - "apps" - "batch" - - "apps.openshift.io" - - "argoproj.io" - "litmuschaos.io" resources: - "jobs" - "pods" - - "pods/exec" - "pods/log" - - "replicationcontrollers" - - "deployments" - - "statefulsets" - - "daemonsets" - - "replicasets" - - "deploymentconfigs" - - "rollouts" + - "pods/exec" - "events" - "chaosengines" - "chaosexperiments" - "chaosresults" + - "secrets" verbs: - "create" - "list" @@ -1901,20 +1792,23 @@ spec: - "update" - "delete" - "deletecollection" - image: "litmuschaos/go-runner:1.13.8" + - apiGroups: + - "" + resources: + - "nodes" + verbs: + - "get" + - "list" + image: "litmuschaos/go-runner:1.13.8" imagePullPolicy: Always args: - -c - - ./experiments -name disk-fill + - ./experiments -name node-restart command: - /bin/bash env: - - - name: TARGET_CONTAINER - value: '' - - - name: FILL_PERCENTAGE - value: '80' + - name: SSH_USER + value: 'root' - name: TOTAL_CHAOS_DURATION value: '60' @@ -1923,80 +1817,60 @@ spec: - name: RAMP_TIME value: '' - # Provide the LIB here - # Only litmus supported + # PROVIDE THE LIB HERE + # ONLY LITMUS SUPPORTED - name: LIB value: 'litmus' - # provide the data block size - # supported unit is KB - - name: DATA_BLOCK_SIZE - value: '256' + # provide lib image + - name: LIB_IMAGE + value: "litmuschaos/go-runner:1.13.8" - - name: TARGET_PODS + # ENTER THE TARGET NODE NAME + - name: TARGET_NODE value: '' - - name: EPHEMERAL_STORAGE_MEBIBYTES + - name: NODE_LABEL value: '' - 
## percentage of total pods to target - - name: PODS_AFFECTED_PERC + # ENTER THE TARGET NODE IP + - name: TARGET_NODE_IP value: '' - - name: LIB_IMAGE - value: 'litmuschaos/go-runner:1.13.8' - - # Provide the container runtime path - # Default set to docker container path - - name: CONTAINER_PATH - value: '/var/lib/docker/containers' - - ## it defines the sequence of chaos execution for multiple target pods - ## supported values: serial, parallel - - name: SEQUENCE - value: 'parallel' - labels: - name: disk-fill + name: node-restart app.kubernetes.io/part-of: litmus app.kubernetes.io/component: experiment-job app.kubernetes.io/version: 1.13.8 + secrets: + - name: id-rsa + mountPath: /mnt/ --- apiVersion: litmuschaos.io/v1alpha1 description: message: | - Pod DNS Error injects dns failure/error in target pod containers + Give IO disk stress on a node belonging to a deployment kind: ChaosExperiment metadata: - name: pod-dns-error + name: node-io-stress labels: - name: pod-dns-error + name: node-io-stress app.kubernetes.io/part-of: litmus app.kubernetes.io/component: chaosexperiment app.kubernetes.io/version: 1.13.8 spec: definition: - scope: Namespaced + scope: Cluster permissions: - apiGroups: - "" - "batch" - - "apps" - - "apps.openshift.io" - - "argoproj.io" - "litmuschaos.io" resources: - "jobs" - "pods" - "pods/log" - - "replicationcontrollers" - - "deployments" - - "statefulsets" - - "daemonsets" - - "replicasets" - - "deploymentconfigs" - - "rollouts" - "pods/exec" - "events" - "chaosengines" @@ -2010,72 +1884,92 @@ spec: - "update" - "delete" - "deletecollection" + - apiGroups: + - "" + resources: + - "nodes" + verbs: + - "get" + - "list" image: "litmuschaos/go-runner:1.13.8" + imagePullPolicy: Always args: - - -c - - ./experiments -name pod-dns-error + - -c + - ./experiments -name node-io-stress command: - - /bin/bash + - /bin/bash env: - - name: TARGET_CONTAINER - value: "" - # provide lib image - - name: LIB_IMAGE - value: "litmuschaos/go-runner:1.13.8" + - name: TOTAL_CHAOS_DURATION + value: '120' - - name: TOTAL_CHAOS_DURATION - value: "60" # in seconds + ## specify the size as percentage of free space on the file system + ## default value 90 (in percentage) + - name: FILESYSTEM_UTILIZATION_PERCENTAGE + value: '10' - # Time period to wait before and after injection of chaos in sec - - name: RAMP_TIME - value: "" + ## we can specify the size in Gigabyte (Gb) also in place of percentage of free space + ## NOTE: for selecting this option FILESYSTEM_UTILIZATION_PERCENTAGE should be empty + - name: FILESYSTEM_UTILIZATION_BYTES + value: '' - ## percentage of total pods to target - - name: PODS_AFFECTED_PERC - value: "" + ## Number of core of CPU + - name: CPU + value: '1' - - name: TARGET_PODS - value: "" + ## Total number of workers default value is 4 + - name: NUMBER_OF_WORKERS + value: '4' - # provide the name of container runtime, it supports docker, containerd, crio - - name: CONTAINER_RUNTIME - value: "docker" + ## Total number of vm workers + - name: VM_WORKERS + value: '1' - # provide the socket file path - - name: SOCKET_PATH - value: "/var/run/docker.sock" + ## enter the comma separated target nodes name + - name: TARGET_NODES + value: '' - ## it defines the sequence of chaos execution for multiple target pods - ## supported values: serial, parallel - - name: SEQUENCE - value: "parallel" + - name: NODE_LABEL + value: '' - # list of the target hostnames or kewywords eg. '["litmuschaos","chaosnative.io"]' . 
If empty all hostnames are targets - - name: TARGET_HOSTNAMES - value: "" + # Period to wait before and after injection of chaos in sec + - name: RAMP_TIME + value: '' - # can be either exact or substring, determines whether the dns query has to match exactly with one of the targets or can have any of the targets as substring - - name: MATCH_SCHEME - value: "exact" + # Provide the LIB here + # Only litmus supported + - name: LIB + value: 'litmus' + + # provide lib image + - name: LIB_IMAGE + value: 'litmuschaos/go-runner:1.13.8' + + ## percentage of total nodes to target + - name: NODES_AFFECTED_PERC + value: '' + ## it defines the sequence of chaos execution for multiple target nodes + ## supported values: serial, parallel + - name: SEQUENCE + value: 'parallel' + labels: - experiment: pod-dns-error + name: node-io-stress app.kubernetes.io/part-of: litmus app.kubernetes.io/component: experiment-job app.kubernetes.io/version: 1.13.8 ---- --- apiVersion: litmuschaos.io/v1alpha1 description: message: | - IO stress on a app pods belonging to an app deployment + Inject network packet corruption into application pod kind: ChaosExperiment metadata: - name: pod-io-stress + name: pod-network-corruption labels: - name: pod-io-stress + name: pod-network-corruption app.kubernetes.io/part-of: litmus app.kubernetes.io/component: chaosexperiment app.kubernetes.io/version: 1.13.8 @@ -2108,73 +2002,82 @@ spec: - "chaosresults" verbs: - "create" + - "delete" - "list" - - "get" - "patch" - "update" - - "delete" + - "get" - "deletecollection" image: "litmuschaos/go-runner:1.13.8" imagePullPolicy: Always args: - -c - - ./experiments -name pod-io-stress + - ./experiments -name pod-network-corruption command: - /bin/bash env: - - name: TOTAL_CHAOS_DURATION - value: '120' + + - name: TARGET_CONTAINER + value: '' - ## specify the size as percentage of free space on the file system - ## default value 90 (in percentage) - - name: FILESYSTEM_UTILIZATION_PERCENTAGE - value: '10' + # provide lib image + - name: LIB_IMAGE + value: 'litmuschaos/go-runner:1.13.8' - ## we can specify the size in Gigabyte (Gb) also in place of percentage of free space - ## NOTE: for selecting this option FILESYSTEM_UTILIZATION_PERCENTAGE should be empty - - name: FILESYSTEM_UTILIZATION_BYTES - value: '' + - name: NETWORK_INTERFACE + value: 'eth0' - ## Total number of workers default value is 4 - - name: NUMBER_OF_WORKERS - value: '4' + - name: TC_IMAGE + value: 'gaiadocker/iproute2' - ## Percentage of total pods to target - - name: PODS_AFFECTED_PERC - value: '' + - name: NETWORK_PACKET_CORRUPTION_PERCENTAGE + value: '100' #in PERCENTAGE - # provide volume mount path - - name: VOLUME_MOUNT_PATH - value: '' + - name: TOTAL_CHAOS_DURATION + value: '60' # in seconds - ## specify the comma separated target pods - - name: TARGET_PODS - value: '' + # Time period to wait before and after injection of chaos in sec + - name: RAMP_TIME + value: '' + + # lib can be litmus or pumba + - name: LIB + value: 'litmus' - # Period to wait before and after injection of chaos in sec - - name: RAMP_TIME - value: '' + ## percentage of total pods to target + - name: PODS_AFFECTED_PERC + value: '' - # Provide the LIB here - # support litmus and pumba - - name: LIB - value: 'litmus' + - name: TARGET_PODS + value: '' - # provide lib image - - name: LIB_IMAGE - value: 'litmuschaos/go-runner:1.13.8' + # provide the name of container runtime + # for litmus LIB, it supports docker, containerd, crio + # for pumba LIB, it supports docker only + - name: CONTAINER_RUNTIME + 
value: 'docker' - # provide the socket file path - - name: SOCKET_PATH - value: '/var/run/docker.sock' + # provide the destination ips + # chaos injection will be triggered for these destination ips + - name: DESTINATION_IPS + value: '' - ## it defines the sequence of chaos execution for multiple target pods - ## supported values: serial, parallel - - name: SEQUENCE - value: 'parallel' + # provide the destination hosts + # chaos injection will be triggered for these destination hosts + - name: DESTINATION_HOSTS + value: '' + # provide the socket file path + - name: SOCKET_PATH + value: '/var/run/docker.sock' + + ## it defines the sequence of chaos execution for multiple target pods + ## supported values: serial, parallel + - name: SEQUENCE + value: 'parallel' + labels: - name: pod-io-stress + name: pod-network-corruption app.kubernetes.io/part-of: litmus app.kubernetes.io/component: experiment-job app.kubernetes.io/version: 1.13.8 @@ -2183,12 +2086,12 @@ spec: apiVersion: litmuschaos.io/v1alpha1 description: message: | - Inject network packet corruption into application pod + Injects cpu consumption on pods belonging to an app deployment kind: ChaosExperiment metadata: - name: pod-network-corruption + name: pod-cpu-hog-exec labels: - name: pod-network-corruption + name: pod-cpu-hog-exec app.kubernetes.io/part-of: litmus app.kubernetes.io/component: chaosexperiment app.kubernetes.io/version: 1.13.8 @@ -2207,6 +2110,7 @@ spec: - "jobs" - "pods" - "pods/log" + - "events" - "replicationcontrollers" - "deployments" - "statefulsets" @@ -2215,60 +2119,145 @@ spec: - "deploymentconfigs" - "rollouts" - "pods/exec" - - "events" - "chaosengines" - "chaosexperiments" - "chaosresults" verbs: - "create" - - "delete" - "list" + - "get" - "patch" - "update" - - "get" - - "deletecollection" + - "delete" image: "litmuschaos/go-runner:1.13.8" imagePullPolicy: Always args: - -c - - ./experiments -name pod-network-corruption + - ./experiments -name pod-cpu-hog-exec command: - /bin/bash env: - - - name: TARGET_CONTAINER + - name: TOTAL_CHAOS_DURATION + value: '60' + + ## Number of CPU cores to stress + - name: CPU_CORES + value: '1' + + ## Percentage of total pods to target + - name: PODS_AFFECTED_PERC value: '' - # provide lib image - - name: LIB_IMAGE - value: 'litmuschaos/go-runner:1.13.8' + ## Period to wait before and after injection of chaos in sec + - name: RAMP_TIME + value: '' - - name: NETWORK_INTERFACE - value: 'eth0' + ## env var that describes the library used to execute the chaos + ## default: litmus. 
Supported values: litmus + - name: LIB + value: 'litmus' - - name: TC_IMAGE - value: 'gaiadocker/iproute2' + - name: TARGET_PODS + value: '' - - name: NETWORK_PACKET_CORRUPTION_PERCENTAGE - value: '100' #in PERCENTAGE + ## it defines the sequence of chaos execution for multiple target pods + ## supported values: serial, parallel + - name: SEQUENCE + value: 'parallel' + + labels: + name: pod-cpu-hog-exec + app.kubernetes.io/part-of: litmus + app.kubernetes.io/component: experiment-job + app.kubernetes.io/version: 1.13.8 +--- +apiVersion: litmuschaos.io/v1alpha1 +description: + message: | + Injects network packet duplication on pods belonging to an app deployment +kind: ChaosExperiment +metadata: + name: pod-network-duplication + labels: + name: pod-network-duplication + app.kubernetes.io/part-of: litmus + app.kubernetes.io/component: chaosexperiment + app.kubernetes.io/version: 1.13.8 +spec: + definition: + scope: Namespaced + permissions: + - apiGroups: + - "" + - "batch" + - "apps" + - "apps.openshift.io" + - "argoproj.io" + - "litmuschaos.io" + resources: + - "jobs" + - "pods" + - "pods/log" + - "replicationcontrollers" + - "deployments" + - "statefulsets" + - "daemonsets" + - "replicasets" + - "deploymentconfigs" + - "rollouts" + - "pods/exec" + - "events" + - "chaosengines" + - "chaosexperiments" + - "chaosresults" + verbs: + - "get" + - "list" + - "patch" + - "create" + - "update" + - "delete" + - "deletecollection" + image: "litmuschaos/go-runner:1.13.8" + imagePullPolicy: Always + args: + - -c + - ./experiments -name pod-network-duplication + command: + - /bin/bash + env: - name: TOTAL_CHAOS_DURATION - value: '60' # in seconds + value: '60' - # Time period to wait before and after injection of chaos in sec - name: RAMP_TIME value: '' - + + - name: TARGET_CONTAINER + value: '' + + - name: TC_IMAGE + value: 'gaiadocker/iproute2' + + - name: NETWORK_INTERFACE + value: 'eth0' + + - name: NETWORK_PACKET_DUPLICATION_PERCENTAGE + value: '100' # in percentage + # lib can be litmus or pumba - name: LIB - value: 'litmus' + value: 'litmus' + + - name: TARGET_PODS + value: '' ## percentage of total pods to target - name: PODS_AFFECTED_PERC value: '' - - name: TARGET_PODS - value: '' + - name: LIB_IMAGE + value: 'litmuschaos/go-runner:1.13.8' # provide the name of container runtime # for litmus LIB, it supports docker, containerd, crio @@ -2294,9 +2283,9 @@ spec: ## supported values: serial, parallel - name: SEQUENCE value: 'parallel' - + labels: - name: pod-network-corruption + name: pod-network-duplication app.kubernetes.io/part-of: litmus app.kubernetes.io/component: experiment-job app.kubernetes.io/version: 1.13.8 @@ -2305,33 +2294,43 @@ spec: apiVersion: litmuschaos.io/v1alpha1 description: message: | - Restart node + Deletes a pod belonging to a deployment/statefulset/daemonset kind: ChaosExperiment metadata: - name: node-restart + name: pod-delete labels: - name: node-restart + name: pod-delete app.kubernetes.io/part-of: litmus app.kubernetes.io/component: chaosexperiment app.kubernetes.io/version: 1.13.8 spec: definition: - scope: Cluster + scope: Namespaced permissions: - apiGroups: - "" + - "apps" + - "apps.openshift.io" + - "argoproj.io" - "batch" - "litmuschaos.io" resources: + - "deployments" - "jobs" - "pods" - "pods/log" + - "replicationcontrollers" + - "deployments" + - "statefulsets" + - "daemonsets" + - "replicasets" + - "deploymentconfigs" + - "rollouts" - "pods/exec" - "events" - "chaosengines" - "chaosexperiments" - "chaosresults" - - "secrets" verbs: - "create" - "list" @@ 
-2340,70 +2339,60 @@ spec: - "update" - "delete" - "deletecollection" - - apiGroups: - - "" - resources: - - "nodes" - verbs: - - "get" - - "list" image: "litmuschaos/go-runner:1.13.8" imagePullPolicy: Always args: - -c - - ./experiments -name node-restart + - ./experiments -name pod-delete command: - /bin/bash env: - - name: SSH_USER - value: 'root' - name: TOTAL_CHAOS_DURATION - value: '60' + value: '15' # Period to wait before and after injection of chaos in sec - name: RAMP_TIME value: '' - # PROVIDE THE LIB HERE - # ONLY LITMUS SUPPORTED - - name: LIB - value: 'litmus' + - name: FORCE + value: 'true' - # provide lib image - - name: LIB_IMAGE - value: "litmuschaos/go-runner:1.13.8" + - name: CHAOS_INTERVAL + value: '5' - # ENTER THE TARGET NODE NAME - - name: TARGET_NODE + ## percentage of total pods to target + - name: PODS_AFFECTED_PERC value: '' - - name: NODE_LABEL - value: '' + - name: LIB + value: 'litmus' - # ENTER THE TARGET NODE IP - - name: TARGET_NODE_IP + - name: TARGET_PODS value: '' + ## it defines the sequence of chaos execution for multiple target pods + ## supported values: serial, parallel + - name: SEQUENCE + value: 'parallel' + labels: - name: node-restart + name: pod-delete app.kubernetes.io/part-of: litmus app.kubernetes.io/component: experiment-job app.kubernetes.io/version: 1.13.8 - secrets: - - name: id-rsa - mountPath: /mnt/ +--- --- apiVersion: litmuschaos.io/v1alpha1 description: message: | - Injects cpu consumption on pods belonging to an app deployment + Injects memory consumption on pods belonging to an app deployment kind: ChaosExperiment metadata: - name: pod-cpu-hog-exec + name: pod-memory-hog labels: - name: pod-cpu-hog-exec + name: pod-memory-hog app.kubernetes.io/part-of: litmus app.kubernetes.io/component: chaosexperiment app.kubernetes.io/version: 1.13.8 @@ -2422,7 +2411,6 @@ spec: - "jobs" - "pods" - "pods/log" - - "events" - "replicationcontrollers" - "deployments" - "statefulsets" @@ -2431,6 +2419,7 @@ spec: - "deploymentconfigs" - "rollouts" - "pods/exec" + - "events" - "chaosengines" - "chaosexperiments" - "chaosresults" @@ -2441,44 +2430,65 @@ spec: - "patch" - "update" - "delete" - image: "litmuschaos/ansible-runner:1.13.8" + - "deletecollection" + image: "litmuschaos/go-runner:1.13.8" imagePullPolicy: Always args: - -c - - ./experiments -name pod-cpu-hog-exec + - ./experiments -name pod-memory-hog command: - /bin/bash env: - - name: TOTAL_CHAOS_DURATION - value: '60' + - name: TOTAL_CHAOS_DURATION + value: '60' - ## Number of CPU cores to stress - - name: CPU_CORES - value: '1' + ## enter the amount of memory in megabytes to be consumed by the application pod + - name: MEMORY_CONSUMPTION + value: '500' + + ## Number of workers to perform stress + - name: NUMBER_OF_WORKERS + value: '1' - ## Percentage of total pods to target - - name: PODS_AFFECTED_PERC - value: '' + ## percentage of total pods to target + - name: PODS_AFFECTED_PERC + value: '' - ## Period to wait before and after injection of chaos in sec - - name: RAMP_TIME - value: '' + ## Period to wait before and after injection of chaos in sec + - name: RAMP_TIME + value: '' - ## env var that describes the library used to execute the chaos - ## default: litmus. Supported values: litmus - - name: LIB - value: 'litmus' + ## env var that describes the library used to execute the chaos + ## default: litmus. 
Supported values: litmus, pumba + - name: LIB + value: 'litmus' - - name: TARGET_PODS - value: '' + ## It is used in pumba lib only + - name: LIB_IMAGE + value: 'litmuschaos/go-runner:1.13.8' + + ## It is used in pumba lib only + - name: STRESS_IMAGE + value: 'alexeiled/stress-ng:latest-ubuntu' + + ## provide the cluster runtime + - name: CONTAINER_RUNTIME + value: 'docker' + + # provide the socket file path + - name: SOCKET_PATH + value: '/var/run/docker.sock' + + ## it defines the sequence of chaos execution for multiple target pods + ## supported values: serial, parallel + - name: SEQUENCE + value: 'parallel' + + - name: TARGET_PODS + value: '' - ## it defines the sequence of chaos execution for multiple target pods - ## supported values: serial, parallel - - name: SEQUENCE - value: 'parallel' - labels: - name: pod-cpu-hog-exec + name: pod-memory-hog app.kubernetes.io/part-of: litmus app.kubernetes.io/component: experiment-job app.kubernetes.io/version: 1.13.8 @@ -2487,82 +2497,87 @@ spec: apiVersion: litmuschaos.io/v1alpha1 description: message: | - Injects network packet duplication on pods belonging to an app deployment + Fillup Ephemeral Storage of a Resource kind: ChaosExperiment metadata: - name: pod-network-duplication + name: disk-fill labels: - name: pod-network-duplication + name: disk-fill app.kubernetes.io/part-of: litmus app.kubernetes.io/component: chaosexperiment app.kubernetes.io/version: 1.13.8 spec: definition: scope: Namespaced - permissions: - - apiGroups: - - "" - - "batch" - - "apps" - - "apps.openshift.io" - - "argoproj.io" - - "litmuschaos.io" - resources: - - "jobs" - - "pods" - - "pods/log" - - "replicationcontrollers" - - "deployments" - - "statefulsets" - - "daemonsets" - - "replicasets" - - "deploymentconfigs" - - "rollouts" - - "pods/exec" - - "events" - - "chaosengines" - - "chaosexperiments" - - "chaosresults" - verbs: - - "get" - - "list" - - "patch" - - "create" - - "update" - - "delete" - - "deletecollection" + permissions: + - apiGroups: + - "" + - "apps" + - "batch" + - "apps.openshift.io" + - "argoproj.io" + - "litmuschaos.io" + resources: + - "jobs" + - "pods" + - "pods/exec" + - "pods/log" + - "replicationcontrollers" + - "deployments" + - "statefulsets" + - "daemonsets" + - "replicasets" + - "deploymentconfigs" + - "rollouts" + - "events" + - "chaosengines" + - "chaosexperiments" + - "chaosresults" + verbs: + - "create" + - "list" + - "get" + - "patch" + - "update" + - "delete" + - "deletecollection" image: "litmuschaos/go-runner:1.13.8" imagePullPolicy: Always args: - -c - - ./experiments -name pod-network-duplication + - ./experiments -name disk-fill command: - /bin/bash env: - - name: TOTAL_CHAOS_DURATION - value: '60' - - - name: RAMP_TIME - value: '' - name: TARGET_CONTAINER value: '' + + - name: FILL_PERCENTAGE + value: '80' - - name: TC_IMAGE - value: 'gaiadocker/iproute2' - - - name: NETWORK_INTERFACE - value: 'eth0' + - name: TOTAL_CHAOS_DURATION + value: '60' - - name: NETWORK_PACKET_DUPLICATION_PERCENTAGE - value: '100' # in percentage + # Period to wait before and after injection of chaos in sec + - name: RAMP_TIME + value: '' - # lib can be litmus or pumba + # Provide the LIB here + # Only litmus supported - name: LIB - value: 'litmus' + value: 'litmus' + + # provide the data block size + # supported unit is KB + - name: DATA_BLOCK_SIZE + value: '256' - name: TARGET_PODS - value: '' + value: '' + + - name: EPHEMERAL_STORAGE_MEBIBYTES + value: '' ## percentage of total pods to target - name: PODS_AFFECTED_PERC @@ -2571,25 +2586,10 @@ 
spec: - name: LIB_IMAGE value: 'litmuschaos/go-runner:1.13.8' - # provide the name of container runtime - # for litmus LIB, it supports docker, containerd, crio - # for pumba LIB, it supports docker only - - name: CONTAINER_RUNTIME - value: 'docker' - - # provide the destination ips - # chaos injection will be triggered for these destination ips - - name: DESTINATION_IPS - value: '' - - # provide the destination hosts - # chaos injection will be triggered for these destination hosts - - name: DESTINATION_HOSTS - value: '' - - # provide the socket file path - - name: SOCKET_PATH - value: '/var/run/docker.sock' + # Provide the container runtime path + # Default set to docker container path + - name: CONTAINER_PATH + value: '/var/lib/docker/containers' ## it defines the sequence of chaos execution for multiple target pods ## supported values: serial, parallel @@ -2597,7 +2597,7 @@ spec: value: 'parallel' labels: - name: pod-network-duplication + name: disk-fill app.kubernetes.io/part-of: litmus app.kubernetes.io/component: experiment-job app.kubernetes.io/version: 1.13.8 diff --git a/charts/kafka/experiments.yaml b/charts/kafka/experiments.yaml index b13c43617..5735c3282 100644 --- a/charts/kafka/experiments.yaml +++ b/charts/kafka/experiments.yaml @@ -1,12 +1,12 @@ apiVersion: litmuschaos.io/v1alpha1 description: message: | - Deleting a kafka broker pod + Detaching a persistent disk from a node/instance for kafka. kind: ChaosExperiment metadata: - name: kafka-broker-pod-failure + name: kafka-broker-disk-failure labels: - name: kafka-broker-pod-failure + name: kafka-broker-disk-failure app.kubernetes.io/part-of: litmus app.kubernetes.io/component: chaosexperiment app.kubernetes.io/version: 1.13.8 @@ -21,11 +21,10 @@ spec: - "litmuschaos.io" resources: - "statefulsets" - - "deployments" - - "events" - - "pods/log" - - "configmaps" + - "secrets" - "jobs" + - "pods/log" + - "events" - "pods" - "pods/exec" - "chaosengines" @@ -33,27 +32,20 @@ spec: - "chaosresults" verbs: - "create" - - "get" - "delete" - - "list" - - "update" - - "patch" - - "deletecollection" - - apiGroups: - - "" - resources: - - "nodes" - verbs: - "get" - "list" - image: "litmuschaos/go-runner:1.13.8" + - "patch" + image: "litmuschaos/ansible-runner:1.13.8" imagePullPolicy: Always args: - -c - - ./experiments -name kafka-broker-pod-failure + - ansible-playbook ./experiments/kafka/kafka-broker-disk-failure/kafka-broker-disk-failure-ansible-logic.yml -i /etc/ansible/hosts -vv; exit 0 command: - /bin/bash env: + - name: ANSIBLE_STDOUT_CALLBACK + value: 'default' - name: KAFKA_KIND value: 'statefulset' @@ -64,26 +56,24 @@ spec: - name: KAFKA_LIVENESS_IMAGE value: 'litmuschaos/kafka-client:ci' - # Recommended timeout for EKS platform: 60000 ms - name: KAFKA_CONSUMER_TIMEOUT - value: '30000' #in ms + value: '30000' - name: TOTAL_CHAOS_DURATION value: '15' - - name: CHAOS_INTERVAL - value: '5' + - name: PROJECT_ID + value: '' - ## it defines the sequence of chaos execution for multiple target pods - ## supported values: serial, parallel - - name: SEQUENCE - value: 'parallel' + - name: DISK_NAME + value: '' - - name: FORCE - value: 'true' + - name: ZONE_NAME + value: '' - - name: KAFKA_INSTANCE_NAME - value: '' + # GKE and AWS supported + - name: CLOUD_PLATFORM + value: 'GKE' - name: KAFKA_NAMESPACE value: '' @@ -94,6 +84,9 @@ spec: - name: KAFKA_BROKER value: '' + - name: KAFKA_INSTANCE_NAME + value: '' + - name: KAFKA_REPLICATION_FACTOR value: '' @@ -114,28 +107,26 @@ spec: - name: ZOOKEEPER_PORT value: '' - - ## env var that 
describes the library used to execute the chaos - ## default: litmus. Supported values: litmus - - name: LIB - value: 'litmus' - labels: - name: kafka-broker-pod-failure + name: kafka-broker-disk-failure app.kubernetes.io/part-of: litmus app.kubernetes.io/component: experiment-job app.kubernetes.io/version: 1.13.8 + secrets: + - name: kafka-broker-disk-failure + mountPath: /tmp/ + --- apiVersion: litmuschaos.io/v1alpha1 description: message: | - Detaching a persistent disk from a node/instance for kafka. + Deleting a kafka broker pod kind: ChaosExperiment metadata: - name: kafka-broker-disk-failure + name: kafka-broker-pod-failure labels: - name: kafka-broker-disk-failure + name: kafka-broker-pod-failure app.kubernetes.io/part-of: litmus app.kubernetes.io/component: chaosexperiment app.kubernetes.io/version: 1.13.8 @@ -150,10 +141,11 @@ spec: - "litmuschaos.io" resources: - "statefulsets" - - "secrets" - - "jobs" - - "pods/log" + - "deployments" - "events" + - "pods/log" + - "configmaps" + - "jobs" - "pods" - "pods/exec" - "chaosengines" @@ -161,20 +153,27 @@ spec: - "chaosresults" verbs: - "create" - - "delete" - "get" + - "delete" - "list" + - "update" - "patch" - image: "litmuschaos/ansible-runner:1.13.8" + - "deletecollection" + - apiGroups: + - "" + resources: + - "nodes" + verbs: + - "get" + - "list" + image: "litmuschaos/go-runner:1.13.8" imagePullPolicy: Always args: - -c - - ansible-playbook ./experiments/kafka/kafka-broker-disk-failure/kafka-broker-disk-failure-ansible-logic.yml -i /etc/ansible/hosts -vv; exit 0 + - ./experiments -name kafka-broker-pod-failure command: - /bin/bash env: - - name: ANSIBLE_STDOUT_CALLBACK - value: 'default' - name: KAFKA_KIND value: 'statefulset' @@ -185,24 +184,26 @@ spec: - name: KAFKA_LIVENESS_IMAGE value: 'litmuschaos/kafka-client:ci' + # Recommended timeout for EKS platform: 60000 ms - name: KAFKA_CONSUMER_TIMEOUT - value: '30000' + value: '30000' #in ms - name: TOTAL_CHAOS_DURATION value: '15' - - name: PROJECT_ID - value: '' + - name: CHAOS_INTERVAL + value: '5' - - name: DISK_NAME - value: '' + ## it defines the sequence of chaos execution for multiple target pods + ## supported values: serial, parallel + - name: SEQUENCE + value: 'parallel' - - name: ZONE_NAME - value: '' + - name: FORCE + value: 'true' - # GKE and AWS supported - - name: CLOUD_PLATFORM - value: 'GKE' + - name: KAFKA_INSTANCE_NAME + value: '' - name: KAFKA_NAMESPACE value: '' @@ -213,9 +214,6 @@ spec: - name: KAFKA_BROKER value: '' - - name: KAFKA_INSTANCE_NAME - value: '' - - name: KAFKA_REPLICATION_FACTOR value: '' @@ -236,14 +234,16 @@ spec: - name: ZOOKEEPER_PORT value: '' + + ## env var that describes the library used to execute the chaos + ## default: litmus. Supported values: litmus + - name: LIB + value: 'litmus' + labels: - name: kafka-broker-disk-failure + name: kafka-broker-pod-failure app.kubernetes.io/part-of: litmus app.kubernetes.io/component: experiment-job app.kubernetes.io/version: 1.13.8 - secrets: - - name: kafka-broker-disk-failure - mountPath: /tmp/ - --- diff --git a/charts/kube-aws/experiments.yaml b/charts/kube-aws/experiments.yaml index 796d719e7..955352176 100644 --- a/charts/kube-aws/experiments.yaml +++ b/charts/kube-aws/experiments.yaml @@ -93,9 +93,9 @@ description: Detaching an ebs volume from ec2 instance. 
 kind: ChaosExperiment
 metadata:
-  name: ec2-terminate-by-tag
+  name: ebs-loss-by-tag
   labels:
-    name: ec2-terminate-by-tag
+    name: ebs-loss-by-tag
     app.kubernetes.io/part-of: litmus
     app.kubernetes.io/component: chaosexperiment
     app.kubernetes.io/version: 1.13.8
@@ -128,7 +128,7 @@ spec:
     imagePullPolicy: Always
     args:
     - -c
-    - ./experiments -name ec2-terminate-by-tag
+    - ./experiments -name ebs-loss-by-tag
     command:
     - /bin/bash
     env:
@@ -138,28 +138,22 @@ spec:
     - name: CHAOS_INTERVAL
       value: '30'

-    # Period to wait before and after injection of chaos in sec
     - name: RAMP_TIME
       value: ''

-    - name: INSTANCE_TAG
+    - name: EBS_VOLUME_TAG
       value: ''

-    # enable it if the target instance is a part of self-managed nodegroup.
-    - name: MANAGED_NODEGROUP
-      value: 'disable'
-
     - name: REGION
       value: ''

-    # Target the percentage of instance filtered from tag
-    - name: INSTANCE_AFFECTED_PERC
-      value: ''
-
     - name: SEQUENCE
-      value: 'parallel'
+      value: 'parallel'

-    # Provide the path of aws credentials mounted from secret
+    - name: VOLUME_AFFECTED_PERC
+      value: ''
+
+    # Provide the path of aws credentials mounted from secret
     - name: AWS_SHARED_CREDENTIALS_FILE
       value: '/tmp/cloud_config.yml'

@@ -169,7 +163,7 @@ spec:
       value: 'litmus'

     labels:
-      name: ec2-terminate-by-tag
+      name: ebs-loss-by-tag
       app.kubernetes.io/part-of: litmus
       app.kubernetes.io/component: experiment-job
       app.kubernetes.io/version: 1.13.8
@@ -267,9 +261,9 @@ description:
     Detaching an ebs volume from ec2 instance.
 kind: ChaosExperiment
 metadata:
-  name: ebs-loss-by-tag
+  name: ec2-terminate-by-tag
   labels:
-    name: ebs-loss-by-tag
+    name: ec2-terminate-by-tag
     app.kubernetes.io/part-of: litmus
     app.kubernetes.io/component: chaosexperiment
     app.kubernetes.io/version: 1.13.8
@@ -302,7 +296,7 @@ spec:
     imagePullPolicy: Always
     args:
     - -c
-    - ./experiments -name ebs-loss-by-tag
+    - ./experiments -name ec2-terminate-by-tag
     command:
     - /bin/bash
     env:
@@ -312,22 +306,28 @@ spec:
     - name: CHAOS_INTERVAL
       value: '30'

+    # Period to wait before and after injection of chaos in sec
     - name: RAMP_TIME
       value: ''

-    - name: EBS_VOLUME_TAG
+    - name: INSTANCE_TAG
       value: ''

+    # enable it if the target instance is a part of self-managed nodegroup.
+ - name: MANAGED_NODEGROUP + value: 'disable' + - name: REGION value: '' - - name: SEQUENCE - value: 'parallel' + # Target the percentage of instance filtered from tag + - name: INSTANCE_AFFECTED_PERC + value: '' - - name: VOLUME_AFFECTED_PERC - value: '' + - name: SEQUENCE + value: 'parallel' - # Provide the path of aws credentials mounted from secret + # Provide the path of aws credentials mounted from secret - name: AWS_SHARED_CREDENTIALS_FILE value: '/tmp/cloud_config.yml' @@ -337,7 +337,7 @@ spec: value: 'litmus' labels: - name: ebs-loss-by-tag + name: ec2-terminate-by-tag app.kubernetes.io/part-of: litmus app.kubernetes.io/component: experiment-job app.kubernetes.io/version: 1.13.8 diff --git a/charts/openebs/experiments.yaml b/charts/openebs/experiments.yaml index 8a49a0a59..db9f8340b 100644 --- a/charts/openebs/experiments.yaml +++ b/charts/openebs/experiments.yaml @@ -1,44 +1,46 @@ ---- apiVersion: litmuschaos.io/v1alpha1 description: message: | - Kill the pool pod and check if gets scheduled again + Network loss to pool pod belonging to a OpenEBS PVC kind: ChaosExperiment metadata: labels: litmuschaos.io/name: openebs - name: openebs-pool-pod-failure + name: openebs-pool-network-loss app.kubernetes.io/part-of: litmus app.kubernetes.io/component: chaosexperiment app.kubernetes.io/version: 1.13.8 - name: openebs-pool-pod-failure + name: openebs-pool-network-loss spec: definition: scope: Cluster permissions: - apiGroups: - "" - - "extensions" - "apps" - - "batch" - "litmuschaos.io" - - "openebs.io" + - "batch" + - "extensions" - "storage.k8s.io" + - "openebs.io" resources: - - "deployments" - - "replicasets" - - "jobs" + - "pods" - "pods/log" + - "pods/exec" - "events" - - "pods" + - "jobs" - "configmaps" - - "secrets" + - "services" + - "persistentvolumeclaims" - "storageclasses" - "persistentvolumeclaims" - - "cstorvolumereplicas" + - "persistentvolumes" - "chaosengines" - "chaosexperiments" - "chaosresults" + - "cstorpools" + - "cstorvolumereplicas" + - "replicasets" verbs: - "create" - "get" @@ -46,112 +48,110 @@ spec: - "list" - "patch" - "update" - - apiGroups: - - "" - resources: - - "nodes" - verbs: - - "get" - - "list" image: "litmuschaos/ansible-runner:1.13.8" imagePullPolicy: Always args: - -c - - ansible-playbook ./experiments/openebs/openebs-pool-pod-failure/openebs_pool_pod_failure_ansible_logic.yml -i /etc/ansible/hosts -vv; exit 0 + - ansible-playbook ./experiments/openebs/openebs-pool-network-loss/openebs_pool_network_loss_ansible_logic.yml -i /etc/ansible/hosts -vv; exit 0 command: - /bin/bash env: - name: ANSIBLE_STDOUT_CALLBACK - value: default + value: 'default' - - name: OPENEBS_NS + - name: OPENEBS_NAMESPACE value: 'openebs' - - name: APP_PVC - value: '' + # only pumba supported + # For pumba image use : gaiaadm/pumba:0.6.5 + - name: LIB_IMAGE + value: 'gaiaadm/pumba:0.6.5' + + - name: TC_IMAGE + value: 'gaiadocker/iproute2' + + - name: NETWORK_PACKET_LOSS_PERCENTAGE + value: '100' # in percentage + + - name: TOTAL_CHAOS_DURATION + value: '120' # in seconds - name: LIVENESS_APP_LABEL value: '' - name: LIVENESS_APP_NAMESPACE - value: '' - - - name: CHAOS_ITERATIONS - value: '2' - - # provide the kill count - - name: KILL_COUNT value: '' - name: DATA_PERSISTENCE value: '' labels: - name: openebs-pool-pod-failure + name: openebs-pool-network-loss app.kubernetes.io/part-of: litmus app.kubernetes.io/component: experiment-job app.kubernetes.io/version: 1.13.8 - #configmaps: - #- name: openebs-pool-pod-failure - # mountPath: /mnt - +--- --- apiVersion: 
litmuschaos.io/v1alpha1 description: message: | - Network delay to pool pod belonging to a OpenEBS PVC - This experiment is using pumba lib for network chaos + Kill the cstor target/Jiva controller pod and check if gets created again kind: ChaosExperiment metadata: labels: litmuschaos.io/name: openebs - name: openebs-pool-network-delay + name: openebs-target-pod-failure app.kubernetes.io/part-of: litmus app.kubernetes.io/component: chaosexperiment app.kubernetes.io/version: 1.13.8 - name: openebs-pool-network-delay + name: openebs-target-pod-failure spec: definition: scope: Cluster permissions: - apiGroups: - "" + - "extensions" - "apps" - - "litmuschaos.io" - "batch" - - "extensions" + - "litmuschaos.io" - "storage.k8s.io" - - "openebs.io" resources: - - "pods" - - "pods/exec" + - "deployments" - "jobs" - - "pods/log" - "events" + - "pods" + - "pods/log" + - "pods/exec" - "configmaps" + - "secrets" - "services" - - "persistentvolumeclaims" - - "storageclasses" - - "persistentvolumes" - "chaosengines" - "chaosexperiments" - "chaosresults" - - "cstorpools" - - "cstorvolumereplicas" - - "replicasets" + - "persistentvolumeclaims" + - "storageclasses" + - "persistentvolumes" verbs: - "create" - "get" + - "delete" - "list" - "patch" - "update" - - "delete" + - apiGroups: + - "" + resources: + - "nodes" + verbs: + - "get" + - "list" image: "litmuschaos/ansible-runner:1.13.8" imagePullPolicy: Always args: - -c - - ansible-playbook ./experiments/openebs/openebs-pool-network-delay/openebs_pool_network_delay_ansible_logic.yml -i /etc/ansible/hosts -vv; exit 0 + - ansible-playbook ./experiments/openebs/openebs-target-pod-failure/openebs_target_pod_failure_ansible_logic.yml -i /etc/ansible/hosts -vv; exit 0 command: - /bin/bash env: @@ -160,21 +160,12 @@ spec: - name: OPENEBS_NAMESPACE value: 'openebs' + + - name: APP_PVC + value: '' - # only pumba supported - # For pumba image use : gaiaadm/pumba:0.6.5 - - name: LIB_IMAGE - value: 'gaiaadm/pumba:0.6.5' - - # in milliseconds - - name: NETWORK_DELAY - value: '60000' - - - name: TC_IMAGE - value: 'gaiadocker/iproute2' - - - name: TOTAL_CHAOS_DURATION - value: '60' # in seconds + - name: FORCE + value: 'true' - name: LIVENESS_APP_LABEL value: '' @@ -185,88 +176,113 @@ spec: - name: DATA_PERSISTENCE value: '' + - name: TOTAL_CHAOS_DURATION + value: '60' + + # provide the kill count + - name: KILL_COUNT + value: '' + + - name: CHAOS_INTERVAL + value: '15' + + - name: DEPLOY_TYPE + value: 'deployment' + labels: - name: openebs-pool-network-delay - app.kubernetes.io/part-of: litmus + name: openebs-target-pod-failure + app.kubernetes.io/part-of: litmus app.kubernetes.io/component: experiment-job app.kubernetes.io/version: 1.13.8 + #configmaps: + #- name: openebs-target-pod-failure + # mountPath: /mnt --- --- apiVersion: litmuschaos.io/v1alpha1 description: message: | - Kill the pool container and check if gets scheduled again + Kill the OpenEBS NFS provisioner container and check if pods consuming the NFS PVs continue to be available and volumes are writable (RWM mode) kind: ChaosExperiment metadata: labels: litmuschaos.io/name: openebs - name: openebs-pool-container-failure + name: openebs-nfs-provisioner-kill app.kubernetes.io/part-of: litmus app.kubernetes.io/component: chaosexperiment app.kubernetes.io/version: 1.13.8 - name: openebs-pool-container-failure + name: openebs-nfs-provisioner-kill spec: definition: scope: Cluster permissions: - apiGroups: - "" - - "extensions" - "apps" - - "batch" - "litmuschaos.io" - - "openebs.io" + - "batch" + - 
"extensions" - "storage.k8s.io" resources: - - "replicasets" - - "events" - - "jobs" - "pods" - - "pods/log" - "pods/exec" + - "pods/log" + - "deployments" + - "events" + - "jobs" - "configmaps" - - "secrets" + - "services" - "persistentvolumeclaims" - - "cstorvolumereplicas" - - "chaosengines" + - "storageclasses" + - "persistentvolumes" - "chaosexperiments" - "chaosresults" + - "chaosengines" verbs: - "create" - - "get" - - "delete" - "list" + - "get" - "patch" - "update" + - "delete" + - apiGroups: + - "" + resources: + - "nodes" + verbs: + - "get" + - "list" + image: "litmuschaos/ansible-runner:1.13.8" imagePullPolicy: Always args: - -c - - ansible-playbook ./experiments/openebs/openebs-pool-container-failure/openebs_pool_container_failure_ansible_logic.yml -i /etc/ansible/hosts -vv; exit 0 + - ansible-playbook ./experiments/openebs/openebs-nfs-provisioner-kill/openebs_nfs_provisioner_kill_ansible_logic.yml -i /etc/ansible/hosts -vv; exit 0 command: - /bin/bash env: - name: ANSIBLE_STDOUT_CALLBACK value: 'default' - - name: OPENEBS_NS - value: 'openebs' + # NFS default container + - name: TARGET_CONTAINER + value: 'nfs-provisioner' - - name: APP_PVC + # Period to wait before injection of chaos in sec + - name: RAMP_TIME value: '' - - name: LIVENESS_APP_LABEL - value: '' + # It supports pumba and containerd + - name: LIB + value: 'pumba' - # only pumba supported - # For pumba image use : gaiaadm/pumba:0.6.5 + # LIB_IMAGE can be - gaiaadm/pumba:0.6.5, gprasath/crictl:ci + # For pumba image use: gaiaadm/pumba:0.6.5 + # For containerd image use: gprasath/crictl:ci - name: LIB_IMAGE value: 'gaiaadm/pumba:0.6.5' - - name: LIVENESS_APP_NAMESPACE - value: '' - # provide the chaos interval - name: CHAOS_INTERVAL value: '10' @@ -275,35 +291,30 @@ spec: - name: TOTAL_CHAOS_DURATION value: '20' - - name: DATA_PERSISTENCE - value: '' - - - name: CHAOS_ITERATIONS - value: '2' - labels: - name: openebs-pool-container-failure + name: openebs-nfs-provisioner-kill app.kubernetes.io/part-of: litmus app.kubernetes.io/component: experiment-job app.kubernetes.io/version: 1.13.8 - #configmaps: - #- name: openebs-pool-container-failure - # mountPath: /mnt + configmaps: + - name: openebs-nfs-provisioner-kill + mountPath: /mnt/ --- apiVersion: litmuschaos.io/v1alpha1 description: message: | - Network loss to pool pod belonging to a OpenEBS PVC + Network delay to pool pod belonging to a OpenEBS PVC + This experiment is using pumba lib for network chaos kind: ChaosExperiment metadata: labels: litmuschaos.io/name: openebs - name: openebs-pool-network-loss + name: openebs-pool-network-delay app.kubernetes.io/part-of: litmus app.kubernetes.io/component: chaosexperiment app.kubernetes.io/version: 1.13.8 - name: openebs-pool-network-loss + name: openebs-pool-network-delay spec: definition: scope: Cluster @@ -318,15 +329,14 @@ spec: - "openebs.io" resources: - "pods" - - "pods/log" - "pods/exec" - - "events" - "jobs" + - "pods/log" + - "events" - "configmaps" - "services" - "persistentvolumeclaims" - "storageclasses" - - "persistentvolumeclaims" - "persistentvolumes" - "chaosengines" - "chaosexperiments" @@ -337,15 +347,15 @@ spec: verbs: - "create" - "get" - - "delete" - "list" - "patch" - "update" + - "delete" image: "litmuschaos/ansible-runner:1.13.8" imagePullPolicy: Always args: - -c - - ansible-playbook ./experiments/openebs/openebs-pool-network-loss/openebs_pool_network_loss_ansible_logic.yml -i /etc/ansible/hosts -vv; exit 0 + - ansible-playbook 
./experiments/openebs/openebs-pool-network-delay/openebs_pool_network_delay_ansible_logic.yml -i /etc/ansible/hosts -vv; exit 0 command: - /bin/bash env: @@ -360,14 +370,15 @@ spec: - name: LIB_IMAGE value: 'gaiaadm/pumba:0.6.5' + # in milliseconds + - name: NETWORK_DELAY + value: '60000' + - name: TC_IMAGE value: 'gaiadocker/iproute2' - - name: NETWORK_PACKET_LOSS_PERCENTAGE - value: '100' # in percentage - - name: TOTAL_CHAOS_DURATION - value: '120' # in seconds + value: '60' # in seconds - name: LIVENESS_APP_LABEL value: '' @@ -379,123 +390,106 @@ spec: value: '' labels: - name: openebs-pool-network-loss - app.kubernetes.io/part-of: litmus + name: openebs-pool-network-delay + app.kubernetes.io/part-of: litmus app.kubernetes.io/component: experiment-job app.kubernetes.io/version: 1.13.8 ---- --- apiVersion: litmuschaos.io/v1alpha1 description: message: | - OpenEBS Pool Disk Loss contains chaos to disrupt state of infra resources. Experiments can inject disk loss against openEBS pool. + Network loss to target pod belonging to a OpenEBS PVC kind: ChaosExperiment metadata: labels: litmuschaos.io/name: openebs - name: openebs-pool-disk-loss + name: openebs-target-network-loss app.kubernetes.io/part-of: litmus app.kubernetes.io/component: chaosexperiment app.kubernetes.io/version: 1.13.8 - name: openebs-pool-disk-loss + name: openebs-target-network-loss spec: definition: scope: Cluster permissions: - apiGroups: - "" + - "extensions" - "apps" - - "litmuschaos.io" - "batch" - - "extensions" + - "litmuschaos.io" - "storage.k8s.io" - - "openebs.io" resources: - - "pods" - "jobs" - - "pods/log" + - "pods" - "events" + - "services" + - "pods/log" - "pods/exec" - - "cstorpools" - "configmaps" - "secrets" + - "persistentvolumeclaims" - "storageclasses" - "persistentvolumes" - - "persistentvolumeclaims" - - "cstorvolumereplicas" + - "chaosengines" - "chaosexperiments" - "chaosresults" - - "chaosengines" verbs: - "create" - - "list" - "get" + - "delete" + - "list" - "patch" - "update" - - "delete" image: "litmuschaos/ansible-runner:1.13.8" imagePullPolicy: Always args: - -c - - ansible-playbook ./experiments/openebs/openebs-pool-disk-loss/openebs_pool_disk_loss_ansible_logic.yml -i /etc/ansible/hosts -vv; exit 0 + - ansible-playbook ./experiments/openebs/openebs-target-network-loss/openebs_target_network_loss_ansible_logic.yml -i /etc/ansible/hosts -vv; exit 0 command: - /bin/bash env: - - name: ANSIBLE_STDOUT_CALLBACK - value: 'default' - - - name: TOTAL_CHAOS_DURATION - value: '60' + - name: ANSIBLE_STDOUT_CALLBACK + value: 'default' - # Period to wait before injection of chaos in sec - - name: RAMP_TIME - value: '' + - name: OPENEBS_NAMESPACE + value: 'openebs' - # GKE and AWS supported - - name: CLOUD_PLATFORM - value: 'GKE' + - name: APP_PVC + value: '' - - name: PROJECT_ID - value: '' + - name: TC_IMAGE + value: 'gaiadocker/iproute2' - - name: NODE_NAME - value: '' + # only pumba supported + # For pumba image use : gaiaadm/pumba:0.6.5 + - name: LIB_IMAGE + value: 'gaiaadm/pumba:0.6.5' - - name: DISK_NAME - value: '' + - name: NETWORK_PACKET_LOSS_PERCENTAGE + value: '100' # in percentage - - name: ZONE_NAME - value: '' + - name: TOTAL_CHAOS_DURATION + value: '120' # in seconds - - name: DEVICE_NAME - value: '' - - - name: LIVENESS_APP_LABEL - value: '' + - name: LIVENESS_APP_LABEL + value: '' - - name: LIVENESS_APP_NAMESPACE - value: '' + - name: LIVENESS_APP_NAMESPACE + value: '' - - name: DATA_PERSISTENCE - value: '' + - name: DATA_PERSISTENCE + value: '' - - name: OPENEBS_NAMESPACE - value: 
'openebs' - labels: - name: openebs-pool-disk-loss + name: openebs-target-network-loss app.kubernetes.io/part-of: litmus app.kubernetes.io/component: experiment-job app.kubernetes.io/version: 1.13.8 - - configmaps: - - name: openebs-pool-disk-loss - mountPath: /mnt/ - - secrets: - - name: cloud-secret - mountPath: /tmp/ + #configmaps: + #- name: openebs-target-network-loss + # mountPath: /mnt --- --- @@ -612,20 +606,21 @@ spec: #- name: openebs-target-container-failure # mountPath: /mnt +--- --- apiVersion: litmuschaos.io/v1alpha1 description: message: | - Network delay to target pod belonging to a deployment/statefulset/daemonset + Kill the pool container and check if gets scheduled again kind: ChaosExperiment metadata: labels: litmuschaos.io/name: openebs - name: openebs-target-network-delay + name: openebs-pool-container-failure app.kubernetes.io/part-of: litmus app.kubernetes.io/component: chaosexperiment app.kubernetes.io/version: 1.13.8 - name: openebs-target-network-delay + name: openebs-pool-container-failure spec: definition: scope: Cluster @@ -636,19 +631,19 @@ spec: - "apps" - "batch" - "litmuschaos.io" + - "openebs.io" - "storage.k8s.io" resources: + - "replicasets" + - "events" - "jobs" - "pods" - - "services" - - "events" - - "pods/exec" - "pods/log" + - "pods/exec" - "configmaps" - "secrets" - "persistentvolumeclaims" - - "storageclasses" - - "persistentvolumes" + - "cstorvolumereplicas" - "chaosengines" - "chaosexperiments" - "chaosresults" @@ -663,49 +658,51 @@ spec: imagePullPolicy: Always args: - -c - - ansible-playbook ./experiments/openebs/openebs-target-network-delay/openebs_target_network_delay_ansible_logic.yml -i /etc/ansible/hosts -vv; exit 0 + - ansible-playbook ./experiments/openebs/openebs-pool-container-failure/openebs_pool_container_failure_ansible_logic.yml -i /etc/ansible/hosts -vv; exit 0 command: - /bin/bash env: - name: ANSIBLE_STDOUT_CALLBACK value: 'default' - - name: OPENEBS_NAMESPACE + - name: OPENEBS_NS value: 'openebs' - name: APP_PVC value: '' - - name: TC_IMAGE - value: 'gaiadocker/iproute2' + - name: LIVENESS_APP_LABEL + value: '' # only pumba supported # For pumba image use : gaiaadm/pumba:0.6.5 - - name: LIB_IMAGE + - name: LIB_IMAGE value: 'gaiaadm/pumba:0.6.5' - - name: NETWORK_DELAY - value: '60000' # in milliseconds + - name: LIVENESS_APP_NAMESPACE + value: '' + # provide the chaos interval + - name: CHAOS_INTERVAL + value: '10' + + # provide the total chaos duration - name: TOTAL_CHAOS_DURATION - value: '60' # in seconds - - - name: LIVENESS_APP_LABEL - value: '' + value: '20' - - name: LIVENESS_APP_NAMESPACE + - name: DATA_PERSISTENCE value: '' - - name: DATA_PERSISTENCE - value: '' + - name: CHAOS_ITERATIONS + value: '2' labels: - name: openebs-target-network-delay + name: openebs-pool-container-failure app.kubernetes.io/part-of: litmus app.kubernetes.io/component: experiment-job app.kubernetes.io/version: 1.13.8 #configmaps: - #- name: openebs-target-network-delay + #- name: openebs-pool-container-failure # mountPath: /mnt --- @@ -713,42 +710,50 @@ spec: apiVersion: litmuschaos.io/v1alpha1 description: message: | - Kill all openebs control plane pod and check if gets scheduled again + Kill the pool pod and check if gets scheduled again kind: ChaosExperiment metadata: labels: litmuschaos.io/name: openebs - name: openebs-control-plane-chaos + name: openebs-pool-pod-failure app.kubernetes.io/part-of: litmus app.kubernetes.io/component: chaosexperiment app.kubernetes.io/version: 1.13.8 - name: openebs-control-plane-chaos + name: 
openebs-pool-pod-failure spec: definition: - scope: Namespaced + scope: Cluster permissions: - apiGroups: - "" - - "litmuschaos.io" - - "batch" + - "extensions" - "apps" + - "batch" + - "litmuschaos.io" + - "openebs.io" + - "storage.k8s.io" resources: - - "pods" - - "pods/log" - "deployments" - - "events" + - "replicasets" - "jobs" + - "pods/log" + - "events" + - "pods" - "configmaps" + - "secrets" + - "storageclasses" + - "persistentvolumeclaims" + - "cstorvolumereplicas" - "chaosengines" - "chaosexperiments" - "chaosresults" verbs: - "create" - - "list" - "get" + - "delete" + - "list" - "patch" - "update" - - "delete" - apiGroups: - "" resources: @@ -756,286 +761,299 @@ spec: verbs: - "get" - "list" - image: "litmuschaos/ansible-runner:1.13.8" imagePullPolicy: Always args: - -c - - ansible-playbook ./experiments/openebs/openebs-control-plane-chaos/openebs_control_plane_chaos_ansible_logic.yml -i /etc/ansible/hosts -vv; exit 0 + - ansible-playbook ./experiments/openebs/openebs-pool-pod-failure/openebs_pool_pod_failure_ansible_logic.yml -i /etc/ansible/hosts -vv; exit 0 command: - /bin/bash env: - name: ANSIBLE_STDOUT_CALLBACK - value: 'default' + value: default - - name: OPENEBS_NAMESPACE + - name: OPENEBS_NS value: 'openebs' - ## Period to wait before injection of chaos - - name: RAMP_TIME + - name: APP_PVC value: '' - - - name: FORCE + + - name: LIVENESS_APP_LABEL value: '' - ## env var that describes the library used to execute the chaos - ## default: litmus. Supported values: litmus, powerfulseal - - name: LIB - value: 'litmus' + - name: LIVENESS_APP_NAMESPACE + value: '' + + - name: CHAOS_ITERATIONS + value: '2' + + # provide the kill count + - name: KILL_COUNT + value: '' + + - name: DATA_PERSISTENCE + value: '' labels: - name: openebs-control-plane-chaos + name: openebs-pool-pod-failure app.kubernetes.io/part-of: litmus app.kubernetes.io/component: experiment-job app.kubernetes.io/version: 1.13.8 + #configmaps: + #- name: openebs-pool-pod-failure + # mountPath: /mnt + +--- --- apiVersion: litmuschaos.io/v1alpha1 description: message: | - Network loss to target pod belonging to a OpenEBS PVC + OpenEBS Pool Disk Loss contains chaos to disrupt state of infra resources. Experiments can inject disk loss against openEBS pool. 
kind: ChaosExperiment metadata: labels: litmuschaos.io/name: openebs - name: openebs-target-network-loss + name: openebs-pool-disk-loss app.kubernetes.io/part-of: litmus app.kubernetes.io/component: chaosexperiment app.kubernetes.io/version: 1.13.8 - name: openebs-target-network-loss + name: openebs-pool-disk-loss spec: definition: scope: Cluster permissions: - apiGroups: - "" - - "extensions" - "apps" - - "batch" - "litmuschaos.io" + - "batch" + - "extensions" - "storage.k8s.io" + - "openebs.io" resources: - - "jobs" - "pods" - - "events" - - "services" + - "jobs" - "pods/log" + - "events" - "pods/exec" + - "cstorpools" - "configmaps" - "secrets" - - "persistentvolumeclaims" - "storageclasses" - "persistentvolumes" - - "chaosengines" + - "persistentvolumeclaims" + - "cstorvolumereplicas" - "chaosexperiments" - "chaosresults" + - "chaosengines" verbs: - "create" - - "get" - - "delete" - "list" + - "get" - "patch" - "update" + - "delete" image: "litmuschaos/ansible-runner:1.13.8" imagePullPolicy: Always args: - -c - - ansible-playbook ./experiments/openebs/openebs-target-network-loss/openebs_target_network_loss_ansible_logic.yml -i /etc/ansible/hosts -vv; exit 0 + - ansible-playbook ./experiments/openebs/openebs-pool-disk-loss/openebs_pool_disk_loss_ansible_logic.yml -i /etc/ansible/hosts -vv; exit 0 command: - /bin/bash env: - - name: ANSIBLE_STDOUT_CALLBACK - value: 'default' + - name: ANSIBLE_STDOUT_CALLBACK + value: 'default' - - name: OPENEBS_NAMESPACE - value: 'openebs' + - name: TOTAL_CHAOS_DURATION + value: '60' - - name: APP_PVC - value: '' + # Period to wait before injection of chaos in sec + - name: RAMP_TIME + value: '' - - name: TC_IMAGE - value: 'gaiadocker/iproute2' + # GKE and AWS supported + - name: CLOUD_PLATFORM + value: 'GKE' - # only pumba supported - # For pumba image use : gaiaadm/pumba:0.6.5 - - name: LIB_IMAGE - value: 'gaiaadm/pumba:0.6.5' + - name: PROJECT_ID + value: '' - - name: NETWORK_PACKET_LOSS_PERCENTAGE - value: '100' # in percentage + - name: NODE_NAME + value: '' - - name: TOTAL_CHAOS_DURATION - value: '120' # in seconds + - name: DISK_NAME + value: '' - - name: LIVENESS_APP_LABEL - value: '' + - name: ZONE_NAME + value: '' - - name: LIVENESS_APP_NAMESPACE - value: '' + - name: DEVICE_NAME + value: '' + + - name: LIVENESS_APP_LABEL + value: '' - - name: DATA_PERSISTENCE - value: '' + - name: LIVENESS_APP_NAMESPACE + value: '' + + - name: DATA_PERSISTENCE + value: '' + - name: OPENEBS_NAMESPACE + value: 'openebs' + labels: - name: openebs-target-network-loss + name: openebs-pool-disk-loss app.kubernetes.io/part-of: litmus app.kubernetes.io/component: experiment-job app.kubernetes.io/version: 1.13.8 - #configmaps: - #- name: openebs-target-network-loss - # mountPath: /mnt ---- + configmaps: + - name: openebs-pool-disk-loss + mountPath: /mnt/ + + secrets: + - name: cloud-secret + mountPath: /tmp/ + --- apiVersion: litmuschaos.io/v1alpha1 description: message: | - Kill the OpenEBS NFS provisioner container and check if pods consuming the NFS PVs continue to be available and volumes are writable (RWM mode) + Network delay to target pod belonging to a deployment/statefulset/daemonset kind: ChaosExperiment metadata: labels: litmuschaos.io/name: openebs - name: openebs-nfs-provisioner-kill + name: openebs-target-network-delay app.kubernetes.io/part-of: litmus app.kubernetes.io/component: chaosexperiment app.kubernetes.io/version: 1.13.8 - name: openebs-nfs-provisioner-kill + name: openebs-target-network-delay spec: definition: scope: Cluster permissions: - 
apiGroups: - "" + - "extensions" - "apps" - - "litmuschaos.io" - "batch" - - "extensions" + - "litmuschaos.io" - "storage.k8s.io" resources: + - "jobs" - "pods" + - "services" + - "events" - "pods/exec" - "pods/log" - - "deployments" - - "events" - - "jobs" - "configmaps" - - "services" + - "secrets" - "persistentvolumeclaims" - "storageclasses" - "persistentvolumes" + - "chaosengines" - "chaosexperiments" - "chaosresults" - - "chaosengines" verbs: - "create" - - "list" - "get" - - "patch" - - "update" - "delete" - - apiGroups: - - "" - resources: - - "nodes" - verbs: - - "get" - "list" - + - "patch" + - "update" image: "litmuschaos/ansible-runner:1.13.8" imagePullPolicy: Always args: - -c - - ansible-playbook ./experiments/openebs/openebs-nfs-provisioner-kill/openebs_nfs_provisioner_kill_ansible_logic.yml -i /etc/ansible/hosts -vv; exit 0 + - ansible-playbook ./experiments/openebs/openebs-target-network-delay/openebs_target_network_delay_ansible_logic.yml -i /etc/ansible/hosts -vv; exit 0 command: - /bin/bash env: - name: ANSIBLE_STDOUT_CALLBACK value: 'default' - # NFS default container - - name: TARGET_CONTAINER - value: 'nfs-provisioner' + - name: OPENEBS_NAMESPACE + value: 'openebs' - # Period to wait before injection of chaos in sec - - name: RAMP_TIME + - name: APP_PVC value: '' - # It supports pumba and containerd - - name: LIB - value: 'pumba' + - name: TC_IMAGE + value: 'gaiadocker/iproute2' - # LIB_IMAGE can be - gaiaadm/pumba:0.6.5, gprasath/crictl:ci - # For pumba image use: gaiaadm/pumba:0.6.5 - # For containerd image use: gprasath/crictl:ci - - name: LIB_IMAGE + # only pumba supported + # For pumba image use : gaiaadm/pumba:0.6.5 + - name: LIB_IMAGE value: 'gaiaadm/pumba:0.6.5' - # provide the chaos interval - - name: CHAOS_INTERVAL - value: '10' + - name: NETWORK_DELAY + value: '60000' # in milliseconds - # provide the total chaos duration - name: TOTAL_CHAOS_DURATION - value: '20' + value: '60' # in seconds + + - name: LIVENESS_APP_LABEL + value: '' + + - name: LIVENESS_APP_NAMESPACE + value: '' + + - name: DATA_PERSISTENCE + value: '' labels: - name: openebs-nfs-provisioner-kill + name: openebs-target-network-delay app.kubernetes.io/part-of: litmus app.kubernetes.io/component: experiment-job app.kubernetes.io/version: 1.13.8 - configmaps: - - name: openebs-nfs-provisioner-kill - mountPath: /mnt/ + #configmaps: + #- name: openebs-target-network-delay + # mountPath: /mnt --- --- apiVersion: litmuschaos.io/v1alpha1 description: message: | - Kill the cstor target/Jiva controller pod and check if gets created again + Kill all openebs control plane pod and check if gets scheduled again kind: ChaosExperiment metadata: labels: litmuschaos.io/name: openebs - name: openebs-target-pod-failure + name: openebs-control-plane-chaos app.kubernetes.io/part-of: litmus app.kubernetes.io/component: chaosexperiment app.kubernetes.io/version: 1.13.8 - name: openebs-target-pod-failure + name: openebs-control-plane-chaos spec: definition: - scope: Cluster + scope: Namespaced permissions: - apiGroups: - "" - - "extensions" - - "apps" - - "batch" - "litmuschaos.io" - - "storage.k8s.io" + - "batch" + - "apps" resources: - - "deployments" - - "jobs" - - "events" - "pods" - "pods/log" - - "pods/exec" + - "deployments" + - "events" + - "jobs" - "configmaps" - - "secrets" - - "services" - "chaosengines" - "chaosexperiments" - "chaosresults" - - "persistentvolumeclaims" - - "storageclasses" - - "persistentvolumes" verbs: - "create" - - "get" - - "delete" - "list" + - "get" - "patch" - "update" + - 
"delete" - apiGroups: - "" resources: @@ -1043,11 +1061,12 @@ spec: verbs: - "get" - "list" + image: "litmuschaos/ansible-runner:1.13.8" imagePullPolicy: Always args: - -c - - ansible-playbook ./experiments/openebs/openebs-target-pod-failure/openebs_target_pod_failure_ansible_logic.yml -i /etc/ansible/hosts -vv; exit 0 + - ansible-playbook ./experiments/openebs/openebs-control-plane-chaos/openebs_control_plane_chaos_ansible_logic.yml -i /etc/ansible/hosts -vv; exit 0 command: - /bin/bash env: @@ -1056,42 +1075,23 @@ spec: - name: OPENEBS_NAMESPACE value: 'openebs' - - - name: APP_PVC - value: '' - - - name: FORCE - value: 'true' - - - name: LIVENESS_APP_LABEL - value: '' - - name: LIVENESS_APP_NAMESPACE - value: '' - - - name: DATA_PERSISTENCE + ## Period to wait before injection of chaos + - name: RAMP_TIME value: '' - - - name: TOTAL_CHAOS_DURATION - value: '60' - - # provide the kill count - - name: KILL_COUNT + + - name: FORCE value: '' - - name: CHAOS_INTERVAL - value: '15' - - - name: DEPLOY_TYPE - value: 'deployment' + ## env var that describes the library used to execute the chaos + ## default: litmus. Supported values: litmus, powerfulseal + - name: LIB + value: 'litmus' labels: - name: openebs-target-pod-failure + name: openebs-control-plane-chaos app.kubernetes.io/part-of: litmus app.kubernetes.io/component: experiment-job app.kubernetes.io/version: 1.13.8 - #configmaps: - #- name: openebs-target-pod-failure - # mountPath: /mnt ---