diff --git a/.pipelines/cni/load-test-templates/restart-hns-template.yaml b/.pipelines/cni/load-test-templates/restart-hns-template.yaml
new file mode 100644
index 0000000000..f688c10aae
--- /dev/null
+++ b/.pipelines/cni/load-test-templates/restart-hns-template.yaml
@@ -0,0 +1,29 @@
+parameters:
+  clusterName: ""
+  cni: "cniv1"
+
+steps:
+  - task: AzureCLI@1
+    inputs:
+      azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION)
+      scriptLocation: "inlineScript"
+      scriptType: "bash"
+      addSpnToEnvironment: true
+      inlineScript: |
+        make -C ./hack/aks set-kubeconf AZCLI=az CLUSTER=${{ parameters.clusterName }}
+        kubectl get pod -owide -A
+
+        echo "Validate pod IP assignment before HNS restart"
+        make test-validate-state CNI_TYPE=${{ parameters.cni }} OS_TYPE=windows
+
+        podList=`kubectl get pods -n kube-system -l os=windows,app=privileged-daemonset -owide --no-headers | awk '{print $1}'`
+        echo "Restart HNS service on each node"
+        for pod in $podList; do
+          kubectl exec -i -n kube-system $pod -- powershell Restart-Service hns
+        done
+
+        echo "Validate pod IP assignment after HNS restart"
+        make test-validate-state CNI_TYPE=${{ parameters.cni }} OS_TYPE=windows
+    name: "restartHNS"
+    displayName: "Restart HNS and Validate pods"
+    retryCountOnTaskFailure: 3
diff --git a/.pipelines/cni/pipeline.yaml b/.pipelines/cni/pipeline.yaml
index 4cbc86aae9..69fe6db574 100644
--- a/.pipelines/cni/pipeline.yaml
+++ b/.pipelines/cni/pipeline.yaml
@@ -204,7 +204,7 @@ stages:
     condition: always()
     dependsOn:
       - cilium_overlay
-      - win22_cniv1
+      - win22_cniv1_HNS
       - linux_cniv1
       - linux_podsubnet
       - linux_overlay
@@ -224,9 +224,9 @@ stages:
           cilium_overlay:
             name: cilium_overlay
             clusterName: cilium-overlay
-          win_cniv1:
-            name: win_cniv1
-            clusterName: win-cniv1
+          win22_cniv1:
+            name: win22_cniv1
+            clusterName: win22-cniv1
           linux_cniv1:
             name: linux_cniv1
             clusterName: linux-cniv1
diff --git a/.pipelines/cni/singletenancy/cniv1-template.yaml b/.pipelines/cni/singletenancy/cniv1-template.yaml
index af3c003d22..5f7d1c54e2 100644
--- a/.pipelines/cni/singletenancy/cniv1-template.yaml
+++ b/.pipelines/cni/singletenancy/cniv1-template.yaml
@@ -19,8 +19,8 @@ parameters:
 # + The azure-vnet state should be validated with that of CNI state.
 # + Pods should have ips assigned and connectivity/datapath test should be present.
 # Windows
-# - The HNS state should be validated with that of CNI state.
-
+# + The HNS state should be validated with that of CNI state.
+# + All CNI E2E is re-run after the HNS service is restarted
 
 stages:
   - stage: create_${{ parameters.name }}
@@ -157,3 +157,86 @@ stages:
           service: true
           hostport: true
 
+  - ${{ if eq(parameters.os, 'windows') }}:
+    - stage: ${{ parameters.name }}_HNS
+      variables:
+        commitID: $[ stagedependencies.setup.env.outputs['SetEnvVars.commitID'] ]
+        npmVersion: $[ stagedependencies.setup.env.outputs['SetEnvVars.npmVersion'] ]
+        dropgzVersion: $[ stagedependencies.setup.env.outputs['SetEnvVars.dropgzVersion'] ]
+      pool:
+        name: $(BUILD_POOL_NAME_DEFAULT)
+      dependsOn:
+        - ${{ parameters.name }}
+        - setup
+      displayName: "HNS Test - ${{ parameters.clusterName }}"
+      jobs:
+        - job: restart_hns
+          displayName: "Restart and Validate HNS"
+          condition: and( succeeded(), ${{ eq(parameters.os, 'windows') }} )
+          steps:
+            - template: ../load-test-templates/restart-hns-template.yaml
+              parameters:
+                clusterName: ${{ parameters.clusterName }}-$(commitID)
+                cni: cniv1
+        - job: deploy_pods
+          displayName: "Scale Test"
+          dependsOn: restart_hns
+          steps:
+            - template: ../load-test-templates/pod-deployment-template.yaml
+              parameters:
+                clusterName: ${{ parameters.clusterName }}-$(commitID)
+                scaleup: ${{ parameters.scaleup }}
+                os: ${{ parameters.os }}
+                iterations: ${{ parameters.iterations }}
+                nodeCount: ${{ parameters.nodeCount }}
+            - template: ../load-test-templates/validate-state-template.yaml
+              parameters:
+                clusterName: ${{ parameters.clusterName }}-$(commitID)
+                os: ${{ parameters.os }}
+                cni: cniv1
+        - job: restart_nodes
+          displayName: "Restart Test"
+          dependsOn: deploy_pods
+          steps:
+            - template: ../load-test-templates/restart-node-template.yaml
+              parameters:
+                clusterName: ${{ parameters.clusterName }}-$(commitID)
+                os: ${{ parameters.os }}
+                nodeCount: ${{ parameters.nodeCount }}
+                scaleup: ${{ parameters.scaleup }}
+            - template: ../load-test-templates/validate-state-template.yaml
+              parameters:
+                clusterName: ${{ parameters.clusterName }}-$(commitID)
+                os: ${{ parameters.os }}
+                cni: cniv1
+                restartCase: "true"
+        - job: recover
+          displayName: "Recover Resources"
+          dependsOn: restart_nodes
+          steps:
+            - task: AzureCLI@1
+              inputs:
+                azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION)
+                scriptLocation: "inlineScript"
+                scriptType: "bash"
+                addSpnToEnvironment: true
+                inlineScript: |
+                  echo "Delete load-test Namespace"
+                  make -C ./hack/aks set-kubeconf AZCLI=az CLUSTER=${{ parameters.clusterName }}-$(commitID)
+                  kubectl delete ns load-test
+                  kubectl cluster-info
+                  kubectl get po -owide -A
+              name: "recover"
+              displayName: "Delete test Namespaces"
+        - template: ../k8s-e2e/k8s-e2e-job-template.yaml
+          parameters:
+            sub: $(BUILD_VALIDATIONS_SERVICE_CONNECTION)
+            clusterName: ${{ parameters.clusterName }}-$(commitID)
+            os: ${{ parameters.os }}
+            dependsOn: recover
+            datapath: true
+            dns: true
+            portforward: true
+            hybridWin: true
+            service: true
+            hostport: true
diff --git a/test/integration/manifests/load/privileged-daemonset-windows.yaml b/test/integration/manifests/load/privileged-daemonset-windows.yaml
index c9a7839013..1001f0721f 100644
--- a/test/integration/manifests/load/privileged-daemonset-windows.yaml
+++ b/test/integration/manifests/load/privileged-daemonset-windows.yaml
@@ -5,14 +5,17 @@ metadata:
   namespace: kube-system
   labels:
     app: privileged-daemonset
+    os: windows
 spec:
   selector:
     matchLabels:
       app: privileged-daemonset
+      os: windows
   template:
     metadata:
       labels:
         app: privileged-daemonset
+        os: windows
     spec:
      nodeSelector:
        kubernetes.io/os: windows
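
Reviewer note: the new restart-hns-template.yaml step boils down to "validate state, bounce HNS on every Windows node through the privileged daemonset, validate again." The following is a minimal sketch of how to reproduce that flow by hand from the repo root, assuming az CLI access to the test cluster and that the privileged daemonset from test/integration/manifests/load is already running; <cluster-name> is a placeholder for the pipeline's ${{ parameters.clusterName }}-$(commitID) value, and $(...) is used in place of the template's backticks.

    # Point kubectl at the test cluster (same make target the template calls).
    make -C ./hack/aks set-kubeconf AZCLI=az CLUSTER=<cluster-name>

    # Baseline: CNI vs HNS state should already be consistent before the restart.
    make test-validate-state CNI_TYPE=cniv1 OS_TYPE=windows

    # Restart HNS on every Windows node via the privileged daemonset pods.
    for pod in $(kubectl get pods -n kube-system \
        -l os=windows,app=privileged-daemonset -owide --no-headers | awk '{print $1}'); do
      kubectl exec -i -n kube-system "$pod" -- powershell Restart-Service hns
    done

    # Re-validate: pod IP assignment and HNS state should survive the service restart.
    make test-validate-state CNI_TYPE=cniv1 OS_TYPE=windows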
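The os: windows label added to privileged-daemonset-windows.yaml is what the restart script's -l os=windows,app=privileged-daemonset selector keys on. A quick hedged sanity check (assuming the daemonset has been applied to kube-system as in the manifest) that the rollout and the selector agree:

    # One ready daemonset pod per Windows node is expected...
    kubectl get ds -n kube-system -l app=privileged-daemonset -owide

    # ...and the exact selector used by restart-hns-template.yaml should return those same pods.
    kubectl get pods -n kube-system -l os=windows,app=privileged-daemonset -owide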