From 3a62dc5ac09ac31d06c91184a13b1472270f6001 Mon Sep 17 00:00:00 2001 From: Shufang Date: Tue, 5 Mar 2024 00:16:34 -0800 Subject: [PATCH 01/49] tmp commit to test whether the cluster can be set up. --- .../swiftv2-e2e-dummy-job-template.yaml | 0 .../swiftv2-e2e-dummy-step-template.yaml | 140 ++++++++++++++++++ .../swiftv2-e2e-job-template.yaml | 97 ++++++++++++ .../swiftv2-e2e-step-template.yaml | 140 ++++++++++++++++++ .pipelines/pipeline.yaml | 23 +++ hack/aks/Makefile | 21 +++ 6 files changed, 421 insertions(+) create mode 100644 .pipelines/multitenancy/swiftv2-e2e-dummy-job-template.yaml create mode 100644 .pipelines/multitenancy/swiftv2-e2e-dummy-step-template.yaml create mode 100644 .pipelines/multitenancy/swiftv2-e2e-job-template.yaml create mode 100644 .pipelines/multitenancy/swiftv2-e2e-step-template.yaml diff --git a/.pipelines/multitenancy/swiftv2-e2e-dummy-job-template.yaml b/.pipelines/multitenancy/swiftv2-e2e-dummy-job-template.yaml new file mode 100644 index 0000000000..e69de29bb2 diff --git a/.pipelines/multitenancy/swiftv2-e2e-dummy-step-template.yaml b/.pipelines/multitenancy/swiftv2-e2e-dummy-step-template.yaml new file mode 100644 index 0000000000..3a519f83ce --- /dev/null +++ b/.pipelines/multitenancy/swiftv2-e2e-dummy-step-template.yaml @@ -0,0 +1,140 @@ +parameters: + name: "" + clusterName: "" + cni: "dualstack" + os: "" + +steps: + - bash: | + go version + go env + mkdir -p '$(GOBIN)' + mkdir -p '$(GOPATH)/pkg' + mkdir -p '$(modulePath)' + echo '##vso[task.prependpath]$(GOBIN)' + echo '##vso[task.prependpath]$(GOROOT)/bin' + name: "GoEnv" + displayName: "Set up the Go environment" + + - task: KubectlInstaller@0 + inputs: + kubectlVersion: latest + + - task: AzureCLI@2 + inputs: + azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION) + scriptLocation: "inlineScript" + scriptType: "bash" + addSpnToEnvironment: true + inlineScript: | + set -e + make -C ./hack/aks set-kubeconf AZCLI=az CLUSTER=${{ parameters.clusterName }} + name: "kubeconfig" + displayName: "Set Kubeconfig" + + - ${{ if eq(parameters.os, 'linux') }}: + - script: | + kubectl cluster-info + kubectl get node + kubectl get po -owide -A + sudo -E env "PATH=$PATH" make test-load SCALE_UP=32 OS_TYPE=linux CNI_TYPE=cniv2 VALIDATE_STATEFILE=true INSTALL_CNS=true INSTALL_DUALSTACK_OVERLAY=true VALIDATE_DUALSTACK=true CNI_VERSION=$(make cni-version) CNS_VERSION=$(make cns-version) CLEANUP=true + retryCountOnTaskFailure: 3 + name: "integrationTest" + displayName: "Run CNS Integration Tests on AKS DualStack Overlay" + + - script: | + set -e + kubectl get po -owide -A + cd test/integration/datapath + echo "Dualstack Overlay Linux datapath IPv6 test" + go test -count=1 datapath_linux_test.go -timeout 3m -tags connection -run ^TestDatapathLinux$ -tags=connection,integration -isDualStack=true + echo "Dualstack Overlay Linux datapath IPv4 test" + go test -count=1 datapath_linux_test.go -timeout 3m -tags connection -run ^TestDatapathLinux$ -tags=connection,integration + retryCountOnTaskFailure: 3 + name: "DualStack_Overlay_Linux_Tests" + displayName: "DualStack Overlay Linux Tests" + + - task: AzureCLI@1 + inputs: + azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION) + scriptLocation: "inlineScript" + scriptType: "bash" + addSpnToEnvironment: true + inlineScript: | + set -e + clusterName=${{ parameters.clusterName }} + echo "Restarting nodes" + for val in $(az vmss list -g MC_${clusterName}_${clusterName}_$(REGION_DUALSTACKOVERLAY_CLUSTER_TEST) --query "[].name" -o tsv); do + make -C ./hack/aks restart-vmss 
AZCLI=az CLUSTER=${clusterName} REGION=$(REGION_DUALSTACKOVERLAY_CLUSTER_TEST) VMSS_NAME=${val} + done + displayName: "Restart Nodes" + + - task: AzureCLI@1 + inputs: + azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION) + scriptLocation: "inlineScript" + scriptType: "bash" + addSpnToEnvironment: true + inlineScript: | + cd test/integration/load + clusterName=${{ parameters.clusterName }} + make -C ./hack/aks set-kubeconf AZCLI=az CLUSTER=${clusterName} + make -C ./hack/aks azcfg AZCLI=az REGION=$(REGION_DUALSTACKOVERLAY_CLUSTER_TEST) + kubectl get pods -A + echo "Validating Node Restart" + CNI_TYPE=dualstack RESTART_CASE=true go test -timeout 30m -tags load -run ^TestValidateState$ + displayName: "Validate Node Restart" + retryCountOnTaskFailure: 3 + + - script: | + echo "validate pod IP assignment before CNS restart" + kubectl get pod -owide -A + echo "validate pod state before CNS restarts" + cd test/integration/load + CNI_TYPE=dualstack go test -timeout 30m -tags load -run ^TestValidateState$ + kubectl rollout restart ds azure-cns -n kube-system + kubectl rollout status ds azure-cns -n kube-system + kubectl get pod -owide -A + echo "validate pods after CNS restarts" + CNI_TYPE=dualstack go test -timeout 30m -tags load -run ^TestValidateState$ + name: "restartCNS_ValidatePodState" + displayName: "Restart CNS and Validate Pod State" + retryCountOnTaskFailure: 3 + + - ${{ if eq(parameters.os, 'windows') }}: + - task: AzureCLI@2 + inputs: + azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION) + scriptLocation: "inlineScript" + scriptType: "bash" + addSpnToEnvironment: true + inlineScript: | + set -e + make -C ./hack/aks windows-nodepool-up AZCLI=az SUB=$(SUB_AZURE_NETWORK_AGENT_BUILD_VALIDATIONS) CLUSTER=${{ parameters.clusterName }} VM_SIZE_WIN=${{ parameters.vmSizeWin }} + echo "Windows nodes have been successfully added to DualStack Overlay Cluster" + kubectl cluster-info + kubectl get node -owide + kubectl get po -owide -A + name: "Add_Windows_Node" + displayName: "Add windows node" + + - script: | + nodeList=`kubectl get node -owide | grep Windows | awk '{print $1}'` + for node in $nodeList; do + taint=`kubectl describe node $node | grep Taints | awk '{print $2}'` + if [ $taint == "node.cloudprovider.kubernetes.io/uninitialized=true:NoSchedule" ]; then + kubectl taint nodes $node node.cloudprovider.kubernetes.io/uninitialized=true:NoSchedule- + fi + done + sudo -E env "PATH=$PATH" make test-load SCALE_UP=32 OS_TYPE=windows CNI_TYPE=cniv2 VALIDATE_STATEFILE=true INSTALL_CNS=true INSTALL_DUALSTACK_OVERLAY=true VALIDATE_DUALSTACK=true CNI_VERSION=$(make cni-version) CNS_VERSION=$(make cns-version) CLEANUP=true + name: "WindowsDualStackOverlayControlPlaneScaleTests" + displayName: "Windows DualStack Overlay ControlPlane Scale Tests" + retryCountOnTaskFailure: 3 + + - script: | + echo "DualStack Overlay DataPath Test" + cd test/integration/datapath + sudo -E env "PATH=$PATH" go test -count=1 datapath_windows_test.go -timeout 3m -tags connection -restartKubeproxy true -run ^TestDatapathWin$ + name: "WindowsDualStackOverlayDatapathTests" + displayName: "Windows DualStack Overlay Datapath Tests" + retryCountOnTaskFailure: 3 diff --git a/.pipelines/multitenancy/swiftv2-e2e-job-template.yaml b/.pipelines/multitenancy/swiftv2-e2e-job-template.yaml new file mode 100644 index 0000000000..02967f1a5f --- /dev/null +++ b/.pipelines/multitenancy/swiftv2-e2e-job-template.yaml @@ -0,0 +1,97 @@ +parameters: + name: "" + displayName: "" + clusterType: "" + clusterName: "" + vmSize: "" + 
k8sVersion: "" + dependsOn: "" + nodePoolName: "" + dummyClusterName: "" + dummyClusterType: "" + dummyClusterDisplayName: "" + +stages: + - stage: ${{ parameters.clusterName }} + displayName: Create Cluster - ${{ parameters.displayName }} + dependsOn: + - ${{ parameters.dependsOn }} + - setup + pool: + name: $(BUILD_POOL_NAME_DEFAULT) + variables: + commitID: $[ stagedependencies.setup.env.outputs['EnvironmentalVariables.commitID'] ] + jobs: + - template: ../../templates/create-cluster.yaml + parameters: + name: ${{ parameters.name }} + displayName: ${{ parameters.displayName }} + clusterType: ${{ parameters.clusterType }} + clusterName: ${{ parameters.clusterName }}-$(commitID) + vmSize: ${{ parameters.vmSize }} + k8sVersion: ${{ parameters.k8sVersion }} + dependsOn: ${{ parameters.dependsOn }} + region: $(REGION_DUALSTACKOVERLAY_CLUSTER_TEST) # Swiftv2 has a specific region requirement and its the same with dualstack reagion + + - stage: ${{ parameters.dummyClusterName }} + displayName: Create Cluster - ${{ parameters.dummyClusterDisplayName }} + dependsOn: + - ${{ parameters.dependsOn }} + - setup + pool: + name: $(BUILD_POOL_NAME_DEFAULT) + variables: + commitID: $[ stagedependencies.setup.env.outputs['EnvironmentalVariables.commitID'] ] + jobs: + - template: ../../templates/create-cluster.yaml + parameters: + name: ${{ parameters.name }} + displayName: ${{ parameters.displayName }} + clusterType: ${{ parameters.dummyClusterType }} + clusterName: ${{ parameters.dummyClusterName }}-$(commitID) + vmSize: ${{ parameters.vmSize }} + k8sVersion: ${{ parameters.k8sVersion }} + dependsOn: ${{ parameters.dependsOn }} + region: $(REGION_DUALSTACKOVERLAY_CLUSTER_TEST) # Swiftv2 has a specific region requirement and its the same with dualstack reagion + + - stage: ${{ parameters.name }} + condition: and( succeeded(), not(eq(dependencies.dualstackoverlaye2e.result,'SucceededWithIssues')) ) # Cant use parameters in dependencies + displayName: E2E - ${{ parameters.displayName }} + dependsOn: + - setup + - publish + - ${{ parameters.clusterName }} + variables: + GOPATH: "$(Agent.TempDirectory)/go" # Go workspace path + GOBIN: "$(GOPATH)/bin" # Go binaries path + modulePath: "$(GOPATH)/src/github.com/Azure/azure-container-networking" + commitID: $[ stagedependencies.setup.env.outputs['EnvironmentalVariables.commitID'] ] + pool: + name: $(BUILD_POOL_NAME_DEFAULT) + jobs: + - job: ${{ parameters.name }}_linux + displayName: Swiftv2 Multitenancy E2E Test Suite | Linux - (${{ parameters.name }}) + pool: + name: $(BUILD_POOL_NAME_DEFAULT) + demands: + - agent.os -equals Linux + - Role -equals $(CUSTOM_E2E_ROLE) + steps: + - template: swiftv2-e2e-step-template.yaml + parameters: + name: ${{ parameters.name }} + clusterName: ${{ parameters.clusterName }}-$(commitID) + os: linux + + - job: failedE2ELogs_linux + displayName: "Linux Failure Logs" + dependsOn: + - ${{ parameters.name }}_linux + - cni_linux + condition: in(dependencies.${{ parameters.name }}_linux.result, 'Failed') + steps: + - template: ../../templates/log-template.yaml + parameters: + clusterName: ${{ parameters.clusterName }}-$(commitID) + os: linux + cni: cniv2 diff --git a/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml b/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml new file mode 100644 index 0000000000..3a519f83ce --- /dev/null +++ b/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml @@ -0,0 +1,140 @@ +parameters: + name: "" + clusterName: "" + cni: "dualstack" + os: "" + +steps: + - bash: | + go version + go env + 
mkdir -p '$(GOBIN)' + mkdir -p '$(GOPATH)/pkg' + mkdir -p '$(modulePath)' + echo '##vso[task.prependpath]$(GOBIN)' + echo '##vso[task.prependpath]$(GOROOT)/bin' + name: "GoEnv" + displayName: "Set up the Go environment" + + - task: KubectlInstaller@0 + inputs: + kubectlVersion: latest + + - task: AzureCLI@2 + inputs: + azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION) + scriptLocation: "inlineScript" + scriptType: "bash" + addSpnToEnvironment: true + inlineScript: | + set -e + make -C ./hack/aks set-kubeconf AZCLI=az CLUSTER=${{ parameters.clusterName }} + name: "kubeconfig" + displayName: "Set Kubeconfig" + + - ${{ if eq(parameters.os, 'linux') }}: + - script: | + kubectl cluster-info + kubectl get node + kubectl get po -owide -A + sudo -E env "PATH=$PATH" make test-load SCALE_UP=32 OS_TYPE=linux CNI_TYPE=cniv2 VALIDATE_STATEFILE=true INSTALL_CNS=true INSTALL_DUALSTACK_OVERLAY=true VALIDATE_DUALSTACK=true CNI_VERSION=$(make cni-version) CNS_VERSION=$(make cns-version) CLEANUP=true + retryCountOnTaskFailure: 3 + name: "integrationTest" + displayName: "Run CNS Integration Tests on AKS DualStack Overlay" + + - script: | + set -e + kubectl get po -owide -A + cd test/integration/datapath + echo "Dualstack Overlay Linux datapath IPv6 test" + go test -count=1 datapath_linux_test.go -timeout 3m -tags connection -run ^TestDatapathLinux$ -tags=connection,integration -isDualStack=true + echo "Dualstack Overlay Linux datapath IPv4 test" + go test -count=1 datapath_linux_test.go -timeout 3m -tags connection -run ^TestDatapathLinux$ -tags=connection,integration + retryCountOnTaskFailure: 3 + name: "DualStack_Overlay_Linux_Tests" + displayName: "DualStack Overlay Linux Tests" + + - task: AzureCLI@1 + inputs: + azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION) + scriptLocation: "inlineScript" + scriptType: "bash" + addSpnToEnvironment: true + inlineScript: | + set -e + clusterName=${{ parameters.clusterName }} + echo "Restarting nodes" + for val in $(az vmss list -g MC_${clusterName}_${clusterName}_$(REGION_DUALSTACKOVERLAY_CLUSTER_TEST) --query "[].name" -o tsv); do + make -C ./hack/aks restart-vmss AZCLI=az CLUSTER=${clusterName} REGION=$(REGION_DUALSTACKOVERLAY_CLUSTER_TEST) VMSS_NAME=${val} + done + displayName: "Restart Nodes" + + - task: AzureCLI@1 + inputs: + azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION) + scriptLocation: "inlineScript" + scriptType: "bash" + addSpnToEnvironment: true + inlineScript: | + cd test/integration/load + clusterName=${{ parameters.clusterName }} + make -C ./hack/aks set-kubeconf AZCLI=az CLUSTER=${clusterName} + make -C ./hack/aks azcfg AZCLI=az REGION=$(REGION_DUALSTACKOVERLAY_CLUSTER_TEST) + kubectl get pods -A + echo "Validating Node Restart" + CNI_TYPE=dualstack RESTART_CASE=true go test -timeout 30m -tags load -run ^TestValidateState$ + displayName: "Validate Node Restart" + retryCountOnTaskFailure: 3 + + - script: | + echo "validate pod IP assignment before CNS restart" + kubectl get pod -owide -A + echo "validate pod state before CNS restarts" + cd test/integration/load + CNI_TYPE=dualstack go test -timeout 30m -tags load -run ^TestValidateState$ + kubectl rollout restart ds azure-cns -n kube-system + kubectl rollout status ds azure-cns -n kube-system + kubectl get pod -owide -A + echo "validate pods after CNS restarts" + CNI_TYPE=dualstack go test -timeout 30m -tags load -run ^TestValidateState$ + name: "restartCNS_ValidatePodState" + displayName: "Restart CNS and Validate Pod State" + retryCountOnTaskFailure: 3 + + - ${{ if 
eq(parameters.os, 'windows') }}: + - task: AzureCLI@2 + inputs: + azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION) + scriptLocation: "inlineScript" + scriptType: "bash" + addSpnToEnvironment: true + inlineScript: | + set -e + make -C ./hack/aks windows-nodepool-up AZCLI=az SUB=$(SUB_AZURE_NETWORK_AGENT_BUILD_VALIDATIONS) CLUSTER=${{ parameters.clusterName }} VM_SIZE_WIN=${{ parameters.vmSizeWin }} + echo "Windows nodes have been successfully added to DualStack Overlay Cluster" + kubectl cluster-info + kubectl get node -owide + kubectl get po -owide -A + name: "Add_Windows_Node" + displayName: "Add windows node" + + - script: | + nodeList=`kubectl get node -owide | grep Windows | awk '{print $1}'` + for node in $nodeList; do + taint=`kubectl describe node $node | grep Taints | awk '{print $2}'` + if [ $taint == "node.cloudprovider.kubernetes.io/uninitialized=true:NoSchedule" ]; then + kubectl taint nodes $node node.cloudprovider.kubernetes.io/uninitialized=true:NoSchedule- + fi + done + sudo -E env "PATH=$PATH" make test-load SCALE_UP=32 OS_TYPE=windows CNI_TYPE=cniv2 VALIDATE_STATEFILE=true INSTALL_CNS=true INSTALL_DUALSTACK_OVERLAY=true VALIDATE_DUALSTACK=true CNI_VERSION=$(make cni-version) CNS_VERSION=$(make cns-version) CLEANUP=true + name: "WindowsDualStackOverlayControlPlaneScaleTests" + displayName: "Windows DualStack Overlay ControlPlane Scale Tests" + retryCountOnTaskFailure: 3 + + - script: | + echo "DualStack Overlay DataPath Test" + cd test/integration/datapath + sudo -E env "PATH=$PATH" go test -count=1 datapath_windows_test.go -timeout 3m -tags connection -restartKubeproxy true -run ^TestDatapathWin$ + name: "WindowsDualStackOverlayDatapathTests" + displayName: "Windows DualStack Overlay Datapath Tests" + retryCountOnTaskFailure: 3 diff --git a/.pipelines/pipeline.yaml b/.pipelines/pipeline.yaml index 12b74394ae..853925e55e 100644 --- a/.pipelines/pipeline.yaml +++ b/.pipelines/pipeline.yaml @@ -504,6 +504,29 @@ stages: vmSize: Standard_B2ms dependsOn: "test" + # Swiftv2 E2E tests with multitenancy cluster start up + - template: singletenancy/multitenancy/swiftv2-e2e-job-template.yaml + parameters: + name: "swiftv2_e2e" + displayName: Swiftv2 Multitenancy + os: linux + clusterType: swiftv2-multitenancy-cluster-up + clusterName: "mtcluster" + nodePoolName: "mtapool" + vmSize: Standard_D4_v2 + dependsOn: "test" + + # Swiftv2 E2E tests with dummy cluster start up + - template: singletenancy/multitenancy/swiftv2-e2e-job-template.yaml + parameters: + name: "swiftv2_e2e_dummy" + displayName: Swiftv2 Multitenancy Dummy Cluster + os: linux + clusterType: swiftv2-dummy-cluster-up + clusterName: "mtcluster" + vmSize: Standard_D4_v2 + dependsOn: "test" + - stage: delete displayName: Delete Clusters condition: always() diff --git a/hack/aks/Makefile b/hack/aks/Makefile index 0eef49717b..789591a147 100644 --- a/hack/aks/Makefile +++ b/hack/aks/Makefile @@ -233,6 +233,27 @@ swift-up: rg-up swift-net-up ## Bring up a SWIFT AzCNI cluster --yes @$(MAKE) set-kubeconf +swiftv2-multitenancy-cluster-up: rg-up + $(AZCLI) aks create -n $(CLUSTER) -g $(GROUP) -l $(REGION) \ + --network-plugin azure \ + --network-plugin-mode overlay \ + --kubernetes-version 1.28 \ + --nodepool-name "mtapool0" \ + --nodepool-tags fastpathenabled=true + --no-ssh-key \ + --yes + @$(MAKE) set-kubeconf +--network-plugin azure --network-plugin-mode overlay --location centraluseuap --kubernetes-version 1.28 --nodepool-name "mtapool0" --nodepool-tags fastpathenabled=true + +swiftv2-dummy-cluster-up: rg-up 
swift-net-up ## Bring up a SWIFT AzCNI cluster + $(AZCLI) aks create -n $(CLUSTER) -g $(GROUP) -l $(REGION) \ + --network-plugin azure \ + --vnet-subnet-id /subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/virtualNetworks/$(VNET)/subnets/nodenet \ + --pod-subnet-id /subscriptions/$(SUB)/resourceGroups/$(GROUP)/providers/Microsoft.Network/virtualNetworks/$(VNET)/subnets/podnet \ + --no-ssh-key \ + --yes + @$(MAKE) set-kubeconf + # The below Vnet Scale clusters are currently only in private preview and available with Kubernetes 1.28 # These AKS clusters can only be created in a limited subscription listed here: # https://dev.azure.com/msazure/CloudNativeCompute/_git/aks-rp?path=/resourceprovider/server/microsoft.com/containerservice/flags/network_flags.go&version=GBmaster&line=134&lineEnd=135&lineStartColumn=1&lineEndColumn=1&lineStyle=plain&_a=contents From 1f6ccc4fff5fb7c3156b22b78e922d503158b3a5 Mon Sep 17 00:00:00 2001 From: Shufang Date: Tue, 5 Mar 2024 14:47:32 -0800 Subject: [PATCH 02/49] Resolved merge conflicts. Update the route. --- .../swiftv2-e2e-dummy-job-template.yaml | 126 ++++++++++++++++++ .../swiftv2-e2e-job-template.yaml | 4 +- .pipelines/pipeline.yaml | 59 ++++++-- hack/aks/Makefile | 1 - 4 files changed, 177 insertions(+), 13 deletions(-) diff --git a/.pipelines/multitenancy/swiftv2-e2e-dummy-job-template.yaml b/.pipelines/multitenancy/swiftv2-e2e-dummy-job-template.yaml index e69de29bb2..0093dd8e48 100644 --- a/.pipelines/multitenancy/swiftv2-e2e-dummy-job-template.yaml +++ b/.pipelines/multitenancy/swiftv2-e2e-dummy-job-template.yaml @@ -0,0 +1,126 @@ +parameters: + name: "" + displayName: "" + clusterType: "" + clusterName: "" + vmSize: "" + k8sVersion: "" + dependsOn: "" + +stages: + - stage: ${{ parameters.clusterName }} + displayName: Create Cluster - ${{ parameters.displayName }} + dependsOn: + - ${{ parameters.dependsOn }} + - setup + pool: + name: $(BUILD_POOL_NAME_DEFAULT) + variables: + commitID: $[ stagedependencies.setup.env.outputs['EnvironmentalVariables.commitID'] ] + jobs: + - template: ../../templates/create-cluster.yaml + parameters: + name: ${{ parameters.name }} + displayName: ${{ parameters.displayName }} + clusterType: ${{ parameters.clusterType }} + clusterName: ${{ parameters.clusterName }}-$(commitID) + vmSize: ${{ parameters.vmSize }} + k8sVersion: ${{ parameters.k8sVersion }} + dependsOn: ${{ parameters.dependsOn }} + region: $(REGION_DUALSTACKOVERLAY_CLUSTER_TEST) # Dualstack has a specific region requirement + + - stage: ${{ parameters.name }} + condition: and( succeeded(), not(eq(dependencies.dualstackoverlaye2e.result,'SucceededWithIssues')) ) # Cant use parameters in dependencies + displayName: E2E - ${{ parameters.displayName }} + dependsOn: + - setup + - publish + - ${{ parameters.clusterName }} + variables: + GOPATH: "$(Agent.TempDirectory)/go" # Go workspace path + GOBIN: "$(GOPATH)/bin" # Go binaries path + modulePath: "$(GOPATH)/src/github.com/Azure/azure-container-networking" + commitID: $[ stagedependencies.setup.env.outputs['EnvironmentalVariables.commitID'] ] + pool: + name: $(BUILD_POOL_NAME_DEFAULT) + jobs: + - job: ${{ parameters.name }}_linux + displayName: DualStack Overlay Test Suite | Linux - (${{ parameters.name }}) + pool: + name: $(BUILD_POOL_NAME_DEFAULT) + demands: + - agent.os -equals Linux + - Role -equals $(CUSTOM_E2E_ROLE) + steps: + - template: dualstackoverlay-e2e-step-template.yaml + parameters: + name: ${{ parameters.name }} + clusterName: ${{ parameters.clusterName }}-$(commitID) + 
os: linux + + - job: ${{ parameters.name }}_windows + displayName: DualStack Overlay Test Suite | Windows - (${{ parameters.name }}) + dependsOn: ${{ parameters.name }}_linux + pool: + name: $(BUILD_POOL_NAME_DEFAULT) + demands: + - agent.os -equals Linux + - Role -equals $(CUSTOM_E2E_ROLE) + steps: + - template: dualstackoverlay-e2e-step-template.yaml + parameters: + name: ${{ parameters.name }} + clusterName: ${{ parameters.clusterName }}-$(commitID) + os: windows + vmSizeWin: ${{ parameters.vmSize }} # Matching linux vmSize + + - template: ../../cni/k8s-e2e/k8s-e2e-job-template.yaml + parameters: + sub: $(BUILD_VALIDATIONS_SERVICE_CONNECTION) + clusterName: ${{ parameters.clusterName }}-$(commitID) + os: linux + dependsOn: ${{ parameters.name }}_windows + dualstack: true + dns: true + portforward: true + hostport: true + service: true + + - template: ../../cni/k8s-e2e/k8s-e2e-job-template.yaml + parameters: + sub: $(BUILD_VALIDATIONS_SERVICE_CONNECTION) + clusterName: ${{ parameters.clusterName }}-$(commitID) + os: windows + dependsOn: cni_linux + dualstack: true + dns: true + portforward: true + service: true + hostport: true + hybridWin: true + + - job: failedE2ELogs_linux + displayName: "Linux Failure Logs" + dependsOn: + - ${{ parameters.name }}_linux + - cni_linux + condition: in(dependencies.${{ parameters.name }}_linux.result, 'Failed') + steps: + - template: ../../templates/log-template.yaml + parameters: + clusterName: ${{ parameters.clusterName }}-$(commitID) + os: linux + cni: cniv2 + + - job: failedE2ELogs_windows + displayName: "Windows Failure Logs" + dependsOn: + - ${{ parameters.name }}_windows + - cni_windows + condition: in(dependencies.${{ parameters.name }}_windows.result, 'Failed') + steps: + - template: ../../templates/log-template.yaml + parameters: + clusterName: ${{ parameters.clusterName }}-$(commitID) + os: windows + cni: cniv2 diff --git a/.pipelines/multitenancy/swiftv2-e2e-job-template.yaml b/.pipelines/multitenancy/swiftv2-e2e-job-template.yaml index 02967f1a5f..f724a713d0 100644 --- a/.pipelines/multitenancy/swiftv2-e2e-job-template.yaml +++ b/.pipelines/multitenancy/swiftv2-e2e-job-template.yaml @@ -22,7 +22,7 @@ stages: variables: commitID: $[ stagedependencies.setup.env.outputs['EnvironmentalVariables.commitID'] ] jobs: - - template: ../../templates/create-cluster.yaml + - template: ../templates/create-cluster.yaml parameters: name: ${{ parameters.name }} displayName: ${{ parameters.displayName }} @@ -43,7 +43,7 @@ stages: variables: commitID: $[ stagedependencies.setup.env.outputs['EnvironmentalVariables.commitID'] ] jobs: - - template: ../../templates/create-cluster.yaml + - template: ../templates/create-cluster.yaml parameters: name: ${{ parameters.name }} displayName: ${{ parameters.displayName }} diff --git a/.pipelines/pipeline.yaml b/.pipelines/pipeline.yaml index 853925e55e..e54ab5151a 100644 --- a/.pipelines/pipeline.yaml +++ b/.pipelines/pipeline.yaml @@ -505,7 +505,7 @@ stages: dependsOn: "test" # Swiftv2 E2E tests with multitenancy cluster start up - - template: singletenancy/multitenancy/swiftv2-e2e-job-template.yaml + - template: multitenancy/swiftv2-e2e-job-template.yaml parameters: name: "swiftv2_e2e" displayName: Swiftv2 Multitenancy @@ -515,17 +515,20 @@ stages: nodePoolName: "mtapool" vmSize: Standard_D4_v2 dependsOn: "test" + dummyClusterName: "swiftv2dummy" + dummyClusterType: "swiftv2-dummy-cluster-up" + dummyClusterDisplayName: Swiftv2 Multitenancy Dummy Cluster # Swiftv2 E2E tests with dummy cluster start up - - template: 
singletenancy/multitenancy/swiftv2-e2e-job-template.yaml - parameters: - name: "swiftv2_e2e_dummy" - displayName: Swiftv2 Multitenancy Dummy Cluster - os: linux - clusterType: swiftv2-dummy-cluster-up - clusterName: "mtcluster" - vmSize: Standard_D4_v2 - dependsOn: "test" + # - template: multitenancy/swiftv2-e2e-job-template.yaml + # parameters: + # name: "swiftv2_e2e_dummy" + # displayName: Swiftv2 Multitenancy Dummy Cluster + # os: linux + # clusterType: swiftv2-dummy-cluster-up + # clusterName: "mtcluster" + # vmSize: Standard_D4_v2 + # dependsOn: "test" - stage: delete displayName: Delete Clusters @@ -541,6 +544,7 @@ stages: - aks_windows_22_e2e - dualstackoverlay_e2e - cilium_dualstackoverlay_e2e + - swiftv2_e2e variables: commitID: $[ stagedependencies.setup.env.outputs['EnvironmentalVariables.commitID'] ] jobs: @@ -589,6 +593,9 @@ stages: cilium_dualstackoverlay_e2e: name: cilium_dualstackoverlay_e2e clusterName: "cildsovere2e" + aswiftv2_e2e: + name: swiftv2_e2e + clusterName: "mtcluster" steps: - template: templates/delete-cluster.yaml parameters: @@ -615,3 +622,35 @@ stages: echo $TAG echo $CURRENT_VERSION echo "Checking if branch is up to date with master" + + - stage: cleanup + displayName: Cleanup + dependsOn: + - azure_overlay_e2e + - aks_swift_e2e + - cilium_e2e + - cilium_overlay_e2e + - cilium_h_overlay_e2e + - aks_ubuntu_22_linux_e2e + - aks_windows_22_e2e + - dualstackoverlay_e2e + - cilium_dualstackoverlay_e2e + - swiftv2_e2e + jobs: + - job: delete_remote_artifacts + displayName: Delete remote artifacts + pool: + name: $(BUILD_POOL_NAME_DEFAULT) + demands: agent.os -equals Linux + steps: + - checkout: none + - task: AzureCLI@1 + inputs: + azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION) + scriptLocation: "inlineScript" + inlineScript: | + BUILD_NUMBER=$(Build.BuildNumber) + BUILD_NUMBER=${BUILD_NUMBER//./-} + echo Deleting storage container with name acn-$BUILD_NUMBER and account name $(STORAGE_ACCOUNT_NAME) + az storage container delete -n acn-$BUILD_NUMBER --account-name $(STORAGE_ACCOUNT_NAME) + displayName: Cleanup remote Azure storage container diff --git a/hack/aks/Makefile b/hack/aks/Makefile index 789591a147..1929120433 100644 --- a/hack/aks/Makefile +++ b/hack/aks/Makefile @@ -243,7 +243,6 @@ swiftv2-multitenancy-cluster-up: rg-up --no-ssh-key \ --yes @$(MAKE) set-kubeconf ---network-plugin azure --network-plugin-mode overlay --location centraluseuap --kubernetes-version 1.28 --nodepool-name "mtapool0" --nodepool-tags fastpathenabled=true swiftv2-dummy-cluster-up: rg-up swift-net-up ## Bring up a SWIFT AzCNI cluster $(AZCLI) aks create -n $(CLUSTER) -g $(GROUP) -l $(REGION) \ From 5d5dd84809e42166e111f3bc7df39dc98ebe9748 Mon Sep 17 00:00:00 2001 From: Shufang Date: Tue, 5 Mar 2024 15:08:12 -0800 Subject: [PATCH 03/49] Fix the route. 
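
Azure Pipelines resolves template references relative to the file that includes
them, so the shared templates consumed from .pipelines/multitenancy/ live one
directory up at ../templates/, not two levels up at ../../templates/.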
--- .pipelines/multitenancy/swiftv2-e2e-job-template.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pipelines/multitenancy/swiftv2-e2e-job-template.yaml b/.pipelines/multitenancy/swiftv2-e2e-job-template.yaml index f724a713d0..664e5a9128 100644 --- a/.pipelines/multitenancy/swiftv2-e2e-job-template.yaml +++ b/.pipelines/multitenancy/swiftv2-e2e-job-template.yaml @@ -90,7 +90,7 @@ stages: - cni_linux condition: in(dependencies.${{ parameters.name }}_linux.result, 'Failed') steps: - - template: ../../templates/log-template.yaml + - template: ../templates/log-template.yaml parameters: clusterName: ${{ parameters.clusterName }}-$(commitID) os: linux From 364b1ee888c1f0395659b98873d34e0f3a67af72 Mon Sep 17 00:00:00 2001 From: Shufang Date: Tue, 5 Mar 2024 15:11:55 -0800 Subject: [PATCH 04/49] Remove unnecessary dependency. --- .pipelines/multitenancy/swiftv2-e2e-job-template.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/.pipelines/multitenancy/swiftv2-e2e-job-template.yaml b/.pipelines/multitenancy/swiftv2-e2e-job-template.yaml index 664e5a9128..847e7c12f3 100644 --- a/.pipelines/multitenancy/swiftv2-e2e-job-template.yaml +++ b/.pipelines/multitenancy/swiftv2-e2e-job-template.yaml @@ -87,7 +87,6 @@ stages: displayName: "Linux Failure Logs" dependsOn: - ${{ parameters.name }}_linux - - cni_linux condition: in(dependencies.${{ parameters.name }}_linux.result, 'Failed') steps: - template: ../templates/log-template.yaml From 68fed0a076f3723e997c4fab866e6c6610a5c16f Mon Sep 17 00:00:00 2001 From: Shufang Date: Tue, 5 Mar 2024 16:36:20 -0800 Subject: [PATCH 05/49] Modify the cluster creation. --- hack/aks/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hack/aks/Makefile b/hack/aks/Makefile index 1929120433..d03a4ee99e 100644 --- a/hack/aks/Makefile +++ b/hack/aks/Makefile @@ -239,7 +239,7 @@ swiftv2-multitenancy-cluster-up: rg-up --network-plugin-mode overlay \ --kubernetes-version 1.28 \ --nodepool-name "mtapool0" \ - --nodepool-tags fastpathenabled=true + --nodepool-tags fastpathenabled=true \ --no-ssh-key \ --yes @$(MAKE) set-kubeconf From aedd344a99759c7f4629eb0c0663b62ebc204c7a Mon Sep 17 00:00:00 2001 From: Shufang Date: Mon, 18 Mar 2024 00:10:34 -0700 Subject: [PATCH 06/49] Swift v2 e2e test structure. 
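
In outline, the new structure delegates a customer subnet with a PodNetwork,
reserves pod IPs with a PodNetworkInstance, then schedules a multitenant pod
against that reservation and probes connectivity. A rough, non-interactive
sketch of what the step template drives (manifest paths come from this patch;
the pod name mtpod and the peer address 172.25.0.22 are taken from later
revisions of these files, and the SUBNET_TOKEN, SUBNET_GUID,
SUBNET_RESOURCE_ID, and VNET_GUID values are assumed to be exported by the
pipeline environment):

    # Delegate the subnet by creating the PodNetwork.
    envsubst '${SUBNET_TOKEN},${SUBNET_GUID},${SUBNET_RESOURCE_ID},${VNET_GUID}' \
      < test/integration/manifests/swiftv2/podnetwork.yaml | kubectl apply -f -
    # Reserve pod IPs with the PodNetworkInstance.
    kubectl apply -f test/integration/manifests/swiftv2/pni.yaml
    # Schedule the multitenant pod against the reservation.
    kubectl apply -f test/integration/manifests/swiftv2/mtpod.yaml
    kubectl wait --for=condition=Ready pod/mtpod --timeout=120s
    # Probe the datapath without an interactive shell.
    kubectl exec mtpod -- ping -c 3 172.25.0.22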
--- .../swiftv2-e2e-dummy-job-template.yaml | 126 ---------------- .../swiftv2-e2e-dummy-step-template.yaml | 140 ------------------ .../swiftv2-e2e-job-template.yaml | 46 +++--- .../swiftv2-e2e-step-template.yaml | 129 +++------------- .pipelines/pipeline.yaml | 5 +- test/integration/manifests/swiftv2/mtpod.yaml | 20 +++ test/integration/manifests/swiftv2/pni.yaml | 7 + .../manifests/swiftv2/podnetwork.yaml | 10 ++ 8 files changed, 85 insertions(+), 398 deletions(-) delete mode 100644 .pipelines/multitenancy/swiftv2-e2e-dummy-job-template.yaml delete mode 100644 .pipelines/multitenancy/swiftv2-e2e-dummy-step-template.yaml create mode 100644 test/integration/manifests/swiftv2/mtpod.yaml create mode 100644 test/integration/manifests/swiftv2/pni.yaml create mode 100644 test/integration/manifests/swiftv2/podnetwork.yaml diff --git a/.pipelines/multitenancy/swiftv2-e2e-dummy-job-template.yaml b/.pipelines/multitenancy/swiftv2-e2e-dummy-job-template.yaml deleted file mode 100644 index 0093dd8e48..0000000000 --- a/.pipelines/multitenancy/swiftv2-e2e-dummy-job-template.yaml +++ /dev/null @@ -1,126 +0,0 @@ -parameters: - name: "" - displayName: "" - clusterType: "" - clusterName: "" - vmSize: "" - k8sVersion: "" - dependsOn: "" - -stages: - - stage: ${{ parameters.clusterName }} - displayName: Create Cluster - ${{ parameters.displayName }} - dependsOn: - - ${{ parameters.dependsOn }} - - setup - pool: - name: $(BUILD_POOL_NAME_DEFAULT) - variables: - commitID: $[ stagedependencies.setup.env.outputs['EnvironmentalVariables.commitID'] ] - jobs: - - template: ../../templates/create-cluster.yaml - parameters: - name: ${{ parameters.name }} - displayName: ${{ parameters.displayName }} - clusterType: ${{ parameters.clusterType }} - clusterName: ${{ parameters.clusterName }}-$(commitID) - vmSize: ${{ parameters.vmSize }} - k8sVersion: ${{ parameters.k8sVersion }} - dependsOn: ${{ parameters.dependsOn }} - region: $(REGION_DUALSTACKOVERLAY_CLUSTER_TEST) # Dualstack has a specific region requirement - - - stage: ${{ parameters.name }} - condition: and( succeeded(), not(eq(dependencies.dualstackoverlaye2e.result,'SucceededWithIssues')) ) # Cant use parameters in dependencies - displayName: E2E - ${{ parameters.displayName }} - dependsOn: - - setup - - publish - - ${{ parameters.clusterName }} - variables: - GOPATH: "$(Agent.TempDirectory)/go" # Go workspace path - GOBIN: "$(GOPATH)/bin" # Go binaries path - modulePath: "$(GOPATH)/src/github.com/Azure/azure-container-networking" - commitID: $[ stagedependencies.setup.env.outputs['EnvironmentalVariables.commitID'] ] - pool: - name: $(BUILD_POOL_NAME_DEFAULT) - jobs: - - job: ${{ parameters.name }}_linux - displayName: DualStack Overlay Test Suite | Linux - (${{ parameters.name }}) - pool: - name: $(BUILD_POOL_NAME_DEFAULT) - demands: - - agent.os -equals Linux - - Role -equals $(CUSTOM_E2E_ROLE) - steps: - - template: dualstackoverlay-e2e-step-template.yaml - parameters: - name: ${{ parameters.name }} - clusterName: ${{ parameters.clusterName }}-$(commitID) - os: linux - - - job: ${{ parameters.name }}_windows - displayName: DualStack Overlay Test Suite | Windows - (${{ parameters.name }}) - dependsOn: ${{ parameters.name }}_linux - pool: - name: $(BUILD_POOL_NAME_DEFAULT) - demands: - - agent.os -equals Linux - - Role -equals $(CUSTOM_E2E_ROLE) - steps: - - template: dualstackoverlay-e2e-step-template.yaml - parameters: - name: ${{ parameters.name }} - clusterName: ${{ parameters.clusterName }}-$(commitID) - os: windows - vmSizeWin: ${{ parameters.vmSize }} 
# Matching linux vmSize - - - template: ../../cni/k8s-e2e/k8s-e2e-job-template.yaml - parameters: - sub: $(BUILD_VALIDATIONS_SERVICE_CONNECTION) - clusterName: ${{ parameters.clusterName }}-$(commitID) - os: linux - dependsOn: ${{ parameters.name }}_windows - dualstack: true - dns: true - portforward: true - hostport: true - service: true - - - template: ../../cni/k8s-e2e/k8s-e2e-job-template.yaml - parameters: - sub: $(BUILD_VALIDATIONS_SERVICE_CONNECTION) - clusterName: ${{ parameters.clusterName }}-$(commitID) - os: windows - dependsOn: cni_linux - dualstack: true - dns: true - portforward: true - service: true - hostport: true - hybridWin: true - - - job: failedE2ELogs_linux - displayName: "Linux Failure Logs" - dependsOn: - - ${{ parameters.name }}_linux - - cni_linux - condition: in(dependencies.${{ parameters.name }}_linux.result, 'Failed') - steps: - - template: ../../templates/log-template.yaml - parameters: - clusterName: ${{ parameters.clusterName }}-$(commitID) - os: linux - cni: cniv2 - - - job: failedE2ELogs_windows - displayName: "Windows Failure Logs" - dependsOn: - - ${{ parameters.name }}_windows - - cni_windows - condition: in(dependencies.${{ parameters.name }}_windows.result, 'Failed') - steps: - - template: ../../templates/log-template.yaml - parameters: - clusterName: ${{ parameters.clusterName }}-$(commitID) - os: windows - cni: cniv2 diff --git a/.pipelines/multitenancy/swiftv2-e2e-dummy-step-template.yaml b/.pipelines/multitenancy/swiftv2-e2e-dummy-step-template.yaml deleted file mode 100644 index 3a519f83ce..0000000000 --- a/.pipelines/multitenancy/swiftv2-e2e-dummy-step-template.yaml +++ /dev/null @@ -1,140 +0,0 @@ -parameters: - name: "" - clusterName: "" - cni: "dualstack" - os: "" - -steps: - - bash: | - go version - go env - mkdir -p '$(GOBIN)' - mkdir -p '$(GOPATH)/pkg' - mkdir -p '$(modulePath)' - echo '##vso[task.prependpath]$(GOBIN)' - echo '##vso[task.prependpath]$(GOROOT)/bin' - name: "GoEnv" - displayName: "Set up the Go environment" - - - task: KubectlInstaller@0 - inputs: - kubectlVersion: latest - - - task: AzureCLI@2 - inputs: - azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION) - scriptLocation: "inlineScript" - scriptType: "bash" - addSpnToEnvironment: true - inlineScript: | - set -e - make -C ./hack/aks set-kubeconf AZCLI=az CLUSTER=${{ parameters.clusterName }} - name: "kubeconfig" - displayName: "Set Kubeconfig" - - - ${{ if eq(parameters.os, 'linux') }}: - - script: | - kubectl cluster-info - kubectl get node - kubectl get po -owide -A - sudo -E env "PATH=$PATH" make test-load SCALE_UP=32 OS_TYPE=linux CNI_TYPE=cniv2 VALIDATE_STATEFILE=true INSTALL_CNS=true INSTALL_DUALSTACK_OVERLAY=true VALIDATE_DUALSTACK=true CNI_VERSION=$(make cni-version) CNS_VERSION=$(make cns-version) CLEANUP=true - retryCountOnTaskFailure: 3 - name: "integrationTest" - displayName: "Run CNS Integration Tests on AKS DualStack Overlay" - - - script: | - set -e - kubectl get po -owide -A - cd test/integration/datapath - echo "Dualstack Overlay Linux datapath IPv6 test" - go test -count=1 datapath_linux_test.go -timeout 3m -tags connection -run ^TestDatapathLinux$ -tags=connection,integration -isDualStack=true - echo "Dualstack Overlay Linux datapath IPv4 test" - go test -count=1 datapath_linux_test.go -timeout 3m -tags connection -run ^TestDatapathLinux$ -tags=connection,integration - retryCountOnTaskFailure: 3 - name: "DualStack_Overlay_Linux_Tests" - displayName: "DualStack Overlay Linux Tests" - - - task: AzureCLI@1 - inputs: - azureSubscription: 
$(BUILD_VALIDATIONS_SERVICE_CONNECTION) - scriptLocation: "inlineScript" - scriptType: "bash" - addSpnToEnvironment: true - inlineScript: | - set -e - clusterName=${{ parameters.clusterName }} - echo "Restarting nodes" - for val in $(az vmss list -g MC_${clusterName}_${clusterName}_$(REGION_DUALSTACKOVERLAY_CLUSTER_TEST) --query "[].name" -o tsv); do - make -C ./hack/aks restart-vmss AZCLI=az CLUSTER=${clusterName} REGION=$(REGION_DUALSTACKOVERLAY_CLUSTER_TEST) VMSS_NAME=${val} - done - displayName: "Restart Nodes" - - - task: AzureCLI@1 - inputs: - azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION) - scriptLocation: "inlineScript" - scriptType: "bash" - addSpnToEnvironment: true - inlineScript: | - cd test/integration/load - clusterName=${{ parameters.clusterName }} - make -C ./hack/aks set-kubeconf AZCLI=az CLUSTER=${clusterName} - make -C ./hack/aks azcfg AZCLI=az REGION=$(REGION_DUALSTACKOVERLAY_CLUSTER_TEST) - kubectl get pods -A - echo "Validating Node Restart" - CNI_TYPE=dualstack RESTART_CASE=true go test -timeout 30m -tags load -run ^TestValidateState$ - displayName: "Validate Node Restart" - retryCountOnTaskFailure: 3 - - - script: | - echo "validate pod IP assignment before CNS restart" - kubectl get pod -owide -A - echo "validate pod state before CNS restarts" - cd test/integration/load - CNI_TYPE=dualstack go test -timeout 30m -tags load -run ^TestValidateState$ - kubectl rollout restart ds azure-cns -n kube-system - kubectl rollout status ds azure-cns -n kube-system - kubectl get pod -owide -A - echo "validate pods after CNS restarts" - CNI_TYPE=dualstack go test -timeout 30m -tags load -run ^TestValidateState$ - name: "restartCNS_ValidatePodState" - displayName: "Restart CNS and Validate Pod State" - retryCountOnTaskFailure: 3 - - - ${{ if eq(parameters.os, 'windows') }}: - - task: AzureCLI@2 - inputs: - azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION) - scriptLocation: "inlineScript" - scriptType: "bash" - addSpnToEnvironment: true - inlineScript: | - set -e - make -C ./hack/aks windows-nodepool-up AZCLI=az SUB=$(SUB_AZURE_NETWORK_AGENT_BUILD_VALIDATIONS) CLUSTER=${{ parameters.clusterName }} VM_SIZE_WIN=${{ parameters.vmSizeWin }} - echo "Windows nodes have been successfully added to DualStack Overlay Cluster" - kubectl cluster-info - kubectl get node -owide - kubectl get po -owide -A - name: "Add_Windows_Node" - displayName: "Add windows node" - - - script: | - nodeList=`kubectl get node -owide | grep Windows | awk '{print $1}'` - for node in $nodeList; do - taint=`kubectl describe node $node | grep Taints | awk '{print $2}'` - if [ $taint == "node.cloudprovider.kubernetes.io/uninitialized=true:NoSchedule" ]; then - kubectl taint nodes $node node.cloudprovider.kubernetes.io/uninitialized=true:NoSchedule- - fi - done - sudo -E env "PATH=$PATH" make test-load SCALE_UP=32 OS_TYPE=windows CNI_TYPE=cniv2 VALIDATE_STATEFILE=true INSTALL_CNS=true INSTALL_DUALSTACK_OVERLAY=true VALIDATE_DUALSTACK=true CNI_VERSION=$(make cni-version) CNS_VERSION=$(make cns-version) CLEANUP=true - name: "WindowsDualStackOverlayControlPlaneScaleTests" - displayName: "Windows DualStack Overlay ControlPlane Scale Tests" - retryCountOnTaskFailure: 3 - - - script: | - echo "DualStack Overlay DataPath Test" - cd test/integration/datapath - sudo -E env "PATH=$PATH" go test -count=1 datapath_windows_test.go -timeout 3m -tags connection -restartKubeproxy true -run ^TestDatapathWin$ - name: "WindowsDualStackOverlayDatapathTests" - displayName: "Windows DualStack Overlay Datapath Tests" - 
retryCountOnTaskFailure: 3 diff --git a/.pipelines/multitenancy/swiftv2-e2e-job-template.yaml b/.pipelines/multitenancy/swiftv2-e2e-job-template.yaml index 847e7c12f3..1e27e17c83 100644 --- a/.pipelines/multitenancy/swiftv2-e2e-job-template.yaml +++ b/.pipelines/multitenancy/swiftv2-e2e-job-template.yaml @@ -11,7 +11,28 @@ parameters: dummyClusterType: "" dummyClusterDisplayName: "" -stages: +# stages: +# - stage: ${{ parameters.dummyClusterName }} +# displayName: Create Dummy Cluster - ${{ parameters.dummyClusterDisplayName }} +# dependsOn: +# - ${{ parameters.dependsOn }} +# - setup +# pool: +# name: $(BUILD_POOL_NAME_DEFAULT) +# variables: +# commitID: $[ stagedependencies.setup.env.outputs['EnvironmentalVariables.commitID'] ] +# jobs: +# - template: ../templates/create-cluster.yaml +# parameters: +# name: ${{ parameters.name }} +# displayName: ${{ parameters.displayName }} +# clusterType: ${{ parameters.dummyClusterType }} +# clusterName: ${{ parameters.dummyClusterName }}-$(commitID) +# vmSize: ${{ parameters.vmSize }} +# k8sVersion: ${{ parameters.k8sVersion }} +# dependsOn: ${{ parameters.dependsOn }} +# region: $(REGION_DUALSTACKOVERLAY_CLUSTER_TEST) # Swiftv2 has a specific region requirement and its the same with dualstack reagion + - stage: ${{ parameters.clusterName }} displayName: Create Cluster - ${{ parameters.displayName }} dependsOn: @@ -31,28 +52,7 @@ stages: vmSize: ${{ parameters.vmSize }} k8sVersion: ${{ parameters.k8sVersion }} dependsOn: ${{ parameters.dependsOn }} - region: $(REGION_DUALSTACKOVERLAY_CLUSTER_TEST) # Swiftv2 has a specific region requirement and its the same with dualstack reagion - - - stage: ${{ parameters.dummyClusterName }} - displayName: Create Cluster - ${{ parameters.dummyClusterDisplayName }} - dependsOn: - - ${{ parameters.dependsOn }} - - setup - pool: - name: $(BUILD_POOL_NAME_DEFAULT) - variables: - commitID: $[ stagedependencies.setup.env.outputs['EnvironmentalVariables.commitID'] ] - jobs: - - template: ../templates/create-cluster.yaml - parameters: - name: ${{ parameters.name }} - displayName: ${{ parameters.displayName }} - clusterType: ${{ parameters.dummyClusterType }} - clusterName: ${{ parameters.dummyClusterName }}-$(commitID) - vmSize: ${{ parameters.vmSize }} - k8sVersion: ${{ parameters.k8sVersion }} - dependsOn: ${{ parameters.dependsOn }} - region: $(REGION_DUALSTACKOVERLAY_CLUSTER_TEST) # Swiftv2 has a specific region requirement and its the same with dualstack reagion + region: $(REGION_SWIFTV2_CLUSTER_TEST) # Swiftv2 has a specific region requirement and its the same with dualstack reagion - stage: ${{ parameters.name }} condition: and( succeeded(), not(eq(dependencies.dualstackoverlaye2e.result,'SucceededWithIssues')) ) # Cant use parameters in dependencies diff --git a/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml b/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml index 3a519f83ce..93c75fac11 100644 --- a/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml +++ b/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml @@ -1,7 +1,7 @@ parameters: name: "" clusterName: "" - cni: "dualstack" + cni: cni os: "" steps: @@ -20,7 +20,7 @@ steps: inputs: kubectlVersion: latest - - task: AzureCLI@2 + - task: AzureCLI@1 inputs: azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION) scriptLocation: "inlineScript" @@ -29,112 +29,25 @@ steps: inlineScript: | set -e make -C ./hack/aks set-kubeconf AZCLI=az CLUSTER=${{ parameters.clusterName }} - name: "kubeconfig" - displayName: "Set Kubeconfig" - - - ${{ if 
eq(parameters.os, 'linux') }}: - - script: | + ls -lah + pwd kubectl cluster-info - kubectl get node kubectl get po -owide -A - sudo -E env "PATH=$PATH" make test-load SCALE_UP=32 OS_TYPE=linux CNI_TYPE=cniv2 VALIDATE_STATEFILE=true INSTALL_CNS=true INSTALL_DUALSTACK_OVERLAY=true VALIDATE_DUALSTACK=true CNI_VERSION=$(make cni-version) CNS_VERSION=$(make cns-version) CLEANUP=true - retryCountOnTaskFailure: 3 - name: "integrationTest" - displayName: "Run CNS Integration Tests on AKS DualStack Overlay" - - - script: | - set -e - kubectl get po -owide -A - cd test/integration/datapath - echo "Dualstack Overlay Linux datapath IPv6 test" - go test -count=1 datapath_linux_test.go -timeout 3m -tags connection -run ^TestDatapathLinux$ -tags=connection,integration -isDualStack=true - echo "Dualstack Overlay Linux datapath IPv4 test" - go test -count=1 datapath_linux_test.go -timeout 3m -tags connection -run ^TestDatapathLinux$ -tags=connection,integration - retryCountOnTaskFailure: 3 - name: "DualStack_Overlay_Linux_Tests" - displayName: "DualStack Overlay Linux Tests" - - - task: AzureCLI@1 - inputs: - azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION) - scriptLocation: "inlineScript" - scriptType: "bash" - addSpnToEnvironment: true - inlineScript: | - set -e - clusterName=${{ parameters.clusterName }} - echo "Restarting nodes" - for val in $(az vmss list -g MC_${clusterName}_${clusterName}_$(REGION_DUALSTACKOVERLAY_CLUSTER_TEST) --query "[].name" -o tsv); do - make -C ./hack/aks restart-vmss AZCLI=az CLUSTER=${clusterName} REGION=$(REGION_DUALSTACKOVERLAY_CLUSTER_TEST) VMSS_NAME=${val} - done - displayName: "Restart Nodes" - - - task: AzureCLI@1 - inputs: - azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION) - scriptLocation: "inlineScript" - scriptType: "bash" - addSpnToEnvironment: true - inlineScript: | - cd test/integration/load - clusterName=${{ parameters.clusterName }} - make -C ./hack/aks set-kubeconf AZCLI=az CLUSTER=${clusterName} - make -C ./hack/aks azcfg AZCLI=az REGION=$(REGION_DUALSTACKOVERLAY_CLUSTER_TEST) - kubectl get pods -A - echo "Validating Node Restart" - CNI_TYPE=dualstack RESTART_CASE=true go test -timeout 30m -tags load -run ^TestValidateState$ - displayName: "Validate Node Restart" - retryCountOnTaskFailure: 3 - - - script: | - echo "validate pod IP assignment before CNS restart" - kubectl get pod -owide -A - echo "validate pod state before CNS restarts" - cd test/integration/load - CNI_TYPE=dualstack go test -timeout 30m -tags load -run ^TestValidateState$ - kubectl rollout restart ds azure-cns -n kube-system - kubectl rollout status ds azure-cns -n kube-system - kubectl get pod -owide -A - echo "validate pods after CNS restarts" - CNI_TYPE=dualstack go test -timeout 30m -tags load -run ^TestValidateState$ - name: "restartCNS_ValidatePodState" - displayName: "Restart CNS and Validate Pod State" - retryCountOnTaskFailure: 3 - - - ${{ if eq(parameters.os, 'windows') }}: - - task: AzureCLI@2 - inputs: - azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION) - scriptLocation: "inlineScript" - scriptType: "bash" - addSpnToEnvironment: true - inlineScript: | - set -e - make -C ./hack/aks windows-nodepool-up AZCLI=az SUB=$(SUB_AZURE_NETWORK_AGENT_BUILD_VALIDATIONS) CLUSTER=${{ parameters.clusterName }} VM_SIZE_WIN=${{ parameters.vmSizeWin }} - echo "Windows nodes have been successfully added to DualStack Overlay Cluster" - kubectl cluster-info - kubectl get node -owide - kubectl get po -owide -A - name: "Add_Windows_Node" - displayName: "Add windows node" - 
- - script: | - nodeList=`kubectl get node -owide | grep Windows | awk '{print $1}'` - for node in $nodeList; do - taint=`kubectl describe node $node | grep Taints | awk '{print $2}'` - if [ $taint == "node.cloudprovider.kubernetes.io/uninitialized=true:NoSchedule" ]; then - kubectl taint nodes $node node.cloudprovider.kubernetes.io/uninitialized=true:NoSchedule- - fi - done - sudo -E env "PATH=$PATH" make test-load SCALE_UP=32 OS_TYPE=windows CNI_TYPE=cniv2 VALIDATE_STATEFILE=true INSTALL_CNS=true INSTALL_DUALSTACK_OVERLAY=true VALIDATE_DUALSTACK=true CNI_VERSION=$(make cni-version) CNS_VERSION=$(make cns-version) CLEANUP=true - name: "WindowsDualStackOverlayControlPlaneScaleTests" - displayName: "Windows DualStack Overlay ControlPlane Scale Tests" - retryCountOnTaskFailure: 3 + echo "Apply the pod network yaml to start the delegation" + less test/integration/manifests/swiftv2/podnetwork.yaml + envsubst '${DUMMY_CLUSTER_SUBNET_TOKEN},${SUBNET_GUID},${SUBNET_RESOURCE_ID},${VNET_GUID}' < test/integration/manifests/cilium/v${DIR}/cilium-agent/templates/daemonset.yaml | kubectl apply -f - + kubectl apply -f test/integration/manifests/swiftv2/podnetwork.yaml + kubectl get pn + kubectl describe pn + echo "Apply the pod network instance yaml to reserve IP" + kubectl apply -f test/integration/manifests/swiftv2/pni.yaml + kubectl get pni + kubectl describe pni + echo "Start the pod using the reserved IP" + kubectl apply -f test/integration/manifests/swiftv2/mtpod.yaml + kubectl get pod -o wide + echo "Start the connection test" + kubectl exec mtpodjae2 -it -t -- /bin/bash + ip a + ping -c 3 172.25.0.22 - - script: | - echo "DualStack Overlay DataPath Test" - cd test/integration/datapath - sudo -E env "PATH=$PATH" go test -count=1 datapath_windows_test.go -timeout 3m -tags connection -restartKubeproxy true -run ^TestDatapathWin$ - name: "WindowsDualStackOverlayDatapathTests" - displayName: "Windows DualStack Overlay Datapath Tests" - retryCountOnTaskFailure: 3 diff --git a/.pipelines/pipeline.yaml b/.pipelines/pipeline.yaml index e54ab5151a..ae7f083417 100644 --- a/.pipelines/pipeline.yaml +++ b/.pipelines/pipeline.yaml @@ -593,9 +593,12 @@ stages: cilium_dualstackoverlay_e2e: name: cilium_dualstackoverlay_e2e clusterName: "cildsovere2e" - aswiftv2_e2e: + swiftv2_e2e: name: swiftv2_e2e clusterName: "mtcluster" + swiftv2_dummy_e2e: + name: swiftv2_dummy_e2e + clusterName: "swiftv2dummy" steps: - template: templates/delete-cluster.yaml parameters: diff --git a/test/integration/manifests/swiftv2/mtpod.yaml b/test/integration/manifests/swiftv2/mtpod.yaml new file mode 100644 index 0000000000..8e285df6a4 --- /dev/null +++ b/test/integration/manifests/swiftv2/mtpod.yaml @@ -0,0 +1,20 @@ +apiVersion: v1 +kind: Pod +metadata: + labels: + kubernetes.azure.com/pod-network: aksswiftvnet + kubernetes.azure.com/pod-network-instance: pni1 + name: mtpodjae + namespace: default +spec: + containers: + - image: nicolaka/netshoot:latest + imagePullPolicy: Always + name: mtpodjae + command: ["/bin/bash"] + args: ["-c", "while true; do ping localhost; sleep 60;done"] + securityContext: + privileged: true + ports: + - containerPort: 80 + protocol: TCP \ No newline at end of file diff --git a/test/integration/manifests/swiftv2/pni.yaml b/test/integration/manifests/swiftv2/pni.yaml new file mode 100644 index 0000000000..4033d9d025 --- /dev/null +++ b/test/integration/manifests/swiftv2/pni.yaml @@ -0,0 +1,7 @@ +apiVersion: multitenancy.acn.azure.com/v1alpha1 +kind: PodNetworkInstance +metadata: + name: pni1 +spec: + 
podnetwork: aksswiftvnet + podIPReservationSize: 2 diff --git a/test/integration/manifests/swiftv2/podnetwork.yaml b/test/integration/manifests/swiftv2/podnetwork.yaml new file mode 100644 index 0000000000..513a4bf480 --- /dev/null +++ b/test/integration/manifests/swiftv2/podnetwork.yaml @@ -0,0 +1,10 @@ +apiVersion: multitenancy.acn.azure.com/v1alpha1 +kind: PodNetwork +metadata: + labels: + kubernetes.azure.com/override-subnet-token: DUMMY_CLUSTER_SUBNET_TOKEN + name: aksswiftvnet +spec: + subnetGUID: SUBNET_GUID + subnetResourceID: SUBNET_RESOURCE_ID + vnetGUID: VNET_GUID \ No newline at end of file From 6f7e24ef3e3aa7a2c516070d94ab00beed15a94d Mon Sep 17 00:00:00 2001 From: Shufang Date: Tue, 19 Mar 2024 14:05:35 -0700 Subject: [PATCH 07/49] Add variables. --- .pipelines/multitenancy/swiftv2-e2e-step-template.yaml | 5 +++-- test/integration/manifests/swiftv2/mtpod.yaml | 4 ++-- test/integration/manifests/swiftv2/podnetwork.yaml | 4 ++-- 3 files changed, 7 insertions(+), 6 deletions(-) diff --git a/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml b/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml index 93c75fac11..7a3d1a53e7 100644 --- a/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml +++ b/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml @@ -35,8 +35,8 @@ steps: kubectl get po -owide -A echo "Apply the pod network yaml to start the delegation" less test/integration/manifests/swiftv2/podnetwork.yaml - envsubst '${DUMMY_CLUSTER_SUBNET_TOKEN},${SUBNET_GUID},${SUBNET_RESOURCE_ID},${VNET_GUID}' < test/integration/manifests/cilium/v${DIR}/cilium-agent/templates/daemonset.yaml | kubectl apply -f - - kubectl apply -f test/integration/manifests/swiftv2/podnetwork.yaml + envsubst '${SUBNET_TOKEN},${SUBNET_GUID},${SUBNET_RESOURCE_ID},${VNET_GUID}' < test/integration/manifests/cilium/v${DIR}/cilium-agent/templates/daemonset.yaml | kubectl apply -f - + less test/integration/manifests/swiftv2/podnetwork.yaml kubectl get pn kubectl describe pn echo "Apply the pod network instance yaml to reserve IP" @@ -46,6 +46,7 @@ steps: echo "Start the pod using the reserved IP" kubectl apply -f test/integration/manifests/swiftv2/mtpod.yaml kubectl get pod -o wide + kubectl get po -owide -A echo "Start the connection test" kubectl exec mtpodjae2 -it -t -- /bin/bash ip a diff --git a/test/integration/manifests/swiftv2/mtpod.yaml b/test/integration/manifests/swiftv2/mtpod.yaml index 8e285df6a4..79b2cc23fa 100644 --- a/test/integration/manifests/swiftv2/mtpod.yaml +++ b/test/integration/manifests/swiftv2/mtpod.yaml @@ -4,13 +4,13 @@ metadata: labels: kubernetes.azure.com/pod-network: aksswiftvnet kubernetes.azure.com/pod-network-instance: pni1 - name: mtpodjae + name: mtpod namespace: default spec: containers: - image: nicolaka/netshoot:latest imagePullPolicy: Always - name: mtpodjae + name: mtpod command: ["/bin/bash"] args: ["-c", "while true; do ping localhost; sleep 60;done"] securityContext: diff --git a/test/integration/manifests/swiftv2/podnetwork.yaml b/test/integration/manifests/swiftv2/podnetwork.yaml index 513a4bf480..acb4a93702 100644 --- a/test/integration/manifests/swiftv2/podnetwork.yaml +++ b/test/integration/manifests/swiftv2/podnetwork.yaml @@ -2,8 +2,8 @@ apiVersion: multitenancy.acn.azure.com/v1alpha1 kind: PodNetwork metadata: labels: - kubernetes.azure.com/override-subnet-token: DUMMY_CLUSTER_SUBNET_TOKEN - name: aksswiftvnet + kubernetes.azure.com/override-subnet-token: SUBNET_TOKEN + name: aksswiftvnet172 spec: subnetGUID: SUBNET_GUID subnetResourceID: 
SUBNET_RESOURCE_ID From 84af0761d25c90e1e6fa32098e3d78def912c630 Mon Sep 17 00:00:00 2001 From: Shufang Date: Tue, 19 Mar 2024 22:20:47 -0700 Subject: [PATCH 08/49] Fix the format and make it a valid yaml. --- .../swiftv2-e2e-job-template.yaml | 28 ++----------------- .pipelines/pipeline.yaml | 11 -------- 2 files changed, 2 insertions(+), 37 deletions(-) diff --git a/.pipelines/multitenancy/swiftv2-e2e-job-template.yaml b/.pipelines/multitenancy/swiftv2-e2e-job-template.yaml index 1e27e17c83..cb9768dffe 100644 --- a/.pipelines/multitenancy/swiftv2-e2e-job-template.yaml +++ b/.pipelines/multitenancy/swiftv2-e2e-job-template.yaml @@ -7,32 +7,8 @@ parameters: k8sVersion: "" dependsOn: "" nodePoolName: "" - dummyClusterName: "" - dummyClusterType: "" - dummyClusterDisplayName: "" - -# stages: -# - stage: ${{ parameters.dummyClusterName }} -# displayName: Create Dummy Cluster - ${{ parameters.dummyClusterDisplayName }} -# dependsOn: -# - ${{ parameters.dependsOn }} -# - setup -# pool: -# name: $(BUILD_POOL_NAME_DEFAULT) -# variables: -# commitID: $[ stagedependencies.setup.env.outputs['EnvironmentalVariables.commitID'] ] -# jobs: -# - template: ../templates/create-cluster.yaml -# parameters: -# name: ${{ parameters.name }} -# displayName: ${{ parameters.displayName }} -# clusterType: ${{ parameters.dummyClusterType }} -# clusterName: ${{ parameters.dummyClusterName }}-$(commitID) -# vmSize: ${{ parameters.vmSize }} -# k8sVersion: ${{ parameters.k8sVersion }} -# dependsOn: ${{ parameters.dependsOn }} -# region: $(REGION_DUALSTACKOVERLAY_CLUSTER_TEST) # Swiftv2 has a specific region requirement and its the same with dualstack reagion +stages: - stage: ${{ parameters.clusterName }} displayName: Create Cluster - ${{ parameters.displayName }} dependsOn: @@ -52,7 +28,7 @@ parameters: vmSize: ${{ parameters.vmSize }} k8sVersion: ${{ parameters.k8sVersion }} dependsOn: ${{ parameters.dependsOn }} - region: $(REGION_SWIFTV2_CLUSTER_TEST) # Swiftv2 has a specific region requirement and its the same with dualstack reagion + region: $(REGION_SWIFTV2_CLUSTER_TEST) # Swiftv2 has a specific region requirements - stage: ${{ parameters.name }} condition: and( succeeded(), not(eq(dependencies.dualstackoverlaye2e.result,'SucceededWithIssues')) ) # Cant use parameters in dependencies diff --git a/.pipelines/pipeline.yaml b/.pipelines/pipeline.yaml index ae7f083417..fb5496a25a 100644 --- a/.pipelines/pipeline.yaml +++ b/.pipelines/pipeline.yaml @@ -519,17 +519,6 @@ stages: dummyClusterType: "swiftv2-dummy-cluster-up" dummyClusterDisplayName: Swiftv2 Multitenancy Dummy Cluster - # Swiftv2 E2E tests with dummy cluster start up - # - template: multitenancy/swiftv2-e2e-job-template.yaml - # parameters: - # name: "swiftv2_e2e_dummy" - # displayName: Swiftv2 Multitenancy Dummy Cluster - # os: linux - # clusterType: swiftv2-dummy-cluster-up - # clusterName: "mtcluster" - # vmSize: Standard_D4_v2 - # dependsOn: "test" - - stage: delete displayName: Delete Clusters condition: always() From 072dae17ce053db0355d99719b6f8b59113fe3d3 Mon Sep 17 00:00:00 2001 From: Shufang Date: Tue, 2 Apr 2024 23:45:31 -0700 Subject: [PATCH 09/49] Add datapath tests for swiftv2. 
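
The new suite lives under test/integration/swiftv2 and mirrors the existing
Linux datapath tests. Going by the build tags and the doc comment in the file
added below, a local run against a prepared cluster would look roughly like:

    cd test/integration/swiftv2
    go test -count=1 datapath_swiftv2.go -timeout 3m -tags=connection,integration -run ^TestDatapathLinux$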
From 072dae17ce053db0355d99719b6f8b59113fe3d3 Mon Sep 17 00:00:00 2001
From: Shufang
Date: Tue, 2 Apr 2024 23:45:31 -0700
Subject: [PATCH 09/49] Add datapath tests for swiftv2.

---
 .../swiftv2-e2e-job-template.yaml            |  26 +-
 .../swiftv2-e2e-step-template.yaml           |   4 +-
 hack/aks/Makefile                            |   2 +-
 test/integration/swiftv2/datapath_swiftv2.go | 223 ++++++++++++++++++
 4 files changed, 245 insertions(+), 10 deletions(-)
 create mode 100644 test/integration/swiftv2/datapath_swiftv2.go

diff --git a/.pipelines/multitenancy/swiftv2-e2e-job-template.yaml b/.pipelines/multitenancy/swiftv2-e2e-job-template.yaml
index cb9768dffe..8c5603f2d1 100644
--- a/.pipelines/multitenancy/swiftv2-e2e-job-template.yaml
+++ b/.pipelines/multitenancy/swiftv2-e2e-job-template.yaml
@@ -31,7 +31,7 @@ stages:
           region: $(REGION_SWIFTV2_CLUSTER_TEST) # Swiftv2 has a specific region requirements

 - stage: ${{ parameters.name }}
-  condition: and( succeeded(), not(eq(dependencies.dualstackoverlaye2e.result,'SucceededWithIssues')) ) # Cant use parameters in dependencies
+  # condition: and( succeeded(), not(eq(dependencies.dualstackoverlaye2e.result,'SucceededWithIssues')) ) # Cant use parameters in dependencies
   displayName: E2E - ${{ parameters.displayName }}
   dependsOn:
   - setup
@@ -45,8 +45,8 @@ stages:
   pool:
     name: $(BUILD_POOL_NAME_DEFAULT)
   jobs:
-    - job: ${{ parameters.name }}_linux
-      displayName: Swiftv2 Multitenancy E2E Test Suite | Linux - (${{ parameters.name }})
+    - job: ${{ parameters.name }}
+      displayName: Swiftv2 Multitenancy E2E Test Suite - (${{ parameters.name }})
      pool:
        name: $(BUILD_POOL_NAME_DEFAULT)
        demands:
@@ -59,11 +59,23 @@ stages:
            clusterName: ${{ parameters.clusterName }}-$(commitID)
            os: linux

+    - template: ../cni/k8s-e2e/k8s-e2e-job-template.yaml
+      parameters:
+        sub: $(BUILD_VALIDATIONS_SERVICE_CONNECTION)
+        clusterName: ${{ parameters.clusterName }}-$(commitID)
+        os: ${{ parameters.os }}
+        dependsOn: ${{ parameters.name }}
+        datapath: true
+        dns: true
+        portforward: true
+        hostport: true
+        service: true
+
-    - job: failedE2ELogs_linux
-      displayName: "Linux Failure Logs"
+    - job: failedE2ELogs
+      displayName: "Failure Logs"
      dependsOn:
-        - ${{ parameters.name }}_linux
-      condition: in(dependencies.${{ parameters.name }}_linux.result, 'Failed')
+        - ${{ parameters.name }}
+      condition: failed()
      steps:
        - template: ../templates/log-template.yaml
          parameters:
diff --git a/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml b/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml
index 7a3d1a53e7..acf879ddec 100644
--- a/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml
+++ b/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml
@@ -1,7 +1,7 @@
 parameters:
   name: ""
   clusterName: ""
-  cni: cni
+  cni: cniv2
   os: ""

 steps:
@@ -35,7 +35,7 @@ steps:
         kubectl get po -owide -A
         echo "Apply the pod network yaml to start the delegation"
         less test/integration/manifests/swiftv2/podnetwork.yaml
-        envsubst '${SUBNET_TOKEN},${SUBNET_GUID},${SUBNET_RESOURCE_ID},${VNET_GUID}' < test/integration/manifests/cilium/v${DIR}/cilium-agent/templates/daemonset.yaml | kubectl apply -f -
+        envsubst '${SUBNET_TOKEN},${SUBNET_GUID},${SUBNET_RESOURCE_ID},${VNET_GUID}' < test/integration/manifests/swiftv2/podnetwork.yaml | kubectl apply -f -
         less test/integration/manifests/swiftv2/podnetwork.yaml
         kubectl get pn
         kubectl describe pn
diff --git a/hack/aks/Makefile b/hack/aks/Makefile
index d03a4ee99e..ca8b7efea7 100644
--- a/hack/aks/Makefile
+++ b/hack/aks/Makefile
@@ -238,7 +238,7 @@ swiftv2-multitenancy-cluster-up: rg-up
		--network-plugin azure \
		--network-plugin-mode overlay \
		--kubernetes-version 1.28 \
-		--nodepool-name "mtapool0" \
+		--nodepool-name "mtapool" \
		--nodepool-tags fastpathenabled=true \
		--no-ssh-key \
		--yes
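With this patch the envsubst step finally targets the swiftv2 manifest rather than the cilium daemonset path it was copied from. For reproducing the substitution outside the pipeline, os.ExpandEnv is a close Go analogue; note one difference: envsubst with an explicit '${...}' list replaces only the named variables, while os.ExpandEnv replaces every $VAR it finds. A minimal sketch, assuming the four variables are exported in the environment:

// Render the PodNetwork manifest locally the way the pipeline's
// envsubst step does, then pipe the output to `kubectl apply -f -`.
package main

import (
	"fmt"
	"os"
)

func main() {
	raw, err := os.ReadFile("test/integration/manifests/swiftv2/podnetwork.yaml")
	if err != nil {
		panic(err)
	}
	// Expands $SUBNET_TOKEN, $SUBNET_GUID, $SUBNET_RESOURCE_ID, $VNET_GUID
	// (and any other $VAR present) from the process environment.
	rendered := os.ExpandEnv(string(raw))
	fmt.Print(rendered)
}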
diff --git a/test/integration/swiftv2/datapath_swiftv2.go b/test/integration/swiftv2/datapath_swiftv2.go
new file mode 100644
index 0000000000..b8a0b7f955
--- /dev/null
+++ b/test/integration/swiftv2/datapath_swiftv2.go
@@ -0,0 +1,223 @@
+//go:build connection
+
+package connection
+
+import (
+	"context"
+	"flag"
+	"testing"
+	"time"
+
+	k8s "github.com/Azure/azure-container-networking/test/integration"
+	"github.com/Azure/azure-container-networking/test/integration/goldpinger"
+	"github.com/Azure/azure-container-networking/test/internal/kubernetes"
+	"github.com/Azure/azure-container-networking/test/internal/retry"
+	"github.com/pkg/errors"
+
+	appsv1 "k8s.io/api/apps/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+const (
+	podLabelKey                = "app"
+	podCount                   = 2
+	nodepoolKey                = "mtapool"
+	LinuxDeployIPV4            = "../manifests/datapath/linux-deployment.yaml"
+	maxRetryDelaySeconds       = 10
+	defaultTimeoutSeconds      = 120
+	defaultRetryDelaySeconds   = 1
+	goldpingerRetryCount       = 24
+	goldpingerDelayTimeSeconds = 5
+	gpFolder                   = "../manifests/goldpinger"
+	gpClusterRolePath          = gpFolder + "/cluster-role.yaml"
+	gpClusterRoleBindingPath   = gpFolder + "/cluster-role-binding.yaml"
+	gpServiceAccountPath       = gpFolder + "/service-account.yaml"
+	gpDaemonset                = gpFolder + "/daemonset.yaml"
+	gpDaemonsetIPv6            = gpFolder + "/daemonset-ipv6.yaml"
+	gpDeployment               = gpFolder + "/deployment.yaml"
+)
+
+var (
+	podPrefix        = flag.String("podName", "goldpinger", "Prefix for test pods")
+	podNamespace     = flag.String("namespace", "default", "Namespace for test pods")
+	nodepoolSelector = flag.String("nodepoolSelector", "mtapool", "Provides nodepool as a Linux Node-Selector for pods")
+	// TODO: add flag to support dual nic scenario
+	isDualStack    = flag.Bool("isDualStack", false, "whether system supports dualstack scenario")
+	defaultRetrier = retry.Retrier{
+		Attempts: 10,
+		Delay:    defaultRetryDelaySeconds * time.Second,
+	}
+)
+
+/*
+This test assumes that you have the current credentials loaded in your default kubeconfig for a
+k8s cluster with a Linux nodepool consisting of at least 2 Linux nodes.
+*** The expected nodepool name is mtapool, if the nodepool has a different name ensure that you change nodepoolSelector with:
+	-nodepoolSelector="yournodepoolname"
+
+To run the test use one of the following commands:
+go test -count=1 test/integration/datapath/datapath_linux_test.go -timeout 3m -tags connection -run ^TestDatapathLinux$ -tags=connection,integration
+
+
+This test checks pod to pod, pod to node, and pod to Internet connectivity
+
+Timeout context is controlled by the -timeout flag.
+
+*/
+
+func setupLinuxEnvironment(t *testing.T) {
+	ctx := context.Background()
+
+	t.Log("Create Clientset")
+	clientset := kubernetes.MustGetClientset()
+
+	t.Log("Create Label Selectors")
+	podLabelSelector := kubernetes.CreateLabelSelector(podLabelKey, podPrefix)
+	nodeLabelSelector := kubernetes.CreateLabelSelector(nodepoolKey, nodepoolSelector)
+
+	t.Log("Get Nodes")
+	nodes, err := kubernetes.GetNodeListByLabelSelector(ctx, clientset, nodeLabelSelector)
+	if err != nil {
+		t.Fatalf("could not get k8s node list: %v", err)
+	}
+
+	t.Log("Creating Linux pods through deployment")
+
+	// run goldpinger ipv4 and ipv6 test cases separately
+	var daemonset appsv1.DaemonSet
+	var deployment appsv1.Deployment
+
+	deployment = kubernetes.MustParseDeployment(LinuxDeployIPV4)
+	daemonset = kubernetes.MustParseDaemonSet(gpDaemonset)
+
+	// setup common RBAC, ClusterRole, ClusterRoleBinding, ServiceAccount
+	rbacSetupFn := kubernetes.MustSetUpClusterRBAC(ctx, clientset, gpClusterRolePath, gpClusterRoleBindingPath, gpServiceAccountPath)
+
+	// Fields for overwriting existing deployment yaml.
+	// Defaults from flags will not change anything
+	deployment.Spec.Selector.MatchLabels[podLabelKey] = *podPrefix
+	deployment.Spec.Template.ObjectMeta.Labels[podLabelKey] = *podPrefix
+	deployment.Spec.Template.Spec.NodeSelector[nodepoolKey] = *nodepoolSelector
+	deployment.Name = *podPrefix
+	deployment.Namespace = *podNamespace
+	daemonset.Namespace = *podNamespace
+
+	deploymentsClient := clientset.AppsV1().Deployments(*podNamespace)
+	kubernetes.MustCreateDeployment(ctx, deploymentsClient, deployment)
+
+	daemonsetClient := clientset.AppsV1().DaemonSets(daemonset.Namespace)
+	kubernetes.MustCreateDaemonset(ctx, daemonsetClient, daemonset)
+
+	t.Cleanup(func() {
+		t.Log("cleaning up resources")
+		rbacSetupFn()
+
+		if err := deploymentsClient.Delete(ctx, deployment.Name, metav1.DeleteOptions{}); err != nil {
+			t.Log(err)
+		}
+
+		if err := daemonsetClient.Delete(ctx, daemonset.Name, metav1.DeleteOptions{}); err != nil {
+			t.Log(err)
+		}
+	})
+
+	t.Log("Waiting for pods to be running state")
+	err = kubernetes.WaitForPodsRunning(ctx, clientset, *podNamespace, podLabelSelector)
+	if err != nil {
+		t.Fatalf("Pods are not in running state due to %+v", err)
+	}
+
+	t.Log("Successfully created customer Linux pods")
+
+	t.Log("Checking Linux test environment")
+	for _, node := range nodes.Items {
+		pods, err := kubernetes.GetPodsByNode(ctx, clientset, *podNamespace, podLabelSelector, node.Name)
+		if err != nil {
+			t.Fatalf("could not get pods by node: %v", err)
+		}
+		if len(pods.Items) <= 1 {
+			t.Fatalf("Less than 2 pods on node: %v", node.Name)
+		}
+	}
+
+	t.Log("Linux test environment ready")
+}
+
+func TestDatapathLinux(t *testing.T) {
+	ctx := context.Background()
+
+	t.Log("Get REST config")
+	restConfig := kubernetes.MustGetRestConfig()
+
+	t.Log("Create Clientset")
+	clientset := kubernetes.MustGetClientset()
+
+	setupLinuxEnvironment(t)
+	podLabelSelector := kubernetes.CreateLabelSelector(podLabelKey, podPrefix)
+
+	t.Run("Linux ping tests", func(t *testing.T) {
+		// Check goldpinger health
+		t.Run("all pods have IPs assigned", func(t *testing.T) {
+			err := kubernetes.WaitForPodsRunning(ctx, clientset, *podNamespace, podLabelSelector)
+			if err != nil {
+				t.Fatalf("Pods are not in running state due to %+v", err)
+			}
+			t.Log("all pods have been allocated IPs")
+		})
+
+		t.Run("all linux pods can ping each other", func(t *testing.T) {
+			clusterCheckCtx, cancel := context.WithTimeout(ctx, 3*time.Minute)
+			defer cancel()
+
+			pfOpts := k8s.PortForwardingOpts{
+				Namespace:     *podNamespace,
+				LabelSelector: podLabelSelector,
+				LocalPort:     9090,
+				DestPort:      8080,
+			}
+
+			pf, err := k8s.NewPortForwarder(restConfig, t, pfOpts)
+			if err != nil {
+				t.Fatal(err)
+			}
+
+			portForwardCtx, cancel := context.WithTimeout(ctx, defaultTimeoutSeconds*time.Second)
+			defer cancel()
+
+			portForwardFn := func() error {
+				err := pf.Forward(portForwardCtx)
+				if err != nil {
+					t.Logf("unable to start port forward: %v", err)
+					return err
+				}
+				return nil
+			}
+
+			if err := defaultRetrier.Do(portForwardCtx, portForwardFn); err != nil {
+				t.Fatalf("could not start port forward within %d: %v", defaultTimeoutSeconds, err)
+			}
+			defer pf.Stop()
+
+			gpClient := goldpinger.Client{Host: pf.Address()}
+			clusterCheckFn := func() error {
+				clusterState, err := gpClient.CheckAll(clusterCheckCtx)
+				if err != nil {
+					return err
+				}
+				stats := goldpinger.ClusterStats(clusterState)
+				stats.PrintStats()
+				if stats.AllPingsHealthy() {
+					return nil
+				}
+
+				return errors.New("not all pings are healthy")
+			}
+			retrier := retry.Retrier{Attempts: goldpingerRetryCount, Delay: goldpingerDelayTimeSeconds * time.Second}
+			if err := retrier.Do(clusterCheckCtx, clusterCheckFn); err != nil {
+				t.Fatalf("goldpinger pods network health could not reach healthy state after %d seconds: %v", goldpingerRetryCount*goldpingerDelayTimeSeconds, err)
+			}
+
+			t.Log("all pings successful!")
+		})
+	})
+}

From a8450fa95f0963a4d4b6a34b99754f27f3b6cb28 Mon Sep 17 00:00:00 2001
From: Shufang
Date: Wed, 3 Apr 2024 11:39:26 -0700
Subject: [PATCH 10/49] Link the tests with the pipeline steps.

---
 .pipelines/multitenancy/swiftv2-e2e-step-template.yaml | 12 ++++++++++++
 1 file changed, 12 insertions(+)

diff --git a/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml b/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml
index acf879ddec..f57d6f3a44 100644
--- a/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml
+++ b/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml
@@ -51,4 +51,16 @@ steps:
         kubectl exec mtpodjae2 -it -t -- /bin/bash
         ip a
         ping -c 3 172.25.0.22
+      name: "start_swiftv2_pods"
+      displayName: "Start Swiftv2 Pods"
+
+  - script: |
+      set -e
+      kubectl get po -owide -A
+      cd test/integration/swiftv2
+      echo "Swiftv2 test"
+      go test -count=1 datapath_linux_test.go -timeout 3m -tags connection -run ^TestDatapathLinux$ -tags=connection,integration
+    retryCountOnTaskFailure: 3
+    name: "Swiftv2_Tests"
+    displayName: "Swiftv2 Tests"
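Patch 10 wires the new test into the pipeline through go test's -tags flag. The -tags value must match the //go:build constraint at the top of the test file, otherwise the file is silently excluded from the build and go test reports nothing to run, which is what several later patches in this series end up adjusting. A minimal, hypothetical illustration of that gating:

// This file compiles into `go test` only when -tags includes "connection":
//   go test -tags connection -run ^TestTagGateCompiles$ .
//
//go:build connection

package connection

import "testing"

// TestTagGateCompiles is a hypothetical placeholder; without the matching
// -tags value the whole file is excluded and the test never appears.
func TestTagGateCompiles(t *testing.T) {
	t.Log("connection tag is active")
}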
From 337b281cba4fa81a64b2b3b15ca0eaf0da7fe72f Mon Sep 17 00:00:00 2001
From: Shufang
Date: Wed, 3 Apr 2024 16:07:43 -0700
Subject: [PATCH 11/49] Update the testing file name.

---
 .pipelines/multitenancy/swiftv2-e2e-step-template.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml b/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml
index f57d6f3a44..d014e514c1 100644
--- a/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml
+++ b/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml
@@ -59,7 +59,7 @@ steps:
       kubectl get po -owide -A
       cd test/integration/swiftv2
       echo "Swiftv2 test"
-      go test -count=1 datapath_linux_test.go -timeout 3m -tags connection -run ^TestDatapathLinux$ -tags=connection,integration
+      go test -count=1 datapath_swiftv2.go -timeout 3m -tags connection -run ^TestDatapathLinux$ -tags=connection,integration
     retryCountOnTaskFailure: 3
     name: "Swiftv2_Tests"
     displayName: "Swiftv2 Tests"

From 6e03ae1086c576e4e5206e512407211423dbb218 Mon Sep 17 00:00:00 2001
From: Shufang
Date: Wed, 3 Apr 2024 20:31:21 -0700
Subject: [PATCH 12/49] Remove unnecessary comment.

---
 test/integration/swiftv2/datapath_swiftv2.go | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/test/integration/swiftv2/datapath_swiftv2.go b/test/integration/swiftv2/datapath_swiftv2.go
index b8a0b7f955..9cac6dfe33 100644
--- a/test/integration/swiftv2/datapath_swiftv2.go
+++ b/test/integration/swiftv2/datapath_swiftv2.go
@@ -55,10 +55,6 @@ k8s cluster with a Linux nodepool consisting of at least 2 Linux nodes.
 *** The expected nodepool name is mtapool, if the nodepool has a different name ensure that you change nodepoolSelector with:
 	-nodepoolSelector="yournodepoolname"

-To run the test use one of the following commands:
-go test -count=1 test/integration/datapath/datapath_linux_test.go -timeout 3m -tags connection -run ^TestDatapathLinux$ -tags=connection,integration
-
-
 This test checks pod to pod, pod to node, and pod to Internet connectivity

 Timeout context is controlled by the -timeout flag.

From 816a17f157b9780963888303dddea38b453762b6 Mon Sep 17 00:00:00 2001
From: Shufang
Date: Thu, 4 Apr 2024 00:22:04 -0700
Subject: [PATCH 13/49] Fix the pod name

---
 .pipelines/multitenancy/swiftv2-e2e-step-template.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml b/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml
index d014e514c1..57b5c93314 100644
--- a/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml
+++ b/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml
@@ -48,7 +48,7 @@ steps:
         kubectl get pod -o wide
         echo "Start the connection test"
-        kubectl exec mtpodjae2 -it -t -- /bin/bash
+        kubectl exec mtpod -it -t -- /bin/bash
         ip a
         ping -c 3 172.25.0.22

From 8ab41adca02ab04026d4ac5226aa7c8519cf92d4 Mon Sep 17 00:00:00 2001
From: Shufang
Date: Thu, 4 Apr 2024 14:18:45 -0700
Subject: [PATCH 14/49] Sleep before checking the pod.
---
 .pipelines/multitenancy/swiftv2-e2e-step-template.yaml | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml b/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml
index 57b5c93314..770e7ae080 100644
--- a/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml
+++ b/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml
@@ -36,6 +36,7 @@ steps:
         echo "Apply the pod network yaml to start the delegation"
         less test/integration/manifests/swiftv2/podnetwork.yaml
         envsubst '${SUBNET_TOKEN},${SUBNET_GUID},${SUBNET_RESOURCE_ID},${VNET_GUID}' < test/integration/manifests/swiftv2/podnetwork.yaml | kubectl apply -f -
+        echo "Check the podnetwork yaml file"
         less test/integration/manifests/swiftv2/podnetwork.yaml
         kubectl get pn
         kubectl describe pn
@@ -45,7 +46,10 @@ steps:
         echo "Start the pod using the reserved IP"
         kubectl apply -f test/integration/manifests/swiftv2/mtpod.yaml
-        kubectl get pod -o wide
+        sleep 2m
+        kubectl get pod -o wide -A
+        sleep 2m
+        echo "Check pods after 4 minutes"
         kubectl get po -owide -A
         echo "Start the connection test"

From f4433d2c681dcde6ec050c05b5f6724edfba6197 Mon Sep 17 00:00:00 2001
From: Shufang
Date: Thu, 4 Apr 2024 16:35:59 -0700
Subject: [PATCH 15/49] Check the pni after baking time.

---
 .pipelines/multitenancy/swiftv2-e2e-step-template.yaml | 1 +
 1 file changed, 1 insertion(+)

diff --git a/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml b/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml
index 770e7ae080..a32b6fde57 100644
--- a/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml
+++ b/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml
@@ -51,6 +51,7 @@ steps:
         sleep 2m
         echo "Check pods after 4 minutes"
         kubectl get po -owide -A
+        kubectl describe pni
         echo "Start the connection test"
         kubectl exec mtpod -it -t -- /bin/bash
         ip a

From 43ec47fd79648bba9fccb6d1180af9851efb1f67 Mon Sep 17 00:00:00 2001
From: Shufang
Date: Thu, 4 Apr 2024 20:57:53 -0700
Subject: [PATCH 16/49] Update the vnet name.

---
 test/integration/manifests/swiftv2/mtpod.yaml | 2 +-
 test/integration/manifests/swiftv2/pni.yaml   | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/test/integration/manifests/swiftv2/mtpod.yaml b/test/integration/manifests/swiftv2/mtpod.yaml
index 79b2cc23fa..32d7584f65 100644
--- a/test/integration/manifests/swiftv2/mtpod.yaml
+++ b/test/integration/manifests/swiftv2/mtpod.yaml
@@ -2,7 +2,7 @@ apiVersion: v1
 kind: Pod
 metadata:
   labels:
-    kubernetes.azure.com/pod-network: aksswiftvnet
+    kubernetes.azure.com/pod-network: aksswiftvnet172
     kubernetes.azure.com/pod-network-instance: pni1
   name: mtpod
   namespace: default
diff --git a/test/integration/manifests/swiftv2/pni.yaml b/test/integration/manifests/swiftv2/pni.yaml
index 4033d9d025..a1824e4b39 100644
--- a/test/integration/manifests/swiftv2/pni.yaml
+++ b/test/integration/manifests/swiftv2/pni.yaml
@@ -3,5 +3,5 @@ kind: PodNetworkInstance
 metadata:
   name: pni1
 spec:
-  podnetwork: aksswiftvnet
+  podnetwork: aksswiftvnet172
   podIPReservationSize: 2
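The two sleep 2m baking periods introduced above give the delegated IPs time to be programmed before the pods and the PodNetworkInstance are inspected. In Go-based tests the same wait is usually expressed as a bounded poll rather than a fixed sleep; a sketch assuming a client-go clientset (construction elided, see the MustGetClientset helper used elsewhere in this series):

// Sketch: poll until the multitenant pod reaches Running instead of
// sleeping a fixed four minutes.
package swiftv2wait

import (
	"context"
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// waitForPodRunning polls every 5s for up to 4m, e.g.
// waitForPodRunning(ctx, cs, "default", "mtpod").
func waitForPodRunning(ctx context.Context, cs kubernetes.Interface, ns, name string) error {
	return wait.PollUntilContextTimeout(ctx, 5*time.Second, 4*time.Minute, true,
		func(ctx context.Context) (bool, error) {
			pod, err := cs.CoreV1().Pods(ns).Get(ctx, name, metav1.GetOptions{})
			if err != nil {
				return false, nil // treat errors as transient and keep polling
			}
			return pod.Status.Phase == corev1.PodRunning, nil
		})
}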
From 536968500c9e777e9a64eb759bdbb9c25ad1f170 Mon Sep 17 00:00:00 2001
From: Shufang
Date: Thu, 4 Apr 2024 22:38:08 -0700
Subject: [PATCH 17/49] Update the variables value.

---
 .pipelines/multitenancy/swiftv2-e2e-step-template.yaml | 2 +-
 test/integration/manifests/swiftv2/podnetwork.yaml     | 8 ++++----
 test/integration/swiftv2/datapath_swiftv2.go           | 3 +++
 3 files changed, 8 insertions(+), 5 deletions(-)

diff --git a/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml b/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml
index a32b6fde57..aff9ecc3ca 100644
--- a/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml
+++ b/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml
@@ -22,7 +22,7 @@ steps:

   - task: AzureCLI@1
     inputs:
-      azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION)
+      azureSubscription: $(ACN_TEST_SERVICE_CONNECTION)
       scriptLocation: "inlineScript"
       scriptType: "bash"
       addSpnToEnvironment: true
diff --git a/test/integration/manifests/swiftv2/podnetwork.yaml b/test/integration/manifests/swiftv2/podnetwork.yaml
index acb4a93702..446e54f973 100644
--- a/test/integration/manifests/swiftv2/podnetwork.yaml
+++ b/test/integration/manifests/swiftv2/podnetwork.yaml
@@ -2,9 +2,9 @@ apiVersion: multitenancy.acn.azure.com/v1alpha1
 kind: PodNetwork
 metadata:
   labels:
-    kubernetes.azure.com/override-subnet-token: SUBNET_TOKEN
+    kubernetes.azure.com/override-subnet-token: $SUBNET_TOKEN
   name: aksswiftvnet172
 spec:
-  subnetGUID: SUBNET_GUID
-  subnetResourceID: SUBNET_RESOURCE_ID
-  vnetGUID: VNET_GUID
\ No newline at end of file
+  subnetGUID: $SUBNET_GUID
+  subnetResourceID: $SUBNET_RESOURCE_ID
+  vnetGUID: $VNET_GUID
\ No newline at end of file
diff --git a/test/integration/swiftv2/datapath_swiftv2.go b/test/integration/swiftv2/datapath_swiftv2.go
index 9cac6dfe33..9dda73c489 100644
--- a/test/integration/swiftv2/datapath_swiftv2.go
+++ b/test/integration/swiftv2/datapath_swiftv2.go
@@ -23,6 +23,9 @@ const (
 	podCount                   = 2
 	nodepoolKey                = "mtapool"
 	LinuxDeployIPV4            = "../manifests/datapath/linux-deployment.yaml"
+	podNetworkYaml             = "../manifests/swiftv2/podnetwork.yaml"
+	mtpodYaml                  = "../manifests/swiftv2/mtpod.yaml"
+	pniYaml                    = "../manifests/swiftv2/pni.yaml"
 	maxRetryDelaySeconds       = 10
 	defaultTimeoutSeconds      = 120
 	defaultRetryDelaySeconds   = 1
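With patch 17 the PodNetwork manifest is fully parameterized: envsubst fills in the subnet token, subnet GUID, subnet resource ID, and VNet GUID at run time. For orientation, the spec fields map onto a shape like the following Go mirror. This is illustrative only; the authoritative types live in the multitenancy.acn.azure.com/v1alpha1 API package, and the example values are placeholders:

// Illustrative mirror of the fields the YAML above populates.
package main

import "fmt"

// PodNetworkSpec mirrors spec.{subnetGUID,subnetResourceID,vnetGUID}.
type PodNetworkSpec struct {
	SubnetGUID       string `json:"subnetGUID"`
	SubnetResourceID string `json:"subnetResourceID"`
	VnetGUID         string `json:"vnetGUID"`
}

func main() {
	spec := PodNetworkSpec{
		SubnetGUID:       "<subnet-guid>",
		SubnetResourceID: "/subscriptions/<sub>/resourceGroups/<rg>/providers/Microsoft.Network/virtualNetworks/<vnet>/subnets/<subnet>",
		VnetGUID:         "<vnet-guid>",
	}
	fmt.Printf("%+v\n", spec)
}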
From 1f270592d394b1e562d4f0a5cea827b5dbd4fc70 Mon Sep 17 00:00:00 2001
From: Shufang
Date: Fri, 5 Apr 2024 00:44:11 -0700
Subject: [PATCH 18/49] Update the sub tests use.

---
 .../swiftv2-e2e-job-template.yaml                |  4 +-
 .../templates/create-cluster-swiftv2.yaml        | 46 +++++++++++++++++++
 2 files changed, 48 insertions(+), 2 deletions(-)
 create mode 100644 .pipelines/templates/create-cluster-swiftv2.yaml

diff --git a/.pipelines/multitenancy/swiftv2-e2e-job-template.yaml b/.pipelines/multitenancy/swiftv2-e2e-job-template.yaml
index 8c5603f2d1..736dc53f2f 100644
--- a/.pipelines/multitenancy/swiftv2-e2e-job-template.yaml
+++ b/.pipelines/multitenancy/swiftv2-e2e-job-template.yaml
@@ -19,7 +19,7 @@ stages:
     variables:
       commitID: $[ stagedependencies.setup.env.outputs['EnvironmentalVariables.commitID'] ]
     jobs:
-      - template: ../templates/create-cluster.yaml
+      - template: ../templates/create-cluster-swiftv2.yaml
        parameters:
          name: ${{ parameters.name }}
          displayName: ${{ parameters.displayName }}
@@ -61,7 +61,7 @@ stages:
    - template: ../cni/k8s-e2e/k8s-e2e-job-template.yaml
      parameters:
-        sub: $(BUILD_VALIDATIONS_SERVICE_CONNECTION)
+        sub: $(ACN_TEST_SERVICE_CONNECTION)
        clusterName: ${{ parameters.clusterName }}-$(commitID)
        os: ${{ parameters.os }}
        dependsOn: ${{ parameters.name }}
diff --git a/.pipelines/templates/create-cluster-swiftv2.yaml b/.pipelines/templates/create-cluster-swiftv2.yaml
new file mode 100644
index 0000000000..d0bfdf12c3
--- /dev/null
+++ b/.pipelines/templates/create-cluster-swiftv2.yaml
@@ -0,0 +1,46 @@
+parameters:
+  name: ""
+  displayName: ""
+  clusterType: ""
+  clusterName: "" # Recommended to pass in unique identifier
+  vmSize: ""
+  vmSizeWin: ""
+  k8sVersion: ""
+  osSkuWin: "Windows2022" # Currently we only support Windows2022
+  dependsOn: ""
+  region: ""
+  os: linux
+
+jobs:
+  - job: ${{ parameters.name }}
+    displayName: Cluster - ${{ parameters.name }}
+    steps:
+      - task: AzureCLI@1
+        inputs:
+          azureSubscription: $(ACN_TEST_SERVICE_CONNECTION)
+          scriptLocation: "inlineScript"
+          scriptType: "bash"
+          addSpnToEnvironment: true
+          inlineScript: |
+            set -e
+            echo "Check az version"
+            az version
+            if ${{ lower(contains(parameters.clusterType, 'dualstack')) }}
+            then
+              echo "Install az cli extension preview"
+              az extension add --name aks-preview
+              az extension update --name aks-preview
+            fi
+            mkdir -p ~/.kube/
+            make -C ./hack/aks azcfg AZCLI=az REGION=${{ parameters.region }}
+
+            make -C ./hack/aks ${{ parameters.clusterType }} \
+            AZCLI=az REGION=${{ parameters.region }} SUB=$(SUB_AZURE_NETWORK_AGENT_TEST) \
+            CLUSTER=${{ parameters.clusterName }} \
+            VM_SIZE=${{ parameters.vmSize }} VM_SIZE_WIN=${{ parameters.vmSizeWin }} \
+            OS_SKU_WIN=${{ parameters.osSkuWin }} OS=${{parameters.os}} \
+            WINDOWS_USERNAME=${WINDOWS_USERNAME} WINDOWS_PASSWORD=${WINDOWS_PASSWORD}
+
+            echo "Cluster successfully created"
+        displayName: Cluster - ${{ parameters.clusterType }}
+        continueOnError: ${{ contains(parameters.clusterType, 'dualstack') }}

From 0ea31739afe54778f3eeb243423760c1c8771588 Mon Sep 17 00:00:00 2001
From: Shufang
Date: Fri, 5 Apr 2024 14:19:17 -0700
Subject: [PATCH 19/49] Update the cluster name from mt prefix to mta

---
 .pipelines/pipeline.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.pipelines/pipeline.yaml b/.pipelines/pipeline.yaml
index fb5496a25a..0de802f1bf 100644
--- a/.pipelines/pipeline.yaml
+++ b/.pipelines/pipeline.yaml
@@ -511,7 +511,7 @@ stages:
           displayName: Swiftv2 Multitenancy
           os: linux
           clusterType: swiftv2-multitenancy-cluster-up
-          clusterName: "mtcluster"
+          clusterName: "mtacluster"
           nodePoolName: "mtapool"
           vmSize: Standard_D4_v2
           dependsOn: "test"
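The create-cluster-swiftv2.yaml template above is a thin wrapper over the hack/aks Makefile targets. The same target can be driven locally from Go for ad-hoc runs; a sketch assuming az login has already been done, the working directory is the repository root, and the region value is a placeholder:

// Sketch: invoke the same Makefile target the pipeline template runs.
package main

import (
	"os"
	"os/exec"
)

func main() {
	cmd := exec.Command("make", "-C", "./hack/aks",
		"swiftv2-multitenancy-cluster-up",
		"AZCLI=az", "REGION=<region>", "CLUSTER=mtacluster",
		"VM_SIZE=Standard_D4_v2",
	)
	// Stream make/az output straight to the terminal.
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		panic(err)
	}
}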
From aa2b175b27a7d17778b55147d7d0795160bcbc87 Mon Sep 17 00:00:00 2001
From: Shufang
Date: Fri, 5 Apr 2024 15:58:16 -0700
Subject: [PATCH 20/49] Add the vm size.

---
 hack/aks/Makefile | 1 +
 1 file changed, 1 insertion(+)

diff --git a/hack/aks/Makefile b/hack/aks/Makefile
index ca8b7efea7..ebf06499d4 100644
--- a/hack/aks/Makefile
+++ b/hack/aks/Makefile
@@ -239,6 +239,7 @@ swiftv2-multitenancy-cluster-up: rg-up
		--network-plugin-mode overlay \
		--kubernetes-version 1.28 \
		--nodepool-name "mtapool" \
+		--node-vm-size Standard_D4_v2 \
		--nodepool-tags fastpathenabled=true \
		--no-ssh-key \
		--yes

From 15bb36d25b0d17b5cf1e85b52027c529a384e80a Mon Sep 17 00:00:00 2001
From: Shufang
Date: Sat, 6 Apr 2024 00:20:22 -0700
Subject: [PATCH 21/49] Update the token.

---
 hack/aks/Makefile | 1 +
 1 file changed, 1 insertion(+)

diff --git a/hack/aks/Makefile b/hack/aks/Makefile
index ebf06499d4..54b39d712f 100644
--- a/hack/aks/Makefile
+++ b/hack/aks/Makefile
@@ -240,6 +240,7 @@ swiftv2-multitenancy-cluster-up: rg-up
		--kubernetes-version 1.28 \
		--nodepool-name "mtapool" \
		--node-vm-size Standard_D4_v2 \
+		--node-count 2 \
		--nodepool-tags fastpathenabled=true \
		--no-ssh-key \
		--yes

From 12c3ab30fc89ffb54e00c223a9c6768eea8d190c Mon Sep 17 00:00:00 2001
From: Shufang
Date: Sun, 7 Apr 2024 17:15:06 -0700
Subject: [PATCH 22/49] Update the dummy cluster's vnet.

---
 .pipelines/multitenancy/swiftv2-e2e-step-template.yaml | 2 +-
 test/integration/manifests/swiftv2/mtpod.yaml          | 2 +-
 test/integration/manifests/swiftv2/pni.yaml            | 2 +-
 test/integration/manifests/swiftv2/podnetwork.yaml     | 2 +-
 4 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml b/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml
index aff9ecc3ca..32c1d66f7a 100644
--- a/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml
+++ b/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml
@@ -53,7 +53,7 @@ steps:
         kubectl get po -owide -A
         kubectl describe pni
         echo "Start the connection test"
-        kubectl exec mtpod -it -t -- /bin/bash
+        kubectl exec mtpod -it -- /bin/bash
         ip a
         ping -c 3 172.25.0.22
diff --git a/test/integration/manifests/swiftv2/mtpod.yaml b/test/integration/manifests/swiftv2/mtpod.yaml
index 32d7584f65..4e02aa0f5c 100644
--- a/test/integration/manifests/swiftv2/mtpod.yaml
+++ b/test/integration/manifests/swiftv2/mtpod.yaml
@@ -2,7 +2,7 @@ apiVersion: v1
 kind: Pod
 metadata:
   labels:
-    kubernetes.azure.com/pod-network: aksswiftvnet172
+    kubernetes.azure.com/pod-network: aksswiftvnetv2
     kubernetes.azure.com/pod-network-instance: pni1
   name: mtpod
   namespace: default
diff --git a/test/integration/manifests/swiftv2/pni.yaml b/test/integration/manifests/swiftv2/pni.yaml
index a1824e4b39..17c33cc534 100644
--- a/test/integration/manifests/swiftv2/pni.yaml
+++ b/test/integration/manifests/swiftv2/pni.yaml
@@ -3,5 +3,5 @@ kind: PodNetworkInstance
 metadata:
   name: pni1
 spec:
-  podnetwork: aksswiftvnet172
+  podnetwork: aksswiftvnetv2
   podIPReservationSize: 2
diff --git a/test/integration/manifests/swiftv2/podnetwork.yaml b/test/integration/manifests/swiftv2/podnetwork.yaml
index 446e54f973..7d6c357787 100644
--- a/test/integration/manifests/swiftv2/podnetwork.yaml
+++ b/test/integration/manifests/swiftv2/podnetwork.yaml
@@ -3,7 +3,7 @@ kind: PodNetwork
 metadata:
   labels:
     kubernetes.azure.com/override-subnet-token: $SUBNET_TOKEN
-  name: aksswiftvnet172
+  name: aksswiftvnetv2
 spec:
   subnetGUID: $SUBNET_GUID
   subnetResourceID: $SUBNET_RESOURCE_ID
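The connectivity checks here run through kubectl exec. The equivalent client-go path, the same one used by the PodExecWithError helper quoted later in this series, is a POST to the pod's exec subresource streamed over SPDY. A condensed sketch, with kubeconfig loading assumed from the default location:

// Condensed sketch of `kubectl exec mtpod -- ping ...` via client-go.
package main

import (
	"bytes"
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/scheme"
	restclient "k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/client-go/tools/remotecommand"
)

func podExec(cfg *restclient.Config, cs kubernetes.Interface, ns, pod string, command []string) (string, error) {
	req := cs.CoreV1().RESTClient().Post().
		Resource("pods").Name(pod).Namespace(ns).SubResource("exec")
	req.VersionedParams(&corev1.PodExecOptions{
		Command: command,
		Stdout:  true,
		Stderr:  true,
	}, scheme.ParameterCodec)

	exec, err := remotecommand.NewSPDYExecutor(cfg, "POST", req.URL())
	if err != nil {
		return "", err
	}
	var stdout, stderr bytes.Buffer
	err = exec.StreamWithContext(context.Background(), remotecommand.StreamOptions{Stdout: &stdout, Stderr: &stderr})
	return stdout.String() + stderr.String(), err
}

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// Target IP is an example, matching the pipeline's hard-coded check.
	out, err := podExec(cfg, cs, "default", "mtpod", []string{"ping", "-c", "3", "-W", "1", "172.25.0.22"})
	fmt.Println(out, err)
}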
From fbbad76d864b3fab268c8fd934705c9351ca5043 Mon Sep 17 00:00:00 2001
From: Shufang
Date: Sun, 7 Apr 2024 18:46:58 -0700
Subject: [PATCH 23/49] Add a connection test.

---
 .pipelines/multitenancy/swiftv2-e2e-step-template.yaml | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml b/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml
index 32c1d66f7a..d903ea80ea 100644
--- a/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml
+++ b/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml
@@ -53,9 +53,8 @@ steps:
         kubectl get po -owide -A
         kubectl describe pni
         echo "Start the connection test"
-        kubectl exec mtpod -it -- /bin/bash
-        ip a
-        ping -c 3 172.25.0.22
+        kubectl exec mtpod -it -- ip a
+        kubectl exec mtpod -it -- ping -c 3 -W 1 172.25.0.17
       name: "start_swiftv2_pods"
       displayName: "Start Swiftv2 Pods"

From 291cfe10cfbe4096c42acca40cc28a90ff7c6ba2 Mon Sep 17 00:00:00 2001
From: Shufang
Date: Mon, 8 Apr 2024 00:03:49 -0700
Subject: [PATCH 24/49] Update the test file name.

---
 .pipelines/multitenancy/swiftv2-e2e-step-template.yaml        | 4 ++--
 .../swiftv2/{datapath_swiftv2.go => datapath_swiftv2_test.go} | 0
 2 files changed, 2 insertions(+), 2 deletions(-)
 rename test/integration/swiftv2/{datapath_swiftv2.go => datapath_swiftv2_test.go} (100%)

diff --git a/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml b/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml
index d903ea80ea..4d002d286c 100644
--- a/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml
+++ b/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml
@@ -55,8 +55,8 @@ steps:
         echo "Start the connection test"
         kubectl exec mtpod -it -- ip a
         kubectl exec mtpod -it -- ping -c 3 -W 1 172.25.0.17
-      name: "start_swiftv2_pods"
-      displayName: "Start Swiftv2 Pods"
+      name: "start_swiftv2_pods_and_test_basic_connection"
+      displayName: "Start Swiftv2 Pods and test basic connection"

   - script: |
diff --git a/test/integration/swiftv2/datapath_swiftv2.go b/test/integration/swiftv2/datapath_swiftv2_test.go
similarity index 100%
rename from test/integration/swiftv2/datapath_swiftv2.go
rename to test/integration/swiftv2/datapath_swiftv2_test.go

From 4dff05be8bd5c5537c5f509d9e30606f62316639 Mon Sep 17 00:00:00 2001
From: Shufang
Date: Tue, 9 Apr 2024 00:30:45 -0700
Subject: [PATCH 25/49] Add one more pod with delegation network.
---
 .../swiftv2-e2e-step-template.yaml            |  4 +++-
 test/integration/manifests/swiftv2/mtpod2.yaml | 20 +++++++++++++++++++
 2 files changed, 23 insertions(+), 1 deletion(-)
 create mode 100644 test/integration/manifests/swiftv2/mtpod2.yaml

diff --git a/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml b/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml
index 4d002d286c..dc71288499 100644
--- a/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml
+++ b/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml
@@ -46,6 +46,8 @@ steps:
         kubectl describe pni
         echo "Start the pod using the reserved IP"
         kubectl apply -f test/integration/manifests/swiftv2/mtpod.yaml
+        echo "Start another pod using the reserved IP"
+        kubectl apply -f test/integration/manifests/swiftv2/mtpod2.yaml
         sleep 2m
         kubectl get pod -o wide -A
         sleep 2m
@@ -54,7 +56,7 @@ steps:
         kubectl describe pni
         echo "Start the connection test"
         kubectl exec mtpod -it -- ip a
-        kubectl exec mtpod -it -- ping -c 3 -W 1 172.25.0.17
+        kubectl exec mtpod -it -- ping -c 3 -W 1 172.25.0.27
       name: "start_swiftv2_pods_and_test_basic_connection"
       displayName: "Start Swiftv2 Pods and test basic connection"
diff --git a/test/integration/manifests/swiftv2/mtpod2.yaml b/test/integration/manifests/swiftv2/mtpod2.yaml
new file mode 100644
index 0000000000..ca97bcbbcc
--- /dev/null
+++ b/test/integration/manifests/swiftv2/mtpod2.yaml
@@ -0,0 +1,20 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  labels:
+    kubernetes.azure.com/pod-network: aksswiftvnetv2
+    kubernetes.azure.com/pod-network-instance: pni1
+  name: mtpod2
+  namespace: default
+spec:
+  containers:
+  - image: nicolaka/netshoot:latest
+    imagePullPolicy: Always
+    name: mtpod2
+    command: ["/bin/bash"]
+    args: ["-c", "while true; do ping localhost; sleep 60;done"]
+    securityContext:
+      privileged: true
+    ports:
+    - containerPort: 80
+      protocol: TCP
\ No newline at end of file
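pni.yaml reserves two IPs (podIPReservationSize: 2), and mtpod2 is the second pod bound to pni1, so the reservation is now fully used; any further pod referencing the same instance would need a larger reservation. A trivial illustrative check of that invariant (names hypothetical):

// Illustrative only: pods referencing a PodNetworkInstance should not
// exceed its podIPReservationSize.
package main

import "fmt"

func main() {
	podIPReservationSize := 2
	podsUsingPNI := []string{"mtpod", "mtpod2"}
	if len(podsUsingPNI) > podIPReservationSize {
		fmt.Println("not enough reserved IPs for all pods")
		return
	}
	fmt.Printf("%d reserved IPs cover %d pods\n", podIPReservationSize, len(podsUsingPNI))
}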
From 8bbe74b8a203f5a0e43be1246cae7ba986757866 Mon Sep 17 00:00:00 2001
From: Shufang
Date: Wed, 10 Apr 2024 00:45:21 -0700
Subject: [PATCH 26/49] Start 2 mtpod.

---
 .../multitenancy/swiftv2-e2e-step-template.yaml   | 14 +++++++++-----
 .../manifests/swiftv2/{mtpod.yaml => mtpod0.yaml} |  8 +++++---
 .../manifests/swiftv2/{mtpod2.yaml => mtpod1.yaml} | 8 +++++---
 test/integration/swiftv2/datapath_swiftv2_test.go |  2 +-
 4 files changed, 20 insertions(+), 12 deletions(-)
 rename test/integration/manifests/swiftv2/{mtpod.yaml => mtpod0.yaml} (79%)
 rename test/integration/manifests/swiftv2/{mtpod2.yaml => mtpod1.yaml} (79%)

diff --git a/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml b/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml
index dc71288499..a1878b737a 100644
--- a/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml
+++ b/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml
@@ -44,10 +44,14 @@ steps:
         kubectl apply -f test/integration/manifests/swiftv2/pni.yaml
         kubectl get pni
         kubectl describe pni
-        echo "Start the pod using the reserved IP"
-        kubectl apply -f test/integration/manifests/swiftv2/mtpod.yaml
+        export NODE_NAME_0="$(kubectl get nodes -o json | jq -r .items[0].metadata.name)"
+        echo $NODE_NAME_0
+        echo "Start the first pod using the reserved IP"
+        envsubst '$NODE_NAME_0' < test/integration/manifests/swiftv2/mtpod0.yaml | kubectl apply -f -
+        export NODE_NAME_1="$(kubectl get nodes -o json | jq -r .items[1].metadata.name)"
+        echo $NODE_NAME_1
         echo "Start another pod using the reserved IP"
-        kubectl apply -f test/integration/manifests/swiftv2/mtpod2.yaml
+        envsubst '$NODE_NAME_1' < test/integration/manifests/swiftv2/mtpod1.yaml | kubectl apply -f -
         sleep 2m
         kubectl get pod -o wide -A
         sleep 2m
@@ -55,8 +59,8 @@ steps:
         kubectl get po -owide -A
         kubectl describe pni
         echo "Start the connection test"
-        kubectl exec mtpod -it -- ip a
-        kubectl exec mtpod -it -- ping -c 3 -W 1 172.25.0.27
+        kubectl exec mtpod0 -it -- ip a
+        kubectl exec mtpod0 -it -- ping -c 3 -W 1 172.25.0.27
       name: "start_swiftv2_pods_and_test_basic_connection"
       displayName: "Start Swiftv2 Pods and test basic connection"
diff --git a/test/integration/manifests/swiftv2/mtpod.yaml b/test/integration/manifests/swiftv2/mtpod0.yaml
similarity index 79%
rename from test/integration/manifests/swiftv2/mtpod.yaml
rename to test/integration/manifests/swiftv2/mtpod0.yaml
index 4e02aa0f5c..f1d02a2a3f 100644
--- a/test/integration/manifests/swiftv2/mtpod.yaml
+++ b/test/integration/manifests/swiftv2/mtpod0.yaml
@@ -4,17 +4,19 @@ metadata:
   labels:
     kubernetes.azure.com/pod-network: aksswiftvnetv2
     kubernetes.azure.com/pod-network-instance: pni1
-  name: mtpod
+  name: mtpod0
   namespace: default
 spec:
   containers:
   - image: nicolaka/netshoot:latest
     imagePullPolicy: Always
-    name: mtpod
+    name: mtpod0
     command: ["/bin/bash"]
     args: ["-c", "while true; do ping localhost; sleep 60;done"]
     securityContext:
       privileged: true
     ports:
     - containerPort: 80
-      protocol: TCP
\ No newline at end of file
+      protocol: TCP
+  nodeSelector:
+    kubernetes.io/hostname: $NODE_NAME_0
\ No newline at end of file
diff --git a/test/integration/manifests/swiftv2/mtpod2.yaml b/test/integration/manifests/swiftv2/mtpod1.yaml
similarity index 79%
rename from test/integration/manifests/swiftv2/mtpod2.yaml
rename to test/integration/manifests/swiftv2/mtpod1.yaml
index ca97bcbbcc..e584d943cd 100644
--- a/test/integration/manifests/swiftv2/mtpod2.yaml
+++ b/test/integration/manifests/swiftv2/mtpod1.yaml
@@ -4,17 +4,19 @@ metadata:
   labels:
     kubernetes.azure.com/pod-network: aksswiftvnetv2
     kubernetes.azure.com/pod-network-instance: pni1
-  name: mtpod2
+  name: mtpod1
   namespace: default
 spec:
   containers:
   - image: nicolaka/netshoot:latest
     imagePullPolicy: Always
-    name: mtpod2
+    name: mtpod1
     command: ["/bin/bash"]
     args: ["-c", "while true; do ping localhost; sleep 60;done"]
     securityContext:
       privileged: true
     ports:
     - containerPort: 80
-      protocol: TCP
\ No newline at end of file
+      protocol: TCP
+  nodeSelector:
+    kubernetes.io/hostname: $NODE_NAME_1
\ No newline at end of file
diff --git a/test/integration/swiftv2/datapath_swiftv2_test.go b/test/integration/swiftv2/datapath_swiftv2_test.go
index 9dda73c489..763e3a720f 100644
--- a/test/integration/swiftv2/datapath_swiftv2_test.go
+++ b/test/integration/swiftv2/datapath_swiftv2_test.go
@@ -24,7 +24,7 @@ const (
 	nodepoolKey                = "mtapool"
 	LinuxDeployIPV4            = "../manifests/datapath/linux-deployment.yaml"
 	podNetworkYaml             = "../manifests/swiftv2/podnetwork.yaml"
-	mtpodYaml                  = "../manifests/swiftv2/mtpod.yaml"
+	mtpodYaml                  = "../manifests/swiftv2/mtpod0.yaml"
 	pniYaml                    = "../manifests/swiftv2/pni.yaml"
 	maxRetryDelaySeconds       = 10
 	defaultTimeoutSeconds      = 120

From 73f4da36d006cd497bd3372c5e2c55b7f182de22 Mon Sep 17 00:00:00 2001
From: Shufang
Date: Wed, 10 Apr 2024 11:50:02 -0700
Subject: [PATCH 27/49] Add the test for mtpod to mtpod connection.

---
 .pipelines/multitenancy/swiftv2-e2e-step-template.yaml | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)

diff --git a/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml b/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml
index a1878b737a..6773ef977c 100644
--- a/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml
+++ b/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml
@@ -60,7 +60,15 @@ steps:
         kubectl describe pni
         echo "Start the connection test"
         kubectl exec mtpod0 -it -- ip a
+        echo "Test the connection to a non-mt pod (172.25.0.27) in the same delegated network and in another cluster"
         kubectl exec mtpod0 -it -- ping -c 3 -W 1 172.25.0.27
+        echo "Test the connection to a mtpod in the same delegated network and in another node"
+        export IP0=`kubectl describe pni pni1 | grep /32 | head -1 | cut -c 5- | head -c -4`
+        echo $IP0
+        kubectl exec mtpod0 -it -- ping -c 3 $IP0
+        export IP1=`kubectl describe pni pni1 | grep /32 | tail -1 | cut -c 5- | head -c -4`
+        echo $IP1
+        kubectl exec mtpod0 -it -- ping -c 3 $IP1
       name: "start_swiftv2_pods_and_test_basic_connection"
       displayName: "Start Swiftv2 Pods and test basic connection"
@@ -77,7 +85,8 @@ steps:
       cd test/integration/swiftv2
       echo "Swiftv2 test"
-      go test -count=1 datapath_swiftv2.go -timeout 3m -tags connection -run ^TestDatapathLinux$ -tags=connection,integration
+      echo "will run it in the near future"
+      echo "go test -count=1 datapath_swiftv2.go -timeout 3m -tags connection -run ^TestDatapathLinux$ -tags=connection,integration"
     retryCountOnTaskFailure: 3
     name: "Swiftv2_Tests"
     displayName: "Swiftv2 Tests"
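The shell pipeline above recovers the reserved IPs by grepping the /32 entries out of kubectl describe pni and trimming the prefix length with cut and head. The commented-out pod-to-pod handler later in this series does the same in Go by splitting the CIDR on "/"; a small standalone sketch with example inputs:

// Strip the /32 suffix from reserved-IP CIDRs before pinging them.
package main

import (
	"fmt"
	"strings"
)

func main() {
	reserved := []string{"172.25.0.16/32", "172.25.0.17/32"} // example values
	for _, cidr := range reserved {
		parts := strings.Split(cidr, "/")
		if len(parts) != 2 {
			continue // not a CIDR entry; skip it
		}
		fmt.Println("ping target:", parts[0])
	}
}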
From 3a83bf25ad06b425b11cc70d42e2ee425b931305 Mon Sep 17 00:00:00 2001
From: shchen
Date: Tue, 16 Apr 2024 23:33:21 -0700
Subject: [PATCH 28/49] Include the commented out code for swiftv2 datapath test.

---
 .../swiftv2-e2e-step-template.yaml   |   4 +-
 .../swiftv2/datapath_swiftv2_test.go | 466 +++++++++++++++++-
 2 files changed, 466 insertions(+), 4 deletions(-)

diff --git a/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml b/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml
index 6773ef977c..1db9b1a9ab 100644
--- a/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml
+++ b/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml
@@ -71,6 +71,8 @@ steps:
         kubectl exec mtpod0 -it -- ping -c 3 $IP1
       name: "start_swiftv2_pods_and_test_basic_connection"
       displayName: "Start Swiftv2 Pods and test basic connection"
+      env:
+       SUBNET_TOKEN: $(SUBNET_TOKEN)

   - script: |
       set -e
@@ -80,7 +82,7 @@ steps:
       cd test/integration/swiftv2
       echo "Swiftv2 test"
       echo "will run it in the near future"
-      echo "go test -count=1 datapath_swiftv2.go -timeout 3m -tags connection -run ^TestDatapathLinux$ -tags=connection,integration"
+      go test -count=1 datapath_swiftv2.go -timeout 3m -tags connection -run ^TestDatapathLinux$ -tags=swiftv2,integration
     retryCountOnTaskFailure: 3
     name: "Swiftv2_Tests"
     displayName: "Swiftv2 Tests"
diff --git a/test/integration/swiftv2/datapath_swiftv2_test.go b/test/integration/swiftv2/datapath_swiftv2_test.go
index 763e3a720f..459c5f4257 100644
--- a/test/integration/swiftv2/datapath_swiftv2_test.go
+++ b/test/integration/swiftv2/datapath_swiftv2_test.go
@@ -1,6 +1,6 @@
-//go:build connection
+//go:build swiftv2

-package connection
+package swiftv2

 import (
 	"context"
@@ -128,7 +128,7 @@ func setupLinuxEnvironment(t *testing.T) {

 	t.Log("Successfully created customer Linux pods")

-	t.Log("Checking Linux test environment")
+	t.Log("Checking swiftv2 multitenant pods number")
 	for _, node := range nodes.Items {
 		pods, err := kubernetes.GetPodsByNode(ctx, clientset, *podNamespace, podLabelSelector, node.Name)
 		if err != nil {
@@ -142,6 +142,466 @@ func setupLinuxEnvironment(t *testing.T) {
 	t.Log("Linux test environment ready")
 }

+// func TestPodToPodSourceIP(config ergonomic.Config,
+// 	kubeconfig string,
+// 	settings *PodToPodSettings) {
+// 	ctx := config.Ctx()
+// 	kubeClient := clientgen.Default(kubeconfig)
+// 	k8sClient := client.MustCreateK8SClientFromKubeConfig(kubeconfig)
+
+// 	// Create namespace.
+// 	logger := config.Logger("k8sPodToVMSourceIP")
+// 	logger.LogKV("step", "create namespace in the cluster")
+// 	nsCreated := clientgen.EnsureNamespaceExists(
+// 		ctx,
+// 		kubeClient,
+// 		logger,
+// 		"pod-pod-src-ip-"+e2enaming.GenerateRandomName(5),
+// 	)
+// 	logger.Logf("namespace %q created successfully", nsCreated.Name)
+
+// 	// Create pod network instance if labels exist
+// 	if settings.PodLabels != nil {
+// 		if podNetwork := settings.PodLabels["kubernetes.azure.com/pod-network"]; podNetwork != "" {
+// 			if podNetworkInstance := settings.PodLabels["kubernetes.azure.com/pod-network-instance"]; podNetworkInstance != "" {
+// 				CreatePodNetworkInstance(ctx, config.RunCtx(), kubeconfig, nsCreated.Name, podNetworkInstance, podNetwork, settings.NodeCountLinux)
+// 			}
+// 		}
+// 	}
+
+// 	// Create hostNetwork deployment for the agnhost image from k/k E2E test framework.
+// 	// AgnHost's netexec command runs an HTTP server with an endpoint /clientip that echos the client's source IP.
+// 	deploymentPods := CreateAgnHostDeployment(
+// 		config.RunCtx(),
+// 		ctx,
+// 		k8sClient,
+// 		kubeClient,
+// 		logger,
+// 		nsCreated.Name,
+// 		"linux",
+// 		settings.DestPort,
+// 		settings.HostNetwork,
+// 		1,
+// 		false,
+// 	)
+
+// 	if settings.NodeCountWindows > 0 {
+// 		deploymentPods = append(deploymentPods,
+// 			CreateAgnHostDeployment(
+// 				config.RunCtx(),
+// 				ctx,
+// 				k8sClient,
+// 				kubeClient,
+// 				logger,
+// 				nsCreated.Name,
+// 				"windows",
+// 				settings.DestPort,
+// 				settings.HostNetwork,
+// 				1,
+// 				false,
+// 			)...)
+// 	}
+
+// 	// Create daemonset of curl pods, one on each node.
+// 	curlPods := CreateCurlDaemonset(
+// 		config.RunCtx(),
+// 		ctx,
+// 		k8sClient,
+// 		kubeClient,
+// 		logger,
+// 		nsCreated.Name,
+// 		settings.NodeCountLinux,
+// 		settings.NodeCountWindows,
+// 		settings.PodLabels,
+// 	)
+
+// 	cURLs := []string{}
+// 	ipFamily := settings.IPFamily
+// 	for _, deploymentPod := range deploymentPods {
+// 		// From each curl pod, request /clientip from each agnhost deployment pod.
+// 		// This will echo back the source IP as seen by the server, which we expect to be equal to the curl pod's IP unless SNAT is expected
+
+// 		clientIpURL := getClientIPEndpoint(getPodIPForFamily(deploymentPod.Status.PodIPs, ipFamily), ipFamily, deploymentPod.Spec.Containers[0].Ports[0].ContainerPort)
+// 		m.Expect(clientIpURL).NotTo(m.BeEmpty(),
+// 			"no valid IP in family %s found for pod %s",
+// 			ipFamily,
+// 			deploymentPod.Name)
+// 		cURLs = append(cURLs, clientIpURL)
+
+// 		if settings.VerifyHostPort && !settings.HostNetwork {
+// 			cURLs = append(cURLs, fmt.Sprintf("http://%s:%d", deploymentPod.Status.HostIP, deploymentPod.Spec.Containers[0].Ports[0].HostPort))
+// 		}
+// 	}
+
+// 	logger.LogKV("step", "curl from each node")
+// 	for _, curlPod := range curlPods {
+// 		for _, url := range cURLs {
+// 			var expectedPodIP string
+// 			if settings.SNATExpected {
+// 				node, err := k8sClient.
+// 					Clientset.
+// 					CoreV1().
+// 					Nodes().
+// 					Get(ctx, curlPod.Spec.NodeName, k8smetav1.GetOptions{})
+// 				m.Expect(err).NotTo(m.HaveOccurred())
+
+// 				// get the ipfamily of the curl pod, then get the node ip of pod (host)
+// 				expectedPodIP = getInternalNodeIPForFamily(*node, ipFamily).String()
+// 				m.Expect(expectedPodIP).NotTo(m.BeNil(), "no host IP found for pod %s", curlPod.Name)
+// 				if strings.Contains(url, curlPod.Status.HostIP) || checkIPsInNode(*node, url) {
+// 					continue
+// 				}
+// 			} else {
+// 				expectedPodIP = getPodIPForFamily(curlPod.Status.PodIPs, ipFamily).String()
+// 			}
+
+// 			m.Expect(expectedPodIP).NotTo(m.BeEmpty(),
+// 				"no valid IP in family %s found for pod %s",
+// 				ipFamily,
+// 				curlPod.Name)
+
+// 			// windows pod to its own host port times out
+// 			if (settings.VerifyHostPort || settings.HostNetwork) && strings.Contains(curlPod.Name, "windows") && strings.Contains(url, curlPod.Status.HostIP) {
+// 				continue
+// 			}
+
+// 			logger.Logf("curl from pod %q to %q", curlPod.Name, url)
+// 			result, err := retry.DoFixedRetryWithMaxCount(
+// 				func() retry.Result {
+// 					stdout, stderr, err := clientgen.PodExecWithError(
+// 						logger,
+// 						kubeconfig,
+// 						curlPod.Name,
+// 						curlPod.Namespace,
+// 						[]string{"curl", "-g", "--max-time", "30", url},
+// 					)
+// 					if err != nil {
+// 						logger.Logf("curl %q request failed: error: %s, stdout: %s, stderr: %s", url, err, stdout, stderr)
+// 						return retry.Result{
+// 							Status: retry.NeedRetry,
+// 							Body:   "curl request failed",
+// 						}
+// 					}
+
+// 					if strings.Contains(url, "/clientip") {
+
+// 						logger.Logf("checking response from agnhost endpoint contains expected IP. "+
+// 							"endpoint=%s\nexpected IP=%s\nstdout=%s\nstderr=%s\n", url, expectedPodIP, stdout, stderr)
+// 						if strings.Contains(stdout, expectedPodIP) {
+// 							logger.Logf("found expected IP %s in stdout %s", expectedPodIP, stdout)
+// 						} else {
+// 							logger.Logf("stdout %s does not contain expected IP %s", stdout, expectedPodIP)
+// 							return retry.Result{Status: retry.NeedRetry, Body: stdout}
+// 						}
+
+// 					}
+
+// 					return retry.Result{
+// 						Status: retry.Success,
+// 						Body:   stdout,
+// 					}
+// 				},
+// 				podExecRetryInterval,
+// 				podExecRetryTimeout,
+// 				podExecRetryMaxAttempts)
+
+// 			m.Expect(err).NotTo(m.HaveOccurred(), "err: %s", err)
+// 			m.Expect(result.Status).To(m.Equal(retry.Success))
+// 		}
+// 	}
+
+// 	// Cleanup by deleting the namespace.
+// 	logger.LogKV("step", "delete namespace in the cluster")
+// 	clientgen.EnsureNamespaceDeleted(ctx, kubeClient, logger, nsCreated.Name)
+// 	logger.Logf("deleted namespace %q", nsCreated.Name)
+// }
+
+// func GetMultitenantPodNetworkConfig(ctx context.Context, runCtx e2ev2.RunContext, kubeconfig, namespace, name string) v1alpha1.MultitenantPodNetworkConfig {
+// 	crdClient, err := GetRESTClientForMultitenantCRD(kubeconfig)
+// 	m.Expect(err).NotTo(m.HaveOccurred(), "failed to get multitenant crd rest client: %s", err)
+// 	logger := runCtx.Logger("getMultitenantPodNetworkConfig")
+// 	var mtpnc v1alpha1.MultitenantPodNetworkConfig
+// 	retryResult := retry.RetryWithMaxCountWithContext(
+// 		context.Background(),
+// 		func() (*retry.Result, *cgerror.CategorizedError) {
+// 			err = crdClient.Get().Namespace(namespace).Resource("multitenantpodnetworkconfigs").Name(name).Do(ctx).Into(&mtpnc)
+// 			if err != nil {
+// 				logger.Logf("failed to retrieve multitenantpodnetworkconfig: error: %s", err)
+// 				retriable := true
+// 				return &retry.Result{
+// 					Status: retry.NeedRetry,
+// 					Body:   err,
+// 				}, &cgerror.CategorizedError{
+// 					Retriable: &retriable,
+// 				}
+// 			}
+// 			if mtpnc.Status.MacAddress == "" || mtpnc.Status.PrimaryIP == "" {
+// 				retriable := true
+// 				return &retry.Result{
+// 					Status: retry.NeedRetry,
+// 					Body:   "waiting for mtpnc to be ready",
+// 				}, &cgerror.CategorizedError{
+// 					Retriable: &retriable,
+// 				}
+// 			}
+// 			return &retry.Result{
+// 				Status: retry.Success,
+// 				Body:   err,
+// 			}, nil
+// 		},
+// 		podExecRetryInterval,
+// 		podExecRetryTimeout,
+// 		podExecRetryMaxAttempts,
+// 		retry.FixedType,
+// 	)
+// 	m.Expect(retryResult.Status).To(m.Equal(retry.Success))
+// 	return mtpnc
+// }
+
+// func GetRESTClientForMultitenantCRD(kubeconfig string) (*rest.RESTClient, error) {
+// 	scheme := runtime.NewScheme()
+// 	err := acnv1alpha1.AddToScheme(scheme)
+// 	if err != nil {
+// 		return nil, err
+// 	}
+
+// 	restConfig, err := clientcmd.RESTConfigFromKubeConfig([]byte(kubeconfig))
+// 	if err != nil {
+// 		return nil, err
+// 	}
+
+// 	restConfig.ContentConfig.GroupVersion = &acnv1alpha1.GroupVersion
+// 	restConfig.APIPath = "/apis"
+// 	restConfig.NegotiatedSerializer = serializer.NewCodecFactory(scheme)
+// 	restConfig.UserAgent = rest.DefaultKubernetesUserAgent()
+
+// 	return rest.UnversionedRESTClientFor(restConfig)
+// }
+
+// // EnsureAllPodsAreRunning expects all pods returned from getPods are running.
+// func EnsureAllPodsAreRunning(
+// 	ctx context.Context,
+// 	k Interface,
+// 	runCtxLogger e2ev2.RunContextLogger,
+// 	getPods func(k Interface) ([]k8scorev1.Pod, error),
+// 	checkOpt *CheckPodOption,
+// ) []k8scorev1.Pod {
+// 	return k.Failer().MustPodList(ExpectAllPodsAreRunning(ctx, k, runCtxLogger, getPods, checkOpt))
+// }
+
+// // PodExecWithError executes a command in a pod by using its first container.
+// // It returns the stdout, stderr and error.
+// func PodExecWithError(
+// 	logger e2ev2.RunContextLogger,
+// 	kubeconfig string,
+// 	podName string, namespace string,
+// 	command []string,
+// ) (string, string, error) {
+// 	clientcmdConfig, err := k8sclientcmd.Load([]byte(kubeconfig))
+// 	if err != nil {
+// 		return "", "", fmt.Errorf("failed to load kube config: %w", err)
+// 	}
+
+// 	directClientcmdConfig := k8sclientcmd.NewNonInteractiveClientConfig(
+// 		*clientcmdConfig,
+// 		"", // default context
+// 		&k8sclientcmd.ConfigOverrides{},
+// 		nil, // config access
+// 	)
+
+// 	clientRestConfig, err := directClientcmdConfig.ClientConfig()
+// 	if err != nil {
+// 		return "", "", fmt.Errorf("failed to create kube client config: %w", err)
+// 	}
+// 	WrapClientRestConfigWithRetry(clientRestConfig)
+
+// 	clientRestConfig.Timeout = 10 * time.Minute
+
+// 	client, err := k8sclientset.NewForConfig(clientRestConfig)
+// 	if err != nil {
+// 		return "", "", fmt.Errorf("failed to create kube clientset: %w", err)
+// 	}
+
+// 	pod, err := client.CoreV1().Pods(namespace).Get(context.TODO(), podName, k8smetav1.GetOptions{})
+// 	if err != nil {
+// 		return "", "", fmt.Errorf("get pod: %w", err)
+// 	}
+// 	containerName := pod.Spec.Containers[0].Name
+// 	req := client.CoreV1().RESTClient().Post().
+// 		Resource("pods").
+// 		Name(podName).
+// 		Namespace(namespace).
+// 		SubResource("exec").
+// 		Param("container", containerName)
+
+// 	req.VersionedParams(&k8scorev1.PodExecOptions{
+// 		Command:   command,
+// 		Stdin:     false,
+// 		Stdout:    true,
+// 		Stderr:    true,
+// 		TTY:       false,
+// 		Container: containerName,
+// 	}, k8sscheme.ParameterCodec)
+
+// 	var stdout, stderr bytes.Buffer
+// 	executor, err := remotecommand.NewSPDYExecutor(clientRestConfig, "POST", req.URL())
+// 	if err != nil {
+// 		return "", "", fmt.Errorf("NewSPDYExecutor: %w", err)
+// 	}
+
+// 	// NOTE: remotecommand is not a Kubernetes pod resource API used here, but a tool API.
+// 	ctx := context.Background()
+// 	// yes, 3 mins is a magic number
+// 	ctx, cancel := context.WithTimeout(ctx, 3*time.Minute)
+// 	defer cancel()
+// 	logger.Logf("executing command: %s", strings.Join(command, " "))
+// 	readStreamErr := executor.StreamWithContext(ctx, remotecommand.StreamOptions{
+// 		Stdin:  nil,
+// 		Stdout: &stdout,
+// 		Stderr: &stderr,
+// 		Tty:    false,
+// 	})
+
+// 	// FIXME(hbc): Windows validation expect stdout/stderr output even seeing error
+// 	// therefore we need to return the stdout/stderr output here
+// 	stdoutRead := strings.TrimSpace(stdout.String())
+// 	stderrRead := strings.TrimSpace(stderr.String())
+// 	return stdoutRead, stderrRead, readStreamErr
+// }
+
+// func HandleSwiftv2PodToPodTestcase() e2e.Handler {
+// 	var (
+// 		kubeconfig string
+// 		namespace  string
+// 		numNodes   int
+// 		podLabels  map[string]string
+// 	)
+
+// 	fs := schemahelper.NewFlagSet()
+// 	fs.RequiredStringVar(&kubeconfig, "kube_config", "First AKS cluster credentials to use")
+// 	fs.StringVar(&namespace, "namespace", "", "namespace to deploy, generate if not specified")
+// 	fs.IntVar(&numNodes, "num_linux_nodes", 2, "number of linux nodes in cluster")
+// 	fs.StringMapStringVar(&podLabels, "pod_labels", nil, "client pod labels")
+
+// 	return &handler{
+// 		name:             Swiftv2PodToPod,
+// 		parametersSchema: fs.BuildParametersSchema(),
+// 		handler: func(runCtx e2e.RunContext) {
+// 			fs.Parse(runCtx.Parameters())
+// 			logger := runCtx.Logger("k8sPodConnectionCrossClusterWithPeeredNetwork")
+// 			ctx := context.Background()
+
+// 			if namespace == "" {
+// 				namespace = generateNamespace()
+// 			}
+
+// 			// Create pod network instance
+// 			m.Expect(podLabels).ToNot(m.BeNil())
+// 			podNetwork := podLabels["kubernetes.azure.com/pod-network"]
+// 			podNetworkInstance := podLabels["kubernetes.azure.com/pod-network-instance"]
+
+// 			k8sClient := MustCreateK8SClientFromKubeConfig(kubeconfig)
+// 			kubeClient := clientgen.Default(kubeconfig)
+
+// 			logger.LogKV("step", "create namespace in the cluster")
+// 			clientgen.EnsureNamespaceExists(ctx, kubeClient, logger, namespace)
+// 			logger.LogKV("namespace", namespace, "state", "created")
+
+// 			network.CreatePodNetworkInstance(ctx, runCtx, kubeconfig, namespace, podNetworkInstance, podNetwork, numNodes)
+
+// 			logger.LogKV("step", "create deployment in the cluster")
+// 			testcase := BusyboxTestcase{
+// 				Namespace:               namespace,
+// 				BusyboxImage:            dockerimage.ImageBusybox.MustGetFromRunContext(runCtx),
+// 				Basename:                "mtpod-to-mtpod",
+// 				Replicas:                numNodes,
+// 				PodAntiAffinityHostname: true,
+// 				Labels:                  podLabels,
+// 			}
+// 			deployment := testcase.Deployment()
+// 			k8sClient.MustCreateDeployment(namespace, deployment)
+
+// 			logger.LogKV("step", "wait until the pods in the deployment are ready")
+// 			k8sClient.MustWaitDeploymentReady(namespace, k8smetav1.ListOptions{}, numNodes, nil)
+
+// 			deploymentPods := EnsureAllPodsAreRunning(ctx,
+// 				kubeClient,
+// 				logger,
+// 				func(k clientgen.Interface) ([]k8scorev1.Pod, error) {
+// 					result := k.Pods(namespace).List(ctx, k8smetav1.ListOptions{LabelSelector: fmt.Sprintf("app in (%s)", testcase.Basename)})
+// 					err := result.Err()
+// 					if err != nil {
+// 						logger.Logf("failed to list pods on node: %s", err)
+// 						return nil, err
+// 					}
+
+// 					podList := result.OrElseThrow()
+
+// 					numPods := len(podList.Items)
+// 					if numPods != numNodes {
+// 						return nil, fmt.Errorf("waiting for %d/%d pods", numPods, numNodes)
+// 					}
+
+// 					return podList.Items, nil
+// 				},
+// 				&clientgen.CheckPodOption{
+// 					CheckInterval: podExecRetryInterval,
+// 					CheckTimeout:  podExecRetryTimeout,
+// 				})
+
+// 			logger.LogKV("step", "validate swiftv2 pods datapath")
+// 			ipsToPing := make([]string, 0, numNodes)
+// 			for _, pod := range deploymentPods {
+// 				mtpnc := network.GetMultitenantPodNetworkConfig(ctx, runCtx, kubeconfig, pod.Namespace, pod.Name)
+// 				m.Expect(pod.Status.PodIPs).To(m.HaveLen(1))
+// 				// remove /32 from PrimaryIP
+// 				splitcidr := strings.Split(mtpnc.Status.PrimaryIP, "/")
+// 				m.Expect(splitcidr).To(m.HaveLen(2))
+// 				ipsToPing = append(ipsToPing, splitcidr[0])
+// 			}
+
+// 			for _, pod := range deploymentPods {
+// 				for _, ip := range ipsToPing {
+// 					logger.Logf("ping from pod %q to %q", pod.Name, ip)
+// 					result, err := retry.DoFixedRetryWithMaxCount(
+// 						func() retry.Result {
+// 							stdout, stderr, err := clientgen.PodExecWithError(
+// 								logger,
+// 								kubeconfig,
+// 								pod.Name,
+// 								pod.Namespace,
+// 								[]string{"ping", "-c", "3", ip},
+// 							)
+// 							if err != nil {
+// 								logger.Logf("ping %q failed: error: %s, stdout: %s, stderr: %s", ip, err, stdout, stderr)
+// 								return retry.Result{
+// 									Status: retry.NeedRetry,
+// 									Body:   "ping failed",
+// 								}
+// 							}
+
+// 							return retry.Result{
+// 								Status: retry.Success,
+// 								Body:   stdout,
+// 							}
+// 						},
+// 						podExecRetryInterval,
+// 						podExecRetryTimeout,
+// 						podExecRetryMaxAttempts)
+
+// 					m.Expect(err).NotTo(m.HaveOccurred(), "err: %s", err)
+// 					m.Expect(result.Status).To(m.Equal(retry.Success))
+// 				}
+// 			}
+
+// 			// Cleanup by deleting the namespace.
+// 			logger.LogKV("step", "delete namespace in the cluster")
+// 			clientgen.EnsureNamespaceDeleted(ctx, kubeClient, logger, namespace)
+// 			logger.Logf("deleted namespace %q", namespace)
+// 		},
+// 	}
+// }
+
 func TestDatapathLinux(t *testing.T) {
 	ctx := context.Background()

From 5945be8cf78e28c9085d9dc0d28fbd05f0e304be Mon Sep 17 00:00:00 2001
From: shchen
Date: Tue, 16 Apr 2024 23:37:24 -0700
Subject: [PATCH 29/49] Update the yaml file format.

---
 .pipelines/multitenancy/swiftv2-e2e-step-template.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml b/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml
index 1db9b1a9ab..b7267a8ebe 100644
--- a/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml
+++ b/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml
@@ -72,7 +72,7 @@ steps:
       name: "start_swiftv2_pods_and_test_basic_connection"
       displayName: "Start Swiftv2 Pods and test basic connection"
       env:
-       SUBNET_TOKEN: $(SUBNET_TOKEN)
+        SUBNET_TOKEN: $(SUBNET_TOKEN)

From 69ada3c27f327236483553fb954b9d686c3615c6 Mon Sep 17 00:00:00 2001
From: shchen
Date: Thu, 18 Apr 2024 00:17:47 -0700
Subject: [PATCH 30/49] Update the command to run the swiftv2 tests.
--- .../swiftv2-e2e-step-template.yaml | 2 +- .../swiftv2/datapath_swiftv2_test.go | 70 +++++++++---------- 2 files changed, 35 insertions(+), 37 deletions(-) diff --git a/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml b/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml index b7267a8ebe..4cbc64e166 100644 --- a/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml +++ b/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml @@ -80,7 +80,7 @@ steps: cd test/integration/swiftv2 echo "Swiftv2 test" echo "will run it in the near future" - go test -count=1 datapath_swiftv2.go -timeout 3m -tags connection -run ^TestDatapathLinux$ -tags=swiftv2,integration + go test -count=1 datapath_swiftv2.go -timeout 3m -tags swiftv2 -run ^TestDatapathLinux$ -tags=swiftv2,integration retryCountOnTaskFailure: 3 name: "Swiftv2_Tests" displayName: "Swiftv2 Tests" diff --git a/test/integration/swiftv2/datapath_swiftv2_test.go b/test/integration/swiftv2/datapath_swiftv2_test.go index 459c5f4257..2b048b2a17 100644 --- a/test/integration/swiftv2/datapath_swiftv2_test.go +++ b/test/integration/swiftv2/datapath_swiftv2_test.go @@ -13,9 +13,6 @@ import ( "github.com/Azure/azure-container-networking/test/internal/kubernetes" "github.com/Azure/azure-container-networking/test/internal/retry" "github.com/pkg/errors" - - appsv1 "k8s.io/api/apps/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) const ( @@ -41,7 +38,7 @@ const ( ) var ( - podPrefix = flag.String("podName", "goldpinger", "Prefix for test pods") + podPrefix = flag.String("podName", "mta", "Prefix for test pods") podNamespace = flag.String("namespace", "default", "Namespace for test pods") nodepoolSelector = flag.String("nodepoolSelector", "mtapool", "Provides nodepool as a Linux Node-Selector for pods") // TODO: add flag to support dual nic scenario @@ -80,45 +77,46 @@ func setupLinuxEnvironment(t *testing.T) { t.Fatalf("could not get k8s node list: %v", err) } - t.Log("Creating Linux pods through deployment") + // shchen comment out + // t.Log("Creating Linux pods through deployment") - // run goldpinger ipv4 and ipv6 test cases saperately - var daemonset appsv1.DaemonSet - var deployment appsv1.Deployment + // // run goldpinger ipv4 and ipv6 test cases saperately + // var daemonset appsv1.DaemonSet + // var deployment appsv1.Deployment - deployment = kubernetes.MustParseDeployment(LinuxDeployIPV4) - daemonset = kubernetes.MustParseDaemonSet(gpDaemonset) + // deployment = kubernetes.MustParseDeployment(LinuxDeployIPV4) + // daemonset = kubernetes.MustParseDaemonSet(gpDaemonset) // setup common RBAC, ClusteerRole, ClusterRoleBinding, ServiceAccount - rbacSetupFn := kubernetes.MustSetUpClusterRBAC(ctx, clientset, gpClusterRolePath, gpClusterRoleBindingPath, gpServiceAccountPath) + // rbacSetupFn := kubernetes.MustSetUpClusterRBAC(ctx, clientset, gpClusterRolePath, gpClusterRoleBindingPath, gpServiceAccountPath) // Fields for overwritting existing deployment yaml. 
// Defaults from flags will not change anything - deployment.Spec.Selector.MatchLabels[podLabelKey] = *podPrefix - deployment.Spec.Template.ObjectMeta.Labels[podLabelKey] = *podPrefix - deployment.Spec.Template.Spec.NodeSelector[nodepoolKey] = *nodepoolSelector - deployment.Name = *podPrefix - deployment.Namespace = *podNamespace - daemonset.Namespace = *podNamespace - - deploymentsClient := clientset.AppsV1().Deployments(*podNamespace) - kubernetes.MustCreateDeployment(ctx, deploymentsClient, deployment) - - daemonsetClient := clientset.AppsV1().DaemonSets(daemonset.Namespace) - kubernetes.MustCreateDaemonset(ctx, daemonsetClient, daemonset) - - t.Cleanup(func() { - t.Log("cleaning up resources") - rbacSetupFn() - - if err := deploymentsClient.Delete(ctx, deployment.Name, metav1.DeleteOptions{}); err != nil { - t.Log(err) - } - - if err := daemonsetClient.Delete(ctx, daemonset.Name, metav1.DeleteOptions{}); err != nil { - t.Log(err) - } - }) + // deployment.Spec.Selector.MatchLabels[podLabelKey] = *podPrefix + // deployment.Spec.Template.ObjectMeta.Labels[podLabelKey] = *podPrefix + // deployment.Spec.Template.Spec.NodeSelector[nodepoolKey] = *nodepoolSelector + // deployment.Name = *podPrefix + // deployment.Namespace = *podNamespace + // daemonset.Namespace = *podNamespace + + // deploymentsClient := clientset.AppsV1().Deployments(*podNamespace) + // kubernetes.MustCreateDeployment(ctx, deploymentsClient, deployment) + + // daemonsetClient := clientset.AppsV1().DaemonSets(daemonset.Namespace) + // kubernetes.MustCreateDaemonset(ctx, daemonsetClient, daemonset) + + // t.Cleanup(func() { + // t.Log("cleaning up resources") + // rbacSetupFn() + + // if err := deploymentsClient.Delete(ctx, deployment.Name, metav1.DeleteOptions{}); err != nil { + // t.Log(err) + // } + + // if err := daemonsetClient.Delete(ctx, daemonset.Name, metav1.DeleteOptions{}); err != nil { + // t.Log(err) + // } + // }) t.Log("Waiting for pods to be running state") err = kubernetes.WaitForPodsRunning(ctx, clientset, *podNamespace, podLabelSelector) From 94557b85ae97716bec5669b337f91fbd3f7a99a8 Mon Sep 17 00:00:00 2001 From: shchen Date: Tue, 23 Apr 2024 22:41:00 -0700 Subject: [PATCH 31/49] Move tests to the code file. 
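The Go file now owns the checks the pipeline script performs by hand: it
waits for the mtpods, reads each pod's MultitenantPodNetworkConfig
through the new CRD REST client in test/internal/kubernetes, strips the
/32 suffix from Status.PrimaryIP, and pings the result from every pod.
A rough local walkthrough (a sketch rather than the pipeline's exact
steps: it assumes SUBNET_TOKEN, SUBNET_GUID and SUBNET_RESOURCE_ID are
exported, and uses envsubst as one way to fill the $-placeholders in
podnetwork.yaml; -test-kubeconfig defaults to ~/.kube/config):

    envsubst < test/integration/manifests/swiftv2/podnetwork.yaml | kubectl apply -f -
    kubectl apply -f test/integration/manifests/swiftv2/pni.yaml
    kubectl apply -f test/integration/manifests/swiftv2/mtpod0.yaml
    kubectl apply -f test/integration/manifests/swiftv2/mtpod1.yaml
    cd test/integration/swiftv2
    go test -count=1 -timeout 3m -tags=swiftv2,integration -run '^TestSwiftv2PodToPod$'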
--- .../swiftv2-e2e-step-template.yaml | 7 +- .../swiftv2/datapath_swiftv2_test.go | 680 ------------------ test/integration/swiftv2/swiftv2_test.go | 366 ++++++++++ test/internal/kubernetes/utils.go | 26 + 4 files changed, 396 insertions(+), 683 deletions(-) delete mode 100644 test/integration/swiftv2/datapath_swiftv2_test.go create mode 100644 test/integration/swiftv2/swiftv2_test.go diff --git a/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml b/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml index 4cbc64e166..29925d4dd2 100644 --- a/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml +++ b/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml @@ -78,9 +78,10 @@ steps: set -e kubectl get po -owide -A cd test/integration/swiftv2 - echo "Swiftv2 test" - echo "will run it in the near future" - go test -count=1 datapath_swiftv2.go -timeout 3m -tags swiftv2 -run ^TestDatapathLinux$ -tags=swiftv2,integration + echo "Swiftv2 TestDatapathLinux" + go test -count=1 swiftv2_test.go -timeout 3m -tags swiftv2 -run ^TestDatapathLinux$ -tags=swiftv2,integration + echo "TestSwiftv2PodToPod" + go test -count=1 swiftv2_test.go -timeout 3m -tags swiftv2 -run ^TestDatapathLinux$ -tags=swiftv2,integration retryCountOnTaskFailure: 3 name: "Swiftv2_Tests" displayName: "Swiftv2 Tests" diff --git a/test/integration/swiftv2/datapath_swiftv2_test.go b/test/integration/swiftv2/datapath_swiftv2_test.go deleted file mode 100644 index 2b048b2a17..0000000000 --- a/test/integration/swiftv2/datapath_swiftv2_test.go +++ /dev/null @@ -1,680 +0,0 @@ -//go:build swiftv2 - -package swiftv2 - -import ( - "context" - "flag" - "testing" - "time" - - k8s "github.com/Azure/azure-container-networking/test/integration" - "github.com/Azure/azure-container-networking/test/integration/goldpinger" - "github.com/Azure/azure-container-networking/test/internal/kubernetes" - "github.com/Azure/azure-container-networking/test/internal/retry" - "github.com/pkg/errors" -) - -const ( - podLabelKey = "app" - podCount = 2 - nodepoolKey = "mtapool" - LinuxDeployIPV4 = "../manifests/datapath/linux-deployment.yaml" - podNetworkYaml = "../manifests/swiftv2/podnetwork.yaml" - mtpodYaml = "../manifests/swiftv2/mtpod0.yaml" - pniYaml = "../manifests/swiftv2/pni.yaml" - maxRetryDelaySeconds = 10 - defaultTimeoutSeconds = 120 - defaultRetryDelaySeconds = 1 - goldpingerRetryCount = 24 - goldpingerDelayTimeSeconds = 5 - gpFolder = "../manifests/goldpinger" - gpClusterRolePath = gpFolder + "/cluster-role.yaml" - gpClusterRoleBindingPath = gpFolder + "/cluster-role-binding.yaml" - gpServiceAccountPath = gpFolder + "/service-account.yaml" - gpDaemonset = gpFolder + "/daemonset.yaml" - gpDaemonsetIPv6 = gpFolder + "/daemonset-ipv6.yaml" - gpDeployment = gpFolder + "/deployment.yaml" -) - -var ( - podPrefix = flag.String("podName", "mta", "Prefix for test pods") - podNamespace = flag.String("namespace", "default", "Namespace for test pods") - nodepoolSelector = flag.String("nodepoolSelector", "mtapool", "Provides nodepool as a Linux Node-Selector for pods") - // TODO: add flag to support dual nic scenario - isDualStack = flag.Bool("isDualStack", false, "whether system supports dualstack scenario") - defaultRetrier = retry.Retrier{ - Attempts: 10, - Delay: defaultRetryDelaySeconds * time.Second, - } -) - -/* -This test assumes that you have the current credentials loaded in your default kubeconfig for a -k8s cluster with a Linux nodepool consisting of at least 2 Linux nodes. 
-*** The expected nodepool name is mtapool, if the nodepool has a different name ensure that you change nodepoolSelector with: - -nodepoolSelector="yournodepoolname" - -This test checks pod to pod, pod to node, pod to Internet check - -Timeout context is controled by the -timeout flag. - -*/ - -func setupLinuxEnvironment(t *testing.T) { - ctx := context.Background() - - t.Log("Create Clientset") - clientset := kubernetes.MustGetClientset() - - t.Log("Create Label Selectors") - podLabelSelector := kubernetes.CreateLabelSelector(podLabelKey, podPrefix) - nodeLabelSelector := kubernetes.CreateLabelSelector(nodepoolKey, nodepoolSelector) - - t.Log("Get Nodes") - nodes, err := kubernetes.GetNodeListByLabelSelector(ctx, clientset, nodeLabelSelector) - if err != nil { - t.Fatalf("could not get k8s node list: %v", err) - } - - // shchen comment out - // t.Log("Creating Linux pods through deployment") - - // // run goldpinger ipv4 and ipv6 test cases saperately - // var daemonset appsv1.DaemonSet - // var deployment appsv1.Deployment - - // deployment = kubernetes.MustParseDeployment(LinuxDeployIPV4) - // daemonset = kubernetes.MustParseDaemonSet(gpDaemonset) - - // setup common RBAC, ClusteerRole, ClusterRoleBinding, ServiceAccount - // rbacSetupFn := kubernetes.MustSetUpClusterRBAC(ctx, clientset, gpClusterRolePath, gpClusterRoleBindingPath, gpServiceAccountPath) - - // Fields for overwritting existing deployment yaml. - // Defaults from flags will not change anything - // deployment.Spec.Selector.MatchLabels[podLabelKey] = *podPrefix - // deployment.Spec.Template.ObjectMeta.Labels[podLabelKey] = *podPrefix - // deployment.Spec.Template.Spec.NodeSelector[nodepoolKey] = *nodepoolSelector - // deployment.Name = *podPrefix - // deployment.Namespace = *podNamespace - // daemonset.Namespace = *podNamespace - - // deploymentsClient := clientset.AppsV1().Deployments(*podNamespace) - // kubernetes.MustCreateDeployment(ctx, deploymentsClient, deployment) - - // daemonsetClient := clientset.AppsV1().DaemonSets(daemonset.Namespace) - // kubernetes.MustCreateDaemonset(ctx, daemonsetClient, daemonset) - - // t.Cleanup(func() { - // t.Log("cleaning up resources") - // rbacSetupFn() - - // if err := deploymentsClient.Delete(ctx, deployment.Name, metav1.DeleteOptions{}); err != nil { - // t.Log(err) - // } - - // if err := daemonsetClient.Delete(ctx, daemonset.Name, metav1.DeleteOptions{}); err != nil { - // t.Log(err) - // } - // }) - - t.Log("Waiting for pods to be running state") - err = kubernetes.WaitForPodsRunning(ctx, clientset, *podNamespace, podLabelSelector) - if err != nil { - t.Fatalf("Pods are not in running state due to %+v", err) - } - - t.Log("Successfully created customer Linux pods") - - t.Log("Checking swiftv2 multitenant pods number") - for _, node := range nodes.Items { - pods, err := kubernetes.GetPodsByNode(ctx, clientset, *podNamespace, podLabelSelector, node.Name) - if err != nil { - t.Fatalf("could not get k8s clientset: %v", err) - } - if len(pods.Items) <= 1 { - t.Fatalf("Less than 2 pods on node: %v", node.Name) - } - } - - t.Log("Linux test environment ready") -} - -// func TestPodToPodSourceIP(config ergonomic.Config, -// kubeconfig string, -// settings *PodToPodSettings) { -// ctx := config.Ctx() -// kubeClient := clientgen.Default(kubeconfig) -// k8sClient := client.MustCreateK8SClientFromKubeConfig(kubeconfig) - -// // Create namespace. 
-// logger := config.Logger("k8sPodToVMSourceIP") -// logger.LogKV("step", "create namespace in the cluster") -// nsCreated := clientgen.EnsureNamespaceExists( -// ctx, -// kubeClient, -// logger, -// "pod-pod-src-ip-"+e2enaming.GenerateRandomName(5), -// ) -// logger.Logf("namespace %q created successfully", nsCreated.Name) - -// // Create pod network instance if labels exist -// if settings.PodLabels != nil { -// if podNetwork := settings.PodLabels["kubernetes.azure.com/pod-network"]; podNetwork != "" { -// if podNetworkInstance := settings.PodLabels["kubernetes.azure.com/pod-network-instance"]; podNetworkInstance != "" { -// CreatePodNetworkInstance(ctx, config.RunCtx(), kubeconfig, nsCreated.Name, podNetworkInstance, podNetwork, settings.NodeCountLinux) -// } -// } -// } - -// // Create hostNetwork deployment for the agnhost image from k/k E2E test framework. -// // AgnHost's netexec command runs an HTTP server with an endpoint /clientip that echos the client's source IP. -// deploymentPods := CreateAgnHostDeployment( -// config.RunCtx(), -// ctx, -// k8sClient, -// kubeClient, -// logger, -// nsCreated.Name, -// "linux", -// settings.DestPort, -// settings.HostNetwork, -// 1, -// false, -// ) - -// if settings.NodeCountWindows > 0 { -// deploymentPods = append(deploymentPods, -// CreateAgnHostDeployment( -// config.RunCtx(), -// ctx, -// k8sClient, -// kubeClient, -// logger, -// nsCreated.Name, -// "windows", -// settings.DestPort, -// settings.HostNetwork, -// 1, -// false, -// )...) -// } - -// // Create daemonset of curl pods, one on each node. -// curlPods := CreateCurlDaemonset( -// config.RunCtx(), -// ctx, -// k8sClient, -// kubeClient, -// logger, -// nsCreated.Name, -// settings.NodeCountLinux, -// settings.NodeCountWindows, -// settings.PodLabels, -// ) - -// cURLs := []string{} -// ipFamily := settings.IPFamily -// for _, deploymentPod := range deploymentPods { -// // From each curl pod, request /clientip from each agnhost deployment pod. -// // This will echo back the source IP as seen by the server, which we expect to be equal to the curl pod's IP unless SNAT is expected - -// clientIpURL := getClientIPEndpoint(getPodIPForFamily(deploymentPod.Status.PodIPs, ipFamily), ipFamily, deploymentPod.Spec.Containers[0].Ports[0].ContainerPort) -// m.Expect(clientIpURL).NotTo(m.BeEmpty(), -// "no valid IP in family %s found for pod %s", -// ipFamily, -// deploymentPod.Name) -// cURLs = append(cURLs, clientIpURL) - -// if settings.VerifyHostPort && !settings.HostNetwork { -// cURLs = append(cURLs, fmt.Sprintf("http://%s:%d", deploymentPod.Status.HostIP, deploymentPod.Spec.Containers[0].Ports[0].HostPort)) -// } -// } - -// logger.LogKV("step", "curl from each node") -// for _, curlPod := range curlPods { -// for _, url := range cURLs { -// var expectedPodIP string -// if settings.SNATExpected { -// node, err := k8sClient. -// Clientset. -// CoreV1(). -// Nodes(). 
-// Get(ctx, curlPod.Spec.NodeName, k8smetav1.GetOptions{}) -// m.Expect(err).NotTo(m.HaveOccurred()) - -// // get the ipfamily of the curl pod, then get the node ip of pod (host) -// expectedPodIP = getInternalNodeIPForFamily(*node, ipFamily).String() -// m.Expect(expectedPodIP).NotTo(m.BeNil(), "no host IP found for pod %s", curlPod.Name) -// if strings.Contains(url, curlPod.Status.HostIP) || checkIPsInNode(*node, url) { -// continue -// } -// } else { -// expectedPodIP = getPodIPForFamily(curlPod.Status.PodIPs, ipFamily).String() -// } - -// m.Expect(expectedPodIP).NotTo(m.BeEmpty(), -// "no valid IP in family %s found for pod %s", -// ipFamily, -// curlPod.Name) - -// // windows pod to its own host port times out -// if (settings.VerifyHostPort || settings.HostNetwork) && strings.Contains(curlPod.Name, "windows") && strings.Contains(url, curlPod.Status.HostIP) { -// continue -// } - -// logger.Logf("curl from pod %q to %q", curlPod.Name, url) -// result, err := retry.DoFixedRetryWithMaxCount( -// func() retry.Result { -// stdout, stderr, err := clientgen.PodExecWithError( -// logger, -// kubeconfig, -// curlPod.Name, -// curlPod.Namespace, -// []string{"curl", "-g", "--max-time", "30", url}, -// ) -// if err != nil { -// logger.Logf("curl %q request failed: error: %s, stdout: %s, stderr: %s", url, err, stdout, stderr) -// return retry.Result{ -// Status: retry.NeedRetry, -// Body: "curl request failed", -// } -// } - -// if strings.Contains(url, "/clientip") { - -// logger.Logf("checking response from agnhost endpoint contains expected IP. "+ -// "endpoint=%s\nexpected IP=%s\nstdout=%s\nstderr=%s\n", url, expectedPodIP, stdout, stderr) -// if strings.Contains(stdout, expectedPodIP) { -// logger.Logf("found expected IP %s in stdout %s", expectedPodIP, stdout) -// } else { -// logger.Logf("stdout %s does not contain expected IP %s", stdout, expectedPodIP) -// return retry.Result{Status: retry.NeedRetry, Body: stdout} -// } - -// } - -// return retry.Result{ -// Status: retry.Success, -// Body: stdout, -// } -// }, -// podExecRetryInterval, -// podExecRetryTimeout, -// podExecRetryMaxAttempts) - -// m.Expect(err).NotTo(m.HaveOccurred(), "err: %s", err) -// m.Expect(result.Status).To(m.Equal(retry.Success)) -// } -// } - -// // Cleanup by deleting the namespace. 
-// logger.LogKV("step", "delete namespace in the cluster") -// clientgen.EnsureNamespaceDeleted(ctx, kubeClient, logger, nsCreated.Name) -// logger.Logf("deleted namespace %q", nsCreated.Name) -// } - -// func GetMultitenantPodNetworkConfig(ctx context.Context, runCtx e2ev2.RunContext, kubeconfig, namespace, name string) v1alpha1.MultitenantPodNetworkConfig { -// crdClient, err := GetRESTClientForMultitenantCRD(kubeconfig) -// m.Expect(err).NotTo(m.HaveOccurred(), "failed to get multitenant crd rest client: %s", err) -// logger := runCtx.Logger("getMultitenantPodNetworkConfig") -// var mtpnc v1alpha1.MultitenantPodNetworkConfig -// retryResult := retry.RetryWithMaxCountWithContext( -// context.Background(), -// func() (*retry.Result, *cgerror.CategorizedError) { -// err = crdClient.Get().Namespace(namespace).Resource("multitenantpodnetworkconfigs").Name(name).Do(ctx).Into(&mtpnc) -// if err != nil { -// logger.Logf("failed to retrieve multitenantpodnetworkconfig: error: %s", err) -// retriable := true -// return &retry.Result{ -// Status: retry.NeedRetry, -// Body: err, -// }, &cgerror.CategorizedError{ -// Retriable: &retriable, -// } -// } -// if mtpnc.Status.MacAddress == "" || mtpnc.Status.PrimaryIP == "" { -// retriable := true -// return &retry.Result{ -// Status: retry.NeedRetry, -// Body: "waiting for mtpnc to be ready", -// }, &cgerror.CategorizedError{ -// Retriable: &retriable, -// } -// } -// return &retry.Result{ -// Status: retry.Success, -// Body: err, -// }, nil -// }, -// podExecRetryInterval, -// podExecRetryTimeout, -// podExecRetryMaxAttempts, -// retry.FixedType, -// ) -// m.Expect(retryResult.Status).To(m.Equal(retry.Success)) -// return mtpnc -// } - -// func GetRESTClientForMultitenantCRD(kubeconfig string) (*rest.RESTClient, error) { -// scheme := runtime.NewScheme() -// err := acnv1alpha1.AddToScheme(scheme) -// if err != nil { -// return nil, err -// } - -// restConfig, err := clientcmd.RESTConfigFromKubeConfig([]byte(kubeconfig)) -// if err != nil { -// return nil, err -// } - -// restConfig.ContentConfig.GroupVersion = &acnv1alpha1.GroupVersion -// restConfig.APIPath = "/apis" -// restConfig.NegotiatedSerializer = serializer.NewCodecFactory(scheme) -// restConfig.UserAgent = rest.DefaultKubernetesUserAgent() - -// return rest.UnversionedRESTClientFor(restConfig) -// } - -// // EnsureAllPodsAreRunning expects all pods returned from getPods are running. -// func EnsureAllPodsAreRunning( -// ctx context.Context, -// k Interface, -// runCtxLogger e2ev2.RunContextLogger, -// getPods func(k Interface) ([]k8scorev1.Pod, error), -// checkOpt *CheckPodOption, -// ) []k8scorev1.Pod { -// return k.Failer().MustPodList(ExpectAllPodsAreRunning(ctx, k, runCtxLogger, getPods, checkOpt)) -// } - -// // PodExecWithError executes a command in a pod by using its first container. -// // It returns the stdout, stderr and error. 
-// func PodExecWithError( -// logger e2ev2.RunContextLogger, -// kubeconfig string, -// podName string, namespace string, -// command []string, -// ) (string, string, error) { -// clientcmdConfig, err := k8sclientcmd.Load([]byte(kubeconfig)) -// if err != nil { -// return "", "", fmt.Errorf("failed to load kube config: %w", err) -// } - -// directClientcmdConfig := k8sclientcmd.NewNonInteractiveClientConfig( -// *clientcmdConfig, -// "", // default context -// &k8sclientcmd.ConfigOverrides{}, -// nil, // config access -// ) - -// clientRestConfig, err := directClientcmdConfig.ClientConfig() -// if err != nil { -// return "", "", fmt.Errorf("failed to create kube client config: %w", err) -// } -// WrapClientRestConfigWithRetry(clientRestConfig) - -// clientRestConfig.Timeout = 10 * time.Minute - -// client, err := k8sclientset.NewForConfig(clientRestConfig) -// if err != nil { -// return "", "", fmt.Errorf("failed to create kube clientset: %w", err) -// } - -// pod, err := client.CoreV1().Pods(namespace).Get(context.TODO(), podName, k8smetav1.GetOptions{}) -// if err != nil { -// return "", "", fmt.Errorf("get pod: %w", err) -// } -// containerName := pod.Spec.Containers[0].Name -// req := client.CoreV1().RESTClient().Post(). -// Resource("pods"). -// Name(podName). -// Namespace(namespace). -// SubResource("exec"). -// Param("container", containerName) - -// req.VersionedParams(&k8scorev1.PodExecOptions{ -// Command: command, -// Stdin: false, -// Stdout: true, -// Stderr: true, -// TTY: false, -// Container: containerName, -// }, k8sscheme.ParameterCodec) - -// var stdout, stderr bytes.Buffer -// executor, err := remotecommand.NewSPDYExecutor(clientRestConfig, "POST", req.URL()) -// if err != nil { -// return "", "", fmt.Errorf("NewSPDYExecutor: %w", err) -// } - -// // NOTE: remotecommand is not a Kubernetes pod resource API used here, but a tool API. 
-// ctx := context.Background() -// // yes, 3 mins is a magic number -// ctx, cancel := context.WithTimeout(ctx, 3*time.Minute) -// defer cancel() -// logger.Logf("executing command: %s", strings.Join(command, " ")) -// readStreamErr := executor.StreamWithContext(ctx, remotecommand.StreamOptions{ -// Stdin: nil, -// Stdout: &stdout, -// Stderr: &stderr, -// Tty: false, -// }) - -// // FIXME(hbc): Windows validation expect stdout/stderr output even seeing error -// // therefore we need to return the stdout/stderr output here -// stdoutRead := strings.TrimSpace(stdout.String()) -// stderrRead := strings.TrimSpace(stderr.String()) -// return stdoutRead, stderrRead, readStreamErr -// } - -// func HandleSwiftv2PodToPodTestcase() e2e.Handler { -// var ( -// kubeconfig string -// namespace string -// numNodes int -// podLabels map[string]string -// ) - -// fs := schemahelper.NewFlagSet() -// fs.RequiredStringVar(&kubeconfig, "kube_config", "First AKS cluster credentials to use") -// fs.StringVar(&namespace, "namespace", "", "namespace to deploy, generate if not specified") -// fs.IntVar(&numNodes, "num_linux_nodes", 2, "number of linux nodes in cluster") -// fs.StringMapStringVar(&podLabels, "pod_labels", nil, "client pod labels") - -// return &handler{ -// name: Swiftv2PodToPod, -// parametersSchema: fs.BuildParametersSchema(), -// handler: func(runCtx e2e.RunContext) { -// fs.Parse(runCtx.Parameters()) -// logger := runCtx.Logger("k8sPodConnectionCrossClusterWithPeeredNetwork") -// ctx := context.Background() - -// if namespace == "" { -// namespace = generateNamespace() -// } - -// // Create pod network instance -// m.Expect(podLabels).ToNot(m.BeNil()) -// podNetwork := podLabels["kubernetes.azure.com/pod-network"] -// podNetworkInstance := podLabels["kubernetes.azure.com/pod-network-instance"] - -// k8sClient := MustCreateK8SClientFromKubeConfig(kubeconfig) -// kubeClient := clientgen.Default(kubeconfig) - -// logger.LogKV("step", "create namespace in the cluster") -// clientgen.EnsureNamespaceExists(ctx, kubeClient, logger, namespace) -// logger.LogKV("namespace", namespace, "state", "created") - -// network.CreatePodNetworkInstance(ctx, runCtx, kubeconfig, namespace, podNetworkInstance, podNetwork, numNodes) - -// logger.LogKV("step", "create deployment in the cluster") -// testcase := BusyboxTestcase{ -// Namespace: namespace, -// BusyboxImage: dockerimage.ImageBusybox.MustGetFromRunContext(runCtx), -// Basename: "mtpod-to-mtpod", -// Replicas: numNodes, -// PodAntiAffinityHostname: true, -// Labels: podLabels, -// } -// deployment := testcase.Deployment() -// k8sClient.MustCreateDeployment(namespace, deployment) - -// logger.LogKV("step", "wait until the pods in the deployment are ready") -// k8sClient.MustWaitDeploymentReady(namespace, k8smetav1.ListOptions{}, numNodes, nil) - -// deploymentPods := EnsureAllPodsAreRunning(ctx, -// kubeClient, -// logger, -// func(k clientgen.Interface) ([]k8scorev1.Pod, error) { -// result := k.Pods(namespace).List(ctx, k8smetav1.ListOptions{LabelSelector: fmt.Sprintf("app in (%s)", testcase.Basename)}) -// err := result.Err() -// if err != nil { -// logger.Logf("failed to list pods on node: %s", err) -// return nil, err -// } - -// podList := result.OrElseThrow() - -// numPods := len(podList.Items) -// if numPods != numNodes { -// return nil, fmt.Errorf("waiting for %d/%d pods", numPods, numNodes) -// } - -// return podList.Items, nil -// }, -// &clientgen.CheckPodOption{ -// CheckInterval: podExecRetryInterval, -// CheckTimeout: podExecRetryTimeout, 
-// }) - -// logger.LogKV("step", "validate swiftv2 pods datapath") -// ipsToPing := make([]string, 0, numNodes) -// for _, pod := range deploymentPods { -// mtpnc := network.GetMultitenantPodNetworkConfig(ctx, runCtx, kubeconfig, pod.Namespace, pod.Name) -// m.Expect(pod.Status.PodIPs).To(m.HaveLen(1)) -// // remove /32 from PrimaryIP -// splitcidr := strings.Split(mtpnc.Status.PrimaryIP, "/") -// m.Expect(splitcidr).To(m.HaveLen(2)) -// ipsToPing = append(ipsToPing, splitcidr[0]) -// } - -// for _, pod := range deploymentPods { -// for _, ip := range ipsToPing { -// logger.Logf("ping from pod %q to %q", pod.Name, ip) -// result, err := retry.DoFixedRetryWithMaxCount( -// func() retry.Result { -// stdout, stderr, err := clientgen.PodExecWithError( -// logger, -// kubeconfig, -// pod.Name, -// pod.Namespace, -// []string{"ping", "-c", "3", ip}, -// ) -// if err != nil { -// logger.Logf("ping %q failed: error: %s, stdout: %s, stderr: %s", ip, err, stdout, stderr) -// return retry.Result{ -// Status: retry.NeedRetry, -// Body: "ping failed", -// } -// } - -// return retry.Result{ -// Status: retry.Success, -// Body: stdout, -// } -// }, -// podExecRetryInterval, -// podExecRetryTimeout, -// podExecRetryMaxAttempts) - -// m.Expect(err).NotTo(m.HaveOccurred(), "err: %s", err) -// m.Expect(result.Status).To(m.Equal(retry.Success)) -// } -// } - -// // Cleanup by deleting the namespace. -// logger.LogKV("step", "delete namespace in the cluster") -// clientgen.EnsureNamespaceDeleted(ctx, kubeClient, logger, namespace) -// logger.Logf("deleted namespace %q", namespace) -// }, -// } -// } - -func TestDatapathLinux(t *testing.T) { - ctx := context.Background() - - t.Log("Get REST config") - restConfig := kubernetes.MustGetRestConfig() - - t.Log("Create Clientset") - clientset := kubernetes.MustGetClientset() - - setupLinuxEnvironment(t) - podLabelSelector := kubernetes.CreateLabelSelector(podLabelKey, podPrefix) - - t.Run("Linux ping tests", func(t *testing.T) { - // Check goldpinger health - t.Run("all pods have IPs assigned", func(t *testing.T) { - err := kubernetes.WaitForPodsRunning(ctx, clientset, *podNamespace, podLabelSelector) - if err != nil { - t.Fatalf("Pods are not in running state due to %+v", err) - } - t.Log("all pods have been allocated IPs") - }) - - t.Run("all linux pods can ping each other", func(t *testing.T) { - clusterCheckCtx, cancel := context.WithTimeout(ctx, 3*time.Minute) - defer cancel() - - pfOpts := k8s.PortForwardingOpts{ - Namespace: *podNamespace, - LabelSelector: podLabelSelector, - LocalPort: 9090, - DestPort: 8080, - } - - pf, err := k8s.NewPortForwarder(restConfig, t, pfOpts) - if err != nil { - t.Fatal(err) - } - - portForwardCtx, cancel := context.WithTimeout(ctx, defaultTimeoutSeconds*time.Second) - defer cancel() - - portForwardFn := func() error { - err := pf.Forward(portForwardCtx) - if err != nil { - t.Logf("unable to start port forward: %v", err) - return err - } - return nil - } - - if err := defaultRetrier.Do(portForwardCtx, portForwardFn); err != nil { - t.Fatalf("could not start port forward within %d: %v", defaultTimeoutSeconds, err) - } - defer pf.Stop() - - gpClient := goldpinger.Client{Host: pf.Address()} - clusterCheckFn := func() error { - clusterState, err := gpClient.CheckAll(clusterCheckCtx) - if err != nil { - return err - } - stats := goldpinger.ClusterStats(clusterState) - stats.PrintStats() - if stats.AllPingsHealthy() { - return nil - } - - return errors.New("not all pings are healthy") - } - retrier := retry.Retrier{Attempts: 
goldpingerRetryCount, Delay: goldpingerDelayTimeSeconds * time.Second}
-	if err := retrier.Do(clusterCheckCtx, clusterCheckFn); err != nil {
-		t.Fatalf("goldpinger pods network health could not reach healthy state after %d seconds: %v", goldpingerRetryCount*goldpingerDelayTimeSeconds, err)
-	}
-
-	t.Log("all pings successful!")
-	})
-	})
-}
diff --git a/test/integration/swiftv2/swiftv2_test.go b/test/integration/swiftv2/swiftv2_test.go
new file mode 100644
index 0000000000..b429b5b67c
--- /dev/null
+++ b/test/integration/swiftv2/swiftv2_test.go
@@ -0,0 +1,366 @@
+//go:build swiftv2
+
+package swiftv2
+
+import (
+	"bytes"
+	"context"
+	"flag"
+	"fmt"
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/Azure/azure-container-networking/crd/multitenancy/api/v1alpha1"
+	k8s "github.com/Azure/azure-container-networking/test/integration"
+	"github.com/Azure/azure-container-networking/test/integration/goldpinger"
+	"github.com/Azure/azure-container-networking/test/internal/kubernetes"
+	"github.com/Azure/azure-container-networking/test/internal/retry"
+	"github.com/pkg/errors"
+	k8scorev1 "k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
+	k8smetav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	k8sclientset "k8s.io/client-go/kubernetes"
+	k8sscheme "k8s.io/client-go/kubernetes/scheme"
+	"k8s.io/client-go/tools/clientcmd"
+	"k8s.io/client-go/tools/remotecommand"
+)
+
+const (
+	podLabelKey                = "kubernetes.azure.com/pod-network-instance"
+	podCount                   = 2
+	nodepoolKey                = "agentpool"
+	LinuxDeployIPV4            = "../manifests/datapath/linux-deployment.yaml"
+	podNetworkYaml             = "../manifests/swiftv2/podnetwork.yaml"
+	mtpodYaml                  = "../manifests/swiftv2/mtpod0.yaml"
+	pniYaml                    = "../manifests/swiftv2/pni.yaml"
+	maxRetryDelaySeconds       = 10
+	defaultTimeoutSeconds      = 120
+	defaultRetryDelaySeconds   = 1
+	goldpingerRetryCount       = 24
+	goldpingerDelayTimeSeconds = 5
+	gpFolder                   = "../manifests/goldpinger"
+	gpClusterRolePath          = gpFolder + "/cluster-role.yaml"
+	gpClusterRoleBindingPath   = gpFolder + "/cluster-role-binding.yaml"
+	gpServiceAccountPath       = gpFolder + "/service-account.yaml"
+	gpDaemonset                = gpFolder + "/daemonset.yaml"
+	gpDaemonsetIPv6            = gpFolder + "/daemonset-ipv6.yaml"
+	gpDeployment               = gpFolder + "/deployment.yaml"
+	IpsInAnotherCluster        = "172.25.0.27"
+)
+
+var (
+	podPrefix        = flag.String("podnetworkinstance", "pni1", "the pni pod used")
+	podNamespace     = flag.String("namespace", "default", "Namespace for test pods")
+	nodepoolSelector = flag.String("nodelabel", "mtapool", "One of the node label and the key is agentpool")
+	// TODO: add flag to support dual nic scenario
+	isDualStack = flag.Bool("isDualStack", false, "whether system supports dualstack scenario")
+	defaultRetrier = retry.Retrier{
+		Attempts: 10,
+		Delay:    defaultRetryDelaySeconds * time.Second,
+	}
+)
+
+/*
+This test assumes that you have the current credentials loaded in your default kubeconfig for a
+k8s cluster with a Linux nodepool consisting of at least 2 Linux nodes.
+*** The expected nodepool name is mtapool; if the nodepool has a different name, ensure that you change nodepoolSelector with:
+	-nodepoolSelector="yournodepoolname"
+
+This test checks pod-to-pod, pod-to-node, and pod-to-Internet connectivity.
+
+Timeout context is controlled by the -timeout flag.
+ +*/ + +func setupLinuxEnvironment(t *testing.T) { + ctx := context.Background() + + t.Log("Create Clientset") + clientset := kubernetes.MustGetClientset() + + t.Log("Create Label Selectors") + podLabelSelector := kubernetes.CreateLabelSelector(podLabelKey, podPrefix) + nodeLabelSelector := kubernetes.CreateLabelSelector(nodepoolKey, nodepoolSelector) + + t.Log("Get Nodes") + nodes, err := kubernetes.GetNodeListByLabelSelector(ctx, clientset, nodeLabelSelector) + if err != nil { + t.Fatalf("could not get k8s node list: %v", err) + } + + t.Log("Waiting for pods to be running state") + err = kubernetes.WaitForPodsRunning(ctx, clientset, *podNamespace, podLabelSelector) + if err != nil { + t.Fatalf("Pods are not in running state due to %+v", err) + } + + t.Log("Successfully created customer Linux pods") + + t.Log("Checking swiftv2 multitenant pods number") + for _, node := range nodes.Items { + pods, err := kubernetes.GetPodsByNode(ctx, clientset, *podNamespace, podLabelSelector, node.Name) + if err != nil { + t.Fatalf("could not get k8s clientset: %v", err) + } + if len(pods.Items) < 1 { + t.Fatalf("No pod on node: %v", node.Name) + } + } + + t.Log("Linux test environment ready") +} + +func TestDatapathLinux(t *testing.T) { + ctx := context.Background() + + t.Log("Get REST config") + restConfig := kubernetes.MustGetRestConfig() + + t.Log("Get REST Client from REST config") + + //crdClient, err := kubernetes.GetRESTClientForMultitenantCRD(*kubernetes.Kubeconfig) + + t.Log("Create Clientset") + clientset := kubernetes.MustGetClientset() + + setupLinuxEnvironment(t) + podLabelSelector := kubernetes.CreateLabelSelector(podLabelKey, podPrefix) + + t.Run("Linux ping tests", func(t *testing.T) { + // Check goldpinger health + t.Run("all pods have IPs assigned", func(t *testing.T) { + err := kubernetes.WaitForPodsRunning(ctx, clientset, *podNamespace, podLabelSelector) + if err != nil { + t.Fatalf("Pods are not in running state due to %+v", err) + } + t.Log("all pods have been allocated IPs") + }) + + t.Run("all linux pods can ping each other", func(t *testing.T) { + clusterCheckCtx, cancel := context.WithTimeout(ctx, 3*time.Minute) + defer cancel() + + pfOpts := k8s.PortForwardingOpts{ + Namespace: *podNamespace, + LabelSelector: podLabelSelector, + LocalPort: 9090, + DestPort: 8080, + } + + pf, err := k8s.NewPortForwarder(restConfig, t, pfOpts) + if err != nil { + t.Fatal(err) + } + + portForwardCtx, cancel := context.WithTimeout(ctx, defaultTimeoutSeconds*time.Second) + defer cancel() + + portForwardFn := func() error { + err := pf.Forward(portForwardCtx) + if err != nil { + t.Logf("unable to start port forward: %v", err) + return err + } + return nil + } + + if err := defaultRetrier.Do(portForwardCtx, portForwardFn); err != nil { + t.Fatalf("could not start port forward within %d: %v", defaultTimeoutSeconds, err) + } + defer pf.Stop() + + gpClient := goldpinger.Client{Host: pf.Address()} + clusterCheckFn := func() error { + clusterState, err := gpClient.CheckAll(clusterCheckCtx) + if err != nil { + return err + } + stats := goldpinger.ClusterStats(clusterState) + stats.PrintStats() + if stats.AllPingsHealthy() { + return nil + } + + return errors.New("not all pings are healthy") + } + retrier := retry.Retrier{Attempts: goldpingerRetryCount, Delay: goldpingerDelayTimeSeconds * time.Second} + if err := retrier.Do(clusterCheckCtx, clusterCheckFn); err != nil { + t.Fatalf("goldpinger pods network health could not reach healthy state after %d seconds: %v", 
goldpingerRetryCount*goldpingerDelayTimeSeconds, err) + } + + t.Log("all pings successful!") + }) + }) +} + +func GetMultitenantPodNetworkConfig(t *testing.T, ctx context.Context, kubeconfig, namespace, name string) v1alpha1.MultitenantPodNetworkConfig { + crdClient, err := kubernetes.GetRESTClientForMultitenantCRD(*kubernetes.Kubeconfig) + if err != nil { + t.Fatalf("failed to get multitenant crd rest client: %s", err) + } + var mtpnc v1alpha1.MultitenantPodNetworkConfig + err = crdClient.Get().Namespace(namespace).Resource("multitenantpodnetworkconfigs").Name(name).Do(ctx).Into(&mtpnc) + if err != nil { + t.Errorf("failed to retrieve multitenantpodnetworkconfig: error: %s", err) + } + if mtpnc.Status.MacAddress == "" || mtpnc.Status.PrimaryIP == "" { + t.Errorf("mtpnc.Status.MacAddress is %v or mtpnc.Status.PrimaryIP is %v and at least one of them is Empty, ", + mtpnc.Status.MacAddress, mtpnc.Status.PrimaryIP) + } + return mtpnc +} + +// PodExecWithError executes a command in a pod by using its first container. +// It returns the stdout, stderr and error. +func PodExecWithError( + t *testing.T, + kubeconfig string, + podName string, namespace string, + command []string, +) (string, string, error) { + clientcmdConfig, err := clientcmd.Load([]byte(kubeconfig)) + if err != nil { + return "", "", fmt.Errorf("failed to load kube config: %w", err) + } + + directClientcmdConfig := clientcmd.NewNonInteractiveClientConfig( + *clientcmdConfig, + "", // default context + &clientcmd.ConfigOverrides{}, + nil, // config access + ) + + clientRestConfig, err := directClientcmdConfig.ClientConfig() + if err != nil { + return "", "", fmt.Errorf("failed to create kube client config: %w", err) + } + + clientRestConfig.Timeout = 10 * time.Minute + + client, err := k8sclientset.NewForConfig(clientRestConfig) + if err != nil { + return "", "", fmt.Errorf("failed to create kube clientset: %w", err) + } + + pod, err := client.CoreV1().Pods(namespace).Get(context.TODO(), podName, k8smetav1.GetOptions{}) + if err != nil { + return "", "", fmt.Errorf("get pod: %w", err) + } + containerName := pod.Spec.Containers[0].Name + req := client.CoreV1().RESTClient().Post(). + Resource("pods"). + Name(podName). + Namespace(namespace). + SubResource("exec"). + Param("container", containerName) + + req.VersionedParams(&k8scorev1.PodExecOptions{ + Command: command, + Stdin: false, + Stdout: true, + Stderr: true, + TTY: false, + Container: containerName, + }, k8sscheme.ParameterCodec) + + var stdout, stderr bytes.Buffer + executor, err := remotecommand.NewSPDYExecutor(clientRestConfig, "POST", req.URL()) + if err != nil { + return "", "", fmt.Errorf("NewSPDYExecutor: %w", err) + } + + // NOTE: remotecommand is not a Kubernetes pod resource API used here, but a tool API. 
+ ctx := context.Background() + // yes, 3 mins is a magic number + ctx, cancel := context.WithTimeout(ctx, 3*time.Minute) + defer cancel() + t.Logf("executing command: %s", strings.Join(command, " ")) + readStreamErr := executor.StreamWithContext(ctx, remotecommand.StreamOptions{ + Stdin: nil, + Stdout: &stdout, + Stderr: &stderr, + Tty: false, + }) + + // FIXME(hbc): Windows validation expect stdout/stderr output even seeing error + // therefore we need to return the stdout/stderr output here + stdoutRead := strings.TrimSpace(stdout.String()) + stderrRead := strings.TrimSpace(stderr.String()) + return stdoutRead, stderrRead, readStreamErr +} + +func TestSwiftv2PodToPod() (t *testing.T) { + var ( + kubeconfig string + numNodes int + ) + + ctx := context.Background() + + t.Log("Create Clientset") + clientset := kubernetes.MustGetClientset() + + t.Log("Create Label Selectors") + podLabelSelector := kubernetes.CreateLabelSelector(podLabelKey, podPrefix) + nodeLabelSelector := kubernetes.CreateLabelSelector(nodepoolKey, nodepoolSelector) + + t.Log("Get Nodes") + nodes, err := kubernetes.GetNodeListByLabelSelector(ctx, clientset, nodeLabelSelector) + if err != nil { + t.Fatalf("could not get k8s node list: %v", err) + } + + t.Log("Waiting for pods to be running state") + err = kubernetes.WaitForPodsRunning(ctx, clientset, *podNamespace, podLabelSelector) + if err != nil { + t.Fatalf("Pods are not in running state due to %+v", err) + } + + t.Log("Successfully created customer Linux pods") + + t.Log("Checking swiftv2 multitenant pods number and get IPs") + allPods := make([]v1.Pod, numNodes) + ipsToPing := make([]string, 0, numNodes) + for _, node := range nodes.Items { + pods, err := kubernetes.GetPodsByNode(ctx, clientset, *podNamespace, podLabelSelector, node.Name) + if err != nil { + t.Fatalf("could not get k8s clientset: %v", err) + } + if len(pods.Items) < 1 { + t.Fatalf("No pod on node: %v", node.Name) + } + for _, pod := range pods.Items { + allPods = append(allPods, pod) + mtpnc := GetMultitenantPodNetworkConfig(t, ctx, kubeconfig, pod.Namespace, pod.Name) + if len(pod.Status.PodIPs) != 1 { + t.Fatalf("Pod doesn't have any IP associated.") + } + // remove /32 from PrimaryIP + splitcidr := strings.Split(mtpnc.Status.PrimaryIP, "/") + if len(splitcidr) != 2 { + t.Fatalf("Split Pods IP with its cidr failed.") + } + ipsToPing = append(ipsToPing, splitcidr[0]) + } + } + ipsToPing = append(ipsToPing, IpsInAnotherCluster) + t.Log("Linux test environment ready") + + for _, pod := range allPods { + for _, ip := range ipsToPing { + t.Logf("ping from pod %q to %q", pod.Name, ip) + stdout, stderr, err := PodExecWithError( + t, + kubeconfig, + pod.Name, + pod.Namespace, + []string{"ping", "-c", "3", ip}, + ) + if err != nil { + t.Errorf("ping %q failed: error: %s, stdout: %s, stderr: %s", ip, err, stdout, stderr) + } + } + } + return +} diff --git a/test/internal/kubernetes/utils.go b/test/internal/kubernetes/utils.go index aa9122d154..04201a205a 100644 --- a/test/internal/kubernetes/utils.go +++ b/test/internal/kubernetes/utils.go @@ -11,6 +11,7 @@ import ( "path/filepath" "time" + "github.com/Azure/azure-container-networking/crd/multitenancy/api/v1alpha1" "github.com/Azure/azure-container-networking/test/internal/retry" "github.com/pkg/errors" appsv1 "k8s.io/api/apps/v1" @@ -18,6 +19,8 @@ import ( v1 "k8s.io/api/rbac/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" 
"k8s.io/apimachinery/pkg/util/yaml" "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/scheme" @@ -47,6 +50,10 @@ const ( var Kubeconfig = flag.String("test-kubeconfig", filepath.Join(homedir.HomeDir(), ".kube", "config"), "(optional) absolute path to the kubeconfig file") +func GetKubeconfig() *string { + return Kubeconfig +} + func MustGetClientset() *kubernetes.Clientset { config, err := clientcmd.BuildConfigFromFlags("", *Kubeconfig) if err != nil { @@ -66,6 +73,25 @@ func MustGetRestConfig() *rest.Config { } return config } +func GetRESTClientForMultitenantCRD(kubeconfig string) (*rest.RESTClient, error) { + scheme := runtime.NewScheme() + err := v1alpha1.AddToScheme(scheme) + if err != nil { + return nil, err + } + + restConfig, err := clientcmd.RESTConfigFromKubeConfig([]byte(kubeconfig)) + if err != nil { + return nil, err + } + + restConfig.ContentConfig.GroupVersion = &v1alpha1.GroupVersion + restConfig.APIPath = "/apis" + restConfig.NegotiatedSerializer = serializer.NewCodecFactory(scheme) + restConfig.UserAgent = rest.DefaultKubernetesUserAgent() + + return rest.UnversionedRESTClientFor(restConfig) +} func mustParseResource(path string, out interface{}) { f, err := os.Open(path) From 1fb9acf9043089929ac6f56b288b713c32646236 Mon Sep 17 00:00:00 2001 From: shchen Date: Fri, 26 Apr 2024 00:30:27 -0700 Subject: [PATCH 32/49] Update the dummy cluster since the previous one has been auto deleted. --- .../multitenancy/swiftv2-e2e-step-template.yaml | 15 +++++++++++++-- test/integration/manifests/swiftv2/mtpod0.yaml | 2 +- test/integration/manifests/swiftv2/mtpod1.yaml | 2 +- test/integration/manifests/swiftv2/pni.yaml | 2 +- .../integration/manifests/swiftv2/podnetwork.yaml | 2 +- 5 files changed, 17 insertions(+), 6 deletions(-) diff --git a/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml b/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml index 29925d4dd2..5a70d36b0e 100644 --- a/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml +++ b/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml @@ -60,8 +60,6 @@ steps: kubectl describe pni echo "Start the connection test" kubectl exec mtpod0 -it -- ip a - echo "Test the connection to a non-mt pod (172.25.0.27) in the same delegated network and in another cluster" - kubectl exec mtpod0 -it -- ping -c 3 -W 1 172.25.0.27 echo "Test the connection to a mtpod in the same delegated network and in another node" export IP0=`kubectl describe pni pni1 | grep /32 | head -1 | cut -c 5- | head -c -4` echo $IP0 @@ -69,6 +67,8 @@ steps: export IP1=`kubectl describe pni pni1 | grep /32 | tail -1 | cut -c 5- | head -c -4` echo $IP1 kubectl exec mtpod0 -it -- ping -c 3 $IP1 + echo "Test the connection to a non-mt pod (172.25.0.27) in the same delegated network and in another cluster" + kubectl exec mtpod0 -it -- ping -c 3 -W 1 172.25.0.27 name: "start_swiftv2_pods_and_test_basic_connection" displayName: "Start Swiftv2 Pods and test basic connection" env: @@ -77,6 +77,17 @@ steps: - script: | set -e kubectl get po -owide -A + echo "Start the connection test" + kubectl exec mtpod0 -it -- ip a + echo "Test the connection to a mtpod in the same delegated network and in another node" + export IP0=`kubectl describe pni pni1 | grep /32 | head -1 | cut -c 5- | head -c -4` + echo $IP0 + kubectl exec mtpod0 -it -- ping -c 3 $IP0 + export IP1=`kubectl describe pni pni1 | grep /32 | tail -1 | cut -c 5- | head -c -4` + echo $IP1 + kubectl exec mtpod0 -it -- ping -c 3 $IP1 + echo "Test the connection to a non-mt pod (172.25.0.27) in the same 
delegated network and in another cluster" + kubectl exec mtpod0 -it -- ping -c 3 -W 1 172.25.0.27 cd test/integration/swiftv2 echo "Swiftv2 TestDatapathLinux" go test -count=1 swiftv2_test.go -timeout 3m -tags swiftv2 -run ^TestDatapathLinux$ -tags=swiftv2,integration diff --git a/test/integration/manifests/swiftv2/mtpod0.yaml b/test/integration/manifests/swiftv2/mtpod0.yaml index f1d02a2a3f..357c240582 100644 --- a/test/integration/manifests/swiftv2/mtpod0.yaml +++ b/test/integration/manifests/swiftv2/mtpod0.yaml @@ -2,7 +2,7 @@ apiVersion: v1 kind: Pod metadata: labels: - kubernetes.azure.com/pod-network: aksswiftvnetv2 + kubernetes.azure.com/pod-network: aksswiftvnetv20425 kubernetes.azure.com/pod-network-instance: pni1 name: mtpod0 namespace: default diff --git a/test/integration/manifests/swiftv2/mtpod1.yaml b/test/integration/manifests/swiftv2/mtpod1.yaml index e584d943cd..4894ef8eed 100644 --- a/test/integration/manifests/swiftv2/mtpod1.yaml +++ b/test/integration/manifests/swiftv2/mtpod1.yaml @@ -2,7 +2,7 @@ apiVersion: v1 kind: Pod metadata: labels: - kubernetes.azure.com/pod-network: aksswiftvnetv2 + kubernetes.azure.com/pod-network: aksswiftvnetv20425 kubernetes.azure.com/pod-network-instance: pni1 name: mtpod1 namespace: default diff --git a/test/integration/manifests/swiftv2/pni.yaml b/test/integration/manifests/swiftv2/pni.yaml index 17c33cc534..04cd0847b5 100644 --- a/test/integration/manifests/swiftv2/pni.yaml +++ b/test/integration/manifests/swiftv2/pni.yaml @@ -3,5 +3,5 @@ kind: PodNetworkInstance metadata: name: pni1 spec: - podnetwork: aksswiftvnetv2 + podnetwork: aksswiftvnetv20425 podIPReservationSize: 2 diff --git a/test/integration/manifests/swiftv2/podnetwork.yaml b/test/integration/manifests/swiftv2/podnetwork.yaml index 7d6c357787..ef289bb315 100644 --- a/test/integration/manifests/swiftv2/podnetwork.yaml +++ b/test/integration/manifests/swiftv2/podnetwork.yaml @@ -3,7 +3,7 @@ kind: PodNetwork metadata: labels: kubernetes.azure.com/override-subnet-token: $SUBNET_TOKEN - name: aksswiftvnetv2 + name: aksswiftvnetv20425 spec: subnetGUID: $SUBNET_GUID subnetResourceID: $SUBNET_RESOURCE_ID From e7611d9019592fd82e177e884f30e288ce008b91 Mon Sep 17 00:00:00 2001 From: shchen Date: Fri, 26 Apr 2024 14:26:29 -0700 Subject: [PATCH 33/49] Update the IP of the pod. --- .pipelines/multitenancy/swiftv2-e2e-step-template.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml b/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml index 5a70d36b0e..f9ef74c67d 100644 --- a/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml +++ b/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml @@ -67,8 +67,8 @@ steps: export IP1=`kubectl describe pni pni1 | grep /32 | tail -1 | cut -c 5- | head -c -4` echo $IP1 kubectl exec mtpod0 -it -- ping -c 3 $IP1 - echo "Test the connection to a non-mt pod (172.25.0.27) in the same delegated network and in another cluster" - kubectl exec mtpod0 -it -- ping -c 3 -W 1 172.25.0.27 + echo "Test the connection to a non-mt pod (172.25.0.7) in the same delegated network and in another cluster" + kubectl exec mtpod0 -it -- ping -c 3 -W 1 172.25.0.7 name: "start_swiftv2_pods_and_test_basic_connection" displayName: "Start Swiftv2 Pods and test basic connection" env: From c52c5a0965ce2d23b1b5905c231cbbc86378ea5b Mon Sep 17 00:00:00 2001 From: shchen Date: Fri, 26 Apr 2024 15:42:49 -0700 Subject: [PATCH 34/49] Update the host IP in the test part too. 
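For context, the scripted connection check recovers the two reserved /32
addresses from `kubectl describe pni pni1` with fixed character offsets
(cut -c 5- followed by head -c -4), which breaks silently if the
describe layout ever shifts. A field-based sketch of the same extraction
(it assumes each reserved IP appears as a whitespace-separated
x.x.x.x/32 token in the describe output, which is worth confirming
against a live cluster first):

    IP0=$(kubectl describe pni pni1 | awk '/\/32/ {sub("/32", "", $NF); print $NF; exit}')
    echo "$IP0"
    kubectl exec mtpod0 -it -- ping -c 3 "$IP0"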
--- .pipelines/multitenancy/swiftv2-e2e-step-template.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml b/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml index f9ef74c67d..ba363cbfb3 100644 --- a/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml +++ b/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml @@ -86,8 +86,8 @@ steps: export IP1=`kubectl describe pni pni1 | grep /32 | tail -1 | cut -c 5- | head -c -4` echo $IP1 kubectl exec mtpod0 -it -- ping -c 3 $IP1 - echo "Test the connection to a non-mt pod (172.25.0.27) in the same delegated network and in another cluster" - kubectl exec mtpod0 -it -- ping -c 3 -W 1 172.25.0.27 + echo "Test the connection to a non-mt pod (172.25.0.7) in the same delegated network and in another cluster" + kubectl exec mtpod0 -it -- ping -c 3 -W 1 172.25.0.7 cd test/integration/swiftv2 echo "Swiftv2 TestDatapathLinux" go test -count=1 swiftv2_test.go -timeout 3m -tags swiftv2 -run ^TestDatapathLinux$ -tags=swiftv2,integration From 33588c9e4c9888c467809a32b5463ea308a305d7 Mon Sep 17 00:00:00 2001 From: shchen Date: Sat, 27 Apr 2024 23:59:07 -0700 Subject: [PATCH 35/49] Separate different test stages. --- .../swiftv2-e2e-step-template.yaml | 50 +++++++++---------- 1 file changed, 23 insertions(+), 27 deletions(-) diff --git a/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml b/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml index ba363cbfb3..4ffd37e556 100644 --- a/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml +++ b/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml @@ -58,42 +58,38 @@ steps: echo "Check pods after 4 minutes" kubectl get po -owide -A kubectl describe pni - echo "Start the connection test" - kubectl exec mtpod0 -it -- ip a - echo "Test the connection to a mtpod in the same delegated network and in another node" - export IP0=`kubectl describe pni pni1 | grep /32 | head -1 | cut -c 5- | head -c -4` - echo $IP0 - kubectl exec mtpod0 -it -- ping -c 3 $IP0 - export IP1=`kubectl describe pni pni1 | grep /32 | tail -1 | cut -c 5- | head -c -4` - echo $IP1 - kubectl exec mtpod0 -it -- ping -c 3 $IP1 - echo "Test the connection to a non-mt pod (172.25.0.7) in the same delegated network and in another cluster" - kubectl exec mtpod0 -it -- ping -c 3 -W 1 172.25.0.7 - name: "start_swiftv2_pods_and_test_basic_connection" - displayName: "Start Swiftv2 Pods and test basic connection" + name: "start_swiftv2_pods" + displayName: "Start Swiftv2 Pods" env: SUBNET_TOKEN: $(SUBNET_TOKEN) - script: | set -e kubectl get po -owide -A - echo "Start the connection test" - kubectl exec mtpod0 -it -- ip a - echo "Test the connection to a mtpod in the same delegated network and in another node" - export IP0=`kubectl describe pni pni1 | grep /32 | head -1 | cut -c 5- | head -c -4` - echo $IP0 - kubectl exec mtpod0 -it -- ping -c 3 $IP0 - export IP1=`kubectl describe pni pni1 | grep /32 | tail -1 | cut -c 5- | head -c -4` - echo $IP1 - kubectl exec mtpod0 -it -- ping -c 3 $IP1 - echo "Test the connection to a non-mt pod (172.25.0.7) in the same delegated network and in another cluster" - kubectl exec mtpod0 -it -- ping -c 3 -W 1 172.25.0.7 + echo "Start the connection test" + kubectl exec mtpod0 -it -- ip a + echo "Test the connection to a mtpod in the same delegated network and in another node" + export IP0=`kubectl describe pni pni1 | grep /32 | head -1 | cut -c 5- | head -c -4` + echo $IP0 + kubectl exec mtpod0 -it -- ping -c 3 $IP0 + export IP1=`kubectl 
describe pni pni1 | grep /32 | tail -1 | cut -c 5- | head -c -4` + echo $IP1 + kubectl exec mtpod0 -it -- ping -c 3 $IP1 + echo "Test the connection to a non-mt pod (172.25.0.7) in the same delegated network and in another cluster" + kubectl exec mtpod0 -it -- ping -c 3 -W 1 172.25.0.7 + retryCountOnTaskFailure: 3 + name: "Swiftv2_Tests" + displayName: "Swiftv2 Tests through script" + + - script: | + set -e + kubectl get po -owide -A cd test/integration/swiftv2 echo "Swiftv2 TestDatapathLinux" go test -count=1 swiftv2_test.go -timeout 3m -tags swiftv2 -run ^TestDatapathLinux$ -tags=swiftv2,integration echo "TestSwiftv2PodToPod" - go test -count=1 swiftv2_test.go -timeout 3m -tags swiftv2 -run ^TestDatapathLinux$ -tags=swiftv2,integration + go test -count=1 swiftv2_test.go -timeout 3m -tags swiftv2 -run ^TestSwiftv2PodToPod$ -tags=swiftv2,integration retryCountOnTaskFailure: 3 - name: "Swiftv2_Tests" - displayName: "Swiftv2 Tests" + name: "Swiftv2_Tests_future_version" + displayName: "Swiftv2 Tests through code" From 5b1295d05d5060aecdc70edbb51322f1a4ebc145 Mon Sep 17 00:00:00 2001 From: shchen Date: Sun, 28 Apr 2024 21:36:29 -0700 Subject: [PATCH 36/49] Use mtpnc to grep IP. --- .pipelines/multitenancy/swiftv2-e2e-step-template.yaml | 6 ++++-- test/integration/swiftv2/swiftv2_test.go | 2 +- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml b/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml index 4ffd37e556..a268d325fa 100644 --- a/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml +++ b/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml @@ -69,10 +69,12 @@ steps: echo "Start the connection test" kubectl exec mtpod0 -it -- ip a echo "Test the connection to a mtpod in the same delegated network and in another node" - export IP0=`kubectl describe pni pni1 | grep /32 | head -1 | cut -c 5- | head -c -4` + //export IP0=`kubectl describe pni pni1 | grep /32 | head -1 | cut -c 5- | head -c -4` + export IP0=`k describe mtpnc mtpod0 | grep "Primary IP" | cut -c 17- | head -c -4` echo $IP0 kubectl exec mtpod0 -it -- ping -c 3 $IP0 - export IP1=`kubectl describe pni pni1 | grep /32 | tail -1 | cut -c 5- | head -c -4` + //export IP1=`kubectl describe pni pni1 | grep /32 | tail -1 | cut -c 5- | head -c -4` + export IP1=`k describe mtpnc mtpod1 | grep "Primary IP" | cut -c 17- | head -c -4` echo $IP1 kubectl exec mtpod0 -it -- ping -c 3 $IP1 echo "Test the connection to a non-mt pod (172.25.0.7) in the same delegated network and in another cluster" diff --git a/test/integration/swiftv2/swiftv2_test.go b/test/integration/swiftv2/swiftv2_test.go index b429b5b67c..26db78ca71 100644 --- a/test/integration/swiftv2/swiftv2_test.go +++ b/test/integration/swiftv2/swiftv2_test.go @@ -290,7 +290,7 @@ func PodExecWithError( return stdoutRead, stderrRead, readStreamErr } -func TestSwiftv2PodToPod() (t *testing.T) { +func TestSwiftv2PodToPod(t *testing.T) { var ( kubeconfig string numNodes int From 7be53c12b90a558289592e190784ff6913dce73c Mon Sep 17 00:00:00 2001 From: shchen Date: Sun, 28 Apr 2024 23:09:32 -0700 Subject: [PATCH 37/49] Update k to kubectl --- .pipelines/multitenancy/swiftv2-e2e-step-template.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml b/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml index a268d325fa..ddfb970242 100644 --- a/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml +++ 
b/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml @@ -70,11 +70,11 @@ steps: kubectl exec mtpod0 -it -- ip a echo "Test the connection to a mtpod in the same delegated network and in another node" //export IP0=`kubectl describe pni pni1 | grep /32 | head -1 | cut -c 5- | head -c -4` - export IP0=`k describe mtpnc mtpod0 | grep "Primary IP" | cut -c 17- | head -c -4` + export IP0=`kubectl describe mtpnc mtpod0 | grep "Primary IP" | cut -c 17- | head -c -4` echo $IP0 kubectl exec mtpod0 -it -- ping -c 3 $IP0 //export IP1=`kubectl describe pni pni1 | grep /32 | tail -1 | cut -c 5- | head -c -4` - export IP1=`k describe mtpnc mtpod1 | grep "Primary IP" | cut -c 17- | head -c -4` + export IP1=`kubectl describe mtpnc mtpod1 | grep "Primary IP" | cut -c 17- | head -c -4` echo $IP1 kubectl exec mtpod0 -it -- ping -c 3 $IP1 echo "Test the connection to a non-mt pod (172.25.0.7) in the same delegated network and in another cluster" From 6fd12a8b07e03009a7ca7671b03d30ab35c32c61 Mon Sep 17 00:00:00 2001 From: shchen Date: Mon, 29 Apr 2024 21:16:49 -0700 Subject: [PATCH 38/49] Add mtpnc detailed info. --- .pipelines/multitenancy/swiftv2-e2e-step-template.yaml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml b/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml index ddfb970242..75a3b43061 100644 --- a/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml +++ b/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml @@ -68,6 +68,10 @@ steps: kubectl get po -owide -A echo "Start the connection test" kubectl exec mtpod0 -it -- ip a + echo "Check mtpnc" + kubectl get mtpnc + kubectl describe mtpnc mtpod0 + kubectl describe mtpnc mtpod1 echo "Test the connection to a mtpod in the same delegated network and in another node" //export IP0=`kubectl describe pni pni1 | grep /32 | head -1 | cut -c 5- | head -c -4` export IP0=`kubectl describe mtpnc mtpod0 | grep "Primary IP" | cut -c 17- | head -c -4` From ff6773b704e6c895771a391df76da1bcbf199f0a Mon Sep 17 00:00:00 2001 From: shchen Date: Mon, 29 Apr 2024 22:26:42 -0700 Subject: [PATCH 39/49] Remove the wrong comments. --- .pipelines/multitenancy/swiftv2-e2e-step-template.yaml | 2 -- 1 file changed, 2 deletions(-) diff --git a/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml b/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml index 75a3b43061..3990da4261 100644 --- a/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml +++ b/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml @@ -73,11 +73,9 @@ steps: kubectl describe mtpnc mtpod0 kubectl describe mtpnc mtpod1 echo "Test the connection to a mtpod in the same delegated network and in another node" - //export IP0=`kubectl describe pni pni1 | grep /32 | head -1 | cut -c 5- | head -c -4` export IP0=`kubectl describe mtpnc mtpod0 | grep "Primary IP" | cut -c 17- | head -c -4` echo $IP0 kubectl exec mtpod0 -it -- ping -c 3 $IP0 - //export IP1=`kubectl describe pni pni1 | grep /32 | tail -1 | cut -c 5- | head -c -4` export IP1=`kubectl describe mtpnc mtpod1 | grep "Primary IP" | cut -c 17- | head -c -4` echo $IP1 kubectl exec mtpod0 -it -- ping -c 3 $IP1 From 4e5ae36e0c45d7b1b757e65353ed697961dc5ce4 Mon Sep 17 00:00:00 2001 From: shchen Date: Tue, 30 Apr 2024 00:56:53 -0700 Subject: [PATCH 40/49] Use the script to run the test for now. 
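While the connection checks stay in shell for now, the mtpnc lookup
added in the previous commits can avoid the column-sensitive grep/cut
chain (grep "Primary IP" | cut -c 17- | head -c -4). A jsonpath sketch
of the same lookup (the .status.primaryIP field path is assumed from the
Go type's Status.PrimaryIP and should be confirmed with
`kubectl get mtpnc mtpod0 -o yaml` before relying on it):

    primary=$(kubectl get mtpnc mtpod0 -o jsonpath='{.status.primaryIP}')
    IP0="${primary%%/*}"   # drop the /32 suffix
    kubectl exec mtpod0 -it -- ping -c 3 "$IP0"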
---
 .pipelines/multitenancy/swiftv2-e2e-step-template.yaml | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml b/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml
index 3990da4261..3fb8c8df1a 100644
--- a/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml
+++ b/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml
@@ -89,10 +89,10 @@ steps:
         set -e
         kubectl get po -owide -A
         cd test/integration/swiftv2
-        echo "Swiftv2 TestDatapathLinux"
-        go test -count=1 swiftv2_test.go -timeout 3m -tags swiftv2 -run ^TestDatapathLinux$ -tags=swiftv2,integration
-        echo "TestSwiftv2PodToPod"
-        go test -count=1 swiftv2_test.go -timeout 3m -tags swiftv2 -run ^TestSwiftv2PodToPod$ -tags=swiftv2,integration
+        echo "Swiftv2 TestDatapathLinux and will run it after migration from scripts."
+        echo "go test -count=1 swiftv2_test.go -timeout 3m -tags swiftv2 -run ^TestDatapathLinux$ -tags=swiftv2,integration"
+        echo "TestSwiftv2PodToPod and will run it after migration from scripts."
+        echo "go test -count=1 swiftv2_test.go -timeout 3m -tags swiftv2 -run ^TestSwiftv2PodToPod$ -tags=swiftv2,integration"
       retryCountOnTaskFailure: 3
       name: "Swiftv2_Tests_future_version"
       displayName: "Swiftv2 Tests through code"

From 37e2c14f85ceeac795f2841b63e343ce816431b8 Mon Sep 17 00:00:00 2001
From: shchen
Date: Tue, 30 Apr 2024 16:40:15 -0700
Subject: [PATCH 41/49] Remove unnecessary variables.

---
 .pipelines/templates/create-cluster-swiftv2.yaml | 13 +++++++------
 test/integration/swiftv2/swiftv2_test.go         |  8 ++++----
 2 files changed, 11 insertions(+), 10 deletions(-)

diff --git a/.pipelines/templates/create-cluster-swiftv2.yaml b/.pipelines/templates/create-cluster-swiftv2.yaml
index d0bfdf12c3..cf05f0a267 100644
--- a/.pipelines/templates/create-cluster-swiftv2.yaml
+++ b/.pipelines/templates/create-cluster-swiftv2.yaml
@@ -34,12 +34,13 @@ jobs:
           mkdir -p ~/.kube/
           make -C ./hack/aks azcfg AZCLI=az REGION=${{ parameters.region }}
 
-          make -C ./hack/aks ${{ parameters.clusterType }} \
-            AZCLI=az REGION=${{ parameters.region }} SUB=$(SUB_AZURE_NETWORK_AGENT_TEST) \
-            CLUSTER=${{ parameters.clusterName }} \
-            VM_SIZE=${{ parameters.vmSize }} VM_SIZE_WIN=${{ parameters.vmSizeWin }} \
-            OS_SKU_WIN=${{ parameters.osSkuWin }} OS=${{parameters.os}} \
-            WINDOWS_USERNAME=${WINDOWS_USERNAME} WINDOWS_PASSWORD=${WINDOWS_PASSWORD}
+          make -C ./hack/aks ${{ parameters.clusterType }}
+          # \
+          #  AZCLI=az REGION=${{ parameters.region }} SUB=$(SUB_AZURE_NETWORK_AGENT_TEST) \
+          #  CLUSTER=${{ parameters.clusterName }} \
+          #  VM_SIZE=${{ parameters.vmSize }} VM_SIZE_WIN=${{ parameters.vmSizeWin }} \
+          #  OS_SKU_WIN=${{ parameters.osSkuWin }} OS=${{parameters.os}} \
+          #  WINDOWS_USERNAME=${WINDOWS_USERNAME} WINDOWS_PASSWORD=${WINDOWS_PASSWORD}
 
           echo "Cluster successfully created"
         displayName: Cluster - ${{ parameters.clusterType }}
diff --git a/test/integration/swiftv2/swiftv2_test.go b/test/integration/swiftv2/swiftv2_test.go
index 26db78ca71..8a7fe5ffbd 100644
--- a/test/integration/swiftv2/swiftv2_test.go
+++ b/test/integration/swiftv2/swiftv2_test.go
@@ -27,7 +27,7 @@ import (
 )
 
 const (
-    podLabelKey = "kubernetes.azure.com/pod-network-instance"
+    pniKey      = "kubernetes.azure.com/pod-network-instance"
     podCount    = 2
     nodepoolKey = "agentpool"
     LinuxDeployIPV4 = "../manifests/datapath/linux-deployment.yaml"
@@ -80,7 +80,7 @@ func setupLinuxEnvironment(t *testing.T) {
     clientset := kubernetes.MustGetClientset()
 
     t.Log("Create Label Selectors")
-    podLabelSelector := kubernetes.CreateLabelSelector(podLabelKey, podPrefix)
+    podLabelSelector := kubernetes.CreateLabelSelector(pniKey, podPrefix)
     nodeLabelSelector := kubernetes.CreateLabelSelector(nodepoolKey, nodepoolSelector)
 
     t.Log("Get Nodes")
@@ -125,7 +125,7 @@ func TestDatapathLinux(t *testing.T) {
     clientset := kubernetes.MustGetClientset()
 
     setupLinuxEnvironment(t)
-    podLabelSelector := kubernetes.CreateLabelSelector(podLabelKey, podPrefix)
+    podLabelSelector := kubernetes.CreateLabelSelector(pniKey, podPrefix)
 
     t.Run("Linux ping tests", func(t *testing.T) {
         // Check goldpinger health
@@ -302,7 +302,7 @@ func TestSwiftv2PodToPod(t *testing.T) {
     clientset := kubernetes.MustGetClientset()
 
     t.Log("Create Label Selectors")
-    podLabelSelector := kubernetes.CreateLabelSelector(podLabelKey, podPrefix)
+    podLabelSelector := kubernetes.CreateLabelSelector(pniKey, podPrefix)
     nodeLabelSelector := kubernetes.CreateLabelSelector(nodepoolKey, nodepoolSelector)
 
     t.Log("Get Nodes")

From 223e7138ee3e52c52285018bb5dd612a5ce9ca15 Mon Sep 17 00:00:00 2001
From: shchen
Date: Tue, 30 Apr 2024 19:35:49 -0700
Subject: [PATCH 42/49] Update the cluster creation.

---
 .pipelines/templates/create-cluster-swiftv2.yaml | 14 +++++---------
 1 file changed, 5 insertions(+), 9 deletions(-)

diff --git a/.pipelines/templates/create-cluster-swiftv2.yaml b/.pipelines/templates/create-cluster-swiftv2.yaml
index cf05f0a267..a2d3f2d43c 100644
--- a/.pipelines/templates/create-cluster-swiftv2.yaml
+++ b/.pipelines/templates/create-cluster-swiftv2.yaml
@@ -5,8 +5,7 @@ parameters:
   clusterName: "" # Recommended to pass in unique identifier
   vmSize: ""
   vmSizeWin: ""
-  k8sVersion: ""
-  osSkuWin: "Windows2022" # Currently we only support Windows2022
+  k8sVersion: "
   dependsOn: ""
   region: ""
   os: linux
@@ -34,13 +33,10 @@ jobs:
           mkdir -p ~/.kube/
           make -C ./hack/aks azcfg AZCLI=az REGION=${{ parameters.region }}
 
-          make -C ./hack/aks ${{ parameters.clusterType }}
-          # \
-          #  AZCLI=az REGION=${{ parameters.region }} SUB=$(SUB_AZURE_NETWORK_AGENT_TEST) \
-          #  CLUSTER=${{ parameters.clusterName }} \
-          #  VM_SIZE=${{ parameters.vmSize }} VM_SIZE_WIN=${{ parameters.vmSizeWin }} \
-          #  OS_SKU_WIN=${{ parameters.osSkuWin }} OS=${{parameters.os}} \
-          #  WINDOWS_USERNAME=${WINDOWS_USERNAME} WINDOWS_PASSWORD=${WINDOWS_PASSWORD}
+          make -C ./hack/aks ${{ parameters.clusterType }} \
+            AZCLI=az REGION=${{ parameters.region }} SUB=$(SUB_AZURE_NETWORK_AGENT_TEST) \
+            CLUSTER=${{ parameters.clusterName }} \
+            VM_SIZE=${{ parameters.vmSize }} OS=${{parameters.os}} \
 
           echo "Cluster successfully created"
         displayName: Cluster - ${{ parameters.clusterType }}

From 93d4b31874413795cba05e5919d71bb8d60784d9 Mon Sep 17 00:00:00 2001
From: shchen
Date: Tue, 30 Apr 2024 21:03:41 -0700
Subject: [PATCH 43/49] Update parameters.
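
With the template parameters commented out, the cluster-up target is driven
by the hack/aks Makefile defaults instead. For reference, a sketch of a
manual invocation of the same flow from a workstation; the
`swift-byocni-up` target name, region, and VM size below are illustrative
assumptions, not values taken from this pipeline:

    # Hypothetical manual run mirroring the trimmed pipeline step.
    make -C ./hack/aks azcfg AZCLI=az REGION=eastus2
    make -C ./hack/aks swift-byocni-up \
        AZCLI=az REGION=eastus2 SUB=<subscription-id> \
        CLUSTER=swiftv2-debug VM_SIZE=Standard_D4s_v3 OS=linux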
---
 .../templates/create-cluster-swiftv2.yaml | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

diff --git a/.pipelines/templates/create-cluster-swiftv2.yaml b/.pipelines/templates/create-cluster-swiftv2.yaml
index a2d3f2d43c..46916fea9f 100644
--- a/.pipelines/templates/create-cluster-swiftv2.yaml
+++ b/.pipelines/templates/create-cluster-swiftv2.yaml
@@ -1,13 +1,13 @@
 parameters:
-  name: ""
-  displayName: ""
-  clusterType: ""
-  clusterName: "" # Recommended to pass in unique identifier
-  vmSize: ""
-  vmSizeWin: ""
-  k8sVersion: "
-  dependsOn: ""
-  region: ""
+  # name: ""
+  # displayName: ""
+  # clusterType: ""
+  # clusterName: ""
+  # vmSize: ""
+  # vmSizeWin: ""
+  # k8sVersion: ""
+  # dependsOn: ""
+  # region: ""
   os: linux
 
 jobs:

From 07854b09a0973517a8837c4d1e63c2ce92d8c99c Mon Sep 17 00:00:00 2001
From: shchen
Date: Tue, 30 Apr 2024 21:48:58 -0700
Subject: [PATCH 44/49] Remove unnecessary parameters.

---
 .pipelines/templates/create-cluster-swiftv2.yaml | 9 ---------
 1 file changed, 9 deletions(-)

diff --git a/.pipelines/templates/create-cluster-swiftv2.yaml b/.pipelines/templates/create-cluster-swiftv2.yaml
index 46916fea9f..4bf79fac9d 100644
--- a/.pipelines/templates/create-cluster-swiftv2.yaml
+++ b/.pipelines/templates/create-cluster-swiftv2.yaml
@@ -1,13 +1,4 @@
 parameters:
-  # name: ""
-  # displayName: ""
-  # clusterType: ""
-  # clusterName: ""
-  # vmSize: ""
-  # vmSizeWin: ""
-  # k8sVersion: ""
-  # dependsOn: ""
-  # region: ""
   os: linux
 
 jobs:

From 98b6e42e9a21bcf6b578b76afb6e4365e06cd078 Mon Sep 17 00:00:00 2001
From: shchen
Date: Mon, 6 May 2024 11:20:30 -0700
Subject: [PATCH 45/49] Update the testing code part to match the script way.

---
 .../swiftv2-e2e-step-template.yaml        |   4 +-
 test/integration/swiftv2/swiftv2_test.go  | 209 +++---------------
 test/internal/kubernetes/utils.go         |  14 ++
 3 files changed, 40 insertions(+), 187 deletions(-)

diff --git a/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml b/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml
index 3fb8c8df1a..19c555c999 100644
--- a/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml
+++ b/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml
@@ -89,10 +89,8 @@ steps:
         set -e
         kubectl get po -owide -A
         cd test/integration/swiftv2
-        echo "Swiftv2 TestDatapathLinux and will run it after migration from scripts."
-        echo "go test -count=1 swiftv2_test.go -timeout 3m -tags swiftv2 -run ^TestDatapathLinux$ -tags=swiftv2,integration"
         echo "TestSwiftv2PodToPod and will run it after migration from scripts."
- echo "go test -count=1 swiftv2_test.go -timeout 3m -tags swiftv2 -run ^TestSwiftv2PodToPod$ -tags=swiftv2,integration" + go test -count=1 swiftv2_test.go -timeout 3m -tags swiftv2 -run ^TestSwiftv2PodToPod$ -tags=swiftv2,integration -v retryCountOnTaskFailure: 3 name: "Swiftv2_Tests_future_version" displayName: "Swiftv2 Tests through code" diff --git a/test/integration/swiftv2/swiftv2_test.go b/test/integration/swiftv2/swiftv2_test.go index 8a7fe5ffbd..32a8125fbe 100644 --- a/test/integration/swiftv2/swiftv2_test.go +++ b/test/integration/swiftv2/swiftv2_test.go @@ -3,27 +3,18 @@ package swiftv2 import ( - "bytes" "context" "flag" - "fmt" "strings" "testing" "time" "github.com/Azure/azure-container-networking/crd/multitenancy/api/v1alpha1" - k8s "github.com/Azure/azure-container-networking/test/integration" - "github.com/Azure/azure-container-networking/test/integration/goldpinger" "github.com/Azure/azure-container-networking/test/internal/kubernetes" "github.com/Azure/azure-container-networking/test/internal/retry" - "github.com/pkg/errors" - k8scorev1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1" - k8smetav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - k8sclientset "k8s.io/client-go/kubernetes" - k8sscheme "k8s.io/client-go/kubernetes/scheme" - "k8s.io/client-go/tools/clientcmd" - "k8s.io/client-go/tools/remotecommand" + kuberneteslib "k8s.io/client-go/kubernetes" + restclient "k8s.io/client-go/rest" ) const ( @@ -46,7 +37,7 @@ const ( gpDaemonset = gpFolder + "/daemonset.yaml" gpDaemonsetIPv6 = gpFolder + "/daemonset-ipv6.yaml" gpDeployment = gpFolder + "/deployment.yaml" - IpsInAnotherCluster = "172.25.0.27" + IpsInAnotherCluster = "172.25.0.7" ) var ( @@ -111,91 +102,10 @@ func setupLinuxEnvironment(t *testing.T) { t.Log("Linux test environment ready") } -func TestDatapathLinux(t *testing.T) { - ctx := context.Background() - - t.Log("Get REST config") - restConfig := kubernetes.MustGetRestConfig() - - t.Log("Get REST Client from REST config") - - //crdClient, err := kubernetes.GetRESTClientForMultitenantCRD(*kubernetes.Kubeconfig) - - t.Log("Create Clientset") - clientset := kubernetes.MustGetClientset() - - setupLinuxEnvironment(t) - podLabelSelector := kubernetes.CreateLabelSelector(pniKey, podPrefix) - - t.Run("Linux ping tests", func(t *testing.T) { - // Check goldpinger health - t.Run("all pods have IPs assigned", func(t *testing.T) { - err := kubernetes.WaitForPodsRunning(ctx, clientset, *podNamespace, podLabelSelector) - if err != nil { - t.Fatalf("Pods are not in running state due to %+v", err) - } - t.Log("all pods have been allocated IPs") - }) - - t.Run("all linux pods can ping each other", func(t *testing.T) { - clusterCheckCtx, cancel := context.WithTimeout(ctx, 3*time.Minute) - defer cancel() - - pfOpts := k8s.PortForwardingOpts{ - Namespace: *podNamespace, - LabelSelector: podLabelSelector, - LocalPort: 9090, - DestPort: 8080, - } - - pf, err := k8s.NewPortForwarder(restConfig, t, pfOpts) - if err != nil { - t.Fatal(err) - } - - portForwardCtx, cancel := context.WithTimeout(ctx, defaultTimeoutSeconds*time.Second) - defer cancel() - - portForwardFn := func() error { - err := pf.Forward(portForwardCtx) - if err != nil { - t.Logf("unable to start port forward: %v", err) - return err - } - return nil - } - - if err := defaultRetrier.Do(portForwardCtx, portForwardFn); err != nil { - t.Fatalf("could not start port forward within %d: %v", defaultTimeoutSeconds, err) - } - defer pf.Stop() - - gpClient := goldpinger.Client{Host: pf.Address()} - clusterCheckFn := func() error { - 
-                clusterState, err := gpClient.CheckAll(clusterCheckCtx)
-                if err != nil {
-                    return err
-                }
-                stats := goldpinger.ClusterStats(clusterState)
-                stats.PrintStats()
-                if stats.AllPingsHealthy() {
-                    return nil
-                }
-
-                return errors.New("not all pings are healthy")
-            }
-            retrier := retry.Retrier{Attempts: goldpingerRetryCount, Delay: goldpingerDelayTimeSeconds * time.Second}
-            if err := retrier.Do(clusterCheckCtx, clusterCheckFn); err != nil {
-                t.Fatalf("goldpinger pods network health could not reach healthy state after %d seconds: %v", goldpingerRetryCount*goldpingerDelayTimeSeconds, err)
-            }
-
-            t.Log("all pings successful!")
-        })
-    })
-}
-
 func GetMultitenantPodNetworkConfig(t *testing.T, ctx context.Context, kubeconfig, namespace, name string) v1alpha1.MultitenantPodNetworkConfig {
-    crdClient, err := kubernetes.GetRESTClientForMultitenantCRD(*kubernetes.Kubeconfig)
+    config := kubernetes.MustGetRestConfig()
+    crdClient, err := kubernetes.GetRESTClientForMultitenantCRDFromConfig(config)
+    t.Logf("config is %s", config)
     if err != nil {
         t.Fatalf("failed to get multitenant crd rest client: %s", err)
     }
@@ -211,95 +121,22 @@ func GetMultitenantPodNetworkConfig(t *testing.T, ctx context.Context, kubeconfi
     return mtpnc
 }
 
-// PodExecWithError executes a command in a pod by using its first container.
-// It returns the stdout, stderr and error.
-func PodExecWithError(
-    t *testing.T,
-    kubeconfig string,
-    podName string, namespace string,
-    command []string,
-) (string, string, error) {
-    clientcmdConfig, err := clientcmd.Load([]byte(kubeconfig))
-    if err != nil {
-        return "", "", fmt.Errorf("failed to load kube config: %w", err)
-    }
-
-    directClientcmdConfig := clientcmd.NewNonInteractiveClientConfig(
-        *clientcmdConfig,
-        "", // default context
-        &clientcmd.ConfigOverrides{},
-        nil, // config access
-    )
-
-    clientRestConfig, err := directClientcmdConfig.ClientConfig()
-    if err != nil {
-        return "", "", fmt.Errorf("failed to create kube client config: %w", err)
-    }
-
-    clientRestConfig.Timeout = 10 * time.Minute
-
-    client, err := k8sclientset.NewForConfig(clientRestConfig)
-    if err != nil {
-        return "", "", fmt.Errorf("failed to create kube clientset: %w", err)
-    }
-
-    pod, err := client.CoreV1().Pods(namespace).Get(context.TODO(), podName, k8smetav1.GetOptions{})
-    if err != nil {
-        return "", "", fmt.Errorf("get pod: %w", err)
-    }
-    containerName := pod.Spec.Containers[0].Name
-    req := client.CoreV1().RESTClient().Post().
-        Resource("pods").
-        Name(podName).
-        Namespace(namespace).
-        SubResource("exec").
-        Param("container", containerName)
-
-    req.VersionedParams(&k8scorev1.PodExecOptions{
-        Command:   command,
-        Stdin:     false,
-        Stdout:    true,
-        Stderr:    true,
-        TTY:       false,
-        Container: containerName,
-    }, k8sscheme.ParameterCodec)
-
-    var stdout, stderr bytes.Buffer
-    executor, err := remotecommand.NewSPDYExecutor(clientRestConfig, "POST", req.URL())
-    if err != nil {
-        return "", "", fmt.Errorf("NewSPDYExecutor: %w", err)
-    }
-
-    // NOTE: remotecommand is not a Kubernetes pod resource API used here, but a tool API.
-    ctx := context.Background()
-    // yes, 3 mins is a magic number
-    ctx, cancel := context.WithTimeout(ctx, 3*time.Minute)
-    defer cancel()
-    t.Logf("executing command: %s", strings.Join(command, " "))
-    readStreamErr := executor.StreamWithContext(ctx, remotecommand.StreamOptions{
-        Stdin:  nil,
-        Stdout: &stdout,
-        Stderr: &stderr,
-        Tty:    false,
-    })
-
-    // FIXME(hbc): Windows validation expect stdout/stderr output even seeing error
-    // therefore we need to return the stdout/stderr output here
-    stdoutRead := strings.TrimSpace(stdout.String())
-    stderrRead := strings.TrimSpace(stderr.String())
-    return stdoutRead, stderrRead, readStreamErr
-}
-
 func TestSwiftv2PodToPod(t *testing.T) {
     var (
         kubeconfig string
         numNodes   int
     )
 
+    kubeconfigPath := *kubernetes.GetKubeconfig()
+    t.Logf("TestSwiftv2PodToPod kubeconfig is %v", kubeconfigPath)
+
     ctx := context.Background()
 
     t.Log("Create Clientset")
     clientset := kubernetes.MustGetClientset()
+    t.Log("Get Clientset config")
+    restConfig := kubernetes.MustGetRestConfig()
+    t.Log("rest config is", restConfig)
 
     t.Log("Create Label Selectors")
     podLabelSelector := kubernetes.CreateLabelSelector(pniKey, podPrefix)
@@ -331,6 +168,7 @@ func TestSwiftv2PodToPod(t *testing.T) {
         t.Fatalf("No pod on node: %v", node.Name)
     }
     for _, pod := range pods.Items {
+        t.Logf("Pod name is %s", pod.Name)
         allPods = append(allPods, pod)
         mtpnc := GetMultitenantPodNetworkConfig(t, ctx, kubeconfig, pod.Namespace, pod.Name)
         if len(pod.Status.PodIPs) != 1 {
@@ -350,17 +188,20 @@ func TestSwiftv2PodToPod(t *testing.T) {
     for _, pod := range allPods {
         for _, ip := range ipsToPing {
             t.Logf("ping from pod %q to %q", pod.Name, ip)
-            stdout, stderr, err := PodExecWithError(
-                t,
-                kubeconfig,
-                pod.Name,
-                pod.Namespace,
-                []string{"ping", "-c", "3", ip},
-            )
-            if err != nil {
-                t.Errorf("ping %q failed: error: %s, stdout: %s, stderr: %s", ip, err, stdout, stderr)
+            result := podTest(t, ctx, clientset, pod, []string{"ping", "-c", "3", ip}, restConfig)
+            if result != nil {
+                t.Errorf("ping %q failed: error: %s", ip, result)
             }
         }
     }
     return
 }
+
+func podTest(t *testing.T, ctx context.Context, clientset *kuberneteslib.Clientset, srcPod v1.Pod, cmd []string, rc *restclient.Config) error {
+    output, err := kubernetes.ExecCmdOnPod(ctx, clientset, srcPod.Namespace, srcPod.Name, cmd, rc)
+    t.Logf(string(output))
+    if err != nil {
+        t.Errorf("failed to execute command on pod: %v", srcPod.Name)
+    }
+    return err
+}
diff --git a/test/internal/kubernetes/utils.go b/test/internal/kubernetes/utils.go
index 04201a205a..ed5d6373d9 100644
--- a/test/internal/kubernetes/utils.go
+++ b/test/internal/kubernetes/utils.go
@@ -73,6 +73,20 @@ func MustGetRestConfig() *rest.Config {
     }
     return config
 }
+
+func GetRESTClientForMultitenantCRDFromConfig(config *rest.Config) (*rest.RESTClient, error) {
+    scheme := runtime.NewScheme()
+    err := v1alpha1.AddToScheme(scheme)
+    if err != nil {
+        return nil, err
+    }
+    config.ContentConfig.GroupVersion = &v1alpha1.GroupVersion
+    config.APIPath = "/apis"
+    config.NegotiatedSerializer = serializer.NewCodecFactory(scheme)
+    config.UserAgent = rest.DefaultKubernetesUserAgent()
+    return rest.UnversionedRESTClientFor(config)
+}
+
 func GetRESTClientForMultitenantCRD(kubeconfig string) (*rest.RESTClient, error) {
     scheme := runtime.NewScheme()
     err := v1alpha1.AddToScheme(scheme)

From 05ec1c704e6aa7dd5190031eb02d5de06b6f388d Mon Sep 17 00:00:00 2001
From: shchen
Date: Mon, 6 May 2024 13:48:49 -0700
Subject: [PATCH 46/49] Remove unnecessary node part.
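
This drops the node-by-node GetPodsByNode loop in favor of a single
label-selector list against the API server. A minimal client-go sketch of
that pattern (it assumes `clientset`, `ctx`, and `t` from the surrounding
test; the selector value is the one the test already uses):

    // One List call returns the test pods across all nodes.
    pods, err := clientset.CoreV1().Pods("default").List(ctx, metav1.ListOptions{
        LabelSelector: "kubernetes.azure.com/pod-network-instance=pni1",
    })
    if err != nil {
        t.Fatalf("could not list pods: %v", err)
    }
    for i := range pods.Items {
        t.Logf("pod %s scheduled on node %s", pods.Items[i].Name, pods.Items[i].Spec.NodeName)
    }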
---
 test/integration/swiftv2/swiftv2_test.go | 92 ++++++++----------------
 1 file changed, 29 insertions(+), 63 deletions(-)

diff --git a/test/integration/swiftv2/swiftv2_test.go b/test/integration/swiftv2/swiftv2_test.go
index 32a8125fbe..3c2b1dda7e 100644
--- a/test/integration/swiftv2/swiftv2_test.go
+++ b/test/integration/swiftv2/swiftv2_test.go
@@ -7,49 +7,33 @@ import (
     "flag"
     "strings"
     "testing"
-    "time"
 
     "github.com/Azure/azure-container-networking/crd/multitenancy/api/v1alpha1"
     "github.com/Azure/azure-container-networking/test/internal/kubernetes"
-    "github.com/Azure/azure-container-networking/test/internal/retry"
     v1 "k8s.io/api/core/v1"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     kuberneteslib "k8s.io/client-go/kubernetes"
     restclient "k8s.io/client-go/rest"
 )
 
 const (
-    pniKey = "kubernetes.azure.com/pod-network-instance"
-    podCount = 2
-    nodepoolKey = "agentpool"
-    LinuxDeployIPV4 = "../manifests/datapath/linux-deployment.yaml"
-    podNetworkYaml = "../manifests/swiftv2/podnetwork.yaml"
-    mtpodYaml = "../manifests/swiftv2/mtpod0.yaml"
-    pniYaml = "../manifests/swiftv2/pni.yaml"
-    maxRetryDelaySeconds = 10
-    defaultTimeoutSeconds = 120
-    defaultRetryDelaySeconds = 1
-    goldpingerRetryCount = 24
-    goldpingerDelayTimeSeconds = 5
-    gpFolder = "../manifests/goldpinger"
-    gpClusterRolePath = gpFolder + "/cluster-role.yaml"
-    gpClusterRoleBindingPath = gpFolder + "/cluster-role-binding.yaml"
-    gpServiceAccountPath = gpFolder + "/service-account.yaml"
-    gpDaemonset = gpFolder + "/daemonset.yaml"
-    gpDaemonsetIPv6 = gpFolder + "/daemonset-ipv6.yaml"
-    gpDeployment = gpFolder + "/deployment.yaml"
-    IpsInAnotherCluster = "172.25.0.7"
+    pniKey = "kubernetes.azure.com/pod-network-instance"
+    podCount = 2
+    nodepoolKey = "agentpool"
+    podNetworkYaml = "../manifests/swiftv2/podnetwork.yaml"
+    mtpodYaml = "../manifests/swiftv2/mtpod0.yaml"
+    pniYaml = "../manifests/swiftv2/pni.yaml"
+    maxRetryDelaySeconds = 10
+    defaultTimeoutSeconds = 120
+    defaultRetryDelaySeconds = 1
+    IpsInAnotherCluster = "172.25.0.7"
+    namespace = "default"
 )
 
 var (
     podPrefix        = flag.String("podnetworkinstance", "pni1", "the pni pod used")
     podNamespace     = flag.String("namespace", "default", "Namespace for test pods")
     nodepoolSelector = flag.String("nodelabel", "mtapool", "One of the node label and the key is agentpool")
-    // TODO: add flag to support dual nic scenario
-    isDualStack = flag.Bool("isDualStack", false, "whether system supports dualstack scenario")
-    defaultRetrier = retry.Retrier{
-        Attempts: 10,
-        Delay:    defaultRetryDelaySeconds * time.Second,
-    }
 )
 
 /*
@@ -140,52 +124,34 @@ func TestSwiftv2PodToPod(t *testing.T) {
 
     t.Log("Create Label Selectors")
     podLabelSelector := kubernetes.CreateLabelSelector(pniKey, podPrefix)
-    nodeLabelSelector := kubernetes.CreateLabelSelector(nodepoolKey, nodepoolSelector)
-
-    t.Log("Get Nodes")
-    nodes, err := kubernetes.GetNodeListByLabelSelector(ctx, clientset, nodeLabelSelector)
-    if err != nil {
-        t.Fatalf("could not get k8s node list: %v", err)
-    }
-
-    t.Log("Waiting for pods to be running state")
-    err = kubernetes.WaitForPodsRunning(ctx, clientset, *podNamespace, podLabelSelector)
-    if err != nil {
-        t.Fatalf("Pods are not in running state due to %+v", err)
-    }
     t.Log("Successfully created customer Linux pods")
 
     t.Log("Checking swiftv2 multitenant pods number and get IPs")
-    allPods := make([]v1.Pod, numNodes)
     ipsToPing := make([]string, 0, numNodes)
-    for _, node := range nodes.Items {
-        pods, err := kubernetes.GetPodsByNode(ctx, clientset, *podNamespace, podLabelSelector, node.Name)
-        if err != nil {
-            t.Fatalf("could not get k8s clientset: %v", err)
-        }
-        if len(pods.Items) < 1 {
-            t.Fatalf("No pod on node: %v", node.Name)
+
+    podsClient := clientset.CoreV1().Pods(namespace)
+    allPods, err := podsClient.List(ctx, metav1.ListOptions{LabelSelector: podLabelSelector})
+    if err != nil {
+        t.Fatalf("could not get pods from clientset: %v", err)
+    }
+    for _, pod := range allPods.Items {
+        t.Logf("Pod name is %s", pod.Name)
+        mtpnc := GetMultitenantPodNetworkConfig(t, ctx, kubeconfig, pod.Namespace, pod.Name)
+        if len(pod.Status.PodIPs) != 1 {
+            t.Fatalf("Pod doesn't have any IP associated.")
         }
-        for _, pod := range pods.Items {
-            t.Logf("Pod name is %s", pod.Name)
-            allPods = append(allPods, pod)
-            mtpnc := GetMultitenantPodNetworkConfig(t, ctx, kubeconfig, pod.Namespace, pod.Name)
-            if len(pod.Status.PodIPs) != 1 {
-                t.Fatalf("Pod doesn't have any IP associated.")
-            }
-            // remove /32 from PrimaryIP
-            splitcidr := strings.Split(mtpnc.Status.PrimaryIP, "/")
-            if len(splitcidr) != 2 {
-                t.Fatalf("Split Pods IP with its cidr failed.")
-            }
-            ipsToPing = append(ipsToPing, splitcidr[0])
+        // remove /32 from PrimaryIP
+        splitcidr := strings.Split(mtpnc.Status.PrimaryIP, "/")
+        if len(splitcidr) != 2 {
+            t.Fatalf("Split Pods IP with its cidr failed.")
         }
+        ipsToPing = append(ipsToPing, splitcidr[0])
     }
     ipsToPing = append(ipsToPing, IpsInAnotherCluster)
 
     t.Log("Linux test environment ready")
 
-    for _, pod := range allPods {
+    for _, pod := range allPods.Items {
         for _, ip := range ipsToPing {
             t.Logf("ping from pod %q to %q", pod.Name, ip)
             result := podTest(t, ctx, clientset, pod, []string{"ping", "-c", "3", ip}, restConfig)

From 89b910f8b26ab9e2f573ae1cba2fe979189fecf9 Mon Sep 17 00:00:00 2001
From: shchen
Date: Mon, 6 May 2024 16:22:02 -0700
Subject: [PATCH 47/49] Remove the tests from script part.

---
 .pipelines/multitenancy/swiftv2-e2e-step-template.yaml | 22 -------------------
 1 file changed, 22 deletions(-)

diff --git a/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml b/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml
index 19c555c999..86cb37bc55 100644
--- a/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml
+++ b/.pipelines/multitenancy/swiftv2-e2e-step-template.yaml
@@ -63,28 +63,6 @@ steps:
       env:
         SUBNET_TOKEN: $(SUBNET_TOKEN)
 
-    - script: |
-        set -e
-        kubectl get po -owide -A
-        echo "Start the connection test"
-        kubectl exec mtpod0 -it -- ip a
-        echo "Check mtpnc"
-        kubectl get mtpnc
-        kubectl describe mtpnc mtpod0
-        kubectl describe mtpnc mtpod1
-        echo "Test the connection to a mtpod in the same delegated network and in another node"
-        export IP0=`kubectl describe mtpnc mtpod0 | grep "Primary IP" | cut -c 17- | head -c -4`
-        echo $IP0
-        kubectl exec mtpod0 -it -- ping -c 3 $IP0
-        export IP1=`kubectl describe mtpnc mtpod1 | grep "Primary IP" | cut -c 17- | head -c -4`
-        echo $IP1
-        kubectl exec mtpod0 -it -- ping -c 3 $IP1
-        echo "Test the connection to a non-mt pod (172.25.0.7) in the same delegated network and in another cluster"
-        kubectl exec mtpod0 -it -- ping -c 3 -W 1 172.25.0.7
-      retryCountOnTaskFailure: 3
-      name: "Swiftv2_Tests"
-      displayName: "Swiftv2 Tests through script"
-
     - script: |
         set -e
         kubectl get po -owide -A

From e0d94e98cbd34cb253a7a4549d8042502224ae6b Mon Sep 17 00:00:00 2001
From: shchen
Date: Mon, 6 May 2024 21:33:02 -0700
Subject: [PATCH 48/49] Resolve the lint complaint.
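
The complaint comes from a wrap-style linter (likely wrapcheck; the repo's
lint configuration is not shown in this series), which wants errors from
external packages annotated before being returned. A standalone sketch of
the github.com/pkg/errors pattern this change adopts:

    package main

    import (
        "fmt"

        "github.com/pkg/errors"
    )

    // load stands in for any external call that can fail.
    func load() error {
        return errors.New("connection refused")
    }

    func main() {
        if err := load(); err != nil {
            wrapped := errors.Wrap(err, "failed to load config")
            fmt.Println(wrapped)               // failed to load config: connection refused
            fmt.Println(errors.Cause(wrapped)) // connection refused
        }
    }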
---
 test/internal/kubernetes/utils.go | 30 +++++++++++++++++++-----------
 1 file changed, 19 insertions(+), 11 deletions(-)

diff --git a/test/internal/kubernetes/utils.go b/test/internal/kubernetes/utils.go
index ed5d6373d9..f6584ab974 100644
--- a/test/internal/kubernetes/utils.go
+++ b/test/internal/kubernetes/utils.go
@@ -75,36 +75,44 @@ func MustGetRestConfig() *rest.Config {
 }
 
 func GetRESTClientForMultitenantCRDFromConfig(config *rest.Config) (*rest.RESTClient, error) {
-    scheme := runtime.NewScheme()
-    err := v1alpha1.AddToScheme(scheme)
+    schemeLocal := runtime.NewScheme()
+    err := v1alpha1.AddToScheme(schemeLocal)
     if err != nil {
-        return nil, err
+        return nil, errors.Wrap(err, "failed to AddToScheme")
     }
     config.ContentConfig.GroupVersion = &v1alpha1.GroupVersion
     config.APIPath = "/apis"
-    config.NegotiatedSerializer = serializer.NewCodecFactory(scheme)
+    config.NegotiatedSerializer = serializer.NewCodecFactory(schemeLocal)
     config.UserAgent = rest.DefaultKubernetesUserAgent()
-    return rest.UnversionedRESTClientFor(config)
+    client, err := rest.UnversionedRESTClientFor(config)
+    if err != nil {
+        return nil, errors.Wrap(err, "failed to UnversionedRESTClientFor config")
+    }
+    return client, nil
 }
 
 func GetRESTClientForMultitenantCRD(kubeconfig string) (*rest.RESTClient, error) {
-    scheme := runtime.NewScheme()
-    err := v1alpha1.AddToScheme(scheme)
+    schemeLocal := runtime.NewScheme()
+    err := v1alpha1.AddToScheme(schemeLocal)
     if err != nil {
-        return nil, err
+        return nil, errors.Wrap(err, "failed to AddToScheme")
     }
     restConfig, err := clientcmd.RESTConfigFromKubeConfig([]byte(kubeconfig))
     if err != nil {
-        return nil, err
+        return nil, errors.Wrap(err, "failed to get RESTConfigFromKubeConfig")
     }
     restConfig.ContentConfig.GroupVersion = &v1alpha1.GroupVersion
     restConfig.APIPath = "/apis"
-    restConfig.NegotiatedSerializer = serializer.NewCodecFactory(scheme)
+    restConfig.NegotiatedSerializer = serializer.NewCodecFactory(schemeLocal)
     restConfig.UserAgent = rest.DefaultKubernetesUserAgent()
-    return rest.UnversionedRESTClientFor(restConfig)
+    client, err := rest.UnversionedRESTClientFor(restConfig)
+    if err != nil {
+        return nil, errors.Wrap(err, "failed to UnversionedRESTClientFor config")
+    }
+    return client, nil
 }
 
 func mustParseResource(path string, out interface{}) {

From ef0bfa82ec39f75448f3b55b96f9da9b48688985 Mon Sep 17 00:00:00 2001
From: shchen
Date: Mon, 6 May 2024 21:50:06 -0700
Subject: [PATCH 49/49] Resume the "Remove AKS Engine storage account usage"

---
 .pipelines/pipeline.yaml | 32 --------------------------------
 1 file changed, 32 deletions(-)

diff --git a/.pipelines/pipeline.yaml b/.pipelines/pipeline.yaml
index 0de802f1bf..2cb68aba4a 100644
--- a/.pipelines/pipeline.yaml
+++ b/.pipelines/pipeline.yaml
@@ -614,35 +614,3 @@ stages:
           echo $TAG
           echo $CURRENT_VERSION
           echo "Checking if branch is up to date with master"
-
-  - stage: cleanup
-    displayName: Cleanup
-    dependsOn:
-      - azure_overlay_e2e
-      - aks_swift_e2e
-      - cilium_e2e
-      - cilium_overlay_e2e
-      - cilium_h_overlay_e2e
-      - aks_ubuntu_22_linux_e2e
-      - aks_windows_22_e2e
-      - dualstackoverlay_e2e
-      - cilium_dualstackoverlay_e2e
-      - swiftv2_e2e
-    jobs:
-      - job: delete_remote_artifacts
-        displayName: Delete remote artifacts
-        pool:
-          name: $(BUILD_POOL_NAME_DEFAULT)
-          demands: agent.os -equals Linux
-        steps:
-          - checkout: none
-          - task: AzureCLI@1
-            inputs:
-              azureSubscription: $(BUILD_VALIDATIONS_SERVICE_CONNECTION)
-              scriptLocation: "inlineScript"
-              inlineScript: |
-                BUILD_NUMBER=$(Build.BuildNumber)
-                BUILD_NUMBER=${BUILD_NUMBER//./-}
-                echo Deleting storage container with name acn-$BUILD_NUMBER and account name $(STORAGE_ACCOUNT_NAME)
-                az storage container delete -n acn-$BUILD_NUMBER --account-name $(STORAGE_ACCOUNT_NAME)
-            displayName: Cleanup remote Azure storage container
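
With the cleanup stage removed, any per-build containers left behind by
earlier runs would need to be deleted out of band. A sketch of the
equivalent one-off call, matching the command the removed step ran (the
account and container names below are placeholders, not pipeline values):

    # Manual cleanup of a leftover per-build storage container.
    az storage container delete -n acn-<build-number> --account-name <storage-account-name>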