From efbd8ba8f90b6da44b93d5c7969dc659230fecc7 Mon Sep 17 00:00:00 2001 From: myname4423 <57184070+myname4423@users.noreply.github.com> Date: Tue, 24 Dec 2024 19:38:52 +0800 Subject: [PATCH] support bluegreen release: support workload of deployment and cloneSet (#238) * support bluegreen release: Deployment and CloneSet Signed-off-by: yunbo * support bluegreen release: webhook update Signed-off-by: yunbo * add unit test & split workload mutating webhook Signed-off-by: yunbo * fix a bug caused by previous merged PR Signed-off-by: yunbo * improve some log information Signed-off-by: yunbo * fix kruise version problem Signed-off-by: yunbo --------- Signed-off-by: yunbo Co-authored-by: yunbo --- .../e2e-advanced-deployment-1.19.yaml | 2 +- .../e2e-advanced-deployment-1.23.yaml | 2 +- .github/workflows/e2e-cloneset-1.19.yaml | 2 +- .github/workflows/e2e-cloneset-1.23.yaml | 2 +- .github/workflows/e2e-daemonset-1.19.yaml | 2 +- .github/workflows/e2e-daemonset-1.23.yaml | 2 +- .github/workflows/e2e-deployment-1.19.yaml | 2 +- .github/workflows/e2e-deployment-1.23.yaml | 2 +- .github/workflows/e2e-others-1.19.yaml | 2 +- .github/workflows/e2e-others-1.23.yaml | 2 +- .github/workflows/e2e-statefulset-1.19.yaml | 2 +- .github/workflows/e2e-statefulset-1.23.yaml | 2 +- .../workflows/e2e-v1beta1-bluegreen-1.19.yaml | 146 + .../workflows/e2e-v1beta1-bluegreen-1.23.yaml | 146 + ...1-1.19.yaml => e2e-v1beta1-jump-1.19.yaml} | 4 +- ...1-1.23.yaml => e2e-v1beta1-jump-1.23.yaml} | 4 +- api/v1alpha1/conversion.go | 4 +- api/v1beta1/batchrelease_plan_types.go | 2 + api/v1beta1/deployment_types.go | 66 - api/v1beta1/rollout_types.go | 2 + api/v1beta1/zz_generated.deepcopy.go | 25 - .../rollouts.kruise.io_batchreleases.yaml | 4 + config/rbac/role.yaml | 10 + config/webhook/manifests.yaml | 42 - config/webhook/patch_manifests.yaml | 20 +- .../batchrelease/batchrelease_controller.go | 1 + .../batchrelease/batchrelease_executor.go | 44 +- .../batchrelease/context/context.go | 3 + pkg/controller/batchrelease/control/apis.go | 42 + .../bluegreenstyle/cloneset/control.go | 225 ++ .../bluegreenstyle/cloneset/control_test.go | 548 +++ .../control/bluegreenstyle/control_plane.go | 178 + .../bluegreenstyle/deployment/control.go | 336 ++ .../bluegreenstyle/deployment/control_test.go | 728 ++++ .../control/bluegreenstyle/hpa/hpa.go | 106 + .../control/bluegreenstyle/hpa/hpa_test.go | 149 + .../control/bluegreenstyle/interface.go | 48 + pkg/controller/batchrelease/control/util.go | 136 + pkg/controller/rollout/rollout_bluegreen.go | 12 +- pkg/controller/rollout/rollout_canary.go | 5 +- pkg/controller/rollout/rollout_progressing.go | 30 +- pkg/util/client/delegating_client.go | 7 +- pkg/util/condition.go | 29 + pkg/util/controller_finder.go | 6 +- pkg/util/errors/types.go | 69 + pkg/util/patch/patch_utils.go | 155 + pkg/util/workloads_utils.go | 6 +- .../rollout_create_update_handler.go | 21 +- pkg/webhook/util/writer/fs.go | 2 +- .../mutating/unified_update_handler.go | 241 ++ .../mutating/unified_update_handler_test.go | 110 + pkg/webhook/workload/mutating/webhooks.go | 6 +- .../mutating/workload_update_handler.go | 101 +- .../mutating/workload_update_handler_test.go | 223 +- test/e2e/rollout_v1beta1_test.go | 3041 +++++++++++++++-- test/e2e/test_data/rollout/hpa_v1.yaml | 12 + test/e2e/test_data/rollout/hpa_v2.yaml | 24 + .../rollout_v1beta1_bluegreen_base.yaml | 22 +- ...llout_v1beta1_bluegreen_cloneset_base.yaml | 24 + 59 files changed, 6505 insertions(+), 684 deletions(-) create mode 100644 
.github/workflows/e2e-v1beta1-bluegreen-1.19.yaml create mode 100644 .github/workflows/e2e-v1beta1-bluegreen-1.23.yaml rename .github/workflows/{e2e-v1beta1-1.19.yaml => e2e-v1beta1-jump-1.19.yaml} (97%) rename .github/workflows/{e2e-v1beta1-1.23.yaml => e2e-v1beta1-jump-1.23.yaml} (97%) create mode 100644 pkg/controller/batchrelease/control/apis.go create mode 100644 pkg/controller/batchrelease/control/bluegreenstyle/cloneset/control.go create mode 100644 pkg/controller/batchrelease/control/bluegreenstyle/cloneset/control_test.go create mode 100644 pkg/controller/batchrelease/control/bluegreenstyle/control_plane.go create mode 100644 pkg/controller/batchrelease/control/bluegreenstyle/deployment/control.go create mode 100644 pkg/controller/batchrelease/control/bluegreenstyle/deployment/control_test.go create mode 100644 pkg/controller/batchrelease/control/bluegreenstyle/hpa/hpa.go create mode 100644 pkg/controller/batchrelease/control/bluegreenstyle/hpa/hpa_test.go create mode 100644 pkg/controller/batchrelease/control/bluegreenstyle/interface.go create mode 100644 pkg/util/errors/types.go create mode 100644 pkg/webhook/workload/mutating/unified_update_handler.go create mode 100644 pkg/webhook/workload/mutating/unified_update_handler_test.go create mode 100644 test/e2e/test_data/rollout/hpa_v1.yaml create mode 100644 test/e2e/test_data/rollout/hpa_v2.yaml create mode 100644 test/e2e/test_data/rollout/rollout_v1beta1_bluegreen_cloneset_base.yaml diff --git a/.github/workflows/e2e-advanced-deployment-1.19.yaml b/.github/workflows/e2e-advanced-deployment-1.19.yaml index 1b04da6e..dc623af5 100644 --- a/.github/workflows/e2e-advanced-deployment-1.19.yaml +++ b/.github/workflows/e2e-advanced-deployment-1.19.yaml @@ -44,7 +44,7 @@ jobs: make helm helm repo add openkruise https://openkruise.github.io/charts/ helm repo update - helm install kruise openkruise/kruise + helm install kruise openkruise/kruise --version 1.7.0 for ((i=1;i<10;i++)); do set +e diff --git a/.github/workflows/e2e-advanced-deployment-1.23.yaml b/.github/workflows/e2e-advanced-deployment-1.23.yaml index 10edb25e..41a5c6ea 100644 --- a/.github/workflows/e2e-advanced-deployment-1.23.yaml +++ b/.github/workflows/e2e-advanced-deployment-1.23.yaml @@ -44,7 +44,7 @@ jobs: make helm helm repo add openkruise https://openkruise.github.io/charts/ helm repo update - helm install kruise openkruise/kruise + helm install kruise openkruise/kruise --version 1.7.0 for ((i=1;i<10;i++)); do set +e diff --git a/.github/workflows/e2e-cloneset-1.19.yaml b/.github/workflows/e2e-cloneset-1.19.yaml index 04e0a8eb..a32e9ceb 100644 --- a/.github/workflows/e2e-cloneset-1.19.yaml +++ b/.github/workflows/e2e-cloneset-1.19.yaml @@ -44,7 +44,7 @@ jobs: make helm helm repo add openkruise https://openkruise.github.io/charts/ helm repo update - helm install kruise openkruise/kruise + helm install kruise openkruise/kruise --version 1.7.0 for ((i=1;i<10;i++)); do set +e diff --git a/.github/workflows/e2e-cloneset-1.23.yaml b/.github/workflows/e2e-cloneset-1.23.yaml index 2bc0b0c2..712ed6ed 100644 --- a/.github/workflows/e2e-cloneset-1.23.yaml +++ b/.github/workflows/e2e-cloneset-1.23.yaml @@ -44,7 +44,7 @@ jobs: make helm helm repo add openkruise https://openkruise.github.io/charts/ helm repo update - helm install kruise openkruise/kruise + helm install kruise openkruise/kruise --version 1.7.0 for ((i=1;i<10;i++)); do set +e diff --git a/.github/workflows/e2e-daemonset-1.19.yaml b/.github/workflows/e2e-daemonset-1.19.yaml index 15c868d3..573b0335 100644 --- 
a/.github/workflows/e2e-daemonset-1.19.yaml +++ b/.github/workflows/e2e-daemonset-1.19.yaml @@ -44,7 +44,7 @@ jobs: make helm helm repo add openkruise https://openkruise.github.io/charts/ helm repo update - helm install kruise openkruise/kruise + helm install kruise openkruise/kruise --version 1.7.0 for ((i=1;i<10;i++)); do set +e diff --git a/.github/workflows/e2e-daemonset-1.23.yaml b/.github/workflows/e2e-daemonset-1.23.yaml index 75ee88f2..fdddac80 100644 --- a/.github/workflows/e2e-daemonset-1.23.yaml +++ b/.github/workflows/e2e-daemonset-1.23.yaml @@ -44,7 +44,7 @@ jobs: make helm helm repo add openkruise https://openkruise.github.io/charts/ helm repo update - helm install kruise openkruise/kruise + helm install kruise openkruise/kruise --version 1.7.0 for ((i=1;i<10;i++)); do set +e diff --git a/.github/workflows/e2e-deployment-1.19.yaml b/.github/workflows/e2e-deployment-1.19.yaml index c67f45bd..1ae0f6d8 100644 --- a/.github/workflows/e2e-deployment-1.19.yaml +++ b/.github/workflows/e2e-deployment-1.19.yaml @@ -44,7 +44,7 @@ jobs: make helm helm repo add openkruise https://openkruise.github.io/charts/ helm repo update - helm install kruise openkruise/kruise + helm install kruise openkruise/kruise --version 1.7.0 for ((i=1;i<10;i++)); do set +e diff --git a/.github/workflows/e2e-deployment-1.23.yaml b/.github/workflows/e2e-deployment-1.23.yaml index 59ffe444..f929ca6d 100644 --- a/.github/workflows/e2e-deployment-1.23.yaml +++ b/.github/workflows/e2e-deployment-1.23.yaml @@ -44,7 +44,7 @@ jobs: make helm helm repo add openkruise https://openkruise.github.io/charts/ helm repo update - helm install kruise openkruise/kruise + helm install kruise openkruise/kruise --version 1.7.0 for ((i=1;i<10;i++)); do set +e diff --git a/.github/workflows/e2e-others-1.19.yaml b/.github/workflows/e2e-others-1.19.yaml index 8caa8c9f..c4b221d8 100644 --- a/.github/workflows/e2e-others-1.19.yaml +++ b/.github/workflows/e2e-others-1.19.yaml @@ -44,7 +44,7 @@ jobs: make helm helm repo add openkruise https://openkruise.github.io/charts/ helm repo update - helm install kruise openkruise/kruise + helm install kruise openkruise/kruise --version 1.7.0 for ((i=1;i<10;i++)); do set +e diff --git a/.github/workflows/e2e-others-1.23.yaml b/.github/workflows/e2e-others-1.23.yaml index 6fbfdab5..8757ffc8 100644 --- a/.github/workflows/e2e-others-1.23.yaml +++ b/.github/workflows/e2e-others-1.23.yaml @@ -44,7 +44,7 @@ jobs: make helm helm repo add openkruise https://openkruise.github.io/charts/ helm repo update - helm install kruise openkruise/kruise + helm install kruise openkruise/kruise --version 1.7.0 for ((i=1;i<10;i++)); do set +e diff --git a/.github/workflows/e2e-statefulset-1.19.yaml b/.github/workflows/e2e-statefulset-1.19.yaml index 571c82e0..8f6865e7 100644 --- a/.github/workflows/e2e-statefulset-1.19.yaml +++ b/.github/workflows/e2e-statefulset-1.19.yaml @@ -44,7 +44,7 @@ jobs: make helm helm repo add openkruise https://openkruise.github.io/charts/ helm repo update - helm install kruise openkruise/kruise + helm install kruise openkruise/kruise --version 1.7.0 for ((i=1;i<10;i++)); do set +e diff --git a/.github/workflows/e2e-statefulset-1.23.yaml b/.github/workflows/e2e-statefulset-1.23.yaml index 7045b962..401f073b 100644 --- a/.github/workflows/e2e-statefulset-1.23.yaml +++ b/.github/workflows/e2e-statefulset-1.23.yaml @@ -44,7 +44,7 @@ jobs: make helm helm repo add openkruise https://openkruise.github.io/charts/ helm repo update - helm install kruise openkruise/kruise + helm install kruise 
openkruise/kruise --version 1.7.0 for ((i=1;i<10;i++)); do set +e diff --git a/.github/workflows/e2e-v1beta1-bluegreen-1.19.yaml b/.github/workflows/e2e-v1beta1-bluegreen-1.19.yaml new file mode 100644 index 00000000..e38bb7d8 --- /dev/null +++ b/.github/workflows/e2e-v1beta1-bluegreen-1.19.yaml @@ -0,0 +1,146 @@ +name: E2E-V1Beta1-BlueGreen-1.19 + +on: + push: + branches: + - master + - release-* + pull_request: {} + workflow_dispatch: {} + +env: + # Common versions + GO_VERSION: '1.19' + KIND_IMAGE: 'kindest/node:v1.19.16' + KIND_CLUSTER_NAME: 'ci-testing' + +jobs: + + rollout: + runs-on: ubuntu-20.04 + steps: + - uses: actions/checkout@v2 + with: + submodules: true + - name: Setup Go + uses: actions/setup-go@v2 + with: + go-version: ${{ env.GO_VERSION }} + - name: Setup Kind Cluster + uses: helm/kind-action@v1.2.0 + with: + node_image: ${{ env.KIND_IMAGE }} + cluster_name: ${{ env.KIND_CLUSTER_NAME }} + config: ./test/kind-conf.yaml + - name: Build image + run: | + export IMAGE="openkruise/kruise-rollout:e2e-${GITHUB_RUN_ID}" + docker build --pull --no-cache . -t $IMAGE + kind load docker-image --name=${KIND_CLUSTER_NAME} $IMAGE || { echo >&2 "kind not installed or error loading image: $IMAGE"; exit 1; } + - name: Install Kruise + run: | + set -ex + kubectl cluster-info + make helm + helm repo add openkruise https://openkruise.github.io/charts/ + helm repo update + helm install kruise openkruise/kruise --version 1.7.0 + for ((i=1;i<10;i++)); + do + set +e + PODS=$(kubectl get pod -n kruise-system | grep '1/1' | grep kruise-controller-manager | wc -l) + set -e + if [ "$PODS" -eq "2" ]; then + break + fi + sleep 3 + done + set +e + PODS=$(kubectl get pod -n kruise-system | grep '1/1' | grep kruise-controller-manager | wc -l) + set -e + if [ "$PODS" -eq "2" ]; then + echo "Wait for kruise-manager ready successfully" + else + echo "Timeout to wait for kruise-manager ready" + exit 1 + fi + - name: Install Kruise Rollout + run: | + set -ex + kubectl cluster-info + IMG=openkruise/kruise-rollout:e2e-${GITHUB_RUN_ID} ./scripts/deploy_kind.sh + for ((i=1;i<10;i++)); + do + set +e + PODS=$(kubectl get pod -n kruise-rollout | grep '1/1' | wc -l) + set -e + if [ "$PODS" -eq "1" ]; then + break + fi + sleep 3 + done + set +e + PODS=$(kubectl get pod -n kruise-rollout | grep '1/1' | wc -l) + kubectl get node -o yaml + kubectl get all -n kruise-rollout -o yaml + set -e + if [ "$PODS" -eq "1" ]; then + echo "Wait for kruise-rollout ready successfully" + else + echo "Timeout to wait for kruise-rollout ready" + exit 1 + fi + - name: Bluegreen Release Disable HPA + run: | + export KUBECONFIG=/home/runner/.kube/config + make ginkgo + set +e + ./bin/ginkgo -timeout 60m -v --focus='bluegreen disable hpa test case - autoscaling/v1 for v1.19' test/e2e + retVal=$? + # kubectl get pod -n kruise-rollout --no-headers | grep manager | awk '{print $1}' | xargs kubectl logs -n kruise-rollout + restartCount=$(kubectl get pod -n kruise-rollout --no-headers | awk '{print $4}') + if [ "${restartCount}" -eq "0" ];then + echo "Kruise-rollout has not restarted" + else + kubectl get pod -n kruise-rollout --no-headers + echo "Kruise-rollout has restarted, abort!!!" + kubectl get pod -n kruise-rollout --no-headers| awk '{print $1}' | xargs kubectl logs -p -n kruise-rollout + exit 1 + fi + exit $retVal + - name: Deployment Bluegreen Release + run: | + export KUBECONFIG=/home/runner/.kube/config + make ginkgo + set +e + ./bin/ginkgo -timeout 60m -v --focus='Bluegreen Release - Deployment - Ingress' test/e2e + retVal=$? 
+ # kubectl get pod -n kruise-rollout --no-headers | grep manager | awk '{print $1}' | xargs kubectl logs -n kruise-rollout + restartCount=$(kubectl get pod -n kruise-rollout --no-headers | awk '{print $4}') + if [ "${restartCount}" -eq "0" ];then + echo "Kruise-rollout has not restarted" + else + kubectl get pod -n kruise-rollout --no-headers + echo "Kruise-rollout has restarted, abort!!!" + kubectl get pod -n kruise-rollout --no-headers| awk '{print $1}' | xargs kubectl logs -p -n kruise-rollout + exit 1 + fi + exit $retVal + - name: CloneSet Bluegreen Release + run: | + export KUBECONFIG=/home/runner/.kube/config + make ginkgo + set +e + ./bin/ginkgo -timeout 60m -v --focus='Bluegreen Release - Cloneset - Ingress' test/e2e + retVal=$? + # kubectl get pod -n kruise-rollout --no-headers | grep manager | awk '{print $1}' | xargs kubectl logs -n kruise-rollout + restartCount=$(kubectl get pod -n kruise-rollout --no-headers | awk '{print $4}') + if [ "${restartCount}" -eq "0" ];then + echo "Kruise-rollout has not restarted" + else + kubectl get pod -n kruise-rollout --no-headers + echo "Kruise-rollout has restarted, abort!!!" + kubectl get pod -n kruise-rollout --no-headers| awk '{print $1}' | xargs kubectl logs -p -n kruise-rollout + exit 1 + fi + exit $retVal diff --git a/.github/workflows/e2e-v1beta1-bluegreen-1.23.yaml b/.github/workflows/e2e-v1beta1-bluegreen-1.23.yaml new file mode 100644 index 00000000..4aa68abc --- /dev/null +++ b/.github/workflows/e2e-v1beta1-bluegreen-1.23.yaml @@ -0,0 +1,146 @@ +name: E2E-V1Beta1-BlueGreen-1.23 + +on: + push: + branches: + - master + - release-* + pull_request: {} + workflow_dispatch: {} + +env: + # Common versions + GO_VERSION: '1.19' + KIND_IMAGE: 'kindest/node:v1.23.3' + KIND_CLUSTER_NAME: 'ci-testing' + +jobs: + + rollout: + runs-on: ubuntu-20.04 + steps: + - uses: actions/checkout@v2 + with: + submodules: true + - name: Setup Go + uses: actions/setup-go@v2 + with: + go-version: ${{ env.GO_VERSION }} + - name: Setup Kind Cluster + uses: helm/kind-action@v1.2.0 + with: + node_image: ${{ env.KIND_IMAGE }} + cluster_name: ${{ env.KIND_CLUSTER_NAME }} + config: ./test/kind-conf.yaml + - name: Build image + run: | + export IMAGE="openkruise/kruise-rollout:e2e-${GITHUB_RUN_ID}" + docker build --pull --no-cache . 
-t $IMAGE + kind load docker-image --name=${KIND_CLUSTER_NAME} $IMAGE || { echo >&2 "kind not installed or error loading image: $IMAGE"; exit 1; } + - name: Install Kruise + run: | + set -ex + kubectl cluster-info + make helm + helm repo add openkruise https://openkruise.github.io/charts/ + helm repo update + helm install kruise openkruise/kruise --version 1.7.0 + for ((i=1;i<10;i++)); + do + set +e + PODS=$(kubectl get pod -n kruise-system | grep '1/1' | grep kruise-controller-manager | wc -l) + set -e + if [ "$PODS" -eq "2" ]; then + break + fi + sleep 3 + done + set +e + PODS=$(kubectl get pod -n kruise-system | grep '1/1' | grep kruise-controller-manager | wc -l) + set -e + if [ "$PODS" -eq "2" ]; then + echo "Wait for kruise-manager ready successfully" + else + echo "Timeout to wait for kruise-manager ready" + exit 1 + fi + - name: Install Kruise Rollout + run: | + set -ex + kubectl cluster-info + IMG=openkruise/kruise-rollout:e2e-${GITHUB_RUN_ID} ./scripts/deploy_kind.sh + for ((i=1;i<10;i++)); + do + set +e + PODS=$(kubectl get pod -n kruise-rollout | grep '1/1' | wc -l) + set -e + if [ "$PODS" -eq "1" ]; then + break + fi + sleep 3 + done + set +e + PODS=$(kubectl get pod -n kruise-rollout | grep '1/1' | wc -l) + kubectl get node -o yaml + kubectl get all -n kruise-rollout -o yaml + set -e + if [ "$PODS" -eq "1" ]; then + echo "Wait for kruise-rollout ready successfully" + else + echo "Timeout to wait for kruise-rollout ready" + exit 1 + fi + - name: Bluegreen Release Disable HPA + run: | + export KUBECONFIG=/home/runner/.kube/config + make ginkgo + set +e + ./bin/ginkgo -timeout 60m -v --focus='bluegreen delete rollout case - autoscaling/v2 for v1.23' test/e2e + retVal=$? + # kubectl get pod -n kruise-rollout --no-headers | grep manager | awk '{print $1}' | xargs kubectl logs -n kruise-rollout + restartCount=$(kubectl get pod -n kruise-rollout --no-headers | awk '{print $4}') + if [ "${restartCount}" -eq "0" ];then + echo "Kruise-rollout has not restarted" + else + kubectl get pod -n kruise-rollout --no-headers + echo "Kruise-rollout has restarted, abort!!!" + kubectl get pod -n kruise-rollout --no-headers| awk '{print $1}' | xargs kubectl logs -p -n kruise-rollout + exit 1 + fi + exit $retVal + - name: Deployment Bluegreen Release + run: | + export KUBECONFIG=/home/runner/.kube/config + make ginkgo + set +e + ./bin/ginkgo -timeout 60m -v --focus='Bluegreen Release - Deployment - Ingress' test/e2e + retVal=$? + # kubectl get pod -n kruise-rollout --no-headers | grep manager | awk '{print $1}' | xargs kubectl logs -n kruise-rollout + restartCount=$(kubectl get pod -n kruise-rollout --no-headers | awk '{print $4}') + if [ "${restartCount}" -eq "0" ];then + echo "Kruise-rollout has not restarted" + else + kubectl get pod -n kruise-rollout --no-headers + echo "Kruise-rollout has restarted, abort!!!" + kubectl get pod -n kruise-rollout --no-headers| awk '{print $1}' | xargs kubectl logs -p -n kruise-rollout + exit 1 + fi + exit $retVal + - name: CloneSet Bluegreen Release + run: | + export KUBECONFIG=/home/runner/.kube/config + make ginkgo + set +e + ./bin/ginkgo -timeout 60m -v --focus='Bluegreen Release - Cloneset - Ingress' test/e2e + retVal=$? 
+ # kubectl get pod -n kruise-rollout --no-headers | grep manager | awk '{print $1}' | xargs kubectl logs -n kruise-rollout + restartCount=$(kubectl get pod -n kruise-rollout --no-headers | awk '{print $4}') + if [ "${restartCount}" -eq "0" ];then + echo "Kruise-rollout has not restarted" + else + kubectl get pod -n kruise-rollout --no-headers + echo "Kruise-rollout has restarted, abort!!!" + kubectl get pod -n kruise-rollout --no-headers| awk '{print $1}' | xargs kubectl logs -p -n kruise-rollout + exit 1 + fi + exit $retVal \ No newline at end of file diff --git a/.github/workflows/e2e-v1beta1-1.19.yaml b/.github/workflows/e2e-v1beta1-jump-1.19.yaml similarity index 97% rename from .github/workflows/e2e-v1beta1-1.19.yaml rename to .github/workflows/e2e-v1beta1-jump-1.19.yaml index 6ec0e39d..b7afc005 100644 --- a/.github/workflows/e2e-v1beta1-1.19.yaml +++ b/.github/workflows/e2e-v1beta1-jump-1.19.yaml @@ -1,4 +1,4 @@ -name: E2E-V1Beta1-1.19 +name: E2E-V1Beta1-JUMP-1.19 on: push: @@ -44,7 +44,7 @@ jobs: make helm helm repo add openkruise https://openkruise.github.io/charts/ helm repo update - helm install kruise openkruise/kruise + helm install kruise openkruise/kruise --version 1.7.0 for ((i=1;i<10;i++)); do set +e diff --git a/.github/workflows/e2e-v1beta1-1.23.yaml b/.github/workflows/e2e-v1beta1-jump-1.23.yaml similarity index 97% rename from .github/workflows/e2e-v1beta1-1.23.yaml rename to .github/workflows/e2e-v1beta1-jump-1.23.yaml index 8dad2a8b..c553d2a7 100644 --- a/.github/workflows/e2e-v1beta1-1.23.yaml +++ b/.github/workflows/e2e-v1beta1-jump-1.23.yaml @@ -1,4 +1,4 @@ -name: E2E-V1Beta1-1.23 +name: E2E-V1Beta1-JUMP-1.23 on: push: @@ -44,7 +44,7 @@ jobs: make helm helm repo add openkruise https://openkruise.github.io/charts/ helm repo update - helm install kruise openkruise/kruise + helm install kruise openkruise/kruise --version 1.7.0 for ((i=1;i<10;i++)); do set +e diff --git a/api/v1alpha1/conversion.go b/api/v1alpha1/conversion.go index 28ad4a9c..442218ff 100644 --- a/api/v1alpha1/conversion.go +++ b/api/v1alpha1/conversion.go @@ -172,7 +172,9 @@ func (dst *Rollout) ConvertFrom(src conversion.Hub) error { srcV1beta1 := src.(*v1beta1.Rollout) dst.ObjectMeta = srcV1beta1.ObjectMeta if !srcV1beta1.Spec.Strategy.IsCanaryStragegy() { - return fmt.Errorf("v1beta1 Rollout with %s strategy cannot be converted to v1alpha1", srcV1beta1.Spec.Strategy.GetRollingStyle()) + // only v1beta1 supports bluegreen strategy + // Don't log the message because it will print too often + return nil } // spec dst.Spec = RolloutSpec{ diff --git a/api/v1beta1/batchrelease_plan_types.go b/api/v1beta1/batchrelease_plan_types.go index e72a9941..a1472946 100644 --- a/api/v1beta1/batchrelease_plan_types.go +++ b/api/v1beta1/batchrelease_plan_types.go @@ -117,6 +117,8 @@ type BatchReleaseStatus struct { // Phase is the release plan phase, which indicates the current state of release // plan state machine in BatchRelease controller. 
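// Illustrative sketch (not part of this patch): the BatchReleaseStatus just below gains a
// Message field that explains why the release is in its current phase. Later in this patch,
// progressingStateTransition/removeProgressingCondition in batchrelease_executor.go keep that
// field in sync with the Progressing condition. The types here are simplified stand-ins for
// the real v1beta1 API, shown only to make the intended condition/message mirroring concrete.

package sketch

// Condition is a simplified stand-in for v1beta1.RolloutCondition.
type Condition struct {
	Type    string
	Status  string
	Reason  string
	Message string
}

// Status is a simplified stand-in for BatchReleaseStatus.
type Status struct {
	Conditions []Condition
	Message    string
}

// upsertProgressing sets (or updates) the Progressing condition and mirrors its message into
// Status.Message, so users can see why the release is in its current phase at a glance.
func upsertProgressing(s *Status, condStatus, reason, message string) {
	for i := range s.Conditions {
		if s.Conditions[i].Type == "Progressing" {
			s.Conditions[i].Status = condStatus
			s.Conditions[i].Reason = reason
			if message != "" {
				s.Conditions[i].Message = message
			}
			s.Message = s.Conditions[i].Message
			return
		}
	}
	s.Conditions = append(s.Conditions, Condition{Type: "Progressing", Status: condStatus, Reason: reason, Message: message})
	s.Message = message
}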
Phase RolloutPhase `json:"phase,omitempty"` + // Message provides details on why the rollout is in its current phase + Message string `json:"message,omitempty"` } type BatchReleaseCanaryStatus struct { diff --git a/api/v1beta1/deployment_types.go b/api/v1beta1/deployment_types.go index 9975e989..5002fd82 100644 --- a/api/v1beta1/deployment_types.go +++ b/api/v1beta1/deployment_types.go @@ -62,31 +62,6 @@ type DeploymentStrategy struct { Partition intstr.IntOrString `json:"partition,omitempty"` } -// OriginalDeploymentStrategy stores part of the fileds of a workload, -// so that it can be restored when finalizing. -// It is only used for BlueGreen Release -// Similar to DeploymentStrategy, it is an annotation used in workload -// However, unlike DeploymentStrategy, it is only used to store and restore the user's strategy -type OriginalDeploymentStrategy struct { - // The deployment strategy to use to replace existing pods with new ones. - // +optional - // +patchStrategy=retainKeys - Strategy *apps.DeploymentStrategy `json:"strategy,omitempty" patchStrategy:"retainKeys" protobuf:"bytes,4,opt,name=strategy"` - - // Minimum number of seconds for which a newly created pod should be ready - // without any of its container crashing, for it to be considered available. - // Defaults to 0 (pod will be considered available as soon as it is ready) - // +optional - MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,5,opt,name=minReadySeconds"` - - // The maximum time in seconds for a deployment to make progress before it - // is considered to be failed. The deployment controller will continue to - // process failed deployments and a condition with a ProgressDeadlineExceeded - // reason will be surfaced in the deployment status. Note that progress will - // not be estimated during the time a deployment is paused. Defaults to 600s. - ProgressDeadlineSeconds *int32 `json:"progressDeadlineSeconds,omitempty" protobuf:"varint,9,opt,name=progressDeadlineSeconds"` -} - type RollingStyleType string const ( @@ -138,44 +113,3 @@ func SetDefaultDeploymentStrategy(strategy *DeploymentStrategy) { } } } - -func SetDefaultSetting(setting *OriginalDeploymentStrategy) { - if setting.ProgressDeadlineSeconds == nil { - setting.ProgressDeadlineSeconds = new(int32) - *setting.ProgressDeadlineSeconds = 600 - } - if setting.Strategy == nil { - setting.Strategy = &apps.DeploymentStrategy{} - } - if setting.Strategy.Type == "" { - setting.Strategy.Type = apps.RollingUpdateDeploymentStrategyType - } - if setting.Strategy.Type == apps.RecreateDeploymentStrategyType { - return - } - strategy := setting.Strategy - if strategy.RollingUpdate == nil { - strategy.RollingUpdate = &apps.RollingUpdateDeployment{} - } - if strategy.RollingUpdate.MaxUnavailable == nil { - // Set MaxUnavailable as 25% by default - maxUnavailable := intstr.FromString("25%") - strategy.RollingUpdate.MaxUnavailable = &maxUnavailable - } - if strategy.RollingUpdate.MaxSurge == nil { - // Set MaxSurge as 25% by default - maxSurge := intstr.FromString("25%") - strategy.RollingUpdate.MaxUnavailable = &maxSurge - } - - // Cannot allow maxSurge==0 && MaxUnavailable==0, otherwise, no pod can be updated when rolling update. 
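// Illustrative sketch (not part of this patch): the removed comment above states the invariant
// that a RollingUpdate strategy must never resolve to maxSurge==0 and maxUnavailable==0,
// otherwise no pod could ever be replaced during a rolling update. A minimal, self-contained
// example of enforcing that invariant with the real k8s.io/api and k8s.io/apimachinery types;
// this is not the repository's SetDefaultSetting helper (which this patch deletes).

package sketch

import (
	apps "k8s.io/api/apps/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

// ensureRollingUpdateProgress defaults nil values to 25% and, when both scaled values resolve
// to zero, falls back to maxSurge=0/maxUnavailable=1 so the rollout can still make progress.
func ensureRollingUpdateProgress(ru *apps.RollingUpdateDeployment) {
	if ru.MaxUnavailable == nil {
		v := intstr.FromString("25%")
		ru.MaxUnavailable = &v
	}
	if ru.MaxSurge == nil {
		v := intstr.FromString("25%")
		ru.MaxSurge = &v
	}
	surge, _ := intstr.GetScaledValueFromIntOrPercent(ru.MaxSurge, 100, true)
	unavailable, _ := intstr.GetScaledValueFromIntOrPercent(ru.MaxUnavailable, 100, true)
	if surge == 0 && unavailable == 0 {
		zero := intstr.FromInt(0)
		one := intstr.FromInt(1)
		ru.MaxSurge = &zero
		ru.MaxUnavailable = &one
	}
}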
- maxSurge, _ := intstr.GetScaledValueFromIntOrPercent(strategy.RollingUpdate.MaxSurge, 100, true) - maxUnavailable, _ := intstr.GetScaledValueFromIntOrPercent(strategy.RollingUpdate.MaxUnavailable, 100, true) - if maxSurge == 0 && maxUnavailable == 0 { - strategy.RollingUpdate = &apps.RollingUpdateDeployment{ - MaxSurge: &intstr.IntOrString{Type: intstr.Int, IntVal: 0}, - MaxUnavailable: &intstr.IntOrString{Type: intstr.Int, IntVal: 1}, - } - } - -} diff --git a/api/v1beta1/rollout_types.go b/api/v1beta1/rollout_types.go index 47051987..5b1ded6f 100644 --- a/api/v1beta1/rollout_types.go +++ b/api/v1beta1/rollout_types.go @@ -579,6 +579,8 @@ const ( FinalisingStepReleaseWorkloadControl FinalisingStepType = "ReleaseWorkloadControl" // All needed work done FinalisingStepTypeEnd FinalisingStepType = "END" + // Only for debugging use + FinalisingStepWaitEndless FinalisingStepType = "WaitEndless" ) // +genclient diff --git a/api/v1beta1/zz_generated.deepcopy.go b/api/v1beta1/zz_generated.deepcopy.go index a75b92b7..0cbeea32 100644 --- a/api/v1beta1/zz_generated.deepcopy.go +++ b/api/v1beta1/zz_generated.deepcopy.go @@ -422,31 +422,6 @@ func (in *ObjectRef) DeepCopy() *ObjectRef { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *OriginalDeploymentStrategy) DeepCopyInto(out *OriginalDeploymentStrategy) { - *out = *in - if in.Strategy != nil { - in, out := &in.Strategy, &out.Strategy - *out = new(v1.DeploymentStrategy) - (*in).DeepCopyInto(*out) - } - if in.ProgressDeadlineSeconds != nil { - in, out := &in.ProgressDeadlineSeconds, &out.ProgressDeadlineSeconds - *out = new(int32) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OriginalDeploymentStrategy. -func (in *OriginalDeploymentStrategy) DeepCopy() *OriginalDeploymentStrategy { - if in == nil { - return nil - } - out := new(OriginalDeploymentStrategy) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PatchPodTemplateMetadata) DeepCopyInto(out *PatchPodTemplateMetadata) { *out = *in diff --git a/config/crd/bases/rollouts.kruise.io_batchreleases.yaml b/config/crd/bases/rollouts.kruise.io_batchreleases.yaml index c14d26ca..34d8a03d 100644 --- a/config/crd/bases/rollouts.kruise.io_batchreleases.yaml +++ b/config/crd/bases/rollouts.kruise.io_batchreleases.yaml @@ -507,6 +507,10 @@ spec: - type type: object type: array + message: + description: Message provides details on why the rollout is in its + current phase + type: string observedGeneration: description: ObservedGeneration is the most recent generation observed for this BatchRelease. 
It corresponds to this BatchRelease's generation, diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 2a094c61..343d31ca 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -161,6 +161,16 @@ rules: - get - patch - update +- apiGroups: + - autoscaling + resources: + - horizontalpodautoscalers + verbs: + - get + - list + - patch + - update + - watch - apiGroups: - "" resources: diff --git a/config/webhook/manifests.yaml b/config/webhook/manifests.yaml index 9af263a5..9d2ec03c 100644 --- a/config/webhook/manifests.yaml +++ b/config/webhook/manifests.yaml @@ -65,48 +65,6 @@ webhooks: resources: - deployments sideEffects: None -- admissionReviewVersions: - - v1 - - v1beta1 - clientConfig: - service: - name: webhook-service - namespace: system - path: /mutate-apps-v1-statefulset - failurePolicy: Fail - name: mstatefulset.kb.io - rules: - - apiGroups: - - apps - apiVersions: - - v1 - operations: - - UPDATE - resources: - - statefulsets - sideEffects: None -- admissionReviewVersions: - - v1 - - v1beta1 - clientConfig: - service: - name: webhook-service - namespace: system - path: /mutate-apps-kruise-io-statefulset - failurePolicy: Fail - name: madvancedstatefulset.kb.io - rules: - - apiGroups: - - apps.kruise.io - apiVersions: - - v1alpha1 - - v1beta1 - operations: - - CREATE - - UPDATE - resources: - - statefulsets - sideEffects: None - admissionReviewVersions: - v1 - v1beta1 diff --git a/config/webhook/patch_manifests.yaml b/config/webhook/patch_manifests.yaml index 5022bcab..18993e3a 100644 --- a/config/webhook/patch_manifests.yaml +++ b/config/webhook/patch_manifests.yaml @@ -18,16 +18,16 @@ webhooks: matchExpressions: - key: rollouts.kruise.io/workload-type operator: Exists - - name: mstatefulset.kb.io - objectSelector: - matchExpressions: - - key: rollouts.kruise.io/workload-type - operator: Exists - - name: madvancedstatefulset.kb.io - objectSelector: - matchExpressions: - - key: rollouts.kruise.io/workload-type - operator: Exists + # - name: mstatefulset.kb.io + # objectSelector: + # matchExpressions: + # - key: rollouts.kruise.io/workload-type + # operator: Exists + # - name: madvancedstatefulset.kb.io + # objectSelector: + # matchExpressions: + # - key: rollouts.kruise.io/workload-type + # operator: Exists - name: mdeployment.kb.io objectSelector: matchExpressions: diff --git a/pkg/controller/batchrelease/batchrelease_controller.go b/pkg/controller/batchrelease/batchrelease_controller.go index 2eaf2faa..709c2707 100644 --- a/pkg/controller/batchrelease/batchrelease_controller.go +++ b/pkg/controller/batchrelease/batchrelease_controller.go @@ -148,6 +148,7 @@ type BatchReleaseReconciler struct { // +kubebuilder:rbac:groups=apps.kruise.io,resources=statefulsets/status,verbs=get;update;patch // +kubebuilder:rbac:groups=apps.kruise.io,resources=daemonsets,verbs=get;list;watch;update;patch // +kubebuilder:rbac:groups=apps.kruise.io,resources=daemonsets/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=autoscaling,resources=horizontalpodautoscalers,verbs=get;list;watch;update;patch // Reconcile reads that state of the cluster for a Rollout object and makes changes based on the state read // and what is in the Rollout.Spec diff --git a/pkg/controller/batchrelease/batchrelease_executor.go b/pkg/controller/batchrelease/batchrelease_executor.go index 5082817d..bd836514 100644 --- a/pkg/controller/batchrelease/batchrelease_executor.go +++ b/pkg/controller/batchrelease/batchrelease_executor.go @@ -24,6 +24,9 @@ import ( appsv1alpha1 
"github.com/openkruise/kruise-api/apps/v1alpha1" "github.com/openkruise/rollouts/api/v1beta1" "github.com/openkruise/rollouts/pkg/controller/batchrelease/control" + "github.com/openkruise/rollouts/pkg/controller/batchrelease/control/bluegreenstyle" + bgcloneset "github.com/openkruise/rollouts/pkg/controller/batchrelease/control/bluegreenstyle/cloneset" + bgdeplopyment "github.com/openkruise/rollouts/pkg/controller/batchrelease/control/bluegreenstyle/deployment" "github.com/openkruise/rollouts/pkg/controller/batchrelease/control/canarystyle" canarydeployment "github.com/openkruise/rollouts/pkg/controller/batchrelease/control/canarystyle/deployment" "github.com/openkruise/rollouts/pkg/controller/batchrelease/control/partitionstyle" @@ -32,6 +35,7 @@ import ( partitiondeployment "github.com/openkruise/rollouts/pkg/controller/batchrelease/control/partitionstyle/deployment" "github.com/openkruise/rollouts/pkg/controller/batchrelease/control/partitionstyle/statefulset" "github.com/openkruise/rollouts/pkg/util" + "github.com/openkruise/rollouts/pkg/util/errors" apps "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -145,7 +149,11 @@ func (r *Executor) progressBatches(release *v1beta1.BatchRelease, newStatus *v1b switch { case err == nil: result = reconcile.Result{RequeueAfter: DefaultDuration} + removeProgressingCondition(newStatus) newStatus.CanaryStatus.CurrentBatchState = v1beta1.VerifyingBatchState + case errors.IsBadRequest(err): + progressingStateTransition(newStatus, v1.ConditionTrue, v1beta1.ProgressingReasonInRolling, err.Error()) + fallthrough default: klog.Warningf("Failed to upgrade %v, err %v", klog.KObj(release), err) } @@ -204,14 +212,14 @@ func (r *Executor) getReleaseController(release *v1beta1.BatchRelease, newStatus klog.Infof("BatchRelease(%v) using %s-style release controller for this batch release", klog.KObj(release), rollingStyle) switch rollingStyle { case v1beta1.BlueGreenRollingStyle: - // if targetRef.APIVersion == appsv1alpha1.GroupVersion.String() && targetRef.Kind == reflect.TypeOf(appsv1alpha1.CloneSet{}).Name() { - // klog.InfoS("Using CloneSet bluegreen-style release controller for this batch release", "workload name", targetKey.Name, "namespace", targetKey.Namespace) - // return partitionstyle.NewControlPlane(cloneset.NewController, r.client, r.recorder, release, newStatus, targetKey, gvk), nil - // } - // if targetRef.APIVersion == apps.SchemeGroupVersion.String() && targetRef.Kind == reflect.TypeOf(apps.Deployment{}).Name() { - // klog.InfoS("Using Deployment bluegreen-style release controller for this batch release", "workload name", targetKey.Name, "namespace", targetKey.Namespace) - // return bluegreenstyle.NewControlPlane(deployment.NewController, r.client, r.recorder, release, newStatus, targetKey, gvk), nil - // } + if targetRef.APIVersion == appsv1alpha1.GroupVersion.String() && targetRef.Kind == reflect.TypeOf(appsv1alpha1.CloneSet{}).Name() { + klog.InfoS("Using CloneSet bluegreen-style release controller for this batch release", "workload name", targetKey.Name, "namespace", targetKey.Namespace) + return bluegreenstyle.NewControlPlane(bgcloneset.NewController, r.client, r.recorder, release, newStatus, targetKey, gvk), nil + } + if targetRef.APIVersion == apps.SchemeGroupVersion.String() && targetRef.Kind == reflect.TypeOf(apps.Deployment{}).Name() { + klog.InfoS("Using Deployment bluegreen-style release controller for this batch release", "workload name", targetKey.Name, "namespace", targetKey.Namespace) + 
return bluegreenstyle.NewControlPlane(bgdeplopyment.NewController, r.client, r.recorder, release, newStatus, targetKey, gvk), nil + } case v1beta1.CanaryRollingStyle: if targetRef.APIVersion == apps.SchemeGroupVersion.String() && targetRef.Kind == reflect.TypeOf(apps.Deployment{}).Name() { @@ -257,3 +265,23 @@ func isPartitioned(release *v1beta1.BatchRelease) bool { return release.Spec.ReleasePlan.BatchPartition != nil && *release.Spec.ReleasePlan.BatchPartition <= release.Status.CanaryStatus.CurrentBatch } + +func progressingStateTransition(status *v1beta1.BatchReleaseStatus, condStatus v1.ConditionStatus, reason, message string) { + cond := util.GetBatchReleaseCondition(*status, v1beta1.RolloutConditionProgressing) + if cond == nil { + cond = util.NewRolloutCondition(v1beta1.RolloutConditionProgressing, condStatus, reason, message) + } else { + cond.Status = condStatus + cond.Reason = reason + if message != "" { + cond.Message = message + } + } + util.SetBatchReleaseCondition(status, *cond) + status.Message = cond.Message +} + +func removeProgressingCondition(status *v1beta1.BatchReleaseStatus) { + util.RemoveBatchReleaseCondition(status, v1beta1.RolloutConditionProgressing) + status.Message = "" +} diff --git a/pkg/controller/batchrelease/context/context.go b/pkg/controller/batchrelease/context/context.go index 2c428180..6ad325b0 100644 --- a/pkg/controller/batchrelease/context/context.go +++ b/pkg/controller/batchrelease/context/context.go @@ -61,6 +61,9 @@ type BatchContext struct { Pods []*corev1.Pod `json:"-"` // filter or sort pods before patch label FilterFunc FilterFuncType `json:"-"` + // the next two fields are only used for bluegreen style + CurrentSurge intstr.IntOrString `json:"currentSurge,omitempty"` + DesiredSurge intstr.IntOrString `json:"desiredSurge,omitempty"` } type FilterFuncType func(pods []*corev1.Pod, ctx *BatchContext) []*corev1.Pod diff --git a/pkg/controller/batchrelease/control/apis.go b/pkg/controller/batchrelease/control/apis.go new file mode 100644 index 00000000..43ccd268 --- /dev/null +++ b/pkg/controller/batchrelease/control/apis.go @@ -0,0 +1,42 @@ +/* +Copyright 2022 The Kruise Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package control + +import "k8s.io/apimachinery/pkg/util/intstr" + +// OriginalDeploymentStrategy stores part of the fileds of a workload, +// so that it can be restored when finalizing. +// It is only used for BlueGreen Release +// Similar to DeploymentStrategy, it is an annotation used in workload +// However, unlike DeploymentStrategy, it is only used to store and restore the user's strategy +type OriginalDeploymentStrategy struct { + // +optional + MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"bytes,1,opt,name=maxUnavailable"` + // +optional + MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty" protobuf:"bytes,2,opt,name=maxSurge"` + // Minimum number of seconds for which a newly created pod should be ready + // without any of its container crashing, for it to be considered available. 
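// Illustrative sketch (not part of this patch): the OriginalDeploymentStrategy type defined
// above is persisted as a JSON annotation on the workload when the blue-green release starts,
// and read back during Finalize so the user's original strategy can be restored. The cloneset
// and deployment controllers below do this through control.GetOriginalSetting and
// util.DumpJSON; the helper names here (saveOriginalStrategy/loadOriginalStrategy) are
// illustrative only and show the round-trip with plain encoding/json.

package sketch

import (
	"encoding/json"

	"github.com/openkruise/rollouts/api/v1beta1"
	"github.com/openkruise/rollouts/pkg/controller/batchrelease/control"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// saveOriginalStrategy stores the user's strategy on the workload so Finalize can restore it.
func saveOriginalStrategy(meta *metav1.ObjectMeta, s control.OriginalDeploymentStrategy) error {
	raw, err := json.Marshal(s)
	if err != nil {
		return err
	}
	if meta.Annotations == nil {
		meta.Annotations = map[string]string{}
	}
	meta.Annotations[v1beta1.OriginalDeploymentStrategyAnnotation] = string(raw)
	return nil
}

// loadOriginalStrategy reads the stored strategy back; found is false when the annotation is absent.
func loadOriginalStrategy(meta *metav1.ObjectMeta) (s control.OriginalDeploymentStrategy, found bool, err error) {
	raw, ok := meta.Annotations[v1beta1.OriginalDeploymentStrategyAnnotation]
	if !ok || raw == "" {
		return s, false, nil
	}
	if err = json.Unmarshal([]byte(raw), &s); err != nil {
		return s, false, err
	}
	return s, true, nil
}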
+ // Defaults to 0 (pod will be considered available as soon as it is ready) + // +optional + MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,5,opt,name=minReadySeconds"` + // The maximum time in seconds for a deployment to make progress before it + // is considered to be failed. The deployment controller will continue to + // process failed deployments and a condition with a ProgressDeadlineExceeded + // reason will be surfaced in the deployment status. Note that progress will + // not be estimated during the time a deployment is paused. Defaults to 600s. + ProgressDeadlineSeconds *int32 `json:"progressDeadlineSeconds,omitempty" protobuf:"varint,9,opt,name=progressDeadlineSeconds"` +} diff --git a/pkg/controller/batchrelease/control/bluegreenstyle/cloneset/control.go b/pkg/controller/batchrelease/control/bluegreenstyle/cloneset/control.go new file mode 100644 index 00000000..5d6ae2d6 --- /dev/null +++ b/pkg/controller/batchrelease/control/bluegreenstyle/cloneset/control.go @@ -0,0 +1,225 @@ +/* +Copyright 2022 The Kruise Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cloneset + +import ( + "context" + "fmt" + + kruiseappsv1alpha1 "github.com/openkruise/kruise-api/apps/v1alpha1" + "github.com/openkruise/rollouts/api/v1beta1" + batchcontext "github.com/openkruise/rollouts/pkg/controller/batchrelease/context" + "github.com/openkruise/rollouts/pkg/controller/batchrelease/control" + "github.com/openkruise/rollouts/pkg/controller/batchrelease/control/bluegreenstyle" + "github.com/openkruise/rollouts/pkg/controller/batchrelease/control/bluegreenstyle/hpa" + "github.com/openkruise/rollouts/pkg/util" + "github.com/openkruise/rollouts/pkg/util/errors" + "github.com/openkruise/rollouts/pkg/util/patch" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/klog/v2" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +type realController struct { + *util.WorkloadInfo + client client.Client + pods []*corev1.Pod + key types.NamespacedName + object *kruiseappsv1alpha1.CloneSet +} + +func NewController(cli client.Client, key types.NamespacedName, _ schema.GroupVersionKind) bluegreenstyle.Interface { + return &realController{ + key: key, + client: cli, + } +} + +func (rc *realController) GetWorkloadInfo() *util.WorkloadInfo { + return rc.WorkloadInfo +} + +func (rc *realController) BuildController() (bluegreenstyle.Interface, error) { + if rc.object != nil { + return rc, nil + } + object := &kruiseappsv1alpha1.CloneSet{} + if err := rc.client.Get(context.TODO(), rc.key, object); err != nil { + return rc, err + } + rc.object = object + rc.WorkloadInfo = util.ParseWorkload(object) + return rc, nil +} + +func (rc *realController) ListOwnedPods() ([]*corev1.Pod, error) { + if rc.pods != nil { + return rc.pods, nil + } + var err error + rc.pods, err = util.ListOwnedPods(rc.client, rc.object) + return rc.pods, err +} + +func (rc 
*realController) Initialize(release *v1beta1.BatchRelease) error { + if rc.object == nil || control.IsControlledByBatchRelease(release, rc.object) { + return nil + } + + // disable the hpa + if err := hpa.DisableHPA(rc.client, rc.object); err != nil { + return err + } + klog.InfoS("Initialize: disable hpa for cloneset successfully", "cloneset", klog.KObj(rc.object)) + + // patch the cloneset + setting, err := control.GetOriginalSetting(rc.object) + if err != nil { + return errors.NewBadRequestError(fmt.Errorf("cannot get original setting for cloneset %v: %s from annotation", klog.KObj(rc.object), err.Error())) + } + control.InitOriginalSetting(&setting, rc.object) + patchData := patch.NewClonesetPatch() + patchData.InsertAnnotation(v1beta1.OriginalDeploymentStrategyAnnotation, util.DumpJSON(&setting)) + patchData.InsertAnnotation(util.BatchReleaseControlAnnotation, util.DumpJSON(metav1.NewControllerRef( + release, release.GetObjectKind().GroupVersionKind()))) + // we use partition = 100% to function as "paused" instead of setting pasued field as true + // it is manily to keep consistency with partition style (partition is already set as 100% in webhook) + patchData.UpdatePaused(false) + maxSurge := intstr.FromInt(1) // select the minimum positive number as initial value + maxUnavailable := intstr.FromInt(0) + patchData.UpdateMaxSurge(&maxSurge) + patchData.UpdateMaxUnavailable(&maxUnavailable) + patchData.UpdateMinReadySeconds(v1beta1.MaxReadySeconds) + klog.InfoS("Initialize: try to update cloneset", "cloneset", klog.KObj(rc.object), "patchData", patchData.String()) + return rc.client.Patch(context.TODO(), util.GetEmptyObjectWithKey(rc.object), patchData) +} + +func (rc *realController) UpgradeBatch(ctx *batchcontext.BatchContext) error { + if err := control.ValidateReadyForBlueGreenRelease(rc.object); err != nil { + return errors.NewBadRequestError(fmt.Errorf("cannot upgrade batch, because cloneset %v doesn't satisfy conditions: %s", klog.KObj(rc.object), err.Error())) + } + desired, _ := intstr.GetScaledValueFromIntOrPercent(&ctx.DesiredSurge, int(ctx.Replicas), true) + current, _ := intstr.GetScaledValueFromIntOrPercent(&ctx.CurrentSurge, int(ctx.Replicas), true) + if current >= desired { + klog.InfoS("No need to upgrade batch, because current >= desired", "cloneset", klog.KObj(rc.object), "current", current, "desired", desired) + return nil + } else { + klog.InfoS("Will update batch for cloneset, because current < desired", "cloneset", klog.KObj(rc.object), "current", current, "desired", desired) + } + patchData := patch.NewClonesetPatch() + // avoid interference from partition + patchData.UpdatePartiton(nil) + patchData.UpdateMaxSurge(&ctx.DesiredSurge) + return rc.client.Patch(context.TODO(), util.GetEmptyObjectWithKey(rc.object), patchData) +} + +func (rc *realController) Finalize(release *v1beta1.BatchRelease) error { + if release.Spec.ReleasePlan.BatchPartition != nil { + // continuous release (not supported yet) + /* + patchData := patch.NewClonesetPatch() + patchData.DeleteAnnotation(util.BatchReleaseControlAnnotation) + return rc.client.Patch(context.TODO(), util.GetEmptyObjectWithKey(rc.object), patchData) + */ + klog.Warningf("continuous release is not supported yet for bluegreen style release") + return nil + } + + // restore the original setting and remove annotation + if !rc.restored() { + c := util.GetEmptyObjectWithKey(rc.object) + setting, err := control.GetOriginalSetting(rc.object) + if err != nil { + return err + } + patchData := patch.NewClonesetPatch() + 
patchData.UpdateMinReadySeconds(setting.MinReadySeconds) + patchData.UpdateMaxSurge(setting.MaxSurge) + patchData.UpdateMaxUnavailable(setting.MaxUnavailable) + patchData.DeleteAnnotation(v1beta1.OriginalDeploymentStrategyAnnotation) + patchData.DeleteAnnotation(util.BatchReleaseControlAnnotation) + if err := rc.client.Patch(context.TODO(), c, patchData); err != nil { + return err + } + klog.InfoS("Finalize: cloneset bluegreen release: wait all pods updated and ready", "cloneset", klog.KObj(rc.object)) + } + + // wait all pods updated and ready + if rc.object.Status.ReadyReplicas != rc.object.Status.UpdatedReadyReplicas { + return errors.NewRetryError(fmt.Errorf("cloneset %v finalize not done, readyReplicas %d != updatedReadyReplicas %d, current policy %s", + klog.KObj(rc.object), rc.object.Status.ReadyReplicas, rc.object.Status.UpdatedReadyReplicas, release.Spec.ReleasePlan.FinalizingPolicy)) + } + klog.InfoS("Finalize: cloneset bluegreen release: all pods updated and ready", "cloneset", klog.KObj(rc.object)) + + // restore the hpa + return hpa.RestoreHPA(rc.client, rc.object) +} + +func (rc *realController) restored() bool { + if rc.object == nil || rc.object.DeletionTimestamp != nil { + return true + } + if rc.object.Annotations == nil || len(rc.object.Annotations[v1beta1.OriginalDeploymentStrategyAnnotation]) == 0 { + return true + } + return false +} + +// bluegreen doesn't support rollback in batch, because: +// - bluegreen support traffic rollback instead, rollback in batch is not necessary +// - it's diffcult for both Deployment and CloneSet to support rollback in batch, with the "minReadySeconds" implementation +func (rc *realController) CalculateBatchContext(release *v1beta1.BatchRelease) (*batchcontext.BatchContext, error) { + // current batch index + currentBatch := release.Status.CanaryStatus.CurrentBatch + // the number of expected updated pods + desiredSurge := release.Spec.ReleasePlan.Batches[currentBatch].CanaryReplicas + // the number of current updated pods + currentSurge := intstr.FromInt(0) + if rc.object.Spec.UpdateStrategy.MaxSurge != nil { + currentSurge = *rc.object.Spec.UpdateStrategy.MaxSurge + if currentSurge == intstr.FromInt(1) { + // currentSurge == intstr.FromInt(1) means that currentSurge is the initial value + // if the value is indeed set by user, setting it to 0 still does no harm + currentSurge = intstr.FromInt(0) + } + } + desired, _ := intstr.GetScaledValueFromIntOrPercent(&desiredSurge, int(rc.Replicas), true) + + batchContext := &batchcontext.BatchContext{ + Pods: rc.pods, + RolloutID: release.Spec.ReleasePlan.RolloutID, + CurrentBatch: currentBatch, + UpdateRevision: release.Status.UpdateRevision, + DesiredSurge: desiredSurge, + CurrentSurge: currentSurge, + // the following fields isused to check if batch is ready + Replicas: rc.Replicas, + UpdatedReplicas: rc.Status.UpdatedReplicas, + UpdatedReadyReplicas: rc.Status.UpdatedReadyReplicas, + DesiredUpdatedReplicas: int32(desired), + PlannedUpdatedReplicas: int32(desired), + } + // the number of no need update pods that marked before rollout + // if noNeedUpdate := release.Status.CanaryStatus.NoNeedUpdateReplicas; noNeedUpdate != nil { + // batchContext.FilterFunc = labelpatch.FilterPodsForUnorderedUpdate + // } + return batchContext, nil +} diff --git a/pkg/controller/batchrelease/control/bluegreenstyle/cloneset/control_test.go b/pkg/controller/batchrelease/control/bluegreenstyle/cloneset/control_test.go new file mode 100644 index 00000000..06a77197 --- /dev/null +++ 
b/pkg/controller/batchrelease/control/bluegreenstyle/cloneset/control_test.go @@ -0,0 +1,548 @@ +/* +Copyright 2022 The Kruise Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cloneset + +import ( + "context" + "encoding/json" + "reflect" + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + kruiseappsv1alpha1 "github.com/openkruise/kruise-api/apps/v1alpha1" + rolloutapi "github.com/openkruise/rollouts/api" + "github.com/openkruise/rollouts/api/v1beta1" + batchcontext "github.com/openkruise/rollouts/pkg/controller/batchrelease/context" + control "github.com/openkruise/rollouts/pkg/controller/batchrelease/control" + "github.com/openkruise/rollouts/pkg/util" + apps "k8s.io/api/apps/v1" + autoscalingv1 "k8s.io/api/autoscaling/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/apimachinery/pkg/util/uuid" + "k8s.io/utils/pointer" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +var ( + scheme = runtime.NewScheme() + + cloneKey = types.NamespacedName{ + Namespace: "default", + Name: "cloneset", + } + cloneDemo = &kruiseappsv1alpha1.CloneSet{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "apps.kruise.io/v1alpha1", + Kind: "CloneSet", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: cloneKey.Name, + Namespace: cloneKey.Namespace, + Generation: 1, + Labels: map[string]string{ + "app": "busybox", + }, + Annotations: map[string]string{ + "type": "unit-test", + }, + }, + Spec: kruiseappsv1alpha1.CloneSetSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "busybox", + }, + }, + Replicas: pointer.Int32(10), + UpdateStrategy: kruiseappsv1alpha1.CloneSetUpdateStrategy{ + Paused: true, + Partition: &intstr.IntOrString{Type: intstr.String, StrVal: "0%"}, + MaxUnavailable: &intstr.IntOrString{Type: intstr.Int, IntVal: 1}, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "app": "busybox", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "busybox", + Image: "busybox:latest", + }, + }, + }, + }, + }, + Status: kruiseappsv1alpha1.CloneSetStatus{ + Replicas: 10, + UpdatedReplicas: 0, + ReadyReplicas: 10, + AvailableReplicas: 10, + UpdatedReadyReplicas: 0, + UpdateRevision: "version-2", + CurrentRevision: "version-1", + ObservedGeneration: 1, + CollisionCount: pointer.Int32(1), + }, + } + + releaseDemo = &v1beta1.BatchRelease{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "rollouts.kruise.io/v1alpha1", + Kind: "BatchRelease", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "release", + Namespace: cloneKey.Namespace, + UID: uuid.NewUUID(), + }, + Spec: v1beta1.BatchReleaseSpec{ + ReleasePlan: v1beta1.ReleasePlan{ + Batches: []v1beta1.ReleaseBatch{ + { + CanaryReplicas: intstr.FromString("10%"), + }, + { + CanaryReplicas: intstr.FromString("50%"), + }, + { + CanaryReplicas: 
intstr.FromString("100%"), + }, + }, + }, + WorkloadRef: v1beta1.ObjectRef{ + APIVersion: cloneDemo.APIVersion, + Kind: cloneDemo.Kind, + Name: cloneDemo.Name, + }, + }, + Status: v1beta1.BatchReleaseStatus{ + CanaryStatus: v1beta1.BatchReleaseCanaryStatus{ + CurrentBatch: 0, + }, + }, + } + hpaDemo = &autoscalingv1.HorizontalPodAutoscaler{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "autoscaling/v1", + Kind: "HorizontalPodAutoscaler", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "hpa", + Namespace: cloneKey.Namespace, + }, + Spec: autoscalingv1.HorizontalPodAutoscalerSpec{ + ScaleTargetRef: autoscalingv1.CrossVersionObjectReference{ + APIVersion: "apps.kruise.io/v1alpha1", + Kind: "CloneSet", + Name: cloneDemo.Name, + }, + MinReplicas: pointer.Int32(1), + MaxReplicas: 10, + }, + } +) + +func init() { + apps.AddToScheme(scheme) + rolloutapi.AddToScheme(scheme) + kruiseappsv1alpha1.AddToScheme(scheme) + autoscalingv1.AddToScheme(scheme) +} + +func TestControlPackage(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "CloneSet Control Package Suite") +} + +var _ = Describe("CloneSet Control", func() { + var ( + c client.Client + rc *realController + cloneset *kruiseappsv1alpha1.CloneSet + release *v1beta1.BatchRelease + hpa *autoscalingv1.HorizontalPodAutoscaler + ) + + BeforeEach(func() { + cloneset = cloneDemo.DeepCopy() + release = releaseDemo.DeepCopy() + hpa = hpaDemo.DeepCopy() + c = fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(cloneset, release, hpa). + Build() + rc = &realController{ + key: types.NamespacedName{Namespace: cloneset.Namespace, Name: cloneset.Name}, + client: c, + } + }) + + It("should initialize cloneset successfully", func() { + // build controller + _, err := rc.BuildController() + Expect(err).NotTo(HaveOccurred()) + // call Initialize method + err = retryFunction(3, func() error { + return rc.Initialize(release) + }) + Expect(err).NotTo(HaveOccurred()) + // inspect if HPA is disabled + disabledHPA := &autoscalingv1.HorizontalPodAutoscaler{} + err = c.Get(context.TODO(), types.NamespacedName{Namespace: hpa.Namespace, Name: hpa.Name}, disabledHPA) + Expect(err).NotTo(HaveOccurred()) + Expect(disabledHPA.Spec.ScaleTargetRef.Name).To(Equal(cloneset.Name + "-DisableByRollout")) + + // inspect if Cloneset is patched properly + updatedCloneset := &kruiseappsv1alpha1.CloneSet{} + err = c.Get(context.TODO(), client.ObjectKeyFromObject(cloneset), updatedCloneset) + Expect(err).NotTo(HaveOccurred()) + + // inspect if annotations are added + Expect(updatedCloneset.Annotations).To(HaveKey(v1beta1.OriginalDeploymentStrategyAnnotation)) + Expect(updatedCloneset.Annotations).To(HaveKey(util.BatchReleaseControlAnnotation)) + Expect(updatedCloneset.Annotations[util.BatchReleaseControlAnnotation]).To(Equal(getControlInfo(release))) + + // inspect if strategy is updated + Expect(updatedCloneset.Spec.UpdateStrategy.Paused).To(BeFalse()) + Expect(updatedCloneset.Spec.UpdateStrategy.MaxSurge.IntVal).To(Equal(int32(1))) + Expect(updatedCloneset.Spec.UpdateStrategy.MaxUnavailable.IntVal).To(Equal(int32(0))) + Expect(updatedCloneset.Spec.MinReadySeconds).To(Equal(int32(v1beta1.MaxReadySeconds))) + }) + + It("should finalize CloneSet successfully", func() { + // hack to patch cloneset status + cloneset.Status.UpdatedReadyReplicas = 10 + err := c.Status().Update(context.TODO(), cloneset) + Expect(err).NotTo(HaveOccurred()) + // build controller + rc.object = nil + _, err = rc.BuildController() + Expect(err).NotTo(HaveOccurred()) + // call Finalize method + err = 
retryFunction(3, func() error { + return rc.Finalize(release) + }) + Expect(err).NotTo(HaveOccurred()) + + // inspect if CloneSet is patched properly + updatedCloneset := &kruiseappsv1alpha1.CloneSet{} + err = c.Get(context.TODO(), client.ObjectKeyFromObject(cloneset), updatedCloneset) + Expect(err).NotTo(HaveOccurred()) + + // inspect if annotations are removed + Expect(updatedCloneset.Annotations).NotTo(HaveKey(v1beta1.OriginalDeploymentStrategyAnnotation)) + Expect(updatedCloneset.Annotations).NotTo(HaveKey(util.BatchReleaseControlAnnotation)) + + // inspect if strategy is restored + Expect(updatedCloneset.Spec.UpdateStrategy.MaxSurge).To(BeNil()) + Expect(*updatedCloneset.Spec.UpdateStrategy.MaxUnavailable).To(Equal(intstr.IntOrString{Type: intstr.Int, IntVal: 1})) + Expect(updatedCloneset.Spec.MinReadySeconds).To(Equal(int32(0))) + + // inspect if HPA is restored + restoredHPA := &autoscalingv1.HorizontalPodAutoscaler{} + err = c.Get(context.TODO(), types.NamespacedName{Namespace: hpa.Namespace, Name: hpa.Name}, restoredHPA) + Expect(err).NotTo(HaveOccurred()) + Expect(restoredHPA.Spec.ScaleTargetRef.Name).To(Equal(cloneset.Name)) + }) + + It("should upgradBatch for CloneSet successfully", func() { + // call Initialize method + _, err := rc.BuildController() + Expect(err).NotTo(HaveOccurred()) + err = retryFunction(3, func() error { + return rc.Initialize(release) + }) + Expect(err).NotTo(HaveOccurred()) + + // call UpgradeBatch method + rc.object = nil + _, err = rc.BuildController() + Expect(err).NotTo(HaveOccurred()) + batchContext, err := rc.CalculateBatchContext(release) + Expect(err).NotTo(HaveOccurred()) + err = rc.UpgradeBatch(batchContext) + Expect(err).NotTo(HaveOccurred()) + + // inspect if CloneSet is patched properly + updatedCloneset := &kruiseappsv1alpha1.CloneSet{} + err = c.Get(context.TODO(), client.ObjectKeyFromObject(cloneset), updatedCloneset) + Expect(err).NotTo(HaveOccurred()) + Expect(*updatedCloneset.Spec.UpdateStrategy.MaxSurge).To(Equal(intstr.IntOrString{Type: intstr.String, StrVal: "10%"})) + Expect(*updatedCloneset.Spec.UpdateStrategy.MaxUnavailable).To(Equal(intstr.IntOrString{Type: intstr.Int, IntVal: 0})) + }) +}) + +func TestCalculateBatchContext(t *testing.T) { + RegisterFailHandler(Fail) + cases := map[string]struct { + workload func() *kruiseappsv1alpha1.CloneSet + release func() *v1beta1.BatchRelease + result *batchcontext.BatchContext + }{ + "normal case batch0": { + workload: func() *kruiseappsv1alpha1.CloneSet { + return &kruiseappsv1alpha1.CloneSet{ + Spec: kruiseappsv1alpha1.CloneSetSpec{ + Replicas: pointer.Int32Ptr(10), + UpdateStrategy: kruiseappsv1alpha1.CloneSetUpdateStrategy{ + MaxSurge: func() *intstr.IntOrString { p := intstr.FromInt(1); return &p }(), + }, + }, + Status: kruiseappsv1alpha1.CloneSetStatus{ + Replicas: 10, + UpdatedReplicas: 0, + UpdatedReadyReplicas: 0, + AvailableReplicas: 10, + }, + } + }, + release: func() *v1beta1.BatchRelease { + r := &v1beta1.BatchRelease{ + Spec: v1beta1.BatchReleaseSpec{ + ReleasePlan: v1beta1.ReleasePlan{ + Batches: []v1beta1.ReleaseBatch{ + {CanaryReplicas: intstr.IntOrString{Type: intstr.String, StrVal: "50%"}}, + {CanaryReplicas: intstr.IntOrString{Type: intstr.String, StrVal: "100%"}}, + {CanaryReplicas: intstr.IntOrString{Type: intstr.String, StrVal: "100%"}}, + }, + }, + }, + Status: v1beta1.BatchReleaseStatus{ + CanaryStatus: v1beta1.BatchReleaseCanaryStatus{ + CurrentBatch: 0, + }, + UpdateRevision: "update-version", + }, + } + return r + }, + result: &batchcontext.BatchContext{ + 
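// Aside (illustrative, not part of the original patch): the expected values below follow from
// scaling the batch's canaryReplicas against spec.replicas. For batch0 the plan asks for "50%"
// of 10 replicas, so the planned/desired updated replicas come out to 5; a minimal sketch of
// that arithmetic, assuming the usual intstr helper:
//
//	surge := intstr.FromString("50%")
//	planned, _ := intstr.GetScaledValueFromIntOrPercent(&surge, 10, true) // planned == 5
//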
CurrentBatch: 0, + DesiredSurge: intstr.FromString("50%"), + CurrentSurge: intstr.FromInt(0), + Replicas: 10, + UpdatedReplicas: 0, + UpdatedReadyReplicas: 0, + UpdateRevision: "update-version", + PlannedUpdatedReplicas: 5, + DesiredUpdatedReplicas: 5, + }, + }, + + "normal case batch1": { + workload: func() *kruiseappsv1alpha1.CloneSet { + return &kruiseappsv1alpha1.CloneSet{ + Spec: kruiseappsv1alpha1.CloneSetSpec{ + Replicas: pointer.Int32(10), + UpdateStrategy: kruiseappsv1alpha1.CloneSetUpdateStrategy{ + MaxSurge: func() *intstr.IntOrString { p := intstr.FromString("50%"); return &p }(), + }, + }, + Status: kruiseappsv1alpha1.CloneSetStatus{ + Replicas: 15, + UpdatedReplicas: 5, + UpdatedReadyReplicas: 5, + AvailableReplicas: 10, + }, + } + }, + release: func() *v1beta1.BatchRelease { + r := &v1beta1.BatchRelease{ + Spec: v1beta1.BatchReleaseSpec{ + ReleasePlan: v1beta1.ReleasePlan{ + Batches: []v1beta1.ReleaseBatch{ + {CanaryReplicas: intstr.IntOrString{Type: intstr.String, StrVal: "50%"}}, + {CanaryReplicas: intstr.IntOrString{Type: intstr.String, StrVal: "100%"}}, + {CanaryReplicas: intstr.IntOrString{Type: intstr.String, StrVal: "100%"}}, + }, + }, + }, + Status: v1beta1.BatchReleaseStatus{ + CanaryStatus: v1beta1.BatchReleaseCanaryStatus{ + CurrentBatch: 1, + }, + UpdateRevision: "update-version", + }, + } + return r + }, + result: &batchcontext.BatchContext{ + CurrentBatch: 1, + DesiredSurge: intstr.FromString("100%"), + CurrentSurge: intstr.FromString("50%"), + Replicas: 10, + UpdatedReplicas: 5, + UpdatedReadyReplicas: 5, + UpdateRevision: "update-version", + PlannedUpdatedReplicas: 10, + DesiredUpdatedReplicas: 10, + }, + }, + "normal case batch2": { + workload: func() *kruiseappsv1alpha1.CloneSet { + return &kruiseappsv1alpha1.CloneSet{ + Spec: kruiseappsv1alpha1.CloneSetSpec{ + Replicas: pointer.Int32Ptr(10), + UpdateStrategy: kruiseappsv1alpha1.CloneSetUpdateStrategy{ + MaxSurge: func() *intstr.IntOrString { p := intstr.FromString("100%"); return &p }(), + }, + }, + Status: kruiseappsv1alpha1.CloneSetStatus{ + Replicas: 20, + UpdatedReplicas: 10, + UpdatedReadyReplicas: 10, + AvailableReplicas: 10, + ReadyReplicas: 20, + }, + } + }, + release: func() *v1beta1.BatchRelease { + r := &v1beta1.BatchRelease{ + Spec: v1beta1.BatchReleaseSpec{ + ReleasePlan: v1beta1.ReleasePlan{ + Batches: []v1beta1.ReleaseBatch{ + {CanaryReplicas: intstr.IntOrString{Type: intstr.String, StrVal: "50%"}}, + {CanaryReplicas: intstr.IntOrString{Type: intstr.String, StrVal: "100%"}}, + {CanaryReplicas: intstr.IntOrString{Type: intstr.String, StrVal: "100%"}}, + }, + }, + }, + Status: v1beta1.BatchReleaseStatus{ + CanaryStatus: v1beta1.BatchReleaseCanaryStatus{ + CurrentBatch: 2, + }, + UpdateRevision: "update-version", + }, + } + return r + }, + result: &batchcontext.BatchContext{ + CurrentBatch: 2, + UpdateRevision: "update-version", + DesiredSurge: intstr.FromString("100%"), + CurrentSurge: intstr.FromString("100%"), + Replicas: 10, + UpdatedReplicas: 10, + UpdatedReadyReplicas: 10, + PlannedUpdatedReplicas: 10, + DesiredUpdatedReplicas: 10, + }, + }, + } + + for name, cs := range cases { + t.Run(name, func(t *testing.T) { + control := realController{ + object: cs.workload(), + WorkloadInfo: util.ParseWorkload(cs.workload()), + } + got, err := control.CalculateBatchContext(cs.release()) + Expect(err).NotTo(HaveOccurred()) + Expect(got.Log()).Should(Equal(cs.result.Log())) + }) + } +} + +func TestRealController(t *testing.T) { + RegisterFailHandler(Fail) + + release := releaseDemo.DeepCopy() + clone 
:= cloneDemo.DeepCopy() + // for unit test we should set some default value since no webhook or controller is working + clone.Spec.UpdateStrategy.Type = kruiseappsv1alpha1.RecreateCloneSetUpdateStrategyType + cli := fake.NewClientBuilder().WithScheme(scheme).WithObjects(release, clone).Build() + // build new controller + c := NewController(cli, cloneKey, clone.GroupVersionKind()).(*realController) + controller, err := c.BuildController() + Expect(err).NotTo(HaveOccurred()) + // call Initialize + err = controller.Initialize(release) + Expect(err).NotTo(HaveOccurred()) + fetch := &kruiseappsv1alpha1.CloneSet{} + Expect(cli.Get(context.TODO(), cloneKey, fetch)).NotTo(HaveOccurred()) + // check strategy + Expect(fetch.Spec.UpdateStrategy.Type).Should(Equal(kruiseappsv1alpha1.RecreateCloneSetUpdateStrategyType)) + // partition is set to 100% in webhook, therefore we cannot observe it in unit test + // Expect(reflect.DeepEqual(fetch.Spec.UpdateStrategy.Partition, &intstr.IntOrString{Type: intstr.String, StrVal: "100%"})).Should(BeTrue()) + Expect(reflect.DeepEqual(fetch.Spec.UpdateStrategy.MaxSurge, &intstr.IntOrString{Type: intstr.Int, IntVal: 1})).Should(BeTrue()) + Expect(reflect.DeepEqual(fetch.Spec.UpdateStrategy.MaxUnavailable, &intstr.IntOrString{Type: intstr.Int, IntVal: 0})).Should(BeTrue()) + Expect(fetch.Spec.MinReadySeconds).Should(Equal(int32(v1beta1.MaxReadySeconds))) + // check annotations + Expect(fetch.Annotations[util.BatchReleaseControlAnnotation]).Should(Equal(getControlInfo(release))) + Expect(fetch.Annotations[v1beta1.OriginalDeploymentStrategyAnnotation]).Should(Equal(util.DumpJSON(&control.OriginalDeploymentStrategy{ + MaxUnavailable: &intstr.IntOrString{Type: intstr.Int, IntVal: 1}, + MaxSurge: &intstr.IntOrString{Type: intstr.String, StrVal: "0%"}, + MinReadySeconds: 0, + }))) + + c.object = fetch // mock + + for { + batchContext, err := controller.CalculateBatchContext(release) + Expect(err).NotTo(HaveOccurred()) + err = controller.UpgradeBatch(batchContext) + fetch = &kruiseappsv1alpha1.CloneSet{} + // mock + Expect(cli.Get(context.TODO(), cloneKey, fetch)).NotTo(HaveOccurred()) + c.object = fetch + if err == nil { + break + } + } + + fetch = &kruiseappsv1alpha1.CloneSet{} + Expect(cli.Get(context.TODO(), cloneKey, fetch)).NotTo(HaveOccurred()) + Expect(fetch.Spec.UpdateStrategy.MaxSurge.StrVal).Should(Equal("10%")) + Expect(fetch.Spec.UpdateStrategy.MaxUnavailable.IntVal).Should(Equal(int32(0))) + Expect(fetch.Spec.UpdateStrategy.Paused).Should(Equal(false)) + Expect(fetch.Spec.MinReadySeconds).Should(Equal(int32(v1beta1.MaxReadySeconds))) + Expect(fetch.Annotations[v1beta1.OriginalDeploymentStrategyAnnotation]).Should(Equal(util.DumpJSON(&control.OriginalDeploymentStrategy{ + MaxUnavailable: &intstr.IntOrString{Type: intstr.Int, IntVal: 1}, + MaxSurge: &intstr.IntOrString{Type: intstr.String, StrVal: "0%"}, + MinReadySeconds: 0, + }))) + + controller.Finalize(release) + fetch = &kruiseappsv1alpha1.CloneSet{} + Expect(cli.Get(context.TODO(), cloneKey, fetch)).NotTo(HaveOccurred()) + Expect(fetch.Spec.UpdateStrategy.MaxSurge.StrVal).Should(Equal("0%")) + Expect(fetch.Spec.UpdateStrategy.MaxUnavailable.IntVal).Should(Equal(int32(1))) + Expect(fetch.Spec.UpdateStrategy.Paused).Should(Equal(false)) + Expect(fetch.Spec.MinReadySeconds).Should(Equal(int32(0))) +} + +func getControlInfo(release *v1beta1.BatchRelease) string { + owner, _ := json.Marshal(metav1.NewControllerRef(release, release.GetObjectKind().GroupVersionKind())) + return string(owner) +} + +func 
retryFunction(limit int, f func() error) (err error) { + for i := limit; i >= 0; i-- { + if err = f(); err == nil { + return nil + } + } + return err +} diff --git a/pkg/controller/batchrelease/control/bluegreenstyle/control_plane.go b/pkg/controller/batchrelease/control/bluegreenstyle/control_plane.go new file mode 100644 index 00000000..6f597370 --- /dev/null +++ b/pkg/controller/batchrelease/control/bluegreenstyle/control_plane.go @@ -0,0 +1,178 @@ +/* +Copyright 2022 The Kruise Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package bluegreenstyle + +import ( + "github.com/openkruise/rollouts/api/v1beta1" + "github.com/openkruise/rollouts/pkg/controller/batchrelease/control" + "github.com/openkruise/rollouts/pkg/controller/batchrelease/labelpatch" + "github.com/openkruise/rollouts/pkg/util" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/record" + "k8s.io/klog/v2" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +type realBatchControlPlane struct { + Interface + client.Client + record.EventRecorder + patcher labelpatch.LabelPatcher + release *v1beta1.BatchRelease + newStatus *v1beta1.BatchReleaseStatus +} + +type NewInterfaceFunc func(cli client.Client, key types.NamespacedName, gvk schema.GroupVersionKind) Interface + +// NewControlPlane creates a new release controller with bluegreen-style to drive batch release state machine +func NewControlPlane(f NewInterfaceFunc, cli client.Client, recorder record.EventRecorder, release *v1beta1.BatchRelease, newStatus *v1beta1.BatchReleaseStatus, key types.NamespacedName, gvk schema.GroupVersionKind) *realBatchControlPlane { + return &realBatchControlPlane{ + Client: cli, + EventRecorder: recorder, + newStatus: newStatus, + Interface: f(cli, key, gvk), + release: release.DeepCopy(), + patcher: labelpatch.NewLabelPatcher(cli, klog.KObj(release)), + } +} + +func (rc *realBatchControlPlane) Initialize() error { + controller, err := rc.BuildController() + if err != nil { + return err + } + + // claim workload under our control + err = controller.Initialize(rc.release) + if err != nil { + return err + } + + // record revision and replicas + workloadInfo := controller.GetWorkloadInfo() + rc.newStatus.StableRevision = workloadInfo.Status.StableRevision + rc.newStatus.UpdateRevision = workloadInfo.Status.UpdateRevision + rc.newStatus.ObservedWorkloadReplicas = workloadInfo.Replicas + return err +} + +func (rc *realBatchControlPlane) UpgradeBatch() error { + controller, err := rc.BuildController() + if err != nil { + return err + } + + if controller.GetWorkloadInfo().Replicas == 0 { + return nil + } + + batchContext, err := controller.CalculateBatchContext(rc.release) + if err != nil { + return err + } + klog.Infof("BatchRelease %v calculated context when upgrade batch: %s", + klog.KObj(rc.release), batchContext.Log()) + + err = controller.UpgradeBatch(batchContext) + if err != nil { + return err + } + + return nil +} + +func (rc *realBatchControlPlane) 
CheckBatchReady() error { + controller, err := rc.BuildController() + if err != nil { + return err + } + + if controller.GetWorkloadInfo().Replicas == 0 { + return nil + } + + // do not countAndUpdateNoNeedUpdateReplicas when checking, + // the target calculated should be consistent with UpgradeBatch. + batchContext, err := controller.CalculateBatchContext(rc.release) + if err != nil { + return err + } + + klog.Infof("BatchRelease %v calculated context when check batch ready: %s", + klog.KObj(rc.release), batchContext.Log()) + + return batchContext.IsBatchReady() +} + +func (rc *realBatchControlPlane) Finalize() error { + controller, err := rc.BuildController() + if err != nil { + return client.IgnoreNotFound(err) + } + + // release workload control info and clean up resources if it needs + return controller.Finalize(rc.release) +} + +func (rc *realBatchControlPlane) SyncWorkloadInformation() (control.WorkloadEventType, *util.WorkloadInfo, error) { + // ignore the sync if the release plan is deleted + if rc.release.DeletionTimestamp != nil { + return control.WorkloadNormalState, nil, nil + } + + controller, err := rc.BuildController() + if err != nil { + if errors.IsNotFound(err) { + return control.WorkloadHasGone, nil, err + } + return control.WorkloadUnknownState, nil, err + } + + workloadInfo := controller.GetWorkloadInfo() + if !workloadInfo.IsStable() { + klog.Infof("Workload(%v) still reconciling, waiting for it to complete, generation: %v, observed: %v", + workloadInfo.LogKey, workloadInfo.Generation, workloadInfo.Status.ObservedGeneration) + return control.WorkloadStillReconciling, workloadInfo, nil + } + + if workloadInfo.IsPromoted() { + klog.Infof("Workload(%v) has been promoted, no need to rollout again actually, replicas: %v, updated: %v", + workloadInfo.LogKey, workloadInfo.Replicas, workloadInfo.Status.UpdatedReadyReplicas) + return control.WorkloadNormalState, workloadInfo, nil + } + + if workloadInfo.IsScaling(rc.newStatus.ObservedWorkloadReplicas) { + klog.Warningf("Workload(%v) replicas is modified, replicas from: %v to -> %v", + workloadInfo.LogKey, rc.newStatus.ObservedWorkloadReplicas, workloadInfo.Replicas) + return control.WorkloadReplicasChanged, workloadInfo, nil + } + + if workloadInfo.IsRollback(rc.newStatus.StableRevision, rc.newStatus.UpdateRevision) { + klog.Warningf("Workload(%v) is rolling back", workloadInfo.LogKey) + return control.WorkloadRollbackInBatch, workloadInfo, nil + } + + if workloadInfo.IsRevisionNotEqual(rc.newStatus.UpdateRevision) { + klog.Warningf("Workload(%v) updateRevision is modified, updateRevision from: %v to -> %v", + workloadInfo.LogKey, rc.newStatus.UpdateRevision, workloadInfo.Status.UpdateRevision) + return control.WorkloadPodTemplateChanged, workloadInfo, nil + } + + return control.WorkloadNormalState, workloadInfo, nil +} diff --git a/pkg/controller/batchrelease/control/bluegreenstyle/deployment/control.go b/pkg/controller/batchrelease/control/bluegreenstyle/deployment/control.go new file mode 100644 index 00000000..9c693725 --- /dev/null +++ b/pkg/controller/batchrelease/control/bluegreenstyle/deployment/control.go @@ -0,0 +1,336 @@ +/* +Copyright 2022 The Kruise Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package deployment + +import ( + "context" + "fmt" + + "github.com/openkruise/rollouts/api/v1alpha1" + "github.com/openkruise/rollouts/api/v1beta1" + batchcontext "github.com/openkruise/rollouts/pkg/controller/batchrelease/context" + "github.com/openkruise/rollouts/pkg/controller/batchrelease/control" + "github.com/openkruise/rollouts/pkg/controller/batchrelease/control/bluegreenstyle" + "github.com/openkruise/rollouts/pkg/controller/batchrelease/control/bluegreenstyle/hpa" + deploymentutil "github.com/openkruise/rollouts/pkg/controller/deployment/util" + "github.com/openkruise/rollouts/pkg/util" + "github.com/openkruise/rollouts/pkg/util/errors" + "github.com/openkruise/rollouts/pkg/util/patch" + apps "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/klog/v2" + utilpointer "k8s.io/utils/pointer" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +type realController struct { + *util.WorkloadInfo + client client.Client + pods []*corev1.Pod + key types.NamespacedName + object *apps.Deployment + finder *util.ControllerFinder +} + +func NewController(cli client.Client, key types.NamespacedName, _ schema.GroupVersionKind) bluegreenstyle.Interface { + return &realController{ + key: key, + client: cli, + finder: util.NewControllerFinder(cli), + } +} + +func (rc *realController) GetWorkloadInfo() *util.WorkloadInfo { + return rc.WorkloadInfo +} + +func (rc *realController) BuildController() (bluegreenstyle.Interface, error) { + if rc.object != nil { + return rc, nil + } + object := &apps.Deployment{} + if err := rc.client.Get(context.TODO(), rc.key, object); err != nil { + return rc, err + } + rc.object = object + rc.WorkloadInfo = rc.getWorkloadInfo(object) + return rc, nil +} + +func (rc *realController) ListOwnedPods() ([]*corev1.Pod, error) { + if rc.pods != nil { + return rc.pods, nil + } + var err error + rc.pods, err = util.ListOwnedPods(rc.client, rc.object) + return rc.pods, err +} + +// Initialize prepares the Deployment for the BatchRelease process +func (rc *realController) Initialize(release *v1beta1.BatchRelease) error { + if rc.object == nil || control.IsControlledByBatchRelease(release, rc.object) { + return nil + } + // Disable the HPA + if err := hpa.DisableHPA(rc.client, rc.object); err != nil { + return err + } + klog.InfoS("Initialize: disabled HPA for deployment successfully", "deployment", klog.KObj(rc.object)) + + // Patch minReadySeconds for stable ReplicaSet + if err := rc.patchStableRSMinReadySeconds(v1beta1.MaxReadySeconds); err != nil { + return err + } + klog.InfoS("Initialize: patched minReadySeconds for stable replicaset successfully", "deployment", klog.KObj(rc.object)) + + // Patch Deplopyment + if err := rc.patchDeployment(release); err != nil { + return err + } + klog.InfoS("Initialize: patched deployment successfully", "deployment", klog.KObj(rc.object)) + return nil +} + +func (rc *realController) UpgradeBatch(ctx *batchcontext.BatchContext) error { + if err := 
control.ValidateReadyForBlueGreenRelease(rc.object); err != nil { + return errors.NewBadRequestError(fmt.Errorf("cannot upgrade batch, because deployment %v doesn't satisfy conditions: %s", klog.KObj(rc.object), err.Error())) + } + desired, _ := intstr.GetScaledValueFromIntOrPercent(&ctx.DesiredSurge, int(ctx.Replicas), true) + current, _ := intstr.GetScaledValueFromIntOrPercent(&ctx.CurrentSurge, int(ctx.Replicas), true) + + if current >= desired { + klog.Infof("No need to upgrade batch for deployment %v: because current %d >= desired %d", klog.KObj(rc.object), current, desired) + return nil + } + klog.Infof("Ready to upgrade batch for deployment %v: current %d < desired %d", klog.KObj(rc.object), current, desired) + patchData := patch.NewDeploymentPatch() + // different with canary release, bluegreen don't need to set pause in the process of rollout + // because our webhook may pause the Deployment in some situations, we ensure that the Deployment is not paused + patchData.UpdatePaused(false) + patchData.UpdateStrategy(apps.DeploymentStrategy{ + Type: apps.RollingUpdateDeploymentStrategyType, + RollingUpdate: &apps.RollingUpdateDeployment{ + MaxSurge: &ctx.DesiredSurge, + MaxUnavailable: &intstr.IntOrString{}, + }, + }) + return rc.client.Patch(context.TODO(), util.GetEmptyObjectWithKey(rc.object), patchData) +} + +// set pause to false, restore the original setting, delete annotation +func (rc *realController) Finalize(release *v1beta1.BatchRelease) error { + if release.Spec.ReleasePlan.BatchPartition != nil { + // continuous release (not supported yet) + /* + patchData := patch.NewDeploymentPatch() + patchData.DeleteAnnotation(util.BatchReleaseControlAnnotation) + if err := rc.client.Patch(context.TODO(), d, patchData); err != nil { + return err + } + */ + klog.Warningf("continuous release is not supported yet for bluegreen style release") + return nil + } + + // restore the original setting and remove annotation + d := util.GetEmptyObjectWithKey(rc.object) + if !rc.restored() { + setting, err := control.GetOriginalSetting(rc.object) + if err != nil { + return err + } + patchData := patch.NewDeploymentPatch() + // restore the original setting + patchData.UpdatePaused(false) + patchData.UpdateMinReadySeconds(setting.MinReadySeconds) + patchData.UpdateProgressDeadlineSeconds(setting.ProgressDeadlineSeconds) + patchData.UpdateMaxSurge(setting.MaxSurge) + patchData.UpdateMaxUnavailable(setting.MaxUnavailable) + // restore label and annotation + patchData.DeleteAnnotation(v1beta1.OriginalDeploymentStrategyAnnotation) + patchData.DeleteLabel(v1alpha1.DeploymentStableRevisionLabel) + patchData.DeleteAnnotation(util.BatchReleaseControlAnnotation) + if err := rc.client.Patch(context.TODO(), d, patchData); err != nil { + return err + } + klog.InfoS("Finalize: deployment bluegreen release: wait all pods updated and ready", "Deployment", klog.KObj(rc.object)) + } + + // wait all pods updated and ready + if err := waitAllUpdatedAndReady(d.(*apps.Deployment)); err != nil { + return errors.NewRetryError(err) + } + klog.InfoS("Finalize: All pods updated and ready, then restore hpa", "Deployment", klog.KObj(rc.object)) + + // restore hpa + return hpa.RestoreHPA(rc.client, rc.object) +} + +func (rc *realController) restored() bool { + if rc.object == nil || rc.object.DeletionTimestamp != nil { + return true + } + if rc.object.Annotations == nil || len(rc.object.Annotations[v1beta1.OriginalDeploymentStrategyAnnotation]) == 0 { + return true + } + return false +} + +func (rc *realController) 
CalculateBatchContext(release *v1beta1.BatchRelease) (*batchcontext.BatchContext, error) { + currentBatch := release.Status.CanaryStatus.CurrentBatch + desiredSurge := release.Spec.ReleasePlan.Batches[currentBatch].CanaryReplicas + PlannedUpdatedReplicas := deploymentutil.NewRSReplicasLimit(desiredSurge, rc.object) + currentSurge := intstr.FromInt(0) + if rc.object.Spec.Strategy.RollingUpdate != nil && rc.object.Spec.Strategy.RollingUpdate.MaxSurge != nil { + currentSurge = *rc.object.Spec.Strategy.RollingUpdate.MaxSurge + if currentSurge == intstr.FromInt(1) { + // currentSurge == intstr.FromInt(1) means that currentSurge is the initial value + // if the value is indeed set by user, setting it to 0 still does no harm + currentSurge = intstr.FromInt(0) + } + } + return &batchcontext.BatchContext{ + Pods: rc.pods, + RolloutID: release.Spec.ReleasePlan.RolloutID, + CurrentBatch: currentBatch, + CurrentSurge: currentSurge, + DesiredSurge: desiredSurge, + UpdateRevision: release.Status.UpdateRevision, + + Replicas: rc.Replicas, + UpdatedReplicas: rc.Status.UpdatedReplicas, + UpdatedReadyReplicas: rc.Status.UpdatedReadyReplicas, + PlannedUpdatedReplicas: PlannedUpdatedReplicas, + DesiredUpdatedReplicas: PlannedUpdatedReplicas, + }, nil +} + +func (rc *realController) getWorkloadInfo(d *apps.Deployment) *util.WorkloadInfo { + workloadInfo := util.ParseWorkload(d) + workloadInfo.Status.UpdatedReadyReplicas = 0 + if res, err := rc.getUpdatedReadyReplicas(d); err == nil { + workloadInfo.Status.UpdatedReadyReplicas = res + } + workloadInfo.Status.StableRevision = d.Labels[v1alpha1.DeploymentStableRevisionLabel] + return workloadInfo +} + +func (rc *realController) getUpdatedReadyReplicas(d *apps.Deployment) (int32, error) { + rss := &apps.ReplicaSetList{} + listOpts := []client.ListOption{ + client.InNamespace(d.Namespace), + client.MatchingLabels(d.Spec.Selector.MatchLabels), + client.UnsafeDisableDeepCopy, + } + if err := rc.client.List(context.TODO(), rss, listOpts...); err != nil { + klog.Warningf("getWorkloadInfo failed, because"+"%s", err.Error()) + return -1, err + } + allRSs := rss.Items + // select rs owner by current deployment + ownedRSs := make([]*apps.ReplicaSet, 0) + for i := range allRSs { + rs := &allRSs[i] + if !rs.DeletionTimestamp.IsZero() { + continue + } + + if metav1.IsControlledBy(rs, d) { + ownedRSs = append(ownedRSs, rs) + } + } + newRS := deploymentutil.FindNewReplicaSet(d, ownedRSs) + updatedReadyReplicas := int32(0) + // if newRS is nil, it means the replicaset hasn't been created (because the deployment is paused) + // therefore we can return 0 directly + if newRS != nil { + updatedReadyReplicas = newRS.Status.ReadyReplicas + } + return updatedReadyReplicas, nil +} + +func waitAllUpdatedAndReady(deployment *apps.Deployment) error { + if deployment.Spec.Paused { + return fmt.Errorf("deployment should not be paused") + } + + // ALL pods updated AND ready + if deployment.Status.ReadyReplicas != deployment.Status.UpdatedReplicas { + return fmt.Errorf("all ready replicas should be updated, and all updated replicas should be ready") + } + + availableReplicas := deployment.Status.AvailableReplicas + allowedUnavailable := util.DeploymentMaxUnavailable(deployment) + if allowedUnavailable+availableReplicas < deployment.Status.Replicas { + return fmt.Errorf("ready replicas should satisfy maxUnavailable") + } + return nil +} + +// Patch minReadySeconds for stable ReplicaSet +/* + Here is why: + For rollback scenario, we should set the stable rs minReadySeconds to infinity to make 
pods of the stable rs unavailable, + otherwise Pods in new version would be terminated immediately when rollback happens. + we want to keep them until traffic is switched to the stable version +*/ +func (rc *realController) patchStableRSMinReadySeconds(seconds int32) error { + if stableRS, err := rc.finder.GetDeploymentStableRs(rc.object); err != nil { + return fmt.Errorf("failed to get stable ReplicaSet: %v", err) + } else if stableRS == nil { + klog.Warningf("No stable ReplicaSet found for deployment %s/%s", rc.object.Namespace, rc.object.Name) + } else { + body := fmt.Sprintf(`{"spec":{"minReadySeconds":%v}}`, seconds) + if err = rc.client.Patch(context.TODO(), stableRS, client.RawPatch(types.MergePatchType, []byte(body))); err != nil { + return fmt.Errorf("failed to patch ReplicaSet %s/%s minReadySeconds to %v: %v", stableRS.Namespace, stableRS.Name, v1beta1.MaxReadySeconds, err) + } + } + return nil +} + +// Update deployment strategy: MinReadySeconds, ProgressDeadlineSeconds, MaxSurge, MaxUnavailable +func (rc *realController) patchDeployment(release *v1beta1.BatchRelease) error { + setting, err := control.GetOriginalSetting(rc.object) + if err != nil { + return errors.NewBadRequestError(fmt.Errorf("cannot get original setting for deployment %v: %s", klog.KObj(rc.object), err.Error())) + } + control.InitOriginalSetting(&setting, rc.object) + patchData := patch.NewDeploymentPatch() + patchData.InsertAnnotation(v1beta1.OriginalDeploymentStrategyAnnotation, util.DumpJSON(&setting)) + patchData.InsertAnnotation(util.BatchReleaseControlAnnotation, util.DumpJSON(metav1.NewControllerRef( + release, release.GetObjectKind().GroupVersionKind()))) + + patchData.UpdateStrategy(apps.DeploymentStrategy{ + Type: apps.RollingUpdateDeploymentStrategyType, + RollingUpdate: &apps.RollingUpdateDeployment{ + MaxSurge: &intstr.IntOrString{Type: intstr.Int, IntVal: 1}, + MaxUnavailable: &intstr.IntOrString{Type: intstr.Int, IntVal: 0}, + }, + }) + patchData.UpdateMinReadySeconds(v1beta1.MaxReadySeconds) + patchData.UpdateProgressDeadlineSeconds(utilpointer.Int32(v1beta1.MaxProgressSeconds)) + + // Apply the patch to the Deployment + if err := rc.client.Patch(context.TODO(), util.GetEmptyObjectWithKey(rc.object), patchData); err != nil { + return fmt.Errorf("failed to patch deployment %v: %v", klog.KObj(rc.object), err) + } + return nil +} diff --git a/pkg/controller/batchrelease/control/bluegreenstyle/deployment/control_test.go b/pkg/controller/batchrelease/control/bluegreenstyle/deployment/control_test.go new file mode 100644 index 00000000..f6fc934f --- /dev/null +++ b/pkg/controller/batchrelease/control/bluegreenstyle/deployment/control_test.go @@ -0,0 +1,728 @@ +/* +Copyright 2022 The Kruise Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package deployment + +import ( + "context" + "encoding/json" + "fmt" + "reflect" + "strconv" + "testing" + "time" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + kruiseappsv1alpha1 "github.com/openkruise/kruise-api/apps/v1alpha1" + rolloutapi "github.com/openkruise/rollouts/api" + "github.com/openkruise/rollouts/api/v1beta1" + batchcontext "github.com/openkruise/rollouts/pkg/controller/batchrelease/context" + control "github.com/openkruise/rollouts/pkg/controller/batchrelease/control" + "github.com/openkruise/rollouts/pkg/util" + "github.com/openkruise/rollouts/pkg/util/errors" + apps "k8s.io/api/apps/v1" + autoscalingv1 "k8s.io/api/autoscaling/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/apimachinery/pkg/util/rand" + "k8s.io/apimachinery/pkg/util/uuid" + "k8s.io/utils/pointer" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +var ( + scheme = runtime.NewScheme() + + deploymentKey = types.NamespacedName{ + Name: "deployment", + Namespace: "default", + } + + deploymentDemo = &apps.Deployment{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "apps/v1", + Kind: "Deployment", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: deploymentKey.Name, + Namespace: deploymentKey.Namespace, + Generation: 1, + Labels: map[string]string{ + "app": "busybox", + }, + Annotations: map[string]string{ + "type": "unit-test", + }, + }, + Spec: apps.DeploymentSpec{ + Paused: true, + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "busybox", + }, + }, + Replicas: pointer.Int32(10), + Strategy: apps.DeploymentStrategy{ + Type: apps.RollingUpdateDeploymentStrategyType, + RollingUpdate: &apps.RollingUpdateDeployment{ + MaxUnavailable: &intstr.IntOrString{Type: intstr.Int, IntVal: 1}, + MaxSurge: &intstr.IntOrString{Type: intstr.String, StrVal: "20%"}, + }, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "app": "busybox", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "busybox", + Image: "busybox:latest", + }, + }, + }, + }, + }, + Status: apps.DeploymentStatus{ + Replicas: 10, + UpdatedReplicas: 0, + ReadyReplicas: 10, + AvailableReplicas: 10, + CollisionCount: pointer.Int32(1), + ObservedGeneration: 1, + }, + } + + deploymentDemo2 = &apps.Deployment{ + TypeMeta: metav1.TypeMeta{ + APIVersion: apps.SchemeGroupVersion.String(), + Kind: "Deployment", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "deployment", + Namespace: "default", + UID: types.UID("87076677"), + Generation: 2, + Labels: map[string]string{ + "app": "busybox", + apps.DefaultDeploymentUniqueLabelKey: "update-pod-hash", + }, + }, + Spec: apps.DeploymentSpec{ + Replicas: pointer.Int32(10), + Strategy: apps.DeploymentStrategy{ + Type: apps.RollingUpdateDeploymentStrategyType, + RollingUpdate: &apps.RollingUpdateDeployment{ + MaxSurge: &intstr.IntOrString{Type: intstr.Int, IntVal: int32(1)}, + MaxUnavailable: &intstr.IntOrString{Type: intstr.Int, IntVal: int32(0)}, + }, + }, + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "busybox", + }, + }, + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: containers("v2"), + }, + }, + }, + Status: apps.DeploymentStatus{ + Replicas: 10, + ReadyReplicas: 10, + UpdatedReplicas: 0, + AvailableReplicas: 10, + }, + } + + releaseDemo = &v1beta1.BatchRelease{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "rollouts.kruise.io/v1alpha1", + Kind: "BatchRelease", + }, + ObjectMeta: metav1.ObjectMeta{ + 
Name: "release", + Namespace: deploymentKey.Namespace, + UID: uuid.NewUUID(), + }, + Spec: v1beta1.BatchReleaseSpec{ + ReleasePlan: v1beta1.ReleasePlan{ + FinalizingPolicy: v1beta1.WaitResumeFinalizingPolicyType, + Batches: []v1beta1.ReleaseBatch{ + { + CanaryReplicas: intstr.FromString("10%"), + }, + { + CanaryReplicas: intstr.FromString("50%"), + }, + { + CanaryReplicas: intstr.FromString("100%"), + }, + }, + }, + WorkloadRef: v1beta1.ObjectRef{ + APIVersion: deploymentDemo.APIVersion, + Kind: deploymentDemo.Kind, + Name: deploymentDemo.Name, + }, + }, + Status: v1beta1.BatchReleaseStatus{ + CanaryStatus: v1beta1.BatchReleaseCanaryStatus{ + CurrentBatch: 1, + }, + }, + } + + hpaDemo = &autoscalingv1.HorizontalPodAutoscaler{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "autoscaling/v1", + Kind: "HorizontalPodAutoscaler", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "hpa", + Namespace: deploymentKey.Namespace, + }, + Spec: autoscalingv1.HorizontalPodAutoscalerSpec{ + ScaleTargetRef: autoscalingv1.CrossVersionObjectReference{ + APIVersion: apps.SchemeGroupVersion.String(), + Kind: "Deployment", + Name: deploymentDemo.Name, + }, + MinReplicas: pointer.Int32(1), + MaxReplicas: 10, + }, + } +) + +func init() { + apps.AddToScheme(scheme) + rolloutapi.AddToScheme(scheme) + kruiseappsv1alpha1.AddToScheme(scheme) + autoscalingv1.AddToScheme(scheme) +} + +func TestControlPackage(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Deployment Control Package Suite") +} + +var _ = Describe("Deployment Control", func() { + var ( + c client.Client + rc *realController + deployment *apps.Deployment + release *v1beta1.BatchRelease + hpa *autoscalingv1.HorizontalPodAutoscaler + stableRS *apps.ReplicaSet + canaryRS *apps.ReplicaSet + ) + + BeforeEach(func() { + deployment = deploymentDemo.DeepCopy() + release = releaseDemo.DeepCopy() + hpa = hpaDemo.DeepCopy() + + deployment = getStableWithReady(deployment, "v1").(*apps.Deployment) + stableRS = makeStableReplicaSets(deployment).(*apps.ReplicaSet) + stableRS.Spec.MinReadySeconds = 0 + stableRS.Status.ReadyReplicas = *deployment.Spec.Replicas + stableRS.Status.AvailableReplicas = *deployment.Spec.Replicas + + canaryRS = makeCanaryReplicaSets(deployment).(*apps.ReplicaSet) + canaryRS.Status.ReadyReplicas = 0 + canaryRS.Status.AvailableReplicas = 0 + + c = fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(deployment, release, hpa, stableRS, canaryRS). 
+ Build() + rc = &realController{ + key: types.NamespacedName{Namespace: deployment.Namespace, Name: deployment.Name}, + client: c, + finder: util.NewControllerFinder(c), + } + }) + + It("should initialize Deployment successfully", func() { + // build controller + _, err := rc.BuildController() + Expect(err).NotTo(HaveOccurred()) + // call Initialize method + err = retryFunction(3, func() error { + return rc.Initialize(release) + }) + Expect(err).NotTo(HaveOccurred()) + // inspect if HPA is disabled + disabledHPA := &autoscalingv1.HorizontalPodAutoscaler{} + err = c.Get(context.TODO(), types.NamespacedName{Namespace: hpa.Namespace, Name: hpa.Name}, disabledHPA) + Expect(err).NotTo(HaveOccurred()) + Expect(disabledHPA.Spec.ScaleTargetRef.Name).To(Equal(deployment.Name + "-DisableByRollout")) + + // inspect if MinReadySeconds of stable ReplicaSet is updated + stableRSAfter := &apps.ReplicaSet{} + err = c.Get(context.TODO(), client.ObjectKeyFromObject(stableRS), stableRSAfter) + Expect(err).NotTo(HaveOccurred()) + Expect(stableRSAfter.Spec.MinReadySeconds).To(Equal(int32(v1beta1.MaxReadySeconds))) + + // inspect if Deployment is patched properly + updatedDeployment := &apps.Deployment{} + err = c.Get(context.TODO(), client.ObjectKeyFromObject(deployment), updatedDeployment) + Expect(err).NotTo(HaveOccurred()) + + // inspect if annotations are added + Expect(updatedDeployment.Annotations).To(HaveKey(v1beta1.OriginalDeploymentStrategyAnnotation)) + Expect(updatedDeployment.Annotations).To(HaveKey(util.BatchReleaseControlAnnotation)) + Expect(updatedDeployment.Annotations[util.BatchReleaseControlAnnotation]).To(Equal(getControlInfo(release))) + + // inspect if strategy is updated + Expect(updatedDeployment.Spec.Strategy.RollingUpdate).NotTo(BeNil()) + Expect(updatedDeployment.Spec.Strategy.RollingUpdate.MaxSurge.IntVal).To(Equal(int32(1))) + Expect(updatedDeployment.Spec.Strategy.RollingUpdate.MaxUnavailable.IntVal).To(Equal(int32(0))) + Expect(updatedDeployment.Spec.MinReadySeconds).To(Equal(int32(v1beta1.MaxReadySeconds))) + Expect(*updatedDeployment.Spec.ProgressDeadlineSeconds).To(Equal(int32(v1beta1.MaxProgressSeconds))) + }) + + It("should finalize Deployment successfully", func() { + // build controller + rc.object = nil + _, err := rc.BuildController() + Expect(err).NotTo(HaveOccurred()) + // call Finalize method + err = retryFunction(3, func() error { + return rc.Finalize(release) + }) + Expect(err).NotTo(HaveOccurred()) + + // inspect if Deployment is patched properly + updatedDeployment := &apps.Deployment{} + err = c.Get(context.TODO(), client.ObjectKeyFromObject(deployment), updatedDeployment) + Expect(err).NotTo(HaveOccurred()) + + // inspect if annotations are removed + Expect(updatedDeployment.Annotations).NotTo(HaveKey(v1beta1.OriginalDeploymentStrategyAnnotation)) + Expect(updatedDeployment.Annotations).NotTo(HaveKey(util.BatchReleaseControlAnnotation)) + + // inspect if strategy is restored + Expect(updatedDeployment.Spec.Strategy.RollingUpdate).NotTo(BeNil()) + Expect(*updatedDeployment.Spec.Strategy.RollingUpdate.MaxSurge).To(Equal(intstr.IntOrString{Type: intstr.String, StrVal: "20%"})) + Expect(*updatedDeployment.Spec.Strategy.RollingUpdate.MaxUnavailable).To(Equal(intstr.IntOrString{Type: intstr.Int, IntVal: 1})) + Expect(updatedDeployment.Spec.MinReadySeconds).To(Equal(int32(0))) + Expect(updatedDeployment.Spec.ProgressDeadlineSeconds).To(BeNil()) + + // inspect if HPA is restored + restoredHPA := &autoscalingv1.HorizontalPodAutoscaler{} + err = c.Get(context.TODO(), 
types.NamespacedName{Namespace: hpa.Namespace, Name: hpa.Name}, restoredHPA) + Expect(err).NotTo(HaveOccurred()) + Expect(restoredHPA.Spec.ScaleTargetRef.Name).To(Equal(deployment.Name)) + + // inspect if MinReadySeconds of stable ReplicaSet is restored + stableRSAfter := &apps.ReplicaSet{} + err = c.Get(context.TODO(), client.ObjectKeyFromObject(stableRS), stableRSAfter) + Expect(err).NotTo(HaveOccurred()) + Expect(stableRSAfter.Spec.MinReadySeconds).To(Equal(int32(0))) + }) + + It("should upgradBatch for Deployment successfully", func() { + // call Initialize method + _, err := rc.BuildController() + Expect(err).NotTo(HaveOccurred()) + err = retryFunction(3, func() error { + return rc.Initialize(release) + }) + Expect(err).NotTo(HaveOccurred()) + + // call UpgradeBatch method + rc.object = nil + _, err = rc.BuildController() + Expect(err).NotTo(HaveOccurred()) + batchContext, err := rc.CalculateBatchContext(release) + Expect(err).NotTo(HaveOccurred()) + err = rc.UpgradeBatch(batchContext) + Expect(err).NotTo(HaveOccurred()) + // inspect if Deployment is patched properly + updatedDeployment := &apps.Deployment{} + err = c.Get(context.TODO(), client.ObjectKeyFromObject(deployment), updatedDeployment) + Expect(err).NotTo(HaveOccurred()) + Expect(updatedDeployment.Spec.Paused).To(BeFalse()) + Expect(*updatedDeployment.Spec.Strategy.RollingUpdate.MaxSurge).To(Equal(intstr.IntOrString{Type: intstr.String, StrVal: "50%"})) + Expect(*updatedDeployment.Spec.Strategy.RollingUpdate.MaxUnavailable).To(Equal(intstr.IntOrString{Type: intstr.Int, IntVal: 0})) + }) +}) + +func TestCalculateBatchContext(t *testing.T) { + RegisterFailHandler(Fail) + cases := map[string]struct { + workload func() []client.Object + release func() *v1beta1.BatchRelease + result *batchcontext.BatchContext + }{ + "noraml case": { + workload: func() []client.Object { + deployment := getStableWithReady(deploymentDemo2, "v2").(*apps.Deployment) + deployment.Status = apps.DeploymentStatus{ + Replicas: 15, + UpdatedReplicas: 5, + AvailableReplicas: 12, + ReadyReplicas: 12, + } + // current partition, ie. 
maxSurge + deployment.Spec.Strategy.RollingUpdate.MaxSurge = &intstr.IntOrString{Type: intstr.String, StrVal: "50%"} + deployment.Spec.Replicas = pointer.Int32(10) + newRss := makeCanaryReplicaSets(deployment).(*apps.ReplicaSet) + newRss.Status.ReadyReplicas = 2 + return []client.Object{deployment, newRss, makeStableReplicaSets(deployment)} + }, + + release: func() *v1beta1.BatchRelease { + r := &v1beta1.BatchRelease{ + Spec: v1beta1.BatchReleaseSpec{ + ReleasePlan: v1beta1.ReleasePlan{ + FinalizingPolicy: v1beta1.WaitResumeFinalizingPolicyType, + Batches: []v1beta1.ReleaseBatch{ + { + CanaryReplicas: intstr.IntOrString{Type: intstr.String, StrVal: "50%"}, + }, + { + CanaryReplicas: intstr.IntOrString{Type: intstr.String, StrVal: "100%"}, + }, + }, + }, + }, + Status: v1beta1.BatchReleaseStatus{ + CanaryStatus: v1beta1.BatchReleaseCanaryStatus{ + CurrentBatch: 1, + }, + UpdateRevision: "version-2", + }, + } + return r + }, + result: &batchcontext.BatchContext{ + CurrentBatch: 1, + UpdateRevision: "version-2", + DesiredSurge: intstr.IntOrString{Type: intstr.String, StrVal: "100%"}, + CurrentSurge: intstr.IntOrString{Type: intstr.String, StrVal: "50%"}, + Replicas: 10, + UpdatedReplicas: 5, + UpdatedReadyReplicas: 2, + PlannedUpdatedReplicas: 10, + DesiredUpdatedReplicas: 10, + }, + }, + "maxSurge=99%, replicas=5": { + workload: func() []client.Object { + deployment := getStableWithReady(deploymentDemo2, "v2").(*apps.Deployment) + deployment.Status = apps.DeploymentStatus{ + Replicas: 9, + UpdatedReplicas: 4, + AvailableReplicas: 9, + ReadyReplicas: 9, + } + deployment.Spec.Replicas = pointer.Int32Ptr(5) + // current partition, ie. maxSurge + deployment.Spec.Strategy.RollingUpdate.MaxSurge = &intstr.IntOrString{Type: intstr.String, StrVal: "90%"} + newRss := makeCanaryReplicaSets(deployment).(*apps.ReplicaSet) + newRss.Status.ReadyReplicas = 4 + return []client.Object{deployment, newRss, makeStableReplicaSets(deployment)} + }, + release: func() *v1beta1.BatchRelease { + r := &v1beta1.BatchRelease{ + Spec: v1beta1.BatchReleaseSpec{ + ReleasePlan: v1beta1.ReleasePlan{ + FinalizingPolicy: v1beta1.WaitResumeFinalizingPolicyType, + Batches: []v1beta1.ReleaseBatch{ + { + CanaryReplicas: intstr.IntOrString{Type: intstr.String, StrVal: "90%"}, + }, + { + CanaryReplicas: intstr.IntOrString{Type: intstr.String, StrVal: "99%"}, + }, + }, + }, + }, + Status: v1beta1.BatchReleaseStatus{ + CanaryStatus: v1beta1.BatchReleaseCanaryStatus{ + CurrentBatch: 1, + }, + UpdateRevision: "version-2", + }, + } + return r + }, + result: &batchcontext.BatchContext{ + CurrentBatch: 1, + UpdateRevision: "version-2", + DesiredSurge: intstr.FromString("99%"), + CurrentSurge: intstr.FromString("90%"), + Replicas: 5, + UpdatedReplicas: 4, + UpdatedReadyReplicas: 4, + PlannedUpdatedReplicas: 4, + DesiredUpdatedReplicas: 4, + }, + }, + + // test case for continuous release + // "maxSurge=100%, but it is initialized value": { + // workload: func() []client.Object { + // deployment := getStableWithReady(deploymentDemo2, "v2").(*apps.Deployment) + // deployment.Status = apps.DeploymentStatus{ + // Replicas: 10, + // UpdatedReplicas: 0, + // AvailableReplicas: 10, + // ReadyReplicas: 10, + // } + // // current partition, ie. 
maxSurge + // deployment.Spec.Strategy.RollingUpdate.MaxSurge = &intstr.IntOrString{Type: intstr.String, StrVal: "100%"} + // newRss := makeCanaryReplicaSets(deployment).(*apps.ReplicaSet) + // newRss.Status.ReadyReplicas = 0 + // return []client.Object{deployment, newRss, makeStableReplicaSets(deployment)} + // }, + // release: func() *v1beta1.BatchRelease { + // r := &v1beta1.BatchRelease{ + // Spec: v1beta1.BatchReleaseSpec{ + // ReleasePlan: v1beta1.ReleasePlan{ + // FailureThreshold: &percent, + // FinalizingPolicy: v1beta1.WaitResumeFinalizingPolicyType, + // Batches: []v1beta1.ReleaseBatch{ + // { + // CanaryReplicas: intstr.IntOrString{Type: intstr.String, StrVal: "50%"}, + // }, + // }, + // }, + // }, + // Status: v1beta1.BatchReleaseStatus{ + // CanaryStatus: v1beta1.BatchReleaseCanaryStatus{ + // CurrentBatch: 0, + // }, + // UpdateRevision: "version-2", + // }, + // } + // return r + // }, + // result: &batchcontext.BatchContext{ + // CurrentBatch: 0, + // UpdateRevision: "version-2", + // DesiredPartition: intstr.FromString("50%"), + // FailureThreshold: &percent, + // CurrentPartition: intstr.FromString("0%"), // mainly check this + // Replicas: 10, + // UpdatedReplicas: 0, + // UpdatedReadyReplicas: 0, + // PlannedUpdatedReplicas: 5, + // DesiredUpdatedReplicas: 5, + // }, + // }, + } + + for name, cs := range cases { + t.Run(name, func(t *testing.T) { + cliBuilder := fake.NewClientBuilder().WithScheme(scheme).WithObjects(cs.workload()...) + cli := cliBuilder.Build() + control := realController{ + client: cli, + key: deploymentKey, + } + _, err := control.BuildController() + Expect(err).NotTo(HaveOccurred()) + got, err := control.CalculateBatchContext(cs.release()) + Expect(err).NotTo(HaveOccurred()) + fmt.Printf("expect %s, but got %s", cs.result.Log(), got.Log()) + Expect(got.Log()).Should(Equal(cs.result.Log())) + }) + } +} + +func TestRealController(t *testing.T) { + RegisterFailHandler(Fail) + + release := releaseDemo.DeepCopy() + clone := deploymentDemo.DeepCopy() + stableRs, canaryRs := makeStableReplicaSets(clone), makeCanaryReplicaSets(clone) + cli := fake.NewClientBuilder().WithScheme(scheme).WithObjects(release, clone, stableRs, canaryRs).Build() + // build new controller + c := NewController(cli, deploymentKey, clone.GroupVersionKind()).(*realController) + controller, err := c.BuildController() + Expect(err).NotTo(HaveOccurred()) + // call Initialize + err = controller.Initialize(release) + Expect(err).NotTo(HaveOccurred()) + fetch := &apps.Deployment{} + Expect(cli.Get(context.TODO(), deploymentKey, fetch)).NotTo(HaveOccurred()) + // check strategy + Expect(fetch.Spec.Paused).Should(BeTrue()) + Expect(fetch.Spec.Strategy.Type).Should(Equal(apps.RollingUpdateDeploymentStrategyType)) + Expect(reflect.DeepEqual(fetch.Spec.Strategy.RollingUpdate.MaxSurge, &intstr.IntOrString{Type: intstr.Int, IntVal: 1})).Should(BeTrue()) + Expect(reflect.DeepEqual(fetch.Spec.Strategy.RollingUpdate.MaxUnavailable, &intstr.IntOrString{Type: intstr.Int, IntVal: 0})).Should(BeTrue()) + Expect(fetch.Spec.MinReadySeconds).Should(Equal(int32(v1beta1.MaxReadySeconds))) + Expect(*fetch.Spec.ProgressDeadlineSeconds).Should(Equal(int32(v1beta1.MaxProgressSeconds))) + // check annotations + Expect(fetch.Annotations[util.BatchReleaseControlAnnotation]).Should(Equal(getControlInfo(release))) + fmt.Println(fetch.Annotations[v1beta1.OriginalDeploymentStrategyAnnotation]) + 
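// Aside, not part of the original change: the annotation printed above is simply the JSON dump
// of control.OriginalDeploymentStrategy, so the pre-release strategy can be recovered with a
// plain unmarshal before Finalize restores it. This is an illustrative sketch that reuses the
// same fields the assertion below checks.
saved := &control.OriginalDeploymentStrategy{}
if err := json.Unmarshal([]byte(fetch.Annotations[v1beta1.OriginalDeploymentStrategyAnnotation]), saved); err == nil {
	// expected here: maxUnavailable=1, maxSurge=20%, minReadySeconds=0, progressDeadlineSeconds=600
	fmt.Printf("original strategy: %v %v %v\n", saved.MaxSurge, saved.MaxUnavailable, saved.MinReadySeconds)
}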
Expect(fetch.Annotations[v1beta1.OriginalDeploymentStrategyAnnotation]).Should(Equal(util.DumpJSON(&control.OriginalDeploymentStrategy{ + MaxUnavailable: &intstr.IntOrString{Type: intstr.Int, IntVal: 1}, + MaxSurge: &intstr.IntOrString{Type: intstr.String, StrVal: "20%"}, + MinReadySeconds: 0, + ProgressDeadlineSeconds: pointer.Int32(600), + }))) + // check minReadyseconds field of stable replicaset + fetchRS := &apps.ReplicaSet{} + Expect(cli.Get(context.TODO(), types.NamespacedName{Name: stableRs.GetName(), Namespace: stableRs.GetNamespace()}, fetchRS)).NotTo(HaveOccurred()) + Expect(fetchRS.Spec.MinReadySeconds).Should(Equal(int32(v1beta1.MaxReadySeconds))) + + c.object = fetch // mock + + for { + batchContext, err := controller.CalculateBatchContext(release) + Expect(err).NotTo(HaveOccurred()) + err = controller.UpgradeBatch(batchContext) + fetch := &apps.Deployment{} + // mock + Expect(cli.Get(context.TODO(), deploymentKey, fetch)).NotTo(HaveOccurred()) + c.object = fetch + if err == nil { + break + } + } + fetch = &apps.Deployment{} + Expect(cli.Get(context.TODO(), deploymentKey, fetch)).NotTo(HaveOccurred()) + // currentBatch is 1, which means br is in the second batch, maxSurge is 50% + Expect(reflect.DeepEqual(fetch.Spec.Strategy.RollingUpdate.MaxSurge, &intstr.IntOrString{Type: intstr.String, StrVal: "50%"})).Should(BeTrue()) + + release.Spec.ReleasePlan.BatchPartition = nil + err = controller.Finalize(release) + Expect(errors.IsRetryError(err)).Should(BeTrue()) + fetch = &apps.Deployment{} + Expect(cli.Get(context.TODO(), deploymentKey, fetch)).NotTo(HaveOccurred()) + // check workload strategy + Expect(fetch.Spec.Paused).Should(BeFalse()) + Expect(fetch.Spec.Strategy.Type).Should(Equal(apps.RollingUpdateDeploymentStrategyType)) + Expect(reflect.DeepEqual(fetch.Spec.Strategy.RollingUpdate.MaxSurge, &intstr.IntOrString{Type: intstr.String, StrVal: "20%"})).Should(BeTrue()) + Expect(reflect.DeepEqual(fetch.Spec.Strategy.RollingUpdate.MaxUnavailable, &intstr.IntOrString{Type: intstr.Int, IntVal: 1})).Should(BeTrue()) + Expect(fetch.Spec.MinReadySeconds).Should(Equal(int32(0))) + Expect(*fetch.Spec.ProgressDeadlineSeconds).Should(Equal(int32(600))) +} +func getControlInfo(release *v1beta1.BatchRelease) string { + owner, _ := json.Marshal(metav1.NewControllerRef(release, release.GetObjectKind().GroupVersionKind())) + return string(owner) +} + +func makeCanaryReplicaSets(d client.Object) client.Object { + deploy := d.(*apps.Deployment) + labels := deploy.Spec.Selector.DeepCopy().MatchLabels + labels[apps.DefaultDeploymentUniqueLabelKey] = util.ComputeHash(&deploy.Spec.Template, nil) + return &apps.ReplicaSet{ + TypeMeta: metav1.TypeMeta{ + APIVersion: apps.SchemeGroupVersion.String(), + Kind: "ReplicaSet", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: deploy.Name + rand.String(5), + Namespace: deploy.Namespace, + UID: uuid.NewUUID(), + Labels: labels, + OwnerReferences: []metav1.OwnerReference{ + *metav1.NewControllerRef(deploy, deploy.GroupVersionKind()), + }, + CreationTimestamp: metav1.Now(), + }, + Spec: apps.ReplicaSetSpec{ + Replicas: deploy.Spec.Replicas, + Selector: deploy.Spec.Selector.DeepCopy(), + Template: *deploy.Spec.Template.DeepCopy(), + }, + } + +} + +func makeStableReplicaSets(d client.Object) client.Object { + deploy := d.(*apps.Deployment) + stableTemplate := deploy.Spec.Template.DeepCopy() + stableTemplate.Spec.Containers = containers("v1") + labels := deploy.Spec.Selector.DeepCopy().MatchLabels + labels[apps.DefaultDeploymentUniqueLabelKey] = 
util.ComputeHash(stableTemplate, nil) + return &apps.ReplicaSet{ + TypeMeta: metav1.TypeMeta{ + APIVersion: apps.SchemeGroupVersion.String(), + Kind: "ReplicaSet", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: deploy.Name + rand.String(5), + Namespace: deploy.Namespace, + UID: uuid.NewUUID(), + Labels: labels, + OwnerReferences: []metav1.OwnerReference{ + *metav1.NewControllerRef(deploy, deploy.GroupVersionKind()), + }, + CreationTimestamp: metav1.NewTime(time.Now().Add(-time.Hour)), + }, + Spec: apps.ReplicaSetSpec{ + Replicas: deploy.Spec.Replicas, + Selector: deploy.Spec.Selector.DeepCopy(), + Template: *stableTemplate, + }, + } +} + +func containers(version string) []corev1.Container { + return []corev1.Container{ + { + Name: "busybox", + Image: fmt.Sprintf("busybox:%v", version), + }, + } +} + +func getStableWithReady(workload client.Object, version string) client.Object { + switch workload.(type) { + case *apps.Deployment: + deploy := workload.(*apps.Deployment) + d := deploy.DeepCopy() + d.Spec.Paused = true + d.ResourceVersion = strconv.Itoa(rand.Intn(100000000000)) + d.Spec.Template.Spec.Containers = containers(version) + d.Status.ObservedGeneration = deploy.Generation + return d + + case *kruiseappsv1alpha1.CloneSet: + clone := workload.(*kruiseappsv1alpha1.CloneSet) + c := clone.DeepCopy() + c.ResourceVersion = strconv.Itoa(rand.Intn(100000000000)) + c.Spec.UpdateStrategy.Paused = true + c.Spec.UpdateStrategy.Partition = &intstr.IntOrString{Type: intstr.String, StrVal: "100%"} + c.Spec.Template.Spec.Containers = containers(version) + c.Status.ObservedGeneration = clone.Generation + return c + } + return nil +} + +func retryFunction(limit int, f func() error) (err error) { + for i := limit; i >= 0; i-- { + if err = f(); err == nil { + return nil + } + } + return err +} diff --git a/pkg/controller/batchrelease/control/bluegreenstyle/hpa/hpa.go b/pkg/controller/batchrelease/control/bluegreenstyle/hpa/hpa.go new file mode 100644 index 00000000..1e71a3a2 --- /dev/null +++ b/pkg/controller/batchrelease/control/bluegreenstyle/hpa/hpa.go @@ -0,0 +1,106 @@ +package hpa + +import ( + "context" + "fmt" + "strings" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/klog/v2" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +const ( + HPADisableSuffix = "-DisableByRollout" +) + +func DisableHPA(cli client.Client, object client.Object) error { + hpa := findHPAForWorkload(cli, object) + if hpa == nil { + return nil + } + targetRef, found, err := unstructured.NestedFieldCopy(hpa.Object, "spec", "scaleTargetRef") + if err != nil || !found { + return fmt.Errorf("get HPA targetRef for workload %v failed, because %s", klog.KObj(object), err.Error()) + } + ref := targetRef.(map[string]interface{}) + name, version, kind := ref["name"].(string), ref["apiVersion"].(string), ref["kind"].(string) + if !strings.HasSuffix(name, HPADisableSuffix) { + body := fmt.Sprintf(`{"spec":{"scaleTargetRef":{"apiVersion": "%s", "kind": "%s", "name": "%s"}}}`, version, kind, addSuffix(name)) + if err = cli.Patch(context.TODO(), hpa, client.RawPatch(types.MergePatchType, []byte(body))); err != nil { + return fmt.Errorf("failed to disable HPA %v for workload %v, because %s", klog.KObj(hpa), klog.KObj(object), err.Error()) + } + } + return nil +} + +func RestoreHPA(cli client.Client, object client.Object) error { + hpa := findHPAForWorkload(cli, object) + if hpa == nil { + return nil + } + targetRef, found, err := 
unstructured.NestedFieldCopy(hpa.Object, "spec", "scaleTargetRef") + if err != nil || !found { + return fmt.Errorf("get HPA targetRef for workload %v failed, because %s", klog.KObj(object), err.Error()) + } + ref := targetRef.(map[string]interface{}) + name, version, kind := ref["name"].(string), ref["apiVersion"].(string), ref["kind"].(string) + if strings.HasSuffix(name, HPADisableSuffix) { + body := fmt.Sprintf(`{"spec":{"scaleTargetRef":{"apiVersion": "%s", "kind": "%s", "name": "%s"}}}`, version, kind, removeSuffix(name)) + if err = cli.Patch(context.TODO(), hpa, client.RawPatch(types.MergePatchType, []byte(body))); err != nil { + return fmt.Errorf("failed to restore HPA %v for workload %v, because %s", klog.KObj(hpa), klog.KObj(object), err.Error()) + } + } + return nil +} + +func findHPAForWorkload(cli client.Client, object client.Object) *unstructured.Unstructured { + hpa := findHPA(cli, object, "v2") + if hpa != nil { + return hpa + } + return findHPA(cli, object, "v1") +} + +func findHPA(cli client.Client, object client.Object, version string) *unstructured.Unstructured { + unstructuredList := &unstructured.UnstructuredList{} + hpaGvk := schema.GroupVersionKind{Group: "autoscaling", Kind: "HorizontalPodAutoscaler", Version: version} + unstructuredList.SetGroupVersionKind(hpaGvk) + if err := cli.List(context.TODO(), unstructuredList, &client.ListOptions{Namespace: object.GetNamespace()}); err != nil { + klog.Warningf("Get HPA for workload %v failed, because %s", klog.KObj(object), err.Error()) + return nil + } + klog.Infof("Get %d HPA with %s in namespace %s in total", len(unstructuredList.Items), version, object.GetNamespace()) + for _, item := range unstructuredList.Items { + scaleTargetRef, found, err := unstructured.NestedFieldCopy(item.Object, "spec", "scaleTargetRef") + if err != nil || !found { + continue + } + ref := scaleTargetRef.(map[string]interface{}) + name, version, kind := ref["name"].(string), ref["apiVersion"].(string), ref["kind"].(string) + if version == object.GetObjectKind().GroupVersionKind().GroupVersion().String() && + kind == object.GetObjectKind().GroupVersionKind().Kind && + removeSuffix(name) == object.GetName() { + return &item + } + } + klog.Infof("No HPA found for workload %v", klog.KObj(object)) + return nil +} + +func addSuffix(HPARefName string) string { + if strings.HasSuffix(HPARefName, HPADisableSuffix) { + return HPARefName + } + return HPARefName + HPADisableSuffix +} + +func removeSuffix(HPARefName string) string { + refName := HPARefName + for strings.HasSuffix(refName, HPADisableSuffix) { + refName = refName[:len(refName)-len(HPADisableSuffix)] + } + return refName +} diff --git a/pkg/controller/batchrelease/control/bluegreenstyle/hpa/hpa_test.go b/pkg/controller/batchrelease/control/bluegreenstyle/hpa/hpa_test.go new file mode 100644 index 00000000..57f94af8 --- /dev/null +++ b/pkg/controller/batchrelease/control/bluegreenstyle/hpa/hpa_test.go @@ -0,0 +1,149 @@ +package hpa + +import ( + "context" + "testing" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +var ( + scheme = runtime.NewScheme() +) + +func TestHPAPackage(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "HPA Package Suite") +} + +var _ = Describe("HPA Operations", func() { + var ( + cli client.Client + object *unstructured.Unstructured + ) + + BeforeEach(func() { + object = &unstructured.Unstructured{} + object.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "apps", + Version: "v1", + Kind: "Deployment", + }) + object.SetNamespace("default") + object.SetName("my-deployment") + + cli = fake.NewClientBuilder().WithScheme(scheme).WithObjects(object).Build() + }) + + Context("when disabling and restoring HPA", func() { + It("should disable and restore HPA successfully", func() { + // Create a fake HPA + hpa := &unstructured.Unstructured{} + hpa.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "autoscaling", + Version: "v2", + Kind: "HorizontalPodAutoscaler", + }) + hpa.SetNamespace("default") + hpa.SetName("my-hpa") + unstructured.SetNestedField(hpa.Object, map[string]interface{}{ + "apiVersion": "apps/v1", + "kind": "Deployment", + "name": "my-deployment", + }, "spec", "scaleTargetRef") + + Expect(cli.Create(context.TODO(), hpa)).To(Succeed()) + + // Disable HPA + DisableHPA(cli, object) + + fetchedHPA := &unstructured.Unstructured{} + fetchedHPA.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "autoscaling", + Version: "v2", + Kind: "HorizontalPodAutoscaler", + }) + Expect(cli.Get(context.TODO(), types.NamespacedName{ + Namespace: "default", + Name: "my-hpa", + }, fetchedHPA)).To(Succeed()) + + targetRef, found, err := unstructured.NestedFieldCopy(fetchedHPA.Object, "spec", "scaleTargetRef") + Expect(err).NotTo(HaveOccurred()) + Expect(found).To(BeTrue()) + + ref := targetRef.(map[string]interface{}) + Expect(ref["name"]).To(Equal("my-deployment" + HPADisableSuffix)) + + // Restore HPA + RestoreHPA(cli, object) + + Expect(cli.Get(context.TODO(), types.NamespacedName{ + Namespace: "default", + Name: "my-hpa", + }, fetchedHPA)).To(Succeed()) + + targetRef, found, err = unstructured.NestedFieldCopy(fetchedHPA.Object, "spec", "scaleTargetRef") + Expect(err).NotTo(HaveOccurred()) + Expect(found).To(BeTrue()) + + ref = targetRef.(map[string]interface{}) + Expect(ref["name"]).To(Equal("my-deployment")) + }) + }) + + Context("when finding HPA for workload", func() { + It("should find the correct HPA", func() { + // Create a fake HPA v2 + hpaV2 := &unstructured.Unstructured{} + hpaV2.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "autoscaling", + Version: "v2", + Kind: "HorizontalPodAutoscaler", + }) + hpaV2.SetNamespace("default") + hpaV2.SetName("my-hpa-v2") + unstructured.SetNestedField(hpaV2.Object, map[string]interface{}{ + "apiVersion": "apps/v1", + "kind": "Deployment", + "name": "my-deployment", + }, "spec", "scaleTargetRef") + + // Create a fake HPA v1 + hpaV1 := &unstructured.Unstructured{} + hpaV1.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "autoscaling", + Version: "v1", + Kind: "HorizontalPodAutoscaler", + }) + hpaV1.SetNamespace("default") + hpaV1.SetName("my-hpa-v1") + unstructured.SetNestedField(hpaV1.Object, map[string]interface{}{ + "apiVersion": "apps/v1", + "kind": "Deployment", + "name": "my-deployment", + }, "spec", "scaleTargetRef") + + 
Expect(cli.Create(context.TODO(), hpaV2)).To(Succeed()) + Expect(cli.Create(context.TODO(), hpaV1)).To(Succeed()) + + // Test finding HPA for workload + foundHPA := findHPAForWorkload(cli, object) + Expect(foundHPA).NotTo(BeNil()) + Expect(foundHPA.GetName()).To(Equal("my-hpa-v2")) + + // Delete v2 HPA and check if v1 is found + Expect(cli.Delete(context.TODO(), hpaV2)).To(Succeed()) + foundHPA = findHPAForWorkload(cli, object) + Expect(foundHPA).NotTo(BeNil()) + Expect(foundHPA.GetName()).To(Equal("my-hpa-v1")) + }) + }) +}) diff --git a/pkg/controller/batchrelease/control/bluegreenstyle/interface.go b/pkg/controller/batchrelease/control/bluegreenstyle/interface.go new file mode 100644 index 00000000..7ae602f5 --- /dev/null +++ b/pkg/controller/batchrelease/control/bluegreenstyle/interface.go @@ -0,0 +1,48 @@ +/* +Copyright 2022 The Kruise Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package bluegreenstyle + +import ( + "github.com/openkruise/rollouts/api/v1beta1" + batchcontext "github.com/openkruise/rollouts/pkg/controller/batchrelease/context" + "github.com/openkruise/rollouts/pkg/util" + corev1 "k8s.io/api/core/v1" +) + +type Interface interface { + // BuildController will get workload object and parse workload info, + // and return a initialized controller for workload. + BuildController() (Interface, error) + // GetWorkloadInfo return workload information. + GetWorkloadInfo() *util.WorkloadInfo + // ListOwnedPods fetch the pods owned by the workload. + // Note that we should list pod only if we really need it. + // reserved for future use + ListOwnedPods() ([]*corev1.Pod, error) + // CalculateBatchContext calculate current batch context + // according to release plan and current status of workload. + CalculateBatchContext(release *v1beta1.BatchRelease) (*batchcontext.BatchContext, error) + // Initialize do something before rolling out, for example: + // - pause the workload + // - update: MinReadySeconds, ProgressDeadlineSeconds, Strategy + Initialize(release *v1beta1.BatchRelease) error + // UpgradeBatch upgrade workload according current batch context. 
+ UpgradeBatch(ctx *batchcontext.BatchContext) error + // Finalize do something after rolling out, for example: + // - set pause to false, restore the original setting, delete annotation + Finalize(release *v1beta1.BatchRelease) error +} diff --git a/pkg/controller/batchrelease/control/util.go b/pkg/controller/batchrelease/control/util.go index 0c0e36bd..b9928fce 100644 --- a/pkg/controller/batchrelease/control/util.go +++ b/pkg/controller/batchrelease/control/util.go @@ -21,8 +21,10 @@ import ( "fmt" "strings" + appsv1alpha1 "github.com/openkruise/kruise-api/apps/v1alpha1" "github.com/openkruise/rollouts/api/v1beta1" "github.com/openkruise/rollouts/pkg/util" + apps "k8s.io/api/apps/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" @@ -63,6 +65,42 @@ func IsControlledByBatchRelease(release *v1beta1.BatchRelease, object client.Obj return false } +// only when IsReadyForBlueGreenRelease returns true, can we go on to the next batch +func ValidateReadyForBlueGreenRelease(object client.Object) error { + // check the annotation + if object.GetAnnotations()[util.BatchReleaseControlAnnotation] == "" { + return fmt.Errorf("workload has no control info annotation") + } + switch o := object.(type) { + case *apps.Deployment: + // must be RollingUpdate + if len(o.Spec.Strategy.Type) > 0 && o.Spec.Strategy.Type != apps.RollingUpdateDeploymentStrategyType { + return fmt.Errorf("deployment strategy type is not RollingUpdate") + } + if o.Spec.Strategy.RollingUpdate == nil { + return fmt.Errorf("deployment strategy rollingUpdate is nil") + } + // MinReadySeconds and ProgressDeadlineSeconds must be set + if o.Spec.MinReadySeconds != v1beta1.MaxReadySeconds || o.Spec.ProgressDeadlineSeconds == nil || *o.Spec.ProgressDeadlineSeconds != v1beta1.MaxProgressSeconds { + return fmt.Errorf("deployment strategy minReadySeconds or progressDeadlineSeconds is not MaxReadySeconds or MaxProgressSeconds") + } + + case *appsv1alpha1.CloneSet: + // must be ReCreate + if len(o.Spec.UpdateStrategy.Type) > 0 && o.Spec.UpdateStrategy.Type != appsv1alpha1.RecreateCloneSetUpdateStrategyType { + return fmt.Errorf("cloneSet strategy type is not ReCreate") + } + // MinReadySeconds and ProgressDeadlineSeconds must be set + if o.Spec.MinReadySeconds != v1beta1.MaxReadySeconds { + return fmt.Errorf("cloneSet strategy minReadySeconds is not MaxReadySeconds") + } + + default: + panic("unsupported workload type to ValidateReadyForBlueGreenRelease function") + } + return nil +} + // BuildReleaseControlInfo return a NewControllerRef of release with escaped `"`. 
func BuildReleaseControlInfo(release *v1beta1.BatchRelease) string { owner, _ := json.Marshal(metav1.NewControllerRef(release, release.GetObjectKind().GroupVersionKind())) @@ -112,3 +150,101 @@ func IsCurrentMoreThanOrEqualToDesired(current, desired intstr.IntOrString) bool desiredNum, _ := intstr.GetScaledValueFromIntOrPercent(&desired, 10000000, true) return currentNum >= desiredNum } + +// GetDeploymentStrategy decode the strategy object for advanced deployment +// from the annotation "rollouts.kruise.io/original-deployment-strategy" +func GetOriginalSetting(object client.Object) (OriginalDeploymentStrategy, error) { + setting := OriginalDeploymentStrategy{} + settingStr := object.GetAnnotations()[v1beta1.OriginalDeploymentStrategyAnnotation] + if settingStr == "" { + return setting, nil + } + err := json.Unmarshal([]byte(settingStr), &setting) + return setting, err +} + +// InitOriginalSetting will update the original setting based on the workload object +// note: update the maxSurge and maxUnavailable only when MaxSurge and MaxUnavailable are nil, +// which means they should keep unchanged in continuous release (though continuous release isn't supported for now) +func InitOriginalSetting(setting *OriginalDeploymentStrategy, object client.Object) { + var changeLogs []string + switch o := object.(type) { + case *apps.Deployment: + if setting.MaxSurge == nil { + setting.MaxSurge = getMaxSurgeFromDeployment(o.Spec.Strategy.RollingUpdate) + changeLogs = append(changeLogs, fmt.Sprintf("maxSurge changed from nil to %s", setting.MaxSurge.String())) + } + if setting.MaxUnavailable == nil { + setting.MaxUnavailable = getMaxUnavailableFromDeployment(o.Spec.Strategy.RollingUpdate) + changeLogs = append(changeLogs, fmt.Sprintf("maxUnavailable changed from nil to %s", setting.MaxUnavailable.String())) + } + if setting.ProgressDeadlineSeconds == nil { + setting.ProgressDeadlineSeconds = getIntPtrOrDefault(o.Spec.ProgressDeadlineSeconds, 600) + changeLogs = append(changeLogs, fmt.Sprintf("progressDeadlineSeconds changed from nil to %d", *setting.ProgressDeadlineSeconds)) + } + if setting.MinReadySeconds == 0 { + setting.MinReadySeconds = o.Spec.MinReadySeconds + changeLogs = append(changeLogs, fmt.Sprintf("minReadySeconds changed from 0 to %d", setting.MinReadySeconds)) + } + case *appsv1alpha1.CloneSet: + if setting.MaxSurge == nil { + setting.MaxSurge = getMaxSurgeFromCloneset(o.Spec.UpdateStrategy) + changeLogs = append(changeLogs, fmt.Sprintf("maxSurge changed from nil to %s", setting.MaxSurge.String())) + } + if setting.MaxUnavailable == nil { + setting.MaxUnavailable = getMaxUnavailableFromCloneset(o.Spec.UpdateStrategy) + changeLogs = append(changeLogs, fmt.Sprintf("maxUnavailable changed from nil to %s", setting.MaxUnavailable.String())) + } + if setting.ProgressDeadlineSeconds == nil { + // cloneset is planned to support progressDeadlineSeconds field + } + if setting.MinReadySeconds == 0 { + setting.MinReadySeconds = o.Spec.MinReadySeconds + changeLogs = append(changeLogs, fmt.Sprintf("minReadySeconds changed from 0 to %d", setting.MinReadySeconds)) + } + default: + panic(fmt.Errorf("unsupported object type %T", o)) + } + if len(changeLogs) == 0 { + klog.InfoS("InitOriginalSetting: original setting unchanged", "object", object.GetName()) + return + } + klog.InfoS("InitOriginalSetting: original setting updated", "object", object.GetName(), "changes", strings.Join(changeLogs, ";")) +} + +func getMaxSurgeFromDeployment(ru *apps.RollingUpdateDeployment) *intstr.IntOrString { + defaultMaxSurge := 
intstr.FromString("25%") + if ru == nil || ru.MaxSurge == nil { + return &defaultMaxSurge + } + return ru.MaxSurge +} +func getMaxUnavailableFromDeployment(ru *apps.RollingUpdateDeployment) *intstr.IntOrString { + defaultMaxAnavailale := intstr.FromString("25%") + if ru == nil || ru.MaxUnavailable == nil { + return &defaultMaxAnavailale + } + return ru.MaxUnavailable +} + +func getMaxSurgeFromCloneset(us appsv1alpha1.CloneSetUpdateStrategy) *intstr.IntOrString { + defaultMaxSurge := intstr.FromString("0%") + if us.MaxSurge == nil { + return &defaultMaxSurge + } + return us.MaxSurge +} +func getMaxUnavailableFromCloneset(us appsv1alpha1.CloneSetUpdateStrategy) *intstr.IntOrString { + defaultMaxUnavailable := intstr.FromString("20%") + if us.MaxUnavailable == nil { + return &defaultMaxUnavailable + } + return us.MaxUnavailable +} + +func getIntPtrOrDefault(ptr *int32, defaultVal int32) *int32 { + if ptr == nil { + return &defaultVal + } + return ptr +} diff --git a/pkg/controller/rollout/rollout_bluegreen.go b/pkg/controller/rollout/rollout_bluegreen.go index 562172f0..4ececbe5 100644 --- a/pkg/controller/rollout/rollout_bluegreen.go +++ b/pkg/controller/rollout/rollout_bluegreen.go @@ -62,7 +62,6 @@ func (m *blueGreenReleaseManager) runCanary(c *RolloutContext) error { } if m.doCanaryJump(c) { - klog.Infof("rollout(%s/%s) canary step jumped", c.Rollout.Namespace, c.Rollout.Name) return nil } // When the first batch is trafficRouting rolling and the next steps are rolling release, @@ -323,6 +322,9 @@ func (m *blueGreenReleaseManager) doCanaryFinalising(c *RolloutContext) (bool, e // route all traffic to new version case v1beta1.FinalisingStepRouteTrafficToNew: retry, err = m.trafficRoutingManager.RouteAllTrafficToNewVersion(tr) + // dangerous, wait endlessly, only for debugging use + case v1beta1.FinalisingStepWaitEndless: + retry, err = true, fmt.Errorf("only for debugging, just wait endlessly") default: nextStep = nextBlueGreenTask(c.FinalizeReason, "") klog.Warningf("unexpected finalising step, current step(%s), start from the first step(%s)", blueGreenStatus.FinalisingStep, nextStep) @@ -392,7 +394,10 @@ func (m *blueGreenReleaseManager) syncBatchRelease(br *v1beta1.BatchRelease, blu // TODO: optimize the logic to better understand blueGreenStatus.Message = fmt.Sprintf("BatchRelease is at state %s, rollout-id %s, step %d", br.Status.CanaryStatus.CurrentBatchState, br.Status.ObservedRolloutID, br.Status.CanaryStatus.CurrentBatch+1) - + // br.Status.Message records messages that help users to understand what is going wrong + if len(br.Status.Message) > 0 { + blueGreenStatus.Message += fmt.Sprintf(", %s", br.Status.Message) + } // sync rolloutId from blueGreenStatus to BatchRelease if blueGreenStatus.ObservedRolloutID != br.Spec.ReleasePlan.RolloutID { body := fmt.Sprintf(`{"spec":{"releasePlan":{"rolloutID":"%s"}}}`, blueGreenStatus.ObservedRolloutID) @@ -450,10 +455,9 @@ func nextBlueGreenTask(reason string, currentTask v1beta1.FinalisingStepType) v1 default: // others: disabled/deleting rollout taskSequence = []v1beta1.FinalisingStepType{ v1beta1.FinalisingStepRestoreStableService, - v1beta1.FinalisingStepResumeWorkload, // scale up new, scale down old v1beta1.FinalisingStepRouteTrafficToStable, - v1beta1.FinalisingStepRemoveCanaryService, + v1beta1.FinalisingStepResumeWorkload, // scale up new, scale down old v1beta1.FinalisingStepReleaseWorkloadControl, } } diff --git a/pkg/controller/rollout/rollout_canary.go b/pkg/controller/rollout/rollout_canary.go index f6d3c8cd..6bdc1d6c 100644 
--- a/pkg/controller/rollout/rollout_canary.go +++ b/pkg/controller/rollout/rollout_canary.go @@ -71,7 +71,6 @@ func (m *canaryReleaseManager) runCanary(c *RolloutContext) error { } if m.doCanaryJump(c) { - klog.Infof("rollout(%s/%s) canary step jumped", c.Rollout.Namespace, c.Rollout.Name) return nil } // When the first batch is trafficRouting rolling and the next steps are rolling release, @@ -457,6 +456,10 @@ func (m *canaryReleaseManager) syncBatchRelease(br *v1beta1.BatchRelease, canary // TODO: optimize the logic to better understand canaryStatus.Message = fmt.Sprintf("BatchRelease is at state %s, rollout-id %s, step %d", br.Status.CanaryStatus.CurrentBatchState, br.Status.ObservedRolloutID, br.Status.CanaryStatus.CurrentBatch+1) + // br.Status.Message records messages that help users to understand what is going wrong + if len(br.Status.Message) > 0 { + canaryStatus.Message += fmt.Sprintf(", %s", br.Status.Message) + } // sync rolloutId from canaryStatus to BatchRelease if canaryStatus.ObservedRolloutID != br.Spec.ReleasePlan.RolloutID { diff --git a/pkg/controller/rollout/rollout_progressing.go b/pkg/controller/rollout/rollout_progressing.go index 24be281a..dbdbe314 100644 --- a/pkg/controller/rollout/rollout_progressing.go +++ b/pkg/controller/rollout/rollout_progressing.go @@ -25,6 +25,7 @@ import ( "github.com/openkruise/rollouts/api/v1beta1" "github.com/openkruise/rollouts/pkg/trafficrouting" "github.com/openkruise/rollouts/pkg/util" + utilerrors "github.com/openkruise/rollouts/pkg/util/errors" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -110,7 +111,13 @@ func (r *RolloutReconciler) reconcileRolloutProgressing(rollout *v1beta1.Rollout case v1alpha1.ProgressingReasonInRolling: klog.Infof("rollout(%s/%s) is Progressing, and in reason(%s)", rollout.Namespace, rollout.Name, cond.Reason) err = r.doProgressingInRolling(rolloutContext) - if err != nil { + if utilerrors.IsBadRequest(err) { + // For fatal errors, do not retry as it wastes resources and has no effect. + // therefore, we don't propagate the error, but just log it. + // user should do sth instead, eg. for bluegreen continuous release scenario, user should do rollback + klog.Warningf("rollout(%s/%s) doProgressingInRolling error(%s)", rollout.Namespace, rollout.Name, err.Error()) + return nil, nil + } else if err != nil { return nil, err } @@ -225,11 +232,29 @@ func (r *RolloutReconciler) handleRolloutPaused(rollout *v1beta1.Rollout, newSta return nil } +/* +continuous release (or successive release) is not supported for bluegreen release, especially for cloneset, +here is why: +suppose we are releasing a cloneSet, which has pods of both v1 and v2 for now. If we release v3 before +v2 is fully released, the cloneSet controller might scale down pods without distinguishing between v1 and v2. +This is because our implementation is based on the minReadySeconds, pods of both v1 and v2 are "unavailable" +in the progress of rollout. 
+Deployment actually has the same problem, however it is possible to bypass this issue for Deployment by setting +minReadySeconds for replicaset separately; unfortunately this workaround seems not work for cloneset +*/ func (r *RolloutReconciler) handleContinuousRelease(c *RolloutContext) error { r.Recorder.Eventf(c.Rollout, corev1.EventTypeNormal, "Progressing", "workload continuous publishing canaryRevision, then restart publishing") klog.Infof("rollout(%s/%s) workload continuous publishing canaryRevision from(%s) -> to(%s), then restart publishing", c.Rollout.Namespace, c.Rollout.Name, c.NewStatus.GetCanaryRevision(), c.Workload.CanaryRevision) + // do nothing for blue-green release + if c.Rollout.Spec.Strategy.IsBlueGreenRelease() { + cond := util.GetRolloutCondition(*c.NewStatus, v1beta1.RolloutConditionProgressing) + cond.Message = "new version releasing detected in the progress of blue-green release, please rollback first" + c.NewStatus.Message = cond.Message + return utilerrors.NewBadRequestError(fmt.Errorf("new version releasing detected in the progress of blue-green release, please rollback first")) + } + done, err := r.doProgressingReset(c) if err != nil { klog.Errorf("rollout(%s/%s) doProgressingReset failed: %s", c.Rollout.Namespace, c.Rollout.Name, err.Error()) @@ -601,7 +626,8 @@ func newTrafficRoutingContext(c *RolloutContext) *trafficrouting.TrafficRoutingC revisionLabelKey = c.Workload.RevisionLabelKey } var selectorPatch map[string]string - if !c.Rollout.Spec.Strategy.DisableGenerateCanaryService() && c.Rollout.Spec.Strategy.Canary.PatchPodTemplateMetadata != nil { + if !c.Rollout.Spec.Strategy.DisableGenerateCanaryService() && c.Rollout.Spec.Strategy.Canary != nil && + c.Rollout.Spec.Strategy.Canary.PatchPodTemplateMetadata != nil { selectorPatch = c.Rollout.Spec.Strategy.Canary.PatchPodTemplateMetadata.Labels } return &trafficrouting.TrafficRoutingContext{ diff --git a/pkg/util/client/delegating_client.go b/pkg/util/client/delegating_client.go index d677d109..41bb074f 100644 --- a/pkg/util/client/delegating_client.go +++ b/pkg/util/client/delegating_client.go @@ -153,12 +153,7 @@ func (d *delegatingReader) List(ctx context.Context, list client.ObjectList, opt return d.CacheReader.List(ctx, list, opts...) } -var DisableDeepCopy = disableDeepCopy{} - -type disableDeepCopy struct{} - -func (_ disableDeepCopy) ApplyToList(_ *client.ListOptions) { -} +var DisableDeepCopy = client.UnsafeDisableDeepCopy func isDisableDeepCopy(opts []client.ListOption) bool { for _, opt := range opts { diff --git a/pkg/util/condition.go b/pkg/util/condition.go index b1ed7075..1cca8c32 100644 --- a/pkg/util/condition.go +++ b/pkg/util/condition.go @@ -45,6 +45,16 @@ func GetRolloutCondition(status v1beta1.RolloutStatus, condType v1beta1.RolloutC return nil } +func GetBatchReleaseCondition(status v1beta1.BatchReleaseStatus, condType v1beta1.RolloutConditionType) *v1beta1.RolloutCondition { + for i := range status.Conditions { + c := status.Conditions[i] + if c.Type == condType { + return &c + } + } + return nil +} + // SetRolloutCondition updates the rollout to include the provided condition. If the condition that // we are about to add already exists and has the same status and reason, then we are not going to update // by returning false. 
Returns true if the condition was updated @@ -63,6 +73,21 @@ func SetRolloutCondition(status *v1beta1.RolloutStatus, condition v1beta1.Rollou return true } +func SetBatchReleaseCondition(status *v1beta1.BatchReleaseStatus, condition v1beta1.RolloutCondition) bool { + currentCond := GetBatchReleaseCondition(*status, condition.Type) + if currentCond != nil && currentCond.Status == condition.Status && currentCond.Reason == condition.Reason && + currentCond.Message == condition.Message { + return false + } + // Do not update lastTransitionTime if the status of the condition doesn't change. + if currentCond != nil && currentCond.Status == condition.Status { + condition.LastTransitionTime = currentCond.LastTransitionTime + } + newConditions := filterOutCondition(status.Conditions, condition.Type) + status.Conditions = append(newConditions, condition) + return true +} + // filterOutCondition returns a new slice of rollout conditions without conditions with the provided type. func filterOutCondition(conditions []v1beta1.RolloutCondition, condType v1beta1.RolloutConditionType) []v1beta1.RolloutCondition { var newConditions []v1beta1.RolloutCondition @@ -78,3 +103,7 @@ func filterOutCondition(conditions []v1beta1.RolloutCondition, condType v1beta1. func RemoveRolloutCondition(status *v1beta1.RolloutStatus, condType v1beta1.RolloutConditionType) { status.Conditions = filterOutCondition(status.Conditions, condType) } + +func RemoveBatchReleaseCondition(status *v1beta1.BatchReleaseStatus, condType v1beta1.RolloutConditionType) { + status.Conditions = filterOutCondition(status.Conditions, condType) +} diff --git a/pkg/util/controller_finder.go b/pkg/util/controller_finder.go index 50030561..258f29e9 100644 --- a/pkg/util/controller_finder.go +++ b/pkg/util/controller_finder.go @@ -289,7 +289,7 @@ func (r *ControllerFinder) getDeployment(namespace string, ref *rolloutv1beta1.O return &Workload{IsStatusConsistent: false}, nil } // stable replicaSet - stableRs, err := r.getDeploymentStableRs(stable) + stableRs, err := r.GetDeploymentStableRs(stable) if err != nil || stableRs == nil { return &Workload{IsStatusConsistent: false}, err } @@ -318,7 +318,7 @@ func (r *ControllerFinder) getDeployment(namespace string, ref *rolloutv1beta1.O if err != nil || canary == nil { return workload, err } - canaryRs, err := r.getDeploymentStableRs(canary) + canaryRs, err := r.GetDeploymentStableRs(canary) if err != nil || canaryRs == nil { return workload, err } @@ -422,7 +422,7 @@ func (r *ControllerFinder) GetReplicaSetsForDeployment(obj *apps.Deployment) ([] return rss, nil } -func (r *ControllerFinder) getDeploymentStableRs(obj *apps.Deployment) (*apps.ReplicaSet, error) { +func (r *ControllerFinder) GetDeploymentStableRs(obj *apps.Deployment) (*apps.ReplicaSet, error) { rss, err := r.GetReplicaSetsForDeployment(obj) if err != nil { return nil, err diff --git a/pkg/util/errors/types.go b/pkg/util/errors/types.go new file mode 100644 index 00000000..94f7bce7 --- /dev/null +++ b/pkg/util/errors/types.go @@ -0,0 +1,69 @@ +package errors + +import ( + "errors" + "fmt" +) + +// RetryError represents a benign error that can be handled or ignored by the caller. +// It encapsulates information that is non-critical and does not require immediate attention. +type RetryError struct { + Err error +} + +// Error implements the error interface for RetryError. +// It returns the error message of the encapsulated error or a default message. 
+func (e *RetryError) Error() string { + if e.Err != nil { + return fmt.Sprintf("[retry]: %s", e.Err.Error()) + } + return "retry error" +} + +// NewRetryError creates a new instance of RetryError. +func NewRetryError(err error) *RetryError { + return &RetryError{Err: err} +} + +func IsRetryError(err error) bool { + var re *RetryError + return errors.As(err, &re) +} + +func AsRetryError(err error, target **RetryError) bool { + return errors.As(err, target) +} + +// BadRequestError represents a fatal error that requires special handling. +// Such errors are critical and may necessitate logging, alerts, or even program termination. +type BadRequestError struct { + Err error +} + +// Error implements the error interface for BadRequestError. +// It returns the error message of the encapsulated error or a default message. +func (e *BadRequestError) Error() string { + if e.Err != nil { + return e.Err.Error() + } + return "fatal error" +} + +// NewBadRequestError creates a new instance of BadRequestError. +// It encapsulates the provided error, marking it as critical. +func NewBadRequestError(err error) *BadRequestError { + return &BadRequestError{Err: err} +} + +// IsBadRequest checks whether the provided error is of type BadRequestError. +// It returns true if the error is a BadRequestError or wraps a BadRequestError, false otherwise. +func IsBadRequest(err error) bool { + var brErr *BadRequestError + return AsBadRequest(err, &brErr) +} + +// AsBadRequest attempts to cast the provided error to a BadRequestError. +// It returns true if the casting is successful, allowing the caller to handle it accordingly. +func AsBadRequest(err error, target **BadRequestError) bool { + return errors.As(err, target) +} diff --git a/pkg/util/patch/patch_utils.go b/pkg/util/patch/patch_utils.go index 42fd8a6b..b38eed91 100644 --- a/pkg/util/patch/patch_utils.go +++ b/pkg/util/patch/patch_utils.go @@ -23,6 +23,8 @@ import ( apps "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/klog/v2" "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -222,3 +224,156 @@ func (s *DeploymentPatch) UpdatePaused(paused bool) *DeploymentPatch { } return s } + +func (s *DeploymentPatch) UpdateMinReadySeconds(seconds int32) *DeploymentPatch { + switch s.PatchType { + case types.StrategicMergePatchType, types.MergePatchType: + if _, ok := s.PatchData["spec"]; !ok { + s.PatchData["spec"] = make(map[string]interface{}) + } + spec := s.PatchData["spec"].(map[string]interface{}) + spec["minReadySeconds"] = seconds + } + return s +} + +func (s *DeploymentPatch) UpdateProgressDeadlineSeconds(seconds *int32) *DeploymentPatch { + switch s.PatchType { + case types.StrategicMergePatchType, types.MergePatchType: + if _, ok := s.PatchData["spec"]; !ok { + s.PatchData["spec"] = make(map[string]interface{}) + } + spec := s.PatchData["spec"].(map[string]interface{}) + spec["progressDeadlineSeconds"] = seconds + } + return s +} + +func (s *DeploymentPatch) UpdateMaxSurge(maxSurge *intstr.IntOrString) *DeploymentPatch { + switch s.PatchType { + case types.StrategicMergePatchType, types.MergePatchType: + if _, ok := s.PatchData["spec"]; !ok { + s.PatchData["spec"] = make(map[string]interface{}) + } + spec := s.PatchData["spec"].(map[string]interface{}) + if _, ok := spec["strategy"]; !ok { + spec["strategy"] = make(map[string]interface{}) + } + strategy := spec["strategy"].(map[string]interface{}) + if _, ok := strategy["rollingUpdate"]; !ok { + strategy["rollingUpdate"] = 
make(map[string]interface{}) + } + rollingUpdate := strategy["rollingUpdate"].(map[string]interface{}) + rollingUpdate["maxSurge"] = maxSurge + } + return s +} + +func (s *DeploymentPatch) UpdateMaxUnavailable(maxUnavailable *intstr.IntOrString) *DeploymentPatch { + switch s.PatchType { + case types.StrategicMergePatchType, types.MergePatchType: + if _, ok := s.PatchData["spec"]; !ok { + s.PatchData["spec"] = make(map[string]interface{}) + } + spec := s.PatchData["spec"].(map[string]interface{}) + if _, ok := spec["strategy"]; !ok { + spec["strategy"] = make(map[string]interface{}) + } + strategy := spec["strategy"].(map[string]interface{}) + if _, ok := strategy["rollingUpdate"]; !ok { + strategy["rollingUpdate"] = make(map[string]interface{}) + } + rollingUpdate := strategy["rollingUpdate"].(map[string]interface{}) + rollingUpdate["maxUnavailable"] = maxUnavailable + } + return s +} + +type ClonesetPatch struct { + CommonPatch +} + +func NewClonesetPatch() *ClonesetPatch { + return &ClonesetPatch{CommonPatch{PatchType: types.MergePatchType, PatchData: make(map[string]interface{})}} +} + +func (s *ClonesetPatch) UpdateMinReadySeconds(seconds int32) *ClonesetPatch { + switch s.PatchType { + case types.StrategicMergePatchType, types.MergePatchType: + klog.Infof("updateMinReadySeconds to %v", seconds) + if _, ok := s.PatchData["spec"]; !ok { + s.PatchData["spec"] = make(map[string]interface{}) + } + spec := s.PatchData["spec"].(map[string]interface{}) + spec["minReadySeconds"] = seconds + } + return s +} + +func (s *ClonesetPatch) UpdatePaused(paused bool) *ClonesetPatch { + switch s.PatchType { + case types.StrategicMergePatchType, types.MergePatchType: + klog.Infof("updatePaused to %v", paused) + if _, ok := s.PatchData["spec"]; !ok { + s.PatchData["spec"] = make(map[string]interface{}) + } + spec := s.PatchData["spec"].(map[string]interface{}) + if _, ok := spec["updateStrategy"]; !ok { + spec["updateStrategy"] = make(map[string]interface{}) + } + updateStrategy := spec["updateStrategy"].(map[string]interface{}) + updateStrategy["paused"] = paused + } + return s +} + +func (s *ClonesetPatch) UpdatePartiton(partition *intstr.IntOrString) *ClonesetPatch { + switch s.PatchType { + case types.StrategicMergePatchType, types.MergePatchType: + klog.Infof("updatePartition to %v", partition) + if _, ok := s.PatchData["spec"]; !ok { + s.PatchData["spec"] = make(map[string]interface{}) + } + spec := s.PatchData["spec"].(map[string]interface{}) + if _, ok := spec["updateStrategy"]; !ok { + spec["updateStrategy"] = make(map[string]interface{}) + } + updateStrategy := spec["updateStrategy"].(map[string]interface{}) + updateStrategy["partition"] = partition + } + return s +} + +func (s *ClonesetPatch) UpdateMaxSurge(maxSurge *intstr.IntOrString) *ClonesetPatch { + switch s.PatchType { + case types.StrategicMergePatchType, types.MergePatchType: + klog.Infof("updateMaxSurge to %v", maxSurge) + if _, ok := s.PatchData["spec"]; !ok { + s.PatchData["spec"] = make(map[string]interface{}) + } + spec := s.PatchData["spec"].(map[string]interface{}) + if _, ok := spec["updateStrategy"]; !ok { + spec["updateStrategy"] = make(map[string]interface{}) + } + updateStrategy := spec["updateStrategy"].(map[string]interface{}) + updateStrategy["maxSurge"] = maxSurge + } + return s +} + +func (s *ClonesetPatch) UpdateMaxUnavailable(maxUnavailable *intstr.IntOrString) *ClonesetPatch { + switch s.PatchType { + case types.StrategicMergePatchType, types.MergePatchType: + klog.Infof("updateMaxUnavailable to %v", maxUnavailable) 
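		// As with the other ClonesetPatch setters, this builds up a nested map that serializes to a
		// merge patch against spec.updateStrategy (CloneSet keeps its rolling-update knobs there),
		// mirroring DeploymentPatch, which nests under spec.strategy.rollingUpdate instead.
		// As a rough illustration (values are examples only), chaining
		//   NewClonesetPatch().UpdatePaused(true).UpdatePartiton(&intstr.IntOrString{Type: intstr.String, StrVal: "100%"})
		// produces a body equivalent to {"spec":{"updateStrategy":{"paused":true,"partition":"100%"}}},
		// which callers are expected to send as a types.MergePatchType patch.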
+ if _, ok := s.PatchData["spec"]; !ok { + s.PatchData["spec"] = make(map[string]interface{}) + } + spec := s.PatchData["spec"].(map[string]interface{}) + if _, ok := spec["updateStrategy"]; !ok { + spec["updateStrategy"] = make(map[string]interface{}) + } + updateStrategy := spec["updateStrategy"].(map[string]interface{}) + updateStrategy["maxUnavailable"] = maxUnavailable + } + return s +} diff --git a/pkg/util/workloads_utils.go b/pkg/util/workloads_utils.go index 86bc8659..219c22e8 100644 --- a/pkg/util/workloads_utils.go +++ b/pkg/util/workloads_utils.go @@ -154,7 +154,7 @@ func ComputeHash(template *v1.PodTemplateSpec, collisionCount *int32) string { func SafeEncodeString(s string) string { r := make([]byte, len(s)) for i, b := range []rune(s) { - r[i] = alphanums[(int(b) % len(alphanums))] + r[i] = alphanums[int(b)%len(alphanums)] } return string(r) } @@ -329,11 +329,11 @@ func IsWorkloadType(object client.Object, t WorkloadType) bool { // DeploymentMaxUnavailable returns the maximum unavailable pods a rolling deployment can take. func DeploymentMaxUnavailable(deployment *apps.Deployment) int32 { strategy := deployment.Spec.Strategy - if strategy.Type != apps.RollingUpdateDeploymentStrategyType || *(deployment.Spec.Replicas) == 0 { + if strategy.Type != apps.RollingUpdateDeploymentStrategyType || *deployment.Spec.Replicas == 0 { return int32(0) } // Error caught by validation - _, maxUnavailable, _ := resolveFenceposts(strategy.RollingUpdate.MaxSurge, strategy.RollingUpdate.MaxUnavailable, *(deployment.Spec.Replicas)) + _, maxUnavailable, _ := resolveFenceposts(strategy.RollingUpdate.MaxSurge, strategy.RollingUpdate.MaxUnavailable, *deployment.Spec.Replicas) if maxUnavailable > *deployment.Spec.Replicas { return *deployment.Spec.Replicas } diff --git a/pkg/webhook/rollout/validating/rollout_create_update_handler.go b/pkg/webhook/rollout/validating/rollout_create_update_handler.go index 49b727cf..6ed46b02 100644 --- a/pkg/webhook/rollout/validating/rollout_create_update_handler.go +++ b/pkg/webhook/rollout/validating/rollout_create_update_handler.go @@ -36,6 +36,13 @@ import ( "sigs.k8s.io/controller-runtime/pkg/webhook/admission" ) +var ( + blueGreenSupportWorkloadGVKs = []*schema.GroupVersionKind{ + &util.ControllerKindDep, + &util.ControllerKruiseKindCS, + } +) + // RolloutCreateUpdateHandler handles Rollout type RolloutCreateUpdateHandler struct { // To use the client, you need to do the following: @@ -204,12 +211,12 @@ func (h *RolloutCreateUpdateHandler) validateRolloutConflict(rollout *appsv1beta } func validateRolloutSpec(c *validateContext, rollout *appsv1beta1.Rollout, fldPath *field.Path) field.ErrorList { - errList := validateRolloutSpecObjectRef(&rollout.Spec.WorkloadRef, fldPath.Child("ObjectRef")) + errList := validateRolloutSpecObjectRef(c, &rollout.Spec.WorkloadRef, fldPath.Child("ObjectRef")) errList = append(errList, validateRolloutSpecStrategy(c, &rollout.Spec.Strategy, fldPath.Child("Strategy"))...) 
return errList } -func validateRolloutSpecObjectRef(workloadRef *appsv1beta1.ObjectRef, fldPath *field.Path) field.ErrorList { +func validateRolloutSpecObjectRef(c *validateContext, workloadRef *appsv1beta1.ObjectRef, fldPath *field.Path) field.ErrorList { if workloadRef == nil { return field.ErrorList{field.Invalid(fldPath.Child("WorkloadRef"), workloadRef, "WorkloadRef is required")} } @@ -218,6 +225,14 @@ func validateRolloutSpecObjectRef(workloadRef *appsv1beta1.ObjectRef, fldPath *f if !util.IsSupportedWorkload(gvk) { return field.ErrorList{field.Invalid(fldPath.Child("WorkloadRef"), workloadRef, "WorkloadRef kind is not supported")} } + if c.style == string(appsv1beta1.BlueGreenRollingStyle) { + for _, allowed := range blueGreenSupportWorkloadGVKs { + if gvk.Group == allowed.Group && gvk.Kind == allowed.Kind { + return nil + } + } + return field.ErrorList{field.Invalid(fldPath.Child("WorkloadRef"), workloadRef, "WorkloadRef kind is not supported for bluegreen style")} + } return nil } @@ -378,7 +393,7 @@ func (h *RolloutCreateUpdateHandler) InjectDecoder(d *admission.Decoder) error { func GetContextFromv1beta1Rollout(rollout *appsv1beta1.Rollout) *validateContext { if rollout.Spec.Strategy.Canary == nil && rollout.Spec.Strategy.BlueGreen == nil { - return nil + return &validateContext{} } style := rollout.Spec.Strategy.GetRollingStyle() if appsv1beta1.IsRealPartition(rollout) { diff --git a/pkg/webhook/util/writer/fs.go b/pkg/webhook/util/writer/fs.go index c003caa7..543e94f3 100644 --- a/pkg/webhook/util/writer/fs.go +++ b/pkg/webhook/util/writer/fs.go @@ -128,7 +128,7 @@ func prepareToWrite(dir string) error { // TODO: figure out if we can reduce the permission. (Now it's 0777) err = os.MkdirAll(dir, 0777) if err != nil { - return fmt.Errorf("can't create dir: %v", dir) + return fmt.Errorf("can't create dir: %v, err: %s", dir, err.Error()) } case err != nil: return err diff --git a/pkg/webhook/workload/mutating/unified_update_handler.go b/pkg/webhook/workload/mutating/unified_update_handler.go new file mode 100644 index 00000000..d29cae3e --- /dev/null +++ b/pkg/webhook/workload/mutating/unified_update_handler.go @@ -0,0 +1,241 @@ +/* +Copyright 2019 The Kruise Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package mutating + +import ( + "context" + "encoding/json" + "math" + "net/http" + + kruiseappsv1alpha1 "github.com/openkruise/kruise-api/apps/v1alpha1" + appsv1beta1 "github.com/openkruise/rollouts/api/v1beta1" + "github.com/openkruise/rollouts/pkg/util" + utilclient "github.com/openkruise/rollouts/pkg/util/client" + util2 "github.com/openkruise/rollouts/pkg/webhook/util" + "github.com/openkruise/rollouts/pkg/webhook/util/configuration" + admissionv1 "k8s.io/api/admission/v1" + v1 "k8s.io/api/admissionregistration/v1" + apps "k8s.io/api/apps/v1" + v12 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + labels2 "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/klog/v2" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/runtime/inject" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" +) + +// UnifiedWorkloadHandler handles Pod +type UnifiedWorkloadHandler struct { + // To use the client, you need to do the following: + // - uncomment it + // - import sigs.k8s.io/controller-runtime/pkg/client + // - uncomment the InjectClient method at the bottom of this file. + Client client.Client + + // Decoder decodes objects + Decoder *admission.Decoder + Finder *util.ControllerFinder +} + +var _ admission.Handler = &UnifiedWorkloadHandler{} + +// Handle handles admission requests. +func (h *UnifiedWorkloadHandler) Handle(ctx context.Context, req admission.Request) admission.Response { + // if subResources, then ignore + if req.Operation != admissionv1.Update || req.SubResource != "" { + return admission.Allowed("") + } + + meetingRules, err := h.checkWorkloadRules(ctx, req) + if err != nil { + return admission.Errored(http.StatusBadRequest, err) + } + + if !meetingRules { + return admission.Allowed("") + } + + switch req.Kind.Group { + // kruise cloneSet + case kruiseappsv1alpha1.GroupVersion.Group: + switch req.Kind.Kind { + case util.ControllerKruiseKindCS.Kind, util.ControllerKruiseKindDS.Kind: + return admission.Allowed("") + } + // native k8s deloyment + case apps.SchemeGroupVersion.Group: + switch req.Kind.Kind { + case util.ControllerKindDep.Kind: + return admission.Allowed("") + } + } + + // handle other workload types, including native/advanced statefulset + { + newObj := &unstructured.Unstructured{} + newObj.SetGroupVersionKind(schema.GroupVersionKind{Group: req.Kind.Group, Version: req.Kind.Version, Kind: req.Kind.Kind}) + if err := h.Decoder.Decode(req, newObj); err != nil { + return admission.Errored(http.StatusBadRequest, err) + } + if !util.IsWorkloadType(newObj, util.StatefulSetType) && req.Kind.Kind != util.ControllerKindSts.Kind { + return admission.Allowed("") + } + oldObj := &unstructured.Unstructured{} + oldObj.SetGroupVersionKind(schema.GroupVersionKind{Group: req.Kind.Group, Version: req.Kind.Version, Kind: req.Kind.Kind}) + if err := h.Decoder.Decode( + admission.Request{AdmissionRequest: admissionv1.AdmissionRequest{Object: req.AdmissionRequest.OldObject}}, + oldObj); err != nil { + return admission.Errored(http.StatusBadRequest, err) + } + changed, err := h.handleStatefulSetLikeWorkload(newObj, oldObj) + if err != nil { + return admission.Errored(http.StatusBadRequest, err) + } + if !changed { + return admission.Allowed("") + } + marshalled, err := json.Marshal(newObj.Object) + if err != nil { + return admission.Errored(http.StatusInternalServerError, err) + } + return 
admission.PatchResponseFromRaw(req.AdmissionRequest.Object.Raw, marshalled) + } +} + +func (h *UnifiedWorkloadHandler) handleStatefulSetLikeWorkload(newObj, oldObj *unstructured.Unstructured) (bool, error) { + // indicate whether the workload can enter the rollout process + // 1. replicas > 0 + if util.GetReplicas(newObj) == 0 || !util.IsStatefulSetRollingUpdate(newObj) { + return false, nil + } + oldTemplate, newTemplate := util.GetTemplate(oldObj), util.GetTemplate(newObj) + if oldTemplate == nil || newTemplate == nil { + return false, nil + } + oldMetadata, newMetadata := util.GetMetadata(oldObj), util.GetMetadata(newObj) + if newMetadata.Annotations[appsv1beta1.RolloutIDLabel] != "" && + oldMetadata.Annotations[appsv1beta1.RolloutIDLabel] == newMetadata.Annotations[appsv1beta1.RolloutIDLabel] { + return false, nil + } else if newMetadata.Annotations[appsv1beta1.RolloutIDLabel] == "" && util.EqualIgnoreHash(oldTemplate, newTemplate) { + return false, nil + } + + rollout, err := h.fetchMatchedRollout(newObj) + if err != nil { + return false, err + } else if rollout == nil || rollout.Spec.Strategy.IsEmptyRelease() { + return false, nil + } + + util.SetStatefulSetPartition(newObj, math.MaxInt16) + state := &util.RolloutState{RolloutName: rollout.Name} + by, _ := json.Marshal(state) + annotation := newObj.GetAnnotations() + if annotation == nil { + annotation = map[string]string{} + } + annotation[util.InRolloutProgressingAnnotation] = string(by) + newObj.SetAnnotations(annotation) + klog.Infof("StatefulSet(%s/%s) will be released incrementally based on Rollout(%s)", newMetadata.Namespace, newMetadata.Name, rollout.Name) + return true, nil +} + +func (h *UnifiedWorkloadHandler) fetchMatchedRollout(obj client.Object) (*appsv1beta1.Rollout, error) { + oGv := obj.GetObjectKind().GroupVersionKind() + rolloutList := &appsv1beta1.RolloutList{} + if err := h.Client.List(context.TODO(), rolloutList, utilclient.DisableDeepCopy, + &client.ListOptions{Namespace: obj.GetNamespace()}); err != nil { + klog.Errorf("UnifiedWorkloadHandler List rollout failed: %s", err.Error()) + return nil, err + } + for i := range rolloutList.Items { + rollout := &rolloutList.Items[i] + if !rollout.DeletionTimestamp.IsZero() { + continue + } + if rollout.Status.Phase == appsv1beta1.RolloutPhaseDisabled { + klog.Infof("Disabled rollout(%s/%s) fetched when fetching matched rollout", rollout.Namespace, rollout.Name) + continue + } + ref := rollout.Spec.WorkloadRef + gv, err := schema.ParseGroupVersion(ref.APIVersion) + if err != nil { + klog.Warningf("ParseGroupVersion rollout(%s/%s) ref failed: %s", rollout.Namespace, rollout.Name, err.Error()) + continue + } + if oGv.Group == gv.Group && oGv.Kind == ref.Kind && obj.GetName() == ref.Name { + return rollout, nil + } + } + return nil, nil +} + +var _ inject.Client = &UnifiedWorkloadHandler{} + +// InjectClient injects the client into the UnifiedWorkloadHandler +func (h *UnifiedWorkloadHandler) InjectClient(c client.Client) error { + h.Client = c + h.Finder = util.NewControllerFinder(c) + return nil +} + +var _ admission.DecoderInjector = &UnifiedWorkloadHandler{} + +// InjectDecoder injects the decoder into the UnifiedWorkloadHandler +func (h *UnifiedWorkloadHandler) InjectDecoder(d *admission.Decoder) error { + h.Decoder = d + return nil +} + +func (h *UnifiedWorkloadHandler) checkWorkloadRules(ctx context.Context, req admission.Request) (bool, error) { + webhook := &v1.MutatingWebhookConfiguration{} + if err := h.Client.Get(ctx, types.NamespacedName{Name: 
configuration.MutatingWebhookConfigurationName}, webhook); err != nil { + return false, err + } + + newObject := unstructured.Unstructured{} + if err := h.Decoder.Decode(req, &newObject); err != nil { + return false, err + } + + labels := newObject.GetLabels() + + attr, err := constructAttr(req) + if err != nil { + return false, err + } + + for _, webhook := range webhook.Webhooks { + for _, rule := range webhook.Rules { + m := util2.Matcher{Rule: rule, Attr: attr} + if m.Matches() { + selector, err := v12.LabelSelectorAsSelector(webhook.ObjectSelector) + if err != nil { + return false, nil + } + if selector.Matches(labels2.Set(labels)) { + return true, nil + } + } + } + } + return false, nil +} diff --git a/pkg/webhook/workload/mutating/unified_update_handler_test.go b/pkg/webhook/workload/mutating/unified_update_handler_test.go new file mode 100644 index 00000000..3e70c28b --- /dev/null +++ b/pkg/webhook/workload/mutating/unified_update_handler_test.go @@ -0,0 +1,110 @@ +/* +Copyright 2022 The Kruise Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package mutating + +import ( + "context" + "encoding/json" + "math" + "reflect" + "testing" + + kruiseappsv1beta1 "github.com/openkruise/kruise-api/apps/v1beta1" + appsv1beta1 "github.com/openkruise/rollouts/api/v1beta1" + "github.com/openkruise/rollouts/pkg/util" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/utils/pointer" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" +) + +func TestHandleStatefulSet(t *testing.T) { + cases := []struct { + name string + getObjs func() (*kruiseappsv1beta1.StatefulSet, *kruiseappsv1beta1.StatefulSet) + expectObj func() *kruiseappsv1beta1.StatefulSet + getRollout func() *appsv1beta1.Rollout + isError bool + }{ + { + name: "cloneSet image v1->v2, matched rollout", + getObjs: func() (*kruiseappsv1beta1.StatefulSet, *kruiseappsv1beta1.StatefulSet) { + oldObj := statefulset.DeepCopy() + newObj := statefulset.DeepCopy() + newObj.Spec.Template.Spec.Containers[0].Image = "echoserver:v2" + return oldObj, newObj + }, + expectObj: func() *kruiseappsv1beta1.StatefulSet { + obj := statefulset.DeepCopy() + obj.Spec.Template.Spec.Containers[0].Image = "echoserver:v2" + obj.Annotations[util.InRolloutProgressingAnnotation] = `{"rolloutName":"rollout-demo"}` + obj.Spec.UpdateStrategy.RollingUpdate.Partition = pointer.Int32(math.MaxInt16) + return obj + }, + getRollout: func() *appsv1beta1.Rollout { + obj := rolloutDemo.DeepCopy() + obj.Spec.WorkloadRef = appsv1beta1.ObjectRef{ + APIVersion: "apps.kruise.io/v1beta1", + Kind: "StatefulSet", + Name: "echoserver", + } + return obj + }, + }, + } + + decoder, _ := admission.NewDecoder(scheme) + for _, cs := range cases { + t.Run(cs.name, func(t *testing.T) { + client := fake.NewClientBuilder().WithScheme(scheme).Build() + h := UnifiedWorkloadHandler{ + Client: client, + Decoder: decoder, + Finder: util.NewControllerFinder(client), + } + rollout := cs.getRollout() + if err := 
client.Create(context.TODO(), rollout); err != nil { + t.Errorf(err.Error()) + } + + oldObj, newObj := cs.getObjs() + oldO, _ := runtime.DefaultUnstructuredConverter.ToUnstructured(oldObj) + newO, _ := runtime.DefaultUnstructuredConverter.ToUnstructured(newObj) + oldUnstructured := &unstructured.Unstructured{Object: oldO} + newUnstructured := &unstructured.Unstructured{Object: newO} + oldUnstructured.SetGroupVersionKind(newObj.GroupVersionKind()) + newUnstructured.SetGroupVersionKind(newObj.GroupVersionKind()) + _, err := h.handleStatefulSetLikeWorkload(newUnstructured, oldUnstructured) + if cs.isError && err == nil { + t.Fatal("handleStatefulSetLikeWorkload failed") + } else if !cs.isError && err != nil { + t.Fatalf(err.Error()) + } + newStructured := &kruiseappsv1beta1.StatefulSet{} + err = runtime.DefaultUnstructuredConverter.FromUnstructured(newUnstructured.Object, newStructured) + if err != nil { + t.Fatal("DefaultUnstructuredConvert failed") + } + expect := cs.expectObj() + if !reflect.DeepEqual(newStructured, expect) { + by, _ := json.Marshal(newStructured) + t.Fatalf("handlerCloneSet failed, and new(%s)", string(by)) + } + }) + } +} diff --git a/pkg/webhook/workload/mutating/webhooks.go b/pkg/webhook/workload/mutating/webhooks.go index 6ff5d4b2..c31c2c0d 100644 --- a/pkg/webhook/workload/mutating/webhooks.go +++ b/pkg/webhook/workload/mutating/webhooks.go @@ -23,8 +23,6 @@ import ( // +kubebuilder:webhook:path=/mutate-apps-kruise-io-v1alpha1-cloneset,mutating=true,failurePolicy=fail,sideEffects=None,admissionReviewVersions=v1;v1beta1,groups=apps.kruise.io,resources=clonesets,verbs=update,versions=v1alpha1,name=mcloneset.kb.io // +kubebuilder:webhook:path=/mutate-apps-kruise-io-v1alpha1-daemonset,mutating=true,failurePolicy=fail,sideEffects=None,admissionReviewVersions=v1;v1beta1,groups=apps.kruise.io,resources=daemonsets,verbs=update,versions=v1alpha1,name=mdaemonset.kb.io // +kubebuilder:webhook:path=/mutate-apps-v1-deployment,mutating=true,failurePolicy=fail,sideEffects=None,admissionReviewVersions=v1;v1beta1,groups=apps,resources=deployments,verbs=update,versions=v1,name=mdeployment.kb.io -// +kubebuilder:webhook:path=/mutate-apps-v1-statefulset,mutating=true,failurePolicy=fail,sideEffects=None,admissionReviewVersions=v1;v1beta1,groups=apps,resources=statefulsets,verbs=update,versions=v1,name=mstatefulset.kb.io -// +kubebuilder:webhook:path=/mutate-apps-kruise-io-statefulset,mutating=true,failurePolicy=fail,sideEffects=None,admissionReviewVersions=v1;v1beta1,groups=apps.kruise.io,resources=statefulsets,verbs=create;update,versions=v1alpha1;v1beta1,name=madvancedstatefulset.kb.io // +kubebuilder:webhook:path=/mutate-unified-workload,mutating=true,failurePolicy=fail,sideEffects=None,admissionReviewVersions=v1;v1beta1,groups=*,resources=*,verbs=create;update,versions=*,name=munifiedworload.kb.io var ( @@ -32,9 +30,7 @@ var ( HandlerMap = map[string]admission.Handler{ "mutate-apps-kruise-io-v1alpha1-cloneset": &WorkloadHandler{}, "mutate-apps-v1-deployment": &WorkloadHandler{}, - "mutate-apps-v1-statefulset": &WorkloadHandler{}, - "mutate-apps-kruise-io-statefulset": &WorkloadHandler{}, - "mutate-unified-workload": &WorkloadHandler{}, "mutate-apps-kruise-io-v1alpha1-daemonset": &WorkloadHandler{}, + "mutate-unified-workload": &UnifiedWorkloadHandler{}, } ) diff --git a/pkg/webhook/workload/mutating/workload_update_handler.go b/pkg/webhook/workload/mutating/workload_update_handler.go index 45ade337..e3376164 100644 --- a/pkg/webhook/workload/mutating/workload_update_handler.go +++ 
b/pkg/webhook/workload/mutating/workload_update_handler.go @@ -189,87 +189,20 @@ func (h *WorkloadHandler) Handle(ctx context.Context, req admission.Request) adm } } - // handle other workload types, including native/advanced statefulset - { - newObj := &unstructured.Unstructured{} - newObj.SetGroupVersionKind(schema.GroupVersionKind{Group: req.Kind.Group, Version: req.Kind.Version, Kind: req.Kind.Kind}) - if err := h.Decoder.Decode(req, newObj); err != nil { - return admission.Errored(http.StatusBadRequest, err) - } - if !util.IsWorkloadType(newObj, util.StatefulSetType) && req.Kind.Kind != util.ControllerKindSts.Kind { - return admission.Allowed("") - } - oldObj := &unstructured.Unstructured{} - oldObj.SetGroupVersionKind(schema.GroupVersionKind{Group: req.Kind.Group, Version: req.Kind.Version, Kind: req.Kind.Kind}) - if err := h.Decoder.Decode( - admission.Request{AdmissionRequest: admissionv1.AdmissionRequest{Object: req.AdmissionRequest.OldObject}}, - oldObj); err != nil { - return admission.Errored(http.StatusBadRequest, err) - } - changed, err := h.handleStatefulSetLikeWorkload(newObj, oldObj) - if err != nil { - return admission.Errored(http.StatusBadRequest, err) - } - if !changed { - return admission.Allowed("") - } - marshalled, err := json.Marshal(newObj.Object) - if err != nil { - return admission.Errored(http.StatusInternalServerError, err) - } - return admission.PatchResponseFromRaw(req.AdmissionRequest.Object.Raw, marshalled) - } -} - -func (h *WorkloadHandler) handleStatefulSetLikeWorkload(newObj, oldObj *unstructured.Unstructured) (bool, error) { - // indicate whether the workload can enter the rollout process - // 1. replicas > 0 - if util.GetReplicas(newObj) == 0 || !util.IsStatefulSetRollingUpdate(newObj) { - return false, nil - } - oldTemplate, newTemplate := util.GetTemplate(oldObj), util.GetTemplate(newObj) - if oldTemplate == nil || newTemplate == nil { - return false, nil - } - oldMetadata, newMetadata := util.GetMetadata(oldObj), util.GetMetadata(newObj) - if newMetadata.Annotations[appsv1beta1.RolloutIDLabel] != "" && - oldMetadata.Annotations[appsv1beta1.RolloutIDLabel] == newMetadata.Annotations[appsv1beta1.RolloutIDLabel] { - return false, nil - } else if newMetadata.Annotations[appsv1beta1.RolloutIDLabel] == "" && util.EqualIgnoreHash(oldTemplate, newTemplate) { - return false, nil - } - - rollout, err := h.fetchMatchedRollout(newObj) - if err != nil { - return false, err - } else if rollout == nil || rollout.Spec.Strategy.IsEmptyRelease() { - return false, nil - } - - util.SetStatefulSetPartition(newObj, math.MaxInt16) - state := &util.RolloutState{RolloutName: rollout.Name} - by, _ := json.Marshal(state) - annotation := newObj.GetAnnotations() - if annotation == nil { - annotation = map[string]string{} - } - annotation[util.InRolloutProgressingAnnotation] = string(by) - newObj.SetAnnotations(annotation) - klog.Infof("StatefulSet(%s/%s) will be released incrementally based on Rollout(%s)", newMetadata.Namespace, newMetadata.Name, rollout.Name) - return true, nil + return admission.Allowed("") } func (h *WorkloadHandler) handleDeployment(newObj, oldObj *apps.Deployment) (bool, error) { // in rollout progressing if newObj.Annotations[util.InRolloutProgressingAnnotation] != "" { modified := false - if !newObj.Spec.Paused { - modified = true - newObj.Spec.Paused = true - } strategy := util.GetDeploymentStrategy(newObj) - switch strings.ToLower(string(strategy.RollingStyle)) { - case strings.ToLower(string(appsv1alpha1.PartitionRollingStyle)): + // partition + 
if strings.EqualFold(string(strategy.RollingStyle), string(appsv1alpha1.PartitionRollingStyle)) {
+			if !newObj.Spec.Paused {
+				modified = true
+				newObj.Spec.Paused = true
+			}
 			// Make sure it is always Recreate to disable native controller
 			if newObj.Spec.Strategy.Type == apps.RollingUpdateDeploymentStrategyType {
 				modified = true
@@ -287,7 +220,24 @@ func (h *WorkloadHandler) handleDeployment(newObj, oldObj *apps.Deployment) (boo
 			}
 			appsv1alpha1.SetDefaultDeploymentStrategy(&strategy)
 			setDeploymentStrategyAnnotation(strategy, newObj)
-		default:
+			// bluegreenStyle
+		} else if len(newObj.GetAnnotations()[appsv1beta1.OriginalDeploymentStrategyAnnotation]) > 0 {
+			if isEffectiveDeploymentRevisionChange(oldObj, newObj) {
+				newObj.Spec.Paused, modified = true, true
+				// continuous release is disallowed; only rollback is allowed
+				klog.Warningf("rollback or continuous release detected in Deployment webhook; only rollback is allowed for blue-green release for now")
+			}
+			// do not allow modifying Strategy.Type to Recreate
+			if newObj.Spec.Strategy.Type != apps.RollingUpdateDeploymentStrategyType {
+				modified = true
+				newObj.Spec.Strategy.Type = oldObj.Spec.Strategy.Type
+				klog.Warningf("modifying Strategy.Type to Recreate is not allowed")
+			}
+		} else { // default
+			if !newObj.Spec.Paused {
+				modified = true
+				newObj.Spec.Paused = true
+			}
 			// Do not allow to modify strategy as Recreate during rolling
 			if newObj.Spec.Strategy.Type == apps.RecreateDeploymentStrategyType {
 				modified = true
@@ -369,6 +319,7 @@ func (h *WorkloadHandler) handleCloneSet(newObj, oldObj *kruiseappsv1alpha1.Clon
 	} else if rollout == nil || rollout.Spec.Strategy.IsEmptyRelease() {
 		return false, nil
 	}
+	// if traffic routing is enabled, there must be only one version of Pods
 	if rollout.Spec.Strategy.HasTrafficRoutings() && newObj.Status.Replicas != newObj.Status.UpdatedReplicas {
 		klog.Warningf("Because cloneSet(%s/%s) have multiple versions of Pods, so can not enter rollout progressing",
 			newObj.Namespace, newObj.Name)
diff --git a/pkg/webhook/workload/mutating/workload_update_handler_test.go b/pkg/webhook/workload/mutating/workload_update_handler_test.go
index 117d32ba..13282dec 100644
--- a/pkg/webhook/workload/mutating/workload_update_handler_test.go
+++ b/pkg/webhook/workload/mutating/workload_update_handler_test.go
@@ -36,7 +36,6 @@ import (
 	apps "k8s.io/api/apps/v1"
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/types"
@@ -130,6 +129,51 @@ var (
 		},
 	}
 
+	rsDemoV2 = &apps.ReplicaSet{
+		TypeMeta: metav1.TypeMeta{
+			APIVersion: "apps/v1",
+			Kind:       "ReplicaSet",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "echoserver-v2",
+			Labels: map[string]string{
+				"app":               "echoserver",
+				"pod-template-hash": "version2",
+			},
+			Annotations: map[string]string{},
+			OwnerReferences: []metav1.OwnerReference{
+				*metav1.NewControllerRef(deploymentDemo, schema.GroupVersionKind{
+					Group:   apps.SchemeGroupVersion.Group,
+					Version: apps.SchemeGroupVersion.Version,
+					Kind:    "Deployment",
+				}),
+			},
+		},
+		Spec: apps.ReplicaSetSpec{
+			Selector: &metav1.LabelSelector{
+				MatchLabels: map[string]string{
+					"app": "echoserver",
+				},
+			},
+			Replicas: pointer.Int32(5),
+			Template: v1.PodTemplateSpec{
+				ObjectMeta: metav1.ObjectMeta{
+					Labels: map[string]string{
+						"app": "echoserver",
+					},
+				},
+				Spec: v1.PodSpec{
+					Containers: []v1.Container{
+						{
+							Name:  "echoserver",
+							Image: "echoserver:v2",
+						},
+					},
+				},
+			},
+		},
+ } + cloneSetDemo = &kruisev1aplphal.CloneSet{ TypeMeta: metav1.TypeMeta{ APIVersion: "apps.kruise.io/v1alpha1", @@ -519,6 +563,100 @@ func TestHandlerDeployment(t *testing.T) { return obj }, }, + { + name: "bluegreen: normal release", + getObjs: func() (*apps.Deployment, *apps.Deployment) { + oldObj := deploymentDemo.DeepCopy() + newObj := deploymentDemo.DeepCopy() + newObj.Spec.Template.Spec.Containers[0].Image = "echoserver:v2" + return oldObj, newObj + }, + expectObj: func() *apps.Deployment { + obj := deploymentDemo.DeepCopy() + obj.Spec.Template.Spec.Containers[0].Image = "echoserver:v2" + obj.Annotations[util.InRolloutProgressingAnnotation] = `{"rolloutName":"rollout-demo"}` + obj.Spec.Paused = true + return obj + }, + getRs: func() []*apps.ReplicaSet { + rs := rsDemo.DeepCopy() + return []*apps.ReplicaSet{rs} + }, + getRollout: func() *appsv1beta1.Rollout { + obj := rolloutDemo.DeepCopy() + obj.Spec.Strategy.BlueGreen = &appsv1beta1.BlueGreenStrategy{} + return obj + }, + isError: false, + }, + { + name: "bluegreen: rollback", + getObjs: func() (*apps.Deployment, *apps.Deployment) { + oldObj := deploymentDemo.DeepCopy() + oldObj.Annotations[util.InRolloutProgressingAnnotation] = `{"rolloutName":"rollout-demo"}` + oldObj.Annotations[appsv1beta1.OriginalDeploymentStrategyAnnotation] = `{"MaxSurge":"25%", "MaxUnavailable":"25%"}` + oldObj.Labels[appsv1alpha1.DeploymentStableRevisionLabel] = "5b494f7bf" + oldObj.Spec.Template.Spec.Containers[0].Image = "echoserver:v2" + newObj := deploymentDemo.DeepCopy() + newObj.Annotations[util.InRolloutProgressingAnnotation] = `{"rolloutName":"rollout-demo"}` + newObj.Annotations[appsv1beta1.OriginalDeploymentStrategyAnnotation] = `{"MaxSurge":"25%", "MaxUnavailable":"25%"}` + newObj.Spec.Template.Spec.Containers[0].Image = "echoserver:v1" + return oldObj, newObj + }, + expectObj: func() *apps.Deployment { + obj := deploymentDemo.DeepCopy() + obj.Spec.Template.Spec.Containers[0].Image = "echoserver:v1" + obj.Annotations[util.InRolloutProgressingAnnotation] = `{"rolloutName":"rollout-demo"}` + obj.Annotations[appsv1beta1.OriginalDeploymentStrategyAnnotation] = `{"MaxSurge":"25%", "MaxUnavailable":"25%"}` + obj.Spec.Paused = true + return obj + }, + getRs: func() []*apps.ReplicaSet { + rs := rsDemo.DeepCopy() + rs2 := rsDemoV2.DeepCopy() + return []*apps.ReplicaSet{rs, rs2} + }, + getRollout: func() *appsv1beta1.Rollout { + obj := rolloutDemo.DeepCopy() + obj.Spec.Strategy.BlueGreen = &appsv1beta1.BlueGreenStrategy{} + return obj + }, + isError: false, + }, + { + name: "bluegreen: successive release", + getObjs: func() (*apps.Deployment, *apps.Deployment) { + oldObj := deploymentDemo.DeepCopy() + oldObj.Annotations[util.InRolloutProgressingAnnotation] = `{"rolloutName":"rollout-demo"}` + oldObj.Annotations[appsv1beta1.OriginalDeploymentStrategyAnnotation] = `{"MaxSurge":"25%", "MaxUnavailable":"25%"}` + oldObj.Labels[appsv1alpha1.DeploymentStableRevisionLabel] = "5b494f7bf" + oldObj.Spec.Template.Spec.Containers[0].Image = "echoserver:v2" + newObj := deploymentDemo.DeepCopy() + newObj.Annotations[util.InRolloutProgressingAnnotation] = `{"rolloutName":"rollout-demo"}` + newObj.Annotations[appsv1beta1.OriginalDeploymentStrategyAnnotation] = `{"MaxSurge":"25%", "MaxUnavailable":"25%"}` + newObj.Spec.Template.Spec.Containers[0].Image = "echoserver:v3" + return oldObj, newObj + }, + expectObj: func() *apps.Deployment { + obj := deploymentDemo.DeepCopy() + obj.Spec.Template.Spec.Containers[0].Image = "echoserver:v3" + 
obj.Annotations[util.InRolloutProgressingAnnotation] = `{"rolloutName":"rollout-demo"}` + obj.Annotations[appsv1beta1.OriginalDeploymentStrategyAnnotation] = `{"MaxSurge":"25%", "MaxUnavailable":"25%"}` + obj.Spec.Paused = true + return obj + }, + getRs: func() []*apps.ReplicaSet { + rs := rsDemo.DeepCopy() + rs2 := rsDemoV2.DeepCopy() + return []*apps.ReplicaSet{rs, rs2} + }, + getRollout: func() *appsv1beta1.Rollout { + obj := rolloutDemo.DeepCopy() + obj.Spec.Strategy.BlueGreen = &appsv1beta1.BlueGreenStrategy{} + return obj + }, + isError: false, + }, } decoder, _ := admission.NewDecoder(scheme) @@ -542,8 +680,11 @@ func TestHandlerDeployment(t *testing.T) { oldObj, newObj := cs.getObjs() _, err := h.handleDeployment(newObj, oldObj) - if cs.isError && err == nil { - t.Fatal("handlerDeployment failed") + if cs.isError { + if err == nil { + t.Fatal("handlerDeployment failed") + } + return //no need to check again } else if !cs.isError && err != nil { t.Fatalf(err.Error()) } @@ -683,82 +824,6 @@ func TestHandlerDaemonSet(t *testing.T) { } } -func TestHandleStatefulSet(t *testing.T) { - cases := []struct { - name string - getObjs func() (*kruiseappsv1beta1.StatefulSet, *kruiseappsv1beta1.StatefulSet) - expectObj func() *kruiseappsv1beta1.StatefulSet - getRollout func() *appsv1beta1.Rollout - isError bool - }{ - { - name: "cloneSet image v1->v2, matched rollout", - getObjs: func() (*kruiseappsv1beta1.StatefulSet, *kruiseappsv1beta1.StatefulSet) { - oldObj := statefulset.DeepCopy() - newObj := statefulset.DeepCopy() - newObj.Spec.Template.Spec.Containers[0].Image = "echoserver:v2" - return oldObj, newObj - }, - expectObj: func() *kruiseappsv1beta1.StatefulSet { - obj := statefulset.DeepCopy() - obj.Spec.Template.Spec.Containers[0].Image = "echoserver:v2" - obj.Annotations[util.InRolloutProgressingAnnotation] = `{"rolloutName":"rollout-demo"}` - obj.Spec.UpdateStrategy.RollingUpdate.Partition = pointer.Int32(math.MaxInt16) - return obj - }, - getRollout: func() *appsv1beta1.Rollout { - obj := rolloutDemo.DeepCopy() - obj.Spec.WorkloadRef = appsv1beta1.ObjectRef{ - APIVersion: "apps.kruise.io/v1beta1", - Kind: "StatefulSet", - Name: "echoserver", - } - return obj - }, - }, - } - - decoder, _ := admission.NewDecoder(scheme) - for _, cs := range cases { - t.Run(cs.name, func(t *testing.T) { - client := fake.NewClientBuilder().WithScheme(scheme).Build() - h := WorkloadHandler{ - Client: client, - Decoder: decoder, - Finder: util.NewControllerFinder(client), - } - rollout := cs.getRollout() - if err := client.Create(context.TODO(), rollout); err != nil { - t.Errorf(err.Error()) - } - - oldObj, newObj := cs.getObjs() - oldO, _ := runtime.DefaultUnstructuredConverter.ToUnstructured(oldObj) - newO, _ := runtime.DefaultUnstructuredConverter.ToUnstructured(newObj) - oldUnstructured := &unstructured.Unstructured{Object: oldO} - newUnstructured := &unstructured.Unstructured{Object: newO} - oldUnstructured.SetGroupVersionKind(newObj.GroupVersionKind()) - newUnstructured.SetGroupVersionKind(newObj.GroupVersionKind()) - _, err := h.handleStatefulSetLikeWorkload(newUnstructured, oldUnstructured) - if cs.isError && err == nil { - t.Fatal("handleStatefulSetLikeWorkload failed") - } else if !cs.isError && err != nil { - t.Fatalf(err.Error()) - } - newStructured := &kruiseappsv1beta1.StatefulSet{} - err = runtime.DefaultUnstructuredConverter.FromUnstructured(newUnstructured.Object, newStructured) - if err != nil { - t.Fatal("DefaultUnstructuredConvert failed") - } - expect := cs.expectObj() - if 
!reflect.DeepEqual(newStructured, expect) { - by, _ := json.Marshal(newStructured) - t.Fatalf("handlerCloneSet failed, and new(%s)", string(by)) - } - }) - } -} - func TestCheckWorkloadRule(t *testing.T) { ctx := context.Background() diff --git a/test/e2e/rollout_v1beta1_test.go b/test/e2e/rollout_v1beta1_test.go index cd2f67c4..1ed52170 100644 --- a/test/e2e/rollout_v1beta1_test.go +++ b/test/e2e/rollout_v1beta1_test.go @@ -19,6 +19,7 @@ package e2e import ( "context" "fmt" + "reflect" "sort" "strings" "time" @@ -28,8 +29,11 @@ import ( appsv1alpha1 "github.com/openkruise/kruise-api/apps/v1alpha1" appsv1beta1 "github.com/openkruise/kruise-api/apps/v1beta1" "github.com/openkruise/rollouts/api/v1beta1" + "github.com/openkruise/rollouts/pkg/controller/batchrelease/control" "github.com/openkruise/rollouts/pkg/util" apps "k8s.io/api/apps/v1" + scalingV1 "k8s.io/api/autoscaling/v1" + scalingV2 "k8s.io/api/autoscaling/v2" v1 "k8s.io/api/core/v1" netv1 "k8s.io/api/networking/v1" "k8s.io/apimachinery/pkg/api/errors" @@ -41,11 +45,6 @@ import ( "k8s.io/klog/v2" utilpointer "k8s.io/utils/pointer" "sigs.k8s.io/controller-runtime/pkg/client" - // "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - // "k8s.io/apimachinery/pkg/util/intstr" - // gatewayv1beta1 "sigs.k8s.io/gateway-api/apis/v1beta1" - // "github.com/openkruise/rollouts/api/v1alpha1" - // "k8s.io/apimachinery/pkg/api/errors" ) var _ = SIGDescribe("Rollout v1beta1", func() { @@ -202,51 +201,61 @@ var _ = SIGDescribe("Rollout v1beta1", func() { return clone } - ResumeRolloutCanary := func(name string) { + UpdateRolloutFail := func(object *v1beta1.Rollout) *v1beta1.Rollout { + var clone *v1beta1.Rollout + // still ignore the conflict error + Expect(retry.RetryOnConflict(retry.DefaultRetry, func() error { + clone = &v1beta1.Rollout{} + err := GetObject(object.Name, clone) + if err != nil { + return err + } + clone.Spec = *object.Spec.DeepCopy() + return k8sClient.Update(context.TODO(), clone) + })).To(HaveOccurred()) + return clone + } + + ResumeRollout := func(name string) { + clone := &v1beta1.Rollout{} + Expect(GetObject(name, clone)).NotTo(HaveOccurred()) + currentIndex := clone.Status.GetSubStatus().CurrentStepIndex Eventually(func() bool { clone := &v1beta1.Rollout{} Expect(GetObject(name, clone)).NotTo(HaveOccurred()) - if clone.Status.CanaryStatus.CurrentStepState != v1beta1.CanaryStepStatePaused { + if clone.Status.GetSubStatus().CurrentStepIndex == currentIndex && clone.Status.GetSubStatus().CurrentStepState == v1beta1.CanaryStepStatePaused { + klog.Info("patch to stepReady") + body := fmt.Sprintf(`{"status":{"canaryStatus":{"currentStepState":"%s"}}}`, v1beta1.CanaryStepStateReady) + if clone.Spec.Strategy.IsBlueGreenRelease() { + body = fmt.Sprintf(`{"status":{"blueGreenStatus":{"currentStepState":"%s"}}}`, v1beta1.CanaryStepStateReady) + } + Expect(k8sClient.Status().Patch(context.TODO(), clone, client.RawPatch(types.MergePatchType, []byte(body)))).NotTo(HaveOccurred()) + return false + } else { fmt.Println("resume rollout success, and CurrentStepState", util.DumpJSON(clone.Status)) return true } - - body := fmt.Sprintf(`{"status":{"canaryStatus":{"currentStepState":"%s"}}}`, v1beta1.CanaryStepStateReady) - Expect(k8sClient.Status().Patch(context.TODO(), clone, client.RawPatch(types.MergePatchType, []byte(body)))).NotTo(HaveOccurred()) - return false }, 10*time.Second, time.Millisecond*500).Should(BeTrue()) } - RolloutJumpCanaryStep := func(name string, target int) { + JumpRolloutStep := func(name string, target int) { 
Eventually(func() bool { clone := &v1beta1.Rollout{} Expect(GetObject(name, clone)).NotTo(HaveOccurred()) - if clone.Status.CanaryStatus.CurrentStepState != v1beta1.CanaryStepStatePaused { + if clone.Status.GetSubStatus().CurrentStepState != v1beta1.CanaryStepStatePaused { fmt.Println("Jump successfully, and current status ", util.DumpJSON(clone.Status)) return true } body := fmt.Sprintf(`{"status":{"canaryStatus":{"nextStepIndex":%d}}}`, target) + if clone.Spec.Strategy.IsBlueGreenRelease() { + body = fmt.Sprintf(`{"status":{"blueGreenStatus":{"nextStepIndex":%d}}}`, target) + } Expect(k8sClient.Status().Patch(context.TODO(), clone, client.RawPatch(types.MergePatchType, []byte(body)))).NotTo(HaveOccurred()) return false }, 10*time.Second, time.Second).Should(BeTrue()) } - // RolloutJumpBlueGreenStep := func(name string, target int) { - // Eventually(func() bool { - // clone := &v1alpha1.Rollout{} - // Expect(GetObject(name, clone)).NotTo(HaveOccurred()) - // if clone.Status.CanaryStatus.CurrentStepState !=v1beta1.CanaryStepStatePaused { - // fmt.Println("Jump successfully, and current status ", util.DumpJSON(clone.Status)) - // return true - // } - - // body := fmt.Sprintf(`{"status":{"blueGreenStatus":{"nextStepIndex":"%d"}}}`, target) - // Expect(k8sClient.Status().Patch(context.TODO(), clone, client.RawPatch(types.MergePatchType, []byte(body)))).NotTo(HaveOccurred()) - // return false - // }, 10*time.Second, time.Second).Should(BeTrue()) - // } - WaitDeploymentAllPodsReady := func(deployment *apps.Deployment) { Eventually(func() bool { clone := &apps.Deployment{} @@ -293,7 +302,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { // }, 5*time.Minute, time.Second).Should(BeTrue()) // } - // WaitDeploymentReplicas := func(deployment *apps.Deployment) { + // WaitDeploymentCanaryReplicas := func(deployment *apps.Deployment) { // Eventually(func() bool { // clone := &apps.Deployment{} // Expect(GetObject(deployment.Name, clone)).NotTo(HaveOccurred()) @@ -302,7 +311,41 @@ var _ = SIGDescribe("Rollout v1beta1", func() { // }, 10*time.Minute, time.Second).Should(BeTrue()) // } - WaitRolloutCanaryStepPaused := func(name string, stepIndex int32) { + WaitDeploymentBlueGreenReplicas := func(deployment *apps.Deployment) { + Eventually(func() bool { + clone := &apps.Deployment{} + Expect(GetObject(deployment.Name, clone)).NotTo(HaveOccurred()) + return clone.Status.ObservedGeneration == clone.Generation && clone.Status.ReadyReplicas == clone.Status.Replicas + }, 10*time.Minute, time.Second).Should(BeTrue()) + } + + // WaitClonesetBlueGreenReplicas := func(cloneset *appsv1alpha1.CloneSet) { + // Eventually(func() bool { + // clone := &appsv1alpha1.CloneSet{} + // Expect(GetObject(cloneset.Name, clone)).NotTo(HaveOccurred()) + // return clone.Status.ObservedGeneration == clone.Generation && + // *clone.Spec.Replicas == clone.Status.AvailableReplicas && clone.Status.ReadyReplicas == clone.Status.Replicas + // }, 10*time.Minute, time.Second).Should(BeTrue()) + // } + + // WaitRolloutStepUpgrade := func(name string, stepIndex int32) { + // start := time.Now() + // Eventually(func() bool { + // if start.Add(time.Minute * 5).Before(time.Now()) { + // DumpAllResources() + // Expect(true).Should(BeFalse()) + // } + // clone := &v1beta1.Rollout{} + // Expect(GetObject(name, clone)).NotTo(HaveOccurred()) + // if clone.Status.GetSubStatus() == nil { + // return false + // } + // klog.Infof("current step:%v target step:%v current step state %v", clone.Status.GetSubStatus().CurrentStepIndex, stepIndex, 
clone.Status.GetSubStatus().CurrentStepState) + // return clone.Status.GetSubStatus().CurrentStepIndex == stepIndex && clone.Status.GetSubStatus().CurrentStepState == v1beta1.CanaryStepStateUpgrade + // }, 20*time.Minute, time.Second).Should(BeTrue()) + // } + + WaitRolloutStepPaused := func(name string, stepIndex int32) { start := time.Now() Eventually(func() bool { if start.Add(time.Minute * 5).Before(time.Now()) { @@ -311,11 +354,11 @@ var _ = SIGDescribe("Rollout v1beta1", func() { } clone := &v1beta1.Rollout{} Expect(GetObject(name, clone)).NotTo(HaveOccurred()) - if clone.Status.CanaryStatus == nil { + if clone.Status.GetSubStatus() == nil { return false } - klog.Infof("current step:%v target step:%v current step state %v", clone.Status.CanaryStatus.CurrentStepIndex, stepIndex, clone.Status.CanaryStatus.CurrentStepState) - return clone.Status.CanaryStatus.CurrentStepIndex == stepIndex && clone.Status.CanaryStatus.CurrentStepState == v1beta1.CanaryStepStatePaused + klog.Infof("current step:%v target step:%v current step state %v", clone.Status.GetSubStatus().CurrentStepIndex, stepIndex, clone.Status.GetSubStatus().CurrentStepState) + return clone.Status.GetSubStatus().CurrentStepIndex == stepIndex && clone.Status.GetSubStatus().CurrentStepState == v1beta1.CanaryStepStatePaused }, 20*time.Minute, time.Second).Should(BeTrue()) } @@ -331,7 +374,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { Eventually(func() bool { clone := &v1beta1.Rollout{} Expect(GetObject(name, clone)).NotTo(HaveOccurred()) - return clone.Status.CanaryStatus.ObservedWorkloadGeneration == generation + return clone.Status.GetSubStatus().ObservedWorkloadGeneration == generation }, time.Minute, time.Second).Should(BeTrue()) } @@ -532,7 +575,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { time.Sleep(time.Second * 3) // wait step 1 complete By("wait step(1) pause") - WaitRolloutCanaryStepPaused(rollout.Name, 1) + WaitRolloutStepPaused(rollout.Name, 1) // rollout Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) Expect(rollout.Status.CanaryStatus.NextStepIndex).Should(BeNumerically("==", 2)) @@ -550,8 +593,8 @@ var _ = SIGDescribe("Rollout v1beta1", func() { // wait step 2 complete By("wait step(2) pause") - ResumeRolloutCanary(rollout.Name) - WaitRolloutCanaryStepPaused(rollout.Name, 2) + ResumeRollout(rollout.Name) + WaitRolloutStepPaused(rollout.Name, 2) // rollout Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) Expect(rollout.Status.CanaryStatus.NextStepIndex).Should(BeNumerically("==", 3)) @@ -566,9 +609,9 @@ var _ = SIGDescribe("Rollout v1beta1", func() { // wait step 3 complete By("wait step(3) pause") - ResumeRolloutCanary(rollout.Name) + ResumeRollout(rollout.Name) // rollout - WaitRolloutCanaryStepPaused(rollout.Name, 3) + WaitRolloutStepPaused(rollout.Name, 3) Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) Expect(rollout.Status.CanaryStatus.NextStepIndex).Should(BeNumerically("==", 4)) // canary workload @@ -582,8 +625,8 @@ var _ = SIGDescribe("Rollout v1beta1", func() { // wait step 4 complete By("wait step(4) pause") - ResumeRolloutCanary(rollout.Name) - WaitRolloutCanaryStepPaused(rollout.Name, 4) + ResumeRollout(rollout.Name) + WaitRolloutStepPaused(rollout.Name, 4) // rollout Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) Expect(rollout.Status.CanaryStatus.NextStepIndex).Should(BeNumerically("==", 5)) @@ -611,12 +654,12 @@ var _ = SIGDescribe("Rollout v1beta1", func() { // Jump to step 3 By("Jump to step 3") - 
RolloutJumpCanaryStep(rollout.Name, 3) - WaitRolloutCanaryStepPaused(rollout.Name, 3) + JumpRolloutStep(rollout.Name, 3) + WaitRolloutStepPaused(rollout.Name, 3) // rollout Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) Expect(rollout.Status.CanaryStatus.NextStepIndex).Should(BeNumerically("==", 4)) - // canary workload (won't scale down indeed) + // canary workload (won't scale down) cWorkload, err = GetCanaryDeployment(workload) Expect(err).NotTo(HaveOccurred()) canaryRevision = crss[0].Labels[apps.DefaultDeploymentUniqueLabelKey] @@ -687,14 +730,14 @@ var _ = SIGDescribe("Rollout v1beta1", func() { rollout = UpdateRollout(rollout) By("update rollout configuration, and wait rollout re-run current step(3)") time.Sleep(time.Second * 3) - WaitRolloutCanaryStepPaused(rollout.Name, 3) + WaitRolloutStepPaused(rollout.Name, 3) // batch release batch := &v1beta1.BatchRelease{} Expect(GetObject(rollout.Name, batch)).NotTo(HaveOccurred()) // rollout Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) Expect(rollout.Status.CanaryStatus.NextStepIndex).Should(BeNumerically("==", 4)) - // canary workload (won't scale down indeed) + // canary workload (won't scale down) cWorkload, err = GetCanaryDeployment(workload) Expect(err).NotTo(HaveOccurred()) canaryRevision = crss[0].Labels[apps.DefaultDeploymentUniqueLabelKey] @@ -715,12 +758,12 @@ var _ = SIGDescribe("Rollout v1beta1", func() { // Jump to step 2 By("Jump to step 2") - RolloutJumpCanaryStep(rollout.Name, 2) - WaitRolloutCanaryStepPaused(rollout.Name, 2) + JumpRolloutStep(rollout.Name, 2) + WaitRolloutStepPaused(rollout.Name, 2) // rollout Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) Expect(rollout.Status.CanaryStatus.NextStepIndex).Should(BeNumerically("==", 3)) - // canary workload (won't scale down indeed) + // canary workload (won't scale down) cWorkload, err = GetCanaryDeployment(workload) Expect(err).NotTo(HaveOccurred()) canaryRevision = crss[0].Labels[apps.DefaultDeploymentUniqueLabelKey] @@ -741,12 +784,12 @@ var _ = SIGDescribe("Rollout v1beta1", func() { // Jump to step 1 By("Jump to step 1") - RolloutJumpCanaryStep(rollout.Name, 1) - WaitRolloutCanaryStepPaused(rollout.Name, 1) + JumpRolloutStep(rollout.Name, 1) + WaitRolloutStepPaused(rollout.Name, 1) // rollout Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) Expect(rollout.Status.CanaryStatus.NextStepIndex).Should(BeNumerically("==", 2)) - // canary workload (won't scale down indeed) + // canary workload (won't scale down) cWorkload, err = GetCanaryDeployment(workload) Expect(err).NotTo(HaveOccurred()) canaryRevision = crss[0].Labels[apps.DefaultDeploymentUniqueLabelKey] @@ -767,7 +810,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { // Jump to step 5 By("Jump to step 5") - RolloutJumpCanaryStep(rollout.Name, 5) + JumpRolloutStep(rollout.Name, 5) // wait rollout complete WaitRolloutStatusPhase(rollout.Name, v1beta1.RolloutPhaseHealthy) klog.Infof("rollout(%s) completed, and check", namespace) @@ -841,7 +884,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { // wait step 1 complete By("wait step(1) pause") - WaitRolloutCanaryStepPaused(rollout.Name, 1) + WaitRolloutStepPaused(rollout.Name, 1) stableRevision := GetStableRSRevision(workload) By(stableRevision) Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) @@ -853,7 +896,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { extraStatus := util.GetDeploymentExtraStatus(workload) Expect(extraStatus.UpdatedReadyReplicas).Should(BeNumerically("==", 1)) 
Expect(strategy.Paused).Should(BeFalse()) - By("check cloneSet status & paused success") + By("check workload status & paused success") // check rollout status Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) @@ -881,8 +924,8 @@ var _ = SIGDescribe("Rollout v1beta1", func() { // wait step 2 complete By("wait step(2) pause") - ResumeRolloutCanary(rollout.Name) - WaitRolloutCanaryStepPaused(rollout.Name, 2) + ResumeRollout(rollout.Name) + WaitRolloutStepPaused(rollout.Name, 2) stableRevision = GetStableRSRevision(workload) By(stableRevision) Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) @@ -894,7 +937,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { extraStatus = util.GetDeploymentExtraStatus(workload) Expect(extraStatus.UpdatedReadyReplicas).Should(BeNumerically("==", 2)) Expect(strategy.Paused).Should(BeFalse()) - By("check cloneSet status & paused success") + By("check workload status & paused success") // check rollout status Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) @@ -909,8 +952,8 @@ var _ = SIGDescribe("Rollout v1beta1", func() { CheckIngressRestored(service.Name) // wait step 3 complete By("wait step(3) pause") - ResumeRolloutCanary(rollout.Name) - WaitRolloutCanaryStepPaused(rollout.Name, 3) + ResumeRollout(rollout.Name) + WaitRolloutStepPaused(rollout.Name, 3) stableRevision = GetStableRSRevision(workload) By(stableRevision) Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) @@ -922,7 +965,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { extraStatus = util.GetDeploymentExtraStatus(workload) Expect(extraStatus.UpdatedReadyReplicas).Should(BeNumerically("==", 3)) Expect(strategy.Paused).Should(BeFalse()) - By("check cloneSet status & paused success") + By("check workload status & paused success") // check rollout status Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) @@ -938,8 +981,8 @@ var _ = SIGDescribe("Rollout v1beta1", func() { // wait step 4 complete By("wait step(4) pause") - ResumeRolloutCanary(rollout.Name) - WaitRolloutCanaryStepPaused(rollout.Name, 4) + ResumeRollout(rollout.Name) + WaitRolloutStepPaused(rollout.Name, 4) stableRevision = GetStableRSRevision(workload) By(stableRevision) Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) @@ -951,7 +994,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { extraStatus = util.GetDeploymentExtraStatus(workload) Expect(extraStatus.UpdatedReadyReplicas).Should(BeNumerically("==", 4)) Expect(strategy.Paused).Should(BeFalse()) - By("check cloneSet status & paused success") + By("check workload status & paused success") // check rollout status Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) @@ -967,8 +1010,8 @@ var _ = SIGDescribe("Rollout v1beta1", func() { // Jump to step 3 By("Jump to step 3") - RolloutJumpCanaryStep(rollout.Name, 3) - WaitRolloutCanaryStepPaused(rollout.Name, 3) + JumpRolloutStep(rollout.Name, 3) + WaitRolloutStepPaused(rollout.Name, 3) stableRevision = GetStableRSRevision(workload) By(stableRevision) Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) @@ -981,7 +1024,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { extraStatus = util.GetDeploymentExtraStatus(workload) 
Expect(extraStatus.UpdatedReadyReplicas).Should(BeNumerically("==", 4)) Expect(strategy.Paused).Should(BeFalse()) - By("check cloneSet status & paused success") + By("check workload status & paused success") // check rollout status Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) @@ -1012,7 +1055,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { rollout = UpdateRollout(rollout) By("update rollout configuration, and wait rollout re-run current step(3)") time.Sleep(time.Second * 3) - WaitRolloutCanaryStepPaused(rollout.Name, 3) + WaitRolloutStepPaused(rollout.Name, 3) // batch release batch := &v1beta1.BatchRelease{} Expect(GetObject(rollout.Name, batch)).NotTo(HaveOccurred()) @@ -1028,7 +1071,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { extraStatus = util.GetDeploymentExtraStatus(workload) Expect(extraStatus.UpdatedReadyReplicas).Should(BeNumerically("==", 4)) Expect(strategy.Paused).Should(BeFalse()) - By("check cloneSet status & paused success") + By("check workload status & paused success") // check rollout status Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) @@ -1056,8 +1099,8 @@ var _ = SIGDescribe("Rollout v1beta1", func() { // Jump to step 2 By("Jump to step 2") - RolloutJumpCanaryStep(rollout.Name, 2) - WaitRolloutCanaryStepPaused(rollout.Name, 2) + JumpRolloutStep(rollout.Name, 2) + WaitRolloutStepPaused(rollout.Name, 2) stableRevision = GetStableRSRevision(workload) By(stableRevision) Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) @@ -1069,7 +1112,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { extraStatus = util.GetDeploymentExtraStatus(workload) Expect(extraStatus.UpdatedReadyReplicas).Should(BeNumerically("==", 4)) Expect(strategy.Paused).Should(BeFalse()) - By("check cloneSet status & paused success") + By("check workload status & paused success") // check rollout status Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) @@ -1085,8 +1128,8 @@ var _ = SIGDescribe("Rollout v1beta1", func() { // Jump to step 1 By("Jump to step 1") - RolloutJumpCanaryStep(rollout.Name, 1) - WaitRolloutCanaryStepPaused(rollout.Name, 1) + JumpRolloutStep(rollout.Name, 1) + WaitRolloutStepPaused(rollout.Name, 1) stableRevision = GetStableRSRevision(workload) By(stableRevision) Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) @@ -1098,7 +1141,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { extraStatus = util.GetDeploymentExtraStatus(workload) Expect(extraStatus.UpdatedReadyReplicas).Should(BeNumerically("==", 4)) Expect(strategy.Paused).Should(BeFalse()) - By("check cloneSet status & paused success") + By("check workload status & paused success") // check rollout status Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) @@ -1126,7 +1169,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { // Jump to step 5 By("Jump to step 5") - RolloutJumpCanaryStep(rollout.Name, 5) + JumpRolloutStep(rollout.Name, 5) // wait rollout complete WaitRolloutStatusPhase(rollout.Name, v1beta1.RolloutPhase(v1beta1.RolloutPhaseHealthy)) klog.Infof("rollout(%s) completed, and check", namespace) @@ -1200,7 +1243,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { // wait step 1 complete By("wait step(1) pause") - WaitRolloutCanaryStepPaused(rollout.Name, 1) + 
WaitRolloutStepPaused(rollout.Name, 1) // check workload status & paused Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 1)) @@ -1233,8 +1276,8 @@ var _ = SIGDescribe("Rollout v1beta1", func() { // wait step 2 complete By("wait step(2) pause") - ResumeRolloutCanary(rollout.Name) - WaitRolloutCanaryStepPaused(rollout.Name, 2) + ResumeRollout(rollout.Name) + WaitRolloutStepPaused(rollout.Name, 2) // check workload status & paused Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 2)) @@ -1255,8 +1298,8 @@ var _ = SIGDescribe("Rollout v1beta1", func() { // wait step 3 complete By("wait step(3) pause") - ResumeRolloutCanary(rollout.Name) - WaitRolloutCanaryStepPaused(rollout.Name, 3) + ResumeRollout(rollout.Name) + WaitRolloutStepPaused(rollout.Name, 3) // check workload status & paused Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 3)) @@ -1292,7 +1335,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { rollout = UpdateRollout(rollout) By("update rollout configuration, and wait rollout re-run current step(3)") time.Sleep(time.Second * 3) - WaitRolloutCanaryStepPaused(rollout.Name, 3) + WaitRolloutStepPaused(rollout.Name, 3) // batch release batch := &v1beta1.BatchRelease{} Expect(GetObject(rollout.Name, batch)).NotTo(HaveOccurred()) @@ -1328,8 +1371,8 @@ var _ = SIGDescribe("Rollout v1beta1", func() { // wait step 4 complete By("wait step(4) pause") - ResumeRolloutCanary(rollout.Name) - WaitRolloutCanaryStepPaused(rollout.Name, 4) + ResumeRollout(rollout.Name) + WaitRolloutStepPaused(rollout.Name, 4) // check workload status & paused Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 4)) @@ -1350,8 +1393,8 @@ var _ = SIGDescribe("Rollout v1beta1", func() { // Jump to step 3 By("Jump to step 3") - RolloutJumpCanaryStep(rollout.Name, 3) - WaitRolloutCanaryStepPaused(rollout.Name, 3) + JumpRolloutStep(rollout.Name, 3) + WaitRolloutStepPaused(rollout.Name, 3) // check workload status & paused Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 4)) @@ -1384,8 +1427,8 @@ var _ = SIGDescribe("Rollout v1beta1", func() { // Jump to step 2 By("Jump to step 2") - RolloutJumpCanaryStep(rollout.Name, 2) - WaitRolloutCanaryStepPaused(rollout.Name, 2) + JumpRolloutStep(rollout.Name, 2) + WaitRolloutStepPaused(rollout.Name, 2) // check workload status & paused Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 4)) @@ -1406,8 +1449,8 @@ var _ = SIGDescribe("Rollout v1beta1", func() { // Jump to step 1 By("Jump to step 1") - RolloutJumpCanaryStep(rollout.Name, 1) - WaitRolloutCanaryStepPaused(rollout.Name, 1) + JumpRolloutStep(rollout.Name, 1) + WaitRolloutStepPaused(rollout.Name, 1) // check workload status & paused Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 4)) @@ -1440,7 +1483,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { // Jump to step 5 By("Jump to step 5") - RolloutJumpCanaryStep(rollout.Name, 5) + JumpRolloutStep(rollout.Name, 5) // wait rollout complete WaitRolloutStatusPhase(rollout.Name, 
v1beta1.RolloutPhase(v1beta1.RolloutPhaseHealthy)) klog.Infof("rollout(%s) completed, and check", namespace) @@ -1470,33 +1513,14 @@ var _ = SIGDescribe("Rollout v1beta1", func() { WaitRolloutWorkloadGeneration(rollout.Name, workload.Generation) }) - }) - KruiseDescribe("CloneSet canary rollout with Ingress", func() { - It("CloneSet V1->V2: Percentage, 20%,60% Succeeded", func() { + // step1-> 2-> 3-> 4-> remove 2-4 steps + It("V1->V2: Deployment, Canary, remove 2-4 steps", func() { + finder := util.NewControllerFinder(k8sClient) By("Creating Rollout...") rollout := &v1beta1.Rollout{} - Expect(ReadYamlToObject("./test_data/rollout/rollout_v1beta1_partition_base.yaml", rollout)).ToNot(HaveOccurred()) - rollout.Spec.Strategy.Canary.Steps = []v1beta1.CanaryStep{ - { - TrafficRoutingStrategy: v1beta1.TrafficRoutingStrategy{ - Traffic: utilpointer.String("20%"), - }, - Replicas: &intstr.IntOrString{Type: intstr.String, StrVal: "20%"}, - Pause: v1beta1.RolloutPause{}, - }, - { - Replicas: &intstr.IntOrString{Type: intstr.String, StrVal: "60%"}, - Pause: v1beta1.RolloutPause{}, - }, - } - rollout.Spec.WorkloadRef = v1beta1.ObjectRef{ - APIVersion: "apps.kruise.io/v1alpha1", - Kind: "CloneSet", - Name: "echoserver", - } + Expect(ReadYamlToObject("./test_data/rollout/rollout_v1beta1_canary_base.yaml", rollout)).ToNot(HaveOccurred()) CreateObject(rollout) - By("Creating workload and waiting for all pods ready...") // service service := &v1.Service{} @@ -1507,115 +1531,90 @@ var _ = SIGDescribe("Rollout v1beta1", func() { Expect(ReadYamlToObject("./test_data/rollout/nginx_ingress.yaml", ingress)).ToNot(HaveOccurred()) CreateObject(ingress) // workload - workload := &appsv1alpha1.CloneSet{} - Expect(ReadYamlToObject("./test_data/rollout/cloneset.yaml", workload)).ToNot(HaveOccurred()) + workload := &apps.Deployment{} + Expect(ReadYamlToObject("./test_data/rollout/deployment.yaml", workload)).ToNot(HaveOccurred()) CreateObject(workload) - WaitCloneSetAllPodsReady(workload) - - // check rollout status - Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) - Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) - Expect(rollout.Status.Phase).Should(Equal(v1beta1.RolloutPhaseHealthy)) - Expect(rollout.Status.CanaryStatus.StableRevision).Should(Equal(workload.Status.CurrentRevision[strings.LastIndex(workload.Status.CurrentRevision, "-")+1:])) - stableRevision := rollout.Status.CanaryStatus.StableRevision - By("check rollout status & paused success") + WaitDeploymentAllPodsReady(workload) + rss, err := finder.GetReplicaSetsForDeployment(workload) + Expect(err).NotTo(HaveOccurred()) + Expect(len(rss)).Should(BeNumerically("==", 1)) // v1 -> v2, start rollout action newEnvs := mergeEnvVar(workload.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "NODE_NAME", Value: "version2"}) workload.Spec.Template.Spec.Containers[0].Env = newEnvs - UpdateCloneSet(workload) - By("Update cloneSet env NODE_NAME from(version1) -> to(version2)") + UpdateDeployment(workload) + By("Update deployment image from(version1) -> to(version2)") + time.Sleep(time.Second * 3) // wait step 1 complete - WaitRolloutCanaryStepPaused(rollout.Name, 1) - - // check workload status & paused + By("wait step(1) pause") + WaitRolloutStepPaused(rollout.Name, 1) + // rollout + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(rollout.Status.CanaryStatus.NextStepIndex).Should(BeNumerically("==", 2)) + // canary workload + cWorkload, err := GetCanaryDeployment(workload) + Expect(err).NotTo(HaveOccurred()) 
+ crss, err := finder.GetReplicaSetsForDeployment(cWorkload) + Expect(err).NotTo(HaveOccurred()) + Expect(len(crss)).Should(BeNumerically("==", 1)) + Expect(cWorkload.Status.AvailableReplicas).Should(BeNumerically("==", 1)) + // workload Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) - Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 1)) - Expect(workload.Status.UpdatedReadyReplicas).Should(BeNumerically("==", 1)) - Expect(workload.Spec.UpdateStrategy.Paused).Should(BeFalse()) - By("check cloneSet status & paused success") + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 0)) + Expect(workload.Status.AvailableReplicas).Should(BeNumerically("==", 5)) - // check rollout status + // Jump to step 3 + By("Jump to step 3") + JumpRolloutStep(rollout.Name, 3) + WaitRolloutStepPaused(rollout.Name, 3) + // rollout Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) - Expect(rollout.Status.Phase).Should(Equal(v1beta1.RolloutPhaseProgressing)) - Expect(rollout.Status.CanaryStatus.StableRevision).Should(Equal(stableRevision)) - Expect(rollout.Status.CanaryStatus.CanaryRevision).Should(Equal(workload.Status.UpdateRevision[strings.LastIndex(workload.Status.UpdateRevision, "-")+1:])) - Expect(rollout.Status.CanaryStatus.PodTemplateHash).Should(Equal(workload.Status.UpdateRevision[strings.LastIndex(workload.Status.UpdateRevision, "-")+1:])) - canaryRevision := rollout.Status.CanaryStatus.PodTemplateHash - Expect(rollout.Status.CanaryStatus.CurrentStepIndex).Should(BeNumerically("==", 1)) - Expect(rollout.Status.CanaryStatus.RolloutHash).Should(Equal(rollout.Annotations[util.RolloutHashAnnotation])) - // check stable, canary service & ingress - CheckIngressConfigured(&trafficContext{ - stableRevision: stableRevision, - canaryRevision: canaryRevision, - service: service, - }, &rollout.Spec.Strategy.Canary.Steps[0]) - - // resume rollout canary - ResumeRolloutCanary(rollout.Name) - By("resume rollout, and wait next step(2)") - WaitRolloutCanaryStepPaused(rollout.Name, 2) - - // check stable, canary service & ingress - CheckIngressRestored(service.Name) - // cloneset + Expect(rollout.Status.CanaryStatus.NextStepIndex).Should(BeNumerically("==", 4)) + // canary workload + cWorkload, err = GetCanaryDeployment(workload) + Expect(err).NotTo(HaveOccurred()) + canaryRevision := crss[0].Labels[apps.DefaultDeploymentUniqueLabelKey] + Expect(cWorkload.Status.AvailableReplicas).Should(BeNumerically("==", 3)) + // workload Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) - Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 3)) - Expect(workload.Status.UpdatedReadyReplicas).Should(BeNumerically("==", 3)) - Expect(workload.Spec.UpdateStrategy.Paused).Should(BeFalse()) - - // resume rollout - ResumeRolloutCanary(rollout.Name) - WaitRolloutStatusPhase(rollout.Name, v1beta1.RolloutPhaseHealthy) - WaitCloneSetAllPodsReady(workload) - By("rollout completed, and check") + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 0)) + Expect(workload.Status.AvailableReplicas).Should(BeNumerically("==", 5)) + // canary service + cService := &v1.Service{} + Expect(GetObject(service.Name+"-canary", cService)).NotTo(HaveOccurred()) + Expect(cService.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey]).Should(Equal(canaryRevision)) + // canary ingress + cIngress := &netv1.Ingress{} + Expect(GetObject(service.Name+"-canary", cIngress)).NotTo(HaveOccurred()) + Expect(cIngress.Annotations[fmt.Sprintf("%s/canary", 
nginxIngressAnnotationDefaultPrefix)]).Should(Equal("true")) + Expect(cIngress.Annotations[fmt.Sprintf("%s/canary-weight", nginxIngressAnnotationDefaultPrefix)]).Should(Equal(removePercentageSign(*rollout.Spec.Strategy.Canary.Steps[2].Traffic))) - // check if network configuration has restored - CheckIngressRestored(service.Name) - // cloneset - Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) - Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 5)) - Expect(workload.Status.UpdatedReadyReplicas).Should(BeNumerically("==", 5)) - Expect(workload.Spec.UpdateStrategy.Partition.IntVal).Should(BeNumerically("==", 0)) - Expect(workload.Spec.UpdateStrategy.Paused).Should(BeFalse()) - Expect(workload.Status.CurrentRevision).Should(ContainSubstring(canaryRevision)) - Expect(workload.Status.UpdateRevision).Should(ContainSubstring(canaryRevision)) - for _, env := range workload.Spec.Template.Spec.Containers[0].Env { - if env.Name == "NODE_NAME" { - Expect(env.Value).Should(Equal("version2")) - } + // remove step 2 3 4 + By("Remove step 2 3 4") + Expect(rollout.Status.CanaryStatus.RolloutHash).Should(Equal(rollout.Annotations[util.RolloutHashAnnotation])) + // update rollout step configuration + rollout.Spec.Strategy.Canary.Steps = []v1beta1.CanaryStep{ + { + TrafficRoutingStrategy: v1beta1.TrafficRoutingStrategy{ + Traffic: utilpointer.String("20%"), + }, + Replicas: &intstr.IntOrString{Type: intstr.String, StrVal: "20%"}, + Pause: v1beta1.RolloutPause{}, + }, } - time.Sleep(time.Second * 3) - - // check progressing succeed - Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) - Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) - cond := getRolloutCondition(rollout.Status, v1beta1.RolloutConditionProgressing) - Expect(cond.Reason).Should(Equal(v1beta1.ProgressingReasonCompleted)) - Expect(string(cond.Status)).Should(Equal(string(metav1.ConditionFalse))) - cond = getRolloutCondition(rollout.Status, v1beta1.RolloutConditionSucceeded) - Expect(string(cond.Status)).Should(Equal(string(metav1.ConditionTrue))) - WaitRolloutWorkloadGeneration(rollout.Name, workload.Generation) - //Expect(rollout.Status.CanaryStatus.StableRevision).Should(Equal(canaryRevision)) - - // scale up replicas 5 -> 6 - workload.Spec.Replicas = utilpointer.Int32(6) - UpdateCloneSet(workload) - By("Update cloneSet replicas from(5) -> to(6)") - time.Sleep(time.Second * 2) - - Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) - Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) - WaitRolloutWorkloadGeneration(rollout.Name, workload.Generation) + // now modifying the amount of steps is forbidden in webhook, we expect an error + _ = UpdateRolloutFail(rollout) }) + }) - It("V1->V2: Percentage, 20%, and rollback(v1)", func() { + KruiseDescribe("Bluegreen Release - Deployment - Ingress", func() { + It("bluegreen rolling with traffic case", func() { By("Creating Rollout...") rollout := &v1beta1.Rollout{} - Expect(ReadYamlToObject("./test_data/rollout/rollout_v1beta1_partition_base.yaml", rollout)).ToNot(HaveOccurred()) + Expect(ReadYamlToObject("./test_data/rollout/rollout_v1beta1_bluegreen_base.yaml", rollout)).ToNot(HaveOccurred()) rollout.Spec.WorkloadRef = v1beta1.ObjectRef{ - APIVersion: "apps.kruise.io/v1alpha1", - Kind: "CloneSet", + APIVersion: "apps/v1", + Kind: "Deployment", Name: "echoserver", } CreateObject(rollout) @@ -1630,40 +1629,2443 @@ var _ = SIGDescribe("Rollout v1beta1", func() { 
Expect(ReadYamlToObject("./test_data/rollout/nginx_ingress.yaml", ingress)).ToNot(HaveOccurred()) CreateObject(ingress) // workload - workload := &appsv1alpha1.CloneSet{} - Expect(ReadYamlToObject("./test_data/rollout/cloneset.yaml", workload)).ToNot(HaveOccurred()) + workload := &apps.Deployment{} + Expect(ReadYamlToObject("./test_data/rollout/deployment.yaml", workload)).ToNot(HaveOccurred()) CreateObject(workload) - WaitCloneSetAllPodsReady(workload) + WaitDeploymentAllPodsReady(workload) - // check rollout status // check rollout status Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) Expect(rollout.Status.Phase).Should(Equal(v1beta1.RolloutPhaseHealthy)) - Expect(rollout.Status.CanaryStatus.StableRevision).Should(Equal(workload.Status.CurrentRevision[strings.LastIndex(workload.Status.CurrentRevision, "-")+1:])) - stableRevision := rollout.Status.CanaryStatus.StableRevision By("check rollout status & paused success") // v1 -> v2, start rollout action newEnvs := mergeEnvVar(workload.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "NODE_NAME", Value: "version2"}) - workload.Spec.Template.Spec.Containers[0].Image = "echoserver:failed" workload.Spec.Template.Spec.Containers[0].Env = newEnvs - UpdateCloneSet(workload) - By("Update cloneSet env NODE_NAME from(version1) -> to(version2)") + UpdateDeployment(workload) + By("Update workload env NODE_NAME from(version1) -> to(version2)") + // ------ step 1: replicas: 50%, traffic: 0% ------ // wait step 1 complete - time.Sleep(time.Second * 20) + WaitRolloutStepPaused(rollout.Name, 1) + stableRevision := GetStableRSRevision(workload) + By(stableRevision) + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(rollout.Status.CanaryStatus).Should(BeNil()) + Expect(rollout.Status.BlueGreenStatus.StableRevision).Should(Equal(stableRevision)) // check workload status & paused Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) - Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 1)) - Expect(workload.Status.UpdatedReadyReplicas).Should(BeNumerically("==", 0)) - Expect(workload.Spec.UpdateStrategy.Paused).Should(BeFalse()) - By("check cloneSet status & paused success") + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 3)) + Expect(workload.Status.UnavailableReplicas).Should(BeNumerically("==", 8)) + Expect(workload.Status.ReadyReplicas).Should(BeNumerically("==", 8)) + + setting, _ := control.GetOriginalSetting(workload) + Expect(setting.MinReadySeconds).Should(BeNumerically("==", int32(0))) + Expect(*setting.ProgressDeadlineSeconds).Should(BeNumerically("==", int32(600))) + Expect(reflect.DeepEqual(setting.MaxUnavailable, &intstr.IntOrString{Type: intstr.Int, IntVal: 0})).Should(BeTrue()) + Expect(reflect.DeepEqual(setting.MaxSurge, &intstr.IntOrString{Type: intstr.Int, IntVal: 1})).Should(BeTrue()) + + Expect(workload.Spec.Paused).Should(BeFalse()) + Expect(workload.Spec.Strategy.Type).Should(Equal(apps.RollingUpdateDeploymentStrategyType)) + Expect(workload.Spec.MinReadySeconds).Should(Equal(int32(v1beta1.MaxReadySeconds))) + Expect(*workload.Spec.ProgressDeadlineSeconds).Should(Equal(int32(v1beta1.MaxProgressSeconds))) + Expect(reflect.DeepEqual(workload.Spec.Strategy.RollingUpdate.MaxUnavailable, &intstr.IntOrString{Type: intstr.Int, IntVal: 0})).Should(BeTrue()) + Expect(reflect.DeepEqual(workload.Spec.Strategy.RollingUpdate.MaxSurge, &intstr.IntOrString{Type: intstr.String, StrVal: 
"50%"})).Should(BeTrue()) + By("check workload status & paused success") // check rollout status + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) Expect(rollout.Status.Phase).Should(Equal(v1beta1.RolloutPhaseProgressing)) - Expect(rollout.Status.CanaryStatus.StableRevision).Should(Equal(stableRevision)) + Expect(rollout.Status.BlueGreenStatus.StableRevision).Should(Equal(stableRevision)) + Expect(rollout.Status.BlueGreenStatus.UpdatedRevision).Should(Equal(util.ComputeHash(&workload.Spec.Template, nil))) + Expect(rollout.Status.BlueGreenStatus.PodTemplateHash).Should(Equal(GetCanaryRSRevision(workload))) + canaryRevision := rollout.Status.BlueGreenStatus.PodTemplateHash + Expect(rollout.Status.BlueGreenStatus.RolloutHash).Should(Equal(rollout.Annotations[util.RolloutHashAnnotation])) + Expect(rollout.Status.BlueGreenStatus.CurrentStepIndex).Should(BeNumerically("==", 1)) + Expect(rollout.Status.BlueGreenStatus.NextStepIndex).Should(BeNumerically("==", 2)) + Expect(rollout.Status.BlueGreenStatus.UpdatedReplicas).Should(BeNumerically("==", 3)) + Expect(rollout.Status.BlueGreenStatus.UpdatedReadyReplicas).Should(BeNumerically("==", 3)) + // check stable, canary service & ingress + // stable service + Expect(GetObject(service.Name, service)).NotTo(HaveOccurred()) + Expect(service.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey]).Should(Equal(stableRevision)) + //canary service + cService := &v1.Service{} + Expect(GetObject(service.Name+"-canary", cService)).NotTo(HaveOccurred()) + Expect(cService.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey]).Should(Equal(canaryRevision)) + // canary ingress + // when traffic is 0%, ingress canary won't create and annotation won't be set (for ingress-nginx) + // cIngress := &netv1.Ingress{} + // Expect(GetObject(service.Name+"-canary", cIngress)).NotTo(HaveOccurred()) + // Expect(cIngress.Annotations[fmt.Sprintf("%s/canary", nginxIngressAnnotationDefaultPrefix)]).Should(Equal("true")) + // Expect(cIngress.Annotations[fmt.Sprintf("%s/canary-weight", nginxIngressAnnotationDefaultPrefix)]).Should(Equal(removePercentageSign(*rollout.Spec.Strategy.BlueGreen.Steps[0].Traffic))) + + // ------ step 2: replicas: 100%, traffic: 0% ------ + // resume rollout canary + ResumeRollout(rollout.Name) + By("resume rollout, and wait next step(2)") + WaitRolloutStepPaused(rollout.Name, 2) + + // workload + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Status.UnavailableReplicas).Should(BeNumerically("==", 10)) + Expect(workload.Status.ReadyReplicas).Should(BeNumerically("==", 10)) + Expect(workload.Spec.Paused).Should(BeFalse()) + Expect(workload.Spec.Strategy.Type).Should(Equal(apps.RollingUpdateDeploymentStrategyType)) + Expect(workload.Spec.MinReadySeconds).Should(Equal(int32(v1beta1.MaxReadySeconds))) + Expect(*workload.Spec.ProgressDeadlineSeconds).Should(Equal(int32(v1beta1.MaxProgressSeconds))) + Expect(reflect.DeepEqual(workload.Spec.Strategy.RollingUpdate.MaxUnavailable, &intstr.IntOrString{Type: intstr.Int, IntVal: 0})).Should(BeTrue()) + Expect(reflect.DeepEqual(workload.Spec.Strategy.RollingUpdate.MaxSurge, &intstr.IntOrString{Type: intstr.String, StrVal: "100%"})).Should(BeTrue()) + + // rollout + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(rollout.Status.BlueGreenStatus.CurrentStepIndex).Should(BeNumerically("==", 2)) + 
Expect(rollout.Status.BlueGreenStatus.NextStepIndex).Should(BeNumerically("==", 3)) + Expect(rollout.Status.BlueGreenStatus.UpdatedReplicas).Should(BeNumerically("==", 5)) + Expect(rollout.Status.BlueGreenStatus.UpdatedReadyReplicas).Should(BeNumerically("==", 5)) + + // ------ step 3: replicas: 100%, traffic: 50% ------ + // resume rollout canary + ResumeRollout(rollout.Name) + By("resume rollout, and wait next step(3)") + WaitRolloutStepPaused(rollout.Name, 3) + + // workload + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Status.UnavailableReplicas).Should(BeNumerically("==", 10)) + Expect(workload.Status.ReadyReplicas).Should(BeNumerically("==", 10)) + Expect(workload.Spec.Paused).Should(BeFalse()) + Expect(workload.Spec.Strategy.Type).Should(Equal(apps.RollingUpdateDeploymentStrategyType)) + Expect(workload.Spec.MinReadySeconds).Should(Equal(int32(v1beta1.MaxReadySeconds))) + Expect(*workload.Spec.ProgressDeadlineSeconds).Should(Equal(int32(v1beta1.MaxProgressSeconds))) + Expect(reflect.DeepEqual(workload.Spec.Strategy.RollingUpdate.MaxUnavailable, &intstr.IntOrString{Type: intstr.Int, IntVal: 0})).Should(BeTrue()) + Expect(reflect.DeepEqual(workload.Spec.Strategy.RollingUpdate.MaxSurge, &intstr.IntOrString{Type: intstr.String, StrVal: "100%"})).Should(BeTrue()) + + // rollout + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(rollout.Status.BlueGreenStatus.CurrentStepIndex).Should(BeNumerically("==", 3)) + Expect(rollout.Status.BlueGreenStatus.NextStepIndex).Should(BeNumerically("==", 4)) + Expect(rollout.Status.BlueGreenStatus.UpdatedReplicas).Should(BeNumerically("==", 5)) + Expect(rollout.Status.BlueGreenStatus.UpdatedReadyReplicas).Should(BeNumerically("==", 5)) + + // check stable, canary service & ingress + // stable service + Expect(GetObject(service.Name, service)).NotTo(HaveOccurred()) + Expect(service.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey]).Should(Equal(stableRevision)) + //canary service + cService = &v1.Service{} + Expect(GetObject(service.Name+"-canary", cService)).NotTo(HaveOccurred()) + Expect(cService.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey]).Should(Equal(canaryRevision)) + // canary ingress + cIngress := &netv1.Ingress{} + Expect(GetObject(service.Name+"-canary", cIngress)).NotTo(HaveOccurred()) + Expect(cIngress.Annotations[fmt.Sprintf("%s/canary", nginxIngressAnnotationDefaultPrefix)]).Should(Equal("true")) + Expect(cIngress.Annotations[fmt.Sprintf("%s/canary-weight", nginxIngressAnnotationDefaultPrefix)]).Should(Equal(removePercentageSign(*rollout.Spec.Strategy.BlueGreen.Steps[2].Traffic))) + + // ------ step 4: replicas: 100%, traffic: 100% ------ + // resume rollout + ResumeRollout(rollout.Name) + By("resume rollout, and wait next step(4)") + WaitRolloutStepPaused(rollout.Name, 4) + + // workload + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Status.UnavailableReplicas).Should(BeNumerically("==", 10)) + Expect(workload.Status.ReadyReplicas).Should(BeNumerically("==", 10)) + Expect(workload.Spec.Paused).Should(BeFalse()) + Expect(workload.Spec.Strategy.Type).Should(Equal(apps.RollingUpdateDeploymentStrategyType)) + Expect(workload.Spec.MinReadySeconds).Should(Equal(int32(v1beta1.MaxReadySeconds))) + Expect(*workload.Spec.ProgressDeadlineSeconds).Should(Equal(int32(v1beta1.MaxProgressSeconds))) + 
Expect(reflect.DeepEqual(workload.Spec.Strategy.RollingUpdate.MaxUnavailable, &intstr.IntOrString{Type: intstr.Int, IntVal: 0})).Should(BeTrue()) + Expect(reflect.DeepEqual(workload.Spec.Strategy.RollingUpdate.MaxSurge, &intstr.IntOrString{Type: intstr.String, StrVal: "100%"})).Should(BeTrue()) + + // rollout + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(rollout.Status.BlueGreenStatus.CurrentStepIndex).Should(BeNumerically("==", 4)) + Expect(rollout.Status.BlueGreenStatus.UpdatedReplicas).Should(BeNumerically("==", 5)) + Expect(rollout.Status.BlueGreenStatus.UpdatedReadyReplicas).Should(BeNumerically("==", 5)) + + // check stable, canary service & ingress + // stable service + Expect(GetObject(service.Name, service)).NotTo(HaveOccurred()) + Expect(service.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey]).Should(Equal(stableRevision)) + // canary service + cService = &v1.Service{} + Expect(GetObject(service.Name+"-canary", cService)).NotTo(HaveOccurred()) + Expect(cService.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey]).Should(Equal(canaryRevision)) + // canary ingress + cIngress = &netv1.Ingress{} + Expect(GetObject(service.Name+"-canary", cIngress)).NotTo(HaveOccurred()) + Expect(cIngress.Annotations[fmt.Sprintf("%s/canary", nginxIngressAnnotationDefaultPrefix)]).Should(Equal("true")) + Expect(cIngress.Annotations[fmt.Sprintf("%s/canary-weight", nginxIngressAnnotationDefaultPrefix)]).Should(Equal(removePercentageSign(*rollout.Spec.Strategy.BlueGreen.Steps[3].Traffic))) + + // ------ Final approval ------ + // resume rollout + ResumeRollout(rollout.Name) + By("resume rollout, and wait to Finalise") + WaitRolloutStatusPhase(rollout.Name, v1beta1.RolloutPhaseHealthy) + WaitDeploymentAllPodsReady(workload) + By("rollout completed, and check") + + // check service & ingress & deployment + // ingress + Expect(GetObject(ingress.Name, ingress)).NotTo(HaveOccurred()) + cIngress = &netv1.Ingress{} + Expect(GetObject(fmt.Sprintf("%s-canary", ingress.Name), cIngress)).To(HaveOccurred()) + // service + Expect(GetObject(service.Name, service)).NotTo(HaveOccurred()) + Expect(service.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey]).Should(Equal("")) + cService = &v1.Service{} + Expect(GetObject(fmt.Sprintf("%s-canary", service.Name), cService)).To(HaveOccurred()) + // workload + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 5)) + for _, env := range workload.Spec.Template.Spec.Containers[0].Env { + if env.Name == "NODE_NAME" { + Expect(env.Value).Should(Equal("version2")) + } + } + time.Sleep(time.Second * 3) + + // check progressing succeed + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + cond := getRolloutCondition(rollout.Status, v1beta1.RolloutConditionProgressing) + Expect(cond.Reason).Should(Equal(v1beta1.ProgressingReasonCompleted)) + Expect(string(cond.Status)).Should(Equal(string(metav1.ConditionFalse))) + cond = getRolloutCondition(rollout.Status, v1beta1.RolloutConditionSucceeded) + Expect(string(cond.Status)).Should(Equal(string(metav1.ConditionTrue))) + WaitRolloutWorkloadGeneration(rollout.Name, workload.Generation) + + // scale up replicas 5 -> 6 + workload.Spec.Replicas = utilpointer.Int32(6) + UpdateDeployment(workload) + By("Update workload replicas from(5) -> to(6)") + time.Sleep(time.Second * 2) + + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + 
Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + WaitRolloutWorkloadGeneration(rollout.Name, workload.Generation) + }) + + It("bluegreen rollback case", func() { + By("Creating Rollout...") + rollout := &v1beta1.Rollout{} + Expect(ReadYamlToObject("./test_data/rollout/rollout_v1beta1_bluegreen_base.yaml", rollout)).ToNot(HaveOccurred()) + rollout.Spec.WorkloadRef = v1beta1.ObjectRef{ + APIVersion: "apps/v1", + Kind: "Deployment", + Name: "echoserver", + } + CreateObject(rollout) + + By("Creating workload and waiting for all pods ready...") + // service + service := &v1.Service{} + Expect(ReadYamlToObject("./test_data/rollout/service.yaml", service)).ToNot(HaveOccurred()) + CreateObject(service) + // ingress + ingress := &netv1.Ingress{} + Expect(ReadYamlToObject("./test_data/rollout/nginx_ingress.yaml", ingress)).ToNot(HaveOccurred()) + CreateObject(ingress) + // workload + workload := &apps.Deployment{} + Expect(ReadYamlToObject("./test_data/rollout/deployment.yaml", workload)).ToNot(HaveOccurred()) + CreateObject(workload) + WaitDeploymentAllPodsReady(workload) + + // check rollout status + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(rollout.Status.Phase).Should(Equal(v1beta1.RolloutPhaseHealthy)) + By("check rollout status & paused success") + + // v1 -> v2, start rollout action + newEnvs := mergeEnvVar(workload.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "NODE_NAME", Value: "version2"}) + workload.Spec.Template.Spec.Containers[0].Env = newEnvs + UpdateDeployment(workload) + By("Update workload env NODE_NAME from(version1) -> to(version2)") + // ------ step 1: replicas: 50%, traffic: 0% ------ + // wait step 1 complete + WaitRolloutStepPaused(rollout.Name, 1) + stableRevision := GetStableRSRevision(workload) + By(stableRevision) + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(rollout.Status.CanaryStatus).Should(BeNil()) + Expect(rollout.Status.BlueGreenStatus.StableRevision).Should(Equal(stableRevision)) + + // check workload status & paused + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 3)) + Expect(workload.Status.UnavailableReplicas).Should(BeNumerically("==", 8)) + Expect(workload.Status.ReadyReplicas).Should(BeNumerically("==", 8)) + + setting, _ := control.GetOriginalSetting(workload) + Expect(setting.MinReadySeconds).Should(BeNumerically("==", int32(0))) + Expect(*setting.ProgressDeadlineSeconds).Should(BeNumerically("==", int32(600))) + Expect(reflect.DeepEqual(setting.MaxUnavailable, &intstr.IntOrString{Type: intstr.Int, IntVal: 0})).Should(BeTrue()) + Expect(reflect.DeepEqual(setting.MaxSurge, &intstr.IntOrString{Type: intstr.Int, IntVal: 1})).Should(BeTrue()) + + Expect(workload.Spec.Paused).Should(BeFalse()) + Expect(workload.Spec.Strategy.Type).Should(Equal(apps.RollingUpdateDeploymentStrategyType)) + Expect(workload.Spec.MinReadySeconds).Should(Equal(int32(v1beta1.MaxReadySeconds))) + Expect(*workload.Spec.ProgressDeadlineSeconds).Should(Equal(int32(v1beta1.MaxProgressSeconds))) + Expect(reflect.DeepEqual(workload.Spec.Strategy.RollingUpdate.MaxUnavailable, &intstr.IntOrString{Type: intstr.Int, IntVal: 0})).Should(BeTrue()) + Expect(reflect.DeepEqual(workload.Spec.Strategy.RollingUpdate.MaxSurge, &intstr.IntOrString{Type: intstr.String, StrVal: "50%"})).Should(BeTrue()) + By("check workload status & paused success") + + // check rollout 
status + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(rollout.Status.Phase).Should(Equal(v1beta1.RolloutPhaseProgressing)) + Expect(rollout.Status.BlueGreenStatus.StableRevision).Should(Equal(stableRevision)) + Expect(rollout.Status.BlueGreenStatus.UpdatedRevision).Should(Equal(util.ComputeHash(&workload.Spec.Template, nil))) + Expect(rollout.Status.BlueGreenStatus.PodTemplateHash).Should(Equal(GetCanaryRSRevision(workload))) + canaryRevision := rollout.Status.BlueGreenStatus.PodTemplateHash + Expect(rollout.Status.BlueGreenStatus.RolloutHash).Should(Equal(rollout.Annotations[util.RolloutHashAnnotation])) + Expect(rollout.Status.BlueGreenStatus.CurrentStepIndex).Should(BeNumerically("==", 1)) + Expect(rollout.Status.BlueGreenStatus.NextStepIndex).Should(BeNumerically("==", 2)) + Expect(rollout.Status.BlueGreenStatus.UpdatedReplicas).Should(BeNumerically("==", 3)) + Expect(rollout.Status.BlueGreenStatus.UpdatedReadyReplicas).Should(BeNumerically("==", 3)) + // check stable, canary service & ingress + // stable service + Expect(GetObject(service.Name, service)).NotTo(HaveOccurred()) + Expect(service.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey]).Should(Equal(stableRevision)) + //canary service + cService := &v1.Service{} + Expect(GetObject(service.Name+"-canary", cService)).NotTo(HaveOccurred()) + Expect(cService.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey]).Should(Equal(canaryRevision)) + // canary ingress + // when traffic is 0%, ingress canary won't create and annotation won't be set (for ingress-nginx) + // cIngress := &netv1.Ingress{} + // Expect(GetObject(service.Name+"-canary", cIngress)).NotTo(HaveOccurred()) + // Expect(cIngress.Annotations[fmt.Sprintf("%s/canary", nginxIngressAnnotationDefaultPrefix)]).Should(Equal("true")) + // Expect(cIngress.Annotations[fmt.Sprintf("%s/canary-weight", nginxIngressAnnotationDefaultPrefix)]).Should(Equal(removePercentageSign(*rollout.Spec.Strategy.BlueGreen.Steps[0].Traffic))) + + // ------ step 2: replicas: 100%, traffic: 0% ------ + // resume rollout canary + ResumeRollout(rollout.Name) + By("resume rollout, and wait next step(2)") + WaitRolloutStepPaused(rollout.Name, 2) + + // workload + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Status.UnavailableReplicas).Should(BeNumerically("==", 10)) + Expect(workload.Status.ReadyReplicas).Should(BeNumerically("==", 10)) + Expect(workload.Spec.Paused).Should(BeFalse()) + Expect(workload.Spec.Strategy.Type).Should(Equal(apps.RollingUpdateDeploymentStrategyType)) + Expect(workload.Spec.MinReadySeconds).Should(Equal(int32(v1beta1.MaxReadySeconds))) + Expect(*workload.Spec.ProgressDeadlineSeconds).Should(Equal(int32(v1beta1.MaxProgressSeconds))) + Expect(reflect.DeepEqual(workload.Spec.Strategy.RollingUpdate.MaxUnavailable, &intstr.IntOrString{Type: intstr.Int, IntVal: 0})).Should(BeTrue()) + Expect(reflect.DeepEqual(workload.Spec.Strategy.RollingUpdate.MaxSurge, &intstr.IntOrString{Type: intstr.String, StrVal: "100%"})).Should(BeTrue()) + + // rollout + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(rollout.Status.BlueGreenStatus.CurrentStepIndex).Should(BeNumerically("==", 2)) + Expect(rollout.Status.BlueGreenStatus.NextStepIndex).Should(BeNumerically("==", 3)) + Expect(rollout.Status.BlueGreenStatus.UpdatedReplicas).Should(BeNumerically("==", 5)) + 
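A note on the workload numbers asserted above: ReadyReplicas and UnavailableReplicas are both expected to be 10, which looks contradictory but is consistent with how availability is computed. While the release is in progress the Deployment's minReadySeconds is pinned to v1beta1.MaxReadySeconds (as the spec assertions check), so pods become ready yet are never counted as available. A minimal illustration of that rule, not code from this patch:

// A ready pod only counts as available after it has stayed ready for
// minReadySeconds; with minReadySeconds pinned to a very large value, every
// pod remains "ready but unavailable" for the whole release, so
// readyReplicas == 10 and unavailableReplicas == 10 can hold at the same time.
func isAvailable(readySince time.Time, minReadySeconds int32, now time.Time) bool {
	if minReadySeconds <= 0 {
		return true
	}
	return now.Sub(readySince) >= time.Duration(minReadySeconds)*time.Second
}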
Expect(rollout.Status.BlueGreenStatus.UpdatedReadyReplicas).Should(BeNumerically("==", 5)) + + // ------ step 3: replicas: 100%, traffic: 50% ------ + // resume rollout canary + ResumeRollout(rollout.Name) + By("resume rollout, and wait next step(3)") + WaitRolloutStepPaused(rollout.Name, 3) + + // workload + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Status.UnavailableReplicas).Should(BeNumerically("==", 10)) + Expect(workload.Status.ReadyReplicas).Should(BeNumerically("==", 10)) + Expect(workload.Spec.Paused).Should(BeFalse()) + Expect(workload.Spec.Strategy.Type).Should(Equal(apps.RollingUpdateDeploymentStrategyType)) + Expect(workload.Spec.MinReadySeconds).Should(Equal(int32(v1beta1.MaxReadySeconds))) + Expect(*workload.Spec.ProgressDeadlineSeconds).Should(Equal(int32(v1beta1.MaxProgressSeconds))) + Expect(reflect.DeepEqual(workload.Spec.Strategy.RollingUpdate.MaxUnavailable, &intstr.IntOrString{Type: intstr.Int, IntVal: 0})).Should(BeTrue()) + Expect(reflect.DeepEqual(workload.Spec.Strategy.RollingUpdate.MaxSurge, &intstr.IntOrString{Type: intstr.String, StrVal: "100%"})).Should(BeTrue()) + + // rollout + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(rollout.Status.BlueGreenStatus.CurrentStepIndex).Should(BeNumerically("==", 3)) + Expect(rollout.Status.BlueGreenStatus.NextStepIndex).Should(BeNumerically("==", 4)) + Expect(rollout.Status.BlueGreenStatus.UpdatedReplicas).Should(BeNumerically("==", 5)) + Expect(rollout.Status.BlueGreenStatus.UpdatedReadyReplicas).Should(BeNumerically("==", 5)) + + // check stable, canary service & ingress + // stable service + Expect(GetObject(service.Name, service)).NotTo(HaveOccurred()) + Expect(service.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey]).Should(Equal(stableRevision)) + //canary service + cService = &v1.Service{} + Expect(GetObject(service.Name+"-canary", cService)).NotTo(HaveOccurred()) + Expect(cService.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey]).Should(Equal(canaryRevision)) + // canary ingress + cIngress := &netv1.Ingress{} + Expect(GetObject(service.Name+"-canary", cIngress)).NotTo(HaveOccurred()) + Expect(cIngress.Annotations[fmt.Sprintf("%s/canary", nginxIngressAnnotationDefaultPrefix)]).Should(Equal("true")) + Expect(cIngress.Annotations[fmt.Sprintf("%s/canary-weight", nginxIngressAnnotationDefaultPrefix)]).Should(Equal(removePercentageSign(*rollout.Spec.Strategy.BlueGreen.Steps[2].Traffic))) + + // ------ step 4: replicas: 100%, traffic: 100% ------ + // resume rollout + ResumeRollout(rollout.Name) + By("resume rollout, and wait next step(4)") + WaitRolloutStepPaused(rollout.Name, 4) + + // workload + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Status.UnavailableReplicas).Should(BeNumerically("==", 10)) + Expect(workload.Status.ReadyReplicas).Should(BeNumerically("==", 10)) + Expect(workload.Spec.Paused).Should(BeFalse()) + Expect(workload.Spec.Strategy.Type).Should(Equal(apps.RollingUpdateDeploymentStrategyType)) + Expect(workload.Spec.MinReadySeconds).Should(Equal(int32(v1beta1.MaxReadySeconds))) + Expect(*workload.Spec.ProgressDeadlineSeconds).Should(Equal(int32(v1beta1.MaxProgressSeconds))) + Expect(reflect.DeepEqual(workload.Spec.Strategy.RollingUpdate.MaxUnavailable, &intstr.IntOrString{Type: intstr.Int, IntVal: 0})).Should(BeTrue()) + 
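The canary-weight assertions in these steps compare the nginx canary-weight annotation against the step's Traffic value with the trailing percent sign stripped (for example "50%" becomes "50"). The suite's removePercentageSign helper is assumed to be essentially this transformation; the snippet below is a sketch of that intent, not the helper's actual source:

// removePercentageSign-style transformation: "50%" -> "50", so a Rollout
// step's Traffic string can be compared with the ingress canary-weight value.
func removePercentageSign(traffic string) string {
	return strings.TrimSuffix(traffic, "%")
}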
Expect(reflect.DeepEqual(workload.Spec.Strategy.RollingUpdate.MaxSurge, &intstr.IntOrString{Type: intstr.String, StrVal: "100%"})).Should(BeTrue()) + + // rollout + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(rollout.Status.BlueGreenStatus.CurrentStepIndex).Should(BeNumerically("==", 4)) + Expect(rollout.Status.BlueGreenStatus.UpdatedReplicas).Should(BeNumerically("==", 5)) + Expect(rollout.Status.BlueGreenStatus.UpdatedReadyReplicas).Should(BeNumerically("==", 5)) + + // check stable, canary service & ingress + // stable service + Expect(GetObject(service.Name, service)).NotTo(HaveOccurred()) + Expect(service.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey]).Should(Equal(stableRevision)) + // canary service + cService = &v1.Service{} + Expect(GetObject(service.Name+"-canary", cService)).NotTo(HaveOccurred()) + Expect(cService.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey]).Should(Equal(canaryRevision)) + // canary ingress + cIngress = &netv1.Ingress{} + Expect(GetObject(service.Name+"-canary", cIngress)).NotTo(HaveOccurred()) + Expect(cIngress.Annotations[fmt.Sprintf("%s/canary", nginxIngressAnnotationDefaultPrefix)]).Should(Equal("true")) + Expect(cIngress.Annotations[fmt.Sprintf("%s/canary-weight", nginxIngressAnnotationDefaultPrefix)]).Should(Equal(removePercentageSign(*rollout.Spec.Strategy.BlueGreen.Steps[3].Traffic))) + + // ------ Rollback: traffic switch ------ + By("Jump to step 3") + JumpRolloutStep(rollout.Name, 3) + WaitRolloutStepPaused(rollout.Name, 3) + + // workload + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Status.UnavailableReplicas).Should(BeNumerically("==", 10)) + Expect(workload.Status.ReadyReplicas).Should(BeNumerically("==", 10)) + Expect(workload.Spec.Paused).Should(BeFalse()) + Expect(workload.Spec.Strategy.Type).Should(Equal(apps.RollingUpdateDeploymentStrategyType)) + Expect(workload.Spec.MinReadySeconds).Should(Equal(int32(v1beta1.MaxReadySeconds))) + Expect(*workload.Spec.ProgressDeadlineSeconds).Should(Equal(int32(v1beta1.MaxProgressSeconds))) + Expect(reflect.DeepEqual(workload.Spec.Strategy.RollingUpdate.MaxUnavailable, &intstr.IntOrString{Type: intstr.Int, IntVal: 0})).Should(BeTrue()) + Expect(reflect.DeepEqual(workload.Spec.Strategy.RollingUpdate.MaxSurge, &intstr.IntOrString{Type: intstr.String, StrVal: "100%"})).Should(BeTrue()) + + // rollout + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(rollout.Status.BlueGreenStatus.CurrentStepIndex).Should(BeNumerically("==", 3)) + Expect(rollout.Status.BlueGreenStatus.NextStepIndex).Should(BeNumerically("==", 4)) + Expect(rollout.Status.BlueGreenStatus.UpdatedReplicas).Should(BeNumerically("==", 5)) + Expect(rollout.Status.BlueGreenStatus.UpdatedReadyReplicas).Should(BeNumerically("==", 5)) + + // check stable, canary service & ingress + // stable service + Expect(GetObject(service.Name, service)).NotTo(HaveOccurred()) + Expect(service.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey]).Should(Equal(stableRevision)) + //canary service + cService = &v1.Service{} + Expect(GetObject(service.Name+"-canary", cService)).NotTo(HaveOccurred()) + Expect(cService.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey]).Should(Equal(canaryRevision)) + // canary ingress + cIngress = &netv1.Ingress{} + Expect(GetObject(service.Name+"-canary", cIngress)).NotTo(HaveOccurred()) + Expect(cIngress.Annotations[fmt.Sprintf("%s/canary", 
nginxIngressAnnotationDefaultPrefix)]).Should(Equal("true")) + Expect(cIngress.Annotations[fmt.Sprintf("%s/canary-weight", nginxIngressAnnotationDefaultPrefix)]).Should(Equal(removePercentageSign(*rollout.Spec.Strategy.BlueGreen.Steps[2].Traffic))) + + By("Jump to step 2") + JumpRolloutStep(rollout.Name, 2) + WaitRolloutStepPaused(rollout.Name, 2) + + // workload + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Status.UnavailableReplicas).Should(BeNumerically("==", 10)) + Expect(workload.Status.ReadyReplicas).Should(BeNumerically("==", 10)) + Expect(workload.Spec.Paused).Should(BeFalse()) + Expect(workload.Spec.Strategy.Type).Should(Equal(apps.RollingUpdateDeploymentStrategyType)) + Expect(workload.Spec.MinReadySeconds).Should(Equal(int32(v1beta1.MaxReadySeconds))) + Expect(*workload.Spec.ProgressDeadlineSeconds).Should(Equal(int32(v1beta1.MaxProgressSeconds))) + Expect(reflect.DeepEqual(workload.Spec.Strategy.RollingUpdate.MaxUnavailable, &intstr.IntOrString{Type: intstr.Int, IntVal: 0})).Should(BeTrue()) + Expect(reflect.DeepEqual(workload.Spec.Strategy.RollingUpdate.MaxSurge, &intstr.IntOrString{Type: intstr.String, StrVal: "100%"})).Should(BeTrue()) + + // rollout + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(rollout.Status.BlueGreenStatus.CurrentStepIndex).Should(BeNumerically("==", 2)) + Expect(rollout.Status.BlueGreenStatus.NextStepIndex).Should(BeNumerically("==", 3)) + Expect(rollout.Status.BlueGreenStatus.UpdatedReplicas).Should(BeNumerically("==", 5)) + Expect(rollout.Status.BlueGreenStatus.UpdatedReadyReplicas).Should(BeNumerically("==", 5)) + // check stable, canary service & ingress + // stable service + Expect(GetObject(service.Name, service)).NotTo(HaveOccurred()) + Expect(service.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey]).Should(Equal(stableRevision)) + //canary service + cService = &v1.Service{} + Expect(GetObject(service.Name+"-canary", cService)).NotTo(HaveOccurred()) + Expect(cService.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey]).Should(Equal(canaryRevision)) + // canary ingress + cIngress = &netv1.Ingress{} + Expect(GetObject(service.Name+"-canary", cIngress)).NotTo(HaveOccurred()) + Expect(cIngress.Annotations[fmt.Sprintf("%s/canary", nginxIngressAnnotationDefaultPrefix)]).Should(Equal("true")) + Expect(cIngress.Annotations[fmt.Sprintf("%s/canary-weight", nginxIngressAnnotationDefaultPrefix)]).Should(Equal(removePercentageSign(*rollout.Spec.Strategy.BlueGreen.Steps[1].Traffic))) + + // ------ Rollback: PaaS rollback ------ + By("update workload env NODE_NAME from(version2) -> to(version1)") + newEnvs = mergeEnvVar(workload.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "NODE_NAME", Value: "version1"}) + workload.Spec.Template.Spec.Containers[0].Env = newEnvs + UpdateDeployment(workload) + + WaitRolloutStatusPhase(rollout.Name, v1beta1.RolloutPhaseHealthy) + WaitDeploymentAllPodsReady(workload) + + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + cond := getRolloutCondition(rollout.Status, v1beta1.RolloutConditionProgressing) + Expect(string(cond.Reason)).Should(Equal(string(v1beta1.CanaryStepStateCompleted))) + Expect(string(cond.Status)).Should(Equal(string(metav1.ConditionFalse))) + // canary ingress and canary service should be deleted + cIngress = &netv1.Ingress{} + Expect(GetObject(service.Name+"-canary", cIngress)).To(HaveOccurred()) + cService = &v1.Service{} + 
Expect(GetObject(service.Name+"-canary", cService)).To(HaveOccurred()) + + // check service update + Expect(GetObject(service.Name, service)).NotTo(HaveOccurred()) + Expect(service.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey]).Should(Equal("")) + }) + + It("bluegreen deployment continuous rolling case", func() { + By("Creating Rollout...") + rollout := &v1beta1.Rollout{} + Expect(ReadYamlToObject("./test_data/rollout/rollout_v1beta1_bluegreen_base.yaml", rollout)).ToNot(HaveOccurred()) + rollout.Spec.WorkloadRef = v1beta1.ObjectRef{ + APIVersion: "apps/v1", + Kind: "Deployment", + Name: "echoserver", + } + CreateObject(rollout) + + By("Creating workload and waiting for all pods ready...") + // service + service := &v1.Service{} + Expect(ReadYamlToObject("./test_data/rollout/service.yaml", service)).ToNot(HaveOccurred()) + CreateObject(service) + // ingress + ingress := &netv1.Ingress{} + Expect(ReadYamlToObject("./test_data/rollout/nginx_ingress.yaml", ingress)).ToNot(HaveOccurred()) + CreateObject(ingress) + // workload + workload := &apps.Deployment{} + Expect(ReadYamlToObject("./test_data/rollout/deployment.yaml", workload)).ToNot(HaveOccurred()) + CreateObject(workload) + WaitDeploymentAllPodsReady(workload) + + // check rollout status + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(rollout.Status.Phase).Should(Equal(v1beta1.RolloutPhaseHealthy)) + By("check rollout status & paused success") + + // v1 -> v2, start rollout action + newEnvs := mergeEnvVar(workload.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "NODE_NAME", Value: "version2"}) + workload.Spec.Template.Spec.Containers[0].Env = newEnvs + UpdateDeployment(workload) + By("Update workload env NODE_NAME from(version1) -> to(version2)") + // ------ step 1: replicas: 50%, traffic: 0% ------ + // wait step 1 complete + WaitRolloutStepPaused(rollout.Name, 1) + stableRevision := GetStableRSRevision(workload) + By(stableRevision) + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(rollout.Status.CanaryStatus).Should(BeNil()) + Expect(rollout.Status.BlueGreenStatus.StableRevision).Should(Equal(stableRevision)) + + // check workload status & paused + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 3)) + Expect(workload.Status.UnavailableReplicas).Should(BeNumerically("==", 8)) + Expect(workload.Status.ReadyReplicas).Should(BeNumerically("==", 8)) + + setting, _ := control.GetOriginalSetting(workload) + Expect(setting.MinReadySeconds).Should(BeNumerically("==", int32(0))) + Expect(*setting.ProgressDeadlineSeconds).Should(BeNumerically("==", int32(600))) + Expect(reflect.DeepEqual(setting.MaxUnavailable, &intstr.IntOrString{Type: intstr.Int, IntVal: 0})).Should(BeTrue()) + Expect(reflect.DeepEqual(setting.MaxSurge, &intstr.IntOrString{Type: intstr.Int, IntVal: 1})).Should(BeTrue()) + + Expect(workload.Spec.Paused).Should(BeFalse()) + Expect(workload.Spec.Strategy.Type).Should(Equal(apps.RollingUpdateDeploymentStrategyType)) + Expect(workload.Spec.MinReadySeconds).Should(Equal(int32(v1beta1.MaxReadySeconds))) + Expect(*workload.Spec.ProgressDeadlineSeconds).Should(Equal(int32(v1beta1.MaxProgressSeconds))) + Expect(reflect.DeepEqual(workload.Spec.Strategy.RollingUpdate.MaxUnavailable, &intstr.IntOrString{Type: intstr.Int, IntVal: 0})).Should(BeTrue()) + Expect(reflect.DeepEqual(workload.Spec.Strategy.RollingUpdate.MaxSurge, 
&intstr.IntOrString{Type: intstr.String, StrVal: "50%"})).Should(BeTrue()) + By("check workload status & paused success") + // ----- Continuous Release ------ + updatedRevision := rollout.Status.BlueGreenStatus.UpdatedRevision + By(updatedRevision) + newEnvs = mergeEnvVar(workload.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "NODE_NAME", Value: "version3"}) + workload.Spec.Template.Spec.Containers[0].Env = newEnvs + UpdateDeployment(workload) + By("update workload env NODE_NAME from(version2) -> to(version3)") + WaitRolloutStepPaused(rollout.Name, 1) + stableRevision = GetStableRSRevision(workload) + By(stableRevision) + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(rollout.Status.CanaryStatus).Should(BeNil()) + Expect(rollout.Status.BlueGreenStatus.StableRevision).Should(Equal(stableRevision)) + + // check workload status & paused + time.Sleep(time.Second * 1) // ensure the Deployment controller notice the update + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 0)) // no version3 pods created, since we don't support continuous release yet + Expect(workload.Status.UnavailableReplicas).Should(BeNumerically("==", 8)) + Expect(workload.Status.ReadyReplicas).Should(BeNumerically("==", 8)) + + setting, _ = control.GetOriginalSetting(workload) + Expect(setting.MinReadySeconds).Should(BeNumerically("==", int32(0))) + Expect(*setting.ProgressDeadlineSeconds).Should(BeNumerically("==", int32(600))) + Expect(reflect.DeepEqual(setting.MaxUnavailable, &intstr.IntOrString{Type: intstr.Int, IntVal: 0})).Should(BeTrue()) + Expect(reflect.DeepEqual(setting.MaxSurge, &intstr.IntOrString{Type: intstr.Int, IntVal: 1})).Should(BeTrue()) + + Expect(workload.Spec.Paused).Should(BeTrue()) // paused in the webhook + Expect(workload.Spec.Strategy.Type).Should(Equal(apps.RollingUpdateDeploymentStrategyType)) + Expect(workload.Spec.MinReadySeconds).Should(Equal(int32(v1beta1.MaxReadySeconds))) + Expect(*workload.Spec.ProgressDeadlineSeconds).Should(Equal(int32(v1beta1.MaxProgressSeconds))) + Expect(reflect.DeepEqual(workload.Spec.Strategy.RollingUpdate.MaxUnavailable, &intstr.IntOrString{Type: intstr.Int, IntVal: 0})).Should(BeTrue()) + Expect(reflect.DeepEqual(workload.Spec.Strategy.RollingUpdate.MaxSurge, &intstr.IntOrString{Type: intstr.String, StrVal: "50%"})).Should(BeTrue()) + + // it's ok to patch the Deployment to version2 back, and even release remaining steps then + newEnvs = mergeEnvVar(workload.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "NODE_NAME", Value: "version2"}) + workload.Spec.Template.Spec.Containers[0].Env = newEnvs + UpdateDeployment(workload) + By("update workload env NODE_NAME from(version3) -> to(version2)") + WaitRolloutStepPaused(rollout.Name, 1) + stableRevision = GetStableRSRevision(workload) + By(stableRevision) + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(rollout.Status.CanaryStatus).Should(BeNil()) + Expect(rollout.Status.BlueGreenStatus.StableRevision).Should(Equal(stableRevision)) + + // check workload status & paused + time.Sleep(time.Second * 1) // ensure the Deployment controller notice the update + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 3)) + Expect(workload.Status.UnavailableReplicas).Should(BeNumerically("==", 8)) + Expect(workload.Status.ReadyReplicas).Should(BeNumerically("==", 8)) + + setting, _ = 
control.GetOriginalSetting(workload) + Expect(setting.MinReadySeconds).Should(BeNumerically("==", int32(0))) + Expect(*setting.ProgressDeadlineSeconds).Should(BeNumerically("==", int32(600))) + Expect(reflect.DeepEqual(setting.MaxUnavailable, &intstr.IntOrString{Type: intstr.Int, IntVal: 0})).Should(BeTrue()) + Expect(reflect.DeepEqual(setting.MaxSurge, &intstr.IntOrString{Type: intstr.Int, IntVal: 1})).Should(BeTrue()) + + Expect(workload.Spec.Paused).Should(BeTrue()) + Expect(workload.Spec.Strategy.Type).Should(Equal(apps.RollingUpdateDeploymentStrategyType)) + Expect(workload.Spec.MinReadySeconds).Should(Equal(int32(v1beta1.MaxReadySeconds))) + Expect(*workload.Spec.ProgressDeadlineSeconds).Should(Equal(int32(v1beta1.MaxProgressSeconds))) + Expect(reflect.DeepEqual(workload.Spec.Strategy.RollingUpdate.MaxUnavailable, &intstr.IntOrString{Type: intstr.Int, IntVal: 0})).Should(BeTrue()) + Expect(reflect.DeepEqual(workload.Spec.Strategy.RollingUpdate.MaxSurge, &intstr.IntOrString{Type: intstr.String, StrVal: "50%"})).Should(BeTrue()) + + // of course user can rollback to version1 directly + newEnvs = mergeEnvVar(workload.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "NODE_NAME", Value: "version1"}) + workload.Spec.Template.Spec.Containers[0].Env = newEnvs + UpdateDeployment(workload) + By("rollback: update workload env NODE_NAME from(version2) -> to(version1)") + WaitRolloutStatusPhase(rollout.Name, v1beta1.RolloutPhaseHealthy) + WaitDeploymentAllPodsReady(workload) + + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(len(rollout.GetAnnotations()[v1beta1.OriginalDeploymentStrategyAnnotation])).Should(BeNumerically("==", 0)) // the annotation should be removed + cond := getRolloutCondition(rollout.Status, v1beta1.RolloutConditionProgressing) + Expect(string(cond.Reason)).Should(Equal(string(v1beta1.CanaryStepStateCompleted))) + Expect(string(cond.Status)).Should(Equal(string(metav1.ConditionFalse))) + }) + + It("bluegreen scale up and down", func() { + By("Creating Rollout...") + rollout := &v1beta1.Rollout{} + Expect(ReadYamlToObject("./test_data/rollout/rollout_v1beta1_bluegreen_base.yaml", rollout)).ToNot(HaveOccurred()) + rollout.Spec.WorkloadRef = v1beta1.ObjectRef{ + APIVersion: "apps/v1", + Kind: "Deployment", + Name: "echoserver", + } + CreateObject(rollout) + + By("Creating workload and waiting for all pods ready...") + // service + service := &v1.Service{} + Expect(ReadYamlToObject("./test_data/rollout/service.yaml", service)).ToNot(HaveOccurred()) + CreateObject(service) + // ingress + ingress := &netv1.Ingress{} + Expect(ReadYamlToObject("./test_data/rollout/nginx_ingress.yaml", ingress)).ToNot(HaveOccurred()) + CreateObject(ingress) + // workload + workload := &apps.Deployment{} + Expect(ReadYamlToObject("./test_data/rollout/deployment.yaml", workload)).ToNot(HaveOccurred()) + CreateObject(workload) + WaitDeploymentAllPodsReady(workload) + + // check rollout status + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(rollout.Status.Phase).Should(Equal(v1beta1.RolloutPhaseHealthy)) + By("check rollout status & paused success") + + // v1 -> v2, start rollout action + newEnvs := mergeEnvVar(workload.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "NODE_NAME", Value: "version2"}) + workload.Spec.Template.Spec.Containers[0].Env = newEnvs + UpdateDeployment(workload) + By("Update workload env NODE_NAME from(version1) -> to(version2)") + // ------ step 1: 
replicas: 50%, traffic: 0% ------ + // wait step 1 complete + WaitRolloutStepPaused(rollout.Name, 1) + stableRevision := GetStableRSRevision(workload) + By(stableRevision) + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(rollout.Status.CanaryStatus).Should(BeNil()) + Expect(rollout.Status.BlueGreenStatus.StableRevision).Should(Equal(stableRevision)) + + // check workload status & paused + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 3)) + Expect(workload.Status.UnavailableReplicas).Should(BeNumerically("==", 8)) + Expect(workload.Status.ReadyReplicas).Should(BeNumerically("==", 8)) + + // check rollout status + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(rollout.Status.Phase).Should(Equal(v1beta1.RolloutPhaseProgressing)) + Expect(rollout.Status.BlueGreenStatus.RolloutHash).Should(Equal(rollout.Annotations[util.RolloutHashAnnotation])) + Expect(rollout.Status.BlueGreenStatus.CurrentStepIndex).Should(BeNumerically("==", 1)) + Expect(rollout.Status.BlueGreenStatus.NextStepIndex).Should(BeNumerically("==", 2)) + Expect(rollout.Status.BlueGreenStatus.UpdatedReplicas).Should(BeNumerically("==", 3)) + Expect(rollout.Status.BlueGreenStatus.UpdatedReadyReplicas).Should(BeNumerically("==", 3)) + // ------ 50% maxSurge, scale up: from 5 to 6 ------ + workload.Spec.Replicas = utilpointer.Int32(6) + UpdateDeployment(workload) + By("scale up: from 5 to 6") + time.Sleep(time.Second * 3) + WaitDeploymentBlueGreenReplicas(workload) + + // check rollout status + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(rollout.Status.BlueGreenStatus.CurrentStepIndex).Should(BeNumerically("==", 1)) + Expect(rollout.Status.BlueGreenStatus.NextStepIndex).Should(BeNumerically("==", 2)) + Expect(rollout.Status.BlueGreenStatus.UpdatedReplicas).Should(BeNumerically("==", 3)) + Expect(rollout.Status.BlueGreenStatus.UpdatedReadyReplicas).Should(BeNumerically("==", 3)) + // check workload status + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 3)) + Expect(workload.Status.UnavailableReplicas).Should(BeNumerically("==", 9)) + Expect(workload.Status.ReadyReplicas).Should(BeNumerically("==", 9)) + + // ------ scale up: from 6 to 7 ------ + workload.Spec.Replicas = utilpointer.Int32(7) + UpdateDeployment(workload) + time.Sleep(time.Second * 3) + WaitDeploymentBlueGreenReplicas(workload) + // check rollout status + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(rollout.Status.BlueGreenStatus.UpdatedReplicas).Should(BeNumerically("==", 4)) + Expect(rollout.Status.BlueGreenStatus.UpdatedReadyReplicas).Should(BeNumerically("==", 4)) + // check workload status + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 4)) + Expect(workload.Status.UnavailableReplicas).Should(BeNumerically("==", 11)) + Expect(workload.Status.ReadyReplicas).Should(BeNumerically("==", 11)) + + // ------ scale up: from 7 to 8 ------ + workload.Spec.Replicas = utilpointer.Int32(8) + UpdateDeployment(workload) + By("scale up: from 7 to 8") + time.Sleep(time.Second * 3) + WaitDeploymentBlueGreenReplicas(workload) + // check rollout status + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + 
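The UpdatedReplicas==4 and ReadyReplicas==12 expectations just below follow from percentage rounding: during step 1 the Deployment runs with maxSurge: 50%, percentages are rounded up, and the total pod count is the stable replicas plus that surge. A quick check of the arithmetic with the apimachinery helper normally used for this rounding (variable names here are only illustrative):

// updated = ceil(replicas * 50%), total = replicas + updated
maxSurge := intstr.FromString("50%")
for _, replicas := range []int{6, 7, 8, 4} {
	surge, err := intstr.GetScaledValueFromIntOrPercent(&maxSurge, replicas, true) // round up
	if err != nil {
		panic(err)
	}
	fmt.Printf("replicas=%d -> updated=%d, total=%d\n", replicas, surge, replicas+surge)
}
// replicas=6 -> updated=3, total=9
// replicas=7 -> updated=4, total=11
// replicas=8 -> updated=4, total=12
// replicas=4 -> updated=2, total=6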
Expect(rollout.Status.BlueGreenStatus.UpdatedReplicas).Should(BeNumerically("==", 4)) + Expect(rollout.Status.BlueGreenStatus.UpdatedReadyReplicas).Should(BeNumerically("==", 4)) + // check workload status + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 4)) + Expect(workload.Status.UnavailableReplicas).Should(BeNumerically("==", 12)) + Expect(workload.Status.ReadyReplicas).Should(BeNumerically("==", 12)) + + // ------ scale down: from 8 to 4 ------ + workload.Spec.Replicas = utilpointer.Int32(4) + UpdateDeployment(workload) + By("scale down: from 8 to 4") + time.Sleep(time.Second * 3) + WaitDeploymentBlueGreenReplicas(workload) + // check rollout status + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(rollout.Status.BlueGreenStatus.UpdatedReplicas).Should(BeNumerically("==", 2)) + Expect(rollout.Status.BlueGreenStatus.UpdatedReadyReplicas).Should(BeNumerically("==", 2)) + // check workload status + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 2)) + Expect(workload.Status.UnavailableReplicas).Should(BeNumerically("==", 6)) + Expect(workload.Status.ReadyReplicas).Should(BeNumerically("==", 6)) + + // ------ step 2: replicas: 100%, traffic: 0% ------ + // resume rollout canary + ResumeRollout(rollout.Name) + By("resume rollout, and wait next step(2)") + WaitRolloutStepPaused(rollout.Name, 2) + + // workload + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 4)) + Expect(workload.Status.UnavailableReplicas).Should(BeNumerically("==", 8)) + Expect(workload.Status.ReadyReplicas).Should(BeNumerically("==", 8)) + Expect(workload.Spec.Paused).Should(BeFalse()) + Expect(workload.Spec.Strategy.Type).Should(Equal(apps.RollingUpdateDeploymentStrategyType)) + Expect(workload.Spec.MinReadySeconds).Should(Equal(int32(v1beta1.MaxReadySeconds))) + Expect(*workload.Spec.ProgressDeadlineSeconds).Should(Equal(int32(v1beta1.MaxProgressSeconds))) + Expect(reflect.DeepEqual(workload.Spec.Strategy.RollingUpdate.MaxUnavailable, &intstr.IntOrString{Type: intstr.Int, IntVal: 0})).Should(BeTrue()) + Expect(reflect.DeepEqual(workload.Spec.Strategy.RollingUpdate.MaxSurge, &intstr.IntOrString{Type: intstr.String, StrVal: "100%"})).Should(BeTrue()) + + // rollout + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(rollout.Status.BlueGreenStatus.CurrentStepIndex).Should(BeNumerically("==", 2)) + Expect(rollout.Status.BlueGreenStatus.NextStepIndex).Should(BeNumerically("==", 3)) + Expect(rollout.Status.BlueGreenStatus.UpdatedReplicas).Should(BeNumerically("==", 4)) + Expect(rollout.Status.BlueGreenStatus.UpdatedReadyReplicas).Should(BeNumerically("==", 4)) + + // ------ scale up: from 4 to 7 ------ + workload.Spec.Replicas = utilpointer.Int32(7) + UpdateDeployment(workload) + time.Sleep(time.Second * 3) + WaitDeploymentBlueGreenReplicas(workload) + // check rollout status + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(rollout.Status.BlueGreenStatus.UpdatedReplicas).Should(BeNumerically("==", 7)) + Expect(rollout.Status.BlueGreenStatus.UpdatedReadyReplicas).Should(BeNumerically("==", 7)) + // check workload status + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 7)) + 
Expect(workload.Status.UnavailableReplicas).Should(BeNumerically("==", 14)) + Expect(workload.Status.ReadyReplicas).Should(BeNumerically("==", 14)) + + // ------ scale up: from 7 to 8 ------ + workload.Spec.Replicas = utilpointer.Int32(8) + UpdateDeployment(workload) + time.Sleep(time.Second * 3) + WaitDeploymentBlueGreenReplicas(workload) + // check rollout status + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(rollout.Status.BlueGreenStatus.UpdatedReplicas).Should(BeNumerically("==", 8)) + Expect(rollout.Status.BlueGreenStatus.UpdatedReadyReplicas).Should(BeNumerically("==", 8)) + // check workload status + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 8)) + Expect(workload.Status.UnavailableReplicas).Should(BeNumerically("==", 16)) + Expect(workload.Status.ReadyReplicas).Should(BeNumerically("==", 16)) + + // ------ scale down: from 8 to 4 ------ + workload.Spec.Replicas = utilpointer.Int32(4) + UpdateDeployment(workload) + time.Sleep(time.Second * 3) + WaitDeploymentBlueGreenReplicas(workload) + // check rollout status + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(rollout.Status.BlueGreenStatus.UpdatedReplicas).Should(BeNumerically("==", 4)) + Expect(rollout.Status.BlueGreenStatus.UpdatedReadyReplicas).Should(BeNumerically("==", 4)) + // check workload status + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 4)) + Expect(workload.Status.UnavailableReplicas).Should(BeNumerically("==", 8)) + Expect(workload.Status.ReadyReplicas).Should(BeNumerically("==", 8)) + + }) + + It("bluegreen delete rollout case", func() { + By("Creating Rollout...") + rollout := &v1beta1.Rollout{} + Expect(ReadYamlToObject("./test_data/rollout/rollout_v1beta1_bluegreen_base.yaml", rollout)).ToNot(HaveOccurred()) + rollout.Spec.WorkloadRef = v1beta1.ObjectRef{ + APIVersion: "apps/v1", + Kind: "Deployment", + Name: "echoserver", + } + CreateObject(rollout) + + By("Creating workload and waiting for all pods ready...") + // service + service := &v1.Service{} + Expect(ReadYamlToObject("./test_data/rollout/service.yaml", service)).ToNot(HaveOccurred()) + CreateObject(service) + // ingress + ingress := &netv1.Ingress{} + Expect(ReadYamlToObject("./test_data/rollout/nginx_ingress.yaml", ingress)).ToNot(HaveOccurred()) + CreateObject(ingress) + // workload + workload := &apps.Deployment{} + Expect(ReadYamlToObject("./test_data/rollout/deployment.yaml", workload)).ToNot(HaveOccurred()) + CreateObject(workload) + WaitDeploymentAllPodsReady(workload) + + // check rollout status + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(rollout.Status.Phase).Should(Equal(v1beta1.RolloutPhaseHealthy)) + By("check rollout status & paused success") + + // v1 -> v2, start rollout action + newEnvs := mergeEnvVar(workload.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "NODE_NAME", Value: "version2"}) + workload.Spec.Template.Spec.Containers[0].Env = newEnvs + UpdateDeployment(workload) + By("Update workload env NODE_NAME from(version1) -> to(version2)") + // ------ step 1: replicas: 50%, traffic: 0% ------ + // wait step 1 complete + WaitRolloutStepPaused(rollout.Name, 1) + stableRevision := GetStableRSRevision(workload) + By(stableRevision) + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + 
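Further down in this case, after the Rollout is deleted, the test expects the OriginalDeploymentStrategyAnnotation to be gone and the user's minReadySeconds, progressDeadlineSeconds, maxSurge and maxUnavailable to be back on the Deployment spec. That implies the original values are captured before the controller rewrites the strategy and restored on finalization. A rough sketch of the capture step, assuming the annotation simply carries the JSON-encoded settings (the struct fields mirror what control.GetOriginalSetting returns in these tests; the helper name and storage details are illustrative):

// OriginalDeploymentStrategy mirrors the fields these tests read back via
// control.GetOriginalSetting: the user's own strategy values, saved before the
// blue-green controller overwrites them for the release.
type OriginalDeploymentStrategy struct {
	MinReadySeconds         int32               `json:"minReadySeconds,omitempty"`
	ProgressDeadlineSeconds *int32              `json:"progressDeadlineSeconds,omitempty"`
	MaxSurge                *intstr.IntOrString `json:"maxSurge,omitempty"`
	MaxUnavailable          *intstr.IntOrString `json:"maxUnavailable,omitempty"`
}

// saveOriginalSetting stores the current values in the annotation so that a
// later finalize step can restore them and drop the annotation again, which is
// what the assertions in this case verify afterwards.
// (Assumes spec.strategy.rollingUpdate is populated, as it is for the test Deployment.)
func saveOriginalSetting(d *apps.Deployment) error {
	s := OriginalDeploymentStrategy{
		MinReadySeconds:         d.Spec.MinReadySeconds,
		ProgressDeadlineSeconds: d.Spec.ProgressDeadlineSeconds,
		MaxSurge:                d.Spec.Strategy.RollingUpdate.MaxSurge,
		MaxUnavailable:          d.Spec.Strategy.RollingUpdate.MaxUnavailable,
	}
	data, err := json.Marshal(s)
	if err != nil {
		return err
	}
	if d.Annotations == nil {
		d.Annotations = map[string]string{}
	}
	d.Annotations[v1beta1.OriginalDeploymentStrategyAnnotation] = string(data)
	return nil
}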
Expect(rollout.Status.CanaryStatus).Should(BeNil()) + Expect(rollout.Status.BlueGreenStatus.StableRevision).Should(Equal(stableRevision)) + + // check workload status & paused + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 3)) + Expect(workload.Status.UnavailableReplicas).Should(BeNumerically("==", 8)) + Expect(workload.Status.ReadyReplicas).Should(BeNumerically("==", 8)) + + // check rollout status + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(rollout.Status.Phase).Should(Equal(v1beta1.RolloutPhaseProgressing)) + Expect(rollout.Status.BlueGreenStatus.RolloutHash).Should(Equal(rollout.Annotations[util.RolloutHashAnnotation])) + Expect(rollout.Status.BlueGreenStatus.CurrentStepIndex).Should(BeNumerically("==", 1)) + Expect(rollout.Status.BlueGreenStatus.NextStepIndex).Should(BeNumerically("==", 2)) + Expect(rollout.Status.BlueGreenStatus.UpdatedReplicas).Should(BeNumerically("==", 3)) + Expect(rollout.Status.BlueGreenStatus.UpdatedReadyReplicas).Should(BeNumerically("==", 3)) + + By("delete rollout and check deployment") + k8sClient.Delete(context.TODO(), rollout) + WaitRolloutNotFound(rollout.Name) + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + // check annotation + settingStr := workload.Annotations[v1beta1.OriginalDeploymentStrategyAnnotation] + Expect(len(settingStr)).Should(BeNumerically("==", 0)) + // check spec + Expect(workload.Spec.Strategy.Type).Should(Equal(apps.RollingUpdateDeploymentStrategyType)) + Expect(workload.Spec.Paused).Should(BeFalse()) + Expect(workload.Spec.MinReadySeconds).Should(Equal(int32(0))) + Expect(*workload.Spec.ProgressDeadlineSeconds).Should(Equal(int32(600))) + Expect(reflect.DeepEqual(workload.Spec.Strategy.RollingUpdate.MaxUnavailable, &intstr.IntOrString{Type: intstr.Int, IntVal: 0})).Should(BeTrue()) + Expect(reflect.DeepEqual(workload.Spec.Strategy.RollingUpdate.MaxSurge, &intstr.IntOrString{Type: intstr.Int, IntVal: 1})).Should(BeTrue()) + for _, env := range workload.Spec.Template.Spec.Containers[0].Env { + if env.Name == "NODE_NAME" { + Expect(env.Value).Should(Equal("version2")) + } + } + // check service & ingress & deployment + // ingress + Expect(GetObject(ingress.Name, ingress)).NotTo(HaveOccurred()) + cIngress := &netv1.Ingress{} + Expect(GetObject(fmt.Sprintf("%s-canary", ingress.Name), cIngress)).To(HaveOccurred()) + // service + Expect(GetObject(service.Name, service)).NotTo(HaveOccurred()) + Expect(service.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey]).Should(Equal("")) + cService := &v1.Service{} + Expect(GetObject(fmt.Sprintf("%s-canary", service.Name), cService)).To(HaveOccurred()) + WaitDeploymentAllPodsReady(workload) + // status + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 5)) + }) + + It("bluegreen disable rollout case", func() { + By("Creating Rollout...") + rollout := &v1beta1.Rollout{} + Expect(ReadYamlToObject("./test_data/rollout/rollout_v1beta1_bluegreen_base.yaml", rollout)).ToNot(HaveOccurred()) + rollout.Spec.WorkloadRef = v1beta1.ObjectRef{ + APIVersion: "apps/v1", + Kind: "Deployment", + Name: "echoserver", + } + CreateObject(rollout) + + By("Creating workload and waiting for all pods ready...") + // service + service := &v1.Service{} + Expect(ReadYamlToObject("./test_data/rollout/service.yaml", 
service)).ToNot(HaveOccurred()) + CreateObject(service) + // ingress + ingress := &netv1.Ingress{} + Expect(ReadYamlToObject("./test_data/rollout/nginx_ingress.yaml", ingress)).ToNot(HaveOccurred()) + CreateObject(ingress) + // workload + workload := &apps.Deployment{} + Expect(ReadYamlToObject("./test_data/rollout/deployment.yaml", workload)).ToNot(HaveOccurred()) + CreateObject(workload) + WaitDeploymentAllPodsReady(workload) + + // check rollout status + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(rollout.Status.Phase).Should(Equal(v1beta1.RolloutPhaseHealthy)) + By("check rollout status & paused success") + + // v1 -> v2, start rollout action + newEnvs := mergeEnvVar(workload.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "NODE_NAME", Value: "version2"}) + workload.Spec.Template.Spec.Containers[0].Env = newEnvs + UpdateDeployment(workload) + By("Update workload env NODE_NAME from(version1) -> to(version2)") + // ------ step 1: replicas: 50%, traffic: 0% ------ + // wait step 1 complete + WaitRolloutStepPaused(rollout.Name, 1) + stableRevision := GetStableRSRevision(workload) + By(stableRevision) + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(rollout.Status.CanaryStatus).Should(BeNil()) + Expect(rollout.Status.BlueGreenStatus.StableRevision).Should(Equal(stableRevision)) + + // check workload status & paused + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 3)) + Expect(workload.Status.UnavailableReplicas).Should(BeNumerically("==", 8)) + Expect(workload.Status.ReadyReplicas).Should(BeNumerically("==", 8)) + + // check rollout status + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(rollout.Status.Phase).Should(Equal(v1beta1.RolloutPhaseProgressing)) + Expect(rollout.Status.BlueGreenStatus.RolloutHash).Should(Equal(rollout.Annotations[util.RolloutHashAnnotation])) + Expect(rollout.Status.BlueGreenStatus.CurrentStepIndex).Should(BeNumerically("==", 1)) + Expect(rollout.Status.BlueGreenStatus.NextStepIndex).Should(BeNumerically("==", 2)) + Expect(rollout.Status.BlueGreenStatus.UpdatedReplicas).Should(BeNumerically("==", 3)) + Expect(rollout.Status.BlueGreenStatus.UpdatedReadyReplicas).Should(BeNumerically("==", 3)) + + // By("before disable rollout") + By("disable rollout and check deployment") + rollout.Spec.Disabled = true + UpdateRollout(rollout) + WaitRolloutStatusPhase(rollout.Name, v1beta1.RolloutPhaseDisabled) + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + // check annotation + settingStr := workload.Annotations[v1beta1.OriginalDeploymentStrategyAnnotation] + Expect(len(settingStr)).Should(BeNumerically("==", 0)) + // check spec + Expect(workload.Spec.Strategy.Type).Should(Equal(apps.RollingUpdateDeploymentStrategyType)) + Expect(workload.Spec.Paused).Should(BeFalse()) + Expect(workload.Spec.MinReadySeconds).Should(Equal(int32(0))) + Expect(*workload.Spec.ProgressDeadlineSeconds).Should(Equal(int32(600))) + + limit := 0 + for !reflect.DeepEqual(workload.Spec.Strategy.RollingUpdate.MaxUnavailable, &intstr.IntOrString{Type: intstr.Int, IntVal: 0}) { + By(fmt.Sprintf("workload.Spec.Strategy.RollingUpdate.MaxUnavailable: %v, workload.Spec.Strategy.RollingUpdate.MaxSurge: %v", workload.Spec.Strategy.RollingUpdate.MaxUnavailable, 
workload.Spec.Strategy.RollingUpdate.MaxSurge)) + // poll briefly until the original strategy is restored + time.Sleep(time.Millisecond * 500) + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + if limit > 10 { + Fail("timed out waiting for the original deployment strategy to be restored") + } + limit++ + time.Sleep(time.Second) + } + + Expect(reflect.DeepEqual(workload.Spec.Strategy.RollingUpdate.MaxUnavailable, &intstr.IntOrString{Type: intstr.Int, IntVal: 0})).Should(BeTrue()) + Expect(reflect.DeepEqual(workload.Spec.Strategy.RollingUpdate.MaxSurge, &intstr.IntOrString{Type: intstr.Int, IntVal: 1})).Should(BeTrue()) + for _, env := range workload.Spec.Template.Spec.Containers[0].Env { + if env.Name == "NODE_NAME" { + Expect(env.Value).Should(Equal("version2")) + } + } + // check service & ingress & deployment + // ingress + Expect(GetObject(ingress.Name, ingress)).NotTo(HaveOccurred()) + cIngress := &netv1.Ingress{} + Expect(GetObject(fmt.Sprintf("%s-canary", ingress.Name), cIngress)).To(HaveOccurred()) + // service + Expect(GetObject(service.Name, service)).NotTo(HaveOccurred()) + Expect(service.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey]).Should(Equal("")) + cService := &v1.Service{} + Expect(GetObject(fmt.Sprintf("%s-canary", service.Name), cService)).To(HaveOccurred()) + WaitDeploymentAllPodsReady(workload) + // status + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 5)) + }) + }) + + KruiseDescribe("Bluegreen Release - Deployment - HPA disable", func() { + It("bluegreen disable hpa test case - autoscaling/v1 for v1.19", func() { + By("Creating Rollout...") + rollout := &v1beta1.Rollout{} + Expect(ReadYamlToObject("./test_data/rollout/rollout_v1beta1_bluegreen_base.yaml", rollout)).ToNot(HaveOccurred()) + rollout.Spec.WorkloadRef = v1beta1.ObjectRef{ + APIVersion: "apps/v1", + Kind: "Deployment", + Name: "echoserver", + } + CreateObject(rollout) + + By("Creating workload and waiting for all pods ready...") + // service + service := &v1.Service{} + Expect(ReadYamlToObject("./test_data/rollout/service.yaml", service)).ToNot(HaveOccurred()) + CreateObject(service) + // ingress + ingress := &netv1.Ingress{} + Expect(ReadYamlToObject("./test_data/rollout/nginx_ingress.yaml", ingress)).ToNot(HaveOccurred()) + CreateObject(ingress) + // workload + workload := &apps.Deployment{} + Expect(ReadYamlToObject("./test_data/rollout/deployment.yaml", workload)).ToNot(HaveOccurred()) + CreateObject(workload) + WaitDeploymentAllPodsReady(workload) + + // check rollout status + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(rollout.Status.Phase).Should(Equal(v1beta1.RolloutPhaseHealthy)) + By("check rollout status & paused success") + + By("Creating v1 HPA...") + hpa := &scalingV1.HorizontalPodAutoscaler{} + Expect(ReadYamlToObject("./test_data/rollout/hpa_v1.yaml", hpa)).ToNot(HaveOccurred()) + CreateObject(hpa) + time.Sleep(time.Second * 3) + + // v1 -> v2, start rollout action + newEnvs := mergeEnvVar(workload.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "NODE_NAME", Value: "version2"}) + workload.Spec.Template.Spec.Containers[0].Env = newEnvs + UpdateDeployment(workload) + By("Update workload env NODE_NAME from(version1) -> to(version2)") + // ------ step 1: replicas: 50%, traffic: 0% ------ + // wait step 1 complete + WaitRolloutStepPaused(rollout.Name, 1) + stableRevision := GetStableRSRevision(workload) + By(stableRevision) + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + 
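The HPA checks a few lines below expect scaleTargetRef.name to be rewritten to echoserver-DisableByRollout while the blue-green release is in progress, and restored once the Rollout is deleted or disabled; pointing the HPA at a name that does not exist is what keeps it from fighting the rollout controller over replica counts. A minimal sketch of the disable half of that round-trip (the suffix is taken from the test; the helper name and client plumbing are assumptions, with scalingV1 being the autoscaling/v1 alias already used in this file and client the controller-runtime client package):

// disableHPA points the HPA at a non-existent target so it stops scaling the
// workload during the release; restoring is the reverse: trim the suffix off
// scaleTargetRef.name again.
const hpaDisableSuffix = "-DisableByRollout"

func disableHPA(ctx context.Context, c client.Client, hpa *scalingV1.HorizontalPodAutoscaler) error {
	if strings.HasSuffix(hpa.Spec.ScaleTargetRef.Name, hpaDisableSuffix) {
		return nil // already disabled
	}
	patched := hpa.DeepCopy()
	patched.Spec.ScaleTargetRef.Name += hpaDisableSuffix
	return c.Patch(ctx, patched, client.MergeFrom(hpa))
}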
Expect(rollout.Status.CanaryStatus).Should(BeNil()) + Expect(rollout.Status.BlueGreenStatus.StableRevision).Should(Equal(stableRevision)) + + // check workload status & paused + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 3)) + Expect(workload.Status.UnavailableReplicas).Should(BeNumerically("==", 8)) + Expect(workload.Status.ReadyReplicas).Should(BeNumerically("==", 8)) + + // check rollout status + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(rollout.Status.Phase).Should(Equal(v1beta1.RolloutPhaseProgressing)) + Expect(rollout.Status.BlueGreenStatus.RolloutHash).Should(Equal(rollout.Annotations[util.RolloutHashAnnotation])) + Expect(rollout.Status.BlueGreenStatus.CurrentStepIndex).Should(BeNumerically("==", 1)) + Expect(rollout.Status.BlueGreenStatus.NextStepIndex).Should(BeNumerically("==", 2)) + Expect(rollout.Status.BlueGreenStatus.UpdatedReplicas).Should(BeNumerically("==", 3)) + Expect(rollout.Status.BlueGreenStatus.UpdatedReadyReplicas).Should(BeNumerically("==", 3)) + + // check hpa + HPADisableSuffix := "-DisableByRollout" + Expect(GetObject(hpa.Name, hpa)).NotTo(HaveOccurred()) + Expect(hpa.Spec.ScaleTargetRef.Name).Should(Equal(workload.Name + HPADisableSuffix)) + + By("disable rollout and check deployment") + rollout.Spec.Disabled = true + UpdateRollout(rollout) + WaitRolloutStatusPhase(rollout.Name, v1beta1.RolloutPhaseDisabled) + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + // check annotation + settingStr := workload.Annotations[v1beta1.OriginalDeploymentStrategyAnnotation] + Expect(len(settingStr)).Should(BeNumerically("==", 0)) + // check spec + Expect(workload.Spec.Strategy.Type).Should(Equal(apps.RollingUpdateDeploymentStrategyType)) + Expect(workload.Spec.Paused).Should(BeFalse()) + Expect(workload.Spec.MinReadySeconds).Should(Equal(int32(0))) + Expect(*workload.Spec.ProgressDeadlineSeconds).Should(Equal(int32(600))) + Expect(reflect.DeepEqual(workload.Spec.Strategy.RollingUpdate.MaxUnavailable, &intstr.IntOrString{Type: intstr.Int, IntVal: 0})).Should(BeTrue()) + Expect(reflect.DeepEqual(workload.Spec.Strategy.RollingUpdate.MaxSurge, &intstr.IntOrString{Type: intstr.Int, IntVal: 1})).Should(BeTrue()) + for _, env := range workload.Spec.Template.Spec.Containers[0].Env { + if env.Name == "NODE_NAME" { + Expect(env.Value).Should(Equal("version2")) + } + } + // check service & ingress & deployment + // ingress + Expect(GetObject(ingress.Name, ingress)).NotTo(HaveOccurred()) + cIngress := &netv1.Ingress{} + Expect(GetObject(fmt.Sprintf("%s-canary", ingress.Name), cIngress)).To(HaveOccurred()) + // service + Expect(GetObject(service.Name, service)).NotTo(HaveOccurred()) + Expect(service.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey]).Should(Equal("")) + cService := &v1.Service{} + Expect(GetObject(fmt.Sprintf("%s-canary", service.Name), cService)).To(HaveOccurred()) + WaitDeploymentAllPodsReady(workload) + // status + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 5)) + + // check hpa + Expect(GetObject(hpa.Name, hpa)).NotTo(HaveOccurred()) + Expect(hpa.Spec.ScaleTargetRef.Name).Should(Equal(workload.Name)) + }) + + It("bluegreen disable hpa test case - autoscaling/v2 for v1.23", func() { + By("Creating Rollout...") + rollout := &v1beta1.Rollout{} + 
Expect(ReadYamlToObject("./test_data/rollout/rollout_v1beta1_bluegreen_base.yaml", rollout)).ToNot(HaveOccurred()) + rollout.Spec.WorkloadRef = v1beta1.ObjectRef{ + APIVersion: "apps/v1", + Kind: "Deployment", + Name: "echoserver", + } + CreateObject(rollout) + + By("Creating workload and waiting for all pods ready...") + // service + service := &v1.Service{} + Expect(ReadYamlToObject("./test_data/rollout/service.yaml", service)).ToNot(HaveOccurred()) + CreateObject(service) + // ingress + ingress := &netv1.Ingress{} + Expect(ReadYamlToObject("./test_data/rollout/nginx_ingress.yaml", ingress)).ToNot(HaveOccurred()) + CreateObject(ingress) + // workload + workload := &apps.Deployment{} + Expect(ReadYamlToObject("./test_data/rollout/deployment.yaml", workload)).ToNot(HaveOccurred()) + CreateObject(workload) + WaitDeploymentAllPodsReady(workload) + + By("Creating v2 HPA...") + hpa := &scalingV2.HorizontalPodAutoscaler{} + Expect(ReadYamlToObject("./test_data/rollout/hpa_v2.yaml", hpa)).ToNot(HaveOccurred()) + CreateObject(hpa) + time.Sleep(time.Second * 3) + + // check rollout status + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(rollout.Status.Phase).Should(Equal(v1beta1.RolloutPhaseHealthy)) + By("check rollout status & paused success") + + // v1 -> v2, start rollout action + newEnvs := mergeEnvVar(workload.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "NODE_NAME", Value: "version2"}) + workload.Spec.Template.Spec.Containers[0].Env = newEnvs + UpdateDeployment(workload) + By("Update workload env NODE_NAME from(version1) -> to(version2)") + // ------ step 1: replicas: 50%, traffic: 0% ------ + // wait step 1 complete + WaitRolloutStepPaused(rollout.Name, 1) + stableRevision := GetStableRSRevision(workload) + By(stableRevision) + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(rollout.Status.CanaryStatus).Should(BeNil()) + Expect(rollout.Status.BlueGreenStatus.StableRevision).Should(Equal(stableRevision)) + + // check workload status & paused + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 3)) + Expect(workload.Status.UnavailableReplicas).Should(BeNumerically("==", 8)) + Expect(workload.Status.ReadyReplicas).Should(BeNumerically("==", 8)) + + // check rollout status + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(rollout.Status.Phase).Should(Equal(v1beta1.RolloutPhaseProgressing)) + Expect(rollout.Status.BlueGreenStatus.RolloutHash).Should(Equal(rollout.Annotations[util.RolloutHashAnnotation])) + Expect(rollout.Status.BlueGreenStatus.CurrentStepIndex).Should(BeNumerically("==", 1)) + Expect(rollout.Status.BlueGreenStatus.NextStepIndex).Should(BeNumerically("==", 2)) + Expect(rollout.Status.BlueGreenStatus.UpdatedReplicas).Should(BeNumerically("==", 3)) + Expect(rollout.Status.BlueGreenStatus.UpdatedReadyReplicas).Should(BeNumerically("==", 3)) + + // check hpa + HPADisableSuffix := "-DisableByRollout" + Expect(GetObject(hpa.Name, hpa)).NotTo(HaveOccurred()) + Expect(hpa.Spec.ScaleTargetRef.Name).Should(Equal(workload.Name + HPADisableSuffix)) + + By("delete rollout and check deployment") + k8sClient.Delete(context.TODO(), rollout) + WaitRolloutNotFound(rollout.Name) + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + // check annotation + settingStr := 
workload.Annotations[v1beta1.OriginalDeploymentStrategyAnnotation] + Expect(len(settingStr)).Should(BeNumerically("==", 0)) + // check spec + Expect(workload.Spec.Strategy.Type).Should(Equal(apps.RollingUpdateDeploymentStrategyType)) + Expect(workload.Spec.Paused).Should(BeFalse()) + Expect(workload.Spec.MinReadySeconds).Should(Equal(int32(0))) + Expect(*workload.Spec.ProgressDeadlineSeconds).Should(Equal(int32(600))) + Expect(reflect.DeepEqual(workload.Spec.Strategy.RollingUpdate.MaxUnavailable, &intstr.IntOrString{Type: intstr.Int, IntVal: 0})).Should(BeTrue()) + Expect(reflect.DeepEqual(workload.Spec.Strategy.RollingUpdate.MaxSurge, &intstr.IntOrString{Type: intstr.Int, IntVal: 1})).Should(BeTrue()) + for _, env := range workload.Spec.Template.Spec.Containers[0].Env { + if env.Name == "NODE_NAME" { + Expect(env.Value).Should(Equal("version2")) + } + } + // check service & ingress & deployment + // ingress + Expect(GetObject(ingress.Name, ingress)).NotTo(HaveOccurred()) + cIngress := &netv1.Ingress{} + Expect(GetObject(fmt.Sprintf("%s-canary", ingress.Name), cIngress)).To(HaveOccurred()) + // service + Expect(GetObject(service.Name, service)).NotTo(HaveOccurred()) + Expect(service.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey]).Should(Equal("")) + cService := &v1.Service{} + Expect(GetObject(fmt.Sprintf("%s-canary", service.Name), cService)).To(HaveOccurred()) + WaitDeploymentAllPodsReady(workload) + // status + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 5)) + // check hpa + Expect(GetObject(hpa.Name, hpa)).NotTo(HaveOccurred()) + Expect(hpa.Spec.ScaleTargetRef.Name).Should(Equal(workload.Name)) + }) + }) + + // test for cloneset + KruiseDescribe("Bluegreen Release - Cloneset - Ingress", func() { + It("bluegreen rolling with traffic case", func() { + By("Creating Rollout...") + rollout := &v1beta1.Rollout{} + Expect(ReadYamlToObject("./test_data/rollout/rollout_v1beta1_bluegreen_cloneset_base.yaml", rollout)).ToNot(HaveOccurred()) + rollout.Spec.WorkloadRef = v1beta1.ObjectRef{ + APIVersion: "apps.kruise.io/v1alpha1", + Kind: "CloneSet", + Name: "echoserver", + } + CreateObject(rollout) + + By("Creating workload and waiting for all pods ready...") + // service + service := &v1.Service{} + Expect(ReadYamlToObject("./test_data/rollout/service.yaml", service)).ToNot(HaveOccurred()) + CreateObject(service) + // ingress + ingress := &netv1.Ingress{} + Expect(ReadYamlToObject("./test_data/rollout/nginx_ingress.yaml", ingress)).ToNot(HaveOccurred()) + CreateObject(ingress) + // workload + workload := &appsv1alpha1.CloneSet{} + Expect(ReadYamlToObject("./test_data/rollout/cloneset.yaml", workload)).ToNot(HaveOccurred()) + CreateObject(workload) + WaitCloneSetAllPodsReady(workload) + + // check rollout status + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(rollout.Status.Phase).Should(Equal(v1beta1.RolloutPhaseHealthy)) + Expect(rollout.Status.BlueGreenStatus.StableRevision).Should(Equal(workload.Status.CurrentRevision[strings.LastIndex(workload.Status.CurrentRevision, "-")+1:])) + stableRevision := rollout.Status.BlueGreenStatus.StableRevision + By("check rollout status & paused success") + + // v1 -> v2, start rollout action + newEnvs := mergeEnvVar(workload.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "NODE_NAME", Value: "version2"}) + workload.Spec.Template.Spec.Containers[0].Env = newEnvs + 
UpdateCloneSet(workload) + By("Update cloneSet env NODE_NAME from(version1) -> to(version2)") + time.Sleep(time.Second * 3) + + // wait step 1 complete + By("wait step(1) pause") + WaitRolloutStepPaused(rollout.Name, 1) + // check workload status & paused + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.Replicas).Should(BeNumerically("==", 10)) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Status.UpdatedReadyReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Spec.UpdateStrategy.Paused).Should(BeFalse()) + By("check cloneSet status & paused success") + // check rollout status + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(rollout.Status.Phase).Should(Equal(v1beta1.RolloutPhaseProgressing)) + Expect(rollout.Status.BlueGreenStatus.StableRevision).Should(Equal(stableRevision)) + Expect(rollout.Status.BlueGreenStatus.UpdatedRevision).Should(Equal(workload.Status.UpdateRevision[strings.LastIndex(workload.Status.UpdateRevision, "-")+1:])) + Expect(rollout.Status.BlueGreenStatus.PodTemplateHash).Should(Equal(workload.Status.UpdateRevision[strings.LastIndex(workload.Status.UpdateRevision, "-")+1:])) + canaryRevision := rollout.Status.BlueGreenStatus.PodTemplateHash + Expect(rollout.Status.BlueGreenStatus.CurrentStepIndex).Should(BeNumerically("==", 1)) + Expect(rollout.Status.BlueGreenStatus.NextStepIndex).Should(BeNumerically("==", 2)) + Expect(rollout.Status.BlueGreenStatus.RolloutHash).Should(Equal(rollout.Annotations[util.RolloutHashAnnotation])) + // check stable, canary service & ingress + // stable service + Expect(GetObject(service.Name, service)).NotTo(HaveOccurred()) + Expect(service.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey]).Should(Equal(stableRevision)) + //canary service + cService := &v1.Service{} + Expect(GetObject(service.Name+"-canary", cService)).NotTo(HaveOccurred()) + Expect(cService.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey]).Should(Equal(canaryRevision)) + + // wait step 2 complete + By("wait step(2) pause") + ResumeRollout(rollout.Name) + WaitRolloutStepPaused(rollout.Name, 2) + // check workload status & paused + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.Replicas).Should(BeNumerically("==", 10)) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Status.UpdatedReadyReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Spec.UpdateStrategy.Paused).Should(BeFalse()) + By("check cloneSet status & paused success") + // check rollout status + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(rollout.Status.Phase).Should(Equal(v1beta1.RolloutPhaseProgressing)) + Expect(rollout.Status.BlueGreenStatus.StableRevision).Should(Equal(stableRevision)) + Expect(rollout.Status.BlueGreenStatus.UpdatedRevision).Should(Equal(workload.Status.UpdateRevision[strings.LastIndex(workload.Status.UpdateRevision, "-")+1:])) + Expect(rollout.Status.BlueGreenStatus.PodTemplateHash).Should(Equal(workload.Status.UpdateRevision[strings.LastIndex(workload.Status.UpdateRevision, "-")+1:])) + Expect(rollout.Status.BlueGreenStatus.CurrentStepIndex).Should(BeNumerically("==", 2)) + Expect(rollout.Status.BlueGreenStatus.NextStepIndex).Should(BeNumerically("==", 3)) + Expect(rollout.Status.BlueGreenStatus.RolloutHash).Should(Equal(rollout.Annotations[util.RolloutHashAnnotation])) + // if network configuration has restored + cIngress := &netv1.Ingress{} + 
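// The step's traffic is wired through an auxiliary "-canary" Ingress carrying the nginx canary annotations, +
// which points the configured percentage of requests at the canary Service (and thus the updated Pods); +
// the checks below confirm that weight matches this step's Traffic while the step stays paused. +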
Expect(GetObject(service.Name+"-canary", cIngress)).NotTo(HaveOccurred()) + Expect(cIngress.Annotations[fmt.Sprintf("%s/canary", nginxIngressAnnotationDefaultPrefix)]).Should(Equal("true")) + Expect(cIngress.Annotations[fmt.Sprintf("%s/canary-weight", nginxIngressAnnotationDefaultPrefix)]).Should(Equal(removePercentageSign(*rollout.Spec.Strategy.BlueGreen.Steps[1].Traffic))) + + // wait step 3 complete + By("wait step(3) pause") + ResumeRollout(rollout.Name) + WaitRolloutStepPaused(rollout.Name, 3) + // check workload status & paused + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.Replicas).Should(BeNumerically("==", 10)) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Status.UpdatedReadyReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Spec.UpdateStrategy.Paused).Should(BeFalse()) + By("check cloneSet status & paused success") + // check rollout status + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(rollout.Status.Phase).Should(Equal(v1beta1.RolloutPhaseProgressing)) + Expect(rollout.Status.BlueGreenStatus.StableRevision).Should(Equal(stableRevision)) + Expect(rollout.Status.BlueGreenStatus.UpdatedRevision).Should(Equal(workload.Status.UpdateRevision[strings.LastIndex(workload.Status.UpdateRevision, "-")+1:])) + Expect(rollout.Status.BlueGreenStatus.PodTemplateHash).Should(Equal(workload.Status.UpdateRevision[strings.LastIndex(workload.Status.UpdateRevision, "-")+1:])) + Expect(rollout.Status.BlueGreenStatus.CurrentStepIndex).Should(BeNumerically("==", 3)) + Expect(rollout.Status.BlueGreenStatus.NextStepIndex).Should(BeNumerically("==", -1)) + Expect(rollout.Status.BlueGreenStatus.RolloutHash).Should(Equal(rollout.Annotations[util.RolloutHashAnnotation])) + // if network configuration has restored + cIngress = &netv1.Ingress{} + Expect(GetObject(service.Name+"-canary", cIngress)).NotTo(HaveOccurred()) + Expect(cIngress.Annotations[fmt.Sprintf("%s/canary", nginxIngressAnnotationDefaultPrefix)]).Should(Equal("true")) + Expect(cIngress.Annotations[fmt.Sprintf("%s/canary-weight", nginxIngressAnnotationDefaultPrefix)]).Should(Equal(removePercentageSign(*rollout.Spec.Strategy.BlueGreen.Steps[2].Traffic))) + + // ------ Final approval ------ + // resume rollout + ResumeRollout(rollout.Name) + By("resume rollout, and wait to Finalise") + WaitRolloutStatusPhase(rollout.Name, v1beta1.RolloutPhaseHealthy) + WaitCloneSetAllPodsReady(workload) + By("rollout completed, and check") + + // check service & ingress & deployment + // ingress + Expect(GetObject(ingress.Name, ingress)).NotTo(HaveOccurred()) + cIngress = &netv1.Ingress{} + Expect(GetObject(fmt.Sprintf("%s-canary", ingress.Name), cIngress)).To(HaveOccurred()) + // service + Expect(GetObject(service.Name, service)).NotTo(HaveOccurred()) + Expect(service.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey]).Should(Equal("")) + cService = &v1.Service{} + Expect(GetObject(fmt.Sprintf("%s-canary", service.Name), cService)).To(HaveOccurred()) + // workload + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 5)) + for _, env := range workload.Spec.Template.Spec.Containers[0].Env { + if env.Name == "NODE_NAME" { + Expect(env.Value).Should(Equal("version2")) + } + } + time.Sleep(time.Second * 3) + + // check progressing succeed + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(GetObject(rollout.Name, 
rollout)).NotTo(HaveOccurred()) + cond := getRolloutCondition(rollout.Status, v1beta1.RolloutConditionProgressing) + Expect(cond.Reason).Should(Equal(v1beta1.ProgressingReasonCompleted)) + Expect(string(cond.Status)).Should(Equal(string(metav1.ConditionFalse))) + cond = getRolloutCondition(rollout.Status, v1beta1.RolloutConditionSucceeded) + Expect(string(cond.Status)).Should(Equal(string(metav1.ConditionTrue))) + WaitRolloutWorkloadGeneration(rollout.Name, workload.Generation) + }) + + It("bluegreen rollback case for cloneset", func() { + By("Creating Rollout...") + rollout := &v1beta1.Rollout{} + Expect(ReadYamlToObject("./test_data/rollout/rollout_v1beta1_bluegreen_cloneset_base.yaml", rollout)).ToNot(HaveOccurred()) + rollout.Spec.WorkloadRef = v1beta1.ObjectRef{ + APIVersion: "apps.kruise.io/v1alpha1", + Kind: "CloneSet", + Name: "echoserver", + } + CreateObject(rollout) + + By("Creating workload and waiting for all pods ready...") + // service + service := &v1.Service{} + Expect(ReadYamlToObject("./test_data/rollout/service.yaml", service)).ToNot(HaveOccurred()) + CreateObject(service) + // ingress + ingress := &netv1.Ingress{} + Expect(ReadYamlToObject("./test_data/rollout/nginx_ingress.yaml", ingress)).ToNot(HaveOccurred()) + CreateObject(ingress) + // workload + workload := &appsv1alpha1.CloneSet{} + Expect(ReadYamlToObject("./test_data/rollout/cloneset.yaml", workload)).ToNot(HaveOccurred()) + CreateObject(workload) + WaitCloneSetAllPodsReady(workload) + + // check rollout status + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(rollout.Status.Phase).Should(Equal(v1beta1.RolloutPhaseHealthy)) + Expect(rollout.Status.BlueGreenStatus.StableRevision).Should(Equal(workload.Status.CurrentRevision[strings.LastIndex(workload.Status.CurrentRevision, "-")+1:])) + stableRevision := rollout.Status.BlueGreenStatus.StableRevision + By("check rollout status & paused success") + + // v1 -> v2, start rollout action + newEnvs := mergeEnvVar(workload.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "NODE_NAME", Value: "version2"}) + workload.Spec.Template.Spec.Containers[0].Env = newEnvs + UpdateCloneSet(workload) + By("Update cloneSet env NODE_NAME from(version1) -> to(version2)") + time.Sleep(time.Second * 3) + + // wait step 1 complete + By("wait step(1) pause") + WaitRolloutStepPaused(rollout.Name, 1) + // check workload status & paused + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.Replicas).Should(BeNumerically("==", 10)) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Status.UpdatedReadyReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Spec.UpdateStrategy.Paused).Should(BeFalse()) + By("check cloneSet status & paused success") + // check rollout status + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(rollout.Status.Phase).Should(Equal(v1beta1.RolloutPhaseProgressing)) + Expect(rollout.Status.BlueGreenStatus.StableRevision).Should(Equal(stableRevision)) + Expect(rollout.Status.BlueGreenStatus.UpdatedRevision).Should(Equal(workload.Status.UpdateRevision[strings.LastIndex(workload.Status.UpdateRevision, "-")+1:])) + Expect(rollout.Status.BlueGreenStatus.PodTemplateHash).Should(Equal(workload.Status.UpdateRevision[strings.LastIndex(workload.Status.UpdateRevision, "-")+1:])) + canaryRevision := rollout.Status.BlueGreenStatus.PodTemplateHash + 
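// stableRevision and canaryRevision above are the hash suffix of the CloneSet controller-revision name +
// (for example "echoserver-5d8f7c6b9d" -> "5d8f7c6b9d"; the name here is illustrative), which is also what +
// the stable/canary Service selectors are pinned to. A sketch of the extraction these assertions repeat: +
hashOf := func(revisionName string) string { +
return revisionName[strings.LastIndex(revisionName, "-")+1:] // take the suffix after the last '-' +
} +
_ = hashOf(workload.Status.UpdateRevision) +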
Expect(rollout.Status.BlueGreenStatus.CurrentStepIndex).Should(BeNumerically("==", 1)) + Expect(rollout.Status.BlueGreenStatus.NextStepIndex).Should(BeNumerically("==", 2)) + Expect(rollout.Status.BlueGreenStatus.RolloutHash).Should(Equal(rollout.Annotations[util.RolloutHashAnnotation])) + // check stable, canary service & ingress + // stable service + Expect(GetObject(service.Name, service)).NotTo(HaveOccurred()) + Expect(service.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey]).Should(Equal(stableRevision)) + //canary service + cService := &v1.Service{} + Expect(GetObject(service.Name+"-canary", cService)).NotTo(HaveOccurred()) + Expect(cService.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey]).Should(Equal(canaryRevision)) + + // wait step 2 complete + By("wait step(2) pause") + ResumeRollout(rollout.Name) + WaitRolloutStepPaused(rollout.Name, 2) + // check workload status & paused + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.Replicas).Should(BeNumerically("==", 10)) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Status.UpdatedReadyReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Spec.UpdateStrategy.Paused).Should(BeFalse()) + By("check cloneSet status & paused success") + // check rollout status + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(rollout.Status.Phase).Should(Equal(v1beta1.RolloutPhaseProgressing)) + Expect(rollout.Status.BlueGreenStatus.StableRevision).Should(Equal(stableRevision)) + Expect(rollout.Status.BlueGreenStatus.UpdatedRevision).Should(Equal(workload.Status.UpdateRevision[strings.LastIndex(workload.Status.UpdateRevision, "-")+1:])) + Expect(rollout.Status.BlueGreenStatus.PodTemplateHash).Should(Equal(workload.Status.UpdateRevision[strings.LastIndex(workload.Status.UpdateRevision, "-")+1:])) + Expect(rollout.Status.BlueGreenStatus.CurrentStepIndex).Should(BeNumerically("==", 2)) + Expect(rollout.Status.BlueGreenStatus.NextStepIndex).Should(BeNumerically("==", 3)) + Expect(rollout.Status.BlueGreenStatus.RolloutHash).Should(Equal(rollout.Annotations[util.RolloutHashAnnotation])) + // if network configuration has restored + cIngress := &netv1.Ingress{} + Expect(GetObject(service.Name+"-canary", cIngress)).NotTo(HaveOccurred()) + Expect(cIngress.Annotations[fmt.Sprintf("%s/canary", nginxIngressAnnotationDefaultPrefix)]).Should(Equal("true")) + Expect(cIngress.Annotations[fmt.Sprintf("%s/canary-weight", nginxIngressAnnotationDefaultPrefix)]).Should(Equal(removePercentageSign(*rollout.Spec.Strategy.BlueGreen.Steps[1].Traffic))) + + // wait step 3 complete + By("wait step(3) pause") + ResumeRollout(rollout.Name) + WaitRolloutStepPaused(rollout.Name, 3) + // check workload status & paused + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.Replicas).Should(BeNumerically("==", 10)) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Status.UpdatedReadyReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Spec.UpdateStrategy.Paused).Should(BeFalse()) + By("check cloneSet status & paused success") + // check rollout status + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(rollout.Status.Phase).Should(Equal(v1beta1.RolloutPhaseProgressing)) + Expect(rollout.Status.BlueGreenStatus.StableRevision).Should(Equal(stableRevision)) + 
Expect(rollout.Status.BlueGreenStatus.UpdatedRevision).Should(Equal(workload.Status.UpdateRevision[strings.LastIndex(workload.Status.UpdateRevision, "-")+1:])) + Expect(rollout.Status.BlueGreenStatus.PodTemplateHash).Should(Equal(workload.Status.UpdateRevision[strings.LastIndex(workload.Status.UpdateRevision, "-")+1:])) + Expect(rollout.Status.BlueGreenStatus.CurrentStepIndex).Should(BeNumerically("==", 3)) + Expect(rollout.Status.BlueGreenStatus.NextStepIndex).Should(BeNumerically("==", -1)) + Expect(rollout.Status.BlueGreenStatus.RolloutHash).Should(Equal(rollout.Annotations[util.RolloutHashAnnotation])) + // if network configuration has restored + cIngress = &netv1.Ingress{} + Expect(GetObject(service.Name+"-canary", cIngress)).NotTo(HaveOccurred()) + Expect(cIngress.Annotations[fmt.Sprintf("%s/canary", nginxIngressAnnotationDefaultPrefix)]).Should(Equal("true")) + Expect(cIngress.Annotations[fmt.Sprintf("%s/canary-weight", nginxIngressAnnotationDefaultPrefix)]).Should(Equal(removePercentageSign(*rollout.Spec.Strategy.BlueGreen.Steps[2].Traffic))) + + // ------ Rollback: traffic switch ------ + By("Jump to step 2") + JumpRolloutStep(rollout.Name, 2) + WaitRolloutStepPaused(rollout.Name, 2) + // check workload status & paused + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.Replicas).Should(BeNumerically("==", 10)) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Status.UpdatedReadyReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Spec.UpdateStrategy.Paused).Should(BeFalse()) + By("check cloneSet status & paused success") + // check rollout status + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(rollout.Status.Phase).Should(Equal(v1beta1.RolloutPhaseProgressing)) + Expect(rollout.Status.BlueGreenStatus.StableRevision).Should(Equal(stableRevision)) + Expect(rollout.Status.BlueGreenStatus.UpdatedRevision).Should(Equal(workload.Status.UpdateRevision[strings.LastIndex(workload.Status.UpdateRevision, "-")+1:])) + Expect(rollout.Status.BlueGreenStatus.PodTemplateHash).Should(Equal(workload.Status.UpdateRevision[strings.LastIndex(workload.Status.UpdateRevision, "-")+1:])) + Expect(rollout.Status.BlueGreenStatus.CurrentStepIndex).Should(BeNumerically("==", 2)) + Expect(rollout.Status.BlueGreenStatus.NextStepIndex).Should(BeNumerically("==", 3)) + Expect(rollout.Status.BlueGreenStatus.RolloutHash).Should(Equal(rollout.Annotations[util.RolloutHashAnnotation])) + // if network configuration has restored + cIngress = &netv1.Ingress{} + Expect(GetObject(service.Name+"-canary", cIngress)).NotTo(HaveOccurred()) + Expect(cIngress.Annotations[fmt.Sprintf("%s/canary", nginxIngressAnnotationDefaultPrefix)]).Should(Equal("true")) + Expect(cIngress.Annotations[fmt.Sprintf("%s/canary-weight", nginxIngressAnnotationDefaultPrefix)]).Should(Equal(removePercentageSign(*rollout.Spec.Strategy.BlueGreen.Steps[1].Traffic))) + + // ------ Rollback: traffic switch ------ + By("Jump to step 1") + JumpRolloutStep(rollout.Name, 1) + WaitRolloutStepPaused(rollout.Name, 1) + // check workload status & paused + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.Replicas).Should(BeNumerically("==", 10)) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Status.UpdatedReadyReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Spec.UpdateStrategy.Paused).Should(BeFalse()) + By("check cloneSet status & paused success") + // check 
rollout status + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(rollout.Status.Phase).Should(Equal(v1beta1.RolloutPhaseProgressing)) + Expect(rollout.Status.BlueGreenStatus.StableRevision).Should(Equal(stableRevision)) + Expect(rollout.Status.BlueGreenStatus.UpdatedRevision).Should(Equal(workload.Status.UpdateRevision[strings.LastIndex(workload.Status.UpdateRevision, "-")+1:])) + Expect(rollout.Status.BlueGreenStatus.PodTemplateHash).Should(Equal(workload.Status.UpdateRevision[strings.LastIndex(workload.Status.UpdateRevision, "-")+1:])) + Expect(rollout.Status.BlueGreenStatus.CurrentStepIndex).Should(BeNumerically("==", 1)) + Expect(rollout.Status.BlueGreenStatus.NextStepIndex).Should(BeNumerically("==", 2)) + Expect(rollout.Status.BlueGreenStatus.RolloutHash).Should(Equal(rollout.Annotations[util.RolloutHashAnnotation])) + // if network configuration has restored + cIngress = &netv1.Ingress{} + Expect(GetObject(service.Name+"-canary", cIngress)).NotTo(HaveOccurred()) + Expect(cIngress.Annotations[fmt.Sprintf("%s/canary", nginxIngressAnnotationDefaultPrefix)]).Should(Equal("true")) + Expect(cIngress.Annotations[fmt.Sprintf("%s/canary-weight", nginxIngressAnnotationDefaultPrefix)]).Should(Equal(removePercentageSign(*rollout.Spec.Strategy.BlueGreen.Steps[0].Traffic))) + + // ------ Rollback: PaaS rollback ------ + By("update workload env NODE_NAME from(version2) -> to(version1)") + newEnvs = mergeEnvVar(workload.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "NODE_NAME", Value: "version1"}) + workload.Spec.Template.Spec.Containers[0].Env = newEnvs + UpdateCloneSet(workload) + WaitRolloutStatusPhase(rollout.Name, v1beta1.RolloutPhaseHealthy) + WaitCloneSetAllPodsReady(workload) + + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + cond := getRolloutCondition(rollout.Status, v1beta1.RolloutConditionProgressing) + Expect(string(cond.Reason)).Should(Equal(string(v1beta1.CanaryStepStateCompleted))) + Expect(string(cond.Status)).Should(Equal(string(metav1.ConditionFalse))) + CheckIngressRestored(service.Name) + + // check workload status & paused + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.Replicas).Should(BeNumerically("==", 5)) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Status.UpdatedReadyReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Spec.UpdateStrategy.Paused).Should(BeFalse()) + }) + + It("bluegreen continuous rolling case for cloneset", func() { + By("Creating Rollout...") + rollout := &v1beta1.Rollout{} + Expect(ReadYamlToObject("./test_data/rollout/rollout_v1beta1_bluegreen_cloneset_base.yaml", rollout)).ToNot(HaveOccurred()) + rollout.Spec.WorkloadRef = v1beta1.ObjectRef{ + APIVersion: "apps.kruise.io/v1alpha1", + Kind: "CloneSet", + Name: "echoserver", + } + CreateObject(rollout) + + By("Creating workload and waiting for all pods ready...") + // service + service := &v1.Service{} + Expect(ReadYamlToObject("./test_data/rollout/service.yaml", service)).ToNot(HaveOccurred()) + CreateObject(service) + // ingress + ingress := &netv1.Ingress{} + Expect(ReadYamlToObject("./test_data/rollout/nginx_ingress.yaml", ingress)).ToNot(HaveOccurred()) + CreateObject(ingress) + // workload + workload := &appsv1alpha1.CloneSet{} + Expect(ReadYamlToObject("./test_data/rollout/cloneset.yaml", workload)).ToNot(HaveOccurred()) + CreateObject(workload) + WaitCloneSetAllPodsReady(workload) + + // check rollout status + Expect(GetObject(rollout.Name, 
rollout)).NotTo(HaveOccurred()) + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(rollout.Status.Phase).Should(Equal(v1beta1.RolloutPhaseHealthy)) + Expect(rollout.Status.BlueGreenStatus.StableRevision).Should(Equal(workload.Status.CurrentRevision[strings.LastIndex(workload.Status.CurrentRevision, "-")+1:])) + stableRevision := rollout.Status.BlueGreenStatus.StableRevision + By("check rollout status & paused success") + + // v1 -> v2, start rollout action + newEnvs := mergeEnvVar(workload.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "NODE_NAME", Value: "version2"}) + workload.Spec.Template.Spec.Containers[0].Env = newEnvs + UpdateCloneSet(workload) + By("Update cloneSet env NODE_NAME from(version1) -> to(version2)") + time.Sleep(time.Second * 3) + + // wait step 1 complete + By("wait step(1) pause") + WaitRolloutStepPaused(rollout.Name, 1) + // check workload status & paused + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.Replicas).Should(BeNumerically("==", 10)) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Status.UpdatedReadyReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Spec.UpdateStrategy.Paused).Should(BeFalse()) + By("check cloneSet status & paused success") + // check rollout status + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(rollout.Status.Phase).Should(Equal(v1beta1.RolloutPhaseProgressing)) + Expect(rollout.Status.BlueGreenStatus.StableRevision).Should(Equal(stableRevision)) + Expect(rollout.Status.BlueGreenStatus.UpdatedRevision).Should(Equal(workload.Status.UpdateRevision[strings.LastIndex(workload.Status.UpdateRevision, "-")+1:])) + Expect(rollout.Status.BlueGreenStatus.PodTemplateHash).Should(Equal(workload.Status.UpdateRevision[strings.LastIndex(workload.Status.UpdateRevision, "-")+1:])) + canaryRevision := rollout.Status.BlueGreenStatus.PodTemplateHash + Expect(rollout.Status.BlueGreenStatus.CurrentStepIndex).Should(BeNumerically("==", 1)) + Expect(rollout.Status.BlueGreenStatus.NextStepIndex).Should(BeNumerically("==", 2)) + Expect(rollout.Status.BlueGreenStatus.RolloutHash).Should(Equal(rollout.Annotations[util.RolloutHashAnnotation])) + // check stable, canary service & ingress + // stable service + Expect(GetObject(service.Name, service)).NotTo(HaveOccurred()) + Expect(service.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey]).Should(Equal(stableRevision)) + //canary service + cService := &v1.Service{} + Expect(GetObject(service.Name+"-canary", cService)).NotTo(HaveOccurred()) + Expect(cService.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey]).Should(Equal(canaryRevision)) + + // wait step 2 complete + By("wait step(2) pause") + ResumeRollout(rollout.Name) + WaitRolloutStepPaused(rollout.Name, 2) + // check workload status & paused + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.Replicas).Should(BeNumerically("==", 10)) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Status.UpdatedReadyReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Spec.UpdateStrategy.Paused).Should(BeFalse()) + By("check cloneSet status & paused success") + // check rollout status + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(rollout.Status.Phase).Should(Equal(v1beta1.RolloutPhaseProgressing)) + Expect(rollout.Status.BlueGreenStatus.StableRevision).Should(Equal(stableRevision)) + revision2 := 
workload.Status.UpdateRevision[strings.LastIndex(workload.Status.UpdateRevision, "-")+1:] + Expect(rollout.Status.BlueGreenStatus.UpdatedRevision).Should(Equal(workload.Status.UpdateRevision[strings.LastIndex(workload.Status.UpdateRevision, "-")+1:])) + Expect(rollout.Status.BlueGreenStatus.PodTemplateHash).Should(Equal(workload.Status.UpdateRevision[strings.LastIndex(workload.Status.UpdateRevision, "-")+1:])) + Expect(rollout.Status.BlueGreenStatus.CurrentStepIndex).Should(BeNumerically("==", 2)) + Expect(rollout.Status.BlueGreenStatus.NextStepIndex).Should(BeNumerically("==", 3)) + Expect(rollout.Status.BlueGreenStatus.RolloutHash).Should(Equal(rollout.Annotations[util.RolloutHashAnnotation])) + // check if network configuration has restored + cIngress := &netv1.Ingress{} + Expect(GetObject(service.Name+"-canary", cIngress)).NotTo(HaveOccurred()) + Expect(cIngress.Annotations[fmt.Sprintf("%s/canary", nginxIngressAnnotationDefaultPrefix)]).Should(Equal("true")) + Expect(cIngress.Annotations[fmt.Sprintf("%s/canary-weight", nginxIngressAnnotationDefaultPrefix)]).Should(Equal(removePercentageSign(*rollout.Spec.Strategy.BlueGreen.Steps[1].Traffic))) + + // ------ Continuous Release ------ + updatedRevision := rollout.Status.BlueGreenStatus.UpdatedRevision + By(updatedRevision) + By("update workload env NODE_NAME from(version2) -> to(version3)") + newEnvs = mergeEnvVar(workload.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "NODE_NAME", Value: "version3"}) + workload.Spec.Template.Spec.Containers[0].Env = newEnvs + UpdateCloneSet(workload) + time.Sleep(time.Second * 1) + WaitRolloutStepPaused(rollout.Name, 2) + // check workload status & paused + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.Replicas).Should(BeNumerically("==", 10)) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 0)) + Expect(workload.Status.UpdatedReadyReplicas).Should(BeNumerically("==", 0)) + Expect(workload.Spec.UpdateStrategy.Paused).Should(BeFalse()) // unlike Deployment, cloneSet isn't paused + By("check cloneSet status & paused success") + // check rollout status + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(rollout.Status.Phase).Should(Equal(v1beta1.RolloutPhaseProgressing)) + Expect(rollout.Status.BlueGreenStatus.StableRevision).Should(Equal(stableRevision)) + /* + note: rollout.Status.BlueGreenStatus.UpdatedRevision won't be updated, because continuous release is + disallowed for bluegreen release (it is designed to trigger a fatal error before the status update); + however, workload.Status.UpdateRevision is always updated, since it is calculated + directly from the CloneSet + */ + Expect(rollout.Status.BlueGreenStatus.UpdatedRevision).Should(Equal(revision2)) + Expect(rollout.Status.BlueGreenStatus.PodTemplateHash).Should(Equal(revision2)) + Expect(rollout.Status.BlueGreenStatus.CurrentStepIndex).Should(BeNumerically("==", 2)) + Expect(rollout.Status.BlueGreenStatus.NextStepIndex).Should(BeNumerically("==", 3)) + Expect(rollout.Status.BlueGreenStatus.RolloutHash).Should(Equal(rollout.Annotations[util.RolloutHashAnnotation])) + + // it's ok to patch the CloneSet back to version2, and even to release the remaining steps afterwards + newEnvs = mergeEnvVar(workload.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "NODE_NAME", Value: "version2"}) + workload.Spec.Template.Spec.Containers[0].Env = newEnvs + UpdateCloneSet(workload) + By("update workload env NODE_NAME from(version3) -> to(version2)") + time.Sleep(time.Second * 1) +
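// Reverting the template back to version2 makes the CloneSet's update revision match the UpdatedRevision +
// already recorded in BlueGreenStatus, so the release is no longer treated as a (disallowed) continuous +
// release and the paused step can proceed below. +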
WaitRolloutStepPaused(rollout.Name, 2) + // check workload status & paused + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.Replicas).Should(BeNumerically("==", 10)) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Status.UpdatedReadyReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Spec.UpdateStrategy.Paused).Should(BeFalse()) // unlike Deployment, cloneSet isn't paused + By("check cloneSet status & paused success") + // check rollout status + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(rollout.Status.Phase).Should(Equal(v1beta1.RolloutPhaseProgressing)) + Expect(rollout.Status.BlueGreenStatus.StableRevision).Should(Equal(stableRevision)) + Expect(rollout.Status.BlueGreenStatus.UpdatedRevision).Should(Equal(revision2)) + Expect(rollout.Status.BlueGreenStatus.PodTemplateHash).Should(Equal(revision2)) + Expect(rollout.Status.BlueGreenStatus.CurrentStepIndex).Should(BeNumerically("==", 2)) + Expect(rollout.Status.BlueGreenStatus.NextStepIndex).Should(BeNumerically("==", 3)) + Expect(rollout.Status.BlueGreenStatus.RolloutHash).Should(Equal(rollout.Annotations[util.RolloutHashAnnotation])) + + // of course user can rollback to version1 directly + newEnvs = mergeEnvVar(workload.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "NODE_NAME", Value: "version1"}) + workload.Spec.Template.Spec.Containers[0].Env = newEnvs + UpdateCloneSet(workload) + By("rollback: update workload env NODE_NAME from(version2) -> to(version1)") + WaitRolloutStatusPhase(rollout.Name, v1beta1.RolloutPhaseHealthy) + WaitCloneSetAllPodsReady(workload) + + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + cond := getRolloutCondition(rollout.Status, v1beta1.RolloutConditionProgressing) + Expect(string(cond.Reason)).Should(Equal(string(v1beta1.CanaryStepStateCompleted))) + Expect(string(cond.Status)).Should(Equal(string(metav1.ConditionFalse))) + Expect(len(rollout.GetAnnotations()[v1beta1.OriginalDeploymentStrategyAnnotation])).Should(BeNumerically("==", 0)) // the annotation should be removed + CheckIngressRestored(service.Name) + + // check workload status & paused + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.Replicas).Should(BeNumerically("==", 5)) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Status.UpdatedReadyReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Spec.UpdateStrategy.Paused).Should(BeFalse()) + }) + + // cloneset now only support single step, keep this case for future + // It("bluegreen scale up and down for cloneset", func() { + // By("Creating Rollout...") + // rollout := &v1beta1.Rollout{} + // Expect(ReadYamlToObject("./test_data/rollout/rollout_v1beta1_bluegreen_cloneset_base.yaml", rollout)).ToNot(HaveOccurred()) + // rollout.Spec.WorkloadRef = v1beta1.ObjectRef{ + // APIVersion: "apps.kruise.io/v1alpha1", + // Kind: "CloneSet", + // Name: "echoserver", + // } + // CreateObject(rollout) + + // By("Creating workload and waiting for all pods ready...") + // // service + // service := &v1.Service{} + // Expect(ReadYamlToObject("./test_data/rollout/service.yaml", service)).ToNot(HaveOccurred()) + // CreateObject(service) + // // ingress + // ingress := &netv1.Ingress{} + // Expect(ReadYamlToObject("./test_data/rollout/nginx_ingress.yaml", ingress)).ToNot(HaveOccurred()) + // CreateObject(ingress) + // // workload + // workload := &appsv1alpha1.CloneSet{} + // 
Expect(ReadYamlToObject("./test_data/rollout/cloneset.yaml", workload)).ToNot(HaveOccurred()) + // CreateObject(workload) + // WaitCloneSetAllPodsReady(workload) + + // // check rollout status + // Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + // Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + // Expect(rollout.Status.Phase).Should(Equal(v1beta1.RolloutPhaseHealthy)) + // Expect(rollout.Status.BlueGreenStatus.StableRevision).Should(Equal(workload.Status.CurrentRevision[strings.LastIndex(workload.Status.CurrentRevision, "-")+1:])) + // stableRevision := rollout.Status.BlueGreenStatus.StableRevision + // By("check rollout status & paused success") + + // // v1 -> v2, start rollout action + // newEnvs := mergeEnvVar(workload.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "NODE_NAME", Value: "version2"}) + // workload.Spec.Template.Spec.Containers[0].Env = newEnvs + // UpdateCloneSet(workload) + // By("Update cloneSet env NODE_NAME from(version1) -> to(version2)") + // time.Sleep(time.Second * 3) + + // // wait step 1 complete + // By("wait step(1) pause") + // WaitRolloutStepPaused(rollout.Name, 1) + // // check workload status & paused + // Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + // Expect(workload.Status.Replicas).Should(BeNumerically("==", 10)) + // Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 5)) + // Expect(workload.Status.UpdatedReadyReplicas).Should(BeNumerically("==", 5)) + // Expect(workload.Spec.UpdateStrategy.Paused).Should(BeFalse()) + // By("check cloneSet status & paused success") + // // check rollout status + // Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + // Expect(rollout.Status.Phase).Should(Equal(v1beta1.RolloutPhaseProgressing)) + // Expect(rollout.Status.BlueGreenStatus.StableRevision).Should(Equal(stableRevision)) + // Expect(rollout.Status.BlueGreenStatus.UpdatedRevision).Should(Equal(workload.Status.UpdateRevision[strings.LastIndex(workload.Status.UpdateRevision, "-")+1:])) + // Expect(rollout.Status.BlueGreenStatus.PodTemplateHash).Should(Equal(workload.Status.UpdateRevision[strings.LastIndex(workload.Status.UpdateRevision, "-")+1:])) + // canaryRevision := rollout.Status.BlueGreenStatus.PodTemplateHash + // Expect(rollout.Status.BlueGreenStatus.CurrentStepIndex).Should(BeNumerically("==", 1)) + // Expect(rollout.Status.BlueGreenStatus.NextStepIndex).Should(BeNumerically("==", 2)) + // Expect(rollout.Status.BlueGreenStatus.RolloutHash).Should(Equal(rollout.Annotations[util.RolloutHashAnnotation])) + // // check stable, canary service & ingress + // // stable service + // Expect(GetObject(service.Name, service)).NotTo(HaveOccurred()) + // Expect(service.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey]).Should(Equal(stableRevision)) + // //canary service + // cService := &v1.Service{} + // Expect(GetObject(service.Name+"-canary", cService)).NotTo(HaveOccurred()) + // Expect(cService.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey]).Should(Equal(canaryRevision)) + // // ------ 50% maxSurge, scale up: from 5 to 6 ------ + // By("scale up: from 5 to 6") + // workload.Spec.Replicas = utilpointer.Int32(6) + // UpdateCloneSet(workload) + // time.Sleep(time.Second * 3) + // WaitClonesetBlueGreenReplicas(workload) + + // // check rollout status + // Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + // Expect(rollout.Status.BlueGreenStatus.CurrentStepIndex).Should(BeNumerically("==", 1)) + // 
Expect(rollout.Status.BlueGreenStatus.NextStepIndex).Should(BeNumerically("==", 2)) + // Expect(rollout.Status.BlueGreenStatus.UpdatedReplicas).Should(BeNumerically("==", 6)) + // Expect(rollout.Status.BlueGreenStatus.UpdatedReadyReplicas).Should(BeNumerically("==", 6)) + // // check workload status + // Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + // Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 6)) + // Expect(workload.Status.ReadyReplicas).Should(BeNumerically("==", 12)) + + // // ------ scale up: from 6 to 7 ------ + // By("scale up: from 6 to 7") + // workload.Spec.Replicas = utilpointer.Int32(7) + // UpdateCloneSet(workload) + // time.Sleep(time.Second * 3) + // WaitClonesetBlueGreenReplicas(workload) + + // // check rollout status + // Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + // Expect(rollout.Status.BlueGreenStatus.CurrentStepIndex).Should(BeNumerically("==", 1)) + // Expect(rollout.Status.BlueGreenStatus.NextStepIndex).Should(BeNumerically("==", 2)) + // Expect(rollout.Status.BlueGreenStatus.UpdatedReplicas).Should(BeNumerically("==", 7)) + // Expect(rollout.Status.BlueGreenStatus.UpdatedReadyReplicas).Should(BeNumerically("==", 7)) + // // check workload status + // Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + // Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 7)) + // Expect(workload.Status.ReadyReplicas).Should(BeNumerically("==", 14)) + + // // ------ scale up: from 7 to 8 ------ + // By("scale up: from 7 to 8") + // workload.Spec.Replicas = utilpointer.Int32(8) + // UpdateCloneSet(workload) + // time.Sleep(time.Second * 3) + // WaitClonesetBlueGreenReplicas(workload) + + // // check rollout status + // Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + // Expect(rollout.Status.BlueGreenStatus.CurrentStepIndex).Should(BeNumerically("==", 1)) + // Expect(rollout.Status.BlueGreenStatus.NextStepIndex).Should(BeNumerically("==", 2)) + // Expect(rollout.Status.BlueGreenStatus.UpdatedReplicas).Should(BeNumerically("==", 8)) + // Expect(rollout.Status.BlueGreenStatus.UpdatedReadyReplicas).Should(BeNumerically("==", 8)) + // // check workload status + // Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + // Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 8)) + // Expect(workload.Status.ReadyReplicas).Should(BeNumerically("==", 16)) + + // // ------ scale down: from 8 to 4 ------ + // By("scale down: from 8 to 4") + // workload.Spec.Replicas = utilpointer.Int32(4) + // UpdateCloneSet(workload) + // time.Sleep(time.Second * 3) + // WaitClonesetBlueGreenReplicas(workload) + + // // check rollout status + // Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + // Expect(rollout.Status.BlueGreenStatus.CurrentStepIndex).Should(BeNumerically("==", 1)) + // Expect(rollout.Status.BlueGreenStatus.NextStepIndex).Should(BeNumerically("==", 2)) + // Expect(rollout.Status.BlueGreenStatus.UpdatedReplicas).Should(BeNumerically("==", 4)) + // Expect(rollout.Status.BlueGreenStatus.UpdatedReadyReplicas).Should(BeNumerically("==", 4)) + // // check workload status + // Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + // Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 4)) + // Expect(workload.Status.ReadyReplicas).Should(BeNumerically("==", 8)) + + // }) + + It("bluegreen delete rollout case for cloneset", func() { + By("Creating Rollout...") + rollout := &v1beta1.Rollout{} + 
Expect(ReadYamlToObject("./test_data/rollout/rollout_v1beta1_bluegreen_cloneset_base.yaml", rollout)).ToNot(HaveOccurred()) + rollout.Spec.WorkloadRef = v1beta1.ObjectRef{ + APIVersion: "apps.kruise.io/v1alpha1", + Kind: "CloneSet", + Name: "echoserver", + } + CreateObject(rollout) + + By("Creating workload and waiting for all pods ready...") + // service + service := &v1.Service{} + Expect(ReadYamlToObject("./test_data/rollout/service.yaml", service)).ToNot(HaveOccurred()) + CreateObject(service) + // ingress + ingress := &netv1.Ingress{} + Expect(ReadYamlToObject("./test_data/rollout/nginx_ingress.yaml", ingress)).ToNot(HaveOccurred()) + CreateObject(ingress) + // workload + workload := &appsv1alpha1.CloneSet{} + Expect(ReadYamlToObject("./test_data/rollout/cloneset.yaml", workload)).ToNot(HaveOccurred()) + CreateObject(workload) + WaitCloneSetAllPodsReady(workload) + + // check rollout status + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(rollout.Status.Phase).Should(Equal(v1beta1.RolloutPhaseHealthy)) + Expect(rollout.Status.BlueGreenStatus.StableRevision).Should(Equal(workload.Status.CurrentRevision[strings.LastIndex(workload.Status.CurrentRevision, "-")+1:])) + stableRevision := rollout.Status.BlueGreenStatus.StableRevision + By("check rollout status & paused success") + + // v1 -> v2, start rollout action + newEnvs := mergeEnvVar(workload.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "NODE_NAME", Value: "version2"}) + workload.Spec.Template.Spec.Containers[0].Env = newEnvs + UpdateCloneSet(workload) + By("Update cloneSet env NODE_NAME from(version1) -> to(version2)") + time.Sleep(time.Second * 3) + + // wait step 1 complete + By("wait step(1) pause") + WaitRolloutStepPaused(rollout.Name, 1) + // check workload status & paused + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.Replicas).Should(BeNumerically("==", 10)) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Status.UpdatedReadyReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Spec.UpdateStrategy.Paused).Should(BeFalse()) + By("check cloneSet status & paused success") + // check rollout status + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(rollout.Status.Phase).Should(Equal(v1beta1.RolloutPhaseProgressing)) + Expect(rollout.Status.BlueGreenStatus.StableRevision).Should(Equal(stableRevision)) + Expect(rollout.Status.BlueGreenStatus.UpdatedRevision).Should(Equal(workload.Status.UpdateRevision[strings.LastIndex(workload.Status.UpdateRevision, "-")+1:])) + Expect(rollout.Status.BlueGreenStatus.PodTemplateHash).Should(Equal(workload.Status.UpdateRevision[strings.LastIndex(workload.Status.UpdateRevision, "-")+1:])) + canaryRevision := rollout.Status.BlueGreenStatus.PodTemplateHash + Expect(rollout.Status.BlueGreenStatus.CurrentStepIndex).Should(BeNumerically("==", 1)) + Expect(rollout.Status.BlueGreenStatus.NextStepIndex).Should(BeNumerically("==", 2)) + Expect(rollout.Status.BlueGreenStatus.RolloutHash).Should(Equal(rollout.Annotations[util.RolloutHashAnnotation])) + // check stable, canary service & ingress + // stable service + Expect(GetObject(service.Name, service)).NotTo(HaveOccurred()) + Expect(service.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey]).Should(Equal(stableRevision)) + //canary service + cService := &v1.Service{} + Expect(GetObject(service.Name+"-canary", cService)).NotTo(HaveOccurred()) + 
Expect(cService.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey]).Should(Equal(canaryRevision)) + + // wait step 2 complete + By("wait step(2) pause") + ResumeRollout(rollout.Name) + WaitRolloutStepPaused(rollout.Name, 2) + // check workload status & paused + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.Replicas).Should(BeNumerically("==", 10)) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Status.UpdatedReadyReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Spec.UpdateStrategy.Paused).Should(BeFalse()) + By("check cloneSet status & paused success") + // check rollout status + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(rollout.Status.Phase).Should(Equal(v1beta1.RolloutPhaseProgressing)) + Expect(rollout.Status.BlueGreenStatus.StableRevision).Should(Equal(stableRevision)) + Expect(rollout.Status.BlueGreenStatus.UpdatedRevision).Should(Equal(workload.Status.UpdateRevision[strings.LastIndex(workload.Status.UpdateRevision, "-")+1:])) + Expect(rollout.Status.BlueGreenStatus.PodTemplateHash).Should(Equal(workload.Status.UpdateRevision[strings.LastIndex(workload.Status.UpdateRevision, "-")+1:])) + Expect(rollout.Status.BlueGreenStatus.CurrentStepIndex).Should(BeNumerically("==", 2)) + Expect(rollout.Status.BlueGreenStatus.NextStepIndex).Should(BeNumerically("==", 3)) + Expect(rollout.Status.BlueGreenStatus.RolloutHash).Should(Equal(rollout.Annotations[util.RolloutHashAnnotation])) + // if network configuration has restored + cIngress := &netv1.Ingress{} + Expect(GetObject(service.Name+"-canary", cIngress)).NotTo(HaveOccurred()) + Expect(cIngress.Annotations[fmt.Sprintf("%s/canary", nginxIngressAnnotationDefaultPrefix)]).Should(Equal("true")) + Expect(cIngress.Annotations[fmt.Sprintf("%s/canary-weight", nginxIngressAnnotationDefaultPrefix)]).Should(Equal(removePercentageSign(*rollout.Spec.Strategy.BlueGreen.Steps[1].Traffic))) + + // ------ delete rollout ------ + By("delete rollout and check deployment") + k8sClient.Delete(context.TODO(), rollout) + WaitRolloutNotFound(rollout.Name) + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + // check workload annotation + settingStr := workload.Annotations[v1beta1.OriginalDeploymentStrategyAnnotation] + Expect(len(settingStr)).Should(BeNumerically("==", 0)) + // check workload spec + Expect(workload.Spec.UpdateStrategy.Paused).Should(BeFalse()) + Expect(workload.Spec.MinReadySeconds).Should(Equal(int32(0))) + Expect(reflect.DeepEqual(workload.Spec.UpdateStrategy.MaxUnavailable, &intstr.IntOrString{Type: intstr.Int, IntVal: 0})).Should(BeTrue()) + Expect(reflect.DeepEqual(workload.Spec.UpdateStrategy.MaxSurge, &intstr.IntOrString{Type: intstr.Int, IntVal: 1})).Should(BeTrue()) + for _, env := range workload.Spec.Template.Spec.Containers[0].Env { + if env.Name == "NODE_NAME" { + Expect(env.Value).Should(Equal("version2")) + } + } + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 5)) + + // check service & ingress & deployment + // ingress + Expect(GetObject(ingress.Name, ingress)).NotTo(HaveOccurred()) + cIngress = &netv1.Ingress{} + Expect(GetObject(fmt.Sprintf("%s-canary", ingress.Name), cIngress)).To(HaveOccurred()) + // service + Expect(GetObject(service.Name, service)).NotTo(HaveOccurred()) + Expect(service.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey]).Should(Equal("")) + cService = &v1.Service{} + Expect(GetObject(fmt.Sprintf("%s-canary", service.Name), 
cService)).To(HaveOccurred()) + time.Sleep(time.Second * 3) + + // check progressing succeed + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(GetObject(rollout.Name, rollout)).To(HaveOccurred()) + }) + + It("bluegreen disable rollout case for cloneset", func() { + By("Creating Rollout...") + rollout := &v1beta1.Rollout{} + Expect(ReadYamlToObject("./test_data/rollout/rollout_v1beta1_bluegreen_cloneset_base.yaml", rollout)).ToNot(HaveOccurred()) + rollout.Spec.WorkloadRef = v1beta1.ObjectRef{ + APIVersion: "apps.kruise.io/v1alpha1", + Kind: "CloneSet", + Name: "echoserver", + } + CreateObject(rollout) + + By("Creating workload and waiting for all pods ready...") + // service + service := &v1.Service{} + Expect(ReadYamlToObject("./test_data/rollout/service.yaml", service)).ToNot(HaveOccurred()) + CreateObject(service) + // ingress + ingress := &netv1.Ingress{} + Expect(ReadYamlToObject("./test_data/rollout/nginx_ingress.yaml", ingress)).ToNot(HaveOccurred()) + CreateObject(ingress) + // workload + workload := &appsv1alpha1.CloneSet{} + Expect(ReadYamlToObject("./test_data/rollout/cloneset.yaml", workload)).ToNot(HaveOccurred()) + CreateObject(workload) + WaitCloneSetAllPodsReady(workload) + + // check rollout status + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(rollout.Status.Phase).Should(Equal(v1beta1.RolloutPhaseHealthy)) + Expect(rollout.Status.BlueGreenStatus.StableRevision).Should(Equal(workload.Status.CurrentRevision[strings.LastIndex(workload.Status.CurrentRevision, "-")+1:])) + stableRevision := rollout.Status.BlueGreenStatus.StableRevision + By("check rollout status & paused success") + + // v1 -> v2, start rollout action + newEnvs := mergeEnvVar(workload.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "NODE_NAME", Value: "version2"}) + workload.Spec.Template.Spec.Containers[0].Env = newEnvs + UpdateCloneSet(workload) + By("Update cloneSet env NODE_NAME from(version1) -> to(version2)") + time.Sleep(time.Second * 3) + + // wait step 1 complete + By("wait step(1) pause") + WaitRolloutStepPaused(rollout.Name, 1) + // check workload status & paused + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.Replicas).Should(BeNumerically("==", 10)) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Status.UpdatedReadyReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Spec.UpdateStrategy.Paused).Should(BeFalse()) + By("check cloneSet status & paused success") + // check rollout status + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(rollout.Status.Phase).Should(Equal(v1beta1.RolloutPhaseProgressing)) + Expect(rollout.Status.BlueGreenStatus.StableRevision).Should(Equal(stableRevision)) + Expect(rollout.Status.BlueGreenStatus.UpdatedRevision).Should(Equal(workload.Status.UpdateRevision[strings.LastIndex(workload.Status.UpdateRevision, "-")+1:])) + Expect(rollout.Status.BlueGreenStatus.PodTemplateHash).Should(Equal(workload.Status.UpdateRevision[strings.LastIndex(workload.Status.UpdateRevision, "-")+1:])) + canaryRevision := rollout.Status.BlueGreenStatus.PodTemplateHash + Expect(rollout.Status.BlueGreenStatus.CurrentStepIndex).Should(BeNumerically("==", 1)) + Expect(rollout.Status.BlueGreenStatus.NextStepIndex).Should(BeNumerically("==", 2)) + Expect(rollout.Status.BlueGreenStatus.RolloutHash).Should(Equal(rollout.Annotations[util.RolloutHashAnnotation])) + // 
check stable, canary service & ingress + // stable service + Expect(GetObject(service.Name, service)).NotTo(HaveOccurred()) + Expect(service.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey]).Should(Equal(stableRevision)) + //canary service + cService := &v1.Service{} + Expect(GetObject(service.Name+"-canary", cService)).NotTo(HaveOccurred()) + Expect(cService.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey]).Should(Equal(canaryRevision)) + + // wait step 2 complete + By("wait step(2) pause") + ResumeRollout(rollout.Name) + WaitRolloutStepPaused(rollout.Name, 2) + // check workload status & paused + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.Replicas).Should(BeNumerically("==", 10)) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Status.UpdatedReadyReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Spec.UpdateStrategy.Paused).Should(BeFalse()) + By("check cloneSet status & paused success") + // check rollout status + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(rollout.Status.Phase).Should(Equal(v1beta1.RolloutPhaseProgressing)) + Expect(rollout.Status.BlueGreenStatus.StableRevision).Should(Equal(stableRevision)) + Expect(rollout.Status.BlueGreenStatus.UpdatedRevision).Should(Equal(workload.Status.UpdateRevision[strings.LastIndex(workload.Status.UpdateRevision, "-")+1:])) + Expect(rollout.Status.BlueGreenStatus.PodTemplateHash).Should(Equal(workload.Status.UpdateRevision[strings.LastIndex(workload.Status.UpdateRevision, "-")+1:])) + Expect(rollout.Status.BlueGreenStatus.CurrentStepIndex).Should(BeNumerically("==", 2)) + Expect(rollout.Status.BlueGreenStatus.NextStepIndex).Should(BeNumerically("==", 3)) + Expect(rollout.Status.BlueGreenStatus.RolloutHash).Should(Equal(rollout.Annotations[util.RolloutHashAnnotation])) + // if network configuration has restored + cIngress := &netv1.Ingress{} + Expect(GetObject(service.Name+"-canary", cIngress)).NotTo(HaveOccurred()) + Expect(cIngress.Annotations[fmt.Sprintf("%s/canary", nginxIngressAnnotationDefaultPrefix)]).Should(Equal("true")) + Expect(cIngress.Annotations[fmt.Sprintf("%s/canary-weight", nginxIngressAnnotationDefaultPrefix)]).Should(Equal(removePercentageSign(*rollout.Spec.Strategy.BlueGreen.Steps[1].Traffic))) + + By("disable rollout and check cloneSet") + rollout.Spec.Disabled = true + UpdateRollout(rollout) + WaitRolloutStatusPhase(rollout.Name, v1beta1.RolloutPhaseDisabled) + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + // check workload annotation + settingStr := workload.Annotations[v1beta1.OriginalDeploymentStrategyAnnotation] + Expect(len(settingStr)).Should(BeNumerically("==", 0)) + // check workload spec + Expect(workload.Spec.UpdateStrategy.Paused).Should(BeFalse()) + Expect(workload.Spec.MinReadySeconds).Should(Equal(int32(0))) + Expect(reflect.DeepEqual(workload.Spec.UpdateStrategy.MaxUnavailable, &intstr.IntOrString{Type: intstr.Int, IntVal: 0})).Should(BeTrue()) + Expect(reflect.DeepEqual(workload.Spec.UpdateStrategy.MaxSurge, &intstr.IntOrString{Type: intstr.Int, IntVal: 1})).Should(BeTrue()) + for _, env := range workload.Spec.Template.Spec.Containers[0].Env { + if env.Name == "NODE_NAME" { + Expect(env.Value).Should(Equal("version2")) + } + } +
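// The assertions above verify that disabling the Rollout behaves like deleting it: the original update +
// strategy recorded in OriginalDeploymentStrategyAnnotation while the release was in progress has been +
// written back to the CloneSet and the annotation itself removed. +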
Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 5)) + + // check service & ingress & deployment + // ingress + Expect(GetObject(ingress.Name, ingress)).NotTo(HaveOccurred()) + cIngress = &netv1.Ingress{} + Expect(GetObject(fmt.Sprintf("%s-canary", ingress.Name), cIngress)).To(HaveOccurred()) + // service + Expect(GetObject(service.Name, service)).NotTo(HaveOccurred()) + Expect(service.Spec.Selector[apps.DefaultDeploymentUniqueLabelKey]).Should(Equal("")) + cService = &v1.Service{} + Expect(GetObject(fmt.Sprintf("%s-canary", service.Name), cService)).To(HaveOccurred()) + time.Sleep(time.Second * 3) + }) + }) + + KruiseDescribe("CloneSet canary rollout with Ingress", func() { + It("CloneSet V1->V2: Percentage, 20%,60% Succeeded", func() { + By("Creating Rollout...") + rollout := &v1beta1.Rollout{} + Expect(ReadYamlToObject("./test_data/rollout/rollout_v1beta1_partition_base.yaml", rollout)).ToNot(HaveOccurred()) + rollout.Spec.Strategy.Canary.Steps = []v1beta1.CanaryStep{ + { + TrafficRoutingStrategy: v1beta1.TrafficRoutingStrategy{ + Traffic: utilpointer.String("20%"), + }, + Replicas: &intstr.IntOrString{Type: intstr.String, StrVal: "20%"}, + Pause: v1beta1.RolloutPause{}, + }, + { + Replicas: &intstr.IntOrString{Type: intstr.String, StrVal: "60%"}, + Pause: v1beta1.RolloutPause{}, + }, + } + rollout.Spec.WorkloadRef = v1beta1.ObjectRef{ + APIVersion: "apps.kruise.io/v1alpha1", + Kind: "CloneSet", + Name: "echoserver", + } + CreateObject(rollout) + + By("Creating workload and waiting for all pods ready...") + // service + service := &v1.Service{} + Expect(ReadYamlToObject("./test_data/rollout/service.yaml", service)).ToNot(HaveOccurred()) + CreateObject(service) + // ingress + ingress := &netv1.Ingress{} + Expect(ReadYamlToObject("./test_data/rollout/nginx_ingress.yaml", ingress)).ToNot(HaveOccurred()) + CreateObject(ingress) + // workload + workload := &appsv1alpha1.CloneSet{} + Expect(ReadYamlToObject("./test_data/rollout/cloneset.yaml", workload)).ToNot(HaveOccurred()) + CreateObject(workload) + WaitCloneSetAllPodsReady(workload) + + // check rollout status + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(rollout.Status.Phase).Should(Equal(v1beta1.RolloutPhaseHealthy)) + Expect(rollout.Status.CanaryStatus.StableRevision).Should(Equal(workload.Status.CurrentRevision[strings.LastIndex(workload.Status.CurrentRevision, "-")+1:])) + stableRevision := rollout.Status.CanaryStatus.StableRevision + By("check rollout status & paused success") + + // v1 -> v2, start rollout action + newEnvs := mergeEnvVar(workload.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "NODE_NAME", Value: "version2"}) + workload.Spec.Template.Spec.Containers[0].Env = newEnvs + UpdateCloneSet(workload) + By("Update cloneSet env NODE_NAME from(version1) -> to(version2)") + // wait step 1 complete + WaitRolloutStepPaused(rollout.Name, 1) + + // check workload status & paused + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 1)) + Expect(workload.Status.UpdatedReadyReplicas).Should(BeNumerically("==", 1)) + Expect(workload.Spec.UpdateStrategy.Paused).Should(BeFalse()) + By("check cloneSet status & paused success") + + // check rollout status + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(rollout.Status.Phase).Should(Equal(v1beta1.RolloutPhaseProgressing)) + 
Expect(rollout.Status.CanaryStatus.StableRevision).Should(Equal(stableRevision)) + Expect(rollout.Status.CanaryStatus.CanaryRevision).Should(Equal(workload.Status.UpdateRevision[strings.LastIndex(workload.Status.UpdateRevision, "-")+1:])) + Expect(rollout.Status.CanaryStatus.PodTemplateHash).Should(Equal(workload.Status.UpdateRevision[strings.LastIndex(workload.Status.UpdateRevision, "-")+1:])) + canaryRevision := rollout.Status.CanaryStatus.PodTemplateHash + Expect(rollout.Status.CanaryStatus.CurrentStepIndex).Should(BeNumerically("==", 1)) + Expect(rollout.Status.CanaryStatus.RolloutHash).Should(Equal(rollout.Annotations[util.RolloutHashAnnotation])) + // check stable, canary service & ingress + CheckIngressConfigured(&trafficContext{ + stableRevision: stableRevision, + canaryRevision: canaryRevision, + service: service, + }, &rollout.Spec.Strategy.Canary.Steps[0]) + + // resume rollout canary + ResumeRollout(rollout.Name) + By("resume rollout, and wait next step(2)") + WaitRolloutStepPaused(rollout.Name, 2) + + // check stable, canary service & ingress + CheckIngressRestored(service.Name) + // cloneset + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 3)) + Expect(workload.Status.UpdatedReadyReplicas).Should(BeNumerically("==", 3)) + Expect(workload.Spec.UpdateStrategy.Paused).Should(BeFalse()) + + // resume rollout + ResumeRollout(rollout.Name) + WaitRolloutStatusPhase(rollout.Name, v1beta1.RolloutPhaseHealthy) + WaitCloneSetAllPodsReady(workload) + By("rollout completed, and check") + + // check if network configuration has restored + CheckIngressRestored(service.Name) + // cloneset + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Status.UpdatedReadyReplicas).Should(BeNumerically("==", 5)) + Expect(workload.Spec.UpdateStrategy.Partition.IntVal).Should(BeNumerically("==", 0)) + Expect(workload.Spec.UpdateStrategy.Paused).Should(BeFalse()) + Expect(workload.Status.CurrentRevision).Should(ContainSubstring(canaryRevision)) + Expect(workload.Status.UpdateRevision).Should(ContainSubstring(canaryRevision)) + for _, env := range workload.Spec.Template.Spec.Containers[0].Env { + if env.Name == "NODE_NAME" { + Expect(env.Value).Should(Equal("version2")) + } + } + time.Sleep(time.Second * 3) + + // check progressing succeed + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + cond := getRolloutCondition(rollout.Status, v1beta1.RolloutConditionProgressing) + Expect(cond.Reason).Should(Equal(v1beta1.ProgressingReasonCompleted)) + Expect(string(cond.Status)).Should(Equal(string(metav1.ConditionFalse))) + cond = getRolloutCondition(rollout.Status, v1beta1.RolloutConditionSucceeded) + Expect(string(cond.Status)).Should(Equal(string(metav1.ConditionTrue))) + WaitRolloutWorkloadGeneration(rollout.Name, workload.Generation) + //Expect(rollout.Status.CanaryStatus.StableRevision).Should(Equal(canaryRevision)) + + // scale up replicas 5 -> 6 + workload.Spec.Replicas = utilpointer.Int32(6) + UpdateCloneSet(workload) + By("Update cloneSet replicas from(5) -> to(6)") + time.Sleep(time.Second * 2) + + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + WaitRolloutWorkloadGeneration(rollout.Name, workload.Generation) + }) + + It("V1->V2: Percentage, 20%, and 
rollback(v1)", func() { + By("Creating Rollout...") + rollout := &v1beta1.Rollout{} + Expect(ReadYamlToObject("./test_data/rollout/rollout_v1beta1_partition_base.yaml", rollout)).ToNot(HaveOccurred()) + rollout.Spec.WorkloadRef = v1beta1.ObjectRef{ + APIVersion: "apps.kruise.io/v1alpha1", + Kind: "CloneSet", + Name: "echoserver", + } + CreateObject(rollout) + + By("Creating workload and waiting for all pods ready...") + // service + service := &v1.Service{} + Expect(ReadYamlToObject("./test_data/rollout/service.yaml", service)).ToNot(HaveOccurred()) + CreateObject(service) + // ingress + ingress := &netv1.Ingress{} + Expect(ReadYamlToObject("./test_data/rollout/nginx_ingress.yaml", ingress)).ToNot(HaveOccurred()) + CreateObject(ingress) + // workload + workload := &appsv1alpha1.CloneSet{} + Expect(ReadYamlToObject("./test_data/rollout/cloneset.yaml", workload)).ToNot(HaveOccurred()) + CreateObject(workload) + WaitCloneSetAllPodsReady(workload) + + // check rollout status + // check rollout status + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(rollout.Status.Phase).Should(Equal(v1beta1.RolloutPhaseHealthy)) + Expect(rollout.Status.CanaryStatus.StableRevision).Should(Equal(workload.Status.CurrentRevision[strings.LastIndex(workload.Status.CurrentRevision, "-")+1:])) + stableRevision := rollout.Status.CanaryStatus.StableRevision + By("check rollout status & paused success") + + // v1 -> v2, start rollout action + newEnvs := mergeEnvVar(workload.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "NODE_NAME", Value: "version2"}) + workload.Spec.Template.Spec.Containers[0].Image = "echoserver:failed" + workload.Spec.Template.Spec.Containers[0].Env = newEnvs + UpdateCloneSet(workload) + By("Update cloneSet env NODE_NAME from(version1) -> to(version2)") + // wait step 1 complete + time.Sleep(time.Second * 20) + + // check workload status & paused + Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) + Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 1)) + Expect(workload.Status.UpdatedReadyReplicas).Should(BeNumerically("==", 0)) + Expect(workload.Spec.UpdateStrategy.Paused).Should(BeFalse()) + By("check cloneSet status & paused success") + + // check rollout status + Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) + Expect(rollout.Status.Phase).Should(Equal(v1beta1.RolloutPhaseProgressing)) + Expect(rollout.Status.CanaryStatus.StableRevision).Should(Equal(stableRevision)) Expect(rollout.Status.CanaryStatus.CanaryRevision).Should(Equal(workload.Status.UpdateRevision[strings.LastIndex(workload.Status.UpdateRevision, "-")+1:])) Expect(rollout.Status.CanaryStatus.PodTemplateHash).Should(Equal(workload.Status.UpdateRevision[strings.LastIndex(workload.Status.UpdateRevision, "-")+1:])) Expect(rollout.Status.CanaryStatus.CurrentStepIndex).Should(BeNumerically("==", 1)) @@ -1671,7 +4073,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { Expect(rollout.Status.CanaryStatus.RolloutHash).Should(Equal(rollout.Annotations[util.RolloutHashAnnotation])) // resume rollout canary - ResumeRolloutCanary(rollout.Name) + ResumeRollout(rollout.Name) time.Sleep(time.Second * 15) // rollback -> v1 @@ -1753,7 +4155,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { UpdateCloneSet(workload) By("Update cloneSet env NODE_NAME from(version1) -> to(version2)") // wait step 1 complete - WaitRolloutCanaryStepPaused(rollout.Name, 1) + WaitRolloutStepPaused(rollout.Name, 1) // check 
workload status & paused Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) @@ -1774,7 +4176,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { Expect(rollout.Status.CanaryStatus.RolloutHash).Should(Equal(rollout.Annotations[util.RolloutHashAnnotation])) // resume rollout canary - ResumeRolloutCanary(rollout.Name) + ResumeRollout(rollout.Name) time.Sleep(time.Second * 15) // v1 -> v2 -> v3, continuous release @@ -1785,7 +4187,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { time.Sleep(time.Second * 10) // wait step 0 complete - WaitRolloutCanaryStepPaused(rollout.Name, 1) + WaitRolloutStepPaused(rollout.Name, 1) // check rollout status Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) @@ -1805,7 +4207,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { service: service, }, &rollout.Spec.Strategy.Canary.Steps[0]) // resume rollout canary - ResumeRolloutCanary(rollout.Name) + ResumeRollout(rollout.Name) By("check rollout canary status success, resume rollout, and wait rollout canary complete") WaitRolloutStatusPhase(rollout.Name, v1beta1.RolloutPhaseHealthy) WaitCloneSetAllPodsReady(workload) @@ -1873,7 +4275,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { UpdateCloneSet(workload) By("Update cloneSet env NODE_NAME from(version1) -> to(version2)") // wait step 1 complete - WaitRolloutCanaryStepPaused(rollout.Name, 1) + WaitRolloutStepPaused(rollout.Name, 1) // check workload status & paused Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) @@ -1895,17 +4297,17 @@ var _ = SIGDescribe("Rollout v1beta1", func() { // v1 -> v2 -> v1, continuous release By("Update cloneSet env NODE_NAME from(version2) -> to(version1)") // resume rollout canary - ResumeRolloutCanary(rollout.Name) - WaitRolloutCanaryStepPaused(rollout.Name, 3) + ResumeRollout(rollout.Name) + WaitRolloutStepPaused(rollout.Name, 3) newEnvs = mergeEnvVar(workload.Spec.Template.Spec.Containers[0].Env, v1.EnvVar{Name: "NODE_NAME", Value: "version1"}) workload.Spec.Template.Spec.Containers[0].Env = newEnvs UpdateCloneSet(workload) // make sure CloneSet is rolling back in batch By("Wait step 1 paused") - WaitRolloutCanaryStepPaused(rollout.Name, 1) + WaitRolloutStepPaused(rollout.Name, 1) By("Wait step 2 paused") - ResumeRolloutCanary(rollout.Name) + ResumeRollout(rollout.Name) By("check rollout canary status success, resume rollout, and wait rollout canary complete") WaitRolloutStatusPhase(rollout.Name, v1beta1.RolloutPhaseHealthy) WaitCloneSetAllPodsReady(workload) @@ -1981,7 +4383,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { UpdateCloneSet(workload) By("Update cloneSet env NODE_NAME from(version1) -> to(version2)") // wait step 1 complete - WaitRolloutCanaryStepPaused(rollout.Name, 1) + WaitRolloutStepPaused(rollout.Name, 1) // check workload status & paused Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) @@ -2001,7 +4403,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { Expect(rollout.Status.CanaryStatus.RolloutHash).Should(Equal(rollout.Annotations[util.RolloutHashAnnotation])) // resume rollout - ResumeRolloutCanary(rollout.Name) + ResumeRollout(rollout.Name) WaitRolloutStatusPhase(rollout.Name, v1beta1.RolloutPhaseHealthy) WaitCloneSetAllPodsReady(workload) By("rollout completed, and check") @@ -2091,7 +4493,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { UpdateDeployment(workload) By("Update cloneSet env NODE_NAME from(version1) -> to(version2)") // wait step 1 complete - 
WaitRolloutCanaryStepPaused(rollout.Name, 1) + WaitRolloutStepPaused(rollout.Name, 1) stableRevision := GetStableRSRevision(workload) By(stableRevision) Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) @@ -2123,9 +4525,9 @@ var _ = SIGDescribe("Rollout v1beta1", func() { service: service, }, &rollout.Spec.Strategy.Canary.Steps[0]) // resume rollout canary - ResumeRolloutCanary(rollout.Name) + ResumeRollout(rollout.Name) By("resume rollout, and wait next step(2)") - WaitRolloutCanaryStepPaused(rollout.Name, 2) + WaitRolloutStepPaused(rollout.Name, 2) // check stable, canary service & ingress CheckIngressRestored(service.Name) @@ -2138,7 +4540,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { Expect(strategy.Paused).Should(BeFalse()) // resume rollout - ResumeRolloutCanary(rollout.Name) + ResumeRollout(rollout.Name) WaitRolloutStatusPhase(rollout.Name, v1beta1.RolloutPhaseHealthy) WaitDeploymentAllPodsReady(workload) By("rollout completed, and check") @@ -2148,7 +4550,6 @@ var _ = SIGDescribe("Rollout v1beta1", func() { // cloneset Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 5)) - Expect(workload.Status.AvailableReplicas).Should(BeNumerically("==", 5)) for _, env := range workload.Spec.Template.Spec.Containers[0].Env { if env.Name == "NODE_NAME" { Expect(env.Value).Should(Equal("version2")) @@ -2226,7 +4627,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { UpdateDeployment(workload) // wait step 1 complete - WaitRolloutCanaryStepPaused(rollout.Name, 1) + WaitRolloutStepPaused(rollout.Name, 1) stableRevision := GetStableRSRevision(workload) Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) Expect(rollout.Status.CanaryStatus.StableRevision).Should(Equal(stableRevision)) @@ -2241,9 +4642,9 @@ var _ = SIGDescribe("Rollout v1beta1", func() { By("check workload status & paused success") // resume rollout canary - ResumeRolloutCanary(rollout.Name) + ResumeRollout(rollout.Name) By("resume rollout, and wait next step(2)") - WaitRolloutCanaryStepPaused(rollout.Name, 2) + WaitRolloutStepPaused(rollout.Name, 2) Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 3)) @@ -2257,7 +4658,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { workload.Spec.Template.Spec.Containers[0].Env = newEnvs UpdateDeployment(workload) - WaitRolloutCanaryStepPaused(rollout.Name, 1) + WaitRolloutStepPaused(rollout.Name, 1) stableRevision = workload.Labels[v1beta1.DeploymentStableRevisionLabel] Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) Expect(rollout.Status.CanaryStatus.StableRevision).Should(Equal(stableRevision)) @@ -2272,9 +4673,9 @@ var _ = SIGDescribe("Rollout v1beta1", func() { By("check workload status & paused success") // resume rollout canary - ResumeRolloutCanary(rollout.Name) + ResumeRollout(rollout.Name) By("resume rollout, and wait next step(2)") - WaitRolloutCanaryStepPaused(rollout.Name, 2) + WaitRolloutStepPaused(rollout.Name, 2) Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 3)) @@ -2332,7 +4733,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { UpdateDeployment(workload) // wait step 1 complete - WaitRolloutCanaryStepPaused(rollout.Name, 1) + WaitRolloutStepPaused(rollout.Name, 1) stableRevision := GetStableRSRevision(workload) Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) 
Expect(rollout.Status.CanaryStatus.StableRevision).Should(Equal(stableRevision)) @@ -2347,9 +4748,9 @@ var _ = SIGDescribe("Rollout v1beta1", func() { By("check workload status & paused success") // resume rollout canary - ResumeRolloutCanary(rollout.Name) + ResumeRollout(rollout.Name) By("resume rollout, and wait next step(2)") - WaitRolloutCanaryStepPaused(rollout.Name, 2) + WaitRolloutStepPaused(rollout.Name, 2) Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) Expect(workload.Status.UpdatedReplicas).Should(BeNumerically("==", 3)) @@ -2415,7 +4816,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { UpdateDeployment(workload) // wait step 1 complete - WaitRolloutCanaryStepPaused(rollout.Name, 1) + WaitRolloutStepPaused(rollout.Name, 1) stableRevision := GetStableRSRevision(workload) Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) Expect(rollout.Status.CanaryStatus.StableRevision).Should(Equal(stableRevision)) @@ -2486,7 +4887,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { UpdateDeployment(workload) // wait step 1 complete - WaitRolloutCanaryStepPaused(rollout.Name, 1) + WaitRolloutStepPaused(rollout.Name, 1) stableRevision := GetStableRSRevision(workload) Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) Expect(rollout.Status.CanaryStatus.StableRevision).Should(Equal(stableRevision)) @@ -2519,9 +4920,9 @@ var _ = SIGDescribe("Rollout v1beta1", func() { }, 5*time.Minute, time.Second).Should(BeTrue()) By("rolling deployment to be completed") - ResumeRolloutCanary(rollout.Name) - WaitRolloutCanaryStepPaused(rollout.Name, 2) - ResumeRolloutCanary(rollout.Name) + ResumeRollout(rollout.Name) + WaitRolloutStepPaused(rollout.Name, 2) + ResumeRollout(rollout.Name) WaitDeploymentAllPodsReady(workload) }) }) @@ -2566,17 +4967,17 @@ var _ = SIGDescribe("Rollout v1beta1", func() { // wait step 1 complete By("wait step(1) pause") - WaitRolloutCanaryStepPaused(rollout.Name, 1) + WaitRolloutStepPaused(rollout.Name, 1) CheckPodBatchLabel(workload.Namespace, workload.Spec.Selector, "1", "1", 1) // resume rollout canary - ResumeRolloutCanary(rollout.Name) + ResumeRollout(rollout.Name) By("resume rollout, and wait next step(2)") - WaitRolloutCanaryStepPaused(rollout.Name, 2) + WaitRolloutStepPaused(rollout.Name, 2) CheckPodBatchLabel(workload.Namespace, workload.Spec.Selector, "1", "2", 1) // resume rollout - ResumeRolloutCanary(rollout.Name) + ResumeRollout(rollout.Name) By("check rollout canary status success, resume rollout, and wait rollout canary complete") WaitRolloutStatusPhase(rollout.Name, v1beta1.RolloutPhaseHealthy) WaitAdvancedStatefulSetPodsReady(workload) @@ -2652,7 +5053,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { // wait step 2 complete By("wait step(2) pause") - WaitRolloutCanaryStepPaused(rollout.Name, 2) + WaitRolloutStepPaused(rollout.Name, 2) CheckPodBatchLabel(workload.Namespace, workload.Spec.Selector, "1", "1", 1) CheckPodBatchLabel(workload.Namespace, workload.Spec.Selector, "1", "2", 1) @@ -2674,7 +5075,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { // resume rollout canary By("check rollout canary status success, resume rollout, and wait rollout canary complete") - ResumeRolloutCanary(rollout.Name) + ResumeRollout(rollout.Name) WaitRolloutStatusPhase(rollout.Name, v1beta1.RolloutPhaseHealthy) WaitCloneSetAllPodsReady(workload) @@ -2737,17 +5138,17 @@ var _ = SIGDescribe("Rollout v1beta1", func() { UpdateCloneSet(workload) By("wait step(1) pause") - WaitRolloutCanaryStepPaused(rollout.Name, 1) + 
WaitRolloutStepPaused(rollout.Name, 1) CheckPodBatchLabel(workload.Namespace, workload.Spec.Selector, "1", "1", 1) By("wait step(2) pause") - ResumeRolloutCanary(rollout.Name) - WaitRolloutCanaryStepPaused(rollout.Name, 2) + ResumeRollout(rollout.Name) + WaitRolloutStepPaused(rollout.Name, 2) CheckPodBatchLabel(workload.Namespace, workload.Spec.Selector, "1", "2", 1) By("wait step(3) pause") - ResumeRolloutCanary(rollout.Name) - WaitRolloutCanaryStepPaused(rollout.Name, 3) + ResumeRollout(rollout.Name) + WaitRolloutStepPaused(rollout.Name, 3) CheckPodBatchLabel(workload.Namespace, workload.Spec.Selector, "1", "1", 1) CheckPodBatchLabel(workload.Namespace, workload.Spec.Selector, "1", "2", 1) CheckPodBatchLabel(workload.Namespace, workload.Spec.Selector, "1", "3", 1) @@ -2762,26 +5163,26 @@ var _ = SIGDescribe("Rollout v1beta1", func() { // make sure disable quickly rollback policy By("Wait step (1) paused") - WaitRolloutCanaryStepPaused(rollout.Name, 1) + WaitRolloutStepPaused(rollout.Name, 1) CheckPodBatchLabel(workload.Namespace, workload.Spec.Selector, "2", "1", 1) By("wait step(2) pause") - ResumeRolloutCanary(rollout.Name) - WaitRolloutCanaryStepPaused(rollout.Name, 2) + ResumeRollout(rollout.Name) + WaitRolloutStepPaused(rollout.Name, 2) CheckPodBatchLabel(workload.Namespace, workload.Spec.Selector, "2", "2", 1) By("wait step(3) pause") - ResumeRolloutCanary(rollout.Name) - WaitRolloutCanaryStepPaused(rollout.Name, 3) + ResumeRollout(rollout.Name) + WaitRolloutStepPaused(rollout.Name, 3) CheckPodBatchLabel(workload.Namespace, workload.Spec.Selector, "2", "3", 1) By("wait step(4) pause") - ResumeRolloutCanary(rollout.Name) - WaitRolloutCanaryStepPaused(rollout.Name, 4) + ResumeRollout(rollout.Name) + WaitRolloutStepPaused(rollout.Name, 4) CheckPodBatchLabel(workload.Namespace, workload.Spec.Selector, "2", "4", 1) By("Wait rollout complete") - ResumeRolloutCanary(rollout.Name) + ResumeRollout(rollout.Name) WaitRolloutStatusPhase(rollout.Name, v1beta1.RolloutPhaseHealthy) CheckPodBatchLabel(workload.Namespace, workload.Spec.Selector, "2", "1", 1) CheckPodBatchLabel(workload.Namespace, workload.Spec.Selector, "2", "2", 1) @@ -2840,17 +5241,17 @@ var _ = SIGDescribe("Rollout v1beta1", func() { UpdateCloneSet(workload) By("wait step(1) pause") - WaitRolloutCanaryStepPaused(rollout.Name, 1) + WaitRolloutStepPaused(rollout.Name, 1) CheckPodBatchLabel(workload.Namespace, workload.Spec.Selector, "1", "1", 1) By("wait step(2) pause") - ResumeRolloutCanary(rollout.Name) - WaitRolloutCanaryStepPaused(rollout.Name, 2) + ResumeRollout(rollout.Name) + WaitRolloutStepPaused(rollout.Name, 2) CheckPodBatchLabel(workload.Namespace, workload.Spec.Selector, "1", "2", 1) By("wait step(3) pause") - ResumeRolloutCanary(rollout.Name) - WaitRolloutCanaryStepPaused(rollout.Name, 3) + ResumeRollout(rollout.Name) + WaitRolloutStepPaused(rollout.Name, 3) CheckPodBatchLabel(workload.Namespace, workload.Spec.Selector, "1", "3", 1) By("Only update rollout id = '2', and check batch label again") @@ -2858,19 +5259,19 @@ var _ = SIGDescribe("Rollout v1beta1", func() { UpdateCloneSet(workload) By("wait step(3) pause again") - WaitRolloutCanaryStepPaused(rollout.Name, 3) + WaitRolloutStepPaused(rollout.Name, 3) time.Sleep(30 * time.Second) CheckPodBatchLabel(workload.Namespace, workload.Spec.Selector, "2", "1", 1) CheckPodBatchLabel(workload.Namespace, workload.Spec.Selector, "2", "2", 1) CheckPodBatchLabel(workload.Namespace, workload.Spec.Selector, "2", "3", 1) By("wait step(4) pause") - 
ResumeRolloutCanary(rollout.Name) - WaitRolloutCanaryStepPaused(rollout.Name, 4) + ResumeRollout(rollout.Name) + WaitRolloutStepPaused(rollout.Name, 4) CheckPodBatchLabel(workload.Namespace, workload.Spec.Selector, "2", "4", 1) By("Wait rollout complete") - ResumeRolloutCanary(rollout.Name) + ResumeRollout(rollout.Name) WaitRolloutStatusPhase(rollout.Name, v1beta1.RolloutPhaseHealthy) CheckPodBatchLabel(workload.Namespace, workload.Spec.Selector, "2", "1", 1) CheckPodBatchLabel(workload.Namespace, workload.Spec.Selector, "2", "2", 1) @@ -2929,7 +5330,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { UpdateCloneSet(workload) By("wait step(1) pause") - WaitRolloutCanaryStepPaused(rollout.Name, 1) + WaitRolloutStepPaused(rollout.Name, 1) CheckPodBatchLabel(workload.Namespace, workload.Spec.Selector, "1", "1", 1) By("Only update rollout id = '2', and check batch label again") @@ -2940,26 +5341,26 @@ var _ = SIGDescribe("Rollout v1beta1", func() { time.Sleep(30 * time.Second) By("wait step(1) pause") - WaitRolloutCanaryStepPaused(rollout.Name, 1) + WaitRolloutStepPaused(rollout.Name, 1) CheckPodBatchLabel(workload.Namespace, workload.Spec.Selector, "2", "1", 1) By("wait step(2) pause") - ResumeRolloutCanary(rollout.Name) - WaitRolloutCanaryStepPaused(rollout.Name, 2) + ResumeRollout(rollout.Name) + WaitRolloutStepPaused(rollout.Name, 2) CheckPodBatchLabel(workload.Namespace, workload.Spec.Selector, "2", "2", 1) By("wait step(3) pause") - ResumeRolloutCanary(rollout.Name) - WaitRolloutCanaryStepPaused(rollout.Name, 3) + ResumeRollout(rollout.Name) + WaitRolloutStepPaused(rollout.Name, 3) CheckPodBatchLabel(workload.Namespace, workload.Spec.Selector, "2", "3", 1) By("wait step(4) pause") - ResumeRolloutCanary(rollout.Name) - WaitRolloutCanaryStepPaused(rollout.Name, 4) + ResumeRollout(rollout.Name) + WaitRolloutStepPaused(rollout.Name, 4) CheckPodBatchLabel(workload.Namespace, workload.Spec.Selector, "2", "4", 1) By("Wait rollout complete") - ResumeRolloutCanary(rollout.Name) + ResumeRollout(rollout.Name) WaitRolloutStatusPhase(rollout.Name, v1beta1.RolloutPhaseHealthy) CheckPodBatchLabel(workload.Namespace, workload.Spec.Selector, "2", "1", 1) CheckPodBatchLabel(workload.Namespace, workload.Spec.Selector, "2", "2", 1) @@ -3028,7 +5429,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { workload.Spec.Template.Spec.Containers[0].Env = newEnvs UpdateNativeStatefulSet(workload) // wait step 1 complete - WaitRolloutCanaryStepPaused(rollout.Name, 1) + WaitRolloutStepPaused(rollout.Name, 1) // check workload status & paused Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) @@ -3054,9 +5455,9 @@ var _ = SIGDescribe("Rollout v1beta1", func() { selectorKey: apps.ControllerRevisionHashLabelKey, }, &rollout.Spec.Strategy.Canary.Steps[0]) // resume rollout canary - ResumeRolloutCanary(rollout.Name) + ResumeRollout(rollout.Name) By("resume rollout, and wait next step(2)") - WaitRolloutCanaryStepPaused(rollout.Name, 2) + WaitRolloutStepPaused(rollout.Name, 2) // check stable, canary service & ingress CheckIngressRestored(service.Name) @@ -3067,7 +5468,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { Expect(*workload.Spec.UpdateStrategy.RollingUpdate.Partition).Should(BeNumerically("==", *workload.Spec.Replicas-3)) // resume rollout - ResumeRolloutCanary(rollout.Name) + ResumeRollout(rollout.Name) WaitRolloutStatusPhase(rollout.Name, v1beta1.RolloutPhaseHealthy) WaitNativeStatefulSetPodsReady(workload) By("rollout completed, and check") @@ -3152,7 +5553,7 @@ var _ = SIGDescribe("Rollout 
v1beta1", func() { UpdateNativeStatefulSet(workload) By("Update cloneSet env NODE_NAME from(version1) -> to(version2)") // wait step 1 complete - WaitRolloutCanaryStepPaused(rollout.Name, 1) + WaitRolloutStepPaused(rollout.Name, 1) // check workload status & paused Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) @@ -3171,7 +5572,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { Expect(rollout.Status.CanaryStatus.RolloutHash).Should(Equal(rollout.Annotations[util.RolloutHashAnnotation])) // resume rollout canary - ResumeRolloutCanary(rollout.Name) + ResumeRollout(rollout.Name) time.Sleep(time.Second * 15) // v1 -> v2 -> v3, continuous release @@ -3182,7 +5583,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { time.Sleep(time.Second * 10) // wait step 0 complete - WaitRolloutCanaryStepPaused(rollout.Name, 1) + WaitRolloutStepPaused(rollout.Name, 1) // check rollout status Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) @@ -3202,7 +5603,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { selectorKey: apps.ControllerRevisionHashLabelKey, }, &rollout.Spec.Strategy.Canary.Steps[0]) // resume rollout canary - ResumeRolloutCanary(rollout.Name) + ResumeRollout(rollout.Name) By("check rollout canary status success, resume rollout, and wait rollout canary complete") WaitRolloutStatusPhase(rollout.Name, v1beta1.RolloutPhaseHealthy) WaitNativeStatefulSetPodsReady(workload) @@ -3297,7 +5698,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { Expect(rollout.Status.CanaryStatus.RolloutHash).Should(Equal(rollout.Annotations[util.RolloutHashAnnotation])) // resume rollout canary - ResumeRolloutCanary(rollout.Name) + ResumeRollout(rollout.Name) time.Sleep(time.Second * 15) // rollback -> v1 @@ -3386,7 +5787,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { UpdateNativeStatefulSet(workload) By("Update cloneSet env NODE_NAME from(version1) -> to(version2)") // wait step 1 complete - WaitRolloutCanaryStepPaused(rollout.Name, 1) + WaitRolloutStepPaused(rollout.Name, 1) // check workload status & paused Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) @@ -3404,7 +5805,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { Expect(rollout.Status.CanaryStatus.RolloutHash).Should(Equal(rollout.Annotations[util.RolloutHashAnnotation])) // resume rollout - ResumeRolloutCanary(rollout.Name) + ResumeRollout(rollout.Name) WaitRolloutStatusPhase(rollout.Name, v1beta1.RolloutPhaseHealthy) WaitNativeStatefulSetPodsReady(workload) By("rollout completed, and check") @@ -3496,7 +5897,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { workload.Spec.Template.Spec.Containers[0].Env = newEnvs UpdateAdvancedStatefulSet(workload) // wait step 1 complete - WaitRolloutCanaryStepPaused(rollout.Name, 1) + WaitRolloutStepPaused(rollout.Name, 1) // check workload status & paused Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) @@ -3522,9 +5923,9 @@ var _ = SIGDescribe("Rollout v1beta1", func() { selectorKey: apps.ControllerRevisionHashLabelKey, }, &rollout.Spec.Strategy.Canary.Steps[0]) // resume rollout canary - ResumeRolloutCanary(rollout.Name) + ResumeRollout(rollout.Name) By("resume rollout, and wait next step(2)") - WaitRolloutCanaryStepPaused(rollout.Name, 2) + WaitRolloutStepPaused(rollout.Name, 2) // check stable, canary service & ingress CheckIngressRestored(service.Name) @@ -3536,7 +5937,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { 
Expect(*workload.Spec.UpdateStrategy.RollingUpdate.Partition).Should(BeNumerically("==", *workload.Spec.Replicas-3)) // resume rollout - ResumeRolloutCanary(rollout.Name) + ResumeRollout(rollout.Name) WaitRolloutStatusPhase(rollout.Name, v1beta1.RolloutPhaseHealthy) WaitAdvancedStatefulSetPodsReady(workload) By("rollout completed, and check") @@ -3621,7 +6022,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { UpdateAdvancedStatefulSet(workload) By("Update cloneSet env NODE_NAME from(version1) -> to(version2)") // wait step 1 complete - WaitRolloutCanaryStepPaused(rollout.Name, 1) + WaitRolloutStepPaused(rollout.Name, 1) // check workload status & paused Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) @@ -3640,7 +6041,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { Expect(rollout.Status.CanaryStatus.RolloutHash).Should(Equal(rollout.Annotations[util.RolloutHashAnnotation])) // resume rollout canary - ResumeRolloutCanary(rollout.Name) + ResumeRollout(rollout.Name) time.Sleep(time.Second * 15) // v1 -> v2 -> v3, continuous release @@ -3651,7 +6052,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { time.Sleep(time.Second * 10) // wait step 0 complete - WaitRolloutCanaryStepPaused(rollout.Name, 1) + WaitRolloutStepPaused(rollout.Name, 1) // check rollout status Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) @@ -3671,7 +6072,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { selectorKey: apps.ControllerRevisionHashLabelKey, }, &rollout.Spec.Strategy.Canary.Steps[0]) // resume rollout canary - ResumeRolloutCanary(rollout.Name) + ResumeRollout(rollout.Name) By("check rollout canary status success, resume rollout, and wait rollout canary complete") WaitRolloutStatusPhase(rollout.Name, v1beta1.RolloutPhaseHealthy) WaitAdvancedStatefulSetPodsReady(workload) @@ -3766,7 +6167,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { Expect(rollout.Status.CanaryStatus.RolloutHash).Should(Equal(rollout.Annotations[util.RolloutHashAnnotation])) // resume rollout canary - ResumeRolloutCanary(rollout.Name) + ResumeRollout(rollout.Name) time.Sleep(time.Second * 15) // rollback -> v1 @@ -3855,7 +6256,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { UpdateAdvancedStatefulSet(workload) By("Update cloneSet env NODE_NAME from(version1) -> to(version2)") // wait step 1 complete - WaitRolloutCanaryStepPaused(rollout.Name, 1) + WaitRolloutStepPaused(rollout.Name, 1) // check workload status & paused Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) @@ -3873,7 +6274,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { Expect(rollout.Status.CanaryStatus.RolloutHash).Should(Equal(rollout.Annotations[util.RolloutHashAnnotation])) // resume rollout - ResumeRolloutCanary(rollout.Name) + ResumeRollout(rollout.Name) WaitRolloutStatusPhase(rollout.Name, v1beta1.RolloutPhaseHealthy) WaitAdvancedStatefulSetPodsReady(workload) By("rollout completed, and check") @@ -3962,19 +6363,19 @@ var _ = SIGDescribe("Rollout v1beta1", func() { UpdateCloneSet(workload) By("wait step(1) pause") - WaitRolloutCanaryStepPaused(rollout.Name, 1) + WaitRolloutStepPaused(rollout.Name, 1) Expect(checkUpdateReadyPods(1, 1)).Should(BeTrue()) CheckPodBatchLabel(workload.Namespace, workload.Spec.Selector, "1", "1", 1) By("wait step(2) pause") - ResumeRolloutCanary(rollout.Name) - WaitRolloutCanaryStepPaused(rollout.Name, 2) + ResumeRollout(rollout.Name) + WaitRolloutStepPaused(rollout.Name, 2) Expect(checkUpdateReadyPods(2, 
3)).Should(BeTrue()) CheckPodBatchLabel(workload.Namespace, workload.Spec.Selector, "1", "2", 2) By("wait step(3) pause") - ResumeRolloutCanary(rollout.Name) - WaitRolloutCanaryStepPaused(rollout.Name, 3) + ResumeRollout(rollout.Name) + WaitRolloutStepPaused(rollout.Name, 3) Expect(checkUpdateReadyPods(4, 6)).Should(BeTrue()) CheckPodBatchLabel(workload.Namespace, workload.Spec.Selector, "1", "3", 3) @@ -4030,7 +6431,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { Expect(GetObject(workload.Name, workload)).NotTo(HaveOccurred()) Expect(workload.Spec.Paused).Should(BeTrue()) // wait step 1 complete - WaitRolloutCanaryStepPaused(rollout.Name, 1) + WaitRolloutStepPaused(rollout.Name, 1) // check rollout status Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) Expect(rollout.Status.CanaryStatus.CanaryReplicas).Should(BeNumerically("==", 1)) @@ -4051,9 +6452,9 @@ var _ = SIGDescribe("Rollout v1beta1", func() { Expect(cIngress.Annotations[fmt.Sprintf("%s/canary-by-header-value", nginxIngressAnnotationDefaultPrefix)]).Should(Equal("pc")) // resume rollout canary - ResumeRolloutCanary(rollout.Name) + ResumeRollout(rollout.Name) By("Resume rollout, and wait next step(2), routing 50% traffic to new version pods") - WaitRolloutCanaryStepPaused(rollout.Name, 2) + WaitRolloutStepPaused(rollout.Name, 2) // check rollout status Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred()) Expect(rollout.Status.CanaryStatus.CanaryReplicas).Should(BeNumerically("==", 2)) @@ -4072,7 +6473,7 @@ var _ = SIGDescribe("Rollout v1beta1", func() { Expect(cIngress.Annotations[fmt.Sprintf("%s/canary-by-header-value", nginxIngressAnnotationDefaultPrefix)]).Should(BeEmpty()) // resume rollout - ResumeRolloutCanary(rollout.Name) + ResumeRollout(rollout.Name) WaitRolloutStatusPhase(rollout.Name, v1beta1.RolloutPhaseHealthy) By("rollout completed, and check") // check ingress & service & virtualservice & deployment diff --git a/test/e2e/test_data/rollout/hpa_v1.yaml b/test/e2e/test_data/rollout/hpa_v1.yaml new file mode 100644 index 00000000..653b87a8 --- /dev/null +++ b/test/e2e/test_data/rollout/hpa_v1.yaml @@ -0,0 +1,12 @@ +apiVersion: autoscaling/v1 +kind: HorizontalPodAutoscaler +metadata: + name: hpa-dp +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: echoserver + minReplicas: 2 + maxReplicas: 6 + targetCPUUtilizationPercentage: 1 \ No newline at end of file diff --git a/test/e2e/test_data/rollout/hpa_v2.yaml b/test/e2e/test_data/rollout/hpa_v2.yaml new file mode 100644 index 00000000..2391b960 --- /dev/null +++ b/test/e2e/test_data/rollout/hpa_v2.yaml @@ -0,0 +1,24 @@ +apiVersion: autoscaling/v2 +kind: HorizontalPodAutoscaler +metadata: + name: hpa-dp +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: echoserver + behavior: + scaleDown: + stabilizationWindowSeconds: 10 + # selectPolicy: Disabled + # scaleUp: + # selectPolicy: Disabled + minReplicas: 2 + maxReplicas: 6 + metrics: + - type: Resource + resource: + name: cpu + target: + type: AverageValue + averageValue: '1m' diff --git a/test/e2e/test_data/rollout/rollout_v1beta1_bluegreen_base.yaml b/test/e2e/test_data/rollout/rollout_v1beta1_bluegreen_base.yaml index 0959f065..d8ce5fa0 100644 --- a/test/e2e/test_data/rollout/rollout_v1beta1_bluegreen_base.yaml +++ b/test/e2e/test_data/rollout/rollout_v1beta1_bluegreen_base.yaml @@ -10,21 +10,15 @@ spec: strategy: blueGreen: steps: - - traffic: 20% - replicas: 20% + - replicas: 50% + traffic: 0% pause: {} - - traffic: 40% - replicas: 40% - pause: 
{duration: 10} - - traffic: 60% - replicas: 60% - pause: {duration: 10} - - traffic: 80% - replicas: 80% - pause: {duration: 10} - - traffic: 100% - replicas: 100% - pause: {duration: 0} + - replicas: 100% + traffic: 0% + - replicas: 100% + traffic: 50% + - replicas: 100% + traffic: 100% trafficRoutings: - service: echoserver ingress: diff --git a/test/e2e/test_data/rollout/rollout_v1beta1_bluegreen_cloneset_base.yaml b/test/e2e/test_data/rollout/rollout_v1beta1_bluegreen_cloneset_base.yaml new file mode 100644 index 00000000..6fc10f5d --- /dev/null +++ b/test/e2e/test_data/rollout/rollout_v1beta1_bluegreen_cloneset_base.yaml @@ -0,0 +1,24 @@ +apiVersion: rollouts.kruise.io/v1beta1 # we use v1beta1 +kind: Rollout +metadata: + name: rollouts-demo +spec: + workloadRef: + apiVersion: apps.kruise.io/v1alpha1 + kind: CloneSet + name: echoserver + strategy: + blueGreen: + steps: + - replicas: 100% + traffic: 0% + pause: {} + - replicas: 100% + traffic: 50% + - replicas: 100% + traffic: 100% + trafficRoutings: + - service: echoserver + ingress: + classType: nginx + name: echoserver \ No newline at end of file
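Note for reviewers: the following is a minimal sketch, not part of this patch, of how the new rollout_v1beta1_bluegreen_cloneset_base.yaml fixture could be driven end to end inside the existing v1beta1 e2e suite. It reuses only helpers already used in this patch (ReadYamlToObject, CreateObject, WaitCloneSetAllPodsReady, mergeEnvVar, UpdateCloneSet, WaitRolloutStepPaused, ResumeRollout, WaitRolloutStatusPhase) and assumes each of the three blue-green steps in the fixture pauses until resumed; replica and traffic assertions are intentionally omitted because blue-green replica accounting is not shown in this excerpt.

// Sketch only: drives the blue-green CloneSet fixture added by this patch.
// Intended to live inside the existing KruiseDescribe blocks of
// test/e2e/rollout_v1beta1_test.go; assumes every step waits for manual resume.
It("CloneSet blue-green V1->V2 (sketch)", func() {
	By("Creating Rollout...")
	rollout := &v1beta1.Rollout{}
	Expect(ReadYamlToObject("./test_data/rollout/rollout_v1beta1_bluegreen_cloneset_base.yaml", rollout)).ToNot(HaveOccurred())
	CreateObject(rollout)

	By("Creating workload and waiting for all pods ready...")
	service := &v1.Service{}
	Expect(ReadYamlToObject("./test_data/rollout/service.yaml", service)).ToNot(HaveOccurred())
	CreateObject(service)
	ingress := &netv1.Ingress{}
	Expect(ReadYamlToObject("./test_data/rollout/nginx_ingress.yaml", ingress)).ToNot(HaveOccurred())
	CreateObject(ingress)
	workload := &appsv1alpha1.CloneSet{}
	Expect(ReadYamlToObject("./test_data/rollout/cloneset.yaml", workload)).ToNot(HaveOccurred())
	CreateObject(workload)
	WaitCloneSetAllPodsReady(workload)

	By("Update cloneSet env NODE_NAME from(version1) -> to(version2)")
	workload.Spec.Template.Spec.Containers[0].Env = mergeEnvVar(
		workload.Spec.Template.Spec.Containers[0].Env,
		v1.EnvVar{Name: "NODE_NAME", Value: "version2"})
	UpdateCloneSet(workload)

	// step 1 of the fixture: green pods scaled out (replicas: 100%), traffic: 0%
	WaitRolloutStepPaused(rollout.Name, 1)
	Expect(GetObject(rollout.Name, rollout)).NotTo(HaveOccurred())
	Expect(rollout.Status.Phase).Should(Equal(v1beta1.RolloutPhaseProgressing))

	// step 2: shift 50% of the traffic to the green version
	ResumeRollout(rollout.Name)
	WaitRolloutStepPaused(rollout.Name, 2)

	// step 3: shift 100% of the traffic, then finalize the release
	ResumeRollout(rollout.Name)
	WaitRolloutStepPaused(rollout.Name, 3)
	ResumeRollout(rollout.Name)
	WaitRolloutStatusPhase(rollout.Name, v1beta1.RolloutPhaseHealthy)
	WaitCloneSetAllPodsReady(workload)
})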