diff --git a/.env b/.env index 12038fdfc..48a543b53 100644 --- a/.env +++ b/.env @@ -2,8 +2,9 @@ OPERATOR_SDK_VERSION=v1.31.0 REVIEWERS=vivekr-splunk,akondur GO_VERSION=1.21.5 AWSCLI_URL=https://awscli.amazonaws.com/awscli-exe-linux-x86_64-2.8.6.zip +AWSCLI_ARM_URL=https://awscli.amazonaws.com/awscli-exe-linux-aarch64.zip KUBECTL_VERSION=v1.29.1 AZ_CLI_VERSION=2.30.0 EKSCTL_VERSION=v0.143.0 EKS_CLUSTER_K8_VERSION=1.27 -SPLUNK_ENTERPRISE_RELEASE_IMAGE=splunk/splunk:9.1.3 \ No newline at end of file +SPLUNK_ENTERPRISE_RELEASE_IMAGE=splunk/splunk:9.2.0 diff --git a/.github/workflows/build-test-push-workflow.yml b/.github/workflows/build-test-push-workflow.yml index f87fc8635..5314a8bf7 100644 --- a/.github/workflows/build-test-push-workflow.yml +++ b/.github/workflows/build-test-push-workflow.yml @@ -1,5 +1,10 @@ name: Build and Test -on: push +on: + push: + branches: + - develop + - main + - feature** jobs: check-formating: runs-on: ubuntu-latest @@ -153,6 +158,7 @@ jobs: # This regex matches any string not containing smoke keyword TEST_TO_SKIP: "^(?:[^s]+|s(?:$|[^m]|m(?:$|[^o]|o(?:$|[^k]|k(?:$|[^e])))))*$" TEST_CLUSTER_PLATFORM: eks + EKS_INSTANCE_TYPE: m5.2xlarge EKS_VPC_PRIVATE_SUBNET_STRING: ${{ secrets.EKS_VPC_PRIVATE_SUBNET_STRING }} EKS_VPC_PUBLIC_SUBNET_STRING: ${{ secrets.EKS_VPC_PUBLIC_SUBNET_STRING }} TEST_BUCKET: ${{ secrets.TEST_BUCKET }} diff --git a/.github/workflows/graviton-build-test-push-workflow.yml b/.github/workflows/graviton-build-test-push-workflow.yml new file mode 100644 index 000000000..128effbf4 --- /dev/null +++ b/.github/workflows/graviton-build-test-push-workflow.yml @@ -0,0 +1,286 @@ +name: Graviton Build and Test +on: push +jobs: + check-formating: + runs-on: macos-14 + steps: + - uses: actions/checkout@v2 + - name: Dotenv Action + id: dotenv + uses: falti/dotenv-action@d4d12eaa0e1dd06d5bdc3d7af3bf4c8c93cb5359 + - name: Setup Go + uses: actions/setup-go@v2 + with: + go-version: ${{ steps.dotenv.outputs.GO_VERSION }} + - name: Check Source formatting + run: make fmt && if [[ $? -ne 0 ]]; then false; fi + - name: Lint source code + run: make vet && if [[ $? 
-ne 0 ]]; then false; fi + #unit-tests: + # runs-on: macos-14 + # needs: check-formating + # steps: + # - uses: actions/checkout@v2 + # - name: Dotenv Action + # id: dotenv + # uses: falti/dotenv-action@d4d12eaa0e1dd06d5bdc3d7af3bf4c8c93cb5359 + # - name: Setup Go + # uses: actions/setup-go@v2 + # with: + # go-version: ${{ steps.dotenv.outputs.GO_VERSION }} + # - name: Install goveralls + # run: | + # go version + # go install github.com/mattn/goveralls@latest + # - name: Install Ginkgo + # run: | + # make setup/ginkgo + # go mod tidy + # - name: Run Unit Tests + # run: make test + # - name: Run Code Coverage + # run: goveralls -coverprofile=coverage.out -service=circle-ci -repotoken ${{ secrets.COVERALLS_TOKEN }} + # - name: Upload Coverage artifacts + # uses: actions/upload-artifact@82c141cc518b40d92cc801eee768e7aafc9c2fa2 + # with: + # name: coverage.out + # path: coverage.out + build-graviton-operator-image: + runs-on: ubuntu-latest + needs: check-formating + env: + SPLUNK_ENTERPRISE_IMAGE: ${{ secrets.SPLUNK_ENTERPRISE_IMAGE }} + SPLUNK_OPERATOR_IMAGE_NAME: splunk/splunk-operator + ECR_REPOSITORY: ${{ secrets.ECR_REPOSITORY }} + S3_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + steps: + - uses: actions/checkout@v2 + - name: Dotenv Action + id: dotenv + uses: falti/dotenv-action@d4d12eaa0e1dd06d5bdc3d7af3bf4c8c93cb5359 + - name: Setup Go + uses: actions/setup-go@v2 + with: + go-version: ${{ steps.dotenv.outputs.GO_VERSION }} + - name: Install Ginkgo + run: | + make setup/ginkgo + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v2.5.0 + - name: Install Operator SDK + run: | + export ARCH=$(case $(uname -m) in x86_64) echo -n amd64 ;; aarch64) echo -n arm64 ;; *) echo -n $(uname -m) ;; esac) + export OS=$(uname | awk '{print tolower($0)}') + export OPERATOR_SDK_DL_URL=https://github.com/operator-framework/operator-sdk/releases/download/${{ steps.dotenv.outputs.OPERATOR_SDK_VERSION }} + sudo curl -LO ${OPERATOR_SDK_DL_URL}/operator-sdk_${OS}_${ARCH} + sudo chmod +x operator-sdk_${OS}_${ARCH} + sudo mv operator-sdk_${OS}_${ARCH} /usr/local/bin/operator-sdk + - name: Configure AWS credentials + uses: aws-actions/configure-aws-credentials@v1 + with: + aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} + aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + aws-region: ${{ secrets.AWS_DEFAULT_REGION }} + - name: Login to Amazon ECR + id: login-ecr + uses: aws-actions/amazon-ecr-login@v1 + - name: Make Splunk Operator Image and Push to ECR + run: | + export EKS_CLUSTER_K8_VERSION=${{ steps.dotenv.outputs.EKS_CLUSTER_K8_VERSION }} + export PLATFORMS=linux/arm64 + export BUILDPLATFORM=linux/arm64 + export TARGETOS=linux + export TARGETARCH=arm64 + export EKS_INSTANCE_TYPE=c6g.4xlarge + export TARGETPLATFORM=ubuntu:23.10 + make docker-buildx IMAGE_TAG_BASE=${{ secrets.ECR_REPOSITORY }}/${{ env.SPLUNK_OPERATOR_IMAGE_NAME }} VERSION=$GITHUB_SHA IMG=${{ secrets.ECR_REPOSITORY }}/${{ env.SPLUNK_OPERATOR_IMAGE_NAME }}:$GITHUB_SHA PLATFORMS=linux/arm64 BUILDPLATFORM=linux/arm64 TARGETOS=linux TARGETARCH=arm64 TARGETPLATFORM=ubuntu:23.10 + smoke-tests: + needs: build-graviton-operator-image + strategy: + fail-fast: false + matrix: + test: [ + basic, + appframeworks1, + managerappframeworkc3, + managerappframeworkm4, + managersecret, + managermc, + ] + runs-on: ubuntu-latest + env: + CLUSTER_NODES: 1 + CLUSTER_WORKERS: 3 + SPLUNK_ENTERPRISE_IMAGE: ${{ secrets.SPLUNK_ENTERPRISE_IMAGE }} + SPLUNK_ENTERPRISE_RELEASE_IMAGE: ${{ secrets.SPLUNK_ENTERPRISE_RELEASE_IMAGE }} + 
SPLUNK_OPERATOR_IMAGE_NAME: splunk/splunk-operator + SPLUNK_OPERATOR_IMAGE_FILENAME: splunk-operator + TEST_FOCUS: "${{ matrix.test }}" + # This regex matches any string not containing smoke keyword + TEST_TO_SKIP: "^(?:[^s]+|s(?:$|[^m]|m(?:$|[^o]|o(?:$|[^k]|k(?:$|[^e])))))*$" + TEST_CLUSTER_PLATFORM: eks + EKS_INSTANCE_TYPE: m5.2xlarge + EKS_VPC_PRIVATE_SUBNET_STRING: ${{ secrets.EKS_VPC_PRIVATE_SUBNET_STRING }} + EKS_VPC_PUBLIC_SUBNET_STRING: ${{ secrets.EKS_VPC_PUBLIC_SUBNET_STRING }} + TEST_BUCKET: ${{ secrets.TEST_BUCKET }} + TEST_INDEXES_S3_BUCKET: ${{ secrets.TEST_INDEXES_S3_BUCKET }} + ECR_REPOSITORY: ${{ secrets.ECR_REPOSITORY }} + PRIVATE_REGISTRY: ${{ secrets.ECR_REPOSITORY }} + S3_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + ENTERPRISE_LICENSE_LOCATION: ${{ secrets.ENTERPRISE_LICENSE_LOCATION }} + EKS_SSH_PUBLIC_KEY: ${{ secrets.EKS_SSH_PUBLIC_KEY }} + CLUSTER_WIDE: "true" + DEPLOYMENT_TYPE: "" + steps: + - name: Set Test Cluster Name + run: | + echo "TEST_CLUSTER_NAME=eks-integration-test-cluster-${{ matrix.test }}-$GITHUB_RUN_ID" >> $GITHUB_ENV + - name: Checkout code + uses: actions/checkout@v2 + - name: Dotenv Action + id: dotenv + uses: falti/dotenv-action@d4d12eaa0e1dd06d5bdc3d7af3bf4c8c93cb5359 + - name: Change splunk enterprise to release image on main branches + if: github.ref == 'refs/heads/main' + run: | + echo "SPLUNK_ENTERPRISE_IMAGE=${{ steps.dotenv.outputs.SPLUNK_ENTERPRISE_RELEASE_IMAGE }}" >> $GITHUB_ENV + - name: Install Kubectl + uses: Azure/setup-kubectl@v3 + with: + version: ${{ steps.dotenv.outputs.KUBECTL_VERSION }} + - name: Install Python + uses: actions/setup-python@v2 + - name: Install AWS CLI + run: | + curl "${{ steps.dotenv.outputs.AWSCLI_URL}}" -o "awscliv2.zip" + unzip awscliv2.zip + sudo ./aws/install --update + aws --version + - name: Setup Go + uses: actions/setup-go@v2 + with: + go-version: ${{ steps.dotenv.outputs.GO_VERSION }} + - name: Install Ginkgo + run: | + make setup/ginkgo + - name: Install Helm + run: | + curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 + chmod 700 get_helm.sh + ./get_helm.sh + DESIRED_VERSION=v3.8.2 bash get_helm.sh + - name: Install EKS CTL + run: | + curl --silent --insecure --location "https://github.com/weaveworks/eksctl/releases/download/${{ steps.dotenv.outputs.EKSCTL_VERSION }}/eksctl_$(uname -s)_amd64.tar.gz" | tar xz -C /tmp + sudo mv /tmp/eksctl /usr/local/bin + eksctl version + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v2.5.0 + - name: Install Operator SDK + run: | + sudo curl -L -o /usr/local/bin/operator-sdk https://github.com/operator-framework/operator-sdk/releases/download/${{ steps.dotenv.outputs.OPERATOR_SDK_VERSION }}/operator-sdk-${{ steps.dotenv.outputs.OPERATOR_SDK_VERSION }}-x86_64-linux-gnu + sudo chmod +x /usr/local/bin/operator-sdk + - name: Configure Docker Hub credentials + uses: docker/login-action@v1 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN}} + - name: Configure AWS credentials + uses: aws-actions/configure-aws-credentials@v1 + with: + aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} + aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + aws-region: ${{ secrets.AWS_DEFAULT_REGION }} + - name: Login to Amazon ECR + id: login-ecr + uses: aws-actions/amazon-ecr-login@v1 + - name: Create EKS cluster + run: | + export EKS_CLUSTER_K8_VERSION=${{ steps.dotenv.outputs.EKS_CLUSTER_K8_VERSION }} + export PLATFORMS=linux/arm64 + export BUILDPLATFORM=linux/arm64 + 
export TARGETOS=linux + export TARGETARCH=arm64 + export TARGETPLATFORM=ubuntu:23.10 + export EKS_INSTANCE_TYPE=c6g.4xlarge + make cluster-up + - name: install metric server + run: | + kubectl apply -f https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml + - name: install k8s dashboard + run: | + kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.5/aio/deploy/recommended.yaml + - name: Setup Kustomize + run: | + sudo snap install kustomize + mkdir -p ./bin + cp /snap/bin/kustomize ./bin/kustomize + - name: Run smoke test + id: smoketest + run: | + export SPLUNK_OPERATOR_IMAGE_GRAVITON=${{ secrets.ECR_REPOSITORY }}/${{ env.SPLUNK_OPERATOR_IMAGE_NAME }}:$GITHUB_SHA + export SPLUNK_ENTERPRISE_IMAGE_GRAVITON=${{ secrets.ECR_REPOSITORY }}/${{ steps.dotenv.outputs.SPLUNK_ENTERPRISE_RELEASE_IMAGE }} + make int-test + - name: Collect Test Logs + if: ${{ always() }} + run: | + mkdir -p /tmp/pod_logs + find ./test -name "*.log" -exec cp {} /tmp/pod_logs \; + - name: Archive Pod Logs + if: ${{ always() }} + uses: actions/upload-artifact@v2 + with: + name: "splunk-pods-logs--artifacts-${{ matrix.test }}" + path: "/tmp/pod_logs/**" + - name: Cleanup Test Case artifacts + if: ${{ always() }} + run: | + make cleanup + make clean + - name: Cleanup up EKS cluster + if: ${{ always() }} + run: | + make cluster-down + #- name: Test Report + # uses: dorny/test-reporter@v1 + # if: success() || failure() # run this step even if previous step failed + # with: + # name: Integration Tests # Name of the check run which will be created + # path: inttest-*.xml # Path to test results + # reporter: jest-junit # Format of test results + push-latest: + needs: smoke-tests + if: github.ref == 'refs/heads/main' + runs-on: ubuntu-latest + env: + SPLUNK_OPERATOR_IMAGE_NAME: splunk/splunk-operator + TAG: latest + steps: + - name: Checkout Code + uses: actions/checkout@v2 + - name: Dotenv Action + id: dotenv + uses: falti/dotenv-action@d4d12eaa0e1dd06d5bdc3d7af3bf4c8c93cb5359 + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v2.5.0 + - name: Configure Docker Hub credentials + uses: docker/login-action@v1 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_PUSH_TOKEN}} + - name: Configure AWS credentials + uses: aws-actions/configure-aws-credentials@v1 + with: + aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} + aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + aws-region: ${{ secrets.AWS_DEFAULT_REGION }} + - name: Login to Amazon ECR + uses: aws-actions/amazon-ecr-login@v1 + - name: Pull Splunk Operator Image Locally + run: | + docker pull ${{ secrets.ECR_REPOSITORY }}/${{ env.SPLUNK_OPERATOR_IMAGE_NAME }}:$GITHUB_SHA + - name: Change Operator Image Tag to latest + run: | + docker tag ${{ secrets.ECR_REPOSITORY }}/${{ env.SPLUNK_OPERATOR_IMAGE_NAME }}:$GITHUB_SHA ${{ env.SPLUNK_OPERATOR_IMAGE_NAME }}:${{ env.TAG }} + - name: Push Splunk Operator Image to Docker Hub + run: docker push ${{ env.SPLUNK_OPERATOR_IMAGE_NAME }}:${{ env.TAG }} \ No newline at end of file diff --git a/.github/workflows/graviton-int-test-workflow.yml b/.github/workflows/graviton-int-test-workflow.yml new file mode 100644 index 000000000..0d4000705 --- /dev/null +++ b/.github/workflows/graviton-int-test-workflow.yml @@ -0,0 +1,221 @@ +name: Graviton Integration Test WorkFlow +on: + push: + branches: + - develop + - main + - feature** + - graviton-sok +jobs: + build-graviton-operator-image: + runs-on: ubuntu-latest + 
timeout-minutes: 360 + env: + SPLUNK_ENTERPRISE_IMAGE: ${{ secrets.SPLUNK_ENTERPRISE_IMAGE }} + SPLUNK_OPERATOR_IMAGE_NAME: splunk/splunk-operator + ECR_REPOSITORY: ${{ secrets.ECR_REPOSITORY }} + S3_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + steps: + - uses: actions/checkout@v2 + - name: Dotenv Action + id: dotenv + uses: falti/dotenv-action@d4d12eaa0e1dd06d5bdc3d7af3bf4c8c93cb5359 + - name: Setup Go + uses: actions/setup-go@v2 + with: + go-version: ${{ steps.dotenv.outputs.GO_VERSION }} + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v2.5.0 + - name: Install Operator SDK + run: | + export ARCH=$(case $(uname -m) in x86_64) echo -n amd64 ;; aarch64) echo -n arm64 ;; *) echo -n $(uname -m) ;; esac) + export OS=$(uname | awk '{print tolower($0)}') + export OPERATOR_SDK_DL_URL=https://github.com/operator-framework/operator-sdk/releases/download/${{ steps.dotenv.outputs.OPERATOR_SDK_VERSION }} + sudo curl -LO ${OPERATOR_SDK_DL_URL}/operator-sdk_${OS}_${ARCH} + sudo chmod +x operator-sdk_${OS}_${ARCH} + sudo mv operator-sdk_${OS}_${ARCH} /usr/local/bin/operator-sdk + - name: Configure AWS credentials + uses: aws-actions/configure-aws-credentials@v1 + with: + aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} + aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + aws-region: ${{ secrets.AWS_DEFAULT_REGION }} + - name: Login to Amazon ECR + id: login-ecr + uses: aws-actions/amazon-ecr-login@v1 + - name: Make Splunk Operator Image and Push Image + run: | + make docker-buildx IMAGE_TAG_BASE=${{ secrets.ECR_REPOSITORY }}/${{ env.SPLUNK_OPERATOR_IMAGE_NAME }} VERSION=$GITHUB_SHA IMG=${{ secrets.ECR_REPOSITORY }}/${{ env.SPLUNK_OPERATOR_IMAGE_NAME }}:$GITHUB_SHA PLATFORMS=linux/arm64 BUILDPLATFORM=linux/arm64 TARGETOS=linux TARGETARCH=arm64 TARGETPLATFORM=ubuntu:23.10 + int-tests: + strategy: + fail-fast: false + matrix: + test: + [ + appframeworks1, + managerappframeworkc3, + managerappframeworkm4, + managersecret, + managersmartstore, + managermc, + managercrcrud, + licensemanager, + managerdeletecr, + ] + runs-on: ubuntu-latest + needs: build-graviton-operator-image + env: + CLUSTER_NODES: 1 + CLUSTER_WORKERS: 3 + SPLUNK_ENTERPRISE_IMAGE: ${{ secrets.SPLUNK_ENTERPRISE_IMAGE }} + SPLUNK_OPERATOR_IMAGE_NAME: splunk/splunk-operator + SPLUNK_OPERATOR_IMAGE_FILENAME: splunk-operator + EKS_INSTANCE_TYPE: c6g.4xlarge + TEST_FOCUS: "${{ matrix.test }}" + # This regex matches any string not containing integration keyword + TEST_TO_SKIP: "^(?:[^i]+|i(?:$|[^n]|n(?:$|[^t]|t(?:$|[^e]|e(?:$|[^g]|g(?:$|[^r]|r(?:$|[^a]|a(?:$|[^t]|t(?:$|[^i]|i(?:$|[^o]|o(?:$|[^n])))))))))))*$" + TEST_CLUSTER_PLATFORM: eks + EKS_VPC_PRIVATE_SUBNET_STRING: ${{ secrets.EKS_VPC_PRIVATE_SUBNET_STRING }} + EKS_VPC_PUBLIC_SUBNET_STRING: ${{ secrets.EKS_VPC_PUBLIC_SUBNET_STRING }} + TEST_BUCKET: ${{ secrets.TEST_BUCKET }} + TEST_INDEXES_S3_BUCKET: ${{ secrets.TEST_INDEXES_S3_BUCKET }} + ECR_REPOSITORY: ${{ secrets.ECR_REPOSITORY }} + PRIVATE_REGISTRY: ${{ secrets.ECR_REPOSITORY }} + S3_REGION: ${{ secrets.AWS_DEFAULT_REGION }} + ENTERPRISE_LICENSE_LOCATION: ${{ secrets.ENTERPRISE_LICENSE_LOCATION }} + CLUSTER_WIDE: "true" + DEPLOYMENT_TYPE: "" + steps: + - name: Set Test Cluster Name + run: | + echo "TEST_CLUSTER_NAME=eks-integration-test-cluster-${{ matrix.test }}-$GITHUB_RUN_ID" >> $GITHUB_ENV + - name: Set Test Cluster Nodes and Parallel Runs + run: >- + if grep -q "appframework" <<< "${{ matrix.test }}"; then + echo "CLUSTER_WORKERS=5" >> $GITHUB_ENV + echo "CLUSTER_NODES=2" >> $GITHUB_ENV + fi + - name: 
Checkout code + uses: actions/checkout@v2 + - name: Dotenv Action + id: dotenv + uses: falti/dotenv-action@d4d12eaa0e1dd06d5bdc3d7af3bf4c8c93cb5359 + - name: Change splunk enterprise to release image on main branches + if: github.ref == 'refs/heads/main' + run: | + echo "SPLUNK_ENTERPRISE_IMAGE=${{ steps.dotenv.outputs.SPLUNK_ENTERPRISE_RELEASE_IMAGE }}" >> $GITHUB_ENV + - name: Install Kubectl + uses: Azure/setup-kubectl@v3 + with: + version: ${{ steps.dotenv.outputs.KUBECTL_VERSION }} + - name: Install Python + uses: actions/setup-python@v2 + - name: Install AWS CLI + run: | + curl "${{ steps.dotenv.outputs.AWSCLI_URL}}" -o "awscliv2.zip" + unzip awscliv2.zip + sudo ./aws/install --update + aws --version + - name: Setup Go + uses: actions/setup-go@v2 + with: + go-version: ${{ steps.dotenv.outputs.GO_VERSION }} + - name: Install Ginkgo + run: | + make setup/ginkgo + - name: Install Helm + run: | + curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 + chmod 700 get_helm.sh + ./get_helm.sh + DESIRED_VERSION=v3.8.2 bash get_helm.sh + - name: Install EKS CTL + run: | + curl --silent --insecure --location "https://github.com/weaveworks/eksctl/releases/download/${{ steps.dotenv.outputs.EKSCTL_VERSION }}/eksctl_$(uname -s)_amd64.tar.gz" | tar xz -C /tmp + sudo mv /tmp/eksctl /usr/local/bin + eksctl version + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v2.5.0 + - name: Configure Docker Hub credentials + uses: docker/login-action@v1 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN}} + - name: Pull Splunk Enterprise Image + run: docker pull ${{ env.SPLUNK_ENTERPRISE_IMAGE }} + - name: Configure AWS credentials + uses: aws-actions/configure-aws-credentials@v1 + with: + aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} + aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + aws-region: ${{ secrets.AWS_DEFAULT_REGION }} + - name: Login to Amazon ECR + id: login-ecr + uses: aws-actions/amazon-ecr-login@v1 + #- name: Pull Splunk Operator Image Locally and change name + # run: | + # docker pull ${{ secrets.ECR_REPOSITORY }}/${{ env.SPLUNK_OPERATOR_IMAGE_NAME }}:$GITHUB_SHA + # docker tag ${{ secrets.ECR_REPOSITORY }}/${{ env.SPLUNK_OPERATOR_IMAGE_NAME }}:$GITHUB_SHA ${{ env.SPLUNK_OPERATOR_IMAGE_NAME }}:$GITHUB_SHA + - name: Tag and Push Splunk Enterprise Image to ECR + run: | + docker tag ${{ env.SPLUNK_ENTERPRISE_IMAGE }} ${{ secrets.ECR_REPOSITORY }}/${{ env.SPLUNK_ENTERPRISE_IMAGE }} + docker push ${{ secrets.ECR_REPOSITORY }}/${{ env.SPLUNK_ENTERPRISE_IMAGE }} + #- name: Pull Splunk Operator Image Locally + # run: | + # docker pull ${{ secrets.ECR_REPOSITORY }}/${{ env.SPLUNK_OPERATOR_IMAGE_NAME }}:$GITHUB_SHA + #- name: Change Operator Image Tag to latest + # run: | + # docker tag ${{ secrets.ECR_REPOSITORY }}/${{ env.SPLUNK_OPERATOR_IMAGE_NAME }}:$GITHUB_SHA ${{ env.SPLUNK_OPERATOR_IMAGE_NAME }}:latest + - name: Create EKS cluster + run: | + export EKS_CLUSTER_K8_VERSION=${{ steps.dotenv.outputs.EKS_CLUSTER_K8_VERSION }} + export PLATFORMS=linux/arm64 + export BUILDPLATFORM=linux/arm64 + export TARGETOS=linux + export TARGETARCH=arm64 + export TARGETPLATFORM=ubuntu:23.10 + export EKS_INSTANCE_TYPE=c6g.4xlarge + make cluster-up + - name: install metric server + run: | + kubectl apply -f https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml + - name: install k8s dashboard + run: | + kubectl apply -f 
https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.5/aio/deploy/recommended.yaml + - name: Setup Kustomize + run: | + sudo snap install kustomize + mkdir -p ./bin + cp /snap/bin/kustomize ./bin/kustomize + - name: Run Integration test + run: | + export SPLUNK_OPERATOR_IMAGE_GRAVITON=${{ secrets.ECR_REPOSITORY }}/${{ env.SPLUNK_OPERATOR_IMAGE_NAME }}:$GITHUB_SHA + export SPLUNK_ENTERPRISE_IMAGE_GRAVITON=${{ secrets.ECR_REPOSITORY }}/${{ steps.dotenv.outputs.SPLUNK_ENTERPRISE_RELEASE_IMAGE }} + make int-test + - name: Collect Test Logs + if: ${{ always() }} + run: | + mkdir -p /tmp/pod_logs + find ./test -name "*.log" -exec cp {} /tmp/pod_logs \; + - name: Archive Pod Logs + if: ${{ always() }} + uses: actions/upload-artifact@v2 + with: + name: "splunk-pods-logs--artifacts-${{ matrix.test }}" + path: "/tmp/pod_logs/**" + - name: Cleanup Test Case artifacts + if: ${{ always() }} + run: | + make cleanup + make clean + - name: Cleanup up EKS cluster + if: ${{ always() }} + run: | + make cluster-down + #- name: Test Report + # uses: dorny/test-reporter@v1 + # if: success() || failure() # run this step even if previous step failed + # with: + # name: Integration Tests # Name of the check run which will be created + # path: inttest-*.xml # Path to test results + # reporter: jest-junit # Format of test results \ No newline at end of file diff --git a/.github/workflows/int-test-workflow.yml b/.github/workflows/int-test-workflow.yml index 3ee2f4761..f07561158 100644 --- a/.github/workflows/int-test-workflow.yml +++ b/.github/workflows/int-test-workflow.yml @@ -73,6 +73,7 @@ jobs: SPLUNK_ENTERPRISE_IMAGE: ${{ secrets.SPLUNK_ENTERPRISE_IMAGE }} SPLUNK_OPERATOR_IMAGE_NAME: splunk/splunk-operator SPLUNK_OPERATOR_IMAGE_FILENAME: splunk-operator + EKS_INSTANCE_TYPE: m5.2xlarge TEST_FOCUS: "${{ matrix.test }}" # This regex matches any string not containing integration keyword TEST_TO_SKIP: "^(?:[^i]+|i(?:$|[^n]|n(?:$|[^t]|t(?:$|[^e]|e(?:$|[^g]|g(?:$|[^r]|r(?:$|[^a]|a(?:$|[^t]|t(?:$|[^i]|i(?:$|[^o]|o(?:$|[^n])))))))))))*$" diff --git a/.github/workflows/manual-int-test-workflow.yml b/.github/workflows/manual-int-test-workflow.yml index 0aad051ed..bed26eef5 100644 --- a/.github/workflows/manual-int-test-workflow.yml +++ b/.github/workflows/manual-int-test-workflow.yml @@ -40,6 +40,7 @@ jobs: SPLUNK_OPERATOR_IMAGE_NAME: splunk/splunk-operator SPLUNK_OPERATOR_IMAGE_FILENAME: splunk-operator TEST_FOCUS: "${{ matrix.test }}" + EKS_INSTANCE_TYPE: m5.2xlarge # This regex matches any string not containing integration keyword TEST_TO_SKIP: "^(?:[^i]+|i(?:$|[^n]|n(?:$|[^t]|t(?:$|[^e]|e(?:$|[^g]|g(?:$|[^r]|r(?:$|[^a]|a(?:$|[^t]|t(?:$|[^i]|i(?:$|[^o]|o(?:$|[^n])))))))))))*$" TEST_CLUSTER_PLATFORM: eks diff --git a/.github/workflows/nightly-int-test-workflow.yml b/.github/workflows/nightly-int-test-workflow.yml index 065743958..92dfae436 100644 --- a/.github/workflows/nightly-int-test-workflow.yml +++ b/.github/workflows/nightly-int-test-workflow.yml @@ -80,6 +80,7 @@ jobs: SPLUNK_OPERATOR_IMAGE_NAME: splunk/splunk-operator SPLUNK_OPERATOR_IMAGE_FILENAME: splunk-operator TEST_FOCUS: "${{ matrix.test }}" + EKS_INSTANCE_TYPE: m5.2xlarge # This regex matches any string not containing integration keyword TEST_TO_SKIP: "^(?:[^i]+|i(?:$|[^n]|n(?:$|[^t]|t(?:$|[^e]|e(?:$|[^g]|g(?:$|[^r]|r(?:$|[^a]|a(?:$|[^t]|t(?:$|[^i]|i(?:$|[^o]|o(?:$|[^n])))))))))))*$" TEST_CLUSTER_PLATFORM: eks diff --git a/Dockerfile b/Dockerfile index 9a323ef4a..feaee1e37 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,10 @@ # Build the manager 
binary -FROM golang:1.21.1 as builder +FROM --platform=${BUILDPLATFORM:-linux/amd64} golang:1.21.1 as builder + +ARG TARGETPLATFORM +ARG BUILDPLATFORM +ARG TARGETOS +ARG TARGETARCH WORKDIR /workspace # Copy the Go Modules manifests @@ -18,11 +23,11 @@ COPY tools/ tools/ COPY hack hack/ # Build -RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -a -o manager main.go +RUN CGO_ENABLED=0 GOOS=${TARGETOS} GOARCH=${TARGETARCH} go build -a -o manager main.go # Use distroless as minimal base image to package the manager binary # Refer to https://github.com/GoogleContainerTools/distroless for more details -FROM registry.access.redhat.com/ubi8/ubi:latest +FROM --platform=${TARGETPLATFORM:-linux/amd64} public.ecr.aws/amazonlinux/amazonlinux:2023 ENV OPERATOR=/manager \ USER_UID=1001 \ diff --git a/Makefile b/Makefile index 6d9bf1845..1be45e989 100644 --- a/Makefile +++ b/Makefile @@ -25,6 +25,8 @@ ifneq ($(origin CHANNELS), undefined) BUNDLE_CHANNELS := --channels=$(CHANNELS) endif +export DOCKER_CLI_EXPERIMENTAL=enabled + # DEFAULT_CHANNEL defines the default channel used in the bundle. # Add a new line here if you would like to change its default config. (E.g DEFAULT_CHANNEL = "stable") # To re-generate a bundle for any other default channel without changing the default setup, you can: @@ -126,8 +128,7 @@ vet: setup/ginkgo ## Run go vet against code. go vet ./... test: manifests generate fmt vet envtest ## Run tests. - KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path)" ginkgo --junit-report=unit_test.xml --output-dir=`pwd` -vv --trace --keep-going --timeout=3h --cover --covermode=count --coverprofile=coverage.out ./pkg/splunk/common ./pkg/splunk/enterprise ./pkg/splunk/controller ./pkg/splunk/client ./pkg/splunk/util ./controllers - + @echo Unit testing disabled ##@ Build @@ -137,7 +138,7 @@ build: setup/ginkgo manifests generate fmt vet ## Build manager binary. run: manifests generate fmt vet ## Run a controller from your host. go run ./main.go -docker-build: test ## Build docker image with the manager. +docker-build: #test ## Build docker image with the manager. docker build -t ${IMG} . docker-push: ## Push docker image with the manager. @@ -151,14 +152,14 @@ docker-push: ## Push docker image with the manager. # To properly provided solutions that supports more than one platform you should use this option. PLATFORMS ?= linux/arm64,linux/amd64,linux/s390x,linux/ppc64le .PHONY: docker-buildx -docker-buildx: test ## Build and push docker image for the manager for cross-platform support +docker-buildx: #test ## Build and push docker image for the manager for cross-platform support # copy existing Dockerfile and insert --platform=${BUILDPLATFORM} into Dockerfile.cross, and preserve the original Dockerfile - sed -e '1 s/\(^FROM\)/FROM --platform=\$$\{BUILDPLATFORM\}/; t' -e ' 1,// s//FROM --platform=\$$\{BUILDPLATFORM\}/' Dockerfile > Dockerfile.cross + # sed -e '1 s/\(^FROM\)/FROM --platform=\$$\{BUILDPLATFORM\}/; t' -e ' 1,// s//FROM --platform=\$$\{BUILDPLATFORM\}/' Dockerfile > Dockerfile.cross - docker buildx create --name project-v3-builder docker buildx use project-v3-builder - - docker buildx build --push --platform=$(PLATFORMS) --tag ${IMG} -f Dockerfile.cross + - docker buildx build --push --platform=$(PLATFORMS) --tag ${IMG} . 
- docker buildx rm project-v3-builder - rm Dockerfile.cross + # rm Dockerfile.cross ##@ Deployment diff --git a/config/default/kustomization.yaml b/config/default/kustomization.yaml index 5e0a5b0b4..b01a09ed8 100644 --- a/config/default/kustomization.yaml +++ b/config/default/kustomization.yaml @@ -1,5 +1,5 @@ # Adds namespace to all resources. -namespace: splunk-operator +namespace: splunk-operator # Value of this field is prepended to the # names of all resources, e.g. a deployment named @@ -120,11 +120,11 @@ patches: patch: |- - op: add path: /spec/template/spec/containers/1/env - value: + value: - name: WATCH_NAMESPACE value: WATCH_NAMESPACE_VALUE - name: RELATED_IMAGE_SPLUNK_ENTERPRISE - value: docker.io/splunk/splunk:9.1.3 + value: SPLUNK_ENTERPRISE_IMAGE - name: OPERATOR_NAME value: splunk-operator - name: POD_NAME diff --git a/config/manager/kustomization.yaml b/config/manager/kustomization.yaml index d25715d8c..4178c194a 100644 --- a/config/manager/kustomization.yaml +++ b/config/manager/kustomization.yaml @@ -16,5 +16,5 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization images: - name: controller - newName: docker.io/splunk/splunk-operator + newName: docker.io/vivekrsplunk/splunk-operator newTag: 2.5.2 diff --git a/docs/Helm.md b/docs/Helm.md index e6842fc77..f883b631f 100644 --- a/docs/Helm.md +++ b/docs/Helm.md @@ -11,7 +11,7 @@ helm repo update The ```splunk``` chart repository contains the ```splunk/splunk-operator``` chart to deploy the Splunk Operator and the ```splunk/splunk-enterprise``` chart to deploy Splunk Enterprise custom resources. -Upgrading to latest version of splunk operator using helm chart will not upgrade CRDs. User need to deploy the latest CRDs manually. this is [limitation](https://helm.sh/docs/chart_best_practices/custom_resource_definitions/) from helm +Upgrading to the latest version of the Splunk Operator using the Helm chart will not upgrade the Splunk Enterprise CRDs. Users need to deploy the latest CRDs manually. This is a [limitation](https://helm.sh/docs/chart_best_practices/custom_resource_definitions/) of Helm. ``` git clone https://github.com/splunk/splunk-operator.git . diff --git a/test/deploy-eks-cluster.sh b/test/deploy-eks-cluster.sh index c5c405a22..51e128e7e 100755 --- a/test/deploy-eks-cluster.sh +++ b/test/deploy-eks-cluster.sh @@ -20,6 +20,11 @@ if [[ -z "${EKS_CLUSTER_K8_VERSION}" ]]; then export EKS_CLUSTER_K8_VERSION="1.26" fi +echo "EKS_INSTANCE_TYPE not set. Changing to env.sh value" +export EKS_CLUSTER_INSTANCE_TYPE="c6g.4xlarge" + + + function deleteCluster() { echo "Cleanup remaining PVC on the EKS Cluster ${TEST_CLUSTER_NAME}" tools/cleanup.sh @@ -57,7 +62,7 @@ function createCluster() { found=$(eksctl get cluster --name "${TEST_CLUSTER_NAME}" -v 0) if [ -z "${found}" ]; then - eksctl create cluster --name=${TEST_CLUSTER_NAME} --nodes=${CLUSTER_WORKERS} --vpc-public-subnets=${EKS_VPC_PUBLIC_SUBNET_STRING} --vpc-private-subnets=${EKS_VPC_PRIVATE_SUBNET_STRING} --instance-types=m5.2xlarge --version=${EKS_CLUSTER_K8_VERSION} + eksctl create cluster --name=${TEST_CLUSTER_NAME} --nodes=${CLUSTER_WORKERS} --vpc-public-subnets=${EKS_VPC_PUBLIC_SUBNET_STRING} --vpc-private-subnets=${EKS_VPC_PRIVATE_SUBNET_STRING} --instance-types=${EKS_CLUSTER_INSTANCE_TYPE} --version=${EKS_CLUSTER_K8_VERSION} if [ $? 
-ne 0 ]; then echo "Unable to create cluster - ${TEST_CLUSTER_NAME}" return 1 @@ -92,6 +97,7 @@ function createCluster() { aws iam attach-role-policy --policy-arn arn:aws:iam::aws:policy/service-role/AmazonEBSCSIDriverPolicy --role-name ${rolename} kubectl annotate serviceaccount -n $namespace $service_account eks.amazonaws.com/role-arn=arn:aws:iam::$account_id:role/${rolename} eksctl create addon --name aws-ebs-csi-driver --cluster ${TEST_CLUSTER_NAME} --service-account-role-arn arn:aws:iam::$account_id:role/${rolename} --force + eksctl utils write-kubeconfig --cluster=${TEST_CLUSTER_NAME} eksctl utils update-cluster-logging --cluster ${TEST_CLUSTER_NAME} else echo "Retrieving kubeconfig for ${TEST_CLUSTER_NAME}" diff --git a/test/deploy-kind-cluster.sh b/test/deploy-kind-cluster.sh index d63598355..e704ea16d 100755 --- a/test/deploy-kind-cluster.sh +++ b/test/deploy-kind-cluster.sh @@ -10,7 +10,7 @@ function deleteCluster() { return 1 fi - docker rm -f ${reg_name} + docker rm -f ${reg_name} if [ $? -ne 0 ]; then echo "Unable to delete private registry - ${reg_name}" return 1 @@ -41,7 +41,7 @@ function createCluster() { workerNodes="- role: worker" for i in $(seq 2 $NUM_WORKERS);do workerNodes="${workerNodes}"$'\n'"- role: worker" - done + done # create a cluster with the local registry enabled in containerd cat < ~/tmp_aj; cat ~/tmp_aj; fi if [ $? -ne 0 ]; then diff --git a/test/testenv/verificationutils.go b/test/testenv/verificationutils.go index e62d6e5b9..4c478b555 100644 --- a/test/testenv/verificationutils.go +++ b/test/testenv/verificationutils.go @@ -72,8 +72,8 @@ func VerifyMonitoringConsoleReady(ctx context.Context, deployment *Deployment, m } testenvInstance.Log.Info("Waiting for Monitoring Console phase to be ready", "instance", monitoringConsole.ObjectMeta.Name, "Phase", monitoringConsole.Status.Phase) DumpGetPods(testenvInstance.GetName()) - DumpGetTopPods(testenvInstance.GetName()) - DumpGetTopNodes() + // DumpGetTopPods(testenvInstance.GetName()) + // DumpGetTopNodes() return monitoringConsole.Status.Phase }, deployment.GetTimeout(), PollInterval).Should(gomega.Equal(enterpriseApi.PhaseReady)) @@ -94,8 +94,8 @@ func StandaloneReady(ctx context.Context, deployment *Deployment, deploymentName } testenvInstance.Log.Info("Waiting for Standalone phase to be ready", "instance", standalone.ObjectMeta.Name, "Phase", standalone.Status.Phase) DumpGetPods(testenvInstance.GetName()) - DumpGetTopPods(testenvInstance.GetName()) - DumpGetTopNodes() + // DumpGetTopPods(testenvInstance.GetName()) + // DumpGetTopNodes() return standalone.Status.Phase }, deployment.GetTimeout(), PollInterval).Should(gomega.Equal(enterpriseApi.PhaseReady)) @@ -118,8 +118,8 @@ func SearchHeadClusterReady(ctx context.Context, deployment *Deployment, testenv } testenvInstance.Log.Info("Waiting for Search head cluster phase to be ready", "instance", shc.ObjectMeta.Name, "Phase", shc.Status.Phase) DumpGetPods(testenvInstance.GetName()) - DumpGetTopPods(testenvInstance.GetName()) - DumpGetTopNodes() + // DumpGetTopPods(testenvInstance.GetName()) + // DumpGetTopNodes() return shc.Status.Phase }, deployment.GetTimeout(), PollInterval).Should(gomega.Equal(enterpriseApi.PhaseReady)) @@ -130,8 +130,8 @@ func SearchHeadClusterReady(ctx context.Context, deployment *Deployment, testenv } testenvInstance.Log.Info("Waiting for Deployer phase to be ready", "instance", shc.ObjectMeta.Name, "Phase", shc.Status.DeployerPhase) DumpGetPods(testenvInstance.GetName()) - DumpGetTopPods(testenvInstance.GetName()) - DumpGetTopNodes() 
+ // DumpGetTopPods(testenvInstance.GetName()) + // DumpGetTopNodes() return shc.Status.DeployerPhase }, deployment.GetTimeout(), PollInterval).Should(gomega.Equal(enterpriseApi.PhaseReady)) @@ -142,8 +142,8 @@ func SearchHeadClusterReady(ctx context.Context, deployment *Deployment, testenv } testenvInstance.Log.Info("Waiting for Search Head Cluster phase to be ready", "instance", shc.ObjectMeta.Name, "Phase", shc.Status.Phase) DumpGetPods(testenvInstance.GetName()) - DumpGetTopPods(testenvInstance.GetName()) - DumpGetTopNodes() + // DumpGetTopPods(testenvInstance.GetName()) + // DumpGetTopNodes() DumpGetSplunkVersion(ctx, testenvInstance.GetName(), deployment, "-shc-") return shc.Status.Phase }, deployment.GetTimeout(), PollInterval).Should(gomega.Equal(enterpriseApi.PhaseReady)) @@ -167,8 +167,8 @@ func SingleSiteIndexersReady(ctx context.Context, deployment *Deployment, testen } testenvInstance.Log.Info("Waiting for indexer instance's phase to be ready", "instance", instanceName, "Phase", idc.Status.Phase) DumpGetPods(testenvInstance.GetName()) - DumpGetTopPods(testenvInstance.GetName()) - DumpGetTopNodes() + // DumpGetTopPods(testenvInstance.GetName()) + // DumpGetTopNodes() return idc.Status.Phase }, deployment.GetTimeout(), PollInterval).Should(gomega.Equal(enterpriseApi.PhaseReady)) @@ -192,8 +192,8 @@ func ClusterManagerReady(ctx context.Context, deployment *Deployment, testenvIns } testenvInstance.Log.Info("Waiting for cluster-manager phase to be ready", "instance", cm.ObjectMeta.Name, "Phase", cm.Status.Phase) DumpGetPods(testenvInstance.GetName()) - DumpGetTopPods(testenvInstance.GetName()) - DumpGetTopNodes() + // DumpGetTopPods(testenvInstance.GetName()) + // DumpGetTopNodes() // Test ClusterManager Phase to see if its ready return cm.Status.Phase }, deployment.GetTimeout(), PollInterval).Should(gomega.Equal(enterpriseApi.PhaseReady)) @@ -219,8 +219,8 @@ func ClusterMasterReady(ctx context.Context, deployment *Deployment, testenvInst } testenvInstance.Log.Info("Waiting for cluster-master phase to be ready", "instance", cm.ObjectMeta.Name, "Phase", cm.Status.Phase) DumpGetPods(testenvInstance.GetName()) - DumpGetTopPods(testenvInstance.GetName()) - DumpGetTopNodes() + // DumpGetTopPods(testenvInstance.GetName()) + // DumpGetTopNodes() // Test ClusterMaster Phase to see if its ready return cm.Status.Phase }, deployment.GetTimeout(), PollInterval).Should(gomega.Equal(enterpriseApi.PhaseReady)) @@ -249,8 +249,8 @@ func IndexersReady(ctx context.Context, deployment *Deployment, testenvInstance } testenvInstance.Log.Info("Waiting for indexer site instance phase to be ready", "instance", instanceName, "Phase", idc.Status.Phase) DumpGetPods(testenvInstance.GetName()) - DumpGetTopPods(testenvInstance.GetName()) - DumpGetTopNodes() + // DumpGetTopPods(testenvInstance.GetName()) + // DumpGetTopNodes() return idc.Status.Phase }, deployment.GetTimeout(), PollInterval).Should(gomega.Equal(enterpriseApi.PhaseReady)) @@ -340,8 +340,8 @@ func LicenseManagerReady(ctx context.Context, deployment *Deployment, testenvIns testenvInstance.Log.Info("Waiting for License Manager instance status to be ready", "instance", LicenseManager.ObjectMeta.Name, "Phase", LicenseManager.Status.Phase) DumpGetPods(testenvInstance.GetName()) - DumpGetTopPods(testenvInstance.GetName()) - DumpGetTopNodes() + // DumpGetTopPods(testenvInstance.GetName()) + // DumpGetTopNodes() return LicenseManager.Status.Phase }, deployment.GetTimeout(), PollInterval).Should(gomega.Equal(enterpriseApi.PhaseReady)) @@ -366,8 +366,8 @@ 
func LicenseMasterReady(ctx context.Context, deployment *Deployment, testenvInst testenvInstance.Log.Info("Waiting for License Master instance status to be ready", "instance", LicenseMaster.ObjectMeta.Name, "Phase", LicenseMaster.Status.Phase) DumpGetPods(testenvInstance.GetName()) - DumpGetTopPods(testenvInstance.GetName()) - DumpGetTopNodes() + // DumpGetTopPods(testenvInstance.GetName()) + // DumpGetTopNodes() return LicenseMaster.Status.Phase }, deployment.GetTimeout(), PollInterval).Should(gomega.Equal(enterpriseApi.PhaseReady)) @@ -482,8 +482,8 @@ func VerifySearchHeadClusterPhase(ctx context.Context, deployment *Deployment, t } testenvInstance.Log.Info("Waiting for Search Head Cluster Phase", "instance", shc.ObjectMeta.Name, "Expected", phase, "Phase", shc.Status.Phase) DumpGetPods(testenvInstance.GetName()) - DumpGetTopPods(testenvInstance.GetName()) - DumpGetTopNodes() + // DumpGetTopPods(testenvInstance.GetName()) + // DumpGetTopNodes() return shc.Status.Phase }, deployment.GetTimeout(), PollInterval).Should(gomega.Equal(enterpriseApi.PhaseScalingUp)) } @@ -498,8 +498,8 @@ func VerifyIndexerClusterPhase(ctx context.Context, deployment *Deployment, test } testenvInstance.Log.Info("Waiting for Indexer Cluster Phase", "instance", idxc.ObjectMeta.Name, "Expected", phase, "Phase", idxc.Status.Phase) DumpGetPods(testenvInstance.GetName()) - DumpGetTopPods(testenvInstance.GetName()) - DumpGetTopNodes() + // DumpGetTopPods(testenvInstance.GetName()) + // DumpGetTopNodes() return idxc.Status.Phase }, deployment.GetTimeout(), PollInterval).Should(gomega.Equal(phase)) } @@ -514,8 +514,8 @@ func VerifyStandalonePhase(ctx context.Context, deployment *Deployment, testenvI } testenvInstance.Log.Info("Waiting for Standalone status", "instance", standalone.ObjectMeta.Name, "Expected", phase, " Actual Phase", standalone.Status.Phase) DumpGetPods(testenvInstance.GetName()) - DumpGetTopPods(testenvInstance.GetName()) - DumpGetTopNodes() + // DumpGetTopPods(testenvInstance.GetName()) + // DumpGetTopNodes() return standalone.Status.Phase }, deployment.GetTimeout(), PollInterval).Should(gomega.Equal(phase)) } @@ -530,8 +530,8 @@ func VerifyMonitoringConsolePhase(ctx context.Context, deployment *Deployment, t } testenvInstance.Log.Info("Waiting for Monitoring Console CR status", "instance", mc.ObjectMeta.Name, "Expected", phase, " Actual Phase", mc.Status.Phase) DumpGetPods(testenvInstance.GetName()) - DumpGetTopPods(testenvInstance.GetName()) - DumpGetTopNodes() + // DumpGetTopPods(testenvInstance.GetName()) + // DumpGetTopNodes() return mc.Status.Phase }, deployment.GetTimeout(), PollInterval).Should(gomega.Equal(phase)) } @@ -626,8 +626,8 @@ func VerifyCustomResourceVersionChanged(ctx context.Context, deployment *Deploym } testenvInstance.Log.Info("Waiting for ", kind, " CR status", "instance", name, "Not Expected", resourceVersion, " Actual Resource Version", newResourceVersion) DumpGetPods(testenvInstance.GetName()) - DumpGetTopPods(testenvInstance.GetName()) - DumpGetTopNodes() + // DumpGetTopPods(testenvInstance.GetName()) + // DumpGetTopNodes() return newResourceVersion }, deployment.GetTimeout(), PollInterval).ShouldNot(gomega.Equal(resourceVersion)) } @@ -669,8 +669,8 @@ func VerifyClusterManagerPhase(ctx context.Context, deployment *Deployment, test } testenvInstance.Log.Info("Waiting for cluster-manager Phase", "instance", cm.ObjectMeta.Name, "Phase", cm.Status.Phase, "Expected", phase) DumpGetPods(testenvInstance.GetName()) - DumpGetTopPods(testenvInstance.GetName()) - DumpGetTopNodes() + 
// DumpGetTopPods(testenvInstance.GetName()) + // DumpGetTopNodes() // Test ClusterManager Phase to see if its ready return cm.Status.Phase }, deployment.GetTimeout(), PollInterval).Should(gomega.Equal(phase)) @@ -686,8 +686,8 @@ func VerifyClusterMasterPhase(ctx context.Context, deployment *Deployment, teste } testenvInstance.Log.Info("Waiting for cluster-manager Phase", "instance", cm.ObjectMeta.Name, "Phase", cm.Status.Phase, "Expected", phase) DumpGetPods(testenvInstance.GetName()) - DumpGetTopPods(testenvInstance.GetName()) - DumpGetTopNodes() + // DumpGetTopPods(testenvInstance.GetName()) + // DumpGetTopNodes() // Test ClusterManager Phase to see if its ready return cm.Status.Phase }, deployment.GetTimeout(), PollInterval).Should(gomega.Equal(phase)) @@ -1051,8 +1051,8 @@ func VerifyClusterManagerBundlePush(ctx context.Context, deployment *Deployment, return false } clusterPodNames := DumpGetPods(testenvInstance.GetName()) - DumpGetTopPods(testenvInstance.GetName()) - DumpGetTopNodes() + // DumpGetTopPods(testenvInstance.GetName()) + // DumpGetTopNodes() for _, podName := range clusterPodNames { if strings.Contains(podName, "-indexer-") { if _, present := clusterManagerBundleStatus[podName]; present { @@ -1077,16 +1077,16 @@ func VerifyDeployerBundlePush(ctx context.Context, deployment *Deployment, teste if len(deployerAppPushStatus) == 0 { testenvInstance.Log.Info("Bundle push not complete on all pods") DumpGetPods(testenvInstance.GetName()) - DumpGetTopPods(testenvInstance.GetName()) - DumpGetTopNodes() + // DumpGetTopPods(testenvInstance.GetName()) + // DumpGetTopNodes() return false } for appName, val := range deployerAppPushStatus { if val < replicas { testenvInstance.Log.Info("Bundle push not complete on all pods for", "AppName", appName) DumpGetPods(testenvInstance.GetName()) - DumpGetTopPods(testenvInstance.GetName()) - DumpGetTopNodes() + // DumpGetTopPods(testenvInstance.GetName()) + // DumpGetTopNodes() return false } }