# Based on integration tests in prometheus-service
# https://github.com/keptn-contrib/prometheus-service/blob/7c21f48f66f8ea26dd9d99e6898b57c55c0e5d88/.github/workflows/integration-tests.yaml
name: Integration Tests
on:
schedule:
# * is a special character in YAML, so you have to quote this string
- cron: "0 3 * * 1-5" # run integration tests at 3 AM, monday to friday (1-5)
workflow_dispatch: # run integration tests only when triggered manually
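  # A manual run can be started from the Actions tab, or (assuming an authenticated GitHub CLI) with:
  #   gh workflow run "Integration Tests"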
defaults:
run:
shell: bash
jobs:
integration_test:
name: "Integration Tests"
runs-on: ubuntu-20.04
strategy:
fail-fast: false
matrix:
keptn-version: ["0.18.1", "0.19.0", "1.0.0"] # https://github.com/keptn/keptn/releases
datadog-version: ["2.37.2"] # chart version
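        # The matrix expands to one job per keptn-version/datadog-version pair,
        # i.e. three jobs here, each running the full e2e suite independently.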
env:
GO_VERSION: 1.17
GOPROXY: "https://proxy.golang.org"
GO111MODULE: "on"
BRANCH: ${{ github.head_ref || github.ref_name }}
ENABLE_E2E_TEST: true
JES_VERSION: "0.3.0"
JES_NAMESPACE: keptn-jes
GITEA_ADMIN_USERNAME: GiteaAdmin
GITEA_NAMESPACE: gitea
KUBECONFIG: "${{ github.workspace }}/.kube/config"
TEST_REPORT_FILENAME: test-report-${{ github.run_id }}-keptn-${{ matrix.keptn-version }}-datadog-${{ matrix.datadog-version }}.json
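      # e.g. test-report-<run_id>-keptn-1.0.0-datadog-2.37.2.json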
steps:
      # Check out code for the integration tests in test/e2e
- name: Check out code.
uses: actions/checkout@v3.0.2
- name: Setup Go
uses: actions/setup-go@v3.2.0
with:
go-version-file: "go.mod"
- name: Install gotestsum
shell: bash
run: go install gotest.tools/gotestsum@latest
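      # gotestsum wraps `go test -json` and writes one JSON object per test event to
      # --jsonfile; the publish_final_test_report job below parses that NDJSON output.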
# Prepare minikube + Keptn environment
- name: Install and start minikube
run: |
# wget -q https://www.virtualbox.org/download/oracle_vbox_2016.asc -O- | sudo apt-key add -
# wget -q https://www.virtualbox.org/download/oracle_vbox.asc -O- | sudo apt-key add -
# echo "deb [arch=amd64] http://download.virtualbox.org/virtualbox/debian $(lsb_release -cs) contrib" | sudo tee -a /etc/apt/sources.list.d/virtualbox.list
curl -LO https://storage.googleapis.com/minikube/releases/v1.24.0/minikube-linux-amd64
sudo install minikube-linux-amd64 /usr/local/bin/minikube
/usr/local/bin/minikube start --cpus=2 --memory=5GB
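          # Keep a tunnel running in the background so LoadBalancer services
          # (e.g. Keptn's api-gateway-nginx below) receive a reachable external IP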
minikube tunnel &> /dev/null &
echo "minikube ready!"
- name: Generate Gitea credentials
id: gitea_credentials
run: |
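          # Derive a throwaway 32-character password from the current timestamp:
          # epoch seconds -> sha256 -> base64 -> first 32 chars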
password=$(date +%s | sha256sum | base64 | head -c 32)
echo "::add-mask::$password"
echo "::set-output name=GITEA_ADMIN_PASSWORD::$password"
- name: Install Gitea
id: gitea
env:
GITEA_ADMIN_PASSWORD: ${{ steps.gitea_credentials.outputs.GITEA_ADMIN_PASSWORD }}
run: |
export GITEA_ENDPOINT="http://gitea-http.${GITEA_NAMESPACE}:3000"
helm repo add gitea-charts https://dl.gitea.io/charts/
helm repo update
helm install -n ${GITEA_NAMESPACE} gitea gitea-charts/gitea \
--create-namespace \
--set memcached.enabled=false \
--set postgresql.enabled=false \
--set gitea.config.database.DB_TYPE=sqlite3 \
--set gitea.admin.username=${GITEA_ADMIN_USERNAME} \
--set gitea.admin.password=${GITEA_ADMIN_PASSWORD} \
--set gitea.config.server.OFFLINE_MODE=true \
--set gitea.config.server.ROOT_URL=${GITEA_ENDPOINT}/ \
--wait
# Export Gitea connection details
echo "::set-output name=GITEA_ENDPOINT::${GITEA_ENDPOINT}"
- name: Install gitea provisioner-service
env:
GITEA_ADMIN_PASSWORD: ${{ steps.gitea_credentials.outputs.GITEA_ADMIN_PASSWORD }}
GITEA_ENDPOINT: ${{ steps.gitea.outputs.GITEA_ENDPOINT }}
run: |
helm install keptn-gitea-provisioner-service https://github.com/keptn-sandbox/keptn-gitea-provisioner-service/releases/download/0.1.0/keptn-gitea-provisioner-service-0.1.0.tgz \
--set gitea.endpoint=${GITEA_ENDPOINT} \
--set gitea.admin.create=true \
--set gitea.admin.username=${GITEA_ADMIN_USERNAME} \
--set gitea.admin.password=${GITEA_ADMIN_PASSWORD} \
--wait
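      # With automatic provisioning enabled (see the Keptn Helm values below), Keptn asks
      # this provisioner-service to create a Gitea repository for each new project instead
      # of requiring a manually configured Git upstream.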
# - name: Debugging with ssh
# uses: lhotari/action-upterm@v1
- name: Install Keptn
id: install_keptn
# This action is a patched version of github.com/keptn-sandbox/action-install-keptn
# which is being used here in the interest of time
# TODO: this change should go into github.com/keptn-sandbox/action-install-keptn
uses: vadasambar/action-install-keptn@v4.0.0-patch-vadasambar3
timeout-minutes: 10
with:
KEPTN_VERSION: ${{ matrix.keptn-version }}
HELM_VALUES: |
# Keptn 0.17 and newer
apiGatewayNginx:
type: LoadBalancer
features:
automaticProvisioning:
serviceURL: http://keptn-gitea-provisioner-service.default
# Keptn 0.16 compatibility
control-plane:
apiGatewayNginx:
type: LoadBalancer
features:
automaticProvisioningURL: http://keptn-gitea-provisioner-service.default
# For Keptn 0.18 (to make room for more CPU)
statisticsService:
enabled: false
webhookService:
enabled: false
          # For Keptn 1.0.0 (to make room for more resources and reduce helm installation time)
approvalService:
enabled: false
KUBECONFIG: ${{ env.KUBECONFIG }}
HELM_TIMEOUT: "10m"
- name: Test connection to keptn
run: |
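          # A healthy install answers with Keptn metadata as JSON (including the Keptn version)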
curl -X GET "${{ steps.install_keptn.outputs.KEPTN_API_URL }}/v1/metadata" -H "accept: application/json" -H "x-token: ${{ steps.install_keptn.outputs.KEPTN_API_TOKEN }}"
- name: Install datadog
env:
DD_API_KEY: ${{ secrets.DD_API_KEY }}
DD_APP_KEY: ${{ secrets.DD_APP_KEY }}
DD_SITE: ${{ secrets.DD_SITE }}
DATADOG_VERSION: ${{ matrix.datadog-version }}
run: |
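          # Install the Datadog agent chart into the keptn namespace, with the
          # cluster agent and its external metrics provider enabled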
helm repo add datadog https://helm.datadoghq.com
          helm install datadog datadog/datadog \
            -n keptn \
            --version=${DATADOG_VERSION} \
            --set datadog.apiKey=${DD_API_KEY} \
            --set datadog.appKey=${DD_APP_KEY} \
            --set datadog.site=${DD_SITE} \
            --set clusterAgent.enabled=true \
            --set clusterAgent.metricsProvider.enabled=true \
            --set clusterAgent.createPodDisruptionBudget=true \
            --set clusterAgent.replicas=1
- name: Install datadog-service
env:
DD_API_KEY: ${{ secrets.DD_API_KEY }}
DD_APP_KEY: ${{ secrets.DD_APP_KEY }}
DD_SITE: ${{ secrets.DD_SITE }}
run: |
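          # Install datadog-service from the local chart in this repository (./helm),
          # wired to the same Datadog credentials as the agent above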
          helm install datadog-service ./helm \
            -n keptn \
            --set datadogservice.ddApikey=${DD_API_KEY} \
            --set datadogservice.ddAppKey=${DD_APP_KEY} \
            --set datadogservice.ddSite=${DD_SITE}
# Install job executor from downloaded helm chart
- name: Install Job-executor-service
env:
KEPTN_API_PROTOCOL: http
KEPTN_API_TOKEN: ${{ steps.install_keptn.outputs.KEPTN_API_TOKEN }}
KEPTN_ENDPOINT: ${{ steps.install_keptn.outputs.KEPTN_HTTP_ENDPOINT }}
TASK_SUBSCRIPTION: "sh.keptn.event.deployment.triggered\\,sh.keptn.event.test.triggered"
run: |
helm upgrade --install \
--create-namespace -n ${JES_NAMESPACE} \
job-executor-service \
"https://github.com/keptn-contrib/job-executor-service/releases/download/${JES_VERSION}/job-executor-service-${JES_VERSION}.tgz" \
--set remoteControlPlane.autoDetect.enabled="false" \
--set remoteControlPlane.topicSubscription=${TASK_SUBSCRIPTION} \
--set remoteControlPlane.api.token=${KEPTN_API_TOKEN} \
--set remoteControlPlane.api.hostname=${KEPTN_ENDPOINT} \
--set remoteControlPlane.api.protocol=${KEPTN_API_PROTOCOL} \
--wait
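          # RBAC objects that allow the job-executor-service job pods to run
          # helm/kubectl against the cluster during the e2e tests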
kubectl apply \
-f test/data/helm-serviceAccount.yaml \
-f test/data/helm-clusterRole.yaml \
-f test/data/helm-clusterRoleBinding.yaml
      # If any previous step failed, nothing may have been reported for this version, so record a synthetic failure
- name: Create pipeline failure report
if: failure()
run: |
echo "Failed to run integration tests!"
echo '{"Test": "TestGitHub Pipeline", "Action": "fail"}' >> $TEST_REPORT_FILENAME
# # Uncomment this for debugging (`cd && touch continue` to continue execution of the workflow)
# - name: Debugging with ssh
# uses: lhotari/action-upterm@v1
- name: Run Integration tests
env:
KEPTN_ENDPOINT: ${{ steps.install_keptn.outputs.KEPTN_API_URL }}
KEPTN_API_TOKEN: ${{ steps.install_keptn.outputs.KEPTN_API_TOKEN }}
PREFIX: ${{ matrix.keptn-version }}-r${{ github.run_number }}-${{ github.run_id }}-t${{ github.run_attempt }}
run: |
          # Refresh the kube context (the kubeconfig is sometimes missing or stale at this point)
          # TODO: investigate why this happens
minikube update-context
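          # -timeout=120m covers the whole suite; each test event is appended to
          # $TEST_REPORT_FILENAME as one JSON line for the report job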
gotestsum --format testname --jsonfile $TEST_REPORT_FILENAME -- -timeout=120m ./test/e2e/...
# Upload the report files, so we can use them in later jobs
- name: Upload test report as an artifact
if: always()
        uses: actions/upload-artifact@v3
with:
name: test-report
path: test-report-*.json
- name: Dump k8s debug info
if: always()
run: |
mkdir k8s_debug
kubectl describe nodes > k8s_debug/k8s_describe_nodes.txt
kubectl cluster-info dump > k8s_debug/k8s_cluster_info_dump.txt
kubectl get configmaps,deployments,pods,networkpolicy,serviceaccounts,role,rolebindings,events -n ${JES_NAMESPACE} -o json > k8s_debug/k8s_jes_objects.json
kubectl get all -n keptn -o json > k8s_debug/k8s_keptn_objects.json
kubectl logs -n keptn -l app.kubernetes.io/instance=keptn --prefix=true --previous=false --all-containers --tail=-1 > k8s_debug/k8s_keptn_logs.txt || true
kubectl logs deployment/keptn-gitea-provisioner-service --prefix=true --previous=false --all-containers --tail=-1 > k8s_debug/k8s_gitea_provisioner_logs.txt || true
kubectl logs -n ${JES_NAMESPACE} deployment/job-executor-service --prefix=true --previous=false --all-containers --tail=-1 > k8s_debug/k8s_jes_logs.txt || true
kubectl get statefulsets,configmaps,pods,networkpolicy,serviceaccounts,role,rolebindings,events,services -n ${GITEA_NAMESPACE} -o json > k8s_debug/k8s_objects_gitea.json
kubectl logs statefulsets/gitea --prefix=true --previous=false --all-containers -n ${GITEA_NAMESPACE} --tail=-1 > k8s_debug/k8s_logs_gitea.txt || true
kubectl logs -n keptn deployment/datadog-cluster-agent --prefix=true --previous=false --all-containers --tail=-1 > k8s_debug/k8s_datadog-cluster-agent_logs.txt || true
          kubectl logs -n keptn deployment/datadog-kube-state-metrics --prefix=true --previous=false --all-containers --tail=-1 > k8s_debug/k8s_datadog_kube_state_metrics_logs.txt || true
kubectl logs -n keptn daemonset/datadog --prefix=true --previous=false --all-containers --tail=-1 > k8s_debug/k8s_datadog_logs.txt || true
kubectl logs -n keptn deployment/datadog-service --prefix=true --previous=false --all-containers --tail=-1 > k8s_debug/k8s_datadog_service_logs.txt || true
kubectl get all -n e2e-project-staging -o json > k8s_debug/k8s_e2e_project_objects.json
# Upload the k8s debug archive, so we can use it for investigating
- name: Upload k8s debug archive
if: always()
        uses: actions/upload-artifact@v3
with:
name: k8s-debug-archive-keptn-${{ matrix.keptn-version }}-datadog-${{ matrix.datadog-version }}
path: k8s_debug/
publish_final_test_report:
name: Finalize tests reports
needs: integration_test
if: always()
runs-on: ubuntu-20.04
env:
BRANCH: ${{ github.head_ref || github.ref_name }}
TEST_REPORTS_PATH: "./test-reports/"
FINAL_TEST_REPORTS_FOLDER: "./final-test-reports/"
FINAL_TEST_REPORT_FILEPATH_JSON: "./final-test-reports/final-test-report.json"
FINAL_TEST_REPORT_FILEPATH_MARKDOWN: "./final-test-reports/final-test-report.md"
steps:
- name: Set up Node
uses: actions/setup-node@v3.3.0
with:
node-version: 16
- run: npm install ndjson-parse@1.0.4 tablemark@v2.0.0
- name: Download test reports
uses: actions/download-artifact@v3
with:
name: test-report
path: ${{ env.TEST_REPORTS_PATH }}
      # This step collects all test reports and merges them into one
      # Markdown report, which is posted later as the job summary
- name: Build final test report
id: build_final_test_report
uses: actions/github-script@v6.1.0
env:
TEST_REPORTS_PATH: ${{ env.TEST_REPORTS_PATH }}
FINAL_TEST_REPORTS_FOLDER: ${{ env.FINAL_TEST_REPORTS_FOLDER }}
FINAL_TEST_REPORT_FILEPATH_MARKDOWN: ${{ env.FINAL_TEST_REPORT_FILEPATH_MARKDOWN }}
with:
result-encoding: string
script: |
            const ndJsonParser = require('ndjson-parse');
            const tablemark = require('tablemark');
            const fs = require('fs');
const {TEST_REPORTS_PATH,
FINAL_TEST_REPORT_FILEPATH_MARKDOWN,
FINAL_TEST_REPORTS_FOLDER
} = process.env
const markdownReportData = [];
let transposedTable = {};
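            // Captures the version suffix from the report filename, e.g.
            // "keptn-1.0.0-datadog-2.37.2" out of "test-report-<run_id>-keptn-1.0.0-datadog-2.37.2.json"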
            const keptnVersionRegex = /test-report-\d+-(.*)\.json/;
const fileList = fs.readdirSync(TEST_REPORTS_PATH);
let keptnVersionCount = 0;
fileList.forEach(fileName => {
console.log(`Reading file: ${fileName}`);
const platformReportFile = fs.readFileSync(TEST_REPORTS_PATH + fileName, {encoding:'utf8', flag:'r'});
const keptnVersion = keptnVersionRegex.exec(fileName)[1];
const testResults = ndJsonParser(platformReportFile);
keptnVersionCount++;
testResults.forEach(testResult => {
if (testResult.Test !== undefined && (testResult.Action === "pass" || testResult.Action === "fail" || testResult.Action === "skip")) {
                // Use the Go test name as the row key
                const name = testResult.Test;
                // For the markdown version we transpose the table layout such that
                // tests are listed in rows and the Keptn versions are in the columns:
if (transposedTable[name] === undefined) {
transposedTable[name] = {"Test": name};
}
switch(testResult.Action) {
case 'pass': transposedTable[name][keptnVersion] = ':heavy_check_mark:'; break;
case 'fail': transposedTable[name][keptnVersion] = ':x:'; break;
case 'skip': transposedTable[name][keptnVersion] = ':yellow_circle:'; break;
default: transposedTable[name][keptnVersion] = testResult.Action;
}
}
});
});
transposedTable = Object.values(transposedTable)
let columns = [{ align: "left" }];
for(let i = 0; i < keptnVersionCount; i++){
columns.push({ align: "center" });
}
const markdownReport = tablemark(transposedTable, {caseHeaders: false, columns: columns});
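            // The resulting Markdown table has one row per test and one
            // :heavy_check_mark: / :x: / :yellow_circle: column per matrix entry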
fs.mkdirSync(FINAL_TEST_REPORTS_FOLDER);
fs.writeFileSync(FINAL_TEST_REPORT_FILEPATH_MARKDOWN, markdownReport);
- name: Upload final Markdown test report as an artifact
if: always()
uses: actions/upload-artifact@v3
with:
name: final-test-report-markdown
path: ${{ env.FINAL_TEST_REPORT_FILEPATH_MARKDOWN }}
- name: Post final Markdown test report as summary
if: always()
run: cat ${{ env.FINAL_TEST_REPORT_FILEPATH_MARKDOWN }} >> $GITHUB_STEP_SUMMARY
# Check if an integration test failed
- name: Check test status
if: always()
id: check_test_status
env:
FINAL_TEST_REPORT_FILEPATH_MARKDOWN: ${{ env.FINAL_TEST_REPORT_FILEPATH_MARKDOWN }}
run: |
REPORT=$(cat "$FINAL_TEST_REPORT_FILEPATH_MARKDOWN")
if [[ "$REPORT" == *":x:"* ]]; then
echo "INTEGRATION TESTS FAILED!"
echo "##[set-output name=INTEGRATION_TESTS_FAILED;]true"
fi
- name: Finalize GitHub issue template
id: finalize_github_issue_template
if: always() && steps.check_test_status.outputs.INTEGRATION_TESTS_FAILED == 'true'
env:
FINAL_TEST_REPORT_FILEPATH_MARKDOWN: ${{ env.FINAL_TEST_REPORT_FILEPATH_MARKDOWN }}
run: |
REPORT=$(cat "$FINAL_TEST_REPORT_FILEPATH_MARKDOWN")
if [[ $GITHUB_EVENT_NAME == 'schedule' ]]; then
TRIGGERED_BY="Scheduled build"
else
TRIGGERED_BY="@$GITHUB_ACTOR"
fi
INTEGRATION_FILE_LINK=$GITHUB_SERVER_URL/$GITHUB_REPOSITORY/blob/$BRANCH/.github/workflows/integration_tests.yaml
# Create issue for the failed integration test:
cat <<EOT >> integration-tests-failed.md
---
title: Integration tests failed
labels: type:critical
---
* Link to run: $GITHUB_SERVER_URL/$GITHUB_REPOSITORY/actions/runs/$GITHUB_RUN_ID
* Triggered by: $TRIGGERED_BY
* Branch: $BRANCH
* Commit: $GITHUB_SHA
$REPORT
Note: This issue was auto-generated from [integration_tests.yaml]($INTEGRATION_FILE_LINK)
EOT
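      # The title/labels front matter above is what JasonEtco/create-an-issue reads
      # when it files the issue in the next step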
# Create a GitHub issue if scheduled tests failed
- name: Create issue if tests failed
if: always() && github.event_name == 'schedule' && steps.check_test_status.outputs.INTEGRATION_TESTS_FAILED == 'true'
uses: JasonEtco/create-an-issue@v2.6
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
filename: integration-tests-failed.md
      # This ensures that the user gets informed if the pipeline was triggered by hand
- name: Fail pipeline if tests failed
if: always() && github.event_name != 'schedule' && steps.check_test_status.outputs.INTEGRATION_TESTS_FAILED == 'true'
run: exit 1