diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 86bd1b2711f5..bca9dff3b4f8 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -223,26 +223,31 @@ jobs:
       - test-ui
     if: always()
     runs-on: ${{ github.repository == 'hashicorp/vault' && 'ubuntu-latest' || fromJSON('["self-hosted","linux","small"]') }}
+    permissions: write-all # Ensure we have id-token:write access for vault-auth.
     steps:
       - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
       # Determine the overall status of our required test jobs.
       - name: Determine status
         id: status
         run: |
-          # Decode our test results and filter out unrequired jobs. Reformat the JSON
-          # body that we get from Github into a more useful shape.
-          if results=$(jq -er 'del(.["test-go-fips"], .["test-go-race"]) as $required
-            | $required | keys as $jobs
-            | reduce $jobs[] as $job ([]; . + [{job: $job}+$required[$job]])'
-          ) <<< '${{ toJSON(needs.*.result) }}'; then
+          # Determine the overall status of the job. We allow fips and race tests to fail so we
+          # don't consider their result here.
+          #
+          # Encode the needs context into JSON, filter out unrequired workflows, shape the result
+          # into a more useful schema. Determine the overall status by comparing the total number of
+          # successful results with the number of required jobs.
+          if results=$(jq -rec 'del(.["test-go-fips"], .["test-go-race"]) as $required
+            | $required | keys as $jobs
+            | reduce $jobs[] as $job ([]; . + [{job: $job}+$required[$job]])' <<< '${{ toJSON(needs) }}'
+          ); then
             # Determine if all of our required jobs have succeeded.
-            if jq -er 'length as $expected_length
-              | [.[] | select(.result == "success")]
-              | length == $expected_length' <<< "$results"; then
-              msg=$(echo -e "All required test jobs succeeded!\n$results")
+            if jq -rec 'length as $expected
+              | [.[] | select((.result == "success") or (.result == "skipped"))] | length as $got
+              | $expected == $got' <<< "$results"; then
+              msg="All required test jobs succeeded!"
               result="success"
             else
-              msg=$(echo -e "One or more required test jobs failed!\n$results")
+              msg="One or more required test jobs failed!"
               result="failed"
             fi
           else
@@ -253,7 +258,7 @@ jobs:
           {
             echo "msg=${msg}"
             echo "result=${result}"
-            echo "results=${results}"
+            echo "results<> "$GITHUB_STEP_SUMMARY"
           fi
           {
-            echo 'table-test-results< 0)); then
             result="failure"
           fi
-
-          {
-            echo "data-race-result=${result}"
-            echo -n "data-race-output=${data_race_tests[*]}"
-          } | tee -a "$GITHUB_OUTPUT"
+          echo "data-race-result=${result}" | tee -a "$GITHUB_OUTPUT"
+      - name: Upload data race detector failure log
+        if: steps.data-race-check.outputs.data-race-result == 'failure'
+        uses: actions/upload-artifact@26f96dfa697d77e81fd5907df203aa23a56210a8 # v4.3.0
+        with:
+          name: ${{ steps.metadata.outputs.data-race-log-upload-key }}
+          path: ${{ steps.metadata.outputs.go-test-dir }}/${{ steps.metadata.outputs.data-race-log-file }}
+          # Set the minimum retention possible. We only upload this because it's the only way to
+          # aggregate results from matrix workflows.
+          retention-days: 1
+          if-no-files-found: error # Make sure we always upload the data race logs if it failed
       # GitHub Actions doesn't expose the job ID or the URL to the job execution,
       # so we have to fetch it from the API
       - name: Fetch job logs URL
@@ -555,7 +570,7 @@ jobs:
         continue-on-error: true
         run: |
           # This jq query filters out successful tests, leaving only the failures.
-          # Then, it formats the results into rows of a Markdown table. 
+          # Then, it formats the results into rows of a Markdown table.
           # An example row will resemble this:
           # | github.com/hashicorp/vault/package | TestName | fips | 0 | 2 | [view results](github.com/link-to-logs) |
           jq -r -n 'inputs
@@ -578,27 +593,48 @@ jobs:
       - test-matrix
       - test-go
     runs-on: ${{ fromJSON(inputs.runs-on-small) }}
+    outputs:
+      data-race-output: ${{ steps.status.outputs.data-race-output }}
+      data-race-result: ${{ steps.status.outputs.data-race-result }}
     steps:
+      - uses: actions/download-artifact@6b208ae046db98c579e8a3aa621ab581ff575935 # v4.1.1
+        with:
+          pattern: ${{ needs.test-go.outputs.data-race-log-download-pattern }}
+          path: data-race-logs
+          merge-multiple: true
       # Determine our success/failure status by checking the result status and data race status.
       - id: status
         name: Determine status result
         run: |
           # Determine status result
           result="success"
+
+          # Aggregate all of our test workflows and determine our Go test result from them.
           test_go_results=$(tr -d '\n' <<< '${{ toJSON(needs.*.result) }}' | jq -Mrc)
           if ! grep -q -v -E '(failure|cancelled)' <<< "$test_go_results"; then
+            test_go_result="failed"
             result="failed"
+          else
+            test_go_result="success"
           fi
-          data_race_output=""
-          data_race_results=$(tr -d '\n' <<< '${{ toJSON(needs.test-go.outputs.data-race-result) }}' | jq -Mrc)
-          if ! grep -q -v -E '(failure|cancelled)' <<< "$data_race_results"; then
-            data_race_output="${{ toJSON(needs.test-go.outputs.data-race-output) }}"
+
+          # If we have downloaded data race detector logs then at least one Go test job detected
+          # a data race during execution. We'll fail on that.
+          if [ -z "$(ls -A data-race-logs)" ]; then
+            data_race_output=""
+            data_race_result="success"
+          else
+            data_race_output="$(cat data-race-logs/*)"
+            data_race_result="failed"
             result="failed"
           fi
+
+          # Write Go and data race results to outputs.
           {
             echo "data-race-output=${data_race_output}"
-            echo "data-race-results=${data_race_results}"
+            echo "data-race-result=${data_race_result}"
             echo "result=${result}"
+            echo "test-go-result=${test_go_result}"
             echo "test-go-results=${test_go_results}"
           } | tee -a "$GITHUB_OUTPUT"
       # Aggregate, prune, and cache our timing data
@@ -616,7 +652,7 @@ jobs:
           pattern: ${{ needs.test-go.outputs.go-test-results-download-pattern }}
           merge-multiple: true
       - if: ${{ ! cancelled() && needs.test-go.result == 'success' && inputs.test-timing-cache-enabled }}
-        name: Prune old timing
+        name: Prune any invalid timing files
        run: |
           ls -lhR '${{ needs.test-matrix.outputs.go-test-dir }}'
           find '${{ needs.test-matrix.outputs.go-test-dir }}' -mindepth 1 -type f -mtime +3 -delete
@@ -628,7 +664,7 @@ jobs:
           ' shell {} \; > /dev/null 2>&1
           ls -lhR '${{ needs.test-matrix.outputs.go-test-dir }}'
-      # Determine our overall pass/fail with our Go test results and our
+      # Determine our overall pass/fail with our Go test results
       - if: always() && steps.status.outputs.result != 'success'
         name: Check for failed status
         run: |
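The jq reshaping-and-counting check in the first hunk can be exercised outside of Actions. The sketch below is a rough, standalone approximation of that step: the `needs` payload is a hand-written sample (the job names and results are illustrative, not from a real run), and a shell variable stands in for the `${{ toJSON(needs) }}` expression.

#!/usr/bin/env bash
# Illustrative sample of the needs context; in CI this comes from ${{ toJSON(needs) }}.
needs='{
  "test-go": {"result": "success"},
  "test-go-fips": {"result": "failure"},
  "test-go-race": {"result": "failure"},
  "test-ui": {"result": "skipped"}
}'

# Drop the jobs that are allowed to fail, then reshape the object into an
# array of {job, result} entries, mirroring the workflow's first jq call.
results=$(jq -rec 'del(.["test-go-fips"], .["test-go-race"]) as $required
  | $required | keys as $jobs
  | reduce $jobs[] as $job ([]; . + [{job: $job}+$required[$job]])' <<< "$needs")

# Pass only when every remaining job finished as "success" or "skipped".
if jq -rec 'length as $expected
  | [.[] | select((.result == "success") or (.result == "skipped"))] | length as $got
  | $expected == $got' <<< "$results"; then
  echo "All required test jobs succeeded!"
else
  echo "One or more required test jobs failed!"
fi

With the sample input above the check passes, because the fips and race jobs are deleted before counting and "skipped" is treated as acceptable for the remaining jobs.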
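The second file's status step decides pass/fail from the presence of downloaded artifacts rather than from job outputs. A minimal local sketch of that decision follows, assuming a directory named data-race-logs stands in for the downloaded artifacts; the directory name and the sample log line are illustrative only.

#!/usr/bin/env bash
mkdir -p data-race-logs
# Uncomment to simulate a matrix job that hit the race detector:
# echo "WARNING: DATA RACE in TestSomething" > data-race-logs/test-go-1.log

result="success"
if [ -z "$(ls -A data-race-logs)" ]; then
  # No artifacts were downloaded, so no Go test job detected a race.
  data_race_output=""
  data_race_result="success"
else
  # Any downloaded log means at least one matrix job detected a race.
  data_race_output="$(cat data-race-logs/*)"
  data_race_result="failed"
  result="failed"
fi

# GITHUB_OUTPUT only exists inside Actions; fall back to stdout locally.
{
  echo "data-race-output=${data_race_output}"
  echo "data-race-result=${data_race_result}"
  echo "result=${result}"
} | tee -a "${GITHUB_OUTPUT:-/dev/stdout}"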