diff --git a/.asf.yaml b/.asf.yaml index 3b495a705ca7..010df03881f8 100644 --- a/.asf.yaml +++ b/.asf.yaml @@ -53,6 +53,18 @@ github: dismiss_stale_reviews: true require_code_owner_reviews: true required_approving_review_count: 2 + release/3.5: + required_pull_request_reviews: + require_code_owner_reviews: true + required_approving_review_count: 2 + release/3.4: + required_pull_request_reviews: + require_code_owner_reviews: true + required_approving_review_count: 2 + release/3.3: + required_pull_request_reviews: + require_code_owner_reviews: true + required_approving_review_count: 2 release/3.2: required_pull_request_reviews: require_code_owner_reviews: true diff --git a/.github/actions/autocorrect b/.github/actions/autocorrect new file mode 160000 index 000000000000..2aefcaa00480 --- /dev/null +++ b/.github/actions/autocorrect @@ -0,0 +1 @@ +Subproject commit 2aefcaa0048021cfade67c6cae2b9503f97243f9 diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index efa5b8f631e3..95e72fe578b9 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -28,11 +28,10 @@ jobs: - ubuntu-20.04 os_name: - linux_openresty - - linux_openresty_1_19 test_dir: - t/plugin/[a-k]* - t/plugin/[l-z]* - - t/admin t/cli t/config-center-yaml t/control t/core t/debug t/deployment t/discovery t/error_page t/misc + - t/admin t/cli t/config-center-yaml t/control t/core t/debug t/discovery t/error_page t/misc - t/node t/pubsub t/router t/script t/secret t/stream-node t/utils t/wasm t/xds-library t/xrpc runs-on: ${{ matrix.platform }} @@ -43,7 +42,7 @@ jobs: steps: - name: Check out code - uses: actions/checkout@v3.2.0 + uses: actions/checkout@v4 with: submodules: recursive @@ -137,7 +136,7 @@ jobs: [[ ${{ steps.test_env.outputs.type }} != first ]] && sudo ./ci/init-${{ steps.test_env.outputs.type }}-test-service.sh after echo "Linux launch services, done." 
- name: Start Dubbo Backend - if: matrix.os_name == 'linux_openresty' && steps.test_env.outputs.type == 'plugin' + if: matrix.os_name == 'linux_openresty' && (steps.test_env.outputs.type == 'plugin' || steps.test_env.outputs.type == 'last') run: | sudo apt install -y maven cd t/lib/dubbo-backend diff --git a/.github/workflows/centos7-ci.yml b/.github/workflows/centos7-ci.yml index dc08b0fd384e..2be0c39cbb94 100644 --- a/.github/workflows/centos7-ci.yml +++ b/.github/workflows/centos7-ci.yml @@ -30,12 +30,12 @@ jobs: test_dir: - t/plugin/[a-k]* - t/plugin/[l-z]* - - t/admin t/cli t/config-center-yaml t/control t/core t/debug t/deployment t/discovery t/error_page t/misc + - t/admin t/cli t/config-center-yaml t/control t/core t/debug t/discovery t/error_page t/misc - t/node t/pubsub t/router t/script t/secret t/stream-node t/utils t/wasm t/xds-library steps: - name: Check out code - uses: actions/checkout@v3.2.0 + uses: actions/checkout@v4 with: submodules: recursive diff --git a/.github/workflows/chaos.yml b/.github/workflows/chaos.yml index 2bf3518dd2ab..7b47664c55e9 100644 --- a/.github/workflows/chaos.yml +++ b/.github/workflows/chaos.yml @@ -19,7 +19,7 @@ jobs: runs-on: ubuntu-latest timeout-minutes: 35 steps: - - uses: actions/checkout@v3.2.0 + - uses: actions/checkout@v4 with: submodules: recursive diff --git a/.github/workflows/cli-master.yml b/.github/workflows/cli-master.yml index dd77dcd1537c..d521a9d7a103 100644 --- a/.github/workflows/cli-master.yml +++ b/.github/workflows/cli-master.yml @@ -33,7 +33,7 @@ jobs: steps: - name: Check out code - uses: actions/checkout@v3.2.0 + uses: actions/checkout@v4 with: submodules: recursive diff --git a/.github/workflows/cli.yml b/.github/workflows/cli.yml index 2f76670a0611..7c50e3fc58d2 100644 --- a/.github/workflows/cli.yml +++ b/.github/workflows/cli.yml @@ -31,14 +31,14 @@ jobs: - linux_apisix_current_luarocks_in_customed_nginx runs-on: ${{ matrix.platform }} - timeout-minutes: 15 + timeout-minutes: 30 env: 
SERVER_NAME: ${{ matrix.job_name }} OPENRESTY_VERSION: default steps: - name: Check out code - uses: actions/checkout@v3.2.0 + uses: actions/checkout@v4 with: submodules: recursive diff --git a/.github/workflows/close-unresponded.yml b/.github/workflows/close-unresponded.yml new file mode 100644 index 000000000000..52e81228eba2 --- /dev/null +++ b/.github/workflows/close-unresponded.yml @@ -0,0 +1,38 @@ +name: Check Issues + +on: + workflow_dispatch: + schedule: + - cron: '0 10 * * *' + +permissions: + contents: read + +jobs: + prune_stale: + permissions: + issues: write # for actions/stale to close stale issues + name: Prune Unresponded + runs-on: ubuntu-latest + timeout-minutes: 10 + + steps: + - name: Prune Stale + uses: actions/stale@v8 + with: + days-before-issue-stale: 14 + days-before-issue-close: 3 + stale-issue-message: > + Due to lack of the reporter's response this issue has been labeled with "no response". + It will be closed in 3 days if no further activity occurs. If this issue is still + relevant, please simply write any comment. Even if closed, you can still revive the + issue at any time or discuss it on the dev@apisix.apache.org list. + Thank you for your contributions. + close-issue-message: > + This issue has been closed due to lack of activity. If you think that + is incorrect, or the issue requires additional review, you can revive the issue at + any time. + # Issues with these labels will never be considered stale. + only-labels: 'wait for update' + stale-issue-label: 'no response' + ascending: true diff --git a/.github/workflows/code-lint.yml b/.github/workflows/code-lint.yml index 07a1807f811e..9e2befc9ac1f 100644 --- a/.github/workflows/code-lint.yml +++ b/.github/workflows/code-lint.yml @@ -15,7 +15,7 @@ jobs: runs-on: ubuntu-latest timeout-minutes: 10 steps: - - uses: actions/checkout@v3.2.0 + - uses: actions/checkout@v4 - name: Install run: | . 
./ci/common.sh @@ -37,7 +37,7 @@ jobs: timeout-minutes: 5 steps: - name: Checkout code - uses: actions/checkout@v3.2.0 + uses: actions/checkout@v4 - name: Shellcheck code run: | diff --git a/.github/workflows/doc-lint.yml b/.github/workflows/doc-lint.yml index 7a8b19904def..25e371162fbf 100644 --- a/.github/workflows/doc-lint.yml +++ b/.github/workflows/doc-lint.yml @@ -20,9 +20,9 @@ jobs: runs-on: ubuntu-latest timeout-minutes: 1 steps: - - uses: actions/checkout@v3.2.0 + - uses: actions/checkout@v4 - name: 🚀 Use Node.js - uses: actions/setup-node@v3.5.1 + uses: actions/setup-node@v3.8.1 with: node-version: "12.x" - run: npm install -g markdownlint-cli@0.25.0 @@ -47,8 +47,10 @@ jobs: runs-on: ubuntu-latest timeout-minutes: 1 steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 + with: + submodules: recursive - name: Check Chinese copywriting - uses: huacnlee/autocorrect-action@v2.6.2 + uses: ./.github/actions/autocorrect with: - args: --lint --no-diff-bg-color docs/zh/latest/**.md + args: autocorrect --lint --no-diff-bg-color ./docs/zh/latest/ diff --git a/.github/workflows/fips.yml b/.github/workflows/fips.yml index 5b2a004d72b2..2115b0a7b8c8 100644 --- a/.github/workflows/fips.yml +++ b/.github/workflows/fips.yml @@ -30,8 +30,9 @@ jobs: # The RSA and SHA tests are fully covered by jwt-auth and hmac-auth plugin tests, while other plugins only repeat such tests. - t/plugin/jwt-auth2.t t/plugin/jwt-auth.t t/plugin/hmac-auth.t # all SSL related core tests are covered by below two lists. 
- - t/admin/ssl* t/admin/schema.t t/admin/upstream.t t/config-center-yaml/ssl.t t/core/etcd-mtls.t t/core/config_etcd.t t/deployment/conf_server.t t/misc/patch.t + - t/admin/ssl* t/admin/schema.t t/admin/upstream.t t/config-center-yaml/ssl.t t/core/etcd-mtls.t t/core/config_etcd.t t/misc/patch.t - t/node/grpc-proxy-unary.t t/node/upstream-keepalive-pool.t t/node/upstream-websocket.t t/node/client-mtls.t t/node/upstream-mtls.t t/pubsub/kafka.t t/router/radixtree-sni2.t t/router/multi-ssl-certs.t t/router/radixtree-sni.t t/stream-node/mtls.t t/stream-node/tls.t t/stream-node/upstream-tls.t t/stream-node/sni.t + - t/fips runs-on: ${{ matrix.platform }} timeout-minutes: 90 @@ -41,7 +42,7 @@ jobs: steps: - name: Check out code - uses: actions/checkout@v3.2.0 + uses: actions/checkout@v4 with: submodules: recursive @@ -62,13 +63,18 @@ jobs: path: ~/openssl-3.0 key: ${{ runner.os }}-${{ env.cache-name }}-${{ matrix.os_name }} + - name: set openssl prefix + id: set_openssl_prefix + shell: bash + run: | + echo "openssl3_prefix=$HOME" >>$GITHUB_OUTPUT + - name: Toggle openssl compile id: test_ssl_env shell: bash if: steps.cache-openssl.outputs.cache-hit != 'true' run: | echo "openssl3=yes" >>$GITHUB_OUTPUT - echo "openssl3_prefix=$HOME" >>$GITHUB_OUTPUT - name: Extract test type shell: bash @@ -78,6 +84,9 @@ jobs: if [[ $test_dir =~ 't/plugin' ]]; then echo "type=plugin" >>$GITHUB_OUTPUT fi + if [[ $test_dir =~ 't/fips' ]]; then + echo "type=plugin" >>$GITHUB_OUTPUT + fi if [[ $test_dir =~ 't/admin' ]]; then echo "type=first" >>$GITHUB_OUTPUT fi @@ -92,6 +101,7 @@ jobs: - name: Linux launch common services run: | make ci-env-up project_compose_ci=ci/pod/docker-compose.common.yml + sudo ./ci/init-common-test-service.sh - name: Cache images id: cache-images @@ -131,7 +141,7 @@ jobs: - name: Linux Install env: COMPILE_OPENSSL3: ${{ steps.test_ssl_env.outputs.openssl3 }} - OPENSSL3_PREFIX: ${{ steps.test_ssl_env.outputs.openssl3_prefix }} + OPENSSL3_PREFIX: ${{ 
steps.set_openssl_prefix.outputs.openssl3_prefix }} USE_OPENSSL3: yes run: | sudo --preserve-env=OPENRESTY_VERSION \ diff --git a/.github/workflows/fuzzing-ci.yaml b/.github/workflows/fuzzing-ci.yaml index ec3701532d77..4d313ebfeb1a 100644 --- a/.github/workflows/fuzzing-ci.yaml +++ b/.github/workflows/fuzzing-ci.yaml @@ -27,7 +27,7 @@ jobs: steps: - name: Check out code - uses: actions/checkout@v3.2.0 + uses: actions/checkout@v4 with: submodules: recursive diff --git a/.github/workflows/gm-cron.yaml b/.github/workflows/gm-cron.yaml index 669a21798be3..f0327540498e 100644 --- a/.github/workflows/gm-cron.yaml +++ b/.github/workflows/gm-cron.yaml @@ -20,7 +20,7 @@ jobs: test_dir: - t/plugin/[a-k]* - t/plugin/[l-z]* - - t/admin t/cli t/config-center-yaml t/control t/core t/debug t/deployment t/discovery t/error_page t/misc + - t/admin t/cli t/config-center-yaml t/control t/core t/debug t/discovery t/error_page t/misc - t/node t/pubsub t/router t/script t/stream-node t/utils t/wasm t/xds-library t/xrpc runs-on: ${{ matrix.platform }} @@ -33,7 +33,7 @@ jobs: # scripts or a separate action? 
steps: - name: Check out code - uses: actions/checkout@v3.2.0 + uses: actions/checkout@v4 with: submodules: recursive diff --git a/.github/workflows/gm.yml b/.github/workflows/gm.yml index 06663006e942..297c746caebd 100644 --- a/.github/workflows/gm.yml +++ b/.github/workflows/gm.yml @@ -39,7 +39,7 @@ jobs: steps: - name: Check out code - uses: actions/checkout@v3.2.0 + uses: actions/checkout@v4 with: submodules: recursive @@ -72,6 +72,7 @@ jobs: - name: Linux launch common services run: | make ci-env-up project_compose_ci=ci/pod/docker-compose.common.yml + sudo ./ci/init-common-test-service.sh - name: Linux Before install env: diff --git a/.github/workflows/kubernetes-ci.yml b/.github/workflows/kubernetes-ci.yml index ea72fe57144c..dc8857739b85 100644 --- a/.github/workflows/kubernetes-ci.yml +++ b/.github/workflows/kubernetes-ci.yml @@ -28,7 +28,6 @@ jobs: - ubuntu-20.04 os_name: - linux_openresty - - linux_openresty_1_19 runs-on: ${{ matrix.platform }} timeout-minutes: 15 @@ -38,7 +37,7 @@ jobs: steps: - name: Check out code - uses: actions/checkout@v3.2.0 + uses: actions/checkout@v4 with: submodules: recursive diff --git a/.github/workflows/license-checker.yml b/.github/workflows/license-checker.yml index 2122e0db8fbc..830f1a1802e3 100644 --- a/.github/workflows/license-checker.yml +++ b/.github/workflows/license-checker.yml @@ -30,8 +30,8 @@ jobs: timeout-minutes: 3 steps: - - uses: actions/checkout@v3.2.0 + - uses: actions/checkout@v4 - name: Check License Header - uses: apache/skywalking-eyes@v0.4.0 + uses: apache/skywalking-eyes@v0.5.0 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/link-check.yml b/.github/workflows/link-check.yml new file mode 100644 index 000000000000..20b2f16ec94e --- /dev/null +++ b/.github/workflows/link-check.yml @@ -0,0 +1,49 @@ +name: 'Link Checker' + +# **What it does**: Renders the content of every page and check all internal links. +# **Why we have it**: To make sure all links connect correctly. 
+# **Who does it impact**: Docs content. + +on: + workflow_dispatch: + push: + # branches: [master, 'release/**'] + paths: + - '**/*.md' + - '**/link-check.yml' + pull_request: + branches: [master, "release/**"] + paths: + - '**/*.md' + - '**/link-check.yml' + +permissions: + contents: read + # Needed for the 'trilom/file-changes-action' action + pull-requests: read + +# This allows a subsequently queued workflow run to interrupt previous runs +concurrency: + group: '${{ github.workflow }} @ ${{ github.event.pull_request.head.label || github.head_ref || github.ref }}' + cancel-in-progress: true + +jobs: + check-links: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Get script + run: | + wget https://raw.githubusercontent.com/xuruidong/markdown-link-checker/main/link_checker.py + + - name: Setup python + uses: actions/setup-python@v4 + with: + python-version: '3.9' + + - name: Link check (critical, all files) + run: | + # python link_checker.py ./ --enable-external --ignore "http://apisix.iresty.com" "https://www.upyun.com" "https://github.com/apache/apisix/actions/workflows/build.yml/badge.svg" "https://httpbin.org/" "https://en.wikipedia.org/wiki/Cache" + python link_checker.py ./ diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 66f898b4201b..aa8c2a11f574 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -11,7 +11,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Check out code. 
- uses: actions/checkout@v3.2.0 + uses: actions/checkout@v4 - name: spell check run: | pip install codespell==2.1.0 @@ -30,10 +30,10 @@ jobs: steps: - name: Check out code - uses: actions/checkout@v3.2.0 + uses: actions/checkout@v4 - name: Setup Nodejs env - uses: actions/setup-node@v3.5.1 + uses: actions/setup-node@v3.8.1 with: node-version: '12' diff --git a/.github/workflows/performance.yml b/.github/workflows/performance.yml index c054303ccd74..17cf28691a4a 100644 --- a/.github/workflows/performance.yml +++ b/.github/workflows/performance.yml @@ -18,7 +18,7 @@ jobs: steps: - name: Check out code - uses: actions/checkout@v3.2.0 + uses: actions/checkout@v4 with: submodules: recursive diff --git a/.github/workflows/redhat-ci.yaml b/.github/workflows/redhat-ci.yaml new file mode 100644 index 000000000000..9bd8d39e35aa --- /dev/null +++ b/.github/workflows/redhat-ci.yaml @@ -0,0 +1,157 @@ +name: CI Redhat UBI - Daily + +on: + schedule: + - cron: "0 0 * * *" + pull_request: + branches: [master] + paths-ignore: + - 'docs/**' + - '**/*.md' +concurrency: + group: ${{ github.workflow }}-${{ github.ref == 'refs/heads/master' && github.run_number || github.ref }} + cancel-in-progress: true + +permissions: + contents: read + +jobs: + test_apisix: + name: run ci on redhat ubi + runs-on: ubuntu-20.04 + timeout-minutes: 90 + strategy: + fail-fast: false + matrix: + test_dir: + - t/plugin/[a-k]* + - t/plugin/[l-z]* + - t/admin t/cli t/config-center-yaml t/control t/core t/debug t/discovery t/error_page t/misc + - t/node t/pubsub t/router t/script t/secret t/stream-node t/utils t/wasm t/xds-library + + steps: + - name: Check out code + uses: actions/checkout@v4 + with: + submodules: recursive + + - name: Cache deps + uses: actions/cache@v3 + env: + cache-name: cache-deps + with: + path: deps + key: ${{ runner.os }}-${{ env.cache-name }}-ubi8.6-${{ hashFiles('rockspec/apisix-master-0.rockspec') }} + + - name: Extract branch name + if: ${{ startsWith(github.ref, 
'refs/heads/release/') }} + id: branch_env + shell: bash + run: | + echo "version=${GITHUB_REF##*/}" >>$GITHUB_OUTPUT + + - name: Extract test type + shell: bash + id: test_env + run: | + test_dir="${{ matrix.test_dir }}" + if [[ $test_dir =~ 't/plugin' ]]; then + echo "type=plugin" >>$GITHUB_OUTPUT + fi + if [[ $test_dir =~ 't/admin ' ]]; then + echo "type=first" >>$GITHUB_OUTPUT + fi + if [[ $test_dir =~ ' t/xds-library' ]]; then + echo "type=last" >>$GITHUB_OUTPUT + fi + + - name: Free disk space + run: | + bash ./ci/free_disk_space.sh + + - name: Linux launch common services + run: | + make ci-env-up project_compose_ci=ci/pod/docker-compose.common.yml + sudo ./ci/init-common-test-service.sh + + - name: Build rpm package + if: ${{ startsWith(github.ref, 'refs/heads/release/') }} + run: | + export VERSION=${{ steps.branch_env.outputs.version }} + sudo gem install --no-document fpm + git clone --depth 1 https://github.com/api7/apisix-build-tools.git + + # move codes under build tool + mkdir ./apisix-build-tools/apisix + for dir in `ls|grep -v "^apisix-build-tools$"`;do cp -r $dir ./apisix-build-tools/apisix/;done + + cd apisix-build-tools + make package type=rpm app=apisix version=${VERSION} checkout=release/${VERSION} image_base=ubi image_tag=8.6 local_code_path=./apisix + cd .. 
+ rm -rf $(ls -1 --ignore=apisix-build-tools --ignore=t --ignore=utils --ignore=ci --ignore=Makefile --ignore=rockspec) + + - name: Build xDS library + if: steps.test_env.outputs.type == 'last' + run: | + cd t/xds-library + go build -o libxds.so -buildmode=c-shared main.go export.go + + - name: Run redhat docker and mapping apisix into container + env: + TEST_FILE_SUB_DIR: ${{ matrix.test_dir }} + run: | + docker run -itd -v ${{ github.workspace }}:/apisix --env TEST_FILE_SUB_DIR="$TEST_FILE_SUB_DIR" --name ubiInstance --net="host" --dns 8.8.8.8 --dns-search apache.org registry.access.redhat.com/ubi8/ubi:8.6 /bin/bash + + - name: Cache images + id: cache-images + uses: actions/cache@v3 + env: + cache-name: cache-apisix-docker-images + with: + path: docker-images-backup + key: ${{ runner.os }}-${{ env.cache-name }}-${{ steps.test_env.outputs.type }}-${{ hashFiles(format('./ci/pod/docker-compose.{0}.yml', steps.test_env.outputs.type )) }} + + - if: ${{ steps.cache-images.outputs.cache-hit == 'true' }} + name: Load saved docker images + run: | + if [[ -f docker-images-backup/apisix-images.tar ]]; then + [[ ${{ steps.test_env.outputs.type }} != first ]] && sudo ./ci/init-${{ steps.test_env.outputs.type }}-test-service.sh before + docker load --input docker-images-backup/apisix-images.tar + rm docker-images-backup/apisix-images.tar + make ci-env-up project_compose_ci=ci/pod/docker-compose.${{ steps.test_env.outputs.type }}.yml + echo "loaded docker images" + if [[ ${{ steps.test_env.outputs.type }} != first ]]; then + sudo ./ci/init-${{ steps.test_env.outputs.type }}-test-service.sh after + fi + fi + - if: ${{ steps.cache-images.outputs.cache-hit != 'true' }} + name: Linux launch services + run: | + [[ ${{ steps.test_env.outputs.type }} != first ]] && sudo ./ci/init-${{ steps.test_env.outputs.type }}-test-service.sh before + [[ ${{ steps.test_env.outputs.type }} == plugin ]] && ./ci/pod/openfunction/build-function-image.sh + make ci-env-up 
project_compose_ci=ci/pod/docker-compose.${{ steps.test_env.outputs.type }}.yml + [[ ${{ steps.test_env.outputs.type }} != first ]] && sudo ./ci/init-${{ steps.test_env.outputs.type }}-test-service.sh after + echo "Linux launch services, done." + + - name: Install dependencies + run: | + docker exec ubiInstance bash -c "cd apisix && chmod +x ./ci/redhat-ci.sh && ./ci/redhat-ci.sh install_dependencies" + + - name: Install rpm package + if: ${{ startsWith(github.ref, 'refs/heads/release/') }} + run: | + docker exec ubiInstance bash -c "cd apisix && rpm -iv --prefix=/apisix ./apisix-build-tools/output/apisix-${{ steps.branch_env.outputs.version }}-0.ubi8.6.x86_64.rpm" + # Dependencies are attached with rpm, so revert `make deps` + docker exec ubiInstance bash -c "cd apisix && rm -rf deps" + docker exec ubiInstance bash -c "cd apisix && mv usr/bin . && mv usr/local/apisix/* ." + + - name: Run test cases + run: | + docker exec ubiInstance bash -c "cd apisix && chmod +x ./ci/redhat-ci.sh && ./ci/redhat-ci.sh run_case" + + - if: ${{ steps.cache-images.outputs.cache-hit != 'true' }} + name: Save docker images + run: | + echo "start backing up, $(date)" + bash ./ci/backup-docker-images.sh ${{ steps.test_env.outputs.type }} + echo "backup done, $(date)" diff --git a/.github/workflows/semantic.yml b/.github/workflows/semantic.yml index a2b606667fad..85df2c0816f7 100644 --- a/.github/workflows/semantic.yml +++ b/.github/workflows/semantic.yml @@ -13,7 +13,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Check out repository code - uses: actions/checkout@v3.2.0 + uses: actions/checkout@v4 with: submodules: recursive - uses: ./.github/actions/action-semantic-pull-request diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index 4f751e7e962f..3bd686e6f124 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -19,7 +19,7 @@ jobs: steps: - name: Prune Stale - uses: actions/stale@v7 + uses: actions/stale@v8 with: days-before-issue-stale: 
350 days-before-issue-close: 14 diff --git a/.github/workflows/tars-ci.yml b/.github/workflows/tars-ci.yml index aa4c1b6e45af..9e1c9fa2963e 100644 --- a/.github/workflows/tars-ci.yml +++ b/.github/workflows/tars-ci.yml @@ -28,7 +28,6 @@ jobs: - ubuntu-20.04 os_name: - linux_openresty - - linux_openresty_1_19 runs-on: ${{ matrix.platform }} timeout-minutes: 15 @@ -38,7 +37,7 @@ jobs: steps: - name: Check out code - uses: actions/checkout@v3.2.0 + uses: actions/checkout@v4 with: submodules: recursive diff --git a/.github/workflows/update-labels.yml b/.github/workflows/update-labels.yml new file mode 100644 index 000000000000..80919aefb841 --- /dev/null +++ b/.github/workflows/update-labels.yml @@ -0,0 +1,62 @@ +name: Update labels when user responds in issue and pr +permissions: + issues: write + pull-requests: write + +on: + issue_comment: + types: [created] + pull_request_review_comment: + types: [created] + +jobs: + issue_commented: + if: github.event.issue && !github.event.issue.pull_request && github.event.comment.user.login == github.event.issue.user.login && contains(github.event.issue.labels.*.name, 'wait for update') && !contains(github.event.issue.labels.*.name, 'user responded') + runs-on: ubuntu-latest + steps: + - name: update labels when user responds + uses: actions/github-script@v6 + with: + script: | + github.rest.issues.addLabels({ + issue_number: context.issue.number, + owner: context.repo.owner, + repo: context.repo.repo, + labels: ["user responded"] + }) + github.rest.issues.removeLabel({ + issue_number: context.issue.number, + owner: context.repo.owner, + repo: context.repo.repo, + name: "wait for update" + }) + + pr_commented: + if: github.event.issue && github.event.issue.pull_request && github.event.comment.user.login == github.event.issue.user.login && (contains(github.event.issue.labels.*.name, 'wait for update') || contains(github.event.issue.labels.*.name, 'discuss') || contains(github.event.issue.labels.*.name, 'need test cases')) && 
!contains(github.event.issue.labels.*.name, 'user responded') + runs-on: ubuntu-latest + steps: + - name: update label when user responds + uses: actions/github-script@v6 + with: + script: | + github.rest.issues.addLabels({ + issue_number: context.issue.number, + owner: context.repo.owner, + repo: context.repo.repo, + labels: ["user responded"] + }) + + pr_review_commented: + if: github.event.pull_request && github.event.comment.user.login == github.event.pull_request.user.login && (contains(github.event.pull_request.labels.*.name, 'wait for update') || contains(github.event.pull_request.labels.*.name, 'discuss') || contains(github.event.pull_request.labels.*.name, 'need test cases')) && !contains(github.event.pull_request.labels.*.name, 'user responded') + runs-on: ubuntu-latest + steps: + - name: update label when user responds + uses: actions/github-script@v6 + with: + script: | + github.rest.issues.addLabels({ + issue_number: context.issue.number, + owner: context.repo.owner, + repo: context.repo.repo, + labels: ["user responded"] + }) diff --git a/.gitmodules b/.gitmodules index 3c8ed44e4c56..981985528a80 100644 --- a/.gitmodules +++ b/.gitmodules @@ -4,3 +4,6 @@ [submodule ".github/actions/action-semantic-pull-request"] path = .github/actions/action-semantic-pull-request url = https://github.com/amannn/action-semantic-pull-request.git +[submodule ".github/actions/autocorrect"] + path = .github/actions/autocorrect + url = https://github.com/huacnlee/autocorrect.git diff --git a/.licenserc.yaml b/.licenserc.yaml index 315fa71bcf42..8b423f25cdd1 100644 --- a/.licenserc.yaml +++ b/.licenserc.yaml @@ -23,6 +23,8 @@ header: paths-ignore: - '.gitignore' + - '.gitattributes' + - '.gitmodules' - 'LICENSE' - 'NOTICE' - '**/*.json' @@ -46,7 +48,11 @@ header: # Exclude plugin-specific configuration files - 't/plugin/authz-casbin' - 't/coredns' + - 't/fuzzing/requirements.txt' + - 't/perf/requirements.txt' - 'autodocs/' - 'docs/**/*.md' + - '.ignore_words' + - '.luacheckrc' 
comment: on-failure diff --git a/CHANGELOG.md b/CHANGELOG.md index 0097b2ef93cf..92bd8498c313 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -23,6 +23,10 @@ title: Changelog ## Table of Contents +- [3.5.0](#350) +- [3.4.0](#340) +- [3.3.0](#330) +- [3.2.1](#321) - [3.2.0](#320) - [3.1.0](#310) - [3.0.0](#300) @@ -68,6 +72,117 @@ title: Changelog - [0.7.0](#070) - [0.6.0](#060) +## 3.5.0 + +### Change + +- :warning: remove snowflake algorithm in the request-id plugin: [#9715](https://github.com/apache/apisix/pull/9715) +- :warning: No longer compatible with OpenResty 1.19, it needs to be upgraded to 1.21+: [#9913](https://github.com/apache/apisix/pull/9913) +- :warning: Remove the configuration item `apisix.stream_proxy.only`, the L4/L7 proxy needs to be enabled through the configuration item `apisix.proxy_mode`: [#9607](https://github.com/apache/apisix/pull/9607) +- :warning: The admin-api `/apisix/admin/plugins?all=true` marked as deprecated: [#9580](https://github.com/apache/apisix/pull/9580) +- :warning: allowlist and denylist can't be enabled at the same time in ua-restriction plugin: [#9841](https://github.com/apache/apisix/pull/9841) + +### Core + +- :sunrise: Support host level dynamic setting of tls protocol version: [#9903](https://github.com/apache/apisix/pull/9903) +- :sunrise: Support force delete resource: [#9810](https://github.com/apache/apisix/pull/9810) +- :sunrise: Support pulling env vars from yaml keys: [#9855](https://github.com/apache/apisix/pull/9855) +- :sunrise: Add schema validate API in admin-api: [#10065](https://github.com/apache/apisix/pull/10065) + +### Plugins + +- :sunrise: Add chaitin-waf plugin: [#9838](https://github.com/apache/apisix/pull/9838) +- :sunrise: Support vars for file-logger plugin: [#9712](https://github.com/apache/apisix/pull/9712) +- :sunrise: Support adding response headers for mock plugin: [#9720](https://github.com/apache/apisix/pull/9720) +- :sunrise: Support regex_uri with unsafe_uri for proxy-rewrite plugin: 
[#9813](https://github.com/apache/apisix/pull/9813) +- :sunrise: Support set client_email field for google-cloud-logging plugin: [#9813](https://github.com/apache/apisix/pull/9813) +- :sunrise: Support sending headers upstream returned by OPA server for opa plugin: [#9710](https://github.com/apache/apisix/pull/9710) +- :sunrise: Support configuring proxy server for openid-connect plugin: [#9948](https://github.com/apache/apisix/pull/9948) + +### Bugfixes + +- Fix(log-rotate): the max_kept configuration doesn't work when using custom name: [#9749](https://github.com/apache/apisix/pull/9749) +- Fix(limit_conn): do not use the http variable in stream mode: [#9816](https://github.com/apache/apisix/pull/9816) +- Fix(loki-logger): getting an error with log_labels: [#9850](https://github.com/apache/apisix/pull/9850) +- Fix(limit-count): X-RateLimit-Reset shouldn't be set to 0 after request be rejected: [#9978](https://github.com/apache/apisix/pull/9978) +- Fix(nacos): attempt to index upvalue 'applications' (a nil value): [#9960](https://github.com/apache/apisix/pull/9960) +- Fix(etcd): can't sync etcd data if key has special character: [#9967](https://github.com/apache/apisix/pull/9967) +- Fix(tencent-cloud-cls): dns parsing failure: [#9843](https://github.com/apache/apisix/pull/9843) +- Fix(reload): worker not exited when executing quit or reload command [#9909](https://github.com/apache/apisix/pull/9909) +- Fix(traffic-split): upstream_id validity verification [#10008](https://github.com/apache/apisix/pull/10008) + +## 3.4.0 + +### Core + +- :sunrise: Support route-level MTLS [#9322](https://github.com/apache/apisix/pull/9322) +- :sunrise: Support id schema for global_rules [#9517](https://github.com/apache/apisix/pull/9517) +- :sunrise: Support use a single long http connection to watch all resources for etcd [#9456](https://github.com/apache/apisix/pull/9456) +- :sunrise: Support max len 256 for ssl label [#9301](https://github.com/apache/apisix/pull/9301) + +### 
Plugins + +- :sunrise: Support multiple regex pattern matching for proxy_rewrite plugin [#9194](https://github.com/apache/apisix/pull/9194) +- :sunrise: Add loki-logger plugin [#9399](https://github.com/apache/apisix/pull/9399) +- :sunrise: Allow user configure DEFAULT_BUCKETS for prometheus plugin [#9673](https://github.com/apache/apisix/pull/9673) + +### Bugfixes + +- Fix(body-transformer): xml2lua: replace empty table with empty string [#9669](https://github.com/apache/apisix/pull/9669) +- Fix: opentelemetry and grpc-transcode plugins cannot work together [#9606](https://github.com/apache/apisix/pull/9606) +- Fix(skywalking-logger, error-log-logger): support $hostname in skywalking service_instance_name [#9401](https://github.com/apache/apisix/pull/9401) +- Fix(admin): fix secrets do not support to update attributes by PATCH [#9510](https://github.com/apache/apisix/pull/9510) +- Fix(http-logger): default request path should be '/' [#9472](https://github.com/apache/apisix/pull/9472) +- Fix: syslog plugin doesn't work [#9425](https://github.com/apache/apisix/pull/9425) +- Fix: wrong log format for splunk-hec-logging [#9478](https://github.com/apache/apisix/pull/9478) +- Fix(etcd): reuse cli and enable keepalive [#9420](https://github.com/apache/apisix/pull/9420) +- Fix: upstream key config add mqtt_client_id support [#9450](https://github.com/apache/apisix/pull/9450) +- Fix: body-transformer plugin return raw body anytime [#9446](https://github.com/apache/apisix/pull/9446) +- Fix(wolf-rbac): other plugin in consumer not effective when consumer used wolf-rbac plugin [#9298](https://github.com/apache/apisix/pull/9298) +- Fix: always parse domain when host is domain name [#9332](https://github.com/apache/apisix/pull/9332) +- Fix: response-rewrite plugin can't add only one character [#9372](https://github.com/apache/apisix/pull/9372) +- Fix(consul): support to fetch only health endpoint [#9204](https://github.com/apache/apisix/pull/9204) + +## 3.3.0 + +**The changes 
marked with :warning: are not backward compatible.** + +### Change + +- :warning: Change the default router from `radixtree_uri` to `radixtree_host_uri`: [#9047](https://github.com/apache/apisix/pull/9047) +- :warning: CORS plugin will add `Vary: Origin` header when `allow_origin` is not `*`: [#9010](https://github.com/apache/apisix/pull/9010) + +### Core + +- :sunrise: Support store route's cert in secrets manager: [#9247](https://github.com/apache/apisix/pull/9247) +- :sunrise: Support bypassing Admin API Auth by configuration: [#9147](https://github.com/apache/apisix/pull/9147) + +### Plugins + +- :sunrise: Support header injection for `fault-injection` plugin: [#9039](https://github.com/apache/apisix/pull/9039) +- :sunrise: Support variable when rewrite header in `proxy-rewrite` plugin: [#9112](https://github.com/apache/apisix/pull/9112) +- :sunrise: `limit-count` plugin supports `username` and `ssl` for redis policy: [#9185](https://github.com/apache/apisix/pull/9185) + +### Bugfixes + +- Fix etcd data sync exception: [#8493](https://github.com/apache/apisix/pull/8493) +- Fix invalidate cache in `core.request.add_header` and fix some calls: [#8824](https://github.com/apache/apisix/pull/8824) +- Fix the high CPU and memory usage cause by healthcheck impl: [#9015](https://github.com/apache/apisix/pull/9015) +- Consider using `allow_origins_by_regex` only when it is not `nil`: [#9028](https://github.com/apache/apisix/pull/9028) +- Check upstream reference in `traffic-split` plugin when delete upstream: [#9044](https://github.com/apache/apisix/pull/9044) +- Fix failing to connect to etcd at startup: [#9077](https://github.com/apache/apisix/pull/9077) +- Fix health checker leak for domain nodes: [#9090](https://github.com/apache/apisix/pull/9090) +- Prevent non `127.0.0.0/24` to access admin api with empty admin_key: [#9146](https://github.com/apache/apisix/pull/9146) +- Ensure `hold_body_chunk` should use separate buffer for each plugin in case of pollution: 
[#9266](https://github.com/apache/apisix/pull/9266) +- Ensure `batch-requests` plugin read trailer headers if existed: [#9289](https://github.com/apache/apisix/pull/9289) +- Ensure `proxy-rewrite` should set `ngx.var.uri`: [#9309](https://github.com/apache/apisix/pull/9309) + +## 3.2.1 + +**This is an LTS maintenance release and you can see the CHANGELOG in `release/3.2` branch.** + +[https://github.com/apache/apisix/blob/release/3.2/CHANGELOG.md#321](https://github.com/apache/apisix/blob/release/3.2/CHANGELOG.md#321) + ## 3.2.0 ### Change @@ -298,7 +413,7 @@ Returns multiple configurations: - Port of Admin API changed to 9180: [#7806](https://github.com/apache/apisix/pull/7806) - We only support OpenResty 1.19.3.2 and above: [#7625](https://github.com/apache/apisix/pull/7625) -- Adjusted the priority of the Plugin Config object so that the priority of a plugin configuration with the same name changes from Consumer > Plugin Config > Route > Service to Consumer > Route > Plugin Config > Service: [#7614](https://github. com/apache/apisix/pull/7614) +- Adjusted the priority of the Plugin Config object so that the priority of a plugin configuration with the same name changes from Consumer > Plugin Config > Route > Service to Consumer > Route > Plugin Config > Service: [#7614](https://github.com/apache/apisix/pull/7614) ### Core @@ -1290,7 +1405,7 @@ and support for ARM platform, and proxy rewrite plugin. ### Core -- :sunrise: **[support stand-alone mode](https://github.com/apache/incubator-apisix/blob/master/docs/en/latest/stand-alone-cn.md)**: using yaml to update configurations of APISIX, more friendly to kubernetes. [#464](https://github.com/apache/incubator-apisix/pull/464) +- :sunrise: **[support standalone mode](https://github.com/apache/apisix/blob/master/docs/en/latest/deployment-modes.md#standalone)**: using yaml to update configurations of APISIX, more friendly to kubernetes. 
[#464](https://github.com/apache/incubator-apisix/pull/464) - :sunrise: **[support stream proxy](https://github.com/apache/incubator-apisix/blob/master/docs/en/latest/stream-proxy.md)**. [#513](https://github.com/apache/incubator-apisix/pull/513) - :sunrise: support consumer bind plugins. [#544](https://github.com/apache/incubator-apisix/pull/544) - support domain name in upstream, not only IP. [#522](https://github.com/apache/incubator-apisix/pull/522) @@ -1332,7 +1447,7 @@ This release brings many new features, such as IP black and white list, gPRC pro ### Core -- :sunrise: **[gRPC transcoding](https://github.com/apache/incubator-apisix/blob/master/docs/en/latest/plugins/grpc-transcoding.md)**: supports protocol transcoding so that clients can access your gRPC API by using HTTP/JSON. [#395](https://github.com/apache/incubator-apisix/issues/395) +- :sunrise: **[gRPC transcoding](https://github.com/apache/apisix/blob/master/docs/en/latest/plugins/grpc-transcode.md)**: supports protocol transcoding so that clients can access your gRPC API by using HTTP/JSON. [#395](https://github.com/apache/incubator-apisix/issues/395) - :sunrise: **[radix tree router](https://github.com/apache/incubator-apisix/blob/master/docs/en/latest/router-radixtree.md)**: The radix tree is used as the default router implementation. It supports the uri, host, cookie, request header, request parameters, Nginx built-in variables, etc. as the routing conditions, and supports common operators such as equal, greater than, less than, etc., more powerful and flexible.**IMPORTANT: This change is not downward compatible. All users who use historical versions need to manually modify their routing to work properly.** [#414](https://github.com/apache/incubator-apisix/issues/414) - Dynamic upstream supports more parameters, you can specify the upstream uri and host, and whether to enable websocket. 
[#451](https://github.com/apache/incubator-apisix/pull/451) - Support for get values from cookies directly from `ctx.var`. [#449](https://github.com/apache/incubator-apisix/pull/449) @@ -1378,7 +1493,7 @@ This release brings many new features such as health check and circuit breaker, ### Plugins - :sunrise: **[OpenTracing](https://github.com/apache/incubator-apisix/blob/master/docs/en/latest/plugins/zipkin.md)**: support Zipkin and Apache SkyWalking. [#304](https://github.com/apache/incubator-apisix/pull/304) -- [JWT auth](https://github.com/apache/incubator-apisix/blob/master/docs/en/latest/plugins/jwt-auth-cn.md). [#303](https://github.com/apache/incubator-apisix/pull/303) +- [JWT auth](https://github.com/apache/apisix/blob/master/docs/en/latest/plugins/jwt-auth.md). [#303](https://github.com/apache/incubator-apisix/pull/303) ### CLI diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md index 7a96ea5db17f..fe93188bc703 100644 --- a/CODE_OF_CONDUCT.md +++ b/CODE_OF_CONDUCT.md @@ -65,7 +65,7 @@ We strive to: * Repeated harassment of others. In general, if someone asks you to stop, then stop. * Advocating for, or encouraging, any of the above behaviour. -6. __Be concise.__ Keep in mind that what you write once will be read by hundreds of persons. Writing a short email means people can understand the conversation as efficiently as possible. Short emails should always strive to be empathetic, welcoming, friendly and patient. When a long explanation is necessary, consider adding a summary.

+6. __Be concise.__ Keep in mind that what you write once will be read by hundreds of people. Writing a short email means people can understand the conversation as efficiently as possible. Short emails should always strive to be empathetic, welcoming, friendly and patient. When a long explanation is necessary, consider adding a summary.

Try to bring new ideas to a conversation so that each mail adds something unique to the thread, keeping in mind that the rest of the thread still contains the other messages with arguments that have already been made. diff --git a/CODE_STYLE.md b/CODE_STYLE.md index 9b25f552cf90..f19bf355aeb8 100644 --- a/CODE_STYLE.md +++ b/CODE_STYLE.md @@ -322,7 +322,7 @@ end The function should return ``, `err`. The first return value means successful or not, if not, the second return value specifies the error message. -The error message can be ignored in some case. +The error message can be ignored in some cases. ```lua --No diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 7f8434e63460..872e2a59b87a 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -44,7 +44,7 @@ Once we've discussed your changes and you've got your code ready, make sure that * Includes tests for new functionality. * References the original issue in the description, e.g. "Resolves #123". * Has a [good commit message](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html). -* Ensure your pull request's title starts from one of the word in the `types` section of [semantic.yml](https://github.com/apache/apisix/blob/master/.github/semantic.yml). +* Ensure your pull request's title starts from one of the word in the `types` section of [semantic.yml](https://github.com/apache/apisix/blob/master/.github/workflows/semantic.yml). 
* Follow the [PR manners](https://raw.githubusercontent.com/apache/apisix/master/.github/PULL_REQUEST_TEMPLATE.md) ## Contribution Guidelines for Documentation diff --git a/Makefile b/Makefile index abe654407bc7..f3cc375e061e 100644 --- a/Makefile +++ b/Makefile @@ -26,6 +26,7 @@ VERSION ?= master project_name ?= apache-apisix project_release_name ?= $(project_name)-$(VERSION)-src +OTEL_CONFIG ?= ./ci/pod/otelcol-contrib/data-otlp.json # Hyperconverged Infrastructure ENV_OS_NAME ?= $(shell uname -s | tr '[:upper:]' '[:lower:]') @@ -80,6 +81,9 @@ ifeq ($(ENV_OS_NAME), darwin) ifeq ($(shell test -d $(ENV_HOMEBREW_PREFIX)/opt/openresty-openssl111 && echo -n yes), yes) ENV_OPENSSL_PREFIX := $(ENV_HOMEBREW_PREFIX)/opt/openresty-openssl111 endif + ifeq ($(shell test -d $(ENV_HOMEBREW_PREFIX)/opt/pcre && echo -n yes), yes) + ENV_PCRE_PREFIX := $(ENV_HOMEBREW_PREFIX)/opt/pcre + endif endif @@ -145,7 +149,7 @@ help: @echo -### deps : Installation dependencies +### deps : Installing dependencies .PHONY: deps deps: runtime $(eval ENV_LUAROCKS_VER := $(shell $(ENV_LUAROCKS) --version | grep -E -o "luarocks [0-9]+.")) @@ -153,14 +157,15 @@ deps: runtime mkdir -p ~/.luarocks; \ $(ENV_LUAROCKS) config $(ENV_LUAROCKS_FLAG_LOCAL) variables.OPENSSL_LIBDIR $(addprefix $(ENV_OPENSSL_PREFIX), /lib); \ $(ENV_LUAROCKS) config $(ENV_LUAROCKS_FLAG_LOCAL) variables.OPENSSL_INCDIR $(addprefix $(ENV_OPENSSL_PREFIX), /include); \ - $(ENV_LUAROCKS) install rockspec/apisix-master-0.rockspec --tree=deps --only-deps --local $(ENV_LUAROCKS_SERVER_OPT); \ + [ '$(ENV_OS_NAME)' == 'darwin' ] && $(ENV_LUAROCKS) config $(ENV_LUAROCKS_FLAG_LOCAL) variables.PCRE_INCDIR $(addprefix $(ENV_PCRE_PREFIX), /include); \ + $(ENV_LUAROCKS) install rockspec/apisix-master-0.rockspec --tree deps --only-deps $(ENV_LUAROCKS_SERVER_OPT); \ else \ $(call func_echo_warn_status, "WARNING: You're not using LuaRocks 3.x; please remove the luarocks and reinstall it via 
https://raw.githubusercontent.com/apache/apisix/master/utils/linux-install-luarocks.sh"); \ exit 1; \ fi -### undeps : Uninstallation dependencies +### undeps : Uninstalling dependencies .PHONY: undeps undeps: @$(call func_echo_status, "$@ -> [ Start ]") @@ -337,9 +342,6 @@ install: runtime $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/plugins/serverless $(ENV_INSTALL) apisix/plugins/serverless/*.lua $(ENV_INST_LUADIR)/apisix/plugins/serverless/ - $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/plugins/slslog - $(ENV_INSTALL) apisix/plugins/slslog/*.lua $(ENV_INST_LUADIR)/apisix/plugins/slslog/ - $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/plugins/syslog $(ENV_INSTALL) apisix/plugins/syslog/*.lua $(ENV_INST_LUADIR)/apisix/plugins/syslog/ @@ -373,6 +375,9 @@ install: runtime $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/stream/xrpc/protocols/redis $(ENV_INSTALL) apisix/stream/xrpc/protocols/redis/*.lua $(ENV_INST_LUADIR)/apisix/stream/xrpc/protocols/redis/ + $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/stream/xrpc/protocols/dubbo + $(ENV_INSTALL) apisix/stream/xrpc/protocols/dubbo/*.lua $(ENV_INST_LUADIR)/apisix/stream/xrpc/protocols/dubbo/ + $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/utils $(ENV_INSTALL) apisix/utils/*.lua $(ENV_INST_LUADIR)/apisix/utils/ @@ -441,6 +446,8 @@ compress-tar: .PHONY: ci-env-up ci-env-up: @$(call func_echo_status, "$@ -> [ Start ]") + touch $(OTEL_CONFIG) + chmod 777 $(OTEL_CONFIG) $(ENV_DOCKER_COMPOSE) up -d @$(call func_echo_success_status, "$@ -> [ Done ]") @@ -465,5 +472,6 @@ ci-env-rebuild: .PHONY: ci-env-down ci-env-down: @$(call func_echo_status, "$@ -> [ Start ]") + rm $(OTEL_CONFIG) $(ENV_DOCKER_COMPOSE) down @$(call func_echo_success_status, "$@ -> [ Done ]") diff --git a/README.md b/README.md index b3dbfdfe5d46..9d304c5fed92 100644 --- a/README.md +++ b/README.md @@ -123,14 +123,14 @@ A/B testing, canary release, blue-green deployment, limit rate, defense against - Zipkin tracing: [Zipkin](docs/en/latest/plugins/zipkin.md) - Open 
source APM: support [Apache SkyWalking](docs/en/latest/plugins/skywalking.md) - - Works with external service discovery: In addition to the built-in etcd, it also supports [Consul](docs/en/latest/discovery/consul_kv.md), [Nacos](docs/en/latest/discovery/nacos.md), [Eureka](docs/en/latest/discovery/eureka.md) and [Zookeeper (CP)](docs/en/latest/discovery/zookeeper.md). + - Works with external service discovery: In addition to the built-in etcd, it also supports [Consul](docs/en/latest/discovery/consul_kv.md), [Nacos](docs/en/latest/discovery/nacos.md), [Eureka](docs/en/latest/discovery/eureka.md) and [Zookeeper (CP)](https://github.com/api7/apisix-seed/blob/main/docs/en/latest/zookeeper.md). - Monitoring And Metrics: [Prometheus](docs/en/latest/plugins/prometheus.md) - Clustering: APISIX nodes are stateless, creates clustering of the configuration center, please refer to [etcd Clustering Guide](https://etcd.io/docs/v3.5/op-guide/clustering/). - High availability: Support to configure multiple etcd addresses in the same cluster. - [Dashboard](https://github.com/apache/apisix-dashboard) - Version Control: Supports rollbacks of operations. - CLI: start\stop\reload APISIX through the command line. - - [Stand-Alone](docs/en/latest/deployment-modes.md#stand-alone): Supports to load route rules from local YAML file, it is more friendly such as under the kubernetes(k8s). + - [Standalone](docs/en/latest/deployment-modes.md#standalone): Supports to load route rules from local YAML file, it is more friendly such as under the kubernetes(k8s). - [Global Rule](docs/en/latest/terminology/global-rule.md): Allows to run any plugin for all request, eg: limit rate, IP filter etc. - High performance: The single-core QPS reaches 18k with an average delay of fewer than 0.2 milliseconds. 
- [Fault Injection](docs/en/latest/plugins/fault-injection.md) diff --git a/THREAT_MODEL.md b/THREAT_MODEL.md index e537d1a469c8..c10560c4d4aa 100644 --- a/THREAT_MODEL.md +++ b/THREAT_MODEL.md @@ -57,4 +57,4 @@ We should keep security in mind, and validate the input from the client before u As the maintainer: We should keep security in mind, and review the code line by line. -We are open to the discussion from the security researchers. +We are open to discussion from the security researchers. diff --git a/apisix/admin/init.lua b/apisix/admin/init.lua index 072a584350ee..333c798e6ada 100644 --- a/apisix/admin/init.lua +++ b/apisix/admin/init.lua @@ -16,6 +16,7 @@ -- local require = require local core = require("apisix.core") +local get_uri_args = ngx.req.get_uri_args local route = require("apisix.utils.router") local plugin = require("apisix.plugin") local v3_adapter = require("apisix.admin.v3_adapter") @@ -56,8 +57,8 @@ local resources = { stream_routes = require("apisix.admin.stream_routes"), plugin_metadata = require("apisix.admin.plugin_metadata"), plugin_configs = require("apisix.admin.plugin_config"), - consumer_groups = require("apisix.admin.consumer_group"), - secrets = require("apisix.admin.secrets"), + consumer_groups = require("apisix.admin.consumer_group"), + secrets = require("apisix.admin.secrets"), } @@ -67,6 +68,12 @@ local router local function check_token(ctx) local local_conf = core.config.local_conf() + + -- check if admin_key is required + if local_conf.deployment.admin.admin_key_required == false then + return true + end + local admin_key = core.table.try_read_attr(local_conf, "deployment", "admin", "admin_key") if not admin_key then return true @@ -98,6 +105,22 @@ local function check_token(ctx) return true end +-- Set the `apictx` variable and check admin api token, if the check fails, the current +-- request will be interrupted and an error response will be returned. +-- +-- NOTE: This is a higher wrapper for `check_token` function. 
+local function set_ctx_and_check_token() + local api_ctx = {} + core.ctx.set_vars_meta(api_ctx) + ngx.ctx.api_ctx = api_ctx + + local ok, err = check_token(api_ctx) + if not ok then + core.log.warn("failed to check token: ", err) + core.response.exit(401, { error_msg = "failed to check token" }) + end +end + local function strip_etcd_resp(data) if type(data) == "table" @@ -136,15 +159,7 @@ end local function run() - local api_ctx = {} - core.ctx.set_vars_meta(api_ctx) - ngx.ctx.api_ctx = api_ctx - - local ok, err = check_token(api_ctx) - if not ok then - core.log.warn("failed to check token: ", err) - core.response.exit(401, {error_msg = "failed to check token"}) - end + set_ctx_and_check_token() local uri_segs = core.utils.split_uri(ngx.var.uri) core.log.info("uri: ", core.json.delay_encode(uri_segs)) @@ -160,7 +175,8 @@ local function run() if seg_res == "stream_routes" then local local_conf = core.config.local_conf() - if not local_conf.apisix.stream_proxy then + if local_conf.apisix.proxy_mode ~= "stream" and + local_conf.apisix.proxy_mode ~= "http&stream" then core.log.warn("stream mode is disabled, can not add any stream ", "routes") core.response.exit(400, {error_msg = "stream mode is disabled, " .. @@ -170,7 +186,7 @@ local function run() local resource = resources[seg_res] if not resource then - core.response.exit(404, {error_msg = "not found"}) + core.response.exit(404, {error_msg = "Unsupported resource type: ".. seg_res}) end local method = str_lower(get_method()) @@ -238,31 +254,32 @@ end local function get_plugins_list() - local api_ctx = {} - core.ctx.set_vars_meta(api_ctx) - ngx.ctx.api_ctx = api_ctx - - local ok, err = check_token(api_ctx) - if not ok then - core.log.warn("failed to check token: ", err) - core.response.exit(401, {error_msg = "failed to check token"}) + set_ctx_and_check_token() + local args = get_uri_args() + local subsystem = args["subsystem"] + -- If subsystem is passed then it should be either http or stream. 
+ -- If it is not passed/nil then http will be default. + subsystem = subsystem or "http" + if subsystem == "http" or subsystem == "stream" then + local plugins = resources.plugins.get_plugins_list(subsystem) + core.response.exit(200, plugins) end + core.response.exit(400,"invalid subsystem passed") +end - local plugins = resources.plugins.get_plugins_list() - core.response.exit(200, plugins) +-- Handle unsupported request methods for the virtual "reload" plugin +local function unsupported_methods_reload_plugin() + set_ctx_and_check_token() + + core.response.exit(405, { + error_msg = "please use PUT method to reload the plugins, " + .. get_method() .. " method is not allowed." + }) end local function post_reload_plugins() - local api_ctx = {} - core.ctx.set_vars_meta(api_ctx) - ngx.ctx.api_ctx = api_ctx - - local ok, err = check_token(api_ctx) - if not ok then - core.log.warn("failed to check token: ", err) - core.response.exit(401, {error_msg = "failed to check token"}) - end + set_ctx_and_check_token() local success, err = events.post(reload_event, get_method(), ngx_time()) if not success then @@ -359,6 +376,41 @@ local function reload_plugins(data, event, source, pid) end +local function schema_validate() + local uri_segs = core.utils.split_uri(ngx.var.uri) + core.log.info("uri: ", core.json.delay_encode(uri_segs)) + + local seg_res = uri_segs[6] + local resource = resources[seg_res] + if not resource then + core.response.exit(404, {error_msg = "Unsupported resource type: ".. seg_res}) + end + + local req_body, err = core.request.get_body(MAX_REQ_BODY) + if err then + core.log.error("failed to read request body: ", err) + core.response.exit(400, {error_msg = "invalid request body: " .. err}) + end + + if req_body then + local data, err = core.json.decode(req_body) + if err then + core.log.error("invalid request body: ", req_body, " err: ", err) + core.response.exit(400, {error_msg = "invalid request body: " .. 
err, + req_body = req_body}) + end + + req_body = data + end + + local ok, err = core.schema.check(resource.schema, req_body) + if ok then + core.response.exit(200) + end + core.response.exit(400, {error_msg = err}) +end + + local uri_route = { { paths = [[/apisix/admin]], @@ -375,11 +427,22 @@ local uri_route = { methods = {"GET"}, handler = get_plugins_list, }, + { + paths = [[/apisix/admin/schema/validate/*]], + methods = {"POST"}, + handler = schema_validate, + }, { paths = reload_event, methods = {"PUT"}, handler = post_reload_plugins, }, + -- Handle methods other than "PUT" on "/plugin/reload" to inform user + { + paths = reload_event, + methods = { "GET", "POST", "DELETE", "PATCH" }, + handler = unsupported_methods_reload_plugin, + }, } @@ -395,6 +458,13 @@ function _M.init_worker() events.register(reload_plugins, reload_event, "PUT") if ngx_worker_id() == 0 then + -- check if admin_key is required + if local_conf.deployment.admin.admin_key_required == false then + core.log.warn("Admin key is bypassed! 
", + "If you are deploying APISIX in a production environment, ", + "please disable `admin_key_required` and set a secure admin key!") + end + local ok, err = ngx_timer_at(0, function(premature) if premature then return diff --git a/apisix/admin/plugins.lua b/apisix/admin/plugins.lua index 26dcab719332..201f8f3c998e 100644 --- a/apisix/admin/plugins.lua +++ b/apisix/admin/plugins.lua @@ -18,11 +18,12 @@ local require = require local core = require("apisix.core") local check_schema = require("apisix.plugin").check_schema local ipairs = ipairs -local pcall = pcall local table_sort = table.sort local table_insert = table.insert local get_uri_args = ngx.req.get_uri_args local plugin_get_all = require("apisix.plugin").get_all +local plugin_get_http = require("apisix.plugin").get +local plugin_get_stream = require("apisix.plugin").get_stream local encrypt_conf = require("apisix.plugin").encrypt_conf local pairs = pairs @@ -42,7 +43,15 @@ end function _M.get(name) local arg = get_uri_args() - if arg and arg["all"] == "true" then + -- If subsystem is passed inside args then it should be oneOf: http / stream. + local subsystem = arg["subsystem"] or "http" + if subsystem ~= "http" and subsystem ~= "stream" then + return 400, {error_msg = "unsupported subsystem: "..subsystem} + end + + -- arg all to be deprecated + if (arg and arg["all"] == "true") then + core.log.warn("query parameter \"all\" will be deprecated soon.") local http_plugins, stream_plugins = plugin_get_all({ version = true, priority = true, @@ -60,16 +69,18 @@ function _M.get(name) return 200, http_plugins end - if not name then - return 400, {error_msg = "not found plugin name"} - end + local plugin - local plugin_name = "apisix.plugins." .. 
name + if subsystem == "http" then + plugin = plugin_get_http(name) + else + plugin = plugin_get_stream(name) + end - local ok, plugin = pcall(require, plugin_name) - if not ok then - core.log.warn("failed to load plugin [", name, "] err: ", plugin) - return 400, {error_msg = "failed to load plugin " .. name} + if not plugin then + local err = "plugin not found in subsystem " .. subsystem + core.log.warn(err) + return 404, {error_msg = err} end local json_schema = plugin.schema @@ -85,16 +96,34 @@ function _M.get(name) end -function _M.get_plugins_list() - local plugins = core.config.local_conf().plugins +function _M.get_plugins_list(subsystem) + local http_plugins + local stream_plugins + if subsystem == "http" then + http_plugins = core.config.local_conf().plugins + else + stream_plugins = core.config.local_conf().stream_plugins + end + local priorities = {} local success = {} - for i, name in ipairs(plugins) do - local plugin_name = "apisix.plugins." .. name - local ok, plugin = pcall(require, plugin_name) - if ok and plugin.priority then - priorities[name] = plugin.priority - table_insert(success, name) + if http_plugins then + for i, name in ipairs(http_plugins) do + local plugin = plugin_get_http(name) + if plugin and plugin.priority then + priorities[name] = plugin.priority + table_insert(success, name) + end + end + end + + if stream_plugins then + for i, name in ipairs(stream_plugins) do + local plugin = plugin_get_stream(name) + if plugin and plugin.priority then + priorities[name] = plugin.priority + table_insert(success, name) + end end end diff --git a/apisix/admin/proto.lua b/apisix/admin/proto.lua index de4d24e23e88..f8133cc80b71 100644 --- a/apisix/admin/proto.lua +++ b/apisix/admin/proto.lua @@ -50,7 +50,7 @@ local function check_proto_used(plugins, deleting, ptype, pid) if type(plugins) == "table" and plugins["grpc-transcode"] and plugins["grpc-transcode"].proto_id and tostring(plugins["grpc-transcode"].proto_id) == deleting then - return false, 
{error_msg = "can not delete this proto," + return false, {error_msg = "can not delete this proto, " .. ptype .. " [" .. pid .. "] is still using it now"} end diff --git a/apisix/admin/resource.lua b/apisix/admin/resource.lua index ac204dcf574a..35fe3bba2476 100644 --- a/apisix/admin/resource.lua +++ b/apisix/admin/resource.lua @@ -230,7 +230,7 @@ function _M:put(id, conf, sub_path, args) end -- Keep the unused conf to make the args list consistent with other methods -function _M:delete(id, conf, sub_path) +function _M:delete(id, conf, sub_path, uri_args) if core.table.array_find(self.unsupported_methods, "delete") then return 405, {error_msg = "not supported `DELETE` method for " .. self.kind} end @@ -253,7 +253,7 @@ function _M:delete(id, conf, sub_path) key = key .. "/" .. id - if self.delete_checker then + if self.delete_checker and uri_args.force ~= "true" then local code, err = self.delete_checker(id) if err then return code, err @@ -279,8 +279,8 @@ function _M:patch(id, conf, sub_path, args) local typ = nil if self.name == "secrets" then local uri_segs = core.utils.split_uri(sub_path) - if #uri_segs < 2 then - return 400, {error_msg = "no secret id and/or sub path in uri"} + if #uri_segs < 1 then + return 400, {error_msg = "no secret id"} end typ = id id = uri_segs[1] diff --git a/apisix/admin/upstreams.lua b/apisix/admin/upstreams.lua index a85d24d8d9e1..6c04d9379d52 100644 --- a/apisix/admin/upstreams.lua +++ b/apisix/admin/upstreams.lua @@ -15,13 +15,17 @@ -- limitations under the License. 
-- local core = require("apisix.core") +local config_util = require("apisix.core.config_util") local get_routes = require("apisix.router").http_routes local get_services = require("apisix.http.service").services +local get_plugin_configs = require("apisix.plugin_config").plugin_configs +local get_consumers = require("apisix.consumer").consumers +local get_consumer_groups = require("apisix.consumer_group").consumer_groups +local get_global_rules = require("apisix.global_rules").global_rules local apisix_upstream = require("apisix.upstream") local resource = require("apisix.admin.resource") local tostring = tostring local ipairs = ipairs -local type = type local function check_conf(id, conf, need_id) @@ -34,33 +38,87 @@ local function check_conf(id, conf, need_id) end -local function delete_checker(id) - local routes, routes_ver = get_routes() - if routes_ver and routes then - for _, route in ipairs(routes) do - if type(route) == "table" and route.value - and route.value.upstream_id - and tostring(route.value.upstream_id) == id then - return 400, {error_msg = "can not delete this upstream," - .. " route [" .. route.value.id +local function up_id_in_plugins(plugins, up_id) + if plugins and plugins["traffic-split"] + and plugins["traffic-split"].rules then + + for _, rule in ipairs(plugins["traffic-split"].rules) do + local plugin_upstreams = rule.weighted_upstreams + for _, plugin_upstream in ipairs(plugin_upstreams) do + if plugin_upstream.upstream_id + and tostring(plugin_upstream.upstream_id) == up_id then + return true + end + end + end + + return false + end +end + + +local function check_resources_reference(resources, up_id, + only_check_plugin, resources_name) + if resources then + for _, resource in config_util.iterate_values(resources) do + if resource and resource.value then + if up_id_in_plugins(resource.value.plugins, up_id) then + return {error_msg = "can not delete this upstream," + .. " plugin in " + .. resources_name .. " [" + .. resource.value.id + .. 
"] is still using it now"} + end + + if not only_check_plugin and resource.value.upstream_id + and tostring(resource.value.upstream_id) == up_id then + return {error_msg = "can not delete this upstream, " + .. resources_name .. " [" .. resource.value.id .. "] is still using it now"} + end end end end +end + + +local function delete_checker(id) + local routes = get_routes() + local err_msg = check_resources_reference(routes, id, false, "route") + if err_msg then + return 400, err_msg + end local services, services_ver = get_services() core.log.info("services: ", core.json.delay_encode(services, true)) core.log.info("services_ver: ", services_ver) - if services_ver and services then - for _, service in ipairs(services) do - if type(service) == "table" and service.value - and service.value.upstream_id - and tostring(service.value.upstream_id) == id then - return 400, {error_msg = "can not delete this upstream," - .. " service [" .. service.value.id - .. "] is still using it now"} - end - end + local err_msg = check_resources_reference(services, id, false, "service") + if err_msg then + return 400, err_msg + end + + local plugin_configs = get_plugin_configs() + local err_msg = check_resources_reference(plugin_configs, id, true, "plugin_config") + if err_msg then + return 400, err_msg + end + + local consumers = get_consumers() + local err_msg = check_resources_reference(consumers, id, true, "consumer") + if err_msg then + return 400, err_msg + end + + local consumer_groups = get_consumer_groups() + local err_msg = check_resources_reference(consumer_groups, id, true, "consumer_group") + if err_msg then + return 400, err_msg + end + + local global_rules = get_global_rules() + err_msg = check_resources_reference(global_rules, id, true, "global_rules") + if err_msg then + return 400, err_msg end return nil, nil diff --git a/apisix/cli/etcd.lua b/apisix/cli/etcd.lua index 51cac2a508e6..b67248095d92 100644 --- a/apisix/cli/etcd.lua +++ b/apisix/cli/etcd.lua @@ -280,103 
+280,7 @@ local function prepare_dirs_via_http(yaml_conf, args, index, host, host_count) end -local function grpc_request(url, yaml_conf, key) - local cmd - - local auth = "" - if yaml_conf.etcd.user then - local user = yaml_conf.etcd.user - local password = yaml_conf.etcd.password - auth = str_format("--user=%s:%s", user, password) - end - - if str_sub(url, 1, 8) == "https://" then - local host = url:sub(9) - - local verify = true - local certificate, pkey, cafile - if yaml_conf.etcd.tls then - local cfg = yaml_conf.etcd.tls - - if cfg.verify == false then - verify = false - end - - certificate = cfg.cert - pkey = cfg.key - - local apisix_ssl = yaml_conf.apisix.ssl - if apisix_ssl and apisix_ssl.ssl_trusted_certificate then - cafile = apisix_ssl.ssl_trusted_certificate - end - end - - cmd = str_format( - "etcdctl --insecure-transport=false %s %s %s %s " .. - "%s --endpoints=%s put %s init_dir", - verify and "" or "--insecure-skip-tls-verify", - certificate and "--cert " .. certificate or "", - pkey and "--key " .. pkey or "", - cafile and "--cacert " .. cafile or "", - auth, host, key) - else - local host = url:sub(#("http://") + 1) - - cmd = str_format( - "etcdctl %s --endpoints=%s put %s init_dir", - auth, host, key) - end - - local res, err = util.execute_cmd(cmd) - return res, err -end - - -local function prepare_dirs_via_grpc(yaml_conf, args, index, host) - local is_success = true - - local errmsg - local dirs = {} - for name in pairs(constants.HTTP_ETCD_DIRECTORY) do - dirs[name] = true - end - for name in pairs(constants.STREAM_ETCD_DIRECTORY) do - dirs[name] = true - end - - for dir_name in pairs(dirs) do - local key = (yaml_conf.etcd.prefix or "") .. dir_name .. "/" - local res, err - local retry_time = 0 - while retry_time < 2 do - res, err = grpc_request(host, yaml_conf, key) - retry_time = retry_time + 1 - if res then - break - end - print(str_format("Warning! 
Request etcd endpoint \'%s\' error, %s, retry time=%s", - host, err, retry_time)) - end - - if not res then - errmsg = str_format("request etcd endpoint \"%s\" error, %s\n", host, err) - util.die(errmsg) - end - - if args and args["verbose"] then - print(res) - end - end - - return is_success -end - - -local function prepare_dirs(use_grpc, yaml_conf, args, index, host, host_count) - if use_grpc then - return prepare_dirs_via_grpc(yaml_conf, args, index, host) - end - +local function prepare_dirs(yaml_conf, args, index, host, host_count) return prepare_dirs_via_http(yaml_conf, args, index, host, host_count) end @@ -400,8 +304,6 @@ function _M.init(env, args) util.die("failed to read `etcd` field from yaml file when init etcd") end - local etcd_conf = yaml_conf.etcd - -- convert old single etcd config to multiple etcd config if type(yaml_conf.etcd.host) == "string" then yaml_conf.etcd.host = {yaml_conf.etcd.host} @@ -477,22 +379,9 @@ function _M.init(env, args) util.die("the etcd cluster needs at least 50% and above healthy nodes\n") end - if etcd_conf.use_grpc and not env.use_apisix_base then - io_stderr:write("'use_grpc: true' in the etcd configuration " .. 
- "is not supported by vanilla OpenResty\n") - end - - local use_grpc = etcd_conf.use_grpc and env.use_apisix_base - if use_grpc then - local ok, err = util.execute_cmd("command -v etcdctl") - if not ok then - util.die("can't find etcdctl: ", err, "\n") - end - end - local etcd_ok = false for index, host in ipairs(etcd_healthy_hosts) do - if prepare_dirs(use_grpc, yaml_conf, args, index, host, host_count) then + if prepare_dirs(yaml_conf, args, index, host, host_count) then etcd_ok = true break end diff --git a/apisix/cli/file.lua b/apisix/cli/file.lua index 2fe9edd030b3..94e790db65e3 100644 --- a/apisix/cli/file.lua +++ b/apisix/cli/file.lua @@ -54,8 +54,59 @@ local function tab_is_array(t) end +local function var_sub(val) + local err + local var_used = false + -- we use '${{var}}' because '$var' and '${var}' are taken + -- by Nginx + local new_val = val:gsub("%$%{%{%s*([%w_]+[%:%=]?.-)%s*%}%}", function(var) + local i, j = var:find("%:%=") + local default + if i and j then + default = var:sub(i + 2, #var) + default = default:gsub('^%s*(.-)%s*$', '%1') + var = var:sub(1, i - 1) + end + + local v = getenv(var) or default + if v then + if not exported_vars then + exported_vars = {} + end + + exported_vars[var] = v + var_used = true + return v + end + + err = "failed to handle configuration: " .. + "can't find environment variable " .. 
var + return "" + end) + return new_val, var_used, err +end + + local function resolve_conf_var(conf) + local new_keys = {} for key, val in pairs(conf) do + -- avoid re-iterating the table for already iterated key + if new_keys[key] then + goto continue + end + -- substitute environment variables from conf keys + if type(key) == "string" then + local new_key, _, err = var_sub(key) + if err then + return nil, err + end + if new_key ~= key then + new_keys[new_key] = "dummy" -- we only care about checking the key + conf.key = nil + conf[new_key] = val + key = new_key + end + end if type(val) == "table" then local ok, err = resolve_conf_var(val) if not ok then @@ -63,34 +114,7 @@ local function resolve_conf_var(conf) end elseif type(val) == "string" then - local err - local var_used = false - -- we use '${{var}}' because '$var' and '${var}' are taken - -- by Nginx - local new_val = val:gsub("%$%{%{%s*([%w_]+[%:%=]?.-)%s*%}%}", function(var) - local i, j = var:find("%:%=") - local default - if i and j then - default = var:sub(i + 2, #var) - default = default:gsub('^%s*(.-)%s*$', '%1') - var = var:sub(1, i - 1) - end - - local v = getenv(var) or default - if v then - if not exported_vars then - exported_vars = {} - end - - exported_vars[var] = v - var_used = true - return v - end - - err = "failed to handle configuration: " .. - "can't find environment variable " .. 
var - return "" - end) + local new_val, var_used, err = var_sub(val) if err then return nil, err @@ -108,6 +132,7 @@ local function resolve_conf_var(conf) conf[key] = new_val end + ::continue:: end return true @@ -223,7 +248,10 @@ function _M.read_yaml_conf(apisix_home) return nil, "invalid config-default.yaml file" end - local_conf_path = profile:yaml_path("config") + local_conf_path = profile:customized_yaml_path() + if not local_conf_path then + local_conf_path = profile:yaml_path("config") + end local user_conf_yaml, err = util.read_file(local_conf_path) if not user_conf_yaml then return nil, err @@ -264,26 +292,14 @@ function _M.read_yaml_conf(apisix_home) default_conf.apisix.enable_admin = true elseif default_conf.deployment.role == "data_plane" then + default_conf.etcd = default_conf.deployment.etcd if default_conf.deployment.role_data_plane.config_provider == "yaml" then default_conf.deployment.config_provider = "yaml" elseif default_conf.deployment.role_data_plane.config_provider == "xds" then default_conf.deployment.config_provider = "xds" - else - default_conf.etcd = default_conf.deployment.role_data_plane.control_plane end default_conf.apisix.enable_admin = false end - - if default_conf.etcd and default_conf.deployment.certs then - -- copy certs configuration to keep backward compatible - local certs = default_conf.deployment.certs - local etcd = default_conf.etcd - if not etcd.tls then - etcd.tls = {} - end - etcd.tls.cert = certs.cert - etcd.tls.key = certs.cert_key - end end if default_conf.deployment.config_provider == "yaml" then diff --git a/apisix/cli/ngx_tpl.lua b/apisix/cli/ngx_tpl.lua index c81430d9e096..76c1832f3b32 100644 --- a/apisix/cli/ngx_tpl.lua +++ b/apisix/cli/ngx_tpl.lua @@ -115,16 +115,12 @@ http { } } {% end %} - - {% if conf_server then %} - {* conf_server *} - {% end %} } {% end %} {% end %} -{% if stream_proxy then %} +{% if enable_stream then %} stream { lua_package_path 
"{*extra_lua_path*}$prefix/deps/share/lua/5.1/?.lua;$prefix/deps/share/lua/5.1/?/init.lua;]=] .. [=[{*apisix_lua_home*}/?.lua;{*apisix_lua_home*}/?/init.lua;;{*lua_path*};"; @@ -369,8 +365,12 @@ http { log_format main escape={* http.access_log_format_escape *} '{* http.access_log_format *}'; uninitialized_variable_warn off; + {% if http.access_log_buffer then %} + access_log {* http.access_log *} main buffer={* http.access_log_buffer *} flush=3; + {% else %} access_log {* http.access_log *} main buffer=16384 flush=3; {% end %} + {% end %} open_file_cache max=1000 inactive=60; client_max_body_size {* http.client_max_body_size *}; keepalive_timeout {* http.keepalive_timeout *}; @@ -576,10 +576,6 @@ http { } {% end %} - {% if conf_server then %} - {* conf_server *} - {% end %} - {% if deployment_role ~= "control_plane" then %} {% if enabled_plugins["proxy-cache"] then %} @@ -661,6 +657,10 @@ http { } {% if ssl.enable then %} + ssl_client_hello_by_lua_block { + apisix.http_ssl_client_hello_phase() + } + ssl_certificate_by_lua_block { apisix.http_ssl_phase() } @@ -672,6 +672,7 @@ http { {% end %} location / { + set $upstream_mirror_host ''; set $upstream_mirror_uri ''; set $upstream_upgrade ''; set $upstream_connection ''; @@ -711,16 +712,11 @@ http { ### the following x-forwarded-* headers is to send to upstream server - set $var_x_forwarded_for $remote_addr; set $var_x_forwarded_proto $scheme; set $var_x_forwarded_host $host; set $var_x_forwarded_port $server_port; - if ($http_x_forwarded_for != "") { - set $var_x_forwarded_for "${http_x_forwarded_for}, ${realip_remote_addr}"; - } - - proxy_set_header X-Forwarded-For $var_x_forwarded_for; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; proxy_set_header X-Forwarded-Proto $var_x_forwarded_proto; proxy_set_header X-Forwarded-Host $var_x_forwarded_host; proxy_set_header X-Forwarded-Port $var_x_forwarded_port; @@ -779,9 +775,14 @@ http { grpc_set_header "Host" $upstream_host; {% end %} grpc_set_header 
Content-Type application/grpc; + grpc_set_header TE trailers; grpc_socket_keepalive on; grpc_pass $upstream_scheme://apisix_backend; + {% if enabled_plugins["proxy-mirror"] then %} + mirror /proxy_mirror_grpc; + {% end %} + header_filter_by_lua_block { apisix.http_header_filter_phase() } @@ -846,6 +847,32 @@ http { proxy_pass $upstream_mirror_uri; } {% end %} + + {% if enabled_plugins["proxy-mirror"] then %} + location = /proxy_mirror_grpc { + internal; + + {% if not use_apisix_base then %} + if ($upstream_mirror_uri = "") { + return 200; + } + {% end %} + + + {% if proxy_mirror_timeouts then %} + {% if proxy_mirror_timeouts.connect then %} + grpc_connect_timeout {* proxy_mirror_timeouts.connect *}; + {% end %} + {% if proxy_mirror_timeouts.read then %} + grpc_read_timeout {* proxy_mirror_timeouts.read *}; + {% end %} + {% if proxy_mirror_timeouts.send then %} + grpc_send_timeout {* proxy_mirror_timeouts.send *}; + {% end %} + {% end %} + grpc_pass $upstream_mirror_host; + } + {% end %} } {% end %} diff --git a/apisix/cli/ops.lua b/apisix/cli/ops.lua index e7f86f562e22..a5a6975d80bd 100644 --- a/apisix/cli/ops.lua +++ b/apisix/cli/ops.lua @@ -21,7 +21,6 @@ local file = require("apisix.cli.file") local schema = require("apisix.cli.schema") local ngx_tpl = require("apisix.cli.ngx_tpl") local cli_ip = require("apisix.cli.ip") -local snippet = require("apisix.cli.snippet") local profile = require("apisix.core.profile") local template = require("resty.template") local argparse = require("argparse") @@ -58,7 +57,7 @@ local function help() print([[ Usage: apisix [action] -help: show this message, then exit +help: print the apisix cli help message init: initialize the local nginx.conf init_etcd: initialize the data of etcd start: start the apisix server @@ -189,6 +188,13 @@ local function init(env) and #allow_admin == 1 and allow_admin[1] == "127.0.0.0/24" then checked_admin_key = true end + -- check if admin_key is required + if 
yaml_conf.deployment.admin.admin_key_required == false then + checked_admin_key = true + print("Warning! Admin key is bypassed! " + .. "If you are deploying APISIX in a production environment, " + .. "please disable `admin_key_required` and set a secure admin key!") + end if yaml_conf.apisix.enable_admin and not checked_admin_key then local help = [[ @@ -251,7 +257,7 @@ Please modify "admin_key" in conf/config.yaml . util.die("can not find openresty\n") end - local need_ver = "1.19.3" + local need_ver = "1.21.4" if not version_greater_equal(or_ver, need_ver) then util.die("openresty version must >=", need_ver, " current ", or_ver, "\n") end @@ -262,11 +268,24 @@ Please modify "admin_key" in conf/config.yaml . "your openresty, please check it out.\n") end + --- http is enabled by default local enable_http = true - if not yaml_conf.apisix.enable_admin and yaml_conf.apisix.stream_proxy and - yaml_conf.apisix.stream_proxy.only ~= false - then - enable_http = false + --- stream is disabled by default + local enable_stream = false + if yaml_conf.apisix.proxy_mode then + --- check for "http" + if yaml_conf.apisix.proxy_mode == "http" then + enable_http = true + enable_stream = false + --- check for "stream" + elseif yaml_conf.apisix.proxy_mode == "stream" then + enable_stream = true + enable_http = false + --- check for "http&stream" + elseif yaml_conf.apisix.proxy_mode == "http&stream" then + enable_stream = true + enable_http = true + end end local enabled_discoveries = {} @@ -481,7 +500,7 @@ Please modify "admin_key" in conf/config.yaml . local tcp_enable_ssl -- compatible with the original style which only has the addr - if yaml_conf.apisix.stream_proxy and yaml_conf.apisix.stream_proxy.tcp then + if enable_stream and yaml_conf.apisix.stream_proxy and yaml_conf.apisix.stream_proxy.tcp then local tcp = yaml_conf.apisix.stream_proxy.tcp for i, item in ipairs(tcp) do if type(item) ~= "table" then @@ -513,11 +532,6 @@ Please modify "admin_key" in conf/config.yaml . 
proxy_mirror_timeouts = yaml_conf.plugin_attr["proxy-mirror"].timeout end - local conf_server, err = snippet.generate_conf_server(env, yaml_conf) - if err then - util.die(err, "\n") - end - if yaml_conf.deployment and yaml_conf.deployment.role then local role = yaml_conf.deployment.role env.deployment_role = role @@ -542,6 +556,7 @@ Please modify "admin_key" in conf/config.yaml . use_apisix_base = env.use_apisix_base, error_log = {level = "warn"}, enable_http = enable_http, + enable_stream = enable_stream, enabled_discoveries = enabled_discoveries, enabled_plugins = enabled_plugins, enabled_stream_plugins = enabled_stream_plugins, @@ -551,7 +566,6 @@ Please modify "admin_key" in conf/config.yaml . control_server_addr = control_server_addr, prometheus_server_addr = prometheus_server_addr, proxy_mirror_timeouts = proxy_mirror_timeouts, - conf_server = conf_server, opentelemetry_set_ngx_var = opentelemetry_set_ngx_var } @@ -737,7 +751,22 @@ local function init_etcd(env, args) end +local function cleanup(env) + if env.apisix_home then + profile.apisix_home = env.apisix_home + end + + os_remove(profile:customized_yaml_index()) +end + + local function start(env, ...) + cleanup(env) + + if env.apisix_home then + profile.apisix_home = env.apisix_home + end + -- Because the worker process started by apisix has "nobody" permission, -- it cannot access the `/root` directory. Therefore, it is necessary to -- prohibit APISIX from running in the /root directory. @@ -783,39 +812,34 @@ local function start(env, ...) -- start a new APISIX instance - local conf_server_sock_path = env.apisix_home .. 
"/conf/config_listen.sock" - if pl_path.exists(conf_server_sock_path) then - -- remove stale sock (if exists) so that APISIX can start - local ok, err = os_remove(conf_server_sock_path) - if not ok then - util.die("failed to remove stale conf server sock file, error: ", err) - end - end - local parser = argparse() parser:argument("_", "Placeholder") parser:option("-c --config", "location of customized config.yaml") -- TODO: more logs for APISIX cli could be added using this feature - parser:flag("--verbose", "show init_etcd debug information") + parser:flag("-v --verbose", "show init_etcd debug information") local args = parser:parse() local customized_yaml = args["config"] if customized_yaml then - profile.apisix_home = env.apisix_home .. "/" - local local_conf_path = profile:yaml_path("config") - local local_conf_path_bak = local_conf_path .. ".bak" + local customized_yaml_path + local idx = str_find(customized_yaml, "/") + if idx and idx == 1 then + customized_yaml_path = customized_yaml + else + local cur_dir, err = lfs.currentdir() + if err then + util.die("failed to get current directory") + end + customized_yaml_path = cur_dir .. "/" .. customized_yaml + end - local ok, err = os_rename(local_conf_path, local_conf_path_bak) - if not ok then - util.die("failed to backup config, error: ", err) + if not util.file_exists(customized_yaml_path) then + util.die("customized config file not exists, path: " .. customized_yaml_path) end - local ok, err1 = lfs.link(customized_yaml, local_conf_path) + + local ok, err = util.write_file(profile:customized_yaml_index(), customized_yaml_path) if not ok then - ok, err = os_rename(local_conf_path_bak, local_conf_path) - if not ok then - util.die("failed to recover original config file, error: ", err) - end - util.die("failed to link customized config, error: ", err1) + util.die("write customized config index failed, err: " .. 
err) end print("Use customized yaml: ", customized_yaml) @@ -831,22 +855,6 @@ local function start(env, ...) end -local function cleanup() - local local_conf_path = profile:yaml_path("config") - local local_conf_path_bak = local_conf_path .. ".bak" - if pl_path.exists(local_conf_path_bak) then - local ok, err = os_remove(local_conf_path) - if not ok then - print("failed to remove customized config, error: ", err) - end - ok, err = os_rename(local_conf_path_bak, local_conf_path) - if not ok then - util.die("failed to recover original config file, error: ", err) - end - end -end - - local function test(env, backup_ngx_conf) -- backup nginx.conf local ngx_conf_path = env.apisix_home .. "/conf/nginx.conf" @@ -886,7 +894,7 @@ end local function quit(env) - cleanup() + cleanup(env) local cmd = env.openresty_args .. [[ -s quit]] util.execute_cmd(cmd) @@ -894,7 +902,7 @@ end local function stop(env) - cleanup() + cleanup(env) local cmd = env.openresty_args .. [[ -s stop]] util.execute_cmd(cmd) diff --git a/apisix/cli/schema.lua b/apisix/cli/schema.lua index 477e2ce7e56d..6f6450b46e0c 100644 --- a/apisix/cli/schema.lua +++ b/apisix/cli/schema.lua @@ -62,11 +62,6 @@ local etcd_schema = { minimum = 1, description = "etcd connection timeout in seconds", }, - use_grpc = { - type = "boolean", - -- TODO: set true by default in v3.2 - default = false, - }, }, required = {"prefix", "host"} } @@ -136,6 +131,10 @@ local config_schema = { } } }, + proxy_mode = { + type = "string", + enum = {"http", "stream", "http&stream"}, + }, stream_proxy = { type = "object", properties = { @@ -353,6 +352,9 @@ local admin_schema = { https_admin = { type = "boolean", }, + admin_key_required = { + type = "boolean", + }, } } @@ -381,60 +383,23 @@ local deployment_schema = { config_provider = { enum = {"etcd"} }, - conf_server = { - properties = { - listen = { - type = "string", - default = "0.0.0.0:9280", - }, - cert = { type = "string" }, - cert_key = { type = "string" }, - client_ca_cert = { type = 
"string" }, - }, - required = {"cert", "cert_key"} - }, - }, - required = {"config_provider", "conf_server"} - }, - certs = { - properties = { - cert = { type = "string" }, - cert_key = { type = "string" }, - trusted_ca_cert = { type = "string" }, - }, - dependencies = { - cert = { - required = {"cert_key"}, - }, }, - default = {}, + required = {"config_provider"} }, }, required = {"etcd", "role_control_plane"} }, data_plane = { properties = { + etcd = etcd_schema, role_data_plane = { properties = { config_provider = { - enum = {"control_plane", "yaml", "xds"} + enum = {"etcd", "yaml", "xds"} }, }, required = {"config_provider"} }, - certs = { - properties = { - cert = { type = "string" }, - cert_key = { type = "string" }, - trusted_ca_cert = { type = "string" }, - }, - dependencies = { - cert = { - required = {"cert_key"}, - }, - }, - default = {}, - }, }, required = {"role_data_plane"} } diff --git a/apisix/cli/snippet.lua b/apisix/cli/snippet.lua deleted file mode 100644 index ca7ed3f7c59a..000000000000 --- a/apisix/cli/snippet.lua +++ /dev/null @@ -1,203 +0,0 @@ --- --- Licensed to the Apache Software Foundation (ASF) under one or more --- contributor license agreements. See the NOTICE file distributed with --- this work for additional information regarding copyright ownership. --- The ASF licenses this file to You under the Apache License, Version 2.0 --- (the "License"); you may not use this file except in compliance with --- the License. You may obtain a copy of the License at --- --- http://www.apache.org/licenses/LICENSE-2.0 --- --- Unless required by applicable law or agreed to in writing, software --- distributed under the License is distributed on an "AS IS" BASIS, --- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. --- See the License for the specific language governing permissions and --- limitations under the License. 
--- -local template = require("resty.template") -local pl_path = require("pl.path") -local ipairs = ipairs - - --- this module provide methods to generate snippets which will be used in the nginx.conf template -local _M = {} -local conf_server_tpl = [[ -upstream apisix_conf_backend { - server 0.0.0.0:80; - balancer_by_lua_block { - local conf_server = require("apisix.conf_server") - conf_server.balancer() - } -} - -{% if trusted_ca_cert then %} -lua_ssl_trusted_certificate {* trusted_ca_cert *}; -{% end %} - -server { - {% if control_plane then %} - {% if directive_prefix == "grpc" then %} - listen {* control_plane.listen *} ssl http2; - {% else %} - listen {* control_plane.listen *} ssl; - {% end %} - ssl_certificate {* control_plane.cert *}; - ssl_certificate_key {* control_plane.cert_key *}; - - {% if control_plane.client_ca_cert then %} - ssl_verify_client on; - ssl_client_certificate {* control_plane.client_ca_cert *}; - {% end %} - - {% else %} - {% if directive_prefix == "grpc" then %} - listen unix:{* home *}/conf/config_listen.sock http2; - {% else %} - listen unix:{* home *}/conf/config_listen.sock; - {% end %} - {% end %} - - access_log off; - - set $upstream_host ''; - - access_by_lua_block { - local conf_server = require("apisix.conf_server") - conf_server.access() - } - - location / { - {% if enable_https then %} - {* directive_prefix *}_pass {* scheme_name *}s://apisix_conf_backend; - {* directive_prefix *}_ssl_protocols TLSv1.2 TLSv1.3; - {* directive_prefix *}_ssl_server_name on; - - {% if etcd_tls_verify then %} - {* directive_prefix *}_ssl_verify on; - {* directive_prefix *}_ssl_trusted_certificate {* ssl_trusted_certificate *}; - {% end %} - - {% if sni then %} - {* directive_prefix *}_ssl_name {* sni *}; - {% else %} - {* directive_prefix *}_ssl_name $upstream_host; - {% end %} - - {% if client_cert then %} - {* directive_prefix *}_ssl_certificate {* client_cert *}; - {* directive_prefix *}_ssl_certificate_key {* client_cert_key *}; - {% end %} 
- - {% else %} - {* directive_prefix *}_pass {* scheme_name *}://apisix_conf_backend; - {% end %} - - {% if scheme_name == "http" then %} - proxy_http_version 1.1; - proxy_set_header Connection ""; - {% end %} - - {* directive_prefix *}_set_header Host $upstream_host; - {* directive_prefix *}_next_upstream error timeout non_idempotent - http_500 http_502 http_503 http_504; - } - - log_by_lua_block { - local conf_server = require("apisix.conf_server") - conf_server.log() - } -} -]] - - -local function is_grpc_used(env, etcd) - local is_grpc_available = env.use_apisix_base - return is_grpc_available and etcd.use_grpc -end - - -function _M.generate_conf_server(env, conf) - if not (conf.deployment and ( - conf.deployment.role == "traditional" or - conf.deployment.role == "control_plane")) - then - return nil, nil - end - - -- we use proxy even the role is traditional so that we can test the proxy in daily dev - local etcd = conf.deployment.etcd - local servers = etcd.host - local enable_https = false - local prefix = "https://" - if servers[1]:find(prefix, 1, true) then - enable_https = true - end - - for i, s in ipairs(servers) do - if (s:find(prefix, 1, true) ~= nil) ~= enable_https then - return nil, "all nodes in the etcd cluster should enable/disable TLS together" - end - - local _, to = s:find("://", 1, true) - if not to then - return nil, "bad etcd endpoint format" - end - end - - local control_plane - if conf.deployment.role == "control_plane" then - control_plane = conf.deployment.role_control_plane.conf_server - control_plane.cert = pl_path.abspath(control_plane.cert) - control_plane.cert_key = pl_path.abspath(control_plane.cert_key) - - if control_plane.client_ca_cert then - control_plane.client_ca_cert = pl_path.abspath(control_plane.client_ca_cert) - end - end - - local trusted_ca_cert - if conf.deployment.certs then - if conf.deployment.certs.trusted_ca_cert then - trusted_ca_cert = pl_path.abspath(conf.deployment.certs.trusted_ca_cert) - end - end - - 
local conf_render = template.compile(conf_server_tpl) - local tls = etcd.tls - local client_cert - local client_cert_key - local ssl_trusted_certificate - local etcd_tls_verify - local use_grpc = is_grpc_used(env, etcd) - if tls then - if tls.cert then - client_cert = pl_path.abspath(tls.cert) - client_cert_key = pl_path.abspath(tls.key) - end - - etcd_tls_verify = tls.verify - if enable_https and etcd_tls_verify then - if not conf.apisix.ssl.ssl_trusted_certificate then - return nil, "should set ssl_trusted_certificate if etcd tls verify is enabled" - end - ssl_trusted_certificate = pl_path.abspath(conf.apisix.ssl.ssl_trusted_certificate) - end - end - - return conf_render({ - sni = tls and tls.sni, - home = env.apisix_home or ".", - control_plane = control_plane, - enable_https = enable_https, - client_cert = client_cert, - client_cert_key = client_cert_key, - trusted_ca_cert = trusted_ca_cert, - etcd_tls_verify = etcd_tls_verify, - ssl_trusted_certificate = ssl_trusted_certificate, - scheme_name = use_grpc and "grpc" or "http", - directive_prefix = use_grpc and "grpc" or "proxy", - }) -end - - -return _M diff --git a/apisix/cli/util.lua b/apisix/cli/util.lua index cc6206a844e1..bcd56a241aa8 100644 --- a/apisix/cli/util.lua +++ b/apisix/cli/util.lua @@ -19,6 +19,7 @@ local require = require local pcall = pcall local open = io.open local popen = io.popen +local close = io.close local exit = os.exit local stderr = io.stderr local str_format = string.format @@ -127,4 +128,9 @@ function _M.write_file(file_path, data) end +function _M.file_exists(file_path) + local f = open(file_path, "r") + return f ~= nil and close(f) +end + return _M diff --git a/apisix/consumer_group.lua b/apisix/consumer_group.lua index 8c17bdd715e5..3be59ec923f9 100644 --- a/apisix/consumer_group.lua +++ b/apisix/consumer_group.lua @@ -39,6 +39,14 @@ function _M.init_worker() end +function _M.consumer_groups() + if not consumer_groups then + return nil, nil + end + return consumer_groups.values, 
consumer_groups.conf_version +end + + function _M.get(id) return consumer_groups:get(id) end diff --git a/apisix/control/v1.lua b/apisix/control/v1.lua index fd031a473ca2..3143ae5948fc 100644 --- a/apisix/control/v1.lua +++ b/apisix/control/v1.lua @@ -14,6 +14,7 @@ -- See the License for the specific language governing permissions and -- limitations under the License. -- +local require = require local core = require("apisix.core") local plugin = require("apisix.plugin") local get_routes = require("apisix.router").http_routes @@ -22,6 +23,7 @@ local upstream_mod = require("apisix.upstream") local get_upstreams = upstream_mod.upstreams local collectgarbage = collectgarbage local ipairs = ipairs +local pcall = pcall local str_format = string.format local ngx_var = ngx.var @@ -62,52 +64,137 @@ function _M.schema() end -local function extra_checker_info(value, src_type) - local checker = value.checker - local upstream = value.checker_upstream - local host = upstream.checks and upstream.checks.active and upstream.checks.active.host - local port = upstream.checks and upstream.checks.active and upstream.checks.active.port - local nodes = upstream.nodes - local healthy_nodes = core.table.new(#nodes, 0) - for _, node in ipairs(nodes) do - local ok = checker:get_target_status(node.host, port or node.port, host) - if ok then - core.table.insert(healthy_nodes, node) - end +local healthcheck +local function extra_checker_info(value) + if not healthcheck then + healthcheck = require("resty.healthcheck") end - local conf = value.value + local name = upstream_mod.get_healthchecker_name(value) + local nodes, err = healthcheck.get_target_list(name, "upstream-healthcheck") + if err then + core.log.error("healthcheck.get_target_list failed: ", err) + end return { - name = upstream_mod.get_healthchecker_name(value), - src_id = conf.id, - src_type = src_type, + name = value.key, nodes = nodes, - healthy_nodes = healthy_nodes, } end -local function iter_and_add_healthcheck_info(infos, 
values, src_type) +local function get_checker_type(checks) + if checks.active and checks.active.type then + return checks.active.type + elseif checks.passive and checks.passive.type then + return checks.passive.type + end +end + + +local function iter_and_add_healthcheck_info(infos, values) if not values then return end for _, value in core.config_util.iterate_values(values) do - if value.checker then - core.table.insert(infos, extra_checker_info(value, src_type)) + local checks = value.value.checks or (value.value.upstream and value.value.upstream.checks) + if checks then + local info = extra_checker_info(value) + info.type = get_checker_type(checks) + core.table.insert(infos, info) end end end -function _M.get_health_checkers() +local HTML_TEMPLATE = [[ + + + APISIX upstream check status + + +

APISIX upstream check status

+ + + + + + + + + + + + +{% local i = 0 %} +{% for _, stat in ipairs(stats) do %} +{% for _, node in ipairs(stat.nodes) do %} +{% i = i + 1 %} + {% if node.status == "healthy" then %} + + {% else %} + + {% end %} + + + + + + + + + + +{% end %} +{% end %} +
IndexUpstreamCheck typeHostStatusSuccess countsTCP FailuresHTTP FailuresTIMEOUT Failures
{* i *}{* stat.name *}{* stat.type *}{* node.ip .. ":" .. node.port *}{* node.status *}{* node.counter.success *}{* node.counter.tcp_failure *}{* node.counter.http_failure *}{* node.counter.timeout_failure *}
+ + +]] + +local html_render + +local function try_render_html(data) + if not html_render then + local template = require("resty.template") + html_render = template.compile(HTML_TEMPLATE) + end + local accept = ngx_var.http_accept + if accept and accept:find("text/html") then + local ok, out = pcall(html_render, data) + if not ok then + local err = str_format("HTML template rendering: %s", out) + core.log.error(err) + return nil, err + end + return out + end +end + + +local function _get_health_checkers() local infos = {} local routes = get_routes() - iter_and_add_healthcheck_info(infos, routes, "routes") + iter_and_add_healthcheck_info(infos, routes) local services = get_services() - iter_and_add_healthcheck_info(infos, services, "services") + iter_and_add_healthcheck_info(infos, services) local upstreams = get_upstreams() - iter_and_add_healthcheck_info(infos, upstreams, "upstreams") + iter_and_add_healthcheck_info(infos, upstreams) + return infos +end + + +function _M.get_health_checkers() + local infos = _get_health_checkers() + local out, err = try_render_html({stats=infos}) + if out then + core.response.set_header("Content-Type", "text/html") + return 200, out + end + if err then + return 503, {error_msg = err} + end + return 200, infos end @@ -119,11 +206,15 @@ local function iter_and_find_healthcheck_info(values, src_type, src_id) for _, value in core.config_util.iterate_values(values) do if value.value.id == src_id then - if not value.checker then + local checks = value.value.checks or + (value.value.upstream and value.value.upstream.checks) + if not checks then return nil, str_format("no checker for %s[%s]", src_type, src_id) end - return extra_checker_info(value, src_type) + local info = extra_checker_info(value) + info.type = get_checker_type(checks) + return info end end @@ -155,6 +246,16 @@ function _M.get_health_checker() if not info then return 404, {error_msg = err} end + + local out, err = try_render_html({stats={info}}) + if out then + 
core.response.set_header("Content-Type", "text/html") + return 200, out + end + if err then + return 503, {error_msg = err} + end + return 200, info end @@ -372,5 +473,6 @@ return { methods = {"GET"}, uris = {"/plugin_metadata/*"}, handler = _M.dump_plugin_metadata, - } + }, + get_health_checkers = _get_health_checkers, } diff --git a/apisix/core/config_etcd.lua b/apisix/core/config_etcd.lua index 4946cc5c22c8..357f24fa1e6e 100644 --- a/apisix/core/config_etcd.lua +++ b/apisix/core/config_etcd.lua @@ -27,6 +27,10 @@ local json = require("apisix.core.json") local etcd_apisix = require("apisix.core.etcd") local core_str = require("apisix.core.string") local new_tab = require("table.new") +local inspect = require("inspect") +local errlog = require("ngx.errlog") +local log_level = errlog.get_sys_filter_level() +local NGX_INFO = ngx.INFO local check_schema = require("apisix.core.schema").check local exiting = ngx.worker.exiting local insert_tab = table.insert @@ -43,9 +47,17 @@ local xpcall = xpcall local debug = debug local string = string local error = error +local pairs = pairs +local next = next +local assert = assert local rand = math.random local constants = require("apisix.constants") local health_check = require("resty.etcd.health_check") +local semaphore = require("ngx.semaphore") +local tablex = require("pl.tablex") +local ngx_thread_spawn = ngx.thread.spawn +local ngx_thread_kill = ngx.thread.kill +local ngx_thread_wait = ngx.thread.wait local is_http = ngx.config.subsystem == "http" @@ -58,6 +70,7 @@ if not is_http then end local created_obj = {} local loaded_configuration = {} +local watch_ctx local _M = { @@ -75,6 +88,232 @@ local mt = { } +local get_etcd +do + local etcd_cli + + function get_etcd() + if etcd_cli ~= nil then + return etcd_cli + end + + local _, err + etcd_cli, _, err = etcd_apisix.get_etcd_syncer() + return etcd_cli, err + end +end + + +local function cancel_watch(http_cli) + local res, err = watch_ctx.cli:watchcancel(http_cli) + if res == 
1 then + log.info("cancel watch connection success") + else + log.error("cancel watch failed: ", err) + end +end + + +-- append res to the queue and notify pending watchers +local function produce_res(res, err) + if log_level >= NGX_INFO then + log.info("append res: ", inspect(res), ", err: ", inspect(err)) + end + insert_tab(watch_ctx.res, {res=res, err=err}) + for _, sema in pairs(watch_ctx.sema) do + sema:post() + end + table.clear(watch_ctx.sema) +end + + +local function do_run_watch(premature) + if premature then + return + end + + local local_conf, err = config_local.local_conf() + if not local_conf then + error("no local conf: " .. err) + end + watch_ctx.prefix = local_conf.etcd.prefix .. "/" + + watch_ctx.cli, err = get_etcd() + if not watch_ctx.cli then + error("failed to create etcd instance: " .. string(err)) + end + + local rev = 0 + if loaded_configuration then + local _, res = next(loaded_configuration) + if res then + rev = tonumber(res.headers["X-Etcd-Index"]) + assert(rev > 0, 'invalid res.headers["X-Etcd-Index"]') + end + end + + if rev == 0 then + while true do + local res, err = watch_ctx.cli:get(watch_ctx.prefix) + if not res then + log.error("etcd get: ", err) + ngx_sleep(3) + else + rev = tonumber(res.body.header.revision) + break + end + end + end + + watch_ctx.rev = rev + 1 + watch_ctx.started = true + + log.warn("main etcd watcher started, revision=", watch_ctx.rev) + for _, sema in pairs(watch_ctx.wait_init) do + sema:post() + end + watch_ctx.wait_init = nil + + local opts = {} + opts.timeout = 50 -- second + opts.need_cancel = true + + ::restart_watch:: + while true do + opts.start_revision = watch_ctx.rev + log.info("restart watchdir: start_revision=", opts.start_revision) + local res_func, err, http_cli = watch_ctx.cli:watchdir(watch_ctx.prefix, opts) + if not res_func then + log.error("watchdir: ", err) + ngx_sleep(3) + goto restart_watch + end + + ::watch_event:: + while true do + local res, err = res_func() + if log_level >= 
NGX_INFO then + log.info("res_func: ", inspect(res)) + end + + if not res then + if err ~= "closed" and + err ~= "timeout" and + err ~= "broken pipe" + then + log.error("wait watch event: ", err) + end + cancel_watch(http_cli) + break + end + + if res.error then + log.error("wait watch event: ", inspect(res.error)) + cancel_watch(http_cli) + break + end + + if res.result.created then + goto watch_event + end + + if res.result.canceled then + log.warn("watch canceled by etcd, res: ", inspect(res)) + if res.result.compact_revision then + watch_ctx.rev = tonumber(res.result.compact_revision) + log.warn("etcd compacted, compact_revision=", watch_ctx.rev) + produce_res(nil, "compacted") + end + cancel_watch(http_cli) + break + end + + -- cleanup + local min_idx = 0 + for _, idx in pairs(watch_ctx.idx) do + if (min_idx == 0) or (idx < min_idx) then + min_idx = idx + end + end + + for i = 1, min_idx - 1 do + watch_ctx.res[i] = false + end + + if min_idx > 100 then + for k, idx in pairs(watch_ctx.idx) do + watch_ctx.idx[k] = idx - min_idx + 1 + end + -- trim the res table + for i = 1, min_idx - 1 do + table.remove(watch_ctx.res, 1) + end + end + + local rev = tonumber(res.result.header.revision) + if rev > watch_ctx.rev then + watch_ctx.rev = rev + 1 + end + produce_res(res) + end + end +end + + +local function run_watch(premature) + local run_watch_th = ngx_thread_spawn(do_run_watch, premature) + + ::restart:: + local check_worker_th = ngx_thread_spawn(function () + while not exiting() do + ngx_sleep(0.1) + end + end) + + local ok, err = ngx_thread_wait(check_worker_th) + + if not ok then + log.error("check_worker thread terminates failed, retart checker, error: " .. 
err) + ngx_thread_kill(check_worker_th) + goto restart + end + + ngx_thread_kill(run_watch_th) + -- notify child watchers + produce_res(nil, "worker exited") +end + + +local function init_watch_ctx(key) + if not watch_ctx then + watch_ctx = { + idx = {}, + res = {}, + sema = {}, + wait_init = {}, + started = false, + } + ngx_timer_at(0, run_watch) + end + + if watch_ctx.started == false then + -- wait until the main watcher is started + local sema, err = semaphore.new() + if not sema then + error(err) + end + watch_ctx.wait_init[key] = sema + while true do + local ok, err = sema:wait(60) + if ok then + break + end + log.error("wait main watcher to start, key: ", key, ", err: ", err) + end + end +end + + local function getkey(etcd_cli, key) if not etcd_cli then return nil, "not inited" @@ -123,79 +362,67 @@ local function readdir(etcd_cli, key, formatter) end -local function grpc_waitdir(self, etcd_cli, key, modified_index, timeout) - local watching_stream = self.watching_stream - if not watching_stream then - local attr = {} - attr.start_revision = modified_index - local opts = {} - opts.timeout = timeout - - local st, err = etcd_cli:create_grpc_watch_stream(key, attr, opts) - if not st then - log.error("create watch stream failed: ", err) - return nil, err - end - - log.info("create watch stream for key: ", key, ", modified_index: ", modified_index) - - self.watching_stream = st - watching_stream = st +local function http_waitdir(self, etcd_cli, key, modified_index, timeout) + if not watch_ctx.idx[key] then + watch_ctx.idx[key] = 1 end - return etcd_cli:read_grpc_watch_stream(watching_stream) -end - - -local function flush_watching_streams(self) - local etcd_cli = self.etcd_cli - if not etcd_cli.use_grpc then - return - end - - self.watching_stream = nil -end + ::iterate_events:: + for i = watch_ctx.idx[key], #watch_ctx.res do + watch_ctx.idx[key] = i + 1 + local item = watch_ctx.res[i] + if item == false then + goto iterate_events + end -local function 
http_waitdir(etcd_cli, key, modified_index, timeout) - local opts = {} - opts.start_revision = modified_index - opts.timeout = timeout - opts.need_cancel = true - local res_func, func_err, http_cli = etcd_cli:watchdir(key, opts) - if not res_func then - return nil, func_err - end + local res, err = item.res, item.err + if err then + return res, err + end - -- in etcd v3, the 1st res of watch is watch info, useless to us. - -- try twice to skip create info - local res, err = res_func() - if not res or not res.result or not res.result.events then - res, err = res_func() - end + -- ignore res with revision smaller then self.prev_index + if tonumber(res.result.header.revision) > self.prev_index then + local res2 + for _, evt in ipairs(res.result.events) do + if core_str.find(evt.kv.key, key) == 1 then + if not res2 then + res2 = tablex.deepcopy(res) + table.clear(res2.result.events) + end + insert_tab(res2.result.events, evt) + end + end - if http_cli then - local res_cancel, err_cancel = etcd_cli:watchcancel(http_cli) - if res_cancel == 1 then - log.info("cancel watch connection success") - else - log.error("cancel watch failed: ", err_cancel) + if res2 then + if log_level >= NGX_INFO then + log.info("http_waitdir: ", inspect(res2)) + end + return res2 + end end end - if not res then - return nil, err + -- if no events, wait via semaphore + if not self.watch_sema then + local sema, err = semaphore.new() + if not sema then + error(err) + end + self.watch_sema = sema end - if type(res.result) ~= "table" then - err = "failed to wait etcd dir" - if res.error and res.error.message then - err = err .. ": " .. 
res.error.message + watch_ctx.sema[key] = self.watch_sema + local ok, err = self.watch_sema:wait(timeout or 60) + watch_ctx.sema[key] = nil + if ok then + goto iterate_events + else + if err ~= "timeout" then + log.error("wait watch event, key=", key, ", err: ", err) end return nil, err end - - return res, err end @@ -209,12 +436,7 @@ local function waitdir(self) return nil, "not inited" end - local res, err - if etcd_cli.use_grpc then - res, err = grpc_waitdir(self, etcd_cli, key, modified_index, timeout) - else - res, err = http_waitdir(etcd_cli, key, modified_index, timeout) - end + local res, err = http_waitdir(self, etcd_cli, key, modified_index, timeout) if not res then -- log.error("failed to get key from etcd: ", err) @@ -359,9 +581,9 @@ local function sync_data(self) return nil, "missing 'key' arguments" end - if self.need_reload then - flush_watching_streams(self) + init_watch_ctx(self.key) + if self.need_reload then local res, err = readdir(self.etcd_cli, self.key) if not res then return false, err @@ -555,22 +777,6 @@ function _M.getkey(self, key) end -local get_etcd -do - local etcd_cli - - function get_etcd() - if etcd_cli ~= nil then - return etcd_cli - end - - local _, err - etcd_cli, _, err = etcd_apisix.get_etcd_syncer() - return etcd_cli, err - end -end - - local function _automatic_fetch(premature, self) if premature then return @@ -629,6 +835,9 @@ local function _automatic_fetch(premature, self) .. backoff_duration .. 
"s") end end + elseif err == "worker exited" then + log.info("worker exited.") + return elseif err ~= "timeout" and err ~= "Key not found" and self.last_err ~= err then log.error("failed to fetch data from etcd: ", err, ", ", @@ -664,7 +873,6 @@ local function _automatic_fetch(premature, self) end if not exiting() and self.running then - flush_watching_streams(self) ngx_timer_at(0, _automatic_fetch, self) end end @@ -866,10 +1074,6 @@ function _M.init() return true end - if local_conf.etcd.use_grpc then - return true - end - -- don't go through proxy during start because the proxy is not available local etcd_cli, prefix, err = etcd_apisix.new_without_proxy() if not etcd_cli then @@ -895,21 +1099,6 @@ function _M.init_worker() return true end - if not local_conf.etcd.use_grpc then - return true - end - - -- don't go through proxy during start because the proxy is not available - local etcd_cli, prefix, err = etcd_apisix.new_without_proxy() - if not etcd_cli then - return nil, "failed to start a etcd instance: " .. 
err - end - - local res, err = readdir(etcd_cli, prefix, create_formatter(prefix)) - if not res then - return nil, err - end - return true end diff --git a/apisix/core/ctx.lua b/apisix/core/ctx.lua index 47b6bef8584e..5128061d58fe 100644 --- a/apisix/core/ctx.lua +++ b/apisix/core/ctx.lua @@ -190,6 +190,7 @@ do upstream_connection = true, upstream_uri = true, + upstream_mirror_host = true, upstream_mirror_uri = true, upstream_cache_zone = true, @@ -198,9 +199,9 @@ do upstream_cache_key = true, upstream_cache_bypass = true, - var_x_forwarded_proto = true, - var_x_forwarded_port = true, - var_x_forwarded_host = true, + var_x_forwarded_proto = true, + var_x_forwarded_port = true, + var_x_forwarded_host = true, } -- sort in alphabetical diff --git a/apisix/core/etcd.lua b/apisix/core/etcd.lua index 99e4bbb6b7ab..5cd1038581dd 100644 --- a/apisix/core/etcd.lua +++ b/apisix/core/etcd.lua @@ -28,25 +28,15 @@ local clone_tab = require("table.clone") local health_check = require("resty.etcd.health_check") local pl_path = require("pl.path") local ipairs = ipairs -local pcall = pcall local setmetatable = setmetatable local string = string local tonumber = tonumber -local ngx_config_prefix = ngx.config.prefix() -local ngx_socket_tcp = ngx.socket.tcp local ngx_get_phase = ngx.get_phase -local is_http = ngx.config.subsystem == "http" local _M = {} -local function has_mtls_support() - local s = ngx_socket_tcp() - return s.tlshandshake ~= nil -end - - local function _new(etcd_conf) local prefix = etcd_conf.prefix etcd_conf.http_host = etcd_conf.host @@ -72,17 +62,6 @@ local function _new(etcd_conf) end end - if etcd_conf.use_grpc then - if ngx_get_phase() == "init" then - etcd_conf.use_grpc = false - else - local ok = pcall(require, "resty.grpc") - if not ok then - etcd_conf.use_grpc = false - end - end - end - local etcd_cli, err = etcd.new(etcd_conf) if not etcd_cli then return nil, nil, err @@ -129,64 +108,7 @@ local function new() etcd_conf.trusted_ca = 
local_conf.apisix.ssl.ssl_trusted_certificate end - local proxy_by_conf_server = false - - if local_conf.deployment then - if local_conf.deployment.role == "traditional" - -- we proxy the etcd requests in traditional mode so we can test the CP's behavior in - -- daily development. However, a stream proxy can't be the CP. - -- Hence, generate a HTTP conf server to proxy etcd requests in stream proxy is - -- unnecessary and inefficient. - and is_http - then - local sock_prefix = ngx_config_prefix - etcd_conf.unix_socket_proxy = - "unix:" .. sock_prefix .. "/conf/config_listen.sock" - etcd_conf.host = {"http://127.0.0.1:2379"} - proxy_by_conf_server = true - - elseif local_conf.deployment.role == "control_plane" then - local addr = local_conf.deployment.role_control_plane.conf_server.listen - etcd_conf.host = {"https://" .. addr} - etcd_conf.tls = { - verify = false, - } - - if has_mtls_support() and local_conf.deployment.certs.cert then - local cert = local_conf.deployment.certs.cert - local cert_key = local_conf.deployment.certs.cert_key - etcd_conf.tls.cert = cert - etcd_conf.tls.key = cert_key - end - - proxy_by_conf_server = true - - elseif local_conf.deployment.role == "data_plane" then - if has_mtls_support() and local_conf.deployment.certs.cert then - local cert = local_conf.deployment.certs.cert - local cert_key = local_conf.deployment.certs.cert_key - - if not etcd_conf.tls then - etcd_conf.tls = {} - end - - etcd_conf.tls.cert = cert - etcd_conf.tls.key = cert_key - end - end - - if local_conf.deployment.certs and local_conf.deployment.certs.trusted_ca_cert then - etcd_conf.trusted_ca = local_conf.deployment.certs.trusted_ca_cert - end - end - - -- if an unhealthy etcd node is selected in a single admin read/write etcd operation, - -- the retry mechanism for health check can select another healthy etcd node - -- to complete the read/write etcd operation. 
- if proxy_by_conf_server then - -- health check is done in conf server - health_check.disable() - elseif not health_check.conf then + if not health_check.conf then health_check.init({ max_fails = 1, retry = true, @@ -349,10 +271,6 @@ do return nil, nil, err end - if tmp_etcd_cli.use_grpc then - etcd_cli_init_phase = tmp_etcd_cli - end - return tmp_etcd_cli, prefix end @@ -372,9 +290,7 @@ do return nil, nil, err end - if tmp_etcd_cli.use_grpc then - etcd_cli = tmp_etcd_cli - end + etcd_cli = tmp_etcd_cli return tmp_etcd_cli, prefix end diff --git a/apisix/core/profile.lua b/apisix/core/profile.lua index 389a9d42ccec..a5dcdc81e10b 100644 --- a/apisix/core/profile.lua +++ b/apisix/core/profile.lua @@ -19,6 +19,8 @@ -- -- @module core.profile +local util = require("apisix.cli.util") + local _M = { version = 0.1, profile = os.getenv("APISIX_PROFILE") or "", @@ -48,4 +50,18 @@ function _M.yaml_path(self, file_name) end +function _M.customized_yaml_index(self) + return self.apisix_home .. "/conf/.customized_config_path" +end + + +function _M.customized_yaml_path(self) + local customized_config_index = self:customized_yaml_index() + if util.file_exists(customized_config_index) then + return util.read_file(customized_config_index) + end + return nil +end + + return _M diff --git a/apisix/core/pubsub.lua b/apisix/core/pubsub.lua index 25ac46f13eeb..18bb887001c1 100644 --- a/apisix/core/pubsub.lua +++ b/apisix/core/pubsub.lua @@ -185,9 +185,10 @@ function _M.wait(self) end -- recovery of stored pb_store - pb.state(pb_state) + local pb_old_state = pb.state(pb_state) local data, err = pb.decode("PubSubReq", raw_data) + pb.state(pb_old_state) if not data then log.error("pubsub server receives undecodable data, err: ", err) send_error(ws, 0, "wrong command") diff --git a/apisix/core/response.lua b/apisix/core/response.lua index b934d94b93af..04430abd5266 100644 --- a/apisix/core/response.lua +++ b/apisix/core/response.lua @@ -70,7 +70,9 @@ function resp_exit(code, ...) 
error("failed to encode data: " .. err, -2) else idx = idx + 1 - t[idx] = body .. "\n" + t[idx] = body + idx = idx + 1 + t[idx] = "\n" end elseif v ~= nil then @@ -80,7 +82,7 @@ function resp_exit(code, ...) end if idx > 0 then - ngx_print(concat_tab(t, "", 1, idx)) + ngx_print(t) end if code then @@ -177,14 +179,19 @@ end function _M.hold_body_chunk(ctx, hold_the_copy) local body_buffer local chunk, eof = arg[1], arg[2] + + if not ctx._body_buffer then + ctx._body_buffer = {} + end + if type(chunk) == "string" and chunk ~= "" then - body_buffer = ctx._body_buffer + body_buffer = ctx._body_buffer[ctx._plugin_name] if not body_buffer then body_buffer = { chunk, n = 1 } - ctx._body_buffer = body_buffer + ctx._body_buffer[ctx._plugin_name] = body_buffer else local n = body_buffer.n + 1 body_buffer.n = n @@ -193,13 +200,13 @@ function _M.hold_body_chunk(ctx, hold_the_copy) end if eof then - body_buffer = ctx._body_buffer + body_buffer = ctx._body_buffer[ctx._plugin_name] if not body_buffer then return chunk end body_buffer = concat_tab(body_buffer, "", 1, body_buffer.n) - ctx._body_buffer = nil + ctx._body_buffer[ctx._plugin_name] = nil return body_buffer end diff --git a/apisix/core/version.lua b/apisix/core/version.lua index d83cba52dd98..5fa6af15eb53 100644 --- a/apisix/core/version.lua +++ b/apisix/core/version.lua @@ -20,5 +20,5 @@ -- @module core.version return { - VERSION = "3.2.0" + VERSION = "3.5.0" } diff --git a/apisix/discovery/consul/init.lua b/apisix/discovery/consul/init.lua index 686ab4120f18..ae1e4c64cc9c 100644 --- a/apisix/discovery/consul/init.lua +++ b/apisix/discovery/consul/init.lua @@ -32,6 +32,14 @@ local ngx_timer_every = ngx.timer.every local log = core.log local json_delay_encode = core.json.delay_encode local ngx_worker_id = ngx.worker.id +local thread_spawn = ngx.thread.spawn +local thread_wait = ngx.thread.wait +local thread_kill = ngx.thread.kill +local math_random = math.random +local pcall = pcall +local null = ngx.null +local type = 
type +local next = next local all_services = core.table.new(0, 5) local default_service @@ -44,9 +52,15 @@ local events_list local consul_services local default_skip_services = {"consul"} +local default_random_range = 5 +local default_catalog_error_index = -1 +local default_health_error_index = -2 +local watch_type_catalog = 1 +local watch_type_health = 2 +local max_retry_time = 256 local _M = { - version = 0.2, + version = 0.3, } @@ -128,7 +142,7 @@ local function read_dump_services() local now_time = ngx.time() log.info("dump file last_update: ", entity.last_update, ", dump_params.expire: ", dump_params.expire, ", now_time: ", now_time) - if dump_params.expire ~= 0 and (entity.last_update + dump_params.expire) < now_time then + if dump_params.expire ~= 0 and (entity.last_update + dump_params.expire) < now_time then log.warn("dump file: ", dump_params.path, " had expired, ignored it") return end @@ -145,7 +159,7 @@ local function write_dump_services() expire = dump_params.expire, -- later need handle it } local data = core.json.encode(entity) - local succ, err = util.write_file(dump_params.path, data) + local succ, err = util.write_file(dump_params.path, data) if not succ then log.error("write dump into file got error: ", err) end @@ -165,8 +179,9 @@ local function show_dump_file() return 200, data end + local function get_retry_delay(retry_delay) - if not retry_delay then + if not retry_delay or retry_delay >= max_retry_time then retry_delay = 1 else retry_delay = retry_delay * 4 @@ -176,80 +191,307 @@ local function get_retry_delay(retry_delay) end +local function get_opts(consul_server, is_catalog) + local opts = { + host = consul_server.host, + port = consul_server.port, + connect_timeout = consul_server.connect_timeout, + read_timeout = consul_server.read_timeout, + } + if not consul_server.keepalive then + return opts + end + + if is_catalog then + opts.default_args = { + wait = consul_server.wait_timeout, --blocked wait!=0; unblocked by wait=0 + index = 
consul_server.catalog_index, + } + else + opts.default_args = { + wait = consul_server.wait_timeout, --blocked wait!=0; unblocked by wait=0 + index = consul_server.health_index, + } + end + + return opts +end + + +local function watch_catalog(consul_server) + local client = resty_consul:new(get_opts(consul_server, true)) + + ::RETRY:: + local watch_result, watch_err = client:get(consul_server.consul_watch_catalog_url) + local watch_error_info = (watch_err ~= nil and watch_err) + or ((watch_result ~= nil and watch_result.status ~= 200) + and watch_result.status) + if watch_error_info then + log.error("connect consul: ", consul_server.consul_server_url, + " by sub url: ", consul_server.consul_watch_catalog_url, + ", got watch result: ", json_delay_encode(watch_result), + ", with error: ", watch_error_info) + + return watch_type_catalog, default_catalog_error_index + end + + if consul_server.catalog_index > 0 + and consul_server.catalog_index == tonumber(watch_result.headers['X-Consul-Index']) then + local random_delay = math_random(default_random_range) + log.info("watch catalog has no change, re-watch consul after ", random_delay, " seconds") + core_sleep(random_delay) + goto RETRY + end + + return watch_type_catalog, watch_result.headers['X-Consul-Index'] +end + + +local function watch_health(consul_server) + local client = resty_consul:new(get_opts(consul_server, false)) + + ::RETRY:: + local watch_result, watch_err = client:get(consul_server.consul_watch_health_url) + local watch_error_info = (watch_err ~= nil and watch_err) + or ((watch_result ~= nil and watch_result.status ~= 200) + and watch_result.status) + if watch_error_info then + log.error("connect consul: ", consul_server.consul_server_url, + " by sub url: ", consul_server.consul_watch_health_url, + ", got watch result: ", json_delay_encode(watch_result), + ", with error: ", watch_error_info) + + return watch_type_health, default_health_error_index + end + + if consul_server.health_index > 0 + and 
consul_server.health_index == tonumber(watch_result.headers['X-Consul-Index']) then + local random_delay = math_random(default_random_range) + log.info("watch health has no change, re-watch consul after ", random_delay, " seconds") + core_sleep(random_delay) + goto RETRY + end + + return watch_type_health, watch_result.headers['X-Consul-Index'] +end + + +local function check_keepalive(consul_server, retry_delay) + if consul_server.keepalive then + local ok, err = ngx_timer_at(0, _M.connect, consul_server, retry_delay) + if not ok then + log.error("create ngx_timer_at got error: ", err) + return + end + end +end + + +local function update_index(consul_server, catalog_index, health_index) + local c_index = 0 + local h_index = 0 + if catalog_index ~= nil then + c_index = tonumber(catalog_index) + end + + if health_index ~= nil then + h_index = tonumber(health_index) + end + + if c_index > 0 then + consul_server.catalog_index = c_index + end + + if h_index > 0 then + consul_server.health_index = h_index + end +end + + +local function is_not_empty(value) + if value == nil or value == null + or (type(value) == "table" and not next(value)) + or (type(value) == "string" and value == "") + then + return false + end + + return true +end + + +local function watch_result_is_valid(watch_type, index, catalog_index, health_index) + if index <= 0 then + return false + end + + if watch_type == watch_type_catalog then + if index == catalog_index then + return false + end + else + if index == health_index then + return false + end + end + + return true +end + + function _M.connect(premature, consul_server, retry_delay) if premature then return end + local catalog_thread, spawn_catalog_err = thread_spawn(watch_catalog, consul_server) + if not catalog_thread then + local random_delay = math_random(default_random_range) + log.error("failed to spawn thread watch catalog: ", spawn_catalog_err, + ", retry connecting consul after ", random_delay, " seconds") + core_sleep(random_delay) + + 
check_keepalive(consul_server, retry_delay) + return + end + + local health_thread, err = thread_spawn(watch_health, consul_server) + if not health_thread then + thread_kill(catalog_thread) + local random_delay = math_random(default_random_range) + log.error("failed to spawn thread watch health: ", err, ", retry connecting consul after ", + random_delay, " seconds") + core_sleep(random_delay) + + check_keepalive(consul_server, retry_delay) + return + end + + local thread_wait_ok, watch_type, index = thread_wait(catalog_thread, health_thread) + thread_kill(catalog_thread) + thread_kill(health_thread) + if not thread_wait_ok then + local random_delay = math_random(default_random_range) + log.error("failed to wait thread: ", watch_type, ", retry connecting consul after ", + random_delay, " seconds") + core_sleep(random_delay) + + check_keepalive(consul_server, retry_delay) + return + end + + -- double check index has changed + if not watch_result_is_valid(tonumber(watch_type), + tonumber(index), consul_server.catalog_index, consul_server.health_index) then + retry_delay = get_retry_delay(retry_delay) + log.warn("get all svcs got err, retry connecting consul after ", retry_delay, " seconds") + core_sleep(retry_delay) + + check_keepalive(consul_server, retry_delay) + return + end + local consul_client = resty_consul:new({ host = consul_server.host, port = consul_server.port, connect_timeout = consul_server.connect_timeout, read_timeout = consul_server.read_timeout, - default_args = consul_server.default_args, }) + local catalog_success, catalog_res, catalog_err = pcall(function() + return consul_client:get(consul_server.consul_watch_catalog_url) + end) + if not catalog_success then + log.error("connect consul: ", consul_server.consul_server_url, + " by sub url: ", consul_server.consul_watch_catalog_url, + ", got catalog result: ", json_delay_encode(catalog_res)) + check_keepalive(consul_server, retry_delay) + return + end + local catalog_error_info = (catalog_err ~= nil 
and catalog_err) + or ((catalog_res ~= nil and catalog_res.status ~= 200) + and catalog_res.status) + if catalog_error_info then + log.error("connect consul: ", consul_server.consul_server_url, + " by sub url: ", consul_server.consul_watch_catalog_url, + ", got catalog result: ", json_delay_encode(catalog_res), + ", with error: ", catalog_error_info) - log.info("consul_server: ", json_delay_encode(consul_server, true)) - local watch_result, watch_err = consul_client:get(consul_server.consul_watch_sub_url) - local watch_error_info = (watch_err ~= nil and watch_err) - or ((watch_result ~= nil and watch_result.status ~= 200) - and watch_result.status) - if watch_error_info then + retry_delay = get_retry_delay(retry_delay) + log.warn("get all svcs got err, retry connecting consul after ", retry_delay, " seconds") + core_sleep(retry_delay) + + check_keepalive(consul_server, retry_delay) + return + end + + -- get health index + local success, health_res, health_err = pcall(function() + return consul_client:get(consul_server.consul_watch_health_url) + end) + if not success then log.error("connect consul: ", consul_server.consul_server_url, - " by sub url: ", consul_server.consul_watch_sub_url, - ", got watch result: ", json_delay_encode(watch_result, true), - ", with error: ", watch_error_info) + " by sub url: ", consul_server.consul_watch_health_url, + ", got health result: ", json_delay_encode(health_res)) + check_keepalive(consul_server, retry_delay) + return + end + local health_error_info = (health_err ~= nil and health_err) + or ((health_res ~= nil and health_res.status ~= 200) + and health_res.status) + if health_error_info then + log.error("connect consul: ", consul_server.consul_server_url, + " by sub url: ", consul_server.consul_watch_health_url, + ", got health result: ", json_delay_encode(health_res), + ", with error: ", health_error_info) retry_delay = get_retry_delay(retry_delay) - log.warn("retry connecting consul after ", retry_delay, " seconds") + 
log.warn("get all svcs got err, retry connecting consul after ", retry_delay, " seconds") core_sleep(retry_delay) - goto ERR + check_keepalive(consul_server, retry_delay) + return end log.info("connect consul: ", consul_server.consul_server_url, - ", watch_result status: ", watch_result.status, - ", watch_result.headers.index: ", watch_result.headers['X-Consul-Index'], + ", catalog_result status: ", catalog_res.status, + ", catalog_result.headers.index: ", catalog_res.headers['X-Consul-Index'], ", consul_server.index: ", consul_server.index, - ", consul_server: ", json_delay_encode(consul_server, true)) - - -- if current index different last index then update service - if consul_server.index ~= watch_result.headers['X-Consul-Index'] then - local up_services = core.table.new(0, #watch_result.body) - local consul_client_svc = resty_consul:new({ - host = consul_server.host, - port = consul_server.port, - connect_timeout = consul_server.connect_timeout, - read_timeout = consul_server.read_timeout, - }) - for service_name, _ in pairs(watch_result.body) do + ", consul_server: ", json_delay_encode(consul_server)) + + -- if the current index is different from the last index, then update the service + if (consul_server.catalog_index ~= tonumber(catalog_res.headers['X-Consul-Index'])) + or (consul_server.health_index ~= tonumber(health_res.headers['X-Consul-Index'])) then + local up_services = core.table.new(0, #catalog_res.body) + for service_name, _ in pairs(catalog_res.body) do -- check if the service_name is 'skip service' if skip_service_map[service_name] then goto CONTINUE end + -- get node from service local svc_url = consul_server.consul_sub_url .. "/" .. 
service_name - local result, err = consul_client_svc:get(svc_url) - local error_info = (err ~= nil and err) or + local svc_success, result, get_err = pcall(function() + return consul_client:get(svc_url, {passing = true}) + end) + local error_info = (get_err ~= nil and get_err) or ((result ~= nil and result.status ~= 200) and result.status) - if error_info then + if not svc_success or error_info then log.error("connect consul: ", consul_server.consul_server_url, ", by service url: ", svc_url, ", with error: ", error_info) goto CONTINUE end -- decode body, decode json, update service, error handling - if result.body then - log.notice("service url: ", svc_url, - ", header: ", json_delay_encode(result.headers, true), - ", body: ", json_delay_encode(result.body, true)) + -- check result body is not nil and not empty + if is_not_empty(result.body) then -- add services to table local nodes = up_services[service_name] - for _, node in ipairs(result.body) do - local svc_address, svc_port = node.ServiceAddress, node.ServicePort - if not svc_address then - svc_address = node.Address + for _, node in ipairs(result.body) do + if not node.Service then + goto CONTINUE end + + local svc_address, svc_port = node.Service.Address, node.Service.Port -- if nodes is nil, new nodes table and set to up_services if not nodes then nodes = core.table.new(1, 0) @@ -270,8 +512,9 @@ function _M.connect(premature, consul_server, retry_delay) update_all_services(consul_server.consul_server_url, up_services) --update events - local ok, post_err = events.post(events_list._source, events_list.updating, all_services) - if not ok then + local post_ok, post_err = events.post(events_list._source, + events_list.updating, all_services) + if not post_ok then log.error("post_event failure with ", events_list._source, ", update all services error: ", post_err) end @@ -280,37 +523,17 @@ function _M.connect(premature, consul_server, retry_delay) ngx_timer_at(0, write_dump_services) end - consul_server.index = 
watch_result.headers['X-Consul-Index'] - -- only long connect type use index - if consul_server.keepalive then - consul_server.default_args.index = watch_result.headers['X-Consul-Index'] - end + update_index(consul_server, + catalog_res.headers['X-Consul-Index'], + health_res.headers['X-Consul-Index']) end - :: ERR :: - local keepalive = consul_server.keepalive - if keepalive then - local ok, err = ngx_timer_at(0, _M.connect, consul_server, retry_delay) - if not ok then - log.error("create ngx_timer_at got error: ", err) - return - end - end + check_keepalive(consul_server, retry_delay) end local function format_consul_params(consul_conf) local consul_server_list = core.table.new(0, #consul_conf.servers) - local args - - if consul_conf.keepalive == false then - args = {} - elseif consul_conf.keepalive then - args = { - wait = consul_conf.timeout.wait, --blocked wait!=0; unblocked by wait=0 - index = 0, - } - end for _, v in pairs(consul_conf.servers) do local scheme, host, port, path = unpack(http.parse_uri(nil, v)) @@ -319,23 +542,23 @@ local function format_consul_params(consul_conf) elseif path ~= "/" or core.string.has_suffix(v, '/') then return nil, "invalid consul server address, the valid format: http://address:port" end - core.table.insert(consul_server_list, { host = host, port = port, connect_timeout = consul_conf.timeout.connect, read_timeout = consul_conf.timeout.read, - consul_sub_url = "/catalog/service", - consul_watch_sub_url = "/catalog/services", + wait_timeout = consul_conf.timeout.wait, + consul_watch_catalog_url = "/catalog/services", + consul_sub_url = "/health/service", + consul_watch_health_url = "/health/state/any", consul_server_url = v .. 
"/v1", weight = consul_conf.weight, keepalive = consul_conf.keepalive, - default_args = args, - index = 0, + health_index = 0, + catalog_index = 0, fetch_interval = consul_conf.fetch_interval -- fetch interval to next connect consul }) end - return consul_server_list, nil end diff --git a/apisix/discovery/kubernetes/init.lua b/apisix/discovery/kubernetes/init.lua index 3f5f275d9aff..d16d4f4fcd31 100644 --- a/apisix/discovery/kubernetes/init.lua +++ b/apisix/discovery/kubernetes/init.lua @@ -26,7 +26,7 @@ local error = error local pcall = pcall local setmetatable = setmetatable local is_http = ngx.config.subsystem == "http" -local support_process, process = pcall(require, "ngx.process") +local process = require("ngx.process") local core = require("apisix.core") local util = require("apisix.cli.util") local local_conf = require("apisix.core.config_local").local_conf() @@ -520,11 +520,6 @@ end function _M.init_worker() - if not support_process then - core.log.error("kubernetes discovery not support in subsystem: ", ngx.config.subsystem, - ", please check if your openresty version >= 1.19.9.1 or not") - return - end local discovery_conf = local_conf.discovery.kubernetes core.log.info("kubernetes discovery conf: ", core.json.delay_encode(discovery_conf)) if #discovery_conf == 0 then diff --git a/apisix/discovery/nacos/init.lua b/apisix/discovery/nacos/init.lua index e12c025c56d5..2e06f5553493 100644 --- a/apisix/discovery/nacos/init.lua +++ b/apisix/discovery/nacos/init.lua @@ -361,7 +361,9 @@ function _M.nodes(service_name, discovery_args) waiting_time = waiting_time - step end - if not applications[namespace_id] or not applications[namespace_id][group_name] then + if not applications or not applications[namespace_id] + or not applications[namespace_id][group_name] + then return nil end return applications[namespace_id][group_name][service_name] diff --git a/apisix/discovery/tars/init.lua b/apisix/discovery/tars/init.lua index 14f658dce5d2..17bb2758b199 100644 --- 
a/apisix/discovery/tars/init.lua +++ b/apisix/discovery/tars/init.lua @@ -23,7 +23,7 @@ local local_conf = require("apisix.core.config_local").local_conf() local core = require("apisix.core") local mysql = require("resty.mysql") local is_http = ngx.config.subsystem == "http" -local support_process, process = pcall(require, "ngx.process") +local process = require("ngx.process") local endpoint_dict @@ -343,12 +343,6 @@ local function get_endpoint_dict() end function _M.init_worker() - if not support_process then - core.log.error("tars discovery not support in subsystem: ", ngx.config.subsystem, - ", please check if your openresty version >= 1.19.9.1 or not") - return - end - endpoint_dict = get_endpoint_dict() if not endpoint_dict then error("failed to get lua_shared_dict: tars, please check your APISIX version") diff --git a/apisix/global_rules.lua b/apisix/global_rules.lua new file mode 100644 index 000000000000..93fa2890d423 --- /dev/null +++ b/apisix/global_rules.lua @@ -0,0 +1,56 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- + +local core = require("apisix.core") +local plugin_checker = require("apisix.plugin").plugin_checker +local error = error + + +local _M = {} + +local global_rules + +function _M.init_worker() + local err + global_rules, err = core.config.new("/global_rules", { + automatic = true, + item_schema = core.schema.global_rule, + checker = plugin_checker, + }) + if not global_rules then + error("failed to create etcd instance for fetching /global_rules : " + .. err) + end +end + + +function _M.global_rules() + if not global_rules then + return nil, nil + end + return global_rules.values, global_rules.conf_version +end + + +function _M.get_pre_index() + if not global_rules then + return nil + end + return global_rules.prev_index +end + +return _M diff --git a/apisix/http/route.lua b/apisix/http/route.lua index d475646b56c6..dbf11abf5e28 100644 --- a/apisix/http/route.lua +++ b/apisix/http/route.lua @@ -103,8 +103,8 @@ function _M.create_radixtree_uri_router(routes, uri_routes, with_parameter) end -function _M.match_uri(uri_router, match_opts, api_ctx) - core.table.clear(match_opts) +function _M.match_uri(uri_router, api_ctx) + local match_opts = core.tablepool.fetch("route_match_opts", 0, 4) match_opts.method = api_ctx.var.request_method match_opts.host = api_ctx.var.host match_opts.remote_addr = api_ctx.var.remote_addr @@ -112,6 +112,7 @@ function _M.match_uri(uri_router, match_opts, api_ctx) match_opts.matched = core.tablepool.fetch("matched_route_record", 0, 4) local ok = uri_router:dispatch(api_ctx.var.uri, match_opts, api_ctx, match_opts) + core.tablepool.release("route_match_opts", match_opts) return ok end diff --git a/apisix/http/router/radixtree_host_uri.lua b/apisix/http/router/radixtree_host_uri.lua index 532576e53d4a..680a04fbe815 100644 --- a/apisix/http/router/radixtree_host_uri.lua +++ b/apisix/http/router/radixtree_host_uri.lua @@ -142,8 +142,6 @@ local function create_radixtree_router(routes) return true end - - local match_opts = {} function 
_M.match(api_ctx) local user_routes = _M.user_routes local _, service_version = get_services() @@ -162,7 +160,7 @@ end function _M.matching(api_ctx) core.log.info("route match mode: radixtree_host_uri") - core.table.clear(match_opts) + local match_opts = core.tablepool.fetch("route_match_opts", 0, 16) match_opts.method = api_ctx.var.request_method match_opts.remote_addr = api_ctx.var.remote_addr match_opts.vars = api_ctx.var @@ -181,11 +179,13 @@ function _M.matching(api_ctx) api_ctx.curr_req_matched._host = api_ctx.real_curr_req_matched_host:reverse() api_ctx.real_curr_req_matched_host = nil end + core.tablepool.release("route_match_opts", match_opts) return true end end local ok = only_uri_router:dispatch(api_ctx.var.uri, match_opts, api_ctx, match_opts) + core.tablepool.release("route_match_opts", match_opts) return ok end diff --git a/apisix/http/router/radixtree_uri.lua b/apisix/http/router/radixtree_uri.lua index 6e546364ac14..7c1b5c0c147a 100644 --- a/apisix/http/router/radixtree_uri.lua +++ b/apisix/http/router/radixtree_uri.lua @@ -27,7 +27,6 @@ local _M = {version = 0.2} local uri_routes = {} local uri_router - local match_opts = {} function _M.match(api_ctx) local user_routes = _M.user_routes local _, service_version = get_services() @@ -51,8 +50,7 @@ end function _M.matching(api_ctx) core.log.info("route match mode: radixtree_uri") - - return base_router.match_uri(uri_router, match_opts, api_ctx) + return base_router.match_uri(uri_router, api_ctx) end diff --git a/apisix/http/router/radixtree_uri_with_parameter.lua b/apisix/http/router/radixtree_uri_with_parameter.lua index 4bf7f3ebee5f..3f10f4fcac49 100644 --- a/apisix/http/router/radixtree_uri_with_parameter.lua +++ b/apisix/http/router/radixtree_uri_with_parameter.lua @@ -27,7 +27,6 @@ local _M = {} local uri_routes = {} local uri_router - local match_opts = {} function _M.match(api_ctx) local user_routes = _M.user_routes local _, service_version = get_services() @@ -51,8 +50,7 @@ end function 
_M.matching(api_ctx) core.log.info("route match mode: radixtree_uri_with_parameter") - - return base_router.match_uri(uri_router, match_opts, api_ctx) + return base_router.match_uri(uri_router, api_ctx) end diff --git a/apisix/init.lua b/apisix/init.lua index aef5b7ebeb23..86b68cf62208 100644 --- a/apisix/init.lua +++ b/apisix/init.lua @@ -40,6 +40,7 @@ local apisix_upstream = require("apisix.upstream") local apisix_secret = require("apisix.secret") local set_upstream = apisix_upstream.set_by_route local apisix_ssl = require("apisix.ssl") +local apisix_global_rules = require("apisix.global_rules") local upstream_util = require("apisix.utils.upstream") local xrpc = require("apisix.stream.xrpc") local ctxdump = require("resty.ctxdump") @@ -59,6 +60,7 @@ local str_sub = string.sub local tonumber = tonumber local type = type local pairs = pairs +local ngx_re_match = ngx.re.match local control_api_router local is_http = false @@ -76,6 +78,7 @@ local load_balancer local local_conf local ver_header = "APISIX/" .. core.version.VERSION +local has_mod, apisix_ngx_client = pcall(require, "resty.apisix.client") local _M = {version = 0.4} @@ -153,6 +156,8 @@ function _M.http_init_worker() consumer_group.init_worker() apisix_secret.init_worker() + apisix_global_rules.init_worker() + apisix_upstream.init_worker() require("apisix.plugins.ext-plugin.init").init_worker() @@ -173,12 +178,33 @@ end function _M.http_ssl_phase() + local ok, err = router.router_ssl.set(ngx.ctx.matched_ssl) + if not ok then + if err then + core.log.error("failed to fetch ssl config: ", err) + end + ngx_exit(-1) + end +end + + +function _M.http_ssl_client_hello_phase() + local sni, err = apisix_ssl.server_name(true) + if not sni or type(sni) ~= "string" then + local advise = "please check if the client requests via IP or uses an outdated " .. + "protocol. If you need to report an issue, " .. + "provide a packet capture file of the TLS handshake." + core.log.error("failed to find SNI: " .. 
(err or advise)) + ngx_exit(-1) + end + local ngx_ctx = ngx.ctx local api_ctx = core.tablepool.fetch("api_ctx", 0, 32) ngx_ctx.api_ctx = api_ctx - local ok, err = router.router_ssl.match_and_set(api_ctx) + local ok, err = router.router_ssl.match_and_set(api_ctx, true, sni) + ngx_ctx.matched_ssl = api_ctx.matched_ssl core.tablepool.release("api_ctx", api_ctx) ngx_ctx.api_ctx = nil @@ -186,6 +212,13 @@ function _M.http_ssl_phase() if err then core.log.error("failed to fetch ssl config: ", err) end + core.log.error("failed to match any SSL certificate by SNI: ", sni) + ngx_exit(-1) + end + + ok, err = apisix_ssl.set_protocols_by_clienthello(ngx_ctx.matched_ssl.value.ssl_protocols) + if not ok then + core.log.error("failed to set ssl protocols: ", err) ngx_exit(-1) end end @@ -310,12 +343,38 @@ local function verify_tls_client(ctx) end +local function uri_matches_skip_mtls_route_patterns(ssl, uri) + for _, pat in ipairs(ssl.value.client.skip_mtls_uri_regex) do + if ngx_re_match(uri, pat, "jo") then + return true + end + end +end + + local function verify_https_client(ctx) local scheme = ctx.var.scheme if scheme ~= "https" then return true end + local matched_ssl = ngx.ctx.matched_ssl + if matched_ssl.value.client + and matched_ssl.value.client.skip_mtls_uri_regex + and apisix_ssl.support_client_verification() + and (not uri_matches_skip_mtls_route_patterns(matched_ssl, ngx.var.uri)) then + local res = ctx.var.ssl_client_verify + if res ~= "SUCCESS" then + if res == "NONE" then + core.log.error("client certificate was not present") + else + core.log.error("client certificate verification is not passed: ", res) + end + + return false + end + end + local host = ctx.var.host local matched = router.router_ssl.match_and_set(ctx, true, host) if not matched then @@ -568,7 +627,8 @@ function _M.http_access_phase() local route = api_ctx.matched_route if not route then -- run global rule when there is no matching route - plugin.run_global_rules(api_ctx, router.global_rules, nil) 
+ local global_rules = apisix_global_rules.global_rules() + plugin.run_global_rules(api_ctx, global_rules, nil) core.log.info("not find any matched route") return core.response.exit(404, @@ -620,7 +680,8 @@ function _M.http_access_phase() api_ctx.route_name = route.value.name -- run global rule - plugin.run_global_rules(api_ctx, router.global_rules, nil) + local global_rules = apisix_global_rules.global_rules() + plugin.run_global_rules(api_ctx, global_rules, nil) if route.value.script then script.load(route, api_ctx) @@ -688,6 +749,10 @@ function _M.grpc_access_phase() core.log.error("failed to set grpcs upstream param: ", err) core.response.exit(code) end + + if api_ctx.enable_mirror == true and has_mod then + apisix_ngx_client.enable_mirror() + end end diff --git a/apisix/inspect/dbg.lua b/apisix/inspect/dbg.lua index 7f4e7b114424..2fd78782faac 100644 --- a/apisix/inspect/dbg.lua +++ b/apisix/inspect/dbg.lua @@ -98,6 +98,9 @@ local function hook(_, arg) if #hooks == 0 then core.log.warn("inspect: all hooks removed") debug.sethook() + if jit then + jit.on() + end end end end diff --git a/apisix/plugin.lua b/apisix/plugin.lua index 3b5cecac6ded..bde2b89a5393 100644 --- a/apisix/plugin.lua +++ b/apisix/plugin.lua @@ -775,6 +775,11 @@ function _M.get(name) end +function _M.get_stream(name) + return stream_local_plugins_hash and stream_local_plugins_hash[name] +end + + function _M.get_all(attrs) local http_plugins = {} local stream_plugins = {} @@ -858,10 +863,6 @@ end local function get_plugin_schema_for_gde(name, schema_type) - if not enable_gde() then - return nil - end - local plugin_schema = local_plugins_hash and local_plugins_hash[name] if not plugin_schema then return nil @@ -881,6 +882,9 @@ end local function decrypt_conf(name, conf, schema_type) + if not enable_gde() then + return + end local schema = get_plugin_schema_for_gde(name, schema_type) if not schema then core.log.warn("failed to get schema for plugin: ", name) @@ -924,6 +928,9 @@ _M.decrypt_conf = 
decrypt_conf local function encrypt_conf(name, conf, schema_type) + if not enable_gde() then + return + end local schema = get_plugin_schema_for_gde(name, schema_type) if not schema then core.log.warn("failed to get schema for plugin: ", name) @@ -1091,7 +1098,9 @@ function _M.run_plugin(phase, plugins, api_ctx) end plugin_run = true + api_ctx._plugin_name = plugins[i]["name"] local code, body = phase_func(conf, api_ctx) + api_ctx._plugin_name = nil if code or body then if is_http then if code >= 400 then @@ -1126,7 +1135,9 @@ function _M.run_plugin(phase, plugins, api_ctx) local conf = plugins[i + 1] if phase_func and meta_filter(api_ctx, plugins[i]["name"], conf) then plugin_run = true + api_ctx._plugin_name = plugins[i]["name"] phase_func(conf, api_ctx) + api_ctx._plugin_name = nil end end @@ -1135,8 +1146,7 @@ end function _M.run_global_rules(api_ctx, global_rules, phase_name) - if global_rules and global_rules.values - and #global_rules.values > 0 then + if global_rules and #global_rules > 0 then local orig_conf_type = api_ctx.conf_type local orig_conf_version = api_ctx.conf_version local orig_conf_id = api_ctx.conf_id @@ -1146,7 +1156,7 @@ function _M.run_global_rules(api_ctx, global_rules, phase_name) end local plugins = core.tablepool.fetch("plugins", 32, 0) - local values = global_rules.values + local values = global_rules local route = api_ctx.matched_route for _, global_rule in config_util.iterate_values(values) do api_ctx.conf_type = "global_rule" diff --git a/apisix/plugin_config.lua b/apisix/plugin_config.lua index cc5a6ff38456..88b17d4b83ac 100644 --- a/apisix/plugin_config.lua +++ b/apisix/plugin_config.lua @@ -40,6 +40,14 @@ function _M.init_worker() end +function _M.plugin_configs() + if not plugin_configs then + return nil, nil + end + return plugin_configs.values, plugin_configs.conf_version +end + + function _M.get(id) return plugin_configs:get(id) end @@ -70,8 +78,7 @@ function _M.merge(route_conf, plugin_config) end end - 
route_conf.update_count = route_conf.update_count + 1 - route_conf.modifiedIndex = route_conf.orig_modifiedIndex .. "#" .. route_conf.update_count + route_conf.modifiedIndex = route_conf.orig_modifiedIndex .. "#" .. plugin_config.modifiedIndex route_conf.prev_plugin_config_ver = plugin_config.modifiedIndex return route_conf diff --git a/apisix/plugins/ai.lua b/apisix/plugins/ai.lua index b46249a9a190..39430c7ad014 100644 --- a/apisix/plugins/ai.lua +++ b/apisix/plugins/ai.lua @@ -18,6 +18,7 @@ local require = require local apisix = require("apisix") local core = require("apisix.core") local router = require("apisix.router") +local get_global_rules = require("apisix.global_rules").global_rules local event = require("apisix.core.event") local balancer = require("ngx.balancer") local ngx = ngx @@ -229,8 +230,8 @@ local function routes_analyze(routes) end end - local global_rules_flag = router.global_rules and router.global_rules.values - and #router.global_rules.values ~= 0 + local global_rules, _ = get_global_rules() + local global_rules_flag = global_rules and #global_rules ~= 0 if route_flags["vars"] or route_flags["filter_func"] or route_flags["remote_addr"] diff --git a/apisix/plugins/aws-lambda.lua b/apisix/plugins/aws-lambda.lua index fe4d7f394cd2..1b172af4135d 100644 --- a/apisix/plugins/aws-lambda.lua +++ b/apisix/plugins/aws-lambda.lua @@ -122,8 +122,10 @@ local function request_processor(conf, ctx, params) -- computing canonical query string local canonical_qs = {} + local canonical_qs_i = 0 for k, v in pairs(params.query) do - canonical_qs[#canonical_qs+1] = ngx.unescape_uri(k) .. "=" .. ngx.unescape_uri(v) + canonical_qs_i = canonical_qs_i + 1 + canonical_qs[canonical_qs_i] = ngx.unescape_uri(k) .. "=" .. 
ngx.unescape_uri(v) end tab_sort(canonical_qs) @@ -132,10 +134,12 @@ local function request_processor(conf, ctx, params) -- computing canonical and signed headers local canonical_headers, signed_headers = {}, {} + local signed_headers_i = 0 for k, v in pairs(headers) do k = k:lower() if k ~= "connection" then - signed_headers[#signed_headers+1] = k + signed_headers_i = signed_headers_i + 1 + signed_headers[signed_headers_i] = k -- strip starting and trailing spaces including strip multiple spaces into single space canonical_headers[k] = str_strip(v) end diff --git a/apisix/plugins/batch-requests.lua b/apisix/plugins/batch-requests.lua index 54dfba07d488..a1b574307ef6 100644 --- a/apisix/plugins/batch-requests.lua +++ b/apisix/plugins/batch-requests.lua @@ -275,7 +275,14 @@ local function batch_requests(ctx) headers = resp.headers, } if resp.has_body then - sub_resp.body = resp:read_body() + local err + sub_resp.body, err = resp:read_body() + if err then + sub_resp.read_body_err = err + core.log.error("read pipeline response body failed: ", err) + else + resp:read_trailers() + end end core.table.insert(aggregated_resp, sub_resp) end diff --git a/apisix/plugins/body-transformer.lua b/apisix/plugins/body-transformer.lua index 9ca6b86bad51..5b5557f7d4d7 100644 --- a/apisix/plugins/body-transformer.lua +++ b/apisix/plugins/body-transformer.lua @@ -25,6 +25,7 @@ local str_format = string.format local type = type local pcall = pcall local pairs = pairs +local next = next local transform_schema = { @@ -74,6 +75,10 @@ end local function remove_namespace(tbl) for k, v in pairs(tbl) do + if type(v) == "table" and next(v) == nil then + v = "" + tbl[k] = v + end if type(k) == "string" then local newk = k:match(".*:(.*)") if newk then @@ -112,7 +117,7 @@ end local function transform(conf, body, typ, ctx) - local out = {_body = body} + local out = {} if body then local err local format = conf[typ].input_format @@ -123,6 +128,8 @@ local function transform(conf, body, typ, ctx) 
core.log.error(err, ", body=", body) return nil, 400, err end + else + core.log.warn("no input format to parse ", typ, " body") end end @@ -137,6 +144,7 @@ local function transform(conf, body, typ, ctx) end out._ctx = ctx + out._body = body out._escape_xml = escape_xml out._escape_json = escape_json local ok, render_out = pcall(render, out) diff --git a/apisix/plugins/chaitin-waf.lua b/apisix/plugins/chaitin-waf.lua new file mode 100644 index 000000000000..cc870a47f47e --- /dev/null +++ b/apisix/plugins/chaitin-waf.lua @@ -0,0 +1,373 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- +local require = require +local core = require("apisix.core") +local rr_balancer = require("apisix.balancer.roundrobin") +local plugin = require("apisix.plugin") +local t1k = require "resty.t1k" +local expr = require("resty.expr.v1") + +local ngx = ngx +local ngx_now = ngx.now +local string = string +local fmt = string.format +local tostring = tostring +local tonumber = tonumber +local ipairs = ipairs + +local plugin_name = "chaitin-waf" + +local vars_schema = { + type = "array", +} + +local match_schema = { + type = "array", + items = { + type = "object", + properties = { + vars = vars_schema + } + }, +} + +local plugin_schema = { + type = "object", + properties = { + -- TODO: we should add a configuration "mode" here + -- It can be one of off, block and monitor + match = match_schema, + append_waf_resp_header = { + type = "boolean", + default = true + }, + append_waf_debug_header = { + type = "boolean", + default = false + }, + config = { + type = "object", + properties = { + connect_timeout = { + type = "integer", + }, + send_timeout = { + type = "integer", + }, + read_timeout = { + type = "integer", + }, + req_body_size = { + type = "integer", + }, + keepalive_size = { + type = "integer", + }, + keepalive_timeout = { + type = "integer", + } + }, + }, + }, +} + +local metadata_schema = { + type = "object", + properties = { + nodes = { + type = "array", + items = { + type = "object", + properties = { + host = { + type = "string", + pattern = "^\\*?[0-9a-zA-Z-._\\[\\]:/]+$" + }, + port = { + type = "integer", + minimum = 1, + default = 80 + }, + }, + required = { "host" } + }, + minItems = 1, + }, + config = { + type = "object", + properties = { + connect_timeout = { + type = "integer", + default = 1000 -- milliseconds + }, + send_timeout = { + type = "integer", + default = 1000 -- milliseconds + }, + read_timeout = { + type = "integer", + default = 1000 -- milliseconds + }, + req_body_size = { + type = "integer", + default = 1024 -- milliseconds + }, + -- 
maximum concurrent idle connections to + -- the SafeLine WAF detection service + keepalive_size = { + type = "integer", + default = 256 + }, + keepalive_timeout = { + type = "integer", + default = 60000 -- milliseconds + }, + -- TODO: we need a configuration to enable/disable the real client ip + -- the real client ip is calculated by APISIX + }, + default = {}, + }, + }, + required = { "nodes" }, +} + +local _M = { + version = 0.1, + priority = 2700, + name = plugin_name, + schema = plugin_schema, + metadata_schema = metadata_schema +} + +local global_server_picker + +local HEADER_CHAITIN_WAF = "X-APISIX-CHAITIN-WAF" +local HEADER_CHAITIN_WAF_ERROR = "X-APISIX-CHAITIN-WAF-ERROR" +local HEADER_CHAITIN_WAF_TIME = "X-APISIX-CHAITIN-WAF-TIME" +local HEADER_CHAITIN_WAF_STATUS = "X-APISIX-CHAITIN-WAF-STATUS" +local HEADER_CHAITIN_WAF_ACTION = "X-APISIX-CHAITIN-WAF-ACTION" +local HEADER_CHAITIN_WAF_SERVER = "X-APISIX-CHAITIN-WAF-SERVER" +local blocked_message = [[{"code": %s, "success":false, ]] .. + [["message": "blocked by Chaitin SafeLine Web Application Firewall", "event_id": "%s"}]] + + +function _M.check_schema(conf, schema_type) + if schema_type == core.schema.TYPE_METADATA then + return core.schema.check(metadata_schema, conf) + end + + local ok, err = core.schema.check(plugin_schema, conf) + + if not ok then + return false, err + end + + if conf.match then + for _, m in ipairs(conf.match) do + local ok, err = expr.new(m.vars) + if not ok then + return false, "failed to validate the 'vars' expression: " .. err + end + end + end + + return true +end + + +local function get_healthy_chaitin_server_nodes(metadata, checker) + local nodes = metadata.nodes + local new_nodes = core.table.new(0, #nodes) + + for i = 1, #nodes do + local host, port = nodes[i].host, nodes[i].port + new_nodes[host .. ":" .. 
tostring(port)] = 1 + end + + return new_nodes +end + + +local function get_chaitin_server(metadata, ctx) + if not global_server_picker or global_server_picker.upstream ~= metadata.value.nodes then + local up_nodes = get_healthy_chaitin_server_nodes(metadata.value) + if core.table.nkeys(up_nodes) == 0 then + return nil, nil, "no healthy nodes" + end + core.log.info("chaitin-waf nodes: ", core.json.delay_encode(up_nodes)) + + global_server_picker = rr_balancer.new(up_nodes, metadata.value.nodes) + end + + local server = global_server_picker.get(ctx) + local host, port, err = core.utils.parse_addr(server) + if err then + return nil, nil, err + end + + return host, port, nil +end + + +local function check_match(conf, ctx) + local match_passed = true + + if conf.match then + for _, match in ipairs(conf.match) do + -- todo: use lrucache to cache the result + local exp, err = expr.new(match.vars) + if err then + local msg = "failed to create match expression for " .. + tostring(match.vars) .. ", err: " .. 
tostring(err) + core.log.error(msg) + return false, msg + end + + match_passed = exp:eval(ctx.var) + if match_passed then + break + end + end + end + + return match_passed, nil +end + + +local function get_conf(conf, metadata) + local t = { + mode = "block", + } + + if metadata.config then + t.connect_timeout = metadata.config.connect_timeout + t.send_timeout = metadata.config.send_timeout + t.read_timeout = metadata.config.read_timeout + t.req_body_size = metadata.config.req_body_size + t.keepalive_size = metadata.config.keepalive_size + t.keepalive_timeout = metadata.config.keepalive_timeout + end + + if conf.config then + t.connect_timeout = conf.config.connect_timeout + t.send_timeout = conf.config.send_timeout + t.read_timeout = conf.config.read_timeout + t.req_body_size = conf.config.req_body_size + t.keepalive_size = conf.config.keepalive_size + t.keepalive_timeout = conf.config.keepalive_timeout + end + + return t +end + + +local function do_access(conf, ctx) + local extra_headers = {} + + local match, err = check_match(conf, ctx) + if not match then + if err then + extra_headers[HEADER_CHAITIN_WAF] = "err" + extra_headers[HEADER_CHAITIN_WAF_ERROR] = tostring(err) + return 500, nil, extra_headers + else + extra_headers[HEADER_CHAITIN_WAF] = "no" + return nil, nil, extra_headers + end + end + + local metadata = plugin.plugin_metadata(plugin_name) + if not core.table.try_read_attr(metadata, "value", "nodes") then + extra_headers[HEADER_CHAITIN_WAF] = "err" + extra_headers[HEADER_CHAITIN_WAF_ERROR] = "missing metadata" + return 500, nil, extra_headers + end + + local host, port, err = get_chaitin_server(metadata, ctx) + if err then + extra_headers[HEADER_CHAITIN_WAF] = "unhealthy" + extra_headers[HEADER_CHAITIN_WAF_ERROR] = tostring(err) + + return 500, nil, extra_headers + end + + core.log.info("picked chaitin-waf server: ", host, ":", port) + + local t = get_conf(conf, metadata.value) + t.host = host + t.port = port + + 
extra_headers[HEADER_CHAITIN_WAF_SERVER] = host + extra_headers[HEADER_CHAITIN_WAF] = "yes" + + local start_time = ngx_now() * 1000 + local ok, err, result = t1k.do_access(t, false) + if not ok then + extra_headers[HEADER_CHAITIN_WAF] = "waf-err" + local err_msg = tostring(err) + if core.string.find(err_msg, "timeout") then + extra_headers[HEADER_CHAITIN_WAF] = "timeout" + end + extra_headers[HEADER_CHAITIN_WAF_ERROR] = tostring(err) + else + extra_headers[HEADER_CHAITIN_WAF_ACTION] = "pass" + end + extra_headers[HEADER_CHAITIN_WAF_TIME] = ngx_now() * 1000 - start_time + + local code = 200 + extra_headers[HEADER_CHAITIN_WAF_STATUS] = code + if result then + if result.status then + code = result.status + extra_headers[HEADER_CHAITIN_WAF_STATUS] = code + extra_headers[HEADER_CHAITIN_WAF_ACTION] = "reject" + + core.log.error("request rejected by chaitin-waf, event_id: " .. result.event_id) + return tonumber(code), fmt(blocked_message, code, + result.event_id) .. "\n", extra_headers + end + end + if not ok then + extra_headers[HEADER_CHAITIN_WAF_STATUS] = nil + end + + return nil, nil, extra_headers +end + + +function _M.access(conf, ctx) + local code, msg, extra_headers = do_access(conf, ctx) + + if not conf.append_waf_debug_header then + extra_headers[HEADER_CHAITIN_WAF_ERROR] = nil + extra_headers[HEADER_CHAITIN_WAF_SERVER] = nil + end + + if conf.append_waf_resp_header then + core.response.set_header(extra_headers) + end + + return code, msg +end + + +function _M.header_filter(conf, ctx) + t1k.do_header_filter() +end + + +return _M diff --git a/apisix/plugins/consumer-restriction.lua b/apisix/plugins/consumer-restriction.lua index 93f3b9f59f5f..88c2bbd959d6 100644 --- a/apisix/plugins/consumer-restriction.lua +++ b/apisix/plugins/consumer-restriction.lua @@ -128,7 +128,9 @@ function _M.access(conf, ctx) local method = ngx.req.get_method() if not value then - return 401, { message = "Missing authentication or identity verification."} + local err_msg = "The request 
is rejected, please check the " + .. conf.type .. " for this request" + return 401, { message = err_msg} end core.log.info("value: ", value) diff --git a/apisix/plugins/error-log-logger.lua b/apisix/plugins/error-log-logger.lua index a0a364564a27..5d5c5d2a9f8b 100644 --- a/apisix/plugins/error-log-logger.lua +++ b/apisix/plugins/error-log-logger.lua @@ -237,10 +237,15 @@ local function send_to_skywalking(log_message) httpc:set_timeout(config.timeout * 1000) local entries = {} + local service_instance_name = config.skywalking.service_instance_name + if service_instance_name == "$hostname" then + service_instance_name = core.utils.gethostname() + end + for i = 1, #log_message, 2 do local content = { service = config.skywalking.service_name, - serviceInstance = config.skywalking.service_instance_name, + serviceInstance = service_instance_name, endpoint = "", body = { text = { diff --git a/apisix/plugins/fault-injection.lua b/apisix/plugins/fault-injection.lua index 9089b41681ac..34ca05e817fb 100644 --- a/apisix/plugins/fault-injection.lua +++ b/apisix/plugins/fault-injection.lua @@ -20,6 +20,9 @@ local expr = require("resty.expr.v1") local sleep = core.sleep local random = math.random local ipairs = ipairs +local ngx = ngx +local pairs = pairs +local type = type local plugin_name = "fault-injection" @@ -32,6 +35,18 @@ local schema = { properties = { http_status = {type = "integer", minimum = 200}, body = {type = "string", minLength = 0}, + headers = { + type = "object", + minProperties = 1, + patternProperties = { + ["^[^:]+$"] = { + oneOf = { + { type = "string" }, + { type = "number" } + } + } + } + }, percentage = {type = "integer", minimum = 0, maximum = 100}, vars = { type = "array", @@ -144,6 +159,14 @@ function _M.rewrite(conf, ctx) end if conf.abort and sample_hit(conf.abort.percentage) and abort_vars then + if conf.abort.headers then + for header_name, header_value in pairs(conf.abort.headers) do + if type(header_value) == "string" then + header_value = 
core.utils.resolve_var(header_value, ctx.var) + end + ngx.header[header_name] = header_value + end + end return conf.abort.http_status, core.utils.resolve_var(conf.abort.body, ctx.var) end end diff --git a/apisix/plugins/file-logger.lua b/apisix/plugins/file-logger.lua index 97140abfac91..d07c49270798 100644 --- a/apisix/plugins/file-logger.lua +++ b/apisix/plugins/file-logger.lua @@ -16,6 +16,7 @@ -- local log_util = require("apisix.utils.log-util") local core = require("apisix.core") +local expr = require("resty.expr.v1") local ngx = ngx local io_open = io.open local is_apisix_or, process = pcall(require, "resty.apisix.process") @@ -38,6 +39,13 @@ local schema = { items = { type = "array" } + }, + match = { + type = "array", + maxItems = 20, + items = { + type = "array", + }, } }, required = {"path"} @@ -65,6 +73,12 @@ function _M.check_schema(conf, schema_type) if schema_type == core.schema.TYPE_METADATA then return core.schema.check(metadata_schema, conf) end + if conf.match then + local ok, err = expr.new(conf.match) + if not ok then + return nil, "failed to validate the 'match' expression: " .. 
err + end + end return core.schema.check(schema, conf) end @@ -150,6 +164,9 @@ end function _M.log(conf, ctx) local entry = log_util.get_log_entry(plugin_name, conf, ctx) + if entry == nil then + return + end write_file_data(conf, entry) end diff --git a/apisix/plugins/forward-auth.lua b/apisix/plugins/forward-auth.lua index 3bc8a0e50265..39e690038890 100644 --- a/apisix/plugins/forward-auth.lua +++ b/apisix/plugins/forward-auth.lua @@ -23,6 +23,7 @@ local schema = { type = "object", properties = { uri = {type = "string"}, + allow_degradation = {type = "boolean", default = false}, ssl_verify = { type = "boolean", default = true, @@ -118,9 +119,9 @@ function _M.access(conf, ctx) httpc:set_timeout(conf.timeout) local res, err = httpc:request_uri(conf.uri, params) - - -- block by default when authorization service is unavailable - if not res then + if not res and conf.allow_degradation then + return + elseif not res then core.log.error("failed to process forward auth, err: ", err) return 403 end diff --git a/apisix/plugins/google-cloud-logging.lua b/apisix/plugins/google-cloud-logging.lua index 3fb34ab43efe..6e71cc81b300 100644 --- a/apisix/plugins/google-cloud-logging.lua +++ b/apisix/plugins/google-cloud-logging.lua @@ -35,6 +35,7 @@ local schema = { auth_config = { type = "object", properties = { + client_email = { type = "string" }, private_key = { type = "string" }, project_id = { type = "string" }, token_uri = { @@ -62,7 +63,7 @@ local schema = { default = "https://logging.googleapis.com/v2/entries:write" }, }, - required = { "private_key", "project_id", "token_uri" } + required = { "client_email", "private_key", "project_id", "token_uri" } }, ssl_verify = { type = "boolean", diff --git a/apisix/plugins/grpc-transcode/proto.lua b/apisix/plugins/grpc-transcode/proto.lua index 1c9b7718fdc1..347ec39eae17 100644 --- a/apisix/plugins/grpc-transcode/proto.lua +++ b/apisix/plugins/grpc-transcode/proto.lua @@ -99,7 +99,7 @@ end local function compile_proto(content) -- 
clear pb state - pb.state(nil) + local old_pb_state = pb.state(nil) local compiled, err = compile_proto_text(content) if not compiled then @@ -110,7 +110,7 @@ local function compile_proto(content) end -- fetch pb state - compiled.pb_state = pb.state(nil) + compiled.pb_state = pb.state(old_pb_state) return compiled end diff --git a/apisix/plugins/grpc-transcode/request.lua b/apisix/plugins/grpc-transcode/request.lua index 88d2fcfb9ec8..934a1c95657c 100644 --- a/apisix/plugins/grpc-transcode/request.lua +++ b/apisix/plugins/grpc-transcode/request.lua @@ -36,10 +36,12 @@ return function (proto, service, method, pb_option, deadline, default_values) req_read_body() + local pb_old_state = pb.state(proto.pb_state) util.set_options(proto, pb_option) local map_message = util.map_message(m.input_type, default_values or {}) local ok, encoded = pcall(pb.encode, m.input_type, map_message) + pb.state(pb_old_state) if not ok or not encoded then return false, "failed to encode request data to protobuf", 400 diff --git a/apisix/plugins/grpc-transcode/response.lua b/apisix/plugins/grpc-transcode/response.lua index dee267b77c1f..9dd6780f049d 100644 --- a/apisix/plugins/grpc-transcode/response.lua +++ b/apisix/plugins/grpc-transcode/response.lua @@ -25,7 +25,7 @@ local ipairs = ipairs local pcall = pcall -local function handle_error_response(status_detail_type) +local function handle_error_response(status_detail_type, proto) local err_msg local grpc_status = ngx.header["grpc-status-details-bin"] @@ -58,7 +58,9 @@ local function handle_error_response(status_detail_type) if status_detail_type and details then local decoded_details = {} for _, detail in ipairs(details) do + local pb_old_state = pb.state(proto.pb_state) local ok, err_or_value = pcall(pb.decode, status_detail_type, detail.value) + pb.state(pb_old_state) if not ok then err_msg = "failed to call pb.decode to decode details in " .. 
"grpc-status-details-bin" @@ -99,7 +101,7 @@ return function(ctx, proto, service, method, pb_option, show_status_in_body, sta -- handle error response after the last response chunk if ngx.status >= 300 and show_status_in_body then - return handle_error_response(status_detail_type) + return handle_error_response(status_detail_type, proto) end -- when body has already been read by other plugin @@ -118,10 +120,12 @@ return function(ctx, proto, service, method, pb_option, show_status_in_body, sta buffer = string.sub(buffer, 6) end + local pb_old_state = pb.state(proto.pb_state) util.set_options(proto, pb_option) local err_msg local decoded = pb.decode(m.output_type, buffer) + pb.state(pb_old_state) if not decoded then err_msg = "failed to decode response data by protobuf" ngx.arg[1] = err_msg diff --git a/apisix/plugins/grpc-transcode/util.lua b/apisix/plugins/grpc-transcode/util.lua index 4a27c1d3b9df..a95cb8202041 100644 --- a/apisix/plugins/grpc-transcode/util.lua +++ b/apisix/plugins/grpc-transcode/util.lua @@ -48,8 +48,6 @@ function _M.find_method(proto, service, method) return nil end - -- restore pb state - pb.state(proto.pb_state) return res end diff --git a/apisix/plugins/http-logger.lua b/apisix/plugins/http-logger.lua index 399d50da1eed..5dd52240f8df 100644 --- a/apisix/plugins/http-logger.lua +++ b/apisix/plugins/http-logger.lua @@ -122,7 +122,7 @@ local function send_http_data(conf, log_message) local httpc_res, httpc_err = httpc:request({ method = "POST", - path = url_decoded.path, + path = #url_decoded.path ~= 0 and url_decoded.path or "/", query = url_decoded.query, body = log_message, headers = { diff --git a/apisix/plugins/jwt-auth.lua b/apisix/plugins/jwt-auth.lua index f195830a696c..26211b98dece 100644 --- a/apisix/plugins/jwt-auth.lua +++ b/apisix/plugins/jwt-auth.lua @@ -220,7 +220,7 @@ local function fetch_jwt_token(conf, ctx) return val end -local function get_secret(conf, consumer_name) +local function get_secret(conf) local secret = 
conf.secret if conf.base64_secret then @@ -231,7 +231,7 @@ local function get_secret(conf, consumer_name) end -local function get_rsa_or_ecdsa_keypair(conf, consumer_name) +local function get_rsa_or_ecdsa_keypair(conf) local public_key = conf.public_key local private_key = conf.private_key @@ -261,7 +261,7 @@ end local function sign_jwt_with_HS(key, consumer, payload) - local auth_secret, err = get_secret(consumer.auth_conf, consumer.username) + local auth_secret, err = get_secret(consumer.auth_conf) if not auth_secret then core.log.error("failed to sign jwt, err: ", err) core.response.exit(503, "failed to sign jwt") @@ -286,7 +286,7 @@ end local function sign_jwt_with_RS256_ES256(key, consumer, payload) local public_key, private_key, err = get_rsa_or_ecdsa_keypair( - consumer.auth_conf, consumer.username + consumer.auth_conf ) if not public_key then core.log.error("failed to sign jwt, err: ", err) @@ -321,13 +321,13 @@ local function algorithm_handler(consumer, method_only) return sign_jwt_with_HS end - return get_secret(consumer.auth_conf, consumer.username) + return get_secret(consumer.auth_conf) elseif consumer.auth_conf.algorithm == "RS256" or consumer.auth_conf.algorithm == "ES256" then if method_only then return sign_jwt_with_RS256_ES256 end - local public_key, _, err = get_rsa_or_ecdsa_keypair(consumer.auth_conf, consumer.username) + local public_key, _, err = get_rsa_or_ecdsa_keypair(consumer.auth_conf) return public_key, err end end diff --git a/apisix/plugins/ldap-auth.lua b/apisix/plugins/ldap-auth.lua index 41156c1bfb6e..11f205c6b8f5 100644 --- a/apisix/plugins/ldap-auth.lua +++ b/apisix/plugins/ldap-auth.lua @@ -18,7 +18,7 @@ local core = require("apisix.core") local ngx = ngx local ngx_re = require("ngx.re") local consumer_mod = require("apisix.consumer") -local ok, ldap_cli = pcall(require, "resty.ldap.client") +local ldap = require("resty.ldap") local schema = { type = "object", @@ -100,11 +100,6 @@ local function extract_auth_header(authorization) 
end function _M.rewrite(conf, ctx) - if not ok then -- ensure rasn library loaded - core.log.error("failed to load lua-resty-ldap lib: ", ldap_cli) - return 501 - end - core.log.info("plugin rewrite phase, conf: ", core.json.delay_encode(conf)) -- 1. extract authorization from header @@ -115,31 +110,36 @@ function _M.rewrite(conf, ctx) end local user, err = extract_auth_header(auth_header) - if err then - core.log.warn(err) + if err or not user then + if err then + core.log.warn(err) + else + core.log.warn("nil user") + end return 401, { message = "Invalid authorization in request" } end -- 2. try authenticate the user against the ldap server local ldap_host, ldap_port = core.utils.parse_addr(conf.ldap_uri) - local ldap_client = ldap_cli:new(ldap_host, ldap_port, { + local ldapconf = { + timeout = 10000, start_tls = false, + ldap_host = ldap_host, + ldap_port = ldap_port or 389, ldaps = conf.use_tls, - ssl_verify = conf.tls_verify, - socket_timeout = 10000, - keepalive_pool_name = ldap_host .. ":" .. ldap_port .. "_ldapauth" - .. (conf.use_tls and "_tls" or ""), - keepalive_pool_size = 5, - keepalive_timeout = 60000, - }) - - local user_dn = conf.uid .. "=" .. user.username .. "," .. conf.base_dn - local res, err = ldap_client:simple_bind(user_dn, user.password) + tls_verify = conf.tls_verify, + base_dn = conf.base_dn, + attribute = conf.uid, + keepalive = 60000, + } + local res, err = ldap.ldap_authenticate(user.username, user.password, ldapconf) if not res then core.log.warn("ldap-auth failed: ", err) return 401, { message = "Invalid user authorization" } end + local user_dn = conf.uid .. "=" .. user.username .. "," .. conf.base_dn + -- 3. 
Retrieve consumer for authorization plugin local consumer_conf = consumer_mod.plugin(plugin_name) if not consumer_conf then diff --git a/apisix/plugins/limit-conn/init.lua b/apisix/plugins/limit-conn/init.lua index 3337980fd815..c6ce55f240d0 100644 --- a/apisix/plugins/limit-conn/init.lua +++ b/apisix/plugins/limit-conn/init.lua @@ -16,6 +16,7 @@ -- local limit_conn_new = require("resty.limit.conn").new local core = require("apisix.core") +local is_http = ngx.config.subsystem == "http" local sleep = core.sleep local shdict_name = "plugin-limit-conn" if ngx.config.subsystem == "stream" then @@ -115,11 +116,13 @@ function _M.decrease(conf, ctx) local use_delay = limit_conn[i + 3] local latency - if not use_delay then - if ctx.proxy_passed then - latency = ctx.var.upstream_response_time - else - latency = ctx.var.request_time - delay + if is_http then + if not use_delay then + if ctx.proxy_passed then + latency = ctx.var.upstream_response_time + else + latency = ctx.var.request_time - delay + end end end core.log.debug("request latency is ", latency) -- for test diff --git a/apisix/plugins/limit-count.lua b/apisix/plugins/limit-count.lua index 0eafd64235b3..72ab574df775 100644 --- a/apisix/plugins/limit-count.lua +++ b/apisix/plugins/limit-count.lua @@ -31,7 +31,7 @@ end function _M.access(conf, ctx) - return limit_count.rate_limit(conf, ctx) + return limit_count.rate_limit(conf, ctx, plugin_name, 1) end diff --git a/apisix/plugins/limit-count/init.lua b/apisix/plugins/limit-count/init.lua index 58fbf7969e2a..069ab6739ee5 100644 --- a/apisix/plugins/limit-count/init.lua +++ b/apisix/plugins/limit-count/init.lua @@ -20,7 +20,6 @@ local tab_insert = table.insert local ipairs = ipairs local pairs = pairs -local plugin_name = "limit-count" local limit_redis_cluster_new local limit_redis_new local limit_local_new @@ -50,6 +49,9 @@ local policy_to_additional_properties = { redis_port = { type = "integer", minimum = 1, default = 6379, }, + redis_username = { + type = 
"string", minLength = 1, + }, redis_password = { type = "string", minLength = 0, }, @@ -59,6 +61,12 @@ local policy_to_additional_properties = { redis_timeout = { type = "integer", minimum = 1, default = 1000, }, + redis_ssl = { + type = "boolean", default = false, + }, + redis_ssl_verify = { + type = "boolean", default = false, + }, }, required = {"redis_host"}, }, @@ -191,8 +199,8 @@ function _M.check_schema(conf) end -local function create_limit_obj(conf) - core.log.info("create new limit-count plugin instance") +local function create_limit_obj(conf, plugin_name) + core.log.info("create new " .. plugin_name .. " plugin instance") if not conf.policy or conf.policy == "local" then return limit_local_new("plugin-" .. plugin_name, conf.count, @@ -234,9 +242,9 @@ local function gen_limit_key(conf, ctx, key) end -local function gen_limit_obj(conf, ctx) +local function gen_limit_obj(conf, ctx, plugin_name) if conf.group then - return lrucache(conf.group, "", create_limit_obj, conf) + return lrucache(conf.group, "", create_limit_obj, conf, plugin_name) end local extra_key @@ -246,13 +254,13 @@ local function gen_limit_obj(conf, ctx) extra_key = conf.policy end - return core.lrucache.plugin_ctx(lrucache, ctx, extra_key, create_limit_obj, conf) + return core.lrucache.plugin_ctx(lrucache, ctx, extra_key, create_limit_obj, conf, plugin_name) end -function _M.rate_limit(conf, ctx) +function _M.rate_limit(conf, ctx, name, cost) core.log.info("ver: ", ctx.conf_version) - local lim, err = gen_limit_obj(conf, ctx) + local lim, err = gen_limit_obj(conf, ctx, name) if not lim then core.log.error("failed to fetch limit.count object: ", err) @@ -289,7 +297,13 @@ function _M.rate_limit(conf, ctx) key = gen_limit_key(conf, ctx, key) core.log.info("limit key: ", key) - local delay, remaining, reset = lim:incoming(key, true, conf) + local delay, remaining, reset + if not conf.policy or conf.policy == "local" then + delay, remaining, reset = lim:incoming(key, true, conf, cost) + else + 
delay, remaining, reset = lim:incoming(key, cost) + end + if not delay then local err = remaining if err == "rejected" then diff --git a/apisix/plugins/limit-count/limit-count-local.lua b/apisix/plugins/limit-count/limit-count-local.lua index f6909403141e..b6f319ae0106 100644 --- a/apisix/plugins/limit-count/limit-count-local.lua +++ b/apisix/plugins/limit-count/limit-count-local.lua @@ -14,7 +14,8 @@ -- See the License for the specific language governing permissions and -- limitations under the License. -- -local limit_local_new = require("resty.limit.count").new +local limit_count = require("resty.limit.count") + local ngx = ngx local ngx_time = ngx.time local assert = assert @@ -55,21 +56,18 @@ function _M.new(plugin_name, limit, window) assert(limit > 0 and window > 0) local self = { - limit_count = limit_local_new(plugin_name, limit, window), - dict = ngx.shared["plugin-limit-count-reset-header"] + limit_count = limit_count.new(plugin_name, limit, window), + dict = ngx.shared[plugin_name .. 
"-reset-header"] } return setmetatable(self, mt) end -function _M.incoming(self, key, commit, conf) - local delay, remaining = self.limit_count:incoming(key, commit) - local reset = 0 - if not delay then - return delay, remaining, reset - end +function _M.incoming(self, key, commit, conf, cost) + local delay, remaining = self.limit_count:incoming(key, commit, cost) + local reset - if remaining == conf.count - 1 then + if remaining == conf.count - cost then reset = set_endtime(self, key, conf.time_window) else reset = read_reset(self, key) diff --git a/apisix/plugins/limit-count/limit-count-redis-cluster.lua b/apisix/plugins/limit-count/limit-count-redis-cluster.lua index 25a3c1c57b21..7800f1c888f9 100644 --- a/apisix/plugins/limit-count/limit-count-redis-cluster.lua +++ b/apisix/plugins/limit-count/limit-count-redis-cluster.lua @@ -30,12 +30,13 @@ local mt = { local script = core.string.compress_script([=[ + assert(tonumber(ARGV[3]) >= 1, "cost must be at least 1") local ttl = redis.call('ttl', KEYS[1]) if ttl < 0 then - redis.call('set', KEYS[1], ARGV[1] - 1, 'EX', ARGV[2]) - return {ARGV[1] - 1, ARGV[2]} + redis.call('set', KEYS[1], ARGV[1] - ARGV[3], 'EX', ARGV[2]) + return {ARGV[1] - ARGV[3], ARGV[2]} end - return {redis.call('incrby', KEYS[1], -1), ttl} + return {redis.call('incrby', KEYS[1], 0 - ARGV[3]), ttl} ]=]) @@ -90,14 +91,14 @@ function _M.new(plugin_name, limit, window, conf) end -function _M.incoming(self, key) +function _M.incoming(self, key, cost) local red = self.red_cli local limit = self.limit local window = self.window key = self.plugin_name .. 
tostring(key) local ttl = 0 - local res, err = red:eval(script, 1, key, limit, window) + local res, err = red:eval(script, 1, key, limit, window, cost or 1) if err then return nil, err, ttl diff --git a/apisix/plugins/limit-count/limit-count-redis.lua b/apisix/plugins/limit-count/limit-count-redis.lua index bed920229b0c..85f769cbda56 100644 --- a/apisix/plugins/limit-count/limit-count-redis.lua +++ b/apisix/plugins/limit-count/limit-count-redis.lua @@ -30,12 +30,13 @@ local mt = { local script = core.string.compress_script([=[ + assert(tonumber(ARGV[3]) >= 1, "cost must be at least 1") local ttl = redis.call('ttl', KEYS[1]) if ttl < 0 then - redis.call('set', KEYS[1], ARGV[1] - 1, 'EX', ARGV[2]) - return {ARGV[1] - 1, ARGV[2]} + redis.call('set', KEYS[1], ARGV[1] - ARGV[3], 'EX', ARGV[2]) + return {ARGV[1] - ARGV[3], ARGV[2]} end - return {redis.call('incrby', KEYS[1], -1), ttl} + return {redis.call('incrby', KEYS[1], 0 - ARGV[3]), ttl} ]=]) local function redis_cli(conf) @@ -44,7 +45,12 @@ local function redis_cli(conf) red:set_timeouts(timeout, timeout, timeout) - local ok, err = red:connect(conf.redis_host, conf.redis_port or 6379) + local sock_opts = { + ssl = conf.redis_ssl, + ssl_verify = conf.redis_ssl_verify + } + + local ok, err = red:connect(conf.redis_host, conf.redis_port or 6379, sock_opts) if not ok then return false, err end @@ -53,7 +59,12 @@ local function redis_cli(conf) count, err = red:get_reused_times() if 0 == count then if conf.redis_password and conf.redis_password ~= '' then - local ok, err = red:auth(conf.redis_password) + local ok, err + if conf.redis_username then + ok, err = red:auth(conf.redis_username, conf.redis_password) + else + ok, err = red:auth(conf.redis_password) + end if not ok then return nil, err end @@ -85,7 +96,7 @@ function _M.new(plugin_name, limit, window, conf) return setmetatable(self, mt) end -function _M.incoming(self, key) +function _M.incoming(self, key, cost) local conf = self.conf local red, err = 
redis_cli(conf) if not red then @@ -98,7 +109,7 @@ function _M.incoming(self, key) key = self.plugin_name .. tostring(key) local ttl = 0 - res, err = red:eval(script, 1, key, limit, window) + res, err = red:eval(script, 1, key, limit, window, cost or 1) if err then return nil, err, ttl diff --git a/apisix/plugins/log-rotate.lua b/apisix/plugins/log-rotate.lua index 7380b0d1c64f..db3360e448b4 100644 --- a/apisix/plugins/log-rotate.lua +++ b/apisix/plugins/log-rotate.lua @@ -32,10 +32,10 @@ local os_date = os.date local os_remove = os.remove local os_rename = os.rename local str_sub = string.sub -local str_find = string.find local str_format = string.format -local str_reverse = string.reverse +local str_byte = string.byte local ngx_sleep = require("apisix.core.utils").sleep +local string_rfind = require("pl.stringx").rfind local local_conf @@ -49,6 +49,7 @@ local default_logs local enable_compression = false local DEFAULT_ACCESS_LOG_FILENAME = "access.log" local DEFAULT_ERROR_LOG_FILENAME = "error.log" +local SLASH_BYTE = str_byte("/") local schema = { type = "object", @@ -74,18 +75,6 @@ local function file_exists(path) end -local function get_last_index(str, key) - local rev = str_reverse(str) - local _, idx = str_find(rev, key) - local n - if idx then - n = #rev - idx + 1 - end - - return n -end - - local function get_log_path_info(file_type) local_conf = core.config.local_conf() local conf_path @@ -101,12 +90,11 @@ local function get_log_path_info(file_type) local prefix = ngx.config.prefix() if conf_path then - local root = str_sub(conf_path, 1, 1) -- relative path - if root ~= "/" then + if str_byte(conf_path) ~= SLASH_BYTE then conf_path = prefix .. 
conf_path end - local n = get_last_index(conf_path, "/") + local n = string_rfind(conf_path, "/") if n ~= nil and n ~= #conf_path then local dir = str_sub(conf_path, 1, n) local name = str_sub(conf_path, n + 1) @@ -126,14 +114,14 @@ end local function scan_log_folder(log_file_name) local t = {} - local log_dir, _ = get_log_path_info(log_file_name) + local log_dir, log_name = get_log_path_info(log_file_name) - local compression_log_type = log_file_name .. COMPRESSION_FILE_SUFFIX + local compression_log_type = log_name .. COMPRESSION_FILE_SUFFIX for file in lfs.dir(log_dir) do - local n = get_last_index(file, "__") + local n = string_rfind(file, "__") if n ~= nil then local log_type = file:sub(n + 2) - if log_type == log_file_name or log_type == compression_log_type then + if log_type == log_name or log_type == compression_log_type then core.table.insert(t, file) end end @@ -174,7 +162,7 @@ local function compression_file(new_file) return end - local n = get_last_index(new_file, "/") + local n = string_rfind(new_file, "/") local new_filepath = str_sub(new_file, 1, n) local new_filename = str_sub(new_file, n + 1) local com_filename = new_filename .. COMPRESSION_FILE_SUFFIX diff --git a/apisix/plugins/loki-logger.lua b/apisix/plugins/loki-logger.lua new file mode 100644 index 000000000000..357ab1978202 --- /dev/null +++ b/apisix/plugins/loki-logger.lua @@ -0,0 +1,234 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. 
You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- + +local bp_manager_mod = require("apisix.utils.batch-processor-manager") +local log_util = require("apisix.utils.log-util") +local core = require("apisix.core") +local http = require("resty.http") +local new_tab = require("table.new") + +local pairs = pairs +local ipairs = ipairs +local tostring = tostring +local math_random = math.random +local table_insert = table.insert +local ngx = ngx +local str_format = core.string.format + +local plugin_name = "loki-logger" +local batch_processor_manager = bp_manager_mod.new("loki logger") + +local schema = { + type = "object", + properties = { + -- core configurations + endpoint_addrs = { + type = "array", + minItems = 1, + items = core.schema.uri_def, + }, + endpoint_uri = { + type = "string", + minLength = 1, + default = "/loki/api/v1/push" + }, + tenant_id = {type = "string", default = "fake"}, + log_labels = { + type = "object", + patternProperties = { + [".*"] = { + type = "string", + minLength = 1, + }, + }, + default = { + job = "apisix", + }, + }, + + -- connection layer configurations + ssl_verify = {type = "boolean", default = false}, + timeout = { + type = "integer", + minimum = 1, + maximum = 60000, + default = 3000, + description = "timeout in milliseconds", + }, + keepalive = {type = "boolean", default = true}, + keepalive_timeout = { + type = "integer", + minimum = 1000, + default = 60000, + description = "keepalive timeout in milliseconds", + }, + keepalive_pool = {type = "integer", minimum = 1, default = 5}, + + -- logger related configurations + log_format = {type = "object"}, + 
include_req_body = {type = "boolean", default = false}, + include_req_body_expr = { + type = "array", + minItems = 1, + items = { + type = "array" + } + }, + include_resp_body = {type = "boolean", default = false}, + include_resp_body_expr = { + type = "array", + minItems = 1, + items = { + type = "array" + } + }, + }, + required = {"endpoint_addrs"} +} + + +local metadata_schema = { + type = "object", + properties = { + log_format = log_util.metadata_schema_log_format, + }, +} + + +local _M = { + version = 0.1, + priority = 414, + name = plugin_name, + schema = batch_processor_manager:wrap_schema(schema), + metadata_schema = metadata_schema, +} + + +function _M.check_schema(conf, schema_type) + if schema_type == core.schema.TYPE_METADATA then + return core.schema.check(metadata_schema, conf) + end + + local ok, err = core.schema.check(schema, conf) + if not ok then + return nil, err + end + return log_util.check_log_schema(conf) +end + + +local function send_http_data(conf, log) + local params = { + headers = { + ["Content-Type"] = "application/json", + ["X-Scope-OrgID"] = conf.tenant_id, + }, + keepalive = conf.keepalive, + ssl_verify = conf.ssl_verify, + method = "POST", + body = core.json.encode(log) + } + + if conf.keepalive then + params.keepalive_timeout = conf.keepalive_timeout + params.keepalive_pool = conf.keepalive_pool + end + + local httpc, err = http.new() + if not httpc then + return false, str_format("create http client error: %s", err) + end + httpc:set_timeout(conf.timeout) + + -- select an random endpoint and build URL + local endpoint_url = conf.endpoint_addrs[math_random(#conf.endpoint_addrs)] .. 
conf.endpoint_uri + local res, err = httpc:request_uri(endpoint_url, params) + if not res then + return false, err + end + + if res.status >= 300 then + return false, str_format("loki server returned status: %d, body: %s", + res.status, res.body or "") + end + + return true +end + + +function _M.body_filter(conf, ctx) + log_util.collect_body(conf, ctx) +end + + +function _M.log(conf, ctx) + local entry = log_util.get_log_entry(plugin_name, conf, ctx) + + if not entry.route_id then + entry.route_id = "no-matched" + end + + -- insert start time as log time, multiply to nanoseconds + -- use string concat to circumvent 64bit integers that LuaVM cannot handle + -- that is, first process the decimal part of the millisecond value + -- and then add 6 zeros by string concatenation + entry.loki_log_time = tostring(ngx.req.start_time() * 1000) .. "000000" + + if batch_processor_manager:add_entry(conf, entry) then + return + end + + local labels = conf.log_labels + + -- parsing possible variables in label value + for key, value in pairs(labels) do + local new_val, err, n_resolved = core.utils.resolve_var(value, ctx.var) + if not err and n_resolved > 0 then + labels[key] = new_val + end + end + + -- generate a function to be executed by the batch processor + local func = function(entries) + -- build loki request data + local data = { + streams = { + { + stream = labels, + values = new_tab(1, 0), + } + } + } + + -- add all entries to the batch + for _, entry in ipairs(entries) do + local log_time = entry.loki_log_time + entry.loki_log_time = nil -- clean logger internal field + + table_insert(data.streams[1].values, { + log_time, core.json.encode(entry) + }) + end + + return send_http_data(conf, data) + end + + batch_processor_manager:add_entry_to_new_processor(conf, entry, ctx, func) +end + + +return _M diff --git a/apisix/plugins/mocking.lua b/apisix/plugins/mocking.lua index 134647f71d9f..af5bc75edb2a 100644 --- a/apisix/plugins/mocking.lua +++ b/apisix/plugins/mocking.lua @@ 
-49,7 +49,19 @@ local schema = { -- specify response json schema, if response_example is not nil, this conf will be ignore. -- generate random response by json schema. response_schema = { type = "object" }, - with_mock_header = { type = "boolean", default = true } + with_mock_header = { type = "boolean", default = true }, + response_headers = { + type = "object", + minProperties = 1, + patternProperties = { + ["^[^:]+$"] = { + oneOf = { + { type = "string" }, + { type = "number" } + } + } + }, + } }, anyOf = { { required = { "response_example" } }, @@ -215,6 +227,12 @@ function _M.access(conf, ctx) ngx.header["x-mock-by"] = "APISIX/" .. core.version.VERSION end + if conf.response_headers then + for key, value in pairs(conf.response_headers) do + core.response.add_header(key, value) + end + end + if conf.delay > 0 then ngx.sleep(conf.delay) end diff --git a/apisix/plugins/opa.lua b/apisix/plugins/opa.lua index 1b7db1b62d73..24bdb5be1a46 100644 --- a/apisix/plugins/opa.lua +++ b/apisix/plugins/opa.lua @@ -19,6 +19,7 @@ local core = require("apisix.core") local http = require("resty.http") local helper = require("apisix.plugins.opa.helper") local type = type +local ipairs = ipairs local schema = { type = "object", @@ -37,6 +38,14 @@ local schema = { description = "timeout in milliseconds", }, keepalive = {type = "boolean", default = true}, + send_headers_upstream = { + type = "array", + minItems = 1, + items = { + type = "string" + }, + description = "list of headers to pass to upstream in request" + }, keepalive_timeout = {type = "integer", minimum = 1000, default = 60000}, keepalive_pool = {type = "integer", minimum = 1, default = 5}, with_route = {type = "boolean", default = false}, @@ -125,6 +134,14 @@ function _M.access(conf, ctx) end return status_code, reason + else if result.headers and conf.send_headers_upstream then + for _, name in ipairs(conf.send_headers_upstream) do + local value = result.headers[name] + if value then + core.request.set_header(ctx, name, 
value) + end + end + end end end diff --git a/apisix/plugins/openid-connect.lua b/apisix/plugins/openid-connect.lua index 5058eba47d28..927e4ddbd8aa 100644 --- a/apisix/plugins/openid-connect.lua +++ b/apisix/plugins/openid-connect.lua @@ -130,6 +130,32 @@ local schema = { "header to the request for downstream.", type = "boolean", default = false + }, + proxy_opts = { + description = "HTTP proxy server be used to access identity server.", + type = "object", + properties = { + http_proxy = { + type = "string", + description = "HTTP proxy like: http://proxy-server:80.", + }, + https_proxy = { + type = "string", + description = "HTTPS proxy like: http://proxy-server:80.", + }, + http_proxy_authorization = { + type = "string", + description = "Basic [base64 username:password].", + }, + https_proxy_authorization = { + type = "string", + description = "Basic [base64 username:password].", + }, + no_proxy = { + type = "string", + description = "Comma separated list of hosts that should not be proxied.", + } + }, } }, encrypt_fields = {"client_secret"}, diff --git a/apisix/plugins/prometheus/exporter.lua b/apisix/plugins/prometheus/exporter.lua index 45ff94c3f631..623a9eddf113 100644 --- a/apisix/plugins/prometheus/exporter.lua +++ b/apisix/plugins/prometheus/exporter.lua @@ -17,6 +17,7 @@ local base_prometheus = require("prometheus") local core = require("apisix.core") local plugin = require("apisix.plugin") +local control = require("apisix.control.v1") local ipairs = ipairs local pairs = pairs local ngx = ngx @@ -33,6 +34,8 @@ local get_ssls = router.ssls local get_services = require("apisix.http.service").services local get_consumers = require("apisix.consumer").consumers local get_upstreams = require("apisix.upstream").upstreams +local get_global_rules = require("apisix.global_rules").global_rules +local get_global_rules_prev_index = require("apisix.global_rules").get_pre_index local clear_tab = core.table.clear local get_stream_routes = router.stream_routes local 
get_protos = require("apisix.plugins.grpc-transcode.proto").protos @@ -158,6 +161,10 @@ function _M.http_init(prometheus_enabled_in_stream) "The free space of each nginx shared DICT since APISIX start", {"name"}) + metrics.upstream_status = prometheus:gauge("upstream_status", + "Upstream status from health check", + {"name", "ip", "port"}) + -- per service -- The consumer label indicates the name of consumer corresponds to the @@ -168,10 +175,15 @@ function _M.http_init(prometheus_enabled_in_stream) {"code", "route", "matched_uri", "matched_host", "service", "consumer", "node", unpack(extra_labels("http_status"))}) + local buckets = DEFAULT_BUCKETS + if attr and attr.default_buckets then + buckets = attr.default_buckets + end + metrics.latency = prometheus:histogram("http_latency", "HTTP request latency in milliseconds per service in APISIX", {"type", "route", "service", "consumer", "node", unpack(extra_labels("http_latency"))}, - DEFAULT_BUCKETS) + buckets) metrics.bandwidth = prometheus:counter("bandwidth", "Total bandwidth in bytes consumed per service in APISIX", @@ -367,14 +379,15 @@ local function etcd_modify_index() global_max_idx = set_modify_index("consumers", consumers, consumers_ver, global_max_idx) -- global_rules - local global_rules = router.global_rules + local global_rules, global_rules_ver = get_global_rules() if global_rules then - global_max_idx = set_modify_index("global_rules", global_rules.values, - global_rules.conf_version, global_max_idx) + global_max_idx = set_modify_index("global_rules", global_rules, + global_rules_ver, global_max_idx) -- prev_index key_values[1] = "prev_index" - metrics.etcd_modify_indexes:set(global_rules.prev_index, key_values) + local prev_index = get_global_rules_prev_index() + metrics.etcd_modify_indexes:set(prev_index, key_values) else global_max_idx = set_modify_index("global_rules", nil, nil, global_max_idx) @@ -458,6 +471,15 @@ local function collect(ctx, stream_only) metrics.node_info:set(1, gen_arr(hostname)) 
+ -- update upstream_status metrics + local stats = control.get_health_checkers() + for _, stat in ipairs(stats) do + for _, node in ipairs(stat.nodes) do + metrics.upstream_status:set((node.status == "healthy") and 1 or 0, + gen_arr(stat.name, node.ip, node.port)) + end + end + core.response.set_header("content_type", "text/plain") return 200, core.table.concat(prometheus:metric_data()) end diff --git a/apisix/plugins/proxy-cache/init.lua b/apisix/plugins/proxy-cache/init.lua index 59569980cc5c..918f755994ea 100644 --- a/apisix/plugins/proxy-cache/init.lua +++ b/apisix/plugins/proxy-cache/init.lua @@ -25,6 +25,7 @@ local plugin_name = "proxy-cache" local STRATEGY_DISK = "disk" local STRATEGY_MEMORY = "memory" +local DEFAULT_CACHE_ZONE = "disk_cache_one" local schema = { type = "object", @@ -33,7 +34,7 @@ local schema = { type = "string", minLength = 1, maxLength = 100, - default = "disk_cache_one", + default = DEFAULT_CACHE_ZONE, }, cache_strategy = { type = "string", @@ -108,7 +109,7 @@ local schema = { local _M = { version = 0.2, - priority = 1009, + priority = 1085, name = plugin_name, schema = schema, } @@ -129,14 +130,23 @@ function _M.check_schema(conf) local found = false local local_conf = core.config.local_conf() if local_conf.apisix.proxy_cache then + local err = "cache_zone " .. conf.cache_zone .. " not found" for _, cache in ipairs(local_conf.apisix.proxy_cache.zones) do + -- cache_zone passed in plugin config matched one of the proxy_cache zones if cache.name == conf.cache_zone then - found = true + -- check for the mismatch between cache_strategy and corresponding cache zone + if (conf.cache_strategy == STRATEGY_MEMORY and cache.disk_path) or + (conf.cache_strategy == STRATEGY_DISK and not cache.disk_path) then + err = "invalid or empty cache_zone for cache_strategy: "..conf.cache_strategy + else + found = true + end + break end end if found == false then - return false, "cache_zone " .. conf.cache_zone .. 
" not found" + return false, err end end diff --git a/apisix/plugins/proxy-cache/memory.lua b/apisix/plugins/proxy-cache/memory.lua index 0112db63b568..9d5c665a8d92 100644 --- a/apisix/plugins/proxy-cache/memory.lua +++ b/apisix/plugins/proxy-cache/memory.lua @@ -32,6 +32,10 @@ end function _M:set(key, obj, ttl) + if self.dict == nil then + return nil, "invalid cache_zone provided" + end + local obj_json = core.json.encode(obj) if not obj_json then return nil, "could not encode object" @@ -43,6 +47,10 @@ end function _M:get(key) + if self.dict == nil then + return nil, "invalid cache_zone provided" + end + -- If the key does not exist or has expired, then res_json will be nil. local res_json, err = self.dict:get(key) if not res_json then @@ -63,6 +71,9 @@ end function _M:purge(key) + if self.dict == nil then + return nil, "invalid cache_zone provided" + end self.dict:delete(key) end diff --git a/apisix/plugins/proxy-mirror.lua b/apisix/plugins/proxy-mirror.lua index 312d3ec37b68..d6cede6e9b59 100644 --- a/apisix/plugins/proxy-mirror.lua +++ b/apisix/plugins/proxy-mirror.lua @@ -27,7 +27,7 @@ local schema = { properties = { host = { type = "string", - pattern = [=[^http(s)?:\/\/([\da-zA-Z.-]+|\[[\da-fA-F:]+\])(:\d+)?$]=], + pattern = [=[^(http(s)?|grpc(s)?):\/\/([\da-zA-Z.-]+|\[[\da-fA-F:]+\])(:\d+)?$]=], }, path = { type = "string", @@ -76,15 +76,15 @@ local function resolver_host(prop_host) if not ip then core.log.error("dns resolver resolves domain: ", decoded_host," error: ", err, " will continue to use the host: ", decoded_host) - return prop_host + return url_decoded.scheme, prop_host end local host = url_decoded.scheme .. '://' .. ip .. (url_decoded.port and ':' .. 
url_decoded.port or '') core.log.info(prop_host, " is resolved to: ", host) - return host + return url_decoded.scheme, host end - return prop_host + return url_decoded.scheme, prop_host end @@ -101,7 +101,9 @@ local function enable_mirror(ctx, conf) end end - ctx.var.upstream_mirror_uri = resolver_host(conf.host) .. uri + local _, mirror_host = resolver_host(conf.host) + ctx.var.upstream_mirror_host = mirror_host + ctx.var.upstream_mirror_uri = mirror_host .. uri if has_mod then apisix_ngx_client.enable_mirror() @@ -114,12 +116,14 @@ function _M.rewrite(conf, ctx) if conf.sample_ratio == 1 then enable_mirror(ctx, conf) + ctx.enable_mirror = true else local val = math_random() core.log.info("mirror request sample_ratio conf: ", conf.sample_ratio, ", random value: ", val) if val < conf.sample_ratio then enable_mirror(ctx, conf) + ctx.enable_mirror = true end end diff --git a/apisix/plugins/proxy-rewrite.lua b/apisix/plugins/proxy-rewrite.lua index 8b1df5aaa876..087686edfbf8 100644 --- a/apisix/plugins/proxy-rewrite.lua +++ b/apisix/plugins/proxy-rewrite.lua @@ -22,6 +22,7 @@ local ngx = ngx local type = type local re_sub = ngx.re.sub local re_match = ngx.re.match +local req_set_uri = ngx.req.set_uri local sub_str = string.sub local str_find = core.string.find @@ -65,7 +66,6 @@ local schema = { description = "new uri that substitute from client uri " .. "for upstream, lower priority than uri property", type = "array", - maxItems = 2, minItems = 2, items = { description = "regex uri", @@ -191,11 +191,16 @@ function _M.check_schema(conf) end if conf.regex_uri and #conf.regex_uri > 0 then - local _, _, err = re_sub("/fake_uri", conf.regex_uri[1], - conf.regex_uri[2], "jo") - if err then - return false, "invalid regex_uri(" .. conf.regex_uri[1] .. - ", " .. conf.regex_uri[2] .. "): " .. 
err + if (#conf.regex_uri % 2 ~= 0) then + return false, "The length of regex_uri should be an even number" + end + for i = 1, #conf.regex_uri, 2 do + local _, _, err = re_sub("/fake_uri", conf.regex_uri[i], + conf.regex_uri[i + 1], "jo") + if err then + return false, "invalid regex_uri(" .. conf.regex_uri[i] .. + ", " .. conf.regex_uri[i + 1] .. "): " .. err + end end end @@ -274,32 +279,47 @@ function _M.rewrite(conf, ctx) local separator_escaped = false if conf.use_real_request_uri_unsafe then upstream_uri = ctx.var.real_request_uri - elseif conf.uri ~= nil then + end + + if conf.uri ~= nil then separator_escaped = true upstream_uri = core.utils.resolve_var(conf.uri, ctx.var, escape_separator) + elseif conf.regex_uri ~= nil then if not str_find(upstream_uri, "?") then separator_escaped = true end - local uri, _, err = re_sub(upstream_uri, conf.regex_uri[1], - conf.regex_uri[2], "jo") - if not uri then - local msg = "failed to substitute the uri " .. ctx.var.uri .. - " (" .. conf.regex_uri[1] .. ") with " .. - conf.regex_uri[2] .. " : " .. err - core.log.error(msg) - return 500, {message = msg} - end + local error_msg + for i = 1, #conf.regex_uri, 2 do + local captures, err = re_match(upstream_uri, conf.regex_uri[i], "jo") + if err then + error_msg = "failed to match the uri " .. ctx.var.uri .. + " (" .. conf.regex_uri[i] .. ") " .. " : " .. err + break + end + + if captures then + ctx.proxy_rewrite_regex_uri_captures = captures + + local uri, _, err = re_sub(upstream_uri, + conf.regex_uri[i], conf.regex_uri[i + 1], "jo") + if uri then + upstream_uri = uri + else + error_msg = "failed to substitute the uri " .. ngx.var.uri .. + " (" .. conf.regex_uri[i] .. ") with " .. + conf.regex_uri[i + 1] .. " : " .. 
err + end - local m, err = re_match(upstream_uri, conf.regex_uri[1], "jo") - if not m and err then - core.log.error("match error in proxy-rewrite plugin, please check: ", err) - return 500 + break + end end - ctx.proxy_rewrite_regex_uri_captures = m - upstream_uri = uri + if error_msg ~= nil then + core.log.error(error_msg) + return 500, { error_msg = error_msg } + end end if not conf.use_real_request_uri_unsafe then @@ -317,6 +337,8 @@ function _M.rewrite(conf, ctx) upstream_uri = core.utils.uri_safe_encode(upstream_uri) end + req_set_uri(upstream_uri) + if ctx.var.is_args == "?" then if index then ctx.var.upstream_uri = upstream_uri .. "&" .. (ctx.var.args or "") @@ -326,6 +348,8 @@ function _M.rewrite(conf, ctx) else ctx.var.upstream_uri = upstream_uri end + else + ctx.var.upstream_uri = upstream_uri end if conf.headers then diff --git a/apisix/plugins/request-id.lua b/apisix/plugins/request-id.lua index 3a7e6bc39ee8..dac3162db5ed 100644 --- a/apisix/plugins/request-id.lua +++ b/apisix/plugins/request-id.lua @@ -16,28 +16,15 @@ -- local ngx = ngx -local bit = require("bit") local core = require("apisix.core") -local snowflake = require("snowflake") local uuid = require("resty.jit-uuid") local nanoid = require("nanoid") -local process = require("ngx.process") -local timers = require("apisix.timers") -local tostring = tostring -local math_pow = math.pow -local math_ceil = math.ceil -local math_floor = math.floor local math_random = math.random local str_byte = string.byte local ffi = require "ffi" local plugin_name = "request-id" -local data_machine = nil -local snowflake_inited = nil - -local attr = nil - local schema = { type = "object", properties = { @@ -45,7 +32,7 @@ local schema = { include_in_response = {type = "boolean", default = true}, algorithm = { type = "string", - enum = {"uuid", "snowflake", "nanoid", "range_id"}, + enum = {"uuid", "nanoid", "range_id"}, default = "uuid" }, range_id = { @@ -62,25 +49,8 @@ local schema = { minLength = 6, default = 
"abcdefghijklmnopqrstuvwxyzABCDEFGHIGKLMNOPQRSTUVWXYZ0123456789" } - } - } - } -} - -local attr_schema = { - type = "object", - properties = { - snowflake = { - type = "object", - properties = { - enable = {type = "boolean", default = false}, - snowflake_epoc = {type = "integer", minimum = 1, default = 1609459200000}, - data_machine_bits = {type = "integer", minimum = 1, maximum = 31, default = 12}, - sequence_bits = {type = "integer", minimum = 1, default = 10}, - delta_offset = {type = "integer", default = 1, enum = {1, 10, 100, 1000}}, - data_machine_ttl = {type = "integer", minimum = 1, default = 30}, - data_machine_interval = {type = "integer", minimum = 1, default = 10} - } + }, + default = {} } } } @@ -92,139 +62,10 @@ local _M = { schema = schema } - function _M.check_schema(conf) return core.schema.check(schema, conf) end - --- Generates the current process data machine -local function gen_data_machine(max_number) - if data_machine == nil then - local etcd_cli, prefix = core.etcd.new() - local prefix = prefix .. "/plugins/request-id/snowflake/" - local uuid = uuid.generate_v4() - local id = 1 - ::continue:: - while (id <= max_number) do - local res, err = etcd_cli:grant(attr.snowflake.data_machine_ttl) - if err then - id = id + 1 - core.log.error("Etcd grant failure, err: ".. err) - goto continue - end - - local _, err1 = etcd_cli:setnx(prefix .. tostring(id), uuid) - local res2, err2 = etcd_cli:get(prefix .. tostring(id)) - - if err1 or err2 or res2.body.kvs[1].value ~= uuid then - core.log.notice("data_machine " .. id .. " is not available") - id = id + 1 - else - data_machine = id - - local _, err3 = - etcd_cli:set( - prefix .. tostring(id), - uuid, - { - prev_kv = true, - lease = res.body.ID - } - ) - - if err3 then - id = id + 1 - etcd_cli:delete(prefix .. tostring(id)) - core.log.error("set data_machine " .. id .. " lease error: " .. 
err3) - goto continue - end - - local lease_id = res.body.ID - local start_at = ngx.time() - local handler = function() - local now = ngx.time() - if now - start_at < attr.snowflake.data_machine_interval then - return - end - - local _, err4 = etcd_cli:keepalive(lease_id) - if err4 then - snowflake_inited = nil - data_machine = nil - timers.unregister_timer("plugin#request-id") - core.log.error("snowflake data_machine: " .. id .." lease failed.") - end - start_at = now - core.log.info("snowflake data_machine: " .. id .." lease success.") - end - - timers.register_timer("plugin#request-id", handler) - core.log.info( - "timer created to lease snowflake algorithm data_machine, interval: ", - attr.snowflake.data_machine_interval) - core.log.notice("lease snowflake data_machine: " .. id) - break - end - end - - if data_machine == nil then - core.log.error("No data_machine is not available") - return nil - end - end - return data_machine -end - - --- Split 'Data Machine' into 'Worker ID' and 'datacenter ID' -local function split_data_machine(data_machine, node_id_bits, datacenter_id_bits) - local num = bit.tobit(data_machine) - local worker_id = bit.band(num, math_pow(2, node_id_bits) - 1) - num = bit.rshift(num, node_id_bits) - local datacenter_id = bit.band(num, math_pow(2, datacenter_id_bits) - 1) - return worker_id, datacenter_id -end - - --- Initialize the snowflake algorithm -local function snowflake_init() - if snowflake_inited == nil then - local max_number = math_pow(2, (attr.snowflake.data_machine_bits)) - local datacenter_id_bits = math_floor(attr.snowflake.data_machine_bits / 2) - local node_id_bits = math_ceil(attr.snowflake.data_machine_bits / 2) - data_machine = gen_data_machine(max_number) - if data_machine == nil then - return "" - end - - local worker_id, datacenter_id = split_data_machine(data_machine, - node_id_bits, datacenter_id_bits) - - core.log.info("snowflake init datacenter_id: " .. - datacenter_id .. " worker_id: " .. 
worker_id) - snowflake.init( - datacenter_id, - worker_id, - attr.snowflake.snowflake_epoc, - node_id_bits, - datacenter_id_bits, - attr.snowflake.sequence_bits, - attr.delta_offset - ) - snowflake_inited = true - end -end - - --- generate snowflake id -local function next_id() - if snowflake_inited == nil then - snowflake_init() - end - return snowflake:next_id() -end - -- generate range_id local function get_range_id(range_id) local res = ffi.new("unsigned char[?]", range_id.length) @@ -246,7 +87,7 @@ local function get_request_id(conf) return get_range_id(conf.range_id) end - return next_id() + return uuid() end @@ -276,25 +117,4 @@ function _M.header_filter(conf, ctx) end end -function _M.init() - local local_conf = core.config.local_conf() - attr = core.table.try_read_attr(local_conf, "plugin_attr", plugin_name) - local ok, err = core.schema.check(attr_schema, attr) - if not ok then - core.log.error("failed to check the plugin_attr[", plugin_name, "]", ": ", err) - return - end - if attr.snowflake.enable then - if process.type() == "worker" then - ngx.timer.at(0, snowflake_init) - end - end -end - -function _M.destroy() - if snowflake_inited then - timers.unregister_timer("plugin#request-id") - end -end - return _M diff --git a/apisix/plugins/response-rewrite.lua b/apisix/plugins/response-rewrite.lua index 4c3487da0686..20458c4fcf82 100644 --- a/apisix/plugins/response-rewrite.lua +++ b/apisix/plugins/response-rewrite.lua @@ -58,7 +58,7 @@ local schema = { items = { type = "string", -- "Set-Cookie: =; Max-Age=" - pattern = "^[^:]+:[^:]+[^/]$" + pattern = "^[^:]+:[^:]*[^/]$" } }, set = { diff --git a/apisix/plugins/skywalking-logger.lua b/apisix/plugins/skywalking-logger.lua index 1c1ba590a0c4..0d90b97893cb 100644 --- a/apisix/plugins/skywalking-logger.lua +++ b/apisix/plugins/skywalking-logger.lua @@ -132,6 +132,11 @@ function _M.log(conf, ctx) end end + local service_instance_name = conf.service_instance_name + if service_instance_name == "$hostname" then + 
service_instance_name = core.utils.gethostname() + end + local entry = { traceContext = trace_context, body = { @@ -140,7 +145,7 @@ function _M.log(conf, ctx) } }, service = conf.service_name, - serviceInstance = conf.service_instance_name, + serviceInstance = service_instance_name, endpoint = ctx.var.uri, } diff --git a/apisix/plugins/sls-logger.lua b/apisix/plugins/sls-logger.lua index 6c2415ac6d29..de2fbae67ffa 100644 --- a/apisix/plugins/sls-logger.lua +++ b/apisix/plugins/sls-logger.lua @@ -21,7 +21,7 @@ local bp_manager_mod = require("apisix.utils.batch-processor-manager") local plugin_name = "sls-logger" local ngx = ngx -local rf5424 = require("apisix.plugins.slslog.rfc5424") +local rf5424 = require("apisix.utils.rfc5424") local tcp = ngx.socket.tcp local tostring = tostring local ipairs = ipairs @@ -138,9 +138,14 @@ function _M.log(conf, ctx) return end - local rf5424_data = rf5424.encode("SYSLOG", "INFO", ctx.var.host,"apisix", - ctx.var.pid, conf.project, conf.logstore, - conf.access_key_id, conf.access_key_secret, json_str) + local structured_data = { + {name = "project", value = conf.project}, + {name = "logstore", value = conf.logstore}, + {name = "access-key-id", value = conf.access_key_id}, + {name = "access-key-secret", value = conf.access_key_secret}, + } + local rf5424_data = rf5424.encode("SYSLOG", "INFO", ctx.var.host, "apisix", + ctx.var.pid, json_str, structured_data) local process_context = { data = rf5424_data, diff --git a/apisix/plugins/splunk-hec-logging.lua b/apisix/plugins/splunk-hec-logging.lua index 8de8be6eca9b..d7c97107e620 100644 --- a/apisix/plugins/splunk-hec-logging.lua +++ b/apisix/plugins/splunk-hec-logging.lua @@ -21,6 +21,9 @@ local ngx_now = ngx.now local http = require("resty.http") local log_util = require("apisix.utils.log-util") local bp_manager_mod = require("apisix.utils.batch-processor-manager") +local table_insert = core.table.insert +local table_concat = core.table.concat +local ipairs = ipairs local 
DEFAULT_SPLUNK_HEC_ENTRY_SOURCE = "apache-apisix-splunk-hec-logging" @@ -127,10 +130,15 @@ local function send_to_splunk(conf, entries) local http_new = http.new() http_new:set_timeout(conf.endpoint.timeout * 1000) + local t = {} + for _, e in ipairs(entries) do + table_insert(t, core.json.encode(e)) + end + local res, err = http_new:request_uri(conf.endpoint.uri, { ssl_verify = conf.ssl_verify, method = "POST", - body = core.json.encode(entries), + body = table_concat(t), headers = request_headers, }) diff --git a/apisix/plugins/syslog/init.lua b/apisix/plugins/syslog/init.lua index 24f2f62ab812..0ab34f8054e9 100644 --- a/apisix/plugins/syslog/init.lua +++ b/apisix/plugins/syslog/init.lua @@ -18,6 +18,10 @@ local core = require("apisix.core") local bp_manager_mod = require("apisix.utils.batch-processor-manager") local logger_socket = require("resty.logger.socket") +local rfc5424 = require("apisix.utils.rfc5424") +local ipairs = ipairs +local table_insert = core.table.insert +local table_concat = core.table.concat local batch_processor_manager = bp_manager_mod.new("sys logger") @@ -63,7 +67,8 @@ local function send_syslog_data(conf, log_message, api_ctx) end -- reuse the logger object - local ok, err = logger:log(core.json.encode(log_message)) + local ok, err = logger:log(log_message) + if not ok then res = false err_msg = "failed to log message" .. 
err @@ -75,28 +80,32 @@ end -- called in log phase of APISIX function _M.push_entry(conf, ctx, entry) - if batch_processor_manager:add_entry(conf, entry) then + local json_str, err = core.json.encode(entry) + if not json_str then + core.log.error('error occurred while encoding the data: ', err) + return + end + + local rfc5424_data = rfc5424.encode("SYSLOG", "INFO", ctx.var.host, + "apisix", ctx.var.pid, json_str) + + if batch_processor_manager:add_entry(conf, rfc5424_data) then return end -- Generate a function to be executed by the batch processor local cp_ctx = core.table.clone(ctx) - local func = function(entries, batch_max_size) - local data, err - if batch_max_size == 1 then - data, err = core.json.encode(entries[1]) -- encode as single {} - else - data, err = core.json.encode(entries) -- encode as array [{}] - end - - if not data then - return false, 'error occurred while encoding the data: ' .. err + local func = function(entries) + local items = {} + for _, e in ipairs(entries) do + table_insert(items, e) + core.log.debug("buffered logs:", e) end - return send_syslog_data(conf, data, cp_ctx) + return send_syslog_data(conf, table_concat(items), cp_ctx) end - batch_processor_manager:add_entry_to_new_processor(conf, entry, ctx, func) + batch_processor_manager:add_entry_to_new_processor(conf, rfc5424_data, ctx, func) end diff --git a/apisix/plugins/tencent-cloud-cls/cls-sdk.lua b/apisix/plugins/tencent-cloud-cls/cls-sdk.lua index d2b6e8ad4525..650d4ab0e92b 100644 --- a/apisix/plugins/tencent-cloud-cls/cls-sdk.lua +++ b/apisix/plugins/tencent-cloud-cls/cls-sdk.lua @@ -41,6 +41,7 @@ local type = type local tostring = tostring local setmetatable = setmetatable local pcall = pcall +local unpack = unpack -- api doc https://www.tencentcloud.com/document/product/614/16873 local MAX_SINGLE_VALUE_SIZE = 1 * 1024 * 1024 @@ -62,13 +63,20 @@ local params_cache = { local function get_ip(hostname) local _, resolved = socket.dns.toip(hostname) local ip_list = {} - for _, v 
in ipairs(resolved.ip) do - insert_tab(ip_list, v) + if not resolved.ip then + -- DNS parsing failure + local err = resolved + core.log.error("resolve ip failed, hostname: " .. hostname .. ", error: " .. err) + return nil, err + else + for _, v in ipairs(resolved.ip) do + insert_tab(ip_list, v) + end end return ip_list end -local host_ip = tostring(unpack(get_ip(core_gethostname()))) +local host_ip local log_group_list = {} local log_group_list_pb = { logGroupList = log_group_list, @@ -273,6 +281,15 @@ function _M.send_to_cls(self, logs) -- sums of all value in all LogGroup should be no more than 5MB -- so send whenever size exceed max size local group_list_start = 1 + + if not host_ip then + local host_ip_list, err = get_ip(core_gethostname()) + if not host_ip_list then + return false, err + end + host_ip = tostring(unpack(host_ip_list)) + end + for i = 1, #logs, 1 do local contents, log_size = normalize_log(logs[i]) if log_size > MAX_LOG_GROUP_VALUE_SIZE then diff --git a/apisix/plugins/traffic-split.lua b/apisix/plugins/traffic-split.lua index 38e272b7be66..1d621426a137 100644 --- a/apisix/plugins/traffic-split.lua +++ b/apisix/plugins/traffic-split.lua @@ -125,7 +125,7 @@ end local function parse_domain_for_node(node) - local host = node.host + local host = node.domain or node.host if not ipmatcher.parse_ipv4(host) and not ipmatcher.parse_ipv6(host) then @@ -233,6 +233,20 @@ function _M.access(conf, ctx) local match_passed = true for _, rule in ipairs(conf.rules) do + -- check if all upstream_ids are valid + if rule.weighted_upstreams then + for _, wupstream in ipairs(rule.weighted_upstreams) do + local ups_id = wupstream.upstream_id + if ups_id then + local ups = upstream.get_by_id(ups_id) + if not ups then + return 500, "failed to fetch upstream info by " + .. "upstream id: " .. 
ups_id + end + end + end + end + if not rule.match then match_passed = true weighted_upstreams = rule.weighted_upstreams diff --git a/apisix/plugins/ua-restriction.lua b/apisix/plugins/ua-restriction.lua index ec74e7592115..577dc2b67cbb 100644 --- a/apisix/plugins/ua-restriction.lua +++ b/apisix/plugins/ua-restriction.lua @@ -22,11 +22,8 @@ local type = type local str_strip = stringx.strip local re_find = ngx.re.find -local MATCH_NONE = 0 -local MATCH_ALLOW = 1 -local MATCH_DENY = 2 - -local lrucache_useragent = core.lrucache.new({ ttl = 300, count = 4096 }) +local lrucache_allow = core.lrucache.new({ ttl = 300, count = 4096 }) +local lrucache_deny = core.lrucache.new({ ttl = 300, count = 4096 }) local schema = { type = "object", @@ -58,6 +55,10 @@ local schema = { default = "Not allowed" }, }, + oneOf = { + {required = {"allowlist"}}, + {required = {"denylist"}} + } } local plugin_name = "ua-restriction" @@ -69,27 +70,56 @@ local _M = { schema = schema, } -local function match_user_agent(user_agent, conf) - user_agent = str_strip(user_agent) - if conf.allowlist then - for _, rule in ipairs(conf.allowlist) do +local function check_with_allow_list(user_agents, allowlist) + local check = function (user_agent) + user_agent = str_strip(user_agent) + + for _, rule in ipairs(allowlist) do if re_find(user_agent, rule, "jo") then - return MATCH_ALLOW + return true end end + return false end - if conf.denylist then - for _, rule in ipairs(conf.denylist) do + if type(user_agents) == "table" then + for _, v in ipairs(user_agents) do + if lrucache_allow(v, allowlist, check, v) then + return true + end + end + return false + else + return lrucache_allow(user_agents, allowlist, check, user_agents) + end +end + + +local function check_with_deny_list(user_agents, denylist) + local check = function (user_agent) + user_agent = str_strip(user_agent) + + for _, rule in ipairs(denylist) do if re_find(user_agent, rule, "jo") then - return MATCH_DENY + return false end end + return true 
end - return MATCH_NONE + if type(user_agents) == "table" then + for _, v in ipairs(user_agents) do + if lrucache_deny(v, denylist, check, v) then + return false + end + end + return true + else + return lrucache_deny(user_agents, denylist, check, user_agents) + end end + function _M.check_schema(conf) local ok, err = core.schema.check(schema, conf) @@ -118,6 +148,7 @@ function _M.check_schema(conf) return true end + function _M.access(conf, ctx) local user_agent = core.request.header(ctx, "User-Agent") @@ -128,21 +159,16 @@ function _M.access(conf, ctx) return 403, { message = conf.message } end end - local match = MATCH_NONE - if type(user_agent) == "table" then - for _, v in ipairs(user_agent) do - if type(v) == "string" then - match = lrucache_useragent(v, conf, match_user_agent, v, conf) - if match > MATCH_ALLOW then - break - end - end - end + + local is_passed + + if conf.allowlist then + is_passed = check_with_allow_list(user_agent, conf.allowlist) else - match = lrucache_useragent(user_agent, conf, match_user_agent, user_agent, conf) + is_passed = check_with_deny_list(user_agent, conf.denylist) end - if match > MATCH_ALLOW then + if not is_passed then return 403, { message = conf.message } end end diff --git a/apisix/plugins/wolf-rbac.lua b/apisix/plugins/wolf-rbac.lua index 62cb7b04fbba..154fde41a4ad 100644 --- a/apisix/plugins/wolf-rbac.lua +++ b/apisix/plugins/wolf-rbac.lua @@ -266,13 +266,13 @@ function _M.rewrite(conf, ctx) local consumers = consumer.consumers_kv(plugin_name, consumer_conf, "appid") core.log.info("------ consumers: ", core.json.delay_encode(consumers)) - local consumer = consumers[appid] - if not consumer then + local cur_consumer = consumers[appid] + if not cur_consumer then core.log.error("consumer [", appid, "] not found") return 401, fail_response("Invalid appid in rbac token") end - core.log.info("consumer: ", core.json.delay_encode(consumer)) - local server = consumer.auth_conf.server + core.log.info("consumer: ", 
core.json.delay_encode(cur_consumer)) + local server = cur_consumer.auth_conf.server local res = check_url_permission(server, appid, action, url, client_ip, wolf_token) @@ -287,7 +287,7 @@ function _M.rewrite(conf, ctx) local userId = userInfo.id username = userInfo.username nickname = userInfo.nickname or userInfo.username - local prefix = consumer.auth_conf.header_prefix or '' + local prefix = cur_consumer.auth_conf.header_prefix or '' core.response.set_header(prefix .. "UserId", userId) core.response.set_header(prefix .. "Username", username) core.response.set_header(prefix .. "Nickname", ngx.escape_uri(nickname)) @@ -303,6 +303,7 @@ function _M.rewrite(conf, ctx) ") failed, res: ",core.json.delay_encode(res)) return res.status, fail_response(res.err, { username = username, nickname = nickname }) end + consumer.attach_consumer(ctx, cur_consumer, consumer_conf) core.log.info("wolf-rbac check permission passed") end diff --git a/apisix/plugins/workflow.lua b/apisix/plugins/workflow.lua index 5adb5b455435..73d68375dd9d 100644 --- a/apisix/plugins/workflow.lua +++ b/apisix/plugins/workflow.lua @@ -52,7 +52,8 @@ local schema = { required = {"case", "actions"} } } - } + }, + required = {"rules"} } local plugin_name = "workflow" @@ -93,7 +94,7 @@ end local function rate_limit(conf, ctx) - return limit_count.rate_limit(conf, ctx) + return limit_count.rate_limit(conf, ctx, "limit-count", 1) end diff --git a/apisix/router.lua b/apisix/router.lua index 2fd14917c299..93b123e5b004 100644 --- a/apisix/router.lua +++ b/apisix/router.lua @@ -18,9 +18,7 @@ local require = require local http_route = require("apisix.http.route") local apisix_upstream = require("apisix.upstream") local core = require("apisix.core") -local plugin_checker = require("apisix.plugin").plugin_checker local str_lower = string.lower -local error = error local ipairs = ipairs @@ -29,7 +27,6 @@ local _M = {version = 0.3} local function filter(route) route.orig_modifiedIndex = route.modifiedIndex - 
route.update_count = 0 route.has_domain = false if not route.value then @@ -91,17 +88,6 @@ function _M.http_init_worker() _M.router_ssl = router_ssl _M.api = require("apisix.api_router") - - local global_rules, err = core.config.new("/global_rules", { - automatic = true, - item_schema = core.schema.global_rule, - checker = plugin_checker, - }) - if not global_rules then - error("failed to create etcd instance for fetching /global_rules : " - .. err) - end - _M.global_rules = global_rules end diff --git a/apisix/schema_def.lua b/apisix/schema_def.lua index f7b117af93da..cec1c53b3438 100644 --- a/apisix/schema_def.lua +++ b/apisix/schema_def.lua @@ -82,7 +82,7 @@ local label_value_def = { description = "value of label", type = "string", pattern = [[^\S+$]], - maxLength = 64, + maxLength = 256, minLength = 1 } _M.label_value_def = label_value_def @@ -283,6 +283,7 @@ local health_checker = { {required = {"active"}}, {required = {"active", "passive"}}, }, + additionalProperties = false, } @@ -501,14 +502,15 @@ local upstream_schema = { oneOf = { {required = {"nodes"}}, {required = {"service_name", "discovery_type"}}, - } + }, + additionalProperties = false } -- TODO: add more nginx variable support _M.upstream_hash_vars_schema = { type = "string", pattern = [[^((uri|server_name|server_addr|request_uri|remote_port]] - .. [[|remote_addr|query_string|host|hostname)]] + .. [[|remote_addr|query_string|host|hostname|mqtt_client_id)]] .. 
[[|arg_[0-9a-zA-z_-]+)$]], } @@ -662,6 +664,7 @@ _M.route = { {required = {"script", "plugin_config_id"}}, } }, + additionalProperties = false, } @@ -689,6 +692,7 @@ _M.service = { uniqueItems = true, }, }, + additionalProperties = false, } @@ -707,6 +711,7 @@ _M.consumer = { desc = desc_def, }, required = {"username"}, + additionalProperties = false, } @@ -725,8 +730,19 @@ _M.ssl = { default = "server", enum = {"server", "client"} }, - cert = certificate_scheme, - key = private_key_schema, + cert = { + oneOf = { + certificate_scheme, + -- TODO: uniformly define the schema of secret_uri + { type = "string", pattern = "^\\$(secret|env)://"} + } + }, + key = { + oneOf = { + private_key_schema, + { type = "string", pattern = "^\\$(secret|env)://"} + } + }, sni = { type = "string", pattern = host_def_pat, @@ -756,6 +772,15 @@ _M.ssl = { minimum = 0, default = 1, }, + skip_mtls_uri_regex = { + type = "array", + minItems = 1, + uniqueItems = true, + items = { + description = "uri regular expression to skip mtls", + type = "string", + } + }, }, required = {"ca"}, }, @@ -770,6 +795,15 @@ _M.ssl = { enum = {1, 0}, default = 1 }, + ssl_protocols = { + description = "set ssl protocols", + type = "array", + maxItems = 3, + uniqueItems = true, + items = { + enum = {"TLSv1.1", "TLSv1.2", "TLSv1.3"} + }, + }, validity_end = timestamp_def, validity_start = timestamp_def, create_time = timestamp_def, @@ -788,7 +822,8 @@ _M.ssl = { {required = {"snis", "key", "cert"}} } }, - ["else"] = {required = {"key", "cert"}} + ["else"] = {required = {"key", "cert"}}, + additionalProperties = false, } @@ -805,6 +840,7 @@ _M.proto = { } }, required = {"content"}, + additionalProperties = false, } @@ -816,7 +852,8 @@ _M.global_rule = { create_time = timestamp_def, update_time = timestamp_def }, - required = {"plugins"}, + required = {"id", "plugins"}, + additionalProperties = false, } @@ -850,6 +887,7 @@ local xrpc_protocol_schema = { dependencies = { name = {"conf"}, }, + additionalProperties = 
false, }, }, @@ -884,7 +922,8 @@ _M.stream_route = { upstream_id = id_schema, plugins = plugins_schema, protocol = xrpc_protocol_schema, - } + }, + additionalProperties = false, } @@ -900,6 +939,7 @@ _M.plugins = { stream = { type = "boolean" }, + additionalProperties = false, }, required = {"name"} } @@ -917,6 +957,7 @@ _M.plugin_config = { update_time = timestamp_def }, required = {"id", "plugins"}, + additionalProperties = false, } @@ -931,6 +972,7 @@ _M.consumer_group = { update_time = timestamp_def }, required = {"id", "plugins"}, + additionalProperties = false, } diff --git a/apisix/secret.lua b/apisix/secret.lua index 7dcf9cf4ab22..ca9b09190ebc 100644 --- a/apisix/secret.lua +++ b/apisix/secret.lua @@ -110,14 +110,27 @@ function _M.init_worker() end -local function parse_secret_uri(secret_uri) +local function check_secret_uri(secret_uri) -- Avoid the error caused by has_prefix to cause a crash. if type(secret_uri) ~= "string" then - return nil, "error secret_uri type: " .. type(secret_uri) + return false, "error secret_uri type: " .. type(secret_uri) + end + + if not string.has_prefix(secret_uri, PREFIX) and + not string.has_prefix(upper(secret_uri), core.env.PREFIX) then + return false, "error secret_uri prefix: " .. secret_uri end - if not string.has_prefix(secret_uri, PREFIX) then - return nil, "error secret_uri prefix: " .. secret_uri + return true +end + +_M.check_secret_uri = check_secret_uri + + +local function parse_secret_uri(secret_uri) + local is_secret_uri, err = check_secret_uri(secret_uri) + if not is_secret_uri then + return is_secret_uri, err end local path = sub(secret_uri, #PREFIX + 1) diff --git a/apisix/ssl.lua b/apisix/ssl.lua index 18898027aee5..e14c92fc3c41 100644 --- a/apisix/ssl.lua +++ b/apisix/ssl.lua @@ -14,8 +14,11 @@ -- See the License for the specific language governing permissions and -- limitations under the License. 
-- -local core = require("apisix.core") -local ngx_ssl = require("ngx.ssl") +local core = require("apisix.core") +local secret = require("apisix.secret") +local ngx_ssl = require("ngx.ssl") +local ngx_ssl_client = require("ngx.ssl.clienthello") + local ngx_encode_base64 = ngx.encode_base64 local ngx_decode_base64 = ngx.decode_base64 local aes = require("resty.aes") @@ -37,8 +40,13 @@ local pkey_cache = core.lrucache.new { local _M = {} -function _M.server_name() - local sni, err = ngx_ssl.server_name() +function _M.server_name(clienthello) + local sni, err + if clienthello then + sni, err = ngx_ssl_client.get_client_hello_server_name() + else + sni, err = ngx_ssl.server_name() + end if err then return nil, err end @@ -56,6 +64,14 @@ function _M.server_name() end +function _M.set_protocols_by_clienthello(ssl_protocols) + if ssl_protocols then + return ngx_ssl_client.set_protocols(ssl_protocols) + end + return true +end + + local function init_iv_tbl(ivs) local _aes_128_cbc_with_iv_tbl = core.table.new(2, 0) local type_ivs = type(ivs) @@ -252,9 +268,13 @@ function _M.check_ssl_conf(in_dp, conf) end end - local ok, err = validate(conf.cert, conf.key) - if not ok then - return nil, err + if not secret.check_secret_uri(conf.cert) and + not secret.check_secret_uri(conf.key) then + + local ok, err = validate(conf.cert, conf.key) + if not ok then + return nil, err + end end if conf.type == "client" then @@ -268,9 +288,13 @@ function _M.check_ssl_conf(in_dp, conf) end for i = 1, numcerts do - local ok, err = validate(conf.certs[i], conf.keys[i]) - if not ok then - return nil, "failed to handle cert-key pair[" .. i .. "]: " .. err + if not secret.check_secret_uri(conf.cert[i]) and + not secret.check_secret_uri(conf.key[i]) then + + local ok, err = validate(conf.certs[i], conf.keys[i]) + if not ok then + return nil, "failed to handle cert-key pair[" .. i .. "]: " .. 
err + end end end diff --git a/apisix/ssl/router/radixtree_sni.lua b/apisix/ssl/router/radixtree_sni.lua index fd1f55c395dc..b6824852e0b3 100644 --- a/apisix/ssl/router/radixtree_sni.lua +++ b/apisix/ssl/router/radixtree_sni.lua @@ -18,8 +18,10 @@ local get_request = require("resty.core.base").get_request local router_new = require("apisix.utils.router").new local core = require("apisix.core") local apisix_ssl = require("apisix.ssl") +local secret = require("apisix.secret") local ngx_ssl = require("ngx.ssl") local config_util = require("apisix.core.config_util") +local ngx = ngx local ipairs = ipairs local type = type local error = error @@ -183,6 +185,7 @@ function _M.match_and_set(api_ctx, match_only, alt_sni) for _, msni in ipairs(api_ctx.matched_sni) do if sni_rev == msni or not str_find(sni_rev, ".", #msni) then matched = true + break end end if not matched then @@ -203,16 +206,40 @@ function _M.match_and_set(api_ctx, match_only, alt_sni) end end - local matched_ssl = api_ctx.matched_ssl - core.log.info("debug - matched: ", core.json.delay_encode(matched_ssl, true)) + core.log.info("debug - matched: ", core.json.delay_encode(api_ctx.matched_ssl, true)) if match_only then return true end + ok, err = _M.set(api_ctx.matched_ssl, sni) + if not ok then + return false, err + end + + return true +end + + +function _M.set(matched_ssl, sni) + if not matched_ssl then + return false, "failed to match ssl certificate" + end + local ok, err + if not sni then + sni, err = apisix_ssl.server_name() + if type(sni) ~= "string" then + local advise = "please check if the client requests via IP or uses an outdated " .. + "protocol. If you need to report an issue, " .. + "provide a packet capture file of the TLS handshake." + return false, "failed to find SNI: " .. 
(err or advise) + end + end ngx_ssl.clear_certs() - ok, err = _M.set_cert_and_key(sni, matched_ssl.value) + local new_ssl_value = secret.fetch_secrets(matched_ssl.value) or matched_ssl.value + + ok, err = _M.set_cert_and_key(sni, new_ssl_value) if not ok then return false, err end @@ -226,7 +253,11 @@ function _M.match_and_set(api_ctx, match_only, alt_sni) return false, "failed to parse client cert: " .. err end - local ok, err = ngx_ssl.verify_client(parsed_cert, depth) + local reject_in_handshake = + (ngx.config.subsystem == "stream") or + (matched_ssl.value.client.skip_mtls_uri_regex == nil) + local ok, err = ngx_ssl.verify_client(parsed_cert, depth, + reject_in_handshake) if not ok then return false, err end diff --git a/apisix/stream/xrpc/protocols/dubbo/init.lua b/apisix/stream/xrpc/protocols/dubbo/init.lua new file mode 100644 index 000000000000..19160d6c544e --- /dev/null +++ b/apisix/stream/xrpc/protocols/dubbo/init.lua @@ -0,0 +1,231 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- +local core = require("apisix.core") +local sdk = require("apisix.stream.xrpc.sdk") +local xrpc_socket = require("resty.apisix.stream.xrpc.socket") +local math_random = math.random +local ngx = ngx +local OK = ngx.OK +local str_format = string.format +local DECLINED = ngx.DECLINED +local DONE = ngx.DONE +local bit = require("bit") +local ffi = require("ffi") +local ffi_str = ffi.string + + +-- dubbo protocol spec: https://cn.dubbo.apache.org/zh-cn/overview/reference/protocols/tcp/ +local header_len = 16 +local _M = {} + + +function _M.init_downstream(session) + session.req_id_seq = 0 + session.resp_id_seq = 0 + session.cmd_labels = { session.route.id, "" } + return xrpc_socket.downstream.socket() +end + + +local function parse_dubbo_header(header) + for i = 1, header_len do + local currentByte = header:byte(i) + if not currentByte then + return nil + end + end + + local magic_number = str_format("%04x", header:byte(1) * 256 + header:byte(2)) + local message_flag = header:byte(3) + local status = header:byte(4) + local request_id = 0 + for i = 5, 12 do + request_id = request_id * 256 + header:byte(i) + end + + local byte13Val = header:byte(13) * 256 * 256 * 256 + local byte14Val = header:byte(14) * 256 * 256 + local data_length = byte13Val + byte14Val + header:byte(15) * 256 + header:byte(16) + + local is_request = bit.band(bit.rshift(message_flag, 7), 0x01) == 1 and 1 or 0 + local is_two_way = bit.band(bit.rshift(message_flag, 6), 0x01) == 1 and 1 or 0 + local is_event = bit.band(bit.rshift(message_flag, 5), 0x01) == 1 and 1 or 0 + + return { + magic_number = magic_number, + message_flag = message_flag, + is_request = is_request, + is_two_way = is_two_way, + is_event = is_event, + status = status, + request_id = request_id, + data_length = data_length + } +end + + +local function read_data(sk, is_req) + local header_data, err = sk:read(header_len) + if not header_data then + return nil, err, false + end + + local header_str = ffi_str(header_data, header_len) + 
local header_info = parse_dubbo_header(header_str) + if not header_info then + return nil, "header insufficient", false + end + + local is_valid_magic_number = header_info.magic_number == "dabb" + if not is_valid_magic_number then + return nil, str_format("unknown magic number: \"%s\"", header_info.magic_number), false + end + + local body_data, err = sk:read(header_info.data_length) + if not body_data then + core.log.error("failed to read dubbo request body") + return nil, err, false + end + + local ctx = ngx.ctx + ctx.dubbo_serialization_id = bit.band(header_info.message_flag, 0x1F) + + if is_req then + ctx.dubbo_req_body_data = body_data + else + ctx.dubbo_rsp_body_data = body_data + end + + return true, nil, false +end + + +local function read_req(sk) + return read_data(sk, true) +end + + +local function read_reply(sk) + return read_data(sk, false) +end + + +local function handle_reply(session, sk) + local ok, err = read_reply(sk) + if not ok then + return nil, err + end + + local ctx = sdk.get_req_ctx(session, 10) + + return ctx +end + + +function _M.from_downstream(session, downstream) + local read_pipeline = false + session.req_id_seq = session.req_id_seq + 1 + local ctx = sdk.get_req_ctx(session, session.req_id_seq) + session._downstream_ctx = ctx + while true do + local ok, err, pipelined = read_req(downstream) + if not ok then + if err ~= "timeout" and err ~= "closed" then + core.log.error("failed to read request: ", err) + end + + if read_pipeline and err == "timeout" then + break + end + + return DECLINED + end + + if not pipelined then + break + end + + if not read_pipeline then + read_pipeline = true + -- set minimal read timeout to read pipelined data + downstream:settimeouts(0, 0, 1) + end + end + + if read_pipeline then + -- set timeout back + downstream:settimeouts(0, 0, 0) + end + + return OK, ctx +end + + +function _M.connect_upstream(session, ctx) + local conf = session.upstream_conf + local nodes = conf.nodes + if #nodes == 0 then + 
core.log.error("failed to connect: no nodes") + return DECLINED + end + + local node = nodes[math_random(#nodes)] + local sk = sdk.connect_upstream(node, conf) + if not sk then + return DECLINED + end + + core.log.debug("dubbo_connect_upstream end") + + return OK, sk +end + +function _M.disconnect_upstream(session, upstream) + sdk.disconnect_upstream(upstream, session.upstream_conf) +end + + +function _M.to_upstream(session, ctx, downstream, upstream) + local ok, _ = upstream:move(downstream) + if not ok then + return DECLINED + end + + return OK +end + + +function _M.from_upstream(session, downstream, upstream) + local ctx,err = handle_reply(session, upstream) + if err then + return DECLINED + end + + local ok, _ = downstream:move(upstream) + if not ok then + return DECLINED + end + + return DONE, ctx +end + + +function _M.log(_, _) +end + + +return _M diff --git a/apisix/stream/xrpc/protocols/dubbo/schema.lua b/apisix/stream/xrpc/protocols/dubbo/schema.lua new file mode 100644 index 000000000000..3a9d73325498 --- /dev/null +++ b/apisix/stream/xrpc/protocols/dubbo/schema.lua @@ -0,0 +1,32 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- +local core = require("apisix.core") + + +local schema = { + type = "object", +} + +local _M = {} + + +function _M.check_schema(conf) + return core.schema.check(schema, conf) +end + + +return _M diff --git a/apisix/utils/log-util.lua b/apisix/utils/log-util.lua index 4aecf290e4ee..8e9cb9c01361 100644 --- a/apisix/utils/log-util.lua +++ b/apisix/utils/log-util.lua @@ -210,7 +210,27 @@ function _M.inject_get_full_log(f) end +local function is_match(match, ctx) + local match_result + for _, m in pairs(match) do + local expr, _ = expr.new(m) + match_result = expr:eval(ctx.var) + if match_result then + break + end + end + + return match_result +end + + function _M.get_log_entry(plugin_name, conf, ctx) + -- If the "match" configuration is set and the matching conditions are not met, + -- then do not log the message. + if conf.match and not is_match(conf.match, ctx) then + return + end + local metadata = plugin.plugin_metadata(plugin_name) core.log.info("metadata: ", core.json.delay_encode(metadata)) diff --git a/apisix/plugins/slslog/rfc5424.lua b/apisix/utils/rfc5424.lua similarity index 83% rename from apisix/plugins/slslog/rfc5424.lua rename to apisix/utils/rfc5424.lua index 5d09a58a5165..e046194bf349 100644 --- a/apisix/plugins/slslog/rfc5424.lua +++ b/apisix/utils/rfc5424.lua @@ -79,12 +79,13 @@ local Severity = { } local log_util = require("apisix.utils.log-util") - +local ipairs = ipairs +local str_format = string.format local _M = { version = 0.1 } -function _M.encode(facility, severity, hostname, appname, pid, project, - logstore, access_key_id, access_key_secret, msg) + +function _M.encode(facility, severity, hostname, appname, pid, msg, structured_data) local pri = (Facility[facility] * 8 + Severity[severity]) local t = log_util.get_rfc3339_zulu_timestamp() if not hostname then @@ -95,10 +96,19 @@ function _M.encode(facility, severity, hostname, appname, pid, project, appname = "-" end - return "<" .. pri .. ">1 " .. t .. " " .. hostname .. " " .. 
appname .. " " .. pid - .. " - [logservice project=\"" .. project .. "\" logstore=\"" .. logstore - .. "\" access-key-id=\"" .. access_key_id .. "\" access-key-secret=\"" - .. access_key_secret .. "\"] " .. msg .. "\n" + local structured_data_str = "-" + + if structured_data then + structured_data_str = "[logservice" + for _, sd_param in ipairs(structured_data) do + structured_data_str = structured_data_str .. " " .. sd_param.name + .. "=\"" .. sd_param.value .. "\"" + end + structured_data_str = structured_data_str .. "]" + end + + return str_format("<%d>1 %s %s %s %d - %s %s\n", pri, t, hostname, + appname, pid, structured_data_str, msg) end return _M diff --git a/apisix/wasm.lua b/apisix/wasm.lua index a27641504a2b..c8b863aeac9d 100644 --- a/apisix/wasm.lua +++ b/apisix/wasm.lua @@ -15,6 +15,7 @@ -- limitations under the License. -- local core = require("apisix.core") +local type = type local support_wasm, wasm = pcall(require, "resty.proxy-wasm") local ngx_var = ngx.var @@ -23,8 +24,10 @@ local schema = { type = "object", properties = { conf = { - type = "string", - minLength = 1, + oneOf = { + { type = "object", minProperties = 1}, + { type = "string", minLength = 1}, + } }, }, required = {"conf"} @@ -51,7 +54,13 @@ local function fetch_plugin_ctx(conf, ctx, plugin) local plugin_ctx = ctxs[key] local err if not plugin_ctx then - plugin_ctx, err = wasm.on_configure(plugin, conf.conf) + if type(conf.conf) == "table" then + plugin_ctx, err = wasm.on_configure(plugin, core.json.encode(conf.conf)) + elseif type(conf.conf) == "string" then + plugin_ctx, err = wasm.on_configure(plugin, conf.conf) + else + return nil, "invalid conf type" + end if not plugin_ctx then return nil, err end diff --git a/benchmark/fake-apisix/conf/nginx.conf b/benchmark/fake-apisix/conf/nginx.conf index 9ae60d248141..f35131cbe2ea 100644 --- a/benchmark/fake-apisix/conf/nginx.conf +++ b/benchmark/fake-apisix/conf/nginx.conf @@ -103,25 +103,11 @@ http { ### the following x-forwarded-* headers 
is to send to upstream server - set $var_x_forwarded_for $remote_addr; set $var_x_forwarded_proto $scheme; set $var_x_forwarded_host $host; set $var_x_forwarded_port $server_port; - if ($http_x_forwarded_for != "") { - set $var_x_forwarded_for "${http_x_forwarded_for}, ${realip_remote_addr}"; - } - if ($http_x_forwarded_proto != "") { - set $var_x_forwarded_proto $http_x_forwarded_proto; - } - if ($http_x_forwarded_host != "") { - set $var_x_forwarded_host $http_x_forwarded_host; - } - if ($http_x_forwarded_port != "") { - set $var_x_forwarded_port $http_x_forwarded_port; - } - - proxy_set_header X-Forwarded-For $var_x_forwarded_for; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; proxy_set_header X-Forwarded-Proto $var_x_forwarded_proto; proxy_set_header X-Forwarded-Host $var_x_forwarded_host; proxy_set_header X-Forwarded-Port $var_x_forwarded_port; diff --git a/ci/centos7-ci.sh b/ci/centos7-ci.sh index 251dd33639b3..cf2d0387bbc2 100755 --- a/ci/centos7-ci.sh +++ b/ci/centos7-ci.sh @@ -33,7 +33,7 @@ install_dependencies() { # install openresty to make apisix's rpm test work yum install -y yum-utils && yum-config-manager --add-repo https://openresty.org/package/centos/openresty.repo - yum install -y openresty openresty-debug openresty-openssl111-debug-devel pcre pcre-devel + yum install -y openresty-1.21.4.2 openresty-debug-1.21.4.2 openresty-openssl111-debug-devel pcre pcre-devel # install luarocks ./utils/linux-install-luarocks.sh @@ -73,9 +73,6 @@ install_dependencies() { # install nodejs install_nodejs - # install rust - install_rust - # grpc-web server && client cd t/plugin/grpc-web ./setup.sh diff --git a/ci/common.sh b/ci/common.sh index 509647b001d2..e1961fc6b01e 100644 --- a/ci/common.sh +++ b/ci/common.sh @@ -21,6 +21,7 @@ export_or_prefix() { export OPENRESTY_PREFIX="/usr/local/openresty-debug" export APISIX_MAIN="https://raw.githubusercontent.com/apache/incubator-apisix/master/rockspec/apisix-master-0.rockspec" export 
PATH=$OPENRESTY_PREFIX/nginx/sbin:$OPENRESTY_PREFIX/luajit/bin:$OPENRESTY_PREFIX/bin:$PATH + export OPENSSL111_BIN=$OPENRESTY_PREFIX/openssl111/bin/openssl } create_lua_deps() { @@ -99,11 +100,6 @@ install_nodejs () { npm config set registry https://registry.npmjs.org/ } -install_rust () { - curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sudo sh -s -- -y - source "$HOME/.cargo/env" -} - set_coredns() { # test a domain name is configured as upstream echo "127.0.0.1 test.com" | sudo tee -a /etc/hosts diff --git a/ci/init-plugin-test-service.sh b/ci/init-plugin-test-service.sh index 881f466c710d..d670b7fa39d4 100755 --- a/ci/init-plugin-test-service.sh +++ b/ci/init-plugin-test-service.sh @@ -22,8 +22,8 @@ after() { docker exec -i apache-apisix_kafka-server2_1 /opt/bitnami/kafka/bin/kafka-topics.sh --create --zookeeper zookeeper-server2:2181 --replication-factor 1 --partitions 1 --topic test4 # prepare openwhisk env - docker pull openwhisk/action-nodejs-v14:nightly - docker run --rm -d --name openwhisk -p 3233:3233 -p 3232:3232 -v /var/run/docker.sock:/var/run/docker.sock openwhisk/standalone:nightly + docker pull openwhisk/action-nodejs-v14:1.20.0 + docker run --rm -d --name openwhisk -p 3233:3233 -p 3232:3232 -v /var/run/docker.sock:/var/run/docker.sock openwhisk/standalone:1.0.0 docker exec -i openwhisk waitready docker exec -i openwhisk bash -c "wsk package create pkg" docker exec -i openwhisk bash -c "wsk action update /guest/pkg/testpkg <(echo 'function main(args){return {\"hello\": \"world\"}}') --kind nodejs:14" @@ -42,8 +42,18 @@ after() { # wait for keycloak ready bash -c 'while true; do curl -s localhost:8080 &>/dev/null; ret=$?; [[ $ret -eq 0 ]] && break; sleep 3; done' - docker cp ci/kcadm_configure_cas.sh apisix_keycloak_new:/tmp/ - docker exec apisix_keycloak_new bash /tmp/kcadm_configure_cas.sh + + # install jq + wget https://github.com/stedolan/jq/releases/download/jq-1.6/jq-linux64 -O jq + chmod +x jq + docker cp jq 
apisix_keycloak:/usr/bin/ + + # configure keycloak + docker exec apisix_keycloak bash /tmp/kcadm_configure_cas.sh + docker exec apisix_keycloak bash /tmp/kcadm_configure_university.sh + + echo 'CREATE TABLE default.test (`host` String, `client_ip` String, `route_id` String, `service_id` String, `@timestamp` String, PRIMARY KEY(`@timestamp`)) ENGINE = MergeTree()' | curl 'http://localhost:8123/' --data-binary @- + echo 'CREATE TABLE default.test (`host` String, `client_ip` String, `route_id` String, `service_id` String, `@timestamp` String, PRIMARY KEY(`@timestamp`)) ENGINE = MergeTree()' | curl 'http://localhost:8124/' --data-binary @- } before() { diff --git a/ci/linux-install-openresty.sh b/ci/linux-install-openresty.sh index 3bc2d18ab35e..8d4b6f87224a 100755 --- a/ci/linux-install-openresty.sh +++ b/ci/linux-install-openresty.sh @@ -38,6 +38,13 @@ OPENSSL3_PREFIX=${OPENSSL3_PREFIX-/home/runner} SSL_LIB_VERSION=${SSL_LIB_VERSION-openssl} if [ "$OPENRESTY_VERSION" == "source" ]; then + export openssl_prefix=/usr/local/openresty/openssl111 + export zlib_prefix=/usr/local/openresty/zlib + export pcre_prefix=/usr/local/openresty/pcre + + export cc_opt="-DNGX_LUA_ABORT_AT_PANIC -I${zlib_prefix}/include -I${pcre_prefix}/include -I${openssl_prefix}/include" + export ld_opt="-L${zlib_prefix}/lib -L${pcre_prefix}/lib -L${openssl_prefix}/lib -Wl,-rpath,${zlib_prefix}/lib:${pcre_prefix}/lib:${openssl_prefix}/lib" + if [ "$COMPILE_OPENSSL3" == "yes" ]; then apt install -y build-essential git clone https://github.com/openssl/openssl @@ -72,7 +79,8 @@ if [ "$OPENRESTY_VERSION" == "source" ]; then chmod +x build-apisix-base.sh ./build-apisix-base.sh latest - sudo apt-get install openresty-openssl111-debug-dev + sudo apt-get install -y openresty-openssl111 openresty-openssl111-debug-dev libldap2-dev openresty-pcre openresty-zlib + exit 0 fi diff --git a/ci/linux_apisix_current_luarocks_runner.sh b/ci/linux_apisix_current_luarocks_runner.sh index a8836f43b691..96aac508f762 100755 
--- a/ci/linux_apisix_current_luarocks_runner.sh +++ b/ci/linux_apisix_current_luarocks_runner.sh @@ -34,9 +34,6 @@ script() { sudo rm -rf /usr/local/share/lua/5.1/apisix - # install rust - install_rust - # install APISIX with local version luarocks install rockspec/apisix-master-0.rockspec --only-deps > build.log 2>&1 || (cat build.log && exit 1) luarocks make rockspec/apisix-master-0.rockspec > build.log 2>&1 || (cat build.log && exit 1) diff --git a/ci/linux_apisix_master_luarocks_runner.sh b/ci/linux_apisix_master_luarocks_runner.sh index 8931ad82c5f5..afc487ddd160 100755 --- a/ci/linux_apisix_master_luarocks_runner.sh +++ b/ci/linux_apisix_master_luarocks_runner.sh @@ -38,9 +38,6 @@ script() { mkdir tmp && cd tmp cp -r ../utils ./ - # install rust - install_rust - # install APISIX by luarocks luarocks install $APISIX_MAIN > build.log 2>&1 || (cat build.log && exit 1) cp ../bin/apisix /usr/local/bin/apisix @@ -52,6 +49,13 @@ script() { sudo PATH=$PATH apisix init sudo PATH=$PATH apisix start sudo PATH=$PATH apisix quit + for i in {1..10} + do + if [ ! -f /usr/local/apisix/logs/nginx.pid ];then + break + fi + sleep 0.3 + done sudo PATH=$PATH apisix start sudo PATH=$PATH apisix stop diff --git a/ci/linux_openresty_common_runner.sh b/ci/linux_openresty_common_runner.sh index c96f8f6c383d..e4ed00905c86 100755 --- a/ci/linux_openresty_common_runner.sh +++ b/ci/linux_openresty_common_runner.sh @@ -33,9 +33,6 @@ do_install() { ./ci/linux-install-etcd-client.sh - # install rust - install_rust - create_lua_deps # sudo apt-get install tree -y @@ -71,6 +68,8 @@ script() { export_or_prefix openresty -V + make init + set_coredns ./t/grpc_server_example/grpc_server_example \ diff --git a/ci/linux_openresty_runner.sh b/ci/linux_openresty_runner.sh index 2cdc87b218f3..2e39224efc59 100755 --- a/ci/linux_openresty_runner.sh +++ b/ci/linux_openresty_runner.sh @@ -18,5 +18,4 @@ export OPENRESTY_VERSION=source -export TEST_CI_USE_GRPC=true . 
./ci/linux_openresty_common_runner.sh diff --git a/ci/pod/docker-compose.plugin.yml b/ci/pod/docker-compose.plugin.yml index e7878ab61b81..748b28b868f6 100644 --- a/ci/pod/docker-compose.plugin.yml +++ b/ci/pod/docker-compose.plugin.yml @@ -23,36 +23,23 @@ services: # The latest image is the latest stable version image: redis:latest restart: unless-stopped + volumes: + - ./t/certs:/certs + command: "--tls-port 6380 \ + --tls-cert-file /certs/mtls_server.crt \ + --tls-key-file /certs/mtls_server.key \ + --tls-ca-cert-file /certs/mtls_ca.crt \ + --tls-auth-clients no \ + --user alice on +@all ~* \\&* \\>somepassword" ports: - "6379:6379" + - "6380:6380" networks: apisix_net: - ## keycloak apisix_keycloak: - image: sshniro/keycloak-apisix:1.0.0 - environment: - KEYCLOAK_USER: admin - KEYCLOAK_PASSWORD: 123456 - restart: unless-stopped - ports: - - "8090:8080" - - "8443:8443" - networks: - apisix_net: - - ## keycloak - # The keycloak official has two types of docker images: - # * legacy WildFly distribution - # * new Quarkus based distribution - # Here we choose new version, because it's mainstream and - # supports kcadm.sh to init the container for test. - # The original keycloak service `apisix_keycloak` is - # third-party personal customized image and for OIDC test only. - # We should unify both containers in future. - apisix_keycloak_new: - container_name: apisix_keycloak_new + container_name: apisix_keycloak image: quay.io/keycloak/keycloak:18.0.2 # use host network because in CAS auth, # keycloak needs to send back-channel POST to apisix. 
@@ -60,10 +47,16 @@ services: environment: KEYCLOAK_ADMIN: admin KEYCLOAK_ADMIN_PASSWORD: admin + KC_HTTPS_CERTIFICATE_FILE: /opt/keycloak/conf/server.crt.pem + KC_HTTPS_CERTIFICATE_KEY_FILE: /opt/keycloak/conf/server.key.pem restart: unless-stopped - command: ["start-dev", "--http-port 8080"] + command: ["start-dev"] volumes: - /opt/keycloak-protocol-cas-18.0.2.jar:/opt/keycloak/providers/keycloak-protocol-cas-18.0.2.jar + - ./ci/pod/keycloak/server.crt.pem:/opt/keycloak/conf/server.crt.pem + - ./ci/pod/keycloak/server.key.pem:/opt/keycloak/conf/server.key.pem + - ./ci/pod/keycloak/kcadm_configure_cas.sh:/tmp/kcadm_configure_cas.sh + - ./ci/pod/keycloak/kcadm_configure_university.sh:/tmp/kcadm_configure_university.sh ## kafka-cluster zookeeper-server1: @@ -150,6 +143,15 @@ services: - ./t/certs:/certs + ## Grafana Loki + loki: + image: grafana/loki:2.8.0 + command: -config.file=/etc/loki/local-config.yaml -auth.enabled -querier.multi-tenant-queries-enabled + ports: + - "3100:3100" + networks: + - loki_net + rocketmq_namesrv: image: apacherocketmq/rocketmq:4.6.0 container_name: rmqnamesrv @@ -194,18 +196,6 @@ services: networks: opa_net: - # Splunk HEC Logging Service - splunk: - image: splunk/splunk:8.2.3 - restart: unless-stopped - ports: - - "18088:8088" - environment: - SPLUNK_PASSWORD: "ApacheAPISIX@666" - SPLUNK_START_ARGS: "--accept-license" - SPLUNK_HEC_TOKEN: "BD274822-96AA-4DA6-90EC-18940FB2414C" - SPLUNK_HEC_SSL: "False" - # Elasticsearch Logger Service elasticsearch-noauth: image: docker.elastic.co/elasticsearch/elasticsearch:7.17.1 @@ -319,9 +309,52 @@ services: ports: - '8888:8080' + vector: + image: timberio/vector:0.29.1-debian + container_name: vector + volumes: + - ./ci/pod/vector:/etc/vector/ + - ./t/certs:/certs + ports: + - '3000:3000' #tcp logger + - '8127:8127/udp' + - '43000:43000' + - '5140:5140' + - "18088:18088" # For splunk logging tests + - '5150:5150/udp' + - "3001:3001" #http logger + networks: + vector_net: + + clickhouse: + image: 
clickhouse/clickhouse-server:23.4.2-alpine + container_name: clickhouse + ports: + - '8123:8123' + networks: + clickhouse_net: + + clickhouse2: + image: clickhouse/clickhouse-server:23.4.2-alpine + container_name: clickhouse2 + ports: + - '8124:8123' + networks: + clickhouse_net: + otel-collector: + image: otel/opentelemetry-collector-contrib + volumes: + - ./ci/pod/otelcol-contrib:/etc/otelcol-contrib:rw + ports: + - '4318:4318' + + networks: apisix_net: kafka_net: skywalk_net: rocketmq_net: opa_net: + vector_net: + clickhouse_net: + loki_net: diff --git a/ci/kcadm_configure_cas.sh b/ci/pod/keycloak/kcadm_configure_cas.sh similarity index 100% rename from ci/kcadm_configure_cas.sh rename to ci/pod/keycloak/kcadm_configure_cas.sh diff --git a/ci/pod/keycloak/kcadm_configure_university.sh b/ci/pod/keycloak/kcadm_configure_university.sh new file mode 100644 index 000000000000..162b624f8ab6 --- /dev/null +++ b/ci/pod/keycloak/kcadm_configure_university.sh @@ -0,0 +1,90 @@ +#!/usr/bin/env bash + +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +export PATH=/opt/keycloak/bin:$PATH + +kcadm.sh config credentials --server http://localhost:8080 --realm master --user admin --password admin + +# create realm University +kcadm.sh create realms -s realm=University -s enabled=true + +# create roles `Teacher, Student` +kcadm.sh create roles -r University -s name=Teacher +kcadm.sh create roles -r University -s name=Student + +# create users `teacher@gmail.com, student@gmail.com` +kcadm.sh create users -r University -s username=teacher@gmail.com -s enabled=true +kcadm.sh create users -r University -s username=student@gmail.com -s enabled=true + +# set password +kcadm.sh set-password -r University --username teacher@gmail.com --new-password 123456 +kcadm.sh set-password -r University --username student@gmail.com --new-password 123456 + +# bind roles to users +kcadm.sh add-roles -r University --uusername teacher@gmail.com --rolename Teacher +kcadm.sh add-roles -r University --uusername student@gmail.com --rolename Student + +# create client course_management +kcadm.sh create clients -r University -s clientId=course_management -s enabled=true -s clientAuthenticatorType=client-secret -s secret=d1ec69e9-55d2-4109-a3ea-befa071579d5 + +client_id=$(kcadm.sh get clients -r University --fields id,clientId 2>/dev/null | jq -r '.[] | select(.clientId=='\"course_management\"') | .id') +teacher_id=$(kcadm.sh get roles -r University --fields id,name 2>/dev/null | jq -r '.[] | select(.name=='\"Teacher\"') | .id') +student_id=$(kcadm.sh get roles -r University --fields id,name 2>/dev/null | jq -r '.[] | select(.name=='\"Student\"') | .id') + +# update client course_management +kcadm.sh update clients/${client_id} -r University -s protocol=openid-connect -s standardFlowEnabled=true \ + -s implicitFlowEnabled=true -s directAccessGrantsEnabled=true -s serviceAccountsEnabled=true \ + -s authorizationServicesEnabled=true -s 'redirectUris=["*"]' -s 'webOrigins=["*"]' + +kcadm.sh update clients/${client_id}/authz/resource-server -r 
University -s allowRemoteResourceManagement=false -s policyEnforcementMode="ENFORCING" + +# create authz-resource with name `course_resource`, uri `/course/*`, scope `DELETE, delete, view, GET` +kcadm.sh create clients/${client_id}/authz/resource-server/resource -r University -s name=course_resource \ + -s ownerManagedAccess=false -s uris='["/course/*"]' -s scopes='[{"name": "DELETE"},{"name": "view"},{"name": "GET"},{"name": "delete"}]' + +course_resource_id=$(kcadm.sh get clients/${client_id}/authz/resource-server/resource -r University --fields _id,name 2>/dev/null | jq -r '.[] | select(.name=='\"course_resource\"') | ._id') +DELETE_scope_id=$(kcadm.sh get clients/${client_id}/authz/resource-server/scope -r University --fields id,name 2>/dev/null | jq -r '.[] | select(.name=='\"DELETE\"') | .id') +delete_scope_id=$(kcadm.sh get clients/${client_id}/authz/resource-server/scope -r University --fields id,name 2>/dev/null | jq -r '.[] | select(.name=='\"delete\"') | .id') +GET_scope_id=$(kcadm.sh get clients/${client_id}/authz/resource-server/scope -r University --fields id,name 2>/dev/null | jq -r '.[] | select(.name=='\"GET\"') | .id') +view_scope_id=$(kcadm.sh get clients/${client_id}/authz/resource-server/scope -r University --fields id,name 2>/dev/null | jq -r '.[] | select(.name=='\"view\"') | .id') + +# create authz-policy `AllowTeacherPolicy, AllowStudentPolicy` +kcadm.sh create clients/${client_id}/authz/resource-server/policy/role -r University \ + -s name="AllowTeacherPolicy" -s logic="POSITIVE" -s decisionStrategy="UNANIMOUS" \ + -s roles='[{"id": '\"${teacher_id}\"'}]' + +kcadm.sh create clients/${client_id}/authz/resource-server/policy/role -r University \ + -s name="AllowStudentPolicy" -s logic="POSITIVE" -s decisionStrategy="UNANIMOUS" \ + -s roles='[{"id": '\"${student_id}\"'}]' + +allow_teacher_policy_id=$(kcadm.sh get clients/${client_id}/authz/resource-server/policy -r University --fields id,name 2>/dev/null | jq -r '.[] | 
select(.name=='\"AllowTeacherPolicy\"') | .id') +allow_student_policy_id=$(kcadm.sh get clients/${client_id}/authz/resource-server/policy -r University --fields id,name 2>/dev/null | jq -r '.[] | select(.name=='\"AllowStudentPolicy\"') | .id') + +# create authz-permission `Delete Course Permission` and `View Course Permission` +kcadm.sh create clients/${client_id}/authz/resource-server/permission/scope -r University \ + -s name="Delete Course Permission" -s logic="POSITIVE" -s decisionStrategy="UNANIMOUS" \ + -s policies='['\"${allow_teacher_policy_id}\"']' \ + -s scopes='['\"${DELETE_scope_id}\"', '\"${delete_scope_id}\"']' \ + -s resources='['\"${course_resource_id}\"']' + +kcadm.sh create clients/${client_id}/authz/resource-server/permission/scope -r University \ + -s name="View Course Permission" -s logic="POSITIVE" -s decisionStrategy="AFFIRMATIVE" \ + -s policies='['\"${allow_teacher_policy_id}\"', '\"${allow_student_policy_id}\"']' \ + -s scopes='['\"${GET_scope_id}\"', '\"${view_scope_id}\"']' \ + -s resources='['\"${course_resource_id}\"']' diff --git a/ci/pod/keycloak/server.crt.pem b/ci/pod/keycloak/server.crt.pem new file mode 100644 index 000000000000..9c7bde3bb33a --- /dev/null +++ b/ci/pod/keycloak/server.crt.pem @@ -0,0 +1,21 @@ +-----BEGIN CERTIFICATE----- +MIIDazCCAlOgAwIBAgIUbZfnhty/ZiHPz5Aq8kK5Kr8kcSQwDQYJKoZIhvcNAQEL +BQAwRTELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM +GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDAeFw0yMzA0MTgxMTQzNDJaFw0zMzA0 +MTUxMTQzNDJaMEUxCzAJBgNVBAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEw +HwYDVQQKDBhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQwggEiMA0GCSqGSIb3DQEB +AQUAA4IBDwAwggEKAoIBAQC/F4wK7eMTVAKGDMLCXE+Y6REdA5GU6/AakJf3NEKQ +wCrtrqO+VBPIz445+edf3EEXhjFFGPdU6p0EkF0SMLaMsVBQQJ2qcP6FloIYiyT3 +WCs/gbtdoWq53ucAfWueIyHWsovLc0VhOXm0rhTYg88nMjJ7y6vYkfLMT6qlwASn +9Tozgjat09fWATbN7yBi4ivVVsKDo2S3jkOyVnYYMjzZO3CSkyUSMl+ZsSesseSK +A9c2zogfKIU833njraA8blMFfdinEMI/9yceEx57IUjnpY1iWHLSItiZF+LKEpeL 
+vp9gpr88ghR85ISusqAqwcmnsdAqjjw7gbPm1DIvUgVBAgMBAAGjUzBRMB0GA1Ud +DgQWBBRvlz5ZiE2fD9ikPRqpYwsVrxZfxTAfBgNVHSMEGDAWgBRvlz5ZiE2fD9ik +PRqpYwsVrxZfxTAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQCX +5fOeFnX67eHI5dJB8p3U2GS21qykDVLV5ZV+JZfZwXJEygIvr/T9vs772EPxv+0/ +TO0+pGdcVswXq/6BoUFCV0rWWTDP5wTS3sV1ZsSSHil5zEutXuAI1LQGlit6w5xn +iDURFZw3ZmOFytXKXNbca1ma4yaCZtOwVe3O36GZeOiZFzBYE2DELqy77Nz1E5+3 +jZaDnx0vonV8/hhX6FAPRPQnIXkaEH3BnVQZGD1jxipbFQQtmeeNPELy18MQo30N +W1wOsbMMouniKUjdT16tdtzJzC+l9pVqRC+8df5PJfN56Uv9Ed6pjytkSF1SvHyJ +iTWmyxJL9AonUkc5Oiri +-----END CERTIFICATE----- diff --git a/ci/pod/keycloak/server.key.pem b/ci/pod/keycloak/server.key.pem new file mode 100755 index 000000000000..f2bc2d942856 --- /dev/null +++ b/ci/pod/keycloak/server.key.pem @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQC/F4wK7eMTVAKG +DMLCXE+Y6REdA5GU6/AakJf3NEKQwCrtrqO+VBPIz445+edf3EEXhjFFGPdU6p0E +kF0SMLaMsVBQQJ2qcP6FloIYiyT3WCs/gbtdoWq53ucAfWueIyHWsovLc0VhOXm0 +rhTYg88nMjJ7y6vYkfLMT6qlwASn9Tozgjat09fWATbN7yBi4ivVVsKDo2S3jkOy +VnYYMjzZO3CSkyUSMl+ZsSesseSKA9c2zogfKIU833njraA8blMFfdinEMI/9yce +Ex57IUjnpY1iWHLSItiZF+LKEpeLvp9gpr88ghR85ISusqAqwcmnsdAqjjw7gbPm +1DIvUgVBAgMBAAECggEBAKUrrkGYI2mGePPzPbiP38E02zTv67sEQLJFfwUOp+bE +I5b0F9agh8VQGghkyKgkEiNKO3YVQVuluvjB66CYeIGdleT4JQ+4wVcoo+ShCN++ +1wr6kMA6kKx+Tb8vqYCzr0ELbSf6x+Jksp0Ixz3qmHixu88jWbNFW89boQ3JrnyZ +TUgRSRdPoXcxspwcbhy6mMhwUfUSy8Zcck81dBEAjokvzbYh4jtFYMipWqro66KJ +B9uqQme2J/rN/2PSrA6chI85Wa+JaGOSPDaGNp+DrADjoVZf1tXgzGCsA/lmVtQ0 +8YN4Dh21EjLxz4Dj5GE7RWET4Ejvv1XEih1p+zKne00CgYEA327raCD5Fnr1nGTb +Q4ZWkcDR6EGSD6JGD0ur+UqqJhirM/5b4iGcsVK5uufb5dwk9+9z0EucXOVq/il0 +vgG2FbgRYM8kx3CDLvMYAqKJ8e5NsGJWwJVq6DsmsO1SaEId+SVFH83RHfG5/ksq +/DgRg0Wl9FoL7sHchuSIP2QiLrMCgYEA2vHcKsMZk/KGMBHVffY3PUckirIM6vLa +idMmm0T0HSAdviZRxQGyOnjd93ZhMqFJPTrmHOq0uAxfdFt+oRoHk/pGarBCv76L +NnPrSnVe1pJOh7Mm7LHLgrAgeM2WW7xz6jZwc8On+9qHK97I/wAnJB8J7DvQJ2hR +sWCDSbfKtjsCgYEAnVE77tVIjMuGo9dfiuvLiFR7d0yzys43Bg4ByEUKCEjWQoWV 
+rGJ+MVxN6YvXCME4RloS8VZLgh0GeG44BJCv5Br2IXO4MbTGqQgAn9pRxkZD7S1Q +Z8jMvTboxypSG5ZyBDp5sSr5Ulwg2SuT2IKh0gv4DVRZkoJtA41lYTzf1IECgYBd +3NJGgt20T4S3lu2v0p5b5uQDkdF36CVIcP1cE3OUCPC3VDY5/0ApUSfXryh8TCjZ +1yZPv086mBNUDuV6q24UQndtxaLYERgdgBSfFzJRSuffxS4qyw40OM2y/HA5Y9FN +14jeGEMr9cN9S0VgDPC6y5O1cu8J9e8P3BBsyh5dgQKBgHMlIhOJDO/neVnax79X +d3+5GaiggUnkd27OkYC4LhXEc/QWeHE0ByA0bDhhnsE7IVK2CVC18axOLmEJVy2g +F6ZtxcpNrlVtF4YaOiRVUcDNnz9gX48efrpdoX2iBSFEd1NRDo/bjkVXI1L08LNf +BbMB104PadChoGpl5R3NQQsP +-----END PRIVATE KEY----- diff --git a/ci/pod/opa/example.rego b/ci/pod/opa/example.rego index 2eb912e08c52..a9161042b887 100644 --- a/ci/pod/opa/example.rego +++ b/ci/pod/opa/example.rego @@ -29,6 +29,11 @@ allow { request.query["user"] } +allow { + request.method == "GET" + startswith(request.path, "/echo") +} + reason = users[request.query["user"]].reason { not allow request.query["user"] @@ -39,6 +44,11 @@ headers = users[request.query["user"]].headers { request.query["user"] } +headers = {"user": request.query["user"]} { + allow + request.query["user"] +} + status_code = users[request.query["user"]].status_code { not allow request.query["user"] diff --git a/ci/pod/openfunction/build-function-image.sh b/ci/pod/openfunction/build-function-image.sh index fc4a4945ed3e..dc9f34a8b15b 100755 --- a/ci/pod/openfunction/build-function-image.sh +++ b/ci/pod/openfunction/build-function-image.sh @@ -23,6 +23,6 @@ if [ ! 
-f "./pack" ]; then fi # please update function-example/*/hello.go if you want to update function -./pack build test-uri-image --path ./ci/pod/openfunction/function-example/test-uri --builder openfunction/builder-go:v2.4.0-1.17 --env FUNC_NAME="HelloWorld" --env FUNC_CLEAR_SOURCE=true --env FUNC_GOPROXY="https://goproxy.cn" -./pack build test-body-image --path ./ci/pod/openfunction/function-example/test-body --builder openfunction/builder-go:v2.4.0-1.17 --env FUNC_NAME="HelloWorld" --env FUNC_CLEAR_SOURCE=true --env FUNC_GOPROXY="https://goproxy.cn" -./pack build test-header-image --path ./ci/pod/openfunction/function-example/test-header --builder openfunction/builder-go:v2.4.0-1.17 --env FUNC_NAME="HelloWorld" --env FUNC_CLEAR_SOURCE=true --env FUNC_GOPROXY="https://goproxy.cn" +./pack build test-uri-image --path ./ci/pod/openfunction/function-example/test-uri --builder openfunction/builder-go:v2.4.0-1.17 --env FUNC_NAME="HelloWorld" --env FUNC_CLEAR_SOURCE=true --env FUNC_GOPROXY="https://proxy.golang.org" +./pack build test-body-image --path ./ci/pod/openfunction/function-example/test-body --builder openfunction/builder-go:v2.4.0-1.17 --env FUNC_NAME="HelloWorld" --env FUNC_CLEAR_SOURCE=true --env FUNC_GOPROXY="https://proxy.golang.org" +./pack build test-header-image --path ./ci/pod/openfunction/function-example/test-header --builder openfunction/builder-go:v2.4.0-1.17 --env FUNC_NAME="HelloWorld" --env FUNC_CLEAR_SOURCE=true --env FUNC_GOPROXY="https://proxy.golang.org" diff --git a/ci/pod/openfunction/function-example/test-body/go.mod b/ci/pod/openfunction/function-example/test-body/go.mod index 9b9c54475baa..3e2f6155748e 100644 --- a/ci/pod/openfunction/function-example/test-body/go.mod +++ b/ci/pod/openfunction/function-example/test-body/go.mod @@ -20,12 +20,12 @@ require ( go.uber.org/multierr v1.7.0 // indirect go.uber.org/zap v1.19.1 // indirect golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f // indirect - golang.org/x/sys v0.1.0 // indirect + 
golang.org/x/sys v0.0.0-20211019181941-9d821ace8654 // indirect golang.org/x/text v0.3.7 // indirect google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2 // indirect google.golang.org/grpc v1.40.0 // indirect google.golang.org/protobuf v1.28.0 // indirect - gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect + gopkg.in/yaml.v3 v3.0.0 // indirect k8s.io/klog/v2 v2.30.0 // indirect skywalking.apache.org/repo/goapi v0.0.0-20220401015832-2c9eee9481eb // indirect ) diff --git a/ci/pod/openfunction/function-example/test-body/go.sum b/ci/pod/openfunction/function-example/test-body/go.sum index 149c3a4469d0..35f77fd70a02 100644 --- a/ci/pod/openfunction/function-example/test-body/go.sum +++ b/ci/pod/openfunction/function-example/test-body/go.sum @@ -1398,9 +1398,8 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211019181941-9d821ace8654 h1:id054HUawV2/6IGm2IV8KZQjqtwAOo2CYlOToYqa0d0= golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.1.0 h1:kunALQeHf1/185U1i0GOB/fy1IPRDDpuoOOqRReG57U= -golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1696,8 +1695,9 @@ gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 
v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0 h1:hjy8E9ON/egN1tAYqKb61G10WtihqetD4sz2H+8nIeA= +gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v0.0.0-20181223230014-1083505acf35/go.mod h1:R//lfYlUuTOTfblYI3lGoAAAebUdzjvbmQsuB7Ykd90= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= diff --git a/ci/pod/openfunction/function-example/test-uri/go.mod b/ci/pod/openfunction/function-example/test-uri/go.mod index 7d7fb3203cc5..94075e599c8f 100644 --- a/ci/pod/openfunction/function-example/test-uri/go.mod +++ b/ci/pod/openfunction/function-example/test-uri/go.mod @@ -20,9 +20,9 @@ require ( go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.7.0 // indirect go.uber.org/zap v1.21.0 // indirect - golang.org/x/net v0.7.0 // indirect - golang.org/x/sys v0.5.0 // indirect - golang.org/x/text v0.7.0 // indirect + golang.org/x/net v0.0.0-20220621193019-9d032be2e588 // indirect + golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a // indirect + golang.org/x/text v0.3.7 // indirect google.golang.org/genproto v0.0.0-20220622171453-ea41d75dfa0f // indirect google.golang.org/grpc v1.47.0 // indirect google.golang.org/protobuf v1.28.0 // indirect diff --git a/ci/pod/openfunction/function-example/test-uri/go.sum b/ci/pod/openfunction/function-example/test-uri/go.sum index 28f9e57c8fbe..d7837ccf355c 100644 --- a/ci/pod/openfunction/function-example/test-uri/go.sum 
+++ b/ci/pod/openfunction/function-example/test-uri/go.sum @@ -1656,7 +1656,6 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/yuin/gopher-lua v0.0.0-20191220021717-ab39c6098bdb/go.mod h1:gqRgreBUhTSL0GeU64rtZ3Uq3wtjOa/TB2YfrtkCbVQ= github.com/yuin/gopher-lua v0.0.0-20200603152657-dc2b0ca8b37e/go.mod h1:gqRgreBUhTSL0GeU64rtZ3Uq3wtjOa/TB2YfrtkCbVQ= github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= @@ -1856,7 +1855,6 @@ golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1941,10 +1939,8 @@ golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net 
v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220621193019-9d032be2e588 h1:9ubFuySsnAJYGyJrZ3koiEv8FyqofCBdz3G9Mbf2YFc= golang.org/x/net v0.0.0-20220621193019-9d032be2e588/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= -golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1977,7 +1973,6 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180828065106-d99a578cf41b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -2129,16 +2124,13 @@ golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys 
v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a h1:dGzPydgVsqGcTRVwiLJ1jVbufYwmzD3LfVPLKsKg+0k= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -2148,9 +2140,8 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7 
h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= -golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -2247,7 +2238,6 @@ golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.6-0.20210820212750-d4cc65f0b2ff/go.mod h1:YD9qOF0M9xpSpdWTBbzEl5e/RnCefISl8E5Noe10jFM= golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= -golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/ci/linux_openresty_1_19_runner.sh b/ci/pod/otelcol-contrib/config.yaml old mode 100755 new mode 100644 similarity index 79% rename from ci/linux_openresty_1_19_runner.sh rename to ci/pod/otelcol-contrib/config.yaml index ed1751308926..438f04c8b9fe --- a/ci/linux_openresty_1_19_runner.sh +++ b/ci/pod/otelcol-contrib/config.yaml @@ -1,4 +1,3 @@ -#!/usr/bin/env bash # # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with @@ -16,6 +15,16 @@ # limitations under the License. 
# - -export OPENRESTY_VERSION=1.19.3.2 -. ./ci/linux_openresty_common_runner.sh +receivers: + otlp: + protocols: + grpc: + http: +exporters: + file: + path: /etc/otelcol-contrib/data-otlp.json +service: + pipelines: + traces: + receivers: [otlp] + exporters: [file] diff --git a/ci/pod/vector/vector.toml b/ci/pod/vector/vector.toml new file mode 100644 index 000000000000..26716bc3893b --- /dev/null +++ b/ci/pod/vector/vector.toml @@ -0,0 +1,111 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +[sources.log-from-tcp] +type = "socket" +address = "0.0.0.0:3000" +host_key = "host" +mode = "tcp" +port_key = "port" +shutdown_timeout_secs = 30 +socket_file_mode = 511 + +[sources.log-from-http] +type = "http_server" +address = "0.0.0.0:3001" + +[sources.log-from-udp] +type = "socket" +address = "0.0.0.0:8127" +host_key = "host" +mode = "udp" +port_key = "port" + +[sources.log-from-tls] +type = "socket" +address = "0.0.0.0:43000" +host_key = "host" +mode = "tcp" +port_key = "port" +tls.enabled = true +tls.verify = true +tls.ca_file = "/certs/vector_logs_ca.crt" +tls.crt_file = "/certs/vector_logs_server.crt" +tls.key_file = "/certs/vector_logs_server.key" + +[sources.log-from-syslog-tcp] +type = "syslog" +address = "0.0.0.0:5140" +mode = "tcp" + +[sources.log-from-syslog-udp] +type = "syslog" +address = "0.0.0.0:5150" +mode = "udp" + +[sources.log-from-splunk] +type = "splunk_hec" +address = "0.0.0.0:18088" +valid_tokens = [ + "BD274822-96AA-4DA6-90EC-18940FB2414C" +] + +[sinks.log-2-console] +inputs = [ "log-from-tcp", "log-from-tls", "log-from-syslog-tcp", "log-from-syslog-udp", "log-from-udp", "log-from-splunk", "log-from-http"] +type = "console" +encoding.codec = "json" + +[sinks.log-2-tcp-file] +inputs = [ "log-from-tcp" ] +type = "file" +encoding.codec = "text" +path = "/etc/vector/tcp.log" + +[sinks.log-2-http-file] +inputs = [ "log-from-http" ] +type = "file" +encoding.codec = "text" +path = "/etc/vector/http.log" + +[sinks.log-2-udp-file] +inputs = [ "log-from-udp" ] +type = "file" +encoding.codec = "json" +path = "/etc/vector/udp.log" + +[sinks.tls-log-2-file] +inputs = [ "log-from-tls" ] +type = "file" +encoding.codec = "json" +path = "/etc/vector/tls-datas.log" + +[sinks.log-2-syslog-tcp-file] +inputs = [ "log-from-syslog-tcp" ] +type = "file" +encoding.codec = "text" +path = "/etc/vector/syslog-tcp.log" + +[sinks.log-2-splunk-file] +inputs = [ "log-from-splunk" ] +type = "file" +encoding.codec = "json" +path = "/etc/vector/splunk.log" + 
+[sinks.log-2-syslog-udp-file] +inputs = [ "log-from-syslog-udp" ] +type = "file" +encoding.codec = "text" +path = "/etc/vector/syslog-udp.log" diff --git a/ci/redhat-ci.sh b/ci/redhat-ci.sh new file mode 100755 index 000000000000..18fed3208290 --- /dev/null +++ b/ci/redhat-ci.sh @@ -0,0 +1,104 @@ +#!/usr/bin/env bash +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +. 
./ci/common.sh +install_dependencies() { + export_or_prefix + + # install build & runtime deps + yum install -y --disablerepo=* --enablerepo=ubi-8-appstream-rpms --enablerepo=ubi-8-baseos-rpms \ + wget tar gcc automake autoconf libtool make unzip git sudo openldap-devel hostname \ + which ca-certificates openssl-devel + + # install newer curl + yum makecache + yum install -y libnghttp2-devel + install_curl + + # install openresty to make apisix's rpm test work + yum install -y yum-utils && yum-config-manager --add-repo https://openresty.org/package/centos/openresty.repo + yum install -y openresty-1.21.4.2 openresty-debug-1.21.4.2 openresty-openssl111-debug-devel pcre pcre-devel xz + + # install luarocks + ./utils/linux-install-luarocks.sh + + # install etcdctl + ./ci/linux-install-etcd-client.sh + + # install vault cli capabilities + install_vault_cli + + # install test::nginx + yum install -y --disablerepo=* --enablerepo=ubi-8-appstream-rpms --enablerepo=ubi-8-baseos-rpms cpanminus perl + cpanm --notest Test::Nginx IPC::Run > build.log 2>&1 || (cat build.log && exit 1) + + # add go1.15 binary to the path + mkdir build-cache + pushd build-cache/ + # Go is required inside the container. 
+ wget -q https://golang.org/dl/go1.17.linux-amd64.tar.gz && tar -xf go1.17.linux-amd64.tar.gz + export PATH=$PATH:$(pwd)/go/bin + popd + # install and start grpc_server_example + pushd t/grpc_server_example + + CGO_ENABLED=0 go build + ./grpc_server_example \ + -grpc-address :50051 -grpcs-address :50052 -grpcs-mtls-address :50053 -grpc-http-address :50054 \ + -crt ../certs/apisix.crt -key ../certs/apisix.key -ca ../certs/mtls_ca.crt \ + > grpc_server_example.log 2>&1 || (cat grpc_server_example.log && exit 1)& + + popd + # wait for grpc_server_example to fully start + sleep 3 + + # installing grpcurl + install_grpcurl + + # install nodejs + install_nodejs + + # grpc-web server && client + pushd t/plugin/grpc-web + ./setup.sh + # back to home directory + popd + + # install dependencies + git clone https://github.com/openresty/test-nginx.git test-nginx + create_lua_deps +} + +run_case() { + export_or_prefix + make init + set_coredns + # run test cases + FLUSH_ETCD=1 prove --timer -Itest-nginx/lib -I./ -r ${TEST_FILE_SUB_DIR} | tee /tmp/test.result + rerun_flaky_tests /tmp/test.result +} + +case_opt=$1 +case $case_opt in + (install_dependencies) + install_dependencies + ;; + (run_case) + run_case + ;; +esac diff --git a/conf/config-default.yaml b/conf/config-default.yaml index 2765afe9c43f..05dc50ad72d1 100755 --- a/conf/config-default.yaml +++ b/conf/config-default.yaml @@ -14,53 +14,48 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# PLEASE DO NOT UPDATE THIS FILE! -# If you want to set the specified configuration value, you can set the new -# value in the conf/config.yaml file. +# CAUTION: DO NOT MODIFY DEFAULT CONFIGURATIONS IN THIS FILE. +# Keep the custom configurations in conf/config.yaml. # apisix: - # node_listen: 9080 # APISIX listening port - node_listen: # This style support multiple ports + # node_listen: 9080 # APISIX listening port. + node_listen: # APISIX listening ports. 
- 9080 # - port: 9081 - # enable_http2: true # If not set, the default value is `false`. - # - ip: 127.0.0.2 # Specific IP, If not set, the default value is `0.0.0.0`. + # enable_http2: true # If not set, default to `false`. + # - ip: 127.0.0.2 # If not set, default to `0.0.0.0` # port: 9082 # enable_http2: true - enable_admin: true - enable_dev_mode: false # Sets nginx worker_processes to 1 if set to true - enable_reuseport: true # Enable nginx SO_REUSEPORT switch if set to true. - show_upstream_status_in_response_header: false # when true all upstream status write to `X-APISIX-Upstream-Status` otherwise only 5xx code + enable_admin: true # Admin API + enable_dev_mode: false # If true, set nginx `worker_processes` to 1. + enable_reuseport: true # If true, enable nginx SO_REUSEPORT option. + show_upstream_status_in_response_header: false # If true, include the upstream HTTP status code in + # the response header `X-APISIX-Upstream-Status`. + # If false, show `X-APISIX-Upstream-Status` only if + # the upstream response code is 5xx. enable_ipv6: true - #proxy_protocol: # Proxy Protocol configuration - # listen_http_port: 9181 # The port with proxy protocol for http, it differs from node_listen and admin_listen. - # This port can only receive http request with proxy protocol, but node_listen & admin_listen - # can only receive http request. If you enable proxy protocol, you must use this port to - # receive http request with proxy protocol - # listen_https_port: 9182 # The port with proxy protocol for https - # enable_tcp_pp: true # Enable the proxy protocol for tcp proxy, it works for stream_proxy.tcp option - # enable_tcp_pp_to_upstream: true # Enables the proxy protocol to the upstream server - enable_server_tokens: true # Whether the APISIX version number should be shown in Server header. - # It's enabled by default. - - # configurations to load third party code and/or override the builtin one. 
- extra_lua_path: "" # extend lua_package_path to load third party code - extra_lua_cpath: "" # extend lua_package_cpath to load third party code - #lua_module_hook: "my_project.my_hook" # the hook module which will be used to inject third party code into APISIX - - proxy_cache: # Proxy Caching configuration - cache_ttl: 10s # The default caching time in disk if the upstream does not specify the cache time - zones: # The parameters of a cache - - name: disk_cache_one # The name of the cache, administrator can specify - # which cache to use by name in the admin api (disk|memory) - memory_size: 50m # The size of shared memory, it's used to store the cache index for - # disk strategy, store cache content for memory strategy (disk|memory) - disk_size: 1G # The size of disk, it's used to store the cache data (disk) - disk_path: /tmp/disk_cache_one # The path to store the cache data (disk) - cache_levels: 1:2 # The hierarchy levels of a cache (disk) - #- name: disk_cache_two + # proxy_protocol: # PROXY Protocol configuration + # listen_http_port: 9181 # APISIX listening port for HTTP traffic with PROXY protocol. + # listen_https_port: 9182 # APISIX listening port for HTTPS traffic with PROXY protocol. + # enable_tcp_pp: true # Enable the PROXY protocol when stream_proxy.tcp is set. + # enable_tcp_pp_to_upstream: true # Enable the PROXY protocol. + + enable_server_tokens: true # If true, show APISIX version in the `Server` response header. + extra_lua_path: "" # Extend lua_package_path to load third-party code. + extra_lua_cpath: "" # Extend lua_package_cpath to load third-party code. + # lua_module_hook: "my_project.my_hook" # Hook module used to inject third-party code into APISIX. + + proxy_cache: # Proxy Caching configuration + cache_ttl: 10s # The default caching time on disk if the upstream does not specify a caching time. + zones: + - name: disk_cache_one # Name of the cache. + memory_size: 50m # Size of the memory to store the cache index. 
+ disk_size: 1G # Size of the disk to store the cache data. + disk_path: /tmp/disk_cache_one # Path to the cache file for disk cache. + cache_levels: 1:2 # Cache hierarchy levels of disk cache. + # - name: disk_cache_two # memory_size: 50m # disk_size: 1G # disk_path: "/tmp/disk_cache_two" @@ -68,108 +63,131 @@ apisix: - name: memory_cache memory_size: 50m - delete_uri_tail_slash: false # delete the '/' at the end of the URI - # The URI normalization in servlet is a little different from the RFC's. - # See https://github.com/jakartaee/servlet/blob/master/spec/src/main/asciidoc/servlet-spec-body.adoc#352-uri-path-canonicalization, - # which is used under Tomcat. - # Turn this option on if you want to be compatible with servlet when matching URI path. - normalize_uri_like_servlet: false + delete_uri_tail_slash: false # Delete the '/' at the end of the URI + normalize_uri_like_servlet: false # If true, use the same path normalization rules as the Java + # servlet specification. See https://github.com/jakartaee/servlet/blob/master/spec/src/main/asciidoc/servlet-spec-body.adoc#352-uri-path-canonicalization, which is used in Tomcat. + router: - http: radixtree_host_uri # radixtree_uri: match route by uri(base on radixtree) - # radixtree_host_uri: match route by host + uri(base on radixtree) - # radixtree_uri_with_parameter: like radixtree_uri but match uri with parameters, - # see https://github.com/api7/lua-resty-radixtree/#parameters-in-path for - # more details. - ssl: radixtree_sni # radixtree_sni: match route by SNI(base on radixtree) - #stream_proxy: # TCP/UDP proxy - # only: true # use stream proxy only, don't enable HTTP stuff - # tcp: # TCP proxy port list - # - addr: 9100 - # tls: true - # - addr: "127.0.0.1:9101" - # udp: # UDP proxy port list - # - 9200 - # - "127.0.0.1:9201" - #dns_resolver: # If not set, read from `/etc/resolv.conf` - # - 1.1.1.1 - # - 8.8.8.8 - #dns_resolver_valid: 30 # if given, override the TTL of the valid records. The unit is second. 
- resolver_timeout: 5 # resolver timeout - enable_resolv_search_opt: true # enable search option in resolv.conf + http: radixtree_host_uri # radixtree_host_uri: match route by host and URI + # radixtree_uri: match route by URI + # radixtree_uri_with_parameter: similar to radixtree_uri but match URI with parameters. See https://github.com/api7/lua-resty-radixtree/#parameters-in-path for more details. + ssl: radixtree_sni # radixtree_sni: match route by SNI + + # http is the default proxy mode. proxy_mode can be one of `http`, `stream`, or `http&stream` + proxy_mode: http + # stream_proxy: # TCP/UDP L4 proxy + # only: true # Enable L4 proxy only without L7 proxy. + # tcp: + # - addr: 9100 # Set the TCP proxy listening ports. + # tls: true + # - addr: "127.0.0.1:9101" + # udp: # Set the UDP proxy listening ports. + # - 9200 + # - "127.0.0.1:9201" + + # dns_resolver: # If not set, read from `/etc/resolv.conf` + # - 1.1.1.1 + # - 8.8.8.8 + # dns_resolver_valid: 30 # Override the default TTL of the DNS records. + resolver_timeout: 5 # Set the time in seconds that the server will wait for a response from the + # DNS resolver before timing out. + enable_resolv_search_opt: true # If true, use search option in the resolv.conf file in DNS lookups. + ssl: enable: true - listen: # APISIX listening port in https. + listen: # APISIX listening port for HTTPS traffic. - port: 9443 enable_http2: true - # - ip: 127.0.0.3 # Specific IP, If not set, the default value is `0.0.0.0`. - # port: 9445 - # enable_http2: true - #ssl_trusted_certificate: /path/to/ca-cert # Specifies a file path with trusted CA certificates in the PEM format - # used to verify the certificate when APISIX needs to do SSL/TLS handshaking - # with external services (e.g. etcd) - ssl_protocols: TLSv1.2 TLSv1.3 + # - ip: 127.0.0.3 # If not set, default to `0.0.0.0`. 
+ # port: 9445 + # enable_http2: true + # ssl_trusted_certificate: /path/to/ca-cert # Set the path to CA certificates used to verify client + # certificates in the PEM format. + ssl_protocols: TLSv1.2 TLSv1.3 # TLS versions supported. ssl_ciphers: ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384 - ssl_session_tickets: false # disable ssl_session_tickets by default for 'ssl_session_tickets' would make Perfect Forward Secrecy useless. - # ref: https://github.com/mozilla/server-side-tls/issues/135 - - key_encrypt_salt: # If not set, will save origin ssl key into etcd. - - edd1c9f0985e76a2 # If set this, the key_encrypt_salt should be an array whose elements are string, and the size is also 16, and it will encrypt ssl key with AES-128-CBC - # !!! So do not change it after saving your ssl, it can't decrypt the ssl keys have be saved if you change !! - # Only use the first key to encrypt, and decrypt in the order of the array. - - #fallback_sni: "my.default.domain" # If set this, when the client doesn't send SNI during handshake, the fallback SNI will be used instead - enable_control: true - #control: + ssl_session_tickets: false # If true, session tickets are used for SSL/TLS connections. + # Disabled by default because it renders Perfect Forward Secrecy (FPS) + # useless. See https://github.com/mozilla/server-side-tls/issues/135. + + key_encrypt_salt: # This field is only used to encrypt the private key of SSL. + - edd1c9f0985e76a2 # Set the encryption key for AES-128-CBC. It should be a + # hexadecimal string of length 16. + # If not set, APISIX saves the original data into etcd. 
+ # CAUTION: If you would like to update the key, add the new key as the + # first item in the array and keep the older keys below the newly added + # key, so that data can be decrypted with the older keys and encrypted + # with the new key. Removing the old keys directly can render the data + # unrecoverable. + + # fallback_sni: "my.default.domain" # Fallback SNI to be used if the client does not send SNI during + # # the handshake. + + enable_control: true # Control API + # control: # ip: 127.0.0.1 # port: 9090 - disable_sync_configuration_during_start: false # safe exit. Remove this once the feature is stable - data_encryption: # add `encrypt_fields = { $field },` in plugin schema to enable encryption - enable: false # if not set, the default value is `false`. - keyring: - - qeddd145sfvddff3 # If not set, will save origin value into etcd. - # If set this, the keyring should be an array whose elements are string, and the size is also 16, and it will encrypt fields with AES-128-CBC - # !!! So do not change it after encryption, it can't decrypt the fields have be saved if you change !! - # Only use the first key to encrypt, and decrypt in the order of the array. - -nginx_config: # config for render the template to generate nginx.conf - #user: root # specifies the execution user of the worker process. - # the "user" directive makes sense only if the master process runs with super-user privileges. - # if you're not root user,the default is current user. - error_log: logs/error.log - error_log_level: warn # warn,error - worker_processes: auto # if you want use multiple cores in container, you can inject the number of cpu as environment variable "APISIX_WORKER_PROCESSES" - enable_cpu_affinity: false # disable CPU affinity by default, if APISIX is deployed on a physical machine, it can be enabled and work well. 
- worker_rlimit_nofile: 20480 # the number of files a worker process can open, should be larger than worker_connections - worker_shutdown_timeout: 240s # timeout for a graceful shutdown of worker processes - - max_pending_timers: 16384 # increase it if you see "too many pending timers" error - max_running_timers: 4096 # increase it if you see "lua_max_running_timers are not enough" error + + disable_sync_configuration_during_start: false # Safe exit. TO BE REMOVED. + + data_encryption: # Encrypt fields specified in `encrypt_fields` in plugin schema. + enable: false + keyring: # Set the encryption key for AES-128-CBC. It should be a + - qeddd145sfvddff3 # hexadecimal string of length 16. + # If not set, APISIX saves the original data into etcd. + # CAUTION: If you would like to update the key, add the new key as the + # first item in the array and keep the older keys below the newly added + # key, so that data can be decrypted with the older keys and encrypted + # with the new key. Removing the old keys directly can render the data + # unrecoverable. + +nginx_config: # Config for render the template to generate nginx.conf + # user: root # Set the execution user of the worker process. This is only + # effective if the master process runs with super-user privileges. + error_log: logs/error.log # Location of the error log. + error_log_level: warn # Logging level: info, debug, notice, warn, error, crit, alert, or emerg. + worker_processes: auto # Automatically determine the optimal number of worker processes based + # on the available system resources. + # If you want use multiple cores in container, you can inject the number of + # CPU cores as environment variable "APISIX_WORKER_PROCESSES". + enable_cpu_affinity: false # Disable CPU affinity by default as worker_cpu_affinity affects the + # behavior of APISIX in containers. For example, multiple instances could + # be bound to one CPU core, which is not desirable. 
+ # If APISIX is deployed on a physical machine, CPU affinity can be enabled. + worker_rlimit_nofile: 20480 # The number of files a worker process can open. + # The value should be larger than worker_connections. + worker_shutdown_timeout: 240s # Timeout for a graceful shutdown of worker processes. + + max_pending_timers: 16384 # The maximum number of pending timers that can be active at any given time. + # Error "too many pending timers" indicates the threshold is reached. + max_running_timers: 4096 # The maximum number of running timers that can be active at any given time. + # Error "lua_max_running_timers are not enough" error indicates the + # threshold is reached. event: worker_connections: 10620 - #envs: # allow to get a list of environment variables + + # envs: # Get environment variables. # - TEST_ENV meta: - lua_shared_dict: + lua_shared_dict: # Nginx Lua shared memory zone. Size units are m or k. prometheus-metrics: 15m stream: - enable_access_log: false # enable access log or not, default false - access_log: logs/access_stream.log - access_log_format: "$remote_addr [$time_local] $protocol $status $bytes_sent $bytes_received $session_time" - # create your custom log format by visiting http://nginx.org/en/docs/varindex.html - access_log_format_escape: default # allows setting json or default characters escaping in variables - lua_shared_dict: + enable_access_log: false # Enable stream proxy access logging. + access_log: logs/access_stream.log # Location of the stream access log. + access_log_format: "$remote_addr [$time_local] $protocol $status $bytes_sent $bytes_received $session_time" # Customize log format: http://nginx.org/en/docs/varindex.html + access_log_format_escape: default # Escape default or json characters in variables. + lua_shared_dict: # Nginx Lua shared memory zone. Size units are m or k. 
etcd-cluster-health-check-stream: 10m lrucache-lock-stream: 10m plugin-limit-conn-stream: 10m worker-events-stream: 10m tars-stream: 1m - # As user can add arbitrary configurations in the snippet, - # it is user's responsibility to check the configurations - # don't conflict with APISIX. + # Add other custom Nginx configurations. + # Users are responsible for validating the custom configurations + # to ensure they are not in conflict with APISIX configurations. main_configuration_snippet: | # Add custom Nginx main configuration to nginx.conf. # The configuration should be well indented! @@ -193,41 +211,48 @@ nginx_config: # config for render the template to generate n # The configuration should be well indented! http: - enable_access_log: true # enable access log or not, default true - access_log: logs/access.log + enable_access_log: true # Enable HTTP proxy access logging. + access_log: logs/access.log # Location of the access log. + access_log_buffer: 16384 # buffer size of access log. access_log_format: "$remote_addr - $remote_user [$time_local] $http_host \"$request\" $status $body_bytes_sent $request_time \"$http_referer\" \"$http_user_agent\" $upstream_addr $upstream_status $upstream_response_time \"$upstream_scheme://$upstream_host$upstream_uri\"" - access_log_format_escape: default # allows setting json or default characters escaping in variables - keepalive_timeout: 60s # timeout during which a keep-alive client connection will stay open on the server side. - client_header_timeout: 60s # timeout for reading client request header, then 408 (Request Time-out) error is returned to the client - client_body_timeout: 60s # timeout for reading client request body, then 408 (Request Time-out) error is returned to the client - client_max_body_size: 0 # The maximum allowed size of the client request body. - # If exceeded, the 413 (Request Entity Too Large) error is returned to the client. - # Note that unlike Nginx, we don't limit the body size by default. 
- - send_timeout: 10s # timeout for transmitting a response to the client.then the connection is closed - underscores_in_headers: "on" # default enables the use of underscores in client request header fields - real_ip_header: X-Real-IP # http://nginx.org/en/docs/http/ngx_http_realip_module.html#real_ip_header - real_ip_recursive: "off" # http://nginx.org/en/docs/http/ngx_http_realip_module.html#real_ip_recursive - real_ip_from: # http://nginx.org/en/docs/http/ngx_http_realip_module.html#set_real_ip_from + # Customize log format: http://nginx.org/en/docs/varindex.html + access_log_format_escape: default # Escape default or json characters in variables. + keepalive_timeout: 60s # Set the maximum time for which TCP connection keeps alive. + client_header_timeout: 60s # Set the maximum time waiting for client to send the entire HTTP + # request header before closing the connection. + client_body_timeout: 60s # Set the maximum time waiting for client to send the request body. + client_max_body_size: 0 # Set the maximum allowed size of the client request body. + # Default to 0, unlimited. + # Unlike Nginx, APISIX does not limit the body size by default. + # If exceeded, the 413 (Request Entity Too Large) error is returned. + send_timeout: 10s # Set the maximum time for transmitting a response to the client before closing. + underscores_in_headers: "on" # Allow HTTP request headers to contain underscores in their names. 
+ real_ip_header: X-Real-IP # https://nginx.org/en/docs/http/ngx_http_realip_module.html#real_ip_header + real_ip_recursive: "off" # http://nginx.org/en/docs/http/ngx_http_realip_module.html#real_ip_recursive + real_ip_from: # http://nginx.org/en/docs/http/ngx_http_realip_module.html#set_real_ip_from - 127.0.0.1 - "unix:" - #custom_lua_shared_dict: # add custom shared cache to nginx.conf - # ipc_shared_dict: 100m # custom shared cache, format: `cache-key: cache-size` - # Enables or disables passing of the server name through TLS Server Name Indication extension (SNI, RFC 6066) - # when establishing a connection with the proxied HTTPS server. - proxy_ssl_server_name: true + # custom_lua_shared_dict: # Custom Nginx Lua shared memory zone for nginx.conf. Size units are m or k. + # ipc_shared_dict: 100m # Custom shared cache, format: `cache-key: cache-size` + + proxy_ssl_server_name: true # Send the server name in the SNI extension when establishing an SSL/TLS + # connection with the upstream server, allowing the upstream server to + # select the appropriate SSL/TLS certificate and configuration based on + # the requested server name. + upstream: - keepalive: 320 # Sets the maximum number of idle keepalive connections to upstream servers that are preserved in the cache of each worker process. - # When this number is exceeded, the least recently used connections are closed. - keepalive_requests: 1000 # Sets the maximum number of requests that can be served through one keepalive connection. - # After the maximum number of requests is made, the connection is closed. - keepalive_timeout: 60s # Sets a timeout during which an idle keepalive connection to an upstream server will stay open. - charset: utf-8 # Adds the specified charset to the "Content-Type" response header field, see - # http://nginx.org/en/docs/http/ngx_http_charset_module.html#charset - variables_hash_max_size: 2048 # Sets the maximum size of the variables hash table. 
- - lua_shared_dict: + keepalive: 320 # Set the maximum time of keep-alive connections to the upstream servers. + # When the value is exceeded, the least recently used connection is closed. + keepalive_requests: 1000 # Set the maximum number of requests that can be served through one + # keep-alive connection. + # After the maximum number of requests is made, the connection is closed. + keepalive_timeout: 60s # Set the maximum time for which TCP connection keeps alive. + charset: utf-8 # Add the charset to the "Content-Type" response header field. + # See http://nginx.org/en/docs/http/ngx_http_charset_module.html#charset + variables_hash_max_size: 2048 # Set the maximum size of the variables hash table. + + lua_shared_dict: # Nginx Lua shared memory zone. Size units are m or k. internal-status: 10m plugin-limit-req: 10m plugin-limit-count: 10m @@ -251,93 +276,94 @@ nginx_config: # config for render the template to generate n tars: 1m cas-auth: 10m -#discovery: # service discovery center +# discovery: # Service Discovery # dns: # servers: -# - "127.0.0.1:8600" # use the real address of your dns server -# order: # order in which to try different dns record types when resolving -# - last # "last" will try the last previously successful type for a hostname. +# - "127.0.0.1:8600" # Replace with the address of your DNS server. +# order: # Resolve DNS records this order. +# - last # Try the latest successful type for a hostname. # - SRV # - A # - AAAA # - CNAME -# eureka: -# host: # it's possible to define multiple eureka hosts addresses of the same eureka cluster. 
+# eureka: # Eureka +# host: # Eureka address(es) # - "http://127.0.0.1:8761" # prefix: /eureka/ -# fetch_interval: 30 # default 30s -# weight: 100 # default weight for node +# fetch_interval: 30 # Default 30s +# weight: 100 # Default weight for node # timeout: -# connect: 2000 # default 2000ms -# send: 2000 # default 2000ms -# read: 5000 # default 5000ms -# nacos: -# host: +# connect: 2000 # Default 2000ms +# send: 2000 # Default 2000ms +# read: 5000 # Default 5000ms +# nacos: # Nacos +# host: # Nacos address(es) # - "http://${username}:${password}@${host1}:${port1}" # prefix: "/nacos/v1/" -# fetch_interval: 30 # default 30 sec -# weight: 100 # default 100 +# fetch_interval: 30 # Default 30s +# `weight` is the `default_weight` that will be attached to each discovered node that +# doesn't have a weight explicitly provided in nacos results +# weight: 100 # Default 100. # timeout: -# connect: 2000 # default 2000 ms -# send: 2000 # default 2000 ms -# read: 5000 # default 5000 ms -# consul_kv: -# servers: +# connect: 2000 # Default 2000ms +# send: 2000 # Default 2000ms +# read: 5000 # Default 5000ms +# consul_kv: # Consul KV +# servers: # Consul KV address(es) # - "http://127.0.0.1:8500" # - "http://127.0.0.1:8600" # prefix: "upstreams" -# skip_keys: # if you need to skip special keys +# skip_keys: # Skip special keys # - "upstreams/unused_api/" # timeout: -# connect: 2000 # default 2000 ms -# read: 2000 # default 2000 ms -# wait: 60 # default 60 sec -# weight: 1 # default 1 -# fetch_interval: 3 # default 3 sec, only take effect for keepalive: false way -# keepalive: true # default true, use the long pull way to query consul servers -# default_server: # you can define default server when missing hit +# connect: 2000 # Default 2000ms +# read: 2000 # Default 2000ms +# wait: 60 # Default 60s +# weight: 1 # Default 1 +# fetch_interval: 3 # Default 3s. Effective only when keepalive is false. +# keepalive: true # Default to true. Use long pull to query Consul. 
+# default_server: # Define default server to route traffic to. # host: "127.0.0.1" # port: 20999 # metadata: -# fail_timeout: 1 # default 1 ms -# weight: 1 # default 1 -# max_fails: 1 # default 1 -# dump: # if you need, when registered nodes updated can dump into file -# path: "logs/consul_kv.dump" -# expire: 2592000 # unit sec, here is 30 day -# consul: -# servers: # make sure service name is unique in these consul servers -# - "http://127.0.0.1:8500" # `http://127.0.0.1:8500` and `http://127.0.0.1:8600` are different clusters +# fail_timeout: 1 # Default 1ms +# weight: 1 # Default 1 +# max_fails: 1 # Default 1 +# dump: # Dump the Consul key-value (KV) store to a file. +# path: "logs/consul_kv.dump" # Location of the dump file. +# expire: 2592000 # Specify the expiration time of the dump file in units of seconds. +# consul: # Consul +# servers: # Consul address(es) +# - "http://127.0.0.1:8500" # - "http://127.0.0.1:8600" -# skip_services: # if you need to skip special services -# - "service_a" # `consul` service is default skip service +# skip_services: # Skip services during service discovery. +# - "service_a" # timeout: -# connect: 2000 # default 2000 ms -# read: 2000 # default 2000 ms -# wait: 60 # default 60 sec -# weight: 1 # default 1 -# fetch_interval: 3 # default 3 sec, only take effect for keepalive: false way -# keepalive: true # default true, use the long pull way to query consul servers -# default_service: # you can define default server when missing hit +# connect: 2000 # Default 2000ms +# read: 2000 # Default 2000ms +# wait: 60 # Default 60s +# weight: 1 # Default 1 +# fetch_interval: 3 # Default 3s. Effective only when keepalive is false. +# keepalive: true # Default to true. Use long pull to query Consul. +# default_service: # Define the default service to route traffic to. 
# host: "127.0.0.1" # port: 20999 # metadata: -# fail_timeout: 1 # default 1 ms -# weight: 1 # default 1 -# max_fails: 1 # default 1 -# dump: # if you need, when registered nodes updated can dump into file -# path: "logs/consul.dump" -# expire: 2592000 # unit sec, here is 30 day -# load_on_init: true # default true, load the consul dump file on init -# kubernetes: +# fail_timeout: 1 # Default 1ms +# weight: 1 # Default 1 +# max_fails: 1 # Default 1 +# dump: # Dump the Consul key-value (KV) store to a file. +# path: "logs/consul_kv.dump" # Location of the dump file. +# expire: 2592000 # Specify the expiration time of the dump file in units of seconds. +# load_on_init: true # Default true, load the consul dump file on init +# kubernetes: # Kubernetes service discovery # ### kubernetes service discovery both support single-cluster and multi-cluster mode # ### applicable to the case where the service is distributed in a single or multiple kubernetes clusters. -# # ### single-cluster mode ### # service: -# schema: https #apiserver schema, options [http, https], default https -# host: ${KUBERNETES_SERVICE_HOST} #apiserver host, options [ipv4, ipv6, domain, environment variable], default ${KUBERNETES_SERVICE_HOST} -# port: ${KUBERNETES_SERVICE_PORT} #apiserver port, options [port number, environment variable], default ${KUBERNETES_SERVICE_PORT} +# schema: https # apiserver schema, options [http, https], default https +# host: ${KUBERNETES_SERVICE_HOST} # apiserver host, options [ipv4, ipv6, domain, environment variable], default ${KUBERNETES_SERVICE_HOST} +# port: ${KUBERNETES_SERVICE_PORT} # apiserver port, options [port number, environment variable], default ${KUBERNETES_SERVICE_PORT} # client: # # serviceaccount token or path of serviceaccount token_file # token_file: ${KUBERNETES_CLIENT_TOKEN_FILE} @@ -366,13 +392,12 @@ nginx_config: # config for render the template to generate n # # reserved lua shared memory size,1m memory can store about 1000 pieces of endpoint # 
shared_size: 1m #default 1m # ### single-cluster mode ### -# # ### multi-cluster mode ### # - id: release # a custom name refer to the cluster, pattern ^[a-z0-9]{1,8} # service: -# schema: https #apiserver schema, options [http, https], default https -# host: ${KUBERNETES_SERVICE_HOST} #apiserver host, options [ipv4, ipv6, domain, environment variable] -# port: ${KUBERNETES_SERVICE_PORT} #apiserver port, options [port number, environment variable] +# schema: https # apiserver schema, options [http, https], default https +# host: ${KUBERNETES_SERVICE_HOST} # apiserver host, options [ipv4, ipv6, domain, environment variable] +# port: ${KUBERNETES_SERVICE_PORT} # apiserver port, options [port number, environment variable] # client: # # serviceaccount token or path of serviceaccount token_file # token_file: ${KUBERNETES_CLIENT_TOKEN_FILE} @@ -403,12 +428,12 @@ nginx_config: # config for render the template to generate n # ### multi-cluster mode ### graphql: - max_size: 1048576 # the maximum size limitation of graphql in bytes, default 1MiB + max_size: 1048576 # Set the maximum size limitation of graphql in bytes. Default to 1MiB. 
-#ext-plugin: - #cmd: ["ls", "-l"] +# ext-plugin: +# cmd: ["ls", "-l"] -plugins: # plugin list (sorted by priority) +plugins: # plugin list (sorted by priority) - real-ip # priority: 23000 - ai # priority: 22900 - client-control # priority: 22000 @@ -429,6 +454,7 @@ plugins: # plugin list (sorted by priority) - csrf # priority: 2980 - uri-blocker # priority: 2900 - request-validation # priority: 2800 + - chaitin-waf # priority: 2700 - openid-connect # priority: 2599 - cas-auth # priority: 2597 - authz-casbin # priority: 2560 @@ -444,9 +470,9 @@ plugins: # plugin list (sorted by priority) - opa # priority: 2001 - authz-keycloak # priority: 2000 #- error-log-logger # priority: 1091 + - proxy-cache # priority: 1085 - body-transformer # priority: 1080 - proxy-mirror # priority: 1010 - - proxy-cache # priority: 1009 - proxy-rewrite # priority: 1008 - workflow # priority: 1006 - api-breaker # priority: 1005 @@ -467,6 +493,7 @@ plugins: # plugin list (sorted by priority) - public-api # priority: 501 - prometheus # priority: 500 - datadog # priority: 495 + - loki-logger # priority: 414 - elasticsearch-logger # priority: 413 - echo # priority: 412 - loggly # priority: 411 @@ -496,7 +523,7 @@ plugins: # plugin list (sorted by priority) - ext-plugin-post-req # priority: -3000 - ext-plugin-post-resp # priority: -4000 -stream_plugins: # sorted by priority +stream_plugins: # stream plugin list (sorted by priority) - ip-restriction # priority: 3000 - limit-conn # priority: 1003 - mqtt-proxy # priority: 1000 @@ -504,141 +531,145 @@ stream_plugins: # sorted by priority - syslog # priority: 401 # <- recommend to use priority (0, 100) for your custom plugins -#wasm: - #plugins: - #- name: wasm_log - #priority: 7999 - #file: t/wasm/log/main.go.wasm - -#xrpc: - #protocols: - #- name: pingpong - -plugin_attr: - log-rotate: - interval: 3600 # rotate interval (unit: second) - max_kept: 168 # max number of log files will be kept - max_size: -1 # max size bytes of log files to be rotated, 
size check would be skipped with a value less than 0 - enable_compression: false # enable log file compression(gzip) or not, default false - skywalking: - service_name: APISIX - service_instance_name: APISIX Instance Name - endpoint_addr: http://127.0.0.1:12800 - opentelemetry: - trace_id_source: x-request-id + +# wasm: +# plugins: +# - name: wasm_log +# priority: 7999 +# file: t/wasm/log/main.go.wasm + +# xrpc: +# protocols: +# - name: pingpong +plugin_attr: # Plugin attributes + log-rotate: # Plugin: log-rotate + interval: 3600 # Set the log rotate interval in seconds. + max_kept: 168 # Set the maximum number of log files to keep. If exceeded, historic logs are deleted. + max_size: -1 # Set the maximum size of log files in bytes before a rotation. + # Skip size check if max_size is less than 0. + enable_compression: false # Enable log file compression (gzip). + skywalking: # Plugin: skywalking + service_name: APISIX # Set the service name for SkyWalking reporter. + service_instance_name: APISIX Instance Name # Set the service instance name for SkyWalking reporter. + endpoint_addr: http://127.0.0.1:12800 # Set the SkyWalking HTTP endpoint. + report_interval: 3 # Set the reporting interval in second. + opentelemetry: # Plugin: opentelemetry + trace_id_source: x-request-id # Specify the source of the trace ID for OpenTelemetry traces. resource: - service.name: APISIX + service.name: APISIX # Set the service name for OpenTelemetry traces. collector: - address: 127.0.0.1:4318 - request_timeout: 3 - request_headers: - Authorization: token + address: 127.0.0.1:4318 # Set the address of the OpenTelemetry collector to send traces to. + request_timeout: 3 # Set the timeout for requests to the OpenTelemetry collector in seconds. + request_headers: # Set the headers to include in requests to the OpenTelemetry collector. + Authorization: token # Set the authorization header to include an access token. 
batch_span_processor: - drop_on_queue_full: false - max_queue_size: 1024 - batch_timeout: 2 - inactive_timeout: 1 - max_export_batch_size: 16 - prometheus: - export_uri: /apisix/prometheus/metrics - metric_prefix: apisix_ - enable_export_server: true - export_addr: - ip: 127.0.0.1 - port: 9091 - #metrics: + drop_on_queue_full: false # Drop spans when the export queue is full. + max_queue_size: 1024 # Set the maximum size of the span export queue. + batch_timeout: 2 # Set the timeout for span batches to wait in the export queue before + # being sent. + inactive_timeout: 1 # Set the timeout for spans to wait in the export queue before being sent, + # if the queue is not full. + max_export_batch_size: 16 # Set the maximum number of spans to include in each batch sent to the + # OpenTelemetry collector. + prometheus: # Plugin: prometheus + export_uri: /apisix/prometheus/metrics # Set the URI for the Prometheus metrics endpoint. + metric_prefix: apisix_ # Set the prefix for Prometheus metrics generated by APISIX. + enable_export_server: true # Enable the Prometheus export server. + export_addr: # Set the address for the Prometheus export server. + ip: 127.0.0.1 # Set the IP. + port: 9091 # Set the port. + # metrics: # Create extra labels from nginx variables: https://nginx.org/en/docs/varindex.html # http_status: - # # extra labels from nginx variables # extra_labels: - # # the label name doesn't need to be the same as variable name - # # below labels are only examples, you could add any valid variables as you need # - upstream_addr: $upstream_addr - # - upstream_status: $upstream_status + # - status: $upstream_status # The label name does not need to be the same as the variable name. 
# http_latency: # extra_labels: # - upstream_addr: $upstream_addr # bandwidth: # extra_labels: # - upstream_addr: $upstream_addr - server-info: - report_ttl: 60 # live time for server info in etcd (unit: second) - dubbo-proxy: - upstream_multiplex_count: 32 - request-id: - snowflake: - enable: false - snowflake_epoc: 1609459200000 # the starting timestamp is expressed in milliseconds - data_machine_bits: 12 # data machine bit, maximum 31, because Lua cannot do bit operations greater than 31 - sequence_bits: 10 # each machine generates a maximum of (1 << sequence_bits) serial numbers per millisecond - data_machine_ttl: 30 # live time for data_machine in etcd (unit: second) - data_machine_interval: 10 # lease renewal interval in etcd (unit: second) - proxy-mirror: - timeout: # proxy timeout in mirrored sub-request + # default_buckets: + # - 10 + # - 50 + # - 100 + # - 200 + # - 500 + server-info: # Plugin: server-info + report_ttl: 60 # Set the TTL in seconds for server info in etcd. + # Maximum: 86400. Minimum: 3. + dubbo-proxy: # Plugin: dubbo-proxy + upstream_multiplex_count: 32 # Set the maximum number of connections that can be multiplexed over + # a single network connection between the Dubbo Proxy and the upstream + # Dubbo services. + proxy-mirror: # Plugin: proxy-mirror + timeout: # Set the timeout for mirrored requests. connect: 60s read: 60s send: 60s -# redirect: -# https_port: 8443 # the default port for use by HTTP redirects to HTTPS - inspect: - delay: 3 # in seconds - hooks_file: "/usr/local/apisix/plugin_inspect_hooks.lua" - -deployment: - role: traditional + # redirect: # Plugin: redirect + # https_port: 8443 # Set the default port used to redirect HTTP to HTTPS. + inspect: # Plugin: inspect + delay: 3 # Set the delay in seconds for the frequency of checking the hooks file. + hooks_file: "/usr/local/apisix/plugin_inspect_hooks.lua" # Set the path to the Lua file that defines + # hooks. 
Only administrators should have + # write access to this file for security. + +deployment: # Deployment configurations + role: traditional # Set deployment mode: traditional, control_plane, or data_plane. role_traditional: - config_provider: etcd - admin: - # Default token when use API to call for Admin API. - # *NOTE*: Highly recommended to modify this value to protect APISIX's Admin API. - # Disabling this configuration item means that the Admin API does not - # require any authentication. + config_provider: etcd # Set the configuration center. + + #role_data_plane: # Set data plane details if role is data_plane. + # config_provider: etcd # Set the configuration center: etcd, xds, or yaml. + + #role_control_plane: # Set control plane details if role is control_plane. + # config_provider: etcd # Set the configuration center. + + admin: # Admin API + admin_key_required: true # Enable Admin API authentication by default for security. admin_key: - - name: admin - key: edd1c9f034335f136f87ad84b625c8f1 - role: admin # admin: manage all configuration data - # viewer: only can view configuration data + name: admin # admin: write access to configurations. + key: edd1c9f034335f136f87ad84b625c8f1 # Set API key for the admin of Admin API. + role: admin - - name: viewer - key: 4054f7cf07e344346cd3f287985e76a2 + name: viewer # viewer: read-only to configurations. + key: 4054f7cf07e344346cd3f287985e76a2 # Set API key for the viewer of Admin API. role: viewer - enable_admin_cors: true # Admin API support CORS response headers. - allow_admin: # http://nginx.org/en/docs/http/ngx_http_access_module.html#allow - - 127.0.0.0/24 # If we don't set any IP list, then any IP access is allowed by default. - #- "::/64" - admin_listen: # use a separate port - ip: 0.0.0.0 # Specific IP, if not set, the default value is `0.0.0.0`. - port: 9180 # Specific port, which must be different from node_listen's port. - - #https_admin: true # enable HTTPS when use a separate port for Admin API. 
- # Admin API will use conf/apisix_admin_api.crt and conf/apisix_admin_api.key as certificate. + enable_admin_cors: true # Enable Admin API CORS response header `Access-Control-Allow-Origin`. + allow_admin: # Limit Admin API access by IP addresses. + - 127.0.0.0/24 # If not set, any IP address is allowed. + # - "::/64" + admin_listen: # Set the Admin API listening addresses. + ip: 0.0.0.0 # Set listening IP. + port: 9180 # Set listening port. Beware of port conflict with node_listen. - admin_api_mtls: # Depends on `admin_listen` and `https_admin`. - admin_ssl_cert: "" # Path of your self-signed server side cert. - admin_ssl_cert_key: "" # Path of your self-signed server side key. - admin_ssl_ca_cert: "" # Path of your self-signed ca cert.The CA is used to sign all admin api callers' certificates. + # https_admin: true # Enable SSL for Admin API on IP and port specified in admin_listen. + # Use admin_api_mtls.admin_ssl_cert and admin_api_mtls.admin_ssl_cert_key. + # admin_api_mtls: # Set this if `https_admin` is true. + # admin_ssl_cert: "" # Set path to SSL/TLS certificate. + # admin_ssl_cert_key: "" # Set path to SSL/TLS key. + # admin_ssl_ca_cert: "" # Set path to CA certificate used to sign client certificates. - admin_api_version: v3 # The version of admin api, latest version is v3. + admin_api_version: v3 # Set the version of Admin API (latest: v3). etcd: - host: # it's possible to define multiple etcd hosts addresses of the same etcd cluster. - - "http://127.0.0.1:2379" # multiple etcd address, if your etcd cluster enables TLS, please use https scheme, - # e.g. https://127.0.0.1:2379. - prefix: /apisix # configuration prefix in etcd - use_grpc: false # enable the experimental configuration sync via gRPC - timeout: 30 # 30 seconds. Use a much higher timeout (like an hour) if the `use_grpc` is true. 
- #resync_delay: 5 # when sync failed and a rest is needed, resync after the configured seconds plus 50% random jitter - #health_check_timeout: 10 # etcd retry the unhealthy nodes after the configured seconds - startup_retry: 2 # the number of retry to etcd during the startup, default to 2 - #user: root # root username for etcd - #password: 5tHkHhYkjr6cQY # root password for etcd + host: # Set etcd address(es) in the same etcd cluster. + - "http://127.0.0.1:2379" # If TLS is enabled for etcd, use https://127.0.0.1:2379. + prefix: /apisix # Set etcd prefix. + timeout: 30 # Set timeout in seconds. + # resync_delay: 5 # Set resync time in seconds after a sync failure. + # The actual resync time would be resync_delay plus 50% random jitter. + # health_check_timeout: 10 # Set timeout in seconds for etcd health check. + # Default to 10 if not set or a negative value is provided. + startup_retry: 2 # Set the number of retries to etcd on startup. Default to 2. + # user: root # Set the root username for etcd. + # password: 5tHkHhYkjr6cQ # Set the root password for etcd. tls: - # To enable etcd client certificate you need to build APISIX-Base, see - # https://apisix.apache.org/docs/apisix/FAQ#how-do-i-build-the-apisix-base-environment - #cert: /path/to/cert # path of certificate used by the etcd client - #key: /path/to/key # path of key used by the etcd client - - verify: true # whether to verify the etcd endpoint certificate when setup a TLS connection to etcd, - # the default value is true, e.g. the certificate will be verified strictly. - #sni: # the SNI for etcd TLS requests. If missed, the host part of the URL will be used. + # cert: /path/to/cert # Set the path to certificate used by the etcd client + # key: /path/to/key # Set the path to path of key used by the etcd client + verify: true # Verify the etcd certificate when establishing a TLS connection with etcd. + # sni: # The SNI for etcd TLS requests. + # If not set, the host from the URL is used. 
diff --git a/docs/assets/images/health_check_status_page.png b/docs/assets/images/health_check_status_page.png new file mode 100644 index 000000000000..ed4aebead0fd Binary files /dev/null and b/docs/assets/images/health_check_status_page.png differ diff --git a/docs/assets/images/skip-mtls.png b/docs/assets/images/skip-mtls.png new file mode 100644 index 000000000000..a739e66dfe74 Binary files /dev/null and b/docs/assets/images/skip-mtls.png differ diff --git a/docs/assets/other/json/apisix-grafana-dashboard.json b/docs/assets/other/json/apisix-grafana-dashboard.json index 1ea90c10ca34..e4e5b94681c3 100644 --- a/docs/assets/other/json/apisix-grafana-dashboard.json +++ b/docs/assets/other/json/apisix-grafana-dashboard.json @@ -520,7 +520,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(apisix_bandwidth{instance=~\"$instance\"}[30s])) by (type)", + "expr": "sum(rate(apisix_bandwidth{instance=~\"$instance\"}[$__rate_interval])) by (type)", "legendFormat": "{{type}}", "refId": "A" } @@ -620,12 +620,12 @@ "steppedLine": false, "targets": [ { - "expr": "sum(irate(apisix_bandwidth{type=\"ingress\", service =~\"$service\",route=~\"$route\",instance=~\"$instance\"}[1m])) by (service)", + "expr": "sum(irate(apisix_bandwidth{type=\"ingress\", service =~\"$service\",route=~\"$route\",instance=~\"$instance\"}[$__rate_interval])) by (service)", "legendFormat": "service:{{service}}", "refId": "A" }, { - "expr": "sum(irate(apisix_bandwidth{type=\"ingress\", service =~\"$service\",route=~\"$route\",instance=~\"$instance\"}[1m])) by (route)", + "expr": "sum(irate(apisix_bandwidth{type=\"ingress\", service =~\"$service\",route=~\"$route\",instance=~\"$instance\"}[$__rate_interval])) by (route)", "legendFormat": "route:{{route}}", "refId": "B" } @@ -725,13 +725,13 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(apisix_bandwidth{type=\"egress\", service =~\"$service\",route=~\"$route\",instance=~\"$instance\"}[1m])) by (service)", + "expr": 
"sum(rate(apisix_bandwidth{type=\"egress\", service =~\"$service\",route=~\"$route\",instance=~\"$instance\"}[$__rate_interval])) by (service)", "interval": "", "legendFormat": "service:{{service}}", "refId": "A" }, { - "expr": "sum(rate(apisix_bandwidth{type=\"egress\", service =~\"$service\",route=~\"$route\",instance=~\"$instance\"}[1m])) by (route)", + "expr": "sum(rate(apisix_bandwidth{type=\"egress\", service =~\"$service\",route=~\"$route\",instance=~\"$instance\"}[$__rate_interval])) by (route)", "legendFormat": "route:{{route}}", "refId": "B" } @@ -853,7 +853,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(apisix_http_status{service=~\"$service\",route=~\"$route\",instance=~\"$instance\"}[1m])) by (code)", + "expr": "sum(rate(apisix_http_status{service=~\"$service\",route=~\"$route\",instance=~\"$instance\"}[$__rate_interval])) by (code)", "instant": false, "interval": "", "intervalFactor": 1, @@ -965,7 +965,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(apisix_http_status{service=~\"$service\",route=~\"$route\",instance=~\"$instance\"}[1m])) by (service)", + "expr": "sum(rate(apisix_http_status{service=~\"$service\",route=~\"$route\",instance=~\"$instance\"}[$__rate_interval])) by (service)", "instant": false, "interval": "", "intervalFactor": 1, @@ -973,7 +973,7 @@ "refId": "A" }, { - "expr": "sum(rate(apisix_http_status{service=~\"$service\",route=~\"$route\",instance=~\"$instance\"}[1m])) by (route)", + "expr": "sum(rate(apisix_http_status{service=~\"$service\",route=~\"$route\",instance=~\"$instance\"}[$__rate_interval])) by (route)", "interval": "", "legendFormat": "route:{{route}}", "refId": "D" @@ -1083,20 +1083,20 @@ "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(0.90, sum(rate(apisix_http_latency_bucket{type=~\"request\",service=~\"$service\",consumer=~\"$consumer\",node=~\"$node\"}[1m])) by (le))", + "expr": "histogram_quantile(0.90, 
sum(rate(apisix_http_latency_bucket{type=~\"request\",service=~\"$service\",consumer=~\"$consumer\",node=~\"$node\",route=~\"$route\"}[$__rate_interval])) by (le))", "format": "time_series", "interval": "", "legendFormat": "P90", "refId": "A" }, { - "expr": "histogram_quantile(0.95, sum(rate(apisix_http_latency_bucket{type=~\"request\",service=~\"$service\",consumer=~\"$consumer\",node=~\"$node\"}[1m])) by (le))", + "expr": "histogram_quantile(0.95, sum(rate(apisix_http_latency_bucket{type=~\"request\",service=~\"$service\",consumer=~\"$consumer\",node=~\"$node\",route=~\"$route\"}[$__rate_interval])) by (le))", "interval": "", "legendFormat": "P95", "refId": "B" }, { - "expr": "histogram_quantile(0.99, sum(rate(apisix_http_latency_bucket{type=~\"request\",service=~\"$service\",consumer=~\"$consumer\",node=~\"$node\"}[1m])) by (le))", + "expr": "histogram_quantile(0.99, sum(rate(apisix_http_latency_bucket{type=~\"request\",service=~\"$service\",consumer=~\"$consumer\",node=~\"$node\",route=~\"$route\"}[$__rate_interval])) by (le))", "interval": "", "legendFormat": "P99", "refId": "C" @@ -1206,7 +1206,7 @@ "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(0.90, sum(rate(apisix_http_latency_bucket{type=~\"apisix\",service=~\"$service\",consumer=~\"$consumer\",node=~\"$node\"}[1m])) by (le))", + "expr": "histogram_quantile(0.90, sum(rate(apisix_http_latency_bucket{type=~\"apisix\",service=~\"$service\",consumer=~\"$consumer\",node=~\"$node\",route=~\"$route\"}[$__rate_interval])) by (le))", "format": "time_series", "instant": false, "interval": "", @@ -1215,13 +1215,13 @@ "refId": "A" }, { - "expr": "histogram_quantile(0.95, sum(rate(apisix_http_latency_bucket{type=~\"apisix\",service=~\"$service\",consumer=~\"$consumer\",node=~\"$node\"}[1m])) by (le))", + "expr": "histogram_quantile(0.95, sum(rate(apisix_http_latency_bucket{type=~\"apisix\",service=~\"$service\",consumer=~\"$consumer\",node=~\"$node\",route=~\"$route\"}[$__rate_interval])) by 
(le))", "interval": "", "legendFormat": "P95", "refId": "B" }, { - "expr": "histogram_quantile(0.99, sum(rate(apisix_http_latency_bucket{type=~\"apisix\",service=~\"$service\",consumer=~\"$consumer\",node=~\"$node\"}[1m])) by (le))", + "expr": "histogram_quantile(0.99, sum(rate(apisix_http_latency_bucket{type=~\"apisix\",service=~\"$service\",consumer=~\"$consumer\",node=~\"$node\",route=~\"$route\"}[$__rate_interval])) by (le))", "interval": "", "legendFormat": "P99", "refId": "C" @@ -1330,20 +1330,20 @@ "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(0.90, sum(rate(apisix_http_latency_bucket{type=~\"upstream\",service=~\"$service\",consumer=~\"$consumer\",node=~\"$node\"}[1m])) by (le))", + "expr": "histogram_quantile(0.90, sum(rate(apisix_http_latency_bucket{type=~\"upstream\",service=~\"$service\",consumer=~\"$consumer\",node=~\"$node\",route=~\"$route\"}[$__rate_interval])) by (le))", "format": "time_series", "interval": "", "legendFormat": "P90", "refId": "A" }, { - "expr": "histogram_quantile(0.95, sum(rate(apisix_http_latency_bucket{type=~\"upstream\",service=~\"$service\",consumer=~\"$consumer\",node=~\"$node\"}[1m])) by (le))", + "expr": "histogram_quantile(0.95, sum(rate(apisix_http_latency_bucket{type=~\"upstream\",service=~\"$service\",consumer=~\"$consumer\",node=~\"$node\",route=~\"$route\"}[$__rate_interval])) by (le))", "interval": "", "legendFormat": "P95", "refId": "B" }, { - "expr": "histogram_quantile(0.99, sum(rate(apisix_http_latency_bucket{type=~\"upstream\",service=~\"$service\",consumer=~\"$consumer\",node=~\"$node\"}[1m])) by (le))", + "expr": "histogram_quantile(0.99, sum(rate(apisix_http_latency_bucket{type=~\"upstream\",service=~\"$service\",consumer=~\"$consumer\",node=~\"$node\",route=~\"$route\"}[$__rate_interval])) by (le))", "interval": "", "legendFormat": "P99", "refId": "C" diff --git a/docs/en/latest/FAQ.md b/docs/en/latest/FAQ.md index 02691f17bb3b..79e3f3d48a51 100644 --- a/docs/en/latest/FAQ.md +++ 
b/docs/en/latest/FAQ.md @@ -105,7 +105,7 @@ Mainland China users can use `luarocks.cn` as the LuaRocks server. You can use t make deps ENV_LUAROCKS_SERVER=https://luarocks.cn ``` -If this does not solve your problem, you can try getting a detailed log by using the `--verbose` flag to diagnose the problem. +If this does not solve your problem, you can try getting a detailed log by using the `--verbose` or `-v` flag to diagnose the problem. ## How do I build the APISIX-Base environment? @@ -732,6 +732,7 @@ curl http://127.0.0.1:9180/apisix/admin/routes/1 \ } } }' +``` ## How does APISIX configure ETCD with authentication? diff --git a/docs/en/latest/admin-api.md b/docs/en/latest/admin-api.md index 6e714cc9d8e2..6f96e18abd88 100644 --- a/docs/en/latest/admin-api.md +++ b/docs/en/latest/admin-api.md @@ -103,6 +103,46 @@ deployment: This will find the environment variable `ADMIN_KEY` first, and if it does not exist, it will use `edd1c9f034335f136f87ad84b625c8f1` as the default value. +You can also specify environment variables in yaml keys. This is specifically useful in the `standalone` [mode](./deployment-modes.md#standalone) where you can specify the upstream nodes as follows: + +```yaml title="./conf/apisix.yaml" +routes: + - + uri: "/test" + upstream: + nodes: + "${{HOST_IP}}:${{PORT}}": 1 + type: roundrobin +#END +``` + +### Force Delete + +By default, the Admin API checks for references between resources and will refuse to delete resources in use. 
+ +You can make a force deletion by adding the request argument `force=true` to the delete request, for example: + +```bash +$ curl http://127.0.0.1:9180/apisix/admin/upstreams/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '{ + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" +}' +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '{ + "uri": "/*", + "upstream_id": 1 +}' +{"value":{"priority":0,"upstream_id":1,"uri":"/*","create_time":1689038794,"id":"1","status":1,"update_time":1689038916},"key":"/apisix/routes/1"} + +$ curl http://127.0.0.1:9180/apisix/admin/upstreams/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X DELETE +{"error_msg":"can not delete this upstream, route [1] is still using it now"} +$ curl "http://127.0.0.1:9180/apisix/admin/upstreams/1?force=anyvalue" -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X DELETE +{"error_msg":"can not delete this upstream, route [1] is still using it now"} +$ curl "http://127.0.0.1:9180/apisix/admin/upstreams/1?force=true" -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X DELETE +{"deleted":"1","key":"/apisix/upstreams/1"} +``` + ## V3 new feature The Admin API has made some breaking changes in V3 version, as well as supporting additional features. @@ -237,6 +277,10 @@ curl 'http://127.0.0.1:9180/apisix/admin/routes?name=test&uri=foo&label=' \ Route resource request address: /apisix/admin/routes/{id}?ttl=0 +### Quick Note on ID Syntax + +ID's as a text string must be of a length between 1 and 64 characters and they should only contain uppercase, lowercase, numbers and no special characters apart from dashes ( - ), periods ( . ) and underscores ( _ ). For integer values they simply must have a minimum character count of 1. 
+ ### Request Methods | Method | Request URI | Request Body | Description | @@ -270,7 +314,7 @@ Route resource request address: /apisix/admin/routes/{id}?ttl=0 | methods | False | Match Rules | Matches with the specified methods. Matches all methods if empty or unspecified. | ["GET", "POST"] | | priority | False | Match Rules | If different Routes matches to the same `uri`, then the Route is matched based on its `priority`. A higher value corresponds to higher priority. It is set to `0` by default. | priority = 10 | | vars | False | Match Rules | Matches based on the specified variables consistent with variables in Nginx. Takes the form `[[var, operator, val], [var, operator, val], ...]]`. Note that this is case sensitive when matching a cookie name. See [lua-resty-expr](https://github.com/api7/lua-resty-expr) for more details. | [["arg_name", "==", "json"], ["arg_age", ">", 18]] | -| filter_func | False | Match Rules | Matches based on a user-defined filtering function. Used in scenarios requiring complex matching. These functions can accept an input parameter `vars` which can be used to access the Nginx variables. | function(vars) return vars["arg_name"] == "json" end | +| filter_func | False | Match Rules | Matches using a user-defined function in Lua. Used in scenarios where `vars` is not sufficient. Functions accept an argument `vars` which provides access to built-in variables (including Nginx variables). | function(vars) return tonumber(vars.arg_userid) % 4 > 2; end | | plugins | False | Plugin | Plugins that are executed during the request/response cycle. See [Plugin](terminology/plugin.md) for more. | | | script | False | Script | Used for writing arbitrary Lua code or directly calling existing plugins to be executed. See [Script](terminology/script.md) for more. | | | upstream | False | Upstream | Configuration of the [Upstream](./terminology/upstream.md). 
| | @@ -831,6 +875,8 @@ An Upstream configuration can be directly bound to a Route or a Service, but the Upstream resource request address: /apisix/admin/upstreams/{id} +For notes on ID syntax please refer to: [ID Syntax](#quick-note-on-id-syntax) + ### Request Methods | Method | Request URI | Request Body | Description | @@ -854,11 +900,11 @@ In addition to the equalization algorithm selections, Upstream also supports pas | service_name | required, can't be used with `nodes` | Service name used for [service discovery](discovery.md). | `a-bootiful-client` | | discovery_type | required, if `service_name` is used | The type of service [discovery](discovery.md). | `eureka` | | hash_on | optional | Only valid if the `type` is `chash`. Supports Nginx variables (`vars`), custom headers (`header`), `cookie` and `consumer`. Defaults to `vars`. | | -| key | optional | Only valid if the `type` is `chash`. Finds the corresponding node `id` according to `hash_on` and `key` values. When `hash_on` is set to `vars`, `key` is a required parameter and it supports [Nginx variables](http://nginx.org/en/docs/varindex.html). When `hash_on` is set as `header`, `key` is a required parameter, and `header name` can be customized. When `hash_on` is set to `cookie`, `key` is also a required parameter, and `cookie name` can be customized. When `hash_on` is set to `consumer`, `key` need not be set and the `key` used by the hash algorithm would be the authenticated `consumer_name`. If the specified `hash_on` and `key` fail to fetch the values, it will default to `remote_addr`. | `uri`, `server_name`, `server_addr`, `request_uri`, `remote_port`, `remote_addr`, `query_string`, `host`, `hostname`, `arg_***`, `arg_***` | +| key | optional | Only valid if the `type` is `chash`. Finds the corresponding node `id` according to `hash_on` and `key` values. When `hash_on` is set to `vars`, `key` is a required parameter and it supports [Nginx variables](http://nginx.org/en/docs/varindex.html). 
When `hash_on` is set as `header`, `key` is a required parameter, and `header name` can be customized. When `hash_on` is set to `cookie`, `key` is also a required parameter, and `cookie name` can be customized. When `hash_on` is set to `consumer`, `key` need not be set and the `key` used by the hash algorithm would be the authenticated `consumer_name`. | `uri`, `server_name`, `server_addr`, `request_uri`, `remote_port`, `remote_addr`, `query_string`, `host`, `hostname`, `arg_***`, `arg_***` | | checks | optional | Configures the parameters for the [health check](./tutorials/health-check.md). | | | retries | optional | Sets the number of retries while passing the request to Upstream using the underlying Nginx mechanism. Set according to the number of available backend nodes by default. Setting this to `0` disables retry. | | | retry_timeout | optional | Timeout to continue with retries. Setting this to `0` disables the retry timeout. | | -| timeout | optional | Sets the timeout (in seconds) for connecting to, and sending and receiving messages to and from the Upstream. | | +| timeout | optional | Sets the timeout (in seconds) for connecting to, and sending and receiving messages to and from the Upstream. | `{"connect": 0.5,"send": 0.5,"read": 0.5}` | | name | optional | Identifier for the Upstream. | | | desc | optional | Description of usage scenarios. | | | pass_host | optional | Configures the `host` when the request is forwarded to the upstream. Can be one of `pass`, `node` or `rewrite`. Defaults to `pass` if not specified. `pass`- transparently passes the client's host to the Upstream. `node`- uses the host configured in the node of the Upstream. `rewrite`- Uses the value configured in `upstream_host`. | | @@ -889,7 +935,6 @@ The following should be considered when setting the `hash_on` value: - When set to `cookie`, a `key` is required. This key is equal to "cookie\_`key`". The cookie name is case-sensitive. 
- When set to `consumer`, the `key` is optional and the key is set to the `consumer_name` captured from the authentication Plugin. - When set to `vars_combinations`, the `key` is required. The value of the key can be a combination of any of the [Nginx variables](http://nginx.org/en/docs/varindex.html) like `$request_uri$remote_addr`. -- When no value is set for either `hash_on` or `key`, the key defaults to `remote_addr`. The features described below requires APISIX to be run on [APISIX-Base](./FAQ.md#how-do-i-build-the-apisix-base-environment): @@ -1140,6 +1185,8 @@ Currently, the response is returned from etcd. SSL resource request address: /apisix/admin/ssls/{id} +For notes on ID syntax please refer to: [ID Syntax](#quick-note-on-id-syntax) + ### Request Methods | Method | Request URI | Request Body | Description | @@ -1154,18 +1201,20 @@ SSL resource request address: /apisix/admin/ssls/{id} | Parameter | Required | Type | Description | Example | | ------------ | -------- | ------------------------ | -------------------------------------------------------------------------------------------------------------- | ------------------------------------------------ | -| cert | True | Certificate | HTTPS certificate. | | -| key | True | Private key | HTTPS private key. | | +| cert | True | Certificate | HTTPS certificate. This field supports saving the value in Secret Manager using the [APISIX Secret](./terminology/secret.md) resource. | | +| key | True | Private key | HTTPS private key. This field supports saving the value in Secret Manager using the [APISIX Secret](./terminology/secret.md) resource. | | | certs | False | An array of certificates | Used for configuring multiple certificates for the same domain excluding the one provided in the `cert` field. | | | keys | False | An array of private keys | Private keys to pair with the `certs`. | | | client.ca | False | Certificate | Sets the CA certificate that verifies the client. Requires OpenResty 1.19+. 
| | | client.depth | False | Certificate | Sets the verification depth in client certificate chains. Defaults to 1. Requires OpenResty 1.19+. | | -| snis | True | Match Rules | A non-empty array of HTTPS SNI | | +| client.skip_mtls_uri_regex | False | An array of regular expressions, in PCRE format | Used to match URI, if matched, this request bypasses the client certificate checking, i.e. skip the MTLS. | ["/hello[0-9]+", "/foobar"] | +| snis | True, only if `type` is `server` | Match Rules | A non-empty array of HTTPS SNI | | | labels | False | Match Rules | Attributes of the resource specified as key-value pairs. | {"version":"v2","build":"16","env":"production"} | | create_time | False | Auxiliary | Epoch timestamp (in seconds) of the created time. If missing, this field will be populated automatically. | 1602883670 | | update_time | False | Auxiliary | Epoch timestamp (in seconds) of the updated time. If missing, this field will be populated automatically. | 1602883670 | | type | False | Auxiliary | Identifies the type of certificate, default `server`. | `client` Indicates that the certificate is a client certificate, which is used when APISIX accesses the upstream; `server` Indicates that the certificate is a server-side certificate, which is used by APISIX when verifying client requests. | | status | False | Auxiliary | Enables the current SSL. Set to `1` (enabled) by default. | `1` to enable, `0` to disable | +| ssl_protocols | False | An array of ssl protocols | It is used to control the SSL/TLS protocol version used between servers and clients. See [SSL Protocol](./ssl-protocol.md) for more examples. | `["TLSv1.2", "TLSv2.3"]` | Example Configuration: @@ -1312,11 +1361,31 @@ Plugin resource request address: /apisix/admin/plugins/{plugin_name} | ------ | ----------------------------------- | ------------ | ---------------------------------------------- | | GET | /apisix/admin/plugins/list | NULL | Fetches a list of all Plugins. 
| | GET | /apisix/admin/plugins/{plugin_name} | NULL | Fetches the specified Plugin by `plugin_name`. | +| GET | /apisix/admin/plugins?all=true | NULL | Get all properties of all plugins. | +| GET | /apisix/admin/plugins?all=true&subsystem=stream| NULL | Gets properties of all Stream plugins.| +| GET | /apisix/admin/plugins?all=true&subsystem=http | NULL | Gets properties of all HTTP plugins. | +| PUT | /apisix/admin/plugins/reload | NULL | Reloads the plugin according to the changes made in code | +| GET | apisix/admin/plugins/{plugin_name}?subsystem=stream | NULL | Gets properties of a specified plugin if it is supported in Stream/L4 subsystem. | +| GET | apisix/admin/plugins/{plugin_name}?subsystem=http | NULL | Gets properties of a specified plugin if it is supported in HTTP/L7 subsystem. | + +:::caution + +The interface of getting properties of all plugins via `/apisix/admin/plugins?all=true` will be deprecated soon. + +::: ### Request Body Parameters The Plugin ({plugin_name}) of the data structure. +### Request Arguments + +| Name | Description | Default | +| --------- | ----------------------------- | ------- | +| subsystem | The subsystem of the Plugins. | http | + +The plugin can be filtered on subsystem so that the ({plugin_name}) is searched in the subsystem passed through query params. + ### Example API usage: ```shell @@ -1329,7 +1398,7 @@ curl "http://127.0.0.1:9180/apisix/admin/plugins/list" \ ``` ```shell -curl "http://127.0.0.1:9180/apisix/admin/plugins/key-auth" -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' +curl "http://127.0.0.1:9180/apisix/admin/plugins/key-auth?subsystem=http" -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' ``` ```json @@ -1338,26 +1407,10 @@ curl "http://127.0.0.1:9180/apisix/admin/plugins/key-auth" -H 'X-API-KEY: ed :::tip -You can use the `/apisix/admin/plugins?all=true` API to get all properties of all plugins. - -Each Plugin has the attributes `name`, `priority`, `type`, `schema`, `consumer_schema` and `version`. 
- -Defaults to only HTTP Plugins. If you need to get attributes from stream Plugins, use `/apisix/admin/plugins?all=true&subsystem=stream`. +You can use the `/apisix/admin/plugins?all=true` API to get all properties of all plugins. This API will be deprecated soon. ::: -### Request Methods - -| Method | Request URI | Request Body | Description | -| ------ | ------------------------------ | ------------ | ---------------------------------------- | -| GET | /apisix/admin/plugins?all=true | NULL | Fetches all attributes from all Plugins. | - -### Request Arguments - -| Name | Description | Default | -| --------- | ----------------------------- | ------- | -| subsystem | The subsystem of the Plugins. | http | - ## Stream Route Route used in the [Stream Proxy](./stream-proxy.md). @@ -1382,8 +1435,8 @@ Stream Route resource request address: /apisix/admin/stream_routes/{id} | ----------- | -------- | -------- | ------------------------------------------------------------------- | ----------------------------- | | upstream | False | Upstream | Configuration of the [Upstream](./terminology/upstream.md). | | | upstream_id | False | Upstream | Id of the [Upstream](terminology/upstream.md) service. | | -| remote_addr | False | IP/CIDR | Filters Upstream forwards by matching with client IP. | "127.0.0.1/32" or "127.0.0.1" | -| server_addr | False | IP/CIDR | Filters Upstream forwards by matching with APISIX Server IP. | "127.0.0.1/32" or "127.0.0.1" | +| remote_addr | False | IPv4, IPv4 CIDR, IPv6 | Filters Upstream forwards by matching with client IP. | "127.0.0.1" or "127.0.0.1/32" or "::1" | +| server_addr | False | IPv4, IPv4 CIDR, IPv6 | Filters Upstream forwards by matching with APISIX Server IP. | "127.0.0.1" or "127.0.0.1/32" or "::1" | | server_port | False | Integer | Filters Upstream forwards by matching with APISIX Server port. | 9090 | | sni | False | Host | Server Name Indication. 
| "test.com" | | protocol.name | False | String | Name of the protocol proxyed by xRPC framework. | "redis" | @@ -1452,3 +1505,80 @@ HTTP/1.1 200 OK ### Response Parameters Currently, the response is returned from etcd. + +## Proto + +Proto is used to store protocol buffers so that APISIX can communicate in gRPC. + +See [grpc-transcode plugin](./plugins/grpc-transcode.md#enabling-the-plugin) doc for more examples. + +### Proto API + +Proto resource request address: /apisix/admin/protos/{id} + +### Request Methods + +| Method | Request URI | Request Body | Description | +| ------ | -------------------------------- | ------------ | ----------------------------------------------- | +| GET | /apisix/admin/protos | NULL | List all Protos. | +| GET | /apisix/admin/protos/{id} | NULL | Get a Proto by id. | +| PUT | /apisix/admin/protos/{id} | {...} | Create or update a Proto with the given id. | +| POST | /apisix/admin/protos | {...} | Create a Proto with a random id. | +| DELETE | /apisix/admin/protos/{id} | NULL | Delete Proto by id. | + +### Request Body Parameters + +| Parameter | Required | Type | Description | Example | +| ----------- | -------- | -------- | ------------------------------------------------------------------- | ----------------------------- | +| content | True | String | content of `.proto` or `.pb` files | See [here](./plugins/grpc-transcode.md#enabling-the-plugin) | +| create_time | False | Epoch timestamp (in seconds) of the created time. If missing, this field will be populated automatically. | 1602883670 | +| update_time | False | Epoch timestamp (in seconds) of the updated time. If missing, this field will be populated automatically. | 1602883670 | + +## Schema validation + +Check the validity of a configuration against its entity schema. This allows you to test your input before submitting a request to the entity endpoints of the Admin API. 
+ +Note that this only performs the schema validation checks, checking that the input configuration is well-formed. Requests to the entity endpoint using the given configuration may still fail due to other reasons, such as invalid foreign key relationships or uniqueness check failures against the contents of the data store. + +### Schema validation + +Schema validation request address: /apisix/admin/schema/validate/{resource} + +### Request Methods + +| Method | Request URI | Request Body | Description | +| ------ | -------------------------------- | ------------ | ----------------------------------------------- | +| POST | /apisix/admin/schema/validate/{resource} | {..resource conf..} | Validate the resource configuration against corresponding schema. | + +### Request Body Parameters + +* 200: validate ok. +* 400: validate failed, with error as response body in JSON format. + +Example: + +```bash +curl http://127.0.0.1:9180/apisix/admin/schema/validate/routes \ + -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X POST -i -d '{ + "uri": 1980, + "upstream": { + "scheme": "https", + "type": "roundrobin", + "nodes": { + "nghttp2.org": 1 + } + } +}' +HTTP/1.1 400 Bad Request +Date: Mon, 21 Aug 2023 07:37:13 GMT +Content-Type: application/json +Transfer-Encoding: chunked +Connection: keep-alive +Server: APISIX/3.4.0 +Access-Control-Allow-Origin: * +Access-Control-Allow-Credentials: true +Access-Control-Expose-Headers: * +Access-Control-Max-Age: 3600 + +{"error_msg":"property \"uri\" validation failed: wrong type: expected string, got number"} +``` diff --git a/docs/en/latest/building-apisix.md b/docs/en/latest/building-apisix.md index a0a20b40ee98..31de4b081c27 100644 --- a/docs/en/latest/building-apisix.md +++ b/docs/en/latest/building-apisix.md @@ -31,42 +31,41 @@ description: Guide for building and running APISIX locally for development. 
import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; -If you are looking to contribute to APISIX or setup a development environment, this guide is for you. +If you are looking to setup a development environment or contribute to APISIX, this guide is for you. -If you are looking to install and run APISIX, check out the [Installation](./installation-guide.md) docs. +If you are looking to quickly get started with APISIX, check out the other [installation methods](./installation-guide.md). :::note -If you want to build and package APISIX for a specific platform, see [apisix-build-tools](https://github.com/api7/apisix-build-tools). +To build an APISIX docker image from source code, see [build image from source code](https://apisix.apache.org/docs/docker/build/#build-an-image-from-customizedpatched-source-code). + +To build and package APISIX for a specific platform, see [apisix-build-tools](https://github.com/api7/apisix-build-tools) instead. ::: ## Building APISIX from source -To start, you have to install some dependencies. APISIX provides a handy script to get these installed: +Install dependencies using the script provided by APISIX: ```shell curl https://raw.githubusercontent.com/apache/apisix/master/utils/install-dependencies.sh -sL | bash - ``` -Then, create a directory and set the environment variable `APISIX_VERSION`: +Save the APISIX version to an environment variable to be used next: ```shell -APISIX_VERSION='3.2.0' -mkdir apisix-${APISIX_VERSION} +APISIX_VERSION='3.5.0' ``` -You can now clone the APISIX source code from Github by running the command below: +Clone the APISIX source code of this version into a new directory `apisix-APISIX_VERSION`: ```shell git clone --depth 1 --branch ${APISIX_VERSION} https://github.com/apache/apisix.git apisix-${APISIX_VERSION} ``` -You can also download the source package from the [Downloads page](https://apisix.apache.org/downloads/). But the source package missing the test case. 
This may affect subsequent operations. - -And you will also find source packages for APISIX Dashboard and APISIX Ingress Controller from [Downloads page](https://apisix.apache.org/downloads/). +Alternatively, you can also download the source package from the [Downloads](https://apisix.apache.org/downloads/) page. Note that source packages here are not distributed with test cases. -Now, navigate to the directory, create dependencies, and install APISIX as shown below: +Next, navigate to the directory, install dependencies, and build APISIX. ```shell cd apisix-${APISIX_VERSION} @@ -74,7 +73,7 @@ make deps make install ``` -This will install the runtime dependent Lua libraries and the `apisix` command. +This will install the runtime-dependent Lua libraries and the `apisix` CLI tool. :::note @@ -275,4 +274,4 @@ To run a specific test case, use the command below: prove -Itest-nginx/lib -r t/plugin/openid-connect.t ``` -See [testing framework](https://github.com/apache/apisix/blob/master/docs/en/latest/internal/testing-framework.md) for more details. +See [testing framework](./internal/testing-framework.md) for more details. diff --git a/docs/en/latest/certificate.md b/docs/en/latest/certificate.md index 4faf62ceb1ce..146a153fbe25 100644 --- a/docs/en/latest/certificate.md +++ b/docs/en/latest/certificate.md @@ -31,124 +31,121 @@ It is most common for an SSL certificate to contain only one domain. We can crea * `key`: PEM-encoded private key of the SSL key pair. * `snis`: Hostname(s) to associate with this certificate as SNIs. To set this attribute this certificate must have a valid private key associated with it. 
-We will use the Python script below to simplify the example: - -```python title="create-ssl.py" -#!/usr/bin/env python -# coding: utf-8 -import sys -# sudo pip install requests -import requests - -if len(sys.argv) <= 3: - print("bad argument") - sys.exit(1) -with open(sys.argv[1]) as f: - cert = f.read() -with open(sys.argv[2]) as f: - key = f.read() -sni = sys.argv[3] -api_key = "edd1c9f034335f136f87ad84b625c8f1" -resp = requests.put("http://127.0.0.1:9180/apisix/admin/ssls/1", json={ - "cert": cert, - "key": key, - "snis": [sni], -}, headers={ - "X-API-KEY": api_key, -}) -print(resp.status_code) -print(resp.text) -``` +The following is an example of configuring an SSL certificate with a single SNI in APISIX. + +Create an SSL object with the certificate and key valid for the SNI: ```shell -# create SSL object -./create-ssl.py t.crt t.key test.com +curl http://127.0.0.1:9180/apisix/admin/ssls/1 \ +-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "cert" : "'"$(cat t/certs/apisix.crt)"'", + "key": "'"$(cat t/certs/apisix.key)"'", + "snis": ["test.com"] +}' +``` + +Create a Router object: -# create Router object +```shell curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' { - "uri": "/hello", + "uri": "/get", "hosts": ["test.com"], "methods": ["GET"], "upstream": { "type": "roundrobin", "nodes": { - "127.0.0.1:1980": 1 + "httpbin.org": 1 } } }' +``` + +Send a request to verify: -# make a test +```shell +curl --resolve 'test.com:9443:127.0.0.1' https://test.com:9443/hello -k -vvv -curl --resolve 'test.com:9443:127.0.0.1' https://test.com:9443/hello -vvv * Added test.com:9443:127.0.0.1 to DNS cache * About to connect() to test.com port 9443 (#0) * Trying 127.0.0.1... 
* Connected to test.com (127.0.0.1) port 9443 (#0) -* Initializing NSS with certpath: sql:/etc/pki/nssdb -* skipping SSL peer certificate verification -* SSL connection using TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 +* SSL connection using TLSv1.3 / TLS_AES_256_GCM_SHA384 +* ALPN, server accepted to use h2 * Server certificate: -* subject: CN=test.com,O=iresty,L=ZhuHai,ST=GuangDong,C=CN -* start date: Jun 24 22:18:05 2019 GMT -* expire date: May 31 22:18:05 2119 GMT -* common name: test.com -* issuer: CN=test.com,O=iresty,L=ZhuHai,ST=GuangDong,C=CN -> GET /hello HTTP/1.1 -> User-Agent: curl/7.29.0 +* subject: C=CN; ST=GuangDong; L=ZhuHai; O=iresty; CN=test.com +* start date: Jun 24 22:18:05 2019 GMT +* expire date: May 31 22:18:05 2119 GMT +* issuer: C=CN; ST=GuangDong; L=ZhuHai; O=iresty; CN=test.com +* SSL certificate verify result: self-signed certificate (18), continuing anyway. +> GET /get HTTP/2 > Host: test.com:9443 -> Accept: */* +> user-agent: curl/7.81.0 +> accept: */* ``` ### wildcard SNI -Sometimes, one SSL certificate may contain a wildcard domain like `*.test.com`, -that means it can accept more than one domain, eg: `www.test.com` or `mail.test.com`. +An SSL certificate could also be valid for a wildcard domain like `*.test.com`, which means it is valid for any domain of that pattern, including `www.test.com` and `mail.test.com`. + +The following is an example of configuring an SSL certificate with a wildcard SNI in APISIX. -Here is an example, note that the value we pass as `sni` is `*.test.com`. 
+Create an SSL object with the certificate and key valid for the SNI: ```shell -./create-ssl.py t.crt t.key '*.test.com' +curl http://127.0.0.1:9180/apisix/admin/ssls/1 \ + -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' + { + "cert" : "'"$(cat t/certs/apisix.crt)"'", + "key": "'"$(cat t/certs/apisix.key)"'", + "snis": ["*.test.com"] + }' +``` + +Create a Router object: +```shell curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' { - "uri": "/hello", + "uri": "/get", "hosts": ["*.test.com"], "methods": ["GET"], "upstream": { "type": "roundrobin", "nodes": { - "127.0.0.1:1980": 1 + "httpbin.org": 1 } } }' +``` -# make a test +Send a request to verify: -curl --resolve 'www.test.com:9443:127.0.0.1' https://www.test.com:9443/hello -vvv -* Added test.com:9443:127.0.0.1 to DNS cache -* About to connect() to test.com port 9443 (#0) -* Trying 127.0.0.1... -* Connected to test.com (127.0.0.1) port 9443 (#0) -* Initializing NSS with certpath: sql:/etc/pki/nssdb -* skipping SSL peer certificate verification -* SSL connection using TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 +```shell +curl --resolve 'www.test.com:9443:127.0.0.1' https://www.test.com:9443/get -k -vvv + +* Added www.test.com:9443:127.0.0.1 to DNS cache +* Hostname www.test.com was found in DNS cache +* Trying 127.0.0.1:9443... 
+* Connected to www.test.com (127.0.0.1) port 9443 (#0) +* SSL connection using TLSv1.3 / TLS_AES_256_GCM_SHA384 +* ALPN, server accepted to use h2 * Server certificate: -* subject: CN=test.com,O=iresty,L=ZhuHai,ST=GuangDong,C=CN -* start date: Jun 24 22:18:05 2019 GMT -* expire date: May 31 22:18:05 2119 GMT -* common name: test.com -* issuer: CN=test.com,O=iresty,L=ZhuHai,ST=GuangDong,C=CN -> GET /hello HTTP/1.1 -> User-Agent: curl/7.29.0 -> Host: test.com:9443 -> Accept: */* +* subject: C=CN; ST=GuangDong; L=ZhuHai; O=iresty; CN=test.com +* start date: Jun 24 22:18:05 2019 GMT +* expire date: May 31 22:18:05 2119 GMT +* issuer: C=CN; ST=GuangDong; L=ZhuHai; O=iresty; CN=test.com +* SSL certificate verify result: self signed certificate (18), continuing anyway. +> GET /get HTTP/2 +> Host: www.test.com:9443 +> user-agent: curl/7.74.0 +> accept: */* ``` ### multiple domain -If your SSL certificate may contain more than one domain, like `www.test.com` -and `mail.test.com`, then you can add them into the `snis` array. For example: +If your SSL certificate may contain more than one domain, like `www.test.com` and `mail.test.com`, then you can add them into the `snis` array. 
For example: ```json { diff --git a/docs/en/latest/config.json b/docs/en/latest/config.json index d9804a15770a..bec485d7a291 100644 --- a/docs/en/latest/config.json +++ b/docs/en/latest/config.json @@ -1,5 +1,5 @@ { - "version": "3.2.0", + "version": "3.5.0", "sidebar": [ { "type": "category", @@ -26,12 +26,20 @@ "items": [ "tutorials/expose-api", "tutorials/protect-api", - "tutorials/observe-your-api", + { + "type": "category", + "label": "Observability", + "items": [ + "tutorials/observe-your-api", + "tutorials/health-check", + "tutorials/monitor-api-health-check" + ] + }, "tutorials/manage-api-consumers", "tutorials/cache-api-responses", "tutorials/add-multiple-api-versions", - "tutorials/health-check", - "tutorials/client-to-apisix-mtls" + "tutorials/client-to-apisix-mtls", + "tutorials/websocket-authentication" ] }, { @@ -118,7 +126,8 @@ "plugins/consumer-restriction", "plugins/csrf", "plugins/public-api", - "plugins/gm" + "plugins/gm", + "plugins/chaitin-waf" ] }, { @@ -180,7 +189,8 @@ "plugins/file-logger", "plugins/loggly", "plugins/elasticsearch-logger", - "plugins/tencent-cloud-cls" + "plugins/tencent-cloud-cls", + "plugins/loki-logger" ] } ] @@ -357,6 +367,10 @@ { "type": "doc", "id": "profile" + }, + { + "type": "doc", + "id": "ssl-protocol" } ] }, diff --git a/docs/en/latest/control-api.md b/docs/en/latest/control-api.md index c6944f2b5421..e21ca86d502d 100644 --- a/docs/en/latest/control-api.md +++ b/docs/en/latest/control-api.md @@ -98,71 +98,50 @@ Returns a [health check](./tutorials/health-check.md) of the APISIX instance. 
```json [ - { - "healthy_nodes": [ - { - "host": "127.0.0.1", - "port": 1980, - "priority": 0, - "weight": 1 - } - ], - "name": "upstream#/upstreams/1", - "nodes": [ - { - "host": "127.0.0.1", - "port": 1980, - "priority": 0, - "weight": 1 - }, - { - "host": "127.0.0.2", - "port": 1988, - "priority": 0, - "weight": 1 - } - ], - "src_id": "1", - "src_type": "upstreams" - }, - { - "healthy_nodes": [ - { - "host": "127.0.0.1", - "port": 1980, - "priority": 0, - "weight": 1 - } - ], - "name": "upstream#/routes/1", - "nodes": [ - { - "host": "127.0.0.1", - "port": 1980, - "priority": 0, - "weight": 1 - }, - { - "host": "127.0.0.1", - "port": 1988, - "priority": 0, - "weight": 1 - } - ], - "src_id": "1", - "src_type": "routes" - } + { + "nodes": [ + { + "ip": "52.86.68.46", + "counter": { + "http_failure": 0, + "success": 0, + "timeout_failure": 0, + "tcp_failure": 0 + }, + "port": 80, + "status": "healthy" + }, + { + "ip": "100.24.156.8", + "counter": { + "http_failure": 5, + "success": 0, + "timeout_failure": 0, + "tcp_failure": 0 + }, + "port": 80, + "status": "unhealthy" + } + ], + "name": "/apisix/routes/1", + "type": "http" + } ] + ``` Each of the returned objects contain the following fields: -* src_type: where the health checker is reporting from. Value is one of `["routes", "services", "upstreams"]`. -* src_id: id of the object creating the health checker. For example, if an Upstream -object with id `1` creates a health checker, the `src_type` is `upstreams` and the `src_id` is `1`. -* name: name of the health checker. +* name: resource id, where the health checker is reporting from. +* type: health check type: `["http", "https", "tcp"]`. * nodes: target nodes of the health checker. -* healthy_nodes: healthy nodes discovered by the health checker. +* nodes[i].ip: ip address. +* nodes[i].port: port number. +* nodes[i].status: health check result: `["healthy", "unhealthy", "mostly_healthy", "mostly_unhealthy"]`. 
+* nodes[i].counter.success: success health check count. +* nodes[i].counter.http_failure: http failures count. +* nodes[i].counter.tcp_failure: tcp connect/read/write failures count. +* nodes[i].counter.timeout_failure: timeout count. You can also use `/v1/healthcheck/$src_type/$src_id` to get the health status of specific nodes. @@ -170,40 +149,50 @@ For example, `GET /v1/healthcheck/upstreams/1` returns: ```json { - "healthy_nodes": [ - { - "host": "127.0.0.1", - "port": 1980, - "priority": 0, - "weight": 1 - } - ], - "name": "upstream#/upstreams/1", - "nodes": [ - { - "host": "127.0.0.1", - "port": 1980, - "priority": 0, - "weight": 1 - }, - { - "host": "127.0.0.2", - "port": 1988, - "priority": 0, - "weight": 1 - } - ], - "src_id": "1", - "src_type": "upstreams" + "nodes": [ + { + "ip": "52.86.68.46", + "counter": { + "http_failure": 0, + "success": 2, + "timeout_failure": 0, + "tcp_failure": 0 + }, + "port": 80, + "status": "healthy" + }, + { + "ip": "100.24.156.8", + "counter": { + "http_failure": 5, + "success": 0, + "timeout_failure": 0, + "tcp_failure": 0 + }, + "port": 80, + "status": "unhealthy" + } + ], + "type": "http", + "name": "/apisix/routes/1" } + ``` :::note -As APISIX uses multiple-process architecture, if the process never handles the request of a specific upstream, then the upstream's health check information will not appear on the process. This may result in the health check API can't get all data during testing. +Only when one upstream is satisfied by the conditions below, +its status is shown in the result list: + +* The upstream is configured with a health checker +* The upstream has served requests in any worker process ::: +If you use a browser to access the control API URL, then you will get the HTML output: + +![Health Check Status Page](https://raw.githubusercontent.com/apache/apisix/master/docs/assets/images/health_check_status_page.png) + ### POST /v1/gc Introduced in [v2.8](https://github.com/apache/apisix/releases/tag/2.8). 
@@ -221,7 +210,6 @@ Returns all configured [Routes](./terminology/route.md): ```json [ { - "update_count": 0, "value": { "priority": 0, "uris": [ @@ -260,7 +248,6 @@ Returns the Route with the specified `route_id`: ```json { - "update_count": 0, "value": { "priority": 0, "uris": [ diff --git a/docs/en/latest/debug-mode.md b/docs/en/latest/debug-mode.md index e1438d0c3438..b933dfd88868 100644 --- a/docs/en/latest/debug-mode.md +++ b/docs/en/latest/debug-mode.md @@ -38,13 +38,15 @@ basic: enable: true ``` +APISIX loads the configurations of `debug.yaml` on startup and then checks if the file is modified on an interval of 1 second. If the file is changed, APISIX automatically applies the configuration changes. + :::note For APISIX releases prior to v2.10, basic debug mode is enabled by setting `apisix.enable_debug = true` in your configuration file (`conf/config.yaml`). ::: -If you have configured two Plgins `limit-conn` and `limit-count` on the Route `/hello`, you will receive a response with the header `Apisix-Plugins: limit-conn, limit-count` when you enable the basic debug mode. +If you have configured two Plugins `limit-conn` and `limit-count` on the Route `/hello`, you will receive a response with the header `Apisix-Plugins: limit-conn, limit-count` when you enable the basic debug mode. ```shell curl http://127.0.0.1:1984/hello -i diff --git a/docs/en/latest/deployment-modes.md b/docs/en/latest/deployment-modes.md index 7e6d8da7d3f9..bc195121c946 100644 --- a/docs/en/latest/deployment-modes.md +++ b/docs/en/latest/deployment-modes.md @@ -39,10 +39,6 @@ Each of these deployment modes are explained in detail below. In the traditional deployment mode, one instance of APISIX will be both the `data_plane` and the `control_plane`. -![traditional deployment mode](https://raw.githubusercontent.com/apache/apisix/master/docs/assets/images/deployment-traditional.png) - -There will be a conf server that listens on the UNIX socket and acts as a proxy between APISIX and etcd. 
Both the data and the control planes connect to this conf server via HTTP. - An example configuration of the traditional deployment mode is shown below: ```yaml title="conf/config.yaml" @@ -73,16 +69,9 @@ The instance of APISIX deployed as the traditional role will: In the decoupled deployment mode the `data_plane` and `control_plane` instances of APISIX are deployed separately, i.e., one instance of APISIX is configured to be a *data plane* and the other to be a *control plane*. -![decoupled](https://raw.githubusercontent.com/apache/apisix/master/docs/assets/images/deployment-cp_and_dp.png) - The instance of APISIX deployed as the data plane will: -1. Fetch the configuration from the *control plane*. The default port is `9280`. -2. Performs a health check on all configured control plane addresses before starting the service. - 1. If the control plane addresses are unavailable, the startup fails and an exception is thrown. - 2. If at least one control plane address is available, it prints the unhealthy control planes logs, and starts the APISIX service. - 3. If all control planes are normal, APISIX service is started normally. -3. Once the service is started, it will handle the user requests. +Once the service is started, it will handle the user requests. The example below shows the configuration of an APISIX instance as *data plane* in the decoupled mode: @@ -90,23 +79,13 @@ The example below shows the configuration of an APISIX instance as *data plane* deployment: role: data_plane role_data_plane: - config_provider: control_plane - control_plane: - host: - - https://${Control_Plane_IP}:9280 - prefix: /apisix - timeout: 30 - certs: - cert: /path/to/ca-cert - cert_key: /path/to/ca-cert - trusted_ca_cert: /path/to/ca-cert + config_provider: etcd #END ``` The instance of APISIX deployed as the control plane will: 1. Listen on port `9180` and handle Admin API requests. -2. Provide the conf server which will listen on port `9280`. 
Both the control plane and the data plane will connect to this via HTTPS enforced by mTLS. The example below shows the configuration of an APISIX instance as *control plane* in the decoupled mode: @@ -115,51 +94,17 @@ deployment: role: control_plane role_control_plane: config_provider: etcd - conf_server: - listen: 0.0.0.0:9280 - cert: /path/to/ca-cert - cert_key: /path/to/ca-cert - client_ca_cert: /path/to/ca-cert - etcd: - host: - - https://${etcd_IP}:${etcd_Port} - prefix: /apisix - timeout: 30 - certs: - cert: /path/to/ca-cert - cert_key: /path/to/ca-cert - trusted_ca_cert: /path/to/ca-cert -#END -``` - -:::tip - -As OpenResty <= 1.21.4 does not support sending mTLS requests, to accept connections from APISIX running on these OpenResty versions, you need to disable the client certificate verification in the control plane instance as shown below: - -```yaml title="conf/config.yaml" -deployment: - role: control_plane - role_control_plane: - config_provider: etcd - conf_server: - listen: 0.0.0.0:9280 - cert: /path/to/ca-cert - cert_key: /path/to/ca-cert etcd: host: - https://${etcd_IP}:${etcd_Port} prefix: /apisix timeout: 30 - certs: - trusted_ca_cert: /path/to/ca-cert #END ``` -::: - ## Standalone -Turning on the APISIX node in Stand-alone mode will no longer use the default etcd as the configuration center. +Turning on the APISIX node in Standalone mode will no longer use the default etcd as the configuration center. This method is more suitable for two types of users: @@ -170,7 +115,7 @@ The routing rules in the `conf/apisix.yaml` file are loaded into memory immediat *Note*: Reloading and updating routing rules are all hot memory updates. There is no replacement of working processes, since it's a hot update. -Since the current Admin API is based on the etcd configuration center solution, enable Admin API is not allowed when the Stand-alone mode is enabled. 
+Since the current Admin API is based on the etcd configuration center solution, enable Admin API is not allowed when the Standalone mode is enabled. Standalone mode can only be enabled when we set the role of APISIX as data plane. We set `deployment.role` to `data_plane` and `deployment.role_data_plane.config_provider` to `yaml`. @@ -434,3 +379,25 @@ upstreams: id: 1 #END ``` + +### How to configure protos + +```yaml +protos: + - id: helloworld + desc: hello world + content: > + syntax = "proto3"; + package helloworld; + + service Greeter { + rpc SayHello (HelloRequest) returns (HelloReply) {} + } + message HelloRequest { + string name = 1; + } + message HelloReply { + string message = 1; + } +#END +``` diff --git a/docs/en/latest/discovery/consul.md b/docs/en/latest/discovery/consul.md index e31a83690b2b..b4eab61e982d 100644 --- a/docs/en/latest/discovery/consul.md +++ b/docs/en/latest/discovery/consul.md @@ -143,7 +143,7 @@ curl -X PUT 'http://127.0.0.1:8500/v1/agent/service/register' \ }' ``` -In some case, same service name exist in different consul servers. +In some cases, same service name might exist in different consul servers. To avoid confusion, use the full consul key url path as service name in practice. ### Upstream setting diff --git a/docs/en/latest/discovery/control-plane-service-discovery.md b/docs/en/latest/discovery/control-plane-service-discovery.md index acd4923a92e0..71fc6c7c46ce 100644 --- a/docs/en/latest/discovery/control-plane-service-discovery.md +++ b/docs/en/latest/discovery/control-plane-service-discovery.md @@ -6,7 +6,7 @@ keywords: - ZooKeeper - Nacos - APISIX-Seed -description: This documentation describes implement service discovery through Nacos and ZooKeeper on the API Gateway APISIX Control Plane. +description: This documentation describes implementing service discovery through Nacos and ZooKeeper on the API Gateway APISIX Control Plane. 
--- + +## Description + +After enabling the chaitin-waf plugin, the traffic will be forwarded to the Chaitin WAF service for the detection and +prevention of various web application attacks, ensuring the security of the application and user data. + +## Response Headers + +Depending on the plugin configuration, it is optional to add additional response headers. + +The response headers are listed below: + +- **X-APISIX-CHAITIN-WAF**: Whether APISIX forwards the request to the WAF server. + - yes: forwarded + - no: not forwarded + - unhealthy: matches the match variables, but no WAF server is available. + - err: an error occurred during the execution of the plugin. Also with **X-APISIX-CHAITIN-WAF-ERROR** response header + - waf-err: Error while interacting with the WAF server. Also with **X-APISIX-CHAITIN-WAF-ERROR** response header + - timeout: Timeout for request to the WAF server +- **X-APISIX-CHAITIN-WAF-ERROR**: Debug header. WAF error message +- **X-APISIX-CHAITIN-WAF-TIME**: The time in milliseconds that APISIX spent interacting with WAF. +- **X-APISIX-CHAITIN-WAF-STATUS**: The status code returned to APISIX by the WAF server. +- **X-APISIX-CHAITIN-WAF-ACTION**: Processing result returned to APISIX by the WAF server. + - pass: request valid and passed + - reject: request rejected by WAF service +- **X-APISIX-CHAITIN-WAF-SERVER**: Debug header. Picked WAF server. + +## Plugin Metadata + +| Name | Type | Required | Default value | Description | +|--------------------------|---------------|----------|---------------|------------------------------------------------------------------------------------------------------------------------------| +| nodes | array(object) | true | | A list of addresses for the Chaitin SafeLine WAF service. | +| nodes[0].host | string | true | | The address of Chaitin SafeLine WAF service. Supports IPV4, IPV6, Unix Socket, etc. | +| nodes[0].port | integer | false | 80 | The port of Chaitin SafeLine WAF service. 
| +| config | object | false | | Configuration of the Chaitin SafeLine WAF service. The parameters configured here will be used when route is not configured. | +| config.connect_timeout | integer | false | 1000 | connect timeout, in milliseconds | +| config.send_timeout | integer | false | 1000 | send timeout, in milliseconds | +| config.read_timeout | integer | false | 1000 | read timeout, in milliseconds | +| config.req_body_size | integer | false | 1024 | request body size, in KB | +| config.keepalive_size | integer | false | 256 | maximum concurrent idle connections to the SafeLine WAF detection service | +| config.keepalive_timeout | integer | false | 60000 | idle connection timeout, in milliseconds | + +An example configuration is as follows. + +```bash +curl http://127.0.0.1:9180/apisix/admin/plugin_metadata/chaitin-waf -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "nodes":[ + { + "host": "unix:/path/to/safeline/resources/detector/snserver.sock", + "port": 8000 + } + ] +}' +``` + +## Attributes + +| Name | Type | Required | Default value | Description | +|--------------------------|---------------|----------|---------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| match | array[object] | false | | The list of matching rules, default is empty | +| match.vars | array[array] | false | | List of variables to match for filtering requests for conditional traffic split. It is in the format `{variable operator value}`. For example, `{"arg_name", "==", "json"}`. The variables here are consistent with NGINX internal variables. For details on supported operators, [lua-resty-expr](https://github.com/api7/lua-resty-expr#operator-list). 
| +| append_waf_resp_header | bool | false | true | Whether to add response headers | +| append_waf_debug_header | bool | false | false | Whether or not to add debugging headers, effective when `append_waf_resp_header` is `true`. | +| config | object | false | | Configuration of the Chaitin SafeLine WAF service. When the route is not configured, the parameters configured in the metadata are used. | +| config.connect_timeout | integer | false | | connect timeout, in milliseconds | +| config.send_timeout | integer | false | | send timeout, in milliseconds | +| config.read_timeout | integer | false | | read timeout, in milliseconds | +| config.req_body_size | integer | false | | request body size, in KB | +| config.keepalive_size | integer | false | | maximum concurrent idle connections to the SafeLine WAF detection service | +| config.keepalive_timeout | integer | false | | idle connection timeout, in milliseconds | + +A sample configuration is shown below, using `httpbun.org` as the example backend, which can be replaced as needed: + +```bash +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "uri": "/*", + "plugins": { + "chaitin-waf": { + "match": [ + { + "vars": [ + ["http_waf","==","true"] + ] + } + ] + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbun.org:80": 1 + } + } +}' +``` + +## Test Plugin + +Test the above example configuration. 
+ +If the match condition is not met, the request can be reached normally: + +```bash +curl -H "Host: httpbun.org" http://127.0.0.1:9080/get -i + +HTTP/1.1 200 OK +Content-Type: application/json +Content-Length: 408 +Connection: keep-alive +X-APISIX-CHAITIN-WAF: no +Date: Wed, 19 Jul 2023 09:30:42 GMT +X-Powered-By: httpbun/3c0dc05883dd9212ac38b04705037d50b02f2596 +Server: APISIX/3.3.0 + +{ + "args": {}, + "headers": { + "Accept": "*/*", + "Connection": "close", + "Host": "httpbun.org", + "User-Agent": "curl/8.1.2", + "X-Forwarded-For": "127.0.0.1", + "X-Forwarded-Host": "httpbun.org", + "X-Forwarded-Port": "9080", + "X-Forwarded-Proto": "http", + "X-Real-Ip": "127.0.0.1" + }, + "method": "GET", + "origin": "127.0.0.1, 122.231.76.178", + "url": "http://httpbun.org/get" +} +``` + +Potential injection requests are also forwarded as is and encounter a 404 error: + +```bash +curl -H "Host: httpbun.org" http://127.0.0.1:9080/getid=1%20AND%201=1 -i + +HTTP/1.1 404 Not Found +Content-Type: text/plain; charset=utf-8 +Content-Length: 19 +Connection: keep-alive +X-APISIX-CHAITIN-WAF: no +Date: Wed, 19 Jul 2023 09:30:28 GMT +X-Content-Type-Options: nosniff +X-Powered-By: httpbun/3c0dc05883dd9212ac38b04705037d50b02f2596 +Server: APISIX/3.3.0 + +404 page not found +``` + +Normal requests are still reachable when the match condition is met: + +```bash +curl -H "Host: httpbun.org" -H "waf: true" http://127.0.0.1:9080/get -i + +HTTP/1.1 200 OK +Content-Type: application/json +Content-Length: 427 +Connection: keep-alive +X-APISIX-CHAITIN-WAF-TIME: 2 +X-APISIX-CHAITIN-WAF-STATUS: 200 +X-APISIX-CHAITIN-WAF: yes +X-APISIX-CHAITIN-WAF-ACTION: pass +Date: Wed, 19 Jul 2023 09:29:58 GMT +X-Powered-By: httpbun/3c0dc05883dd9212ac38b04705037d50b02f2596 +Server: APISIX/3.3.0 + +{ + "args": {}, + "headers": { + "Accept": "*/*", + "Connection": "close", + "Host": "httpbun.org", + "User-Agent": "curl/8.1.2", + "Waf": "true", + "X-Forwarded-For": "127.0.0.1", + "X-Forwarded-Host": "httpbun.org", 
+ "X-Forwarded-Port": "9080", + "X-Forwarded-Proto": "http", + "X-Real-Ip": "127.0.0.1" + }, + "method": "GET", + "origin": "127.0.0.1, 122.231.76.178", + "url": "http://httpbun.org/get" +} +``` + +Potential attack requests will be intercepted and returned a 403 error: + +```bash +curl -H "Host: httpbun.org" -H "waf: true" http://127.0.0.1:9080/getid=1%20AND%201=1 -i + +HTTP/1.1 403 Forbidden +Date: Wed, 19 Jul 2023 09:29:06 GMT +Content-Type: text/plain; charset=utf-8 +Transfer-Encoding: chunked +Connection: keep-alive +X-APISIX-CHAITIN-WAF: yes +X-APISIX-CHAITIN-WAF-TIME: 2 +X-APISIX-CHAITIN-WAF-ACTION: reject +X-APISIX-CHAITIN-WAF-STATUS: 403 +Server: APISIX/3.3.0 +Set-Cookie: sl-session=UdywdGL+uGS7q8xMfnJlbQ==; Domain=; Path=/; Max-Age=86400 + +{"code": 403, "success":false, "message": "blocked by Chaitin SafeLine Web Application Firewall", "event_id": "51a268653f2c4189bfa3ec66afbcb26d"} +``` + +## Delete Plugin + +To remove the `chaitin-waf` plugin, you can delete the corresponding JSON configuration from the Plugin configuration. +APISIX will automatically reload and you do not have to restart for this to take effect: + +```bash +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "uri": "/*", + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbun.org:80": 1 + } + } +}' +``` diff --git a/docs/en/latest/plugins/clickhouse-logger.md b/docs/en/latest/plugins/clickhouse-logger.md index 8b6067d0a589..2f3a66e0c6fb 100644 --- a/docs/en/latest/plugins/clickhouse-logger.md +++ b/docs/en/latest/plugins/clickhouse-logger.md @@ -44,7 +44,7 @@ The `clickhouse-logger` Plugin is used to push logs to [ClickHouse](https://clic | timeout | integer | False | 3 | [1,...] | Time to keep the connection alive for after sending a request. | | name | string | False | "clickhouse logger" | | Unique identifier for the logger. 
| | ssl_verify | boolean | False | true | [true,false] | When set to `true`, verifies SSL. | -| log_format | object | False | | | Log format declared as key value pairs in JSON format. Values only support strings. [APISIX](../apisix-variable.md) or [Nginx](http://nginx.org/en/docs/varindex.html) variables can be used by prefixing the string with `$`. | +| log_format | object | False | {"host": "$host", "@timestamp": "$time_iso8601", "client_ip": "$remote_addr"} | | Log format declared as key value pairs in JSON format. Values only support strings. [APISIX](../apisix-variable.md) or [Nginx](http://nginx.org/en/docs/varindex.html) variables can be used by prefixing the string with `$`. | | include_req_body | boolean | False | false | [false, true] | When set to `true` includes the request body in the log. If the request body is too big to be kept in the memory, it can't be logged due to Nginx's limitations. | | include_req_body_expr | array | False | | | Filter for when the `include_req_body` attribute is set to `true`. Request body is only logged when the expression set here evaluates to `true`. See [lua-resty-expr](https://github.com/api7/lua-resty-expr) for more. | | include_resp_body | boolean | False | false | [false, true] | When set to `true` includes the response body in the log. 
| @@ -81,28 +81,19 @@ curl http://127.0.0.1:9180/apisix/admin/plugin_metadata/clickhouse-logger -H 'X- }' ``` -You have to then create a table in your ClickHouse database to store the logs: - -```sql -CREATE TABLE default.test ( - `host` String, - `client_ip` String, - `route_id` String, - `service_id` String, - `@timestamp` String, - PRIMARY KEY(`@timestamp`) -) ENGINE = MergeTree() +You can use the clickhouse docker image to create a container like so: + +```shell +docker run -d -p 8123:8123 -p 9000:9000 -p 9009:9009 --name some-clickhouse-server --ulimit nofile=262144:262144 clickhouse/clickhouse-server ``` -Now, if you run `select * from default.test;`, you will get the following row: +Then create a table in your ClickHouse database to store the logs. -``` -┌─host──────┬─client_ip─┬─route_id─┬─@timestamp────────────────┐ -│ 127.0.0.1 │ 127.0.0.1 │ 1 │ 2022-01-17T10:03:10+08:00 │ -└───────────┴───────────┴──────────┴───────────────────────────┘ +```shell +echo "CREATE TABLE default.test (\`host\` String, \`client_ip\` String, \`route_id\` String, \`service_id\` String, \`@timestamp\` String, PRIMARY KEY(\`@timestamp\`)) ENGINE = MergeTree()" | curl 'http://localhost:8123/' ``` -## Enabling the Plugin +## Enable Plugin If multiple endpoints are configured, they will be written randomly. The example below shows how you can enable the Plugin on a specific Route: @@ -137,9 +128,16 @@ Now, if you make a request to APISIX, it will be logged in your ClickHouse datab curl -i http://127.0.0.1:9080/hello ``` -## Disable Plugin +Now, if you check for the rows in the table, you will get the following output: + +```shell +curl 'http://localhost:8123/?query=select%20*%20from%20default.test' +127.0.0.1 127.0.0.1 1 2023-05-08T19:15:53+05:30 +``` + +## Delete Plugin -To disable the `clickhouse-logger` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. 
+To remove the `clickhouse-logger` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. ```shell curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' diff --git a/docs/en/latest/plugins/client-control.md b/docs/en/latest/plugins/client-control.md index 6aeb82abb393..fd226f7b2ba1 100644 --- a/docs/en/latest/plugins/client-control.md +++ b/docs/en/latest/plugins/client-control.md @@ -42,7 +42,7 @@ This Plugin requires APISIX to run on APISIX-Base. See [apisix-build-tools](http | ------------- | ------- | -------- | ------------ | ------------------------------------------------------------------------------------------------------------------------------------ | | max_body_size | integer | False | [0,...] | Dynamically set the [`client_max_body_size`](https://nginx.org/en/docs/http/ngx_http_core_module.html#client_max_body_size) directive. | -## Enabling the Plugin +## Enable Plugin The example below enables the Plugin on a specific Route: @@ -85,9 +85,9 @@ HTTP/1.1 413 Request Entity Too Large ``` -## Disable Plugin +## Delete Plugin -To disable the `client-control` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload, and you do not have to restart for this to take effect. +To remove the `client-control` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload, and you do not have to restart for this to take effect. 
```shell curl http://127.0.0.1:9180/apisix/admin/routes/1 \ diff --git a/docs/en/latest/plugins/consumer-restriction.md b/docs/en/latest/plugins/consumer-restriction.md index a5677020cb9b..7df4d0634784 100644 --- a/docs/en/latest/plugins/consumer-restriction.md +++ b/docs/en/latest/plugins/consumer-restriction.md @@ -4,7 +4,7 @@ keywords: - Apache APISIX - API Gateway - Consumer restriction -description: The Consumer Restriction Plugin allows users to set access restrictions based on Consumer, Route, or Service. +description: The Consumer Restriction Plugin allows users to configure access restrictions on Consumer, Route, Service, or Global Rule. --- :::info -The function usage scenarios introduced in this article are mainly in China, so this article only has a Chinese version temporarily. You can cilik [here](https://apisix.apache.org/zh/docs/apisix/plugins/gm/) for more details. If you are interested in this feature, welcome to translate this document. +The function usage scenarios introduced in this article are mainly in China, so this article only has a Chinese version temporarily. You can click [here](https://apisix.apache.org/zh/docs/apisix/plugins/gm/) for more details. If you are interested in this feature, welcome to translate this document. 
::: diff --git a/docs/en/latest/plugins/google-cloud-logging.md b/docs/en/latest/plugins/google-cloud-logging.md index 79adec8e2393..8b4bb81ad0a1 100644 --- a/docs/en/latest/plugins/google-cloud-logging.md +++ b/docs/en/latest/plugins/google-cloud-logging.md @@ -37,6 +37,7 @@ This plugin also allows to push logs as a batch to your Google Cloud Logging Ser | Name | Required | Default | Description | |-------------------------|----------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------| | auth_config | True | | Either `auth_config` or `auth_file` must be provided. | +| auth_config.client_email | True | | Email address of the Google Cloud service account. | | auth_config.private_key | True | | Private key of the Google Cloud service account. | | auth_config.project_id | True | | Project ID in the Google Cloud service account. | | auth_config.token_uri | True | https://oauth2.googleapis.com/token | Token URI of the Google Cloud service account. | @@ -46,7 +47,7 @@ This plugin also allows to push logs as a batch to your Google Cloud Logging Ser | ssl_verify | False | true | When set to `true`, enables SSL verification as mentioned in [OpenResty docs](https://github.com/openresty/lua-nginx-module#tcpsocksslhandshake). | | resource | False | {"type": "global"} | Google monitor resource. See [MonitoredResource](https://cloud.google.com/logging/docs/reference/v2/rest/v2/MonitoredResource) for more details. | | log_id | False | apisix.apache.org%2Flogs | Google Cloud logging ID. See [LogEntry](https://cloud.google.com/logging/docs/reference/v2/rest/v2/LogEntry) for details. | -| log_format | False | | Log format declared as key value pairs in JSON format. 
Values only support strings. [APISIX](../apisix-variable.md) or [Nginx](http://nginx.org/en/docs/varindex.html) variables can be used by prefixing the string with `$`. | +| log_format | False | {"host": "$host", "@timestamp": "$time_iso8601", "client_ip": "$remote_addr"} | Log format declared as key value pairs in JSON format. Values only support strings. [APISIX](../apisix-variable.md) or [Nginx](http://nginx.org/en/docs/varindex.html) variables can be used by prefixing the string with `$`. | NOTE: `encrypt_fields = {"auth_config.private_key"}` is also defined in the schema, which means that the field will be stored encrypted in etcd. See [encrypted storage fields](../plugin-develop.md#encrypted-storage-fields). @@ -85,7 +86,7 @@ With this configuration, your logs would be formatted as shown below: {"partialSuccess":false,"entries":[{"jsonPayload":{"client_ip":"127.0.0.1","host":"localhost","@timestamp":"2023-01-09T14:47:25+08:00","route_id":"1"},"resource":{"type":"global"},"insertId":"942e81f60b9157f0d46bc9f5a8f0cc40","logName":"projects/apisix/logs/apisix.apache.org%2Flogs","timestamp":"2023-01-09T14:47:25+08:00","labels":{"source":"apache-apisix-google-cloud-logging"}}]} ``` -## Enabling the Plugin +## Enable Plugin ### Full configuration @@ -98,6 +99,7 @@ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f13 "google-cloud-logging": { "auth_config":{ "project_id":"apisix", + "client_email":"your service account email@apisix.iam.gserviceaccount.com", "private_key":"-----BEGIN RSA PRIVATE KEY-----your private key-----END RSA PRIVATE KEY-----", "token_uri":"https://oauth2.googleapis.com/token", "scopes":[ @@ -137,6 +139,7 @@ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f13 "google-cloud-logging": { "auth_config":{ "project_id":"apisix", + "client_email":"your service account email@apisix.iam.gserviceaccount.com", "private_key":"-----BEGIN RSA PRIVATE KEY-----your private key-----END RSA PRIVATE KEY-----" 
} } @@ -161,9 +164,9 @@ curl -i http://127.0.0.1:9080/hello You can then login and view the logs in [Google Cloud Logging Service](https://console.cloud.google.com/logs/viewer). -## Disable Plugin +## Delete Plugin -To disable the `google-cloud-logging` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. +To remove the `google-cloud-logging` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. ```shell curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' diff --git a/docs/en/latest/plugins/grpc-transcode.md b/docs/en/latest/plugins/grpc-transcode.md index 3e75b428d39e..56680946dff5 100644 --- a/docs/en/latest/plugins/grpc-transcode.md +++ b/docs/en/latest/plugins/grpc-transcode.md @@ -57,7 +57,7 @@ APISIX takes in an HTTP request, transcodes it and forwards it to a gRPC service | default values | `auto_default_values`, `no_default_values`, `use_default_values`, `use_default_metatable` | | hooks | `enable_hooks`, `disable_hooks` | -## Enabling the Plugin +## Enable Plugin Before enabling the Plugin, you have to add the content of your `.proto` or `.pb` files to APISIX. @@ -102,34 +102,14 @@ The output binary file, `proto.pb` will contain both `helloworld.proto` and `imp We can now use the content of `proto.pb` in the `content` field of the API request. 
-As the content of the proto is binary, we encode it in `base64` using this Python script: - -```python title="upload_pb.py" -#!/usr/bin/env python -# coding: utf-8 - -import base64 -import sys - -# sudo pip install requests -import requests - -if len(sys.argv) <= 1: - print("bad argument") - sys.exit(1) -with open(sys.argv[1], 'rb') as f: - content = base64.b64encode(f.read()) -id = sys.argv[2] -api_key = "edd1c9f034335f136f87ad84b625c8f1" # use a different API key - -reqParam = { - "content": content, -} -resp = requests.put("http://127.0.0.1:9180/apisix/admin/protos/" + id, json=reqParam, headers={ - "X-API-KEY": api_key, -}) -print(resp.status_code) -print(resp.text) +As the content of the proto is binary, we encode it in `base64` using this shell command: + +```shell +curl http://127.0.0.1:9180/apisix/admin/protos/1 \ +-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "content" : "'"$(base64 -w0 /path/to/proto.pb)"'" +}' ``` This script will take in a `.pb` file and the `id` to create, encodes the content of the proto to `base64`, and calls the Admin API with this encoded content. @@ -258,7 +238,7 @@ If the gRPC service returns an error, there may be a `grpc-status-details-bin` f Upload the proto file: ```shell -curl http://127.0.0.1:9080/apisix/admin/proto/1 \ +curl http://127.0.0.1:9080/apisix/admin/protos/1 \ -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "content" : "syntax = \"proto3\"; @@ -328,7 +308,7 @@ Server: APISIX web server Note that there is an undecoded field in the return body. If you need to decode the field, you need to add the `message type` of the field in the uploaded proto file. 
```shell -curl http://127.0.0.1:9080/apisix/admin/proto/1 \ +curl http://127.0.0.1:9080/apisix/admin/protos/1 \ -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "content" : "syntax = \"proto3\"; @@ -395,9 +375,9 @@ Server: APISIX web server {"error":{"details":[{"type":"service","message":"The server is out of service","code":1}],"message":"Out of service","code":14}} ``` -## Disable Plugin +## Delete Plugin -To disable the `grpc-transcode` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. +To remove the `grpc-transcode` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. ```shell curl http://127.0.0.1:9180/apisix/admin/routes/111 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' diff --git a/docs/en/latest/plugins/grpc-web.md b/docs/en/latest/plugins/grpc-web.md index e839613613bb..a834e0e95fd6 100644 --- a/docs/en/latest/plugins/grpc-web.md +++ b/docs/en/latest/plugins/grpc-web.md @@ -32,7 +32,7 @@ description: This document contains information about the Apache APISIX grpc-web The `grpc-web` Plugin is a proxy Plugin that can process [gRPC Web](https://github.com/grpc/grpc-web) requests from JavaScript clients to a gRPC service. -## Enabling the Plugin +## Enable Plugin You can enable the `grpc-web` Plugin on a specific Route as shown below: @@ -55,7 +55,7 @@ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f13 :::info IMPORTANT -While using the `grpc-web` Plugin, always using a prefix matching pattern (`/*`, `/grpc/example/*`) for matching Routes. This is because the gRPC Web client passes the package name, the service interface name, the method name and other information in the proto in the URI. For example, `/path/a6.RouteService/Insert`. 
+While using the `grpc-web` Plugin, always use a prefix matching pattern (`/*`, `/grpc/example/*`) for matching Routes. This is because the gRPC Web client passes the package name, the service interface name, the method name and other information in the proto in the URI. For example, `/path/a6.RouteService/Insert`. So, when absolute matching is used, the Plugin would not be hit and the information from the proto would not be extracted. @@ -75,9 +75,9 @@ The supported `Content-Type` includes `application/grpc-web`, `application/grpc- ::: -## Disable Plugin +## Delete Plugin -To disable the `grpc-web` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. +To remove the `grpc-web` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. ```shell curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' diff --git a/docs/en/latest/plugins/gzip.md b/docs/en/latest/plugins/gzip.md index 346354b290ce..b812de4daf82 100644 --- a/docs/en/latest/plugins/gzip.md +++ b/docs/en/latest/plugins/gzip.md @@ -49,7 +49,7 @@ This Plugin requires APISIX to run on [APISIX-Base](../FAQ.md#how-do-i-build-the | buffers.size | integer | False | 4096 | >= 1 | Dynamically sets the `gzip_buffers` directive. | | vary | boolean | False | false | | Dynamically sets the `gzip_vary` directive. | -## Enabling the Plugin +## Enable Plugin The example below enables the `gzip` Plugin on the specified Route: @@ -95,9 +95,9 @@ Warning: curl to output it to your terminal anyway, or consider "--output Warning: " to save to a file. ``` -## Disable Plugin +## Delete Plugin -To disable the `gzip` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. 
APISIX will automatically reload and you do not have to restart for this to take effect. +To remove the `gzip` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. ```shell curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' diff --git a/docs/en/latest/plugins/hmac-auth.md b/docs/en/latest/plugins/hmac-auth.md index 3f3951877d6a..5940a52df3da 100644 --- a/docs/en/latest/plugins/hmac-auth.md +++ b/docs/en/latest/plugins/hmac-auth.md @@ -50,7 +50,7 @@ This Plugin works with a [Consumer](../terminology/consumer.md) object and a con NOTE: `encrypt_fields = {"secret_key"}` is also defined in the schema, which means that the field will be stored encrypted in etcd. See [encrypted storage fields](../plugin-develop.md#encrypted-storage-fields). -## Enabling the Plugin +## Enable Plugin First we enable the Plugin on a Consumer object as shown below: @@ -357,9 +357,9 @@ Accept-Ranges: bytes ``` -## Disable Plugin +## Delete Plugin -To disable the `hmac-auth` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. +To remove the `hmac-auth` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. 
```shell curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' diff --git a/docs/en/latest/plugins/http-logger.md b/docs/en/latest/plugins/http-logger.md index da5dfcd7420c..0f26bd51e874 100644 --- a/docs/en/latest/plugins/http-logger.md +++ b/docs/en/latest/plugins/http-logger.md @@ -40,7 +40,7 @@ This will allow the ability to send log data requests as JSON objects to monitor | uri | string | True | | | URI of the HTTP/HTTPS server. | | auth_header | string | False | | | Authorization headers if required. | | timeout | integer | False | 3 | [1,...] | Time to keep the connection alive for after sending a request. | -| log_format | object | False | | | Log format declared as key value pairs in JSON format. Values only support strings. [APISIX](../apisix-variable.md) or [Nginx](http://nginx.org/en/docs/varindex.html) variables can be used by prefixing the string with `$`. | +| log_format | object | False | {"host": "$host", "@timestamp": "$time_iso8601", "client_ip": "$remote_addr"} | | Log format declared as key value pairs in JSON format. Values only support strings. [APISIX](../apisix-variable.md) or [Nginx](http://nginx.org/en/docs/varindex.html) variables can be used by prefixing the string with `$`. | | include_req_body | boolean | False | false | [false, true] | When set to `true` includes the request body in the log. If the request body is too big to be kept in the memory, it can't be logged due to Nginx's limitations. | | include_resp_body | boolean | False | false | [false, true] | When set to `true` includes the response body in the log. | | include_resp_body_expr | array | False | | | When the `include_resp_body` attribute is set to `true`, use this to filter based on [lua-resty-expr](https://github.com/api7/lua-resty-expr). If present, only logs the response if the expression evaluates to `true`. 
| @@ -88,7 +88,7 @@ With this configuration, your logs would be formatted as shown below: {"host":"localhost","@timestamp":"2020-09-23T19:05:05-04:00","client_ip":"127.0.0.1","route_id":"1"} ``` -## Enabling the Plugin +## Enable Plugin The example below shows how you can enable the Plugin on a specific Route: @@ -121,7 +121,7 @@ Now, if you make a request to APISIX, it will be logged in your mockbin server: curl -i http://127.0.0.1:9080/hello ``` -## Disable Plugin +## Delete Plugin To disable this Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. diff --git a/docs/en/latest/plugins/inspect.md b/docs/en/latest/plugins/inspect.md index 18b913ae5c12..8b891b82dc6c 100644 --- a/docs/en/latest/plugins/inspect.md +++ b/docs/en/latest/plugins/inspect.md @@ -85,7 +85,7 @@ The `info` is a hash table which contains below keys: | delay | integer | False | 3 | Time in seconds specifying how often to check the hooks file. | | hooks_file | string | False | "/usr/local/apisix/plugin_inspect_hooks.lua" | Lua file to define hooks, which could be a link file. Ensure only administrator could write this file, otherwise it may be a security risk. 
| -## Enabling the Plugin +## Enable Plugin Plugin is enabled by default (`conf/config-default.yaml`): @@ -162,7 +162,7 @@ stack traceback: 2022/09/01 00:55:52 [info] 2754534#2754534: *4070 [lua] resty_inspect_hooks.lua:6: conf_key=remote_addr, client: 127.0.0.1, server: _, request: "GET /get HTTP/1.1", host: "127.0.0.1:9080" ``` -## Disable plugin +## Delete Plugin To remove the `inspect` Plugin, you can remove it from your configuration file (`conf/config.yaml`): diff --git a/docs/en/latest/plugins/ip-restriction.md b/docs/en/latest/plugins/ip-restriction.md index 904b316f95b7..0f9e4769f3cf 100644 --- a/docs/en/latest/plugins/ip-restriction.md +++ b/docs/en/latest/plugins/ip-restriction.md @@ -48,7 +48,7 @@ Either one of `whitelist` or `blacklist` attribute must be specified. They canno ::: -## Enabling the Plugin +## Enable Plugin You can enable the Plugin on a Route or a Service as shown below: @@ -135,9 +135,9 @@ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f13 }' ``` -## Disable Plugin +## Delete Plugin -To disable the `ip-restriction` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. +To remove the `ip-restriction` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. ```shell curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' diff --git a/docs/en/latest/plugins/jwt-auth.md b/docs/en/latest/plugins/jwt-auth.md index 9eebdc6a2f05..41919d4c11a5 100644 --- a/docs/en/latest/plugins/jwt-auth.md +++ b/docs/en/latest/plugins/jwt-auth.md @@ -60,7 +60,7 @@ For Route: | cookie | string | False | jwt | The cookie to get the token from. Lower priority than query. 
| | hide_credentials | boolean | False | false | Set to true will not pass the authorization request of header\query\cookie to the Upstream.| -You can implement `jwt-auth` with [HashiCorp Vault](https://www.vaultproject.io/) to store and fetch secrets and RSA keys pairs from its [encrypted KV engine](https://www.vaultproject.io/docs/secrets/kv) using the [APISIX Secret](../terminology/secret.md) resource. +You can implement `jwt-auth` with [HashiCorp Vault](https://www.vaultproject.io/) to store and fetch secrets and RSA keys pairs from its [encrypted KV engine](https://developer.hashicorp.com/vault/docs/secrets/kv) using the [APISIX Secret](../terminology/secret.md) resource. ## API @@ -72,7 +72,7 @@ You may need to use the [public-api](public-api.md) plugin to expose this endpoi ::: -## Enabling the Plugin +## Enable Plugin To enable the Plugin, you have to create a Consumer object with the JWT token and configure your Route to use JWT authentication. @@ -249,9 +249,9 @@ Accept-Ranges: bytes ... ``` -## Disable Plugin +## Delete Plugin -To disable the `jwt-auth` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. +To remove the `jwt-auth` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. ```shell curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' diff --git a/docs/en/latest/plugins/kafka-logger.md b/docs/en/latest/plugins/kafka-logger.md index aebd43813fa9..229256baeb6e 100644 --- a/docs/en/latest/plugins/kafka-logger.md +++ b/docs/en/latest/plugins/kafka-logger.md @@ -47,12 +47,12 @@ It might take some time to receive the log data. It will be automatically sent a | brokers.sasl_config.password | string | True | | | The password of sasl_config. 
If sasl_config exists, it's required. | | kafka_topic | string | True | | | Target topic to push the logs for organisation. | | producer_type | string | False | async | ["async", "sync"] | Message sending mode of the producer. | -| required_acks | integer | False | 1 | [0, 1, -1] | Number of acknowledgements the leader needs to receive for the producer to consider the request complete. This controls the durability of the sent records. The attribute follows the same configuration as the Kafka `acks` attribute. See [Apache Kafka documentation](https://kafka.apache.org/documentation/#producerconfigs_acks) for more. | +| required_acks | integer | False | 1 | [1, -1] | Number of acknowledgements the leader needs to receive for the producer to consider the request complete. This controls the durability of the sent records. The attribute follows the same configuration as the Kafka `acks` attribute. `required_acks` cannot be 0. See [Apache Kafka documentation](https://kafka.apache.org/documentation/#producerconfigs_acks) for more. | | key | string | False | | | Key used for allocating partitions for messages. | | timeout | integer | False | 3 | [1,...] | Timeout for the upstream to send data. | | name | string | False | "kafka logger" | | Unique identifier for the batch processor. | | meta_format | enum | False | "default" | ["default","origin"] | Format to collect the request information. Setting to `default` collects the information in JSON format and `origin` collects the information with the original HTTP request. See [examples](#meta_format-example) below. | -| log_format | object | False | | | Log format declared as key value pairs in JSON format. Values only support strings. [APISIX](../apisix-variable.md) or [Nginx](http://nginx.org/en/docs/varindex.html) variables can be used by prefixing the string with `$`. 
| +| log_format | object | False | {"host": "$host", "@timestamp": "$time_iso8601", "client_ip": "$remote_addr"} | | Log format declared as key value pairs in JSON format. Values only support strings. [APISIX](../apisix-variable.md) or [Nginx](http://nginx.org/en/docs/varindex.html) variables can be used by prefixing the string with `$`. | | include_req_body | boolean | False | false | [false, true] | When set to `true` includes the request body in the log. If the request body is too big to be kept in the memory, it can't be logged due to Nginx's limitations. | | include_req_body_expr | array | False | | | Filter for when the `include_req_body` attribute is set to `true`. Request body is only logged when the expression set here evaluates to `true`. See [lua-resty-expr](https://github.com/api7/lua-resty-expr) for more. | | include_resp_body | boolean | False | false | [false, true] | When set to `true` includes the response body in the log. | @@ -164,7 +164,7 @@ With this configuration, your logs would be formatted as shown below: {"host":"localhost","@timestamp":"2020-09-23T19:05:05-04:00","client_ip":"127.0.0.1","route_id":"1"} ``` -## Enabling the Plugin +## Enable Plugin The example below shows how you can enable the `kafka-logger` Plugin on a specific Route: @@ -218,9 +218,9 @@ Now, if you make a request to APISIX, it will be logged in your Kafka server: curl -i http://127.0.0.1:9080/hello ``` -## Disable Plugin +## Delete Plugin -To disable the `kafka-logger` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. +To remove the `kafka-logger` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. 
```shell curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' diff --git a/docs/en/latest/plugins/kafka-proxy.md b/docs/en/latest/plugins/kafka-proxy.md index 6345a543da94..303886514c71 100644 --- a/docs/en/latest/plugins/kafka-proxy.md +++ b/docs/en/latest/plugins/kafka-proxy.md @@ -78,6 +78,6 @@ curl -X PUT 'http://127.0.0.1:9180/apisix/admin/routes/r1' \ Now, we can test it by connecting to the `/kafka` endpoint via websocket. -## Disable Plugin +## Delete Plugin -To disable the `kafka-proxy` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. +To remove the `kafka-proxy` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. diff --git a/docs/en/latest/plugins/key-auth.md b/docs/en/latest/plugins/key-auth.md index bf34e93f715a..f8dd177db47c 100644 --- a/docs/en/latest/plugins/key-auth.md +++ b/docs/en/latest/plugins/key-auth.md @@ -52,7 +52,7 @@ For Route: | query | string | optional | apikey | | The query string to get the key from. Lower priority than header. | | hide_credentials | bool | optional | false | | Apache APISIX will pass the request header or query string that contains the authentication information to the Upstream if `hide_credentials` is `false`. Otherwise the authentication information will be removed before proxying.| -## Enabling the Plugin +## Enable Plugin To enable the Plugin, you have to create a Consumer object with an authentication key and configure your Route to authenticate requests. @@ -145,9 +145,9 @@ HTTP/1.1 401 Unauthorized {"message":"Invalid API key in request"} ``` -## Disable Plugin +## Delete Plugin -To disable the `key-auth` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. 
APISIX will automatically reload and you do not have to restart for this to take effect. +To remove the `key-auth` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. ```shell curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' diff --git a/docs/en/latest/plugins/ldap-auth.md b/docs/en/latest/plugins/ldap-auth.md index 1e0e4005c00f..dac081dc52fb 100644 --- a/docs/en/latest/plugins/ldap-auth.md +++ b/docs/en/latest/plugins/ldap-auth.md @@ -54,7 +54,7 @@ For Route: | tls_verify| boolean | False | `false` | Whether to verify the server certificate when `use_tls` is enabled; If set to `true`, you must set `ssl_trusted_certificate` in `config.yaml`, and make sure the host of `ldap_uri` matches the host in server certificate. | | uid | string | False | `cn` | uid attribute. | -## Enabling the plugin +## Enable plugin First, you have to create a Consumer and enable the `ldap-auth` Plugin on it: @@ -139,9 +139,9 @@ HTTP/1.1 401 Unauthorized {"message":"Invalid user authorization"} ``` -## Disable Plugin +## Delete Plugin -To disable the `ldap-auth` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. +To remove the `ldap-auth` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. 
```shell curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' diff --git a/docs/en/latest/plugins/limit-conn.md b/docs/en/latest/plugins/limit-conn.md index 4cd882bac6ab..af0e59bc0e9f 100644 --- a/docs/en/latest/plugins/limit-conn.md +++ b/docs/en/latest/plugins/limit-conn.md @@ -28,7 +28,7 @@ description: This document contains information about the Apache APISIX limit-co ## Description -The `limit-con` Plugin limits the number of concurrent requests to your services. +The `limit-conn` Plugin limits the number of concurrent requests to your services. ## Attributes @@ -44,7 +44,7 @@ The `limit-con` Plugin limits the number of concurrent requests to your services | rejected_msg | string | False | | non-empty | Body of the response returned when the requests exceeding the threshold are rejected. | | allow_degradation | boolean | False | false | | When set to `true` enables Plugin degradation when the Plugin is temporarily unavailable and allows requests to continue. | -## Enabling the Plugin +## Enable Plugin You can enable the Plugin on a Route as shown below: @@ -120,9 +120,9 @@ curl -i http://127.0.0.1:9080/index.html?sleep=20 ``` -## Disable Plugin +## Delete Plugin -To disable the `limit-conn` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. +To remove the `limit-conn` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. 
```shell curl http://127.0.0.1:9180/apisix/admin/routes/1 \ diff --git a/docs/en/latest/plugins/limit-count.md b/docs/en/latest/plugins/limit-count.md index b32cad88ff32..46a775a00226 100644 --- a/docs/en/latest/plugins/limit-count.md +++ b/docs/en/latest/plugins/limit-count.md @@ -43,10 +43,13 @@ The `limit-count` Plugin limits the number of requests to your service by a give | policy | string | False | "local" | ["local", "redis", "redis-cluster"] | Rate-limiting policies to use for retrieving and increment the limit count. When set to `local` the counters will be locally stored in memory on the node. When set to `redis` counters are stored on a Redis server and will be shared across the nodes. It is done usually for global speed limiting, and setting to `redis-cluster` uses a Redis cluster instead of a single instance. | | allow_degradation | boolean | False | false | | When set to `true` enables Plugin degradation when the Plugin is temporarily unavailable (for example, a Redis timeout) and allows requests to continue. | | show_limit_quota_header | boolean | False | true | | When set to `true`, adds `X-RateLimit-Limit` (total number of requests) and `X-RateLimit-Remaining` (remaining number of requests) to the response header. | -| group | string | False | | non-empty | Group to share the counter with. Routes configured with the same group will share the counter. | +| group | string | False | | non-empty | Group to share the counter with. Routes configured with the same group will share the same counter. Do not configure with a value that was previously used in this attribute before as the plugin would not allow. | | redis_host | string | required when `policy` is `redis` | | | Address of the Redis server. Used when the `policy` attribute is set to `redis`. | | redis_port | integer | False | 6379 | [1,...] | Port of the Redis server. Used when the `policy` attribute is set to `redis`. | -| redis_password | string | False | | | Password of the Redis server. 
Used when the `policy` is set to `redis` or `redis-cluster`. | +| redis_username | string | False | | | Username for Redis authentication if Redis ACL is used (for Redis version >= 6.0). If you use the legacy authentication method `requirepass` to configure Redis password, configure only the `redis_password`. Used when the `policy` is set to `redis`. | +| redis_password | string | False | | | Password for Redis authentication. Used when the `policy` is set to `redis` or `redis-cluster`. | +| redis_ssl | boolean | False | false | | If set to `true`, then uses SSL to connect to redis instance. Used when the `policy` attribute is set to `redis`. | +| redis_ssl_verify | boolean | False | false | | If set to `true`, then verifies the validity of the server SSL certificate. Used when the `policy` attribute is set to `redis`. See [tcpsock:sslhandshake](https://github.com/openresty/lua-nginx-module#tcpsocksslhandshake). | | redis_database | integer | False | 0 | redis_database >= 0 | Selected database of the Redis server (for single instance operation or when using Redis cloud with a single entrypoint). Used when the `policy` attribute is set to `redis`. | | redis_timeout | integer | False | 1000 | [1,...] | Timeout in milliseconds for any command submitted to the Redis server. Used when the `policy` attribute is set to `redis` or `redis-cluster`. | | redis_cluster_nodes | array | required when `policy` is `redis-cluster` | | | Addresses of Redis cluster nodes. Used when the `policy` attribute is set to `redis-cluster`. | @@ -54,7 +57,7 @@ The `limit-count` Plugin limits the number of requests to your service by a give | redis_cluster_ssl | boolean | False | false | | If set to `true`, then uses SSL to connect to redis-cluster. Used when the `policy` attribute is set to `redis-cluster`. | | redis_cluster_ssl_verify | boolean | False | false | | If set to `true`, then verifies the validity of the server SSL certificate. 
Used when the `policy` attribute is set to `redis-cluster`. | -## Enabling the Plugin +## Enable Plugin You can enable the Plugin on a Route as shown below: @@ -301,9 +304,9 @@ Server: APISIX web server {"error_msg":"Requests are too frequent, please try again later."} ``` -## Disable Plugin +## Delete Plugin -To disable the `limit-count` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. +To remove the `limit-count` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. ```shell curl http://127.0.0.1:9180/apisix/admin/routes/1 \ diff --git a/docs/en/latest/plugins/limit-req.md b/docs/en/latest/plugins/limit-req.md index ee41ae96056e..77fd1cf1825b 100644 --- a/docs/en/latest/plugins/limit-req.md +++ b/docs/en/latest/plugins/limit-req.md @@ -44,7 +44,7 @@ The `limit-req` Plugin limits the number of requests to your service using the [ | nodelay | boolean | False | false | | If set to `true`, requests within the burst threshold would not be delayed. | | allow_degradation | boolean | False | false | | When set to `true` enables Plugin degradation when the Plugin is temporarily unavailable and allows requests to continue. | -## Enabling the Plugin +## Enable Plugin You can enable the Plugin on a Route as shown below: @@ -186,9 +186,9 @@ Server: APISIX web server {"error_msg":"Requests are too frequent, please try again later."} ``` -## Disable Plugin +## Delete Plugin -To disable the `limit-req` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. +To remove the `limit-req` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. 
APISIX will automatically reload and you do not have to restart for this to take effect. ```shell curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' diff --git a/docs/en/latest/plugins/log-rotate.md b/docs/en/latest/plugins/log-rotate.md index 94f08ebd9e3e..4f3fd82b7003 100644 --- a/docs/en/latest/plugins/log-rotate.md +++ b/docs/en/latest/plugins/log-rotate.md @@ -42,7 +42,7 @@ You can configure how often the logs are rotated and how many logs to keep. When | max_size | integer | False | -1 | Max size(Bytes) of log files to be rotated, size check would be skipped with a value less than 0 or time is up specified by interval. | | enable_compression | boolean | False | false | When set to `true`, compresses the log file (gzip). Requires `tar` to be installed. | -## Enabling the Plugin +## Enable Plugin To enable the Plugin, add it in your configuration file (`conf/config.yaml`): @@ -108,7 +108,7 @@ total 10.5K -rw-r--r--. 1 resty resty 1.5K Mar 20 21:31 error.log ``` -## Disable plugin +## Delete Plugin To remove the `log-rotate` Plugin, you can remove it from your configuration file (`conf/config.yaml`): diff --git a/docs/en/latest/plugins/loggly.md b/docs/en/latest/plugins/loggly.md index ea949a92d29e..cee32c8fc02d 100644 --- a/docs/en/latest/plugins/loggly.md +++ b/docs/en/latest/plugins/loggly.md @@ -43,7 +43,7 @@ When the maximum batch size is exceeded, the data in the queue is pushed to Logg | severity | string (enum) | False | INFO | Syslog log event severity level. Choose between: `DEBUG`, `INFO`, `NOTICE`, `WARNING`, `ERR`, `CRIT`, `ALERT`, and `EMEGR`. | | severity_map | object | False | nil | A way to map upstream HTTP response codes to Syslog severity. Key-value pairs where keys are the HTTP response codes and the values are the Syslog severity levels. For example `{"410": "CRIT"}`. | | tags | array | False | | Metadata to be included with any event log to aid in segmentation and filtering. 
| -| log_format | object | False | | Log format declared as key value pairs in JSON format. Values only support strings. [APISIX](../apisix-variable.md) or [Nginx](http://nginx.org/en/docs/varindex.html) variables can be used by prefixing the string with `$`. | +| log_format | object | False | {"host": "$host", "@timestamp": "$time_iso8601", "client_ip": "$remote_addr"} | Log format declared as key value pairs in JSON format. Values only support strings. [APISIX](../apisix-variable.md) or [Nginx](http://nginx.org/en/docs/varindex.html) variables can be used by prefixing the string with `$`. | | include_req_body | boolean | False | false | When set to `true` includes the request body in the log. If the request body is too big to be kept in the memory, it can't be logged due to Nginx's limitations. | | include_resp_body | boolean | False | false | When set to `true` includes the response body in the log. | | include_resp_body_expr | array | False | | When the `include_resp_body` attribute is set to `true`, use this to filter based on [lua-resty-expr](https://github.com/api7/lua-resty-expr). If present, only logs the response if the expression evaluates to `true`. | @@ -79,7 +79,7 @@ curl http://127.0.0.1:9180/apisix/admin/plugin_metadata/loggly -H 'X-API-KEY: ed ::: -## Enabling the Plugin +## Enable Plugin ### Full configuration @@ -148,9 +148,9 @@ You can then view the logs on your Loggly Dashboard: ![Loggly Dashboard](../../../assets/images/plugin/loggly-dashboard.png) -## Disable Plugin +## Delete Plugin -To disable the `file-logger` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. +To remove the `file-logger` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. 
```shell curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' diff --git a/docs/en/latest/plugins/loki-logger.md b/docs/en/latest/plugins/loki-logger.md new file mode 100644 index 000000000000..75e76c08a31d --- /dev/null +++ b/docs/en/latest/plugins/loki-logger.md @@ -0,0 +1,165 @@ +--- +title: loki-logger +keywords: + - Apache APISIX + - API Gateway + - Plugin + - Loki-logger + - Grafana Loki +description: This document contains information about the Apache APISIX loki-logger Plugin. +--- + + + +## Description + +The `loki-logger` plugin is used to forward logs to [Grafana Loki](https://grafana.com/oss/loki/) for analysis and storage. + +When the Plugin is enabled, APISIX will serialize the request context information to [Log entries in JSON](https://grafana.com/docs/loki/latest/api/#push-log-entries-to-loki) and submit it to the batch queue. When the maximum batch size is exceeded, the data in the queue is pushed to Grafana Loki. See [batch processor](../batch-processor.md) for more details. + +## Attributes + +| Name | Type | Required | Default | Description | +|---|---|---|---|---| +| endpoint_addrs | array[string] | True | | Loki API base URL, format like http://127.0.0.1:3100, supports HTTPS and domain names. If multiple endpoints are configured, they will be written randomly. | +| endpoint_uri | string | False | /loki/api/v1/push | If you are using a log collection service that is compatible with the Loki Push API, you can use this configuration item to customize the API path. | +| tenant_id | string | False | fake | Loki tenant ID. According to Loki's [multi-tenancy documentation](https://grafana.com/docs/loki/latest/operations/multi-tenancy/#multi-tenancy), its default value is set to the default value `fake` under single-tenancy. | +| log_labels | object | False | {job = "apisix"} | Loki log label. 
[APISIX variables](../apisix-variable.md) and [Nginx variables](http://nginx.org/en/docs/varindex.html) can be used by prefixing the string with `$`, both individual and combined, such as `$host` or `$remote_addr:$remote_port`. | +| ssl_verify | boolean | False | true | When set to `true`, verifies the SSL certificate. | +| timeout | integer | False | 3000ms | Timeout for the Loki service HTTP call. Range from 1 to 60,000ms. | +| keepalive | boolean | False | true | When set to `true`, keeps the connection alive for multiple requests. | +| keepalive_timeout | integer | False | 60000ms | Idle time after which the connection is closed. Range greater than or equal to 1000ms. | +| keepalive_pool | integer | False | 5 | Connection pool limit. Range greater than or equal to 1. | +| log_format | object | False | | Log format declared as key value pairs in JSON format. Values only support strings. [APISIX variables](../apisix-variable.md) and [Nginx variables](http://nginx.org/en/docs/varindex.html) can be used by prefixing the string with `$`. | +| include_req_body | boolean | False | false | When set to `true` includes the request body in the log. If the request body is too big to be kept in the memory, it can't be logged due to Nginx's limitations. | +| include_req_body_expr | array | False | | Filter for when the `include_req_body` attribute is set to `true`. Request body is only logged when the expression set here evaluates to `true`. See [lua-resty-expr](https://github.com/api7/lua-resty-expr) for more. | +| include_resp_body | boolean | False | false | When set to `true` includes the response body in the log. | +| include_resp_body_expr | array | False | | Filter for when the `include_resp_body` attribute is set to `true`. Response body is only logged when the expression set here evaluates to `true`. See [lua-resty-expr](https://github.com/api7/lua-resty-expr) for more. 
| + +This plugin supports using batch processors to aggregate and process entries (logs/data) in a batch. This avoids the need for frequently submitting the data. The batch processor submits data every `5` seconds or when the data in the queue reaches `1000`. See [Batch Processor](../batch-processor.md#configuration) for more information or setting your custom configuration. + +## Metadata + +You can also set the format of the logs by configuring the Plugin metadata. The following configurations are available: + +| Name | Type | Required | Default | Description | +|------|------|----------|---------|-------------| +| log_format | object | False | {"host": "$host", "@timestamp": "$time_iso8601", "client_ip": "$remote_addr"} | Log format declared as key value pairs in JSON format. Values only support strings. [APISIX variables](../apisix-variable.md) and [Nginx variables](http://nginx.org/en/docs/varindex.html) can be used by prefixing the string with `$`. | + +:::info IMPORTANT + +Configuring the plugin metadata is global in scope. This means that it will take effect on all Routes and Services which use the `loki-logger` plugin. 
+ +::: + +The example below shows how you can configure through the Admin API: + +```shell +curl http://127.0.0.1:9180/apisix/admin/plugin_metadata/loki-logger -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "log_format": { + "host": "$host", + "@timestamp": "$time_iso8601", + "client_ip": "$remote_addr" + } +}' +``` + +With this configuration, your logs would be formatted as shown below: + +```shell +{"host":"localhost","@timestamp":"2020-09-23T19:05:05-04:00","client_ip":"127.0.0.1","route_id":"1"} +{"host":"localhost","@timestamp":"2020-09-23T19:05:05-04:00","client_ip":"127.0.0.1","route_id":"1"} +``` + +## Enable plugin + +The example below shows how you can enable the `loki-logger` plugin on a specific Route: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "plugins": { + "loki-logger": { + "endpoint_addrs" : ["http://127.0.0.1:3100"] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" +}' +``` + +## Example usage + +Now, if you make a request to APISIX, it will be logged in your Loki server: + +```shell +curl -i http://127.0.0.1:9080/hello +``` + +## Delete the plugin + +When you need to remove the `loki-logger` plugin, you can delete the corresponding JSON configuration with the following command and APISIX will automatically reload the relevant configuration without restarting the service: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "methods": ["GET"], + "uri": "/hello", + "plugins": {}, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` + +## FAQ + +### Logs are not pushed properly + +Look at `error.log` for such a log. 
+ +```text +2023/04/30 13:45:46 [error] 19381#19381: *1075673 [lua] batch-processor.lua:95: Batch Processor[loki logger] failed to process entries: loki server returned status: 401, body: no org id, context: ngx.timer, client: 127.0.0.1, server: 0.0.0.0:9081 +``` + +The error can be diagnosed based on the error code in the `failed to process entries: loki server returned status: 401, body: no org id` and the response body of the loki server. + +### Getting errors when RPS is high? + +- Make sure the `keepalive` related configuration is set properly. See [Attributes](#attributes) for more information. +- Check the logs in `error.log`, look for such a log. + + ```text + 2023/04/30 13:49:34 [error] 19381#19381: *1082680 [lua] batch-processor.lua:95: Batch Processor[loki logger] failed to process entries: loki server returned status: 429, body: Ingestion rate limit exceeded for user tenant_1 (limit: 4194304 bytes/sec) while attempting to ingest '1000' lines totaling '616307' bytes, reduce log volume or contact your Loki administrator to see if the limit can be increased, context: ngx.timer, client: 127.0.0.1, server: 0.0.0.0:9081 + ``` + + - The logs usually associated with high QPS look like the above. The error is: `Ingestion rate limit exceeded for user tenant_1 (limit: 4194304 bytes/sec) while attempting to ingest '1000' lines totaling '616307' bytes, reduce log volume or contact your Loki administrator to see if the limit can be increased`. + - Refer to [Loki documentation](https://grafana.com/docs/loki/latest/configuration/#limits_config) to add limits on the amount of default and burst logs, such as `ingestion_rate_mb` and `ingestion_burst_size_mb`. + + As tested during development, setting the `ingestion_burst_size_mb` to 100 allows APISIX to push the logs correctly at least at 10000 RPS. 
diff --git a/docs/en/latest/plugins/mocking.md b/docs/en/latest/plugins/mocking.md index de9b20ad22a9..eaadc0b45d63 100644 --- a/docs/en/latest/plugins/mocking.md +++ b/docs/en/latest/plugins/mocking.md @@ -41,6 +41,7 @@ The `mocking` Plugin is used for mocking an API. When executed, it returns rando | response_example | string | False | | Body of the response, support use variables, like `$remote_addr $consumer_name`. | | response_schema | object | False | | The JSON schema object for the response. Works when `response_example` is unspecified. | | with_mock_header | boolean | False | true | When set to `true`, adds a response header `x-mock-by: APISIX/{version}`. | +| response_headers | object | false | | Headers to be added in the mocked response. Example: `{"X-Foo": "bar", "X-Few": "baz"}`| The JSON schema supports the following types in their fields: @@ -121,7 +122,7 @@ This is the response generated by the Plugin from this JSON schema: } ``` -## Enabling the Plugin +## Enable Plugin The example below configures the `mocking` Plugin for a specific Route: @@ -221,9 +222,9 @@ x-mock-by: APISIX/2.10.0 {"a":1,"b":2} ``` -## Disable Plugin +## Delete Plugin -To disable the `mocking` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. +To remove the `mocking` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. 
```shell curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' diff --git a/docs/en/latest/plugins/mqtt-proxy.md b/docs/en/latest/plugins/mqtt-proxy.md index cafc986251b6..85cdcdfcfc06 100644 --- a/docs/en/latest/plugins/mqtt-proxy.md +++ b/docs/en/latest/plugins/mqtt-proxy.md @@ -40,7 +40,7 @@ This Plugin supports both the protocols [3.1.*](http://docs.oasis-open.org/mqtt/ | protocol_name | string | True | Name of the protocol. Generally `MQTT`. | | protocol_level | integer | True | Level of the protocol. It should be `4` for MQTT `3.1.*` and `5` for MQTT `5.0`. | -## Enabling the Plugin +## Enable Plugin To enable the Plugin, you need to first enable the `stream_proxy` configuration in your configuration file (`conf/config.yaml`). The below configuration represents listening on the `9100` TCP port: @@ -50,7 +50,6 @@ To enable the Plugin, you need to first enable the `stream_proxy` configuration http: 'radixtree_uri' ssl: 'radixtree_sni' stream_proxy: # TCP/UDP proxy - only: false # needed if HTTP and Stream Proxy should be enabled tcp: # TCP proxy port list - 9100 dns_resolver: @@ -152,9 +151,9 @@ curl 127.0.0.1:9180/apisix/admin/stream_routes/1 -H 'X-API-KEY: edd1c9f034335f13 The `sni` name must match one or more of the SNIs provided to the SSL object that you created with the CA and server certificates. 
-## Disable Plugin +## Delete Plugin -To disable the `mqtt-proxy` Plugin you can remove the corresponding configuration as shown below: +To remove the `mqtt-proxy` Plugin you can remove the corresponding configuration as shown below: ```shell curl http://127.0.0.1:9180/apisix/admin/stream_routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X DELETE diff --git a/docs/en/latest/plugins/node-status.md b/docs/en/latest/plugins/node-status.md index 0a4f92e36f6e..7ad41f42aa40 100644 --- a/docs/en/latest/plugins/node-status.md +++ b/docs/en/latest/plugins/node-status.md @@ -40,7 +40,7 @@ This Plugin will add the endpoint `/apisix/status` to expose the status of APISI You may need to use the [public-api](public-api.md) Plugin to expose the endpoint. -## Enabling the Plugin +## Enable Plugin To configure the `node-status` Plugin, you have to first enable it in your configuration file (`conf/config.yaml`): @@ -99,7 +99,7 @@ The parameters in the response are described below: | reading | Number of connections where APISIX is reading the request header. | | id | UID of APISIX instance saved in `apisix/conf/apisix.uid`. | -## Disable Plugin +## Delete Plugin To remove the Plugin, you can remove it from your configuration file (`conf/config.yaml`): diff --git a/docs/en/latest/plugins/opa.md b/docs/en/latest/plugins/opa.md index 1aea6b50dad6..cddc9bb4f3e8 100644 --- a/docs/en/latest/plugins/opa.md +++ b/docs/en/latest/plugins/opa.md @@ -30,7 +30,7 @@ description: This document contains information about the Apache APISIX opa Plug ## Description -The `opa` Plugin can be used to integrate with [Open Policy Agent](https://www.openpolicyagent.org). This can help you decouple functions such as authentication and access to services and reduce the complexity of your system. +The `opa` Plugin can be used to integrate with [Open Policy Agent (OPA)](https://www.openpolicyagent.org). 
OPA is a policy engine that helps define and enforce authorization policies, which determine whether a user or application has the necessary permissions to perform a particular action or access a particular resource. Using OPA with APISIX decouples authorization logic from APISIX. ## Attributes @@ -298,9 +298,9 @@ curl -X GET 127.0.0.1:9080/get } ``` -## Disable Plugin +## Delete Plugin -To disable the `opa` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. +To remove the `opa` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. ```shell curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' diff --git a/docs/en/latest/plugins/openfunction.md b/docs/en/latest/plugins/openfunction.md index a211ce1fcda2..098cf238f114 100644 --- a/docs/en/latest/plugins/openfunction.md +++ b/docs/en/latest/plugins/openfunction.md @@ -142,9 +142,9 @@ curl http://127.0.0.1:9080/hello/123 Hello, 123! ``` -## Disable Plugin +## Delete Plugin -To disable the `openfunction` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. +To remove the `openfunction` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. 
```shell curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' diff --git a/docs/en/latest/plugins/openid-connect.md b/docs/en/latest/plugins/openid-connect.md index 5dd08decad10..493370240362 100644 --- a/docs/en/latest/plugins/openid-connect.md +++ b/docs/en/latest/plugins/openid-connect.md @@ -61,6 +61,12 @@ description: OpenID Connect allows the client to obtain user information from th | session | object | False | | | When bearer_only is set to false, openid-connect will use Authorization Code flow to authenticate on the IDP, so you need to set the session-related configuration. | | session.secret | string | True | Automatic generation | 16 or more characters | The key used for session encrypt and HMAC operation. | | unauth_action | string | False | "auth" | | Specify the response type on unauthenticated requests. "auth" redirects to identity provider, "deny" results in a 401 response, "pass" will allow the request without authentication. | +| proxy_opts | object | False | | | HTTP proxy server to be used to access the identity server. | +| proxy_opts.http_proxy | string | False | | http://proxy-server:port | HTTP proxy server address. | +| proxy_opts.https_proxy | string | False | | http://proxy-server:port | HTTPS proxy server address. | +| proxy_opts.http_proxy_authorization | string | False | | Basic [base64 username:password] | Default `Proxy-Authorization` header value to be used with `http_proxy`. | +| proxy_opts.https_proxy_authorization | string | False | | Basic [base64 username:password] | As `http_proxy_authorization` but for use with `https_proxy` (since with HTTPS the authorisation is done when connecting, this one cannot be overridden by passing the `Proxy-Authorization` request header). | +| proxy_opts.no_proxy | string | False | | | Comma separated list of hosts that should not be proxied. 
| NOTE: `encrypt_fields = {"client_secret"}` is also defined in the schema, which means that the field will be stored encrypted in etcd. See [encrypted storage fields](../plugin-develop.md#encrypted-storage-fields). @@ -88,7 +94,7 @@ The image below shows an example token introspection flow via a Gateway: ![token introspection](https://raw.githubusercontent.com/apache/apisix/master/docs/assets/images/plugin/oauth-1.png) -The example below shows how you can enable the Plugin on Route. The Rouet below will protect the Upstream by introspecting the token provided in the request header: +The example below shows how you can enable the Plugin on Route. The Route below will protect the Upstream by introspecting the token provided in the request header: ```bash curl http://127.0.0.1:9180/apisix/admin/routes/5 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' diff --git a/docs/en/latest/plugins/opentelemetry.md b/docs/en/latest/plugins/opentelemetry.md index 76f94febd7dd..55171d539f74 100644 --- a/docs/en/latest/plugins/opentelemetry.md +++ b/docs/en/latest/plugins/opentelemetry.md @@ -112,7 +112,7 @@ plugin_attr: set_ngx_var: true ``` -## Enabling the Plugin +## Enable Plugin To enable the Plugin, you have to add it to your configuration file (`conf/config.yaml`): @@ -147,9 +147,9 @@ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f1 }' ``` -## Disable Plugin +## Delete Plugin -To disable the `opentelemetry` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. +To remove the `opentelemetry` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. 
```shell curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' diff --git a/docs/en/latest/plugins/openwhisk.md b/docs/en/latest/plugins/openwhisk.md index 2c2afb24a010..333d0aea2592 100644 --- a/docs/en/latest/plugins/openwhisk.md +++ b/docs/en/latest/plugins/openwhisk.md @@ -56,7 +56,7 @@ OpenWhisk supports timeouts in the range 1ms to 60000ms and it is recommended to ::: -## Enabling the Plugin +## Enable Plugin Before configuring the Plugin, you need to have OpenWhisk running. The example below shows OpenWhisk in standalone mode: @@ -111,9 +111,9 @@ This will give back the response from the action: { "ready": true } ``` -## Disable Plugin +## Delete Plugin -To disable the `openwhisk` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. +To remove the `openwhisk` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. ```shell curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' diff --git a/docs/en/latest/plugins/prometheus.md b/docs/en/latest/plugins/prometheus.md index eb9ca6c83b34..71927fc3ba91 100644 --- a/docs/en/latest/plugins/prometheus.md +++ b/docs/en/latest/plugins/prometheus.md @@ -75,6 +75,26 @@ plugin_attr: extra_labels: - upstream_addr: $upstream_addr - upstream_status: $upstream_status +``` + +### Specifying `default_buckets` + +`DEFAULT_BUCKETS` is the default value for bucket array in `http_latency` metrics. + +You can change the `DEFAULT_BUCKETS` by configuring the `default_buckets` attribute in your configuration file. 
+ +Here is a configuration example: + +```yaml title="conf/config.yaml" +plugin_attr: + prometheus: + default_buckets: + - 15 + - 55 + - 105 + - 205 + - 505 +``` ## API @@ -111,7 +131,7 @@ This feature requires APISIX to run on [APISIX-Base](../FAQ.md#how-do-i-build-th ::: -## Enabling the Plugin +## Enable Plugin The `prometheus` Plugin can be enabled with an empty table. @@ -235,6 +255,16 @@ The following metrics are exported by the `prometheus` Plugin: - Info: Information about the APISIX node. - Shared dict: The capacity and free space of all nginx.shared.DICT in APISIX. +- `apisix_upstream_status`: Health check result status of upstream nodes. A value of `1` represents healthy and `0` represents unhealthy. + + The available attributes are: + + | Name | Description | + |--------------|-------------------------------------------------------------------------------------------------------------------------------| + | name | resource id where the upstream node is attached to, e.g. `/apisix/routes/1`, `/apisix/upstreams/1`. | + | ip | ip address of the node. | + | port | port number of the node. | + Here are the original metrics from APISIX: ```shell @@ -323,11 +353,15 @@ apisix_shared_dict_free_space_bytes{name="balancer-ewma-locks"} 10412032 apisix_shared_dict_free_space_bytes{name="discovery"} 1032192 apisix_shared_dict_free_space_bytes{name="etcd-cluster-health-check"} 10412032 ... +# HELP apisix_upstream_status Upstream status from health check +# TYPE apisix_upstream_status gauge +apisix_upstream_status{name="/apisix/routes/1",ip="100.24.156.8",port="80"} 0 +apisix_upstream_status{name="/apisix/routes/1",ip="52.86.68.46",port="80"} 1 ``` -## Disable Plugin +## Delete Plugin -To disable the `prometheus` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. 
+To remove the `prometheus` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. ```shell curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' diff --git a/docs/en/latest/plugins/proxy-cache.md b/docs/en/latest/plugins/proxy-cache.md index 7120aa54f6ea..8b31baa46ef7 100644 --- a/docs/en/latest/plugins/proxy-cache.md +++ b/docs/en/latest/plugins/proxy-cache.md @@ -55,27 +55,38 @@ The data to be cached can be filtered with response codes, request modes, or mor ::: -## Enabling the Plugin +## Enable Plugin You can add your cache configuration in you APISIX configuration file (`conf/config.yaml`) as shown below: ```yaml title="conf/config.yaml" -proxy_cache: - cache_ttl: 10s # default caching time if the upstream doesn't specify the caching time +apisix: + proxy_cache: + cache_ttl: 10s # default cache TTL for caching on disk zones: - - name: disk_cache_one # name of the cache. Admin can specify which cache to use in the Admin API by name - memory_size: 50m # size of shared memory, used to store the cache index - disk_size: 1G # size of disk, used to store the cache data - disk_path: "/tmp/disk_cache_one" # path to store the cache data - cache_levels: "1:2" # hierarchy levels of the cache + - name: disk_cache_one + memory_size: 50m + disk_size: 1G + disk_path: /tmp/disk_cache_one + cache_levels: 1:2 + # - name: disk_cache_two + # memory_size: 50m + # disk_size: 1G + # disk_path: "/tmp/disk_cache_two" + # cache_levels: "1:2" + - name: memory_cache + memory_size: 50m ``` -You can enable the Plugin on a specific Route as shown below: +### Use disk-based caching + +You can enable the Plugin on a Route as shown below. 
The Plugin uses the disk-based `cache_strategy` and `disk_cache_one` as the `cache_zone` by default: ```shell curl http://127.0.0.1:9180/apisix/admin/routes/1 \ -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { + "uri": "/ip", "plugins": { "proxy-cache": { "cache_key": ["$uri", "-cache-id"], @@ -88,22 +99,44 @@ curl http://127.0.0.1:9180/apisix/admin/routes/1 \ }, "upstream": { "nodes": { - "127.0.0.1:1999": 1 + "httpbin.org": 1 }, "type": "roundrobin" - }, - "uri": "/hello" + } }' ``` -In the above configuration, the `cache_zone` attribute defaults to `disk_cache_one`. +### Use memory-based caching + +You can enable the Plugin on a Route with in-memory `cache_strategy` and a corresponding in-memory `cache_zone` as shown below: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "uri": "/ip", + "plugins": { + "proxy-cache": { + "cache_strategy": "memory", + "cache_zone": "memory_cache", + "cache_ttl": 10 + } + }, + "upstream": { + "nodes": { + "httpbin.org": 1 + }, + "type": "roundrobin" + } +}' +``` ## Example usage Once you have configured the Plugin as shown above, you can make an initial request: ```shell -curl http://127.0.0.1:9080/hello -i +curl http://127.0.0.1:9080/ip -i ``` ```shell @@ -117,7 +150,7 @@ hello The `Apisix-Cache-Status` in the response shows `MISS` meaning that the response is not cached, as expected. Now, if you make another request, you will see that you get a cached response: ```shell -curl http://127.0.0.1:9080/hello -i +curl http://127.0.0.1:9080/ip -i ``` ```shell @@ -135,7 +168,7 @@ If you set `"cache_zone": "invalid_disk_cache"` attribute to an invalid value (c To clear the cached data, you can send a request with `PURGE` method: ```shell -curl -i http://127.0.0.1:9080/hello -X PURGE +curl -i http://127.0.0.1:9080/ip -X PURGE ``` ```shell @@ -146,20 +179,20 @@ If the response code is `200`, the deletion is successful. 
If the cached data is ::: -## Disable Plugin +## Delete Plugin -To disable the `proxy-cache` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. +To remove the `proxy-cache` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. ```shell curl http://127.0.0.1:9180/apisix/admin/routes/1 \ -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { - "uri": "/hello", + "uri": "/ip", "plugins": {}, "upstream": { "type": "roundrobin", "nodes": { - "127.0.0.1:1999": 1 + "httpbin.org": 1 } } }' diff --git a/docs/en/latest/plugins/proxy-control.md b/docs/en/latest/plugins/proxy-control.md index a4bbae5a497f..e97d0dc5c084 100644 --- a/docs/en/latest/plugins/proxy-control.md +++ b/docs/en/latest/plugins/proxy-control.md @@ -42,7 +42,7 @@ This Plugin requires APISIX to run on [APISIX-Base](../FAQ.md#how-do-i-build-the | ----------------- | ------- | -------- | ------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | request_buffering | boolean | False | true | When set to `true`, the Plugin dynamically sets the [`proxy_request_buffering`](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_request_buffering) directive. | -## Enabling the Plugin +## Enable Plugin The example below enables the Plugin on a specific Route: @@ -75,9 +75,9 @@ curl -i http://127.0.0.1:9080/upload -d @very_big_file It's expected to not find a message "a client request body is buffered to a temporary file" in the error log. -## Disable Plugin +## Delete Plugin -To disable the `proxy-control` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. 
APISIX will automatically reload and you do not have to restart for this to take effect. +To remove the `proxy-control` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. ```shell curl -i http://127.0.0.1:9180/apisix/admin/routes/1 \ diff --git a/docs/en/latest/plugins/proxy-mirror.md b/docs/en/latest/plugins/proxy-mirror.md index 1b97daa5b429..73b65f8e19c5 100644 --- a/docs/en/latest/plugins/proxy-mirror.md +++ b/docs/en/latest/plugins/proxy-mirror.md @@ -39,9 +39,9 @@ The response returned by the mirror request is ignored. | Name | Type | Required | Default | Valid values | Description | |--------------|--------|----------|---------|--------------|---------------------------------------------------------------------------------------------------------------------------| -| host | string | True | | | Address of the mirror service. It needs to contain the scheme but without the path. For example, `http://127.0.0.1:9797`. | -| path | string | False | | | Path of the mirror request. If unspecified, current path will be used. | -| path_concat_mode | string | False | replace | ["replace", "prefix"] | If the path of a mirror request is specified, set the concatenation mode of request paths. The `replace` mode will directly use `path` as the path of the mirror request. The `prefix` mode will use the `path` + `source request URI` as the path to the mirror request. | +| host | string | True | | | Address of the mirror service. It needs to contain the scheme (`http(s)` or `grpc(s)`) but without the path. For example, `http://127.0.0.1:9797`. | +| path | string | False | | | Path of the mirror request. If unspecified, current path will be used. If it is for mirroring grpc traffic, this option is no longer applicable. 
| +| path_concat_mode | string | False | replace | ["replace", "prefix"] | If the path of a mirror request is specified, set the concatenation mode of request paths. The `replace` mode will directly use `path` as the path of the mirror request. The `prefix` mode will use the `path` + `source request URI` as the path to the mirror request. If it is for mirroring grpc traffic, this option is no longer applicable too. | | sample_ratio | number | False | 1 | [0.00001, 1] | Ratio of the requests that will be mirrored. | You can customize the proxy timeouts for the mirrored sub-requests by configuring the `plugin_attr` key in your configuration file (`conf/config.yaml`). This can be used for mirroring traffic to a slow backend. @@ -61,7 +61,7 @@ plugin_attr: | read | string | 60s | Read timeout to the mirrored Upstream. | | send | string | 60s | Send timeout to the mirrored Upstream. | -## Enabling the Plugin +## Enable Plugin You can enable the Plugin on a specific Route as shown below: @@ -129,9 +129,9 @@ HTTP/1.1 200 OK hello world ``` -## Disable Plugin +## Delete Plugin -To disable the `proxy-mirror` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. +To remove the `proxy-mirror` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. 
```shell curl http://127.0.0.1:9180/apisix/admin/routes/1 \ diff --git a/docs/en/latest/plugins/proxy-rewrite.md b/docs/en/latest/plugins/proxy-rewrite.md index 0e18d9c5d79c..d1b4fdca238e 100644 --- a/docs/en/latest/plugins/proxy-rewrite.md +++ b/docs/en/latest/plugins/proxy-rewrite.md @@ -38,10 +38,10 @@ The `proxy-rewrite` Plugin rewrites Upstream proxy information such as `scheme`, |-----------------------------|---------------|----------|---------|----------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | uri | string | False | | | New Upstream forwarding address. Value supports [Nginx variables](https://nginx.org/en/docs/http/ngx_http_core_module.html). For example, `$arg_name`. | | method | string | False | | ["GET", "POST", "PUT", "HEAD", "DELETE", "OPTIONS","MKCOL", "COPY", "MOVE", "PROPFIND", "PROPFIND","LOCK", "UNLOCK", "PATCH", "TRACE"] | Rewrites the HTTP method. | -| regex_uri | array[string] | False | | | New upstream forwarding address. Regular expressions can be used to match the URL from client. If it matches, the URL template is forwarded to the Upstream otherwise, the URL from the client is forwarded. When both `uri` and `regex_uri` are configured, `uri` is used first. For example, `[" ^/iresty/(.*)/(.*)/(.*)", "/$1-$2-$3"]`. Here, the first element is the regular expression to match and the second element is the URL template forwarded to the Upstream. 
| +| regex_uri | array[string] | False | | | Regular expressions can be used to match the URL from client. If it matches, the URL template is forwarded to the upstream. Otherwise, the URL from the client is forwarded. When both `uri` and `regex_uri` are configured, `uri` has a higher priority. Multiple regular expressions are currently supported for pattern matching, and the plugin will try to match them one by one until they succeed or all fail. For example: `["^/iresty/(.*)/(.*)/(.*)", "/$1-$2-$3", "^/theothers/(.*)/(.*)", "/theothers/$1-$2"]`, the element with the odd index represents the uri regular expression that matches the request from the client, and the element with the even index represents the `uri` template that is forwarded upstream upon a successful match. Please note that the length of this value must be an **even number**. | | host | string | False | | | New Upstream host address. | | headers | object | False | | | | -| headers.add | object | false | | | Append the new headers. The format is `{"name: value",...}`. The values in the header can contain Nginx variables like `$remote_addr` and `$balancer_ip`. It also supports referencing the match result of `regex_uri` as a variable like `$1-$2-$3`. | +| headers.add | object | false | | | Append the new headers. The format is `{"name": "value",...}`. The values in the header can contain Nginx variables like `$remote_addr` and `$balancer_ip`. It also supports referencing the match result of `regex_uri` as a variable like `$1-$2-$3`. | | headers.set | object | false | | | Overwrite the headers. If the header does not exist, it will be added. The format is `{"name": "value", ...}`. The values in the header can contain Nginx variables like `$remote_addr` and `$balancer_ip`. It also supports referencing the match result of `regex_uri` as a variable like `$1-$2-$3`. | | headers.remove | array | false | | | Remove the headers. The format is `["name", ...]`.
| use_real_request_uri_unsafe | boolean | False | false | | Use real_request_uri (original $request_uri in nginx) to bypass URI normalization. **Enabling this is considered unsafe as it bypasses all URI normalization steps**. | @@ -52,7 +52,7 @@ Header configurations are executed according to the following priorities: `add` > `remove` > `set` -## Enabling the Plugin +## Enable Plugin The example below enables the `proxy-rewrite` Plugin on a specific Route: @@ -103,9 +103,9 @@ Once you send the request, you can check the Upstream `access.log` for its outpu 127.0.0.1 - [26/Sep/2019:10:52:20 +0800] iresty.com GET /test/home.html HTTP/1.1 200 38 - curl/7.29.0 - 0.000 199 107 ``` -## Disable Plugin +## Delete Plugin -To disable the `proxy-rewrite` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. +To remove the `proxy-rewrite` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. ```shell curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' diff --git a/docs/en/latest/plugins/public-api.md b/docs/en/latest/plugins/public-api.md index 0098a86e631e..1c275ff23dda 100644 --- a/docs/en/latest/plugins/public-api.md +++ b/docs/en/latest/plugins/public-api.md @@ -134,9 +134,9 @@ curl -i 'http://127.0.0.1:9080/gen_token?key=user-key' HTTP/1.1 401 Unauthorized ``` -## Disable Plugin +## Delete Plugin -To disable the `public-api` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. +To remove the `public-api` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. 
APISIX will automatically reload and you do not have to restart for this to take effect. ```shell curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' diff --git a/docs/en/latest/plugins/real-ip.md b/docs/en/latest/plugins/real-ip.md index ce2567da38a9..115fc29b2a9e 100644 --- a/docs/en/latest/plugins/real-ip.md +++ b/docs/en/latest/plugins/real-ip.md @@ -43,7 +43,7 @@ This Plugin requires APISIX to run on [APISIX-Base](../FAQ.md#how-do-i-build-the | Name | Type | Required | Valid values | Description | |-------------------|---------------|----------|-----------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| source | string | True | Any Nginx variable like `arg_realip` or `http_x_forwarded_for`. | Dynamically sets the client's IP address and an optional port from APISIX's view. | +| source | string | True | Any Nginx variable like `arg_realip` or `http_x_forwarded_for`. | Dynamically sets the client's IP address and an optional port, or the client's host name, from APISIX's view. | | trusted_addresses | array[string] | False | List of IPs or CIDR ranges. | Dynamically sets the `set_real_ip_from` field. | | recursive | boolean | False | True to enable, false to disable, default is false | If recursive search is disabled, the original client address that matches one of the trusted addresses is replaced by the last address sent in the configured `source`. If recursive search is enabled, the original client address that matches one of the trusted addresses is replaced by the last non-trusted address sent in the configured `source`. 
| @@ -53,7 +53,7 @@ If the address specified in `source` is missing or invalid, the Plugin would not ::: -## Enabling the Plugin +## Enable Plugin The example below enables the `real-ip` Plugin on the specified Route: @@ -96,9 +96,9 @@ remote-addr: 1.2.3.4 remote-port: 9080 ``` -## Disable Plugin +## Delete Plugin -To disable the `real-ip` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. +To remove the `real-ip` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. ```shell curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' diff --git a/docs/en/latest/plugins/redirect.md b/docs/en/latest/plugins/redirect.md index 7ba47aea8923..0b725451d918 100644 --- a/docs/en/latest/plugins/redirect.md +++ b/docs/en/latest/plugins/redirect.md @@ -53,7 +53,7 @@ The `redirect` Plugin can be used to configure redirects. ::: -## Enabling the Plugin +## Enable Plugin The example below shows how you can enable the `redirect` Plugin on a specific Route: @@ -144,9 +144,9 @@ Location: https://127.0.0.1:9443/hello ... ``` -## Disable Plugin +## Delete Plugin -To disable the `redirect` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. +To remove the `redirect` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. 
```shell curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' diff --git a/docs/en/latest/plugins/referer-restriction.md b/docs/en/latest/plugins/referer-restriction.md index 877896ad1c08..fb3d358a0201 100644 --- a/docs/en/latest/plugins/referer-restriction.md +++ b/docs/en/latest/plugins/referer-restriction.md @@ -45,7 +45,7 @@ Only one of `whitelist` or `blacklist` attribute must be specified. They cannot ::: -## Enabling the Plugin +## Enable Plugin You can enable the Plugin on a specific Route or a Service as shown below: @@ -107,9 +107,9 @@ HTTP/1.1 200 OK ... ``` -## Disable Plugin +## Delete Plugin -To disable the `referer-restriction` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. +To remove the `referer-restriction` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. ```shell curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' diff --git a/docs/en/latest/plugins/request-id.md b/docs/en/latest/plugins/request-id.md index 3592135fd694..95dd565b1a55 100644 --- a/docs/en/latest/plugins/request-id.md +++ b/docs/en/latest/plugins/request-id.md @@ -44,45 +44,11 @@ The Plugin will not add a unique ID if the request already has a header with the | ------------------- | ------- | -------- | -------------- | ------------------------------- | ---------------------------------------------------------------------- | | header_name | string | False | "X-Request-Id" | | Header name for the unique request ID. | | include_in_response | boolean | False | true | | When set to `true`, adds the unique request ID in the response header. 
| -| algorithm | string | False | "uuid" | ["uuid", "snowflake", "nanoid", "range_id"] | Algorithm to use for generating the unique request ID. | +| algorithm | string | False | "uuid" | ["uuid", "nanoid", "range_id"] | Algorithm to use for generating the unique request ID. | | range_id.char_set | string | False | "abcdefghijklmnopqrstuvwxyzABCDEFGHIGKLMNOPQRSTUVWXYZ0123456789| The minimum string length is 6 | Character set for range_id | | range_id.length | integer | False | 16 | Minimum 6 | Id length for range_id algorithm | -### Using snowflake algorithm to generate unique ID - -:::caution - -- When you need to use `snowflake` algorithm, make sure APISIX has the permission to write to the etcd. -- Please read this documentation before deciding to use the snowflake algorithm. Once it is configured, you cannot arbitrarily change the configuration. Failure to do so may result in duplicate IDs. - -::: - -The `snowflake` algorithm supports flexible configurations to cover a variety of needs. Attributes are as follows: - -| Name | Type | Required | Default | Description | -| --------------------- | ------- | -------- | ------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| enable | boolean | False | false | When set to `true`, enables the snowflake algorithm. | -| snowflake_epoc | integer | False | 1609459200000 | Starting timestamp in milliseconds. Default is `2021-01-01T00:00:00Z` and supports to a 69 year time until `2090-09-0715:47:35Z`. | -| data_machine_bits | integer | False | 12 | Maximum number of supported machines (processes) `1 << data_machine_bits`. Corresponds the set of `workIDs` and `dataCenterIDs` in the snowflake definition. 
Each process is associated to a unique ID. The maximum number of supported processes is `pow(2, data_machine_bits)`. So, for the default value of 12 bits, it is 4096. | -| sequence_bits | integer | False | 10 | Maximum number of generated ID per millisecond per node `1 << sequence_bits`. Each process generates up to 1024 IDs per millisecond. | -| data_machine_ttl | integer | False | 30 | Valid time in seconds of registration of `data_machine` in etcd. | -| data_machine_interval | integer | False | 10 | Time in seconds between `data_machine` renewals in etcd. | - -To use the snowflake algorithm, you have to enable it first on your configuration file `conf/config.yaml`: - -```yaml title="conf/config.yaml" -plugin_attr: - request-id: - snowflake: - enable: true - snowflake_epoc: 1609459200000 - data_machine_bits: 12 - sequence_bits: 10 - data_machine_ttl: 30 - data_machine_interval: 10 -``` - -## Enabling the Plugin +## Enable Plugin The example below enables the Plugin on a specific Route: @@ -118,9 +84,9 @@ HTTP/1.1 200 OK X-Request-Id: fe32076a-d0a5-49a6-a361-6c244c1df956 ``` -## Disable Plugin +## Delete Plugin -To disable the `request-id` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. +To remove the `request-id` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. ```shell curl http://127.0.0.1:9180/apisix/admin/routes/5 \ diff --git a/docs/en/latest/plugins/request-validation.md b/docs/en/latest/plugins/request-validation.md index a9fbcee21a82..bd0d80b7f048 100644 --- a/docs/en/latest/plugins/request-validation.md +++ b/docs/en/latest/plugins/request-validation.md @@ -45,7 +45,7 @@ At least one of `header_schema` or `body_schema` should be filled in. 
::: -## Enabling the Plugin +## Enable Plugin You can configure the Plugin on a specific Route as shown below: @@ -192,6 +192,23 @@ The examples below shows how you can configure this Plugin for different validat } ``` +### Header validation + +```json +{ + "header_schema": { + "type": "object", + "required": ["Content-Type"], + "properties": { + "Content-Type": { + "type": "string", + "pattern": "^application\/json$" + } + } + } +} +``` + ### Combined validation ```json @@ -265,9 +282,9 @@ curl --header "Content-Type: application/json" \ http://127.0.0.1:9080/get ``` -## Disable Plugin +## Delete Plugin -To disable the `request-validation` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. +To remove the `request-validation` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. ```shell curl http://127.0.0.1:9180/apisix/admin/routes/5 \ diff --git a/docs/en/latest/plugins/response-rewrite.md b/docs/en/latest/plugins/response-rewrite.md index 044351ef5d34..392d367254f2 100644 --- a/docs/en/latest/plugins/response-rewrite.md +++ b/docs/en/latest/plugins/response-rewrite.md @@ -49,7 +49,7 @@ You can also use the [redirect](./redirect.md) Plugin to setup redirects. |-----------------|---------|----------|---------|---------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | status_code | integer | False | | [200, 598] | New HTTP status code in the response. 
If unset, falls back to the original status code. | | body | string | False | | | New body of the response. The content-length would also be reset. | -| body_base64 | boolean | False | false | | When set, the body of the request will be decoded before writing to the client. | +| body_base64 | boolean | False | false | | When set, the body passed in `body` will be decoded before writing to the client which is used in some image and Protobuffer scenarios. Note that this field only allows decoding the body passed in plugin configuration and does not decode upstream response. | | headers | object | False | | | | | headers.add | array | False | | | Append the new headers to the response. The format is `["name: value", ...]`. The values in the header can contain Nginx variables like `$remote_addr` and `$balancer_ip`. | | headers.set | object | False | | | Rewriting the headers. The format is `{"name": "value", ...}`. The values in the header can contain Nginx variables like `$remote_addr` and `$balancer_ip`. | @@ -67,7 +67,7 @@ Only one of `body` or `filters` can be configured. ::: -## Enabling the Plugin +## Enable Plugin The example below enables the `response-rewrite` Plugin on a specific Route: @@ -226,9 +226,9 @@ X-Server-id: 3 ``` -## Disable Plugin +## Delete Plugin -To disable the `response-rewrite` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. +To remove the `response-rewrite` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. 
```shell curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' diff --git a/docs/en/latest/plugins/rocketmq-logger.md b/docs/en/latest/plugins/rocketmq-logger.md index b5d088d5352d..be3245aeeadf 100644 --- a/docs/en/latest/plugins/rocketmq-logger.md +++ b/docs/en/latest/plugins/rocketmq-logger.md @@ -40,7 +40,7 @@ It might take some time to receive the log data. It will be automatically sent a | topic | string | True | | | Target topic to push the data to. | | key | string | False | | | Key of the messages. | | tag | string | False | | | Tag of the messages. | -| log_format | object | False | | | Log format declared as key value pairs in JSON format. Values only support strings. [APISIX](../apisix-variable.md) or [Nginx](http://nginx.org/en/docs/varindex.html) variables can be used by prefixing the string with `$`. | +| log_format | object | False | {"host": "$host", "@timestamp": "$time_iso8601", "client_ip": "$remote_addr"} | | Log format declared as key value pairs in JSON format. Values only support strings. [APISIX](../apisix-variable.md) or [Nginx](http://nginx.org/en/docs/varindex.html) variables can be used by prefixing the string with `$`. | | timeout | integer | False | 3 | [1,...] | Timeout for the upstream to send data. | | use_tls | boolean | False | false | | When set to `true`, uses TLS. | | access_key | string | False | "" | | Access key for ACL. Setting to an empty string will disable the ACL. 
| @@ -154,7 +154,7 @@ With this configuration, your logs would be formatted as shown below: {"host":"localhost","@timestamp":"2020-09-23T19:05:05-04:00","client_ip":"127.0.0.1","route_id":"1"} ``` -## Enabling the Plugin +## Enable Plugin The example below shows how you can enable the `rocketmq-logger` Plugin on a specific Route: @@ -196,9 +196,9 @@ Now, if you make a request to APISIX, it will be logged in your RocketMQ server: curl -i http://127.0.0.1:9080/hello ``` -## Disable Plugin +## Delete Plugin -To disable the `rocketmq-logger` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. +To remove the `rocketmq-logger` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. ```shell curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' diff --git a/docs/en/latest/plugins/server-info.md b/docs/en/latest/plugins/server-info.md index c73997e11afd..9e9005639d22 100644 --- a/docs/en/latest/plugins/server-info.md +++ b/docs/en/latest/plugins/server-info.md @@ -50,7 +50,7 @@ None. 
This Plugin exposes the endpoint `/v1/server_info` to the [Control API](../control-api.md) -## Enabling the Plugin +## Enable Plugin Add `server-info` to the Plugin list in your configuration file (`conf/config.yaml`): @@ -102,9 +102,9 @@ You can also view the server info report through the [APISIX Dashboard](/docs/da ::: -## Disable Plugin +## Delete Plugin -To disable the Plugin, you can remove `server-info` from the list of Plugins in your configuration file: +To remove the Plugin, you can remove `server-info` from the list of Plugins in your configuration file: ```yaml title="conf/config.yaml" plugins: diff --git a/docs/en/latest/plugins/serverless.md b/docs/en/latest/plugins/serverless.md index df4fed614615..11f3c7c6377a 100644 --- a/docs/en/latest/plugins/serverless.md +++ b/docs/en/latest/plugins/serverless.md @@ -79,7 +79,7 @@ Prior to v2.12.0, the phase `before_proxy` was called `balancer`. This was updat ::: -## Enabling the Plugin +## Enable Plugin The example below enables the Plugin on a specific Route: @@ -116,9 +116,9 @@ curl -i http://127.0.0.1:9080/index.html You will find a message "serverless pre-function" and "match uri /index.html" in the error.log. -## Disable Plugin +## Delete Plugin -To disable the `serverless` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. +To remove the `serverless` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. 
```shell curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' diff --git a/docs/en/latest/plugins/skywalking-logger.md b/docs/en/latest/plugins/skywalking-logger.md index 22ddbf42222b..127cffbb5660 100644 --- a/docs/en/latest/plugins/skywalking-logger.md +++ b/docs/en/latest/plugins/skywalking-logger.md @@ -40,7 +40,7 @@ If there is an existing tracing context, it sets up the trace-log correlation au | endpoint_addr | string | True | | | URI of the SkyWalking OAP server. | | service_name | string | False | "APISIX" | | Service name for the SkyWalking reporter. | | service_instance_name | string | False | "APISIX Instance Name" | | Service instance name for the SkyWalking reporter. Set it to `$hostname` to directly get the local hostname. | -| log_format | object | False | | | Log format declared as key value pairs in JSON format. Values only support strings. [APISIX](../apisix-variable.md) or [Nginx](http://nginx.org/en/docs/varindex.html) variables can be used by prefixing the string with `$`. | +| log_format | object | False | {"host": "$host", "@timestamp": "$time_iso8601", "client_ip": "$remote_addr"} | | Log format declared as key value pairs in JSON format. Values only support strings. [APISIX](../apisix-variable.md) or [Nginx](http://nginx.org/en/docs/varindex.html) variables can be used by prefixing the string with `$`. | | timeout | integer | False | 3 | [1,...] | Time to keep the connection alive for after sending a request. | | name | string | False | "skywalking logger" | | Unique identifier to identify the logger. | | include_req_body | boolean | False | false | [false, true] | When set to `true` includes the request body in the log. 
| @@ -81,7 +81,7 @@ With this configuration, your logs would be formatted as shown below: {"host":"localhost","@timestamp":"2020-09-23T19:05:05-04:00","client_ip":"127.0.0.1","route_id":"1"} ``` -## Enabling the Plugin +## Enable Plugin Once you have set up your SkyWalking OAP server, you can enable the Plugin on a specific Route as shown below: @@ -111,9 +111,9 @@ Now, if you make a request to APISIX, it will be logged in your SkyWalking OAP s curl -i http://127.0.0.1:9080/hello ``` -## Disable Plugin +## Delete Plugin -To disable the `skywalking-logger` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. +To remove the `skywalking-logger` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. ```shell curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' diff --git a/docs/en/latest/plugins/skywalking.md b/docs/en/latest/plugins/skywalking.md index 6e3a2965c78e..c9d2920328fc 100644 --- a/docs/en/latest/plugins/skywalking.md +++ b/docs/en/latest/plugins/skywalking.md @@ -94,7 +94,7 @@ nohup java -javaagent:/root/skywalking/app/agent/skywalking-agent.jar \ 2>&1 > /root/skywalking/app/logs/nohup.log & ``` -## Enabling the Plugin +## Enable Plugin To enable the Plugin, you have to add it to your configuration file (`conf/config.yaml`): @@ -197,9 +197,9 @@ You should also be able to see traces from all services: ![ ](../../../assets/images/plugin/skywalking-5.png) -## Disable Plugin +## Delete Plugin -To disable the `skywalking` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. 
+To remove the `skywalking` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. ```shell curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' diff --git a/docs/en/latest/plugins/sls-logger.md b/docs/en/latest/plugins/sls-logger.md index 16b64d974c98..c76ebbbed6f0 100644 --- a/docs/en/latest/plugins/sls-logger.md +++ b/docs/en/latest/plugins/sls-logger.md @@ -86,7 +86,7 @@ With this configuration, your logs would be formatted as shown below: {"host":"localhost","@timestamp":"2020-09-23T19:05:05-04:00","client_ip":"127.0.0.1","route_id":"1"} ``` -## Enabling the Plugin +## Enable Plugin The example below shows how you can configure the Plugin on a specific Route: @@ -126,9 +126,9 @@ Now if you check your Ali Cloud log server, you will be able to see the logs: ![sls logger view](../../../assets/images/plugin/sls-logger-1.png "sls logger view") -## Disable Plugin +## Delete Plugin -To disable the `sls-logger` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. +To remove the `sls-logger` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. 
```shell curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' diff --git a/docs/en/latest/plugins/splunk-hec-logging.md b/docs/en/latest/plugins/splunk-hec-logging.md index df7b4e9c4887..d8a847d2a8bf 100644 --- a/docs/en/latest/plugins/splunk-hec-logging.md +++ b/docs/en/latest/plugins/splunk-hec-logging.md @@ -44,7 +44,7 @@ When the Plugin is enabled, APISIX will serialize the request context informatio | endpoint.channel | False | | Splunk HEC send data channel identifier. Read more: [About HTTP Event Collector Indexer Acknowledgment](https://docs.splunk.com/Documentation/Splunk/8.2.3/Data/AboutHECIDXAck). | | endpoint.timeout | False | 10 | Splunk HEC send data timeout in seconds. | | ssl_verify | False | true | When set to `true` enables SSL verification as per [OpenResty docs](https://github.com/openresty/lua-nginx-module#tcpsocksslhandshake). | -| log_format | False | | Log format declared as key value pairs in JSON format. Values only support strings. [APISIX](../apisix-variable.md) or [Nginx](http://nginx.org/en/docs/varindex.html) variables can be used by prefixing the string with `$`. | +| log_format | False | {"host": "$host", "@timestamp": "$time_iso8601", "client_ip": "$remote_addr"} | Log format declared as key value pairs in JSON format. Values only support strings. [APISIX](../apisix-variable.md) or [Nginx](http://nginx.org/en/docs/varindex.html) variables can be used by prefixing the string with `$`. | This Plugin supports using batch processors to aggregate and process entries (logs/data) in a batch. This avoids the need for frequently submitting the data. The batch processor submits data every `5` seconds or when the data in the queue reaches `1000`. See [Batch Processor](../batch-processor.md#configuration) for more information or setting your custom configuration. 
@@ -81,7 +81,7 @@ With this configuration, your logs would be formatted as shown below: [{"time":1673976669.269,"source":"apache-apisix-splunk-hec-logging","event":{"host":"localhost","client_ip":"127.0.0.1","@timestamp":"2023-01-09T14:47:25+08:00","route_id":"1"},"host":"DESKTOP-2022Q8F-wsl","sourcetype":"_json"}] ``` -## Enabling the Plugin +## Enable Plugin ### Full configuration @@ -152,9 +152,9 @@ You should be able to login and search these logs from your Splunk dashboard: ![splunk hec search view](../../../assets/images/plugin/splunk-hec-admin-en.png) -## Disable Plugin +## Delete Plugin -To disable the `splunk-hec-logging` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. +To remove the `splunk-hec-logging` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. ```shell curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' diff --git a/docs/en/latest/plugins/syslog.md b/docs/en/latest/plugins/syslog.md index 1823350c19e4..4a38f9728098 100644 --- a/docs/en/latest/plugins/syslog.md +++ b/docs/en/latest/plugins/syslog.md @@ -45,7 +45,7 @@ Logs can be set as JSON objects. | drop_limit | integer | False | 1048576 | | Maximum size of the buffer (KB) and the current message before the current message is dropped because of the size limit. | | sock_type | string | False | "tcp" | ["tcp", "udp] | Transport layer protocol to use. | | pool_size | integer | False | 5 | [5, ...] | Keep-alive pool size used by `sock:keepalive`. | -| log_format | object | False | | | Log format declared as key value pairs in JSON format. Values only support strings. 
[APISIX](../apisix-variable.md) or [Nginx](http://nginx.org/en/docs/varindex.html) variables can be used by prefixing the string with `$`. | +| log_format | object | False | {"host": "$host", "@timestamp": "$time_iso8601", "client_ip": "$remote_addr"} | | Log format declared as key value pairs in JSON format. Values only support strings. [APISIX](../apisix-variable.md) or [Nginx](http://nginx.org/en/docs/varindex.html) variables can be used by prefixing the string with `$`. | | include_req_body | boolean | False | false | | When set to `true` includes the request body in the log. | This Plugin supports using batch processors to aggregate and process entries (logs/data) in a batch. This avoids the need for frequently submitting the data. The batch processor submits data every `5` seconds or when the data in the queue reaches `1000`. See [Batch Processor](../batch-processor.md#configuration) for more information or setting your custom configuration. @@ -84,7 +84,7 @@ With this configuration, your logs would be formatted as shown below: {"host":"localhost","@timestamp":"2020-09-23T19:05:05-04:00","client_ip":"127.0.0.1","route_id":"1"} ``` -## Enabling the Plugin +## Enable Plugin The example below shows how you can enable the Plugin for a specific Route: @@ -116,9 +116,9 @@ Now, if you make a request to APISIX, it will be logged in your Syslog server: curl -i http://127.0.0.1:9080/hello ``` -## Disable Plugin +## Delete Plugin -To disable the `syslog` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. +To remove the `syslog` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. 
```shell curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' diff --git a/docs/en/latest/plugins/tcp-logger.md b/docs/en/latest/plugins/tcp-logger.md index 418643584f13..3b41582244ab 100644 --- a/docs/en/latest/plugins/tcp-logger.md +++ b/docs/en/latest/plugins/tcp-logger.md @@ -43,7 +43,7 @@ This plugin also allows to push logs as a batch to your external TCP server. It | host | string | True | | | IP address or the hostname of the TCP server. | | port | integer | True | | [0,...] | Target upstream port. | | timeout | integer | False | 1000 | [1,...] | Timeout for the upstream to send data. | -| log_format | object | False | | | Log format declared as key value pairs in JSON format. Values only support strings. [APISIX](../apisix-variable.md) or [Nginx](http://nginx.org/en/docs/varindex.html) variables can be used by prefixing the string with `$`. | +| log_format | object | False | {"host": "$host", "@timestamp": "$time_iso8601", "client_ip": "$remote_addr"} | | Log format declared as key value pairs in JSON format. Values only support strings. [APISIX](../apisix-variable.md) or [Nginx](http://nginx.org/en/docs/varindex.html) variables can be used by prefixing the string with `$`. | | tls | boolean | False | false | | When set to `true` performs SSL verification. | | tls_options | string | False | | | TLS options. | | include_req_body | boolean | False | false | | When set to `true` includes the request body in the log. 
| @@ -83,7 +83,7 @@ With this configuration, your logs would be formatted as shown below: {"@timestamp":"2023-01-09T14:47:25+08:00","route_id":"1","host":"localhost","client_ip":"127.0.0.1"} ``` -## Enabling the Plugin +## Enable Plugin The example below shows how you can enable the `tcp-logger` Plugin on a specific Route: @@ -117,9 +117,9 @@ Now, if you make a request to APISIX, it will be logged in your TCP server: curl -i http://127.0.0.1:9080/hello ``` -## Disable Plugin +## Delete Plugin -To disable the `tcp-logger` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. +To remove the `tcp-logger` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. ```shell curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' diff --git a/docs/en/latest/plugins/tencent-cloud-cls.md b/docs/en/latest/plugins/tencent-cloud-cls.md index 67954d0fcd0b..da9ec84f4e73 100644 --- a/docs/en/latest/plugins/tencent-cloud-cls.md +++ b/docs/en/latest/plugins/tencent-cloud-cls.md @@ -44,7 +44,7 @@ The `tencent-cloud-cls` Plugin uses [TencentCloud CLS](https://cloud.tencent.com | include_req_body | boolean | No | false | [false, true] | When set to `true` includes the request body in the log. If the request body is too big to be kept in the memory, it can't be logged due to NGINX's limitations. | | include_resp_body | boolean | No | false | [false, true] | When set to `true` includes the response body in the log. | | global_tag | object | No | | | kv pairs in JSON,send with each log. | -| log_format | object | No | | | Log format declared as key value pairs in JSON format. Values only support strings. 
[APISIX](../apisix-variable.md) or [Nginx](http://nginx.org/en/docs/varindex.html) variables can be used by prefixing the string with `$`. | +| log_format | object | No | {"host": "$host", "@timestamp": "$time_iso8601", "client_ip": "$remote_addr"} | | Log format declared as key value pairs in JSON format. Values only support strings. [APISIX](../apisix-variable.md) or [Nginx](http://nginx.org/en/docs/varindex.html) variables can be used by prefixing the string with `$`. | NOTE: `encrypt_fields = {"secret_key"}` is also defined in the schema, which means that the field will be stored encrypted in etcd. See [encrypted storage fields](../plugin-develop.md#encrypted-storage-fields). @@ -85,7 +85,7 @@ With this configuration, your logs would be formatted as shown below: {"host":"localhost","@timestamp":"2020-09-23T19:05:05-04:00","client_ip":"127.0.0.1","route_id":"1"} ``` -## Enabling the Plugin +## Enable Plugin The example below shows how you can enable the Plugin on a specific Route: @@ -125,7 +125,7 @@ Now, if you make a request to APISIX, it will be logged in your cls topic: curl -i http://127.0.0.1:9080/hello ``` -## Disable Plugin +## Delete Plugin To disable this Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. 
diff --git a/docs/en/latest/plugins/traffic-split.md b/docs/en/latest/plugins/traffic-split.md index e617e22673a4..7c45aa9e7b90 100644 --- a/docs/en/latest/plugins/traffic-split.md +++ b/docs/en/latest/plugins/traffic-split.md @@ -77,7 +77,7 @@ If only the `weight` attribute is configured, it corresponds to the weight of th ::: -## Enabling the Plugin +## Enable Plugin You can configure the Plugin on a Route as shown below: @@ -616,9 +616,9 @@ curl http://127.0.0.1:9080/hello -H 'x-api-id: 3' 1980 ``` -## Disable Plugin +## Delete Plugin -To disable the `traffic-split` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. +To remove the `traffic-split` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. ```shell curl http://127.0.0.1:9180/apisix/admin/routes/1 \ diff --git a/docs/en/latest/plugins/ua-restriction.md b/docs/en/latest/plugins/ua-restriction.md index 84bd2cb14303..070f08ba6a85 100644 --- a/docs/en/latest/plugins/ua-restriction.md +++ b/docs/en/latest/plugins/ua-restriction.md @@ -43,11 +43,11 @@ A common scenario is to set crawler rules. `User-Agent` is the identity of the c :::note -Both `allowlist` and `denylist` can be used on their own. If they are used together, the `allowlist` matches before the `denylist`. +Both `allowlist` and `denylist` can't be used at the same time. ::: -## Enabling the Plugin +## Enable Plugin You can enable the Plugin on a Route or a Service as shown below: @@ -116,9 +116,9 @@ HTTP/1.1 403 Forbidden {"message":"Not allowed"} ``` -## Disable Plugin +## Delete Plugin -To disable the `ua-restriction` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. 
+To remove the `ua-restriction` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. ```shell curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' diff --git a/docs/en/latest/plugins/udp-logger.md b/docs/en/latest/plugins/udp-logger.md index ee60a529a08d..8b4eb7f232d2 100644 --- a/docs/en/latest/plugins/udp-logger.md +++ b/docs/en/latest/plugins/udp-logger.md @@ -42,7 +42,7 @@ This plugin also allows to push logs as a batch to your external UDP server. It | host | string | True | | | IP address or the hostname of the UDP server. | | port | integer | True | | [0,...] | Target upstream port. | | timeout | integer | False | 3 | [1,...] | Timeout for the upstream to send data. | -| log_format | object | False | | | Log format declared as key value pairs in JSON format. Values only support strings. [APISIX](../apisix-variable.md) or [Nginx](http://nginx.org/en/docs/varindex.html) variables can be used by prefixing the string with `$`. | +| log_format | object | False | {"host": "$host", "@timestamp": "$time_iso8601", "client_ip": "$remote_addr"} | | Log format declared as key value pairs in JSON format. Values only support strings. [APISIX](../apisix-variable.md) or [Nginx](http://nginx.org/en/docs/varindex.html) variables can be used by prefixing the string with `$`. | | name | string | False | "udp logger" | | Unique identifier for the batch processor. | | include_req_body | boolean | False | false | | When set to `true` includes the request body in the log. 
| @@ -81,7 +81,7 @@ With this configuration, your logs would be formatted as shown below: {"@timestamp":"2023-01-09T14:47:25+08:00","route_id":"1","host":"localhost","client_ip":"127.0.0.1"} ``` -## Enabling the Plugin +## Enable Plugin The example below shows how you can enable the Plugin on a specific Route: @@ -114,9 +114,9 @@ Now, if you make a request to APISIX, it will be logged in your UDP server: curl -i http://127.0.0.1:9080/hello ``` -## Disable Plugin +## Delete Plugin -To disable the `udp-logger` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. +To remove the `udp-logger` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. ```shell curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' diff --git a/docs/en/latest/plugins/uri-blocker.md b/docs/en/latest/plugins/uri-blocker.md index c7db356bcd09..18be67c04797 100644 --- a/docs/en/latest/plugins/uri-blocker.md +++ b/docs/en/latest/plugins/uri-blocker.md @@ -39,7 +39,7 @@ The `uri-blocker` Plugin intercepts user requests with a set of `block_rules`. | rejected_msg | string | False | | non-empty | HTTP response body returned when the request URI hits any of the `block_rules`. | | case_insensitive | boolean | False | false | | When set to `true`, ignores the case when matching request URI. | -## Enabling the Plugin +## Enable Plugin The example below enables the `uri-blocker` Plugin on a specific Route: @@ -93,9 +93,9 @@ Server: APISIX web server {"error_msg":"access is not allowed"} ``` -## Disable Plugin +## Delete Plugin -To disable the `uri-blocker` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. 
APISIX will automatically reload and you do not have to restart for this to take effect. +To remove the `uri-blocker` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. ```shell curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' diff --git a/docs/en/latest/plugins/wolf-rbac.md b/docs/en/latest/plugins/wolf-rbac.md index 87bd409875cc..954c894ebf10 100644 --- a/docs/en/latest/plugins/wolf-rbac.md +++ b/docs/en/latest/plugins/wolf-rbac.md @@ -60,7 +60,7 @@ To use this Plugin, you have to first [install wolf](https://github.com/iGeeky/w Once you have done that you need to add `application`, `admin`, `normal user`, `permission`, `resource` and user authorize to the [wolf-console](https://github.com/iGeeky/wolf/blob/master/docs/usage.md). -## Enabling the Plugin +## Enable Plugin You need to first configure the Plugin on a Consumer: @@ -266,9 +266,9 @@ HTTP/1.1 200 OK {"message":"success to change password"} ``` -## Disable Plugin +## Delete Plugin -To disable the `wolf-rbac` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. +To remove the `wolf-rbac` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. 
```shell curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' diff --git a/docs/en/latest/plugins/workflow.md b/docs/en/latest/plugins/workflow.md index ed1d8adaa8c0..6448d0224156 100644 --- a/docs/en/latest/plugins/workflow.md +++ b/docs/en/latest/plugins/workflow.md @@ -61,7 +61,7 @@ In `rules`, match `case` in order according to the index of the `rules`, and exe ::: -## Enabling the Plugin +## Enable Plugin You can configure the `workflow` plugin on a Route as shown below: @@ -147,9 +147,9 @@ curl http://127.0.0.1:0080/hello/fake -i HTTP/1.1 200 OK ``` -## Disable Plugin +## Delete Plugin -To disable the `workflow` plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. +To remove the `workflow` plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. ```shell curl http://127.0.0.1:9180/apisix/admin/routes/1 \ diff --git a/docs/en/latest/plugins/zipkin.md b/docs/en/latest/plugins/zipkin.md index 1a870441b1ad..2a772e608f0d 100644 --- a/docs/en/latest/plugins/zipkin.md +++ b/docs/en/latest/plugins/zipkin.md @@ -106,7 +106,7 @@ func main(){ } ``` -## Enabling the Plugin +## Enable Plugin The example below enables the Plugin on a specific Route: @@ -216,9 +216,9 @@ You can access the Jaeger UI to view the traces in endpoint [http://127.0.0.1:16 ![jaeger web-ui trace](../../../assets/images/plugin/jaeger-2.png) -## Disable Plugin +## Delete Plugin -To disable the `zipkin` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect. +To remove the `zipkin` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. 
APISIX will automatically reload and you do not have to restart for this to take effect. ```shell curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' diff --git a/docs/en/latest/profile.md b/docs/en/latest/profile.md index af226c09a55c..5c4bca67928b 100644 --- a/docs/en/latest/profile.md +++ b/docs/en/latest/profile.md @@ -1,5 +1,11 @@ --- -title: Configuration file switching based on environment variables +title: Configuration based on environments +keywords: + - Apache APISIX + - API Gateway + - Configuration + - Environment +description: This document describes how you can change APISIX configuration based on environments. --- -The reason the configuration is extracted from the code is to better adapt to changes. Usually our applications have different -operating environments such as development environment and production environment. Certain configurations of these applications -will definitely be different, such as the address of the configuration center. +Extracting configuration from the code makes APISIX adaptable to changes in the operating environments. For example, APISIX can be deployed in a development environment for testing and then moved to a production environment. The configuration for APISIX in these environments would be different. -If the configuration of all environments is placed in the same file, it is very difficult to manage. After receiving new -requirements, we need to change the parameters in the configuration file to the development environment when developing the -development environment. You have to change it back. It's very easy to make mistakes. +APISIX supports managing multiple configurations through environment variables in two different ways: -The solution to the above problem is to distinguish the current running environment through environment variables, and switch -between different configuration files through environment variables. 
The corresponding environment variable in APISIX is: `APISIX_PROFILE` +1. Using environment variables in the configuration file +2. Using an environment variable to switch between multiple configuration profiles -When `APISIX_PROFILE` is not set, the following three configuration files are used by default: +## Using environment variables in the configuration file -* conf/config.yaml -* conf/apisix.yaml -* conf/debug.yaml +This is useful when you want to change some configurations based on the environment. + +To use environment variables, you can use the syntax `key_name: ${{ENVIRONMENT_VARIABLE_NAME:=}}`. You can also set a default value to fall back to if no environment variables are set by adding it to the configuration as `key_name: ${{ENVIRONMENT_VARIABLE_NAME:=VALUE}}`. The example below shows how you can modify your configuration file to use environment variables to set the listening ports of APISIX: + +```yaml title="config.yaml" +apisix: + node_listen: + - ${{APISIX_NODE_LISTEN:=}} +deployment: + admin: + admin_listen: + port: ${{DEPLOYMENT_ADMIN_ADMIN_LISTEN:=}} +``` + +When you run APISIX, you can set these environment variables dynamically: + +```shell +export APISIX_NODE_LISTEN=8132 +export DEPLOYMENT_ADMIN_ADMIN_LISTEN=9232 +``` + +Now when you start APISIX, it will listen on port `8132` and expose the Admin API on port `9232`. + +To use default values if no environment variables are set, you can add it to your configuration file as shown below: + +```yaml title="config.yaml" +apisix: + node_listen: + - ${{APISIX_NODE_LISTEN:=9080}} +deployment: + admin: + admin_listen: + port: ${{DEPLOYMENT_ADMIN_ADMIN_LISTEN:=9180}} +``` + +Now if you don't specify these environment variables when running APISIX, it will fall back to the default values and expose the Admin API on port `9180` and listen on port `9080`. 
+ +## Using the `APISIX_PROFILE` environment variable -If the value of `APISIX_PROFILE` is set to `prod`, the following three configuration files are used: +If you have multiple configuration changes for multiple environments, it might be better to have a different configuration file for each. + +Although this might increase the number of configuration files, you would be able to manage each independently and can even do version management. + +APISIX uses the `APISIX_PROFILE` environment variable to switch between environments, i.e. to switch between different sets of configuration files. If the value of `APISIX_PROFILE` is `env`, then APISIX will look for the configuration files `conf/config-env.yaml`, `conf/apisix-env.yaml`, and `conf/debug-env.yaml`. + +For example for the production environment, you can have: * conf/config-prod.yaml * conf/apisix-prod.yaml * conf/debug-prod.yaml -Although this way will increase the number of configuration files, it can be managed independently, and then version management -tools such as git can be configured, and version management can be better achieved. +And for the development environment: + +* conf/config-dev.yaml +* conf/apisix-dev.yaml +* conf/debug-dev.yaml + +And if no environment is specified, APISIX can use the default configuration files: + +* conf/config.yaml +* conf/apisix.yaml +* conf/debug.yaml + +To use a particular configuration, you can specify it in the environment variable: + +```shell +export APISIX_PROFILE=prod +``` + +APISIX will now use the `-prod.yaml` configuration files. 
diff --git a/docs/en/latest/pubsub.md b/docs/en/latest/pubsub.md index 2db871cecb0f..62112fb4596c 100644 --- a/docs/en/latest/pubsub.md +++ b/docs/en/latest/pubsub.md @@ -44,7 +44,7 @@ Currently, Apache APISIX supports WebSocket communication with the client, which ## Supported messaging systems -- [Aapche Kafka](pubsub/kafka.md) +- [Apache Kafka](pubsub/kafka.md) ## How to support other messaging systems diff --git a/docs/en/latest/router-radixtree.md b/docs/en/latest/router-radixtree.md index 068b9777a0d6..7f6bf7f971ec 100644 --- a/docs/en/latest/router-radixtree.md +++ b/docs/en/latest/router-radixtree.md @@ -1,5 +1,5 @@ --- -title: Router radixtree +Title: Router Radixtree --- -### what's libradixtree? +### What is Libradixtree? -[libradixtree](https://github.com/api7/lua-resty-radixtree), adaptive radix trees implemented in Lua for OpenResty. +[Libradixtree](https://github.com/api7/lua-resty-radixtree) is an adaptive radix tree that is implemented in Lua for OpenResty and it is based on FFI for [rax](https://github.com/antirez/rax). APISIX uses libradixtree as a route dispatching library. -APISIX using libradixtree as route dispatching library. +### How to use Libradixtree in APISIX? -### How to use libradixtree in APISIX? - -This is Lua-OpenResty implementation library base on FFI for [rax](https://github.com/antirez/rax). - -Let's take a look at a few examples and have an intuitive understanding. +There are several ways to use Libradixtree in APISIX. Let's take a look at a few examples and have an intuitive understanding. #### 1. Full match @@ -39,7 +35,7 @@ Let's take a look at a few examples and have an intuitive understanding. /blog/foo ``` -It will only match `/blog/foo`. +It will only match the full path `/blog/foo`. #### 2. Prefix matching @@ -47,12 +43,12 @@ It will only match `/blog/foo`. /blog/bar* ``` -It will match the path with the prefix `/blog/bar`, eg: `/blog/bar/a`, +It will match the path with the prefix `/blog/bar`. 
For example, `/blog/bar/a`, `/blog/bar/b`, `/blog/bar/c/d/e`, `/blog/bar` etc. #### 3. Match priority -Full match -> Deep prefix matching. +Full match has a higher priority than deep prefix matching. Here are the rules: @@ -77,7 +73,7 @@ When different routes have the same `uri`, you can set the priority field of the Note: In the matching rules, the `priority` field takes precedence over other rules except `uri`. -1. Different routes have the same `uri` and set the `priority` field +1. Different routes have the same `uri` but different `priority` field Create two routes with different `priority` values ​​(the larger the value, the higher the priority). @@ -116,11 +112,11 @@ curl http://127.0.0.1:1980/hello 1980 ``` -All requests only hit the route of port `1980`. +All requests will only hit the route of port `1980` because it has a priority of 3 while the route with the port of `1981` has a priority of 2. -2. Different routes have the same `uri` and set different matching conditions +2. Different routes have the same `uri` but different matching conditions -Here is an example of setting host matching rules: +To understand this, look at the example of setting host matching rules: ```shell $ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' @@ -191,10 +187,9 @@ will match both `/blog/dog` and `/blog/cat`. For more details, see https://github.com/api7/lua-resty-radixtree/#parameters-in-path. -### How to filter route by Nginx builtin variable +### How to filter route by Nginx built-in variable? -Please take a look at [radixtree-new](https://github.com/api7/lua-resty-radixtree#new), -here is an simple example: +Nginx provides a variety of built-in variables that can be used to filter routes based on certain criteria. 
Here is an example of how to filter routes by Nginx built-in variables: ```shell $ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' @@ -216,9 +211,9 @@ $ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f }' ``` -This route will require the request header `host` equal `iresty.com`, request cookie key `_device_id` equal `a66f0cdc4ba2df8c096f74c9110163a9` etc. +This route will require the request header `host` equal `iresty.com`, request cookie key `_device_id` equal `a66f0cdc4ba2df8c096f74c9110163a9` etc. You can learn more at [radixtree-new](https://github.com/api7/lua-resty-radixtree#new). -### How to filter route by POST form attributes +### How to filter route by POST form attributes? APISIX supports filtering route by POST form attributes with `Content-Type` = `application/x-www-form-urlencoded`. @@ -243,11 +238,11 @@ $ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f The route will be matched when the POST form contains `name=json`. -### How to filter route by GraphQL attributes +### How to filter route by GraphQL attributes? APISIX can handle HTTP GET and POST methods. At the same time, the request body can be a GraphQL query string or JSON-formatted content. -APISIX supports filtering route by some attributes of GraphQL. Currently we support: +APISIX supports filtering routes by some attributes of GraphQL. 
Currently, we support: * graphql_operation * graphql_name @@ -266,6 +261,8 @@ query getRepo { } ``` +Where + * The `graphql_operation` is `query` * The `graphql_name` is `getRepo`, * The `graphql_root_fields` is `["owner", "repo"]` diff --git a/docs/en/latest/ssl-protocol.md b/docs/en/latest/ssl-protocol.md new file mode 100644 index 000000000000..53dbcfbc752b --- /dev/null +++ b/docs/en/latest/ssl-protocol.md @@ -0,0 +1,343 @@ +--- +title: SSL Protocol +--- + + + +`APISIX` supports set TLS protocol and also supports dynamically specifying different TLS protocol versions for each [SNI](https://en.wikipedia.org/wiki/Server_Name_Indication). + +**For security reasons, the encryption suite used by default in `APISIX` does not support TLSv1.1 and lower versions.** +**If you need to enable the TLSv1.1 protocol, please add the encryption suite supported by the TLSv1.1 protocol to the configuration item `apisix.ssl.ssl_ciphers` in `config.yaml`.** + +## ssl_protocols Configuration + +### Static Configuration + +The `ssl_protocols` parameter in the static configuration `config.yaml` applies to the entire APISIX, but cannot be dynamically modified. It only takes effect when the matching SSL resource does not set `ssl_protocols`. + +```yaml +apisix: + ssl: + ssl_protocols: TLSv1.2 TLSv1.3 # default TLSv1.2 TLSv1.3 +``` + +### Dynamic Configuration + +Use the `ssl_protocols` field in the `ssl` resource to dynamically specify different TLS protocol versions for each SNI. + +Specify the `test.com` domain uses the TLSv1.2 and TLSv1.3: + +```bash +{ + "cert": "$cert", + "key": "$key", + "snis": ["test.com"], + "ssl_protocols": [ + "TLSv1.2", + "TLSv1.3" + ] +} +``` + +### Notes + +- Dynamic configuration has a higher priority than static configuration. When the `ssl_protocols` configuration item in the ssl resource is not empty, the static configuration will be overridden. +- The static configuration applies to the entire APISIX and requires a reload of APISIX to take effect. 
+- Dynamic configuration can control the TLS protocol version of each SNI in a fine-grained manner and can be dynamically modified, which is more flexible than static configuration. + +## Examples + +### How to specify the TLSv1.1 protocol + +While newer products utilize higher security-level TLS protocol versions, there are still legacy clients that rely on the lower-level TLSv1.1 protocol. However, enabling TLSv1.1 for new products presents potential security risks. In order to maintain the security of the API, it is crucial to have the ability to seamlessly switch between different protocol versions based on specific requirements and circumstances. +For example, consider two domain names: `test.com`, utilized by legacy clients requiring TLSv1.1 configuration, and `test2.com`, associated with new products that support TLSv1.2 and TLSv1.3 protocols. + +1. `config.yaml` configuration. + +```yaml +apisix: + ssl: + ssl_protocols: TLSv1.3 + # ssl_ciphers is for reference only + ssl_ciphers: ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:DHE-RSA-AES256-SHA:DHE-DSS-AES256-SHA +``` + +2. Specify the TLSv1.1 protocol version for the test.com domain. + +```bash +curl http://127.0.0.1:9180/apisix/admin/ssls/1 \ +-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "cert" : "'"$(cat server.crt)"'", + "key": "'"$(cat server.key)"'", + "snis": ["test.com"], + "ssl_protocols": [ + "TLSv1.1" + ] +}' +``` + +3. Create an SSL object for test.com without specifying the TLS protocol version, which will use the static configuration by default. 
+ +```bash +curl http://127.0.0.1:9180/apisix/admin/ssls/1 \ +-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "cert" : "'"$(cat server2.crt)"'", + "key": "'"$(cat server2.key)"'", + "snis": ["test2.com"] +}' +``` + +4. Access Verification + +Failed, accessed test.com with TLSv1.3: + +```shell +$ curl --tls-max 1.3 --tlsv1.3 https://test.com:9443 -v -k -I +* Trying 127.0.0.1:9443... +* Connected to test.com (127.0.0.1) port 9443 (#0) +* ALPN, offering h2 +* ALPN, offering http/1.1 +* successfully set certificate verify locations: +* CAfile: /etc/ssl/certs/ca-certificates.crt +* CApath: /etc/ssl/certs +* TLSv1.3 (OUT), TLS handshake, Client hello (1): +* TLSv1.3 (IN), TLS alert, protocol version (582): +* error:1409442E:SSL routines:ssl3_read_bytes:tlsv1 alert protocol version +* Closing connection 0 +curl: (35) error:1409442E:SSL routines:ssl3_read_bytes:tlsv1 alert protocol version +``` + +Successfully, accessed test.com with TLSv1.1: + +```shell +$ curl --tls-max 1.1 --tlsv1.1 https://test.com:9443 -v -k -I +* Trying 127.0.0.1:9443... +* Connected to test.com (127.0.0.1) port 9443 (#0) +* ALPN, offering h2 +* ALPN, offering http/1.1 +* successfully set certificate verify locations: +* CAfile: /etc/ssl/certs/ca-certificates.crt +* CApath: /etc/ssl/certs +* TLSv1.1 (OUT), TLS handshake, Client hello (1): +* TLSv1.1 (IN), TLS handshake, Server hello (2): +* TLSv1.1 (IN), TLS handshake, Certificate (11): +* TLSv1.1 (IN), TLS handshake, Server key exchange (12): +* TLSv1.1 (IN), TLS handshake, Server finished (14): +* TLSv1.1 (OUT), TLS handshake, Client key exchange (16): +* TLSv1.1 (OUT), TLS change cipher, Change cipher spec (1): +* TLSv1.1 (OUT), TLS handshake, Finished (20): +* TLSv1.1 (IN), TLS handshake, Finished (20): +* SSL connection using TLSv1.1 / ECDHE-RSA-AES256-SHA +``` + +Successfully, accessed test2.com with TLSv1.3: + +```shell +$ curl --tls-max 1.3 --tlsv1.3 https://test2.com:9443 -v -k -I +* Trying 127.0.0.1:9443... 
+* Connected to test2.com (127.0.0.1) port 9443 (#0) +* ALPN, offering h2 +* ALPN, offering http/1.1 +* successfully set certificate verify locations: +* CAfile: /etc/ssl/certs/ca-certificates.crt +* CApath: /etc/ssl/certs +* TLSv1.3 (OUT), TLS handshake, Client hello (1): +* TLSv1.3 (IN), TLS handshake, Server hello (2): +* TLSv1.3 (IN), TLS handshake, Encrypted Extensions (8): +* TLSv1.3 (IN), TLS handshake, Certificate (11): +* TLSv1.3 (IN), TLS handshake, CERT verify (15): +* TLSv1.3 (IN), TLS handshake, Finished (20): +* TLSv1.3 (OUT), TLS change cipher, Change cipher spec (1): +* TLSv1.3 (OUT), TLS handshake, Finished (20): +* SSL connection using TLSv1.3 / TLS_AES_256_GCM_SHA384 +``` + +Failed, accessed test2.com with TLSv1.1: + +```shell +curl --tls-max 1.1 --tlsv1.1 https://test2.com:9443 -v -k -I +* Trying 127.0.0.1:9443... +* Connected to test2.com (127.0.0.1) port 9443 (#0) +* ALPN, offering h2 +* ALPN, offering http/1.1 +* successfully set certificate verify locations: +* CAfile: /etc/ssl/certs/ca-certificates.crt +* CApath: /etc/ssl/certs +* TLSv1.1 (OUT), TLS handshake, Client hello (1): +* TLSv1.1 (IN), TLS alert, protocol version (582): +* error:1409442E:SSL routines:ssl3_read_bytes:tlsv1 alert protocol version +* Closing connection 0 +curl: (35) error:1409442E:SSL routines:ssl3_read_bytes:tlsv1 alert protocol version +``` + +### Certificates are associated with multiple domains, but different TLS protocols are used between domains + +Sometimes, we may encounter a situation where a certificate is associated with multiple domains, but they need to use different TLS protocols to ensure security. For example, the test.com domain needs to use the TLSv1.2 protocol, while the test2.com domain needs to use the TLSv1.3 protocol. In this case, we cannot simply create an SSL object for all domains, but need to create an SSL object for each domain separately and specify the appropriate protocol version. 
This way, we can perform the correct SSL handshake and encrypted communication based on different domains and protocol versions. The example is as follows: + +1. Create an SSL object for test.com using the certificate and specify the TLSv1.2 protocol. + +```bash +curl http://127.0.0.1:9180/apisix/admin/ssls/1 \ +-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "cert" : "'"$(cat server.crt)"'", + "key": "'"$(cat server.key)"'", + "snis": ["test.com"], + "ssl_protocols": [ + "TLSv1.2" + ] +}' +``` + +2. Use the same certificate as test.com to create an SSL object for test2.com and specify the TLSv1.3 protocol. + +```bash +curl http://127.0.0.1:9180/apisix/admin/ssls/2 \ +-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "cert" : "'"$(cat server.crt)"'", + "key": "'"$(cat server.key)"'", + "snis": ["test2.com"], + "ssl_protocols": [ + "TLSv1.3" + ] +}' +``` + +3. Access verification + +Successfully, accessed test.com with TLSv1.2: + +```shell +$ curl --tls-max 1.2 --tlsv1.2 https://test.com:9443 -v -k -I +* Trying 127.0.0.1:9443... 
+* Connected to test.com (127.0.0.1) port 9443 (#0) +* ALPN, offering h2 +* ALPN, offering http/1.1 +* successfully set certificate verify locations: +* CAfile: /etc/ssl/certs/ca-certificates.crt +* CApath: /etc/ssl/certs +* TLSv1.2 (OUT), TLS handshake, Client hello (1): +* TLSv1.2 (IN), TLS handshake, Server hello (2): +* TLSv1.2 (IN), TLS handshake, Certificate (11): +* TLSv1.2 (IN), TLS handshake, Server key exchange (12): +* TLSv1.2 (IN), TLS handshake, Server finished (14): +* TLSv1.2 (OUT), TLS handshake, Client key exchange (16): +* TLSv1.2 (OUT), TLS change cipher, Change cipher spec (1): +* TLSv1.2 (OUT), TLS handshake, Finished (20): +* TLSv1.2 (IN), TLS handshake, Finished (20): +* SSL connection using TLSv1.2 / ECDHE-RSA-AES128-GCM-SHA256 +* ALPN, server accepted to use h2 +* Server certificate: +* subject: C=AU; ST=Some-State; O=Internet Widgits Pty Ltd; CN=test.com +* start date: Jul 20 15:50:08 2023 GMT +* expire date: Jul 17 15:50:08 2033 GMT +* issuer: C=AU; ST=Some-State; O=Internet Widgits Pty Ltd; CN=test.com +* SSL certificate verify result: EE certificate key too weak (66), continuing anyway. +* Using HTTP2, server supports multi-use +* Connection state changed (HTTP/2 confirmed) +* Copying HTTP/2 data in stream buffer to connection buffer after upgrade: len=0 +* Using Stream ID: 1 (easy handle 0x5608905ee2e0) +> HEAD / HTTP/2 +> Host: test.com:9443 +> user-agent: curl/7.74.0 +> accept: */* + +``` + +Failed, accessed test.com with TLSv1.3: + +```shell +$ curl --tls-max 1.3 --tlsv1.3 https://test.com:9443 -v -k -I +* Trying 127.0.0.1:9443... 
+* Connected to test.com (127.0.0.1) port 9443 (#0) +* ALPN, offering h2 +* ALPN, offering http/1.1 +* successfully set certificate verify locations: +* CAfile: /etc/ssl/certs/ca-certificates.crt +* CApath: /etc/ssl/certs +* TLSv1.3 (OUT), TLS handshake, Client hello (1): +* TLSv1.3 (IN), TLS alert, protocol version (582): +* error:1409442E:SSL routines:ssl3_read_bytes:tlsv1 alert protocol version +* Closing connection 0 +curl: (35) error:1409442E:SSL routines:ssl3_read_bytes:tlsv1 alert protocol version + +``` + +Successfully, accessed test2.com with TLSv1.3: + +```shell +$ curl --tls-max 1.3 --tlsv1.3 https://test2.com:9443 -v -k -I +* Trying 127.0.0.1:9443... +* Connected to test2.com (127.0.0.1) port 9443 (#0) +* ALPN, offering h2 +* ALPN, offering http/1.1 +* successfully set certificate verify locations: +* CAfile: /etc/ssl/certs/ca-certificates.crt +* CApath: /etc/ssl/certs +* TLSv1.3 (OUT), TLS handshake, Client hello (1): +* TLSv1.3 (IN), TLS handshake, Server hello (2): +* TLSv1.3 (IN), TLS handshake, Encrypted Extensions (8): +* TLSv1.3 (IN), TLS handshake, Certificate (11): +* TLSv1.3 (IN), TLS handshake, CERT verify (15): +* TLSv1.3 (IN), TLS handshake, Finished (20): +* TLSv1.3 (OUT), TLS change cipher, Change cipher spec (1): +* TLSv1.3 (OUT), TLS handshake, Finished (20): +* SSL connection using TLSv1.3 / TLS_AES_256_GCM_SHA384 +* ALPN, server accepted to use h2 +* Server certificate: +* subject: C=AU; ST=Some-State; O=Internet Widgits Pty Ltd; CN=test2.com +* start date: Jul 20 16:05:47 2023 GMT +* expire date: Jul 17 16:05:47 2033 GMT +* issuer: C=AU; ST=Some-State; O=Internet Widgits Pty Ltd; CN=test2.com +* SSL certificate verify result: EE certificate key too weak (66), continuing anyway. 
+* Using HTTP2, server supports multi-use +* Connection state changed (HTTP/2 confirmed) +* Copying HTTP/2 data in stream buffer to connection buffer after upgrade: len=0 +* Using Stream ID: 1 (easy handle 0x55569cbe42e0) +> HEAD / HTTP/2 +> Host: test2.com:9443 +> user-agent: curl/7.74.0 +> accept: */* +> +* TLSv1.3 (IN), TLS handshake, Newsession Ticket (4): +* TLSv1.3 (IN), TLS handshake, Newsession Ticket (4): +* old SSL session ID is stale, removing +``` + +Failed, accessed test2.com with TLSv1.2: + +```shell +$ curl --tls-max 1.2 --tlsv1.2 https://test2.com:9443 -v -k -I +* Trying 127.0.0.1:9443... +* Connected to test2.com (127.0.0.1) port 9443 (#0) +* ALPN, offering h2 +* ALPN, offering http/1.1 +* successfully set certificate verify locations: +* CAfile: /etc/ssl/certs/ca-certificates.crt +* CApath: /etc/ssl/certs +* TLSv1.2 (OUT), TLS handshake, Client hello (1): +* TLSv1.2 (IN), TLS alert, protocol version (582): +* error:1409442E:SSL routines:ssl3_read_bytes:tlsv1 alert protocol version +* Closing connection 0 +curl: (35) error:1409442E:SSL routines:ssl3_read_bytes:tlsv1 alert protocol version +``` diff --git a/docs/en/latest/stream-proxy.md b/docs/en/latest/stream-proxy.md index 0ab87b8df4ef..9354c96b255b 100644 --- a/docs/en/latest/stream-proxy.md +++ b/docs/en/latest/stream-proxy.md @@ -21,67 +21,66 @@ title: Stream Proxy # --> -TCP is the protocol for many popular applications and services, such as LDAP, MySQL, and RTMP. UDP (User Datagram Protocol) is the protocol for many popular non-transactional applications, such as DNS, syslog, and RADIUS. +A stream proxy operates at the transport layer, handling stream-oriented traffic based on TCP and UDP protocols. TCP is used for many applications and services, such as LDAP, MySQL, and RTMP. UDP is used for many popular non-transactional applications, such as DNS, syslog, and RADIUS. -APISIX can dynamically load balancing TCP/UDP proxy. 
In Nginx world, we call TCP/UDP proxy to stream proxy, we followed this statement. +APISIX can serve as a stream proxy, in addition to being an application layer proxy. ## How to enable stream proxy? -Setting the `stream_proxy` option in `conf/config.yaml`, specify a list of addresses that require dynamic proxy. -By default, no stream proxy is enabled. +By default, stream proxy is disabled. + +To enable this option, set `apisix.proxy_mode` to `stream` or `http&stream`, depending on whether you want stream proxy only or both http and stream. Then add the `apisix.stream_proxy` option in `conf/config.yaml` and specify the list of addresses where APISIX should act as a stream proxy and listen for incoming requests. +:::note + +This "apisix.stream_proxy" option has only been added in versions after 3.2.1. + +::: ```yaml apisix: - stream_proxy: # TCP/UDP proxy - tcp: # TCP proxy address list - - 9100 + stream_proxy: + tcp: + - 9100 # listen on 9100 ports of all network interfaces for TCP requests - "127.0.0.1:9101" - udp: # UDP proxy address list - - 9200 + udp: + - 9200 # listen on 9200 ports of all network interfaces for UDP requests - "127.0.0.1:9211" ``` -If `apisix.enable_admin` is true, both HTTP and stream proxy are enabled with the configuration above. - -If you have set the `enable_admin` to false, and need to enable both HTTP and stream proxy, set the `only` to false: +If `apisix.stream_proxy` is undefined in `conf/config.yaml`, you will encounter an error similar to the following and not be able to add a stream route: -```yaml -apisix: - enable_admin: false - stream_proxy: # TCP/UDP proxy - only: false - tcp: # TCP proxy address list - - 9100 +``` +{"error_msg":"stream mode is disabled, can not add stream routes"} ``` -## How to set route? +## How to set a route? -Here is a mini example: +You can create a stream route using the Admin API `/stream_routes` endpoint. 
For example: ```shell curl http://127.0.0.1:9180/apisix/admin/stream_routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { - "remote_addr": "127.0.0.1", + "remote_addr": "192.168.5.3", "upstream": { "nodes": { - "127.0.0.1:1995": 1 + "192.168.4.10:1995": 1 }, "type": "roundrobin" } }' ``` -It means APISIX will proxy the request to `127.0.0.1:1995` which the client remote address is `127.0.0.1`. +With this configuration, APISIX would only forward the request to the upstream service at `192.168.4.10:1995` if and only if the request is sent from `192.168.5.3`. See the next section to learn more about filtering options. -For more use cases, please take a look at [test case](https://github.com/apache/apisix/blob/master/t/stream-node/sanity.t). +More examples can be found in [test cases](https://github.com/apache/apisix/blob/master/t/stream-node/sanity.t). -## More route match options +## More stream route filtering options -And we can add more options to match a route. Currently stream route configuration supports 3 fields for filtering: +Currently there are three attributes in stream routes that can be used for filtering requests: -- server_addr: The address of the APISIX server that accepts the L4 stream connection. -- server_port: The port of the APISIX server that accepts the L4 stream connection. -- remote_addr: The address of client from which the request has been made. +- `server_addr`: The address of the APISIX server that accepts the L4 stream connection. +- `server_port`: The port of the APISIX server that accepts the L4 stream connection. +- `remote_addr`: The address of client from which the request has been made. Here is an example: @@ -101,7 +100,7 @@ curl http://127.0.0.1:9180/apisix/admin/stream_routes/1 -H 'X-API-KEY: edd1c9f03 It means APISIX will proxy the request to `127.0.0.1:1995` when the server address is `127.0.0.1` and the server port is equal to `2000`. 
-Let's take another real world example: +Here is an example with MySQL: 1. Put this config inside `config.yaml` diff --git a/docs/en/latest/terminology/consumer-group.md b/docs/en/latest/terminology/consumer-group.md index 1cb06c3d769b..2f91657805ee 100644 --- a/docs/en/latest/terminology/consumer-group.md +++ b/docs/en/latest/terminology/consumer-group.md @@ -35,7 +35,9 @@ instead of managing each consumer individually. ## Example -The example below illustrates how to create a Consumer Group and bind it to a Consumer: +The example below illustrates how to create a Consumer Group and bind it to a Consumer. + +Create a Consumer Group which shares the same rate limiting quota: ```shell curl http://127.0.0.1:9180/apisix/admin/consumer_groups/company_a \ @@ -46,12 +48,14 @@ curl http://127.0.0.1:9180/apisix/admin/consumer_groups/company_a \ "count": 200, "time_window": 60, "rejected_code": 503, - "group": "$consumer_group_id" + "group": "grp_company_a" } } }' ``` +Create a Consumer within the Consumer Group: + ```shell curl http://127.0.0.1:9180/apisix/admin/consumers \ -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' diff --git a/docs/en/latest/terminology/consumer.md b/docs/en/latest/terminology/consumer.md index 0b2a1ad3e827..591396c4f336 100644 --- a/docs/en/latest/terminology/consumer.md +++ b/docs/en/latest/terminology/consumer.md @@ -60,6 +60,8 @@ The process of identifying a Consumer in APISIX is described below: Consumers are useful when you have different consumers requesting the same API and you need to execute different Plugin and Upstream configurations based on the consumer. These need to be used in conjunction with the user authentication system. +Authentication plugins that can be configured with a Consumer include `basic-auth`, `hmac-auth`, `jwt-auth`, `key-auth`, `ldap-auth`, and `wolf-rbac`. + Refer to the documentation for the [key-auth](../plugins/key-auth.md) authentication Plugin to further understand the concept of a Consumer. 
:::note diff --git a/docs/en/latest/terminology/plugin.md b/docs/en/latest/terminology/plugin.md index bbff2f798700..051fef32b07d 100644 --- a/docs/en/latest/terminology/plugin.md +++ b/docs/en/latest/terminology/plugin.md @@ -30,58 +30,83 @@ description: This article introduces the related information of the APISIX Plugi ## Description -This represents the configuration of the plugins that are executed during the HTTP request/response lifecycle. A **Plugin** configuration can be bound directly to a [`Route`](./route.md), a [`Service`](./service.md), a [`Consumer`](./consumer.md) or a [`Plugin Config`](./plugin-config.md). +APISIX Plugins extend APISIX's functionalities to meet organization or user-specific requirements in traffic management, observability, security, request/response transformation, serverless computing, and more. -You can also refer to [Admin API](../admin-api.md#plugin) for how to use this resource. +A **Plugin** configuration can be bound directly to a [`Route`](route.md), [`Service`](service.md), [`Consumer`](consumer.md) or [`Plugin Config`](plugin-config.md). You can refer to [Admin API plugins](../admin-api.md#plugin) for how to use this resource. -:::note +If existing APISIX Plugins do not meet your needs, you can also write your own plugins in Lua or other languages such as Java, Python, Go, and Wasm. -While configuring the same plugin, only one copy of the configuration is valid. The order of precedence is always `Consumer` > `Consumer Group` > `Route` > `Plugin Config` > `Service`. +## Plugins installation -::: +APISIX comes with a default configuration file called `config-default.yaml` and a user-defined configuration file called `config.yaml`. These files are located in the `conf` directory. If the same key (e.g. `plugins`) exists in both files, the configuration values for the key in `config.yaml` will overwrite those in `config-default.yaml`. 
-While configuring APISIX, you can declare the Plugins that are supported by the local APISIX node. This acts as a whitelisting mechanism as Plugins that are not in this whitelist will be automatically ignored. So, this feature can be used to temporarily turn off/turn on specific plugins. +The `plugins` block is where you can declare the Plugins loaded to your APISIX instance: -## Adding a Plugin +```yaml +plugins: + - real-ip # loaded + - ai + - client-control + - proxy-control + - request-id + - zipkin + # - skywalking # not loaded +... +``` -For adding new plugins based on existing plugins, copy the data in the `plugins` node from the default configuration file `conf/config-default.yaml` to your configuration file (`conf/config.yaml`). +## Plugins execution lifecycle -In a request, a Plugin is only executed once. This is true even if it is bound to multiple different objects like Routes and Services. The order in which Plugins are run is determined by its configured priorities: +An installed plugin is first initialized. The configuration of the plugin is then checked against the defined [JSON Schema](https://json-schema.org) to make sure the plugins configuration schema is correct. -```lua -local _M = { - version = 0.1, - priority = 0, -- the priority of this plugin will be 0 - name = plugin_name, - schema = schema, - metadata_schema = metadata_schema, -} -``` +When a request goes through APISIX, the plugin's corresponding methods are executed in one or more of the following phases : `rewrite`, `access`, `before_proxy`, `header_filter`, `body_filter`, and `log`. These phases are largely influenced by the [OpenResty directives](https://openresty-reference.readthedocs.io/en/latest/Directives/). + +
+
+Routes Diagram +
+
+ +## Plugins execution order + +In general, plugins are executed in the following order: + +1. Plugins in [global rules](./global-rule.md) + 1. plugins in rewrite phase + 2. plugins in access phase + +2. Plugins bound to other objects + 1. plugins in rewrite phase + 2. plugins in access phase -A Plugin configuration is submitted as part of the Route or Service and is placed under `plugins`. It internally uses the Plugin name as the hash key to holding the configuration items for the different Plugins. +Within each phase, you can optionally define a new priority number in the `_meta.priority` field of the plugin, which takes precedence over the default plugins priority during execution. Plugins with higher priority numbers are executed first. + +For example, if you want to have `limit-count` (priority 1002) run before `ip-restriction` (priority 3000) when requests hit a route, you can do so by passing a higher priority number to `_meta.priority` field of `limit-count`: ```json { - ... - "plugins": { - "limit-count": { - "count": 2, - "time_window": 60, - "rejected_code": 503, - "key": "remote_addr" - }, - "prometheus": {} + ..., + "plugins": { + "limit-count": { + ..., + "_meta": { + "priority": 3010 + } } + } } ``` -Not all Plugins have specific configuration items (for example, [prometheus](/docs/apisix/plugins/prometheus/)). In such cases, an empty object identifier can be used. +To reset the priority of this plugin instance to the default, simply remove the `_meta.priority` field from your plugin configuration. -A warning level log as shown below indicates that the request was rejected by the Plugin. +## Plugins merging precedence -```shell -ip-restriction exits with http status code 403 -``` +When the same plugin is configured both globally in a global rule and locally in an object (e.g. a route), both plugin instances are executed sequentially. 
+ +However, if the same plugin is configured locally on multiple objects, such as on [Route](./route.md), [Service](./service.md), [Consumer](./consumer.md), [Consumer Group](./consumer-group.md), or [Plugin Config](./plugin-config.md), only one copy of configuration is used as each non-global plugin is only executed once. This is because during execution, plugins configured in these objects are merged with respect to a specific order of precedence: + +`Consumer` > `Consumer Group` > `Route` > `Plugin Config` > `Service` + +such that if the same plugin has different configurations in different objects, the plugin configuration with the highest order of precedence during merging will be used. ## Plugin common configuration @@ -276,6 +301,8 @@ curl http://127.0.0.1:9180/apisix/admin/plugins/reload -H 'X-API-KEY: edd1c9f034 If a configured Plugin is disabled, then its execution will be skipped. -### Hot reload in stand-alone mode +::: + +### Hot reload in standalone mode -For hot-reloading in stand-alone mode, see the plugin related section in [stand alone mode](../deployment-modes.md#stand-alone). +For hot-reloading in standalone mode, see the plugin related section in [stand alone mode](../deployment-modes.md#standalone). diff --git a/docs/en/latest/terminology/secret.md b/docs/en/latest/terminology/secret.md index 01901e487d72..8f0ec11a7a15 100644 --- a/docs/en/latest/terminology/secret.md +++ b/docs/en/latest/terminology/secret.md @@ -42,9 +42,9 @@ APISIX currently supports storing secrets in the following ways: You can use APISIX Secret functions by specifying format variables in the consumer configuration of the following plugins, such as `key-auth`. -::: note +:::note -If a configuration item is: `key: "$ENV://ABC"`, when the actual value corresponding to $ENV://ABC is not retrieved in APISIX Secret, the value of the key will be "$ENV://ABC" instead of `nil`. 
+If a key-value pair `key: "$ENV://ABC"` is configured in APISIX and the value of `$ENV://ABC` is unassigned in the environment variable, `$ENV://ABC` will be interpreted as a string literal, instead of `nil`. ::: diff --git a/docs/en/latest/tutorials/client-to-apisix-mtls.md b/docs/en/latest/tutorials/client-to-apisix-mtls.md index 09b19964ce01..8c3b1c66c822 100644 --- a/docs/en/latest/tutorials/client-to-apisix-mtls.md +++ b/docs/en/latest/tutorials/client-to-apisix-mtls.md @@ -89,9 +89,9 @@ curl -X PUT 'http://127.0.0.1:9180/apisix/admin/ssls/1' \ ``` - `sni`: Specify the domain name (CN) of the certificate. When the client tries to handshake with APISIX via TLS, APISIX will match the SNI data in `ClientHello` with this field and find the corresponding server certificate for handshaking. -- `cert`: The public key of the server certificate. +- `cert`: The server certificate. - `key`: The private key of the server certificate. -- `client.ca`: The public key of the client's certificate. For demonstration purposes, the same `CA` is used here. +- `client.ca`: The CA (certificate authority) file to verify the client certificate. For demonstration purposes, the same `CA` is used here. ### Configure the route in APISIX @@ -193,6 +193,131 @@ curl --resolve "test.com:9443:127.0.0.1" https://test.com:9443/anything -k --cer Since we configured the [proxy-rewrite](../plugins/proxy-rewrite.md) plugin in the example, we can see that the response body contains the request body received upstream, containing the correct data. +## MTLS bypass based on regular expression matching against URI + +APISIX allows configuring a URI whitelist to bypass MTLS. +If the URI of a request is in the whitelist, then the client certificate will not be checked. +Note that other URIs of the associated SNI will get HTTP 400 response +instead of alert error in the SSL handshake phase, if the client certificate is missing or invalid. 
+ +### Timing diagram + +![skip mtls](../../../assets/images/skip-mtls.png) + +### Example + +1. Configure route and ssl via admin API + +```bash +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "uri": "/*", + "upstream": { + "nodes": { + "httpbin.org": 1 + } + } +}' + +curl http://127.0.0.1:9180/apisix/admin/ssls/1 \ +-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "cert": "'"$( GET /uuid HTTP/2 +> Host: admin.apisix.dev:9443 +> user-agent: curl/7.68.0 +> accept: */* +> +* TLSv1.3 (IN), TLS handshake, Newsession Ticket (4): +* TLSv1.3 (IN), TLS handshake, Newsession Ticket (4): +* old SSL session ID is stale, removing +* Connection state changed (MAX_CONCURRENT_STREAMS == 128)! +< HTTP/2 400 +< date: Fri, 21 Apr 2023 07:53:23 GMT +< content-type: text/html; charset=utf-8 +< content-length: 229 +< server: APISIX/3.2.0 +< + +400 Bad Request + +

400 Bad Request

+
openresty
+

Powered by APISIX.

+ +* Connection #0 to host admin.apisix.dev left intact +``` + +3. Although the client certificate is missing, but the URI is in the whitelist, +you get successful response. + +```bash +curl https://admin.apisix.dev:9443/anything/foobar -i \ +--resolve 'admin.apisix.dev:9443:127.0.0.1' --cacert t/certs/mtls_ca.crt +HTTP/2 200 +content-type: application/json +content-length: 416 +date: Fri, 21 Apr 2023 07:58:28 GMT +access-control-allow-origin: * +access-control-allow-credentials: true +server: APISIX/3.2.0 +... +``` + ## Conclusion If you don't want to use curl or test on windows, you can read this gist for more details. [APISIX mTLS for client to APISIX](https://gist.github.com/bzp2010/6ce0bf7c15c191029ed54724547195b4). diff --git a/docs/en/latest/tutorials/expose-api.md b/docs/en/latest/tutorials/expose-api.md index c81f9f2001e5..cab143a25e63 100644 --- a/docs/en/latest/tutorials/expose-api.md +++ b/docs/en/latest/tutorials/expose-api.md @@ -111,10 +111,10 @@ curl "http://127.0.0.1:9180/apisix/admin/routes/1" \ After creating the Route, you can test the Service with the following command: ``` -curl -i -X GET "http://127.0.0.1:9080/get?foo1=bar1&foo2=bar2" -H "Host: example.com" +curl -i -X GET "http://127.0.0.1:9080/anything/get?foo1=bar1&foo2=bar2" -H "Host: example.com" ``` -APISIX will forward the request to `http://httpbin.org:80/anything/foo?arg=10`. +APISIX will forward the request to `http://httpbin.org:80/anything/get?foo1=bar1&foo2=bar2`. ## More Tutorials diff --git a/docs/en/latest/tutorials/health-check.md b/docs/en/latest/tutorials/health-check.md index 442369ebb234..3de3304c6a9a 100644 --- a/docs/en/latest/tutorials/health-check.md +++ b/docs/en/latest/tutorials/health-check.md @@ -154,7 +154,7 @@ To observe the above log information, you need to adjust the error log level to ::: -The health check status can be fetched via `GET /v1/healthcheck` in [Control API](./control-api.md). 
+The health check status can be fetched via `GET /v1/healthcheck` in [Control API](../control-api.md). ```shell diff --git a/docs/en/latest/tutorials/monitor-api-health-check.md b/docs/en/latest/tutorials/monitor-api-health-check.md new file mode 100644 index 000000000000..e90142b89cfe --- /dev/null +++ b/docs/en/latest/tutorials/monitor-api-health-check.md @@ -0,0 +1,192 @@ +--- +title: Monitor API Health Check with Prometheus +keywords: + - API Health Check + - Monitoring with Prometheus + - API Gateway +description: In this tutorial, we'll guide you on how to enable and monitor API health checks using APISIX and Prometheus. +--- + + + +[APISIX](https://apisix.apache.org/) has a [health check](https://apisix.apache.org/docs/apisix/tutorials/health-check/) mechanism, which proactively checks the health status of the upstream nodes in your system. Also, APISIX integrates with [Prometheus](https://prometheus.io/) through its [plugin](https://apisix.apache.org/docs/apisix/plugins/prometheus/) that exposes upstream nodes (multiple instances of a backend API service that APISIX manages) health check metrics on the Prometheus metrics endpoint typically, on URL path **`/apisix/prometheus/metrics`**. + +In this tutorial, we'll guide you on how to **enable and monitor API health checks** using APISIX and Prometheus. + +## Prerequisite(s) + +- Before you start, it is good to have a basic understanding of APISIX. Familiarity with [API gateway](https://apisix.apache.org/docs/apisix/terminology/api-gateway/), and its key concepts such as [routes](https://docs.api7.ai/apisix/key-concepts/routes), [upstream](https://docs.api7.ai/apisix/key-concepts/upstreams), [Admin API](https://apisix.apache.org/docs/apisix/admin-api/), [plugins](https://docs.api7.ai/apisix/key-concepts/plugins), and HTTP protocol will also be beneficial. +- [Docker](https://docs.docker.com/get-docker/) is used to install the containerized etcd and APISIX. 
+- Install [cURL](https://curl.se/) to send requests to the services for validation. + +## Start the APISIX demo project + +This project leverages the pre-defined [Docker Compose configuration](https://github.com/apache/apisix-docker/blob/master/example/docker-compose.yml) file to set up, deploy and run APISIX, etcd, Prometheus, and other services with a single command. First, clone the [apisix-docker](https://github.com/apache/apisix-docker) repo on GitHub and open it in your favorite editor, navigate to `/example` folder, and start the project by simply running `docker compose up` from the folder. + +When you start the project, Docker downloads any images it needs to run. You can see the full list of services in [docker-compose.yaml](https://github.com/apache/apisix-docker/blob/master/example/docker-compose.yml) file. + +## Add health check API endpoints in upstream + +To check API health periodically, APISIX needs an HTTP path of the health endpoint of the upstream service. So, you need first to add `/health` endpoint for your backend service. From there, you inspect the most relevant metrics for that service such as memory usage, database connectivity, response duration, and more. Assume that we have two backend REST API services web1 and web2 running using the demo project and each has its **own health check** endpoint at URL path `/health`. At this point, you do not need to make additional configurations. In reality, you can replace them with your backend services. + +> The simplest and standardized way to validate the status of a service is to define a new [health check](https://datatracker.ietf.org/doc/html/draft-inadarei-api-health-check) endpoint like `/health` or `/status` + +## Setting Up Health Checks in APISIX + +This process involves checking the operational status of the 'upstream' nodes. APISIX provides two types of health checks: **Active checks** and **Passive Checks** respectively. 
Read more about Health Checks and how to enable them [here](https://apisix.apache.org/docs/apisix/tutorials/health-check/). Use the [Admin API](https://apisix.apache.org/docs/apisix/admin-api/) to create an Upstream object. Here is an example of creating an [Upstream](https://apisix.apache.org/docs/apisix/terminology/upstream/) object with two nodes (one for each backend service we defined) and configuring the health check parameters in the upstream object: + +```bash +curl "http://127.0.0.1:9180/apisix/admin/upstreams/1" -H "X-API-KEY: edd1c9f034335f136f87ad84b625c8f1" -X PUT -d ' +{ + "nodes":{ + "web1:80":1, + "web2:80":1 + }, + "checks":{ + "active":{ + "timeout":5, + "type":"http", + "http_path":"/health", + "healthy":{ + "interval":2, + "successes":1 + }, + "unhealthy":{ + "interval":1, + "http_failures":2 + } + } + } +}' +``` + +This example configures an active health check on the **`/health`** endpoint of the node. It considers the node healthy after **one successful health check** and unhealthy **after two failed health checks**. + +> Note that sometimes you might need the IP addresses of upstream nodes, not their domains (`web1` and `web2`) if you are running services outside the Docker network. A health check will only be started if the number of nodes (resolved IPs) is greater than 1. + +## Enable the Prometheus Plugin + +Create a global rule to enable the `prometheus` plugin on all routes by adding `"prometheus": {}` in the plugins option. APISIX gathers internal runtime metrics and exposes them through port `9091` and URI path `/apisix/prometheus/metrics` by default, which Prometheus can scrape. It is also possible to customize the export port and **URI path**, **add** **extra labels, the frequency of these scrapes, and other parameters** by configuring them in the Prometheus configuration `/prometheus_conf/prometheus.yml` file.
+ +```bash +curl "http://127.0.0.1:9180/apisix/admin/global_rules" -H "X-API-KEY: edd1c9f034335f136f87ad84b625c8f1" -X PUT -d ' +{ + "id":"rule-for-metrics", + "plugins":{ + "prometheus":{ + } + } +}' +``` + +## Create a Route + +Create a [Route](https://apisix.apache.org/docs/apisix/terminology/route/) object to route incoming requests to upstream nodes: + +```bash +curl "http://127.0.0.1:9180/apisix/admin/routes/1" -H "X-API-KEY: edd1c9f034335f136f87ad84b625c8f1" -X PUT -d ' +{ + "name":"backend-service-route", + "methods":[ + "GET" + ], + "uri":"/", + "upstream_id":"1" +}' +``` + +## Send validation requests to the route + +To generate some metrics, you can send a few requests to the route we created in the previous step: + +```bash +curl -i -X GET "http://localhost:9080/" +``` + +If you run the above requests a couple of times, you can see from responses that APISIX routes some requests to `node1` and others to `node2`. That’s how Gateway load balancing works! + +```bash +HTTP/1.1 200 OK +Content-Type: text/plain; charset=utf-8 +Content-Length: 10 +Connection: keep-alive +Date: Sat, 22 Jul 2023 10:16:38 GMT +Server: APISIX/3.3.0 + +hello web2 + +... + +HTTP/1.1 200 OK +Content-Type: text/plain; charset=utf-8 +Content-Length: 10 +Connection: keep-alive +Date: Sat, 22 Jul 2023 10:16:39 GMT +Server: APISIX/3.3.0 + +hello web1 +``` + +## Collecting health check data with the Prometheus plugin + +Once the health checks and route are configured in APISIX, you can employ Prometheus to monitor health checks. APISIX **automatically exposes health check metrics data** for your APIs if the health check parameter is enabled for upstream nodes.
You will see metrics in the response after fetching them from APISIX: + +```bash +curl -i http://127.0.0.1:9091/apisix/prometheus/metrics +``` + +Example Output: + +```bash +# HELP apisix_http_requests_total The total number of client requests since APISIX started +# TYPE apisix_http_requests_total gauge +apisix_http_requests_total 119740 +# HELP apisix_http_status HTTP status codes per service in APISIX +# TYPE apisix_http_status counter +apisix_http_status{code="200",route="1",matched_uri="/",matched_host="",service="",consumer="",node="172.27.0.5"} 29 +apisix_http_status{code="200",route="1",matched_uri="/",matched_host="",service="",consumer="",node="172.27.0.7"} 12 +# HELP apisix_upstream_status Upstream status from health check +# TYPE apisix_upstream_status gauge +apisix_upstream_status{name="/apisix/upstreams/1",ip="172.27.0.5",port="443"} 0 +apisix_upstream_status{name="/apisix/upstreams/1",ip="172.27.0.5",port="80"} 1 +apisix_upstream_status{name="/apisix/upstreams/1",ip="172.27.0.7",port="443"} 0 +apisix_upstream_status{name="/apisix/upstreams/1",ip="172.27.0.7",port="80"} 1 +``` + +Health check data is represented with metrics label `apisix_upstream_status`. It has attributes like upstream `name`, `ip` and `port`. A value of 1 represents healthy and 0 means the upstream node is unhealthy. + +## Visualize the data in the Prometheus dashboard + +Navigate to http://localhost:9090/ where the Prometheus instance is running in Docker and type **Expression** `apisix_upstream_status` in the search bar. You can also see the output of the health check statuses of upstream nodes on the **Prometheus dashboard** in the table or graph view: + +![Visualize the data in Prometheus dashboard](https://static.apiseven.com/uploads/2023/07/20/OGBtqbDq_output.png) + +## Next Steps + +You have now learned how to set up and monitor API health checks with Prometheus and APISIX. 
APISIX Prometheus plugin is configured to connect [Grafana](https://grafana.com/) automatically to visualize metrics. Keep exploring the data and customize the [Grafana dashboard](https://grafana.com/grafana/dashboards/11719-apache-apisix/) by adding a panel that shows the number of active health checks. + +### Related resources + +- [Monitoring API Metrics: How to Ensure Optimal Performance of Your API?](https://api7.ai/blog/api7-portal-monitor-api-metrics) +- [Monitoring Microservices with Prometheus and Grafana](https://api7.ai/blog/introduction-to-monitoring-microservices) + +### Recommended content + +- [Implementing resilient applications with API Gateway (Health Check)](https://dev.to/apisix/implementing-resilient-applications-with-api-gateway-health-check-338c) diff --git a/docs/en/latest/tutorials/protect-api.md b/docs/en/latest/tutorials/protect-api.md index a89e5103a24e..22caa1b16080 100644 --- a/docs/en/latest/tutorials/protect-api.md +++ b/docs/en/latest/tutorials/protect-api.md @@ -33,11 +33,11 @@ This article describes secure your API with the rate limiting plugin for API Gat ### Plugin -This represents the configuration of the plugins that are executed during the HTTP request/response lifecycle. A [Plugin](./terminology/plugin.md) configuration can be bound directly to a Route, a Service, a Consumer or a Plugin Config. +This represents the configuration of the plugins that are executed during the HTTP request/response lifecycle. A [Plugin](../terminology/plugin.md) configuration can be bound directly to a Route, a Service, a Consumer or a Plugin Config. :::note -If [Route](./terminology/route.md), [Service](./terminology/service.md), [Plugin Config](./terminology/plugin-config.md) or Consumer are all bound to the same for plugins, only one plugin configuration will take effect. The priority of plugin configurations is: Consumer > Route > Plugin Config > Service. 
At the same time, there are 6 stages involved in the plugin execution process, namely `rewrite`, `access`, `before_proxy`, `header_filter`, `body_filter` and `log`. +If [Route](../terminology/route.md), [Service](../terminology/service.md), [Plugin Config](../terminology/plugin-config.md) or Consumer are all bound to the same for plugins, only one plugin configuration will take effect. The priority of plugin configurations is: Consumer > Route > Plugin Config > Service. At the same time, there are 6 stages involved in the plugin execution process, namely `rewrite`, `access`, `before_proxy`, `header_filter`, `body_filter` and `log`. ::: @@ -55,7 +55,7 @@ We can use rate limits to limit our API services to ensure the stable operation 4. Reject client requests; 5. Limit the rate of response data. -APISIX provides several plugins for limiting current and speed, including [limit-conn](./plugins/limit-conn.md), [limit-count](./plugins/limit-count.md), [limit- req](./plugins/limit-req.md) and other plugins. +APISIX provides several plugins for limiting current and speed, including [limit-conn](../plugins/limit-conn.md), [limit-count](../plugins/limit-count.md), [limit- req](../plugins/limit-req.md) and other plugins. - The `limit-conn` Plugin limits the number of concurrent requests to your services. - The `limit-req` Plugin limits the number of requests to your service using the leaky bucket algorithm. @@ -109,14 +109,14 @@ If the above result is returned, the `limit-count` plugin has taken effect and p In addition to providing plugins for limiting current and speed, APISIX also offers many other plugins to meet the needs of actual scenarios: -- [proxy-cache](./plugins/proxy-cache.md): This plugin provides the ability to cache backend response data. It can be used with other plugins. The plugin supports both disk and memory-based caching. 
Currently, the data to be cached can be specified according to the response code and request mode, and more complex caching strategies can also be configured through the no_cache and cache_bypass attributes. -- [request-validation](./plugins/request-validation.md): This plugin is used to validate requests forwarded to upstream services in advance. -- [proxy-mirror](./plugins/proxy-mirror.md): This plugin provides the ability to mirror client requests. Traffic mirroring is copying the real online traffic to the mirroring service, so that the online traffic or request content can be analyzed in detail without affecting the online service. -- [api-breaker](./plugins/api-breaker.md): This plugin implements an API circuit breaker to help us protect upstream business services. -- [traffic-split](./plugins/traffic-split.md): You can use this plugin to gradually guide the percentage of traffic between upstreams to achieve blue-green release and grayscale release. -- [request-id](./plugins/request-id.md): The plugin adds a `unique` ID to each request proxy through APISIX for tracking API requests. -- [proxy-control](./plugins/proxy-control.md): This plugin can dynamically control the behavior of NGINX proxy. -- [client-control](./plugins/client-control.md): This plugin can dynamically control how NGINX handles client requests by setting an upper limit on the client request body size. +- [proxy-cache](../plugins/proxy-cache.md): This plugin provides the ability to cache backend response data. It can be used with other plugins. The plugin supports both disk and memory-based caching. Currently, the data to be cached can be specified according to the response code and request mode, and more complex caching strategies can also be configured through the no_cache and cache_bypass attributes. +- [request-validation](../plugins/request-validation.md): This plugin is used to validate requests forwarded to upstream services in advance. 
+- [proxy-mirror](../plugins/proxy-mirror.md): This plugin provides the ability to mirror client requests. Traffic mirroring is copying the real online traffic to the mirroring service, so that the online traffic or request content can be analyzed in detail without affecting the online service. +- [api-breaker](../plugins/api-breaker.md): This plugin implements an API circuit breaker to help us protect upstream business services. +- [traffic-split](../plugins/traffic-split.md): You can use this plugin to gradually guide the percentage of traffic between upstreams to achieve blue-green release and grayscale release. +- [request-id](../plugins/request-id.md): The plugin adds a `unique` ID to each request proxy through APISIX for tracking API requests. +- [proxy-control](../plugins/proxy-control.md): This plugin can dynamically control the behavior of NGINX proxy. +- [client-control](../plugins/client-control.md): This plugin can dynamically control how NGINX handles client requests by setting an upper limit on the client request body size. ## More Tutorials diff --git a/docs/en/latest/tutorials/websocket-authentication.md b/docs/en/latest/tutorials/websocket-authentication.md new file mode 100644 index 000000000000..f77d466a8d00 --- /dev/null +++ b/docs/en/latest/tutorials/websocket-authentication.md @@ -0,0 +1,129 @@ +--- +title: WebSocket Authentication +keywords: + - API Gateway + - Apache APISIX + - WebSocket + - Authentication +description: This article is a guide on how to configure authentication for WebSocket connections. +--- + + + +Apache APISIX supports [WebSocket](https://en.wikipedia.org/wiki/WebSocket) traffic, but the WebSocket protocol doesn't handle authentication. This article guides you on how to configure authentication for WebSocket connections using Apache APISIX. 
+ +## WebSocket Protocol + +To establish a WebSocket connection, the client sends a WebSocket handshake request, for which the server returns a WebSocket handshake response as shown below: + +```text title="Client request" +GET /chat HTTP/1.1 +Host: server.example.com +Upgrade: websocket +Connection: Upgrade +Sec-WebSocket-Key: x3JJHMbDL1EzLkh9GBhXDw== +Sec-WebSocket-Protocol: chat, superchat +Sec-WebSocket-Version: 13 +Origin: http://example.com +``` + +```text title="Server response" +HTTP/1.1 101 Switching Protocols +Upgrade: websocket +Connection: Upgrade +Sec-WebSocket-Accept: HSmrc0sMlYUkAGmm5OPpG2HaGWk= +Sec-WebSocket-Protocol: chat +``` + +The handshake workflow is shown below: + +![Websocket Handshake Workflow](https://static.apiseven.com/2022/12/06/638eda2e2415f.png) + +## WebSocket Authentication + +APISIX supports several authentication methods like [basic-auth](https://apisix.apache.org/docs/apisix/plugins/basic-auth/), [key-auth](https://apisix.apache.org/docs/apisix/plugins/key-auth/), and [jwt-auth](https://apisix.apache.org/docs/apisix/plugins/jwt-auth/). + +While establishing connections from the client to server in the _handshake_ phase, APISIX first checks its authentication information before choosing to forward the request or deny it. + +## Prerequisites + +Before you move on, make sure you have: + +1. A WebSocket server as the Upstream. This article uses [Postman's public echo service](https://blog.postman.com/introducing-postman-websocket-echo-service/): `wss://ws.postman-echo.com/raw`. +2. APISIX 3.0 installed. + +## Configuring Authentication + +### Create a Route + +First we will create a Route to the Upstream echo service. + +Since the Upstream uses wss protocol, the scheme is set to `https`. We should also set `enable_websocket` to `true`. + +In this tutorial, we will use the [key-auth](https://apisix.apache.org/docs/apisix/plugins/key-auth/) Plugin. 
This would work similarly for other authentication methods: + +```shell +curl --location --request PUT 'http://127.0.0.1:9180/apisix/admin/routes/1' \ +--header 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' \ +--header 'Content-Type: application/json' \ +--data-raw '{ + "uri": "/*", + "methods": ["GET"], + "enable_websocket": true, + "upstream": { + "type": "roundrobin", + "nodes": { + "ws.postman-echo.com:443": 1 + }, + "scheme": "https" + }, + "plugins": { + "key-auth": {} + } +}' +``` + +### Create a Consumer + +We will now create a [Consumer](https://apisix.apache.org/docs/apisix/terminology/consumer/) and add a key `this_is_the_key`. A user would now need to use this key configured in the Consumer object to access the API. + +```sh +curl --location --request PUT 'http://127.0.0.1:9180/apisix/admin/consumers/jack' \ +--header 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' \ +--header 'Content-Type: application/json' \ +--data-raw '{ + "username": "jack", + "plugins": { + "key-auth": { + "key": "this_is_the_key" + } + } +}' +``` + +## Testing the Route + +Now, if you try to connect `ws://127.0.0.1:9080/raw` without the `apikey` header or an incorrect key, APISIX will return a `401 Unauthorized`. 
+ +![Connect without Key](https://static.apiseven.com/2022/12/06/638ef6db9dd4b.png) + +To authenticate, you can add the header `apikey` with the value `this_is_the_key`: + +![Connect with key](https://static.apiseven.com/2022/12/06/638efac7c42b6.png) diff --git a/docs/zh/latest/CHANGELOG.md b/docs/zh/latest/CHANGELOG.md index f8f8cad63cc4..92bd259120f9 100644 --- a/docs/zh/latest/CHANGELOG.md +++ b/docs/zh/latest/CHANGELOG.md @@ -23,6 +23,10 @@ title: CHANGELOG ## Table of Contents +- [3.5.0](#350) +- [3.4.0](#340) +- [3.3.0](#330) +- [3.2.1](#321) - [3.2.0](#320) - [3.1.0](#310) - [3.0.0](#300) @@ -68,6 +72,113 @@ title: CHANGELOG - [0.7.0](#070) - [0.6.0](#060) +## 3.5.0 + +### Change + +- :warning: request-id 插件移除雪花算法:[#9715](https://github.com/apache/apisix/pull/9715) +- :warning: 不再兼容 OpenResty 1.19 版本,需要将其升级到 1.21+ 版本:[#9913](https://github.com/apache/apisix/pull/9913) +- :warning: 删除配置项 `apisix.stream_proxy.only`,L4/L7 代理需要通过配置项 `apesix.proxy_mode` 来启用:[#9607](https://github.com/apache/apisix/pull/9607) +- :warning: admin-api 的 `/apisix/admin/plugins?all=true` 接口标记为弃用:[#9580](https://github.com/apache/apisix/pull/9580) +- :warning: ua-restriction 插件不允许同时启用黑名单和白名单:[#9841](https://github.com/apache/apisix/pull/9841) + +### Core + +- :sunrise: 支持根据 host 级别动态设置 TLS 协议版本:[#9903](https://github.com/apache/apisix/pull/9903) +- :sunrise: 支持强制删除资源:[#9810](https://github.com/apache/apisix/pull/9810) +- :sunrise: 支持从 yaml 中提取环境变量:[#9855](https://github.com/apache/apisix/pull/9855) +- :sunrise: admin-api 新增 schema validate API 校验资源配置:[#10065](https://github.com/apache/apisix/pull/10065) + +### Plugins + +- :sunrise: 新增 chaitin-waf 插件:[#9838](https://github.com/apache/apisix/pull/9838) +- :sunrise: file-logger 支持设置 var 变量:[#9712](https://github.com/apache/apisix/pull/9712) +- :sunrise: mock 插件支持添加响应头:[#9720](https://github.com/apache/apisix/pull/9720) +- :sunrise: proxy-rewrite 插件支持正则匹配 URL 编码:[#9813](https://github.com/apache/apisix/pull/9813) +- :sunrise: 
google-cloud-logging 插件支持 client_email 配置:[#9813](https://github.com/apache/apisix/pull/9813) +- :sunrise: opa 插件支持向上游发送 OPA server 返回的头:[#9710](https://github.com/apache/apisix/pull/9710) +- :sunrise: openid-connect 插件支持配置代理服务器:[#9948](https://github.com/apache/apisix/pull/9948) + +### Bugfixes + +- 修复 log-rotate 插件使用自定义名称时,max_kept 配置不起作用:[#9749](https://github.com/apache/apisix/pull/9749) +- 修复 limit_conn 在 stream 模式下非法使用 http 变量:[#9816](https://github.com/apache/apisix/pull/9816) +- 修复 loki-logger 插件在获取 log_labels 时会索引空值:[#9850](https://github.com/apache/apisix/pull/9850) +- 修复使用 limit-count 插件时,当请求被拒绝后,X-RateLimit-Reset 不应设置为 0:[#9978](https://github.com/apache/apisix/pull/9978) +- 修复 nacos 插件在运行时索引一个空值:[#9960](https://github.com/apache/apisix/pull/9960) +- 修复 etcd 在同步数据时,如果密钥有特殊字符,则同步异常:[#9967](https://github.com/apache/apisix/pull/9967) +- 修复 tencent-cloud-cls 插件 DNS 解析失败:[#9843](https://github.com/apache/apisix/pull/9843) +- 修复执行 reload 或 quit 命令时 worker 未退出:[#9909](https://github.com/apache/apisix/pull/9909) +- 修复在 traffic-split 插件中 upstream_id 有效性验证:[#10008](https://github.com/apache/apisix/pull/10008) + +## 3.4.0 + +### Core + +- :sunrise: 支持路由级别的 MTLS [#9322](https://github.com/apache/apisix/pull/9322) +- :sunrise: 支持全局规则的 id schema [#9517](https://github.com/apache/apisix/pull/9517) +- :sunrise: 支持使用单个长连接来监视 etcd 的所有资源 [#9456](https://github.com/apache/apisix/pull/9456) +- :sunrise: 支持 ssl 标签的最大长度为 256 [#9301](https://github.com/apache/apisix/pull/9301) + +### Plugins + +- :sunrise: 支持 proxy_rewrite 插件的多个正则表达式匹配 [#9194](https://github.com/apache/apisix/pull/9194) +- :sunrise: 添加 loki-logger 插件 [#9399](https://github.com/apache/apisix/pull/9399) +- :sunrise: 允许用户为 prometheus 插件配置 DEFAULT_BUCKETS [#9673](https://github.com/apache/apisix/pull/9673) + +### Bugfixes + +- 修复 (body-transformer):xml2lua 将空表替换为空字符串 [#9669](https://github.com/apache/apisix/pull/9669) +- 修复:opentelemetry 和 grpc-transcode 插件无法同时启用 
[#9606](https://github.com/apache/apisix/pull/9606) +- 修复 (skywalking-logger, error-log-logger):支持在 skywalking service_instance_name 中使用 $hostname [#9401](https://github.com/apache/apisix/pull/9401) +- 修复 (admin):修复 secrets 不支持通过 PATCH 更新属性 [#9510](https://github.com/apache/apisix/pull/9510) +- 修复 (http-logger):默认请求路径应为'/' [#9472](https://github.com/apache/apisix/pull/9472) +- 修复:syslog 插件不起作用 [#9425](https://github.com/apache/apisix/pull/9425) +- 修复:splunk-hec-logging 的日志格式错误 [#9478](https://github.com/apache/apisix/pull/9478) +- 修复:etcd 复用 cli 并启用 keepalive [#9420](https://github.com/apache/apisix/pull/9420) +- 修复:upstream key 添加 mqtt_client_id 支持 [#9450](https://github.com/apache/apisix/pull/9450) +- 修复:body-transformer 插件总是返回原始 body [#9446](https://github.com/apache/apisix/pull/9446) +- 修复:当 consumer 使用 wolf-rbac 插件时,consumer 中的其他插件无效 [#9298](https://github.com/apache/apisix/pull/9298) +- 修复:当 host 是域名时,总是解析域名 [#9332](https://github.com/apache/apisix/pull/9332) +- 修复:response-rewrite 插件不能只添加一个字符 [#9372](https://github.com/apache/apisix/pull/9372) +- 修复:consul 支持只获取 health endpoint [#9204](https://github.com/apache/apisix/pull/9204) + +## 3.3.0 + +### Change + +- 默认路由从 `radixtree_uri` 修改为 `radixtree_host_uri`: [#9047](https://github.com/apache/apisix/pull/9047) +- CORS 插件将会在 `allow_origin` 不为 `*` 时默认添加 `Vary: Origin` 响应头:[#9010](https://github.com/apache/apisix/pull/9010) + +### Core + +- :sunrise: 支持将路由证书存储在 secrets manager 中:[#9247](https://github.com/apache/apisix/pull/9247) +- :sunrise: 支持通过配置绕过 Admin API 身份验证:[#9147](https://github.com/apache/apisix/pull/9147) + +### Plugins + +- :sunrise: fault-injection 插件支持请求头注入:[#9039](https://github.com/apache/apisix/pull/9039) +- :sunrise: 提供在其他插件中引用 proxy-rewrite 插件中路由改写捕捉到的变量支持:[#9112](https://github.com/apache/apisix/pull/9112) +- :sunrise: limit-count 插件提供 `username` 与 `ssl` redis 认证方式:[#9185](https://github.com/apache/apisix/pull/9185) + +### Bugfixes + +- 修复 etcd 
数据同步异常:[#8493](https://github.com/apache/apisix/pull/8493) +- 修复在 `core.request.add_header` 中的无效缓存:[#8824](https://github.com/apache/apisix/pull/8824) +- 修复由健康检查引起的高 CPU 和内存占用:[#9015](https://github.com/apache/apisix/pull/9015) +- 仅当 `allow_origins_by_regex` 不为 `nil` 时生效:[#9028](https://github.com/apache/apisix/pull/9028) +- 在删除 upstream 时,检查 `traffic-split` 插件中的引用:[#9044](https://github.com/apache/apisix/pull/9044) +- 修复启动时无法连接到 etcd 的问题:[#9077](https://github.com/apache/apisix/pull/9077) +- 修复域节点的健康检查泄漏问题:[#9090](https://github.com/apache/apisix/pull/9090) +- 禁止非 `127.0.0.0/24` 的用户在没有 admin_key 的情况下访问 Admin API: [#9146](https://github.com/apache/apisix/pull/9146) +- 确保 hold_body_chunk 函数对每个插件设置独立缓冲区,避免数据污染:[#9266](https://github.com/apache/apisix/pull/9266) +- 确保 batch-requests 插件能够在尾部响应头存在时能够正确读取:[#9289](https://github.com/apache/apisix/pull/9289) +- 确保 `proxy-rewrite` 改写 `ngx.var.uri`: [#9309](https://github.com/apache/apisix/pull/9309) + +## 3.2.1 + +**这是一个 LTS 维护版本,您可以在 `release/3.2` 分支中看到 CHANGELOG。** + ## 3.2.0 ### Change @@ -1264,15 +1375,15 @@ title: CHANGELOG ### Core -- :sunrise: **[增加单机模式](https://github.com/apache/incubator-apisix/blob/master/docs/zh/latest//stand-alone-cn.md)**: 使用 yaml 配置文件来更新 APISIX 的配置,这对于 kubernetes 更加友好。 [#464](https://github.com/apache/incubator-apisix/pull/464) -- :sunrise: **[支持 stream 代理](https://github.com/apache/incubator-apisix/blob/master/docs/zh/latest//stream-proxy-cn.md)**. [#513](https://github.com/apache/incubator-apisix/pull/513) -- :sunrise: 支持[在 consumer 上绑定插件](https://github.com/apache/incubator-apisix/blob/master/docs/zh/latest//architecture-design-cn.md#consumer). 
[#544](https://github.com/apache/incubator-apisix/pull/544) +- :sunrise: **[增加单机模式](https://github.com/apache/incubator-apisix/blob/master/docs/en/latest/deployment-modes.md#Standalone)**: 使用 yaml 配置文件来更新 APISIX 的配置,这对于 kubernetes 更加友好。 [#464](https://github.com/apache/incubator-apisix/pull/464) +- :sunrise: **[支持 stream 代理](https://github.com/apache/incubator-apisix/blob/master/docs/zh/latest/stream-proxy.md)**. [#513](https://github.com/apache/incubator-apisix/pull/513) +- :sunrise: 支持[在 consumer 上绑定插件](https://github.com/apache/incubator-apisix/blob/master/docs/zh/latest/terminology/consumer.md). [#544](https://github.com/apache/incubator-apisix/pull/544) - 上游增加对域名的支持,而不仅是 IP。[#522](https://github.com/apache/incubator-apisix/pull/522) - 当上游节点的权重为 0 时自动忽略。[#536](https://github.com/apache/incubator-apisix/pull/536) ### Plugins -- :sunrise: **[MQTT 代理](https://github.com/apache/incubator-apisix/blob/master/docs/zh/latest//plugins/mqtt-proxy-cn.md)**: 支持用 `client_id` 对 MQTT 进行负载均衡,同时支持 MQTT 3.1 和 5.0 两个协议标准。 [#513](https://github.com/apache/incubator-apisix/pull/513) +- :sunrise: **[MQTT 代理](https://github.com/apache/incubator-apisix/blob/master/docs/zh/latest//plugins/mqtt-proxy.md)**: 支持用 `client_id` 对 MQTT 进行负载均衡,同时支持 MQTT 3.1 和 5.0 两个协议标准。 [#513](https://github.com/apache/incubator-apisix/pull/513) - [proxy-rewrite](https://github.com/apache/incubator-apisix/blob/master/docs/zh/latest//plugins/proxy-rewrite.md): 对代理到上游的请求进行改写,包括 host, uri 和 schema。 [#594](https://github.com/apache/incubator-apisix/pull/594) ### ARM @@ -1305,7 +1416,7 @@ title: CHANGELOG ### Core -- :sunrise: **[gRPC 协议转换](https://github.com/apache/incubator-apisix/blob/master/docs/zh/latest//plugins/grpc-transcoding-cn.md)**: 支持 gRPC 协议的转换,这样客户端可以通过 HTTP/JSON 来访问你的 gRPC API. 
[#395](https://github.com/apache/incubator-apisix/issues/395) +- :sunrise: **[gRPC 协议转换](https://github.com/apache/incubator-apisix/blob/master/docs/zh/latest//plugins/grpc-transcode.md)**: 支持 gRPC 协议的转换,这样客户端可以通过 HTTP/JSON 来访问你的 gRPC API. [#395](https://github.com/apache/incubator-apisix/issues/395) - :sunrise: **[radix tree 路由](https://github.com/apache/incubator-apisix/blob/master/docs/zh/latest//router-radixtree.md)**: 默认的路由器更改为 radix tree,支持把 uri、host、cookie、请求头、请求参数、Nginx 内置变量等作为路由的条件,并支持等于、大于、小于等常见操作符,更加强大和灵活。**需要注意的是,这个改动不向下兼容,所有使用历史版本的用户,需要手动修改路由才能正常使用**。[#414](https://github.com/apache/incubator-apisix/issues/414) - 动态上游支持更多的参数,可以指定上游的 uri 和 host,以及是否开启 websocket. [#451](https://github.com/apache/incubator-apisix/pull/451) - 支持从 `ctx.var` 中直接获取 cookie 中的值。[#449](https://github.com/apache/incubator-apisix/pull/449) @@ -1313,9 +1424,9 @@ title: CHANGELOG ### Plugins -- :sunrise: **[serverless](https://github.com/apache/incubator-apisix/blob/master/docs/zh/latest//plugins/serverless-cn.md)**: 支持 serverless,用户可以把任意 Lua 函数动态的在网关节点上运行。用户也可以把这个功能当做是轻量级的插件来使用。[#86](https://github.com/apache/incubator-apisix/pull/86) +- :sunrise: **[serverless](https://github.com/apache/incubator-apisix/blob/master/docs/zh/latest//plugins/serverless.md)**: 支持 serverless,用户可以把任意 Lua 函数动态的在网关节点上运行。用户也可以把这个功能当做是轻量级的插件来使用。[#86](https://github.com/apache/incubator-apisix/pull/86) - :sunrise: **IdP 支持**: 支持外部的身份认证服务,比如 Auth0,okta 等,用户可以借此来对接 Oauth2.0 等认证方式。 [#447](https://github.com/apache/incubator-apisix/pull/447) -- [限流限速](https://github.com/apache/incubator-apisix/blob/master/docs/zh/latest//plugins/limit-conn-cn.md)支持更多的限制 key,比如 X-Forwarded-For 和 X-Real-IP,并且允许用户把 Nginx 变量、请求头和请求参数作为 key. [#228](https://github.com/apache/incubator-apisix/issues/228) +- [限流限速](https://github.com/apache/incubator-apisix/blob/master/docs/zh/latest//plugins/limit-conn.md)支持更多的限制 key,比如 X-Forwarded-For 和 X-Real-IP,并且允许用户把 Nginx 变量、请求头和请求参数作为 key. 
[#228](https://github.com/apache/incubator-apisix/issues/228) - [IP 黑白名单](https://github.com/apache/incubator-apisix/blob/master/docs/zh/latest//plugins/ip-restriction.md) 支持 IP 黑白名单,提供更高的安全性。[#398](https://github.com/apache/incubator-apisix/pull/398) ### CLI @@ -1341,7 +1452,7 @@ title: CHANGELOG ### Core -- :sunrise: **[健康检查和服务熔断](https://github.com/apache/incubator-apisix/blob/master/docs/zh/latest/tutorials/health-check..md)**: 对上游节点开启健康检查,智能判断服务状态进行熔断和连接。[#249](https://github.com/apache/incubator-apisix/pull/249) +- :sunrise: **[健康检查和服务熔断](https://github.com/apache/incubator-apisix/blob/master/docs/zh/latest/tutorials/health-check.md)**: 对上游节点开启健康检查,智能判断服务状态进行熔断和连接。[#249](https://github.com/apache/incubator-apisix/pull/249) - 阻止 ReDoS(Regular expression Denial of Service). [#252](https://github.com/apache/incubator-apisix/pull/250) - 支持 debug 模式。[#319](https://github.com/apache/incubator-apisix/pull/319) - 允许自定义路由。[#364](https://github.com/apache/incubator-apisix/pull/364) @@ -1352,7 +1463,7 @@ title: CHANGELOG ### Plugins - :sunrise: **[分布式追踪 OpenTracing](https://github.com/apache/incubator-apisix/blob/master/docs/zh/latest//plugins/zipkin.md)**: 支持 Zipkin 和 Apache SkyWalking. [#304](https://github.com/apache/incubator-apisix/pull/304) -- [JWT 认证](https://github.com/apache/incubator-apisix/blob/master/docs/zh/latest//plugins/jwt-auth-cn.md). [#303](https://github.com/apache/incubator-apisix/pull/303) +- [JWT 认证](https://github.com/apache/incubator-apisix/blob/master/docs/zh/latest//plugins/jwt-auth.md). [#303](https://github.com/apache/incubator-apisix/pull/303) ### CLI diff --git a/docs/zh/latest/FAQ.md b/docs/zh/latest/FAQ.md index 923cf22f5cf4..944f96195420 100644 --- a/docs/zh/latest/FAQ.md +++ b/docs/zh/latest/FAQ.md @@ -109,7 +109,7 @@ luarocks config rocks_servers make deps ENV_LUAROCKS_SERVER=https://luarocks.cn ``` -如果通过上述操作仍然无法解决问题,可以尝试使用 `--verbose` 参数获取详细的日志来诊断问题。 +如果通过上述操作仍然无法解决问题,可以尝试使用 `--verbose` 或 `-v` 参数获取详细的日志来诊断问题。 ## 如何构建 APISIX-Base 环境? 
diff --git a/docs/zh/latest/README.md b/docs/zh/latest/README.md index 7de385278b6b..f254998c7ddb 100644 --- a/docs/zh/latest/README.md +++ b/docs/zh/latest/README.md @@ -131,14 +131,14 @@ A/B 测试、金丝雀发布(灰度发布)、蓝绿部署、限流限速、 - **运维友好** - OpenTracing 可观测性:支持 [Apache Skywalking](plugins/skywalking.md) 和 [Zipkin](plugins/zipkin.md)。 - - 对接外部服务发现:除了内置的 etcd 外,还支持 [Consul](../../en/latest/discovery/consul_kv.md)、[Nacos](discovery/nacos.md)、[Eureka](discovery/eureka.md) 和 [Zookeeper(CP)](../../en/latest/discovery/zookeeper.md)。 + - 对接外部服务发现:除了内置的 etcd 外,还支持 [Consul](../../en/latest/discovery/consul_kv.md)、[Nacos](discovery/nacos.md)、[Eureka](discovery/eureka.md) 和 [Zookeeper(CP)](https://github.com/api7/apisix-seed/blob/main/docs/en/latest/zookeeper.md)。 - 监控和指标:[Prometheus](plugins/prometheus.md) - 集群:APISIX 节点是无状态的,创建配置中心集群请参考 [etcd Clustering Guide](https://etcd.io/docs/v3.5/op-guide/clustering/)。 - 高可用:支持配置同一个集群内的多个 etcd 地址。 - [控制台](https://github.com/apache/apisix-dashboard): 操作 APISIX 集群。 - 版本控制:支持操作的多次回滚。 - CLI:使用命令行来启动、关闭和重启 APISIX。 - - [单机模式](stand-alone.md):支持从本地配置文件中加载路由规则,在 kubernetes(k8s) 等环境下更友好。 + - [单机模式](../../en/latest/deployment-modes.md#standalone):支持从本地配置文件中加载路由规则,在 kubernetes(k8s) 等环境下更友好。 - [全局规则](terminology/global-rule.md):允许对所有请求执行插件,比如黑白名单、限流限速等。 - 高性能:在单核上 QPS 可以达到 18k,同时延迟只有 0.2 毫秒。 - [故障注入](plugins/fault-injection.md) @@ -160,8 +160,8 @@ A/B 测试、金丝雀发布(灰度发布)、蓝绿部署、限流限速、 - **Serverless** - [Lua functions](plugins/serverless.md):能在 APISIX 每个阶段调用 lua 函数。 - - [Azure functions](docs/en/latest/plugins/azure-functions.md):能无缝整合进 Azure Serverless Function 中。作为动态上游,能将特定的 URI 请求全部代理到微软 Azure 云中。 - - [Apache OpenWhisk](docs/en/latest/plugins/openwhisk.md):与 Apache OpenWhisk 集成。作为动态上游,能将特定的 URI 请求代理到你自己的 OpenWhisk 集群。 + - [Azure functions](./plugins/azure-functions.md):能无缝整合进 Azure Serverless Function 中。作为动态上游,能将特定的 URI 请求全部代理到微软 Azure 云中。 + - [Apache OpenWhisk](./plugins/openwhisk.md):与 Apache OpenWhisk 集成。作为动态上游,能将特定的 URI 请求代理到你自己的 OpenWhisk 集群。 ## 立刻开始 @@ 
-213,7 +213,7 @@ A/B 测试、金丝雀发布(灰度发布)、蓝绿部署、限流限速、 ## 用户实际使用案例 -- [新浪微博:基于 Apache APISIX,新浪微博 API 网关的定制化开发之路](https://apisix.apache.org/blog/2021/07/14/the-road-to-customization-of-Sina-Weibo-API-gateway-based-on-Apache-APISIX) +- [新浪微博:基于 Apache APISIX,新浪微博 API 网关的定制化开发之路](https://apisix.apache.org/zh/blog/2021/07/06/the-road-to-customization-of-sina-weibo-api-gateway-based-on-apache-apisix/) - [欧盟数字工厂平台:API Security Gateway – Using APISIX in the eFactory Platform](https://www.efactory-project.eu/post/api-security-gateway-using-apisix-in-the-efactory-platform) - [贝壳找房:如何基于 Apache APISIX 搭建网关](https://mp.weixin.qq.com/s/yZl9MWPyF1-gOyCp8plflA) - [360:Apache APISIX 在基础运维平台项目中的实践](https://mp.weixin.qq.com/s/mF8w8hW4alIMww0MSu9Sjg) @@ -229,7 +229,7 @@ A/B 测试、金丝雀发布(灰度发布)、蓝绿部署、限流限速、 -欢迎用户把自己加入到 [Powered By](https://github.com/apache/apisix/blob/master/powered-by.md) 页面。 +欢迎用户把自己加入到 [Powered By](../../../powered-by.md) 页面。 ## 全景图 diff --git a/docs/zh/latest/admin-api.md b/docs/zh/latest/admin-api.md index d46bd09f4b17..acef9a2ad107 100644 --- a/docs/zh/latest/admin-api.md +++ b/docs/zh/latest/admin-api.md @@ -65,7 +65,7 @@ deployment: port: 9180 # Admin API 监听的 端口,必须使用与 node_listen 不同的端口。 ``` -### 使用环境变量 {$using-environment-variables} +### 使用环境变量 {#using-environment-variables} 要通过环境变量进行配置,可以使用 `${{VAR}}` 语法。例如: @@ -105,6 +105,46 @@ deployment: 首先查找环境变量 `ADMIN_KEY`,如果该环境变量不存在,它将使用 `edd1c9f034335f136f87ad84b625c8f1` 作为默认值。 +您还可以在 yaml 键中指定环境变量。这在 `standalone` 模式 中特别有用,您可以在其中指定上游节点,如下所示: + +```yaml title="./conf/apisix.yaml" +routes: + - + uri: "/test" + upstream: + nodes: + "${{HOST_IP}}:${{PORT}}": 1 + type: roundrobin +#END +``` + +### 强制删除 {#force-delete} + +默认情况下,Admin API 会检查资源间的引用关系,将会拒绝删除正在使用中的资源。 + +可以通过在删除请求中添加请求参数 `force=true` 来进行强制删除,例如: + +```bash +$ curl http://127.0.0.1:9180/apisix/admin/upstreams/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '{ + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" +}' +$ curl 
http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '{ + "uri": "/*", + "upstream_id": 1 +}' +{"value":{"priority":0,"upstream_id":1,"uri":"/*","create_time":1689038794,"id":"1","status":1,"update_time":1689038916},"key":"/apisix/routes/1"} + +$ curl http://127.0.0.1:9180/apisix/admin/upstreams/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X DELETE +{"error_msg":"can not delete this upstream, route [1] is still using it now"} +$ curl "http://127.0.0.1:9180/apisix/admin/upstreams/1?force=anyvalue" -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X DELETE +{"error_msg":"can not delete this upstream, route [1] is still using it now"} +$ curl "http://127.0.0.1:9180/apisix/admin/upstreams/1?force=true" -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X DELETE +{"deleted":"1","key":"/apisix/upstreams/1"} +``` + ## v3 版本新功能 {#v3-new-function} 在 APISIX v3 版本中,Admin API 支持了一些不向下兼容的新特性,比如支持新的响应体格式、支持分页查询、支持过滤资源等。 @@ -118,7 +158,7 @@ APISIX 在 v3 版本对响应体做了以下调整: 返回单个资源: - ```json +```json { "modifiedIndex": 2685183, "value": { @@ -128,11 +168,11 @@ APISIX 在 v3 版本对响应体做了以下调整: "key": "/apisix/routes/1", "createdIndex": 2684956 } - ``` +``` 返回多个资源: - ```json +```json { "list": [ { @@ -156,7 +196,7 @@ APISIX 在 v3 版本对响应体做了以下调整: ], "total": 2 } - ``` +``` ### 支持分页查询 {#support-paging-query} @@ -868,10 +908,10 @@ APISIX 的 Upstream 除了基本的负载均衡算法选择外,还支持对上 | service_name | 是,与 `nodes` 二选一。 | string | 服务发现时使用的服务名,请参考 [集成服务发现注册中心](./discovery.md)。 | `a-bootiful-client` | | discovery_type | 是,与 `service_name` 配合使用。 | string | 服务发现类型,请参考 [集成服务发现注册中心](./discovery.md)。 | `eureka` | | key | 条件必需 | 匹配类型 | 该选项只有类型是 `chash` 才有效。根据 `key` 来查找对应的节点 `id`,相同的 `key` 在同一个对象中,则返回相同 id。目前支持的 NGINX 内置变量有 `uri, server_name, server_addr, request_uri, remote_port, remote_addr, query_string, host, hostname, arg_***`,其中 `arg_***` 是来自 URL 的请求参数,详细信息请参考 [NGINX 变量列表](http://nginx.org/en/docs/varindex.html)。 | | -| checks | 否 | health_checker | 配置健康检查的参数,详细信息请参考 
[health-check](health-check.md)。 | | +| checks | 否 | health_checker | 配置健康检查的参数,详细信息请参考 [health-check](./tutorials/health-check.md)。 | | | retries | 否 | 整型 | 使用 NGINX 重试机制将请求传递给下一个上游,默认启用重试机制且次数为后端可用的节点数量。如果指定了具体重试次数,它将覆盖默认值。当设置为 `0` 时,表示不启用重试机制。 | | | retry_timeout | 否 | number | 限制是否继续重试的时间,若之前的请求和重试请求花费太多时间就不再继续重试。当设置为 `0` 时,表示不启用重试超时机制。 | | -| timeout | 否 | 超时时间对象 | 设置连接、发送消息、接收消息的超时时间,以秒为单位。 | | +| timeout | 否 | 超时时间对象 | 设置连接、发送消息、接收消息的超时时间,以秒为单位。| `{"connect": 0.5,"send": 0.5,"read": 0.5}` | | hash_on | 否 | 辅助 | `hash_on` 支持的类型有 `vars`(NGINX 内置变量),`header`(自定义 header),`cookie`,`consumer`,默认值为 `vars`。 | | name | 否 | 辅助 | 标识上游服务名称、使用场景等。 | | | desc | 否 | 辅助 | 上游服务描述、使用场景等。 | | @@ -902,7 +942,6 @@ APISIX 的 Upstream 除了基本的负载均衡算法选择外,还支持对上 - 设为 `header` 时,`key` 为必传参数,其值为自定义的 Header name,即 "http\_`key`"。 - 设为 `cookie` 时,`key` 为必传参数,其值为自定义的 cookie name,即 "cookie\_`key`"。请注意 cookie name 是**区分大小写字母**的。例如:`cookie_x_foo` 与 `cookie_X_Foo` 表示不同的 `cookie`。 - 设为 `consumer` 时,`key` 不需要设置。此时哈希算法采用的 `key` 为认证通过的 `consumer_name`。 -- 如果指定的 `hash_on` 和 `key` 获取不到值时,使用默认值:`remote_addr`。 以下特性需要 APISIX 运行于 [APISIX-Base](./FAQ.md#如何构建-APISIX-Base-环境?): @@ -1162,18 +1201,20 @@ SSL 资源请求地址:/apisix/admin/ssls/{id} | 名称 | 必选项 | 类型 | 描述 | 示例 | | ----------- | ------ | -------------- | ------------------------------------------------------------------------------------------------------ | ------------------------------------------------ | -| cert | 是 | 证书 | HTTP 证书。 | | -| key | 是 | 私钥 | HTTPS 证书私钥 | | +| cert | 是 | 证书 | HTTP 证书。该字段支持使用 [APISIX Secret](./terminology/secret.md) 资源,将值保存在 Secret Manager 中。 | | +| key | 是 | 私钥 | HTTPS 证书私钥。该字段支持使用 [APISIX Secret](./terminology/secret.md) 资源,将值保存在 Secret Manager 中。 | | | certs | 否 | 证书字符串数组 | 当你想给同一个域名配置多个证书时,除了第一个证书需要通过 `cert` 传递外,剩下的证书可以通过该参数传递上来。 | | | keys | 否 | 私钥字符串数组 | `certs` 对应的证书私钥,需要与 `certs` 一一对应。 | | | client.ca | 否 | 证书 | 设置将用于客户端证书校验的 `CA` 证书。该特性需要 OpenResty 为 1.19 及以上版本。 | | | client.depth | 否 | 辅助 | 设置客户端证书校验的深度,默认为 1。该特性需要 
OpenResty 为 1.19 及以上版本。 | | +| client.skip_mtls_uri_regex | 否 | PCRE 正则表达式数组 | 用来匹配请求的 URI,如果匹配,则该请求将绕过客户端证书的检查,也就是跳过 MTLS。 | ["/hello[0-9]+", "/foobar"] | | snis | 是 | 匹配规则 | 非空数组形式,可以匹配多个 SNI。 | | | labels | 否 | 匹配规则 | 标识附加属性的键值对。 | {"version":"v2","build":"16","env":"production"} | | create_time | 否 | 辅助 | epoch 时间戳,单位为秒。如果不指定则自动创建。 | 1602883670 | | update_time | 否 | 辅助 | epoch 时间戳,单位为秒。如果不指定则自动创建。 | 1602883670 | | type | 否 | 辅助 | 标识证书的类型,默认值为 `server`。 | `client` 表示证书是客户端证书,APISIX 访问上游时使用;`server` 表示证书是服务端证书,APISIX 验证客户端请求时使用。 | | status | 否 | 辅助 | 当设置为 `1` 时,启用此 SSL,默认值为 `1`。 | `1` 表示启用,`0` 表示禁用 | +| ssl_protocols | 否 | tls 协议字符串数组 | 用于控制服务器与客户端之间使用的 SSL/TLS 协议版本。更多的配置示例,请参考[SSL 协议](./ssl-protocol.md)。 | | SSL 对象 JSON 配置示例: @@ -1318,6 +1359,14 @@ Content-Type: text/plain Plugin 资源请求地址:/apisix/admin/plugins/{plugin_name} +### 请求参数 + +| 名称 | 描述 | 默认 | +| --------- | -------------------------------------- | -------- | +| subsystem | 插件子系统。 | http | + +可以在子系统上过滤插件,以便在通过查询参数传递的子系统中搜索 ({plugin_name}) + ### 请求方法 {#plugin-request-methods} | 名称        | 请求 URI | 请求 body | 描述          | @@ -1326,6 +1375,16 @@ Plugin 资源请求地址:/apisix/admin/plugins/{plugin_name} | GET       | /apisix/admin/plugins/{plugin_name} | 无 | 获取资源。 | | GET | /apisix/admin/plugins?all=true | 无 | 获取所有插件的所有属性。 | | GET | /apisix/admin/plugins?all=true&subsystem=stream| 无 | 获取所有 Stream 插件的属性。| +| GET | /apisix/admin/plugins?all=true&subsystem=http| 无 | 获取所有 HTTP 插件的属性。| +| PUT | /apisix/admin/plugins/reload | 无 | 根据代码中所做的更改重新加载插件。 | +| GET | apisix/admin/plugins/{plugin_name}?subsystem=stream | 无 | 获取指定 Stream 插件的属性。 | +| GET | apisix/admin/plugins/{plugin_name}?subsystem=http | 无 | 获取指定 HTTP 插件的属性。 | + +:::caution + +获取所有插件属性的接口 `/apisix/admin/plugins?all=true` 将很快被弃用。 + +::: ### 使用示例 {#plugin-example} @@ -1345,7 +1404,7 @@ Plugin 资源请求地址:/apisix/admin/plugins/{plugin_name} - 获取指定插件的属性 ```shell - curl "http://127.0.0.1:9180/apisix/admin/plugins/key-auth" \ + curl 
"http://127.0.0.1:9180/apisix/admin/plugins/key-auth?subsystem=http" \ -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' ``` @@ -1357,7 +1416,7 @@ Plugin 资源请求地址:/apisix/admin/plugins/{plugin_name} 你可以使用 `/apisix/admin/plugins?all=true` 接口获取所有插件的所有属性,每个插件包括 `name`,`priority`,`type`,`schema`,`consumer_schema` 和 `version`。 -默认情况下,该接口只返回 HTTP 插件。如果你需要获取 Stream 插件,需要使用 `/apisix/admin/plugins?all=true&subsystem=stream`。 +您可以使用“/apisix/admin/plugins?all=true”获取所有插件的所有属性。这个 API 将很快被弃用 ::: @@ -1385,11 +1444,74 @@ Plugin 资源请求地址:/apisix/admin/stream_routes/{id} | ---------------- | ------| -------- | ------------------------------------------------------------------------------| ------ | | upstream | 否 | Upstream | Upstream 配置,详细信息请参考 [Upstream](terminology/upstream.md)。 | | | upstream_id | 否 | Upstream | 需要使用的 Upstream id,详细信息请 [Upstream](terminology/upstream.md)。 | | -| remote_addr | 否 | IP/CIDR | 过滤选项:如果客户端 IP 匹配,则转发到上游 | "127.0.0.1/32" 或 "127.0.0.1" | -| server_addr | 否 | IP/CIDR | 过滤选项:如果 APISIX 服务器的 IP 与 `server_addr` 匹配,则转发到上游。 | "127.0.0.1/32" 或 "127.0.0.1" | +| remote_addr | 否 | IPv4, IPv4 CIDR, IPv6 | 过滤选项:如果客户端 IP 匹配,则转发到上游 | "127.0.0.1" 或 "127.0.0.1/32" 或 "::1" | +| server_addr | 否 | IPv4, IPv4 CIDR, IPv6 | 过滤选项:如果 APISIX 服务器的 IP 与 `server_addr` 匹配,则转发到上游。 | "127.0.0.1" 或 "127.0.0.1/32" 或 "::1" | | server_port | 否 | 整数 | 过滤选项:如果 APISIX 服务器的端口 与 `server_port` 匹配,则转发到上游。 | 9090 | | sni | 否 | Host | 服务器名称。 | "test.com" | | protocol.name | 否 | 字符串 | xRPC 框架代理的协议的名称。 | "redis" | | protocol.conf | 否 | 配置 | 协议特定的配置。 | | 你可以查看 [Stream Proxy](./stream-proxy.md#更多-route-匹配选项) 了解更多过滤器的信息。 + +## Secret + +Secret 指的是 `Secrets Management`(密钥管理),可以使用任何支持的密钥管理器,例如 `vault`。 + +### 请求地址 {#secret-config-uri} + +Secret 资源请求地址:/apisix/admin/secrets/{secretmanager}/{id} + +### 请求方法 {#secret-config-request-methods} + +| 名称 | 请求 URI | 请求 body | 描述 | +| :--: | :----------------------------: | :---: | :---------------------------------------: | +| GET | /apisix/admin/secrets | NULL | 获取所有 
secret 的列表。 | +| GET | /apisix/admin/secrets/{manager}/{id} | NULL | 根据 id 获取指定的 secret。 | +| PUT | /apisix/admin/secrets/{manager} | {...} | 创建新的 secret 配置。 | +| DELETE | /apisix/admin/secrets/{manager}/{id} | NULL | 删除具有指定 id 的 secret。 | +| PATCH | /apisix/admin/secrets/{manager}/{id} | {...} | 更新指定 secret 的选定属性。如果要删除一个属性,可以将该属性的值设置为 null。| +| PATCH | /apisix/admin/secrets/{manager}/{id}/{path} | {...} | 更新路径中指定的属性。其他属性的值保持不变。 + +### body 请求参数 {#secret-config-body-requset-parameters} + +当 `{secretmanager}` 是 `vault` 时: + +| 名称 | 必选项 | 类型 | 描述 | 例子 | +| ----------- | -------- | ----------- | ------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------ | +| uri | 是 | URI | Vault 服务器的 URI | | +| prefix | 是 | 字符串 | 密钥前缀 +| token | 是 | 字符串 | Vault 令牌 | | + +配置示例: + +```shell +{ + "uri": "https://localhost/vault", + "prefix": "/apisix/kv", + "token": "343effad" +} + +``` + +使用示例: + +```shell +curl -i http://127.0.0.1:9180/apisix/admin/secrets/vault/test2 \ +-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "uri": "http://xxx/get", + "prefix" : "apisix", + "token" : "apisix" +}' +``` + +```shell +HTTP/1.1 200 OK +... 
+ +{"key":"\/apisix\/secrets\/vault\/test2","value":{"id":"vault\/test2","token":"apisix","prefix":"apisix","update_time":1669625828,"create_time":1669625828,"uri":"http:\/\/xxx\/get"}} +``` + +### 应答参数 {#secret-config-response-parameters} + +当前的响应是从 etcd 返回的。 diff --git a/docs/zh/latest/building-apisix.md b/docs/zh/latest/building-apisix.md index 39e322421abb..2f844d473f48 100644 --- a/docs/zh/latest/building-apisix.md +++ b/docs/zh/latest/building-apisix.md @@ -53,7 +53,7 @@ curl https://raw.githubusercontent.com/apache/apisix/master/utils/install-depend 然后,创建一个目录并设置环境变量 `APISIX_VERSION`: ```shell -APISIX_VERSION='3.2.0' +APISIX_VERSION='3.5.0' mkdir apisix-${APISIX_VERSION} ``` @@ -273,4 +273,4 @@ APISIX 的一些特性需要在 OpenResty 中引入额外的 NGINX 模块。 prove -Itest-nginx/lib -r t/plugin/openid-connect.t ``` -如果你想要了解更多信息,请参考 [testing framework](https://github.com/apache/apisix/blob/master/docs/en/latest/internal/testing-framework.md)。 +如果你想要了解更多信息,请参考 [testing framework](../../en/latest/internal/testing-framework.md)。 diff --git a/docs/zh/latest/certificate.md b/docs/zh/latest/certificate.md index 7c2648458e23..1d5c8a825f9c 100644 --- a/docs/zh/latest/certificate.md +++ b/docs/zh/latest/certificate.md @@ -33,85 +33,79 @@ SNI(Server Name Indication)是用来改善 SSL 和 TLS 的一项特性,它 * `key`:SSL 密钥对的私钥,pem 格式 * `snis`:SSL 证书所指定的一个或多个域名,注意在设置这个参数之前,你需要确保这个证书对应的私钥是有效的。 -为了简化示例,我们会使用下面的 Python 脚本: - -```python title="create-ssl.py" -#!/usr/bin/env python -# coding: utf-8 -import sys -# sudo pip install requests -import requests - -if len(sys.argv) <= 3: - print("bad argument") - sys.exit(1) -with open(sys.argv[1]) as f: - cert = f.read() -with open(sys.argv[2]) as f: - key = f.read() -sni = sys.argv[3] -api_key = "edd1c9f034335f136f87ad84b625c8f1" -resp = requests.put("http://127.0.0.1:9180/apisix/admin/ssls/1", json={ - "cert": cert, - "key": key, - "snis": [sni], -}, headers={ - "X-API-KEY": api_key, -}) -print(resp.status_code) -print(resp.text) -``` +创建一个包含证书和密钥,单一域名 SNI 的 SSL 对象: 
```shell -# 创建 SSL 对象 -./create-ssl.py t.crt t.key test.com +curl http://127.0.0.1:9180/apisix/admin/ssls/1 \ +-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "cert" : "'"$(cat t/certs/apisix.crt)"'", + "key": "'"$(cat t/certs/apisix.key)"'", + "snis": ["test.com"] +}' +``` -# 创建 Router 对象 +创建路由: + +```shell curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' { - "uri": "/hello", + "uri": "/get", "hosts": ["test.com"], "methods": ["GET"], "upstream": { "type": "roundrobin", "nodes": { - "127.0.0.1:1980": 1 + "httpbin.org": 1 } } }' +``` -# 测试一下 +测试: + +```shell +curl --resolve 'test.com:9443:127.0.0.1' https://test.com:9443/get -k -vvv -curl --resolve 'test.com:9443:127.0.0.1' https://test.com:9443/hello -vvv * Added test.com:9443:127.0.0.1 to DNS cache * About to connect() to test.com port 9443 (#0) * Trying 127.0.0.1... * Connected to test.com (127.0.0.1) port 9443 (#0) -* Initializing NSS with certpath: sql:/etc/pki/nssdb -* skipping SSL peer certificate verification -* SSL connection using TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 +* SSL connection using TLSv1.3 / TLS_AES_256_GCM_SHA384 +* ALPN, server accepted to use h2 * Server certificate: -* subject: CN=test.com,O=iresty,L=ZhuHai,ST=GuangDong,C=CN -* start date: Jun 24 22:18:05 2019 GMT -* expire date: May 31 22:18:05 2119 GMT -* common name: test.com -* issuer: CN=test.com,O=iresty,L=ZhuHai,ST=GuangDong,C=CN -> GET /hello HTTP/1.1 -> User-Agent: curl/7.29.0 +* subject: C=CN; ST=GuangDong; L=ZhuHai; O=iresty; CN=test.com +* start date: Jun 24 22:18:05 2019 GMT +* expire date: May 31 22:18:05 2119 GMT +* issuer: C=CN; ST=GuangDong; L=ZhuHai; O=iresty; CN=test.com +* SSL certificate verify result: self-signed certificate (18), continuing anyway. 
+> GET /get HTTP/2 > Host: test.com:9443 -> Accept: */* +> user-agent: curl/7.81.0 +> accept: */* ``` ### 泛域名 -一个 SSL 证书的域名也可能包含泛域名,如 `*.test.com`,它代表所有以 `test.com` 结尾的域名都可以使用该证书。 -比如 `*.test.com`,可以匹配 `www.test.com`、`mail.test.com`。 +一个 SSL 证书的域名也可能包含泛域名,如 `*.test.com`,它代表所有以 `test.com` 结尾的域名都可以使用该证书。比如 `*.test.com`,可以匹配 `www.test.com`、`mail.test.com`。 + +以下是在 APISIX 中配置泛域名 SNI 的 SSL 证书的示例。 -看下面这个例子,请注意我们把 `*.test.com` 作为 sni 传递进来: +创建一个包含证书和密钥,泛域名 SNI 的 SSL 对象: ```shell -./create-ssl.py t.crt t.key '*.test.com' +curl http://127.0.0.1:9180/apisix/admin/ssls/1 \ +-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "cert" : "'"$(cat t/certs/apisix.crt)"'", + "key": "'"$(cat t/certs/apisix.key)"'", + "snis": ["*.test.com"] +}' +``` + +创建路由: +```shell curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' { "uri": "/hello", @@ -124,33 +118,34 @@ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f13 } } }' +``` -# 测试一下 +测试: -curl --resolve 'www.test.com:9443:127.0.0.1' https://www.test.com:9443/hello -vvv -* Added test.com:9443:127.0.0.1 to DNS cache -* About to connect() to test.com port 9443 (#0) -* Trying 127.0.0.1... -* Connected to test.com (127.0.0.1) port 9443 (#0) -* Initializing NSS with certpath: sql:/etc/pki/nssdb -* skipping SSL peer certificate verification -* SSL connection using TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 +```shell +curl --resolve 'www.test.com:9443:127.0.0.1' https://www.test.com:9443/get -k -vvv + +* Added www.test.com:9443:127.0.0.1 to DNS cache +* Hostname www.test.com was found in DNS cache +* Trying 127.0.0.1:9443... 
+* Connected to www.test.com (127.0.0.1) port 9443 (#0) +* SSL connection using TLSv1.3 / TLS_AES_256_GCM_SHA384 +* ALPN, server accepted to use h2 * Server certificate: -* subject: CN=test.com,O=iresty,L=ZhuHai,ST=GuangDong,C=CN -* start date: Jun 24 22:18:05 2019 GMT -* expire date: May 31 22:18:05 2119 GMT -* common name: test.com -* issuer: CN=test.com,O=iresty,L=ZhuHai,ST=GuangDong,C=CN -> GET /hello HTTP/1.1 -> User-Agent: curl/7.29.0 -> Host: test.com:9443 -> Accept: */* +* subject: C=CN; ST=GuangDong; L=ZhuHai; O=iresty; CN=test.com +* start date: Jun 24 22:18:05 2019 GMT +* expire date: May 31 22:18:05 2119 GMT +* issuer: C=CN; ST=GuangDong; L=ZhuHai; O=iresty; CN=test.com +* SSL certificate verify result: self signed certificate (18), continuing anyway. +> GET /get HTTP/2 +> Host: www.test.com:9443 +> user-agent: curl/7.74.0 +> accept: */* ``` ### 多域名的情况 -如果一个 SSL 证书包含多个独立域名,比如 `www.test.com` 和 `mail.test.com`, -你可以把它们都放入 `snis` 数组中,就像这样: +如果一个 SSL 证书包含多个独立域名,比如 `www.test.com` 和 `mail.test.com`,你可以把它们都放入 `snis` 数组中,就像这样: ```json { diff --git a/docs/zh/latest/config.json b/docs/zh/latest/config.json index 5d13c4fa10ab..136c40e8ed18 100644 --- a/docs/zh/latest/config.json +++ b/docs/zh/latest/config.json @@ -1,9 +1,16 @@ { - "version": "3.2.0", + "version": "3.5.0", "sidebar": [ { - "type": "doc", - "id": "getting-started" + "type": "category", + "label": "Getting Started", + "items": [ + "getting-started/README", + "getting-started/configure-routes", + "getting-started/load-balancing", + "getting-started/key-authentication", + "getting-started/rate-limiting" + ] }, { "type": "doc", @@ -104,7 +111,8 @@ "plugins/consumer-restriction", "plugins/csrf", "plugins/public-api", - "plugins/gm" + "plugins/gm", + "plugins/chaitin-waf" ] }, { @@ -166,7 +174,8 @@ "plugins/file-logger", "plugins/loggly", "plugins/elasticsearch-logger", - "plugins/tencent-cloud-cls" + "plugins/tencent-cloud-cls", + "plugins/loki-logger" ] } ] @@ -214,6 +223,10 @@ "type": "doc", "id": 
"building-apisix" }, + { + "type": "doc", + "id": "support-fips-in-apisix" + }, { "type": "doc", "id": "external-plugin" @@ -299,6 +312,10 @@ { "type": "doc", "id": "profile" + }, + { + "type": "doc", + "id": "ssl-protocol" } ] }, diff --git a/docs/zh/latest/control-api.md b/docs/zh/latest/control-api.md index f257789e72e3..eeb61b5b8eee 100644 --- a/docs/zh/latest/control-api.md +++ b/docs/zh/latest/control-api.md @@ -96,70 +96,50 @@ APISIX 中一些插件添加了自己的 control API。如果你对他们感兴 ```json [ - { - "healthy_nodes": [ - { - "host": "127.0.0.1", - "port": 1980, - "priority": 0, - "weight": 1 - } - ], - "name": "upstream#/upstreams/1", - "nodes": [ - { - "host": "127.0.0.1", - "port": 1980, - "priority": 0, - "weight": 1 - }, - { - "host": "127.0.0.2", - "port": 1988, - "priority": 0, - "weight": 1 - } - ], - "src_id": "1", - "src_type": "upstreams" - }, - { - "healthy_nodes": [ - { - "host": "127.0.0.1", - "port": 1980, - "priority": 0, - "weight": 1 - } - ], - "name": "upstream#/routes/1", - "nodes": [ - { - "host": "127.0.0.1", - "port": 1980, - "priority": 0, - "weight": 1 - }, - { - "host": "127.0.0.1", - "port": 1988, - "priority": 0, - "weight": 1 - } - ], - "src_id": "1", - "src_type": "routes" - } + { + "nodes": [ + { + "ip": "52.86.68.46", + "counter": { + "http_failure": 0, + "success": 0, + "timeout_failure": 0, + "tcp_failure": 0 + }, + "port": 80, + "status": "healthy" + }, + { + "ip": "100.24.156.8", + "counter": { + "http_failure": 5, + "success": 0, + "timeout_failure": 0, + "tcp_failure": 0 + }, + "port": 80, + "status": "unhealthy" + } + ], + "name": "/apisix/routes/1", + "type": "http" + } ] + ``` 每个 entry 包含以下字段: -* src_type:表示 health checker 的来源。值是 `[routes,services,upstreams]` 其中之一 -* src_id:表示创建 health checker 的对象的 id。例如,假设 id 为 1 的 Upstream 对象创建了一个 health checker,那么 `src_type` 就是 `upstreams`,`src_id` 就是 1 -* name:表示 health checker 的名称 -* nodes:health checker 的目标节点 -* healthy_nodes:表示 health checker 检测到的健康节点 +* name: 资源 ID,健康检查的报告对象。 +* type: 健康检查类型,取值为 
`["http", "https", "tcp"]`。 +* nodes: 检查节点列表。 +* nodes[i].ip: IP 地址。 +* nodes[i].port: 端口。 +* nodes[i].status: 状态:`["healthy", "unhealthy", "mostly_healthy", "mostly_unhealthy"]`。 +* nodes[i].counter.success: 成功计数器。 +* nodes[i].counter.http_failure: HTTP 访问失败计数器。 +* nodes[i].counter.tcp_failure: TCP 连接或读写的失败计数器。 +* nodes[i].counter.timeout_failure: 超时计数器。 用户也可以通过 `/v1/healthcheck/$src_type/$src_id` 来获取指定 health checker 的状态。 @@ -167,40 +147,49 @@ APISIX 中一些插件添加了自己的 control API。如果你对他们感兴 ```json { - "healthy_nodes": [ - { - "host": "127.0.0.1", - "port": 1980, - "priority": 0, - "weight": 1 - } - ], - "name": "upstream#/upstreams/1", - "nodes": [ - { - "host": "127.0.0.1", - "port": 1980, - "priority": 0, - "weight": 1 - }, - { - "host": "127.0.0.2", - "port": 1988, - "priority": 0, - "weight": 1 - } - ], - "src_id": "1", - "src_type": "upstreams" + "nodes": [ + { + "ip": "52.86.68.46", + "counter": { + "http_failure": 0, + "success": 2, + "timeout_failure": 0, + "tcp_failure": 0 + }, + "port": 80, + "status": "healthy" + }, + { + "ip": "100.24.156.8", + "counter": { + "http_failure": 5, + "success": 0, + "timeout_failure": 0, + "tcp_failure": 0 + }, + "port": 80, + "status": "unhealthy" + } + ], + "type": "http" + "name": "/apisix/routes/1" } + ``` :::note -由于 APISIX 采用多进程架构,如果该进程从来没有处理特定上游的请求,则上游的健康检查信息不会出现在该进程上。这可能会导致健康检查 API 在测试期间无法获取所有数据。 +只有一个上游满足以下条件时,它的健康检查状态才会出现在结果里面: + +* 上游配置了健康检查。 +* 上游在任何一个 worker 进程处理过客户端请求。 ::: +如果你使用浏览器访问该 API,你将得到一个网页: + +![Health Check Status Page](https://raw.githubusercontent.com/apache/apisix/master/docs/assets/images/health_check_status_page.png) + ### POST /v1/gc 引入自 2.8 版本 diff --git a/docs/zh/latest/discovery/dns.md b/docs/zh/latest/discovery/dns.md index e7abeb414254..b1f99eef2c4a 100644 --- a/docs/zh/latest/discovery/dns.md +++ b/docs/zh/latest/discovery/dns.md @@ -143,4 +143,4 @@ Upstream 配置是这样的: 我们把权重为 0 的记录当作权重为 1,因此节点“被选中的可能性很小”,这也是处理此类记录的常用方法。 对于端口为 0 的 SRV 记录,我们会使用上游协议的默认端口。 -你也可以在 “service_name” 字段中直接指定端口,比如 
“srv.blah.service:8848”。 +你也可以在“service_name”字段中直接指定端口,比如“srv.blah.service:8848”。 diff --git a/docs/zh/latest/discovery/kubernetes.md b/docs/zh/latest/discovery/kubernetes.md index bb07097eed88..c4b751889cf3 100644 --- a/docs/zh/latest/discovery/kubernetes.md +++ b/docs/zh/latest/discovery/kubernetes.md @@ -32,13 +32,7 @@ description: 本文将介绍如何在 Apache APISIX 中基于 Kubernetes 进行 Kubernetes 服务发现以 [_List-Watch_](https://kubernetes.io/docs/reference/using-api/api-concepts) 方式监听 [_Kubernetes_](https://kubernetes.io) 集群 [_Endpoints_](https://kubernetes.io/docs/concepts/services-networking/service) 资源的实时变化,并将其值存储到 ngx.shared.DICT 中。 -同时遵循 [_APISIX Discovery 规范_](https://github.com/apache/apisix/blob/master/docs/zh/latest/discovery.md) 提供了节点查询接口。 - -:::note - -在四层中使用 Kubernetes 服务发现要求 OpenResty 版本大于等于 1.19.9.1 - -::: +同时遵循 [_APISIX Discovery 规范_](../discovery.md) 提供了节点查询接口。 ## Kubernetes 服务发现的使用 @@ -122,13 +116,13 @@ discovery: ### 单集群模式 Kubernetes 服务发现的查询接口 -单集群模式 Kubernetes 服务发现遵循 [_APISIX Discovery 规范_](https://github.com/apache/apisix/blob/master/docs/zh/latest/discovery.md) 提供节点查询接口。 +单集群模式 Kubernetes 服务发现遵循 [_APISIX Discovery 规范_](../discovery.md) 提供节点查询接口。 **函数:** nodes(service_name) **说明:** -service_name 必须满足格式: [namespace]/[name]:[portName] +service_name 必须满足格式:[namespace]/[name]:[portName] + namespace: Endpoints 所在的命名空间 @@ -231,13 +225,13 @@ discovery: ### 多集群模式 Kubernetes 服务发现的查询接口 -多集群模式 Kubernetes 服务发现遵循 [_APISIX Discovery 规范_](https://github.com/apache/apisix/blob/master/docs/zh/latest/discovery.md) 提供节点查询接口。 +多集群模式 Kubernetes 服务发现遵循 [_APISIX Discovery 规范_](../discovery.md) 提供节点查询接口。 **函数:** nodes(service_name) **说明:** -service_name 必须满足格式: [id]/[namespace]/[name]:[portName] +service_name 必须满足格式:[id]/[namespace]/[name]:[portName] + id: Kubernetes 服务发现配置中定义的集群 id 值 @@ -336,7 +330,7 @@ subjects: **Q: 怎样获取指定 [_ServiceAccount_](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/) 的 Token 值?** -A: 假定你指定的 
[_ServiceAccount_](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/) 资源名为 “kubernetes-discovery“, 命名空间为 “apisix”, 请按如下步骤获取其 Token 值。 +A: 假定你指定的 [_ServiceAccount_](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/) 资源名为“kubernetes-discovery“, 命名空间为“apisix”, 请按如下步骤获取其 Token 值。 1. 获取 _Secret_ 资源名。执行以下命令,输出的第一列内容就是目标 _Secret_ 资源名: diff --git a/docs/zh/latest/discovery/nacos.md b/docs/zh/latest/discovery/nacos.md index b30e1dbd3665..4eef8b7238cd 100644 --- a/docs/zh/latest/discovery/nacos.md +++ b/docs/zh/latest/discovery/nacos.md @@ -59,7 +59,7 @@ discovery: #### 七层 例如,转发 URI 匹配 "/nacos/*" 的请求到一个上游服务, -该服务在 Nacos 中的服务名是 APISIX-NACOS ,查询地址是 http://192.168.33.1:8848/nacos/v1/ns/instance/list?serviceName=APISIX-NACOS ,创建路由时指定服务发现类型为 nacos 。 +该服务在 Nacos 中的服务名是 APISIX-NACOS,查询地址是 http://192.168.33.1:8848/nacos/v1/ns/instance/list?serviceName=APISIX-NACOS,创建路由时指定服务发现类型为 nacos。 ```shell $ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' @@ -126,7 +126,7 @@ $ curl http://127.0.0.1:9180/apisix/admin/stream_routes/1 -H 'X-API-KEY: edd1c9f #### 指定命名空间 例如,转发 URI 匹配 "/nacosWithNamespaceId/*" 的请求到一个上游服务, -该服务在 Nacos 中的服务名是 APISIX-NACOS,命名空间是 test_ns,查询地址是 http://192.168.33.1:8848/nacos/v1/ns/instance/list?serviceName=APISIX-NACOS&namespaceId=test_ns ,创建路由时指定服务发现类型为 nacos 。 +该服务在 Nacos 中的服务名是 APISIX-NACOS,命名空间是 test_ns,查询地址是 http://192.168.33.1:8848/nacos/v1/ns/instance/list?serviceName=APISIX-NACOS&namespaceId=test_ns,创建路由时指定服务发现类型为 nacos。 ```shell $ curl http://127.0.0.1:9180/apisix/admin/routes/2 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' @@ -175,7 +175,7 @@ $ curl http://127.0.0.1:9180/apisix/admin/routes/2 -H 'X-API-KEY: edd1c9f034335f #### 指定组 例如,转发 URI 匹配 "/nacosWithGroupName/*" 的请求到一个上游服务, -该服务在 Nacos 中的服务名是 APISIX-NACOS,组名是 test_group,查询地址是 
http://192.168.33.1:8848/nacos/v1/ns/instance/list?serviceName=APISIX-NACOS&groupName=test_group ,创建路由时指定服务发现类型为 nacos 。 +该服务在 Nacos 中的服务名是 APISIX-NACOS,组名是 test_group,查询地址是 http://192.168.33.1:8848/nacos/v1/ns/instance/list?serviceName=APISIX-NACOS&groupName=test_group,创建路由时指定服务发现类型为 nacos。 ```shell $ curl http://127.0.0.1:9180/apisix/admin/routes/3 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' @@ -224,7 +224,7 @@ $ curl http://127.0.0.1:9180/apisix/admin/routes/3 -H 'X-API-KEY: edd1c9f034335f #### 同时指定命名空间和组 例如,转发 URI 匹配 "/nacosWithNamespaceIdAndGroupName/*" 的请求到一个上游服务, -该服务在 Nacos 中的服务名是 APISIX-NACOS,命名空间是 test_ns,组名是 test_group,查询地址是 http://192.168.33.1:8848/nacos/v1/ns/instance/list?serviceName=APISIX-NACOS&namespaceId=test_ns&groupName=test_group ,创建路由时指定服务发现类型为 nacos 。 +该服务在 Nacos 中的服务名是 APISIX-NACOS,命名空间是 test_ns,组名是 test_group,查询地址是 http://192.168.33.1:8848/nacos/v1/ns/instance/list?serviceName=APISIX-NACOS&namespaceId=test_ns&groupName=test_group,创建路由时指定服务发现类型为 nacos。 ```shell $ curl http://127.0.0.1:9180/apisix/admin/routes/4 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d ' diff --git a/docs/zh/latest/getting-started.md b/docs/zh/latest/getting-started.md deleted file mode 100644 index 9ab54e44e3ef..000000000000 --- a/docs/zh/latest/getting-started.md +++ /dev/null @@ -1,246 +0,0 @@ ---- -title: 快速入门指南 -keywords: - - APISIX - - APISIX 入门指南 - - APISIX docker 安装教程 -description: 本文档将引导你了解如何开始使用 Apache APISIX。 ---- - - - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - -本文将为你介绍 Apache APISIX 的概念、功能以及如何使用 APISIX。 - -通过本文你可以了解到以下内容: - -- Apache APISIX 是什么? -- APISIX 的架构及主要概念。 -- 如何使用 Docker 安装并运行 APISIX。 -- 如何使用 Admin API 创建第一个路由并配置上游。 -- 如何使用 APISIX Dashboard。 -- 如何寻求帮助。 - -## Apache APISIX 是什么? 
- -Apache APISIX 是 Apache 软件基金会下的云原生 API 网关,它兼具动态、实时、高性能等特点,提供了负载均衡、动态上游、灰度发布(金丝雀发布)、服务熔断、身份认证、可观测性等丰富的流量管理功能。我们可以使用 Apache APISIX 来处理传统的南北向流量,也可以处理服务间的东西向流量。同时,它也支持作为 [K8s Ingress Controller](https://github.com/apache/apisix-ingress-controller) 来使用。 - -### 主要特性 - -- 多平台支持:APISIX 提供了多平台解决方案,它不但支持裸机运行,也支持在 Kubernetes 中使用,还支持与 AWS Lambda、Azure Function、Lua 函数和 Apache OpenWhisk 等云服务集成。 -- 全动态能力:APISIX 支持热加载,这意味着你不需要重启服务就可以更新 APISIX 的配置。请访问[为什么 Apache APISIX 选择 Nginx + Lua 这个技术栈?](https://apisix.apache.org/zh/blog/2021/08/25/why-apache-apisix-chose-nginx-and-lua/)以了解实现原理。 -- 精细化路由:APISIX 支持使用 [NGINX 内置变量](https://nginx.org/en/docs/varindex.html)做为路由的匹配条件,你可以自定义匹配函数来过滤请求,匹配路由。 -- 运维友好:APISIX 支持与以下工具和平台集成:[HashiCorp Vault](./terminology/secret.md#使用-vault-管理密钥)、[Zipkin](./plugins/zipkin.md)、[Apache SkyWalking](./plugins/skywalking.md)、[Consul](./discovery/consul_kv.md)、[Nacos](./discovery/nacos.md)、[Eureka](./discovery.md)。通过 [APISIX Dashboard](/docs/dashboard/USER_GUIDE),运维人员可以通过友好且直观的 UI 配置 APISIX。 -- 多语言插件支持:APISIX 支持多种开发语言进行插件开发,开发人员可以选择擅长语言的 SDK 开发自定义插件。 - -## 主要概念 - -下图为 Apache APISIX 的架构: - -![flow-software-architecture](https://raw.githubusercontent.com/apache/apisix/master/docs/assets/images/flow-software-architecture.png) - -下表是本文涉及到的 APISIX 的主要概念和组件: - -| 概念/组件 | 描述 | -|-------------|--------------------------------------------------------------------------------------------------| -| Route | 通过路由定义规则来匹配客户端请求,根据匹配结果加载并执行相应的插件,最后把请求转发给到指定的上游应用。 | -| Upstream | 上游的作用是按照配置规则对服务节点进行负载均衡,它的地址信息可以直接配置到路由或服务上。 | -| Admin API | 用户可以通过 Admin API 控制 APISIX 实例。 | - -## 前提条件 - -在开始使用 APISIX 之前,请确保你已经安装以下应用: - -- [Docker](https://www.docker.com/) 和 [Docker Compose](https://docs.docker.com/compose/)。 -- [curl](https://curl.se/docs/manpage.html) 用于测试 API。你也可以使用 [Hoppscotch](https://hoppscotch.io/) 之类的工具。 -- 本文使用的上游服务是 [httpbin.org](https://httpbin.org),你可以使用它进行测试。这是一个返回服务,它将返回我们在请求中传递的参数。 - -**请求内容:** - -请求 URL 由以下参数构成: - -- Protocol:即网络传输协议,在示例中,我们使用的是 `HTTP` 协议。 -- 
Port:即端口,示例中使用的 `80` 端口。 -- Host:即主机地址,示例中使用的是 `httpbin.org`。 -- Path:即路径,示例中的路径是 `/get`。 -- Query Parameters:即查询字符串,这里有两个字符串,分别是 `foo1` 和 `foo2`。 - -运行以下命令,发送请求: - -```bash -curl --location --request GET "http://httpbin.org/get?foo1=bar1&foo2=bar2" -``` - -**响应内容:** - -```json -{ - "args": { - "foo1": "bar1", - "foo2": "bar2" - }, - "headers": { - "Accept": "*/*", - "Host": "httpbin.org", - "User-Agent": "curl/7.29.0", - "X-Amzn-Trace-Id": "Root=1-6088fe84-24f39487166cce1f0e41efc9" - }, - "origin": "58.152.81.42", - "url": "http://httpbin.org/get?foo1=bar1&foo2=bar2" -} -``` - -## 安装 APISIX - -APISIX 可以借助 quickstart 脚本快速安装并启动。 - -```sh -curl -sL https://run.api7.ai/apisix/quickstart | sh -``` - -该命令在本地安装并运行了基于 Docker 的 APISIX 和 etcd 容器,其中 APISIX 采用 etcd 保存和同步配置信息。APISIX 和 etcd 容器使用 [**host**](https://docs.docker.com/network/host/) 的 Docker 网络模式,因此可以从本地直接访问。 - -如果一切顺利,将输出如下信息。 - -```text -✔ APISIX is ready! -``` - -:::note - -你也可以参考 [APISIX 安装指南](./installation-guide.md)了解不同的安装方法。 - -::: - -:::info IMPORTANT - -请确保其他系统进程没有占用 **9080、9180、9443 和 2379** 端口。 - -::: - -你可以通过 curl 来访问正在运行的 APISIX 实例。比如,你可以发送一个简单的 HTTP 请求来验证 APISIX 运行状态是否正常。 - -```sh -curl "http://127.0.0.1:9080" --head | grep Server -``` - -I如果一切顺利,将输出如下信息。 - -```text -Server: APISIX/3.1.0 -``` - -现在,你已经成功安装并运行了 APISIX ! 
- -## 创建路由 - -APISIX 提供了强大的 [Admin API](./admin-api.md) 和 [Dashboard](https://github.com/apache/apisix-dashboard) 供用户使用。在下述示例中,我们将使用 Admin API 创建一个 [Route](./terminology/route.md) 并与 [Upstream](./terminology/upstream.md) 绑定,当一个请求到达 APISIX 时,APISIX 会将请求转发到指定的上游服务中。 - -以下示例代码中,我们将为路由配置匹配规则,以便 APISIX 可以将请求转发到对应的上游服务: - -```bash -curl "http://127.0.0.1:9180/apisix/admin/routes/1" -X PUT -d ' -{ - "methods": ["GET"], - "host": "example.com", - "uri": "/anything/*", - "upstream": { - "type": "roundrobin", - "nodes": { - "httpbin.org:80": 1 - } - } -}' -``` - -该配置意味着,当请求满足下述的**所有**规则时,请求将被转发到上游服务(`httpbin.org:80`): - -- 请求的 HTTP 方法为 `GET`。 -- 请求头包含 `host` 字段,且它的值为 `example.com`。 -- 请求路径匹配 `/anything/*`,`*` 意味着任意的子路径,例如 `/anything/foo?arg=10`。 - -当路由创建完成后,可以通过以下命令访问上游服务: - -```bash -curl -i -X GET "http://127.0.0.1:9080/anything/foo?arg=10" -H "Host: example.com" -``` - -该请求将被 APISIX 转发到 `http://httpbin.org:80/anything/foo?arg=10`。 - -## 使用上游服务创建路由 - -你可以通过以下命令创建一个上游,并在路由中使用它,而不是直接将其配置在路由中: - -```bash -curl "http://127.0.0.1:9180/apisix/admin/upstreams/1" -X PUT -d ' -{ - "type": "roundrobin", - "nodes": { - "httpbin.org:80": 1 - } -}' -``` - -该上游配置与上一节配置在路由中的上游相同。同样使用了 `roundrobin` 作为负载均衡机制,并设置了 `httpbin.org:80` 为上游服务。为了将该上游绑定到路由,此处需要把 `upstream_id` 设置为 `"1"`。更多字段信息,请参考 [Admin API](./admin-api.md)。 - -上游服务创建完成后,可以通过以下命令绑定到指定路由: - -```bash -curl "http://127.0.0.1:9180/apisix/admin/routes/1" -X PUT -d ' -{ - "uri": "/get", - "host": "httpbin.org", - "upstream_id": "1" -}' -``` - -我们已经创建了路由与上游服务,现在可以通过以下命令访问上游服务: - -```bash -curl -i -X GET "http://127.0.0.1:9080/get?foo1=bar1&foo2=bar2" -H "Host: httpbin.org" -``` - -该请求将被 APISIX 转发到 `http://httpbin.org:80/anything/foo?arg=10`。 - -## 使用 APISIX Dashboard - -你还可以使用 APISIX Dashboard 创建和配置类似于上述步骤中所创建的路由。 - -如果你已经完成上述操作步骤,就可以通过 [`localhost:9000`](http://localhost:9000/) 访问 APISIX Dashboard。 - -单击侧边栏中的 [`Route`](http://localhost:9000/routes/list),可以查看已经配置的路由列表。你也可以看到在上述步骤中使用 Admin API 创建的路由。 - -你也可以通过单击 
[`Create`](http://localhost:9000/routes/create) 按钮并按照提示创建新路由: - -![Creating a Route with APISIX Dashboard](../../assets/images/create-a-route.png) - -新创建的路由将被添加到路由列表中: - -![Creating a Route with APISIX Dashboard](../../assets/images/list-of-routes.png) - -想要了解更多关于 APISIX Dashboard 的信息,请参考 [APISIX Dashboard 文档](/docs/dashboard/USER_GUIDE)。 - -## 总结 - -完成上述步骤后,APISIX 就可以正常运行了。如果想利用 APISIX 实现身份验证、安全性、限流限速和可观测性等功能,可通过添加插件实现。各类插件的详细信息请参考[插件市场](/plugins)。 - -如果你在使用当中遇到困难,可以通过 [APISIX 社区频道](/docs/general/join)或者在 GitHub 上[提交一个 issue](/docs/general/submit-issue) 寻求帮助。 diff --git a/docs/zh/latest/getting-started/README.md b/docs/zh/latest/getting-started/README.md new file mode 100644 index 000000000000..7575132a3dcc --- /dev/null +++ b/docs/zh/latest/getting-started/README.md @@ -0,0 +1,71 @@ +--- +title: 入门指南 +description: 本教程使用脚本在本地环境快速安装 Apache APISIX,并且通过管理 API 来验证是否安装成功。 +--- + + + + + +> 本教程由 [API7.ai](https://api7.ai/) 编写。 + +Apache APISIX 是 Apache 软件基金会下的[顶级项目](https://projects.apache.org/project.html?apisix),由 API7.ai 开发并捐赠。它是一个具有动态、实时、高性能等特点的云原生 API 网关。 + +你可以使用 APISIX 网关作为所有业务的流量入口,它提供了动态路由、动态上游、动态证书、A/B 测试、灰度发布(金丝雀发布)、蓝绿部署、限速、防攻击、收集指标、监控报警、可观测、服务治理等功能。 + +本教程使用脚本在本地环境快速安装 Apache APISIX,并且通过管理 API 来验证是否安装成功。 + +## 前置条件 + +快速启动脚本需要以下条件: + +* 已安装 [Docker](https://docs.docker.com/get-docker/),用于部署 **etcd** 和 **APISIX**。 +* 已安装 [curl](https://curl.se/),用于验证 APISIX 是否安装成功。 + +## 安装 APISIX + +:::caution + +为了提供更好的体验,管理 API 默认无需授权,请在生产环境中打开授权开关。 + +::: +APISIX 可以借助 quickstart 脚本快速安装并启动: + +```shell +curl -sL https://run.api7.ai/apisix/quickstart | sh +``` + +该命令启动 _apisix-quickstart_ 和 _etcd_ 两个容器,APISIX 使用 etcd 保存和同步配置。APISIX 和 etcd 容器使用 Docker 的 [**host**](https://docs.docker.com/network/host/) 网络模式,因此可以从本地直接访问。 + +如果一切顺利,将输出如下信息: + +```text +✔ APISIX is ready! 
+``` + +## 验证 + +你可以通过 curl 来访问正在运行的 APISIX 实例。比如,你可以发送一个简单的 HTTP 请求来验证 APISIX 运行状态是否正常: + +```shell +curl "http://127.0.0.1:9080" --head | grep Server +``` + +如果一切顺利,将输出如下信息: + +```text +Server: APISIX/Version +``` + +这里的 `Version` 是指你已经安装的 APISIX 版本,比如 `APISIX/3.3.0`。 + +现在,你已经成功安装并运行了 APISIX! + +## 下一步 + +如果你已经成功地安装了 APISIX 并且正常运行,那么你可以继续进行下面的教程。 + +* [配置路由](configure-routes.md) +* [负载均衡](load-balancing.md) +* [限速](rate-limiting.md) +* [密钥验证](key-authentication.md) diff --git a/docs/zh/latest/getting-started/configure-routes.md b/docs/zh/latest/getting-started/configure-routes.md new file mode 100644 index 000000000000..6f6df3aeee56 --- /dev/null +++ b/docs/zh/latest/getting-started/configure-routes.md @@ -0,0 +1,71 @@ +--- +title: 配置路由 +slug: /getting-started/configure-routes +--- + + + + + +> 本教程由 [API7.ai](https://api7.ai/) 编写。 + +Apache APISIX 使用 _routes_ 来提供灵活的网关管理功能,在一个请求中,_routes_ 包含了访问路径和上游目标等信息。 + +本教程将引导你创建一个 route 并验证它,你可以参考以下步骤: + +1. 创建一个指向 [httpbin.org](http://httpbin.org)的 _upstream_。 +2. 使用 _cURL_ 发送一个请求,了解 APISIX 的代理和转发请求机制。 + +## Route 是什么 + +Route(也称之为路由)是访问上游目标的路径,在 [Apache APISIX](https://api7.ai/apisix) 中,Route 首先通过预定的规则来匹配客户端请求,然后加载和执行相应的插件,最后将请求转发至特定的 Upstream。 + +在 APISIX 中,一个最简单的 Route 仅由匹配路径和 Upstream 地址两个信息组成。 + +## Upstream 是什么 + +Upstream(也称之为上游)是一组具备相同功能的节点集合,它是对虚拟主机的抽象。Upstream 可以通过预先配置的规则对多个服务节点进行负载均衡。 + +## 前置条件 + +1. 
参考[入门指南](./README.md)完成 APISIX 的安装。 + +## 创建路由 + +你可以创建一个路由,将客户端的请求转发至 [httpbin.org](http://httpbin.org)(这个网站能测试 HTTP 请求和响应的各种信息)。 + +通过下面的命令,你将创建一个路由,把请求`http://127.0.0.1:9080/ip` 转发至 [httpbin.org/ip](http://httpbin.org/ip): + +```shell +curl -i "http://127.0.0.1:9180/apisix/admin/routes" -X PUT -d ' +{ + "id": "getting-started-ip", + "uri": "/ip", + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org:80": 1 + } + } +}' +``` + +如果配置成功,将会返回 `HTTP/1.1 201 OK`。 + +## 验证 + +```shell +curl "http://127.0.0.1:9080/ip" +``` + +你将会得到类似下面的返回: + +```text +{ + "origin": "183.94.122.205" +} +``` + +## 下一步 + +本教程创建的路由仅对应一个上游目标。在下个教程中,你将会学习如何配置多个上游目标的负载均衡。 diff --git a/docs/zh/latest/getting-started/key-authentication.md b/docs/zh/latest/getting-started/key-authentication.md new file mode 100644 index 000000000000..ddcce9029db0 --- /dev/null +++ b/docs/zh/latest/getting-started/key-authentication.md @@ -0,0 +1,183 @@ +--- +title: 密钥验证 +slug: /getting-started/key-authentication +--- + + + + + +> 本教程由 [API7.ai](https://api7.ai/) 编写。 + +API 网关主要作用是连接 API 消费者和提供者。出于安全考虑,在访问内部资源之前,应先对消费者进行身份验证和授权。 + +![身份验证](https://static.apiseven.com/uploads/2023/02/08/8mRaK3v1_consumer.png) + +APISIX 拥有灵活的插件扩展系统,目前有很多可用于用户身份验证和授权的插件。例如: + +- [Key Authentication](https://apisix.apache.org/zh/docs/apisix/plugins/key-auth/) +- [Basic Authentication](https://apisix.apache.org/zh/docs/apisix/plugins/basic-auth/) +- [JSON Web Token (JWT) Authentication](https://apisix.apache.org/zh/docs/apisix/plugins/jwt-auth/) +- [Keycloak](https://apisix.apache.org/zh/docs/apisix/plugins/authz-keycloak/) +- [Casdoor](https://apisix.apache.org/zh/docs/apisix/plugins/authz-casdoor/) +- [Wolf RBAC](https://apisix.apache.org/zh/docs/apisix/plugins/wolf-rbac/) +- [OpenID Connect](https://apisix.apache.org/zh/docs/apisix/plugins/openid-connect/) +- [Central Authentication Service (CAS)](https://apisix.apache.org/zh/docs/apisix/plugins/cas-auth/) +- 
[HMAC](https://apisix.apache.org/zh/docs/apisix/plugins/hmac-auth/) +- [Casbin](https://apisix.apache.org/zh/docs/apisix/plugins/authz-casbin/) +- [LDAP](https://apisix.apache.org/zh/docs/apisix/plugins/ldap-auth/) +- [Open Policy Agent (OPA)](https://apisix.apache.org/zh/docs/apisix/plugins/opa/) +- [Forward Authentication](https://apisix.apache.org/zh/docs/apisix/plugins/forward-auth/) + +本教程中,你将创建一个带有 _密钥验证_ 插件的 _消费者_,并学习如何启用和停用身份验证插件。 + +## Consumer 是什么 + +Consumer(也称之为消费者)是指使用 API 的应用或开发人员。 + +在 APISIX 中,消费者需要一个全局唯一的 _名称_,并从上面的列表中选择一个身份验证 _插件_。 + +## Key Authentication 是什么 + +Key Authentication(也称之为密钥验证)是一个相对比较简单但是应用广泛的身份验证方法,它的设计思路如下: + +1. 管理员为路由添加一个身份验证密钥(API 密钥)。 +2. API 消费者在发送请求时,在查询字符串或者请求头中添加密钥。 + +## 启用 Key Authentication + +### 前置条件 + +1. 参考[快入门指南](./README.md)完成 APISIX 的安装。 +2. 完成[配置路由](./configure-routes.md#route-是什么)。 + +### 创建消费者 + +创建一个名为 `tom` 的消费者,并启用 `key-auth` 插件,密钥设置为 `secret-key`。所有携带密钥 `secret-key` 的请求都会被识别为消费者 `tom`。 + +:::caution + +生产环境请使用复杂的密钥。 + +::: + +```shell +curl -i "http://127.0.0.1:9180/apisix/admin/consumers" -X PUT -d ' +{ + "username": "tom", + "plugins": { + "key-auth": { + "key": "secret-key" + } + } +}' +``` + +如果消费者创建成功,你将得到返回 `HTTP/1.1 201 OK`。 + +### 启用 Authentication + +在教程[配置路由](./configure-routes.md)中,我们已经创建了路由 `getting-started-ip`,我们通过 `PATCH` 方法为该路由增加 `key-auth` 插件: + +```shell +curl -i "http://127.0.0.1:9180/apisix/admin/routes/getting-started-ip" -X PATCH -d ' +{ + "plugins": { + "key-auth": {} + } +}' +``` + +如果增加插件成功,你将得到返回 `HTTP/1.1 201 Created`。 + +### 验证 + +我们可以在以下场景中进行验证: + +#### 1. 发送不带任何密钥的请求 + +发送一个不带请求头 `apikey` 的请求。 + +```shell +curl -i "http://127.0.0.1:9080/ip" +``` + +如果你已经启用了密钥身份验证,你将会得到返回 `HTTP/1.1 401 Unauthorized`,即未授权。 + +```text +HTTP/1.1 401 Unauthorized +Date: Wed, 08 Feb 2023 09:38:36 GMT +Content-Type: text/plain; charset=utf-8 +Transfer-Encoding: chunked +Connection: keep-alive +Server: APISIX/3.1.0 +``` + +#### 2. 
发送携带错误密钥的请求 + +发送一个携带错误密钥(比如 `wrong-key`)的请求。 + +```shell +curl -i "http://127.0.0.1:9080/ip" -H 'apikey: wrong-key' +``` + +如果密钥错误,你也将得到返回 `HTTP/1.1 401 Unauthorized`,即未授权。 + +```text +HTTP/1.1 401 Unauthorized +Date: Wed, 08 Feb 2023 09:38:27 GMT +Content-Type: text/plain; charset=utf-8 +Transfer-Encoding: chunked +Connection: keep-alive +Server: APISIX/3.1.0 +``` + +#### 3. 发送携带正确密钥的请求 + +发送一个携带正确密钥(`secret-key`)的请求。 + +```shell +curl -i "http://127.0.0.1:9080/ip" -H 'apikey: secret-key' +``` + +你将会得到返回 `HTTP/1.1 200 OK`。 + +```text +HTTP/1.1 200 OK +Content-Type: application/json +Content-Length: 44 +Connection: keep-alive +Date: Thu, 09 Feb 2023 03:27:57 GMT +Access-Control-Allow-Origin: * +Access-Control-Allow-Credentials: true +Server: APISIX/3.1.0 +``` + +### 禁用 Authentication + +将参数设置 `_meta.disable` 为 `true`,即可禁用密钥验证插件。 + +```shell +curl "http://127.0.0.1:9180/apisix/admin/routes/getting-started-ip" -X PATCH -d ' +{ + "plugins": { + "key-auth": { + "_meta": { + "disable": true + } + } + } +}' +``` + +你可以发送一个不带任何密钥的请求来验证: + +```shell +curl -i "http://127.0.0.1:9080/ip" +``` + +因为你已经禁用了密钥验证插件,所以你将会得到返回 `HTTP/1.1 200 OK`。 + +## 下一步 + +你已经学习了如何为路由配置密钥验证。在下个教程中,你将学习如何配置限速。 diff --git a/docs/zh/latest/getting-started/load-balancing.md b/docs/zh/latest/getting-started/load-balancing.md new file mode 100644 index 000000000000..2923f8f516ea --- /dev/null +++ b/docs/zh/latest/getting-started/load-balancing.md @@ -0,0 +1,99 @@ +--- +title: 负载均衡 +slug: /getting-started/load-balancing +--- + + + + + +> 本教程由 [API7.ai](https://api7.ai/) 编写。 + +负载均衡管理客户端和服务端之间的流量。它决定由哪个服务来处理特定的请求,从而提高性能、可扩展性和可靠性。在设计需要处理大量流量的系统时,负载均衡是一个关键的考虑因素。 + +Apache APISIX 支持加权负载均衡算法,传入的流量按照预定顺序轮流分配给一组服务器的其中一个。 + +在本教程中,你将创建一个具有两个上游服务的路由,并且启用负载均衡来测试在两个服务之间的切换情况。 + +## 前置条件 + +1. 参考[入门指南](./README.md)完成 APISIX 的安装。 +2. 
了解 APISIX 中[路由及上游](./configure-routes.md#route-是什么)的概念。 + +## 启用负载均衡 + +创建一个具有两个上游服务的路由,访问 `/headers` 将被转发到 [httpbin.org](https://httpbin.org/headers) 和 [mock.api7.ai](https://mock.api7.ai/headers) 这两个上游服务,并且会返回请求头。 + +```shell +curl -i "http://127.0.0.1:9180/apisix/admin/routes" -X PUT -d ' +{ + "id": "getting-started-headers", + "uri": "/headers", + "upstream" : { + "type": "roundrobin", + "nodes": { + "httpbin.org:443": 1, + "mock.api7.ai:443": 1 + }, + "pass_host": "node", + "scheme": "https" + } +}' +``` + +如果路由创建成功,你将会收到返回 `HTTP/1.1 201 OK`。 + +:::info + +1. 将 `pass_host` 字段设置为 `node`,将传递请求头给上游。 +2. 将 `scheme` 字段设置为 `https`,向上游发送请求时将启用 TLS。 + +::: + +## 验证 + +这两个服务返回不同的数据。 + +`httpbin.org` 返回: + +```json +{ + "headers": { + "Accept": "*/*", + "Host": "httpbin.org", + "User-Agent": "curl/7.58.0", + "X-Amzn-Trace-Id": "Root=1-63e34b15-19f666602f22591b525e1e80", + "X-Forwarded-Host": "localhost" + } +} +``` + +`mock.api7.ai` 返回: + +```json +{ + "headers": { + "accept": "*/*", + "host": "mock.api7.ai", + "user-agent": "curl/7.58.0", + "content-type": "application/json", + "x-application-owner": "API7.ai" + } +} +``` + +我们生成 100 个请求来测试负载均衡的效果: + +```shell +hc=$(seq 100 | xargs -i curl "http://127.0.0.1:9080/headers" -sL | grep "httpbin" | wc -l); echo httpbin.org: $hc, mock.api7.ai: $((100 - $hc)) +``` + +结果显示,请求几乎平均分配给这两个上游服务: + +```text +httpbin.org: 51, mock.api7.ai: 49 +``` + +## 下一步 + +你已经学习了如何配置负载均衡。在下个教程中,你将学习如何配置身份验证。 diff --git a/docs/zh/latest/getting-started/rate-limiting.md b/docs/zh/latest/getting-started/rate-limiting.md new file mode 100644 index 000000000000..22f635fd5ca4 --- /dev/null +++ b/docs/zh/latest/getting-started/rate-limiting.md @@ -0,0 +1,101 @@ +--- +title: 限速 +slug: /getting-started/rate-limiting +--- + + + + + +> 本教程由 [API7.ai](https://api7.ai/) 编写。 + +APISIX 是一个统一的控制中心,它管理 API 和微服务的进出流量。除了客户端发来的合理的请求,还可能存在网络爬虫产生的不必要的流量,此外,网络攻击(比如 DDos)也可能产生非法请求。 + +APISIX 提供限速功能,通过限制在规定时间内发送到上游服务的请求数量来保护 APIs 和微服务。请求的计数在内存中完成,具有低延迟和高性能的特点。 + +
+
+Routes Diagram +
+
+ +在本教程中,你将启用 `limit-count` 插件来限制传入流量的速率。 + +## 前置条件 + +1. 参考[入门指南](./README.md)完成 APISIX 的安装。 +2. 完成[配置路由](./configure-routes.md#route-是什么)。 + +## 启用 Rate Limiting + +在教程[配置路由](./configure-routes.md)中,我们已经创建了路由 `getting-started-ip`,我们通过 `PATCH` 方法为该路由增加 `limit-count` 插件: + +```shell +curl -i "http://127.0.0.1:9180/apisix/admin/routes/getting-started-ip" -X PATCH -d ' +{ + "plugins": { + "limit-count": { + "count": 2, + "time_window": 10, + "rejected_code": 503 + } + } +}' +``` + +如果增加插件成功,你将得到返回 `HTTP/1.1 201 OK`。上述配置将传入流量的速率限制为每 10 秒最多 2 个请求。 + +### 验证 + +我们同时生成 100 个请求来测试限速插件的效果。 + +```shell +count=$(seq 100 | xargs -i curl "http://127.0.0.1:9080/ip" -I -sL | grep "503" | wc -l); echo \"200\": $((100 - $count)), \"503\": $count +``` + +请求结果同预期一致:在这 100 个请求中,有 2 个请求发送成功(状态码为 `200`),其他请求均被拒绝(状态码为 `503`)。 + +```text +"200": 2, "503": 98 +``` + +## 禁用 Rate Limiting + +将参数设置 `_meta.disable` 为 `true`,即可禁用限速插件。 + +```shell +curl -i "http://127.0.0.1:9180/apisix/admin/routes/getting-started-ip" -X PATCH -d ' +{ + "plugins": { + "limit-count": { + "_meta": { + "disable": true + } + } + } +}' +``` + +### 验证 + +我们再次同时生成 100 个请求来测试限速插件是否已被禁用: + +```shell +count=$(seq 100 | xargs -i curl "http://127.0.0.1:9080/ip" -I -sL | grep "503" | wc -l); echo \"200\": $((100 - $count)), \"503\": $count +``` + +结果显示所有的请求均成功: + +```text +"200": 100, "503": 0 +``` + +## 更多 + +你可以使用 APISIX 的变量来配置限速插件的规则,比如 `$host` 和 `$uri`。此外,APISIX 也支持使用 Redis 集群进行限速配置,即通过 Redis 来进行计数。 + +## 下一步 + +恭喜你!你已经学习了如何配置限速插件,这也意味着你已经完成了所有的入门教程。 + +你可以继续学习其他文档来定制 APISIX,以满足你的生产环境需要。 diff --git a/docs/zh/latest/installation-guide.md b/docs/zh/latest/installation-guide.md index c5c90096d45c..9bb1254dd4db 100644 --- a/docs/zh/latest/installation-guide.md +++ b/docs/zh/latest/installation-guide.md @@ -31,7 +31,7 @@ import TabItem from '@theme/TabItem'; 本文将介绍如何在你的环境中安装并运行 APISIX。 -关于如何快速运行 Apache APISIX,请参考[入门指南](./getting-started.md)。 +关于如何快速运行 Apache APISIX,请参考[入门指南](./getting-started/README.md)。 ## 安装 APISIX @@ 
-44,6 +44,7 @@ import TabItem from '@theme/TabItem'; {label: 'Docker', value: 'docker'}, {label: 'Helm', value: 'helm'}, {label: 'RPM', value: 'rpm'}, + {label: 'DEB', value: 'deb'}, {label: 'Source Code', value: 'source code'}, ]}> @@ -101,7 +102,7 @@ helm install apisix apisix/apisix --create-namespace --namespace apisix -该安装方法适用于 CentOS 7 和 CentOS 8。如果你选择该方法安装 APISIX,需要先安装 etcd。具体安装方法请参考[安装 etcd](#安装-etcd)。 +该安装方法适用于 CentOS 7 和 CentOS 8。如果你选择该方法安装 APISIX,需要先安装 etcd。具体安装方法请参考 [安装 etcd](#安装-etcd)。 ### 通过 RPM 仓库安装 @@ -133,29 +134,6 @@ sudo yum install apisix-2.13.1 ::: -### 通过 DEB 仓库安装 - -目前 APISIX 支持的 DEB 仓库仅支持 Debian 11(Bullseye),并且支持 amd64 和 arm64 架构。 - -```shell -# amd64 -sudo echo "deb http://openresty.org/package/debian bullseye openresty" | tee /etc/apt/sources.list.d/openresty.list -wget -O - http://repos.apiseven.com/pubkey.gpg | apt-key add - -echo "deb http://repos.apiseven.com/packages/debian bullseye main" | tee /etc/apt/sources.list.d/apisix.list - -# arm64 -sudo echo "deb http://openresty.org/package/debian bullseye openresty" | tee /etc/apt/sources.list.d/openresty.list -wget -O - http://repos.apiseven.com/pubkey.gpg | apt-key add - -echo "deb http://repos.apiseven.com/packages/arm64/debian bullseye main" | tee /etc/apt/sources.list.d/apisix.list -``` - -完成上述操作后使用以下命令安装 APISIX: - -```shell -sudo apt update -sudo apt install -y apisix=3.0.0-0 -``` - ### 通过 RPM 包离线安装: 将 APISIX 离线 RPM 包下载到 `apisix` 文件夹: @@ -195,9 +173,58 @@ apisix start + + +### 通过 DEB 仓库安装 + +目前 APISIX 支持的 DEB 仓库仅支持 Debian 11(Bullseye),并且支持 amd64 和 arm64 架构。 + +```shell +# amd64 +echo "deb http://openresty.org/package/debian bullseye openresty" | sudo tee /etc/apt/sources.list.d/openresty.list +wget -O - https://openresty.org/package/pubkey.gpg | sudo apt-key add - +wget -O - http://repos.apiseven.com/pubkey.gpg | sudo apt-key add - +echo "deb http://repos.apiseven.com/packages/debian bullseye main" | sudo tee /etc/apt/sources.list.d/apisix.list + +# arm64 +echo "deb 
http://openresty.org/package/debian bullseye openresty" | sudo tee /etc/apt/sources.list.d/openresty.list +wget -O - https://openresty.org/package/pubkey.gpg | sudo apt-key add - +wget -O - http://repos.apiseven.com/pubkey.gpg | sudo apt-key add - +echo "deb http://repos.apiseven.com/packages/arm64/debian bullseye main" | sudo tee /etc/apt/sources.list.d/apisix.list +``` + +完成上述操作后使用以下命令安装 APISIX: + +```shell +sudo apt update +sudo apt install -y apisix=3.0.0-0 +``` + +### 管理 APISIX 服务 + +APISIX 安装完成后,你可以运行以下命令初始化 NGINX 配置文件和 etcd: + +```shell +sudo apisix init +``` + +使用以下命令启动 APISIX: + +```shell +sudo apisix start +``` + +:::tip + +你可以运行 `apisix help` 命令,通过查看返回结果,获取其他操作的命令及描述。 + +::: + + + -如果你想要使用源码构建 APISIX,请参考[源码安装 APISIX](./building-apisix.md)。 +如果你想要使用源码构建 APISIX,请参考 [源码安装 APISIX](./building-apisix.md)。 @@ -310,4 +337,4 @@ systemctl stop apisix 如果你是通过其他方法安装的 APISIX,可以参考[配置文件模板](https://github.com/api7/apisix-build-tools/blob/master/usr/lib/systemd/system/apisix.service)进行修改,并将其添加在 `/usr/lib/systemd/system/apisix.service` 路径下。 -如需了解 APISIX 后续使用,请参考[入门指南](./getting-started.md)获取更多信息。 +如需了解 APISIX 后续使用,请参考[入门指南](./getting-started/README.md)获取更多信息。 diff --git a/docs/zh/latest/plugin-develop.md b/docs/zh/latest/plugin-develop.md index 01327903bdb8..383a05d4d55d 100644 --- a/docs/zh/latest/plugin-develop.md +++ b/docs/zh/latest/plugin-develop.md @@ -239,7 +239,7 @@ local _M = { } ``` -你在创建 [Consumer](https://github.com/apache/apisix/blob/master/docs/zh/latest/admin-api.md#consumer) 时会用到它。 +你在创建 [Consumer](./admin-api.md#consumer) 时会用到它。 为了检验这个配置,这个插件使用了如下的 schema: diff --git a/docs/zh/latest/plugins/api-breaker.md b/docs/zh/latest/plugins/api-breaker.md index 9f522d8e25f0..ff46dfff94a6 100644 --- a/docs/zh/latest/plugins/api-breaker.md +++ b/docs/zh/latest/plugins/api-breaker.md @@ -108,7 +108,7 @@ HTTP/1.1 502 Bad Gateway ``` -## 禁用插件 +## 删除插件 当你需要禁用该插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: diff --git a/docs/zh/latest/plugins/authz-casbin.md 
b/docs/zh/latest/plugins/authz-casbin.md index 8908456bf0e5..d9fa29305d48 100644 --- a/docs/zh/latest/plugins/authz-casbin.md +++ b/docs/zh/latest/plugins/authz-casbin.md @@ -241,7 +241,7 @@ HTTP/1.1 403 Forbidden curl -i http://127.0.0.1:9080/res -H 'user: alice' -X GET ``` -## 禁用插件 +## 删除插件 当你需要禁用 `authz-casbin` 插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: diff --git a/docs/zh/latest/plugins/authz-casdoor.md b/docs/zh/latest/plugins/authz-casdoor.md index 8a47ff2a5b20..6cc5551f0696 100644 --- a/docs/zh/latest/plugins/authz-casdoor.md +++ b/docs/zh/latest/plugins/authz-casdoor.md @@ -45,7 +45,7 @@ description: 本篇文档介绍了 Apache APISIX auth-casdoor 插件的相关信 :::info IMPORTANT -指定 `endpoint_addr` 和 `callback_url` 属性时不要以 “/” 来结尾。 +指定 `endpoint_addr` 和 `callback_url` 属性时不要以“/”来结尾。 `callback_url` 必须是路由的 URI。具体细节可查看下方示例内容,了解相关配置。 @@ -85,7 +85,7 @@ curl "http://127.0.0.1:9180/apisix/admin/routes/1" -H "X-API-KEY: edd1c9f034335f 上述操作完成后,用户就会被重定向到目标 URL。 -## 禁用插件 +## 删除插件 当需要禁用 `authz-casdoor` 插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: diff --git a/docs/zh/latest/plugins/authz-keycloak.md b/docs/zh/latest/plugins/authz-keycloak.md index f9918fe4e7ce..18979ad48f13 100644 --- a/docs/zh/latest/plugins/authz-keycloak.md +++ b/docs/zh/latest/plugins/authz-keycloak.md @@ -44,7 +44,7 @@ description: 本文介绍了关于 Apache APISIX `authz-keycloak` 插件的基 | 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 | |----------------------------------------------|---------------|-------|-----------------------------------------------|--------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| discovery | string | 否 | | https://host.domain/auth/realms/foo/.well-known/uma2-configuration | Keycloak 授权服务的 [discovery 
document](https://www.keycloak.org/docs/14.0/authorization_services/#_service_authorization_api) 的 URL。 | +| discovery | string | 否 | | https://host.domain/auth/realms/foo/.well-known/uma2-configuration | Keycloak 授权服务的 [discovery document](https://www.keycloak.org/docs/latest/authorization_services/index.html) 的 URL。 | | token_endpoint | string | 否 | | https://host.domain/auth/realms/foo/protocol/openid-connect/token | 接受 OAuth2 兼容 token 的接口,需要支持 `urn:ietf:params:oauth:grant-type:uma-ticket` 授权类型。 | | resource_registration_endpoint | string | 否 | | https://host.domain/auth/realms/foo/authz/protection/resource_set | 符合 UMA 的资源注册端点。如果提供,则覆盖发现中的值。 | | client_id | string | 是 | | | 客户端正在寻求访问的资源服务器的标识符。 | @@ -55,7 +55,7 @@ description: 本文介绍了关于 Apache APISIX `authz-keycloak` 插件的基 | lazy_load_paths | boolean | 否 | false | [true, false] | 当设置为 true 时,使用资源注册端点而不是静态权限将请求 URI 动态解析为资源。 | | http_method_as_scope | boolean | 否 | false | [true, false] | 设置为 true 时,将 HTTP 请求类型映射到同名范围并添加到所有请求的权限。 | | timeout | integer | 否 | 3000 | [1000, ...] | 与 Identity Server 的 HTTP 连接超时(毫秒)。 | -| access_token_expires_in | integer | 否 | 300 | [1, ...] | 访问令牌的有效期。 token. | +| access_token_expires_in | integer | 否 | 300 | [1, ...] | 访问令牌的有效期。token. | | access_token_expires_leeway | integer | 否 | 0 | [0, ...] | access_token 更新的到期余地。设置后,令牌将在到期前几秒更新 access_token_expires_leeway。这避免了 access_token 在到达 OAuth 资源服务器时刚刚过期的情况。 | | refresh_token_expires_in | integer | 否 | 3600 | [1, ...] | 刷新令牌的失效时间。 | | refresh_token_expires_leeway | integer | 否 | 0 | [0, ...] 
| refresh_token 更新的到期余地。设置后,令牌将在到期前几秒刷新 refresh_token_expires_leeway。这样可以避免在到达 OAuth 资源服务器时 refresh_token 刚刚过期的错误。 | @@ -173,7 +173,7 @@ curl http://127.0.0.1:9080/get \ -H 'Authorization: Bearer {JWT Token}' ``` -## 禁用插件 +## 删除插件 当你需要禁用 `authz-keycloak` 插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: diff --git a/docs/zh/latest/plugins/aws-lambda.md b/docs/zh/latest/plugins/aws-lambda.md index 40ae728113a3..5163b05a9fb3 100644 --- a/docs/zh/latest/plugins/aws-lambda.md +++ b/docs/zh/latest/plugins/aws-lambda.md @@ -42,7 +42,7 @@ description: 本文介绍了关于 Apache APISIX aws-lambda 插件的基本信 | function_uri | string | 是 | | | 触发 lambda serverless 函数的 AWS API Gateway 端点。 | | authorization | object | 否 | | | 访问云函数的授权凭证。 | | authorization.apikey | string | 否 | | | 生成的 API 密钥,用于授权对 AWS Gateway 端点的请求。 | -| authorization.iam | object | 否 | | | 用于通过 AWS v4 请求签名执行的基于 AWS IAM 角色的授权。 请参考 [IAM 授权方案](#iam-授权方案)。 | +| authorization.iam | object | 否 | | | 用于通过 AWS v4 请求签名执行的基于 AWS IAM 角色的授权。请参考 [IAM 授权方案](#iam-授权方案)。 | | timeout | integer | 否 | 3000 | [100,...] | 代理请求超时(以毫秒为单位)。 | | ssl_verify | boolean | 否 | true | true/false | 当设置为 `true` 时执行 SSL 验证。 | | keepalive | boolean | 否 | true | true/false | 当设置为 `true` 时,保持连接的活动状态以便重复使用。 | @@ -199,9 +199,9 @@ Content-Type: application/json "Hello, APISIX!" 
``` -## 禁用插件 +## 删除插件 -当你需要禁用该插件时,可以通过如下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: +当你需要删除该插件时,可以通过如下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: ```shell curl http://127.0.0.1:9180/apisix/admin/routes/1 \ diff --git a/docs/zh/latest/plugins/azure-functions.md b/docs/zh/latest/plugins/azure-functions.md index c2eb94a893b4..99b4439eb649 100644 --- a/docs/zh/latest/plugins/azure-functions.md +++ b/docs/zh/latest/plugins/azure-functions.md @@ -185,9 +185,9 @@ Content-Type: text/plain; charset=utf-8 Hello, APISIX ``` -## 禁用插件 +## 删除插件 -当你需要禁用该插件时,可以通过如下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: +当你需要删除该插件时,可以通过如下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: ```shell curl http://127.0.0.1:9180/apisix/admin/routes/1 \ diff --git a/docs/zh/latest/plugins/basic-auth.md b/docs/zh/latest/plugins/basic-auth.md index c094d60d0ecb..eaded4128017 100644 --- a/docs/zh/latest/plugins/basic-auth.md +++ b/docs/zh/latest/plugins/basic-auth.md @@ -129,7 +129,7 @@ HTTP/1.1 401 Unauthorized {"message":"Invalid user authorization"} ``` -## 禁用插件 +## 删除插件 当你需要禁用 `basic-auth` 插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: diff --git a/docs/zh/latest/plugins/batch-requests.md b/docs/zh/latest/plugins/batch-requests.md index 8ae484d05f6e..58a4b37c2760 100644 --- a/docs/zh/latest/plugins/batch-requests.md +++ b/docs/zh/latest/plugins/batch-requests.md @@ -29,15 +29,17 @@ description: 本文介绍了关于 Apache APISIX `batch-request` 插件的基本 ## 描述 -`batch-requests` 插件可以一次接受多个请求并以 [HTTP pipeline](https://en.wikipedia.org/wiki/HTTP_pipelining) 的方式在网关发起多个 HTTP 请求,合并结果后再返回客户端。 +在启用 `batch-requests` 插件后,用户可以通过将多个请求组装成一个请求的形式,把请求发送给网关,网关会从请求体中解析出对应的请求,再分别封装成独立的请求,以 [HTTP pipeline](https://en.wikipedia.org/wiki/HTTP_pipelining) 的方式代替用户向网关自身再发起多个 HTTP 请求,经历路由匹配,转发到对应上游等多个阶段,合并结果后再返回客户端。 + +![batch-request](https://static.apiseven.com/uploads/2023/06/27/ATzEuOn4_batch-request.png) 在客户端需要访问多个 API 的情况下,这将显著提高性能。 :::note -外部批处理请求的 HTTP 请求头(除了以 `Content-` 开始的请求头,例如:`Content-Type`)适用于**批处理**中的每个请求。 +用户原始请求中的请求头(除了以 
`Content-` 开始的请求头,例如:`Content-Type`)将被赋给 HTTP pipeline 中的每个请求,因此对于网关来说,这些以 HTTP pipeline 方式发送给自身的请求与用户直接发起的外部请求没有什么不同,只能访问已经配置好的路由,并将经历完整的鉴权过程,因此不存在安全问题。 -如果在外部请求和单个调用中都指定了相同的 HTTP 请求头,则单个调用的请求头优先。 +如果原始请求的请求头与插件中配置的请求头冲突,则以插件中配置的请求头优先(配置文件中指定的 real_ip_header 除外)。 ::: @@ -67,7 +69,7 @@ plugins: ## 配置插件 -默认情况下,可以发送到 `/apisix/batch-requests` 的最大请求体不能大于 1 MiB。 你可以通过 `apisix/admin/plugin_metadata/batch-requests` 更改插件的此配置: +默认情况下,可以发送到 `/apisix/batch-requests` 的最大请求体不能大于 1 MiB。你可以通过 `apisix/admin/plugin_metadata/batch-requests` 更改插件的此配置: ```shell curl http://127.0.0.1:9180/apisix/admin/plugin_metadata/batch-requests \ @@ -212,7 +214,7 @@ curl --location --request POST 'http://127.0.0.1:9080/apisix/batch-requests' \ ] ``` -## 禁用插件 +## 删除插件 如果你想禁用插件,可以将 `batch-requests` 从配置文件中的插件列表删除,重新加载 APISIX 后即可生效。 diff --git a/docs/zh/latest/plugins/chaitin-waf.md b/docs/zh/latest/plugins/chaitin-waf.md new file mode 100644 index 000000000000..8d8331caac1d --- /dev/null +++ b/docs/zh/latest/plugins/chaitin-waf.md @@ -0,0 +1,253 @@ +--- +title: chaitin-waf +keywords: + - Apache APISIX + - API 网关 + - Plugin + - WAF +description: 本文介绍了关于 Apache APISIX `chaitin-waf` 插件的基本信息及使用方法。 +--- + + + +## 描述 + +在启用 `chaitin-waf` 插件后,流量将被转发给长亭 WAF 服务,用以检测和防止各种 Web 应用程序攻击,以保护应用程序和用户数据的安全。 + +## 响应头 + +根据插件配置,可以选择是否附加额外的响应头。 + +响应头的信息如下: + +- **X-APISIX-CHAITIN-WAF**:APISIX 是否将请求转发给 WAF 服务器。 + - yes:转发 + - no:不转发 + - unhealthy:符合匹配条件,但没有可用的 WAF 服务器 + - err:插件执行过程中出错。此时会附带 **X-APISIX-CHAITIN-WAF-ERROR** 请求头 + - waf-err:与 WAF 服务器交互时出错。此时会附带 **X-APISIX-CHAITIN-WAF-ERROR** 请求头 + - timeout:与 WAF 服务器的交互超时 +- **X-APISIX-CHAITIN-WAF-ERROR**:调试用响应头。APISIX 与 WAF 交互时的错误信息。 +- **X-APISIX-CHAITIN-WAF-TIME**:APISIX 与 WAF 交互所耗费的时间,单位是毫秒。 +- **X-APISIX-CHAITIN-WAF-STATUS**:WAF 服务器返回给 APISIX 的状态码。 +- **X-APISIX-CHAITIN-WAF-ACTION**:WAF 服务器返回给 APISIX 的处理结果。 + - pass:请求合法 + - reject:请求被 WAF 服务器拒绝 +- **X-APISIX-CHAITIN-WAF-SERVER**:调试用响应头。所使用的 WAF 服务器。 + +## 插件元数据 + +| 名称 | 类型 | 必选项 | 默认值 | 描述 | 
+|--------------------------|---------------|-----|-------|--------------------------------------------| +| nodes | array(object) | 必选 | | 长亭 WAF 的地址列表。 | +| nodes[0].host | string | 必选 | | 长亭 WAF 的地址,支持 IPV4、IPV6、Unix Socket 等配置方式。 | +| nodes[0].port | string | 可选 | 80 | 长亭 WAF 的端口。 | +| config | object | 否 | | 长亭 WAF 服务的配置参数值。当路由没有配置时将使用这里所配置的参数。 | +| config.connect_timeout | integer | 否 | 1000 | connect timeout, 毫秒 | +| config.send_timeout | integer | 否 | 1000 | send timeout, 毫秒 | +| config.read_timeout | integer | 否 | 1000 | read timeout, 毫秒 | +| config.req_body_size | integer | 否 | 1024 | 请求体大小,单位为 KB | +| config.keepalive_size | integer | 否 | 256 | 长亭 WAF 服务的最大并发空闲连接数 | +| config.keepalive_timeout | integer | 否 | 60000 | 空闲链接超时,毫秒 | + +一个典型的示例配置如下: + +```bash +curl http://127.0.0.1:9180/apisix/admin/plugin_metadata/chaitin-waf -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "nodes":[ + { + "host": "unix:/path/to/safeline/resources/detector/snserver.sock", + "port": 8000 + } + ] +}' +``` + +## 属性 + +| 名称 | 类型 | 必选项 | 默认值 | 描述 | +|--------------------------|---------------|-----|-------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| match | array[object] | 否 | | 匹配规则列表,默认为空且规则将被无条件执行。 | +| match.vars | array[array] | 否 | | 由一个或多个 `{var, operator, val}` 元素组成的列表,例如:`{"arg_name", "==", "json"}`,表示当前请求参数 `name` 是 `json`。这里的 `var` 与 NGINX 内部自身变量命名是保持一致,所以也可以使用 `request_uri`、`host` 等;对于已支持的运算符,具体用法请参考 [lua-resty-expr](https://github.com/api7/lua-resty-expr#operator-list) 的 `operator-list` 部分。 | +| append_waf_resp_header | bool | 否 | true | 是否添加响应头 | +| append_waf_debug_header | bool | 否 | false | 是否添加调试用响应头,`add_header` 为 `true` 时才生效 | +| config | object | 否 | | 长亭 WAF 服务的配置参数值。当路由没有配置时将使用元数据里所配置的参数。 | +| 
config.connect_timeout | integer | 否 | | connect timeout, 毫秒 | +| config.send_timeout | integer | 否 | | send timeout, 毫秒 | +| config.read_timeout | integer | 否 | | read timeout, 毫秒 | +| config.req_body_size | integer | 否 | | 请求体大小,单位为 KB | +| config.keepalive_size | integer | 否 | | 长亭 WAF 服务的最大并发空闲连接数 | +| config.keepalive_timeout | integer | 否 | | 空闲链接超时,毫秒 | + +一个典型的示例配置如下,这里使用 `httpbun.org` 作为示例后端,可以按需替换: + +```bash +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "uri": "/*", + "plugins": { + "chaitin-waf": { + "match": [ + { + "vars": [ + ["http_waf","==","true"] + ] + } + ] + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbun.org:80": 1 + } + } +}' +``` + +## 测试插件 + +以上述的示例配置为例进行测试。 + +不满足匹配条件时,请求可以正常触达: + +```bash +curl -H "Host: httpbun.org" http://127.0.0.1:9080/get -i + +HTTP/1.1 200 OK +Content-Type: application/json +Content-Length: 408 +Connection: keep-alive +X-APISIX-CHAITIN-WAF: no +Date: Wed, 19 Jul 2023 09:30:42 GMT +X-Powered-By: httpbun/3c0dc05883dd9212ac38b04705037d50b02f2596 +Server: APISIX/3.3.0 + +{ + "args": {}, + "headers": { + "Accept": "*/*", + "Connection": "close", + "Host": "httpbun.org", + "User-Agent": "curl/8.1.2", + "X-Forwarded-For": "127.0.0.1", + "X-Forwarded-Host": "httpbun.org", + "X-Forwarded-Port": "9080", + "X-Forwarded-Proto": "http", + "X-Real-Ip": "127.0.0.1" + }, + "method": "GET", + "origin": "127.0.0.1, 122.231.76.178", + "url": "http://httpbun.org/get" +} +``` + +面对潜在的注入请求也原样转发并遇到 404 错误: + +```bash +curl -H "Host: httpbun.org" http://127.0.0.1:9080/getid=1%20AND%201=1 -i + +HTTP/1.1 404 Not Found +Content-Type: text/plain; charset=utf-8 +Content-Length: 19 +Connection: keep-alive +X-APISIX-CHAITIN-WAF: no +Date: Wed, 19 Jul 2023 09:30:28 GMT +X-Content-Type-Options: nosniff +X-Powered-By: httpbun/3c0dc05883dd9212ac38b04705037d50b02f2596 +Server: APISIX/3.3.0 + +404 page not found +``` + +当满足匹配条件时,正常请求依然可以触达上游: + +```bash +curl -H 
"Host: httpbun.org" -H "waf: true" http://127.0.0.1:9080/get -i + +HTTP/1.1 200 OK +Content-Type: application/json +Content-Length: 427 +Connection: keep-alive +X-APISIX-CHAITIN-WAF-TIME: 2 +X-APISIX-CHAITIN-WAF-STATUS: 200 +X-APISIX-CHAITIN-WAF: yes +X-APISIX-CHAITIN-WAF-ACTION: pass +Date: Wed, 19 Jul 2023 09:29:58 GMT +X-Powered-By: httpbun/3c0dc05883dd9212ac38b04705037d50b02f2596 +Server: APISIX/3.3.0 + +{ + "args": {}, + "headers": { + "Accept": "*/*", + "Connection": "close", + "Host": "httpbun.org", + "User-Agent": "curl/8.1.2", + "Waf": "true", + "X-Forwarded-For": "127.0.0.1", + "X-Forwarded-Host": "httpbun.org", + "X-Forwarded-Port": "9080", + "X-Forwarded-Proto": "http", + "X-Real-Ip": "127.0.0.1" + }, + "method": "GET", + "origin": "127.0.0.1, 122.231.76.178", + "url": "http://httpbun.org/get" +} +``` + +而潜在的攻击请求将会被拦截并返回 403 错误: + +```bash +curl -H "Host: httpbun.org" -H "waf: true" http://127.0.0.1:9080/getid=1%20AND%201=1 -i + +HTTP/1.1 403 Forbidden +Date: Wed, 19 Jul 2023 09:29:06 GMT +Content-Type: text/plain; charset=utf-8 +Transfer-Encoding: chunked +Connection: keep-alive +X-APISIX-CHAITIN-WAF: yes +X-APISIX-CHAITIN-WAF-TIME: 2 +X-APISIX-CHAITIN-WAF-ACTION: reject +X-APISIX-CHAITIN-WAF-STATUS: 403 +Server: APISIX/3.3.0 +Set-Cookie: sl-session=UdywdGL+uGS7q8xMfnJlbQ==; Domain=; Path=/; Max-Age=86400 + +{"code": 403, "success":false, "message": "blocked by Chaitin SafeLine Web Application Firewall", "event_id": "51a268653f2c4189bfa3ec66afbcb26d"} +``` + +## 删除插件 + +当你需要删除该插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: + +```bash +$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "uri": "/*", + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbun.org:80": 1 + } + } +}' +``` diff --git a/docs/zh/latest/plugins/clickhouse-logger.md b/docs/zh/latest/plugins/clickhouse-logger.md index f78cf936cb14..09b07f0b63b2 100644 --- a/docs/zh/latest/plugins/clickhouse-logger.md +++ 
b/docs/zh/latest/plugins/clickhouse-logger.md @@ -40,7 +40,7 @@ description: 本文介绍了 API 网关 Apache APISIX 如何使用 clickhouse-lo | database | string | 是 | | | 使用的数据库。 | | logtable | string | 是 | | | 写入的表名。 | | user | string | 是 | | | ClickHouse 的用户。 | -| password | string | 是 | | | ClickHouse 的密码 。 | +| password | string | 是 | | | ClickHouse 的密码。 | | timeout | integer | 否 | 3 | [1,...] | 发送请求后保持连接活动的时间。 | | name | string | 否 | "clickhouse logger" | | 标识 logger 的唯一标识符。 | | ssl_verify | boolean | 否 | true | [true,false] | 当设置为 `true` 时,验证证书。 | @@ -74,25 +74,16 @@ curl http://127.0.0.1:9180/apisix/admin/plugin_metadata/clickhouse-logger \ }' ``` -首先,你需要在 ClickHouse 数据库中创建一个表来存储日志: - -```sql -CREATE TABLE default.test ( - `host` String, - `client_ip` String, - `route_id` String, - `service_id` String, - `@timestamp` String, - PRIMARY KEY(`@timestamp`) -) ENGINE = MergeTree() +您可以使用 Clickhouse docker 镜像来创建一个容器,如下所示: + +```shell +docker run -d -p 8123:8123 -p 9000:9000 -p 9009:9009 --name some-clickhouse-server --ulimit nofile=262144:262144 clickhouse/clickhouse-server ``` -在 ClickHouse 中执行`select * from default.test;`,将得到类似下面的数据: +然后在您的 ClickHouse 数据库中创建一个表来存储日志。 -``` -┌─host──────┬─client_ip─┬─route_id─┬─@timestamp────────────────┐ -│ 127.0.0.1 │ 127.0.0.1 │ 1 │ 2022-01-17T10:03:10+08:00 │ -└───────────┴───────────┴──────────┴───────────────────────────┘ +```shell +echo "CREATE TABLE default.test (\`host\` String, \`client_ip\` String, \`route_id\` String, \`service_id\` String, \`@timestamp\` String, PRIMARY KEY(\`@timestamp\`)) ENGINE = MergeTree()" | curl 'http://localhost:8123/' ``` ## 启用插件 @@ -136,9 +127,16 @@ curl http://127.0.0.1:9180/apisix/admin/routes/1 \ curl -i http://127.0.0.1:9080/hello ``` -## 禁用插件 +现在,如果您检查表中的行,您将获得以下输出: + +```shell +curl 'http://localhost:8123/?query=select%20*%20from%20default.test' +127.0.0.1 127.0.0.1 1 2023-05-08T19:15:53+05:30 +``` + +## 删除插件 -当你需要禁用该插件时,可通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: +当你需要删除该插件时,可通过以下命令删除相应的 JSON 
配置,APISIX 将会自动重新加载相关配置,无需重启服务: ```shell curl http://127.0.0.1:9180/apisix/admin/routes/1 \ diff --git a/docs/zh/latest/plugins/client-control.md b/docs/zh/latest/plugins/client-control.md index a86cdb95dbfd..581d6c6877ff 100644 --- a/docs/zh/latest/plugins/client-control.md +++ b/docs/zh/latest/plugins/client-control.md @@ -87,9 +87,9 @@ HTTP/1.1 413 Request Entity Too Large ``` -## 禁用插件 +## 删除插件 -当你需要禁用该插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: +当你需要删除该插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: ```shell curl http://127.0.0.1:9180/apisix/admin/routes/1 \ diff --git a/docs/zh/latest/plugins/consumer-restriction.md b/docs/zh/latest/plugins/consumer-restriction.md index 6a4b5f841e7d..55a43234de34 100644 --- a/docs/zh/latest/plugins/consumer-restriction.md +++ b/docs/zh/latest/plugins/consumer-restriction.md @@ -32,14 +32,16 @@ description: Consumer Restriction 插件允许用户根据 Route、Service 或 C ## 属性 -| 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 | -| --------- | ------------- | ------ | -----------------| -------------------------|------------------------| -| type | string | 否 | consumer_name | ["consumer_name", "consumer_group_id", "service_id", "route_id"] | 支持设置访问限制的对象类型。 | -| whitelist | array[string] | 是 | | | 加入白名单的对象,优先级高于 `allowed_by_methods`。 | -| blacklist | array[string] | 是 | | | 加入黑名单的对象,优先级高于 `whitelist`。 | -| rejected_code | integer | 否 | 403 | [200,...] 
| 当请求被拒绝时,返回的 HTTP 状态码。 | -| rejected_msg | string | 否 | | | 当请求被拒绝时,返回的错误信息。 | -| allowed_by_methods | array[object] | 否 | | ["GET", "POST", "PUT", "DELETE", "PATCH", "HEAD", "OPTIONS", "CONNECT", "TRACE", "PURGE"] | 为 Consumer 设置的允许的 HTTP 方法列表。 | +| 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 | +| -------------------------- | ------------- | ------ | ------------- | ------------------------------------------------------------ | ------------------------------------------------------------ | +| type | string | 否 | consumer_name | ["consumer_name", "consumer_group_id", "service_id", "route_id"] | 支持设置访问限制的对象类型。 | +| whitelist | array[string] | 是 | | | 加入白名单的对象,优先级高于`allowed_by_methods`。 | +| blacklist | array[string] | 是 | | | 加入黑名单的对象,优先级高于`whitelist`。 | +| rejected_code | integer | 否 | 403 | [200,...] | 当请求被拒绝时,返回的 HTTP 状态码。 | +| rejected_msg | string | 否 | | | 当请求被拒绝时,返回的错误信息。 | +| allowed_by_methods | array[object] | 否 | | | 一组为 Consumer 设置允许的配置,包括用户名和允许的 HTTP 方法列表。 | +| allowed_by_methods.user | string | 否 | | | 为 Consumer 设置的用户名。 | +| allowed_by_methods.methods | array[string] | 否 | | ["GET", "POST", "PUT", "DELETE", "PATCH", "HEAD", "OPTIONS", "CONNECT", "TRACE", "PURGE"] | 为 Consumer 设置的允许的 HTTP 方法列表。 | :::note @@ -320,9 +322,9 @@ HTTP/1.1 403 Forbidden {"message":"The service_id is forbidden."} ``` -## 禁用插件 +## 删除插件 -当你需要禁用该插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: +当你需要删除该插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: ```shell curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' diff --git a/docs/zh/latest/plugins/cors.md b/docs/zh/latest/plugins/cors.md index 01c9791564eb..e9c60d465a38 100644 --- a/docs/zh/latest/plugins/cors.md +++ b/docs/zh/latest/plugins/cors.md @@ -37,7 +37,7 @@ description: 本文介绍了 Apache APISIX cors 插件的基本信息及使用 | allow_origins | string | 否 | "*" | 允许跨域访问的 Origin,格式为 `scheme://host:port`,示例如 `https://somedomain.com:8081`。如果你有多个 Origin,请使用 `,` 分隔。当 `allow_credential` 为 
`false` 时,可以使用 `*` 来表示允许所有 Origin 通过。你也可以在启用了 `allow_credential` 后使用 `**` 强制允许所有 Origin 均通过,但请注意这样存在安全隐患。 | | allow_methods | string | 否 | "*" | 允许跨域访问的 Method,比如:`GET`,`POST` 等。如果你有多个 Method,请使用 `,` 分割。当 `allow_credential` 为 `false` 时,可以使用 `*` 来表示允许所有 Method 通过。你也可以在启用了 `allow_credential` 后使用 `**` 强制允许所有 Method 都通过,但请注意这样存在安全隐患。 | | allow_headers | string | 否 | "*" | 允许跨域访问时请求方携带哪些非 `CORS 规范` 以外的 Header。如果你有多个 Header,请使用 `,` 分割。当 `allow_credential` 为 `false` 时,可以使用 `*` 来表示允许所有 Header 通过。你也可以在启用了 `allow_credential` 后使用 `**` 强制允许所有 Header 都通过,但请注意这样存在安全隐患。 | -| expose_headers | string | 否 | "*" | 允许跨域访问时响应方携带哪些非 `CORS 规范` 以外的 Header。如果你有多个 Header,请使用 `,` 分割。当 `allow_credential` 为 `false` 时,可以使用 `*` 来表示允许任意 Header 。你也可以在启用了 `allow_credential` 后使用 `**` 强制允许任意 Header,但请注意这样存在安全隐患。 | +| expose_headers | string | 否 | "*" | 允许跨域访问时响应方携带哪些非 `CORS 规范` 以外的 Header。如果你有多个 Header,请使用 `,` 分割。当 `allow_credential` 为 `false` 时,可以使用 `*` 来表示允许任意 Header。你也可以在启用了 `allow_credential` 后使用 `**` 强制允许任意 Header,但请注意这样存在安全隐患。 | | max_age | integer | 否 | 5 | 浏览器缓存 CORS 结果的最大时间,单位为秒。在这个时间范围内,浏览器会复用上一次的检查结果,`-1` 表示不缓存。请注意各个浏览器允许的最大时间不同,详情请参考 [Access-Control-Max-Age - MDN](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Max-Age#directives)。 | | allow_credential | boolean | 否 | false | 是否允许跨域访问的请求方携带凭据(如 Cookie 等)。根据 CORS 规范,如果设置该选项为 `true`,那么将不能在其他属性中使用 `*`。 | | allow_origins_by_regex | array | 否 | nil | 使用正则表达式数组来匹配允许跨域访问的 Origin,如 `[".*\.test.com"]` 可以匹配任何 `test.com` 的子域名 `*`。如果 `allow_origins_by_regex` 属性已经指定,则会忽略 `allow_origins` 属性。 | @@ -99,7 +99,7 @@ curl http://127.0.0.1:9080/hello -v ... 
``` -## 禁用插件 +## 删除插件 当你需要禁用 `cors` 插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: diff --git a/docs/zh/latest/plugins/csrf.md b/docs/zh/latest/plugins/csrf.md index 7476babfe3e0..555b0038ba39 100644 --- a/docs/zh/latest/plugins/csrf.md +++ b/docs/zh/latest/plugins/csrf.md @@ -127,9 +127,9 @@ curl -i http://127.0.0.1:9080/hello -X POST -H 'apisix-csrf-token: eyJyYW5kb20iO HTTP/1.1 200 OK ``` -## 禁用插件 +## 删除插件 -当你需要禁用该插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: +当你需要删除该插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: ```shell curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' diff --git a/docs/zh/latest/plugins/datadog.md b/docs/zh/latest/plugins/datadog.md index b2e0f6f165d2..76219dad55ff 100644 --- a/docs/zh/latest/plugins/datadog.md +++ b/docs/zh/latest/plugins/datadog.md @@ -106,7 +106,7 @@ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f13 现在,任何对 uri `/hello` 的请求都会生成上述指标,并推送到 Datadog Agent 的 DogStatsD 服务器。 -### 禁用插件 +### 删除插件 删除插件配置中相应的 JSON 配置以禁用 `datadog`。 APISIX 插件是支持热加载的,所以不用重新启动 APISIX,配置就能生效。 diff --git a/docs/zh/latest/plugins/dubbo-proxy.md b/docs/zh/latest/plugins/dubbo-proxy.md index 4bd2597fc23b..f8900c7597a2 100644 --- a/docs/zh/latest/plugins/dubbo-proxy.md +++ b/docs/zh/latest/plugins/dubbo-proxy.md @@ -27,7 +27,7 @@ title: dubbo-proxy ## 要求 -如果你正在使用 `OpenResty`, 你需要编译它来支持 `dubbo`, 参考 [APISIX-Base](../FAQ#如何构建-apisix-base-环境)。 +如果你正在使用 `OpenResty`, 你需要编译它来支持 `dubbo`, 参考 [APISIX-Base](../FAQ.md#如何构建-apisix-base-环境)。 ## 运行时属性 @@ -114,7 +114,7 @@ header2: value2 blahblah # "body" will be the body ``` -## 禁用插件 +## 删除插件 当你想在某个路由或服务中禁用 `dubbo-proxy` 插件,非常简单,你可以直接删除插件配置中的 `json` 配置,不需要重启服务就能立即生效: @@ -132,7 +132,7 @@ $ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335 }' ``` -现在 `dubbo-proxy` 插件就已经被禁用了。 此方法同样适用于其他插件。 +现在 `dubbo-proxy` 插件就已经被禁用了。此方法同样适用于其他插件。 如果你想彻底禁用 `dubbo-proxy` 插件, 你需要在 `config.yaml` 中注释掉以下内容: diff 
--git a/docs/zh/latest/plugins/echo.md b/docs/zh/latest/plugins/echo.md index 020c1553df3c..4ce489f09ccc 100644 --- a/docs/zh/latest/plugins/echo.md +++ b/docs/zh/latest/plugins/echo.md @@ -92,7 +92,7 @@ HTTP/1.1 200 OK before the body modification hello world ``` -## 禁用插件 +## 删除插件 当你需要禁用 `echo` 插件时,可通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: diff --git a/docs/zh/latest/plugins/elasticsearch-logger.md b/docs/zh/latest/plugins/elasticsearch-logger.md index a65ec77059af..d81e41be9c8a 100644 --- a/docs/zh/latest/plugins/elasticsearch-logger.md +++ b/docs/zh/latest/plugins/elasticsearch-logger.md @@ -32,7 +32,7 @@ description: 本文介绍了 API 网关 Apache APISIX 的 elasticsearch-logger `elasticsearch-logger` 插件用于将 `Apache APISIX` 的请求日志转发到 `Elasticsearch` 中进行分析和存储。 -启用该插件后 APISIX 将在 `Log Phase` 获取请求上下文信息并序列化为 [Bulk 格式](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html#docs-bulk) 后提交到批处理队列中,当触发批处理队列每批次最大处理容量或刷新缓冲区的最大时间时会将队列中的数据提交到 Elaticsearch 中。更多信息,请参考 [Batch-Processor](./batch-processor.md)。 +启用该插件后 APISIX 将在 `Log Phase` 获取请求上下文信息并序列化为 [Bulk 格式](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html#docs-bulk) 后提交到批处理队列中,当触发批处理队列每批次最大处理容量或刷新缓冲区的最大时间时会将队列中的数据提交到 Elaticsearch 中。更多信息,请参考 [Batch-Processor](../batch-processor.md)。 ## 属性 @@ -192,7 +192,7 @@ curl -X GET "http://127.0.0.1:9200/services/_search" | jq . 
| 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 | | ---------- | ------ | ------ | ------------------------------------------------------------ | ------ | ------------------------------------------------------------ | -| log_format | object | 可选 | {"host": "$host", "@timestamp": "$time_iso8601", "client_ip": "$remote_addr"} | | 以 JSON 格式的键值对来声明日志格式。对于值部分,仅支持字符串。如果是以 `$` 开头,则表明是要获取 [APISIX 变量](https://github.com/apache/apisix/blob/master/docs/en/latest/apisix-variable.md) 或 [Nginx 内置变量](http://nginx.org/en/docs/varindex.html)。请注意,**该设置是全局生效的**,因此在指定 log_format 后,将对所有绑定 elasticsearch-logger 的 Route 或 Service 生效。 | +| log_format | object | 可选 | {"host": "$host", "@timestamp": "$time_iso8601", "client_ip": "$remote_addr"} | | 以 JSON 格式的键值对来声明日志格式。对于值部分,仅支持字符串。如果是以 `$` 开头,则表明是要获取 [APISIX 变量](../apisix-variable.md) 或 [Nginx 内置变量](http://nginx.org/en/docs/varindex.html)。请注意,**该设置是全局生效的**,因此在指定 log_format 后,将对所有绑定 elasticsearch-logger 的 Route 或 Service 生效。 | ### 设置日志格式示例 @@ -255,16 +255,16 @@ curl -X GET "http://127.0.0.1:9200/services/_search" | jq . 
} ``` -### 禁用插件元数据 +### 删除插件元数据 ```shell curl http://127.0.0.1:9180/apisix/admin/plugin_metadata/elasticsearch-logger \ -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X DELETE ``` -## 禁用插件 +## 删除插件 -当你需要禁用该插件时,可以通过如下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: +当你需要删除该插件时,可以通过如下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: ```shell curl http://127.0.0.1:9180/apisix/admin/routes/1 \ diff --git a/docs/zh/latest/plugins/error-log-logger.md b/docs/zh/latest/plugins/error-log-logger.md index 8ca4ee771bfc..cc3a34b41b79 100644 --- a/docs/zh/latest/plugins/error-log-logger.md +++ b/docs/zh/latest/plugins/error-log-logger.md @@ -162,7 +162,7 @@ curl http://127.0.0.1:9180/apisix/admin/plugin_metadata/error-log-logger \ }' ``` -## 禁用插件 +## 删除插件 当你不再需要该插件时,只需要在 `./conf/config.yaml` 中删除或注释该插件即可。 diff --git a/docs/zh/latest/plugins/ext-plugin-post-resp.md b/docs/zh/latest/plugins/ext-plugin-post-resp.md index 11c4b85659cc..35fb72f7a0de 100644 --- a/docs/zh/latest/plugins/ext-plugin-post-resp.md +++ b/docs/zh/latest/plugins/ext-plugin-post-resp.md @@ -90,7 +90,7 @@ curl -i http://127.0.0.1:9080/index.html 在返回结果中可以看到刚刚配置的 Plugin Runner 已经被触发,同时 `ext-plugin-A` 插件也已经被执行。 -## 禁用插件 +## 删除插件 当你需要禁用 `ext-plugin-post-resp` 插件时,可通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: diff --git a/docs/zh/latest/plugins/ext-plugin-pre-req.md b/docs/zh/latest/plugins/ext-plugin-pre-req.md index c3971e85da1c..3ad41500a749 100644 --- a/docs/zh/latest/plugins/ext-plugin-pre-req.md +++ b/docs/zh/latest/plugins/ext-plugin-pre-req.md @@ -81,7 +81,7 @@ curl -i http://127.0.0.1:9080/index.html 在返回结果中可以看到刚刚配置的 Plugin Runner 已经被触发,同时 `ext-plugin-A` 插件也已经被执行。 -## 禁用插件 +## 删除插件 当你需要禁用 `ext-plugin-pre-req` 插件时,可通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: diff --git a/docs/zh/latest/plugins/fault-injection.md b/docs/zh/latest/plugins/fault-injection.md index 1569fca1fc44..9ef8be668ac0 100644 --- a/docs/zh/latest/plugins/fault-injection.md +++ b/docs/zh/latest/plugins/fault-injection.md @@ -38,6 +38,7 @@ 
description: 本文介绍了关于 Apache APISIX `fault-injection` 插件的基 | ----------------- | ------- | ---- | ---------- | -------------------------- | | abort.http_status | integer | 是 | [200, ...] | 返回给客户端的 HTTP 状态码 | | abort.body | string | 否 | | 返回给客户端的响应数据。支持使用 NGINX 变量,如 `client addr: $remote_addr\n`| +| abort.headers | object | 否 | | 返回给客户端的响应头,可以包含 NGINX 变量,如 `$remote_addr` | | abort.percentage | integer | 否 | [0, 100] | 将被中断的请求占比 | | abort.vars | array[] | 否 | | 执行故障注入的规则,当规则匹配通过后才会执行故障注。`vars` 是一个表达式的列表,来自 [lua-resty-expr](https://github.com/api7/lua-resty-expr#operator-list)。 | | delay.duration | number | 是 | | 延迟时间,可以指定小数 | @@ -268,7 +269,7 @@ Server: APISIX/2.2 Fault Injection! ``` -## 禁用插件 +## 删除插件 当你需要禁用 `fault-injection` 插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: diff --git a/docs/zh/latest/plugins/file-logger.md b/docs/zh/latest/plugins/file-logger.md index df591df4e482..313cf9e25307 100644 --- a/docs/zh/latest/plugins/file-logger.md +++ b/docs/zh/latest/plugins/file-logger.md @@ -50,7 +50,8 @@ description: API 网关 Apache APISIX file-logger 插件可用于将日志数据 | path | string | 是 | 自定义输出文件路径。例如:`logs/file.log`。 | | log_format | object | 否 | 以 JSON 格式的键值对来声明日志格式。对于值部分,仅支持字符串。如果是以 `$` 开头,则表明是要获取 [APISIX 变量](../apisix-variable.md) 或 [NGINX 内置变量](http://nginx.org/en/docs/varindex.html)。 | | include_resp_body | boolean | 否 | 当设置为 `true` 时,生成的文件包含响应体。 | -| include_resp_body_expr | array | 否 | 当 `include_resp_body` 属性设置为 `true` 时,使用该属性并基于 [lua-resty-expr](https://github.com/api7/lua-resty-expr) 进行过滤。 如果存在,则仅在表达式计算结果为 `true` 时记录响应。 | +| include_resp_body_expr | array | 否 | 当 `include_resp_body` 属性设置为 `true` 时,使用该属性并基于 [lua-resty-expr](https://github.com/api7/lua-resty-expr) 进行过滤。如果存在,则仅在表达式计算结果为 `true` 时记录响应。 | +| match | array[] | 否 | 当设置了这个选项后,只有匹配规则的日志才会被记录。`match` 是一个表达式列表,具体请参考 [lua-resty-expr](https://github.com/api7/lua-resty-expr#operator-list)。 | ## 插件元数据设置 @@ -124,9 +125,49 @@ hello, world 访问成功后,你可以在对应的 `logs` 目录下找到 `file.log` 文件。 -## 禁用插件 +## 过滤日志 
-当你需要禁用该插件时,可以通过如下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "plugins": { + "file-logger": { + "path": "logs/file.log", + "match": { + { + { "arg_name","==","jack" } + } + } + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:9001": 1 + } + }, + "uri": "/hello" +}' +``` + +测试: + +```shell +curl -i http://127.0.0.1:9080/hello?name=jack +``` + +在 `logs/file.log` 中可以看到日志记录 + +```shell +curl -i http://127.0.0.1:9080/hello?name=rose +``` + +在 `logs/file.log` 中看不到日志记录 + +## 删除插件 + +当你需要删除该插件时,可以通过如下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: ```shell curl http://127.0.0.1:9180/apisix/admin/routes/1 \ diff --git a/docs/zh/latest/plugins/forward-auth.md b/docs/zh/latest/plugins/forward-auth.md index 092215b93427..a7babfe4601e 100644 --- a/docs/zh/latest/plugins/forward-auth.md +++ b/docs/zh/latest/plugins/forward-auth.md @@ -47,6 +47,7 @@ description: 本文介绍了关于 Apache APISIX `forward-auth` 插件的基本 | keepalive | boolean | 否 | true | [true, false] | HTTP 长连接。 | | keepalive_timeout | integer | 否 | 60000ms | [1000, ...]ms | 长连接超时时间。 | | keepalive_pool | integer | 否 | 5 | [1, ...]ms | 长连接池大小。 | +| allow_degradation | boolean | 否 | false | | 当设置为 `true` 时,允许在身份验证服务器不可用时跳过身份验证。 | ## 数据定义 @@ -156,7 +157,7 @@ HTTP/1.1 403 Forbidden Location: http://example.com/auth ``` -## 禁用插件 +## 删除插件 当你需要禁用 `forward-auth` 插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: diff --git a/docs/zh/latest/plugins/gm.md b/docs/zh/latest/plugins/gm.md index 80c2d7b1ca89..68c3d48dded7 100644 --- a/docs/zh/latest/plugins/gm.md +++ b/docs/zh/latest/plugins/gm.md @@ -78,7 +78,7 @@ apisix: ... ssl: ... - # 可按实际情况调整。错误的 cipher 会导致 “no shared cipher” 或 “no ciphers available” 报错。 + # 可按实际情况调整。错误的 cipher 会导致“no shared cipher”或“no ciphers available”报错。 ssl_ciphers: HIGH:!aNULL:!MD5 ``` @@ -186,6 +186,6 @@ New, NTLSv1.1, Cipher is ECDHE-SM2-SM4-CBC-SM3 ... 
``` -## 禁用插件 +## 删除插件 如果不再使用此插件,可将 `gm` 插件从 `./conf/config.yaml` 配置文件中移除,然后重启 APISIX 或者通过插件热加载的接口触发插件的卸载。 diff --git a/docs/zh/latest/plugins/google-cloud-logging.md b/docs/zh/latest/plugins/google-cloud-logging.md index 2fc51a026e51..c720fa515dfa 100644 --- a/docs/zh/latest/plugins/google-cloud-logging.md +++ b/docs/zh/latest/plugins/google-cloud-logging.md @@ -37,6 +37,7 @@ description: API 网关 Apache APISIX 的 google-cloud-logging 插件可用于 | 名称 | 必选项 | 默认值 | 描述 | | ----------------------- | -------- | ------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------- | | auth_config | 是 | | `auth_config` 和 `auth_file` 必须配置一个。 | +| auth_config.client_email | 是 | | 谷歌服务帐号的 email 参数。 | | auth_config.private_key | 是 | | 谷歌服务帐号的私钥参数。 | | auth_config.project_id | 是 | | 谷歌服务帐号的项目 ID。 | | auth_config.token_uri | 是 | https://oauth2.googleapis.com/token | 请求谷歌服务帐户的令牌的 URI。 | @@ -46,7 +47,7 @@ description: API 网关 Apache APISIX 的 google-cloud-logging 插件可用于 | ssl_verify | 否 | true | 当设置为 `true` 时,启用 `SSL` 验证。 | | resource | 否 | {"type": "global"} | 谷歌监控资源,请参考 [MonitoredResource](https://cloud.google.com/logging/docs/reference/v2/rest/v2/MonitoredResource)。 | | log_id | 否 | apisix.apache.org%2Flogs | 谷歌日志 ID,请参考 [LogEntry](https://cloud.google.com/logging/docs/reference/v2/rest/v2/LogEntry)。 | -| log_format | 否 | | 以 JSON 格式的键值对来声明日志格式。对于值部分,仅支持字符串。如果是以 `$` 开头,则表明是要获取 [APISIX 变量](../apisix-variable.md) 或 [NGINX 内置变量](http://nginx.org/en/docs/varindex.html)。 | +| log_format | 否 |{"host": "$host", "@timestamp": "$time_iso8601", "client_ip": "$remote_addr"}| 以 JSON 格式的键值对来声明日志格式。对于值部分,仅支持字符串。如果是以 `$` 开头,则表明是要获取 [APISIX 变量](../apisix-variable.md) 或 [NGINX 内置变量](http://nginx.org/en/docs/varindex.html)。 | 注意:schema 中还定义了 `encrypt_fields = {"auth_config.private_key"}`,这意味着该字段将会被加密存储在 etcd 中。具体参考 [加密存储字段](../plugin-develop.md#加密存储字段)。 @@ -98,6 +99,7 @@ curl 
http://127.0.0.1:9180/apisix/admin/routes/1 \ "google-cloud-logging": { "auth_config":{ "project_id":"apisix", + "client_email":"your service account email@apisix.iam.gserviceaccount.com", "private_key":"-----BEGIN RSA PRIVATE KEY-----your private key-----END RSA PRIVATE KEY-----", "token_uri":"https://oauth2.googleapis.com/token", "scopes":[ @@ -137,6 +139,7 @@ curl http://127.0.0.1:9180/apisix/admin/routes/1 \ "google-cloud-logging": { "auth_config":{ "project_id":"apisix", + "client_email":"your service account email@apisix.iam.gserviceaccount.com", "private_key":"-----BEGIN RSA PRIVATE KEY-----your private key-----END RSA PRIVATE KEY-----" } } @@ -167,9 +170,9 @@ hello, world 访问成功后,你可以登录 [Google Cloud Logging Service](https://console.cloud.google.com/logs/viewer) 查看相关日志。 -## 禁用插件 +## 删除插件 -当你需要禁用该插件时,可以通过如下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: +当你需要删除该插件时,可以通过如下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: ```shell curl http://127.0.0.1:9180/apisix/admin/routes/1 \ diff --git a/docs/zh/latest/plugins/grpc-transcode.md b/docs/zh/latest/plugins/grpc-transcode.md index 5d0760768d24..4c03f4cf9735 100644 --- a/docs/zh/latest/plugins/grpc-transcode.md +++ b/docs/zh/latest/plugins/grpc-transcode.md @@ -103,34 +103,14 @@ protoc --include_imports --descriptor_set_out=proto.pb proto/helloworld.proto 然后将 `proto.pb` 的内容作为 proto 的 `content` 字段提交。 -由于 proto 的内容是二进制的,我们需要使用以下 Python 脚本将其转换成 `base64`: - -```python -#!/usr/bin/env python -# coding: utf-8 - -import base64 -import sys - -# sudo pip install requests -import requests - -if len(sys.argv) <= 1: - print("bad argument") - sys.exit(1) -with open(sys.argv[1], 'rb') as f: - content = base64.b64encode(f.read()) -id = sys.argv[2] -api_key = "edd1c9f034335f136f87ad84b625c8f1" # use your API key - -reqParam = { - "content": content, -} -resp = requests.put("http://127.0.0.1:9180/apisix/admin/protos/" + id, json=reqParam, headers={ - "X-API-KEY": api_key, -}) -print(resp.status_code) -print(resp.text) +由于 proto 
的内容是二进制的,我们需要使用以下 shell 命令将其转换成 `base64`: + +```shell +curl http://127.0.0.1:9180/apisix/admin/protos/1 \ +-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "content" : "'"$(base64 -w0 /path/to/proto.pb)"'" +}' ``` 该脚本将使用 `.pb` 文件和要创建的 `id`,将 proto 的内容转换成 `base64`,并使用转换后的内容调用 Admin API。 @@ -259,7 +239,7 @@ Trailer: grpc-message 上传 proto 文件: ```shell -curl http://127.0.0.1:9080/apisix/admin/proto/1 \ +curl http://127.0.0.1:9080/apisix/admin/protos/1 \ -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "content" : "syntax = \"proto3\"; @@ -329,7 +309,7 @@ Server: APISIX web server 注意返回体中还存在未解码的字段,如果需要解码该字段,需要在上传的 proto 文件中加上该字段对应的 `message type`。 ```shell -curl http://127.0.0.1:9080/apisix/admin/proto/1 \ +curl http://127.0.0.1:9080/apisix/admin/protos/1 \ -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "content" : "syntax = \"proto3\"; @@ -396,7 +376,7 @@ Server: APISIX web server {"error":{"details":[{"type":"service","message":"The server is out of service","code":1}],"message":"Out of service","code":14}} ``` -## 禁用插件 +## 删除插件 当你需要禁用 `grpc-transcode` 插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: diff --git a/docs/zh/latest/plugins/grpc-web.md b/docs/zh/latest/plugins/grpc-web.md index b64b74385d7b..48077a1c2400 100644 --- a/docs/zh/latest/plugins/grpc-web.md +++ b/docs/zh/latest/plugins/grpc-web.md @@ -76,7 +76,7 @@ curl http://127.0.0.1:9180/apisix/admin/routes/1 \ ::: -## 禁用插件 +## 删除插件 当你需要禁用 `grpc-web` 插件时,可以通过如下命令删除相应的 `JSON` 配置,APISIX 将会自动重新加载相关配置,无需重启服务: diff --git a/docs/zh/latest/plugins/gzip.md b/docs/zh/latest/plugins/gzip.md index 7cd485f53d59..c97072d27f17 100644 --- a/docs/zh/latest/plugins/gzip.md +++ b/docs/zh/latest/plugins/gzip.md @@ -96,7 +96,7 @@ Warning: curl to output it to your terminal anyway, or consider "--output Warning: " to save to a file. 
``` -## 禁用插件 +## 删除插件 当你需要禁用 `gzip` 插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: diff --git a/docs/zh/latest/plugins/hmac-auth.md b/docs/zh/latest/plugins/hmac-auth.md index de043f2f3ffd..c5a07f5f1efa 100644 --- a/docs/zh/latest/plugins/hmac-auth.md +++ b/docs/zh/latest/plugins/hmac-auth.md @@ -381,9 +381,9 @@ Accept-Ranges: bytes ``` -## 禁用插件 +## 删除插件 -当你需要禁用该插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: +当你需要删除该插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: ```shell curl http://127.0.0.1:9180/apisix/admin/routes/1 \ diff --git a/docs/zh/latest/plugins/http-logger.md b/docs/zh/latest/plugins/http-logger.md index f8f16832d2df..b9bb7674b4e3 100644 --- a/docs/zh/latest/plugins/http-logger.md +++ b/docs/zh/latest/plugins/http-logger.md @@ -43,7 +43,7 @@ description: 本文介绍了 API 网关 Apache APISIX 的 http-logger 插件。 | log_format | object | 否 | | | 以 JSON 格式的键值对来声明日志格式。对于值部分,仅支持字符串。如果是以 `$` 开头,则表明是要获取 [APISIX 变量](../apisix-variable.md) 或 [NGINX 内置变量](http://nginx.org/en/docs/varindex.html)。 | | include_req_body | boolean | 否 | false | [false, true] | 当设置为 `true` 时,将请求体包含在日志中。如果请求体太大而无法保存在内存中,由于 NGINX 的限制,无法记录。 | | include_resp_body | boolean | 否 | false | [false, true] | 当设置为 `true` 时,包含响应体。 | -| include_resp_body_expr | array | 否 | | | 当 `include_resp_body` 属性设置为 `true` 时,使用该属性并基于 [lua-resty-expr](https://github.com/api7/lua-resty-expr) 进行过滤。 如果存在,则仅在表达式计算结果为 `true` 时记录响应。 | +| include_resp_body_expr | array | 否 | | | 当 `include_resp_body` 属性设置为 `true` 时,使用该属性并基于 [lua-resty-expr](https://github.com/api7/lua-resty-expr) 进行过滤。如果存在,则仅在表达式计算结果为 `true` 时记录响应。 | | concat_method | string | 否 | "json" | ["json", "new_line"] | 枚举类型: **json**:对所有待发日志使用 `json.encode` 编码。**new_line**:对每一条待发日志单独使用 `json.encode` 编码并使用 `\n` 连接起来。 | | ssl_verify | boolean | 否 | false | [false, true] | 当设置为 `true` 时验证证书。 | @@ -115,9 +115,9 @@ curl http://127.0.0.1:9180/apisix/admin/routes/1 \ curl -i http://127.0.0.1:9080/hello ``` -## 禁用插件 +## 删除插件 -当你需要禁用该插件时,可以通过如下命令删除相应的 JSON 
配置,APISIX 将会自动重新加载相关配置,无需重启服务: +当你需要删除该插件时,可以通过如下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: ```shell curl http://127.0.0.1:9180/apisix/admin/routes/1 \ diff --git a/docs/zh/latest/plugins/ip-restriction.md b/docs/zh/latest/plugins/ip-restriction.md index e2ac77cc010e..66db9829ade5 100644 --- a/docs/zh/latest/plugins/ip-restriction.md +++ b/docs/zh/latest/plugins/ip-restriction.md @@ -139,7 +139,7 @@ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f13 }' ``` -## 禁用插件 +## 删除插件 当你需要禁用 `ip-restriction` 插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: diff --git a/docs/zh/latest/plugins/jwt-auth.md b/docs/zh/latest/plugins/jwt-auth.md index 78cc3d68ff6d..c06f9e59b557 100644 --- a/docs/zh/latest/plugins/jwt-auth.md +++ b/docs/zh/latest/plugins/jwt-auth.md @@ -60,7 +60,7 @@ Route 端: | cookie | string | 否 | jwt | 设置我们从哪个 cookie 获取 token,优先级低于 query。 | | hide_credentials | boolean | 否 | false | 该参数设置为 `true` 时,则不会将含有认证信息的 header\query\cookie 传递给 Upstream。| -您可以使用 [HashiCorp Vault](https://www.vaultproject.io/) 实施 `jwt-auth`,以从其[加密的 KV 引擎](https://www.vaultproject.io/docs/secrets/kv) 使用 [APISIX Secret](../terminology/secret.md) 资源。 +您可以使用 [HashiCorp Vault](https://www.vaultproject.io/) 实施 `jwt-auth`,以从其[加密的 KV 引擎](https://developer.hashicorp.com/vault/docs/secrets/kv) 使用 [APISIX Secret](../terminology/secret.md) 资源。 ## 接口 @@ -252,7 +252,7 @@ Accept-Ranges: bytes ... 
``` -## 禁用插件 +## 删除插件 当你需要禁用 `jwt-auth` 插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: diff --git a/docs/zh/latest/plugins/kafka-logger.md b/docs/zh/latest/plugins/kafka-logger.md index dee5510765b3..49801f67b4f3 100644 --- a/docs/zh/latest/plugins/kafka-logger.md +++ b/docs/zh/latest/plugins/kafka-logger.md @@ -45,7 +45,7 @@ description: API 网关 Apache APISIX 的 kafka-logger 插件用于将日志作 | brokers.sasl_config.password | string | 是 | | | Kafka broker 中 sasl 配置中的 password,如果 sasl_config 存在,则必须填写 | | kafka_topic | string | 是 | | | 需要推送的 topic。 | | producer_type | string | 否 | async | ["async", "sync"] | 生产者发送消息的模式。 | -| required_acks | integer | 否 | 1 | [0, 1, -1] | 生产者在确认一个请求发送完成之前需要收到的反馈信息的数量。该参数是为了保证发送请求的可靠性。该属性的配置与 Kafka `acks` 属性相同,具体配置请参考 [Apache Kafka 文档](https://kafka.apache.org/documentation/#producerconfigs_acks)。 | +| required_acks | integer | 否 | 1 | [1, -1] | 生产者在确认一个请求发送完成之前需要收到的反馈信息的数量。该参数是为了保证发送请求的可靠性。该属性的配置与 Kafka `acks` 属性相同,具体配置请参考 [Apache Kafka 文档](https://kafka.apache.org/documentation/#producerconfigs_acks)。required_acks 还不支持为 0。 | | key | string | 否 | | | 用于消息分区而分配的密钥。 | | timeout | integer | 否 | 3 | [1,...] 
| 发送数据的超时时间。 | | name | string | 否 | "kafka logger" | | batch processor 的唯一标识。 | @@ -214,9 +214,9 @@ curl http://127.0.0.1:9180/apisix/admin/routes/1 \ curl -i http://127.0.0.1:9080/hello ``` -## 禁用插件 +## 删除插件 -当你需要禁用该插件时,可以通过如下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: +当你需要删除该插件时,可以通过如下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: ```shell curl http://127.0.0.1:9180/apisix/admin/routes/1 \ diff --git a/docs/zh/latest/plugins/key-auth.md b/docs/zh/latest/plugins/key-auth.md index 9b4759b157be..f4a568ec8524 100644 --- a/docs/zh/latest/plugins/key-auth.md +++ b/docs/zh/latest/plugins/key-auth.md @@ -50,7 +50,7 @@ Router 端: | ----------------- | ------ | ----- | ------ |----------------------------------------------------------------------------------------------------------------------------------------------------------| | header | string | 否 | apikey | 设置我们从哪个 header 获取 key。 | | query | string | 否 | apikey | 设置我们从哪个 query string 获取 key,优先级低于 `header`。 | -| hide_credentials | bool | 否 | false | 当设置为 `false` 时将含有认证信息的 header 或 query string 传递给 Upstream。 如果为 `true` 时将删除对应的 header 或 query string,具体删除哪一个取决于是从 header 获取 key 还是从 query string 获取 key。 | +| hide_credentials | bool | 否 | false | 当设置为 `false` 时将含有认证信息的 header 或 query string 传递给 Upstream。如果为 `true` 时将删除对应的 header 或 query string,具体删除哪一个取决于是从 header 获取 key 还是从 query string 获取 key。 | ## 启用插件 @@ -151,7 +151,7 @@ HTTP/1.1 401 Unauthorized {"message":"Invalid API key in request"} ``` -## 禁用插件 +## 删除插件 当你需要禁用 `key-auth` 插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: diff --git a/docs/zh/latest/plugins/ldap-auth.md b/docs/zh/latest/plugins/ldap-auth.md index 8a7d049e4e01..2a4af09bed53 100644 --- a/docs/zh/latest/plugins/ldap-auth.md +++ b/docs/zh/latest/plugins/ldap-auth.md @@ -137,7 +137,7 @@ HTTP/1.1 401 Unauthorized {"message":"Invalid user authorization"} ``` -## 禁用插件 +## 删除插件 当你需要禁用 `ldap-auth` 插件时,可以通过以下命令删除相应的 JSON 配置。APISIX 将自动重新加载,无需重启服务: diff --git a/docs/zh/latest/plugins/limit-conn.md 
b/docs/zh/latest/plugins/limit-conn.md index c51ba2eb1560..794835e42eb9 100644 --- a/docs/zh/latest/plugins/limit-conn.md +++ b/docs/zh/latest/plugins/limit-conn.md @@ -122,9 +122,9 @@ curl -i http://127.0.0.1:9080/index.html?sleep=20 ``` -## 禁用插件 +## 删除插件 -当你需要禁用该插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: +当你需要删除该插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: ```shell curl http://127.0.0.1:9180/apisix/admin/routes/1 \ diff --git a/docs/zh/latest/plugins/limit-count.md b/docs/zh/latest/plugins/limit-count.md index b70f4631a913..dbacdfa00a20 100644 --- a/docs/zh/latest/plugins/limit-count.md +++ b/docs/zh/latest/plugins/limit-count.md @@ -44,15 +44,18 @@ description: 本文介绍了 Apache APISIX limit-count 插件的相关操作, | policy | string | 否 | "local" | ["local", "redis", "redis-cluster"] | 用于检索和增加限制计数的策略。当设置为 `local` 时,计数器被以内存方式保存在节点本地;当设置为 `redis` 时,计数器保存在 Redis 服务节点上,从而可以跨节点共享结果,通常用它来完成全局限速;当设置为 `redis-cluster` 时,使用 Redis 集群而不是单个实例。| | allow_degradation | boolean | 否 | false | | 当插件功能临时不可用时(例如 Redis 超时),当设置为 `true` 时,则表示可以允许插件降级并进行继续请求的操作。 | | show_limit_quota_header | boolean | 否 | true | | 当设置为 `true` 时,在响应头中显示 `X-RateLimit-Limit`(限制的总请求数)和 `X-RateLimit-Remaining`(剩余还可以发送的请求数)字段。 | -| group | string | 否 | | 非空 | 配置相同 group 的路由将共享相同的限流计数器。 | +| group | string | 否 | | 非空 | 配置相同 group 的路由将共享相同的限流计数器。请勿使用先前使用过的值进行配置,插件将报错。 | | redis_host | string | 否 | | | 当使用 `redis` 限速策略时,Redis 服务节点的地址。**当 `policy` 属性设置为 `redis` 时必选。**| | redis_port | integer | 否 | 6379 | [1,...] 
| 当使用 `redis` 限速策略时,Redis 服务节点的端口。| +| redis_username | string | 否 | | | 若使用 Redis ACL 进行身份验证(适用于 Redis 版本 >=6.0),则需要提供 Redis 用户名。若使用 Redis legacy 方式 `requirepass` 进行身份验证,则只需将密码配置在 `redis_password`。当 `policy` 设置为 `redis` 时使用。| | redis_password | string | 否 | | | 当使用 `redis` 或者 `redis-cluster` 限速策略时,Redis 服务节点的密码。| +| redis_ssl | boolean | 否 | false | | 当使用 `redis` 限速策略时,如果设置为 true,则使用 SSL 连接到 `redis` | +| redis_ssl_verify | boolean | 否 | false | | 当使用 `redis` 限速策略时,如果设置为 true,则验证服务器 SSL 证书的有效性,具体请参考 [tcpsock:sslhandshake](https://github.com/openresty/lua-nginx-module#tcpsocksslhandshake). | | redis_database | integer | 否 | 0 | redis_database >= 0 | 当使用 `redis` 限速策略时,Redis 服务节点中使用的 `database`,并且只针对非 Redis 集群模式(单实例模式或者提供单入口的 Redis 公有云服务)生效。| | redis_timeout | integer | 否 | 1000 | [1,...] | 当 `policy` 设置为 `redis` 或 `redis-cluster` 时,Redis 服务节点的超时时间(以毫秒为单位)。| | redis_cluster_nodes | array | 否 | | | 当使用 `redis-cluster` 限速策略时,Redis 集群服务节点的地址列表(至少需要两个地址)。**当 `policy` 属性设置为 `redis-cluster` 时必选。**| | redis_cluster_name | string | 否 | | | 当使用 `redis-cluster` 限速策略时,Redis 集群服务节点的名称。**当 `policy` 设置为 `redis-cluster` 时必选。**| -| redis_cluster_ssl | boolean | 否 | false | | 当使用 `redis-cluster` 限速策略时, 如果设置为 true,则使用 SSL 连接到 `redis-cluster` | +| redis_cluster_ssl | boolean | 否 | false | | 当使用 `redis-cluster` 限速策略时,如果设置为 true,则使用 SSL 连接到 `redis-cluster` | | redis_cluster_ssl_verify | boolean | 否 | false | | 当使用 `redis-cluster` 限速策略时,如果设置为 true,则验证服务器 SSL 证书的有效性 | ## 启用插件 @@ -296,9 +299,9 @@ Server: APISIX web server {"error_msg":"Requests are too frequent, please try again later."} ``` -## 禁用插件 +## 删除插件 -当你需要禁用该插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: +当你需要删除该插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: ```shell curl http://127.0.0.1:9180/apisix/admin/routes/1 \ diff --git a/docs/zh/latest/plugins/limit-req.md b/docs/zh/latest/plugins/limit-req.md index 2de947cbfbd8..6f844611dd2f 100644 --- a/docs/zh/latest/plugins/limit-req.md +++ 
b/docs/zh/latest/plugins/limit-req.md @@ -213,9 +213,9 @@ HTTP/1.1 403 Forbidden ``` -## 禁用插件 +## 删除插件 -当你需要禁用该插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: +当你需要删除该插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: ```shell curl http://127.0.0.1:9180/apisix/admin/routes/1 \ diff --git a/docs/zh/latest/plugins/log-rotate.md b/docs/zh/latest/plugins/log-rotate.md index b2957416dbeb..61a2b9684d5d 100644 --- a/docs/zh/latest/plugins/log-rotate.md +++ b/docs/zh/latest/plugins/log-rotate.md @@ -39,7 +39,7 @@ description: 云原生 API 网关 Apache APISIX log-rotate 插件用于定期切 | ------------------ | ------- | ------ | ------- | ------------- | ---------------------------------------------------------------------------- | | interval | integer | 是 | 60 * 60 | | 每间隔多长时间切分一次日志,以秒为单位。 | | max_kept | integer | 是 | 24 * 7 | | 最多保留多少份历史日志,超过指定数量后,自动删除老文件。 | -| max_size | integer | 否 | -1 | | 日志文件超过指定大小时进行切分,单位为 Byte 。如果 `max_size` 小于 0 或者根据 `interval` 计算的时间到达时,将不会根据 `max_size` 切分日志。 | +| max_size | integer | 否 | -1 | | 日志文件超过指定大小时进行切分,单位为 Byte。如果 `max_size` 小于 0 或者根据 `interval` 计算的时间到达时,将不会根据 `max_size` 切分日志。 | | enable_compression | boolean | 否 | false | [false, true] | 当设置为 `true` 时,启用日志文件压缩。该功能需要在系统中安装 `tar` 。 | 开启该插件后,就会按照参数自动切分日志文件了。比如以下示例是根据 `interval: 10` 和 `max_kept: 10` 得到的样本。 @@ -99,7 +99,7 @@ plugin_attr: 配置完成,你需要重新加载 APISIX。 -## 禁用插件 +## 删除插件 当你不再需要该插件时,只需要在 `./conf/config.yaml` 中删除或注释该插件即可。 diff --git a/docs/zh/latest/plugins/loggly.md b/docs/zh/latest/plugins/loggly.md index e02f640b9457..9c5b74010696 100644 --- a/docs/zh/latest/plugins/loggly.md +++ b/docs/zh/latest/plugins/loggly.md @@ -38,7 +38,7 @@ description: API 网关 Apache APISIX loggly 插件可用于将日志转发到 S | 名称 | 类型 | 必选项 | 默认值 | 描述 | |------------------------|---------------|----------|---------|---------------------------------------------------------------------------------------------------------------------------------------------------| | customer_token | string | 是 | | 将日志发送到 Loggly 
时使用的唯一标识符,以确保将日志发送到正确的组织帐户。 | -| severity | string (enum) | 否 | INFO | Syslog 日志事件的严重性级别。 包括:`DEBUG`、`INFO`、`NOTICE`、`WARNING`、`ERR`、`CRIT`、`ALERT` 和 `EMEGR`。 | +| severity | string (enum) | 否 | INFO | Syslog 日志事件的严重性级别。包括:`DEBUG`、`INFO`、`NOTICE`、`WARNING`、`ERR`、`CRIT`、`ALERT` 和 `EMERG`。 | | severity_map | object | 否 | nil | 一种将上游 HTTP 响应代码映射到 Syslog 中的方法。 `key-value`,其中 `key` 是 HTTP 响应代码,`value`是 Syslog 严重级别。例如`{"410": "CRIT"}`。 | | tags | array | 否 | | 元数据将包含在任何事件日志中,以帮助进行分段和过滤。 | | log_format | object | 否 | | | 以 JSON 格式的键值对来声明日志格式。对于值部分,仅支持字符串。如果是以 `$` 开头,则表明是要获取 [APISIX 变量](../apisix-variable.md) 或 [NGINX 内置变量](http://nginx.org/en/docs/varindex.html)。 | @@ -57,7 +57,7 @@ description: API 网关 Apache APISIX loggly 插件可用于将日志转发到 S | 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 | |------------|---------|-------|----------------------|--------------------------------|---------------------------------------------------------------------| | host | string | 否 | "logs-01.loggly.com" | | 发送日志的主机的端点。 | -| port | integer | 否 | 514 | | 要连接的 Loggly 端口。 仅用于 `syslog` 协议。 | +| port | integer | 否 | 514 | | 要连接的 Loggly 端口。仅用于 `syslog` 协议。 | | timeout | integer | 否 | 5000 | | 发送数据请求超时时间(以毫秒为单位)。 | | protocol | string | 否 | "syslog" | [ "syslog", "http", "https" ] | 将日志发送到 Loggly 的协议。 | | log_format | object | 否 | nil | | 以 JSON 格式的键值对来声明日志格式。对于值部分,仅支持字符串。如果是以 `$` 开头,则表明是要获取 [APISIX 变量](../../../en/latest/apisix-variable.md) 或 [NGINX 内置变量](http://nginx.org/en/docs/varindex.html)。 | @@ -147,9 +147,9 @@ curl -i http://127.0.0.1:9080/index.html ![Loggly Dashboard](../../../assets/images/plugin/loggly-dashboard.png) -## 禁用插件 +## 删除插件 -当你需要禁用该插件时,可以通过如下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: +当你需要删除该插件时,可以通过如下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: ```shell curl http://127.0.0.1:9180/apisix/admin/routes/1 \ diff --git a/docs/zh/latest/plugins/loki-logger.md b/docs/zh/latest/plugins/loki-logger.md new file mode 100644 index 000000000000..c075debf2efb --- /dev/null +++ 
b/docs/zh/latest/plugins/loki-logger.md @@ -0,0 +1,165 @@ +--- +title: loki-logger +keywords: + - Apache APISIX + - API 网关 + - Plugin + - Loki-logger + - Grafana Loki +description: 本文件包含关于 Apache APISIX loki-logger 插件的信息。 +--- + + + +## 描述 + +`loki-logger` 插件用于将日志转发到 [Grafana Loki](https://grafana.com/oss/loki/) 进行分析和存储。 + +当启用该插件时,APISIX 将把请求上下文信息序列化为 [JSON 中的日志条目](https://grafana.com/docs/loki/latest/api/#push-log-entries-to-loki) 并将其提交到批处理队列中。当队列中的数据量超过最大批处理大小时,数据将被推送到 Grafana Loki。有关更多详细信息,请参阅批处理处理器 [batch processor](../batch-processor.md)。 + +## 属性 + +| 名称 | 类型 | 必选项 | 默认值 | 描述 | +|--|---|---|---|---| +| endpoint_addrs | array[string] | True | | Loki API 基础 URL,格式如 http://127.0.0.1:3100,支持 HTTPS 和域名。如果配置了多个端点,它们将随机选择一个进行写入 | +| endpoint_uri | string | False | /loki/api/v1/push | 如果您正在使用与 Loki Push API 兼容的日志收集服务,您可以使用此配置项自定义 API 路径。 | +| tenant_id | string | False | fake | Loki 租户 ID。根据 Loki 的 [多租户文档](https://grafana.com/docs/loki/latest/operations/multi-tenancy/#multi-tenancy),在单租户模式下,默认值设置为 `fake`。 | +| log_labels | object | False | {job = "apisix"} | Loki 日志标签。您可以使用 [APISIX 变量](../apisix-variable.md) 和 [Nginx 变量](http://nginx.org/en/docs/varindex.html) 只需在字符串前面加上 `$` 符号即可,可以使用单个变量或组合变量,例如 `$host` 或 `$remote_addr:$remote_port`。 | +| ssl_verify | boolean | False | true | 当设置为 `true` 时,将验证 SSL 证书。 | +| timeout | integer | False | 3000ms | Loki 服务 HTTP 调用的超时时间,范围从 1 到 60,000 毫秒。 | +| keepalive | boolean | False | true | 当设置为 `true` 时,会保持连接以供多个请求使用。 | +| keepalive_timeout | integer | False | 60000ms | 连接空闲时间后关闭连接。范围大于或等于 1000 毫秒。 | +| keepalive_pool | integer | False | 5 | 连接池限制。范围大于或等于 1。 | +| log_format | object | False | | 以 JSON 格式声明的键值对形式的日志格式。值仅支持字符串类型。可以通过在字符串前面加上 `$` 来使用 [APISIX 变量](../apisix-variable.md) 和 [Nginx 变量](http://nginx.org/en/docs/varindex.html) 。 | +| include_req_body | boolean | False | false | 当设置为 `true` 时,日志中将包含请求体。如果请求体太大而无法在内存中保存,则由于 Nginx 的限制,无法记录请求体。| +| include_req_body_expr | array | False | | 当 `include_req_body` 属性设置为 `true` 
时的过滤器。只有当此处设置的表达式求值为 `true` 时,才会记录请求体。有关更多信息,请参阅 [lua-resty-expr](https://github.com/api7/lua-resty-expr) 。 | +| include_resp_body | boolean | False | false | 当设置为 `true` 时,日志中将包含响应体。 | +| include_resp_body_expr | array | False | | 当 `include_resp_body` 属性设置为 `true` 时的过滤器。只有当此处设置的表达式求值为 `true` 时,才会记录响应体。有关更多信息,请参阅 [lua-resty-expr](https://github.com/api7/lua-resty-expr) 。 | + +该插件支持使用批处理器对条目(日志/数据)进行批量聚合和处理,避免了频繁提交数据的需求。批处理器每隔 `5` 秒或当队列中的数据达到 `1000` 时提交数据。有关更多信息或设置自定义配置,请参阅 [批处理器](../batch-processor.md#configuration)。 + +## 元数据 + +您还可以通过配置插件元数据来设置日志的格式。以下配置项可供选择: + +| 名称 | 类型 | 必选项 | 默认值 | 描述 | +|------|------|----------|--|-------------| +| log_format | object | False | {"host": "$host", "@timestamp": "$time_iso8601", "client_ip": "$remote_addr"} | 日志格式以 JSON 格式声明为键值对。值只支持字符串类型。可以通过在字符串前面加上 `$` 来使用 [APISIX 变量](../apisix-variable.md) 和 [Nginx 变量](http://nginx.org/en/docs/varindex.html) 。 | + +:::info 重要提示 + +配置插件元数据具有全局范围。这意味着它将对使用 `loki-logger` 插件的所有路由和服务生效。 + +::: + +以下示例展示了如何通过 Admin API 进行配置: + +```shell +curl http://127.0.0.1:9180/apisix/admin/plugin_metadata/loki-logger -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "log_format": { + "host": "$host", + "@timestamp": "$time_iso8601", + "client_ip": "$remote_addr" + } +}' +``` + +使用这个配置,您的日志将被格式化为以下形式: + +```shell +{"host":"localhost","@timestamp":"2020-09-23T19:05:05-04:00","client_ip":"127.0.0.1","route_id":"1"} +{"host":"localhost","@timestamp":"2020-09-23T19:05:05-04:00","client_ip":"127.0.0.1","route_id":"1"} +``` + +## 启用插件 + +以下示例展示了如何在特定的路由上启用 `loki-logger` 插件: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "plugins": { + "loki-logger": { + "endpoint_addrs" : ["http://127.0.0.1:3100"] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" +}' +``` + +## 示例用法 + +现在,如果您向 APISIX 发出请求,该请求将被记录在您的 Loki 服务器中: + +```shell +curl -i http://127.0.0.1:9080/hello 
+``` + +## 删除插件 + +当您需要删除 `loki-logger` 插件时,您可以使用以下命令删除相应的 JSON 配置,APISIX 将自动重新加载相关配置,而无需重启服务: + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "methods": ["GET"], + "uri": "/hello", + "plugins": {}, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } +}' +``` + +## FAQ + +### 日志未正确推送 + +请查看 `error.log` 文件以获取此类日志。 + +```text +2023/04/30 13:45:46 [error] 19381#19381: *1075673 [lua] batch-processor.lua:95: Batch Processor[loki logger] failed to process entries: loki server returned status: 401, body: no org id, context: ngx.timer, client: 127.0.0.1, server: 0.0.0.0:9081 +``` + +可以根据错误代码 `failed to process entries: loki server returned status: 401, body: no org id` 和 loki 服务器的响应正文来诊断错误。 + +### 当请求每秒 (RPS) 较高时出现错误? + +- 请确保 `keepalive` 相关的配置已正确设置。有关更多信息,请参阅[属性](#属性) 。 +- 请检查 `error.log` 中的日志,查找此类日志。 + + ```text + 2023/04/30 13:49:34 [error] 19381#19381: *1082680 [lua] batch-processor.lua:95: Batch Processor[loki logger] failed to process entries: loki server returned status: 429, body: Ingestion rate limit exceeded for user tenant_1 (limit: 4194304 bytes/sec) while attempting to ingest '1000' lines totaling '616307' bytes, reduce log volume or contact your Loki administrator to see if the limit can be increased, context: ngx.timer, client: 127.0.0.1, server: 0.0.0.0:9081 + ``` + + - 通常与高 QPS 相关的日志如上所示。错误信息为:`Ingestion rate limit exceeded for user tenant_1 (limit: 4194304 bytes/sec) while attempting to ingest '1000' lines totaling '616307' bytes, reduce log volume or contact your Loki administrator to see if the limit can be increased`。 + - 请参考 [Loki 文档](https://grafana.com/docs/loki/latest/configuration/#limits_config) ,添加默认日志量和突发日志量的限制,例如 `ingestion_rate_mb` 和 `ingestion_burst_size_mb`。 + + 在开发过程中进行测试时,将 `ingestion_burst_size_mb` 设置为 100 可以确保 APISIX 以至少 10000 RPS 的速率正确推送日志。 diff --git a/docs/zh/latest/plugins/mocking.md b/docs/zh/latest/plugins/mocking.md 
index c6c944f31927..c6ea804c7599 100644 --- a/docs/zh/latest/plugins/mocking.md +++ b/docs/zh/latest/plugins/mocking.md @@ -41,6 +41,7 @@ description: 本文介绍了关于 Apache APISIX `mocking` 插件的基本信息 | response_example| string | 否 | | 返回响应的 Body,支持使用变量,例如 `$remote_addr $consumer_name`,与 `response_schema` 字段二选一。 | | response_schema | object | 否 | | 指定响应的 `jsonschema` 对象,未指定 `response_example` 字段时生效。 | | with_mock_header| boolean| 否 | true | 当设置为 `true` 时,将添加响应头 `x-mock-by: APISIX/{version}`。设置为 `false` 时则不添加该响应头。 | +| response_headers| object | 否 | | 要在模拟响应中添加的标头。示例:`{"X-Foo": "bar", "X-Few": "baz"}` | JSON Schema 在其字段中支持以下类型: @@ -224,7 +225,7 @@ Server: APISIX/2.10.0 {"a":1,"b":2} ``` -## 禁用插件 +## 删除插件 当你需要禁用 `mocking` 插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: diff --git a/docs/zh/latest/plugins/mqtt-proxy.md b/docs/zh/latest/plugins/mqtt-proxy.md index 7cd779787dbc..edee5b141062 100644 --- a/docs/zh/latest/plugins/mqtt-proxy.md +++ b/docs/zh/latest/plugins/mqtt-proxy.md @@ -50,7 +50,6 @@ description: 本文档介绍了 Apache APISIX mqtt-proxy 插件的信息,通 http: 'radixtree_uri' ssl: 'radixtree_sni' stream_proxy: # TCP/UDP proxy - only: false # 如需 HTTP 与 Stream 代理同时生效,需要增加该键值 tcp: # TCP proxy port list - 9100 dns_resolver: @@ -155,9 +154,9 @@ curl 127.0.0.1:9180/apisix/admin/stream_routes/1 \ ::: -## 禁用插件 +## 删除插件 -当你需要禁用该插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: +当你需要删除该插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: ```shell curl http://127.0.0.1:9180/apisix/admin/stream_routes/1 \ diff --git a/docs/zh/latest/plugins/node-status.md b/docs/zh/latest/plugins/node-status.md index 824751d324a8..34231f0d7a35 100644 --- a/docs/zh/latest/plugins/node-status.md +++ b/docs/zh/latest/plugins/node-status.md @@ -97,7 +97,7 @@ Server: APISIX web server | reading | 当前正在读取请求头的连接数。 | | id | APISIX UID 信息,保存在 `./conf/apisix.uid` 文件中。 | -## 禁用插件 +## 删除插件 如果你不再需要该插件,可以从配置文件 (`./conf/config.yaml`) 中删除它: diff --git a/docs/zh/latest/plugins/opa.md b/docs/zh/latest/plugins/opa.md 
index 1f02964c4f8b..8ca473876dad 100644 --- a/docs/zh/latest/plugins/opa.md +++ b/docs/zh/latest/plugins/opa.md @@ -299,7 +299,7 @@ curl -X GET 127.0.0.1:9080/get } ``` -## 禁用插件 +## 删除插件 当你需要禁用 `opa` 插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: diff --git a/docs/zh/latest/plugins/openfunction.md b/docs/zh/latest/plugins/openfunction.md index 2b3eb8df7fa3..cc88feb6f527 100644 --- a/docs/zh/latest/plugins/openfunction.md +++ b/docs/zh/latest/plugins/openfunction.md @@ -142,7 +142,7 @@ curl http://127.0.0.1:9080/hello/123 Hello, 123! ``` -## 禁用插件 +## 删除插件 当你需要禁用 `openfunction` 插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: diff --git a/docs/zh/latest/plugins/openid-connect.md b/docs/zh/latest/plugins/openid-connect.md index c914f5241511..aececc4986e0 100644 --- a/docs/zh/latest/plugins/openid-connect.md +++ b/docs/zh/latest/plugins/openid-connect.md @@ -60,6 +60,13 @@ description: OpenID Connect(OIDC)是基于 OAuth 2.0 的身份认证协议 | set_refresh_token_header | boolean | 否 | false | | 当设置为 `true` 并且刷新令牌可用时,则会将该属性设置在`X-Refresh-Token`请求头中。 | | session | object | 否 | | | 当设置 bearer_only 为 false 时,openid-connect 插件将使用 Authorization Code 在 IDP 上进行认证,因此你必须设置 session 相关设置。 | | session.secret | string | 是 | 自动生成 | 16 个以上字符 | 用于 session 加密和 HMAC 计算的密钥。 | +| proxy_opts | object | 否 | | | 给 openid-connect 插件配置一个 proxy。 | +| proxy_opts | object | 否 | | | 用来访问身份认证服务器的代理服务器。 | +| proxy_opts.proxy_opts.http_proxy | string | 否 | | http://proxy-server:port | HTTP 代理服务器地址。 | +| proxy_opts.proxy_opts.https_proxy | string | 否 | | http://proxy-server:port | HTTPS 代理服务器地址。 | +| proxy_opts.http_proxy_authorization | string | 否 | | Basic [base64 username:password] | `http_proxy` 默认的 `Proxy-Authorization` 请求头参数值。 | +| proxy_opts.https_proxy_authorization | string | 否 | | Basic [base64 username:password] | 与`http_proxy_authorization`相同,但与`https_proxy`一起使用(因为使用 HTTPS 时,授权是在连接时完成的,因此不能通过传递 Proxy-Authorization 请求头来覆盖此授权)。 | +| proxy_opts.no_proxy | string | 否 | | | 不应被代理的主机的逗号分隔列表。 | 
注意:schema 中还定义了 `encrypt_fields = {"client_secret"}`,这意味着该字段将会被加密存储在 etcd 中。具体参考 [加密存储字段](../plugin-develop.md#加密存储字段)。 diff --git a/docs/zh/latest/plugins/opentelemetry.md b/docs/zh/latest/plugins/opentelemetry.md index a1a9faf856cf..474aafb012d3 100644 --- a/docs/zh/latest/plugins/opentelemetry.md +++ b/docs/zh/latest/plugins/opentelemetry.md @@ -147,7 +147,7 @@ curl http://127.0.0.1:9180/apisix/admin/routes/1 \ }' ``` -## 禁用插件 +## 删除插件 当你需要禁用 `opentelemetry` 插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: diff --git a/docs/zh/latest/plugins/openwhisk.md b/docs/zh/latest/plugins/openwhisk.md index a079bb5b3123..061c2955bf00 100644 --- a/docs/zh/latest/plugins/openwhisk.md +++ b/docs/zh/latest/plugins/openwhisk.md @@ -62,7 +62,7 @@ description: 本文介绍了关于 Apache APISIX openwhisk 插件的基本信息 ### 搭建 Apache OpenWhisk 测试环境 -1. 在使用 `openwhisk` 插件之前,你需要通过以下命令运行 OpenWhisk stand-alone 模式。请确保当前环境中已经安装 Docker 软件。 +1. 在使用 `openwhisk` 插件之前,你需要通过以下命令运行 OpenWhisk standalone 模式。请确保当前环境中已经安装 Docker 软件。 ```shell docker run --rm -d \ @@ -118,9 +118,9 @@ curl -i http://127.0.0.1:9080/hello { "ready": true } ``` -## 禁用插件 +## 删除插件 -当你需要禁用该插件时,可以通过如下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: +当你需要删除该插件时,可以通过如下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: ```shell curl http://127.0.0.1:9180/apisix/admin/routes/1 \ diff --git a/docs/zh/latest/plugins/prometheus.md b/docs/zh/latest/plugins/prometheus.md index 8afc3fe6570f..ea0a9d096639 100644 --- a/docs/zh/latest/plugins/prometheus.md +++ b/docs/zh/latest/plugins/prometheus.md @@ -59,6 +59,25 @@ plugin_attr: export_uri: /apisix/metrics ``` +### 如何修改延迟指标中的 `default_buckets` + +`DEFAULT_BUCKETS` 是 `http_latency` 指标中 bucket 数组的默认值。 + +你可以通过修改配置文件中的 `default_buckets` 来重新指定 `DEFAULT_BUCKETS` + +配置示例如下: + +```yaml title="conf/config.yaml" +plugin_attr: + prometheus: + default_buckets: + - 15 + - 55 + - 105 + - 205 + - 505 +``` + ## API `prometheus` 插件会增加 `/apisix/prometheus/metrics` 接口或者你自定义的 URI 来暴露其指标信息。 @@ -208,6 +227,13 @@ scrape_configs: - 
Info: 当前 APISIX 节点信息。 - Shared dict: APISIX 中所有共享内存的容量以及剩余可用空间。 +- `apisix_upstream_status`: 上游健康检查的节点状态,`1` 表示健康,`0` 表示不健康。属性如下所示: + + | 名称 | 描述 | + |--------------|-------------------------------------------------------------------------------------------------------------------------------| + | name | 上游所依附的资源 ID,例如 `/apisix/routes/1`, `/apisix/upstreams/1`. | + | ip | 上游节点的 IP 地址。 | + | port | 上游节点的端口号。 | 以下是 APISIX 的原始的指标数据集: @@ -297,9 +323,13 @@ apisix_shared_dict_free_space_bytes{name="balancer-ewma-locks"} 10412032 apisix_shared_dict_free_space_bytes{name="discovery"} 1032192 apisix_shared_dict_free_space_bytes{name="etcd-cluster-health-check"} 10412032 ... +# HELP apisix_upstream_status Upstream status from health check +# TYPE apisix_upstream_status gauge +apisix_upstream_status{name="/apisix/routes/1",ip="100.24.156.8",port="80"} 0 +apisix_upstream_status{name="/apisix/routes/1",ip="52.86.68.46",port="80"} 1 ``` -## 禁用插件 +## 删除插件 当你需要禁用 `prometheus` 插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: diff --git a/docs/zh/latest/plugins/proxy-cache.md b/docs/zh/latest/plugins/proxy-cache.md index 0fb5c9acb8e5..ca4bea58b7ad 100644 --- a/docs/zh/latest/plugins/proxy-cache.md +++ b/docs/zh/latest/plugins/proxy-cache.md @@ -48,7 +48,7 @@ description: 本文介绍了 Apache APISIX proxy-cache 插件的相关操作, :::note 注意 - 对于基于磁盘的缓存,不能动态配置缓存的过期时间,只能通过后端服务响应头 `Expires` 或 `Cache-Control` 来设置过期时间,当后端响应头中没有 `Expires` 或 `Cache-Control` 时,默认缓存时间为 10 秒钟 -- 当上游服务不可用时, APISIX 将返回 `502` 或 `504` HTTP 状态码,默认缓存时间为 10 秒钟; +- 当上游服务不可用时,APISIX 将返回 `502` 或 `504` HTTP 状态码,默认缓存时间为 10 秒钟; - 变量以 `$` 开头,不存在时等价于空字符串。也可以使用变量和字符串的结合,但是需要以数组的形式分开写,最终变量被解析后会和字符串拼接在一起。 ::: @@ -58,22 +58,33 @@ description: 本文介绍了 Apache APISIX proxy-cache 插件的相关操作, 你可以在 APISIX 配置文件 `conf/config.yaml` 中添加你的缓存配置,示例如下: ```yaml title="conf/config.yaml" -proxy_cache: # 代理缓存配置 - cache_ttl: 10s # 如果上游未指定缓存时间,则为默认缓存时间 - zones: # 缓存的参数 - - name: disk_cache_one # 缓存名称(缓存区域),管理员可以通过 admin api 中的 cache_zone 字段指定要使用的缓存区域 - memory_size: 
50m # 共享内存的大小,用于存储缓存索引 - disk_size: 1G # 磁盘大小,用于存储缓存数据 - disk_path: "/tmp/disk_cache_one" # 存储缓存数据的路径 - cache_levels: "1:2" # 缓存的层次结构级别 +apisix: + proxy_cache: + cache_ttl: 10s # 如果上游未指定缓存时间,则为默认磁盘缓存时间 + zones: + - name: disk_cache_one + memory_size: 50m + disk_size: 1G + disk_path: /tmp/disk_cache_one + cache_levels: 1:2 + # - name: disk_cache_two + # memory_size: 50m + # disk_size: 1G + # disk_path: "/tmp/disk_cache_two" + # cache_levels: "1:2" + - name: memory_cache + memory_size: 50m ``` -以下示例展示了如何在指定路由上启用 `proxy-cache` 插件,`cache_zone` 字段默认设置为 `disk_cache_one`: +### 使用基于磁盘的缓存 + +以下示例展示了如何在路由上启用 `proxy-cache` 插件。该插件默认使用基于磁盘的 `cache_strategy` 和默认使用`disk_cache_one` 为 `cache_zone`: ```shell curl http://127.0.0.1:9180/apisix/admin/routes/1 \ -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { + "uri": "/ip", "plugins": { "proxy-cache": { "cache_key": ["$uri", "-cache-id"], @@ -86,11 +97,35 @@ curl http://127.0.0.1:9180/apisix/admin/routes/1 \ }, "upstream": { "nodes": { - "127.0.0.1:1999": 1 + "httpbin.org": 1 }, "type": "roundrobin" + } +}' +``` + +### 使用基于内存的缓存 + +以下示例展示了如何在路由上启用 `proxy-cache` 插件,并使用基于内存的 `cache_strategy` 和相应的基于内存的 `cache_zone`。 + +```shell +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "uri": "/ip", + "plugins": { + "proxy-cache": { + "cache_strategy": "memory", + "cache_zone": "memory_cache", + "cache_ttl": 10 + } }, - "uri": "/hello" + "upstream": { + "nodes": { + "httpbin.org": 1 + }, + "type": "roundrobin" + } }' ``` @@ -99,7 +134,7 @@ curl http://127.0.0.1:9180/apisix/admin/routes/1 \ 按上述配置启用插件后,使用 `curl` 命令请求该路由: ```shell -curl http://127.0.0.1:9080/hello -i +curl http://127.0.0.1:9080/ip -i ``` 如果返回 `200` HTTP 状态码,并且响应头中包含 `Apisix-Cache-Status`字段,则表示该插件已启用: @@ -115,7 +150,7 @@ hello 如果你是第一次请求该路由,数据未缓存,那么 `Apisix-Cache-Status` 字段应为 `MISS`。此时再次请求该路由: ```shell -curl http://127.0.0.1:9080/hello -i +curl http://127.0.0.1:9080/ip -i ``` 如果返回的响应头中 
`Apisix-Cache-Status` 字段变为 `HIT`,则表示数据已被缓存,插件生效: @@ -135,7 +170,7 @@ hello 为了清除缓存数据,你只需要指定请求的 method 为 `PURGE`: ```shell -curl -i http://127.0.0.1:9080/hello -X PURGE +curl -i http://127.0.0.1:9080/ip -X PURGE ``` HTTP 响应码为 `200` 即表示删除成功,如果缓存的数据未找到将返回 `404`: @@ -146,20 +181,20 @@ HTTP/1.1 200 OK ::: -## 禁用插件 +## 删除插件 -当你需要禁用该插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: +当你需要删除该插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: ```shell curl http://127.0.0.1:9180/apisix/admin/routes/1 \ -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { - "uri": "/hello", + "uri": "/ip", "plugins": {}, "upstream": { "type": "roundrobin", "nodes": { - "127.0.0.1:1999": 1 + "httpbin.org": 1 } } }' diff --git a/docs/zh/latest/plugins/proxy-control.md b/docs/zh/latest/plugins/proxy-control.md index 50e6f00c9eec..a64603043bad 100644 --- a/docs/zh/latest/plugins/proxy-control.md +++ b/docs/zh/latest/plugins/proxy-control.md @@ -75,9 +75,9 @@ curl -i http://127.0.0.1:9080/upload -d @very_big_file 如果在错误日志中没有找到关于 "a client request body is buffered to a temporary file" 的信息,则说明插件生效。 -## 禁用插件 +## 删除插件 -当你需要禁用该插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: +当你需要删除该插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: ```shell curl http://127.0.0.1:9180/apisix/admin/routes/1 \ diff --git a/docs/zh/latest/plugins/proxy-mirror.md b/docs/zh/latest/plugins/proxy-mirror.md index 365368d8b15d..5b39a556f996 100644 --- a/docs/zh/latest/plugins/proxy-mirror.md +++ b/docs/zh/latest/plugins/proxy-mirror.md @@ -40,9 +40,9 @@ description: 本文介绍了 Apache APISIX proxy-mirror 插件的相关操作, | 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 | | ---- | ------ | ------ | ------ | ------ | ------------------------------------------------------------------------------------------------------- | -| host | string | 是 | | | 指定镜像服务的地址,地址中需要包含 `schema`(`http` 或 `https`),但不能包含 `path` 部分。例如 `http://127.0.0.1:9797`。 | -| path | string | 否 | | | 指定镜像请求的路径。如果不指定,则默认会使用当前路径。 | -| path_concat_mode | string | 否 | replace | 
["replace", "prefix"] | 当指定镜像请求的路径时,设置请求路径的拼接模式。`replace` 模式将会直接使用 `path` 作为镜像请求的路径。`prefix` 模式将会使用 `path` + `来源请求 URI` 作为镜像请求的路径。 | +| host | string | 是 | | | 指定镜像服务的地址,地址中需要包含 `schema`(`http(s)` 或 `grpc(s)`),但不能包含 `path` 部分。例如 `http://127.0.0.1:9797`。 | +| path | string | 否 | | | 指定镜像请求的路径。如果不指定,则默认会使用当前路径。如果是为了镜像 grpc 流量,这个选项不再适用。| +| path_concat_mode | string | 否 | replace | ["replace", "prefix"] | 当指定镜像请求的路径时,设置请求路径的拼接模式。`replace` 模式将会直接使用 `path` 作为镜像请求的路径。`prefix` 模式将会使用 `path` + `来源请求 URI` 作为镜像请求的路径。当然如果是为了镜像 grpc 流量,这个选项也不再适用。| | sample_ratio | number | 否 | 1 | [0.00001, 1] | 镜像请求的采样率。当设置为 `1` 时为全采样。 | ## 启用插件 @@ -115,9 +115,9 @@ HTTP/1.1 200 OK hello world ``` -## 禁用插件 +## 删除插件 -当你需要禁用该插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: +当你需要删除该插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: ```shell curl http://127.0.0.1:9180/apisix/admin/routes/1 \ diff --git a/docs/zh/latest/plugins/proxy-rewrite.md b/docs/zh/latest/plugins/proxy-rewrite.md index ff6feaffded3..1b5de57d296d 100644 --- a/docs/zh/latest/plugins/proxy-rewrite.md +++ b/docs/zh/latest/plugins/proxy-rewrite.md @@ -38,10 +38,10 @@ description: 本文介绍了关于 Apache APISIX `proxy-rewrite` 插件的基本 | --------- | ------------- | ----- | ------- | ---------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | uri | string | 否 | | | 转发到上游的新 `uri` 地址。支持 [NGINX variables](https://nginx.org/en/docs/http/ngx_http_core_module.html) 变量,例如:`$arg_name`。 | | method | string | 否 | | ["GET", "POST", "PUT", "HEAD", "DELETE", "OPTIONS","MKCOL", "COPY", "MOVE", "PROPFIND", "PROPFIND","LOCK", "UNLOCK", "PATCH", "TRACE"] | 将路由的请求方法代理为该请求方法。 | -| regex_uri | array[string] | 否 | | | 转发到上游的新 `uri` 地址。使用正则表达式匹配来自客户端的 `uri`,如果匹配成功,则使用模板替换转发到上游的 
`uri`,如果没有匹配成功,则将客户端请求的 `uri` 转发至上游。当同时配置 `uri` 和 `regex_uri` 属性时,优先使用 `uri`。例如:["^/iresty/(.*)/(.*)/(.*)","/$1-$2-$3"] 第一个元素代表匹配来自客户端请求的 `uri` 正则表达式,第二个元素代表匹配成功后转发到上游的 `uri` 模板。但是目前 APISIX 仅支持一个 `regex_uri`,所以 `regex_uri` 数组的长度是 `2`。 | +| regex_uri | array[string] | 否 | | | 使用正则表达式匹配来自客户端的 `uri`,如果匹配成功,则使用模板替换转发到上游的 `uri`,如果没有匹配成功,则将客户端请求的 `uri` 转发至上游。当同时配置 `uri` 和 `regex_uri` 属性时,优先使用 `uri`。当前支持多组正则表达式进行模式匹配,插件将逐一尝试匹配直至成功或全部失败。例如:`["^/iresty/(.*)/(.*)/(.*)", "/$1-$2-$3", "^/theothers/(.*)/(.*)", "/theothers/$1-$2"]`,奇数索引的元素代表匹配来自客户端请求的 `uri` 正则表达式,偶数索引的元素代表匹配成功后转发到上游的 `uri` 模板。请注意该值的长度必须为**偶数值**。 | | host | string | 否 | | | 转发到上游的新 `host` 地址,例如:`iresty.com`。| | headers | object | 否 | | | | -| headers.add | object | 否 | | | 添加新的请求头,如果头已经存在,会追加到末尾。格式为 `{"name: value", ...}`。这个值能够以 `$var` 的格式包含 NGINX 变量,比如 `$remote_addr $balancer_ip`。也支持以变量的形式引用 `regex_uri` 的匹配结果,比如 `$1-$2-$3`。 | +| headers.add | object | 否 | | | 添加新的请求头,如果头已经存在,会追加到末尾。格式为 `{"name": "value", ...}`。这个值能够以 `$var` 的格式包含 NGINX 变量,比如 `$remote_addr $balancer_ip`。也支持以变量的形式引用 `regex_uri` 的匹配结果,比如 `$1-$2-$3`。 | | headers.set | object | 否 | | | 改写请求头,如果请求头不存在,则会添加这个请求头。格式为 `{"name": "value", ...}`。这个值能够以 `$var` 的格式包含 NGINX 变量,比如 `$remote_addr $balancer_ip`。也支持以变量的形式引用 `regex_uri` 的匹配结果,比如 `$1-$2-$3`。 | | headers.remove | array | 否 | | | 移除响应头。格式为 `["name", ...]`。 @@ -103,7 +103,7 @@ curl -X GET http://127.0.0.1:9080/test/index.html 127.0.0.1 - [26/Sep/2019:10:52:20 +0800] iresty.com GET /test/home.html HTTP/1.1 200 38 - curl/7.29.0 - 0.000 199 107 ``` -## 禁用插件 +## 删除插件 当你需要禁用 `proxy-rewrite` 插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: diff --git a/docs/zh/latest/plugins/public-api.md b/docs/zh/latest/plugins/public-api.md index c6ffbe45159f..303fd3c04260 100644 --- a/docs/zh/latest/plugins/public-api.md +++ b/docs/zh/latest/plugins/public-api.md @@ -156,9 +156,9 @@ curl -i 'http://127.0.0.1:9080/gen_token?key=user-key' HTTP/1.1 401 Unauthorized ``` -## 禁用插件 +## 删除插件 -当你需要禁用该插件时,可以通过以下命令删除相应的 JSON 
配置,APISIX 将会自动重新加载相关配置,无需重启服务: +当你需要删除该插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: ```shell curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' diff --git a/docs/zh/latest/plugins/real-ip.md b/docs/zh/latest/plugins/real-ip.md index 3fa52484b36c..0922541afef1 100644 --- a/docs/zh/latest/plugins/real-ip.md +++ b/docs/zh/latest/plugins/real-ip.md @@ -43,7 +43,7 @@ description: 本文介绍了关于 Apache APISIX `real-ip` 插件的基本信息 | 名称 | 类型 | 必选项 | 有效值 | 描述 | |-------------------|---------------|--|-------------------------------------------------------------|----------------------------------------------------------------------| -| source | string | 是 | 任何 NGINX 变量,如 `arg_realip` 或 `http_x_forwarded_for` 。 | 动态设置客户端的 IP 地址和端口。如果该值不包含端口,则不会更改客户端的端口。| +| source | string | 是 | 任何 NGINX 变量,如 `arg_realip` 或 `http_x_forwarded_for` 。 | 动态设置客户端的 IP 地址和端口,或主机名。如果该值不包含端口,则不会更改客户端的端口。 | | trusted_addresses | array[string] | 否 | IP 或 CIDR 范围列表。 | 动态设置 `set_real_ip_from` 字段。 | | recursive | boolean | 否 | true 或者 false,默认是 false | 如果禁用递归搜索,则与受信任地址之一匹配的原始客户端地址将替换为配置的`source`中发送的最后一个地址。如果启用递归搜索,则与受信任地址之一匹配的原始客户端地址将替换为配置的`source`中发送的最后一个非受信任地址。 | @@ -97,7 +97,7 @@ remote-addr: 1.2.3.4 remote-port: 9080 ``` -## 禁用插件 +## 删除插件 当你需要禁用 `real-ip` 插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: diff --git a/docs/zh/latest/plugins/redirect.md b/docs/zh/latest/plugins/redirect.md index 98aee9ed5881..e700a36d59d4 100644 --- a/docs/zh/latest/plugins/redirect.md +++ b/docs/zh/latest/plugins/redirect.md @@ -37,7 +37,7 @@ description: 本文介绍了关于 Apache APISIX `redirect` 插件的基本信 |---------------------|---------------|-----|-------|--------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | http_to_https | boolean | 否 | false | [true,false] | 当设置为 `true` 并且请求是 HTTP 时,它将被重定向具有相同 URI 和 301 
状态码的 HTTPS,原 URI 的查询字符串也将包含在 Location 头中。 | | uri | string | 否 | | | 要重定向到的 URI,可以包含 NGINX 变量。例如:`/test/index.htm`,`$uri/index.html`,`${uri}/index.html`,`https://example.com/foo/bar`。如果你引入了一个不存在的变量,它不会报错,而是将其视为一个空变量。 | -| regex_uri | array[string] | 否 | | | 将来自客户端的 URL 与正则表达式匹配并重定向。当匹配成功后使用模板替换发送重定向到客户端,如果未匹配成功会将客户端请求的 URI 转发至上游。 和 `regex_uri` 不可以同时存在。例如:["^/iresty/(.)/(.)/(.*)","/$1-$2-$3"] 第一个元素代表匹配来自客户端请求的 URI 正则表达式,第二个元素代表匹配成功后发送重定向到客户端的 URI 模板。 | +| regex_uri | array[string] | 否 | | | 将来自客户端的 URL 与正则表达式匹配并重定向。当匹配成功后使用模板替换发送重定向到客户端,如果未匹配成功会将客户端请求的 URI 转发至上游。和 `regex_uri` 不可以同时存在。例如:["^/iresty/(.)/(.)/(.*)","/$1-$2-$3"] 第一个元素代表匹配来自客户端请求的 URI 正则表达式,第二个元素代表匹配成功后发送重定向到客户端的 URI 模板。 | | ret_code | integer | 否 | 302 | [200, ...] | HTTP 响应码 | | encode_uri | boolean | 否 | false | [true,false] | 当设置为 `true` 时,对返回的 `Location` Header 按照 [RFC3986](https://datatracker.ietf.org/doc/html/rfc3986) 的编码格式进行编码。 | | append_query_string | boolean | 否 | false | [true,false] | 当设置为 `true` 时,将原始请求中的查询字符串添加到 `Location` Header。如果已配置 `uri` 或 `regex_uri` 已经包含查询字符串,则请求中的查询字符串将附加一个`&`。如果你已经处理过查询字符串(例如,使用 NGINX 变量 `$request_uri`),请不要再使用该参数以避免重复。 | @@ -147,7 +147,7 @@ Location: https://127.0.0.1:9443/hello ... ``` -## 禁用插件 +## 删除插件 当你需要禁用 `redirect` 插件时,可以通过如下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: diff --git a/docs/zh/latest/plugins/referer-restriction.md b/docs/zh/latest/plugins/referer-restriction.md index 430f8c112775..73c60b1de5ba 100644 --- a/docs/zh/latest/plugins/referer-restriction.md +++ b/docs/zh/latest/plugins/referer-restriction.md @@ -113,9 +113,9 @@ HTTP/1.1 200 OK ... 
``` -## 禁用插件 +## 删除插件 -当你需要禁用该插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: +当你需要删除该插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: ```shell curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' diff --git a/docs/zh/latest/plugins/request-id.md b/docs/zh/latest/plugins/request-id.md index 88cbba76b391..01b49ee9f96a 100644 --- a/docs/zh/latest/plugins/request-id.md +++ b/docs/zh/latest/plugins/request-id.md @@ -42,44 +42,10 @@ description: 本文介绍了 Apache APISIX request-id 插件的相关操作, | ------------------- | ------- | -------- | -------------- | ------ | ------------------------------ | | header_name | string | 否 | "X-Request-Id" | | unique ID 的请求头的名称。 | | include_in_response | boolean | 否 | true | | 当设置为 `true` 时,将 unique ID 加入返回头。 | -| algorithm | string | 否 | "uuid" | ["uuid", "snowflake", "nanoid", "range_id"] | 指定的 unique ID 生成算法。 | +| algorithm | string | 否 | "uuid" | ["uuid", "nanoid", "range_id"] | 指定的 unique ID 生成算法。 | | range_id.char_set | string | 否 | "abcdefghijklmnopqrstuvwxyzABCDEFGHIGKLMNOPQRSTUVWXYZ0123456789| 字符串长度最小为 6 | range_id 算法的字符集 | | range_id.length | integer | 否 | 16 | 最小值为 6 | range_id 算法的 id 长度 | -### 使用 snowflake 算法生成 unique ID - -:::caution 警告 - -- 当使用 `snowflake` 算法时,请确保 APISIX 有权限写入 etcd。 -- 在决定使用 `snowflake` 算法时,请仔细阅读本文档了解配置细节。因为一旦启用相关配置信息后,就不能随意调整,否则可能会导致生成重复的 ID。 - -::: - -`snowflake` 算法支持灵活配置来满足各种需求,可配置的参数如下: - -| 名称 | 类型 | 必选项 | 默认值 | 描述 | -| ------------------- | ------- | -------- | -------------- | ------------------------------ | -| enable | boolean | 否 | false | 当设置为 `true` 时, 启用 `snowflake` 算法。 | -| snowflake_epoc | integer | 否 | 1609459200000 | 起始时间戳,以毫秒为单位。默认为 `2021-01-01T00:00:00Z`, 可以支持 `69 年`到 `2090-09-07 15:47:35Z`。 | -| data_machine_bits | integer | 否 | 12 | 最多支持的机器(进程)数量。 与 `snowflake` 定义中 `workerIDs` 和 `datacenterIDs` 的集合对应,插件会为每一个进程分配一个 unique ID。最大支持进程数为 `pow(2, data_machine_bits)`。即对于默认值 `12 bits`,最多支持的进程数为 `4096`。| -| sequence_bits | integer | 否 | 
10 | 每个节点每毫秒内最多产生的 ID 数量。 每个进程每毫秒最多产生 `1024` 个 ID。 | -| data_machine_ttl | integer | 否 | 30 | etcd 中 `data_machine` 注册有效时间,以秒为单位。 | -| data_machine_interval | integer | 否 | 10 | etcd 中 `data_machine` 续约间隔时间,以秒为单位。 | - -如果你需要使用 `snowflake` 算法,请务必在配置文件 `./conf/config.yaml` 中添加以下参数: - -```yaml title="conf/config.yaml" -plugin_attr: - request-id: - snowflake: - enable: true - snowflake_epoc: 1609459200000 - data_machine_bits: 12 - sequence_bits: 10 - data_machine_ttl: 30 - data_machine_interval: 10 -``` - ## 启用插件 以下示例展示了如何在指定路由上启用 `request-id` 插件: @@ -120,9 +86,9 @@ HTTP/1.1 200 OK X-Request-Id: fe32076a-d0a5-49a6-a361-6c244c1df956 ``` -## 禁用插件 +## 删除插件 -当你需要禁用该插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: +当你需要删除该插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: ```shell curl http://127.0.0.1:9180/apisix/admin/routes/5 \ diff --git a/docs/zh/latest/plugins/request-validation.md b/docs/zh/latest/plugins/request-validation.md index ac49c50ecf2c..9ea38161c0ae 100644 --- a/docs/zh/latest/plugins/request-validation.md +++ b/docs/zh/latest/plugins/request-validation.md @@ -265,9 +265,9 @@ curl --header "Content-Type: application/json" \ 现在只允许符合已配置规则的有效请求到达上游服务。不符合配置的请求将被拒绝,并返回 `400` 或自定义状态码。 -## 禁用插件 +## 删除插件 -当你需要禁用该插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: +当你需要删除该插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: ```shell curl http://127.0.0.1:9180/apisix/admin/routes/5 \ diff --git a/docs/zh/latest/plugins/response-rewrite.md b/docs/zh/latest/plugins/response-rewrite.md index 7d108030713a..a59c34f54806 100644 --- a/docs/zh/latest/plugins/response-rewrite.md +++ b/docs/zh/latest/plugins/response-rewrite.md @@ -49,7 +49,7 @@ description: 本文介绍了关于 Apache APISIX `response-rewrite` 插件的基 
|-----------------|---------|--------|--------|-----------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | status_code | integer | 否 | | [200, 598] | 修改上游返回状态码,默认保留原始响应代码。 | | body | string | 否 | | | 修改上游返回的 `body` 内容,如果设置了新内容,header 里面的 content-length 字段也会被去掉。 | -| body_base64 | boolean | 否 | false | | 描述 `body` 字段是否需要 base64 解码之后再返回给客户端,用在某些图片和 Protobuffer 场景。 | +| body_base64 | boolean | 否 | false | | 当设置时,在写给客户端之前,在`body`中传递的主体将被解码,这在一些图像和 Protobuffer 场景中使用。注意,这个字段只允许对插件配置中传递的主体进行解码,并不对上游响应进行解码。 | | headers | object | 否 | | | | | headers.add | array | 否 | | | 添加新的响应头。格式为 `["name: value", ...]`。这个值能够以 `$var` 的格式包含 NGINX 变量,比如 `$remote_addr $balancer_ip`。 | | headers.set | object | 否 | | | 改写响应头。格式为 `{"name": "value", ...}`。这个值能够以 `$var` 的格式包含 NGINX 变量,比如 `$remote_addr $balancer_ip`。 | @@ -225,7 +225,7 @@ X-Server-id: 3 ``` -## 禁用插件 +## 删除插件 当你需要禁用 `response-rewrite` 插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: diff --git a/docs/zh/latest/plugins/rocketmq-logger.md b/docs/zh/latest/plugins/rocketmq-logger.md index 9428e80f5228..afdd40e52a32 100644 --- a/docs/zh/latest/plugins/rocketmq-logger.md +++ b/docs/zh/latest/plugins/rocketmq-logger.md @@ -193,9 +193,9 @@ curl http://127.0.0.1:9180/apisix/admin/routes/1 \ curl -i http://127.0.0.1:9080/hello ``` -## 禁用插件 +## 删除插件 -当你需要禁用该插件时,可以通过如下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: +当你需要删除该插件时,可以通过如下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: ```shell curl http://127.0.0.1:9180/apisix/admin/routes/1 \ diff --git a/docs/zh/latest/plugins/server-info.md b/docs/zh/latest/plugins/server-info.md index 9d52c3d01e82..9fa5aac8468b 100644 --- a/docs/zh/latest/plugins/server-info.md +++ b/docs/zh/latest/plugins/server-info.md @@ -102,7 +102,7 @@ curl http://127.0.0.1:9090/v1/server_info -s | jq . 
::: -## 禁用插件 +## 删除插件 如果你想禁用插件,可以将 `server-info` 从配置文件中的插件列表删除,重新加载 APISIX 后即可生效。 diff --git a/docs/zh/latest/plugins/serverless.md b/docs/zh/latest/plugins/serverless.md index cc389c017e10..97d6ee6a8a8f 100644 --- a/docs/zh/latest/plugins/serverless.md +++ b/docs/zh/latest/plugins/serverless.md @@ -37,7 +37,7 @@ APISIX 有两个 `serverless` 插件:`serverless-pre-function` 和 `serverless | 名称 | 类型 | 必选项 | 默认值 | 有效值 | 描述 | | --------- | ------------- | ------- | ---------- | ---------------------------------------------------------------------------- | ------------------------------------------------------------------------------ | -| phase | string | 否 | ["access"] | ["rewrite", "access", "header_filter", "body_filter", "log", "before_proxy"] | 执行 severless 函数的阶段。 | +| phase | string | 否 | ["access"] | ["rewrite", "access", "header_filter", "body_filter", "log", "before_proxy"] | 执行 serverless 函数的阶段。 | | functions | array[string] | 是 | | | 指定运行的函数列表。该属性可以包含一个函数,也可以是多个函数,按照先后顺序执行。 | :::info 重要 @@ -117,9 +117,9 @@ curl -i http://127.0.0.1:9080/index.html 如果你在 `./logs/error.log` 中发现 `serverless pre function` 和 `match uri /index.html` 两个 error 级别的日志,表示指定的函数已经生效。 -## 禁用插件 +## 删除插件 -当你需要禁用该插件时,可以通过如下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: +当你需要删除该插件时,可以通过如下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: ```shell curl http://127.0.0.1:9180/apisix/admin/routes/1 \ diff --git a/docs/zh/latest/plugins/skywalking-logger.md b/docs/zh/latest/plugins/skywalking-logger.md index 0b69d614f558..9c3d282bddc8 100644 --- a/docs/zh/latest/plugins/skywalking-logger.md +++ b/docs/zh/latest/plugins/skywalking-logger.md @@ -117,9 +117,9 @@ curl -i http://127.0.0.1:9080/hello 完成上述步骤后,你可以在 SkyWalking UI 查看到相关日志。 -## 禁用插件 +## 删除插件 -当你需要禁用该插件时,可通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: +当你需要删除该插件时,可通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: ```shell curl http://127.0.0.1:9180/apisix/admin/routes/1 \ diff --git a/docs/zh/latest/plugins/skywalking.md b/docs/zh/latest/plugins/skywalking.md index 
eed3afca47a2..324e108174c0 100644 --- a/docs/zh/latest/plugins/skywalking.md +++ b/docs/zh/latest/plugins/skywalking.md @@ -31,7 +31,7 @@ description: 本文将介绍 API 网关 Apache APISIX 如何通过 skywalking `skywalking` 插件用于与 [Apache SkyWalking](https://github.com/apache/skywalking) 集成。 -SkyWalking 使用其原生的 NGINX Lua tracer 从服务和 URI 角度提供了分布式追踪、拓扑分析以及 metrics 。 +SkyWalking 使用其原生的 NGINX Lua tracer 从服务和 URI 角度提供了分布式追踪、拓扑分析以及 metrics。 服务端目前支持 HTTP 和 gRPC 两种协议,在 APISIX 中目前只支持 HTTP 协议。 @@ -199,7 +199,7 @@ OK ![plugin_skywalking](../../../assets/images/plugin/skywalking-5.png) -## 禁用插件 +## 删除插件 当你需要禁用 `skywalking` 插件时,可通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: diff --git a/docs/zh/latest/plugins/sls-logger.md b/docs/zh/latest/plugins/sls-logger.md index b4d21a463726..16d520521448 100644 --- a/docs/zh/latest/plugins/sls-logger.md +++ b/docs/zh/latest/plugins/sls-logger.md @@ -121,7 +121,7 @@ hello, world * 查看阿里云日志服务上传记录 ![sls logger view](../../../assets/images/plugin/sls-logger-1.png "阿里云日志服务预览") -## 禁用插件 +## 删除插件 想要禁用“sls-logger”插件,是非常简单的,将对应的插件配置从 json 配置删除,就会立即生效,不需要重新启动服务: diff --git a/docs/zh/latest/plugins/splunk-hec-logging.md b/docs/zh/latest/plugins/splunk-hec-logging.md index 7a67f3aad762..57ea2fc37e13 100644 --- a/docs/zh/latest/plugins/splunk-hec-logging.md +++ b/docs/zh/latest/plugins/splunk-hec-logging.md @@ -157,9 +157,9 @@ hello, world ![splunk hec search view](../../../assets/images/plugin/splunk-hec-admin-cn.png) -## 禁用插件 +## 删除插件 -当你需要禁用该插件时,可以通过如下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: +当你需要删除该插件时,可以通过如下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: ```shell curl http://127.0.0.1:9180/apisix/admin/routes/1 \ diff --git a/docs/zh/latest/plugins/syslog.md b/docs/zh/latest/plugins/syslog.md index 8e16fe69b86b..357c3038d128 100644 --- a/docs/zh/latest/plugins/syslog.md +++ b/docs/zh/latest/plugins/syslog.md @@ -92,9 +92,9 @@ HTTP/1.1 200 OK hello, world ``` -## 禁用插件 +## 删除插件 -当你需要禁用该插件时,可通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: +当你需要删除该插件时,可通过以下命令删除相应的 JSON 
配置,APISIX 将会自动重新加载相关配置,无需重启服务: ```shell curl http://127.0.0.1:9180/apisix/admin/routes/1 \ diff --git a/docs/zh/latest/plugins/tcp-logger.md b/docs/zh/latest/plugins/tcp-logger.md index 81dbc98badc8..c6a9679580a5 100644 --- a/docs/zh/latest/plugins/tcp-logger.md +++ b/docs/zh/latest/plugins/tcp-logger.md @@ -120,9 +120,9 @@ HTTP/1.1 200 OK hello, world ``` -## 禁用插件 +## 删除插件 -当你需要禁用该插件时,可通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: +当你需要删除该插件时,可通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: ```shell curl http://127.0.0.1:9180/apisix/admin/routes/1 \ diff --git a/docs/zh/latest/plugins/tencent-cloud-cls.md b/docs/zh/latest/plugins/tencent-cloud-cls.md index b32bff50de95..70ca669e5f9a 100644 --- a/docs/zh/latest/plugins/tencent-cloud-cls.md +++ b/docs/zh/latest/plugins/tencent-cloud-cls.md @@ -129,9 +129,9 @@ HTTP/1.1 200 OK hello, world ``` -## 禁用插件 +## 删除插件 -当你需要禁用该插件时,可通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: +当你需要删除该插件时,可通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: ```shell curl http://127.0.0.1:9180/apisix/admin/routes/1 \ diff --git a/docs/zh/latest/plugins/traffic-split.md b/docs/zh/latest/plugins/traffic-split.md index 97e87fd47c49..68e52baac842 100644 --- a/docs/zh/latest/plugins/traffic-split.md +++ b/docs/zh/latest/plugins/traffic-split.md @@ -168,7 +168,7 @@ curl http://127.0.0.1:9180/apisix/admin/routes/1 \ ### 灰度发布 -灰度发布(又名金丝雀发布)是指在已经上线与未上线服务之间,能够平滑过渡的一种发布方式。 在其上可以进行 A/B 测试,即让一部分用户继续用产品特性 A,一部分用户开始用产品特性 B。如果用户对特性 B 没有什么反对意见,那么逐步扩大范围,把所有用户都迁移到特性 B 上面来。 +灰度发布(又名金丝雀发布)是指在已经上线与未上线服务之间,能够平滑过渡的一种发布方式。在其上可以进行 A/B 测试,即让一部分用户继续用产品特性 A,一部分用户开始用产品特性 B。如果用户对特性 B 没有什么反对意见,那么逐步扩大范围,把所有用户都迁移到特性 B 上面来。 以下示例展示了如何通过配置 `weighted_upstreams` 的 `weight` 属性来实现流量分流。按 3:2 的权重流量比例进行划分,其中 60% 的流量到达运行在 `1981` 端口上的上游服务,40% 的流量到达运行在 `1980` 端口上的上游服务: @@ -623,9 +623,9 @@ curl http://127.0.0.1:9080/hello -H 'x-api-id: 3' 1980 ``` -## 禁用插件 +## 删除插件 -当你需要禁用该插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: +当你需要删除该插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: 
```shell curl http://127.0.0.1:9180/apisix/admin/routes/1 \ diff --git a/docs/zh/latest/plugins/ua-restriction.md b/docs/zh/latest/plugins/ua-restriction.md index 3f31e092c2ae..e65a55a890fa 100644 --- a/docs/zh/latest/plugins/ua-restriction.md +++ b/docs/zh/latest/plugins/ua-restriction.md @@ -43,7 +43,7 @@ description: 本文介绍了 Apache APISIX ua-restriction 插件的使用方法 :::note -`allowlist` 和 `denylist` 可以同时启用。同时启用时,插件会根据 `User-Agent` 先检查 `allowlist`,再检查 `denylist`。 +`allowlist` 和 `denylist` 不可以同时启用。 ::: @@ -120,7 +120,7 @@ HTTP/1.1 403 Forbidden {"message":"Not allowed"} ``` -## 禁用插件 +## 删除插件 当你需要禁用 `ua-restriction` 插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: diff --git a/docs/zh/latest/plugins/udp-logger.md b/docs/zh/latest/plugins/udp-logger.md index d9fd580c28c0..703b3ab536fc 100644 --- a/docs/zh/latest/plugins/udp-logger.md +++ b/docs/zh/latest/plugins/udp-logger.md @@ -118,9 +118,9 @@ HTTP/1.1 200 OK hello, world ``` -## 禁用插件 +## 删除插件 -当你需要禁用该插件时,可通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: +当你需要删除该插件时,可通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: ```shell curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' diff --git a/docs/zh/latest/plugins/uri-blocker.md b/docs/zh/latest/plugins/uri-blocker.md index b58a0e692ace..33c92dbf39a5 100644 --- a/docs/zh/latest/plugins/uri-blocker.md +++ b/docs/zh/latest/plugins/uri-blocker.md @@ -89,7 +89,7 @@ Server: APISIX web server ... 
``` -## 禁用插件 +## 删除插件 当你需要禁用 `uri-blocker` 插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: diff --git a/docs/zh/latest/plugins/wolf-rbac.md b/docs/zh/latest/plugins/wolf-rbac.md index 231ea08367f7..dad4f26d13ff 100644 --- a/docs/zh/latest/plugins/wolf-rbac.md +++ b/docs/zh/latest/plugins/wolf-rbac.md @@ -269,7 +269,7 @@ HTTP/1.1 200 OK {"message":"success to change password"} ``` -## 禁用插件 +## 删除插件 当你需要禁用 `wolf-rbac` 插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: diff --git a/docs/zh/latest/plugins/workflow.md b/docs/zh/latest/plugins/workflow.md index e4c0b082c5be..0c7228a9e904 100644 --- a/docs/zh/latest/plugins/workflow.md +++ b/docs/zh/latest/plugins/workflow.md @@ -147,7 +147,7 @@ curl http://127.0.0.1:0080/hello/fake -i HTTP/1.1 200 OK ``` -## Disable Plugin +## 删除插件 当你需要禁用 `workflow` 插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: diff --git a/docs/zh/latest/plugins/zipkin.md b/docs/zh/latest/plugins/zipkin.md index b252771c30e2..6f6e0d2b8148 100644 --- a/docs/zh/latest/plugins/zipkin.md +++ b/docs/zh/latest/plugins/zipkin.md @@ -31,7 +31,7 @@ description: 本文介绍了关于 Apache APISIX zipkin 插件的基本信息及 [Zipkin](https://github.com/openzipkin/zipkin) 是一个开源的分布调用链追踪系统。`zipkin` 插件基于 [Zipkin API 规范](https://zipkin.io/pages/instrumenting.html),支持收集跟踪信息并上报给 Zipkin Collector。 -该插件也支持 [Apache SkyWalking](https://skywalking.apache.org/docs/main/latest/en/setup/backend/zipkin-trace/#zipkin-receiver) 和 [Jaeger](https://www.jaegertracing.io/docs/1.31/getting-started/#migrating-from-zipkin),因为它们都支持了 Zipkin [v1](https://zipkin.io/zipkin-api/zipkin-api.yaml) 和 [v2](https://zipkin.io/zipkin-api/zipkin2-api.yaml) API。当然 `zipkin` 插件也可以与其他支持了 Zipkin v1 和 v2 API 格式的调用链追踪系统集成。 +该插件也支持 [Apache SkyWalking](https://skywalking.apache.org/docs/main/next/en/setup/backend/zipkin-trace/#zipkin-receiver) 和 [Jaeger](https://www.jaegertracing.io/docs/1.31/getting-started/#migrating-from-zipkin),因为它们都支持了 Zipkin [v1](https://zipkin.io/zipkin-api/zipkin-api.yaml) 和 
[v2](https://zipkin.io/zipkin-api/zipkin2-api.yaml) API。当然 `zipkin` 插件也可以与其他支持了 Zipkin v1 和 v2 API 格式的调用链追踪系统集成。 ## 属性 @@ -216,7 +216,7 @@ HTTP/1.1 200 OK ![jaeger web-ui trace](../../../assets/images/plugin/jaeger-2.png) -## 禁用插件 +## 删除插件 当你需要禁用 `zipkin` 插件时,可以通过以下命令删除相应的 JSON 配置,APISIX 将会自动重新加载相关配置,无需重启服务: diff --git a/docs/zh/latest/ssl-protocol.md b/docs/zh/latest/ssl-protocol.md new file mode 100644 index 000000000000..563b2095df90 --- /dev/null +++ b/docs/zh/latest/ssl-protocol.md @@ -0,0 +1,343 @@ +--- +title: SSL 协议 +--- + + + +`APISIX` 支持 TLS 协议,还支持动态的为每一个 SNI 指定不同的 TLS 协议版本。 + +**为了安全考虑,APISIX 默认使用的加密套件不支持 TLSv1.1 以及更低的版本。** +**如果你需要启用 TLSv1.1 协议,请在 config.yaml 的配置项 apisix.ssl.ssl_ciphers 增加 TLSv1.1 协议所支持的加密套件。** + +## ssl_protocols 配置 + +### 静态配置 + +静态配置中 config.yaml 的 ssl_protocols 参数会作用于 APISIX 全局,但是不能动态修改,仅当匹配的 SSL 资源未设置 `ssl_protocols`,静态配置才会生效。 + +```yaml +apisix: + ssl: + ssl_protocols: TLSv1.2 TLSv1.3 # default TLSv1.2 TLSv1.3 +``` + +### 动态配置 + +使用 ssl 资源中 ssl_protocols 字段动态的为每一个 SNI 指定不同的 TLS 协议版本。 + +指定 test.com 域名使用 TLSv1.2 TLSv1.3 协议版本: + +```bash +{ + "cert": "$cert", + "key": "$key", + "snis": ["test.com"], + "ssl_protocols": [ + "TLSv1.2", + "TLSv1.3" + ] +} +``` + +### 注意事项 + +- 动态配置优先级比静态配置更高,当 ssl 资源配置项 ssl_protocols 不为空时 静态配置将会被覆盖。 +- 静态配置作用于全局需要重启 apisix 才能生效。 +- 动态配置可细粒度的控制每个 SNI 的 TLS 协议版本,并且能够动态修改,相比于静态配置更加灵活。 + +## 使用示例 + +### 如何指定 TLSv1.1 协议 + +存在一些老旧的客户端,仍然采用较低级别的 TLSv1.1 协议版本,而新的产品则使用较高安全级别的 TLS 协议版本。如果让新产品支持 TLSv1.1 可能会带来一些安全隐患。为了保证 API 的安全性,我们需要在协议版本之间进行灵活转换。 +例如:test.com 是老旧客户端所使用的域名,需要将其配置为 TLSv1.1 而 test2.com 属于新产品,同时支持了 TLSv1.2,TLSv1.3 协议。 + +1. 
config.yaml 配置。 +

```yaml +apisix: + ssl: + ssl_protocols: TLSv1.3 + # ssl_ciphers is for reference only + ssl_ciphers: ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:DHE-RSA-AES256-SHA:DHE-DSS-AES256-SHA +``` + +2. 为 test.com 域名指定 TLSv1.1 协议版本。 + +```bash +curl http://127.0.0.1:9180/apisix/admin/ssls/1 \ +-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "cert" : "'"$(cat server.crt)"'", + "key": "'"$(cat server.key)"'", + "snis": ["test.com"], + "ssl_protocols": [ + "TLSv1.1" + ] +}' +``` + +3. 为 test2.com 创建 SSL 对象,未指定 TLS 协议版本,将默认使用静态配置。 + +```bash +curl http://127.0.0.1:9180/apisix/admin/ssls/2 \ +-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "cert" : "'"$(cat server2.crt)"'", + "key": "'"$(cat server2.key)"'", + "snis": ["test2.com"] +}' +``` + +4. 访问验证 + +使用 TLSv1.3 访问 test.com 失败: + +```shell +$ curl --tls-max 1.3 --tlsv1.3 https://test.com:9443 -v -k -I +* Trying 127.0.0.1:9443... +* Connected to test.com (127.0.0.1) port 9443 (#0) +* ALPN, offering h2 +* ALPN, offering http/1.1 +* successfully set certificate verify locations: +* CAfile: /etc/ssl/certs/ca-certificates.crt +* CApath: /etc/ssl/certs +* TLSv1.3 (OUT), TLS handshake, Client hello (1): +* TLSv1.3 (IN), TLS alert, protocol version (582): +* error:1409442E:SSL routines:ssl3_read_bytes:tlsv1 alert protocol version +* Closing connection 0 +curl: (35) error:1409442E:SSL routines:ssl3_read_bytes:tlsv1 alert protocol version +``` + +使用 TLSv1.1 访问 test.com 成功: + +```shell +$ curl --tls-max 1.1 --tlsv1.1 https://test.com:9443 -v -k -I +* Trying 127.0.0.1:9443... 
+* Connected to test.com (127.0.0.1) port 9443 (#0) +* ALPN, offering h2 +* ALPN, offering http/1.1 +* successfully set certificate verify locations: +* CAfile: /etc/ssl/certs/ca-certificates.crt +* CApath: /etc/ssl/certs +* TLSv1.1 (OUT), TLS handshake, Client hello (1): +* TLSv1.1 (IN), TLS handshake, Server hello (2): +* TLSv1.1 (IN), TLS handshake, Certificate (11): +* TLSv1.1 (IN), TLS handshake, Server key exchange (12): +* TLSv1.1 (IN), TLS handshake, Server finished (14): +* TLSv1.1 (OUT), TLS handshake, Client key exchange (16): +* TLSv1.1 (OUT), TLS change cipher, Change cipher spec (1): +* TLSv1.1 (OUT), TLS handshake, Finished (20): +* TLSv1.1 (IN), TLS handshake, Finished (20): +* SSL connection using TLSv1.1 / ECDHE-RSA-AES256-SHA +``` + +使用 TLSv1.3 访问 test2.com 成功: + +```shell +$ curl --tls-max 1.3 --tlsv1.3 https://test2.com:9443 -v -k -I +* Trying 127.0.0.1:9443... +* Connected to test2.com (127.0.0.1) port 9443 (#0) +* ALPN, offering h2 +* ALPN, offering http/1.1 +* successfully set certificate verify locations: +* CAfile: /etc/ssl/certs/ca-certificates.crt +* CApath: /etc/ssl/certs +* TLSv1.3 (OUT), TLS handshake, Client hello (1): +* TLSv1.3 (IN), TLS handshake, Server hello (2): +* TLSv1.3 (IN), TLS handshake, Encrypted Extensions (8): +* TLSv1.3 (IN), TLS handshake, Certificate (11): +* TLSv1.3 (IN), TLS handshake, CERT verify (15): +* TLSv1.3 (IN), TLS handshake, Finished (20): +* TLSv1.3 (OUT), TLS change cipher, Change cipher spec (1): +* TLSv1.3 (OUT), TLS handshake, Finished (20): +* SSL connection using TLSv1.3 / TLS_AES_256_GCM_SHA384 +``` + +使用 TLSv1.1 访问 test2.com 失败: + +```shell +curl --tls-max 1.1 --tlsv1.1 https://test2.com:9443 -v -k -I +* Trying 127.0.0.1:9443... 
+* Connected to test2.com (127.0.0.1) port 9443 (#0) +* ALPN, offering h2 +* ALPN, offering http/1.1 +* successfully set certificate verify locations: +* CAfile: /etc/ssl/certs/ca-certificates.crt +* CApath: /etc/ssl/certs +* TLSv1.1 (OUT), TLS handshake, Client hello (1): +* TLSv1.1 (IN), TLS alert, protocol version (582): +* error:1409442E:SSL routines:ssl3_read_bytes:tlsv1 alert protocol version +* Closing connection 0 +curl: (35) error:1409442E:SSL routines:ssl3_read_bytes:tlsv1 alert protocol version +``` + +### 证书关联多个域名,但域名之间使用不同的 TLS 协议 + +有时候,我们可能会遇到这样一种情况,即一个证书关联了多个域名,但是它们需要使用不同的 TLS 协议来保证安全性。例如 test.com 域名需要使用 TLSv1.2 协议,而 test2.com 域名则需要使用 TLSv1.3 协议。在这种情况下,我们不能简单地为所有的域名创建一个 SSL 对象,而是需要为每个域名单独创建一个 SSL 对象,并指定相应的协议版本。这样,我们就可以根据不同的域名和协议版本来进行正确的 SSL 握手和加密通信。示例如下: + +1. 使用证书为 test.com 创建 ssl 对象,并指定 TLSv1.2 协议。 + +```bash +curl http://127.0.0.1:9180/apisix/admin/ssls/1 \ +-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "cert" : "'"$(cat server.crt)"'", + "key": "'"$(cat server.key)"'", + "snis": ["test.com"], + "ssl_protocols": [ + "TLSv1.2" + ] +}' +``` + +2. 使用与 test.com 同一证书,为 test2.com 创建 ssl 对象,并指定 TLSv1.3 协议。 + +```bash +curl http://127.0.0.1:9180/apisix/admin/ssls/2 \ +-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "cert" : "'"$(cat server.crt)"'", + "key": "'"$(cat server.key)"'", + "snis": ["test2.com"], + "ssl_protocols": [ + "TLSv1.3" + ] +}' +``` + +3. 访问验证 + +使用 TLSv1.2 访问 test.com 成功: + +```shell +$ curl --tls-max 1.2 --tlsv1.2 https://test.com:9443 -v -k -I +* Trying 127.0.0.1:9443... 
+* Connected to test.com (127.0.0.1) port 9443 (#0) +* ALPN, offering h2 +* ALPN, offering http/1.1 +* successfully set certificate verify locations: +* CAfile: /etc/ssl/certs/ca-certificates.crt +* CApath: /etc/ssl/certs +* TLSv1.2 (OUT), TLS handshake, Client hello (1): +* TLSv1.2 (IN), TLS handshake, Server hello (2): +* TLSv1.2 (IN), TLS handshake, Certificate (11): +* TLSv1.2 (IN), TLS handshake, Server key exchange (12): +* TLSv1.2 (IN), TLS handshake, Server finished (14): +* TLSv1.2 (OUT), TLS handshake, Client key exchange (16): +* TLSv1.2 (OUT), TLS change cipher, Change cipher spec (1): +* TLSv1.2 (OUT), TLS handshake, Finished (20): +* TLSv1.2 (IN), TLS handshake, Finished (20): +* SSL connection using TLSv1.2 / ECDHE-RSA-AES128-GCM-SHA256 +* ALPN, server accepted to use h2 +* Server certificate: +* subject: C=AU; ST=Some-State; O=Internet Widgits Pty Ltd; CN=test.com +* start date: Jul 20 15:50:08 2023 GMT +* expire date: Jul 17 15:50:08 2033 GMT +* issuer: C=AU; ST=Some-State; O=Internet Widgits Pty Ltd; CN=test.com +* SSL certificate verify result: EE certificate key too weak (66), continuing anyway. +* Using HTTP2, server supports multi-use +* Connection state changed (HTTP/2 confirmed) +* Copying HTTP/2 data in stream buffer to connection buffer after upgrade: len=0 +* Using Stream ID: 1 (easy handle 0x5608905ee2e0) +> HEAD / HTTP/2 +> Host: test.com:9443 +> user-agent: curl/7.74.0 +> accept: */* + +``` + +使用 TLSv1.3 协议访问 test.com 失败: + +```shell +$ curl --tls-max 1.3 --tlsv1.3 https://test.com:9443 -v -k -I +* Trying 127.0.0.1:9443... 
+* Connected to test.com (127.0.0.1) port 9443 (#0) +* ALPN, offering h2 +* ALPN, offering http/1.1 +* successfully set certificate verify locations: +* CAfile: /etc/ssl/certs/ca-certificates.crt +* CApath: /etc/ssl/certs +* TLSv1.3 (OUT), TLS handshake, Client hello (1): +* TLSv1.3 (IN), TLS alert, protocol version (582): +* error:1409442E:SSL routines:ssl3_read_bytes:tlsv1 alert protocol version +* Closing connection 0 +curl: (35) error:1409442E:SSL routines:ssl3_read_bytes:tlsv1 alert protocol version + +``` + +使用 TLSv1.3 协议访问 test2.com 成功: + +```shell +$ curl --tls-max 1.3 --tlsv1.3 https://test2.com:9443 -v -k -I +* Trying 127.0.0.1:9443... +* Connected to test2.com (127.0.0.1) port 9443 (#0) +* ALPN, offering h2 +* ALPN, offering http/1.1 +* successfully set certificate verify locations: +* CAfile: /etc/ssl/certs/ca-certificates.crt +* CApath: /etc/ssl/certs +* TLSv1.3 (OUT), TLS handshake, Client hello (1): +* TLSv1.3 (IN), TLS handshake, Server hello (2): +* TLSv1.3 (IN), TLS handshake, Encrypted Extensions (8): +* TLSv1.3 (IN), TLS handshake, Certificate (11): +* TLSv1.3 (IN), TLS handshake, CERT verify (15): +* TLSv1.3 (IN), TLS handshake, Finished (20): +* TLSv1.3 (OUT), TLS change cipher, Change cipher spec (1): +* TLSv1.3 (OUT), TLS handshake, Finished (20): +* SSL connection using TLSv1.3 / TLS_AES_256_GCM_SHA384 +* ALPN, server accepted to use h2 +* Server certificate: +* subject: C=AU; ST=Some-State; O=Internet Widgits Pty Ltd; CN=test2.com +* start date: Jul 20 16:05:47 2023 GMT +* expire date: Jul 17 16:05:47 2033 GMT +* issuer: C=AU; ST=Some-State; O=Internet Widgits Pty Ltd; CN=test2.com +* SSL certificate verify result: EE certificate key too weak (66), continuing anyway. 
+* Using HTTP2, server supports multi-use +* Connection state changed (HTTP/2 confirmed) +* Copying HTTP/2 data in stream buffer to connection buffer after upgrade: len=0 +* Using Stream ID: 1 (easy handle 0x55569cbe42e0) +> HEAD / HTTP/2 +> Host: test2.com:9443 +> user-agent: curl/7.74.0 +> accept: */* +> +* TLSv1.3 (IN), TLS handshake, Newsession Ticket (4): +* TLSv1.3 (IN), TLS handshake, Newsession Ticket (4): +* old SSL session ID is stale, removing +``` + +使用 TLSv1.2 协议访问 test2.com 失败: + +```shell +$ curl --tls-max 1.2 --tlsv1.2 https://test2.com:9443 -v -k -I +* Trying 127.0.0.1:9443... +* Connected to test2.com (127.0.0.1) port 9443 (#0) +* ALPN, offering h2 +* ALPN, offering http/1.1 +* successfully set certificate verify locations: +* CAfile: /etc/ssl/certs/ca-certificates.crt +* CApath: /etc/ssl/certs +* TLSv1.2 (OUT), TLS handshake, Client hello (1): +* TLSv1.2 (IN), TLS alert, protocol version (582): +* error:1409442E:SSL routines:ssl3_read_bytes:tlsv1 alert protocol version +* Closing connection 0 +curl: (35) error:1409442E:SSL routines:ssl3_read_bytes:tlsv1 alert protocol version +``` diff --git a/docs/zh/latest/stream-proxy.md b/docs/zh/latest/stream-proxy.md index e2c1de110f43..22a17be6cdb3 100644 --- a/docs/zh/latest/stream-proxy.md +++ b/docs/zh/latest/stream-proxy.md @@ -27,7 +27,7 @@ APISIX 可以对 TCP/UDP 协议进行代理并实现动态负载均衡。在 ngi ## 如何开启 Stream 代理 -在 `conf/config.yaml` 配置文件设置 `stream_proxy` 选项,指定一组需要进行动态代理的 IP 地址。默认情况不开启 stream 代理。 +要启用该选项,请将 `apisix.proxy_mode` 设置为 `stream` 或 `http&stream`,具体取决于您是只需要流代理还是需要 http 和流。然后在 conf/config.yaml 中添加 apisix.stream_proxy 选项并指定 APISIX 应充当流代理并侦听传入请求的地址列表。 ```yaml apisix: @@ -40,19 +40,6 @@ apisix: - "127.0.0.1:9211" ``` -如果 `apisix.enable_admin` 为 true,上面的配置会同时启用 HTTP 和 stream 代理。 - -如果你设置 `enable_admin` 为 false,且需要同时启用 HTTP 和 stream 代理,设置 `only` 为 false: - -```yaml -apisix: - enable_admin: false - stream_proxy: # TCP/UDP proxy - only: false - tcp: # TCP proxy address list - - 9100 -``` - ## 如何设置 route 简例如下: diff 
--git a/docs/zh/latest/support-fips-in-apisix.md b/docs/zh/latest/support-fips-in-apisix.md new file mode 100644 index 000000000000..92facc21f3cc --- /dev/null +++ b/docs/zh/latest/support-fips-in-apisix.md @@ -0,0 +1,60 @@ +--- +id: support-fips-in-apisix +title: 通过 OpenSSL 3.0 使 APISIX 支持 FIPS 模式 +keywords: + - API 网关 + - Apache APISIX + - 贡献代码 + - 构建 APISIX + - OpenSSL 3.0 FIPS +description: 本文将介绍如何在 Apache APISIX 中使用 OpenSSL 3.0 来编译 apisix-base,即可启用 FIPS 模式。 +--- + + + +目前,OpenSSL 3.0 [支持了](https://www.openssl.org/blog/blog/2022/08/24/FIPS-validation-certificate-issued/) [FIPS](https://en.wikipedia.org/wiki/FIPS_140-2) 模式。为了在 APISIX 中支持 FIPS 模式,你应该使用 OpenSSL 3.0 来编译 apisix-base。 + +## 编译 + +如果你需要使用 OpenSSL 3.0 来编译 apisix-base,请以 root 用户角色来执行以下命令: + +```bash +cd $(mktemp -d) +OPENSSL3_PREFIX=${OPENSSL3_PREFIX-/usr/local} +apt install -y build-essential +git clone https://github.com/openssl/openssl +cd openssl +./Configure --prefix=$OPENSSL3_PREFIX/openssl-3.0 enable-fips +make install +echo $OPENSSL3_PREFIX/openssl-3.0/lib64 > /etc/ld.so.conf.d/openssl3.conf +ldconfig +$OPENSSL3_PREFIX/openssl-3.0/bin/openssl fipsinstall -out $OPENSSL3_PREFIX/openssl-3.0/ssl/fipsmodule.cnf -module $OPENSSL3_PREFIX/openssl-3.0/lib64/ossl-modules/fips.so +sed -i 's@# .include fipsmodule.cnf@.include '"$OPENSSL3_PREFIX"'/openssl-3.0/ssl/fipsmodule.cnf@g; s/# \(fips = fips_sect\)/\1\nbase = base_sect\n\n[base_sect]\nactivate=1\n/g' $OPENSSL3_PREFIX/openssl-3.0/ssl/openssl.cnf +cd .. 
+ +export cc_opt="-I$OPENSSL3_PREFIX/openssl-3.0/include" +export ld_opt="-L$OPENSSL3_PREFIX/openssl-3.0/lib64 -Wl,-rpath,$OPENSSL3_PREFIX/openssl-3.0/lib64" + +wget https://raw.githubusercontent.com/api7/apisix-build-tools/master/build-apisix-base.sh +chmod +x build-apisix-base.sh +./build-apisix-base.sh latest +``` + +apisix-base 将安装在 `/usr/local/openresty-debug`。 diff --git a/docs/zh/latest/terminology/consumer-group.md b/docs/zh/latest/terminology/consumer-group.md index d9390949b579..94f870503c49 100644 --- a/docs/zh/latest/terminology/consumer-group.md +++ b/docs/zh/latest/terminology/consumer-group.md @@ -34,6 +34,8 @@ description: 本文介绍了 Apache APISIX Consumer Group 对象的概念及使 以下示例展示了如何创建消费者组并将其绑定到消费者中。 +创建一个共享相同限流配额的消费者组: + ```shell curl http://127.0.0.1:9180/apisix/admin/consumer_groups/company_a \ -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' @@ -43,12 +45,14 @@ curl http://127.0.0.1:9180/apisix/admin/consumer_groups/company_a \ "count": 200, "time_window": 60, "rejected_code": 503, - "group": "$consumer_group_id" + "group": "grp_company_a" } } }' ``` +在消费者组中创建消费者: + ```shell curl http://127.0.0.1:9180/apisix/admin/consumers \ -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' diff --git a/docs/zh/latest/terminology/consumer.md b/docs/zh/latest/terminology/consumer.md index cb869a221bdf..7892b1b87bdf 100644 --- a/docs/zh/latest/terminology/consumer.md +++ b/docs/zh/latest/terminology/consumer.md @@ -56,6 +56,10 @@ Consumer 是某类服务的消费者,需要与用户认证配合才可以使 2. 获取 consumer_name:通过授权认证,即可自然获取到对应的 Consumer name,它是 Consumer 对象的唯一识别标识; 3. 
获取 Consumer 上绑定的 Plugin 或 Upstream 信息:完成对不同 Consumer 做不同配置的效果。 +当有不同的使用者请求相同的 API,并且需要根据使用者执行不同的插件和上游配置时,使用 Consumer 是非常合适的。需要与用户身份验证系统结合使用。 + +目前,可以与 Consumer 配置的身份验证插件包括 `basic-auth` 、`hmac-auth`、`jwt-auth`、`key-auth`、`ldap-auth` 和 `wolf-rbac`。 + 你可以参考 [key-auth](../plugins/key-auth.md) 认证授权插件的调用逻辑,进一步理解 Consumer 概念和使用。 :::note 注意 diff --git a/docs/zh/latest/terminology/plugin.md b/docs/zh/latest/terminology/plugin.md index 2301add9465a..bf76760f4529 100644 --- a/docs/zh/latest/terminology/plugin.md +++ b/docs/zh/latest/terminology/plugin.md @@ -29,63 +29,83 @@ description: 本文介绍了 APISIX Plugin 对象的相关信息及其使用方 ## 描述 -Plugin 表示将在 HTTP 请求/响应生命周期期间执行的插件配置。Plugin 的配置信息可以直接绑定在 [Route](./route.md) 上,也可以被绑定在 [Service](./service.md)、[Consumer](./consumer.md) 或 [Plugin Config](./plugin-config.md) 上。 +APISIX 插件可以扩展 APISIX 的功能,以满足组织或用户特定的流量管理、可观测性、安全、请求/响应转换、无服务器计算等需求。 -你也可以参考 [Admin API](../admin-api.md#plugin) 了解如何使用该资源。 +APISIX 提供了许多现有的插件,可以定制和编排以满足你的需求。这些插件可以全局启用,以在每个传入请求上触发,也可以局部绑定到其他对象,例如在 [Route](./route.md)、[Service](./service.md)、[Consumer](./consumer.md) 或 [Plugin Config](./plugin-config.md) 上。你可以参考 [Admin API](../admin-api.md#plugin) 了解如何使用该资源。 -:::note 注意 +如果现有的 APISIX 插件不满足需求,你还可以使用 Lua 或其他语言(如 Java、Python、Go 和 Wasm)编写自定义插件。 -对于同一个插件的配置,只能有一个是有效的,其插件配置优先级为 Consumer > Route > Plugin Config > Service。 +## 插件安装 -::: +APISIX 附带一个`config-default.yaml`的默认配置文件和一个 `config.yaml` 的用户自定义配置文件。这些文件位于`conf`目录中。如果两个文件中都存在相同的键 (例如`plugins`),则`config.yaml`文件中该键的配置值将覆盖`config-default.yaml`文件中的配置值。 -## 配置简介 +例如: -如果你想在现有插件的基础上新增插件,请复制 `./conf/config-default.yaml` 中的 `plugins` 参数下的插件列表到 `./conf/config.yaml` 的 `plugins` 参数中。 +```yaml +plugins: + - real-ip # 安装 + - ai + - client-control + - proxy-control + - request-id + - zipkin + # - skywalking # 未安装 +... 
+``` -:::tip 提示 +## 插件执行生命周期 -在 `./conf/config.yaml` 中的 `plugins` 参数中,可以声明本地 APISIX 节点支持了哪些插件。这是个白名单机制,不在该白名单的插件配置将被自动忽略。该特性可用于临时关闭或打开特定插件,应对突发情况非常有效。 +安装的插件首先会被初始化。然后会检查插件的配置,以确保插件配置遵循定义的[JSON Schema](https://json-schema.org)。 -::: +当一个请求通过 APISIX 时,插件的相应方法会在以下一个或多个阶段中执行: `rewrite`, `access`, `before_proxy`, `header_filter`, `body_filter`, and `log`。这些阶段在很大程度上受到[OpenResty 指令](https://openresty-reference.readthedocs.io/en/latest/Directives/)的影响。 -一个插件在一次请求中只会执行一次,即使被同时绑定到多个不同对象中(比如 Route 或 Service)。插件运行先后顺序是根据插件自身的优先级来决定的,例如: +
+
+Routes Diagram +
+
-```lua -local _M = { - version = 0.1, - priority = 0, -- 这个插件的优先级为 0 - name = plugin_name, - schema = schema, - metadata_schema = metadata_schema, -} -``` +## 插件执行顺序 + +通常情况下,插件按照以下顺序执行: -插件的配置信息,可以存放 Route、Service、Plugin Config 等对象中的 `plugins` 参数下。如下所示的配置中,包含 `limit-count` 和 `prometheus` 两个插件的配置信息: +1. [全局规则](./global-rule.md) 插件 + 1. rewrite 阶段的插件 + 2. access 阶段的插件 + +2. 绑定到其他对象的插件 + 1. rewrite 阶段的插件 + 2. access 阶段的插件 + +在每个阶段内,你可以在插件的 `_meta.priority` 字段中可选地定义一个新的优先级数,该优先级数优先于默认插件优先级在执行期间。具有更高优先级数的插件首先执行。 + +例如,如果你想在请求到达路由时,让 `limit-count`(优先级 1002)先于 `ip-restriction`(优先级 3000)运行,可以通过将更高的优先级数传递给 `limit-count` 的 `_meta.priority` 字段来实现: ```json { - "plugins": { - "limit-count": { - "count": 2, - "time_window": 60, - "rejected_code": 503, - "key": "remote_addr" - }, - "prometheus": {} + ..., + "plugins": { + "limit-count": { + ..., + "_meta": { + "priority": 3010 + } } + } } ``` -并不是所有插件都有具体配置项,比如 [prometheus](../plugins/prometheus.md) 下是没有任何具体配置项,此时可以使用一个空对象启用该插件。 +若要将此插件实例的优先级重置为默认值,只需从插件配置中删除`_meta.priority`字段即可。 -如果一个请求因为某个插件而被拒绝,会有类似如下 `warn` 级别的日志: +## 插件合并优先顺序 -```shell +当同一个插件在全局规则中和局部规则(例如路由)中同时配置时,两个插件将顺序执行。 -ip-restriction exits with http status code 403 +然而,如果相同的插件在多个对象上本地配置,例如在[`Route`](route.md), [`Service`](service.md), [`Consumer`](consumer.md) 或[`Plugin Config`](plugin-config.md) 上,每个非全局插件只会执行一次,因为在执行期间,针对特定的优先顺序,这些对象中配置的插件会被合并: -``` +`Consumer` > `Consumer Group` > `Route` > `Plugin Config` > `Service` + +因此,如果相同的插件在不同的对象中具有不同的配置,则合并期间具有最高优先顺序的插件配置将被使用。 ## 通用配置 @@ -284,6 +304,6 @@ curl http://127.0.0.1:9180/apisix/admin/plugins/reload -H 'X-API-KEY: edd1c9f034 ::: -## stand-alone 模式下的热加载 +## Standalone 模式下的热加载 -关于 Stand Alone 模式下的热加载的信息,请参考 [stand alone 模式](../stand-alone.md)。 +关于 Stand Alone 模式下的热加载的信息,请参考 [stand alone 模式](../../../en/latest/deployment-modes.md#standalone)。 diff --git a/docs/zh/latest/terminology/router.md b/docs/zh/latest/terminology/router.md index 43cf809c0ce9..f9d66151bbf6 100644 --- a/docs/zh/latest/terminology/router.md 
+++ b/docs/zh/latest/terminology/router.md @@ -48,10 +48,9 @@ Router 具有以下配置: ::: 注意 -在3.2及之前版本,APISIX 使用 `radixtree_uri` 作为默认路由,`radixtree_uri` 比 `radixtree_host_uri` 拥有更好的性能,如果你对性能有更高的要求,并且能够接受 `radixtree_uri` 只使用 `uri` 作为主索引的特点,可以考虑继续使用 `radixtree_uri` 作为默认路由 +在 3.2 及之前版本,APISIX 使用 `radixtree_uri` 作为默认路由,`radixtree_uri` 比 `radixtree_host_uri` 拥有更好的性能,如果你对性能有更高的要求,并且能够接受 `radixtree_uri` 只使用 `uri` 作为主索引的特点,可以考虑继续使用 `radixtree_uri` 作为默认路由 ::: - - `apisix.router.ssl`:SSL 加载匹配路由。 - `radixtree_sni`:(默认)使用 `SNI` (Server Name Indication) 作为主索引(基于 radixtree 引擎)。 diff --git a/docs/zh/latest/terminology/secret.md b/docs/zh/latest/terminology/secret.md index 95f84067ef4b..24a995f15f37 100644 --- a/docs/zh/latest/terminology/secret.md +++ b/docs/zh/latest/terminology/secret.md @@ -42,7 +42,7 @@ APISIX 目前支持通过以下方式存储密钥: 你可以在以下插件的 consumer 配置中通过指定格式的变量来使用 APISIX Secret 功能,比如 `key-auth` 插件。 -::: note +:::note 如果某个配置项为:`key: "$ENV://ABC"`,当 APISIX Secret 中没有检索到 $ENV://ABC 对应的真实值,那么 key 的值将是 "$ENV://ABC" 而不是 `nil`。 @@ -125,9 +125,9 @@ $secret://$manager/$id/$secret_name/$key ``` - manager: 密钥管理服务,可以是 Vault、AWS 等 -- APISIX Secret 资源 ID, 需要与添加 APISIX Secret 资源时指定的 ID 保持一致 +- APISIX Secret 资源 ID,需要与添加 APISIX Secret 资源时指定的 ID 保持一致 - secret_name: 密钥管理服务中的密钥名称 -- key: 密钥管理服务中密钥对应的 key +- key:密钥管理服务中密钥对应的 key ### 示例:在 key-auth 插件中使用 diff --git a/docs/zh/latest/terminology/upstream.md b/docs/zh/latest/terminology/upstream.md index 65ef3b5e6135..dca9e56c25c9 100644 --- a/docs/zh/latest/terminology/upstream.md +++ b/docs/zh/latest/terminology/upstream.md @@ -75,7 +75,7 @@ APISIX 的 Upstream 对象除了基本的负载均衡算法外,还支持对上 以下示例是将上游信息直接配置在路由中: - ```shell +```shell curl http://127.0.0.1:9180/apisix/admin/routes/1 \ -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { @@ -95,7 +95,7 @@ APISIX 的 Upstream 对象除了基本的负载均衡算法外,还支持对上 } } }' - ``` +``` ## 使用示例 diff --git a/docs/zh/latest/tutorials/client-to-apisix-mtls.md b/docs/zh/latest/tutorials/client-to-apisix-mtls.md index fdf90a947e76..0999ab65a3db 
100644 --- a/docs/zh/latest/tutorials/client-to-apisix-mtls.md +++ b/docs/zh/latest/tutorials/client-to-apisix-mtls.md @@ -80,7 +80,7 @@ curl -X PUT 'http://127.0.0.1:9180/apisix/admin/ssls/1' \ --header 'Content-Type: application/json' \ --data-raw '{ "sni": "test.com", - "cert": "<服务器证书公钥>", + "cert": "<服务器证书>", "key": "<服务器证书私钥>", "client": { "ca": "<客户端证书公钥>" @@ -89,9 +89,9 @@ curl -X PUT 'http://127.0.0.1:9180/apisix/admin/ssls/1' \ ``` - `sni`:指定证书的域名(CN),当客户端尝试通过 TLS 与 APISIX 握手时,APISIX 会将 `ClientHello` 中的 SNI 数据与该字段进行匹配,找到对应的服务器证书进行握手。 -- `cert`:服务器证书的公钥。 +- `cert`:服务器证书。 - `key`:服务器证书的私钥。 -- `client.ca`:客户端证书的公钥。为了演示方便,这里使用了同一个 `CA`。 +- `client.ca`:用来验证客户端证书的 CA 文件。为了演示方便,这里使用了同一个 `CA`。 ### 配置测试路由 @@ -193,6 +193,126 @@ curl --resolve "test.com:9443:127.0.0.1" https://test.com:9443/anything -k --cer 由于我们在示例中配置了 `proxy-rewrite` 插件,我们可以看到响应体中包含上游收到的请求体,包含了正确数据。 +## 基于对 URI 正则表达式匹配,绕过 MTLS + +APISIX 允许配置 URI 白名单以便绕过 MTLS。如果请求的 URI 在白名单内,客户端证书将不被检查。注意,如果针对白名单外的 URI 发请求,而该请求缺乏客户端证书或者提供了非法客户端证书,会得到 HTTP 400 响应,而不是在 SSL 握手阶段被拒绝。 + +### 时序图 + +![skip mtls](../../../assets/images/skip-mtls.png) + +### 例子 + +1. 配置路由和证书 + +```bash +curl http://127.0.0.1:9180/apisix/admin/routes/1 \ +-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "uri": "/*", + "upstream": { + "nodes": { + "httpbin.org": 1 + } + } +}' + +curl http://127.0.0.1:9180/apisix/admin/ssls/1 \ +-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "cert": "'"$( GET /uuid HTTP/2 +> Host: admin.apisix.dev:9443 +> user-agent: curl/7.68.0 +> accept: */* +> +* TLSv1.3 (IN), TLS handshake, Newsession Ticket (4): +* TLSv1.3 (IN), TLS handshake, Newsession Ticket (4): +* old SSL session ID is stale, removing +* Connection state changed (MAX_CONCURRENT_STREAMS == 128)! +< HTTP/2 400 +< date: Fri, 21 Apr 2023 07:53:23 GMT +< content-type: text/html; charset=utf-8 +< content-length: 229 +< server: APISIX/3.2.0 +< + +400 Bad Request + +

400 Bad Request

+
openresty
+

Powered by APISIX.

+ +* Connection #0 to host admin.apisix.dev left intact + + +3. 虽然没提供客户端证书,但是 URI 在白名单内,请求会被成功处理和响应。 + +```bash +curl https://admin.apisix.dev:9443/anything/foobar -i \ +--resolve 'admin.apisix.dev:9443:127.0.0.1' --cacert t/certs/mtls_ca.crt +HTTP/2 200 +content-type: application/json +content-length: 416 +date: Fri, 21 Apr 2023 07:58:28 GMT +access-control-allow-origin: * +access-control-allow-credentials: true +server: APISIX/3.2.0 +... +``` + ## 总结 想了解更多有关 Apache APISIX 的 mTLS 功能介绍,可以阅读:[TLS 双向认证](../mtls.md)。 diff --git a/docs/zh/latest/tutorials/expose-api.md b/docs/zh/latest/tutorials/expose-api.md index c2886273ae5a..9561717e6e44 100644 --- a/docs/zh/latest/tutorials/expose-api.md +++ b/docs/zh/latest/tutorials/expose-api.md @@ -37,13 +37,13 @@ description: 本文介绍了如何通过 Apache APISIX 发布服务和路由。 [Upstream](../terminology/upstream.md) 也称为上游,上游是对虚拟主机的抽象,即应用层服务或节点的抽象。 -上游的作用是按照配置规则对服务节点进行负载均衡,它的地址信息可以直接配置到路由或服务上。当多个路由或服务引用同一个上游时,可以通过创建上游对象,在路由或服务中使用上游的 ID 方式引用上游,减轻维护压力。 +上游的作用是按照配置规则对服务节点进行负载均衡,它的地址信息可以直接配置到路由或服务上。当多个路由或服务引用同一个上游时,可以通过创建上游对象,在路由或服务中使用上游 ID 的方式引用上游,减轻维护压力。 ### 路由 [Route](../terminology/route.md) 也称为路由,是 APISIX 中最基础和最核心的资源对象。 -APISIX 可以通过路由定义规则来匹配客户端请求,根据匹配结果加载并执行相应的[插件](./terminology/plugin.md),最后把请求转发给到指定的上游服务。路由中主要包含三部分内容:匹配规则、插件配置和上游信息。 +APISIX 可以通过路由定义规则来匹配客户端请求,根据匹配结果加载并执行相应的[插件](../terminology/plugin.md),最后把请求转发给到指定的上游服务。路由中主要包含三部分内容:匹配规则、插件配置和上游信息。 ### 服务 @@ -114,13 +114,13 @@ curl "http://127.0.0.1:9180/apisix/admin/routes/1" \ 在创建完成路由后,你可以通过以下命令测试路由是否正常: ``` -curl -i -X GET "http://127.0.0.1:9080/get?foo1=bar1&foo2=bar2" -H "Host: example.com" +curl -i -X GET "http://127.0.0.1:9080/anything/get?foo1=bar1&foo2=bar2" -H "Host: example.com" ``` -该请求将被 APISIX 转发到 `http://httpbin.org:80/anything/foo?arg=10`。 +该请求将被 APISIX 转发到 `http://httpbin.org:80/anything/get?foo1=bar1&foo2=bar2`。 ## 更多教程 你可以查看[保护 API](./protect-api.md) 来保护你的 API。 -接下来,你可以通过 APISIX 的一些[插件](./plugins/batch-requests.md),实现更多功能。 +接下来,你可以通过 APISIX 
的一些[插件](../plugins/batch-requests.md),实现更多功能。 diff --git a/docs/zh/latest/tutorials/observe-your-api.md b/docs/zh/latest/tutorials/observe-your-api.md index d65aca50cd58..489f3b8b2c56 100644 --- a/docs/zh/latest/tutorials/observe-your-api.md +++ b/docs/zh/latest/tutorials/observe-your-api.md @@ -54,7 +54,7 @@ APISIX 中提供了很多具有丰富功能的可观测性插件。你可以通 - [error-logger](../plugins/error-log-logger.md) - [google-cloud-logging](../plugins/google-cloud-logging.md) -你可以在 APISIX [插件中心](../plugins/http-logger.md) 查看 APISIX 支持的所有日志插件。接下来我们将使用 `http-logger` 插件为你演示如何将 APISIX 的日志数据发送到 HTPP/HTTPS 服务器中。 +你可以在 APISIX [插件中心](../plugins/http-logger.md) 查看 APISIX 支持的所有日志插件。接下来我们将使用 `http-logger` 插件为你演示如何将 APISIX 的日志数据发送到 HTTP/HTTPS 服务器中。 :::note 注意 @@ -172,7 +172,7 @@ apisix_etcd_modify_indexes{key="routes"} 20028 **链路追踪**就是将一次请求还原成调用链路,并将该请求的调用情况使用拓扑的方式展现,比如展示各个微服务节点上的耗时,请求具体经过了哪些服务器以及每个服务节点的请求状态等内容。 -[Zipkin](https://zipkin.io/) 一个开源的分布式追踪系统。 APISIX 的 [zipkin 插件](../plugins/zipkin.md) 支持根据 [Zipkin API 规范](https://zipkin.io/pages/instrumenting.html) 收集链路信息并报告给 Zipkin Collector。 +[Zipkin](https://zipkin.io/) 一个开源的分布式追踪系统。APISIX 的[zipkin 插件](../plugins/zipkin.md) 支持根据 [Zipkin API 规范](https://zipkin.io/pages/instrumenting.html) 收集链路信息并报告给 Zipkin Collector。 :::tip 提示 @@ -219,7 +219,7 @@ curl -i http://127.0.0.1:9080/get "X-B3-Traceid": "e18985df47dab632d62083fd96626692", ``` -你可以通过访问 `http://127.0.0.1:9411/zipkin`,在 Zinkin 的 Web UI 上看到请求链路。 +你可以通过访问 `http://127.0.0.1:9411/zipkin`,在 Zipkin 的 Web UI 上看到请求链路。 ![Zipkin plugin output 1](https://static.apiseven.com/2022/09/14/6321dc27f3d33.png) diff --git a/docs/zh/latest/tutorials/protect-api.md b/docs/zh/latest/tutorials/protect-api.md index bdcc8a056c95..56b2a675e3de 100644 --- a/docs/zh/latest/tutorials/protect-api.md +++ b/docs/zh/latest/tutorials/protect-api.md @@ -35,11 +35,11 @@ description: 本文介绍了如何通过 Apache APISIX 发布服务和路由。 ### 插件 -[Plugin](./terminology/plugin.md) 也称之为插件,它是扩展 APISIX 应用层能力的关键机制,也是在使用 APISIX 时最常用的资源对象。插件主要是在 HTTP 
请求或响应生命周期期间执行的、针对请求的个性化策略。插件可以与路由、服务或消费者绑定。 +[Plugin](../terminology/plugin.md) 也称之为插件,它是扩展 APISIX 应用层能力的关键机制,也是在使用 APISIX 时最常用的资源对象。插件主要是在 HTTP 请求或响应生命周期期间执行的、针对请求的个性化策略。插件可以与路由、服务或消费者绑定。 :::note 注意 -如果 [路由](./terminology/route.md)、[服务](./terminology/service.md)、[插件配置](./terminology/plugin-config.md) 或消费者都绑定了相同的插件,则只有一份插件配置会生效,插件配置的优先级由高到低顺序是:消费者 > 路由 > 插件配置 > 服务。同时在插件执行过程中也会涉及 6 个阶段,分别是 `rewrite`、`access`、`before_proxy`、`header_filter`、`body_filter` 和 `log`。 +如果 [路由](../terminology/route.md)、[服务](../terminology/service.md)、[插件配置](../terminology/plugin-config.md) 或消费者都绑定了相同的插件,则只有一份插件配置会生效,插件配置的优先级由高到低顺序是:消费者 > 路由 > 插件配置 > 服务。同时在插件执行过程中也会涉及 6 个阶段,分别是 `rewrite`、`access`、`before_proxy`、`header_filter`、`body_filter` 和 `log`。 ::: @@ -57,7 +57,7 @@ description: 本文介绍了如何通过 Apache APISIX 发布服务和路由。 4. 拒绝客户端请求; 5. 限制响应数据的速率。 -为了实现上述功能,APISIX 提供了多个限流限速的插件,包括 [limit-conn](./plugins/limit-conn.md)、[limit-count](./plugins/limit-count.md) 和 [limit-req](./plugins/limit-req.md)。 +为了实现上述功能,APISIX 提供了多个限流限速的插件,包括 [limit-conn](../plugins/limit-conn.md)、[limit-count](../plugins/limit-count.md) 和 [limit-req](../plugins/limit-req.md)。 - `limit-conn` 插件主要用于限制客户端对服务的并发请求数。 - `limit-req` 插件使用漏桶算法限制对用户服务的请求速率。 @@ -114,14 +114,14 @@ curl http://127.0.0.1:9080/index.html APISIX 除了提供限流限速的插件外,还提供了很多其他的关于 **traffic** 插件来满足实际场景的需求: -- [proxy-cache](./plugins/proxy-cache.md):该插件提供缓存后端响应数据的能力,它可以和其他插件一起使用。该插件支持基于磁盘和内存的缓存。 -- [request-validation](./plugins/request-validation.md):该插件用于提前验证向上游服务转发的请求。 -- [proxy-mirror](./plugins/proxy-mirror.md):该插件提供了镜像客户端请求的能力。流量镜像是将线上真实流量拷贝到镜像服务中,以便在不影响线上服务的情况下,对线上流量或请求内容进行具体的分析。 -- [api-breaker](./plugins/api-breaker.md):该插件实现了 API 熔断功能,从而帮助我们保护上游业务服务。 -- [traffic-split](./plugins/traffic-split.md):该插件使用户可以逐步引导各个上游之间的流量百分比。,你可以使用该插件实现蓝绿发布,灰度发布。 -- [request-id](./plugins/request-id.md):该插件通过 APISIX 为每一个请求代理添加 `unique` ID 用于追踪 API 请求。 -- [proxy-control](./plugins/proxy-control.md):该插件能够动态地控制 NGINX 代理的相关行为。 -- 
[client-control](./plugins/client-control.md):该插件能够通过设置客户端请求体大小的上限来动态地控制 NGINX 处理客户端的请求。 +- [proxy-cache](../plugins/proxy-cache.md):该插件提供缓存后端响应数据的能力,它可以和其他插件一起使用。该插件支持基于磁盘和内存的缓存。 +- [request-validation](../plugins/request-validation.md):该插件用于提前验证向上游服务转发的请求。 +- [proxy-mirror](../plugins/proxy-mirror.md):该插件提供了镜像客户端请求的能力。流量镜像是将线上真实流量拷贝到镜像服务中,以便在不影响线上服务的情况下,对线上流量或请求内容进行具体的分析。 +- [api-breaker](../plugins/api-breaker.md):该插件实现了 API 熔断功能,从而帮助我们保护上游业务服务。 +- [traffic-split](../plugins/traffic-split.md):该插件使用户可以逐步引导各个上游之间的流量百分比。,你可以使用该插件实现蓝绿发布,灰度发布。 +- [request-id](../plugins/request-id.md):该插件通过 APISIX 为每一个请求代理添加 `unique` ID 用于追踪 API 请求。 +- [proxy-control](../plugins/proxy-control.md):该插件能够动态地控制 NGINX 代理的相关行为。 +- [client-control](../plugins/client-control.md):该插件能够通过设置客户端请求体大小的上限来动态地控制 NGINX 处理客户端的请求。 ## 更多操作 diff --git a/powered-by.md b/powered-by.md index 6b77a6e57704..082efe2c1b94 100644 --- a/powered-by.md +++ b/powered-by.md @@ -26,7 +26,7 @@ or providing commercial products including APISIX. Users are encouraged to add themselves to this page, [issue](https://github.com/apache/apisix/issues/487) and PR are welcomed. -1. aimiaobi 妙笔 AI +1. aimiaobi 妙笔 AI 1. AUGUR 奥格科技股份有限公司 1. AISPEECH 思必驰信息科技股份有限公司 1. cunw 湖南新云网 @@ -45,10 +45,10 @@ Users are encouraged to add themselves to this page, [issue](https://github.com/ 1. eZone 简单一点科技 1. fansup 1. Tencent Game 腾讯游戏 -1. haieruplus 海尔优家 +1. haieruplus 海尔优家 1. hellowin 好洛维 1. HelloTalk, Inc. -1. 航天网信 +1. 航天网信 1. Huawei 华为 1. 虎牙 1. 好医生集团 @@ -65,12 +65,12 @@ Users are encouraged to add themselves to this page, [issue](https://github.com/ 1. NASA JPL 美国国家航空航天局 喷气推进实验室 1. Purcotton 深圳全棉时代科技有限公司 1. 360 奇虎 -1. sinog2c 湖南国科云通 +1. sinog2c 湖南国科云通 1. sinovatech 炎黄新星 1. Taikanglife 泰康云 1. tangdou 糖豆网 1. Tencent Cloud 腾讯云 -1. Travelsky 中国航信 +1. Travelsky 中国航信 1. vbill 随行付 1. VIVO 1. 万思 @@ -84,11 +84,11 @@ Users are encouraged to add themselves to this page, [issue](https://github.com/ 1. 紫豪网络 1. zuzuche 租租车 1. zybang 作业帮 -1. 
中食安泓(广东)健康产业有限公司 +1. 中食安泓(广东)健康产业有限公司 1. 上海泽怡信息科技 1. 北京新片场传媒股份有限公司 1. 武汉精臣智慧标识科技有限公司 -1. 北京大学信息技术高等研究院 +1. 北京大学信息技术高等研究院 1. HONOR 荣耀 1. 群之脉信息科技 1. 大房鸭 @@ -97,7 +97,7 @@ Users are encouraged to add themselves to this page, [issue](https://github.com/ 1. 数地科技 1. 微吼 1. 小鹏汽车 -1. Ideacreep +1. Ideacreep @@ -105,7 +105,7 @@ Users are encouraged to add themselves to this page, [issue](https://github.com/ ## NASA JPL -Using Apache APISIX as a API gateway to deal with north-south and east-west traffic between microservices. +Using Apache APISIX as an API gateway to deal with north-south and east-west traffic between microservices. ## ke.com diff --git a/rockspec/apisix-3.2.1-0.rockspec b/rockspec/apisix-3.2.1-0.rockspec new file mode 100644 index 000000000000..746105bfbce3 --- /dev/null +++ b/rockspec/apisix-3.2.1-0.rockspec @@ -0,0 +1,103 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- + +package = "apisix" +version = "master-0" +supported_platforms = {"linux", "macosx"} + +source = { + url = "git://github.com/apache/apisix", + branch = "3.2.1", +} + +description = { + summary = "Apache APISIX is a cloud-native microservices API gateway, delivering the ultimate performance, security, open source and scalable platform for all your APIs and microservices.", + homepage = "https://github.com/apache/apisix", + license = "Apache License 2.0", +} + +dependencies = { + "lua-resty-ctxdump = 0.1-0", + "api7-lua-resty-dns-client = 7.0.1", + "lua-resty-template = 2.0", + "lua-resty-etcd = 1.10.4", + "api7-lua-resty-http = 0.2.0", + "lua-resty-balancer = 0.04", + "lua-resty-ngxvar = 0.5.2", + "lua-resty-jit-uuid = 0.0.7", + "lua-resty-healthcheck-api7 = 2.2.2", + "api7-lua-resty-jwt = 0.2.4", + "lua-resty-hmac-ffi = 0.05", + "lua-resty-cookie = 0.1.0", + "lua-resty-session = 3.10", + "opentracing-openresty = 0.1", + "lua-resty-radixtree = 2.8.2", + "lua-protobuf = 0.4.1", + "lua-resty-openidc = 1.7.5", + "luafilesystem = 1.7.0-2", + "api7-lua-tinyyaml = 0.4.2", + "nginx-lua-prometheus = 0.20220527", + "jsonschema = 0.9.8", + "lua-resty-ipmatcher = 0.6.1", + "lua-resty-kafka = 0.20-0", + "lua-resty-logger-socket = 2.0.1-0", + "skywalking-nginx-lua = 0.6.0", + "base64 = 1.5-2", + "binaryheap = 0.4", + "api7-dkjson = 0.1.1", + "resty-redis-cluster = 1.02-4", + "lua-resty-expr = 1.3.2", + "graphql = 0.0.2", + "argparse = 0.7.1-1", + "luasocket = 3.1.0-1", + "luasec = 0.9-1", + "lua-resty-consul = 0.3-2", + "penlight = 1.9.2-1", + "ext-plugin-proto = 0.6.0", + "casbin = 1.41.5", + "api7-snowflake = 2.0-1", + "inspect == 3.1.1", + "lualdap = 1.2.6-1", + "lua-resty-rocketmq = 0.3.0-0", + "opentelemetry-lua = 0.2-3", + "net-url = 0.9-1", + "xml2lua = 1.5-2", + "nanoid = 0.1-1", + "lua-resty-mediador = 0.1.2-1", + "lua-resty-ldap = 0.1.0-0" +} + +build = { + type = "make", + build_variables = { + CFLAGS="$(CFLAGS)", + LIBFLAG="$(LIBFLAG)", + 
LUA_LIBDIR="$(LUA_LIBDIR)", + LUA_BINDIR="$(LUA_BINDIR)", + LUA_INCDIR="$(LUA_INCDIR)", + LUA="$(LUA)", + OPENSSL_INCDIR="$(OPENSSL_INCDIR)", + OPENSSL_LIBDIR="$(OPENSSL_LIBDIR)", + }, + install_variables = { + ENV_INST_PREFIX="$(PREFIX)", + ENV_INST_BINDIR="$(BINDIR)", + ENV_INST_LIBDIR="$(LIBDIR)", + ENV_INST_LUADIR="$(LUADIR)", + ENV_INST_CONFDIR="$(CONFDIR)", + }, +} diff --git a/rockspec/apisix-3.3.0-0.rockspec b/rockspec/apisix-3.3.0-0.rockspec new file mode 100644 index 000000000000..7dbb009decfa --- /dev/null +++ b/rockspec/apisix-3.3.0-0.rockspec @@ -0,0 +1,103 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- + +package = "apisix" +version = "3.3.0-0" +supported_platforms = {"linux", "macosx"} + +source = { + url = "git://github.com/apache/apisix", + branch = "3.3.0", +} + +description = { + summary = "Apache APISIX is a cloud-native microservices API gateway, delivering the ultimate performance, security, open source and scalable platform for all your APIs and microservices.", + homepage = "https://github.com/apache/apisix", + license = "Apache License 2.0", +} + +dependencies = { + "lua-resty-ctxdump = 0.1-0", + "api7-lua-resty-dns-client = 7.0.1", + "lua-resty-template = 2.0", + "lua-resty-etcd = 1.10.4", + "api7-lua-resty-http = 0.2.0", + "lua-resty-balancer = 0.04", + "lua-resty-ngxvar = 0.5.2", + "lua-resty-jit-uuid = 0.0.7", + "lua-resty-healthcheck-api7 = 2.2.3", + "api7-lua-resty-jwt = 0.2.4", + "lua-resty-hmac-ffi = 0.05", + "lua-resty-cookie = 0.1.0", + "lua-resty-session = 3.10", + "opentracing-openresty = 0.1", + "lua-resty-radixtree = 2.8.2", + "lua-protobuf = 0.4.1", + "lua-resty-openidc = 1.7.5", + "luafilesystem = 1.7.0-2", + "api7-lua-tinyyaml = 0.4.2", + "nginx-lua-prometheus = 0.20220527", + "jsonschema = 0.9.8", + "lua-resty-ipmatcher = 0.6.1", + "lua-resty-kafka = 0.20-0", + "lua-resty-logger-socket = 2.0.1-0", + "skywalking-nginx-lua = 0.6.0", + "base64 = 1.5-2", + "binaryheap = 0.4", + "api7-dkjson = 0.1.1", + "resty-redis-cluster = 1.02-4", + "lua-resty-expr = 1.3.2", + "graphql = 0.0.2", + "argparse = 0.7.1-1", + "luasocket = 3.1.0-1", + "luasec = 0.9-1", + "lua-resty-consul = 0.3-2", + "penlight = 1.9.2-1", + "ext-plugin-proto = 0.6.0", + "casbin = 1.41.5", + "api7-snowflake = 2.0-1", + "inspect == 3.1.1", + "lualdap = 1.2.6-1", + "lua-resty-rocketmq = 0.3.0-0", + "opentelemetry-lua = 0.2-3", + "net-url = 0.9-1", + "xml2lua = 1.5-2", + "nanoid = 0.1-1", + "lua-resty-mediador = 0.1.2-1", + "lua-resty-ldap = 0.2.2-0" +} + +build = { + type = "make", + build_variables = { + CFLAGS="$(CFLAGS)", + LIBFLAG="$(LIBFLAG)", + 
LUA_LIBDIR="$(LUA_LIBDIR)", + LUA_BINDIR="$(LUA_BINDIR)", + LUA_INCDIR="$(LUA_INCDIR)", + LUA="$(LUA)", + OPENSSL_INCDIR="$(OPENSSL_INCDIR)", + OPENSSL_LIBDIR="$(OPENSSL_LIBDIR)", + }, + install_variables = { + ENV_INST_PREFIX="$(PREFIX)", + ENV_INST_BINDIR="$(BINDIR)", + ENV_INST_LIBDIR="$(LIBDIR)", + ENV_INST_LUADIR="$(LUADIR)", + ENV_INST_CONFDIR="$(CONFDIR)", + }, +} diff --git a/rockspec/apisix-3.4.0-0.rockspec b/rockspec/apisix-3.4.0-0.rockspec new file mode 100644 index 000000000000..50988dc78a35 --- /dev/null +++ b/rockspec/apisix-3.4.0-0.rockspec @@ -0,0 +1,103 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- + +package = "apisix" +version = "3.4.0-0" +supported_platforms = {"linux", "macosx"} + +source = { + url = "git://github.com/apache/apisix", + branch = "3.4.0", +} + +description = { + summary = "Apache APISIX is a cloud-native microservices API gateway, delivering the ultimate performance, security, open source and scalable platform for all your APIs and microservices.", + homepage = "https://github.com/apache/apisix", + license = "Apache License 2.0", +} + +dependencies = { + "lua-resty-ctxdump = 0.1-0", + "api7-lua-resty-dns-client = 7.0.1", + "lua-resty-template = 2.0", + "lua-resty-etcd = 1.10.4", + "api7-lua-resty-http = 0.2.0", + "lua-resty-balancer = 0.04", + "lua-resty-ngxvar = 0.5.2", + "lua-resty-jit-uuid = 0.0.7", + "lua-resty-healthcheck-api7 = 3.0.0", + "api7-lua-resty-jwt = 0.2.4", + "lua-resty-hmac-ffi = 0.05", + "lua-resty-cookie = 0.1.0", + "lua-resty-session = 3.10", + "opentracing-openresty = 0.1", + "lua-resty-radixtree = 2.8.2", + "lua-protobuf = 0.4.1", + "lua-resty-openidc = 1.7.5", + "luafilesystem = 1.7.0-2", + "api7-lua-tinyyaml = 0.4.2", + "nginx-lua-prometheus = 0.20221218", + "jsonschema = 0.9.8", + "lua-resty-ipmatcher = 0.6.1", + "lua-resty-kafka = 0.20-0", + "lua-resty-logger-socket = 2.0.1-0", + "skywalking-nginx-lua = 0.6.0", + "base64 = 1.5-2", + "binaryheap = 0.4", + "api7-dkjson = 0.1.1", + "resty-redis-cluster = 1.02-4", + "lua-resty-expr = 1.3.2", + "graphql = 0.0.2", + "argparse = 0.7.1-1", + "luasocket = 3.1.0-1", + "luasec = 0.9-1", + "lua-resty-consul = 0.3-2", + "penlight = 1.9.2-1", + "ext-plugin-proto = 0.6.0", + "casbin = 1.41.5", + "api7-snowflake = 2.0-1", + "inspect == 3.1.1", + "lualdap = 1.2.6-1", + "lua-resty-rocketmq = 0.3.0-0", + "opentelemetry-lua = 0.2-3", + "net-url = 0.9-1", + "xml2lua = 1.5-2", + "nanoid = 0.1-1", + "lua-resty-mediador = 0.1.2-1", + "lua-resty-ldap = 0.2.2-0" +} + +build = { + type = "make", + build_variables = { + CFLAGS="$(CFLAGS)", + LIBFLAG="$(LIBFLAG)", + 
LUA_LIBDIR="$(LUA_LIBDIR)", + LUA_BINDIR="$(LUA_BINDIR)", + LUA_INCDIR="$(LUA_INCDIR)", + LUA="$(LUA)", + OPENSSL_INCDIR="$(OPENSSL_INCDIR)", + OPENSSL_LIBDIR="$(OPENSSL_LIBDIR)", + }, + install_variables = { + ENV_INST_PREFIX="$(PREFIX)", + ENV_INST_BINDIR="$(BINDIR)", + ENV_INST_LIBDIR="$(LIBDIR)", + ENV_INST_LUADIR="$(LUADIR)", + ENV_INST_CONFDIR="$(CONFDIR)", + }, +} diff --git a/rockspec/apisix-3.5.0-0.rockspec b/rockspec/apisix-3.5.0-0.rockspec new file mode 100644 index 000000000000..1c0ad6d8c1d3 --- /dev/null +++ b/rockspec/apisix-3.5.0-0.rockspec @@ -0,0 +1,103 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+-- + +package = "apisix" +version = "3.5.0-0" +supported_platforms = {"linux", "macosx"} + +source = { + url = "git://github.com/apache/apisix", + branch = "3.5.0", +} + +description = { + summary = "Apache APISIX is a cloud-native microservices API gateway, delivering the ultimate performance, security, open source and scalable platform for all your APIs and microservices.", + homepage = "https://github.com/apache/apisix", + license = "Apache License 2.0", +} + +dependencies = { + "lua-resty-ctxdump = 0.1-0", + "api7-lua-resty-dns-client = 7.0.1", + "lua-resty-template = 2.0", + "lua-resty-etcd = 1.10.4", + "api7-lua-resty-http = 0.2.0", + "lua-resty-balancer = 0.04", + "lua-resty-ngxvar = 0.5.2", + "lua-resty-jit-uuid = 0.0.7", + "lua-resty-healthcheck-api7 = 3.0.0", + "api7-lua-resty-jwt = 0.2.5", + "lua-resty-hmac-ffi = 0.06-1", + "lua-resty-cookie = 0.2.0-1", + "lua-resty-session = 3.10", + "opentracing-openresty = 0.1", + "lua-resty-radixtree = 2.8.2", + "lua-protobuf = 0.5.0-1", + "lua-resty-openidc = 1.7.6-3", + "luafilesystem = 1.7.0-2", + "api7-lua-tinyyaml = 0.4.2", + "nginx-lua-prometheus = 0.20230607-1", + "jsonschema = 0.9.8", + "lua-resty-ipmatcher = 0.6.1", + "lua-resty-kafka = 0.22-0", + "lua-resty-logger-socket = 2.0.1-0", + "skywalking-nginx-lua = 0.6.0", + "base64 = 1.5-2", + "binaryheap = 0.4", + "api7-dkjson = 0.1.1", + "resty-redis-cluster = 1.05-1", + "lua-resty-expr = 1.3.2", + "graphql = 0.0.2", + "argparse = 0.7.1-1", + "luasocket = 3.1.0-1", + "luasec = 0.9-1", + "lua-resty-consul = 0.3-2", + "penlight = 1.13.1", + "ext-plugin-proto = 0.6.0", + "casbin = 1.41.8-1", + "inspect == 3.1.1", + "lualdap = 1.2.6-1", + "lua-resty-rocketmq = 0.3.0-0", + "opentelemetry-lua = 0.2-3", + "net-url = 0.9-1", + "xml2lua = 1.5-2", + "nanoid = 0.1-1", + "lua-resty-mediador = 0.1.2-1", + "lua-resty-ldap = 0.1.0-0", + "lua-resty-t1k = 1.1.0" +} + +build = { + type = "make", + build_variables = { + CFLAGS="$(CFLAGS)", + LIBFLAG="$(LIBFLAG)", + 
LUA_LIBDIR="$(LUA_LIBDIR)", + LUA_BINDIR="$(LUA_BINDIR)", + LUA_INCDIR="$(LUA_INCDIR)", + LUA="$(LUA)", + OPENSSL_INCDIR="$(OPENSSL_INCDIR)", + OPENSSL_LIBDIR="$(OPENSSL_LIBDIR)", + }, + install_variables = { + ENV_INST_PREFIX="$(PREFIX)", + ENV_INST_BINDIR="$(BINDIR)", + ENV_INST_LIBDIR="$(LIBDIR)", + ENV_INST_LUADIR="$(LUADIR)", + ENV_INST_CONFDIR="$(CONFDIR)", + }, +} diff --git a/rockspec/apisix-master-0.rockspec b/rockspec/apisix-master-0.rockspec index 8a73fd59c73e..9d52c71827c8 100644 --- a/rockspec/apisix-master-0.rockspec +++ b/rockspec/apisix-master-0.rockspec @@ -34,42 +34,41 @@ dependencies = { "lua-resty-ctxdump = 0.1-0", "api7-lua-resty-dns-client = 7.0.1", "lua-resty-template = 2.0", - "lua-resty-etcd = 1.10.3", - "api7-lua-resty-http = 0.2.0", + "lua-resty-etcd = 1.10.5", + "api7-lua-resty-http = 0.2.2-0", "lua-resty-balancer = 0.04", "lua-resty-ngxvar = 0.5.2", "lua-resty-jit-uuid = 0.0.7", - "lua-resty-healthcheck-api7 = 2.2.2", - "api7-lua-resty-jwt = 0.2.4", - "lua-resty-hmac-ffi = 0.05", - "lua-resty-cookie = 0.1.0", + "lua-resty-healthcheck-api7 = 3.0.0", + "api7-lua-resty-jwt = 0.2.5", + "lua-resty-hmac-ffi = 0.06-1", + "lua-resty-cookie = 0.2.0-1", "lua-resty-session = 3.10", "opentracing-openresty = 0.1", "lua-resty-radixtree = 2.8.2", - "lua-protobuf = 0.4.1", - "lua-resty-openidc = 1.7.5", + "lua-protobuf = 0.5.0-1", + "lua-resty-openidc = 1.7.6-3", "luafilesystem = 1.7.0-2", - "api7-lua-tinyyaml = 0.4.2", - "nginx-lua-prometheus = 0.20220527", + "api7-lua-tinyyaml = 0.4.4", + "nginx-lua-prometheus = 0.20230607-1", "jsonschema = 0.9.8", "lua-resty-ipmatcher = 0.6.1", - "lua-resty-kafka = 0.20-0", + "lua-resty-kafka = 0.22-0", "lua-resty-logger-socket = 2.0.1-0", "skywalking-nginx-lua = 0.6.0", "base64 = 1.5-2", "binaryheap = 0.4", "api7-dkjson = 0.1.1", - "resty-redis-cluster = 1.02-4", + "resty-redis-cluster = 1.05-1", "lua-resty-expr = 1.3.2", "graphql = 0.0.2", "argparse = 0.7.1-1", "luasocket = 3.1.0-1", "luasec = 0.9-1", 
"lua-resty-consul = 0.3-2", - "penlight = 1.9.2-1", + "penlight = 1.13.1", "ext-plugin-proto = 0.6.0", - "casbin = 1.41.5", - "api7-snowflake = 2.0-1", + "casbin = 1.41.8-1", "inspect == 3.1.1", "lualdap = 1.2.6-1", "lua-resty-rocketmq = 0.3.0-0", @@ -78,7 +77,8 @@ dependencies = { "xml2lua = 1.5-2", "nanoid = 0.1-1", "lua-resty-mediador = 0.1.2-1", - "lua-resty-ldap = 0.2.0-0" + "lua-resty-ldap = 0.1.0-0", + "lua-resty-t1k = 1.1.0" } build = { diff --git a/t/APISIX.pm b/t/APISIX.pm index e0ad704277b5..b8e288abc881 100644 --- a/t/APISIX.pm +++ b/t/APISIX.pm @@ -33,13 +33,6 @@ my $nginx_binary = $ENV{'TEST_NGINX_BINARY'} || 'nginx'; $ENV{TEST_NGINX_HTML_DIR} ||= html_dir(); $ENV{TEST_NGINX_FAST_SHUTDOWN} ||= 1; -Test::Nginx::Socket::set_http_config_filter(sub { - my $config = shift; - my $snippet = `$apisix_home/t/bin/gen_snippet.lua conf_server`; - $config .= $snippet; - return $config; -}); - sub read_file($) { my $infile = shift; open my $in, "$apisix_home/$infile" @@ -102,6 +95,7 @@ my $etcd_key = read_file("t/certs/etcd.key"); $user_yaml_config = <<_EOC_; apisix: node_listen: 1984 + proxy_mode: http&stream stream_proxy: tcp: - 9100 @@ -203,8 +197,10 @@ _EOC_ $grpc_location .= <<_EOC_; grpc_set_header Content-Type application/grpc; + grpc_set_header TE trailers; grpc_socket_keepalive on; grpc_pass \$upstream_scheme://apisix_backend; + mirror /proxy_mirror_grpc; header_filter_by_lua_block { apisix.http_header_filter_phase() @@ -264,6 +260,7 @@ env ENABLE_ETCD_AUTH; env APISIX_PROFILE; env PATH; # for searching external plugin runner's binary env TEST_NGINX_HTML_DIR; +env OPENSSL111_BIN; _EOC_ @@ -647,6 +644,8 @@ _EOC_ $ipv6_fake_server server_tokens off; + access_log logs/fake-server-access.log main; + location / { content_by_lua_block { require("lib.server").go() @@ -672,6 +671,8 @@ _EOC_ $http_config .= <<_EOC_; server_tokens off; + access_log logs/fake-server-access.log main; + ssl_certificate_by_lua_block { local ngx_ssl = require "ngx.ssl" ngx.log(ngx.WARN, 
"Receive SNI: ", ngx_ssl.server_name()) @@ -705,10 +706,18 @@ _EOC_ ssl_certificate_key cert/apisix.key; lua_ssl_trusted_certificate cert/apisix.crt; + ssl_protocols TLSv1.1 TLSv1.2 TLSv1.3; + + ssl_client_hello_by_lua_block { + apisix.http_ssl_client_hello_phase() + } + ssl_certificate_by_lua_block { apisix.http_ssl_phase() } + access_log logs/access.log main; + set \$dubbo_service_name ''; set \$dubbo_service_version ''; set \$dubbo_method ''; @@ -736,6 +745,7 @@ _EOC_ } location / { + set \$upstream_mirror_host ''; set \$upstream_mirror_uri ''; set \$upstream_upgrade ''; set \$upstream_connection ''; @@ -776,16 +786,11 @@ _EOC_ ### the following x-forwarded-* headers is to send to upstream server - set \$var_x_forwarded_for \$remote_addr; set \$var_x_forwarded_proto \$scheme; set \$var_x_forwarded_host \$host; set \$var_x_forwarded_port \$server_port; - if (\$http_x_forwarded_for != "") { - set \$var_x_forwarded_for "\${http_x_forwarded_for}, \${realip_remote_addr}"; - } - - proxy_set_header X-Forwarded-For \$var_x_forwarded_for; + proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for; proxy_set_header X-Forwarded-Proto \$var_x_forwarded_proto; proxy_set_header X-Forwarded-Host \$var_x_forwarded_host; proxy_set_header X-Forwarded-Port \$var_x_forwarded_port; @@ -826,6 +831,22 @@ _EOC_ proxy_set_header Host \$upstream_host; proxy_pass \$upstream_mirror_uri; } + + location = /proxy_mirror_grpc { + internal; +_EOC_ + + if ($version !~ m/\/apisix-nginx-module/) { + $config .= <<_EOC_; + if (\$upstream_mirror_uri = "") { + return 200; + } +_EOC_ + } + + $config .= <<_EOC_; + grpc_pass \$upstream_mirror_host; + } _EOC_ $block->set_value("config", $config); @@ -850,17 +871,6 @@ deployment: _EOC_ if ($yaml_config !~ m/deployment:/) { - # TODO: remove this temporary option once we have using gRPC by default - if ($ENV{TEST_CI_USE_GRPC}) { - $default_deployment .= <<_EOC_; - etcd: - host: - - "http://127.0.0.1:2379" - prefix: /apisix - use_grpc: true -_EOC_ - } - 
$yaml_config = $default_deployment . $yaml_config; } diff --git a/t/admin/api.t b/t/admin/api.t index 67d7344b6998..982becc79e7a 100644 --- a/t/admin/api.t +++ b/t/admin/api.t @@ -156,3 +156,83 @@ X-API-VERSION: v2 GET /t --- response_body passed + + + +=== TEST 10: Access with api key, and admin_key_required=true +--- yaml_config +deployment: + admin: + admin_key_required: true +--- more_headers +X-API-KEY: edd1c9f034335f136f87ad84b625c8f1 +--- request +GET /apisix/admin/routes +--- error_code: 200 + + + +=== TEST 11: Access with wrong api key, and admin_key_required=true +--- yaml_config +deployment: + admin: + admin_key_required: true +--- more_headers +X-API-KEY: wrong-key +--- request +GET /apisix/admin/routes +--- error_code: 401 + + + +=== TEST 12: Access without api key, and admin_key_required=true +--- yaml_config +deployment: + admin: + admin_key_required: true +--- request +GET /apisix/admin/routes +--- error_code: 401 + + + +=== TEST 13: Access with api key, but admin_key_required=false +--- yaml_config +deployment: + admin: + admin_key_required: false +--- more_headers +X-API-KEY: edd1c9f034335f136f87ad84b625c8f1 +--- request +GET /apisix/admin/routes +--- error_code: 200 +--- error_log +Admin key is bypassed! + + + +=== TEST 14: Access with wrong api key, but admin_key_required=false +--- yaml_config +deployment: + admin: + admin_key_required: false +--- more_headers +X-API-KEY: wrong-key +--- request +GET /apisix/admin/routes +--- error_code: 200 +--- error_log +Admin key is bypassed! + + + +=== TEST 15: Access without api key, but admin_key_required=false +--- yaml_config +deployment: + admin: + admin_key_required: false +--- request +GET /apisix/admin/routes +--- error_code: 200 +--- error_log +Admin key is bypassed! 
diff --git a/t/admin/consumer-group-force-delete.t b/t/admin/consumer-group-force-delete.t new file mode 100644 index 000000000000..d5e96c6f0c55 --- /dev/null +++ b/t/admin/consumer-group-force-delete.t @@ -0,0 +1,163 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); +log_level("info"); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if (!$block->no_error_log && !$block->error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: set consumer_group(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumer_groups/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-count": { + "count": 200, + "time_window": 60, + "rejected_code": 503, + "group": "consumer_group_1" + } + } + }]] + ) + ngx.status = code + ngx.say(body) + } + } +--- error_code: 201 +--- response_body +passed + + + +=== TEST 2: add consumer +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, message = t('/apisix/admin/consumers/1', + ngx.HTTP_PUT, + [[{ + "username": "1", + "plugins": { + "key-auth": { + "key": "auth-one" + } + }, + "group_id": "1" + }]] + ) + if code >= 300 then + ngx.status = code + ngx.print(message) + return + end + ngx.say(message) + } + } +--- response_body +passed + + + +=== TEST 3: delete consumer_group(wrong header) +--- config + location /t { + content_by_lua_block { + ngx.sleep(0.3) + local t = require("lib.test_admin").test + local code, message = t('/apisix/admin/consumer_groups/1?force=anyvalue', + ngx.HTTP_DELETE + ) + ngx.print("[delete] code: ", code, " message: ", message) + } + } +--- response_body +[delete] code: 400 message: {"error_msg":"can not delete this consumer group, consumer [1] is still using it now"} + + + +=== TEST 4: delete consumer_group(without force delete header) +--- config + location /t { + content_by_lua_block { + ngx.sleep(0.3) + local t = require("lib.test_admin").test + local code, message = 
t('/apisix/admin/consumer_groups/1', + ngx.HTTP_DELETE + ) + ngx.print("[delete] code: ", code, " message: ", message) + } + } +--- response_body +[delete] code: 400 message: {"error_msg":"can not delete this consumer group, consumer [1] is still using it now"} + + + +=== TEST 5: delete consumer_group(force delete) +--- config + location /t { + content_by_lua_block { + ngx.sleep(0.3) + local t = require("lib.test_admin").test + local code, message = t('/apisix/admin/consumer_groups/1?force=true', + ngx.HTTP_DELETE + ) + ngx.print("[delete] code: ", code, " message: ", message) + } + } +--- response_body chomp +[delete] code: 200 message: passed + + + +=== TEST 6: delete consumer +--- config + location /t { + content_by_lua_block { + ngx.sleep(0.3) + local t = require("lib.test_admin").test + local code, message = t('/apisix/admin/consumers/1', + ngx.HTTP_DELETE + ) + ngx.print("[delete] code: ", code, " message: ", message) + } + } +--- response_body chomp +[delete] code: 200 message: passed diff --git a/t/admin/plugin-configs-force-delete.t b/t/admin/plugin-configs-force-delete.t new file mode 100644 index 000000000000..7d4f73739e34 --- /dev/null +++ b/t/admin/plugin-configs-force-delete.t @@ -0,0 +1,163 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); +log_level("info"); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if (!$block->no_error_log && !$block->error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: set plugin_configs(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_configs/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503 + } + } + }]] + ) + ngx.status = code + ngx.say(body) + } + } +--- error_code: 201 +--- response_body +passed + + + +=== TEST 2: add route +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, message = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugin_config_id": 1, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "uri": "/index.html" + }]] + ) + if code >= 300 then + ngx.status = code + ngx.print(message) + return + end + ngx.say(message) + } + } +--- response_body +passed + + + +=== TEST 3: delete plugin_configs(wrong header) +--- config + location /t { + content_by_lua_block { + ngx.sleep(0.3) + local t = require("lib.test_admin").test + local code, message = t('/apisix/admin/plugin_configs/1?force=anyvalue', + ngx.HTTP_DELETE + ) + ngx.print("[delete] code: ", code, " message: ", message) + } + } +--- response_body +[delete] code: 400 message: {"error_msg":"can not delete this plugin config, route [1] is still using it now"} + + + +=== TEST 4: delete plugin_configs(without force delete header) +--- config + location /t { + content_by_lua_block { + ngx.sleep(0.3) + 
local t = require("lib.test_admin").test + local code, message = t('/apisix/admin/plugin_configs/1', + ngx.HTTP_DELETE + ) + ngx.print("[delete] code: ", code, " message: ", message) + } + } +--- response_body +[delete] code: 400 message: {"error_msg":"can not delete this plugin config, route [1] is still using it now"} + + + +=== TEST 5: delete plugin_configs(force delete) +--- config + location /t { + content_by_lua_block { + ngx.sleep(0.3) + local t = require("lib.test_admin").test + local code, message = t('/apisix/admin/plugin_configs/1?force=true', + ngx.HTTP_DELETE + ) + ngx.print("[delete] code: ", code, " message: ", message) + } + } +--- response_body chomp +[delete] code: 200 message: passed + + + +=== TEST 6: delete route +--- config + location /t { + content_by_lua_block { + ngx.sleep(0.3) + local t = require("lib.test_admin").test + local code, message = t('/apisix/admin/routes/1', + ngx.HTTP_DELETE + ) + ngx.print("[delete] code: ", code, " message: ", message) + } + } +--- response_body chomp +[delete] code: 200 message: passed diff --git a/t/admin/plugins-reload.t b/t/admin/plugins-reload.t index 3cb555f94a5d..df891279d36b 100644 --- a/t/admin/plugins-reload.t +++ b/t/admin/plugins-reload.t @@ -418,3 +418,12 @@ location /t { GET /t --- response_body hello world + + + +=== TEST 9: wrong method to reload plugins +--- request +GET /apisix/admin/plugins/reload +--- error_code: 405 +--- response_body +{"error_msg":"please use PUT method to reload the plugins, GET method is not allowed."} diff --git a/t/admin/plugins.t b/t/admin/plugins.t index e8a82171e791..e7bcf09e97b4 100644 --- a/t/admin/plugins.t +++ b/t/admin/plugins.t @@ -74,6 +74,7 @@ referer-restriction csrf uri-blocker request-validation +chaitin-waf openid-connect cas-auth authz-casbin @@ -88,9 +89,9 @@ consumer-restriction forward-auth opa authz-keycloak +proxy-cache body-transformer proxy-mirror -proxy-cache proxy-rewrite workflow api-breaker @@ -109,6 +110,7 @@ grpc-web public-api 
prometheus datadog +loki-logger elasticsearch-logger echo loggly @@ -137,12 +139,12 @@ ext-plugin-post-resp -=== TEST 2: wrong path +=== TEST 2: invalid plugin --- request -GET /apisix/admin/plugins ---- error_code: 400 +GET /apisix/admin/plugins/asdf +--- error_code: 404 --- response_body -{"error_msg":"not found plugin name"} +{"error_msg":"plugin not found in subsystem http"} @@ -411,3 +413,54 @@ plugins: } --- response_body {"batch-requests":"global","error-log-logger":"global","node-status":"global","server-info":"global"} + + + +=== TEST 13: check with wrong plugin subsystem +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + local _, message, _ = t('/apisix/admin/plugins?subsystem=asdf', + ngx.HTTP_GET + ) + ngx.say(message) + } + } +--- response_body eval +qr/\{"error_msg":"unsupported subsystem: asdf"\}/ + + + +=== TEST 14: check with right plugin in wrong subsystem +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + local _, message, _ = t('/apisix/admin/plugins/http-logger?subsystem=stream', + ngx.HTTP_GET + ) + ngx.say(message) + } + } +--- response_body eval +qr/\{"error_msg":"plugin not found in subsystem stream"\}/ + + + +=== TEST 15: check with right plugin in right subsystem +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + local _, _ , message = t('/apisix/admin/plugins/http-logger?subsystem=http', + ngx.HTTP_GET + ) + ngx.say(message) + } + } +--- response_body eval +qr/this is a mark for our injected plugin schema/ diff --git a/t/admin/proto.t b/t/admin/proto.t deleted file mode 100644 index e560ffffcd20..000000000000 --- a/t/admin/proto.t +++ /dev/null @@ -1,216 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. 
-# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -use t::APISIX 'no_plan'; - -repeat_each(1); -no_long_string(); -no_root_location(); -no_shuffle(); -log_level("info"); - -add_block_preprocessor(sub { - my ($block) = @_; - - if (!$block->request) { - $block->set_value("request", "GET /t"); - } - - if (!$block->no_error_log && !$block->error_log) { - $block->set_value("no_error_log", "[error]\n[alert]"); - } -}); - -run_tests; - -__DATA__ - -=== TEST 1: put proto (id:1) ---- config - location /t { - content_by_lua_block { - local t = require("lib.test_admin").test - local code, message = t('/apisix/admin/protos/1', - ngx.HTTP_PUT, - [[{ - "content": "syntax = \"proto3\"; - package proto; - message HelloRequest{ - string name = 1; - } - - message HelloResponse{ - int32 code = 1; - string msg = 2; - } - // The greeting service definition. 
- service Hello { - // Sends a greeting - rpc SayHi (HelloRequest) returns (HelloResponse){} - }" - }]] - ) - - if code ~= 201 then - ngx.status = code - ngx.say("[put proto] code: ", code, " message: ", message) - return - end - - ngx.say("[put proto] code: ", code, " message: ", message) - } - } ---- response_body -[put proto] code: 201 message: passed - - - -=== TEST 2: delete proto(id:1) ---- config - location /t { - content_by_lua_block { - local t = require("lib.test_admin").test - local code, message = t('/apisix/admin/protos/1', - ngx.HTTP_DELETE - ) - - if code ~= 200 then - ngx.status = code - ngx.say("[delete proto] code: ", code, " message: ", message) - return - end - - ngx.say("[delete proto] code: ", code, " message: ", message) - } - } ---- response_body -[delete proto] code: 200 message: passed - - - -=== TEST 3: put proto (id:2) + route refer proto(proto id 2) + delete proto(proto id 2) ---- config - location /t { - content_by_lua_block { - local t = require("lib.test_admin").test - local code, message = t('/apisix/admin/protos/2', - ngx.HTTP_PUT, - [[{ - "content": "syntax = \"proto3\"; - package proto; - message HelloRequest{ - string name = 1; - } - - message HelloResponse{ - int32 code = 1; - string msg = 2; - } - // The greeting service definition. 
- service Hello { - // Sends a greeting - rpc SayHi (HelloRequest) returns (HelloResponse){} - }" - }]] - ) - - if code ~= 201 then - ngx.status = code - ngx.say("[put proto] code: ", code, " message: ", message) - return - end - ngx.say("[put proto] code: ", code, " message: ", message) - - - code, message = t('/apisix/admin/routes/2', - ngx.HTTP_PUT, - [[{ - "methods": ["GET"], - "plugins": { - "grpc-transcode": { - "_meta": { - "disable": false - }, - "method": "SayHi", - "proto_id": 2, - "service": "proto.Hello" - } - }, - "upstream": { - "nodes": { - "127.0.0.1:8080": 1 - }, - "type": "roundrobin" - }, - "uri": "/grpc/sayhi", - "name": "hi-grpc" - }]] - ) - - if code ~= 201 then - ngx.status = code - ngx.say("[route refer proto] code: ", code, " message: ", message) - return - end - ngx.say("[route refer proto] code: ", code, " message: ", message) - - ngx.sleep(0.1) -- ensure reference is synced from etcd - - code, message = t('/apisix/admin/protos/2', - ngx.HTTP_DELETE - ) - - ngx.say("[delete proto] code: ", code) - } - } ---- response_body -[put proto] code: 201 message: passed -[route refer proto] code: 201 message: passed -[delete proto] code: 400 - - - -=== TEST 4: reject invalid proto ---- config - location /t { - content_by_lua_block { - local t = require("lib.test_admin").test - local code, message = t('/apisix/admin/protos/1', - ngx.HTTP_PUT, - [[{ - "content": "syntax = \"proto3\"; - package proto; - message HelloRequest{ - string name = 1; - } - - message HelloResponse{ - int32 code = 1; - string msg = 1; - }" - }]] - ) - - if code ~= 200 then - ngx.status = code - end - - ngx.say(message) - } - } ---- error_code: 400 ---- response_body eval -qr/invalid content:/ diff --git a/t/admin/protos-force-delete.t b/t/admin/protos-force-delete.t new file mode 100644 index 000000000000..909128924bfe --- /dev/null +++ b/t/admin/protos-force-delete.t @@ -0,0 +1,175 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor 
license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); +log_level("info"); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if (!$block->no_error_log && !$block->error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: set proto(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/protos/1', + ngx.HTTP_PUT, + [[{ + "content" : "syntax = \"proto3\"; + package helloworld; + service Greeter { + rpc SayHello (HelloRequest) returns (HelloReply) {} + } + message HelloRequest { + string name = 1; + } + message HelloReply { + string message = 1; + }" + }]] + ) + ngx.status = code + ngx.say(body) + } + } +--- error_code: 201 +--- response_body +passed + + + +=== TEST 2: add route +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, message = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "uri": "/grpctest", + "plugins": { + "grpc-transcode": { + "proto_id": "1", + "service": "helloworld.Greeter", + "method": "SayHello" + } + 
}, + "upstream": { + "scheme": "grpc", + "type": "roundrobin", + "nodes": { + "127.0.0.1:50051": 1 + } + } + }]] + ) + if code >= 300 then + ngx.status = code + ngx.print(message) + return + end + ngx.say(message) + } + } +--- response_body +passed + + + +=== TEST 3: delete proto(wrong header) +--- config + location /t { + content_by_lua_block { + ngx.sleep(0.3) + local t = require("lib.test_admin").test + local code, message = t('/apisix/admin/protos/1?force=anyvalue', + ngx.HTTP_DELETE + ) + ngx.print("[delete] code: ", code, " message: ", message) + } + } +--- response_body +[delete] code: 400 message: {"error_msg":"can not delete this proto, route [1] is still using it now"} + + + +=== TEST 4: delete proto(without force delete header) +--- config + location /t { + content_by_lua_block { + ngx.sleep(0.3) + local t = require("lib.test_admin").test + local code, message = t('/apisix/admin/protos/1', + ngx.HTTP_DELETE + ) + ngx.print("[delete] code: ", code, " message: ", message) + } + } +--- response_body +[delete] code: 400 message: {"error_msg":"can not delete this proto, route [1] is still using it now"} + + + +=== TEST 5: delete proto(force delete) +--- config + location /t { + content_by_lua_block { + ngx.sleep(0.3) + local t = require("lib.test_admin").test + local code, message = t('/apisix/admin/protos/1?force=true', + ngx.HTTP_DELETE + ) + ngx.print("[delete] code: ", code, " message: ", message) + } + } +--- response_body chomp +[delete] code: 200 message: passed + + + +=== TEST 6: delete route +--- config + location /t { + content_by_lua_block { + ngx.sleep(0.3) + local t = require("lib.test_admin").test + local code, message = t('/apisix/admin/routes/1', + ngx.HTTP_DELETE + ) + ngx.print("[delete] code: ", code, " message: ", message) + } + } +--- response_body chomp +[delete] code: 200 message: passed diff --git a/t/admin/protos.t b/t/admin/protos.t index 320c6179ee7f..e560ffffcd20 100644 --- a/t/admin/protos.t +++ b/t/admin/protos.t @@ -38,7 +38,7 
@@ run_tests; __DATA__ -=== TEST 1: test /apisix/admin/protos/{id} +=== TEST 1: put proto (id:1) --- config location /t { content_by_lua_block { @@ -75,3 +75,142 @@ __DATA__ } --- response_body [put proto] code: 201 message: passed + + + +=== TEST 2: delete proto(id:1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, message = t('/apisix/admin/protos/1', + ngx.HTTP_DELETE + ) + + if code ~= 200 then + ngx.status = code + ngx.say("[delete proto] code: ", code, " message: ", message) + return + end + + ngx.say("[delete proto] code: ", code, " message: ", message) + } + } +--- response_body +[delete proto] code: 200 message: passed + + + +=== TEST 3: put proto (id:2) + route refer proto(proto id 2) + delete proto(proto id 2) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, message = t('/apisix/admin/protos/2', + ngx.HTTP_PUT, + [[{ + "content": "syntax = \"proto3\"; + package proto; + message HelloRequest{ + string name = 1; + } + + message HelloResponse{ + int32 code = 1; + string msg = 2; + } + // The greeting service definition. 
+ service Hello { + // Sends a greeting + rpc SayHi (HelloRequest) returns (HelloResponse){} + }" + }]] + ) + + if code ~= 201 then + ngx.status = code + ngx.say("[put proto] code: ", code, " message: ", message) + return + end + ngx.say("[put proto] code: ", code, " message: ", message) + + + code, message = t('/apisix/admin/routes/2', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "plugins": { + "grpc-transcode": { + "_meta": { + "disable": false + }, + "method": "SayHi", + "proto_id": 2, + "service": "proto.Hello" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "uri": "/grpc/sayhi", + "name": "hi-grpc" + }]] + ) + + if code ~= 201 then + ngx.status = code + ngx.say("[route refer proto] code: ", code, " message: ", message) + return + end + ngx.say("[route refer proto] code: ", code, " message: ", message) + + ngx.sleep(0.1) -- ensure reference is synced from etcd + + code, message = t('/apisix/admin/protos/2', + ngx.HTTP_DELETE + ) + + ngx.say("[delete proto] code: ", code) + } + } +--- response_body +[put proto] code: 201 message: passed +[route refer proto] code: 201 message: passed +[delete proto] code: 400 + + + +=== TEST 4: reject invalid proto +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, message = t('/apisix/admin/protos/1', + ngx.HTTP_PUT, + [[{ + "content": "syntax = \"proto3\"; + package proto; + message HelloRequest{ + string name = 1; + } + + message HelloResponse{ + int32 code = 1; + string msg = 1; + }" + }]] + ) + + if code ~= 200 then + ngx.status = code + end + + ngx.say(message) + } + } +--- error_code: 400 +--- response_body eval +qr/invalid content:/ diff --git a/t/admin/resources.t b/t/admin/resources.t new file mode 100644 index 000000000000..e1f39ccc0702 --- /dev/null +++ b/t/admin/resources.t @@ -0,0 +1,55 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); +log_level("info"); + +run_tests; + +__DATA__ + +=== TEST 1: invalid resource type: 'routs' +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routs/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }, + "desc": "new route", + "uri": "/index.html" + }]] + ) + + ngx.say(body) + } + } +--- request +GET /t +--- response_body_like +{"error_msg":"Unsupported resource type: routs"} diff --git a/t/admin/routes4.t b/t/admin/routes4.t index 3c799be8be90..ba9ff44142e3 100644 --- a/t/admin/routes4.t +++ b/t/admin/routes4.t @@ -611,7 +611,7 @@ failed to read request body: request size 1678025 is greater than the maximum si "methods": ["GET", "GET"], "upstream": { "nodes": { - "httpbin.org:8080": 1, + "apisix.com:8080": 1, "test.com:8080": 1 }, "type": "roundrobin", diff --git a/t/admin/schema-validate.t b/t/admin/schema-validate.t new file mode 100644 index 000000000000..46f51021edfd --- /dev/null +++ b/t/admin/schema-validate.t @@ -0,0 +1,400 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license 
agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); +log_level("warn"); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: validate ok +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/schema/validate/routes', + ngx.HTTP_POST, + [[{ + "uri": "/httpbin/*", + "upstream": { + "scheme": "https", + "type": "roundrobin", + "nodes": { + "nghttp2.org": 1 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + } +} +--- error_code: 200 + + + +=== TEST 2: validate failed, wrong uri type +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/schema/validate/routes', + ngx.HTTP_POST, + [[{ + "uri": 666, + "upstream": { + "scheme": "https", + "type": "roundrobin", + "nodes": { + "nghttp2.org": 1 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + } +} +--- error_code: 400 +--- response +{"error_msg": {"property \"uri\" validation failed: wrong type: expected string, got number"}} + + + 
+=== TEST 3: validate failed, length limit +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/schema/validate/routes', + ngx.HTTP_POST, + [[{ + "uri": "", + "upstream": { + "scheme": "https", + "type": "roundrobin", + "nodes": { + "nghttp2.org": 1 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + } +} +--- error_code: 400 +--- response +{"error_msg":"property \"uri\" validation failed: string too short, expected at least 1, got 0"} + + + +=== TEST 4: validate failed, array type expected +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/schema/validate/routes', + ngx.HTTP_POST, + [[{ + "uris": "foobar", + "upstream": { + "scheme": "https", + "type": "roundrobin", + "nodes": { + "nghttp2.org": 1 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + } +} +--- error_code: 400 +--- response +{"error_msg":"property \"uris\" validation failed: wrong type: expected array, got string"} + + + +=== TEST 5: validate failed, array size limit +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/schema/validate/routes', + ngx.HTTP_POST, + [[{ + "uris": [], + "upstream": { + "scheme": "https", + "type": "roundrobin", + "nodes": { + "nghttp2.org": 1 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + } +} +--- error_code: 400 +--- response +{"error_msg":"property \"uris\" validation failed: expect array to have at least 1 items"} + + + +=== TEST 6: validate failed, array unique items +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/schema/validate/routes', + ngx.HTTP_POST, + [[{ + "uris": ["/foo", "/foo"], + "upstream": { + 
"scheme": "https", + "type": "roundrobin", + "nodes": { + "nghttp2.org": 1 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + } +} +--- error_code: 400 +--- response +{"error_msg":"property \"uris\" validation failed: expected unique items but items 1 and 2 are equal"} + + + +=== TEST 7: validate failed, uri or uris is mandatory +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/schema/validate/routes', + ngx.HTTP_POST, + [[{ + "upstream": { + "scheme": "https", + "type": "roundrobin", + "nodes": { + "nghttp2.org": 1 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + } +} +--- error_code: 400 +--- response +{"error_msg":"allOf 1 failed: value should match only one schema, but matches none"} + + + +=== TEST 8: validate failed, enum check +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/schema/validate/routes', + ngx.HTTP_POST, + [[{ + "status": 3, + "uri": "/foo", + "upstream": { + "scheme": "https", + "type": "roundrobin", + "nodes": { + "nghttp2.org": 1 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + } +} +--- error_code: 400 +--- response +{"error_msg":"property \"status\" validation failed: matches none of the enum values"} + + + +=== TEST 9: validate failed, wrong combination +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/schema/validate/routes', + ngx.HTTP_POST, + [[{ + "script": "xxxxxxxxxxxxxxxxxxxxx", + "plugin_config_id": "foo" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + } +} +--- error_code: 400 +--- response +{"error_msg":"allOf 1 failed: value should match only one schema, but matches none"} + + + +=== TEST 10: validate failed, 
id_schema check +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/schema/validate/routes', + ngx.HTTP_POST, + [[{ + "plugin_config_id": "@@@@@@@@@@@@@@@@", + "uri": "/foo", + "upstream": { + "scheme": "https", + "type": "roundrobin", + "nodes": { + "nghttp2.org": 1 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + } +} +--- error_code: 400 +--- response +{"error_msg":"property \"plugin_config_id\" validation failed: object matches none of the required"} + + + +=== TEST 11: upstream ok +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/schema/validate/upstreams', + ngx.HTTP_POST, + [[{ + "nodes":{ + "nghttp2.org":100 + }, + "type":"roundrobin" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + } +} +--- error_code: 200 + + + +=== TEST 12: upstream failed, wrong nodes format +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/schema/validate/upstreams', + ngx.HTTP_POST, + [[{ + "nodes":[ + "nghttp2.org" + ], + "type":"roundrobin" + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + } +} +--- error_code: 400 +--- response +{"error_msg":"allOf 1 failed: value should match only one schema, but matches none"} diff --git a/t/admin/schema.t b/t/admin/schema.t index 35ac20187053..7853addd32d2 100644 --- a/t/admin/schema.t +++ b/t/admin/schema.t @@ -112,7 +112,7 @@ qr/"required":\["count","time_window"\]/ === TEST 8: get not exist plugin --- request GET /apisix/admin/schema/plugins/no-exist ---- error_code: 400 +--- error_code: 404 diff --git a/t/admin/secrets.t b/t/admin/secrets.t index f1a788db63ad..79402ab9dfde 100644 --- a/t/admin/secrets.t +++ b/t/admin/secrets.t @@ -131,7 +131,7 @@ passed -=== TEST 4: PATCH +=== 
TEST 4: PATCH on path --- config location /t { content_by_lua_block { @@ -169,7 +169,69 @@ passed -=== TEST 5: DELETE +=== TEST 5: PATCH +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local etcd = require("apisix.core.etcd") + local res = assert(etcd.get('/secrets/vault/test1')) + local prev_create_time = res.body.node.value.create_time + assert(prev_create_time ~= nil, "create_time is nil") + local prev_update_time = res.body.node.value.update_time + assert(prev_update_time ~= nil, "update_time is nil") + ngx.sleep(1) + + local code, body = t('/apisix/admin/secrets/vault/test1', + ngx.HTTP_PATCH, + [[{ + "uri": "http://127.0.0.1:12800/get", + "prefix" : "apisix", + "token" : "apisix" + }]], + [[{ + "value": { + "uri": "http://127.0.0.1:12800/get", + "prefix" : "apisix", + "token" : "apisix" + }, + "key": "/apisix/secrets/vault/test1" + }]] + ) + + ngx.status = code + ngx.say(body) + + local res = assert(etcd.get('/secrets/vault/test1')) + assert(res.body.node.value.token == "apisix") + } + } +--- response_body +passed + + + +=== TEST 6: PATCH without id +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/secrets/vault', + ngx.HTTP_PATCH, + [[{}]], + [[{}]] + ) + ngx.status = code + ngx.print(body) + } + } +--- error_code: 400 +--- response_body +{"error_msg":"no secret id"} + + + +=== TEST 7: DELETE --- config location /t { content_by_lua_block { @@ -185,7 +247,7 @@ passed -=== TEST 6: PUT with invalid format +=== TEST 8: PUT with invalid format --- config location /t { content_by_lua_block { diff --git a/t/admin/services-force-delete.t b/t/admin/services-force-delete.t new file mode 100644 index 000000000000..439b44e098ab --- /dev/null +++ b/t/admin/services-force-delete.t @@ -0,0 +1,156 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); +log_level("info"); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if (!$block->no_error_log && !$block->error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: set service(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/services/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + } + }]] + ) + ngx.status = code + ngx.say(body) + } + } +--- error_code: 201 +--- response_body +passed + + + +=== TEST 2: add route +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, message = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "service_id": 1, + "uri": "/index.html" + }]] + ) + if code >= 300 then + ngx.status = code + ngx.print(message) + return + end + ngx.say(message) + } + } +--- response_body +passed + + + +=== TEST 3: delete service(wrong header) +--- config + location /t { + content_by_lua_block { + ngx.sleep(0.3) + local t = 
require("lib.test_admin").test + local code, message = t('/apisix/admin/services/1?force=anyvalue', + ngx.HTTP_DELETE + ) + ngx.print("[delete] code: ", code, " message: ", message) + } + } +--- response_body +[delete] code: 400 message: {"error_msg":"can not delete this service directly, route [1] is still using it now"} + + + +=== TEST 4: delete service(without force delete header) +--- config + location /t { + content_by_lua_block { + ngx.sleep(0.3) + local t = require("lib.test_admin").test + local code, message = t('/apisix/admin/services/1', + ngx.HTTP_DELETE + ) + ngx.print("[delete] code: ", code, " message: ", message) + } + } +--- response_body +[delete] code: 400 message: {"error_msg":"can not delete this service directly, route [1] is still using it now"} + + + +=== TEST 5: delete service(force delete) +--- config + location /t { + content_by_lua_block { + ngx.sleep(0.3) + local t = require("lib.test_admin").test + local code, message = t('/apisix/admin/services/1?force=true', + ngx.HTTP_DELETE + ) + ngx.print("[delete] code: ", code, " message: ", message) + } + } +--- response_body chomp +[delete] code: 200 message: passed + + + +=== TEST 6: delete route +--- config + location /t { + content_by_lua_block { + ngx.sleep(0.3) + local t = require("lib.test_admin").test + local code, message = t('/apisix/admin/routes/1', + ngx.HTTP_DELETE + ) + ngx.print("[delete] code: ", code, " message: ", message) + } + } +--- response_body chomp +[delete] code: 200 message: passed diff --git a/t/admin/ssl4.t b/t/admin/ssl4.t index dcb18ce397db..fe3b8fcae449 100644 --- a/t/admin/ssl4.t +++ b/t/admin/ssl4.t @@ -364,3 +364,43 @@ location /t { t.test('/apisix/admin/ssls/1', ngx.HTTP_DELETE) } } + + + +=== TEST 11: set ssl(sni: www.test.com) with long label +--- yaml_config +apisix: + node_listen: 1984 + ssl: + key_encrypt_salt: null +--- config +location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + local 
ssl_cert = t.read_file("t/certs/apisix.crt") + local ssl_key = t.read_file("t/certs/apisix.key") + local data = {cert = ssl_cert, key = ssl_key, sni = "www.test.com", + labels = {secret = "js-design-test-bigdata-data-app-service-router-my-secret-number-123456"}} + + local code, body = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + core.json.encode(data), + [[{ + "value": { + "sni": "www.test.com", + "labels": { + "secret": "js-design-test-bigdata-data-app-service-router-my-secret-number-123456" + }, + }, + "key": "/apisix/ssls/1" + }]] + ) + + ngx.status = code + ngx.say(body) + } +} +--- response_body +passed diff --git a/t/admin/ssl5.t b/t/admin/ssl5.t new file mode 100644 index 000000000000..c9bd7b162133 --- /dev/null +++ b/t/admin/ssl5.t @@ -0,0 +1,86 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +no_root_location(); + +run_tests; + +__DATA__ + +=== TEST 1: Not supported set TLSv1.0 for ssl_protocols +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + local ssl_cert = t.read_file("t/certs/apisix.crt") + local ssl_key = t.read_file("t/certs/apisix.key") + local data = {cert = ssl_cert, key = ssl_key, sni = "test.com", ssl_protocols = {"TLSv1.0", "TLSv1.2"}} + + local code, body = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + core.json.encode(data), + [[{ + "key": "/apisix/ssls/1" + }]] + ) + + ngx.status = code + ngx.print(body) + } + } +--- request +GET /t +--- error_code: 400 +--- response_body +{"error_msg":"invalid configuration: property \"ssl_protocols\" validation failed: failed to validate item 1: matches none of the enum values"} + + + +=== TEST 2: The default value for the ssl_protocols is null +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + local ssl_cert = t.read_file("t/certs/apisix.crt") + local ssl_key = t.read_file("t/certs/apisix.key") + local data = {cert = ssl_cert, key = ssl_key, sni = "test.com"} + + local code, body = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + core.json.encode(data), + [[{ + "value": { + "sni": "test.com", + "ssl_protocols": null, + }, + "key": "/apisix/ssls/1" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed diff --git a/t/admin/stream-routes.t b/t/admin/stream-routes.t index a25496d57dcd..77a6d5bad8a9 100644 --- a/t/admin/stream-routes.t +++ b/t/admin/stream-routes.t @@ -607,3 +607,47 @@ GET /t {"error_msg":"unknown protocol [xxx]"} passed {"error_msg":"property \"faults\" validation failed: wrong type: expected array, got string"} + + + +=== TEST 17: set route with remote_addr and server_addr in IPV6 +--- config + location /t { + content_by_lua_block { + local 
t = require("lib.test_admin").test + local code, body = t('/apisix/admin/stream_routes/1', + ngx.HTTP_PUT, + [[{ + "remote_addr": "::1", + "server_addr": "::1", + "server_port": 1982, + "plugins": { + "mqtt-proxy": { + "protocol_name": "MQTT", + "protocol_level": 4 + } + }, + "upstream": { + "type": "chash", + "key": "mqtt_client_id", + "nodes": [ + { + "host": "127.0.0.1", + "port": 1980, + "weight": 1 + } + ] + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed diff --git a/t/admin/upstream-array-nodes.t b/t/admin/upstream-array-nodes.t index 70c7a8ede858..b02a759796a6 100644 --- a/t/admin/upstream-array-nodes.t +++ b/t/admin/upstream-array-nodes.t @@ -251,7 +251,7 @@ no valid upstream node -=== TEST 9: additional properties is valid +=== TEST 9: additional properties is invalid --- config location /t { content_by_lua_block { @@ -277,8 +277,9 @@ no valid upstream node } --- request GET /t ---- response_body -passed +--- error_code: 400 +--- response_body eval +qr/\{"error_msg":"invalid configuration: additional properties forbidden, found .*"\}/ diff --git a/t/admin/upstream-force-delete.t b/t/admin/upstream-force-delete.t new file mode 100644 index 000000000000..6d834b11469a --- /dev/null +++ b/t/admin/upstream-force-delete.t @@ -0,0 +1,154 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); +log_level("info"); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if (!$block->no_error_log && !$block->error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: set upstream(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "nodes": { + "127.0.0.1:8080": 1 + }, + "type": "roundrobin" + }]] + ) + ngx.status = code + ngx.say(body) + } + } +--- error_code: 201 +--- response_body +passed + + + +=== TEST 2: add route +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, message = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "upstream_id": 1, + "uri": "/index.html" + }]] + ) + if code >= 300 then + ngx.status = code + ngx.print(message) + return + end + ngx.say(message) + } + } +--- response_body +passed + + + +=== TEST 3: delete upstream(wrong header) +--- config + location /t { + content_by_lua_block { + ngx.sleep(0.3) + local t = require("lib.test_admin").test + local code, message = t('/apisix/admin/upstreams/1?force=anyvalue', + ngx.HTTP_DELETE + ) + ngx.print("[delete] code: ", code, " message: ", message) + } + } +--- response_body +[delete] code: 400 message: {"error_msg":"can not delete this upstream, route [1] is still 
using it now"} + + + +=== TEST 4: delete upstream(without force delete header) +--- config + location /t { + content_by_lua_block { + ngx.sleep(0.3) + local t = require("lib.test_admin").test + local code, message = t('/apisix/admin/upstreams/1', + ngx.HTTP_DELETE + ) + ngx.print("[delete] code: ", code, " message: ", message) + } + } +--- response_body +[delete] code: 400 message: {"error_msg":"can not delete this upstream, route [1] is still using it now"} + + + +=== TEST 5: delete upstream(force delete) +--- config + location /t { + content_by_lua_block { + ngx.sleep(0.3) + local t = require("lib.test_admin").test + local code, message = t('/apisix/admin/upstreams/1?force=true', + ngx.HTTP_DELETE + ) + ngx.print("[delete] code: ", code, " message: ", message) + } + } +--- response_body chomp +[delete] code: 200 message: passed + + + +=== TEST 6: delete route +--- config + location /t { + content_by_lua_block { + ngx.sleep(0.3) + local t = require("lib.test_admin").test + local code, message = t('/apisix/admin/routes/1', + ngx.HTTP_DELETE + ) + ngx.print("[delete] code: ", code, " message: ", message) + } + } +--- response_body chomp +[delete] code: 200 message: passed diff --git a/t/admin/upstream.t b/t/admin/upstream.t index 7725e32b6070..b92a2f403f82 100644 --- a/t/admin/upstream.t +++ b/t/admin/upstream.t @@ -330,7 +330,7 @@ GET /t -=== TEST 11: additional properties is valid +=== TEST 11: additional properties is invalid --- config location /t { content_by_lua_block { @@ -354,8 +354,9 @@ GET /t } --- request GET /t ---- response_body -passed +--- error_code: 400 +--- response_body eval +qr/\{"error_msg":"invalid configuration: additional properties forbidden, found .*"\}/ diff --git a/t/admin/upstream3.t b/t/admin/upstream3.t index e40e24e99b4a..335bbfae36db 100644 --- a/t/admin/upstream3.t +++ b/t/admin/upstream3.t @@ -442,7 +442,7 @@ passed } --- error_code: 400 --- response_body -{"error_msg":"invalid configuration: failed to match pattern 
\"^((uri|server_name|server_addr|request_uri|remote_port|remote_addr|query_string|host|hostname)|arg_[0-9a-zA-z_-]+)$\" with \"not_support\""} +{"error_msg":"invalid configuration: failed to match pattern \"^((uri|server_name|server_addr|request_uri|remote_port|remote_addr|query_string|host|hostname|mqtt_client_id)|arg_[0-9a-zA-z_-]+)$\" with \"not_support\""} @@ -521,7 +521,7 @@ passed } --- error_code: 400 --- response_body -{"error_msg":"invalid configuration: failed to match pattern \"^((uri|server_name|server_addr|request_uri|remote_port|remote_addr|query_string|host|hostname)|arg_[0-9a-zA-z_-]+)$\" with \"not_support\""} +{"error_msg":"invalid configuration: failed to match pattern \"^((uri|server_name|server_addr|request_uri|remote_port|remote_addr|query_string|host|hostname|mqtt_client_id)|arg_[0-9a-zA-z_-]+)$\" with \"not_support\""} diff --git a/t/admin/upstream4.t b/t/admin/upstream4.t index 99c840f944f1..b657edc6e731 100644 --- a/t/admin/upstream4.t +++ b/t/admin/upstream4.t @@ -205,7 +205,7 @@ passed ngx.HTTP_PUT, [[{ "nodes": { - "httpbin.org:8080": 1, + "apisix.com:8080": 1, "test.com:8080": 1 }, "type": "roundrobin", diff --git a/t/admin/upstream5.t b/t/admin/upstream5.t index 9fd59bfe7e86..572d2925773f 100644 --- a/t/admin/upstream5.t +++ b/t/admin/upstream5.t @@ -111,3 +111,489 @@ passed } --- response_body passed + + + +=== TEST 4: prepare upstream +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t("/apisix/admin/upstreams/1", ngx.HTTP_PUT, [[{ + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }]]) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + + } + } +--- response_body +passed + + + +=== TEST 5: prepare route +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t("/apisix/admin/routes/1", ngx.HTTP_PUT, [[{ + "plugins": { + "traffic-split": { + "rules": [ + { + 
"weighted_upstreams": [ + { + "upstream_id": 1, + "weight": 1 + }, + { + "weight": 1 + } + ] + } + ] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]]) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + + } + } +--- response_body +passed + + + +=== TEST 6: delete upstream when plugin in route still refer it +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t("/apisix/admin/upstreams/1", ngx.HTTP_DELETE) + if code >= 300 then + ngx.status = code + end + ngx.print(body) + } + } +--- error_code: 400 +--- response_body +{"error_msg":"can not delete this upstream, plugin in route [1] is still using it now"} + + + +=== TEST 7: delete route +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t("/apisix/admin/routes/1", ngx.HTTP_DELETE) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 8: prepare service +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t("/apisix/admin/services/1", ngx.HTTP_PUT, [[{ + "plugins": { + "traffic-split": { + "rules": [ + { + "weighted_upstreams": [ + { + "upstream_id": 1, + "weight": 1 + }, + { + "weight": 1 + } + ] + } + ] + } + } + }]]) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 9: delete upstream when plugin in service still refer it +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t("/apisix/admin/upstreams/1", ngx.HTTP_DELETE) + if code >= 300 then + ngx.status = code + end + ngx.print(body) + } + } +--- error_code: 400 +--- response_body +{"error_msg":"can not delete this upstream, plugin in service [1] is still using it now"} + + + +=== TEST 10: delete 
service +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t("/apisix/admin/services/1", ngx.HTTP_DELETE) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 11: prepare global_rule +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + local code, body = t("/apisix/admin/global_rules/1", ngx.HTTP_PUT, [[{ + "plugins": { + "traffic-split": { + "rules": [ + { + "weighted_upstreams": [ + { + "upstream_id": 1, + "weight": 1 + }, + { + "weight": 1 + } + ] + } + ] + } + } + }]]) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 12: delete upstream when plugin in global_rule still refer it +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t("/apisix/admin/upstreams/1", ngx.HTTP_DELETE) + if code >= 300 then + ngx.status = code + end + ngx.print(body) + } + } +--- error_code: 400 +--- response_body +{"error_msg":"can not delete this upstream, plugin in global_rules [1] is still using it now"} + + + +=== TEST 13: delete global_rule +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t("/apisix/admin/global_rules/1", ngx.HTTP_DELETE) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 14: prepare plugin_config +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + local code, body = t("/apisix/admin/plugin_configs/1", ngx.HTTP_PUT, [[{ + "plugins": { + "traffic-split": { + "rules": [ + { + "weighted_upstreams": [ + { + "upstream_id": 1, + "weight": 1 + }, + { + "weight": 1 + } + ] + } + ] + } + } + }]]) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed 
+ + + +=== TEST 15: delete upstream when plugin in plugin_config still refer it +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t("/apisix/admin/upstreams/1", ngx.HTTP_DELETE) + if code >= 300 then + ngx.status = code + end + ngx.print(body) + } + } +--- error_code: 400 +--- response_body +{"error_msg":"can not delete this upstream, plugin in plugin_config [1] is still using it now"} + + + +=== TEST 16: delete plugin_config +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t("/apisix/admin/plugin_configs/1", ngx.HTTP_DELETE) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 17: prepare consumer +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + local code, body = t("/apisix/admin/consumers", ngx.HTTP_PUT, [[{ + "username": "test", + "plugins": { + "key-auth": { + "key": "auth-one" + }, + "traffic-split": { + "rules": [ + { + "weighted_upstreams": [ + { + "upstream_id": 1, + "weight": 1 + }, + { + "weight": 1 + } + ] + } + ] + } + } + }]]) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 18: delete upstream when plugin in consumer still refer it +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t("/apisix/admin/upstreams/1", ngx.HTTP_DELETE) + if code >= 300 then + ngx.status = code + end + ngx.print(body) + } + } +--- error_code: 400 +--- response_body +{"error_msg":"can not delete this upstream, plugin in consumer [test] is still using it now"} + + + +=== TEST 19: delete consumer +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t("/apisix/admin/consumers/test", ngx.HTTP_DELETE) + if code >= 300 then + ngx.status = code 
+ end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 20: prepare consumer_group +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + local code, body = t("/apisix/admin/consumer_groups/1", ngx.HTTP_PUT, [[{ + "plugins": { + "key-auth": { + "key": "auth-one" + }, + "traffic-split": { + "rules": [ + { + "weighted_upstreams": [ + { + "upstream_id": 1, + "weight": 1 + }, + { + "weight": 1 + } + ] + } + ] + } + } + }]]) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 21: delete upstream when plugin in consumer_group still refer it +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t("/apisix/admin/upstreams/1", ngx.HTTP_DELETE) + if code >= 300 then + ngx.status = code + end + ngx.print(body) + } + } +--- error_code: 400 +--- response_body +{"error_msg":"can not delete this upstream, plugin in consumer_group [1] is still using it now"} + + + +=== TEST 22: delete consumer_group +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t("/apisix/admin/consumer_groups/1", ngx.HTTP_DELETE) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 23: delete upstream +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t("/apisix/admin/upstreams/1", ngx.HTTP_DELETE) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed diff --git a/t/bin/gen_snippet.lua b/t/bin/gen_snippet.lua deleted file mode 100755 index b2ab349be348..000000000000 --- a/t/bin/gen_snippet.lua +++ /dev/null @@ -1,72 +0,0 @@ -#!/usr/bin/env luajit --- --- Licensed to the Apache Software Foundation (ASF) under one or more --- contributor license agreements. 
See the NOTICE file distributed with --- this work for additional information regarding copyright ownership. --- The ASF licenses this file to You under the Apache License, Version 2.0 --- (the "License"); you may not use this file except in compliance with --- the License. You may obtain a copy of the License at --- --- http://www.apache.org/licenses/LICENSE-2.0 --- --- Unless required by applicable law or agreed to in writing, software --- distributed under the License is distributed on an "AS IS" BASIS, --- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. --- See the License for the specific language governing permissions and --- limitations under the License. --- --- this script generates Nginx configuration in the test --- so we can test some features with test-nginx -local pkg_cpath_org = package.cpath -local pkg_path_org = package.path -local pkg_cpath = "deps/lib64/lua/5.1/?.so;deps/lib/lua/5.1/?.so;" -local pkg_path = "deps/share/lua/5.1/?.lua;" --- modify the load path to load our dependencies -package.cpath = pkg_cpath .. pkg_cpath_org -package.path = pkg_path .. pkg_path_org - - -local file = require("apisix.cli.file") -local schema = require("apisix.cli.schema") -local snippet = require("apisix.cli.snippet") -local util = require("apisix.cli.util") -local yaml_conf, err = file.read_yaml_conf("t/servroot") -if not yaml_conf then - error(err) -end - -if yaml_conf.deployment.role == "data_plane" and - yaml_conf.deployment.config_provider == "yaml" - or yaml_conf.deployment.config_provider == "xds" then - return -end - -local ok, err = schema.validate(yaml_conf) -if not ok then - error(err) -end - -local or_info, err = util.execute_cmd("openresty -V 2>&1") -if not or_info then - error("failed to exec cmd \'openresty -V 2>&1\', err: " .. 
err) -end - -local use_apisix_base = true -if not or_info:find("apisix-nginx-module", 1, true) then - use_apisix_base = false -end - -local res, err -if arg[1] == "conf_server" then - res, err = snippet.generate_conf_server( - { - apisix_home = "t/servroot/", - use_apisix_base = use_apisix_base, - }, - yaml_conf) -end - -if not res then - error(err or "none") -end -print(res) diff --git a/t/certs/localhost_slapd_cert.pem b/t/certs/localhost_slapd_cert.pem index 6140ea5f630c..0830e61b761d 100644 --- a/t/certs/localhost_slapd_cert.pem +++ b/t/certs/localhost_slapd_cert.pem @@ -1,24 +1,21 @@ -----BEGIN CERTIFICATE----- -MIIECDCCAnCgAwIBAgIUc40/PofbLcrqu/2MJMEkYfrxB+4wDQYJKoZIhvcNAQEL -BQAwVjELMAkGA1UEBhMCQ04xEjAQBgNVBAgMCUd1YW5nRG9uZzEPMA0GA1UEBwwG -Wmh1SGFpMQ8wDQYDVQQKDAZpcmVzdHkxETAPBgNVBAMMCHRlc3QuY29tMB4XDTIy -MDgwMjA1NDI1OFoXDTIzMDgwMjA1NDI1OFowLjESMBAGA1UEAxMJbG9jYWxob3N0 -MRgwFgYDVQQKEw9FeGFtcGxlIENvbXBhbnkwggEiMA0GCSqGSIb3DQEBAQUAA4IB -DwAwggEKAoIBAQCxE5zfta69uPsQVDiV0OwWHDGxTBYNzmp5zsVwOF3bOH+hyB4M -+qFxPEuH84/Ib4GJdLM67qZth1azHudKy/QGPFkoeFUW1JhB9QGyjh/URwxTy05b -Ce5w7Ee1rMV/GWu6fxMfIE3o5U0XuW1IKQFaZVdNuQlvG4VjL59BfnEF+YXb1QDB -kIpvf59q+UuZgit8CrO1dDYeJ/xO3N9v2CS2u6si9/XWgIwayw67tmb7cbTu/srB -C99w97IMP5/Vkeu6fkg2jTuvCRARzMQJ11krDmtGeYum9SSCdyTLxK1u7w33DuhQ -3HE/PfHJj9QV1MKIeruVjEvawJsRiWQG0Ai7AgMBAAGjdjB0MAwGA1UdEwEB/wQC -MAAwEwYDVR0lBAwwCgYIKwYBBQUHAwEwDwYDVR0PAQH/BAUDAwegADAdBgNVHQ4E -FgQUcGOrPCoztq5Z7mjgGtaCkPkmDWowHwYDVR0jBBgwFoAUmbUr1fJgcJdG6ZLx -bYMojlFHG7MwDQYJKoZIhvcNAQELBQADggGBABNOTIiLHNQJfyV20UxcyzZ9xTuc -DuMzEexWJ6S33yJTyp5jni0vFaF9wnT1MOtp+Zizz0hQq0d+GvsmBzjkDdipFqUB -Dt4517l4Z/H4n4FV0jhqQhhzcPRWI5H2MNU0Ezno1iCaKD29Kq61fo2qrU7SNDre -RjnGueTW6u+YLj1ss+UK2rTCRX/Nqqz+MrvIift5Kj4c/8sAD3Zn2aXlH0dXSTcX -DaqNDPQvcdlqNMRSJSthLXYBn40Ro6mH7uA+e4aIVn4jyYvyb8qY5LhQPesTcJZw -IEDmIgFEIh0k1YoGvLD6TkMdKPUG536zH+4iZjKpwGwNQ/dTBgn4+5UOqguiYgXd -MP/eeXSCGLAIjQ4+i1ghv1eAlHuHSQ3Dm75icpAL7VHFdoI7I3wqeE5+IyrUXjX0 
-s1bCjIuwGxgoBBTzv25OijmTmMcLYDp04PR5qSwckvsrrxHr+2ujeqS+AGxzZ4Sk -N1JSJL69zUwfCVdE3mR+6OmmDcuVlB3u+grLFQ== +MIIDcjCCAdoCFCS4ndwl6lusO7yj4zxbngp17nNUMA0GCSqGSIb3DQEBCwUAMFYx +CzAJBgNVBAYTAkNOMRIwEAYDVQQIDAlHdWFuZ0RvbmcxDzANBgNVBAcMBlpodUhh +aTEPMA0GA1UECgwGaXJlc3R5MREwDwYDVQQDDAh0ZXN0LmNvbTAgFw0yMzA4MDMw +NjM3NDhaGA8yMTA1MDkyMjA2Mzc0OFowEzERMA8GA1UEAwwIdGVzdC5jb20wggEi +MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC4mQik/vxL1bdjXcXWNT6DBVGL +A87CeVYleNE5Fx5KROU5Y388h80VgSTmV3ytu9ZuNEB8hcZqmAttZXcipMyNm9PI +vTXaDFaQVclYNJ27hy7rpaJ29WkeLKlUx/pRUpOCg3lbafSmURf5C234LZL1qe5O +0CIvY3uAzkGWMUE8ABYnef+ucULZPLa2+Y9wIx76oP5tfmcM/pQhDXt+GK/bZyat +1sUmEHCVC2gjvHoZO8T7n4ccpi5v06Klj8BKVxRlGVkO2w4hlDbNyh6FWKK31nF8 +BLu/TKF70xSOzX4OFNT6/GJ8R9AyeK52f6OzNmlNUY3UsMEeX8Y2qL4hNrFhAgMB +AAEwDQYJKoZIhvcNAQELBQADggGBAL6g7NZfVTEVklJPmTFSqwQfuu0YXmIvUQIF +jvbNOmwC+nHSq6yuJFC+83R/on/IPWrkP489bEVwqaBQXnZnIMOTjIk8k9elBm/N +1BBpzWUiKNp+HqRjPCanvRCIPUZ9hWpmJy6uzG6VodEtxZgJ7lsArj0AlOYlkHHa +Ctmnl5g6H4m8sNACZaAixesb9wM8Slvn1zhpAeIYZsvIaBZOZnWuwHD1R7seh4ob +BDhDaUfXOYb0VJaKNWnJ5ItPxh4/YMSuS7mG5o2ELnzWN6OeDEQrqKFW17qWLXko +DXEfyrQnODDI+fXvasJhQ62hH33rQF/Q4yJQOEEr7gQUxtMYCxtGCumx2/5MFTuB +E8sf8FykV5jGjwdwMHhPGAmhpMJwM6i65P9GwZguqVmeFv2l4eSTmMinURlkwaAw +cx+LrigAxSKOCcnnnC6Uza1VShyDAuj+XKPglwwJd99UJlk1VG/9TXp3WZTOvSt+ +KttglpiMHyqzCYcMDTGbjPm/UsjFTw== -----END CERTIFICATE----- diff --git a/t/certs/localhost_slapd_key.pem b/t/certs/localhost_slapd_key.pem index fa33248c6240..2b23ac4887c9 100644 --- a/t/certs/localhost_slapd_key.pem +++ b/t/certs/localhost_slapd_key.pem @@ -1,27 +1,28 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEogIBAAKCAQEAsROc37Wuvbj7EFQ4ldDsFhwxsUwWDc5qec7FcDhd2zh/ocge -DPqhcTxLh/OPyG+BiXSzOu6mbYdWsx7nSsv0BjxZKHhVFtSYQfUBso4f1EcMU8tO -WwnucOxHtazFfxlrun8THyBN6OVNF7ltSCkBWmVXTbkJbxuFYy+fQX5xBfmF29UA -wZCKb3+favlLmYIrfAqztXQ2Hif8Ttzfb9gktrurIvf11oCMGssOu7Zm+3G07v7K -wQvfcPeyDD+f1ZHrun5INo07rwkQEczECddZKw5rRnmLpvUkgncky8Stbu8N9w7o -UNxxPz3xyY/UFdTCiHq7lYxL2sCbEYlkBtAIuwIDAQABAoIBAGDANpaEzlUbHRJu 
-8fvpixUJkp0s1V/1yHeFYptOMPn2hMYAcWrmBg+4wgwmKAl742sXOFaazpRJvjVg -TT+w8EP39T8HgHZY8lgXZjYJMZrqtvGRw946Lu3EK+o33DD10sazZ98551e48cZk -qjEjNnoNpQXydBUhFGB9RKakT1zTb8e+ZQdsrE+ZzgM9/xVFRx4gsfNbed/5TMHZ -QbwaqPzQRiS9ScRwvZ+TE20cGQ66qZqR6+JCatc8BpXA9Q6ZmTj61MSl6MMzCuOS -yIGm5J+siPkLV/ki+MAHk59G9iEsTjS1T1l4aQn0kTtdMx9oVCPODY6Jdi8jIaU/ -TwGWuQECgYEAxJEg/YKjZGQFhidP64OGi1ochFZxuJFwcZ17DgmZPkiU+vpC8KYl -QpR0r0zN9vqP+71nMMoVJfektXRMP4cy0ebSAbx47X5IfdYUhID+/OAlxbl1O9ah -lGWk90zknVvQKahImtYZqepQEYyetQiDB4gX2bLT+8IIt16ebGC/TyUCgYEA5p3g -Tcj69nxyy4BuGxYuNfTORTCzd9zhURN7325HVBMlhen/f1e+yjV1zth9yLDl5Wyl -99jkVCvy6p83s+1EDKdgOTYrxgD31Y934De/m53U6P/yHeic3z9dIgIAn+qcJqU6 -CL28lXEV8jKLNmlR0crWSjtSBDIpA3BWWN834l8CgYAxgcPnVZHFZROnGBue2391 -dXqdMhBuReMmGl21yWEZOLqdA478gTv9KtrAk/2D6NN+udNVjHALIfYP5XyWu3xn -NVVLLqbeWeH0H4kHXl3aXrHkvLL0ITiM4ZTM3EbwAwHInCO9K5NHIkaMRPhr6/rk -WLh5Efsl+1aqqGAKN8u3KQKBgFDjcUh3RSdtkSo12ujfR8gfHLaCFYDmVZWFev5s -hNJFgPTOlZJJ6Z6tT6wEnWHmQkzNZg1f4v5vB94piHUwtJynnIWUrZfewQ8EKmzX -wPpJSuOK2paI/3UCmZ0TDLsKpEidzZRBUMMuDh+MgO3N1Sf7uFwDIIpeOap+HZtA -eC6LAoGAFaN/0hr3kBCGGUQ0MKSEw1A4jJntR+Enz5+vJ1F/yW7E3SNp5gHz8sF1 -ppt3OZKtZeIoaCapIEr4hRZzzZr2zNHu3tyizscLAdcqKbt2o7OlPK7Z5mhREN8E -F4obLQI+YsAv2aOY2EFTSPq70N2OL45NLsdq3igpKZEIbpUgnwA= ------END RSA PRIVATE KEY----- +-----BEGIN PRIVATE KEY----- +MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQC4mQik/vxL1bdj +XcXWNT6DBVGLA87CeVYleNE5Fx5KROU5Y388h80VgSTmV3ytu9ZuNEB8hcZqmAtt +ZXcipMyNm9PIvTXaDFaQVclYNJ27hy7rpaJ29WkeLKlUx/pRUpOCg3lbafSmURf5 +C234LZL1qe5O0CIvY3uAzkGWMUE8ABYnef+ucULZPLa2+Y9wIx76oP5tfmcM/pQh +DXt+GK/bZyat1sUmEHCVC2gjvHoZO8T7n4ccpi5v06Klj8BKVxRlGVkO2w4hlDbN +yh6FWKK31nF8BLu/TKF70xSOzX4OFNT6/GJ8R9AyeK52f6OzNmlNUY3UsMEeX8Y2 +qL4hNrFhAgMBAAECggEAKO9euGAHCLYS3ckcpvzVqO5T/9TPU9beYJ7jHprez69p +eYlz3LNsqhkiWqYJ8ujVi0ixCCwOLPMcjZzTh24uIjTtCPXUbE8SHx228YVxePVo +VT88wM55CgTzY+aYvtHl/iozji733q3a+BItx7wre6i8POPwwLt51r1mU+0GP0yQ ++spwGR3POjRZeXiWYKSwhfu/STBpQXLANHCpQOmYFCbjTVpCzJ03msQUfYJNmETd 
+DqyLGbE4aBoPmekrk8GQa/gn04SIsOi8WZeNhsUT9WXyeLFV0DEwnx0sv6IwOk2o +Fqymr71fKNMIvTpCt8wB0Q/rmvPzrprC+hHIZyX5AQKBgQDol5SYzTijZQasV+2d +DqO5IxE8xl8z10bLgsExKcHC6xyhk+el0XFO1fWs0SJ9ptNuKH32C8IlfEr6M3EE +XovQcRfT4crtnWhLmGPAFYKo91f5fiKy1YWggEtsnY+OODo34RCtORtKD6+iC+XE +LFbLMNQA6sHXVONthiEQ6fhIkQKBgQDLLPHgFFL8kSj+Tt/7ctRnwA7qPTUtAwWG +b2QzDMEpifu4O9DJWoE3sfMYkQhyz2g6S+O5XXm56CDtkV+Miazt29z0dM2Jz7uV +NLtymba/s6wBiWFUggHA4Dro1vYa4MJ94ampqi+XPJaJP/j6WoYIu/JhKQDIBtlP +ARaG0O3D0QKBgQCYMyB8wMXQPfqY6kzFml/OdUEBWPnRTYy4/U34Is/1Aa7RmJxb +6GrR4LaLqKp+OJ1gF0UdrWIU73mMsf7BkjDBbE/gSX9l77vgw856UlkWwgwiacTA +63IureUtJQlcUjTefftQru7JjuwqCMkIjs8Y1VHVa8j+ZEESWVPn4oKi0QKBgQCT +4YnHlGN2q91PhG9ooILTZSo1+gj7UyixWeBve8gYiPMrfHYSKIrG1AHhjqa8khQF +4njE0bGoy7kz0UzfiNHSauYfE+kKdqXNCw2ocxNd4tO+ZpTuIpZOIacfFF8a3x8Q +6rBH6rQq+xGCooqBBmRqdQoNCAAmlz2SUHNp+yYkEQKBgQDNBbH7WPjiggahF0TI +oweS86hQ9tDlUFMg/On2eMOY0juSU1yPsPA/JsIglSTDWGRasjFDhd29FVrnGGY5 +5GHy/Gh/6ZZUdrJVsypGw/Dy9amLgmkKTJU4SWDYOb6s1ocGvNPFSYgw0yFe56nx +TU+2zHJo/t2FXssGfnbFWrQAxA== +-----END PRIVATE KEY----- diff --git a/t/certs/server_1024.crt b/t/certs/server_1024.crt new file mode 100644 index 000000000000..85c6442c9d4d --- /dev/null +++ b/t/certs/server_1024.crt @@ -0,0 +1,12 @@ +-----BEGIN CERTIFICATE----- +MIIBrTCCARYCFGohAv7D46F+kSOf08X/MLwtazC7MA0GCSqGSIb3DQEBCwUAMBcx +FTATBgNVBAMMDGNhLmxvY2FsaG9zdDAeFw0yMzA1MjIwMjQ3MzZaFw0zMzA1MTkw +MjQ3MzZaMBQxEjAQBgNVBAMMCWxvY2FsaG9zdDCBnzANBgkqhkiG9w0BAQEFAAOB +jQAwgYkCgYEA2YEV7+FWPl2R9EOSvi2iPyymiUnSaYhIaTuSoqRISOjCSmgrmKpJ +yDN1Cg2hqBHPkWW8BuphpV405ja+94xvOEc0qcP/Or6zDhDfWcaoqxGRAcdwHDgQ +XYMfOxlhwWCp5+vWKep3FPXpHamE09PqUbKWqIa/16aK/1sFR7Q+JJkCAwEAATAN +BgkqhkiG9w0BAQsFAAOBgQCA5aSfk4lZjAEFQ9mWN7Z97b9bnwLw2bv4PgAhDtas +0IxIvhuwR4ZTFv+Pe4PNNNvfnMgTRUWFVpjywUX7Km+k9WFavrikPgAGEfzkR2HR +WE4ETNuS7sGxczosqD+00An4lZ+58uYGEitUOJ6xO80NIhNnOGgo5N/d4fFUTyYH +bw== +-----END CERTIFICATE----- diff --git a/t/certs/server_1024.key b/t/certs/server_1024.key new file mode 100644 index 000000000000..28233461133b 
--- /dev/null +++ b/t/certs/server_1024.key @@ -0,0 +1,16 @@ +-----BEGIN PRIVATE KEY----- +MIICdwIBADANBgkqhkiG9w0BAQEFAASCAmEwggJdAgEAAoGBANmBFe/hVj5dkfRD +kr4toj8spolJ0mmISGk7kqKkSEjowkpoK5iqScgzdQoNoagRz5FlvAbqYaVeNOY2 +vveMbzhHNKnD/zq+sw4Q31nGqKsRkQHHcBw4EF2DHzsZYcFgqefr1inqdxT16R2p +hNPT6lGylqiGv9emiv9bBUe0PiSZAgMBAAECgYBvf5j7S4ymk9kKWsmS7FnMANuu +bUWMC+zy5TMaZRUZKxjOg/A1ZrZEBvzslmhUfCzn4DsvYF+GInEDwvTKehdY07I+ ++hpv1M9Wa7v12ofRWNvZjsbMHfiWM/pFBZFYryV4sQ4qHFRj3TKgXu3pZWPx41wn +ayMUtxYRR3Lez4UiMQJBAP31OIC65xbWGr1W/YJ6IwOPFBgyB6O6qFTcR/lAdJ6H +6MVVs8XEWC4o/ZTk8RGog2nWzVsRCN2pqQUGUHBGRE8CQQDbQNL6eGbsuSxM1uUS +PjrAs5t9rfrwpx41ubjRoGIukEYeX1YXDf4WICe/51vE3jvVfVvFOJuvGoO9QqzB +LgaXAkEAtyRG0R74VBGnSvAW9idaZNCj7yb1N2/+wOPyy59d+o2MofLCKFcGOJO6 ++8t2xgM+ce9EPO419JTLnSIGlFE4JQJAGueHfCjOKHpIj11HWse8Ge1wRSnWQzWe +pWUW4tJVefVGRW/ZdpbG+RwVBJ11S2Eh4n6xhi/+GqycQdsuq73kHQJBAPCknTOP +KpiH5qtKw84nsF+RkWZQ6BhzvP5OuW4avQBx1jQUjpjUhOWfctFH7MLI92Ti+iUS +MYi+HgfT9Lx4kbc= +-----END PRIVATE KEY----- diff --git a/t/certs/vector_logs_ca.crt b/t/certs/vector_logs_ca.crt new file mode 100644 index 000000000000..42e3659f61e0 --- /dev/null +++ b/t/certs/vector_logs_ca.crt @@ -0,0 +1,21 @@ +-----BEGIN CERTIFICATE----- +MIIDazCCAlOgAwIBAgIUa34rzhtYT21pC8NwNIYf3phFciQwDQYJKoZIhvcNAQEL +BQAwRTELMAkGA1UEBhMCVVMxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM +GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDAeFw0yMzA0MjQxMzE2NDNaFw0yMzA1 +MjQxMzE2NDNaMEUxCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApTb21lLVN0YXRlMSEw +HwYDVQQKDBhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQwggEiMA0GCSqGSIb3DQEB +AQUAA4IBDwAwggEKAoIBAQC84FXe/8ofZB2rj5TPHasXdiBEbTCv04ti/lV92WOC +MrHLKibI2+kI3YRQXaR5/1F2bXfROUhdRgkB8NOSM4WbaD1mtr/7mW2Tatxplxsp +1s0zmHHl5v0VYwBahcUs6nlSe19dgfrj4s0Wn7p4E7iSq/UDAs+We/dQowusQTVs +Q2ZhjDlFY22CV/oyCYsNq3ORRgwZRm9cmVmUUF7GX70yjT1KvLkFjc7y1vwi8XJY +ADhw/hjtEzAOkxdUai84+jyhpQYQWMOgrlP1DXnZw1bNKqo6NTkMzfNCS+ul5PMs +Noyxcw1iyGW6Bm81LANsnMM7BLhPQATShmW7O83WUJ4vAgMBAAGjUzBRMB0GA1Ud 
+DgQWBBRdFCb//WETC8mDxg/75e+RoVNoDjAfBgNVHSMEGDAWgBRdFCb//WETC8mD +xg/75e+RoVNoDjAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQC8 +cKOagO+SVJBMzJppm4uGdSM6TJ2wbkn6K5/eOtZmYdKtW6fkAC9tf0DR7dVP1DUk +24lS+atR1Oe7SukxJyd+NafCZ61uf+zrMC3wgBGnufrbPWaDDVxi6c3I0I+WNaCk +DHHY+9UtjvSboWKG1yuEExPN6aDeytbpscG1DNi7l96Ac3Yzs007SFljA7NBrf65 +So9SZYSdJVC/JrOnfK2HZPeAqvoyUO5JsCh02q5AskxTqfBGy6VUVQO5mN8bxYHV +GG5XD46rpwQYNT2bWWRF5d0bRv7ecNkCoupm6hCQROg4FZHGPnqHGqDTcgCLZ59e +8rHh2gsDMMNYvSMTi+0N +-----END CERTIFICATE----- diff --git a/t/certs/vector_logs_ca.key b/t/certs/vector_logs_ca.key new file mode 100644 index 000000000000..e36b69354b13 --- /dev/null +++ b/t/certs/vector_logs_ca.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAvOBV3v/KH2Qdq4+Uzx2rF3YgRG0wr9OLYv5VfdljgjKxyyom +yNvpCN2EUF2kef9Rdm130TlIXUYJAfDTkjOFm2g9Zra/+5ltk2rcaZcbKdbNM5hx +5eb9FWMAWoXFLOp5UntfXYH64+LNFp+6eBO4kqv1AwLPlnv3UKMLrEE1bENmYYw5 +RWNtglf6MgmLDatzkUYMGUZvXJlZlFBexl+9Mo09Sry5BY3O8tb8IvFyWAA4cP4Y +7RMwDpMXVGovOPo8oaUGEFjDoK5T9Q152cNWzSqqOjU5DM3zQkvrpeTzLDaMsXMN +YshlugZvNSwDbJzDOwS4T0AE0oZluzvN1lCeLwIDAQABAoIBADM7ou9fcQM80/OC +efoIcS1nBG+rMqau+kM6/BOsERrzB1k1sNmRFVArTkXCcOgKwp0eKn8dS6zJX44g +NjOVOCukhetDrSXhQ2DWfr1BmMOrmXPiaRrUolfXx/PGD2sUmx4tivvBUz3Xeowl +fZ4us0VN0aMkcwy9yaMc5wCtm4Em+uMrUIvWSAl3ji09oG4NNBQHUsEWJoRMZ/AG +GQowc7Ga850ybZlza1uWh29a3bbQqEwHExJwiCISv25PJ/xQLqH65biB4MU+ym17 +Ou/MDn9cYndxBal/XI4R7HbeIjMgw2XxwXiiDOuKAn5TlCzHmySRXFj1BoT8xoXa +vTXVlAkCgYEA+nc2GiainyW0MAASX53Ue5zsFh4T2CaA4TTHeXEK22rL1Sz3LsbX +ymBqCcNwbcSTYUzBsf6YzSsPLUwIzBGEN0p5Ywts5KtWavAxllBj2MOTP4yQfLvh +AxOq94hqrDLMs/g7LkFrfspYMCXmegGjjXGuqirKbigXkFVQkvOUcwUCgYEAwQy8 +kl2+deq1OD9rJId596nDx6JVLBt7/VP4dOOaS2/YQeFnUdM1xMM+zovEZb3UZMpp +8yhRE7hB7Fm0688yd+F7GpFC49LyVTitZcaIV7nMnXjJQQ27WyiAZoRKHt1gP4io +OCZAaOEJRbGJcWR3sSPHfX93R+xEtFNAexb/eqMCgYEA8NDV7+bdzO7PhKdNAyoZ +NpD2XX2lztmWaPH6KMWLjtPsD5cgQpVkvWxeB+0lmCS9H3xRb/Y+rGWOPhsxCiR9 +Xzv34kcF+AbVHBS9WK0Kk0vXs+5Ord9mxTKP21gKWG6vawpsvFiiJlIe4IxQQVZ6 
+DnETYwGpiKh7n4an5eLVBJECgYEAnviuEJnBzbiJotgWku49MgVKg4raOIgpgmMz +po4G8TgZDadgPbGABZgCkHPoNyArVxSYSvRYT7TcFJWKtuTY2n+DsE0OmC2OAT+7 +CqSCgjsulD5y/G8iad7gXYtyvhfuumL+o75cLAGkcQ/R7t6c8fJUxLPCtieKLDSi +VLqLh6ECgYAlk8O5Rz2bSje4F+b0PZLAGjQRytpTjrgVpxwJ4bBXYeqhfd+3Fi8s +OraFx+kj/YOOFkk5uu75/fbccEY1DG0nmWUR0pjHM+QndB4rpkSxtp+pfVo2nRn0 +pAY8ep+TFRLwmy7ZXpOFPYlGPwx+rjSm9vk9EJYjxZE8YYldiBBKHw== +-----END RSA PRIVATE KEY----- diff --git a/t/certs/vector_logs_server.crt b/t/certs/vector_logs_server.crt new file mode 100644 index 000000000000..95bb51bec4d2 --- /dev/null +++ b/t/certs/vector_logs_server.crt @@ -0,0 +1,19 @@ +-----BEGIN CERTIFICATE----- +MIIDETCCAfkCFEynFsv9L6bzyJJVmjoaKuCoYZwkMA0GCSqGSIb3DQEBCwUAMEUx +CzAJBgNVBAYTAlVTMRMwEQYDVQQIDApTb21lLVN0YXRlMSEwHwYDVQQKDBhJbnRl +cm5ldCBXaWRnaXRzIFB0eSBMdGQwHhcNMjMwNDI0MTMxNzAwWhcNMjQwNDIzMTMx +NzAwWjBFMQswCQYDVQQGEwJBVTETMBEGA1UECAwKU29tZS1TdGF0ZTEhMB8GA1UE +CgwYSW50ZXJuZXQgV2lkZ2l0cyBQdHkgTHRkMIIBIjANBgkqhkiG9w0BAQEFAAOC +AQ8AMIIBCgKCAQEAtGvdwIHuk6k3vphBnGIdtlZ/6ImHSVBsNHz5y6E9X31a88EH +wtnxT5Ang8K6Y4pQt+LsjhI0NdUY2skiKDnGpo2IkaFAn9nERQ1GJstIHr7ltal6 +ureV4n/Na/T6n6GPnwD4+P86XvpIwFtJZujYr2tUl4qm/t1P7zHjB/UsF9G6H/aN +oCsDkG3a7+b8uWAZLkyHS4RLF3pG6pDWns8/vC/P9nTT7o3Ha2DV7TPaY0hlsXf6 +0/SCSm7EonnVVwhnKyy5Z0FsCXClg7weN4ZKPb+ypF0o0/LLqw481lbSfAu5kpjE +r/rHpsQonRbQrcrD9xovXmw2vdk/2jJn6wpFQwIDAQABMA0GCSqGSIb3DQEBCwUA +A4IBAQBv9e8gsD75iySf+pam11JUujjL0gpqdzY72CKo4abYX5NZhMiBs6OCKicz +EedR/EgRY+26RMThKC0zSy3hOO6SKPw03FLsV2B8ooDzaOa4l3F/E6NQ5yNDoK+K +lT1G85fW3bQWtNoB8aa/r1/eExZy3kZF8GSl+/BvwLtOwtGXMO0Y1URo81Dl0da+ +F2yv6ZGziEYIWYTUK3kxOpe0Sl4wHz33olWoli2qpYlSndUUIWoVYJr4gtH/xTEV +GHxdOhxcfyMNi6ceYG4HGWyKRFR9TJAU+PRBxHI8UUpg+BG3/DQmfA5+7xgAws37 +dEVsm725hta8vPUSMSAdRrArBlh+ +-----END CERTIFICATE----- diff --git a/t/certs/vector_logs_server.key b/t/certs/vector_logs_server.key new file mode 100644 index 000000000000..54035d4fa9a6 --- /dev/null +++ b/t/certs/vector_logs_server.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- 
+MIIEpQIBAAKCAQEAtGvdwIHuk6k3vphBnGIdtlZ/6ImHSVBsNHz5y6E9X31a88EH +wtnxT5Ang8K6Y4pQt+LsjhI0NdUY2skiKDnGpo2IkaFAn9nERQ1GJstIHr7ltal6 +ureV4n/Na/T6n6GPnwD4+P86XvpIwFtJZujYr2tUl4qm/t1P7zHjB/UsF9G6H/aN +oCsDkG3a7+b8uWAZLkyHS4RLF3pG6pDWns8/vC/P9nTT7o3Ha2DV7TPaY0hlsXf6 +0/SCSm7EonnVVwhnKyy5Z0FsCXClg7weN4ZKPb+ypF0o0/LLqw481lbSfAu5kpjE +r/rHpsQonRbQrcrD9xovXmw2vdk/2jJn6wpFQwIDAQABAoIBAQCB24lV/6759Le8 +pNXEexIrpQKXGjWXXR0kgjdAiyMjUZRfETZG1prKy1TFjyiccHc8g0YD07JkdKZZ +Ap9lGICUbBY5yzg6VYDguncdgP69smSfZgaB0ZU92wK9iyvALYazyP1qKjmXFsm6 +OXoRadJcIAJYuGEN27imzt87YQmFciXj63lW4usR7rPpacW004VeWqGfXTnckJd6 +TYFq0xmdhnGxDxOlf6fs5zOEw17NrGlYxQVtdst8sGmpAPMEM7DzvDsjfEPxDuXl +hQJE8Zk8jK3Xwrnc03NWisZ4QVhgxeR7PVcraFo623qiI/CzH9YqUqMCtIMAqz/T +COXXl9JxAoGBAOosUC72SM7ZRshneHHszEaZDvfLINdKGUKCDvYlLEmVFqE5iRFy +SomVci2jtrlGH1gJAWfwkT09JVgtGosRIA0MS82HseLN/QIa01dAmmiZqM/CLbcn +mpb0CQDkm0Bbz6fokQkFB/sBA5Kj3kOKRydCLp2S0Ugs50cKXDHP5fuVAoGBAMU8 +9rIvmNdweGTiYjHYLJBkzu7eL+5RB2zVSMuZtVivaDTfleilbRlcrBBIaM0urv2W +UtROB9ack2Ijn/BF+tHkBRVWpaZFdHJ8qMfz2bBDgf6za/LBcvuT35i7ibPT+zfg +UFXtArmGwPq3AZdWBwIKyN8rM7253WDnUlkN7Ed3AoGBAMPAR0b6meJPvtvHoueZ +Cyn4yIpbQxi02GjAT8FzUZIxDrm3Xt02rRhV1RxRvm0iMRFmdcZtUvveIVmUWpvl +tOUzYiptREZT6yvXQNOvLWRDDtqdd5mjgZauaNhWQXGLTgsOXi8sBX/NWS87zJCp +BtHKgS03jbrHzo2UG32ITLgBAoGAJRoardoWPjCB9ThAkG/BskfERVq2WXYUl3xn +fSUk39HfIFMOt/ymUScFluqIDFDDyiAE5Lro7o31i3h4FZKUY/conaL29hgKl56r +gTF1uZp5UZgerkOFhZ2Dag+dD57ImvIvKnqzEIMwufjC69za5J9yucg+q2nTIu9g +pi/gSnECgYEAhfJ5uq1qa+g23np02ED5ttqmyrMRGGInx3mr2QgJDTum6FujpYCM +PwJhMwKJZXcf3eUlECSJPa+9UGI53d+JDlQdwq9Pi726KFtrBiS4t9aSyZSpkoWk +SVdYGaOMtokDKRJibazXjpGFJQy9tAMgtqptS3kL03IuJc643y+lMFc= +-----END RSA PRIVATE KEY----- diff --git a/t/chaos/killetcd/killetcd.go b/t/chaos/killetcd/killetcd.go index 4f92cd9ccb38..069b66581263 100644 --- a/t/chaos/killetcd/killetcd.go +++ b/t/chaos/killetcd/killetcd.go @@ -140,17 +140,12 @@ var _ = ginkgo.Describe("Test Get Success When Etcd Got Killed", func() { // fail to set route since etcd is all killed 
// while get route could still succeed ginkgo.It("get stats after kill etcd", func() { - timeStart := time.Now() utils.SetRoute(e, httpexpect.Status5xx) utils.GetRoute(eDataPanel, http.StatusOK) utils.TestPrometheusEtcdMetric(ePrometheus, 0) bandwidthAfter, durationAfter = utils.GetEgressBandwidthPerSecond(ePrometheus) bpsAfter = bandwidthAfter / durationAfter - - errorLog, err := utils.Log(apisixPod, cliSet.KubeCli, timeStart) - gomega.Expect(err).To(gomega.BeNil()) - gomega.Ω(errorLog).Should(gomega.ContainSubstring("invalid response code: 502")) }) ginkgo.It("ingress bandwidth per second not change much", func() { diff --git a/t/chaos/utils/Dockerfile b/t/chaos/utils/Dockerfile index 36bf2212f96d..5d5ba6dac932 100644 --- a/t/chaos/utils/Dockerfile +++ b/t/chaos/utils/Dockerfile @@ -17,7 +17,7 @@ ARG ENABLE_PROXY=false -FROM openresty/openresty:1.19.3.2-alpine-fat AS production-stage +FROM openresty/openresty:1.21.4.2-alpine-fat AS production-stage ARG ENABLE_PROXY ARG APISIX_PATH @@ -36,11 +36,6 @@ RUN set -x \ sudo \ && cd apisix \ && git config --global url.https://github.com/.insteadOf git://github.com/ \ - && curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sudo sh -s -- -y \ - && source "$HOME/.cargo/env" \ - && export CARGO_REGISTRIES_CRATES_IO_PROTOCOL=sparse \ - # next line is for rust cdylib compile on musl - && export RUSTFLAGS="-C target-feature=-crt-static" \ && make deps \ && cp -v bin/apisix /usr/bin/ \ && mv ../apisix /usr/local/apisix \ diff --git a/t/cli/test_access_log.sh b/t/cli/test_access_log.sh index 7c40b35a3b8a..01dda603a285 100755 --- a/t/cli/test_access_log.sh +++ b/t/cli/test_access_log.sh @@ -91,12 +91,6 @@ if [ $count_test_access_log -eq 1 ]; then exit 1 fi -count_access_log_off=`grep -c "access_log off;" conf/nginx.conf || true` -if [ $count_access_log_off -ne 5 ]; then - echo "failed: nginx.conf file doesn't find access_log off; when disable access log" - exit 1 -fi - make run sleep 0.1 curl http://127.0.0.1:9080/hi @@ 
-230,6 +224,7 @@ echo "passed: should find upstream scheme" # check stream logs echo ' apisix: + proxy_mode: stream stream_proxy: # UDP proxy udp: - "127.0.0.1:9200" diff --git a/t/cli/test_admin.sh b/t/cli/test_admin.sh index 6f39ffae170a..aad049728176 100755 --- a/t/cli/test_admin.sh +++ b/t/cli/test_admin.sh @@ -189,6 +189,62 @@ fi echo "pass: missing admin key and only allow 127.0.0.0/24 to access admin api" +# allow any IP to access admin api with empty admin_key, when admin_key_required=true + +git checkout conf/config.yaml + +echo ' +deployment: + admin: + admin_key_required: true + admin_key: ~ + allow_admin: + - 0.0.0.0/0 +' > conf/config.yaml + +make init > output.log 2>&1 | true + +if ! grep -E "ERROR: missing valid Admin API token." output.log > /dev/null; then + echo "failed: should show 'ERROR: missing valid Admin API token.'" + exit 1 +fi + +echo ' +deployment: + admin: + admin_key_required: false + admin_key: ~ + allow_admin: + - 0.0.0.0/0 +' > conf/config.yaml + +make init > output.log 2>&1 | true + +if grep -E "ERROR: missing valid Admin API token." output.log > /dev/null; then + echo "failed: should not show 'ERROR: missing valid Admin API token.'" + exit 1 +fi + +if ! grep -E "Warning! Admin key is bypassed" output.log > /dev/null; then + echo "failed: should show 'Warning! 
Admin key is bypassed'" + exit 1 +fi + +echo ' +deployment: + admin: + admin_key_required: invalid-value +' > conf/config.yaml + +make init > output.log 2>&1 | true + +if grep -E "path[deployment->admin->admin_key_required] expect: boolean, but got: string" output.log > /dev/null; then + echo "check admin_key_required value failed: should show 'expect: boolean, but got: string'" + exit 1 +fi + +echo "pass: allow empty admin_key, when admin_key_required=false" + # admin api, allow any IP but use default key echo ' diff --git a/t/cli/test_cmd.sh b/t/cli/test_cmd.sh index 449069577ae7..953b6dc4a119 100755 --- a/t/cli/test_cmd.sh +++ b/t/cli/test_cmd.sh @@ -21,34 +21,6 @@ git checkout conf/config.yaml -# remove stale conf server sock -touch conf/config_listen.sock -./bin/apisix start -sleep 0.5 -./bin/apisix stop -sleep 0.5 - -if [ -e conf/config_listen.sock ]; then - echo "failed: should remove stale conf server sock" - exit 1 -fi - -# don't remove stale conf server sock when APISIX is running -./bin/apisix start -sleep 0.5 -./bin/apisix start -sleep 0.5 - -if [ ! -e conf/config_listen.sock ]; then - echo "failed: should not remove stale conf server sock" - exit 1 -fi - -./bin/apisix stop -sleep 0.5 - -echo "passed: stale conf server sock removed" - # check restart with old nginx.pid exist echo "-1" > logs/nginx.pid out=$(./bin/apisix start 2>&1 || true) @@ -86,10 +58,19 @@ fi make stop echo "pass: check APISIX running" -# check customized config.yaml is copied and reverted. 
+# check customized config git checkout conf/config.yaml +# start with not existed customized config +make init + +if ./bin/apisix start -c conf/not_existed_config.yaml; then + echo "failed: apisix still start with invalid customized config.yaml" + exit 1 +fi + +# start with customized config echo " deployment: admin: @@ -101,37 +82,129 @@ deployment: admin_ssl_cert_key: '../t/certs/apisix_admin_ssl.key' " > conf/customized_config.yaml -cp conf/config.yaml conf/config_original.yaml +./bin/apisix start -c conf/customized_config.yaml -make init +# check if .customized_config_path has been created +if [ ! -e conf/.customized_config_path ]; then + rm conf/customized_config.yaml + echo ".config_path file should exits" + exit 1 +fi -if ./bin/apisix start -c conf/not_existed_config.yaml; then - echo "failed: apisix still start with invalid customized config.yaml" +# check if the custom config is used +code=$(curl -k -i -m 20 -o /dev/null -s -w %{http_code} https://127.0.0.1:9180/apisix/admin/routes -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1') +if [ ! 
$code -eq 200 ]; then + rm conf/customized_config.yaml + echo "failed: customized config.yaml not be used" exit 1 fi -./bin/apisix start -c conf/customized_config.yaml +make stop -if cmp -s "conf/config.yaml" "conf/config_original.yaml"; then - rm conf/config_original.yaml - echo "failed: customized config.yaml copied failed" +# check if .customized_config_path has been removed +if [ -e conf/.config_path ]; then + rm conf/customized_config_path.yaml + echo ".config_path file should be removed" exit 1 fi -code=$(curl -k -i -m 20 -o /dev/null -s -w %{http_code} https://127.0.0.1:9180/apisix/admin/routes -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1') +# start with invalied config +echo "abc" > conf/customized_config.yaml + +if ./bin/apisix start -c conf/customized_config.yaml ; then + rm conf/customized_config.yaml + echo "start should be failed" + exit 1 +fi + +# check if apisix can be started use correctly default config. (https://github.com/apache/apisix/issues/9700) +./bin/apisix start + +code=$(curl -k -i -m 20 -o /dev/null -s -w %{http_code} http://127.0.0.1:9180/apisix/admin/routes -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1') if [ ! $code -eq 200 ]; then - rm conf/config_original.yaml conf/customized_config.yaml - echo "failed: customized config.yaml not be used" + rm conf/customized_config.yaml + echo "failed: should use default config" exit 1 fi make stop -if ! cmp -s "conf/config.yaml" "conf/config_original.yaml"; then - rm conf/config_original.yaml conf/customized_config.yaml - echo "failed: customized config.yaml reverted failed" +# check if apisix can be started after multiple start failures. 
(https://github.com/apache/apisix/issues/9171) +echo " +deployment: + admin: + admin_listen: + port: 9180 + https_admin: true + admin_api_mtls: + admin_ssl_cert: '../t/certs/apisix_admin_ssl.crt' + admin_ssl_cert_key: '../t/certs/apisix_admin_ssl.key' + etcd: + host: + - http://127.0.0.1:22379 +" > conf/customized_config.yaml + +./bin/apisix start -c conf/customized_config.yaml || true +./bin/apisix start -c conf/customized_config.yaml || true +./bin/apisix start -c conf/customized_config.yaml || true + +echo " +deployment: + admin: + admin_listen: + port: 9180 + https_admin: true + admin_api_mtls: + admin_ssl_cert: '../t/certs/apisix_admin_ssl.crt' + admin_ssl_cert_key: '../t/certs/apisix_admin_ssl.key' +" > conf/customized_config.yaml + +./bin/apisix start -c conf/customized_config.yaml + +code=$(curl -k -i -m 20 -o /dev/null -s -w %{http_code} https://127.0.0.1:9180/apisix/admin/routes -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1') +if [ ! $code -eq 200 ]; then + rm conf/customized_config.yaml + echo "failed: should use default config" + exit 1 +fi + +rm conf/customized_config.yaml +echo "passed: test customized config successful" + +# test quit command +bin/apisix start + +if ! ps -ef | grep "apisix" | grep "master process" | grep -v "grep"; then + echo "apisix not started" + exit 1 +fi + +bin/apisix quit + +sleep 0.5 + +if ps -ef | grep "worker process is shutting down" | grep -v "grep"; then + echo "all workers should exited" + exit 1 +fi + +echo "passed: test quit command successful" + +# test reload command +bin/apisix start + +if ! 
ps -ef | grep "apisix" | grep "master process" | grep -v "grep"; then + echo "apisix not started" + exit 1 +fi + +bin/apisix reload + +sleep 0.5 + +if ps -ef | grep "worker process is shutting down" | grep -v "grep"; then + echo "old workers should exited" exit 1 fi -rm conf/config_original.yaml conf/customized_config.yaml -echo "passed: customized config.yaml copied and reverted succeeded" +echo "passed: test reload command successful" diff --git a/t/cli/test_core_config.sh b/t/cli/test_core_config.sh index 7b96820539cc..f799241b945d 100755 --- a/t/cli/test_core_config.sh +++ b/t/cli/test_core_config.sh @@ -45,6 +45,7 @@ echo "passed: set lua_max_running_timers successfully" echo " apisix: + proxy_mode: http&stream stream_proxy: tcp: - addr: 9100 diff --git a/t/cli/test_deployment_control_plane.sh b/t/cli/test_deployment_control_plane.sh index 58858f41fd1c..ed3a062a8117 100755 --- a/t/cli/test_deployment_control_plane.sh +++ b/t/cli/test_deployment_control_plane.sh @@ -19,57 +19,6 @@ . ./t/cli/common.sh -echo ' -deployment: - role: control_plane - role_control_plane: - config_provider: etcd - conf_server: - cert: t/certs/mtls_server.crt - etcd: - prefix: "/apisix" - host: - - http://127.0.0.1:2379 -' > conf/config.yaml - -out=$(make init 2>&1 || true) -if ! echo "$out" | grep 'property "cert_key" is required'; then - echo "failed: should check deployment schema during init" - exit 1 -fi - -echo "passed: should check deployment schema during init" - -# The 'admin.apisix.dev' is injected by ci/common.sh@set_coredns -echo ' -apisix: - enable_admin: false -deployment: - role: control_plane - role_control_plane: - config_provider: etcd - conf_server: - listen: admin.apisix.dev:12345 - cert: t/certs/mtls_server.crt - cert_key: t/certs/mtls_server.key - admin: - https_admin: "abc" - etcd: - prefix: "/apisix" - host: - - http://127.0.0.1:2379 - certs: - trusted_ca_cert: t/certs/mtls_ca.crt -' > conf/config.yaml - -out=$(make init 2>&1 || true) -if ! 
echo "$out" | grep 'property "https_admin" validation failed: wrong type: expected boolean, got string'; then - echo "failed: should check deployment schema during init" - exit 1 -fi - -echo "passed: should check deployment schema during init" - # The 'admin.apisix.dev' is injected by ci/common.sh@set_coredns echo ' apisix: @@ -78,23 +27,16 @@ deployment: role: control_plane role_control_plane: config_provider: etcd - conf_server: - listen: admin.apisix.dev:12345 - cert: t/certs/mtls_server.crt - cert_key: t/certs/mtls_server.key etcd: prefix: "/apisix" host: - http://127.0.0.1:2379 - certs: - trusted_ca_cert: t/certs/mtls_ca.crt ' > conf/config.yaml make run sleep 1 code=$(curl -o /dev/null -s -w %{http_code} http://127.0.0.1:9180/apisix/admin/routes -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1') -make stop if [ ! $code -eq 200 ]; then echo "failed: control_plane should enable Admin API" @@ -103,40 +45,6 @@ fi echo "passed: control_plane should enable Admin API" -# use https -# The 'admin.apisix.dev' is injected by ci/common.sh@set_coredns -echo ' -deployment: - role: control_plane - role_control_plane: - config_provider: etcd - conf_server: - listen: admin.apisix.dev:12345 - cert: t/certs/mtls_server.crt - cert_key: t/certs/mtls_server.key - etcd: - prefix: "/apisix" - host: - - http://127.0.0.1:2379 - certs: - cert: t/certs/mtls_client.crt - cert_key: t/certs/mtls_client.key - trusted_ca_cert: t/certs/mtls_ca.crt -' > conf/config.yaml - -make run -sleep 1 - -code=$(curl -o /dev/null -s -w %{http_code} http://127.0.0.1:9180/apisix/admin/routes -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1') - -if [ ! 
$code -eq 200 ]; then - make stop - echo "failed: could not work with etcd" - exit 1 -fi - -echo "passed: work well with etcd in control plane" - curl -i http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' { "upstream": { diff --git a/t/cli/test_deployment_data_plane.sh b/t/cli/test_deployment_data_plane.sh index ef5ef61e37c6..b7edb0e2411b 100755 --- a/t/cli/test_deployment_data_plane.sh +++ b/t/cli/test_deployment_data_plane.sh @@ -27,14 +27,14 @@ echo ' deployment: role: data_plane role_data_plane: - config_provider: control_plane - control_plane: - host: - - https://127.0.0.1:12379 - prefix: "/apisix" - timeout: 30 - tls: - verify: false + config_provider: etcd + etcd: + host: + - https://127.0.0.1:12379 + prefix: "/apisix" + timeout: 30 + tls: + verify: false ' > conf/config.yaml make run @@ -64,12 +64,12 @@ echo ' deployment: role: data_plane role_data_plane: - config_provider: control_plane - control_plane: - host: - - https://127.0.0.1:12379 - prefix: "/apisix" - timeout: 30 + config_provider: etcd + etcd: + host: + - https://127.0.0.1:12379 + prefix: "/apisix" + timeout: 30 ' > conf/config.yaml out=$(make run 2>&1 || true) diff --git a/t/cli/test_deployment_mtls.sh b/t/cli/test_deployment_mtls.sh deleted file mode 100755 index 5fa4c6984a21..000000000000 --- a/t/cli/test_deployment_mtls.sh +++ /dev/null @@ -1,88 +0,0 @@ -#!/usr/bin/env bash - -# -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -. ./t/cli/common.sh - -exit_if_not_customed_nginx - -# use mTLS -# The 'admin.apisix.dev' is injected by ci/common.sh@set_coredns -echo ' -deployment: - role: control_plane - role_control_plane: - config_provider: etcd - conf_server: - listen: admin.apisix.dev:12345 - cert: t/certs/mtls_server.crt - cert_key: t/certs/mtls_server.key - client_ca_cert: t/certs/mtls_ca.crt - etcd: - prefix: "/apisix" - host: - - http://127.0.0.1:2379 - certs: - cert: t/certs/mtls_client.crt - cert_key: t/certs/mtls_client.key - trusted_ca_cert: t/certs/mtls_ca.crt -' > conf/config.yaml - -make run -sleep 1 - -code=$(curl -o /dev/null -s -w %{http_code} http://127.0.0.1:9180/apisix/admin/routes -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1') -make stop - -if [ ! 
$code -eq 200 ]; then - echo "failed: could not work with etcd" - exit 1 -fi - -echo "passed: work well with etcd in control plane" - -echo ' -deployment: - role: data_plane - role_data_plane: - config_provider: control_plane - control_plane: - host: - - "https://admin.apisix.dev:22379" - prefix: "/apisix" - timeout: 30 - tls: - verify: false - certs: - cert: t/certs/mtls_client.crt - cert_key: t/certs/mtls_client.key - trusted_ca_cert: t/certs/mtls_ca.crt -' > conf/config.yaml - -rm logs/error.log -make run -sleep 1 - -make stop - -if grep '\[error\] .\+ https://admin.apisix.dev:22379' logs/error.log; then - echo "failed: work well with control plane in data plane" - exit 1 -fi - -echo "passed: work well with control plane in data plane" diff --git a/t/cli/test_deployment_traditional.sh b/t/cli/test_deployment_traditional.sh index 1dead769bc10..24996eb3b947 100755 --- a/t/cli/test_deployment_traditional.sh +++ b/t/cli/test_deployment_traditional.sh @@ -45,6 +45,7 @@ fi # Both HTTP and Stream echo ' apisix: + proxy_mode: http&stream enable_admin: true stream_proxy: tcp: @@ -74,6 +75,7 @@ fi echo ' apisix: enable_admin: false + proxy_mode: stream stream_proxy: tcp: - addr: 9100 @@ -98,57 +100,6 @@ fi echo "passed: could connect to etcd" -echo ' -deployment: - role: traditional - role_traditional: - config_provider: etcd - etcd: - prefix: "/apisix" - host: - - http://127.0.0.1:2379 - - https://127.0.0.1:2379 -' > conf/config.yaml - -out=$(make init 2>&1 || true) -if ! 
echo "$out" | grep 'all nodes in the etcd cluster should enable/disable TLS together'; then - echo "failed: should validate etcd host" - exit 1 -fi - -echo "passed: validate etcd host" - -# The 'admin.apisix.dev' is injected by ci/common.sh@set_coredns - -# etcd mTLS verify -echo ' -deployment: - role: traditional - role_traditional: - config_provider: etcd - etcd: - host: - - "https://admin.apisix.dev:22379" - prefix: "/apisix" - tls: - cert: t/certs/mtls_client.crt - key: t/certs/mtls_client.key - verify: false - ' > conf/config.yaml - -make run -sleep 1 - -code=$(curl -o /dev/null -s -w %{http_code} http://127.0.0.1:9180/apisix/admin/routes -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1') -make stop - -if [ ! $code -eq 200 ]; then - echo "failed: could not work when mTLS is enabled" - exit 1 -fi - -echo "passed: etcd enables mTLS successfully" - echo ' deployment: role: traditional diff --git a/t/cli/test_dns.sh b/t/cli/test_dns.sh index cb8f8eaee565..86dd9dbb1f19 100755 --- a/t/cli/test_dns.sh +++ b/t/cli/test_dns.sh @@ -41,6 +41,7 @@ fi echo ' apisix: + proxy_mode: http&stream stream_proxy: tcp: - 9100 @@ -62,6 +63,7 @@ echo "pass: dns_resolver_valid takes effect" echo ' apisix: + proxy_mode: http&stream stream_proxy: tcp: - 9100 @@ -130,6 +132,7 @@ rm logs/error.log || true echo " apisix: enable_admin: true + proxy_mode: http&stream stream_proxy: tcp: - addr: 9100 diff --git a/t/cli/test_etcd_grpc.sh b/t/cli/test_etcd_grpc.sh deleted file mode 100755 index 47266c3e4e97..000000000000 --- a/t/cli/test_etcd_grpc.sh +++ /dev/null @@ -1,104 +0,0 @@ -#!/usr/bin/env bash - -# -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -# 'make init' operates scripts and related configuration files in the current directory -# The 'apisix' command is a command in the /usr/local/apisix, -# and the configuration file for the operation is in the /usr/local/apisix/conf - -. ./t/cli/common.sh - -exit_if_not_customed_nginx - -# check etcd while enable auth -git checkout conf/config.yaml - -export ETCDCTL_API=3 -etcdctl version -etcdctl --endpoints=127.0.0.1:2379 user add "root:apache-api6" -etcdctl --endpoints=127.0.0.1:2379 role add root -etcdctl --endpoints=127.0.0.1:2379 user grant-role root root -etcdctl --endpoints=127.0.0.1:2379 user get root -etcdctl --endpoints=127.0.0.1:2379 auth enable -etcdctl --endpoints=127.0.0.1:2379 --user=root:apache-api6 del /apisix --prefix - -echo ' -deployment: - role: traditional - role_traditional: - config_provider: etcd - etcd: - host: - - http://127.0.0.1:2379 - prefix: /apisix - timeout: 30 - use_grpc: true - user: root - password: apache-api6 -' > conf/config.yaml - -make run -sleep 1 - -code=$(curl -o /dev/null -s -w %{http_code} http://127.0.0.1:9180/apisix/admin/routes -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1') -make stop - -if [ ! 
$code -eq 200 ]; then - echo "failed: could not work with etcd" - exit 1 -fi - -echo "passed: work well with etcd auth enabled" - -etcdctl --endpoints=127.0.0.1:2379 --user=root:apache-api6 auth disable -etcdctl --endpoints=127.0.0.1:2379 role delete root -etcdctl --endpoints=127.0.0.1:2379 user delete root - -# check connect to etcd with ipv6 address in cli -git checkout conf/config.yaml - -echo ' -deployment: - role: traditional - role_traditional: - config_provider: etcd - etcd: - host: - - http://[::1]:2379 - prefix: /apisix - use_grpc: true - timeout: 30 -' > conf/config.yaml - -rm logs/error.log || true -make run -sleep 0.1 - -if grep "update endpoint: http://\[::1\]:2379 to unhealthy" logs/error.log; then - echo "failed: connect to etcd via ipv6 address failed" - exit 1 -fi - -if grep "host or service not provided, or not known" logs/error.log; then - echo "failed: luasocket resolve ipv6 addresses failed" - exit 1 -fi - -make stop - -echo "passed: connect to etcd via ipv6 address successfully" diff --git a/t/cli/test_etcd_grpc_healthcheck.sh b/t/cli/test_etcd_grpc_healthcheck.sh deleted file mode 100755 index 3a977e0265f1..000000000000 --- a/t/cli/test_etcd_grpc_healthcheck.sh +++ /dev/null @@ -1,155 +0,0 @@ -#!/usr/bin/env bash - -# -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# - -# 'make init' operates scripts and related configuration files in the current directory -# The 'apisix' command is a command in the /usr/local/apisix, -# and the configuration file for the operation is in the /usr/local/apisix/conf - -. ./t/cli/common.sh - -exit_if_not_customed_nginx - -custom_clean_up() { - clean_up - - # stop etcd docker container - docker-compose -f ./t/cli/docker-compose-etcd-cluster.yaml down -} - -trap custom_clean_up EXIT - -export ETCD_ENABLE_GRPC_GATEWAY=false - -# create 3 node etcd cluster in docker -ETCD_NAME_0=etcd0 -ETCD_NAME_1=etcd1 -ETCD_NAME_2=etcd2 -HEALTH_CHECK_RETRY_TIMEOUT=10 - -if [ -f "logs/error.log" ]; then - rm logs/error.log -fi -touch logs/error.log - -echo ' -deployment: - role: traditional - role_traditional: - config_provider: etcd - etcd: - use_grpc: true - host: - - "http://0.0.0.0:23790" - - "http://0.0.0.0:23791" - - "http://0.0.0.0:23792" - health_check_timeout: '"$HEALTH_CHECK_RETRY_TIMEOUT"' - timeout: 2 -' > conf/config.yaml - -docker-compose -f ./t/cli/docker-compose-etcd-cluster.yaml up -d - -# case 1: Check apisix not got effected when one etcd node disconnected -make init && make run - -docker stop ${ETCD_NAME_0} -code=$(curl -o /dev/null -s -w %{http_code} http://127.0.0.1:9180/apisix/admin/routes -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1') -if [ ! $code -eq 200 ]; then - echo "failed: apisix got effect when one etcd node out of a cluster disconnected" - exit 1 -fi -docker start ${ETCD_NAME_0} - -docker stop ${ETCD_NAME_1} -code=$(curl -o /dev/null -s -w %{http_code} http://127.0.0.1:9180/apisix/admin/routes -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1') -if [ ! 
$code -eq 200 ]; then - echo "failed: apisix got effect when one etcd node out of a cluster disconnected" - exit 1 -fi -docker start ${ETCD_NAME_1} - -make stop - -echo "passed: apisix not got effected when one etcd node disconnected" - -# case 2: Check when all etcd nodes disconnected, apisix trying to reconnect with backoff, and could successfully recover when reconnected -make init && make run - -docker stop ${ETCD_NAME_0} && docker stop ${ETCD_NAME_1} && docker stop ${ETCD_NAME_2} - -sleep_till=$(date +%s -d "$DATE + $HEALTH_CHECK_RETRY_TIMEOUT second") - -code=$(curl -o /dev/null -s -w %{http_code} http://127.0.0.1:9180/apisix/admin/routes -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1') -if [ $code -eq 200 ]; then - echo "failed: apisix not got effect when all etcd nodes disconnected" - exit 1 -fi - -docker start ${ETCD_NAME_0} && docker start ${ETCD_NAME_1} && docker start ${ETCD_NAME_2} - -# case 3: sleep till etcd health check try to check again -current_time=$(date +%s) -sleep_seconds=$(( $sleep_till - $current_time + 3)) -if [ "$sleep_seconds" -gt 0 ]; then - sleep $sleep_seconds -fi - -code=$(curl -o /dev/null -s -w %{http_code} http://127.0.0.1:9180/apisix/admin/routes -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1') -if [ ! 
$code -eq 200 ]; then - echo "failed: apisix could not recover when etcd node recover" - docker ps - cat logs/error.log - exit 1 -fi - -make stop - -echo "passed: when all etcd nodes disconnected, apisix trying to reconnect with backoff, and could successfully recover when reconnected" - -# case 4: stop one etcd node (result: start successful) -docker stop ${ETCD_NAME_0} - -out=$(make init 2>&1) -if echo "$out" | grep "23790" | grep "connection refused"; then - echo "passed: APISIX successfully to start, stop only one etcd node" -else - echo "failed: stop only one etcd node APISIX should start normally" - exit 1 -fi - -# case 5: stop two etcd nodes (result: start failure) -docker stop ${ETCD_NAME_1} - -out=$(make init 2>&1 || true) -if echo "$out" | grep "23791" | grep "connection refused"; then - echo "passed: APISIX failed to start, etcd cluster must have two or more healthy nodes" -else - echo "failed: two etcd nodes have been stopped, APISIX should fail to start" - exit 1 -fi - -# case 6: stop all etcd nodes (result: start failure) -docker stop ${ETCD_NAME_2} - -out=$(make init 2>&1 || true) -if echo "$out" | grep "23792" | grep "connection refused"; then - echo "passed: APISIX failed to start, all etcd nodes have stopped" -else - echo "failed: all etcd nodes have stopped, APISIX should not be able to start" - exit 1 -fi diff --git a/t/cli/test_etcd_grpc_mtls.sh b/t/cli/test_etcd_grpc_mtls.sh deleted file mode 100755 index 8f37a711272e..000000000000 --- a/t/cli/test_etcd_grpc_mtls.sh +++ /dev/null @@ -1,181 +0,0 @@ -#!/usr/bin/env bash - -# -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -. ./t/cli/common.sh - -exit_if_not_customed_nginx - -# The 'admin.apisix.dev' is injected by ci/common.sh@set_coredns - -# etcd mTLS verify -echo ' -deployment: - role: traditional - role_traditional: - config_provider: etcd - etcd: - use_grpc: true - host: - - "https://admin.apisix.dev:22379" - prefix: "/apisix" - tls: - cert: t/certs/mtls_client.crt - key: t/certs/mtls_client.key - verify: false - ' > conf/config.yaml - -out=$(make init 2>&1 || echo "ouch") -if echo "$out" | grep "bad certificate"; then - echo "failed: apisix should not echo \"bad certificate\"" - exit 1 -fi - -echo "passed: certificate verify success expectedly" - -echo ' -deployment: - role: traditional - role_traditional: - config_provider: etcd - etcd: - use_grpc: true - host: - - "https://admin.apisix.dev:22379" - prefix: "/apisix" - tls: - verify: false - ' > conf/config.yaml - -out=$(make init 2>&1 || echo "ouch") -if ! 
echo "$out" | grep "bad certificate"; then - echo "failed: apisix should echo \"bad certificate\"" - exit 1 -fi - -echo "passed: certificate verify fail expectedly" - -# etcd mTLS verify with CA -echo ' -apisix: - ssl: - ssl_trusted_certificate: t/certs/mtls_ca.crt -deployment: - role: traditional - role_traditional: - config_provider: etcd - etcd: - use_grpc: true - host: - - "https://admin.apisix.dev:22379" - prefix: "/apisix" - tls: - cert: t/certs/mtls_client.crt - key: t/certs/mtls_client.key - ' > conf/config.yaml - -out=$(make init 2>&1 || echo "ouch") -if echo "$out" | grep "certificate verify failed"; then - echo "failed: apisix should not echo \"certificate verify failed\"" - exit 1 -fi - -if echo "$out" | grep "ouch"; then - echo "failed: apisix should not fail" - exit 1 -fi - -echo "passed: certificate verify with CA success expectedly" - -# etcd mTLS in stream subsystem -echo ' -apisix: - stream_proxy: - tcp: - - addr: 9100 - ssl: - ssl_trusted_certificate: t/certs/mtls_ca.crt -deployment: - role: traditional - role_traditional: - config_provider: etcd - etcd: - use_grpc: true - host: - - "https://admin.apisix.dev:22379" - prefix: "/apisix" - tls: - cert: t/certs/mtls_client.crt - key: t/certs/mtls_client.key - ' > conf/config.yaml - -out=$(make init 2>&1 || echo "ouch") -if echo "$out" | grep "certificate verify failed"; then - echo "failed: apisix should not echo \"certificate verify failed\"" - exit 1 -fi - -if echo "$out" | grep "ouch"; then - echo "failed: apisix should not fail" - exit 1 -fi - -rm logs/error.log || true -make run -sleep 1 -make stop - -if grep "\[error\]" logs/error.log; then - echo "failed: veirfy etcd certificate during sync should not fail" -fi - -echo "passed: certificate verify in stream subsystem successfully" - -# use host in etcd.host as sni by default -git checkout conf/config.yaml -echo ' -apisix: - ssl: - ssl_trusted_certificate: t/certs/mtls_ca.crt -deployment: - role: traditional - role_traditional: - 
config_provider: etcd - etcd: - use_grpc: true - host: - - "https://127.0.0.2:22379" - prefix: "/apisix" - tls: - cert: t/certs/mtls_client.crt - key: t/certs/mtls_client.key - ' > conf/config.yaml - -rm logs/error.log || true -make init -make run -sleep 1 -make stop - -if ! grep -E "cannot validate certificate for 127.0.0.2 because it doesn't contain any IP SANs" logs/error.log; then - echo "failed: should got certificate host mismatch when use host in etcd.host as sni" - exit 1 -fi - - -echo "passed: use host in etcd.host as sni by default" diff --git a/t/cli/test_etcd_grpc_tls.sh b/t/cli/test_etcd_grpc_tls.sh deleted file mode 100755 index 9e429e41b373..000000000000 --- a/t/cli/test_etcd_grpc_tls.sh +++ /dev/null @@ -1,78 +0,0 @@ -#!/usr/bin/env bash - -# -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -# 'make init' operates scripts and related configuration files in the current directory -# The 'apisix' command is a command in the /usr/local/apisix, -# and the configuration file for the operation is in the /usr/local/apisix/conf - -. 
./t/cli/common.sh - -exit_if_not_customed_nginx - -# Check etcd tls verify failure -git checkout conf/config.yaml - -echo ' -apisix: - ssl: - ssl_trusted_certificate: t/certs/mtls_ca.crt -deployment: - role: traditional - role_traditional: - config_provider: etcd - etcd: - use_grpc: true - host: - - "https://127.0.0.1:12379" - prefix: "/apisix" - ' > conf/config.yaml - -out=$(make init 2>&1 || true) -if ! echo "$out" | grep "certificate verify failed"; then - echo "failed: apisix should echo \"certificate verify failed\"" - exit 1 -fi - -echo "passed: Show certificate verify failed info successfully" - - -# Check etcd tls without verification -git checkout conf/config.yaml - -echo ' -deployment: - role: traditional - role_traditional: - config_provider: etcd - etcd: - use_grpc: true - host: - - "https://127.0.0.1:12379" - prefix: "/apisix" - tls: - verify: false - ' > conf/config.yaml - -out=$(make init 2>&1 || true) -if echo "$out" | grep "certificate verify failed"; then - echo "failed: apisix should not echo \"certificate verify failed\"" - exit 1 -fi - -echo "passed: Certificate verification successfully" diff --git a/t/cli/test_etcd_mtls.sh b/t/cli/test_etcd_mtls.sh index d61d6d517c1f..a05dcdb784ca 100755 --- a/t/cli/test_etcd_mtls.sh +++ b/t/cli/test_etcd_mtls.sh @@ -102,6 +102,7 @@ echo "passed: certificate verify with CA success expectedly" # etcd mTLS in stream subsystem echo ' apisix: + proxy_mode: http&stream stream_proxy: tcp: - addr: 9100 @@ -167,7 +168,7 @@ make run sleep 1 make stop -if ! grep -E 'upstream SSL certificate does not match \"127.0.0.1\" while SSL handshaking to upstream' logs/error.log; then +if ! 
grep -F 'certificate host mismatch' logs/error.log; then echo "failed: should got certificate host mismatch when use host in etcd.host as sni" exit 1 fi diff --git a/t/cli/test_main.sh b/t/cli/test_main.sh index 29534c83aa54..3b0cab766d59 100755 --- a/t/cli/test_main.sh +++ b/t/cli/test_main.sh @@ -670,10 +670,10 @@ echo "passed: bad lua_module_hook should be rejected" echo ' apisix: + proxy_mode: http&stream extra_lua_path: "\$prefix/example/?.lua" lua_module_hook: "my_hook" stream_proxy: - only: false tcp: - addr: 9100 ' > conf/config.yaml @@ -810,6 +810,7 @@ git checkout conf/config.yaml echo ' apisix: + proxy_mode: http&stream stream_proxy: tcp: - addr: 9100 diff --git a/t/cli/test_prometheus_run_in_privileged.sh b/t/cli/test_prometheus_run_in_privileged.sh index 7f8a3e2ec5ff..a97cf307e26c 100755 --- a/t/cli/test_prometheus_run_in_privileged.sh +++ b/t/cli/test_prometheus_run_in_privileged.sh @@ -55,6 +55,7 @@ rm logs/error.log || true echo " apisix: + proxy_mode: http&stream extra_lua_path: "\$prefix/t/lib/?.lua" enable_admin: true stream_proxy: @@ -87,6 +88,7 @@ rm logs/error.log || true echo " apisix: + proxy_mode: http&stream extra_lua_path: "\$prefix/t/lib/?.lua" enable_admin: false stream_proxy: diff --git a/t/cli/test_prometheus_stream.sh b/t/cli/test_prometheus_stream.sh index 561b9a820cf5..abf960e776a5 100755 --- a/t/cli/test_prometheus_stream.sh +++ b/t/cli/test_prometheus_stream.sh @@ -23,6 +23,7 @@ exit_if_not_customed_nginx echo " apisix: + proxy_mode: http&stream enable_admin: true stream_proxy: tcp: @@ -65,6 +66,7 @@ echo "passed: prometheus works when both http & stream are enabled" echo " apisix: + proxy_mode: stream enable_admin: false stream_proxy: tcp: diff --git a/t/cli/test_route_match_with_graphql.sh b/t/cli/test_route_match_with_graphql.sh new file mode 100755 index 000000000000..c67027748146 --- /dev/null +++ b/t/cli/test_route_match_with_graphql.sh @@ -0,0 +1,98 @@ +#!/usr/bin/env bash + +# +# Licensed to the Apache Software Foundation 
(ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +. ./t/cli/common.sh + +echo ' +deployment: + role: data_plane + role_data_plane: + config_provider: yaml + +apisix: + router: + http: radixtree_uri + +nginx_config: + worker_processes: 1 + +' > conf/config.yaml + +echo ' +routes: + - uri: "/hello" + hosts: + - test.com + vars: + - - "graphql_name" + - "==" + - "createAccount" + priority: 30 + id: "graphql1" + upstream_id: "invalid" + + - uri: "/hello" + hosts: + - test.com + plugins: + echo: + body: "test server" + priority: 20 + id: "graphql2" + upstream_id: "invalid" + + - uri: "/hello" + hosts: + - test2.com + plugins: + echo: + body: "test2" + priority: 20 + id: "graphql3" + upstream_id: "invalid" + +upstreams: + - nodes: + 127.0.0.1:1999: 1 + id: "invalid" +#END +' > conf/apisix.yaml + +make run + +dd if=/dev/urandom of=tmp_data.json bs=300K count=1 + +for i in {1..100}; do + curl -s http://127.0.0.1:9080/hello -H "Host: test.com" -H "Content-Type: application/json" -X POST -d @tmp_data.json > /tmp/graphql_request1.txt & + curl -s http://127.0.0.1:9080/hello -H "Host: test2.com" -H "Content-Type: application/json" -X POST -d @tmp_data.json > /tmp/graphql_request2.txt & + + wait + + if diff /tmp/graphql_request1.txt /tmp/graphql_request2.txt > /dev/null; then + make stop + 
echo "failed: route match error in GraphQL requests, route should not be the same" + exit 1 + fi +done + +make stop + +rm tmp_data.json /tmp/graphql_request1.txt /tmp/graphql_request2.txt + +echo "passed: GraphQL requests can be correctly matched to the route" diff --git a/t/cli/test_snippet.sh b/t/cli/test_snippet.sh index 1b545dd9cf0a..72eee7e64a96 100755 --- a/t/cli/test_snippet.sh +++ b/t/cli/test_snippet.sh @@ -25,8 +25,8 @@ echo ' apisix: node_listen: 9080 enable_admin: true + proxy_mode: http&stream stream_proxy: - only: false tcp: - 9100 nginx_config: diff --git a/t/cli/test_standalone.sh b/t/cli/test_standalone.sh index 2a3add66662b..a0d91c11c4a0 100755 --- a/t/cli/test_standalone.sh +++ b/t/cli/test_standalone.sh @@ -26,7 +26,7 @@ standalone() { trap standalone EXIT -# support environment variables +# support environment variables in yaml values echo ' apisix: enable_admin: false @@ -69,6 +69,33 @@ fi echo "passed: resolve variables in apisix.yaml conf success" +# support environment variables in yaml keys +echo ' +routes: + - + uri: "/test" + plugins: + proxy-rewrite: + uri: "/apisix/nginx_status" + upstream: + nodes: + "${{HOST_IP}}:${{PORT}}": 1 + type: roundrobin +#END +' > conf/apisix.yaml + +# variable is valid +HOST_IP="127.0.0.1" PORT="9091" make init +HOST_IP="127.0.0.1" PORT="9091" make run +sleep 0.1 + +code=$(curl -o /dev/null -s -m 5 -w %{http_code} http://127.0.0.1:9080/test) +if [ ! 
$code -eq 200 ]; then + echo "failed: resolve variables in apisix.yaml conf failed" +fi + +echo "passed: resolve variables in apisix.yaml conf success" + # configure standalone via deployment echo ' deployment: diff --git a/t/cli/test_stream_config.sh b/t/cli/test_stream_config.sh index 5a15ae10fe2f..baab138a0c99 100755 --- a/t/cli/test_stream_config.sh +++ b/t/cli/test_stream_config.sh @@ -22,6 +22,7 @@ echo " apisix: enable_admin: false + proxy_mode: stream stream_proxy: tcp: - addr: 9100 @@ -40,8 +41,8 @@ echo "passed: enable stream proxy only by default" echo " apisix: enable_admin: false + proxy_mode: http&stream stream_proxy: - only: false tcp: - addr: 9100 " > conf/config.yaml @@ -57,6 +58,7 @@ fi echo " apisix: enable_admin: true + proxy_mode: http&stream stream_proxy: tcp: - addr: 9100 @@ -76,6 +78,7 @@ echo " apisix: ssl: ssl_trusted_certificate: t/certs/mtls_ca.crt + proxy_mode: http&stream stream_proxy: tcp: - addr: 9100 @@ -92,6 +95,7 @@ echo "passed: set trust certificate" echo " apisix: + proxy_mode: http&stream stream_proxy: tcp: - addr: 9100 @@ -108,6 +112,7 @@ fi echo " apisix: + proxy_mode: http&stream stream_proxy: tcp: - addr: 9100 diff --git a/t/cli/test_tls_over_tcp.sh b/t/cli/test_tls_over_tcp.sh index 566af9418a24..5d378ce6a9ad 100755 --- a/t/cli/test_tls_over_tcp.sh +++ b/t/cli/test_tls_over_tcp.sh @@ -22,8 +22,8 @@ # check tls over tcp proxy echo " apisix: + proxy_mode: http&stream stream_proxy: - only: false tcp: - addr: 9100 tls: true diff --git a/t/cli/test_validate_config.sh b/t/cli/test_validate_config.sh index 1c00360f1c30..8db581684332 100755 --- a/t/cli/test_validate_config.sh +++ b/t/cli/test_validate_config.sh @@ -82,6 +82,7 @@ deployment: apisix: node_listen: 9080 enable_admin: true + proxy_mode: http&stream stream_proxy: tcp: - "localhost:9100" diff --git a/t/config-center-yaml/plugin-configs.t b/t/config-center-yaml/plugin-configs.t index 2958b13785fa..f10c3651ad45 100644 --- a/t/config-center-yaml/plugin-configs.t +++ 
b/t/config-center-yaml/plugin-configs.t @@ -115,7 +115,7 @@ world --- response_headers in: out --- error_log eval -qr/conf_version: \d+#1,/ +qr/conf_version: \d+#\d+,/ diff --git a/t/config-center-yaml/route-upstream.t b/t/config-center-yaml/route-upstream.t index 53b9d51d3fb2..d5353a449332 100644 --- a/t/config-center-yaml/route-upstream.t +++ b/t/config-center-yaml/route-upstream.t @@ -140,17 +140,17 @@ hello world routes: - id: 1 - uri: /get + uri: /hello upstream_id: 1 upstreams: - id: 1 nodes: - "httpbin.org:80": 1 + "test.com:1980": 1 type: roundrobin #END --- request -GET /get +GET /hello --- error_code: 200 @@ -161,19 +161,19 @@ GET /get routes: - id: 1 - uri: /get + uri: /hello upstream_id: 1 upstreams: - id: 1 nodes: - "httpbin.org:80": 1 + "test.com:1980": 1 type: chash hash_on: header key: "$aaa" #END --- request -GET /get +GET /hello --- error_code: 502 --- error_log invalid configuration: failed to match pattern diff --git a/t/control/healthcheck.t b/t/control/healthcheck.t index 3c9cefff8681..5d40e970739b 100644 --- a/t/control/healthcheck.t +++ b/t/control/healthcheck.t @@ -67,6 +67,7 @@ upstreams: --- config location /t { content_by_lua_block { + local core = require("apisix.core") local json = require("toolkit.json") local t = require("lib.test_admin") local http = require "resty.http" @@ -76,21 +77,44 @@ upstreams: ngx.sleep(2.2) - local code, body, res = t.test('/v1/healthcheck', + local _, _, res = t.test('/v1/healthcheck', ngx.HTTP_GET) res = json.decode(res) + assert(#res == 1, "invalid number of results") table.sort(res[1].nodes, function(a, b) - return a.host < b.host + return a.ip < b.ip end) - ngx.say(json.encode(res)) + ngx.say(core.json.stably_encode(res[1].nodes)) - local code, body, res = t.test('/v1/healthcheck/upstreams/1', + local _, _, res = t.test('/v1/healthcheck/upstreams/1', ngx.HTTP_GET) res = json.decode(res) table.sort(res.nodes, function(a, b) - return a.host < b.host + return a.ip < b.ip end) - ngx.say(json.encode(res)) + 
ngx.say(core.json.stably_encode(res.nodes)) + + local _, _, res = t.test('/v1/healthcheck/upstreams/1', + ngx.HTTP_GET, nil, nil, {["Accept"] = "text/html"}) + local xml2lua = require("xml2lua") + local xmlhandler = require("xmlhandler.tree") + local handler = xmlhandler:new() + local parser = xml2lua.parser(handler) + parser.parse(parser, res) + local matches = 0 + for _, td in ipairs(handler.root.html.body.table.tr) do + if td.td then + if td.td[4] == "127.0.0.2:1988" then + assert(td.td[5] == "unhealthy", "127.0.0.2:1988 is not unhealthy") + matches = matches + 1 + end + if td.td[4] == "127.0.0.1:1980" then + assert(td.td[5] == "healthy", "127.0.0.1:1980 is not healthy") + matches = matches + 1 + end + end + end + assert(matches == 2, "unexpected html") } } --- grep_error_log eval @@ -99,8 +123,8 @@ qr/unhealthy TCP increment \(.+\) for '[^']+'/ unhealthy TCP increment (1/2) for '(127.0.0.2:1988)' unhealthy TCP increment (2/2) for '(127.0.0.2:1988)' --- response_body -[{"healthy_nodes":[{"host":"127.0.0.1","port":1980,"priority":0,"weight":1}],"name":"upstream#/upstreams/1","nodes":[{"host":"127.0.0.1","port":1980,"priority":0,"weight":1},{"host":"127.0.0.2","port":1988,"priority":0,"weight":1}],"src_id":"1","src_type":"upstreams"}] -{"healthy_nodes":[{"host":"127.0.0.1","port":1980,"priority":0,"weight":1}],"name":"upstream#/upstreams/1","nodes":[{"host":"127.0.0.1","port":1980,"priority":0,"weight":1},{"host":"127.0.0.2","port":1988,"priority":0,"weight":1}],"src_id":"1","src_type":"upstreams"} +[{"counter":{"http_failure":0,"success":0,"tcp_failure":0,"timeout_failure":0},"ip":"127.0.0.1","port":1980,"status":"healthy"},{"counter":{"http_failure":0,"success":0,"tcp_failure":2,"timeout_failure":0},"ip":"127.0.0.2","port":1988,"status":"unhealthy"}] 
+[{"counter":{"http_failure":0,"success":0,"tcp_failure":0,"timeout_failure":0},"ip":"127.0.0.1","port":1980,"status":"healthy"},{"counter":{"http_failure":0,"success":0,"tcp_failure":2,"timeout_failure":0},"ip":"127.0.0.2","port":1988,"status":"unhealthy"}] @@ -169,8 +193,8 @@ qr/unhealthy TCP increment \(.+\) for '[^']+'/ unhealthy TCP increment (1/2) for '127.0.0.1(127.0.0.1:1988)' unhealthy TCP increment (2/2) for '127.0.0.1(127.0.0.1:1988)' --- response_body -[{"healthy_nodes":[{"host":"127.0.0.1","port":1980,"priority":0,"weight":1}],"name":"upstream#/routes/1","nodes":[{"host":"127.0.0.1","port":1980,"priority":0,"weight":1},{"host":"127.0.0.1","port":1988,"priority":0,"weight":1}],"src_id":"1","src_type":"routes"}] -{"healthy_nodes":[{"host":"127.0.0.1","port":1980,"priority":0,"weight":1}],"name":"upstream#/routes/1","nodes":[{"host":"127.0.0.1","port":1980,"priority":0,"weight":1},{"host":"127.0.0.1","port":1988,"priority":0,"weight":1}],"src_id":"1","src_type":"routes"} +[{"name":"/routes/1","nodes":[{"counter":{"http_failure":0,"success":0,"tcp_failure":0,"timeout_failure":0},"hostname":"127.0.0.1","ip":"127.0.0.1","port":1980,"status":"healthy"},{"counter":{"http_failure":0,"success":0,"tcp_failure":2,"timeout_failure":0},"hostname":"127.0.0.1","ip":"127.0.0.1","port":1988,"status":"unhealthy"}],"type":"http"}] +{"name":"/routes/1","nodes":[{"counter":{"http_failure":0,"success":0,"tcp_failure":0,"timeout_failure":0},"hostname":"127.0.0.1","ip":"127.0.0.1","port":1980,"status":"healthy"},{"counter":{"http_failure":0,"success":0,"tcp_failure":2,"timeout_failure":0},"hostname":"127.0.0.1","ip":"127.0.0.1","port":1988,"status":"unhealthy"}],"type":"http"} @@ -244,8 +268,8 @@ qr/unhealthy TCP increment \(.+\) for '[^']+'/ unhealthy TCP increment (1/2) for '127.0.0.1(127.0.0.1:1988)' unhealthy TCP increment (2/2) for '127.0.0.1(127.0.0.1:1988)' --- response_body 
-[{"healthy_nodes":{},"name":"upstream#/services/1","nodes":[{"host":"127.0.0.1","port":1980,"priority":0,"weight":1},{"host":"127.0.0.1","port":1988,"priority":0,"weight":1}],"src_id":"1","src_type":"services"}] -{"healthy_nodes":{},"name":"upstream#/services/1","nodes":[{"host":"127.0.0.1","port":1980,"priority":0,"weight":1},{"host":"127.0.0.1","port":1988,"priority":0,"weight":1}],"src_id":"1","src_type":"services"} +[{"name":"/services/1","nodes":[{"counter":{"http_failure":0,"success":0,"tcp_failure":2,"timeout_failure":0},"hostname":"127.0.0.1","ip":"127.0.0.1","port":1988,"status":"unhealthy"}],"type":"http"}] +{"name":"/services/1","nodes":[{"counter":{"http_failure":0,"success":0,"tcp_failure":2,"timeout_failure":0},"hostname":"127.0.0.1","ip":"127.0.0.1","port":1988,"status":"unhealthy"}],"type":"http"} @@ -279,69 +303,3 @@ GET /v1/healthcheck/route/1 --- error_code: 400 --- response_body {"error_msg":"invalid src type route"} - - - -=== TEST 7: default health status ---- yaml_config -apisix: - node_listen: 1984 -deployment: - role: data_plane - role_data_plane: - config_provider: yaml ---- apisix_yaml -routes: - - - uris: - - /hello - upstream_id: 1 -upstreams: - - nodes: - "127.0.0.1:1988": 1 - "127.0.0.2:1980": 1 - type: chash - id: 1 - key: "uri" - checks: - active: - http_path: "/status" - healthy: - interval: 1 - successes: 1 - unhealthy: - interval: 1 - http_failures: 1 -#END ---- config - location /t { - content_by_lua_block { - local json = require("toolkit.json") - local t = require("lib.test_admin") - - -- not hit - local code, body, res = t.test('/v1/healthcheck', - ngx.HTTP_GET) - ngx.print(res) - - -- hit, but no enough to mark node to unhealthy - local http = require "resty.http" - local uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/hello" - local httpc = http.new() - local res, err = httpc:request_uri(uri, {method = "GET"}) - local code, body, res = t.test('/v1/healthcheck', - ngx.HTTP_GET) - res = json.decode(res) - table.sort(res[1].healthy_nodes, function(a, b) - return a.host < b.host - end) - ngx.say(json.encode(res[1].healthy_nodes)) - } - } ---- grep_error_log eval -qr/unhealthy TCP increment \(.+\) for '[^']+'/ ---- grep_error_log_out -unhealthy TCP increment (1/2) for '(127.0.0.1:1988)' ---- response_body -{} -[{"host":"127.0.0.1","port":1988,"priority":0,"weight":1},{"host":"127.0.0.2","port":1980,"priority":0,"weight":1}] diff --git a/t/core/config_etcd.t b/t/core/config_etcd.t index 380b82522138..d81cf5615a2b 100644 --- a/t/core/config_etcd.t +++ b/t/core/config_etcd.t @@ -84,9 +84,9 @@ end --- request GET /t --- grep_error_log chop -peer closed connection in SSL handshake while SSL handshaking to upstream +peer closed connection in SSL handshake --- grep_error_log_out eval -qr/(peer closed connection in SSL handshake while SSL handshaking to upstream){1,}/ +qr/(peer closed connection in SSL handshake){1,}/ @@ -118,42 +118,7 @@ qr/(closed){1,}/ -=== TEST 4: originate TLS connection to etcd cluster and verify TLS certificate (default behavior) ---- yaml_config -apisix: - node_listen: 1984 - ssl: - ssl_trusted_certificate: t/servroot/conf/cert/etcd.pem -deployment: - role: traditional - role_traditional: - config_provider: etcd - etcd: - host: - - "https://127.0.0.1:12379" ---- extra_init_by_lua -local health_check = require("resty.etcd.health_check") -health_check.get_target_status = function() - return true -end ---- config - location /t { - content_by_lua_block { - ngx.sleep(4) - ngx.say("ok") - } - } ---- timeout: 5 ---- request -GET /t ---- grep_error_log chop -10:certificate has expired ---- grep_error_log_out eval -qr/(10:certificate has expired){1,}/ - - - -=== TEST 5: set route(id: 1) to etcd cluster with TLS +=== TEST 4: set route(id: 1) to etcd cluster with TLS --- 
yaml_config apisix: node_listen: 1984 @@ -197,7 +162,7 @@ passed -=== TEST 6: get route(id: 1) from etcd cluster with TLS +=== TEST 5: get route(id: 1) from etcd cluster with TLS --- yaml_config apisix: node_listen: 1984 @@ -233,7 +198,7 @@ passed -=== TEST 7: ensure only one auth request per subsystem for all the etcd sync +=== TEST 6: ensure only one auth request per subsystem for all the etcd sync --- yaml_config apisix: node_listen: 1984 @@ -266,11 +231,10 @@ qr/etcd auth failed/ etcd auth failed etcd auth failed etcd auth failed -etcd auth failed -=== TEST 8: ensure add prefix automatically for _M.getkey +=== TEST 7: ensure add prefix automatically for _M.getkey --- config location /t { content_by_lua_block { @@ -301,7 +265,7 @@ passed -=== TEST 9: Test ETCD health check mode switch during APISIX startup +=== TEST 8: Test ETCD health check mode switch during APISIX startup --- config location /t { content_by_lua_block { @@ -320,7 +284,7 @@ qr/healthy check use round robin -=== TEST 10: last_err can be nil when the reconnection is successful +=== TEST 9: last_err can be nil when the reconnection is successful --- config location /t { content_by_lua_block { @@ -350,7 +314,7 @@ passed -=== TEST 11: reloaded data may be in res.body.node (special kvs structure) +=== TEST 10: reloaded data may be in res.body.node (special kvs structure) --- yaml_config deployment: role: traditional @@ -397,7 +361,7 @@ qr/readdir key: fake res: \{("value":"bar","key":"foo"|"key":"foo","value":"bar" -=== TEST 12: reloaded data may be in res.body.node (admin_api_version is v2) +=== TEST 11: reloaded data may be in res.body.node (admin_api_version is v2) --- yaml_config deployment: role: traditional @@ -444,3 +408,81 @@ qr/readdir key: fake res: \{.*"nodes":\[\{.*"value":\["bar"\].*\}\].*\}/ --- wait: 1 --- no_error_log [error] + + + +=== TEST 12: test route with special character "-" +--- yaml_config +deployment: + role: traditional + role_traditional: + config_provider: etcd + admin: 
+ admin_key: null + etcd: + prefix: "/apisix-test" +--- config + location /t { + content_by_lua_block { + ngx.sleep(0.5) + + local http = require "resty.http" + local t = require("lib.test_admin").test + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + } + }]] + ) + if code >= 300 then + ngx.status = code + return + end + ngx.say(body) + + -- hit + local httpc = http.new() + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello" + local res, err = httpc:request_uri(uri, { + method = "GET" + }) + + if not res then + ngx.log(ngx.ERR, err) + return + end + ngx.print(res.body) + + -- delete route + code, body = t('/apisix/admin/routes/1', ngx.HTTP_DELETE) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + + -- hit + res, err = httpc:request_uri(uri, { + method = "GET" + }) + + if not res then + ngx.log(ngx.ERR, err) + return + end + ngx.print(res.body) + } + } +--- request +GET /t +--- response_body +passed +hello world +passed +{"error_msg":"404 Route Not Found"} diff --git a/t/core/etcd-grpc-auth-fail.t b/t/core/etcd-grpc-auth-fail.t deleted file mode 100644 index b11f51ae24bf..000000000000 --- a/t/core/etcd-grpc-auth-fail.t +++ /dev/null @@ -1,106 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -BEGIN { - $ENV{"ETCD_ENABLE_AUTH"} = "false"; - delete $ENV{"FLUSH_ETCD"}; -} - -use t::APISIX; - -my $nginx_binary = $ENV{'TEST_NGINX_BINARY'} || 'nginx'; -my $version = eval { `$nginx_binary -V 2>&1` }; - -if ($version !~ m/\/apisix-nginx-module/) { - plan(skip_all => "apisix-nginx-module not installed"); -} else { - plan('no_plan'); -} - - -repeat_each(1); -no_long_string(); -no_root_location(); -log_level("info"); - -# Authentication is enabled at etcd and credentials are set -system('etcdctl --endpoints="http://127.0.0.1:2379" user add root:5tHkHhYkjr6cQY'); -system('etcdctl --endpoints="http://127.0.0.1:2379" role add root'); -system('etcdctl --endpoints="http://127.0.0.1:2379" user grant-role root root'); -system('etcdctl --endpoints="http://127.0.0.1:2379" role list'); -system('etcdctl --endpoints="http://127.0.0.1:2379" user user list'); -# Grant the user access to the specified directory -system('etcdctl --endpoints="http://127.0.0.1:2379" user add apisix:abc123'); -system('etcdctl --endpoints="http://127.0.0.1:2379" role add apisix'); -system('etcdctl --endpoints="http://127.0.0.1:2379" user grant-role apisix apisix'); -system('etcdctl --endpoints=http://127.0.0.1:2379 role grant-permission apisix --prefix=true readwrite /apisix/'); -system('etcdctl --endpoints="http://127.0.0.1:2379" auth enable'); - -run_tests; - -# Authentication is disabled at etcd -system('etcdctl --endpoints="http://127.0.0.1:2379" --user root:5tHkHhYkjr6cQY auth disable'); -system('etcdctl --endpoints="http://127.0.0.1:2379" user delete root'); -system('etcdctl 
--endpoints="http://127.0.0.1:2379" role delete root'); -system('etcdctl --endpoints="http://127.0.0.1:2379" user delete apisix'); -system('etcdctl --endpoints="http://127.0.0.1:2379" role delete apisix'); -__DATA__ - -=== TEST 1: Set and Get a value pass ---- config - location /t { - content_by_lua_block { - local core = require("apisix.core") - local key = "/test_key" - local val = "test_value" - local res, err = core.etcd.set(key, val) - ngx.say(err) - } - } ---- request -GET /t ---- error_log eval -qr /(insufficient credentials code: 401|etcdserver: user name is empty)/ - - - -=== TEST 2: etcd grants permissions with a different prefix than the one used by apisix, etcd will forbidden ---- config - location /t { - content_by_lua_block { - local core = require("apisix.core") - local key = "/test_key" - local val = "test_value" - local res, err = core.etcd.set(key, val) - ngx.say(err) - } - } ---- yaml_config -deployment: - role: traditional - role_traditional: - config_provider: etcd - etcd: - host: - - "http://127.0.0.1:2379" - use_grpc: false - prefix: "/apisix" - user: apisix - password: abc123 ---- request -GET /t ---- error_log eval -qr /etcd forbidden code: 403/ diff --git a/t/core/etcd-grpc-auth.t b/t/core/etcd-grpc-auth.t deleted file mode 100644 index 12e2ce28079a..000000000000 --- a/t/core/etcd-grpc-auth.t +++ /dev/null @@ -1,108 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -BEGIN { - $ENV{"ETCD_ENABLE_AUTH"} = "true"; - delete $ENV{"FLUSH_ETCD"}; -} - -use t::APISIX; - -my $nginx_binary = $ENV{'TEST_NGINX_BINARY'} || 'nginx'; -my $version = eval { `$nginx_binary -V 2>&1` }; - -if ($version !~ m/\/apisix-nginx-module/) { - plan(skip_all => "apisix-nginx-module not installed"); -} else { - plan('no_plan'); -} - - -repeat_each(1); -no_long_string(); -no_root_location(); -log_level("info"); - -# Authentication is enabled at etcd and credentials are set -system('etcdctl --endpoints="http://127.0.0.1:2379" user add root:5tHkHhYkjr6cQY'); -system('etcdctl --endpoints="http://127.0.0.1:2379" role add root'); -system('etcdctl --endpoints="http://127.0.0.1:2379" user grant-role root root'); -system('etcdctl --endpoints="http://127.0.0.1:2379" role list'); -system('etcdctl --endpoints="http://127.0.0.1:2379" user user list'); -# Grant the user access to the specified directory -system('etcdctl --endpoints="http://127.0.0.1:2379" user add apisix:abc123'); -system('etcdctl --endpoints="http://127.0.0.1:2379" role add apisix'); -system('etcdctl --endpoints="http://127.0.0.1:2379" user grant-role apisix apisix'); -system('etcdctl --endpoints=http://127.0.0.1:2379 role grant-permission apisix --prefix=true readwrite /apisix'); -system('etcdctl --endpoints="http://127.0.0.1:2379" auth enable'); - -run_tests; - -# Authentication is disabled at etcd -system('etcdctl --endpoints="http://127.0.0.1:2379" --user root:5tHkHhYkjr6cQY auth disable'); -system('etcdctl --endpoints="http://127.0.0.1:2379" user delete root'); -system('etcdctl 
--endpoints="http://127.0.0.1:2379" role delete root'); -system('etcdctl --endpoints="http://127.0.0.1:2379" user delete apisix'); -system('etcdctl --endpoints="http://127.0.0.1:2379" role delete apisix'); - - -__DATA__ - -=== TEST 1: Set and Get a value pass with authentication ---- config - location /t { - content_by_lua_block { - local core = require("apisix.core") - local key = "/test_key" - local val = "test_value" - core.etcd.set(key, val) - local res, err = core.etcd.get(key) - ngx.say(res.body.node.value) - core.etcd.delete(val) - } - } ---- request -GET /t ---- response_body -test_value - - - -=== TEST 2: etcd grants permissions with the same prefix as apisix uses, etcd is normal ---- config - location /t { - content_by_lua_block { - local core = require("apisix.core") - local key = "/test_key" - local val = "test_value" - local res, err = core.etcd.set(key, val) - ngx.say(err) - } - } ---- yaml_config -deployment: - role: traditional - role_traditional: - config_provider: etcd - etcd: - use_grpc: true - host: - - "http://127.0.0.1:2379" - prefix: "/apisix" - user: apisix - password: abc123 ---- request -GET /t diff --git a/t/core/etcd-grpc-mtls.t b/t/core/etcd-grpc-mtls.t deleted file mode 100644 index 4e9ca306edf2..000000000000 --- a/t/core/etcd-grpc-mtls.t +++ /dev/null @@ -1,292 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -use t::APISIX; - -my $nginx_binary = $ENV{'TEST_NGINX_BINARY'} || 'nginx'; -my $version = eval { `$nginx_binary -V 2>&1` }; - -if ($version !~ m/\/apisix-nginx-module/) { - plan(skip_all => "apisix-nginx-module not installed"); -} else { - plan('no_plan'); -} - -run_tests; - -__DATA__ - -=== TEST 1: run etcd in init_worker phase ---- yaml_config -deployment: - role: traditional - role_traditional: - config_provider: etcd - etcd: - use_grpc: true - host: - - "https://127.0.0.1:22379" - prefix: "/apisix" - tls: - cert: t/certs/mtls_client.crt - key: t/certs/mtls_client.key - verify: false ---- extra_init_worker_by_lua - if ngx.worker.id() ~= 0 then - return - end - - local etcd = require("apisix.core.etcd") - assert(etcd.set("/a", "ab")) - - local out = "" - local res, err = etcd.get("/a") - if not res then - ngx.log(ngx.ERR, err) - return - end - out = out .. res.body.node.value - - local res, err = etcd.delete("/a") - if not res then - ngx.log(ngx.ERR, err) - return - end - out = out .. res.status - - local res, err = etcd.get("/a") - if not res then - ngx.log(ngx.ERR, err) - return - end - out = out .. 
res.status - ngx.log(ngx.WARN, out) ---- config - location /t { - return 200; - } ---- request -GET /t ---- grep_error_log eval -qr/init_worker_by_lua:\d+: [^,]+/ ---- grep_error_log_out -init_worker_by_lua:31: ab200404 - - - -=== TEST 2: run etcd in init phase (stream) ---- yaml_config -deployment: - role: traditional - role_traditional: - config_provider: etcd - etcd: - use_grpc: true - host: - - "https://127.0.0.1:22379" - prefix: "/apisix" - tls: - cert: t/certs/mtls_client.crt - key: t/certs/mtls_client.key - verify: false ---- stream_extra_init_worker_by_lua - if ngx.worker.id() ~= 0 then - return - end - - local etcd = require("apisix.core.etcd") - assert(etcd.set("/a", "ab")) - - local out = "" - local res, err = etcd.get("/a") - if not res then - ngx.log(ngx.ERR, err) - return - end - out = out .. res.body.node.value - - local res, err = etcd.delete("/a") - if not res then - ngx.log(ngx.ERR, err) - return - end - out = out .. res.status - - local res, err = etcd.get("/a") - if not res then - ngx.log(ngx.ERR, err) - return - end - out = out .. 
res.status - ngx.log(ngx.WARN, out) ---- stream_server_config - content_by_lua_block { - ngx.say("ok") - } ---- stream_enable ---- grep_error_log eval -qr/init_worker_by_lua:\d+: \S+/ ---- grep_error_log_out -init_worker_by_lua:31: ab200404, - - - -=== TEST 3: sync ---- extra_yaml_config -deployment: - role: traditional - role_traditional: - config_provider: etcd - etcd: - use_grpc: true - host: - - "https://127.0.0.1:22379" - prefix: "/apisix" - tls: - cert: t/certs/mtls_client.crt - key: t/certs/mtls_client.key - verify: false ---- config - location /t { - content_by_lua_block { - local core = require("apisix.core") - local t = require("lib.test_admin").test - - local consumers, _ = core.config.new("/consumers", { - automatic = true, - item_schema = core.schema.consumer, - }) - - ngx.sleep(0.6) - local idx = consumers.prev_index - - local code, body = t('/apisix/admin/consumers', - ngx.HTTP_PUT, - [[{ - "username": "jobs", - "plugins": { - "basic-auth": { - "username": "jobs", - "password": "678901" - } - } - }]]) - - ngx.sleep(2) - local new_idx = consumers.prev_index - if new_idx > idx then - ngx.say("prev_index updated") - else - ngx.say("prev_index not update") - end - } - } ---- request -GET /t ---- response_body -prev_index updated ---- error_log -waitdir key - - - -=== TEST 4: sync (stream) ---- extra_yaml_config -deployment: - role: traditional - role_traditional: - config_provider: etcd - etcd: - use_grpc: true - host: - - "https://127.0.0.1:22379" - prefix: "/apisix" - tls: - cert: t/certs/mtls_client.crt - key: t/certs/mtls_client.key - verify: false ---- stream_server_config - content_by_lua_block { - local core = require("apisix.core") - - local sr, _ = core.config.new("/stream_routes", { - automatic = true, - item_schema = core.schema.stream_routes, - }) - - ngx.sleep(0.6) - local idx = sr.prev_index - - assert(core.etcd.set("/stream_routes/1", - { - plugins = { - } - })) - - ngx.sleep(2) - local new_idx = sr.prev_index - if new_idx > idx then - 
ngx.say("prev_index updated") - else - ngx.say("prev_index not update") - end - } ---- stream_enable ---- stream_response -prev_index updated ---- error_log -waitdir key - - - -=== TEST 5: ssl_trusted_certificate ---- yaml_config -apisix: - ssl: - ssl_trusted_certificate: t/certs/mtls_ca.crt -deployment: - role: traditional - role_traditional: - config_provider: etcd - etcd: - use_grpc: true - host: - - "https://admin.apisix.dev:22379" - prefix: "/apisix" - tls: - cert: t/certs/mtls_client.crt - key: t/certs/mtls_client.key ---- extra_init_worker_by_lua - if ngx.worker.id() ~= 0 then - return - end - - local etcd = require("apisix.core.etcd") - assert(etcd.set("/a", "ab")) - local res, err = etcd.get("/a") - if not res then - ngx.log(ngx.ERR, err) - return - end - ngx.log(ngx.WARN, res.body.node.value) ---- config - location /t { - return 200; - } ---- request -GET /t ---- error_log -init_worker_by_lua:14: ab diff --git a/t/core/etcd-sync.t b/t/core/etcd-sync.t index e74ae19ec710..aef5e23619a9 100644 --- a/t/core/etcd-sync.t +++ b/t/core/etcd-sync.t @@ -22,65 +22,7 @@ run_tests; __DATA__ -=== TEST 1: minus timeout to watch repeatedly ---- yaml_config -deployment: - role: traditional - role_traditional: - config_provider: etcd - etcd: - # this test requires the HTTP long pull as the gRPC stream is shared and can't change - # default timeout in the fly - use_grpc: false - admin: - admin_key: null ---- config - location /t { - content_by_lua_block { - local core = require("apisix.core") - local t = require("lib.test_admin").test - - local consumers, _ = core.config.new("/consumers", { - automatic = true, - item_schema = core.schema.consumer, - timeout = 0.2 - }) - - ngx.sleep(0.6) - local idx = consumers.prev_index - - local code, body = t('/apisix/admin/consumers', - ngx.HTTP_PUT, - [[{ - "username": "jobs", - "plugins": { - "basic-auth": { - "username": "jobs", - "password": "123456" - } - } - }]]) - - ngx.sleep(2) - local new_idx = consumers.prev_index - 
core.log.info("idx:", idx, " new_idx: ", new_idx) - if new_idx > idx then - ngx.say("prev_index updated") - else - ngx.say("prev_index not update") - end - } - } ---- request -GET /t ---- response_body -prev_index updated ---- error_log eval -qr/(create watch stream for key|cancel watch connection success)/ - - - -=== TEST 2: using default timeout +=== TEST 1: using default timeout --- config location /t { content_by_lua_block { @@ -126,7 +68,7 @@ waitdir key -=== TEST 3: no update +=== TEST 2: no update --- config location /t { content_by_lua_block { @@ -162,7 +104,7 @@ prev_index not update -=== TEST 4: bad plugin configuration (validated via incremental sync) +=== TEST 3: bad plugin configuration (validated via incremental sync) --- config location /t { content_by_lua_block { @@ -182,7 +124,7 @@ property "uri" validation failed -=== TEST 5: bad plugin configuration (validated via full sync) +=== TEST 4: bad plugin configuration (validated via full sync) --- config location /t { content_by_lua_block { @@ -196,7 +138,7 @@ property "uri" validation failed -=== TEST 6: bad plugin configuration (validated without sync during start) +=== TEST 5: bad plugin configuration (validated without sync during start) --- extra_yaml_config disable_sync_configuration_during_start: true --- config diff --git a/t/core/etcd.t b/t/core/etcd.t index eaa425ec6b15..670ada081ab2 100644 --- a/t/core/etcd.t +++ b/t/core/etcd.t @@ -396,40 +396,8 @@ ab --- request GET /t --- grep_error_log eval -qr/init_by_lua:\d+: \S+/ ---- grep_error_log_out -init_by_lua:12: ab -init_by_lua:19: 200 -init_by_lua:26: 404 - - - -=== TEST 8: error handling in server_version ---- yaml_config -deployment: - role: traditional - role_traditional: - config_provider: etcd - etcd: - host: - - "http://127.0.0.1:2379" - prefix: "/apisix" ---- config - location /t { - content_by_lua_block { - local etcd_lib = require("resty.etcd") - -- the mock won't take effect when using gRPC because the connection will be cached - 
etcd_lib.new = function() - return nil, "ouch" - end - local etcd = require("apisix.core.etcd") - local res, err = etcd.server_version() - ngx.say(err) - } - } ---- request -GET /t ---- response_body -ouch ---- error_log -failed to get server_info from etcd +qr/init_by_lua.*: \S+/ +--- grep_error_log_out eval +qr{init_by_lua.* ab +init_by_lua.* 200 +init_by_lua.* 404} diff --git a/t/core/response.t b/t/core/response.t index c7cb92a56d56..dc748a7ad025 100644 --- a/t/core/response.t +++ b/t/core/response.t @@ -188,6 +188,7 @@ aaa: } body_filter_by_lua_block { local core = require("apisix.core") + ngx.ctx._plugin_name = "test" local final_body = core.response.hold_body_chunk(ngx.ctx) if not final_body then return diff --git a/t/core/schema_def.t b/t/core/schema_def.t index 0afee9c9de60..b6a7bba05b0c 100644 --- a/t/core/schema_def.t +++ b/t/core/schema_def.t @@ -82,3 +82,60 @@ __DATA__ } --- response_body passed + + + +=== TEST 2: Missing required fields of global_rule. +--- config + location /t { + content_by_lua_block { + local schema_def = require("apisix.schema_def") + local core = require("apisix.core") + + local cases = { + {}, + { id = "ADfwefq12D9s" }, + { id = 1 }, + { + plugins = { + foo = "bar", + }, + }, + } + for _, c in ipairs(cases) do + local ok, err = core.schema.check(schema_def.global_rule, c) + assert(not ok) + assert(err ~= nil) + ngx.say("ok: ", ok, " err: ", err) + end + } + } +--- request +GET /t +--- response_body eval +qr/ok: false err: property "(id|plugins)" is required/ + + + +=== TEST 3: Sanity check with minimal valid configuration. 
+--- config + location /t { + content_by_lua_block { + local schema_def = require("apisix.schema_def") + local core = require("apisix.core") + + local case = { + id = 1, + plugins = {}, + } + + local ok, err = core.schema.check(schema_def.global_rule, case) + assert(ok) + assert(err == nil) + ngx.say("passed") + } + } +--- request +GET /t +--- response_body +passed diff --git a/t/core/utils.t b/t/core/utils.t index 4e6b0d76619d..9faa545e17e1 100644 --- a/t/core/utils.t +++ b/t/core/utils.t @@ -361,3 +361,35 @@ apisix: GET /t --- error_log failed to parse domain: ipv6.local + + + +=== TEST 12: get_last_index +--- config + location /t { + content_by_lua_block { + local string_rfind = require("pl.stringx").rfind + local cases = { + {"you are welcome", "co"}, + {"nice to meet you", "meet"}, + {"chicken run", "cc"}, + {"day day up", "day"}, + {"happy new year", "e"}, + {"apisix__1928", "__"} + } + + for _, case in ipairs(cases) do + local res = string_rfind(case[1], case[2]) + ngx.say("res:", res) + end + } + } +--- request +GET /t +--- response_body +res:12 +res:9 +res:nil +res:5 +res:12 +res:7 diff --git a/t/debug/debug-mode.t b/t/debug/debug-mode.t index ca0798ce002b..5d645e8e62ec 100644 --- a/t/debug/debug-mode.t +++ b/t/debug/debug-mode.t @@ -61,8 +61,8 @@ loaded plugin and sort by priority: 2510 name: jwt-auth loaded plugin and sort by priority: 2500 name: key-auth loaded plugin and sort by priority: 2400 name: consumer-restriction loaded plugin and sort by priority: 2000 name: authz-keycloak +loaded plugin and sort by priority: 1085 name: proxy-cache loaded plugin and sort by priority: 1010 name: proxy-mirror -loaded plugin and sort by priority: 1009 name: proxy-cache loaded plugin and sort by priority: 1008 name: proxy-rewrite loaded plugin and sort by priority: 1005 name: api-breaker loaded plugin and sort by priority: 1003 name: limit-conn diff --git a/t/deployment/conf_server.t b/t/deployment/conf_server.t deleted file mode 100644 index 
2e15ed8bdc61..000000000000 --- a/t/deployment/conf_server.t +++ /dev/null @@ -1,467 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -use t::APISIX 'no_plan'; - -worker_connections(256); - -add_block_preprocessor(sub { - my ($block) = @_; - - if (!$block->request) { - $block->set_value("request", "GET /t"); - } - -}); - -run_tests(); - -__DATA__ - -=== TEST 1: sync in https ---- config - location /t { - content_by_lua_block { - local core = require("apisix.core") - local t = require("lib.test_admin").test - - local consumers, _ = core.config.new("/consumers", { - automatic = true, - item_schema = core.schema.consumer, - }) - - ngx.sleep(0.6) - local idx = consumers.prev_index - - local code, body = t('/apisix/admin/consumers', - ngx.HTTP_PUT, - [[{ - "username": "jobs", - "plugins": { - "basic-auth": { - "username": "jobs", - "password": "678901" - } - } - }]]) - - ngx.sleep(2) - local new_idx = consumers.prev_index - if new_idx > idx then - ngx.say("prev_index updated") - else - ngx.say("prev_index not update") - end - } - } ---- response_body -prev_index updated ---- yaml_config -deployment: - role: traditional - role_traditional: - config_provider: etcd - admin: - admin_key: ~ - etcd: - prefix: "/apisix" - host: - - 
https://127.0.0.1:12379 - tls: - verify: false - - - -=== TEST 2: mix ip & domain ---- config - location /t { - content_by_lua_block { - local etcd = require("apisix.core.etcd") - assert(etcd.set("/apisix/test", "foo")) - local res = assert(etcd.get("/apisix/test")) - ngx.say(res.body.node.value) - } - } ---- yaml_config -deployment: - role: traditional - role_traditional: - config_provider: etcd - etcd: - prefix: "/apisix" - host: - - http://127.0.0.2:2379 - - http://localhost:2379 - - http://[::1]:2379 ---- error_log -dns resolve localhost, result: ---- response_body -foo - - - -=== TEST 3: resolve domain, result changed ---- extra_init_by_lua - local resolver = require("apisix.core.resolver") - local old_f = resolver.parse_domain - local counter = 0 - resolver.parse_domain = function (domain) - if domain == "localhost" then - counter = counter + 1 - if counter % 2 == 0 then - return "127.0.0.2" - else - return "127.0.0.3" - end - else - return old_f(domain) - end - end ---- config - location /t { - content_by_lua_block { - local etcd = require("apisix.core.etcd") - assert(etcd.set("/apisix/test", "foo")) - local res = assert(etcd.get("/apisix/test")) - ngx.say(res.body.node.value) - } - } ---- yaml_config -deployment: - role: traditional - role_traditional: - config_provider: etcd - etcd: - prefix: "/apisix" - host: - # use localhost so the connection is OK in the situation that the DNS - # resolve is not done in APISIX - - http://localhost:2379 ---- response_body -foo ---- error_log -localhost is resolved to: 127.0.0.3 -localhost is resolved to: 127.0.0.2 - - - -=== TEST 4: update balancer if the DNS result changed ---- extra_init_by_lua - local etcd = require("apisix.core.etcd") - etcd.get_etcd_syncer = function () - return etcd.new() - end - - local resolver = require("apisix.core.resolver") - local old_f = resolver.parse_domain - package.loaded.counter = 0 - resolver.parse_domain = function (domain) - if domain == "x.com" then - local counter = 
package.loaded.counter - package.loaded.counter = counter + 1 - if counter % 2 == 0 then - return "127.0.0.2" - else - return "127.0.0.3" - end - else - return old_f(domain) - end - end - - local picker = require("apisix.balancer.least_conn") - package.loaded.n_picker = 0 - local old_f = picker.new - picker.new = function (nodes, upstream) - package.loaded.n_picker = package.loaded.n_picker + 1 - return old_f(nodes, upstream) - end ---- config - location /t { - content_by_lua_block { - local etcd = require("apisix.core.etcd") - assert(etcd.set("/apisix/test", "foo")) - local res = assert(etcd.get("/apisix/test")) - ngx.say(res.body.node.value) - local counter = package.loaded.counter - local n_picker = package.loaded.n_picker - if counter == n_picker then - ngx.say("OK") - else - ngx.say(counter, " ", n_picker) - end - } - } ---- yaml_config -deployment: - role: traditional - role_traditional: - config_provider: etcd - etcd: - prefix: "/apisix" - host: - - http://127.0.0.1:2379 - - http://x.com:2379 ---- response_body -foo -OK ---- error_log -x.com is resolved to: 127.0.0.3 -x.com is resolved to: 127.0.0.2 - - - -=== TEST 5: retry ---- config - location /t { - content_by_lua_block { - local etcd = require("apisix.core.etcd") - assert(etcd.set("/apisix/test", "foo")) - local res = assert(etcd.get("/apisix/test")) - ngx.say(res.body.node.value) - } - } ---- yaml_config -deployment: - role: traditional - role_traditional: - config_provider: etcd - etcd: - prefix: "/apisix" - host: - - http://127.0.0.1:1979 - - http://[::1]:1979 - - http://localhost:2379 ---- error_log -connect() failed ---- response_body -foo - - - -=== TEST 6: check default SNI ---- http_config -server { - listen 12345 ssl; - ssl_certificate cert/apisix.crt; - ssl_certificate_key cert/apisix.key; - - ssl_certificate_by_lua_block { - local ngx_ssl = require "ngx.ssl" - ngx.log(ngx.WARN, "Receive SNI: ", ngx_ssl.server_name()) - } - - location / { - proxy_pass http://127.0.0.1:2379; - } -} ---- config 
- location /t { - content_by_lua_block { - local etcd = require("apisix.core.etcd") - assert(etcd.set("/apisix/test", "foo")) - local res = assert(etcd.get("/apisix/test")) - ngx.say(res.body.node.value) - } - } ---- response_body -foo ---- yaml_config -deployment: - role: traditional - role_traditional: - config_provider: etcd - etcd: - prefix: "/apisix" - host: - - https://127.0.0.1:12379 - - https://localhost:12345 - tls: - verify: false ---- error_log -Receive SNI: localhost - - - -=== TEST 7: check configured SNI ---- http_config -server { - listen 12345 ssl; - ssl_certificate cert/apisix.crt; - ssl_certificate_key cert/apisix.key; - - ssl_certificate_by_lua_block { - local ngx_ssl = require "ngx.ssl" - ngx.log(ngx.WARN, "Receive SNI: ", ngx_ssl.server_name()) - } - - location / { - proxy_pass http://127.0.0.1:2379; - } -} ---- config - location /t { - content_by_lua_block { - local etcd = require("apisix.core.etcd") - assert(etcd.set("/apisix/test", "foo")) - local res = assert(etcd.get("/apisix/test")) - ngx.say(res.body.node.value) - } - } ---- response_body -foo ---- yaml_config -deployment: - role: traditional - role_traditional: - config_provider: etcd - etcd: - prefix: "/apisix" - host: - - https://127.0.0.1:12379 - - https://127.0.0.1:12345 - tls: - verify: false - sni: "x.com" ---- error_log -Receive SNI: x.com - - - -=== TEST 8: check Host header ---- http_config -server { - listen 12345; - location / { - access_by_lua_block { - ngx.log(ngx.WARN, "Receive Host: ", ngx.var.http_host) - } - proxy_pass http://127.0.0.1:2379; - } -} ---- config - location /t { - content_by_lua_block { - local etcd = require("apisix.core.etcd") - assert(etcd.set("/apisix/test", "foo")) - local res = assert(etcd.get("/apisix/test")) - ngx.say(res.body.node.value) - } - } ---- response_body -foo ---- yaml_config -deployment: - role: traditional - role_traditional: - config_provider: etcd - etcd: - prefix: "/apisix" - host: - - http://127.0.0.1:12345 - - 
http://localhost:12345 ---- error_log -Receive Host: localhost -Receive Host: 127.0.0.1 - - - -=== TEST 9: check Host header after retry ---- http_config -server { - listen 12345; - location / { - access_by_lua_block { - ngx.log(ngx.WARN, "Receive Host: ", ngx.var.http_host) - } - proxy_pass http://127.0.0.1:2379; - } -} ---- config - location /t { - content_by_lua_block { - local etcd = require("apisix.core.etcd") - assert(etcd.set("/apisix/test", "foo")) - local res = assert(etcd.get("/apisix/test")) - ngx.say(res.body.node.value) - } - } ---- response_body -foo ---- yaml_config -deployment: - role: traditional - role_traditional: - config_provider: etcd - etcd: - prefix: "/apisix" - host: - - http://127.0.0.1:1979 - - http://localhost:12345 ---- error_log -Receive Host: localhost - - - -=== TEST 10: default timeout ---- config - location /t { - content_by_lua_block { - local etcd = require("apisix.core.etcd") - local etcd_cli = require("resty.etcd") - local f = etcd_cli.new - local timeout - etcd_cli.new = function(conf) - timeout = conf.timeout - return f(conf) - end - etcd.new() - ngx.say(timeout) - } - } ---- yaml_config -deployment: - role: traditional - role_traditional: - config_provider: etcd - etcd: - prefix: "/apisix" - host: - - http://127.0.0.1:2379 ---- response_body -30 - - - -=== TEST 11: ipv6 ---- config - location /t { - content_by_lua_block { - local etcd = require("apisix.core.etcd") - assert(etcd.set("/apisix/test", "foo")) - local res = assert(etcd.get("/apisix/test")) - ngx.say(res.body.node.value) - } - } ---- yaml_config -deployment: - role: traditional - role_traditional: - config_provider: etcd - etcd: - prefix: "/apisix" - host: - - http://[::1]:2379 diff --git a/t/deployment/conf_server2.t b/t/deployment/conf_server2.t deleted file mode 100644 index 886b0cb420fd..000000000000 --- a/t/deployment/conf_server2.t +++ /dev/null @@ -1,161 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license 
agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -use t::APISIX 'no_plan'; - -add_block_preprocessor(sub { - my ($block) = @_; - - if (!$block->request) { - $block->set_value("request", "GET /t"); - } - -}); - -run_tests(); - -__DATA__ - -=== TEST 1: health check, ensure unhealthy endpoint is skipped ---- http_config -server { - listen 12345; - location / { - access_by_lua_block { - if package.loaded.start_to_fail then - ngx.exit(502) - end - } - proxy_pass http://127.0.0.1:2379; - } -} ---- yaml_config -deployment: - role: traditional - role_traditional: - config_provider: etcd - etcd: - prefix: "/apisix" - host: - - http://127.0.0.1:2379 - - http://localhost:12345 ---- config - location /t { - content_by_lua_block { - local etcd = require("apisix.core.etcd") - package.loaded.start_to_fail = true - for i = 1, 7 do - assert(etcd.set("/apisix/test", "foo")) - end - package.loaded.start_to_fail = nil - ngx.say('OK') - } - } ---- response_body -OK ---- error_log -report failure, endpoint: localhost:12345 -endpoint localhost:12345 is unhealthy, skipped - - - -=== TEST 2: health check, all endpoints are unhealthy ---- http_config -server { - listen 12345; - location / { - access_by_lua_block { - if package.loaded.start_to_fail then - ngx.exit(502) - end - } - proxy_pass http://127.0.0.1:2379; - } -} ---- yaml_config -deployment: - 
role: traditional - role_traditional: - config_provider: etcd - etcd: - prefix: "/apisix" - host: - - http://localhost:12345 - - http://127.0.0.1:12345 ---- config - location /t { - content_by_lua_block { - local etcd = require("apisix.core.etcd") - package.loaded.start_to_fail = true - for i = 1, 6 do - etcd.set("/apisix/test", "foo") - end - package.loaded.start_to_fail = nil - local _, err = etcd.set("/apisix/test", "foo") - ngx.say(err) - } - } ---- response_body -invalid response code: 503 ---- error_log -endpoint localhost:12345 is unhealthy, skipped -endpoint 127.0.0.1:12345 is unhealthy, skipped - - - -=== TEST 3: health check, all endpoints recover from unhealthy ---- http_config -server { - listen 12345; - location / { - access_by_lua_block { - if package.loaded.start_to_fail then - ngx.exit(502) - end - } - proxy_pass http://127.0.0.1:2379; - } -} ---- yaml_config -deployment: - role: traditional - role_traditional: - config_provider: etcd - etcd: - health_check_timeout: 1 - prefix: "/apisix" - host: - - http://localhost:12345 - - http://127.0.0.1:12345 ---- config - location /t { - content_by_lua_block { - local etcd = require("apisix.core.etcd") - package.loaded.start_to_fail = true - for i = 1, 6 do - etcd.set("/apisix/test", "foo") - end - package.loaded.start_to_fail = nil - ngx.sleep(1.2) - local res, err = etcd.set("/apisix/test", "foo") - ngx.say(err or res.body.node.value) - } - } ---- response_body -foo ---- error_log -endpoint localhost:12345 is unhealthy, skipped -endpoint 127.0.0.1:12345 is unhealthy, skipped diff --git a/t/deployment/grpc/conf_server.t b/t/deployment/grpc/conf_server.t deleted file mode 100644 index 5ea0bbe214b3..000000000000 --- a/t/deployment/grpc/conf_server.t +++ /dev/null @@ -1,458 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. 
-# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -use t::APISIX; - -my $nginx_binary = $ENV{'TEST_NGINX_BINARY'} || 'nginx'; -my $version = eval { `$nginx_binary -V 2>&1` }; - -if ($version !~ m/\/apisix-nginx-module/) { - plan(skip_all => "apisix-nginx-module not installed"); -} else { - plan('no_plan'); -} - -add_block_preprocessor(sub { - my ($block) = @_; - - if (!$block->request) { - $block->set_value("request", "GET /t"); - } - -}); - -run_tests(); - -__DATA__ - -=== TEST 1: sync in https ---- config - location /t { - content_by_lua_block { - local core = require("apisix.core") - local t = require("lib.test_admin").test - - local consumers, _ = core.config.new("/consumers", { - automatic = true, - item_schema = core.schema.consumer, - }) - - ngx.sleep(0.6) - local idx = consumers.prev_index - - local code, body = t('/apisix/admin/consumers', - ngx.HTTP_PUT, - [[{ - "username": "jobs", - "plugins": { - "basic-auth": { - "username": "jobs", - "password": "678901" - } - } - }]]) - - ngx.sleep(2) - local new_idx = consumers.prev_index - if new_idx > idx then - ngx.say("prev_index updated") - else - ngx.say("prev_index not update") - end - } - } ---- response_body -prev_index updated ---- yaml_config -deployment: - role: traditional - role_traditional: - config_provider: etcd - admin: - admin_key: ~ - etcd: - use_grpc: true - prefix: "/apisix" - host: - - https://127.0.0.1:12379 - tls: - verify: false - - - -=== TEST 2: mix ip & domain ---- 
config - location /t { - content_by_lua_block { - local etcd = require("apisix.core.etcd") - assert(etcd.set("/apisix/test", "foo")) - local res = assert(etcd.get("/apisix/test")) - ngx.say(res.body.node.value) - } - } ---- yaml_config -deployment: - role: traditional - role_traditional: - config_provider: etcd - etcd: - use_grpc: true - prefix: "/apisix" - host: - - http://127.0.0.2:2379 - - http://localhost:2379 - - http://[::1]:2379 ---- response_body -foo - - - -=== TEST 3: check default SNI ---- http_config -server { - listen 12345 http2 ssl; - ssl_certificate cert/apisix.crt; - ssl_certificate_key cert/apisix.key; - - ssl_certificate_by_lua_block { - local ngx_ssl = require "ngx.ssl" - ngx.log(ngx.WARN, "Receive SNI: ", ngx_ssl.server_name()) - } - - location / { - grpc_pass grpc://127.0.0.1:2379; - } -} ---- config - location /t { - content_by_lua_block { - local etcd = require("apisix.core.etcd") - assert(etcd.set("/apisix/test", "foo")) - local res = assert(etcd.get("/apisix/test")) - ngx.say(res.body.node.value) - } - } ---- response_body -foo ---- yaml_config -deployment: - role: traditional - role_traditional: - config_provider: etcd - etcd: - use_grpc: true - prefix: "/apisix" - host: - - https://127.0.0.1:12379 - - https://localhost:12345 - timeout: 1 - tls: - verify: false ---- error_log -Receive SNI: localhost - - - -=== TEST 4: check configured SNI ---- http_config -server { - listen 12345 http2 ssl; - ssl_certificate cert/apisix.crt; - ssl_certificate_key cert/apisix.key; - - ssl_certificate_by_lua_block { - local ngx_ssl = require "ngx.ssl" - ngx.log(ngx.WARN, "Receive SNI: ", ngx_ssl.server_name()) - } - - location / { - grpc_pass grpc://127.0.0.1:2379; - } -} ---- config - location /t { - content_by_lua_block { - local etcd = require("apisix.core.etcd") - assert(etcd.set("/apisix/test", "foo")) - local res = assert(etcd.get("/apisix/test")) - ngx.say(res.body.node.value) - } - } ---- response_body -foo ---- yaml_config -deployment: - role: 
traditional - role_traditional: - config_provider: etcd - etcd: - use_grpc: true - prefix: "/apisix" - host: - - https://127.0.0.1:12379 - - https://127.0.0.1:12345 - timeout: 1 - tls: - verify: false - sni: "x.com" ---- error_log -Receive SNI: x.com - - - -=== TEST 5: ipv6 ---- config - location /t { - content_by_lua_block { - local etcd = require("apisix.core.etcd") - assert(etcd.set("/apisix/test", "foo")) - local res = assert(etcd.get("/apisix/test")) - ngx.say(res.body.node.value) - } - } ---- yaml_config -deployment: - role: traditional - role_traditional: - config_provider: etcd - etcd: - use_grpc: true - prefix: "/apisix" - host: - - http://[::1]:2379 - - - -=== TEST 6: resolve domain, result changed ---- extra_init_by_lua - local resolver = require("apisix.core.resolver") - local old_f = resolver.parse_domain - local counter = 0 - resolver.parse_domain = function (domain) - if domain == "localhost" then - counter = counter + 1 - if counter % 2 == 0 then - return "127.0.0.2" - else - return "127.0.0.3" - end - else - return old_f(domain) - end - end ---- config - location /t { - content_by_lua_block { - local etcd = require("apisix.core.etcd") - assert(etcd.set("/apisix/test", "foo")) - local res = assert(etcd.get("/apisix/test")) - ngx.say(res.body.node.value) - } - } ---- yaml_config -deployment: - role: traditional - role_traditional: - config_provider: etcd - etcd: - use_grpc: true - prefix: "/apisix" - host: - # use localhost so the connection is OK in the situation that the DNS - # resolve is not done in APISIX - - http://localhost:2379 ---- response_body -foo ---- error_log -localhost is resolved to: 127.0.0.3 -localhost is resolved to: 127.0.0.2 - - - -=== TEST 7: update balancer if the DNS result changed ---- extra_init_by_lua - local etcd = require("apisix.core.etcd") - etcd.get_etcd_syncer = function () - return etcd.new() - end - - local resolver = require("apisix.core.resolver") - local old_f = resolver.parse_domain - package.loaded.counter = 0 
- resolver.parse_domain = function (domain) - if domain == "x.com" then - local counter = package.loaded.counter - package.loaded.counter = counter + 1 - if counter % 2 == 0 then - return "127.0.0.2" - else - return "127.0.0.3" - end - else - return old_f(domain) - end - end - - local picker = require("apisix.balancer.least_conn") - package.loaded.n_picker = 0 - local old_f = picker.new - picker.new = function (nodes, upstream) - package.loaded.n_picker = package.loaded.n_picker + 1 - return old_f(nodes, upstream) - end ---- config - location /t { - content_by_lua_block { - local etcd = require("apisix.core.etcd") - assert(etcd.set("/apisix/test", "foo")) - local res = assert(etcd.get("/apisix/test")) - ngx.say(res.body.node.value) - local counter = package.loaded.counter - local n_picker = package.loaded.n_picker - if counter == n_picker then - ngx.say("OK") - else - ngx.say(counter, " ", n_picker) - end - } - } ---- yaml_config -deployment: - role: traditional - role_traditional: - config_provider: etcd - etcd: - use_grpc: true - timeout: 1 - prefix: "/apisix" - host: - - http://127.0.0.1:2379 - - http://x.com:2379 ---- response_body -foo -OK ---- error_log -x.com is resolved to: 127.0.0.3 -x.com is resolved to: 127.0.0.2 - - - -=== TEST 8: retry ---- config - location /t { - content_by_lua_block { - local etcd = require("apisix.core.etcd") - assert(etcd.set("/apisix/test", "foo")) - local res = assert(etcd.get("/apisix/test")) - ngx.say(res.body.node.value) - } - } ---- yaml_config -deployment: - role: traditional - role_traditional: - config_provider: etcd - etcd: - use_grpc: true - timeout: 1 - prefix: "/apisix" - host: - - http://127.0.0.1:1979 - - http://[::1]:1979 - - http://localhost:2379 ---- error_log -connect() failed ---- response_body -foo - - - -=== TEST 9: check Host header ---- http_config -server { - listen 12345 http2; - location / { - access_by_lua_block { - ngx.log(ngx.WARN, "Receive Host: ", ngx.var.http_host) - } - grpc_pass 
grpc://127.0.0.1:2379; - } -} ---- config - location /t { - content_by_lua_block { - local etcd = require("apisix.core.etcd") - assert(etcd.set("/apisix/test", "foo")) - local res = assert(etcd.get("/apisix/test")) - ngx.say(res.body.node.value) - } - } ---- response_body -foo ---- yaml_config -deployment: - role: traditional - role_traditional: - config_provider: etcd - etcd: - use_grpc: true - timeout: 1 - prefix: "/apisix" - host: - - http://127.0.0.1:12345 - - http://localhost:12345 ---- error_log -Receive Host: localhost -Receive Host: 127.0.0.1 - - - -=== TEST 10: check Host header after retry ---- http_config -server { - listen 12345 http2; - location / { - access_by_lua_block { - ngx.log(ngx.WARN, "Receive Host: ", ngx.var.http_host) - } - grpc_pass grpc://127.0.0.1:2379; - } -} ---- config - location /t { - content_by_lua_block { - local etcd = require("apisix.core.etcd") - assert(etcd.set("/apisix/test", "foo")) - local res = assert(etcd.get("/apisix/test")) - ngx.say(res.body.node.value) - } - } ---- response_body -foo ---- yaml_config -deployment: - role: traditional - role_traditional: - config_provider: etcd - etcd: - use_grpc: true - timeout: 1 - prefix: "/apisix" - host: - - http://127.0.0.1:1979 - - http://localhost:12345 ---- error_log -Receive Host: localhost diff --git a/t/deployment/grpc/mtls.t b/t/deployment/grpc/mtls.t deleted file mode 100644 index 1aca2576dde5..000000000000 --- a/t/deployment/grpc/mtls.t +++ /dev/null @@ -1,118 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -use t::APISIX; - -my $nginx_binary = $ENV{'TEST_NGINX_BINARY'} || 'nginx'; -my $version = eval { `$nginx_binary -V 2>&1` }; - -if ($version !~ m/\/apisix-nginx-module/) { - plan(skip_all => "apisix-nginx-module not installed"); -} else { - plan('no_plan'); -} - -add_block_preprocessor(sub { - my ($block) = @_; - - if (!$block->request) { - $block->set_value("request", "GET /t"); - } - -}); - -run_tests(); - -__DATA__ - -=== TEST 1: mTLS for control plane ---- exec -grpcurl -import-path ./t/lib -proto etcd.proto -d '{}' -cert t/certs/mtls_client.crt -key t/certs/mtls_client.key -insecure localhost:12345 etcdserverpb.Maintenance.Status ---- response_body eval -qr/"version":/ ---- yaml_config -deployment: - role: control_plane - role_control_plane: - config_provider: etcd - conf_server: - listen: 0.0.0.0:12345 - cert: t/certs/mtls_server.crt - cert_key: t/certs/mtls_server.key - client_ca_cert: t/certs/mtls_ca.crt - etcd: - use_grpc: true - prefix: "/apisix" - host: - - http://127.0.0.1:2379 - certs: - cert: t/certs/mtls_client.crt - cert_key: t/certs/mtls_client.key - trusted_ca_cert: t/certs/mtls_ca.crt - - - -=== TEST 2: no client certificate ---- exec -curl -k https://localhost:12345/version ---- response_body eval -qr/No required SSL certificate was sent/ ---- yaml_config -deployment: - role: control_plane - role_control_plane: - config_provider: etcd - conf_server: - listen: 0.0.0.0:12345 - cert: t/certs/mtls_server.crt - cert_key: t/certs/mtls_server.key - client_ca_cert: t/certs/mtls_ca.crt - etcd: - use_grpc: true - prefix: "/apisix" - host: - - 
http://127.0.0.1:2379 - certs: - cert: t/certs/mtls_client.crt - cert_key: t/certs/mtls_client.key - trusted_ca_cert: t/certs/mtls_ca.crt - - - -=== TEST 3: wrong client certificate ---- exec -curl --cert t/certs/apisix.crt --key t/certs/apisix.key -k https://localhost:12345/version ---- response_body eval -qr/The SSL certificate error/ ---- yaml_config -deployment: - role: control_plane - role_control_plane: - config_provider: etcd - conf_server: - listen: 0.0.0.0:12345 - cert: t/certs/mtls_server.crt - cert_key: t/certs/mtls_server.key - client_ca_cert: t/certs/mtls_ca.crt - etcd: - use_grpc: true - prefix: "/apisix" - host: - - http://127.0.0.1:2379 - certs: - cert: t/certs/mtls_client.crt - cert_key: t/certs/mtls_client.key - trusted_ca_cert: t/certs/mtls_ca.crt diff --git a/t/deployment/mtls.t b/t/deployment/mtls.t deleted file mode 100644 index 2d8ecb7e6839..000000000000 --- a/t/deployment/mtls.t +++ /dev/null @@ -1,115 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -use t::APISIX; - -my $nginx_binary = $ENV{'TEST_NGINX_BINARY'} || 'nginx'; -my $version = eval { `$nginx_binary -V 2>&1` }; - -if ($version !~ m/\/apisix-nginx-module/) { - plan(skip_all => "apisix-nginx-module not installed"); -} else { - plan('no_plan'); -} - -add_block_preprocessor(sub { - my ($block) = @_; - - if (!$block->request) { - $block->set_value("request", "GET /t"); - } - -}); - -run_tests(); - -__DATA__ - -=== TEST 1: mTLS for control plane ---- exec -curl --cert t/certs/mtls_client.crt --key t/certs/mtls_client.key -k https://localhost:12345/version ---- response_body eval -qr/"etcdserver":/ ---- yaml_config -deployment: - role: control_plane - role_control_plane: - config_provider: etcd - conf_server: - listen: 0.0.0.0:12345 - cert: t/certs/mtls_server.crt - cert_key: t/certs/mtls_server.key - client_ca_cert: t/certs/mtls_ca.crt - etcd: - prefix: "/apisix" - host: - - http://127.0.0.1:2379 - certs: - cert: t/certs/mtls_client.crt - cert_key: t/certs/mtls_client.key - trusted_ca_cert: t/certs/mtls_ca.crt - - - -=== TEST 2: no client certificate ---- exec -curl -k https://localhost:12345/version ---- response_body eval -qr/No required SSL certificate was sent/ ---- yaml_config -deployment: - role: control_plane - role_control_plane: - config_provider: etcd - conf_server: - listen: 0.0.0.0:12345 - cert: t/certs/mtls_server.crt - cert_key: t/certs/mtls_server.key - client_ca_cert: t/certs/mtls_ca.crt - etcd: - prefix: "/apisix" - host: - - http://127.0.0.1:2379 - certs: - cert: t/certs/mtls_client.crt - cert_key: t/certs/mtls_client.key - trusted_ca_cert: t/certs/mtls_ca.crt - - - -=== TEST 3: wrong client certificate ---- exec -curl --cert t/certs/apisix.crt --key t/certs/apisix.key -k https://localhost:12345/version ---- response_body eval -qr/The SSL certificate error/ ---- yaml_config -deployment: - role: control_plane - role_control_plane: - config_provider: etcd - conf_server: - listen: 0.0.0.0:12345 - cert: t/certs/mtls_server.crt - cert_key: 
t/certs/mtls_server.key - client_ca_cert: t/certs/mtls_ca.crt - etcd: - prefix: "/apisix" - host: - - http://127.0.0.1:2379 - certs: - cert: t/certs/mtls_client.crt - cert_key: t/certs/mtls_client.key - trusted_ca_cert: t/certs/mtls_ca.crt diff --git a/t/discovery/consul.t b/t/discovery/consul.t index 39c5ab287b50..57a6ab5b4fbd 100644 --- a/t/discovery/consul.t +++ b/t/discovery/consul.t @@ -558,6 +558,9 @@ upstreams: table.sort(nodes, function(a, b) return a.port < b.port end) + for _, node in ipairs(nodes) do + node.counter = nil + end ngx.say(json.encode(nodes)) local code, body, res = t.test('/v1/healthcheck/upstreams/1', @@ -567,12 +570,90 @@ upstreams: table.sort(nodes, function(a, b) return a.port < b.port end) + for _, node in ipairs(nodes) do + node.counter = nil + end ngx.say(json.encode(nodes)) } } --- request GET /thc --- response_body -[{"host":"127.0.0.1","port":30513,"priority":0,"weight":1},{"host":"127.0.0.1","port":30514,"priority":0,"weight":1}] -[{"host":"127.0.0.1","port":30513,"priority":0,"weight":1},{"host":"127.0.0.1","port":30514,"priority":0,"weight":1}] +[{"ip":"127.0.0.1","port":30513,"status":"healthy"},{"ip":"127.0.0.1","port":30514,"status":"healthy"}] +[{"ip":"127.0.0.1","port":30513,"status":"healthy"},{"ip":"127.0.0.1","port":30514,"status":"healthy"}] +--- ignore_error_log + + + +=== TEST 13: test consul catalog service change +--- yaml_config +apisix: + node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +discovery: + consul: + servers: + - "http://127.0.0.1:8500" + keepalive: false + fetch_interval: 3 + default_service: + host: "127.0.0.1" + port: 20999 +#END +--- apisix_yaml +routes: + - + uri: /* + upstream: + service_name: service_a + discovery_type: consul + type: roundrobin +#END +--- config +location /v1/agent { + proxy_pass http://127.0.0.1:8500; +} + +location /sleep { + content_by_lua_block { + local args = ngx.req.get_uri_args() + local sec = args.sec or "2" + 
ngx.sleep(tonumber(sec)) + ngx.say("ok") + } +} +--- timeout: 6 +--- request eval +[ + "PUT /v1/agent/service/deregister/service_a1", + "GET /sleep?sec=3", + "GET /hello", + "PUT /v1/agent/service/register\n" . "{\"ID\":\"service_a1\",\"Name\":\"service_a\",\"Tags\":[\"primary\",\"v1\"],\"Address\":\"127.0.0.1\",\"Port\":30511,\"Meta\":{\"service_a_version\":\"4.0\"},\"EnableTagOverride\":false,\"Weights\":{\"Passing\":10,\"Warning\":1}}", + "GET /sleep?sec=5", + "GET /hello", + "PUT /v1/agent/service/deregister/service_a1", + "GET /sleep?sec=5", + "GET /hello", + "PUT /v1/agent/service/register\n" . "{\"ID\":\"service_a1\",\"Name\":\"service_a\",\"Tags\":[\"primary\",\"v1\"],\"Address\":\"127.0.0.1\",\"Port\":30511,\"Meta\":{\"service_a_version\":\"4.0\"},\"EnableTagOverride\":false,\"Weights\":{\"Passing\":10,\"Warning\":1}}", + "GET /sleep?sec=5", + "GET /hello", +] +--- response_body_like eval +[ + qr//, + qr/ok\n/, + qr/missing consul services\n/, + qr//, + qr/ok\n/, + qr/server 1\n/, + qr//, + qr/ok\n/, + qr/missing consul services\n/, + qr//, + qr/ok\n/, + qr/server 1\n/, +] --- ignore_error_log diff --git a/t/discovery/consul_dump.t b/t/discovery/consul_dump.t index 366c76a989fb..c74e5b1500bd 100644 --- a/t/discovery/consul_dump.t +++ b/t/discovery/consul_dump.t @@ -451,3 +451,55 @@ discovery: GET /bonjour --- response_body {"service_a":[{"host":"127.0.0.1","port":30511,"weight":1}],"service_b":[{"host":"127.0.0.1","port":30517,"weight":1}]} + + + +=== TEST 16: prepare nodes with consul health check +--- config +location /v1/agent { + proxy_pass http://127.0.0.1:8500; +} +--- request eval +[ + "PUT /v1/agent/service/deregister/service_a1", + "PUT /v1/agent/service/deregister/service_a2", + "PUT /v1/agent/service/deregister/service_b1", + "PUT /v1/agent/service/deregister/service_b2", + "PUT /v1/agent/service/register\n" . 
"{\"Checks\": [{\"http\": \"http://baidu.com\",\"interval\": \"1s\"}],\"ID\":\"service_a1\",\"Name\":\"service_a\",\"Tags\":[\"primary\",\"v1\"],\"Address\":\"127.0.0.1\",\"Port\":30511,\"Meta\":{\"service_a_version\":\"4.0\"},\"EnableTagOverride\":false,\"Weights\":{\"Passing\":10,\"Warning\":1}}", + "PUT /v1/agent/service/register\n" . "{\"Checks\": [{\"http\": \"http://127.0.0.1:8002\",\"interval\": \"1s\"}],\"ID\":\"service_b1\",\"Name\":\"service_b\",\"Tags\":[\"primary\",\"v1\"],\"Address\":\"127.0.0.1\",\"Port\":8002,\"Meta\":{\"service_b_version\":\"4.1\"},\"EnableTagOverride\":false,\"Weights\":{\"Passing\":10,\"Warning\":1}}", +] +--- response_body eval +--- error_code eval +[200, 200, 200, 200, 200, 200] + + + +=== TEST 17: show dump services with consul health check +--- yaml_config +apisix: + node_listen: 1984 + enable_control: true +discovery: + consul: + servers: + - "http://127.0.0.1:8500" + dump: + path: "consul.dump" + load_on_init: false +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin") + ngx.sleep(2) + local code, body, res = t.test('/v1/discovery/consul/show_dump_file', + ngx.HTTP_GET) + local entity = json.decode(res) + ngx.say(json.encode(entity.services)) + } + } +--- timeout: 3 +--- request +GET /t +--- response_body +{"service_a":[{"host":"127.0.0.1","port":30511,"weight":1}]} diff --git a/t/discovery/consul_kv.t b/t/discovery/consul_kv.t index da557d1d0127..9363f768d209 100644 --- a/t/discovery/consul_kv.t +++ b/t/discovery/consul_kv.t @@ -425,25 +425,33 @@ upstreams: local code, body, res = t.test('/v1/healthcheck', ngx.HTTP_GET) res = json.decode(res) - table.sort(res[1].nodes, function(a, b) - return a.host < b.host + local nodes = res[1].nodes + table.sort(nodes, function(a, b) + return a.ip < b.ip end) - ngx.say(json.encode(res)) + for _, node in ipairs(nodes) do + node.counter = nil + end + ngx.say(json.encode(nodes)) local code, body, res = 
t.test('/v1/healthcheck/upstreams/1', ngx.HTTP_GET) res = json.decode(res) - table.sort(res.nodes, function(a, b) - return a.host < b.host + local nodes = res.nodes + table.sort(nodes, function(a, b) + return a.ip < b.ip end) - ngx.say(json.encode(res)) + for _, node in ipairs(nodes) do + node.counter = nil + end + ngx.say(json.encode(nodes)) } } --- request GET /thc --- response_body -[{"healthy_nodes":[{"host":"127.0.0.1","port":30511,"priority":0,"weight":1}],"name":"upstream#/upstreams/1","nodes":[{"host":"127.0.0.1","port":30511,"priority":0,"weight":1},{"host":"127.0.0.2","port":1988,"priority":0,"weight":1}],"src_id":"1","src_type":"upstreams"}] -{"healthy_nodes":[{"host":"127.0.0.1","port":30511,"priority":0,"weight":1}],"name":"upstream#/upstreams/1","nodes":[{"host":"127.0.0.1","port":30511,"priority":0,"weight":1},{"host":"127.0.0.2","port":1988,"priority":0,"weight":1}],"src_id":"1","src_type":"upstreams"} +[{"ip":"127.0.0.1","port":30511,"status":"healthy"},{"ip":"127.0.0.2","port":1988,"status":"unhealthy"}] +[{"ip":"127.0.0.1","port":30511,"status":"healthy"},{"ip":"127.0.0.2","port":1988,"status":"unhealthy"}] --- ignore_error_log diff --git a/t/fips/jwt-auth.t b/t/fips/jwt-auth.t new file mode 100644 index 000000000000..ec1061315775 --- /dev/null +++ b/t/fips/jwt-auth.t @@ -0,0 +1,267 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(2); +no_long_string(); +no_root_location(); +no_shuffle(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: create public API route (jwt-auth sign) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/2', + ngx.HTTP_PUT, + [[{ + "plugins": { + "public-api": {} + }, + "uri": "/apisix/plugin/jwt/sign" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 2: add consumer with username and plugins with public_key, private_key(private_key numbits = 512) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "kerouac", + "plugins": { + "jwt-auth": { + "key": "user-key-rs256", + "algorithm": "RS256", + "public_key": "-----BEGIN PUBLIC KEY-----\nMFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBAKebDxlvQMGyEesAL1r1nIJBkSdqu3Hr\n7noq/0ukiZqVQLSJPMOv0oxQSutvvK3hoibwGakDOza+xRITB7cs2cECAwEAAQ==\n-----END PUBLIC KEY-----", + "private_key": "-----BEGIN RSA PRIVATE 
KEY-----\nMIIBOgIBAAJBAKebDxlvQMGyEesAL1r1nIJBkSdqu3Hr7noq/0ukiZqVQLSJPMOv\n0oxQSutvvK3hoibwGakDOza+xRITB7cs2cECAwEAAQJAYPWh6YvjwWobVYC45Hz7\n+pqlt1DWeVQMlN407HSWKjdH548ady46xiQuZ5Cfx3YyCcnsfVWaQNbC+jFbY4YL\nwQIhANfASwz8+2sKg1xtvzyaChX5S5XaQTB+azFImBJumixZAiEAxt93Td6JH1RF\nIeQmD/K+DClZMqSrliUzUqJnCPCzy6kCIAekDsRh/UF4ONjAJkKuLedDUfL3rNFb\n2M4BBSm58wnZAiEAwYLMOg8h6kQ7iMDRcI9I8diCHM8yz0SfbfbsvzxIFxECICXs\nYvIufaZvBa8f+E/9CANlVhm5wKAyM8N8GJsiCyEG\n-----END RSA PRIVATE KEY-----" + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 3: JWT sign and verify use RS256 algorithm(private_key numbits = 512) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "jwt-auth": {} + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 4: sign/verify use RS256 algorithm(private_key numbits = 512) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, err, sign = t('/apisix/plugin/jwt/sign?key=user-key-rs256', + ngx.HTTP_GET + ) + + if code > 200 then + ngx.status = code + ngx.say(err) + return + end + + local code, _, res = t('/hello?jwt=' .. 
sign, + ngx.HTTP_GET + ) + + ngx.status = code + } + } +--- error_code: 401 +--- error_log +JWT token invalid: invalid jwt string + + + +=== TEST 5: add consumer with username and plugins with public_key, private_key(private_key numbits = 1024) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "kerouac", + "plugins": { + "jwt-auth": { + "key": "user-key-rs256", + "algorithm": "RS256", + "public_key": "-----BEGIN PUBLIC KEY-----\nMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDGxOfVe/seP5T/V8pkS5YNAPRC\n3Ffxxedi7v0pyZh/4d4p9Qx0P9wOmALwlOq4Ftgks311pxG0zL0LcTJY4ikbc3r0\nh8SM0yhj9UV1VGtuia4YakobvpM9U+kq3lyIMO9ZPRez0cP3AJIYCt5yf8E7bNYJ\njbJNjl8WxvM1tDHqVQIDAQAB\n-----END PUBLIC KEY-----", + ]] .. [[ + "private_key": "-----BEGIN RSA PRIVATE KEY-----\nMIICXQIBAAKBgQDGxOfVe/seP5T/V8pkS5YNAPRC3Ffxxedi7v0pyZh/4d4p9Qx0\nP9wOmALwlOq4Ftgks311pxG0zL0LcTJY4ikbc3r0h8SM0yhj9UV1VGtuia4Yakob\nvpM9U+kq3lyIMO9ZPRez0cP3AJIYCt5yf8E7bNYJjbJNjl8WxvM1tDHqVQIDAQAB\nAoGAYFy9eAXvLC7u8QuClzT9vbgksvVXvWKQVqo+GbAeOoEpz3V5YDJFYN3ZLwFC\n+ZQ5nTFXNV6Veu13CMEMA4NBIa8I4r3aYzSjq7X7UEBkLDBtEUge52mYakNfXD8D\nqViHkyJqvtVnBl7jNZVqbBderQnXA0kigaeZPL3+hkYKBgECQQDmiDbUL3FBynLy\nNX6/JdAbO4g1Nl/1RsGg8svhb6vRM8WQyIQWt5EKi7yoP/9nIRXcIgdwpVO6wZRU\nDojL0oy1AkEA3LpjqXxIRzcy2ALsqKN3hoNPGAlkPyG3Mlph91mqSZ2jYpXCX9LW\nhhQdf9GmfO8jZtYhYAJqEMOJrKeZHToLIQJBAJbrJbnTNTn05ztZehh5ELxDRPBR\nIJDaOXi8emyjRsA2PGiEXLTih7l3sZIUE4fYSQ9L18MO+LmScSB2Q2fr9uECQFc7\nIh/dCgN7ARD1Nun+kEIMqrlpHMEGZgv0RDsoqG+naOaRINwVysn6MR5OkGlXaLo/\nbbkvuxMc88/T/GLciYECQQC4oUveCOic4Qs6TQfMUKKv/kJ09slbD70HkcBzA5nY\nyro4RT4z/SN6T3SD+TuWn2//I5QxiQEIbOCTySci7yuh\n-----END RSA PRIVATE KEY-----" + } + } + } + ]] + ) + ngx.status = code + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 6: JWT sign and verify use RS256 algorithm(private_key numbits = 1024) +--- config + location /t { + content_by_lua_block { + local t = 
require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "jwt-auth": {} + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 7: sign/verify use RS256 algorithm(private_key numbits = 1024) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, err, sign = t('/apisix/plugin/jwt/sign?key=user-key-rs256', + ngx.HTTP_GET + ) + + if code > 200 then + ngx.status = code + ngx.say(err) + return + end + + local code, _, res = t('/hello?jwt=' .. sign, + ngx.HTTP_GET + ) + + ngx.status = code + } + } +--- error_code: 401 +--- error_log +JWT token invalid: invalid jwt string + + + +=== TEST 8: sign/verify use RS256 algorithm(private_key numbits = 1024,with extra payload) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, err, sign = t('/apisix/plugin/jwt/sign?key=user-key-rs256&payload=%7B%22aaa%22%3A%2211%22%2C%22bb%22%3A%22222%22%7D', + ngx.HTTP_GET + ) + + if code > 200 then + ngx.status = code + ngx.say(err) + return + end + + local code, _, res = t('/hello?jwt=' .. sign, + ngx.HTTP_GET + ) + + ngx.status = code + } + } +--- error_code: 401 +--- error_log +JWT token invalid: invalid jwt string diff --git a/t/fips/openid-connect.t b/t/fips/openid-connect.t new file mode 100644 index 000000000000..8a23501cd93f --- /dev/null +++ b/t/fips/openid-connect.t @@ -0,0 +1,111 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +log_level('warn'); +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if ((!defined $block->error_log) && (!defined $block->no_error_log)) { + $block->set_value("no_error_log", "[error]"); + } + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests(); + +__DATA__ + +=== TEST 1: configure oidc plugin with small public key +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ "plugins": { + "openid-connect": { + "client_id": "kbyuFDidLLm280LIwVFiazOqjO3ty8KH", + "client_secret": "60Op4HFM0I8ajz0WdiStAbziZ-VFQttXuxixHHs2R7r7-CW8GR79l-mmLqMhc-Sa", + "discovery": "https://samples.auth0.com/.well-known/openid-configuration", + "redirect_uri": "https://iresty.com", + "ssl_verify": false, + "timeout": 10, + "bearer_only": true, + "scope": "apisix", + "public_key": "-----BEGIN PUBLIC KEY-----\n]] .. + [[MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBANW16kX5SMrMa2t7F2R1w6Bk/qpjS4QQ\n]] .. + [[hnrbED3Dpsl9JXAx90MYsIWp51hBxJSE/EPVK8WF/sjHK1xQbEuDfEECAwEAAQ==\n]] .. 
+ [[-----END PUBLIC KEY-----", + "token_signing_alg_values_expected": "RS256" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 2: hit failed +--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello" + local res, err = httpc:request_uri(uri, { + method = "GET", + headers = { + ["Authorization"] = "Bearer eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9" .. + ".eyJkYXRhMSI6IkRhdGEgMSIsImlhdCI6MTU4NTEyMjUwMiwiZXhwIjoxOTAwNjk" .. + "4NTAyLCJhdWQiOiJodHRwOi8vbXlzb2Z0Y29ycC5pbiIsImlzcyI6Ik15c29mdCB" .. + "jb3JwIiwic3ViIjoic29tZUB1c2VyLmNvbSJ9.u1ISx7JbuK_GFRIUqIMP175FqX" .. + "RyF9V7y86480Q4N3jNxs3ePbc51TFtIHDrKttstU4Tub28PYVSlr-HXfjo7w", + } + }) + ngx.status = res.status + if res.status == 200 then + ngx.say(true) + end + } + } +--- error_code: 401 +--- error_log +jwt signature verification failed: invalid key length diff --git a/t/fips/ssls.t b/t/fips/ssls.t new file mode 100644 index 000000000000..d7f08370483a --- /dev/null +++ b/t/fips/ssls.t @@ -0,0 +1,75 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('warn'); +no_root_location(); +no_shuffle(); + +run_tests(); + +__DATA__ + +=== TEST 1: configure cert with smaller key +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin") + local json = require("toolkit.json") + local ssl_cert = t.read_file("t/certs/server_1024.crt") + local ssl_key = t.read_file("t/certs/server_1024.key") + + local data = { + upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1980"] = 1, + }, + }, + uri = "/hello" + } + assert(t.test('/apisix/admin/routes/1', + ngx.HTTP_PUT, + json.encode(data) + )) + + local data = { + cert = ssl_cert, + key = ssl_key, + sni = "localhost", + } + local code = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + json.encode(data) + ) + + if code >= 300 then + ngx.status = code + end + } + } +--- request +GET /t + + + +=== TEST 2: curl failed +--- exec +curl -k https://localhost:1994/hello -H "Host: localhost" +--- error_log +failed to set PEM cert: SSL_use_certificate() failed diff --git a/t/gm/gm.t b/t/gm/gm.t index ab6b62c63e3b..dfd64d370c5a 100644 --- a/t/gm/gm.t +++ b/t/gm/gm.t @@ -15,6 +15,22 @@ # specific language governing permissions and limitations # under the License. 
+BEGIN { + $ENV{TEST_ENV_GMSSL_CRT_ENC} = "-----BEGIN CERTIFICATE----- +MIIB2DCCAX6gAwIBAgIBAzAKBggqgRzPVQGDdTBFMQswCQYDVQQGEwJBQTELMAkG +A1UECAwCQkIxCzAJBgNVBAoMAkNDMQswCQYDVQQLDAJERDEPMA0GA1UEAwwGc3Vi +IGNhMB4XDTIyMTEwMjAzMTkzNloXDTMyMTAzMDAzMTkzNlowSTELMAkGA1UEBhMC +QUExCzAJBgNVBAgMAkJCMQswCQYDVQQKDAJDQzELMAkGA1UECwwCREQxEzARBgNV +BAMMCnNlcnZlciBlbmMwWjAUBggqgRzPVQGCLQYIKoEcz1UBgi0DQgAED+MQrLrZ +9PbMmz/44Kb73Qc7FlMs7u034XImjJREBAn1KzZ7jqcYfCiV/buhmu1sLhMXnB69 +mERtf1tAaXcgIaNaMFgwCQYDVR0TBAIwADALBgNVHQ8EBAMCAzgwHQYDVR0OBBYE +FBxHDo0gHhMoYkDeHWySTIJy5BZpMB8GA1UdIwQYMBaAFCTrpmbUig3JfveqAIGJ +6n+vAk2AMAoGCCqBHM9VAYN1A0gAMEUCIHtXgpOxcb3mZv2scRZHZz5YGFr45dfk +VfLkF9BkrB/xAiEA8EeUg7nCFfgHzrfgB7v0wgN1Hrgj8snTUO6IDfkBKYM= +-----END CERTIFICATE----- +"; +} + use t::APISIX; if (-f "/usr/local/tongsuo/bin/openssl") { @@ -168,3 +184,74 @@ location /t { --- response_body --- error_log SSL_do_handshake() failed + + + +=== TEST 5: set ssl: server_enc with secret ref +--- config +location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + local f = assert(io.open("t/certs/server_sign.crt")) + local cert_sign = f:read("*a") + f:close() + + local f = assert(io.open("t/certs/server_enc.key")) + local pkey_enc = f:read("*a") + f:close() + + local f = assert(io.open("t/certs/server_sign.key")) + local pkey_sign = f:read("*a") + f:close() + + local data = { + cert = "$env://TEST_ENV_GMSSL_CRT_ENC", + key = pkey_enc, + certs = {cert_sign}, + keys = {pkey_sign}, + sni = "localhost", + gm = true, + } + + local code, body = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + core.json.encode(data) + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code, body = t.test('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/echo" + }]] + ) + + ngx.say(body) + } +} +--- response_body +passed + + + +=== TEST 6: 
hit +--- exec +/usr/local/tongsuo/bin/openssl s_client -connect localhost:1994 -servername localhost -cipher ECDHE-SM2-WITH-SM4-SM3 -enable_ntls -ntls -verifyCAfile t/certs/gm_ca.crt -sign_cert t/certs/client_sign.crt -sign_key t/certs/client_sign.key -enc_cert t/certs/client_enc.crt -enc_key t/certs/client_enc.key +--- response_body eval +qr/^CONNECTED/ +--- no_error_log +SSL_do_handshake() failed +[error] diff --git a/t/grpc_server_example/go.mod b/t/grpc_server_example/go.mod index 6e7a0fb6de8a..d440130899a9 100644 --- a/t/grpc_server_example/go.mod +++ b/t/grpc_server_example/go.mod @@ -3,8 +3,8 @@ module github.com/api7/grpc_server_example go 1.11 require ( - github.com/golang/protobuf v1.5.0 + github.com/golang/protobuf v1.5.2 golang.org/x/net v0.7.0 - google.golang.org/grpc v1.32.0 - google.golang.org/protobuf v1.27.1 + google.golang.org/grpc v1.53.0 + google.golang.org/protobuf v1.28.1 ) diff --git a/t/grpc_server_example/go.sum b/t/grpc_server_example/go.sum index 3a6620532bc8..66241a2668d4 100644 --- a/t/grpc_server_example/go.sum +++ b/t/grpc_server_example/go.sum @@ -1,83 +1,1116 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod 
h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= +cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= +cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= +cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= +cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= +cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= +cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= +cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= +cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= +cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= +cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= +cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= +cloud.google.com/go v0.100.1/go.mod h1:fs4QogzfH5n2pBXBP9vRiU+eCny7lD2vmFZy79Iuw1U= +cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= +cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= +cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU= 
+cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA= +cloud.google.com/go v0.105.0/go.mod h1:PrLgOJNe5nfE9UMxKxgXj4mD3voiP+YQ6gdt6KMFOKM= +cloud.google.com/go v0.107.0/go.mod h1:wpc2eNrD7hXUTy8EKS10jkxpZBjASrORK7goS+3YX2I= +cloud.google.com/go/accessapproval v1.4.0/go.mod h1:zybIuC3KpDOvotz59lFe5qxRZx6C75OtwbisN56xYB4= +cloud.google.com/go/accessapproval v1.5.0/go.mod h1:HFy3tuiGvMdcd/u+Cu5b9NkO1pEICJ46IR82PoUdplw= +cloud.google.com/go/accesscontextmanager v1.3.0/go.mod h1:TgCBehyr5gNMz7ZaH9xubp+CE8dkrszb4oK9CWyvD4o= +cloud.google.com/go/accesscontextmanager v1.4.0/go.mod h1:/Kjh7BBu/Gh83sv+K60vN9QE5NJcd80sU33vIe2IFPE= +cloud.google.com/go/aiplatform v1.22.0/go.mod h1:ig5Nct50bZlzV6NvKaTwmplLLddFx0YReh9WfTO5jKw= +cloud.google.com/go/aiplatform v1.24.0/go.mod h1:67UUvRBKG6GTayHKV8DBv2RtR1t93YRu5B1P3x99mYY= +cloud.google.com/go/aiplatform v1.27.0/go.mod h1:Bvxqtl40l0WImSb04d0hXFU7gDOiq9jQmorivIiWcKg= +cloud.google.com/go/analytics v0.11.0/go.mod h1:DjEWCu41bVbYcKyvlws9Er60YE4a//bK6mnhWvQeFNI= +cloud.google.com/go/analytics v0.12.0/go.mod h1:gkfj9h6XRf9+TS4bmuhPEShsh3hH8PAZzm/41OOhQd4= +cloud.google.com/go/apigateway v1.3.0/go.mod h1:89Z8Bhpmxu6AmUxuVRg/ECRGReEdiP3vQtk4Z1J9rJk= +cloud.google.com/go/apigateway v1.4.0/go.mod h1:pHVY9MKGaH9PQ3pJ4YLzoj6U5FUDeDFBllIz7WmzJoc= +cloud.google.com/go/apigeeconnect v1.3.0/go.mod h1:G/AwXFAKo0gIXkPTVfZDd2qA1TxBXJ3MgMRBQkIi9jc= +cloud.google.com/go/apigeeconnect v1.4.0/go.mod h1:kV4NwOKqjvt2JYR0AoIWo2QGfoRtn/pkS3QlHp0Ni04= +cloud.google.com/go/appengine v1.4.0/go.mod h1:CS2NhuBuDXM9f+qscZ6V86m1MIIqPj3WC/UoEuR1Sno= +cloud.google.com/go/appengine v1.5.0/go.mod h1:TfasSozdkFI0zeoxW3PTBLiNqRmzraodCWatWI9Dmak= +cloud.google.com/go/area120 v0.5.0/go.mod h1:DE/n4mp+iqVyvxHN41Vf1CR602GiHQjFPusMFW6bGR4= +cloud.google.com/go/area120 v0.6.0/go.mod h1:39yFJqWVgm0UZqWTOdqkLhjoC7uFfgXRC8g/ZegeAh0= +cloud.google.com/go/artifactregistry v1.6.0/go.mod h1:IYt0oBPSAGYj/kprzsBjZ/4LnG/zOcHyFHjWPCi6SAQ= 
+cloud.google.com/go/artifactregistry v1.7.0/go.mod h1:mqTOFOnGZx8EtSqK/ZWcsm/4U8B77rbcLP6ruDU2Ixk= +cloud.google.com/go/artifactregistry v1.8.0/go.mod h1:w3GQXkJX8hiKN0v+at4b0qotwijQbYUqF2GWkZzAhC0= +cloud.google.com/go/artifactregistry v1.9.0/go.mod h1:2K2RqvA2CYvAeARHRkLDhMDJ3OXy26h3XW+3/Jh2uYc= +cloud.google.com/go/asset v1.5.0/go.mod h1:5mfs8UvcM5wHhqtSv8J1CtxxaQq3AdBxxQi2jGW/K4o= +cloud.google.com/go/asset v1.7.0/go.mod h1:YbENsRK4+xTiL+Ofoj5Ckf+O17kJtgp3Y3nn4uzZz5s= +cloud.google.com/go/asset v1.8.0/go.mod h1:mUNGKhiqIdbr8X7KNayoYvyc4HbbFO9URsjbytpUaW0= +cloud.google.com/go/asset v1.9.0/go.mod h1:83MOE6jEJBMqFKadM9NLRcs80Gdw76qGuHn8m3h8oHQ= +cloud.google.com/go/asset v1.10.0/go.mod h1:pLz7uokL80qKhzKr4xXGvBQXnzHn5evJAEAtZiIb0wY= +cloud.google.com/go/assuredworkloads v1.5.0/go.mod h1:n8HOZ6pff6re5KYfBXcFvSViQjDwxFkAkmUFffJRbbY= +cloud.google.com/go/assuredworkloads v1.6.0/go.mod h1:yo2YOk37Yc89Rsd5QMVECvjaMKymF9OP+QXWlKXUkXw= +cloud.google.com/go/assuredworkloads v1.7.0/go.mod h1:z/736/oNmtGAyU47reJgGN+KVoYoxeLBoj4XkKYscNI= +cloud.google.com/go/assuredworkloads v1.8.0/go.mod h1:AsX2cqyNCOvEQC8RMPnoc0yEarXQk6WEKkxYfL6kGIo= +cloud.google.com/go/assuredworkloads v1.9.0/go.mod h1:kFuI1P78bplYtT77Tb1hi0FMxM0vVpRC7VVoJC3ZoT0= +cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0= +cloud.google.com/go/automl v1.6.0/go.mod h1:ugf8a6Fx+zP0D59WLhqgTDsQI9w07o64uf/Is3Nh5p8= +cloud.google.com/go/automl v1.7.0/go.mod h1:RL9MYCCsJEOmt0Wf3z9uzG0a7adTT1fe+aObgSpkCt8= +cloud.google.com/go/automl v1.8.0/go.mod h1:xWx7G/aPEe/NP+qzYXktoBSDfjO+vnKMGgsApGJJquM= +cloud.google.com/go/baremetalsolution v0.3.0/go.mod h1:XOrocE+pvK1xFfleEnShBlNAXf+j5blPPxrhjKgnIFc= +cloud.google.com/go/baremetalsolution v0.4.0/go.mod h1:BymplhAadOO/eBa7KewQ0Ppg4A4Wplbn+PsFKRLo0uI= +cloud.google.com/go/batch v0.3.0/go.mod h1:TR18ZoAekj1GuirsUsR1ZTKN3FC/4UDnScjT8NXImFE= +cloud.google.com/go/batch v0.4.0/go.mod h1:WZkHnP43R/QCGQsZ+0JyG4i79ranE2u8xvjq/9+STPE= 
+cloud.google.com/go/beyondcorp v0.2.0/go.mod h1:TB7Bd+EEtcw9PCPQhCJtJGjk/7TC6ckmnSFS+xwTfm4= +cloud.google.com/go/beyondcorp v0.3.0/go.mod h1:E5U5lcrcXMsCuoDNyGrpyTm/hn7ne941Jz2vmksAxW8= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/bigquery v1.42.0/go.mod h1:8dRTJxhtG+vwBKzE5OseQn/hiydoQN3EedCaOdYmxRA= +cloud.google.com/go/bigquery v1.43.0/go.mod h1:ZMQcXHsl+xmU1z36G2jNGZmKp9zNY5BUua5wDgmNCfw= +cloud.google.com/go/bigquery v1.44.0/go.mod h1:0Y33VqXTEsbamHJvJHdFmtqHvMIY28aK1+dFsvaChGc= +cloud.google.com/go/billing v1.4.0/go.mod h1:g9IdKBEFlItS8bTtlrZdVLWSSdSyFUZKXNS02zKMOZY= +cloud.google.com/go/billing v1.5.0/go.mod h1:mztb1tBc3QekhjSgmpf/CV4LzWXLzCArwpLmP2Gm88s= +cloud.google.com/go/billing v1.6.0/go.mod h1:WoXzguj+BeHXPbKfNWkqVtDdzORazmCjraY+vrxcyvI= +cloud.google.com/go/billing v1.7.0/go.mod h1:q457N3Hbj9lYwwRbnlD7vUpyjq6u5U1RAOArInEiD5Y= +cloud.google.com/go/binaryauthorization v1.1.0/go.mod h1:xwnoWu3Y84jbuHa0zd526MJYmtnVXn0syOjaJgy4+dM= +cloud.google.com/go/binaryauthorization v1.2.0/go.mod h1:86WKkJHtRcv5ViNABtYMhhNWRrD1Vpi//uKEy7aYEfI= +cloud.google.com/go/binaryauthorization v1.3.0/go.mod h1:lRZbKgjDIIQvzYQS1p99A7/U1JqvqeZg0wiI5tp6tg0= +cloud.google.com/go/binaryauthorization v1.4.0/go.mod h1:tsSPQrBd77VLplV70GUhBf/Zm3FsKmgSqgm4UmiDItk= +cloud.google.com/go/certificatemanager v1.3.0/go.mod h1:n6twGDvcUBFu9uBgt4eYvvf3sQ6My8jADcOVwHmzadg= +cloud.google.com/go/certificatemanager v1.4.0/go.mod h1:vowpercVFyqs8ABSmrdV+GiFf2H/ch3KyudYQEMM590= 
+cloud.google.com/go/channel v1.8.0/go.mod h1:W5SwCXDJsq/rg3tn3oG0LOxpAo6IMxNa09ngphpSlnk= +cloud.google.com/go/channel v1.9.0/go.mod h1:jcu05W0my9Vx4mt3/rEHpfxc9eKi9XwsdDL8yBMbKUk= +cloud.google.com/go/cloudbuild v1.3.0/go.mod h1:WequR4ULxlqvMsjDEEEFnOG5ZSRSgWOywXYDb1vPE6U= +cloud.google.com/go/cloudbuild v1.4.0/go.mod h1:5Qwa40LHiOXmz3386FrjrYM93rM/hdRr7b53sySrTqA= +cloud.google.com/go/clouddms v1.3.0/go.mod h1:oK6XsCDdW4Ib3jCCBugx+gVjevp2TMXFtgxvPSee3OM= +cloud.google.com/go/clouddms v1.4.0/go.mod h1:Eh7sUGCC+aKry14O1NRljhjyrr0NFC0G2cjwX0cByRk= +cloud.google.com/go/cloudtasks v1.5.0/go.mod h1:fD92REy1x5woxkKEkLdvavGnPJGEn8Uic9nWuLzqCpY= +cloud.google.com/go/cloudtasks v1.6.0/go.mod h1:C6Io+sxuke9/KNRkbQpihnW93SWDU3uXt92nu85HkYI= +cloud.google.com/go/cloudtasks v1.7.0/go.mod h1:ImsfdYWwlWNJbdgPIIGJWC+gemEGTBK/SunNQQNCAb4= +cloud.google.com/go/cloudtasks v1.8.0/go.mod h1:gQXUIwCSOI4yPVK7DgTVFiiP0ZW/eQkydWzwVMdHxrI= +cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= +cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= +cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= +cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= +cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= +cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= +cloud.google.com/go/compute v1.10.0/go.mod h1:ER5CLbMxl90o2jtNbGSbtfOpQKR0t15FOtRsugnLrlU= +cloud.google.com/go/compute v1.12.0/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= +cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= +cloud.google.com/go/compute v1.13.0/go.mod h1:5aPTS0cUNMIc1CE546K+Th6weJUNQErARyZtRXDJ8GE= +cloud.google.com/go/compute v1.14.0/go.mod h1:YfLtxrj9sU4Yxv+sXzZkyPjEyPBZfXHUvjxega5vAdo= +cloud.google.com/go/compute v1.15.1/go.mod 
h1:bjjoF/NtFUrkD/urWfdHaKuOPDR5nWIs63rR+SXhcpA= +cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZEXYonfTBHHFPO/4UU= +cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= +cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= +cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= +cloud.google.com/go/contactcenterinsights v1.3.0/go.mod h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY= +cloud.google.com/go/contactcenterinsights v1.4.0/go.mod h1:L2YzkGbPsv+vMQMCADxJoT9YiTTnSEd6fEvCeHTYVck= +cloud.google.com/go/container v1.6.0/go.mod h1:Xazp7GjJSeUYo688S+6J5V+n/t+G5sKBTFkKNudGRxg= +cloud.google.com/go/container v1.7.0/go.mod h1:Dp5AHtmothHGX3DwwIHPgq45Y8KmNsgN3amoYfxVkLo= +cloud.google.com/go/containeranalysis v0.5.1/go.mod h1:1D92jd8gRR/c0fGMlymRgxWD3Qw9C1ff6/T7mLgVL8I= +cloud.google.com/go/containeranalysis v0.6.0/go.mod h1:HEJoiEIu+lEXM+k7+qLCci0h33lX3ZqoYFdmPcoO7s4= +cloud.google.com/go/datacatalog v1.3.0/go.mod h1:g9svFY6tuR+j+hrTw3J2dNcmI0dzmSiyOzm8kpLq0a0= +cloud.google.com/go/datacatalog v1.5.0/go.mod h1:M7GPLNQeLfWqeIm3iuiruhPzkt65+Bx8dAKvScX8jvs= +cloud.google.com/go/datacatalog v1.6.0/go.mod h1:+aEyF8JKg+uXcIdAmmaMUmZ3q1b/lKLtXCmXdnc0lbc= +cloud.google.com/go/datacatalog v1.7.0/go.mod h1:9mEl4AuDYWw81UGc41HonIHH7/sn52H0/tc8f8ZbZIE= +cloud.google.com/go/datacatalog v1.8.0/go.mod h1:KYuoVOv9BM8EYz/4eMFxrr4DUKhGIOXxZoKYF5wdISM= +cloud.google.com/go/dataflow v0.6.0/go.mod h1:9QwV89cGoxjjSR9/r7eFDqqjtvbKxAK2BaYU6PVk9UM= +cloud.google.com/go/dataflow v0.7.0/go.mod h1:PX526vb4ijFMesO1o202EaUmouZKBpjHsTlCtB4parQ= +cloud.google.com/go/dataform v0.3.0/go.mod h1:cj8uNliRlHpa6L3yVhDOBrUXH+BPAO1+KFMQQNSThKo= +cloud.google.com/go/dataform v0.4.0/go.mod h1:fwV6Y4Ty2yIFL89huYlEkwUPtS7YZinZbzzj5S9FzCE= +cloud.google.com/go/dataform v0.5.0/go.mod h1:GFUYRe8IBa2hcomWplodVmUx/iTL0FrsauObOM3Ipr0= 
+cloud.google.com/go/datafusion v1.4.0/go.mod h1:1Zb6VN+W6ALo85cXnM1IKiPw+yQMKMhB9TsTSRDo/38= +cloud.google.com/go/datafusion v1.5.0/go.mod h1:Kz+l1FGHB0J+4XF2fud96WMmRiq/wj8N9u007vyXZ2w= +cloud.google.com/go/datalabeling v0.5.0/go.mod h1:TGcJ0G2NzcsXSE/97yWjIZO0bXj0KbVlINXMG9ud42I= +cloud.google.com/go/datalabeling v0.6.0/go.mod h1:WqdISuk/+WIGeMkpw/1q7bK/tFEZxsrFJOJdY2bXvTQ= +cloud.google.com/go/dataplex v1.3.0/go.mod h1:hQuRtDg+fCiFgC8j0zV222HvzFQdRd+SVX8gdmFcZzA= +cloud.google.com/go/dataplex v1.4.0/go.mod h1:X51GfLXEMVJ6UN47ESVqvlsRplbLhcsAt0kZCCKsU0A= +cloud.google.com/go/dataproc v1.7.0/go.mod h1:CKAlMjII9H90RXaMpSxQ8EU6dQx6iAYNPcYPOkSbi8s= +cloud.google.com/go/dataproc v1.8.0/go.mod h1:5OW+zNAH0pMpw14JVrPONsxMQYMBqJuzORhIBfBn9uI= +cloud.google.com/go/dataqna v0.5.0/go.mod h1:90Hyk596ft3zUQ8NkFfvICSIfHFh1Bc7C4cK3vbhkeo= +cloud.google.com/go/dataqna v0.6.0/go.mod h1:1lqNpM7rqNLVgWBJyk5NF6Uen2PHym0jtVJonplVsDA= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/datastore v1.10.0/go.mod h1:PC5UzAmDEkAmkfaknstTYbNpgE49HAgW2J1gcgUfmdM= +cloud.google.com/go/datastream v1.2.0/go.mod h1:i/uTP8/fZwgATHS/XFu0TcNUhuA0twZxxQ3EyCUQMwo= +cloud.google.com/go/datastream v1.3.0/go.mod h1:cqlOX8xlyYF/uxhiKn6Hbv6WjwPPuI9W2M9SAXwaLLQ= +cloud.google.com/go/datastream v1.4.0/go.mod h1:h9dpzScPhDTs5noEMQVWP8Wx8AFBRyS0s8KWPx/9r0g= +cloud.google.com/go/datastream v1.5.0/go.mod h1:6TZMMNPwjUqZHBKPQ1wwXpb0d5VDVPl2/XoS5yi88q4= +cloud.google.com/go/deploy v1.4.0/go.mod h1:5Xghikd4VrmMLNaF6FiRFDlHb59VM59YoDQnOUdsH/c= +cloud.google.com/go/deploy v1.5.0/go.mod h1:ffgdD0B89tToyW/U/D2eL0jN2+IEV/3EMuXHA0l4r+s= +cloud.google.com/go/dialogflow v1.15.0/go.mod h1:HbHDWs33WOGJgn6rfzBW1Kv807BE3O1+xGbn59zZWI4= +cloud.google.com/go/dialogflow v1.16.1/go.mod h1:po6LlzGfK+smoSmTBnbkIZY2w8ffjz/RcGSS+sh1el0= +cloud.google.com/go/dialogflow 
v1.17.0/go.mod h1:YNP09C/kXA1aZdBgC/VtXX74G/TKn7XVCcVumTflA+8= +cloud.google.com/go/dialogflow v1.18.0/go.mod h1:trO7Zu5YdyEuR+BhSNOqJezyFQ3aUzz0njv7sMx/iek= +cloud.google.com/go/dialogflow v1.19.0/go.mod h1:JVmlG1TwykZDtxtTXujec4tQ+D8SBFMoosgy+6Gn0s0= +cloud.google.com/go/dlp v1.6.0/go.mod h1:9eyB2xIhpU0sVwUixfBubDoRwP+GjeUoxxeueZmqvmM= +cloud.google.com/go/dlp v1.7.0/go.mod h1:68ak9vCiMBjbasxeVD17hVPxDEck+ExiHavX8kiHG+Q= +cloud.google.com/go/documentai v1.7.0/go.mod h1:lJvftZB5NRiFSX4moiye1SMxHx0Bc3x1+p9e/RfXYiU= +cloud.google.com/go/documentai v1.8.0/go.mod h1:xGHNEB7CtsnySCNrCFdCyyMz44RhFEEX2Q7UD0c5IhU= +cloud.google.com/go/documentai v1.9.0/go.mod h1:FS5485S8R00U10GhgBC0aNGrJxBP8ZVpEeJ7PQDZd6k= +cloud.google.com/go/documentai v1.10.0/go.mod h1:vod47hKQIPeCfN2QS/jULIvQTugbmdc0ZvxxfQY1bg4= +cloud.google.com/go/domains v0.6.0/go.mod h1:T9Rz3GasrpYk6mEGHh4rymIhjlnIuB4ofT1wTxDeT4Y= +cloud.google.com/go/domains v0.7.0/go.mod h1:PtZeqS1xjnXuRPKE/88Iru/LdfoRyEHYA9nFQf4UKpg= +cloud.google.com/go/edgecontainer v0.1.0/go.mod h1:WgkZ9tp10bFxqO8BLPqv2LlfmQF1X8lZqwW4r1BTajk= +cloud.google.com/go/edgecontainer v0.2.0/go.mod h1:RTmLijy+lGpQ7BXuTDa4C4ssxyXT34NIuHIgKuP4s5w= +cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU= +cloud.google.com/go/essentialcontacts v1.3.0/go.mod h1:r+OnHa5jfj90qIfZDO/VztSFqbQan7HV75p8sA+mdGI= +cloud.google.com/go/essentialcontacts v1.4.0/go.mod h1:8tRldvHYsmnBCHdFpvU+GL75oWiBKl80BiqlFh9tp+8= +cloud.google.com/go/eventarc v1.7.0/go.mod h1:6ctpF3zTnaQCxUjHUdcfgcA1A2T309+omHZth7gDfmc= +cloud.google.com/go/eventarc v1.8.0/go.mod h1:imbzxkyAU4ubfsaKYdQg04WS1NvncblHEup4kvF+4gw= +cloud.google.com/go/filestore v1.3.0/go.mod h1:+qbvHGvXU1HaKX2nD0WEPo92TP/8AQuCVEBXNY9z0+w= +cloud.google.com/go/filestore v1.4.0/go.mod h1:PaG5oDfo9r224f8OYXURtAsY+Fbyq/bLYoINEK8XQAI= +cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE= +cloud.google.com/go/functions v1.6.0/go.mod 
h1:3H1UA3qiIPRWD7PeZKLvHZ9SaQhR26XIJcC0A5GbvAk= +cloud.google.com/go/functions v1.7.0/go.mod h1:+d+QBcWM+RsrgZfV9xo6KfA1GlzJfxcfZcRPEhDDfzg= +cloud.google.com/go/functions v1.8.0/go.mod h1:RTZ4/HsQjIqIYP9a9YPbU+QFoQsAlYgrwOXJWHn1POY= +cloud.google.com/go/functions v1.9.0/go.mod h1:Y+Dz8yGguzO3PpIjhLTbnqV1CWmgQ5UwtlpzoyquQ08= +cloud.google.com/go/gaming v1.5.0/go.mod h1:ol7rGcxP/qHTRQE/RO4bxkXq+Fix0j6D4LFPzYTIrDM= +cloud.google.com/go/gaming v1.6.0/go.mod h1:YMU1GEvA39Qt3zWGyAVA9bpYz/yAhTvaQ1t2sK4KPUA= +cloud.google.com/go/gaming v1.7.0/go.mod h1:LrB8U7MHdGgFG851iHAfqUdLcKBdQ55hzXy9xBJz0+w= +cloud.google.com/go/gaming v1.8.0/go.mod h1:xAqjS8b7jAVW0KFYeRUxngo9My3f33kFmua++Pi+ggM= +cloud.google.com/go/gkebackup v0.2.0/go.mod h1:XKvv/4LfG829/B8B7xRkk8zRrOEbKtEam6yNfuQNH60= +cloud.google.com/go/gkebackup v0.3.0/go.mod h1:n/E671i1aOQvUxT541aTkCwExO/bTer2HDlj4TsBRAo= +cloud.google.com/go/gkeconnect v0.5.0/go.mod h1:c5lsNAg5EwAy7fkqX/+goqFsU1Da/jQFqArp+wGNr/o= +cloud.google.com/go/gkeconnect v0.6.0/go.mod h1:Mln67KyU/sHJEBY8kFZ0xTeyPtzbq9StAVvEULYK16A= +cloud.google.com/go/gkehub v0.9.0/go.mod h1:WYHN6WG8w9bXU0hqNxt8rm5uxnk8IH+lPY9J2TV7BK0= +cloud.google.com/go/gkehub v0.10.0/go.mod h1:UIPwxI0DsrpsVoWpLB0stwKCP+WFVG9+y977wO+hBH0= +cloud.google.com/go/gkemulticloud v0.3.0/go.mod h1:7orzy7O0S+5kq95e4Hpn7RysVA7dPs8W/GgfUtsPbrA= +cloud.google.com/go/gkemulticloud v0.4.0/go.mod h1:E9gxVBnseLWCk24ch+P9+B2CoDFJZTyIgLKSalC7tuI= +cloud.google.com/go/grafeas v0.2.0/go.mod h1:KhxgtF2hb0P191HlY5besjYm6MqTSTj3LSI+M+ByZHc= +cloud.google.com/go/gsuiteaddons v1.3.0/go.mod h1:EUNK/J1lZEZO8yPtykKxLXI6JSVN2rg9bN8SXOa0bgM= +cloud.google.com/go/gsuiteaddons v1.4.0/go.mod h1:rZK5I8hht7u7HxFQcFei0+AtfS9uSushomRlg+3ua1o= +cloud.google.com/go/iam v0.1.0/go.mod h1:vcUNEa0pEm0qRVpmWepWaFMIAI8/hjB9mO8rNCJtF6c= +cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= +cloud.google.com/go/iam v0.5.0/go.mod h1:wPU9Vt0P4UmCux7mqtRu6jcpPAb74cP1fh50J3QpkUc= 
+cloud.google.com/go/iam v0.6.0/go.mod h1:+1AH33ueBne5MzYccyMHtEKqLE4/kJOibtffMHDMFMc= +cloud.google.com/go/iam v0.7.0/go.mod h1:H5Br8wRaDGNc8XP3keLc4unfUUZeyH3Sfl9XpQEYOeg= +cloud.google.com/go/iam v0.8.0/go.mod h1:lga0/y3iH6CX7sYqypWJ33hf7kkfXJag67naqGESjkE= +cloud.google.com/go/iap v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc= +cloud.google.com/go/iap v1.5.0/go.mod h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A= +cloud.google.com/go/ids v1.1.0/go.mod h1:WIuwCaYVOzHIj2OhN9HAwvW+DBdmUAdcWlFxRl+KubM= +cloud.google.com/go/ids v1.2.0/go.mod h1:5WXvp4n25S0rA/mQWAg1YEEBBq6/s+7ml1RDCW1IrcY= +cloud.google.com/go/iot v1.3.0/go.mod h1:r7RGh2B61+B8oz0AGE+J72AhA0G7tdXItODWsaA2oLs= +cloud.google.com/go/iot v1.4.0/go.mod h1:dIDxPOn0UvNDUMD8Ger7FIaTuvMkj+aGk94RPP0iV+g= +cloud.google.com/go/kms v1.4.0/go.mod h1:fajBHndQ+6ubNw6Ss2sSd+SWvjL26RNo/dr7uxsnnOA= +cloud.google.com/go/kms v1.5.0/go.mod h1:QJS2YY0eJGBg3mnDfuaCyLauWwBJiHRboYxJ++1xJNg= +cloud.google.com/go/kms v1.6.0/go.mod h1:Jjy850yySiasBUDi6KFUwUv2n1+o7QZFyuUJg6OgjA0= +cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic= +cloud.google.com/go/language v1.6.0/go.mod h1:6dJ8t3B+lUYfStgls25GusK04NLh3eDLQnWM3mdEbhI= +cloud.google.com/go/language v1.7.0/go.mod h1:DJ6dYN/W+SQOjF8e1hLQXMF21AkH2w9wiPzPCJa2MIE= +cloud.google.com/go/language v1.8.0/go.mod h1:qYPVHf7SPoNNiCL2Dr0FfEFNil1qi3pQEyygwpgVKB8= +cloud.google.com/go/lifesciences v0.5.0/go.mod h1:3oIKy8ycWGPUyZDR/8RNnTOYevhaMLqh5vLUXs9zvT8= +cloud.google.com/go/lifesciences v0.6.0/go.mod h1:ddj6tSX/7BOnhxCSd3ZcETvtNr8NZ6t/iPhY2Tyfu08= +cloud.google.com/go/logging v1.6.1/go.mod h1:5ZO0mHHbvm8gEmeEUHrmDlTDSu5imF6MUP9OfilNXBw= +cloud.google.com/go/longrunning v0.1.1/go.mod h1:UUFxuDWkv22EuY93jjmDMFT5GPQKeFVJBIF6QlTqdsE= +cloud.google.com/go/longrunning v0.3.0/go.mod h1:qth9Y41RRSUE69rDcOn6DdK3HfQfsUI0YSmW3iIlLJc= +cloud.google.com/go/managedidentities v1.3.0/go.mod h1:UzlW3cBOiPrzucO5qWkNkh0w33KFtBJU281hacNvsdE= 
+cloud.google.com/go/managedidentities v1.4.0/go.mod h1:NWSBYbEMgqmbZsLIyKvxrYbtqOsxY1ZrGM+9RgDqInM= +cloud.google.com/go/maps v0.1.0/go.mod h1:BQM97WGyfw9FWEmQMpZ5T6cpovXXSd1cGmFma94eubI= +cloud.google.com/go/mediatranslation v0.5.0/go.mod h1:jGPUhGTybqsPQn91pNXw0xVHfuJ3leR1wj37oU3y1f4= +cloud.google.com/go/mediatranslation v0.6.0/go.mod h1:hHdBCTYNigsBxshbznuIMFNe5QXEowAuNmmC7h8pu5w= +cloud.google.com/go/memcache v1.4.0/go.mod h1:rTOfiGZtJX1AaFUrOgsMHX5kAzaTQ8azHiuDoTPzNsE= +cloud.google.com/go/memcache v1.5.0/go.mod h1:dk3fCK7dVo0cUU2c36jKb4VqKPS22BTkf81Xq617aWM= +cloud.google.com/go/memcache v1.6.0/go.mod h1:XS5xB0eQZdHtTuTF9Hf8eJkKtR3pVRCcvJwtm68T3rA= +cloud.google.com/go/memcache v1.7.0/go.mod h1:ywMKfjWhNtkQTxrWxCkCFkoPjLHPW6A7WOTVI8xy3LY= +cloud.google.com/go/metastore v1.5.0/go.mod h1:2ZNrDcQwghfdtCwJ33nM0+GrBGlVuh8rakL3vdPY3XY= +cloud.google.com/go/metastore v1.6.0/go.mod h1:6cyQTls8CWXzk45G55x57DVQ9gWg7RiH65+YgPsNh9s= +cloud.google.com/go/metastore v1.7.0/go.mod h1:s45D0B4IlsINu87/AsWiEVYbLaIMeUSoxlKKDqBGFS8= +cloud.google.com/go/metastore v1.8.0/go.mod h1:zHiMc4ZUpBiM7twCIFQmJ9JMEkDSyZS9U12uf7wHqSI= +cloud.google.com/go/monitoring v1.7.0/go.mod h1:HpYse6kkGo//7p6sT0wsIC6IBDET0RhIsnmlA53dvEk= +cloud.google.com/go/monitoring v1.8.0/go.mod h1:E7PtoMJ1kQXWxPjB6mv2fhC5/15jInuulFdYYtlcvT4= +cloud.google.com/go/networkconnectivity v1.4.0/go.mod h1:nOl7YL8odKyAOtzNX73/M5/mGZgqqMeryi6UPZTk/rA= +cloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o= +cloud.google.com/go/networkconnectivity v1.6.0/go.mod h1:OJOoEXW+0LAxHh89nXd64uGG+FbQoeH8DtxCHVOMlaM= +cloud.google.com/go/networkconnectivity v1.7.0/go.mod h1:RMuSbkdbPwNMQjB5HBWD5MpTBnNm39iAVpC3TmsExt8= +cloud.google.com/go/networkmanagement v1.4.0/go.mod h1:Q9mdLLRn60AsOrPc8rs8iNV6OHXaGcDdsIQe1ohekq8= +cloud.google.com/go/networkmanagement v1.5.0/go.mod h1:ZnOeZ/evzUdUsnvRt792H0uYEnHQEMaz+REhhzJRcf4= +cloud.google.com/go/networksecurity v0.5.0/go.mod 
h1:xS6fOCoqpVC5zx15Z/MqkfDwH4+m/61A3ODiDV1xmiQ= +cloud.google.com/go/networksecurity v0.6.0/go.mod h1:Q5fjhTr9WMI5mbpRYEbiexTzROf7ZbDzvzCrNl14nyU= +cloud.google.com/go/notebooks v1.2.0/go.mod h1:9+wtppMfVPUeJ8fIWPOq1UnATHISkGXGqTkxeieQ6UY= +cloud.google.com/go/notebooks v1.3.0/go.mod h1:bFR5lj07DtCPC7YAAJ//vHskFBxA5JzYlH68kXVdk34= +cloud.google.com/go/notebooks v1.4.0/go.mod h1:4QPMngcwmgb6uw7Po99B2xv5ufVoIQ7nOGDyL4P8AgA= +cloud.google.com/go/notebooks v1.5.0/go.mod h1:q8mwhnP9aR8Hpfnrc5iN5IBhrXUy8S2vuYs+kBJ/gu0= +cloud.google.com/go/optimization v1.1.0/go.mod h1:5po+wfvX5AQlPznyVEZjGJTMr4+CAkJf2XSTQOOl9l4= +cloud.google.com/go/optimization v1.2.0/go.mod h1:Lr7SOHdRDENsh+WXVmQhQTrzdu9ybg0NecjHidBq6xs= +cloud.google.com/go/orchestration v1.3.0/go.mod h1:Sj5tq/JpWiB//X/q3Ngwdl5K7B7Y0KZ7bfv0wL6fqVA= +cloud.google.com/go/orchestration v1.4.0/go.mod h1:6W5NLFWs2TlniBphAViZEVhrXRSMgUGDfW7vrWKvsBk= +cloud.google.com/go/orgpolicy v1.4.0/go.mod h1:xrSLIV4RePWmP9P3tBl8S93lTmlAxjm06NSm2UTmKvE= +cloud.google.com/go/orgpolicy v1.5.0/go.mod h1:hZEc5q3wzwXJaKrsx5+Ewg0u1LxJ51nNFlext7Tanwc= +cloud.google.com/go/osconfig v1.7.0/go.mod h1:oVHeCeZELfJP7XLxcBGTMBvRO+1nQ5tFG9VQTmYS2Fs= +cloud.google.com/go/osconfig v1.8.0/go.mod h1:EQqZLu5w5XA7eKizepumcvWx+m8mJUhEwiPqWiZeEdg= +cloud.google.com/go/osconfig v1.9.0/go.mod h1:Yx+IeIZJ3bdWmzbQU4fxNl8xsZ4amB+dygAwFPlvnNo= +cloud.google.com/go/osconfig v1.10.0/go.mod h1:uMhCzqC5I8zfD9zDEAfvgVhDS8oIjySWh+l4WK6GnWw= +cloud.google.com/go/oslogin v1.4.0/go.mod h1:YdgMXWRaElXz/lDk1Na6Fh5orF7gvmJ0FGLIs9LId4E= +cloud.google.com/go/oslogin v1.5.0/go.mod h1:D260Qj11W2qx/HVF29zBg+0fd6YCSjSqLUkY/qEenQU= +cloud.google.com/go/oslogin v1.6.0/go.mod h1:zOJ1O3+dTU8WPlGEkFSh7qeHPPSoxrcMbbK1Nm2iX70= +cloud.google.com/go/oslogin v1.7.0/go.mod h1:e04SN0xO1UNJ1M5GP0vzVBFicIe4O53FOfcixIqTyXo= +cloud.google.com/go/phishingprotection v0.5.0/go.mod h1:Y3HZknsK9bc9dMi+oE8Bim0lczMU6hrX0UpADuMefr0= +cloud.google.com/go/phishingprotection v0.6.0/go.mod 
h1:9Y3LBLgy0kDTcYET8ZH3bq/7qni15yVUoAxiFxnlSUA= +cloud.google.com/go/policytroubleshooter v1.3.0/go.mod h1:qy0+VwANja+kKrjlQuOzmlvscn4RNsAc0e15GGqfMxg= +cloud.google.com/go/policytroubleshooter v1.4.0/go.mod h1:DZT4BcRw3QoO8ota9xw/LKtPa8lKeCByYeKTIf/vxdE= +cloud.google.com/go/privatecatalog v0.5.0/go.mod h1:XgosMUvvPyxDjAVNDYxJ7wBW8//hLDDYmnsNcMGq1K0= +cloud.google.com/go/privatecatalog v0.6.0/go.mod h1:i/fbkZR0hLN29eEWiiwue8Pb+GforiEIBnV9yrRUOKI= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/pubsub v1.26.0/go.mod h1:QgBH3U/jdJy/ftjPhTkyXNj543Tin1pRYcdcPRnFIRI= +cloud.google.com/go/pubsub v1.27.1/go.mod h1:hQN39ymbV9geqBnfQq6Xf63yNhUAhv9CZhzp5O6qsW0= +cloud.google.com/go/pubsublite v1.5.0/go.mod h1:xapqNQ1CuLfGi23Yda/9l4bBCKz/wC3KIJ5gKcxveZg= +cloud.google.com/go/recaptchaenterprise v1.3.1/go.mod h1:OdD+q+y4XGeAlxRaMn1Y7/GveP6zmq76byL6tjPE7d4= +cloud.google.com/go/recaptchaenterprise/v2 v2.1.0/go.mod h1:w9yVqajwroDNTfGuhmOjPDN//rZGySaf6PtFVcSCa7o= +cloud.google.com/go/recaptchaenterprise/v2 v2.2.0/go.mod h1:/Zu5jisWGeERrd5HnlS3EUGb/D335f9k51B/FVil0jk= +cloud.google.com/go/recaptchaenterprise/v2 v2.3.0/go.mod h1:O9LwGCjrhGHBQET5CA7dd5NwwNQUErSgEDit1DLNTdo= +cloud.google.com/go/recaptchaenterprise/v2 v2.4.0/go.mod h1:Am3LHfOuBstrLrNCBrlI5sbwx9LBg3te2N6hGvHn2mE= +cloud.google.com/go/recaptchaenterprise/v2 v2.5.0/go.mod h1:O8LzcHXN3rz0j+LBC91jrwI3R+1ZSZEWrfL7XHgNo9U= +cloud.google.com/go/recommendationengine v0.5.0/go.mod h1:E5756pJcVFeVgaQv3WNpImkFP8a+RptV6dDLGPILjvg= +cloud.google.com/go/recommendationengine v0.6.0/go.mod h1:08mq2umu9oIqc7tDy8sx+MNJdLG0fUi3vaSVbztHgJ4= +cloud.google.com/go/recommender v1.5.0/go.mod 
h1:jdoeiBIVrJe9gQjwd759ecLJbxCDED4A6p+mqoqDvTg= +cloud.google.com/go/recommender v1.6.0/go.mod h1:+yETpm25mcoiECKh9DEScGzIRyDKpZ0cEhWGo+8bo+c= +cloud.google.com/go/recommender v1.7.0/go.mod h1:XLHs/W+T8olwlGOgfQenXBTbIseGclClff6lhFVe9Bs= +cloud.google.com/go/recommender v1.8.0/go.mod h1:PkjXrTT05BFKwxaUxQmtIlrtj0kph108r02ZZQ5FE70= +cloud.google.com/go/redis v1.7.0/go.mod h1:V3x5Jq1jzUcg+UNsRvdmsfuFnit1cfe3Z/PGyq/lm4Y= +cloud.google.com/go/redis v1.8.0/go.mod h1:Fm2szCDavWzBk2cDKxrkmWBqoCiL1+Ctwq7EyqBCA/A= +cloud.google.com/go/redis v1.9.0/go.mod h1:HMYQuajvb2D0LvMgZmLDZW8V5aOC/WxstZHiy4g8OiA= +cloud.google.com/go/redis v1.10.0/go.mod h1:ThJf3mMBQtW18JzGgh41/Wld6vnDDc/F/F35UolRZPM= +cloud.google.com/go/resourcemanager v1.3.0/go.mod h1:bAtrTjZQFJkiWTPDb1WBjzvc6/kifjj4QBYuKCCoqKA= +cloud.google.com/go/resourcemanager v1.4.0/go.mod h1:MwxuzkumyTX7/a3n37gmsT3py7LIXwrShilPh3P1tR0= +cloud.google.com/go/resourcesettings v1.3.0/go.mod h1:lzew8VfESA5DQ8gdlHwMrqZs1S9V87v3oCnKCWoOuQU= +cloud.google.com/go/resourcesettings v1.4.0/go.mod h1:ldiH9IJpcrlC3VSuCGvjR5of/ezRrOxFtpJoJo5SmXg= +cloud.google.com/go/retail v1.8.0/go.mod h1:QblKS8waDmNUhghY2TI9O3JLlFk8jybHeV4BF19FrE4= +cloud.google.com/go/retail v1.9.0/go.mod h1:g6jb6mKuCS1QKnH/dpu7isX253absFl6iE92nHwlBUY= +cloud.google.com/go/retail v1.10.0/go.mod h1:2gDk9HsL4HMS4oZwz6daui2/jmKvqShXKQuB2RZ+cCc= +cloud.google.com/go/retail v1.11.0/go.mod h1:MBLk1NaWPmh6iVFSz9MeKG/Psyd7TAgm6y/9L2B4x9Y= +cloud.google.com/go/run v0.2.0/go.mod h1:CNtKsTA1sDcnqqIFR3Pb5Tq0usWxJJvsWOCPldRU3Do= +cloud.google.com/go/run v0.3.0/go.mod h1:TuyY1+taHxTjrD0ZFk2iAR+xyOXEA0ztb7U3UNA0zBo= +cloud.google.com/go/scheduler v1.4.0/go.mod h1:drcJBmxF3aqZJRhmkHQ9b3uSSpQoltBPGPxGAWROx6s= +cloud.google.com/go/scheduler v1.5.0/go.mod h1:ri073ym49NW3AfT6DZi21vLZrG07GXr5p3H1KxN5QlI= +cloud.google.com/go/scheduler v1.6.0/go.mod h1:SgeKVM7MIwPn3BqtcBntpLyrIJftQISRrYB5ZtT+KOk= +cloud.google.com/go/scheduler v1.7.0/go.mod h1:jyCiBqWW956uBjjPMMuX09n3x37mtyPJegEWKxRsn44= 
+cloud.google.com/go/secretmanager v1.6.0/go.mod h1:awVa/OXF6IiyaU1wQ34inzQNc4ISIDIrId8qE5QGgKA= +cloud.google.com/go/secretmanager v1.8.0/go.mod h1:hnVgi/bN5MYHd3Gt0SPuTPPp5ENina1/LxM+2W9U9J4= +cloud.google.com/go/secretmanager v1.9.0/go.mod h1:b71qH2l1yHmWQHt9LC80akm86mX8AL6X1MA01dW8ht4= +cloud.google.com/go/security v1.5.0/go.mod h1:lgxGdyOKKjHL4YG3/YwIL2zLqMFCKs0UbQwgyZmfJl4= +cloud.google.com/go/security v1.7.0/go.mod h1:mZklORHl6Bg7CNnnjLH//0UlAlaXqiG7Lb9PsPXLfD0= +cloud.google.com/go/security v1.8.0/go.mod h1:hAQOwgmaHhztFhiQ41CjDODdWP0+AE1B3sX4OFlq+GU= +cloud.google.com/go/security v1.9.0/go.mod h1:6Ta1bO8LXI89nZnmnsZGp9lVoVWXqsVbIq/t9dzI+2Q= +cloud.google.com/go/security v1.10.0/go.mod h1:QtOMZByJVlibUT2h9afNDWRZ1G96gVywH8T5GUSb9IA= +cloud.google.com/go/securitycenter v1.13.0/go.mod h1:cv5qNAqjY84FCN6Y9z28WlkKXyWsgLO832YiWwkCWcU= +cloud.google.com/go/securitycenter v1.14.0/go.mod h1:gZLAhtyKv85n52XYWt6RmeBdydyxfPeTrpToDPw4Auc= +cloud.google.com/go/securitycenter v1.15.0/go.mod h1:PeKJ0t8MoFmmXLXWm41JidyzI3PJjd8sXWaVqg43WWk= +cloud.google.com/go/securitycenter v1.16.0/go.mod h1:Q9GMaLQFUD+5ZTabrbujNWLtSLZIZF7SAR0wWECrjdk= +cloud.google.com/go/servicecontrol v1.4.0/go.mod h1:o0hUSJ1TXJAmi/7fLJAedOovnujSEvjKCAFNXPQ1RaU= +cloud.google.com/go/servicecontrol v1.5.0/go.mod h1:qM0CnXHhyqKVuiZnGKrIurvVImCs8gmqWsDoqe9sU1s= +cloud.google.com/go/servicedirectory v1.4.0/go.mod h1:gH1MUaZCgtP7qQiI+F+A+OpeKF/HQWgtAddhTbhL2bs= +cloud.google.com/go/servicedirectory v1.5.0/go.mod h1:QMKFL0NUySbpZJ1UZs3oFAmdvVxhhxB6eJ/Vlp73dfg= +cloud.google.com/go/servicedirectory v1.6.0/go.mod h1:pUlbnWsLH9c13yGkxCmfumWEPjsRs1RlmJ4pqiNjVL4= +cloud.google.com/go/servicedirectory v1.7.0/go.mod h1:5p/U5oyvgYGYejufvxhgwjL8UVXjkuw7q5XcG10wx1U= +cloud.google.com/go/servicemanagement v1.4.0/go.mod h1:d8t8MDbezI7Z2R1O/wu8oTggo3BI2GKYbdG4y/SJTco= +cloud.google.com/go/servicemanagement v1.5.0/go.mod h1:XGaCRe57kfqu4+lRxaFEAuqmjzF0r+gWHjWqKqBvKFo= +cloud.google.com/go/serviceusage v1.3.0/go.mod 
h1:Hya1cozXM4SeSKTAgGXgj97GlqUvF5JaoXacR1JTP/E= +cloud.google.com/go/serviceusage v1.4.0/go.mod h1:SB4yxXSaYVuUBYUml6qklyONXNLt83U0Rb+CXyhjEeU= +cloud.google.com/go/shell v1.3.0/go.mod h1:VZ9HmRjZBsjLGXusm7K5Q5lzzByZmJHf1d0IWHEN5X4= +cloud.google.com/go/shell v1.4.0/go.mod h1:HDxPzZf3GkDdhExzD/gs8Grqk+dmYcEjGShZgYa9URw= +cloud.google.com/go/spanner v1.41.0/go.mod h1:MLYDBJR/dY4Wt7ZaMIQ7rXOTLjYrmxLE/5ve9vFfWos= +cloud.google.com/go/speech v1.6.0/go.mod h1:79tcr4FHCimOp56lwC01xnt/WPJZc4v3gzyT7FoBkCM= +cloud.google.com/go/speech v1.7.0/go.mod h1:KptqL+BAQIhMsj1kOP2la5DSEEerPDuOP/2mmkhHhZQ= +cloud.google.com/go/speech v1.8.0/go.mod h1:9bYIl1/tjsAnMgKGHKmBZzXKEkGgtU+MpdDPTE9f7y0= +cloud.google.com/go/speech v1.9.0/go.mod h1:xQ0jTcmnRFFM2RfX/U+rk6FQNUF6DQlydUSyoooSpco= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= +cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= +cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc= +cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s= +cloud.google.com/go/storagetransfer v1.5.0/go.mod h1:dxNzUopWy7RQevYFHewchb29POFv3/AaBgnhqzqiK0w= +cloud.google.com/go/storagetransfer v1.6.0/go.mod h1:y77xm4CQV/ZhFZH75PLEXY0ROiS7Gh6pSKrM8dJyg6I= +cloud.google.com/go/talent v1.1.0/go.mod h1:Vl4pt9jiHKvOgF9KoZo6Kob9oV4lwd/ZD5Cto54zDRw= +cloud.google.com/go/talent v1.2.0/go.mod h1:MoNF9bhFQbiJ6eFD3uSsg0uBALw4n4gaCaEjBw9zo8g= 
+cloud.google.com/go/talent v1.3.0/go.mod h1:CmcxwJ/PKfRgd1pBjQgU6W3YBwiewmUzQYH5HHmSCmM= +cloud.google.com/go/talent v1.4.0/go.mod h1:ezFtAgVuRf8jRsvyE6EwmbTK5LKciD4KVnHuDEFmOOA= +cloud.google.com/go/texttospeech v1.4.0/go.mod h1:FX8HQHA6sEpJ7rCMSfXuzBcysDAuWusNNNvN9FELDd8= +cloud.google.com/go/texttospeech v1.5.0/go.mod h1:oKPLhR4n4ZdQqWKURdwxMy0uiTS1xU161C8W57Wkea4= +cloud.google.com/go/tpu v1.3.0/go.mod h1:aJIManG0o20tfDQlRIej44FcwGGl/cD0oiRyMKG19IQ= +cloud.google.com/go/tpu v1.4.0/go.mod h1:mjZaX8p0VBgllCzF6wcU2ovUXN9TONFLd7iz227X2Xg= +cloud.google.com/go/trace v1.3.0/go.mod h1:FFUE83d9Ca57C+K8rDl/Ih8LwOzWIV1krKgxg6N0G28= +cloud.google.com/go/trace v1.4.0/go.mod h1:UG0v8UBqzusp+z63o7FK74SdFE+AXpCLdFb1rshXG+Y= +cloud.google.com/go/translate v1.3.0/go.mod h1:gzMUwRjvOqj5i69y/LYLd8RrNQk+hOmIXTi9+nb3Djs= +cloud.google.com/go/translate v1.4.0/go.mod h1:06Dn/ppvLD6WvA5Rhdp029IX2Mi3Mn7fpMRLPvXT5Wg= +cloud.google.com/go/video v1.8.0/go.mod h1:sTzKFc0bUSByE8Yoh8X0mn8bMymItVGPfTuUBUyRgxk= +cloud.google.com/go/video v1.9.0/go.mod h1:0RhNKFRF5v92f8dQt0yhaHrEuH95m068JYOvLZYnJSw= +cloud.google.com/go/videointelligence v1.6.0/go.mod h1:w0DIDlVRKtwPCn/C4iwZIJdvC69yInhW0cfi+p546uU= +cloud.google.com/go/videointelligence v1.7.0/go.mod h1:k8pI/1wAhjznARtVT9U1llUaFNPh7muw8QyOUpavru4= +cloud.google.com/go/videointelligence v1.8.0/go.mod h1:dIcCn4gVDdS7yte/w+koiXn5dWVplOZkE+xwG9FgK+M= +cloud.google.com/go/videointelligence v1.9.0/go.mod h1:29lVRMPDYHikk3v8EdPSaL8Ku+eMzDljjuvRs105XoU= +cloud.google.com/go/vision v1.2.0/go.mod h1:SmNwgObm5DpFBme2xpyOyasvBc1aPdjvMk2bBk0tKD0= +cloud.google.com/go/vision/v2 v2.2.0/go.mod h1:uCdV4PpN1S0jyCyq8sIM42v2Y6zOLkZs+4R9LrGYwFo= +cloud.google.com/go/vision/v2 v2.3.0/go.mod h1:UO61abBx9QRMFkNBbf1D8B1LXdS2cGiiCRx0vSpZoUo= +cloud.google.com/go/vision/v2 v2.4.0/go.mod h1:VtI579ll9RpVTrdKdkMzckdnwMyX2JILb+MhPqRbPsY= +cloud.google.com/go/vision/v2 v2.5.0/go.mod h1:MmaezXOOE+IWa+cS7OhRRLK2cNv1ZL98zhqFFZaaH2E= +cloud.google.com/go/vmmigration 
v1.2.0/go.mod h1:IRf0o7myyWFSmVR1ItrBSFLFD/rJkfDCUTO4vLlJvsE= +cloud.google.com/go/vmmigration v1.3.0/go.mod h1:oGJ6ZgGPQOFdjHuocGcLqX4lc98YQ7Ygq8YQwHh9A7g= +cloud.google.com/go/vmwareengine v0.1.0/go.mod h1:RsdNEf/8UDvKllXhMz5J40XxDrNJNN4sagiox+OI208= +cloud.google.com/go/vpcaccess v1.4.0/go.mod h1:aQHVbTWDYUR1EbTApSVvMq1EnT57ppDmQzZ3imqIk4w= +cloud.google.com/go/vpcaccess v1.5.0/go.mod h1:drmg4HLk9NkZpGfCmZ3Tz0Bwnm2+DKqViEpeEpOq0m8= +cloud.google.com/go/webrisk v1.4.0/go.mod h1:Hn8X6Zr+ziE2aNd8SliSDWpEnSS1u4R9+xXZmFiHmGE= +cloud.google.com/go/webrisk v1.5.0/go.mod h1:iPG6fr52Tv7sGk0H6qUFzmL3HHZev1htXuWDEEsqMTg= +cloud.google.com/go/webrisk v1.6.0/go.mod h1:65sW9V9rOosnc9ZY7A7jsy1zoHS5W9IAXv6dGqhMQMc= +cloud.google.com/go/webrisk v1.7.0/go.mod h1:mVMHgEYH0r337nmt1JyLthzMr6YxwN1aAIEc2fTcq7A= +cloud.google.com/go/websecurityscanner v1.3.0/go.mod h1:uImdKm2wyeXQevQJXeh8Uun/Ym1VqworNDlBXQevGMo= +cloud.google.com/go/websecurityscanner v1.4.0/go.mod h1:ebit/Fp0a+FWu5j4JOmJEV8S8CzdTkAS77oDsiSqYWQ= +cloud.google.com/go/workflows v1.6.0/go.mod h1:6t9F5h/unJz41YqfBmqSASJSXccBLtD1Vwf+KmJENM0= +cloud.google.com/go/workflows v1.7.0/go.mod h1:JhSrZuVZWuiDfKEFxU0/F1PQjmpnpcoISEXH2bcHC3M= +cloud.google.com/go/workflows v1.8.0/go.mod h1:ysGhmEajwZxGn1OhGOGKsTXc5PyxOc0vfKf5Af+to4M= +cloud.google.com/go/workflows v1.9.0/go.mod h1:ZGkj1aFIOd9c8Gerkjjq7OW7I5+l6cSvT3ujaO/WwSA= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= 
+github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go 
v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= +github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo= +github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod 
h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod 
h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.5.0 h1:LUVKkCeviFUMKqHa4tXIIij/lbhnMbP7Fn5wKdKkRh4= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod 
h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod 
h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/googleapis/enterprise-certificate-proxy 
v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= +github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= +github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= +github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= +github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= +github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo= +github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY= +github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8= +github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= +github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= +github.com/ianlancetaylor/demangle 
v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= +github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= +github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod 
h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= +github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= +github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io 
v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod 
h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod 
h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net 
v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod 
h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= +golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= 
+golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= 
+golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= +golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod 
h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools 
v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools 
v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools 
v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors 
v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= +google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= 
+google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= +google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= +google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= +google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= +google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= +google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= +google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= +google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= +google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= +google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= +google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= +google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.77.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= +google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= +google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= +google.golang.org/api v0.85.0/go.mod h1:AqZf8Ep9uZ2pyTvgL+x0D3Zt0eoT9b5E8fmzfu6FO2g= +google.golang.org/api v0.90.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.93.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.95.0/go.mod h1:eADj+UBuxkh5zlrSntJghuNeg8HwQ1w5lTKkuqaETEI= +google.golang.org/api v0.96.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= 
+google.golang.org/api v0.97.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.98.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.99.0/go.mod h1:1YOf74vkVndF7pG6hIHuINsM7eWwpVTAfNMNiL91A08= +google.golang.org/api v0.100.0/go.mod h1:ZE3Z2+ZOr87Rx7dqFsdRQkRBk36kDtp/h+QpHbB7a70= +google.golang.org/api v0.102.0/go.mod h1:3VFl6/fzoA+qNuS1N1/VfXY4LjoXN/wzeIp7TweWwGo= +google.golang.org/api v0.103.0/go.mod h1:hGtW6nK1AC+d9si/UBhw8Xli+QMOf6xyNAyJw4qU9w0= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto 
v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= 
+google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod 
h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= +google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= +google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto 
v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= +google.golang.org/genproto v0.0.0-20220329172620-7be39ac1afc7/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= 
+google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220722212130-b98a9ff5e252/go.mod h1:GkXuJDJ6aQ7lnJcRF+SJVgFdQhypqgl3LB1C9vabdRE= +google.golang.org/genproto v0.0.0-20220801145646-83ce21fca29f/go.mod h1:iHe1svFLAZg9VWz891+QbRMwUv9O/1Ww+/mngYeThbc= +google.golang.org/genproto v0.0.0-20220815135757-37a418bb8959/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220817144833-d7fd3f11b9b1/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220822174746-9e6da59bd2fc/go.mod 
h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220829144015-23454907ede3/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220829175752-36a9c930ecbf/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220913154956-18f8339a66a5/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220914142337-ca0e39ece12f/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220915135415-7fd63a7952de/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220916172020-2692e8806bfa/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220919141832-68c03719ef51/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006/go.mod h1:ht8XFiar2npT/g4vkk7O0WYS1sHOHbdujxbEp7CJWbw= +google.golang.org/genproto v0.0.0-20220926165614-551eb538f295/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= +google.golang.org/genproto v0.0.0-20220926220553-6981cbe3cfce/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= +google.golang.org/genproto v0.0.0-20221010155953-15ba04fc1c0e/go.mod h1:3526vdqwhZAwq4wsRUaVG555sVgsNmIjRtO7t/JH29U= +google.golang.org/genproto v0.0.0-20221014173430-6e2ab493f96b/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= +google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= +google.golang.org/genproto v0.0.0-20221024153911-1573dae28c9c/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= +google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= +google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c/go.mod h1:CGI5F/G+E5bKwmfYo09AXuVN4dD894kIKUFmVbP2/Fo= +google.golang.org/genproto 
v0.0.0-20221114212237-e4508ebdbee1/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221117204609-8f9c96812029/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221201164419-0e50fba7f41c/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221202195650-67e5cbc046fd/go.mod h1:cTsE614GARnxrLsqKREzmNYJACSWWpAWdNMwnD7c2BE= +google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f h1:BWUVssLB0HVOSY78gIdvk1dTVYtT1y8SBWtPYuTJ/6w= +google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.32.0 h1:zWTV+LMdc3kaiJMSTOFz2UgSBgx8RNQoTGiZu3fR9S0= -google.golang.org/grpc v1.32.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= 
+google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= +google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.50.0/go.mod 
h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww= +google.golang.org/grpc v1.53.0 h1:LAv2ds7cmFV/XTS3XG1NneeENYrXGmorPxsBbptIjNc= +google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= 
+google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/t/kubernetes/discovery/stream/kubernetes.t b/t/kubernetes/discovery/stream/kubernetes.t index 3df431fb64a8..5d9e06c86291 100644 --- a/t/kubernetes/discovery/stream/kubernetes.t +++ 
b/t/kubernetes/discovery/stream/kubernetes.t @@ -51,11 +51,7 @@ use t::APISIX; my $nginx_binary = $ENV{'TEST_NGINX_BINARY'} || 'nginx'; my $version = eval { `$nginx_binary -V 2>&1` }; -if ($version =~ m/\/1.19.3/) { - plan(skip_all => "require OpenResty version >= 1.19.9.1"); -} else { - plan('no_plan'); -} +plan('no_plan'); repeat_each(1); log_level('warn'); diff --git a/t/lib/chaitin_waf_server.lua b/t/lib/chaitin_waf_server.lua new file mode 100644 index 000000000000..4130bd019edb --- /dev/null +++ b/t/lib/chaitin_waf_server.lua @@ -0,0 +1,60 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- +local _M = {} + +local function get_socket() + ngx.flush(true) + local sock, err = ngx.req.socket(true) + if not sock then + ngx.log(ngx.ERR, "failed to get the request socket: " .. 
tostring(err)) + return nil + end + return sock +end + +function _M.pass() + local sock = get_socket() + sock:send({ string.char(65), string.char(1), string.char(0), string.char(0), string.char(0) }) + sock:send(".") + sock:send({ string.char(165), string.char(77), string.char(0), string.char(0), string.char(0) }) + sock:send("{\"event_id\":\"1e902e84bf5a4ead8f7760a0fe2c7719\",\"request_hit_whitelist\":false}") + + ngx.exit(200) +end + +function _M.reject() + local sock = get_socket() + sock:send({ string.char(65), string.char(1), string.char(0), string.char(0), string.char(0) }) + sock:send("?") + sock:send({ string.char(2), string.char(3), string.char(0), string.char(0), string.char(0) }) + sock:send("403") + sock:send({ string.char(37), string.char(77), string.char(0), string.char(0), string.char(0) }) + sock:send("{\"event_id\":\"b3c6ce574dc24f09a01f634a39dca83b\",\"request_hit_whitelist\":false}") + sock:send({ string.char(35), string.char(79), string.char(0), string.char(0), string.char(0) }) + sock:send("Set-Cookie:sl-session=ulgbPfMSuWRNsi/u7Aj9aA==; Domain=; Path=/; Max-Age=86400\n") + sock:send({ string.char(164), string.char(51), string.char(0), string.char(0), string.char(0) }) + sock:send("") + + ngx.exit(200) +end + +function _M.timeout() + ngx.sleep(100) + _M.pass() +end + +return _M diff --git a/t/lib/grafana_loki.lua b/t/lib/grafana_loki.lua new file mode 100644 index 000000000000..fc1173982130 --- /dev/null +++ b/t/lib/grafana_loki.lua @@ -0,0 +1,63 @@ +-- +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. 
You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. +-- + +local cjson = require("cjson") +local http = require("resty.http") + +local _M = {} + + +function _M.fetch_logs_from_loki(from, to, options) + options = options or {} + + local direction = options.direction or "backward" + local limit = options.limit or "10" + local query = options.query or [[{job="apisix"} | json]] + local url = options.url or "http://127.0.0.1:3100/loki/api/v1/query_range" + local headers = options.headers or { + ["X-Scope-OrgID"] = "tenant_1" + } + + local httpc = http.new() + local res, err = httpc:request_uri(url, { + query = { + start = from, + ["end"] = to, + direction = direction, + limit = limit, + query = query, + }, + headers = headers + }) + + if not res or err then + return nil, err + end + + if res.status > 300 then + return nil, "HTTP status code: " .. res.status .. ", body: " .. res.body + end + + local data = cjson.decode(res.body) + if not data then + return nil, "failed to decode response body: " .. 
res.body + end + return data, nil +end + + +return _M diff --git a/t/lib/server.lua b/t/lib/server.lua index 542c6eaef046..c7386e1b7e84 100644 --- a/t/lib/server.lua +++ b/t/lib/server.lua @@ -570,6 +570,7 @@ function _M.go() local action = string.sub(ngx.var.uri, 2) action = string.gsub(action, "[/\\.-]", "_") if not action or not _M[action] then + ngx.log(ngx.WARN, "undefined path in test server, uri: ", ngx.var.request_uri) return ngx.exit(404) end diff --git a/t/node/client-mtls.t b/t/node/client-mtls.t index e3c6386934f3..a0de7d4cfa89 100644 --- a/t/node/client-mtls.t +++ b/t/node/client-mtls.t @@ -503,3 +503,118 @@ curl --cert t/certs/mtls_client.crt --key t/certs/mtls_client.key -k https://loc qr/400 Bad Request/ --- error_log client certificate verified with SNI localhost, but the host is test.com + + + +=== TEST 15: set verification (2 ssl objects, both have mTLS) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin") + local json = require("toolkit.json") + local ssl_ca_cert = t.read_file("t/certs/mtls_ca.crt") + local ssl_ca_cert2 = t.read_file("t/certs/apisix.crt") + local ssl_cert = t.read_file("t/certs/mtls_client.crt") + local ssl_key = t.read_file("t/certs/mtls_client.key") + local data = { + upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1980"] = 1, + }, + }, + uri = "/*" + } + assert(t.test('/apisix/admin/routes/1', + ngx.HTTP_PUT, + json.encode(data) + )) + + local data = { + cert = ssl_cert, + key = ssl_key, + sni = "localhost", + client = { + ca = ssl_ca_cert, + depth = 2, + skip_mtls_uri_regex = { + "/hello[0-9]+", + } + } + } + local code, body = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + json.encode(data) + ) + + if code >= 300 then + ngx.status = code + return + end + + local data = { + cert = ssl_cert, + key = ssl_key, + sni = "test.com", + client = { + ca = ssl_ca_cert2, + depth = 2, + } + } + local code, body = t.test('/apisix/admin/ssls/2', + ngx.HTTP_PUT, + json.encode(data) + ) 
+ + if code >= 300 then + ngx.status = code + end + ngx.print(body) + } + } +--- request +GET /t + + + +=== TEST 16: skip the mtls, although no client cert provided +--- exec +curl -k https://localhost:1994/hello1 +--- response_body eval +qr/hello1 world/ + + + +=== TEST 17: skip the mtls, although with wrong client cert +--- exec +curl -k --cert t/certs/test2.crt --key t/certs/test2.key -k https://localhost:1994/hello1 +--- response_body eval +qr/hello1 world/ + + + +=== TEST 18: mtls failed, returns 400 +--- exec +curl -k https://localhost:1994/hello +--- response_body eval +qr/400 Bad Request/ +--- error_log +client certificate was not present + + + +=== TEST 19: mtls failed, wrong client cert +--- exec +curl --cert t/certs/test2.crt --key t/certs/test2.key -k https://localhost:1994/hello +--- response_body eval +qr/400 Bad Request/ +--- error_log +client certificate verification is not passed: FAILED + + + +=== TEST 20: mtls failed, at handshake phase +--- exec +curl -k -v --resolve "test.com:1994:127.0.0.1" https://test.com:1994/hello +--- error_log +peer did not return a certificate diff --git a/t/node/data_encrypt2.t b/t/node/data_encrypt2.t index a1c73729e794..14296189842b 100644 --- a/t/node/data_encrypt2.t +++ b/t/node/data_encrypt2.t @@ -697,3 +697,46 @@ apisix: } --- response_body [200,200,503] + + + +=== TEST 12: verify whether print warning log when disable data_encryption +--- yaml_config +apisix: + data_encryption: + enable: false +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/2', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr" + } + } + }]] + ) + if code > 300 then + ngx.status = code + return + end + ngx.say(body) + } + } +--- reponse_body +passed +--- no_error_log 
+failed to get schema for plugin diff --git a/t/node/healthcheck-stop-checker.t b/t/node/healthcheck-stop-checker.t index 1a5eaf286704..54ed61763c8f 100644 --- a/t/node/healthcheck-stop-checker.t +++ b/t/node/healthcheck-stop-checker.t @@ -165,15 +165,6 @@ create new checker: table: 0x content_by_lua_block { local t = require("lib.test_admin").test - -- release the clean handler of previous test - local code, _, body = t('/apisix/admin/routes/1', "DELETE") - - if code > 300 then - ngx.status = code - ngx.say(body) - return - end - local code, _, body = t('/apisix/admin/upstreams/stopchecker', "PUT", [[{"type":"roundrobin","nodes":{"127.0.0.1:1980":1,"127.0.0.1:1981":1},"checks":{"active":{"http_path":"/status","healthy":{"interval":1,"successes":1},"unhealthy":{"interval":1,"http_failures":2}}}}]] @@ -256,7 +247,6 @@ ok --- grep_error_log eval qr/create new checker: table: 0x|try to release checker: table: 0x/ --- grep_error_log_out -try to release checker: table: 0x create new checker: table: 0x try to release checker: table: 0x create new checker: table: 0x diff --git a/t/node/merge-route.t b/t/node/merge-route.t index 474e08abbd13..b09b272cf795 100644 --- a/t/node/merge-route.t +++ b/t/node/merge-route.t @@ -235,10 +235,10 @@ qr/merge_service_route.*"time_window":60/] ngx.HTTP_PUT, [[{ "upstream": { - "scheme": "https", + "scheme": "http", "type": "roundrobin", "nodes": { - "httpbin.org:443": 1 + "test.com:1980": 1 } } }]] @@ -266,10 +266,10 @@ passed ngx.HTTP_PUT, [[{ "uri": "/fake", - "host": "httpbin.org", + "host": "test.com", "plugins": { "proxy-rewrite": { - "uri": "/get" + "uri": "/echo" } }, "service_id": "1" @@ -293,10 +293,9 @@ passed --- request GET /fake --- more_headers -host: httpbin.org ---- response_body eval -qr/"Host": "httpbin.org"/ ---- timeout: 5 +host: test.com +--- response_headers +host: test.com @@ -304,7 +303,7 @@ qr/"Host": "httpbin.org"/ --- request GET /fake --- more_headers -host: httpbin.orgxxx +host: test.comxxx --- error_code: 
404 diff --git a/t/node/plugin-configs.t b/t/node/plugin-configs.t index 11f9601030ad..f601ae86d773 100644 --- a/t/node/plugin-configs.t +++ b/t/node/plugin-configs.t @@ -113,10 +113,10 @@ __DATA__ hello world --- grep_error_log eval -qr/conf_version: \d+#\d/ +qr/conf_version: \d+#\d+/ --- grep_error_log_out eval -qr/conf_version: \d+#1 -conf_version: \d+#2 +qr/conf_version: \d+#\d+ +conf_version: \d+#\d+ / diff --git a/t/node/route-domain.t b/t/node/route-domain.t index f45338e66a38..729aceb7391b 100644 --- a/t/node/route-domain.t +++ b/t/node/route-domain.t @@ -92,9 +92,9 @@ qr/dns resolver domain: www.apiseven.com to \d+.\d+.\d+.\d+/ }, "type": "roundrobin", "pass_host": "rewrite", - "upstream_host": "httpbin.org" + "upstream_host": "test.com" }, - "uri": "/uri" + "uri": "/echo" }]] ) @@ -113,10 +113,9 @@ passed === TEST 5: hit route --- request -GET /uri ---- response_body eval -qr/host: httpbin.org/ ---- timeout: 10 +GET /echo +--- response_headers +host: test.com @@ -130,13 +129,13 @@ qr/host: httpbin.org/ [[{ "upstream": { "nodes": { - "httpbin.org:80": 1 + "test.com:1980": 1 }, "type": "roundrobin", "desc": "new upstream", "pass_host": "node" }, - "uri": "/get" + "uri": "/echo" }]] ) @@ -155,10 +154,9 @@ passed === TEST 7: hit route --- request -GET /get ---- response_body eval -qr/"Host": "httpbin.org"/ ---- timeout: 10 +GET /echo +--- response_headers +host: test.com:1980 diff --git a/t/node/ssl-protocols.t b/t/node/ssl-protocols.t new file mode 100644 index 000000000000..76fba7ff06ea --- /dev/null +++ b/t/node/ssl-protocols.t @@ -0,0 +1,298 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX; + +repeat_each(1); +log_level('info'); +no_root_location(); +no_shuffle(); + +my $openssl_bin = $ENV{OPENSSL111_BIN}; +if (! -x $openssl_bin) { + $ENV{OPENSSL111_BIN} = '/usr/local/openresty/openssl111/bin/openssl'; + if (! -x $ENV{OPENSSL111_BIN}) { + plan(skip_all => "openssl111 not installed"); + } +} + +plan('no_plan'); + +add_block_preprocessor(sub { + my ($block) = @_; + + my $yaml_config = $block->yaml_config // <<_EOC_; +deployment: + role: traditional + role_traditional: + config_provider: etcd + admin: + admin_key: null +apisix: + node_listen: 1984 + proxy_mode: http&stream + stream_proxy: + tcp: + - 9100 + enable_resolv_search_opt: false + ssl: + ssl_protocols: TLSv1.1 TLSv1.2 TLSv1.3 + ssl_ciphers: ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:DHE-RSA-AES256-SHA:DHE-DSS-AES256-SHA +_EOC_ + + $block->set_value("yaml_config", $yaml_config); +}); + +run_tests(); + +__DATA__ + +=== TEST 1: set route +--- config +location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uris": ["/hello", "/world"] + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + ngx.say(body) + } +} +--- request +GET /t +--- 
response_body +passed + + + +=== TEST 2: create ssl for test.com (unset ssl_protocols) +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + local ssl_cert = t.read_file("t/certs/apisix.crt") + local ssl_key = t.read_file("t/certs/apisix.key") + local data = {cert = ssl_cert, key = ssl_key, sni = "test.com"} + + local code, body = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + core.json.encode(data), + [[{ + "value": { + "sni": "test.com", + "ssl_protocols": null, + }, + "key": "/apisix/ssls/1" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 3: Successfully, access test.com with TLSv1.3 +--- exec +echo -n "Q" | $OPENSSL111_BIN s_client -connect 127.0.0.1:1994 -servername test.com -tls1_3 2>&1 | cat +--- response_body eval +qr/Server certificate/ + + + +=== TEST 4: Successfully, access test.com with TLSv1.2 +--- exec +curl -k -v --tls-max 1.2 --tlsv1.2 --resolve "test.com:1994:127.0.0.1" https://test.com:1994/hello 2>&1 | cat +--- response_body eval +qr/TLSv1\.2 \(IN\), TLS handshake, Server hello(?s).*hello world/ + + + +=== TEST 5: Successfully, access test.com with TLSv1.1 +--- exec +echo -n "Q" | $OPENSSL111_BIN s_client -connect 127.0.0.1:1994 -servername test.com -tls1_1 2>&1 | cat +--- response_body eval +qr/Server certificate/ + + + +=== TEST 6: set TLSv1.2 and TLSv1.3 for test.com +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + local ssl_cert = t.read_file("t/certs/apisix.crt") + local ssl_key = t.read_file("t/certs/apisix.key") + local data = {cert = ssl_cert, key = ssl_key, sni = "test.com", ssl_protocols = {"TLSv1.2", "TLSv1.3"}} + + local code, body = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + core.json.encode(data), + [[{ + "value": { + "sni": "test.com", + "ssl_protocols": ["TLSv1.2", "TLSv1.3"], + }, + "key": 
"/apisix/ssls/1" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 7: Set TLSv1.3 for the test2.com +--- config +location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + local ssl_cert = t.read_file("t/certs/test2.crt") + local ssl_key = t.read_file("t/certs/test2.key") + local data = {cert = ssl_cert, key = ssl_key, sni = "test2.com", ssl_protocols = {"TLSv1.3"}} + + local code, body = t.test('/apisix/admin/ssls/2', + ngx.HTTP_PUT, + core.json.encode(data), + [[{ + "value": { + "sni": "test2.com" + }, + "key": "/apisix/ssls/2" + }]] + ) + + ngx.status = code + ngx.say(body) + } +} +--- response_body +passed +--- request +GET /t + + + +=== TEST 8: Successfully, access test.com with TLSv1.3 +--- exec +echo -n "Q" | $OPENSSL111_BIN s_client -connect 127.0.0.1:1994 -servername test.com -tls1_3 2>&1 | cat +--- response_body eval +qr/Server certificate/ + + + +=== TEST 9: Successfully, access test.com with TLSv1.2 +--- exec +curl -k -v --tls-max 1.2 --tlsv1.2 --resolve "test.com:1994:127.0.0.1" https://test.com:1994/hello 2>&1 | cat +--- response_body eval +qr/TLSv1\.2 \(IN\), TLS handshake, Server hello(?s).*hello world/ + + + +=== TEST 10: Successfully, access test2.com with TLSv1.3 +--- exec +echo -n "Q" | $OPENSSL111_BIN s_client -connect 127.0.0.1:1994 -servername test2.com -tls1_3 2>&1 | cat +--- response_body eval +qr/Server certificate/ + + + +=== TEST 11: Failed, access test2.com with TLSv1.2 +--- exec +curl -k -v --tls-max 1.2 --tlsv1.2 --resolve "test2.com:1994:127.0.0.1" https://test2.com:1994/hello 2>&1 | cat +--- response_body eval +qr/TLSv1\.2 \(IN\), TLS alert/ + + + +=== TEST 12: set TLSv1.1 for test.com +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + + local ssl_cert = t.read_file("t/certs/apisix.crt") + local ssl_key = 
t.read_file("t/certs/apisix.key") + local data = {cert = ssl_cert, key = ssl_key, sni = "test.com", ssl_protocols = {"TLSv1.1"}} + + local code, body = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + core.json.encode(data), + [[{ + "value": { + "sni": "test.com", + "ssl_protocols": ["TLSv1.1"], + }, + "key": "/apisix/ssls/1" + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 13: Successfully, access test.com with TLSv1.1 +--- exec +echo -n "Q" | $OPENSSL111_BIN s_client -connect 127.0.0.1:1994 -servername test.com -tls1_1 2>&1 | cat +--- response_body eval +qr/Server certificate/ + + + +=== TEST 14: Failed, access test.com with TLSv1.3 +--- exec +echo -n "Q" | $OPENSSL111_BIN s_client -connect 127.0.0.1:1994 -servername test.com -tls1_3 2>&1 | cat +--- response_body eval +qr/tlsv1 alert/ diff --git a/t/node/upstream-domain.t b/t/node/upstream-domain.t index 35dccbb5ee04..24048467f390 100644 --- a/t/node/upstream-domain.t +++ b/t/node/upstream-domain.t @@ -110,7 +110,7 @@ qr/dns resolver domain: foo.com to \d+.\d+.\d+.\d+/ ngx.HTTP_PUT, [[{ "nodes": { - "httpbin.orgx:80": 0 + "test.comx:80": 0 }, "type": "roundrobin", "desc": "new upstream" @@ -150,8 +150,8 @@ GET /t status: 503 status: 503 --- error_log -failed to parse domain: httpbin.orgx -failed to parse domain: httpbin.orgx +failed to parse domain: test.comx +failed to parse domain: test.comx --- timeout: 10 diff --git a/t/node/upstream-websocket.t b/t/node/upstream-websocket.t index a374b638f6f5..a19a202deb00 100644 --- a/t/node/upstream-websocket.t +++ b/t/node/upstream-websocket.t @@ -270,7 +270,10 @@ passed local client = require "resty.websocket.client" local wb = client:new() local uri = "wss://127.0.0.1:1994/websocket_handshake" - local ok, err = wb:connect(uri) + local opts = { + server_name = "127.0.0.1" + } + local ok, err = wb:connect(uri, opts) if not ok then ngx.say("failed to connect: " .. 
err) return diff --git a/t/node/upstream.t b/t/node/upstream.t index b5d3acf2a121..c99b10aecad6 100644 --- a/t/node/upstream.t +++ b/t/node/upstream.t @@ -205,7 +205,7 @@ GET /t ngx.HTTP_PUT, [[{ "nodes": { - "httpbin.org:80": 1 + "test.com:1980": 1 }, "type": "roundrobin", "desc": "new upstream", @@ -234,7 +234,7 @@ passed local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, [[{ - "uri": "/get", + "uri": "/echo", "upstream_id": "1" }]] ) @@ -254,9 +254,9 @@ passed === TEST 12: hit route --- request -GET /get ---- response_body eval -qr/"Host": "httpbin.org"/ +GET /echo +--- response_headers +host: test.com:1980 @@ -274,7 +274,7 @@ qr/"Host": "httpbin.org"/ "type": "roundrobin", "desc": "new upstream", "pass_host": "rewrite", - "upstream_host": "httpbin.org" + "upstream_host": "test.com" }]] ) @@ -299,7 +299,7 @@ passed local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, [[{ - "uri": "/uri", + "uri": "/echo", "upstream_id": "1" }]] ) @@ -319,9 +319,9 @@ passed === TEST 15: hit route --- request -GET /uri ---- response_body eval -qr/host: httpbin.org/ +GET /echo +--- response_headers +host: test.com diff --git a/t/plugin/authz-casdoor.t b/t/plugin/authz-casdoor.t index 926d3daef97a..25fab487a29d 100644 --- a/t/plugin/authz-casdoor.t +++ b/t/plugin/authz-casdoor.t @@ -129,12 +129,15 @@ done "endpoint_addr":"]] .. fake_uri .. 
[[", "client_id":"7ceb9b7fda4a9061ec1c", "client_secret":"3416238e1edf915eac08b8fe345b2b95cdba7e04" + }, + "proxy-rewrite": { + "uri": "/echo" } }, "upstream": { "type": "roundrobin", "nodes": { - "httpbin.org:80": 1 + "test.com:1980": 1 } } }]] @@ -466,12 +469,15 @@ apisix: "endpoint_addr": "http://127.0.0.1:10420", "client_id":"7ceb9b7fda4a9061ec1c", "client_secret":"3416238e1edf915eac08b8fe345b2b95cdba7e04" + }, + "proxy-rewrite": { + "uri": "/echo" } }, "upstream": { "type": "roundrobin", "nodes": { - "httpbin.org:80": 1 + "test.com:1980": 1 } } }]] diff --git a/t/plugin/authz-keycloak.t b/t/plugin/authz-keycloak.t index b09c1d792484..58a633113330 100644 --- a/t/plugin/authz-keycloak.t +++ b/t/plugin/authz-keycloak.t @@ -31,7 +31,7 @@ __DATA__ local plugin = require("apisix.plugins.authz-keycloak") local ok, err = plugin.check_schema({ client_id = "foo", - token_endpoint = "https://host.domain/auth/realms/foo/protocol/openid-connect/token" + token_endpoint = "https://host.domain/realms/foo/protocol/openid-connect/token" }) if not ok then ngx.say(err) @@ -54,7 +54,7 @@ done local plugin = require("apisix.plugins.authz-keycloak") local ok, err = plugin.check_schema({ client_id = "foo", - discovery = "https://host.domain/auth/realms/foo/.well-known/uma2-configuration" + discovery = "https://host.domain/realms/foo/.well-known/uma2-configuration" }) if not ok then ngx.say(err) @@ -78,8 +78,8 @@ done local ok, err = plugin.check_schema({ client_id = "foo", lazy_load_paths = true, - token_endpoint = "https://host.domain/auth/realms/foo/protocol/openid-connect/token", - resource_registration_endpoint = "https://host.domain/auth/realms/foo/authz/protection/resource_set" + token_endpoint = "https://host.domain/realms/foo/protocol/openid-connect/token", + resource_registration_endpoint = "https://host.domain/realms/foo/authz/protection/resource_set" }) if not ok then ngx.say(err) @@ -103,7 +103,7 @@ done local ok, err = plugin.check_schema({ client_id = "foo", 
lazy_load_paths = true, - discovery = "https://host.domain/auth/realms/foo/.well-known/uma2-configuration" + discovery = "https://host.domain/realms/foo/.well-known/uma2-configuration" }) if not ok then ngx.say(err) @@ -125,9 +125,9 @@ done content_by_lua_block { local plugin = require("apisix.plugins.authz-keycloak") local ok, err = plugin.check_schema({ - discovery = "https://host.domain/auth/realms/foo/.well-known/uma2-configuration", - token_endpoint = "https://host.domain/auth/realms/foo/protocol/openid-connect/token", - resource_registration_endpoint = "https://host.domain/auth/realms/foo/authz/protection/resource_set", + discovery = "https://host.domain/realms/foo/.well-known/uma2-configuration", + token_endpoint = "https://host.domain/realms/foo/protocol/openid-connect/token", + resource_registration_endpoint = "https://host.domain/realms/foo/authz/protection/resource_set", client_id = "University", client_secret = "secret", grant_type = "urn:ietf:params:oauth:grant-type:uma-ticket", @@ -187,7 +187,7 @@ done location /t { content_by_lua_block { local plugin = require("apisix.plugins.authz-keycloak") - local ok, err = plugin.check_schema({discovery = "https://host.domain/auth/realms/foo/.well-known/uma2-configuration"}) + local ok, err = plugin.check_schema({discovery = "https://host.domain/realms/foo/.well-known/uma2-configuration"}) if not ok then ngx.say(err) end @@ -210,7 +210,7 @@ done local plugin = require("apisix.plugins.authz-keycloak") local ok, err = plugin.check_schema({ client_id = "foo", - token_endpoint = "https://host.domain/auth/realms/foo/protocol/openid-connect/token", + token_endpoint = "https://host.domain/realms/foo/protocol/openid-connect/token", lazy_load_paths = true }) if not ok then @@ -238,7 +238,7 @@ done [[{ "plugins": { "authz-keycloak": { - "token_endpoint": "https://127.0.0.1:8443/auth/realms/University/protocol/openid-connect/token", + "token_endpoint": 
"https://127.0.0.1:8443/realms/University/protocol/openid-connect/token", "permissions": ["course_resource#delete"], "client_id": "course_management", "grant_type": "urn:ietf:params:oauth:grant-type:uma-ticket", @@ -296,7 +296,7 @@ GET /t --- response_body false --- error_log -Error while sending authz request to https://127.0.0.1:8443/auth/realms/University/protocol/openid-connect/token: 18 +Error while sending authz request to https://127.0.0.1:8443/realms/University/protocol/openid-connect/token: 18 --- error_code: 503 @@ -311,7 +311,7 @@ Error while sending authz request to https://127.0.0.1:8443/auth/realms/Universi [[{ "plugins": { "authz-keycloak": { - "token_endpoint": "https://127.0.0.1:8443/auth/realms/University/protocol/openid-connect/token", + "token_endpoint": "https://127.0.0.1:8443/realms/University/protocol/openid-connect/token", "permissions": ["course_resource#delete"], "client_id": "course_management", "grant_type": "urn:ietf:params:oauth:grant-type:uma-ticket", @@ -382,7 +382,7 @@ Request denied: HTTP 401 Unauthorized. 
Body: {"error":"HTTP 401 Unauthorized"} [[{ "plugins": { "authz-keycloak": { - "token_endpoint": "http://127.0.0.1:8443/auth/realms/University/protocol/openid-connect/token", + "token_endpoint": "http://127.0.0.1:8443/realms/University/protocol/openid-connect/token", "client_id": "course_management", "grant_type": "urn:ietf:params:oauth:grant-type:uma-ticket", "policy_enforcement_mode": "ENFORCING", @@ -447,7 +447,7 @@ GET /t [[{ "plugins": { "authz-keycloak": { - "token_endpoint": "http://127.0.0.1:8443/auth/realms/University/protocol/openid-connect/token", + "token_endpoint": "http://127.0.0.1:8443/realms/University/protocol/openid-connect/token", "client_id": "course_management", "grant_type": "urn:ietf:params:oauth:grant-type:uma-ticket", "policy_enforcement_mode": "ENFORCING", @@ -515,7 +515,7 @@ Location: http://127.0.0.1/test [[{ "plugins": { "authz-keycloak": { - "token_endpoint": "https://127.0.0.1:8443/auth/realms/University/protocol/openid-connect/token", + "token_endpoint": "https://127.0.0.1:8443/realms/University/protocol/openid-connect/token", "permissions": ["course_resource#view"], "client_id": "course_management", "client_secret": "d1ec69e9-55d2-4109-a3ea-befa071579d5", @@ -587,7 +587,7 @@ true [[{ "plugins": { "authz-keycloak": { - "token_endpoint": "https://127.0.0.1:8443/auth/realms/University/protocol/openid-connect/token", + "token_endpoint": "https://127.0.0.1:8443/realms/University/protocol/openid-connect/token", "permissions": ["course_resource#view"], "client_id": "course_management", "client_secret": "d1ec69e9-55d2-4109-a3ea-befa071579d5", diff --git a/t/plugin/authz-keycloak2.t b/t/plugin/authz-keycloak2.t index 65faded2eab3..09d5a8157f7c 100644 --- a/t/plugin/authz-keycloak2.t +++ b/t/plugin/authz-keycloak2.t @@ -34,7 +34,7 @@ __DATA__ [[{ "plugins": { "authz-keycloak": { - "token_endpoint": "http://127.0.0.1:8090/auth/realms/University/protocol/openid-connect/token", + "token_endpoint": 
"http://127.0.0.1:8080/realms/University/protocol/openid-connect/token", "permissions": ["course_resource#view"], "client_id": "course_management", "grant_type": "urn:ietf:params:oauth:grant-type:uma-ticket", @@ -71,7 +71,7 @@ passed local json_decode = require("toolkit.json").decode local http = require "resty.http" local httpc = http.new() - local uri = "http://127.0.0.1:8090/auth/realms/University/protocol/openid-connect/token" + local uri = "http://127.0.0.1:8080/realms/University/protocol/openid-connect/token" local res, err = httpc:request_uri(uri, { method = "POST", body = "grant_type=password&client_id=course_management&client_secret=d1ec69e9-55d2-4109-a3ea-befa071579d5&username=teacher@gmail.com&password=123456", @@ -96,6 +96,7 @@ passed if res.status == 200 then ngx.say(true) else + ngx.say(res.status) ngx.say(false) end else @@ -147,7 +148,7 @@ Invalid bearer token [[{ "plugins": { "authz-keycloak": { - "discovery": "http://127.0.0.1:8090/auth/realms/University/.well-known/uma2-configuration", + "discovery": "http://127.0.0.1:8080/realms/University/.well-known/uma2-configuration", "permissions": ["course_resource#view"], "client_id": "course_management", "grant_type": "urn:ietf:params:oauth:grant-type:uma-ticket", @@ -184,7 +185,7 @@ passed local json_decode = require("toolkit.json").decode local http = require "resty.http" local httpc = http.new() - local uri = "http://127.0.0.1:8090/auth/realms/University/protocol/openid-connect/token" + local uri = "http://127.0.0.1:8080/realms/University/protocol/openid-connect/token" local res, err = httpc:request_uri(uri, { method = "POST", body = "grant_type=password&client_id=course_management&client_secret=d1ec69e9-55d2-4109-a3ea-befa071579d5&username=teacher@gmail.com&password=123456", @@ -260,7 +261,7 @@ Invalid bearer token [[{ "plugins": { "authz-keycloak": { - "token_endpoint": "http://127.0.0.1:8090/auth/realms/University/protocol/openid-connect/token", + "token_endpoint": 
"http://127.0.0.1:8080/realms/University/protocol/openid-connect/token", "permissions": ["course_resource#delete"], "client_id": "course_management", "grant_type": "urn:ietf:params:oauth:grant-type:uma-ticket", @@ -297,7 +298,7 @@ passed local json_decode = require("toolkit.json").decode local http = require "resty.http" local httpc = http.new() - local uri = "http://127.0.0.1:8090/auth/realms/University/protocol/openid-connect/token" + local uri = "http://127.0.0.1:8080/realms/University/protocol/openid-connect/token" local res, err = httpc:request_uri(uri, { method = "POST", body = "grant_type=password&client_id=course_management&client_secret=d1ec69e9-55d2-4109-a3ea-befa071579d5&username=student@gmail.com&password=123456", @@ -348,7 +349,7 @@ true [[{ "plugins": { "authz-keycloak": { - "discovery": "http://127.0.0.1:8090/auth/realms/University/.well-known/uma2-configuration", + "discovery": "http://127.0.0.1:8080/realms/University/.well-known/uma2-configuration", "client_id": "course_management", "client_secret": "d1ec69e9-55d2-4109-a3ea-befa071579d5", "lazy_load_paths": true, @@ -385,7 +386,7 @@ passed local json_decode = require("toolkit.json").decode local http = require "resty.http" local httpc = http.new() - local uri = "http://127.0.0.1:8090/auth/realms/University/protocol/openid-connect/token" + local uri = "http://127.0.0.1:8080/realms/University/protocol/openid-connect/token" local res, err = httpc:request_uri(uri, { method = "POST", body = "grant_type=password&client_id=course_management&client_secret=d1ec69e9-55d2-4109-a3ea-befa071579d5&username=teacher@gmail.com&password=123456", @@ -431,7 +432,7 @@ true local json_decode = require("toolkit.json").decode local http = require "resty.http" local httpc = http.new() - local uri = "http://127.0.0.1:8090/auth/realms/University/protocol/openid-connect/token" + local uri = "http://127.0.0.1:8080/realms/University/protocol/openid-connect/token" local res, err = httpc:request_uri(uri, { method = "POST", body = 
"grant_type=password&client_id=course_management&client_secret=d1ec69e9-55d2-4109-a3ea-befa071579d5&username=student@gmail.com&password=123456", @@ -477,7 +478,7 @@ true local json_decode = require("toolkit.json").decode local http = require "resty.http" local httpc = http.new() - local uri = "http://127.0.0.1:8090/auth/realms/University/protocol/openid-connect/token" + local uri = "http://127.0.0.1:8080/realms/University/protocol/openid-connect/token" local res, err = httpc:request_uri(uri, { method = "POST", body = "grant_type=password&client_id=course_management&client_secret=d1ec69e9-55d2-4109-a3ea-befa071579d5&username=teacher@gmail.com&password=123456", @@ -523,7 +524,7 @@ true local json_decode = require("toolkit.json").decode local http = require "resty.http" local httpc = http.new() - local uri = "http://127.0.0.1:8090/auth/realms/University/protocol/openid-connect/token" + local uri = "http://127.0.0.1:8080/realms/University/protocol/openid-connect/token" local res, err = httpc:request_uri(uri, { method = "POST", body = "grant_type=password&client_id=course_management&client_secret=d1ec69e9-55d2-4109-a3ea-befa071579d5&username=student@gmail.com&password=123456", @@ -571,7 +572,7 @@ true local json_decode = require("toolkit.json").decode local http = require "resty.http" local httpc = http.new() - local uri = "http://127.0.0.1:8090/auth/realms/University/protocol/openid-connect/token" + local uri = "http://127.0.0.1:8080/realms/University/protocol/openid-connect/token" local res, err = httpc:request_uri(uri, { method = "POST", body = "grant_type=password&client_id=course_management&client_secret=d1ec69e9-55d2-4109-a3ea-befa071579d5&username=teacher@gmail.com&password=123456", @@ -617,7 +618,7 @@ true local json_decode = require("toolkit.json").decode local http = require "resty.http" local httpc = http.new() - local uri = "http://127.0.0.1:8090/auth/realms/University/protocol/openid-connect/token" + local uri = 
"http://127.0.0.1:8080/realms/University/protocol/openid-connect/token" local res, err = httpc:request_uri(uri, { method = "POST", body = "grant_type=password&client_id=course_management&client_secret=d1ec69e9-55d2-4109-a3ea-befa071579d5&username=student@gmail.com&password=123456", diff --git a/t/plugin/authz-keycloak3.t b/t/plugin/authz-keycloak3.t index 2671e903999f..619417bc36d5 100644 --- a/t/plugin/authz-keycloak3.t +++ b/t/plugin/authz-keycloak3.t @@ -42,7 +42,7 @@ __DATA__ [[{ "plugins": { "authz-keycloak": { - "token_endpoint": "http://127.0.0.1:8090/auth/realms/University/protocol/openid-connect/token", + "token_endpoint": "http://127.0.0.1:8080/realms/University/protocol/openid-connect/token", "access_denied_redirect_uri": "http://127.0.0.1/test", "permissions": ["course_resource#delete"], "client_id": "course_management", @@ -78,7 +78,7 @@ passed local json_decode = require("toolkit.json").decode local http = require "resty.http" local httpc = http.new() - local uri = "http://127.0.0.1:8090/auth/realms/University/protocol/openid-connect/token" + local uri = "http://127.0.0.1:8080/realms/University/protocol/openid-connect/token" local res, err = httpc:request_uri(uri, { method = "POST", body = "grant_type=password&client_id=course_management&client_secret=d1ec69e9-55d2-4109-a3ea-befa071579d5&username=student@gmail.com&password=123456", @@ -127,7 +127,7 @@ apisix: [[{ "plugins": { "authz-keycloak": { - "token_endpoint": "https://127.0.0.1:8443/auth/realms/University/protocol/openid-connect/token", + "token_endpoint": "https://127.0.0.1:8443/realms/University/protocol/openid-connect/token", "permissions": ["course_resource#view"], "client_id": "course_management", "client_secret": "d1ec69e9-55d2-4109-a3ea-befa071579d5", diff --git a/t/plugin/batch-requests-grpc.t b/t/plugin/batch-requests-grpc.t new file mode 100644 index 000000000000..4acd5005434c --- /dev/null +++ b/t/plugin/batch-requests-grpc.t @@ -0,0 +1,205 @@ +# +# Licensed to the Apache Software 
Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if (!$block->no_error_log && !$block->error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } + + my $extra_yaml_config = <<_EOC_; +plugins: + - grpc-transcode + - public-api + - batch-requests +_EOC_ + + $block->set_value("extra_yaml_config", $extra_yaml_config); +}); + +run_tests; + +__DATA__ + +=== TEST 1: pre-create public API route +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "public-api": {} + }, + "uri": "/apisix/batch-requests" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 2: set proto(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local etcd = require("apisix.core.etcd") + local code, body = t('/apisix/admin/protos/1', + ngx.HTTP_PUT, + [[{ + "content" : "syntax = \"proto3\"; + package helloworld; + service 
Greeter { + rpc SayHello (HelloRequest) returns (HelloReply) {} + } + message HelloRequest { + string name = 1; + } + message HelloReply { + string message = 1; + }" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 3: set routes(id: 2) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/2', + ngx.HTTP_PUT, + [[{ + "methods": ["GET", "POST"], + "uri": "/grpctest", + "plugins": { + "grpc-transcode": { + "proto_id": "1", + "service": "helloworld.Greeter", + "method": "SayHello" + } + }, + "upstream": { + "scheme": "grpc", + "type": "roundrobin", + "nodes": { + "127.0.0.1:50051": 1 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 4: hit route +--- request +GET /grpctest?name=world +--- response_body eval +qr/\{"message":"Hello world"\}/ + + + +=== TEST 5: successful batch-requests for both grpc and http +--- config + location = /aggregate { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin").test + local code, body = t('/apisix/batch-requests', + ngx.HTTP_POST, + [=[{ + "headers": { + "Content-Type":"application/json" + }, + "pipeline":[ + { + "method":"GET", + "path":"/grpctest" + }, + { + "method":"GET", + "path":"/get" + } + ] + }]=], + [=[[ + { + "status": 200, + "body":"{\"message\":\"Hello \"}" + }, + { + "status": 200, + "body":"hello" + } + ]]=]) + + ngx.status = code + ngx.say(body) + } + } + + location = /get { + content_by_lua_block { + ngx.print("hello") + } + } +--- request +GET /aggregate +--- response_body +passed diff --git a/t/plugin/body-transformer.t b/t/plugin/body-transformer.t index ae2211de4b92..8baf2ef0de16 100644 --- a/t/plugin/body-transformer.t +++ b/t/plugin/body-transformer.t @@ -141,7 +141,7 @@ 
location /demo { assert(res.status == 200) local data1 = core.json.decode(res.body) local data2 = core.json.decode[[{"status":"200","currency":"EUR","population":46704314,"capital":"Madrid","name":"Spain"}]] - assert(core.json.stably_encode(data1), core.json.stably_encode(data2)) + assert(core.json.stably_encode(data1) == core.json.stably_encode(data2)) } } @@ -652,3 +652,245 @@ foobar: assert(res.status == 200) } } + + + +=== TEST 10: cooperation of proxy-cache plugin +--- http_config +lua_shared_dict memory_cache 50m; +--- config +location /demo { + content_by_lua_block { + ngx.say([[ + + + + + hello + + + + ]]) + } +} + + location /t { + content_by_lua_block { + local t = require("lib.test_admin") + + local req_template = ngx.encode_base64[[ + + + + + {{_escape_xml(country)}} + + + + ]] + + local rsp_template = ngx.encode_base64[[ + {"result": {*_escape_json(Envelope.Body.CapitalCityResponse.CapitalCityResult)*}} + ]] + + local code, body = t.test('/apisix/admin/routes/1', + ngx.HTTP_PUT, + string.format([[{ + "uri": "/capital", + "plugins": { + "proxy-rewrite": { + "set": { + "Accept-Encoding": "identity", + "Content-Type": "text/xml" + }, + "uri": "/demo" + }, + "proxy-cache":{ + "cache_strategy": "memory", + "cache_bypass": ["$arg_bypass"], + "cache_http_status": [200], + "cache_key": ["$uri", "-cache-id"], + "cache_method": ["POST"], + "hide_cache_headers": true, + "no_cache": ["$arg_test"], + "cache_zone": "memory_cache" + }, + "body-transformer": { + "request": { + "input_format": "json", + "template": "%s" + }, + "response": { + "input_format": "xml", + "template": "%s" + } + }, + "response-rewrite":{ + "headers": { + "set": { + "Content-Type": "application/json" + } + } + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:%d": 1 + } + } + }]], req_template, rsp_template, ngx.var.server_port) + ) + + local core = require("apisix.core") + local http = require("resty.http") + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/capital" + local body = [[{"country": "foo"}]] + local opt = {method = "POST", body = body} + local httpc = http.new() + + local res = httpc:request_uri(uri, opt) + assert(res.status == 200) + local data = core.json.decode(res.body) + assert(data.result == "hello") + assert(res.headers["Apisix-Cache-Status"] == "MISS") + + local res2 = httpc:request_uri(uri, opt) + assert(res2.status == 200) + local data2 = core.json.decode(res2.body) + assert(data2.result == "hello") + assert(res2.headers["Apisix-Cache-Status"] == "HIT") + } + } + + + +=== TEST 11: return raw body with _body anytime +--- http_config +--- config + location /demo { + content_by_lua_block { + ngx.header.content_type = "application/json" + ngx.print('{"result": "hello world"}') + } + } + + location /t { + content_by_lua_block { + local t = require("lib.test_admin") + + local rsp_template = ngx.encode_base64[[ + {"raw_body": {*_escape_json(_body)*}, "result": {*_escape_json(result)*}} + ]] + + local code, body = t.test('/apisix/admin/routes/1', + ngx.HTTP_PUT, + string.format([[{ + "uri": "/capital", + "plugins": { + "proxy-rewrite": { + "uri": "/demo" + }, + "body-transformer": { + "response": { + "template": "%s" + } + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:%d": 1 + } + } + }]], rsp_template, ngx.var.server_port) + ) + + local core = require("apisix.core") + local http = require("resty.http") + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. 
"/capital" + local opt = {method = "GET", headers = {["Content-Type"] = "application/json"}} + local httpc = http.new() + + local res = httpc:request_uri(uri, opt) + assert(res.status == 200) + local data = core.json.decode(res.body) + assert(data.result == "hello world") + assert(data.raw_body == '{"result": "hello world"}') + } + } + + + +=== TEST 12: empty xml value should be rendered as empty string +--- config + location /demo { + content_by_lua_block { + ngx.print([[ + + + + 33333333333 + + 33333333333 + + + + + + ]]) + } + } + + location /t { + content_by_lua_block { + local t = require("lib.test_admin") + + local rsp_template = ngx.encode_base64[[ +{ "KOVKood":"{{Envelope.Body.RR58isikEpiletResponse.response.KOVKood}}" } + ]] + + local code, body = t.test('/apisix/admin/routes/1', + ngx.HTTP_PUT, + string.format([[{ + "uri": "/ws", + "plugins": { + "proxy-rewrite": { + "uri": "/demo" + }, + "body-transformer": { + "response": { + "input_format": "xml", + "template": "%s" + } + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:%d": 1 + } + } + }]], rsp_template, ngx.var.server_port) + ) + + if code >= 300 then + ngx.status = code + return + end + ngx.sleep(0.5) + + local core = require("apisix.core") + local http = require("resty.http") + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/ws" + local opt = {method = "GET"} + local httpc = http.new() + local res = httpc:request_uri(uri, opt) + assert(res.status == 200) + local data1 = core.json.decode(res.body) + local data2 = core.json.decode[[{"KOVKood":""}]] + assert(core.json.stably_encode(data1) == core.json.stably_encode(data2)) + } + } diff --git a/t/plugin/chaitin-waf-reject.t b/t/plugin/chaitin-waf-reject.t new file mode 100644 index 000000000000..262ed28f04f6 --- /dev/null +++ b/t/plugin/chaitin-waf-reject.t @@ -0,0 +1,139 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + my $stream_default_server = <<_EOC_; + server { + listen 8088; + listen 8089; + content_by_lua_block { + require("lib.chaitin_waf_server").reject() + } + } +_EOC_ + + $block->set_value("extra_stream_config", $stream_default_server); + $block->set_value("stream_conf_enable", 1); + + # setup default conf.yaml + my $extra_yaml_config = $block->extra_yaml_config // <<_EOC_; +apisix: + stream_proxy: # TCP/UDP L4 proxy + only: true # Enable L4 proxy only without L7 proxy. + tcp: + - addr: 9100 # Set the TCP proxy listening ports. + tls: true + - addr: "127.0.0.1:9101" + udp: # Set the UDP proxy listening ports. 
+ - 9200 + - "127.0.0.1:9201" +_EOC_ + + $block->set_value("extra_yaml_config", $extra_yaml_config); + + if (!$block->request) { + # use /do instead of /t because stream server will inject a default /t location + $block->set_value("request", "GET /do"); + } + + if ((!defined $block->error_log) && (!defined $block->no_error_log)) { + $block->set_value("no_error_log", "[error]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: set route +--- config + location /do { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_metadata/chaitin-waf', + ngx.HTTP_PUT, + [[{ + "nodes": [ + { + "host": "127.0.0.1", + "port": 8088 + }, + { + "host": "127.0.0.1", + "port": 8089 + } + ] + }]] + ) + + if code >= 300 then + ngx.status = code + return ngx.print(body) + end + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "plugins": { + "chaitin-waf": { + "upstream": { + "servers": ["httpbun.org"] + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/*" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 2: pass +--- request +GET /hello +--- error_code: 403 +--- response_body +{"code": 403, "success":false, "message": "blocked by Chaitin SafeLine Web Application Firewall", "event_id": "b3c6ce574dc24f09a01f634a39dca83b"} +--- error_log +--- response_headers +X-APISIX-CHAITIN-WAF: yes +X-APISIX-CHAITIN-WAF-STATUS: 403 +X-APISIX-CHAITIN-WAF-ACTION: reject +--- response_headers_like +X-APISIX-CHAITIN-WAF-TIME: diff --git a/t/plugin/chaitin-waf-timeout.t b/t/plugin/chaitin-waf-timeout.t new file mode 100644 index 000000000000..063f1bc4e186 --- /dev/null +++ b/t/plugin/chaitin-waf-timeout.t @@ -0,0 +1,139 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + my $stream_default_server = <<_EOC_; + server { + listen 8088; + listen 8089; + content_by_lua_block { + require("lib.chaitin_waf_server").timeout() + } + } +_EOC_ + + $block->set_value("extra_stream_config", $stream_default_server); + $block->set_value("stream_conf_enable", 1); + + # setup default conf.yaml + my $extra_yaml_config = $block->extra_yaml_config // <<_EOC_; +apisix: + stream_proxy: # TCP/UDP L4 proxy + only: true # Enable L4 proxy only without L7 proxy. + tcp: + - addr: 9100 # Set the TCP proxy listening ports. + tls: true + - addr: "127.0.0.1:9101" + udp: # Set the UDP proxy listening ports. 
+ - 9200 + - "127.0.0.1:9201" +plugins: + - chaitin-waf +_EOC_ + + $block->set_value("extra_yaml_config", $extra_yaml_config); + + if (!$block->request) { + # use /do instead of /t because stream server will inject a default /t location + $block->set_value("request", "GET /do"); + } + + if ((!defined $block->error_log) && (!defined $block->no_error_log)) { + $block->set_value("no_error_log", "[error]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: set route +--- config + location /do { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_metadata/chaitin-waf', + ngx.HTTP_PUT, + [[{ + "nodes": [ + { + "host": "127.0.0.1", + "port": 8088 + }, + { + "host": "127.0.0.1", + "port": 8089 + } + ] + }]] + ) + + if code >= 300 then + ngx.status = code + return ngx.print(body) + end + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "plugins": { + "chaitin-waf": { + "upstream": { + "servers": ["httpbun.org"] + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/*" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 2: timeout +--- request +GET /hello +--- error_code: 200 +--- response_body +hello world +--- error_log +--- response_headers +X-APISIX-CHAITIN-WAF: timeout +--- response_headers_like +X-APISIX-CHAITIN-WAF-TIME: diff --git a/t/plugin/chaitin-waf.t b/t/plugin/chaitin-waf.t new file mode 100644 index 000000000000..cd568df13630 --- /dev/null +++ b/t/plugin/chaitin-waf.t @@ -0,0 +1,272 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + my $stream_default_server = <<_EOC_; + server { + listen 8088; + listen 8089; + listen unix:/tmp/safeline-snserver.sock; + content_by_lua_block { + require("lib.chaitin_waf_server").pass() + } + } +_EOC_ + + $block->set_value("extra_stream_config", $stream_default_server); + $block->set_value("stream_conf_enable", 1); + + # setup default conf.yaml + my $extra_yaml_config = $block->extra_yaml_config // <<_EOC_; +apisix: + stream_proxy: # TCP/UDP L4 proxy + only: true # Enable L4 proxy only without L7 proxy. + tcp: + - addr: 9100 # Set the TCP proxy listening ports. + tls: true + - addr: "127.0.0.1:9101" + udp: # Set the UDP proxy listening ports. 
+ - 9200 + - "127.0.0.1:9201" +plugins: + - chaitin-waf +_EOC_ + + $block->set_value("extra_yaml_config", $extra_yaml_config); + + if (!$block->request) { + # use /do instead of /t because stream server will inject a default /t location + $block->set_value("request", "GET /do"); + } + + if ((!defined $block->error_log) && (!defined $block->no_error_log)) { + $block->set_value("no_error_log", "[error]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: wrong schema: nodes empty +--- config + location /do { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_metadata/chaitin-waf', + ngx.HTTP_PUT, + [[{ + "nodes": [] + } + ]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.print(body) + } + } +--- error_code: 400 +--- response_body +{"error_msg":"invalid configuration: property \"nodes\" validation failed: expect array to have at least 1 items"} + + + +=== TEST 2: wrong schema: nodes misses host +--- config + location /do { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_metadata/chaitin-waf', + ngx.HTTP_PUT, + [[{ + "nodes": [ + {} + ] + } + ]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.print(body) + } + } +--- error_code: 400 +--- response_body +{"error_msg":"invalid configuration: property \"nodes\" validation failed: failed to validate item 1: property \"host\" is required"} + + + +=== TEST 3: sanity +--- config + location /do { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_metadata/chaitin-waf', + ngx.HTTP_PUT, + [[{ + "nodes": [ + { + "host": "127.0.0.1", + "port": 8088 + }, + { + "host": "127.0.0.1", + "port": 8089 + }, + { + "host": "unix:/tmp/safeline-snserver.sock", + "port": 8000 + } + ] + }]] + ) + + if code >= 300 then + ngx.status = code + return ngx.print(body) + end + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + 
[[{ + "methods": ["GET"], + "plugins": { + "chaitin-waf": { + "upstream": { + "servers": ["httpbun.org"] + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/*" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 4: pass +--- request +GET /hello +--- error_code: 200 +--- response_body +hello world +--- error_log +--- response_headers +X-APISIX-CHAITIN-WAF: yes +X-APISIX-CHAITIN-WAF-STATUS: 200 +X-APISIX-CHAITIN-WAF-ACTION: pass +--- response_headers_like +X-APISIX-CHAITIN-WAF-TIME: + + + +=== TEST 5: match condition +--- config + location /do { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "plugins": { + "chaitin-waf": { + "upstream": { + "servers": ["httpbun.org"] + }, + "match": [ + { + "vars": [ + ["http_waf","==","true"] + ] + } + ] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/*" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 6: no match +--- request +GET /hello +--- error_code: 200 +--- response_body +hello world +--- error_log +--- response_headers +X-APISIX-CHAITIN-WAF: no + + + +=== TEST 7: matched +--- request +GET /hello +--- more_headers +waf: true +--- error_code: 200 +--- response_body +hello world +--- error_log +--- response_headers +X-APISIX-CHAITIN-WAF: yes +X-APISIX-CHAITIN-WAF-STATUS: 200 +X-APISIX-CHAITIN-WAF-ACTION: pass +--- response_headers_like +X-APISIX-CHAITIN-WAF-TIME: diff --git a/t/plugin/clickhouse-logger.t b/t/plugin/clickhouse-logger.t index 3aa28190aed1..43ce54cd8a6d 100644 --- a/t/plugin/clickhouse-logger.t +++ b/t/plugin/clickhouse-logger.t @@ -183,13 +183,11 @@ passed "plugins": { "clickhouse-logger": { "user": "default", - "password": "a", + 
"password": "", "database": "default", - "logtable": "t", - "endpoint_addrs": ["http://127.0.0.1:1980/clickhouse_logger_server", - "http://127.0.0.1:10420/clickhouse-logger/test1"], - "batch_max_size":1, - "inactive_timeout":1 + "logtable": "test", + "endpoint_addrs": ["http://127.0.0.1:8123", + "http://127.0.0.1:8124"] } }, "upstream": { @@ -214,21 +212,24 @@ passed -=== TEST 6: access local server +=== TEST 6: hit route --- request GET /opentracing ---- response_body -opentracing ---- error_log -clickhouse body: INSERT INTO t FORMAT JSONEachRow -clickhouse headers: x-clickhouse-key:a -clickhouse headers: x-clickhouse-user:default -clickhouse headers: x-clickhouse-database:default +--- error_code: 200 --- wait: 5 -=== TEST 7: log format in plugin +=== TEST 7: get log +--- exec +echo "select * from default.test" | curl 'http://localhost:8123/' --data-binary @- +echo "select * from default.test" | curl 'http://localhost:8124/' --data-binary @- +--- response_body_like +.*127.0.0.1.*1.* + + + +=== TEST 8: use single clickhouse server --- config location /t { content_by_lua_block { @@ -239,24 +240,19 @@ clickhouse headers: x-clickhouse-database:default "plugins": { "clickhouse-logger": { "user": "default", - "password": "a", + "password": "", "database": "default", - "logtable": "t", - "endpoint_addrs": ["http://127.0.0.1:10420/clickhouse-logger/test1"], - "log_format": { - "vip": "$remote_addr" - }, - "batch_max_size":1, - "inactive_timeout":1 + "logtable": "test", + "endpoint_addr": "http://127.0.0.1:8123" } }, "upstream": { "nodes": { - "127.0.0.1:1980": 1 + "127.0.0.1:1982": 1 }, "type": "roundrobin" }, - "uri": "/hello" + "uri": "/opentracing" }]] ) @@ -266,18 +262,21 @@ clickhouse headers: x-clickhouse-database:default ngx.say(body) } } ---- request -GET /t --- response_body passed -=== TEST 8: hit route and report logger +=== TEST 9: hit route --- request -GET /hello ---- response_body -hello world ---- wait: 1.5 ---- error_log eval -qr/clickhouse body: INSERT 
INTO t FORMAT JSONEachRow \{.*"vip":"127.0.0.1".*\}/ +GET /opentracing +--- error_code: 200 +--- wait: 5 + + + +=== TEST 10: get log +--- exec +echo "select * from default.test" | curl 'http://localhost:8123/' --data-binary @- +--- response_body_like +.*127.0.0.1.*1.* diff --git a/t/plugin/consumer-restriction.t b/t/plugin/consumer-restriction.t index 862e8695dd92..cc86aacdcd7d 100644 --- a/t/plugin/consumer-restriction.t +++ b/t/plugin/consumer-restriction.t @@ -314,7 +314,7 @@ passed GET /hello --- error_code: 401 --- response_body -{"message":"Missing authentication or identity verification."} +{"message":"The request is rejected, please check the consumer_name for this request"} @@ -325,7 +325,7 @@ GET /hello Authorization: Basic amFjazIwMTk6MTIzNDU2 --- error_code: 401 --- response_body -{"message":"Missing authentication or identity verification."} +{"message":"The request is rejected, please check the consumer_name for this request"} @@ -336,7 +336,7 @@ GET /hello Authorization: Basic amFjazIwMjA6MTIzNDU2 --- error_code: 401 --- response_body -{"message":"Missing authentication or identity verification."} +{"message":"The request is rejected, please check the consumer_name for this request"} @@ -383,7 +383,7 @@ passed GET /hello --- error_code: 401 --- response_body -{"message":"Missing authentication or identity verification."} +{"message":"The request is rejected, please check the consumer_name for this request"} @@ -394,7 +394,7 @@ GET /hello Authorization: Basic amFjazIwMTk6MTIzNDU2 --- error_code: 401 --- response_body -{"message":"Missing authentication or identity verification."} +{"message":"The request is rejected, please check the consumer_name for this request"} @@ -405,7 +405,7 @@ GET /hello Authorization: Basic amFjazIwMjA6MTIzNDU2 --- error_code: 401 --- response_body -{"message":"Missing authentication or identity verification."} +{"message":"The request is rejected, please check the consumer_name for this request"} diff --git 
a/t/plugin/consumer-restriction2.t b/t/plugin/consumer-restriction2.t index 6fdba1daafe1..febae314df76 100644 --- a/t/plugin/consumer-restriction2.t +++ b/t/plugin/consumer-restriction2.t @@ -135,7 +135,37 @@ passed -=== TEST 5: set whitelist +=== TEST 5: consumer jack3 with no consumer group +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "jack3", + "plugins": { + "basic-auth": { + "username": "jack2021", + "password": "123456" + } + } + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 6: set whitelist --- config location /t { content_by_lua_block { @@ -175,7 +205,7 @@ passed -=== TEST 6: verify unauthorized +=== TEST 7: verify unauthorized --- request GET /hello --- error_code: 401 @@ -184,7 +214,7 @@ GET /hello -=== TEST 7: verify jack1 +=== TEST 8: verify jack1 --- request GET /hello --- more_headers @@ -194,7 +224,7 @@ hello world -=== TEST 8: verify jack2 +=== TEST 9: verify jack2 --- request GET /hello --- more_headers @@ -205,7 +235,7 @@ Authorization: Basic amFjazIwMjA6MTIzNDU2 -=== TEST 9: set blacklist +=== TEST 10: set blacklist --- config location /t { content_by_lua_block { @@ -246,7 +276,7 @@ passed -=== TEST 10: verify unauthorized +=== TEST 11: verify unauthorized --- request GET /hello --- error_code: 401 @@ -255,7 +285,7 @@ GET /hello -=== TEST 11: verify jack1 +=== TEST 12: verify jack1 --- request GET /hello --- more_headers @@ -266,10 +296,119 @@ Authorization: Basic amFjazIwMTk6MTIzNDU2 -=== TEST 12: verify jack2 +=== TEST 13: verify jack2 --- request GET /hello --- more_headers Authorization: Basic amFjazIwMjA6MTIzNDU2 --- response_body hello world + + + +=== TEST 14: verify jack2 +--- request +GET /hello +--- more_headers +Authorization: Basic amFjazIwMjE6MTIzNDU2 +--- error_code: 401 +--- response_body 
+{"message":"The request is rejected, please check the consumer_group_id for this request"} + + + +=== TEST 15: set blacklist with service_id +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "consumer-restriction": { + "type": "service_id", + "blacklist": [ + "1" + ], + "rejected_msg": "request is forbidden" + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 16: hit +--- request +GET /hello +--- error_code: 401 +--- response_body +{"message":"The request is rejected, please check the service_id for this request"} + + + +=== TEST 17: set whitelist with service_id +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "consumer-restriction": { + "type": "service_id", + "whitelist": [ + "1" + ], + "rejected_msg": "request is forbidden" + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 18: hit +--- request +GET /hello +--- error_code: 401 +--- response_body +{"message":"The request is rejected, please check the service_id for this request"} diff --git a/t/plugin/error-log-logger-skywalking.t b/t/plugin/error-log-logger-skywalking.t index ffebf1cf4fa5..da339f68e466 100644 --- a/t/plugin/error-log-logger-skywalking.t +++ b/t/plugin/error-log-logger-skywalking.t @@ -118,8 +118,8 @@ qr/Batch Processor\[error-log-logger\] failed to process entries: error while se --- request GET /tg --- response_body ---- 
error_log eval -qr/.*\[\{\"body\":\{\"text\":\{\"text\":\".*this is an error message for test.*\"\}\},\"endpoint\":\"\",\"service\":\"APISIX\",\"serviceInstance\":\"instance\".*/ +--- error_log +this is an error message for test --- wait: 5 @@ -196,3 +196,34 @@ qr/.*\[\{\"body\":\{\"text\":\{\"text\":\".*this is an info message for test.*\" GET /tg --- response_body passed + + + +=== TEST 8: put plugin metadata with $hostname and log an error level message +--- config + location /tg { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_metadata/error-log-logger', + ngx.HTTP_PUT, + [[{ + "skywalking": { + "endpoint_addr": "http://127.0.0.1:1982/log", + "service_instance_name": "$hostname" + }, + "batch_max_size": 15, + "inactive_timeout": 1 + }]] + ) + ngx.sleep(2) + core.log.error("this is an error message for test.") + } + } +--- request +GET /tg +--- response_body +--- no_error_log eval +qr/\\\"serviceInstance\\\":\\\"\$hostname\\\"/ +qr/\\\"serviceInstance\\\":\\\"\\\"/ +--- wait: 0.5 diff --git a/t/plugin/ext-plugin/http-req-call.t b/t/plugin/ext-plugin/http-req-call.t index 7d4f678b7d72..6fc8240a7612 100644 --- a/t/plugin/ext-plugin/http-req-call.t +++ b/t/plugin/ext-plugin/http-req-call.t @@ -514,8 +514,8 @@ GET /hello ext.go({rewrite_bad_path = true}) } } ---- access_log -GET /plugin_proxy_rewrite_args%3Fa=2 +--- error_log +undefined path in test server, uri: /plugin_proxy_rewrite_args%3Fa=2 --- error_code: 404 diff --git a/t/plugin/ext-plugin/sanity.t b/t/plugin/ext-plugin/sanity.t index 1a3732402d71..873a540a8215 100644 --- a/t/plugin/ext-plugin/sanity.t +++ b/t/plugin/ext-plugin/sanity.t @@ -14,7 +14,16 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -use t::APISIX 'no_plan'; +use t::APISIX; + +my $nginx_binary = $ENV{'TEST_NGINX_BINARY'} || 'nginx'; +my $version = eval { `$nginx_binary -V 2>&1` }; + +if ($version !~ m/\/apisix-nginx-module/) { + plan(skip_all => "apisix-nginx-module not installed"); +} else { + plan('no_plan'); +} repeat_each(1); no_long_string(); diff --git a/t/plugin/ext-plugin/sanity2.t b/t/plugin/ext-plugin/sanity2.t index 206e7b090c2d..67260055862e 100644 --- a/t/plugin/ext-plugin/sanity2.t +++ b/t/plugin/ext-plugin/sanity2.t @@ -14,7 +14,16 @@ # See the License for the specific language governing permissions and # limitations under the License. # -use t::APISIX 'no_plan'; +use t::APISIX; + +my $nginx_binary = $ENV{'TEST_NGINX_BINARY'} || 'nginx'; +my $version = eval { `$nginx_binary -V 2>&1` }; + +if ($version !~ m/\/apisix-nginx-module/) { + plan(skip_all => "apisix-nginx-module not installed"); +} else { + plan('no_plan'); +} repeat_each(1); no_long_string(); diff --git a/t/plugin/fault-injection2.t b/t/plugin/fault-injection2.t index 68b756bb06c4..0ba23f536069 100644 --- a/t/plugin/fault-injection2.t +++ b/t/plugin/fault-injection2.t @@ -90,3 +90,53 @@ Fault Injection! 
GET /hello?name=jack&age=18 --- response_body hello world + + + +=== TEST 4: inject header config +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [=[{ + "plugins": { + "fault-injection": { + "abort": { + "http_status": 200, + "headers" : { + "h1": "v1", + "h2": 2, + "h3": "$uri" + } + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]=] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 5: inject header +--- request +GET /hello +--- response_headers +h1: v1 +h2: 2 +h3: /hello diff --git a/t/plugin/file-logger2.t b/t/plugin/file-logger2.t index 90ce001c64c0..5cf3a76ccf4d 100644 --- a/t/plugin/file-logger2.t +++ b/t/plugin/file-logger2.t @@ -274,3 +274,95 @@ passed } --- response_body write file log success + + + +=== TEST 8: Add new configuration with match +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "file-logger": { + "path": "file-with-match.log", + "match": [ + [ + [ "arg_name","==","jack" ] + ] + ], + "log_format": { + "request": "$request" + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 9: Request match +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin").test + local code = t("/hello?name=jack", ngx.HTTP_GET) + local fd, err = io.open("file-with-match.log", 'r') + if not fd then + core.log.error("failed to open file: file-with-match.log, error info: ", err) + return + end + local msg = fd:read() + + local new_msg = 
core.json.decode(msg) + if new_msg.request == 'GET /hello?name=jack HTTP/1.1' + and new_msg.route_id == '1' + then + msg = "write file log success" + ngx.status = code + ngx.say(msg) + end + + os.remove("file-with-match.log") + } + } +--- response_body +write file log success + + + +=== TEST 10: Request not match +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin").test + local code = t("/hello?name=tony", ngx.HTTP_GET) + local fd, err = io.open("file-with-match.log", 'r') + if not fd then + local msg = "not write file log" + ngx.say(msg) + return + end + } + } +--- response_body +not write file log diff --git a/t/plugin/forward-auth.t b/t/plugin/forward-auth.t index 8f0445b505af..7896d4f84b74 100644 --- a/t/plugin/forward-auth.t +++ b/t/plugin/forward-auth.t @@ -206,6 +206,55 @@ property "request_method" validation failed: matches none of the enum values "uri": "/ping" }]], }, + { + url = "/apisix/admin/routes/4", + data = [[{ + "plugins": { + "serverless-pre-function": { + "phase": "rewrite", + "functions" : ["return function() require(\"apisix.core\").response.exit(444); end"] + } + }, + "upstream_id": "u1", + "uri": "/crashed-auth" + }]], + }, + { + url = "/apisix/admin/routes/5", + data = [[{ + "plugins": { + "forward-auth": { + "uri": "http://127.0.0.1:1984/crashed-auth", + "request_headers": ["Authorization"], + "upstream_headers": ["X-User-ID"], + "client_headers": ["Location"] + } + }, + "upstream_id": "u1", + "uri": "/nodegr" + }]], + }, + { + url = "/apisix/admin/routes/6", + data = [[{ + "uri": "/hello", + "plugins": { + "forward-auth": { + "uri": "http://127.0.0.1:1984/crashed-auth", + "request_headers": ["Authorization"], + "upstream_headers": ["X-User-ID"], + "client_headers": ["Location"], + "allow_degradation": true + } + }, + "upstream": { + "nodes": { + "test.com:1980": 1 + }, + "type": "roundrobin" + } + }]], + } } local t = require("lib.test_admin").test @@ -217,7 
+266,7 @@ property "request_method" validation failed: matches none of the enum values } } --- response_body eval -"201passed\n" x 6 +"201passed\n" x 9 @@ -305,3 +354,23 @@ POST /ping --- error_code: 403 --- response_headers Location: http://example.com/auth + + + +=== TEST 11: hit route (unavailable auth server, expect failure) +--- request +GET /nodegr +--- more_headers +Authorization: 111 +--- error_code: 403 +--- error_log +failed to process forward auth, err: closed + + + +=== TEST 12: hit route (unavailable auth server, allow degradation) +--- request +GET /hello +--- more_headers +Authorization: 111 +--- error_code: 200 diff --git a/t/plugin/google-cloud-logging.t b/t/plugin/google-cloud-logging.t index ff03e157400a..bc4293cf6755 100644 --- a/t/plugin/google-cloud-logging.t +++ b/t/plugin/google-cloud-logging.t @@ -74,6 +74,7 @@ passed local plugin = require("apisix.plugins.google-cloud-logging") local ok, err = plugin.check_schema({ auth_config = { + client_email = "email@apisix.iam.gserviceaccount.com", private_key = "private_key", project_id = "apisix", token_uri = "http://127.0.0.1:1980/token", @@ -132,6 +133,7 @@ passed local plugin = require("apisix.plugins.google-cloud-logging") local ok, err = plugin.check_schema({ auth_config = { + client_email = "email@apisix.iam.gserviceaccount.com", private_key = "private_key", project_id = "apisix", token_uri = "http://127.0.0.1:1980/token", @@ -190,6 +192,7 @@ value should match only one schema, but matches none plugins = { ["google-cloud-logging"] = { auth_config = { + client_email = "email@apisix.iam.gserviceaccount.com", private_key = [[ -----BEGIN RSA PRIVATE KEY----- MIIBOwIBAAJBAKeXgPvU/dAfVhOPk5BTBXCaOXy/0S3mY9VHyqvWZBJ97g6tGbLZ @@ -260,6 +263,7 @@ Batch Processor[google-cloud-logging] exceeded the max_retry_count plugins = { ["google-cloud-logging"] = { auth_config = { + client_email = "email@apisix.iam.gserviceaccount.com", private_key = [[ -----BEGIN PRIVATE KEY----- 
MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQDDzrFwnA3EvYyR @@ -346,6 +350,7 @@ Batch Processor[google-cloud-logging] exceeded the max_retry_count plugins = { ["google-cloud-logging"] = { auth_config = { + client_email = "email@apisix.iam.gserviceaccount.com", private_key = [[ -----BEGIN PRIVATE KEY----- MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQDDzrFwnA3EvYyR @@ -428,6 +433,7 @@ hello world plugins = { ["google-cloud-logging"] = { auth_config = { + client_email = "email@apisix.iam.gserviceaccount.com", private_key = [[ -----BEGIN PRIVATE KEY----- MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQDDzrFwnA3EvYyR @@ -510,6 +516,7 @@ hello world plugins = { ["google-cloud-logging"] = { auth_config = { + client_email = "email@apisix.iam.gserviceaccount.com", private_key = [[ -----BEGIN PRIVATE KEY----- MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQDDzrFwnA3EvYyR diff --git a/t/plugin/google-cloud-logging2.t b/t/plugin/google-cloud-logging2.t index 4290d3fc84b9..def2ca0ca00a 100644 --- a/t/plugin/google-cloud-logging2.t +++ b/t/plugin/google-cloud-logging2.t @@ -93,6 +93,7 @@ apisix: plugins = { ["google-cloud-logging"] = { auth_config = { + client_email = "email@apisix.iam.gserviceaccount.com", private_key = [[ -----BEGIN PRIVATE KEY----- MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQDDzrFwnA3EvYyR @@ -211,6 +212,7 @@ YnwwDKc5vNzo0OU4StTRQbwgCnTZ3dmYiBFm8aGnvTxlE86D2nT07Q3BWhUdky6OGIox4MRLbiHz13NZ plugins = { ["google-cloud-logging"] = { auth_config = { + client_email = "email@apisix.iam.gserviceaccount.com", private_key = [[ -----BEGIN PRIVATE KEY----- MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQDDzrFwnA3EvYyR @@ -350,6 +352,7 @@ the mock backend is hit plugins = { ["google-cloud-logging"] = { auth_config = { + client_email = "email@apisix.iam.gserviceaccount.com", private_key = [[ -----BEGIN PRIVATE KEY----- MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQDDzrFwnA3EvYyR diff --git a/t/plugin/grpc-transcode2.t b/t/plugin/grpc-transcode2.t 
index 0f7cc8d0ba99..da91d3ceb265 100644 --- a/t/plugin/grpc-transcode2.t +++ b/t/plugin/grpc-transcode2.t @@ -486,7 +486,7 @@ GET /grpc_plus?a=1&b=2 --- response_body eval qr/\{"result":3\}/ --- error_log eval -qr/request log: \{.*body":\"\\u0000\\u0000\\u0000\\u0000\\u0002\\b\\u0003\\u0000\\u0000\\u0000\\u0000\\u0002\\b\\u0003"/ +qr/request log: \{.*body":\"\\u0000\\u0000\\u0000\\u0000\\u0002\\b\\u0003"/ diff --git a/t/plugin/grpc-web/go.mod b/t/plugin/grpc-web/go.mod index 94c3e614f283..b1cfda9efc3d 100644 --- a/t/plugin/grpc-web/go.mod +++ b/t/plugin/grpc-web/go.mod @@ -3,7 +3,6 @@ module apisix.apache.org/plugin/grpc-web go 1.16 require ( - github.com/golang/protobuf v1.4.3 - golang.org/x/text v0.3.8 // indirect - google.golang.org/grpc v1.43.0 + github.com/golang/protobuf v1.5.2 + google.golang.org/grpc v1.53.0 ) diff --git a/t/plugin/grpc-web/go.sum b/t/plugin/grpc-web/go.sum index d90b3c86e713..2c5b51f0ce77 100644 --- a/t/plugin/grpc-web/go.sum +++ b/t/plugin/grpc-web/go.sum @@ -1,29 +1,455 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod 
h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= +cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= +cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= +cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= +cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= +cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= +cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= +cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= +cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= +cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= +cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= +cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= +cloud.google.com/go v0.100.1/go.mod h1:fs4QogzfH5n2pBXBP9vRiU+eCny7lD2vmFZy79Iuw1U= +cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= +cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= +cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU= +cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA= 
+cloud.google.com/go v0.105.0/go.mod h1:PrLgOJNe5nfE9UMxKxgXj4mD3voiP+YQ6gdt6KMFOKM= +cloud.google.com/go v0.107.0/go.mod h1:wpc2eNrD7hXUTy8EKS10jkxpZBjASrORK7goS+3YX2I= +cloud.google.com/go/accessapproval v1.4.0/go.mod h1:zybIuC3KpDOvotz59lFe5qxRZx6C75OtwbisN56xYB4= +cloud.google.com/go/accessapproval v1.5.0/go.mod h1:HFy3tuiGvMdcd/u+Cu5b9NkO1pEICJ46IR82PoUdplw= +cloud.google.com/go/accesscontextmanager v1.3.0/go.mod h1:TgCBehyr5gNMz7ZaH9xubp+CE8dkrszb4oK9CWyvD4o= +cloud.google.com/go/accesscontextmanager v1.4.0/go.mod h1:/Kjh7BBu/Gh83sv+K60vN9QE5NJcd80sU33vIe2IFPE= +cloud.google.com/go/aiplatform v1.22.0/go.mod h1:ig5Nct50bZlzV6NvKaTwmplLLddFx0YReh9WfTO5jKw= +cloud.google.com/go/aiplatform v1.24.0/go.mod h1:67UUvRBKG6GTayHKV8DBv2RtR1t93YRu5B1P3x99mYY= +cloud.google.com/go/aiplatform v1.27.0/go.mod h1:Bvxqtl40l0WImSb04d0hXFU7gDOiq9jQmorivIiWcKg= +cloud.google.com/go/analytics v0.11.0/go.mod h1:DjEWCu41bVbYcKyvlws9Er60YE4a//bK6mnhWvQeFNI= +cloud.google.com/go/analytics v0.12.0/go.mod h1:gkfj9h6XRf9+TS4bmuhPEShsh3hH8PAZzm/41OOhQd4= +cloud.google.com/go/apigateway v1.3.0/go.mod h1:89Z8Bhpmxu6AmUxuVRg/ECRGReEdiP3vQtk4Z1J9rJk= +cloud.google.com/go/apigateway v1.4.0/go.mod h1:pHVY9MKGaH9PQ3pJ4YLzoj6U5FUDeDFBllIz7WmzJoc= +cloud.google.com/go/apigeeconnect v1.3.0/go.mod h1:G/AwXFAKo0gIXkPTVfZDd2qA1TxBXJ3MgMRBQkIi9jc= +cloud.google.com/go/apigeeconnect v1.4.0/go.mod h1:kV4NwOKqjvt2JYR0AoIWo2QGfoRtn/pkS3QlHp0Ni04= +cloud.google.com/go/appengine v1.4.0/go.mod h1:CS2NhuBuDXM9f+qscZ6V86m1MIIqPj3WC/UoEuR1Sno= +cloud.google.com/go/appengine v1.5.0/go.mod h1:TfasSozdkFI0zeoxW3PTBLiNqRmzraodCWatWI9Dmak= +cloud.google.com/go/area120 v0.5.0/go.mod h1:DE/n4mp+iqVyvxHN41Vf1CR602GiHQjFPusMFW6bGR4= +cloud.google.com/go/area120 v0.6.0/go.mod h1:39yFJqWVgm0UZqWTOdqkLhjoC7uFfgXRC8g/ZegeAh0= +cloud.google.com/go/artifactregistry v1.6.0/go.mod h1:IYt0oBPSAGYj/kprzsBjZ/4LnG/zOcHyFHjWPCi6SAQ= +cloud.google.com/go/artifactregistry v1.7.0/go.mod h1:mqTOFOnGZx8EtSqK/ZWcsm/4U8B77rbcLP6ruDU2Ixk= 
+cloud.google.com/go/artifactregistry v1.8.0/go.mod h1:w3GQXkJX8hiKN0v+at4b0qotwijQbYUqF2GWkZzAhC0= +cloud.google.com/go/artifactregistry v1.9.0/go.mod h1:2K2RqvA2CYvAeARHRkLDhMDJ3OXy26h3XW+3/Jh2uYc= +cloud.google.com/go/asset v1.5.0/go.mod h1:5mfs8UvcM5wHhqtSv8J1CtxxaQq3AdBxxQi2jGW/K4o= +cloud.google.com/go/asset v1.7.0/go.mod h1:YbENsRK4+xTiL+Ofoj5Ckf+O17kJtgp3Y3nn4uzZz5s= +cloud.google.com/go/asset v1.8.0/go.mod h1:mUNGKhiqIdbr8X7KNayoYvyc4HbbFO9URsjbytpUaW0= +cloud.google.com/go/asset v1.9.0/go.mod h1:83MOE6jEJBMqFKadM9NLRcs80Gdw76qGuHn8m3h8oHQ= +cloud.google.com/go/asset v1.10.0/go.mod h1:pLz7uokL80qKhzKr4xXGvBQXnzHn5evJAEAtZiIb0wY= +cloud.google.com/go/assuredworkloads v1.5.0/go.mod h1:n8HOZ6pff6re5KYfBXcFvSViQjDwxFkAkmUFffJRbbY= +cloud.google.com/go/assuredworkloads v1.6.0/go.mod h1:yo2YOk37Yc89Rsd5QMVECvjaMKymF9OP+QXWlKXUkXw= +cloud.google.com/go/assuredworkloads v1.7.0/go.mod h1:z/736/oNmtGAyU47reJgGN+KVoYoxeLBoj4XkKYscNI= +cloud.google.com/go/assuredworkloads v1.8.0/go.mod h1:AsX2cqyNCOvEQC8RMPnoc0yEarXQk6WEKkxYfL6kGIo= +cloud.google.com/go/assuredworkloads v1.9.0/go.mod h1:kFuI1P78bplYtT77Tb1hi0FMxM0vVpRC7VVoJC3ZoT0= +cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0= +cloud.google.com/go/automl v1.6.0/go.mod h1:ugf8a6Fx+zP0D59WLhqgTDsQI9w07o64uf/Is3Nh5p8= +cloud.google.com/go/automl v1.7.0/go.mod h1:RL9MYCCsJEOmt0Wf3z9uzG0a7adTT1fe+aObgSpkCt8= +cloud.google.com/go/automl v1.8.0/go.mod h1:xWx7G/aPEe/NP+qzYXktoBSDfjO+vnKMGgsApGJJquM= +cloud.google.com/go/baremetalsolution v0.3.0/go.mod h1:XOrocE+pvK1xFfleEnShBlNAXf+j5blPPxrhjKgnIFc= +cloud.google.com/go/baremetalsolution v0.4.0/go.mod h1:BymplhAadOO/eBa7KewQ0Ppg4A4Wplbn+PsFKRLo0uI= +cloud.google.com/go/batch v0.3.0/go.mod h1:TR18ZoAekj1GuirsUsR1ZTKN3FC/4UDnScjT8NXImFE= +cloud.google.com/go/batch v0.4.0/go.mod h1:WZkHnP43R/QCGQsZ+0JyG4i79ranE2u8xvjq/9+STPE= +cloud.google.com/go/beyondcorp v0.2.0/go.mod h1:TB7Bd+EEtcw9PCPQhCJtJGjk/7TC6ckmnSFS+xwTfm4= 
+cloud.google.com/go/beyondcorp v0.3.0/go.mod h1:E5U5lcrcXMsCuoDNyGrpyTm/hn7ne941Jz2vmksAxW8= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/bigquery v1.42.0/go.mod h1:8dRTJxhtG+vwBKzE5OseQn/hiydoQN3EedCaOdYmxRA= +cloud.google.com/go/bigquery v1.43.0/go.mod h1:ZMQcXHsl+xmU1z36G2jNGZmKp9zNY5BUua5wDgmNCfw= +cloud.google.com/go/bigquery v1.44.0/go.mod h1:0Y33VqXTEsbamHJvJHdFmtqHvMIY28aK1+dFsvaChGc= +cloud.google.com/go/billing v1.4.0/go.mod h1:g9IdKBEFlItS8bTtlrZdVLWSSdSyFUZKXNS02zKMOZY= +cloud.google.com/go/billing v1.5.0/go.mod h1:mztb1tBc3QekhjSgmpf/CV4LzWXLzCArwpLmP2Gm88s= +cloud.google.com/go/billing v1.6.0/go.mod h1:WoXzguj+BeHXPbKfNWkqVtDdzORazmCjraY+vrxcyvI= +cloud.google.com/go/billing v1.7.0/go.mod h1:q457N3Hbj9lYwwRbnlD7vUpyjq6u5U1RAOArInEiD5Y= +cloud.google.com/go/binaryauthorization v1.1.0/go.mod h1:xwnoWu3Y84jbuHa0zd526MJYmtnVXn0syOjaJgy4+dM= +cloud.google.com/go/binaryauthorization v1.2.0/go.mod h1:86WKkJHtRcv5ViNABtYMhhNWRrD1Vpi//uKEy7aYEfI= +cloud.google.com/go/binaryauthorization v1.3.0/go.mod h1:lRZbKgjDIIQvzYQS1p99A7/U1JqvqeZg0wiI5tp6tg0= +cloud.google.com/go/binaryauthorization v1.4.0/go.mod h1:tsSPQrBd77VLplV70GUhBf/Zm3FsKmgSqgm4UmiDItk= +cloud.google.com/go/certificatemanager v1.3.0/go.mod h1:n6twGDvcUBFu9uBgt4eYvvf3sQ6My8jADcOVwHmzadg= +cloud.google.com/go/certificatemanager v1.4.0/go.mod h1:vowpercVFyqs8ABSmrdV+GiFf2H/ch3KyudYQEMM590= +cloud.google.com/go/channel v1.8.0/go.mod h1:W5SwCXDJsq/rg3tn3oG0LOxpAo6IMxNa09ngphpSlnk= 
+cloud.google.com/go/channel v1.9.0/go.mod h1:jcu05W0my9Vx4mt3/rEHpfxc9eKi9XwsdDL8yBMbKUk= +cloud.google.com/go/cloudbuild v1.3.0/go.mod h1:WequR4ULxlqvMsjDEEEFnOG5ZSRSgWOywXYDb1vPE6U= +cloud.google.com/go/cloudbuild v1.4.0/go.mod h1:5Qwa40LHiOXmz3386FrjrYM93rM/hdRr7b53sySrTqA= +cloud.google.com/go/clouddms v1.3.0/go.mod h1:oK6XsCDdW4Ib3jCCBugx+gVjevp2TMXFtgxvPSee3OM= +cloud.google.com/go/clouddms v1.4.0/go.mod h1:Eh7sUGCC+aKry14O1NRljhjyrr0NFC0G2cjwX0cByRk= +cloud.google.com/go/cloudtasks v1.5.0/go.mod h1:fD92REy1x5woxkKEkLdvavGnPJGEn8Uic9nWuLzqCpY= +cloud.google.com/go/cloudtasks v1.6.0/go.mod h1:C6Io+sxuke9/KNRkbQpihnW93SWDU3uXt92nu85HkYI= +cloud.google.com/go/cloudtasks v1.7.0/go.mod h1:ImsfdYWwlWNJbdgPIIGJWC+gemEGTBK/SunNQQNCAb4= +cloud.google.com/go/cloudtasks v1.8.0/go.mod h1:gQXUIwCSOI4yPVK7DgTVFiiP0ZW/eQkydWzwVMdHxrI= +cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= +cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= +cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= +cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= +cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= +cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= +cloud.google.com/go/compute v1.10.0/go.mod h1:ER5CLbMxl90o2jtNbGSbtfOpQKR0t15FOtRsugnLrlU= +cloud.google.com/go/compute v1.12.0/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= +cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= +cloud.google.com/go/compute v1.13.0/go.mod h1:5aPTS0cUNMIc1CE546K+Th6weJUNQErARyZtRXDJ8GE= +cloud.google.com/go/compute v1.14.0/go.mod h1:YfLtxrj9sU4Yxv+sXzZkyPjEyPBZfXHUvjxega5vAdo= +cloud.google.com/go/compute v1.15.1/go.mod h1:bjjoF/NtFUrkD/urWfdHaKuOPDR5nWIs63rR+SXhcpA= +cloud.google.com/go/compute/metadata v0.1.0/go.mod 
h1:Z1VN+bulIf6bt4P/C37K4DyZYZEXYonfTBHHFPO/4UU= +cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= +cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= +cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= +cloud.google.com/go/contactcenterinsights v1.3.0/go.mod h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY= +cloud.google.com/go/contactcenterinsights v1.4.0/go.mod h1:L2YzkGbPsv+vMQMCADxJoT9YiTTnSEd6fEvCeHTYVck= +cloud.google.com/go/container v1.6.0/go.mod h1:Xazp7GjJSeUYo688S+6J5V+n/t+G5sKBTFkKNudGRxg= +cloud.google.com/go/container v1.7.0/go.mod h1:Dp5AHtmothHGX3DwwIHPgq45Y8KmNsgN3amoYfxVkLo= +cloud.google.com/go/containeranalysis v0.5.1/go.mod h1:1D92jd8gRR/c0fGMlymRgxWD3Qw9C1ff6/T7mLgVL8I= +cloud.google.com/go/containeranalysis v0.6.0/go.mod h1:HEJoiEIu+lEXM+k7+qLCci0h33lX3ZqoYFdmPcoO7s4= +cloud.google.com/go/datacatalog v1.3.0/go.mod h1:g9svFY6tuR+j+hrTw3J2dNcmI0dzmSiyOzm8kpLq0a0= +cloud.google.com/go/datacatalog v1.5.0/go.mod h1:M7GPLNQeLfWqeIm3iuiruhPzkt65+Bx8dAKvScX8jvs= +cloud.google.com/go/datacatalog v1.6.0/go.mod h1:+aEyF8JKg+uXcIdAmmaMUmZ3q1b/lKLtXCmXdnc0lbc= +cloud.google.com/go/datacatalog v1.7.0/go.mod h1:9mEl4AuDYWw81UGc41HonIHH7/sn52H0/tc8f8ZbZIE= +cloud.google.com/go/datacatalog v1.8.0/go.mod h1:KYuoVOv9BM8EYz/4eMFxrr4DUKhGIOXxZoKYF5wdISM= +cloud.google.com/go/dataflow v0.6.0/go.mod h1:9QwV89cGoxjjSR9/r7eFDqqjtvbKxAK2BaYU6PVk9UM= +cloud.google.com/go/dataflow v0.7.0/go.mod h1:PX526vb4ijFMesO1o202EaUmouZKBpjHsTlCtB4parQ= +cloud.google.com/go/dataform v0.3.0/go.mod h1:cj8uNliRlHpa6L3yVhDOBrUXH+BPAO1+KFMQQNSThKo= +cloud.google.com/go/dataform v0.4.0/go.mod h1:fwV6Y4Ty2yIFL89huYlEkwUPtS7YZinZbzzj5S9FzCE= +cloud.google.com/go/dataform v0.5.0/go.mod h1:GFUYRe8IBa2hcomWplodVmUx/iTL0FrsauObOM3Ipr0= +cloud.google.com/go/datafusion v1.4.0/go.mod h1:1Zb6VN+W6ALo85cXnM1IKiPw+yQMKMhB9TsTSRDo/38= 
+cloud.google.com/go/datafusion v1.5.0/go.mod h1:Kz+l1FGHB0J+4XF2fud96WMmRiq/wj8N9u007vyXZ2w= +cloud.google.com/go/datalabeling v0.5.0/go.mod h1:TGcJ0G2NzcsXSE/97yWjIZO0bXj0KbVlINXMG9ud42I= +cloud.google.com/go/datalabeling v0.6.0/go.mod h1:WqdISuk/+WIGeMkpw/1q7bK/tFEZxsrFJOJdY2bXvTQ= +cloud.google.com/go/dataplex v1.3.0/go.mod h1:hQuRtDg+fCiFgC8j0zV222HvzFQdRd+SVX8gdmFcZzA= +cloud.google.com/go/dataplex v1.4.0/go.mod h1:X51GfLXEMVJ6UN47ESVqvlsRplbLhcsAt0kZCCKsU0A= +cloud.google.com/go/dataproc v1.7.0/go.mod h1:CKAlMjII9H90RXaMpSxQ8EU6dQx6iAYNPcYPOkSbi8s= +cloud.google.com/go/dataproc v1.8.0/go.mod h1:5OW+zNAH0pMpw14JVrPONsxMQYMBqJuzORhIBfBn9uI= +cloud.google.com/go/dataqna v0.5.0/go.mod h1:90Hyk596ft3zUQ8NkFfvICSIfHFh1Bc7C4cK3vbhkeo= +cloud.google.com/go/dataqna v0.6.0/go.mod h1:1lqNpM7rqNLVgWBJyk5NF6Uen2PHym0jtVJonplVsDA= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/datastore v1.10.0/go.mod h1:PC5UzAmDEkAmkfaknstTYbNpgE49HAgW2J1gcgUfmdM= +cloud.google.com/go/datastream v1.2.0/go.mod h1:i/uTP8/fZwgATHS/XFu0TcNUhuA0twZxxQ3EyCUQMwo= +cloud.google.com/go/datastream v1.3.0/go.mod h1:cqlOX8xlyYF/uxhiKn6Hbv6WjwPPuI9W2M9SAXwaLLQ= +cloud.google.com/go/datastream v1.4.0/go.mod h1:h9dpzScPhDTs5noEMQVWP8Wx8AFBRyS0s8KWPx/9r0g= +cloud.google.com/go/datastream v1.5.0/go.mod h1:6TZMMNPwjUqZHBKPQ1wwXpb0d5VDVPl2/XoS5yi88q4= +cloud.google.com/go/deploy v1.4.0/go.mod h1:5Xghikd4VrmMLNaF6FiRFDlHb59VM59YoDQnOUdsH/c= +cloud.google.com/go/deploy v1.5.0/go.mod h1:ffgdD0B89tToyW/U/D2eL0jN2+IEV/3EMuXHA0l4r+s= +cloud.google.com/go/dialogflow v1.15.0/go.mod h1:HbHDWs33WOGJgn6rfzBW1Kv807BE3O1+xGbn59zZWI4= +cloud.google.com/go/dialogflow v1.16.1/go.mod h1:po6LlzGfK+smoSmTBnbkIZY2w8ffjz/RcGSS+sh1el0= +cloud.google.com/go/dialogflow v1.17.0/go.mod h1:YNP09C/kXA1aZdBgC/VtXX74G/TKn7XVCcVumTflA+8= +cloud.google.com/go/dialogflow 
v1.18.0/go.mod h1:trO7Zu5YdyEuR+BhSNOqJezyFQ3aUzz0njv7sMx/iek= +cloud.google.com/go/dialogflow v1.19.0/go.mod h1:JVmlG1TwykZDtxtTXujec4tQ+D8SBFMoosgy+6Gn0s0= +cloud.google.com/go/dlp v1.6.0/go.mod h1:9eyB2xIhpU0sVwUixfBubDoRwP+GjeUoxxeueZmqvmM= +cloud.google.com/go/dlp v1.7.0/go.mod h1:68ak9vCiMBjbasxeVD17hVPxDEck+ExiHavX8kiHG+Q= +cloud.google.com/go/documentai v1.7.0/go.mod h1:lJvftZB5NRiFSX4moiye1SMxHx0Bc3x1+p9e/RfXYiU= +cloud.google.com/go/documentai v1.8.0/go.mod h1:xGHNEB7CtsnySCNrCFdCyyMz44RhFEEX2Q7UD0c5IhU= +cloud.google.com/go/documentai v1.9.0/go.mod h1:FS5485S8R00U10GhgBC0aNGrJxBP8ZVpEeJ7PQDZd6k= +cloud.google.com/go/documentai v1.10.0/go.mod h1:vod47hKQIPeCfN2QS/jULIvQTugbmdc0ZvxxfQY1bg4= +cloud.google.com/go/domains v0.6.0/go.mod h1:T9Rz3GasrpYk6mEGHh4rymIhjlnIuB4ofT1wTxDeT4Y= +cloud.google.com/go/domains v0.7.0/go.mod h1:PtZeqS1xjnXuRPKE/88Iru/LdfoRyEHYA9nFQf4UKpg= +cloud.google.com/go/edgecontainer v0.1.0/go.mod h1:WgkZ9tp10bFxqO8BLPqv2LlfmQF1X8lZqwW4r1BTajk= +cloud.google.com/go/edgecontainer v0.2.0/go.mod h1:RTmLijy+lGpQ7BXuTDa4C4ssxyXT34NIuHIgKuP4s5w= +cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU= +cloud.google.com/go/essentialcontacts v1.3.0/go.mod h1:r+OnHa5jfj90qIfZDO/VztSFqbQan7HV75p8sA+mdGI= +cloud.google.com/go/essentialcontacts v1.4.0/go.mod h1:8tRldvHYsmnBCHdFpvU+GL75oWiBKl80BiqlFh9tp+8= +cloud.google.com/go/eventarc v1.7.0/go.mod h1:6ctpF3zTnaQCxUjHUdcfgcA1A2T309+omHZth7gDfmc= +cloud.google.com/go/eventarc v1.8.0/go.mod h1:imbzxkyAU4ubfsaKYdQg04WS1NvncblHEup4kvF+4gw= +cloud.google.com/go/filestore v1.3.0/go.mod h1:+qbvHGvXU1HaKX2nD0WEPo92TP/8AQuCVEBXNY9z0+w= +cloud.google.com/go/filestore v1.4.0/go.mod h1:PaG5oDfo9r224f8OYXURtAsY+Fbyq/bLYoINEK8XQAI= +cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE= +cloud.google.com/go/functions v1.6.0/go.mod h1:3H1UA3qiIPRWD7PeZKLvHZ9SaQhR26XIJcC0A5GbvAk= +cloud.google.com/go/functions v1.7.0/go.mod 
h1:+d+QBcWM+RsrgZfV9xo6KfA1GlzJfxcfZcRPEhDDfzg= +cloud.google.com/go/functions v1.8.0/go.mod h1:RTZ4/HsQjIqIYP9a9YPbU+QFoQsAlYgrwOXJWHn1POY= +cloud.google.com/go/functions v1.9.0/go.mod h1:Y+Dz8yGguzO3PpIjhLTbnqV1CWmgQ5UwtlpzoyquQ08= +cloud.google.com/go/gaming v1.5.0/go.mod h1:ol7rGcxP/qHTRQE/RO4bxkXq+Fix0j6D4LFPzYTIrDM= +cloud.google.com/go/gaming v1.6.0/go.mod h1:YMU1GEvA39Qt3zWGyAVA9bpYz/yAhTvaQ1t2sK4KPUA= +cloud.google.com/go/gaming v1.7.0/go.mod h1:LrB8U7MHdGgFG851iHAfqUdLcKBdQ55hzXy9xBJz0+w= +cloud.google.com/go/gaming v1.8.0/go.mod h1:xAqjS8b7jAVW0KFYeRUxngo9My3f33kFmua++Pi+ggM= +cloud.google.com/go/gkebackup v0.2.0/go.mod h1:XKvv/4LfG829/B8B7xRkk8zRrOEbKtEam6yNfuQNH60= +cloud.google.com/go/gkebackup v0.3.0/go.mod h1:n/E671i1aOQvUxT541aTkCwExO/bTer2HDlj4TsBRAo= +cloud.google.com/go/gkeconnect v0.5.0/go.mod h1:c5lsNAg5EwAy7fkqX/+goqFsU1Da/jQFqArp+wGNr/o= +cloud.google.com/go/gkeconnect v0.6.0/go.mod h1:Mln67KyU/sHJEBY8kFZ0xTeyPtzbq9StAVvEULYK16A= +cloud.google.com/go/gkehub v0.9.0/go.mod h1:WYHN6WG8w9bXU0hqNxt8rm5uxnk8IH+lPY9J2TV7BK0= +cloud.google.com/go/gkehub v0.10.0/go.mod h1:UIPwxI0DsrpsVoWpLB0stwKCP+WFVG9+y977wO+hBH0= +cloud.google.com/go/gkemulticloud v0.3.0/go.mod h1:7orzy7O0S+5kq95e4Hpn7RysVA7dPs8W/GgfUtsPbrA= +cloud.google.com/go/gkemulticloud v0.4.0/go.mod h1:E9gxVBnseLWCk24ch+P9+B2CoDFJZTyIgLKSalC7tuI= +cloud.google.com/go/grafeas v0.2.0/go.mod h1:KhxgtF2hb0P191HlY5besjYm6MqTSTj3LSI+M+ByZHc= +cloud.google.com/go/gsuiteaddons v1.3.0/go.mod h1:EUNK/J1lZEZO8yPtykKxLXI6JSVN2rg9bN8SXOa0bgM= +cloud.google.com/go/gsuiteaddons v1.4.0/go.mod h1:rZK5I8hht7u7HxFQcFei0+AtfS9uSushomRlg+3ua1o= +cloud.google.com/go/iam v0.1.0/go.mod h1:vcUNEa0pEm0qRVpmWepWaFMIAI8/hjB9mO8rNCJtF6c= +cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= +cloud.google.com/go/iam v0.5.0/go.mod h1:wPU9Vt0P4UmCux7mqtRu6jcpPAb74cP1fh50J3QpkUc= +cloud.google.com/go/iam v0.6.0/go.mod h1:+1AH33ueBne5MzYccyMHtEKqLE4/kJOibtffMHDMFMc= +cloud.google.com/go/iam 
v0.7.0/go.mod h1:H5Br8wRaDGNc8XP3keLc4unfUUZeyH3Sfl9XpQEYOeg= +cloud.google.com/go/iam v0.8.0/go.mod h1:lga0/y3iH6CX7sYqypWJ33hf7kkfXJag67naqGESjkE= +cloud.google.com/go/iap v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc= +cloud.google.com/go/iap v1.5.0/go.mod h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A= +cloud.google.com/go/ids v1.1.0/go.mod h1:WIuwCaYVOzHIj2OhN9HAwvW+DBdmUAdcWlFxRl+KubM= +cloud.google.com/go/ids v1.2.0/go.mod h1:5WXvp4n25S0rA/mQWAg1YEEBBq6/s+7ml1RDCW1IrcY= +cloud.google.com/go/iot v1.3.0/go.mod h1:r7RGh2B61+B8oz0AGE+J72AhA0G7tdXItODWsaA2oLs= +cloud.google.com/go/iot v1.4.0/go.mod h1:dIDxPOn0UvNDUMD8Ger7FIaTuvMkj+aGk94RPP0iV+g= +cloud.google.com/go/kms v1.4.0/go.mod h1:fajBHndQ+6ubNw6Ss2sSd+SWvjL26RNo/dr7uxsnnOA= +cloud.google.com/go/kms v1.5.0/go.mod h1:QJS2YY0eJGBg3mnDfuaCyLauWwBJiHRboYxJ++1xJNg= +cloud.google.com/go/kms v1.6.0/go.mod h1:Jjy850yySiasBUDi6KFUwUv2n1+o7QZFyuUJg6OgjA0= +cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic= +cloud.google.com/go/language v1.6.0/go.mod h1:6dJ8t3B+lUYfStgls25GusK04NLh3eDLQnWM3mdEbhI= +cloud.google.com/go/language v1.7.0/go.mod h1:DJ6dYN/W+SQOjF8e1hLQXMF21AkH2w9wiPzPCJa2MIE= +cloud.google.com/go/language v1.8.0/go.mod h1:qYPVHf7SPoNNiCL2Dr0FfEFNil1qi3pQEyygwpgVKB8= +cloud.google.com/go/lifesciences v0.5.0/go.mod h1:3oIKy8ycWGPUyZDR/8RNnTOYevhaMLqh5vLUXs9zvT8= +cloud.google.com/go/lifesciences v0.6.0/go.mod h1:ddj6tSX/7BOnhxCSd3ZcETvtNr8NZ6t/iPhY2Tyfu08= +cloud.google.com/go/logging v1.6.1/go.mod h1:5ZO0mHHbvm8gEmeEUHrmDlTDSu5imF6MUP9OfilNXBw= +cloud.google.com/go/longrunning v0.1.1/go.mod h1:UUFxuDWkv22EuY93jjmDMFT5GPQKeFVJBIF6QlTqdsE= +cloud.google.com/go/longrunning v0.3.0/go.mod h1:qth9Y41RRSUE69rDcOn6DdK3HfQfsUI0YSmW3iIlLJc= +cloud.google.com/go/managedidentities v1.3.0/go.mod h1:UzlW3cBOiPrzucO5qWkNkh0w33KFtBJU281hacNvsdE= +cloud.google.com/go/managedidentities v1.4.0/go.mod h1:NWSBYbEMgqmbZsLIyKvxrYbtqOsxY1ZrGM+9RgDqInM= 
+cloud.google.com/go/maps v0.1.0/go.mod h1:BQM97WGyfw9FWEmQMpZ5T6cpovXXSd1cGmFma94eubI= +cloud.google.com/go/mediatranslation v0.5.0/go.mod h1:jGPUhGTybqsPQn91pNXw0xVHfuJ3leR1wj37oU3y1f4= +cloud.google.com/go/mediatranslation v0.6.0/go.mod h1:hHdBCTYNigsBxshbznuIMFNe5QXEowAuNmmC7h8pu5w= +cloud.google.com/go/memcache v1.4.0/go.mod h1:rTOfiGZtJX1AaFUrOgsMHX5kAzaTQ8azHiuDoTPzNsE= +cloud.google.com/go/memcache v1.5.0/go.mod h1:dk3fCK7dVo0cUU2c36jKb4VqKPS22BTkf81Xq617aWM= +cloud.google.com/go/memcache v1.6.0/go.mod h1:XS5xB0eQZdHtTuTF9Hf8eJkKtR3pVRCcvJwtm68T3rA= +cloud.google.com/go/memcache v1.7.0/go.mod h1:ywMKfjWhNtkQTxrWxCkCFkoPjLHPW6A7WOTVI8xy3LY= +cloud.google.com/go/metastore v1.5.0/go.mod h1:2ZNrDcQwghfdtCwJ33nM0+GrBGlVuh8rakL3vdPY3XY= +cloud.google.com/go/metastore v1.6.0/go.mod h1:6cyQTls8CWXzk45G55x57DVQ9gWg7RiH65+YgPsNh9s= +cloud.google.com/go/metastore v1.7.0/go.mod h1:s45D0B4IlsINu87/AsWiEVYbLaIMeUSoxlKKDqBGFS8= +cloud.google.com/go/metastore v1.8.0/go.mod h1:zHiMc4ZUpBiM7twCIFQmJ9JMEkDSyZS9U12uf7wHqSI= +cloud.google.com/go/monitoring v1.7.0/go.mod h1:HpYse6kkGo//7p6sT0wsIC6IBDET0RhIsnmlA53dvEk= +cloud.google.com/go/monitoring v1.8.0/go.mod h1:E7PtoMJ1kQXWxPjB6mv2fhC5/15jInuulFdYYtlcvT4= +cloud.google.com/go/networkconnectivity v1.4.0/go.mod h1:nOl7YL8odKyAOtzNX73/M5/mGZgqqMeryi6UPZTk/rA= +cloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o= +cloud.google.com/go/networkconnectivity v1.6.0/go.mod h1:OJOoEXW+0LAxHh89nXd64uGG+FbQoeH8DtxCHVOMlaM= +cloud.google.com/go/networkconnectivity v1.7.0/go.mod h1:RMuSbkdbPwNMQjB5HBWD5MpTBnNm39iAVpC3TmsExt8= +cloud.google.com/go/networkmanagement v1.4.0/go.mod h1:Q9mdLLRn60AsOrPc8rs8iNV6OHXaGcDdsIQe1ohekq8= +cloud.google.com/go/networkmanagement v1.5.0/go.mod h1:ZnOeZ/evzUdUsnvRt792H0uYEnHQEMaz+REhhzJRcf4= +cloud.google.com/go/networksecurity v0.5.0/go.mod h1:xS6fOCoqpVC5zx15Z/MqkfDwH4+m/61A3ODiDV1xmiQ= +cloud.google.com/go/networksecurity v0.6.0/go.mod 
h1:Q5fjhTr9WMI5mbpRYEbiexTzROf7ZbDzvzCrNl14nyU= +cloud.google.com/go/notebooks v1.2.0/go.mod h1:9+wtppMfVPUeJ8fIWPOq1UnATHISkGXGqTkxeieQ6UY= +cloud.google.com/go/notebooks v1.3.0/go.mod h1:bFR5lj07DtCPC7YAAJ//vHskFBxA5JzYlH68kXVdk34= +cloud.google.com/go/notebooks v1.4.0/go.mod h1:4QPMngcwmgb6uw7Po99B2xv5ufVoIQ7nOGDyL4P8AgA= +cloud.google.com/go/notebooks v1.5.0/go.mod h1:q8mwhnP9aR8Hpfnrc5iN5IBhrXUy8S2vuYs+kBJ/gu0= +cloud.google.com/go/optimization v1.1.0/go.mod h1:5po+wfvX5AQlPznyVEZjGJTMr4+CAkJf2XSTQOOl9l4= +cloud.google.com/go/optimization v1.2.0/go.mod h1:Lr7SOHdRDENsh+WXVmQhQTrzdu9ybg0NecjHidBq6xs= +cloud.google.com/go/orchestration v1.3.0/go.mod h1:Sj5tq/JpWiB//X/q3Ngwdl5K7B7Y0KZ7bfv0wL6fqVA= +cloud.google.com/go/orchestration v1.4.0/go.mod h1:6W5NLFWs2TlniBphAViZEVhrXRSMgUGDfW7vrWKvsBk= +cloud.google.com/go/orgpolicy v1.4.0/go.mod h1:xrSLIV4RePWmP9P3tBl8S93lTmlAxjm06NSm2UTmKvE= +cloud.google.com/go/orgpolicy v1.5.0/go.mod h1:hZEc5q3wzwXJaKrsx5+Ewg0u1LxJ51nNFlext7Tanwc= +cloud.google.com/go/osconfig v1.7.0/go.mod h1:oVHeCeZELfJP7XLxcBGTMBvRO+1nQ5tFG9VQTmYS2Fs= +cloud.google.com/go/osconfig v1.8.0/go.mod h1:EQqZLu5w5XA7eKizepumcvWx+m8mJUhEwiPqWiZeEdg= +cloud.google.com/go/osconfig v1.9.0/go.mod h1:Yx+IeIZJ3bdWmzbQU4fxNl8xsZ4amB+dygAwFPlvnNo= +cloud.google.com/go/osconfig v1.10.0/go.mod h1:uMhCzqC5I8zfD9zDEAfvgVhDS8oIjySWh+l4WK6GnWw= +cloud.google.com/go/oslogin v1.4.0/go.mod h1:YdgMXWRaElXz/lDk1Na6Fh5orF7gvmJ0FGLIs9LId4E= +cloud.google.com/go/oslogin v1.5.0/go.mod h1:D260Qj11W2qx/HVF29zBg+0fd6YCSjSqLUkY/qEenQU= +cloud.google.com/go/oslogin v1.6.0/go.mod h1:zOJ1O3+dTU8WPlGEkFSh7qeHPPSoxrcMbbK1Nm2iX70= +cloud.google.com/go/oslogin v1.7.0/go.mod h1:e04SN0xO1UNJ1M5GP0vzVBFicIe4O53FOfcixIqTyXo= +cloud.google.com/go/phishingprotection v0.5.0/go.mod h1:Y3HZknsK9bc9dMi+oE8Bim0lczMU6hrX0UpADuMefr0= +cloud.google.com/go/phishingprotection v0.6.0/go.mod h1:9Y3LBLgy0kDTcYET8ZH3bq/7qni15yVUoAxiFxnlSUA= +cloud.google.com/go/policytroubleshooter v1.3.0/go.mod 
h1:qy0+VwANja+kKrjlQuOzmlvscn4RNsAc0e15GGqfMxg= +cloud.google.com/go/policytroubleshooter v1.4.0/go.mod h1:DZT4BcRw3QoO8ota9xw/LKtPa8lKeCByYeKTIf/vxdE= +cloud.google.com/go/privatecatalog v0.5.0/go.mod h1:XgosMUvvPyxDjAVNDYxJ7wBW8//hLDDYmnsNcMGq1K0= +cloud.google.com/go/privatecatalog v0.6.0/go.mod h1:i/fbkZR0hLN29eEWiiwue8Pb+GforiEIBnV9yrRUOKI= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/pubsub v1.26.0/go.mod h1:QgBH3U/jdJy/ftjPhTkyXNj543Tin1pRYcdcPRnFIRI= +cloud.google.com/go/pubsub v1.27.1/go.mod h1:hQN39ymbV9geqBnfQq6Xf63yNhUAhv9CZhzp5O6qsW0= +cloud.google.com/go/pubsublite v1.5.0/go.mod h1:xapqNQ1CuLfGi23Yda/9l4bBCKz/wC3KIJ5gKcxveZg= +cloud.google.com/go/recaptchaenterprise v1.3.1/go.mod h1:OdD+q+y4XGeAlxRaMn1Y7/GveP6zmq76byL6tjPE7d4= +cloud.google.com/go/recaptchaenterprise/v2 v2.1.0/go.mod h1:w9yVqajwroDNTfGuhmOjPDN//rZGySaf6PtFVcSCa7o= +cloud.google.com/go/recaptchaenterprise/v2 v2.2.0/go.mod h1:/Zu5jisWGeERrd5HnlS3EUGb/D335f9k51B/FVil0jk= +cloud.google.com/go/recaptchaenterprise/v2 v2.3.0/go.mod h1:O9LwGCjrhGHBQET5CA7dd5NwwNQUErSgEDit1DLNTdo= +cloud.google.com/go/recaptchaenterprise/v2 v2.4.0/go.mod h1:Am3LHfOuBstrLrNCBrlI5sbwx9LBg3te2N6hGvHn2mE= +cloud.google.com/go/recaptchaenterprise/v2 v2.5.0/go.mod h1:O8LzcHXN3rz0j+LBC91jrwI3R+1ZSZEWrfL7XHgNo9U= +cloud.google.com/go/recommendationengine v0.5.0/go.mod h1:E5756pJcVFeVgaQv3WNpImkFP8a+RptV6dDLGPILjvg= +cloud.google.com/go/recommendationengine v0.6.0/go.mod h1:08mq2umu9oIqc7tDy8sx+MNJdLG0fUi3vaSVbztHgJ4= +cloud.google.com/go/recommender v1.5.0/go.mod h1:jdoeiBIVrJe9gQjwd759ecLJbxCDED4A6p+mqoqDvTg= +cloud.google.com/go/recommender v1.6.0/go.mod 
h1:+yETpm25mcoiECKh9DEScGzIRyDKpZ0cEhWGo+8bo+c= +cloud.google.com/go/recommender v1.7.0/go.mod h1:XLHs/W+T8olwlGOgfQenXBTbIseGclClff6lhFVe9Bs= +cloud.google.com/go/recommender v1.8.0/go.mod h1:PkjXrTT05BFKwxaUxQmtIlrtj0kph108r02ZZQ5FE70= +cloud.google.com/go/redis v1.7.0/go.mod h1:V3x5Jq1jzUcg+UNsRvdmsfuFnit1cfe3Z/PGyq/lm4Y= +cloud.google.com/go/redis v1.8.0/go.mod h1:Fm2szCDavWzBk2cDKxrkmWBqoCiL1+Ctwq7EyqBCA/A= +cloud.google.com/go/redis v1.9.0/go.mod h1:HMYQuajvb2D0LvMgZmLDZW8V5aOC/WxstZHiy4g8OiA= +cloud.google.com/go/redis v1.10.0/go.mod h1:ThJf3mMBQtW18JzGgh41/Wld6vnDDc/F/F35UolRZPM= +cloud.google.com/go/resourcemanager v1.3.0/go.mod h1:bAtrTjZQFJkiWTPDb1WBjzvc6/kifjj4QBYuKCCoqKA= +cloud.google.com/go/resourcemanager v1.4.0/go.mod h1:MwxuzkumyTX7/a3n37gmsT3py7LIXwrShilPh3P1tR0= +cloud.google.com/go/resourcesettings v1.3.0/go.mod h1:lzew8VfESA5DQ8gdlHwMrqZs1S9V87v3oCnKCWoOuQU= +cloud.google.com/go/resourcesettings v1.4.0/go.mod h1:ldiH9IJpcrlC3VSuCGvjR5of/ezRrOxFtpJoJo5SmXg= +cloud.google.com/go/retail v1.8.0/go.mod h1:QblKS8waDmNUhghY2TI9O3JLlFk8jybHeV4BF19FrE4= +cloud.google.com/go/retail v1.9.0/go.mod h1:g6jb6mKuCS1QKnH/dpu7isX253absFl6iE92nHwlBUY= +cloud.google.com/go/retail v1.10.0/go.mod h1:2gDk9HsL4HMS4oZwz6daui2/jmKvqShXKQuB2RZ+cCc= +cloud.google.com/go/retail v1.11.0/go.mod h1:MBLk1NaWPmh6iVFSz9MeKG/Psyd7TAgm6y/9L2B4x9Y= +cloud.google.com/go/run v0.2.0/go.mod h1:CNtKsTA1sDcnqqIFR3Pb5Tq0usWxJJvsWOCPldRU3Do= +cloud.google.com/go/run v0.3.0/go.mod h1:TuyY1+taHxTjrD0ZFk2iAR+xyOXEA0ztb7U3UNA0zBo= +cloud.google.com/go/scheduler v1.4.0/go.mod h1:drcJBmxF3aqZJRhmkHQ9b3uSSpQoltBPGPxGAWROx6s= +cloud.google.com/go/scheduler v1.5.0/go.mod h1:ri073ym49NW3AfT6DZi21vLZrG07GXr5p3H1KxN5QlI= +cloud.google.com/go/scheduler v1.6.0/go.mod h1:SgeKVM7MIwPn3BqtcBntpLyrIJftQISRrYB5ZtT+KOk= +cloud.google.com/go/scheduler v1.7.0/go.mod h1:jyCiBqWW956uBjjPMMuX09n3x37mtyPJegEWKxRsn44= +cloud.google.com/go/secretmanager v1.6.0/go.mod h1:awVa/OXF6IiyaU1wQ34inzQNc4ISIDIrId8qE5QGgKA= 
+cloud.google.com/go/secretmanager v1.8.0/go.mod h1:hnVgi/bN5MYHd3Gt0SPuTPPp5ENina1/LxM+2W9U9J4= +cloud.google.com/go/secretmanager v1.9.0/go.mod h1:b71qH2l1yHmWQHt9LC80akm86mX8AL6X1MA01dW8ht4= +cloud.google.com/go/security v1.5.0/go.mod h1:lgxGdyOKKjHL4YG3/YwIL2zLqMFCKs0UbQwgyZmfJl4= +cloud.google.com/go/security v1.7.0/go.mod h1:mZklORHl6Bg7CNnnjLH//0UlAlaXqiG7Lb9PsPXLfD0= +cloud.google.com/go/security v1.8.0/go.mod h1:hAQOwgmaHhztFhiQ41CjDODdWP0+AE1B3sX4OFlq+GU= +cloud.google.com/go/security v1.9.0/go.mod h1:6Ta1bO8LXI89nZnmnsZGp9lVoVWXqsVbIq/t9dzI+2Q= +cloud.google.com/go/security v1.10.0/go.mod h1:QtOMZByJVlibUT2h9afNDWRZ1G96gVywH8T5GUSb9IA= +cloud.google.com/go/securitycenter v1.13.0/go.mod h1:cv5qNAqjY84FCN6Y9z28WlkKXyWsgLO832YiWwkCWcU= +cloud.google.com/go/securitycenter v1.14.0/go.mod h1:gZLAhtyKv85n52XYWt6RmeBdydyxfPeTrpToDPw4Auc= +cloud.google.com/go/securitycenter v1.15.0/go.mod h1:PeKJ0t8MoFmmXLXWm41JidyzI3PJjd8sXWaVqg43WWk= +cloud.google.com/go/securitycenter v1.16.0/go.mod h1:Q9GMaLQFUD+5ZTabrbujNWLtSLZIZF7SAR0wWECrjdk= +cloud.google.com/go/servicecontrol v1.4.0/go.mod h1:o0hUSJ1TXJAmi/7fLJAedOovnujSEvjKCAFNXPQ1RaU= +cloud.google.com/go/servicecontrol v1.5.0/go.mod h1:qM0CnXHhyqKVuiZnGKrIurvVImCs8gmqWsDoqe9sU1s= +cloud.google.com/go/servicedirectory v1.4.0/go.mod h1:gH1MUaZCgtP7qQiI+F+A+OpeKF/HQWgtAddhTbhL2bs= +cloud.google.com/go/servicedirectory v1.5.0/go.mod h1:QMKFL0NUySbpZJ1UZs3oFAmdvVxhhxB6eJ/Vlp73dfg= +cloud.google.com/go/servicedirectory v1.6.0/go.mod h1:pUlbnWsLH9c13yGkxCmfumWEPjsRs1RlmJ4pqiNjVL4= +cloud.google.com/go/servicedirectory v1.7.0/go.mod h1:5p/U5oyvgYGYejufvxhgwjL8UVXjkuw7q5XcG10wx1U= +cloud.google.com/go/servicemanagement v1.4.0/go.mod h1:d8t8MDbezI7Z2R1O/wu8oTggo3BI2GKYbdG4y/SJTco= +cloud.google.com/go/servicemanagement v1.5.0/go.mod h1:XGaCRe57kfqu4+lRxaFEAuqmjzF0r+gWHjWqKqBvKFo= +cloud.google.com/go/serviceusage v1.3.0/go.mod h1:Hya1cozXM4SeSKTAgGXgj97GlqUvF5JaoXacR1JTP/E= +cloud.google.com/go/serviceusage v1.4.0/go.mod 
h1:SB4yxXSaYVuUBYUml6qklyONXNLt83U0Rb+CXyhjEeU= +cloud.google.com/go/shell v1.3.0/go.mod h1:VZ9HmRjZBsjLGXusm7K5Q5lzzByZmJHf1d0IWHEN5X4= +cloud.google.com/go/shell v1.4.0/go.mod h1:HDxPzZf3GkDdhExzD/gs8Grqk+dmYcEjGShZgYa9URw= +cloud.google.com/go/spanner v1.41.0/go.mod h1:MLYDBJR/dY4Wt7ZaMIQ7rXOTLjYrmxLE/5ve9vFfWos= +cloud.google.com/go/speech v1.6.0/go.mod h1:79tcr4FHCimOp56lwC01xnt/WPJZc4v3gzyT7FoBkCM= +cloud.google.com/go/speech v1.7.0/go.mod h1:KptqL+BAQIhMsj1kOP2la5DSEEerPDuOP/2mmkhHhZQ= +cloud.google.com/go/speech v1.8.0/go.mod h1:9bYIl1/tjsAnMgKGHKmBZzXKEkGgtU+MpdDPTE9f7y0= +cloud.google.com/go/speech v1.9.0/go.mod h1:xQ0jTcmnRFFM2RfX/U+rk6FQNUF6DQlydUSyoooSpco= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= +cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= +cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc= +cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s= +cloud.google.com/go/storagetransfer v1.5.0/go.mod h1:dxNzUopWy7RQevYFHewchb29POFv3/AaBgnhqzqiK0w= +cloud.google.com/go/storagetransfer v1.6.0/go.mod h1:y77xm4CQV/ZhFZH75PLEXY0ROiS7Gh6pSKrM8dJyg6I= +cloud.google.com/go/talent v1.1.0/go.mod h1:Vl4pt9jiHKvOgF9KoZo6Kob9oV4lwd/ZD5Cto54zDRw= +cloud.google.com/go/talent v1.2.0/go.mod h1:MoNF9bhFQbiJ6eFD3uSsg0uBALw4n4gaCaEjBw9zo8g= +cloud.google.com/go/talent v1.3.0/go.mod h1:CmcxwJ/PKfRgd1pBjQgU6W3YBwiewmUzQYH5HHmSCmM= +cloud.google.com/go/talent 
v1.4.0/go.mod h1:ezFtAgVuRf8jRsvyE6EwmbTK5LKciD4KVnHuDEFmOOA= +cloud.google.com/go/texttospeech v1.4.0/go.mod h1:FX8HQHA6sEpJ7rCMSfXuzBcysDAuWusNNNvN9FELDd8= +cloud.google.com/go/texttospeech v1.5.0/go.mod h1:oKPLhR4n4ZdQqWKURdwxMy0uiTS1xU161C8W57Wkea4= +cloud.google.com/go/tpu v1.3.0/go.mod h1:aJIManG0o20tfDQlRIej44FcwGGl/cD0oiRyMKG19IQ= +cloud.google.com/go/tpu v1.4.0/go.mod h1:mjZaX8p0VBgllCzF6wcU2ovUXN9TONFLd7iz227X2Xg= +cloud.google.com/go/trace v1.3.0/go.mod h1:FFUE83d9Ca57C+K8rDl/Ih8LwOzWIV1krKgxg6N0G28= +cloud.google.com/go/trace v1.4.0/go.mod h1:UG0v8UBqzusp+z63o7FK74SdFE+AXpCLdFb1rshXG+Y= +cloud.google.com/go/translate v1.3.0/go.mod h1:gzMUwRjvOqj5i69y/LYLd8RrNQk+hOmIXTi9+nb3Djs= +cloud.google.com/go/translate v1.4.0/go.mod h1:06Dn/ppvLD6WvA5Rhdp029IX2Mi3Mn7fpMRLPvXT5Wg= +cloud.google.com/go/video v1.8.0/go.mod h1:sTzKFc0bUSByE8Yoh8X0mn8bMymItVGPfTuUBUyRgxk= +cloud.google.com/go/video v1.9.0/go.mod h1:0RhNKFRF5v92f8dQt0yhaHrEuH95m068JYOvLZYnJSw= +cloud.google.com/go/videointelligence v1.6.0/go.mod h1:w0DIDlVRKtwPCn/C4iwZIJdvC69yInhW0cfi+p546uU= +cloud.google.com/go/videointelligence v1.7.0/go.mod h1:k8pI/1wAhjznARtVT9U1llUaFNPh7muw8QyOUpavru4= +cloud.google.com/go/videointelligence v1.8.0/go.mod h1:dIcCn4gVDdS7yte/w+koiXn5dWVplOZkE+xwG9FgK+M= +cloud.google.com/go/videointelligence v1.9.0/go.mod h1:29lVRMPDYHikk3v8EdPSaL8Ku+eMzDljjuvRs105XoU= +cloud.google.com/go/vision v1.2.0/go.mod h1:SmNwgObm5DpFBme2xpyOyasvBc1aPdjvMk2bBk0tKD0= +cloud.google.com/go/vision/v2 v2.2.0/go.mod h1:uCdV4PpN1S0jyCyq8sIM42v2Y6zOLkZs+4R9LrGYwFo= +cloud.google.com/go/vision/v2 v2.3.0/go.mod h1:UO61abBx9QRMFkNBbf1D8B1LXdS2cGiiCRx0vSpZoUo= +cloud.google.com/go/vision/v2 v2.4.0/go.mod h1:VtI579ll9RpVTrdKdkMzckdnwMyX2JILb+MhPqRbPsY= +cloud.google.com/go/vision/v2 v2.5.0/go.mod h1:MmaezXOOE+IWa+cS7OhRRLK2cNv1ZL98zhqFFZaaH2E= +cloud.google.com/go/vmmigration v1.2.0/go.mod h1:IRf0o7myyWFSmVR1ItrBSFLFD/rJkfDCUTO4vLlJvsE= +cloud.google.com/go/vmmigration v1.3.0/go.mod 
h1:oGJ6ZgGPQOFdjHuocGcLqX4lc98YQ7Ygq8YQwHh9A7g= +cloud.google.com/go/vmwareengine v0.1.0/go.mod h1:RsdNEf/8UDvKllXhMz5J40XxDrNJNN4sagiox+OI208= +cloud.google.com/go/vpcaccess v1.4.0/go.mod h1:aQHVbTWDYUR1EbTApSVvMq1EnT57ppDmQzZ3imqIk4w= +cloud.google.com/go/vpcaccess v1.5.0/go.mod h1:drmg4HLk9NkZpGfCmZ3Tz0Bwnm2+DKqViEpeEpOq0m8= +cloud.google.com/go/webrisk v1.4.0/go.mod h1:Hn8X6Zr+ziE2aNd8SliSDWpEnSS1u4R9+xXZmFiHmGE= +cloud.google.com/go/webrisk v1.5.0/go.mod h1:iPG6fr52Tv7sGk0H6qUFzmL3HHZev1htXuWDEEsqMTg= +cloud.google.com/go/webrisk v1.6.0/go.mod h1:65sW9V9rOosnc9ZY7A7jsy1zoHS5W9IAXv6dGqhMQMc= +cloud.google.com/go/webrisk v1.7.0/go.mod h1:mVMHgEYH0r337nmt1JyLthzMr6YxwN1aAIEc2fTcq7A= +cloud.google.com/go/websecurityscanner v1.3.0/go.mod h1:uImdKm2wyeXQevQJXeh8Uun/Ym1VqworNDlBXQevGMo= +cloud.google.com/go/websecurityscanner v1.4.0/go.mod h1:ebit/Fp0a+FWu5j4JOmJEV8S8CzdTkAS77oDsiSqYWQ= +cloud.google.com/go/workflows v1.6.0/go.mod h1:6t9F5h/unJz41YqfBmqSASJSXccBLtD1Vwf+KmJENM0= +cloud.google.com/go/workflows v1.7.0/go.mod h1:JhSrZuVZWuiDfKEFxU0/F1PQjmpnpcoISEXH2bcHC3M= +cloud.google.com/go/workflows v1.8.0/go.mod h1:ysGhmEajwZxGn1OhGOGKsTXc5PyxOc0vfKf5Af+to4M= +cloud.google.com/go/workflows v1.9.0/go.mod h1:ZGkj1aFIOd9c8Gerkjjq7OW7I5+l6cSvT3ujaO/WwSA= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod 
h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= 
+github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= +github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo= +github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= github.com/ghodss/yaml v1.0.0/go.mod 
h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod 
h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= @@ -31,92 +457,626 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= 
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod 
h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/googleapis/enterprise-certificate-proxy 
v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= +github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= +github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= +github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= +github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= +github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo= +github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY= +github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8= +github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= +github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= 
+github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= +github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= +github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.3.3/go.mod 
h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= +github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= +github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io 
v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod 
h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod 
h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= 
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net 
v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b h1:PxfKdU9lEEDYjdIzOtC4qFWgkU2rGHdKlKowJSMN9h0= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod 
h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= +golang.org/x/net v0.5.0 h1:GyT4nK/YDHSqa1c4753ouYCDajOYKTja9Xb/OHtgvSw= +golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod 
h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.4.0/go.mod 
h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
+golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys 
v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f h1:v4INt8xihDGvnrfjMDVXGxw9wrfxYyCjk0KbXjhR55s= +golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18= +golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= +golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= 
+golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.3.8 h1:nAL+RVCQ9uMn3vJZbV+MRnydTJFPf8qqY42YiA6MrqY= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k= +golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod 
h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod 
h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod 
h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +google.golang.org/api v0.4.0/go.mod 
h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= +google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= +google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= +google.golang.org/api v0.50.0/go.mod 
h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= +google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= +google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= +google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= +google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= +google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= +google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= +google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= +google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= +google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= +google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.77.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= +google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= +google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= +google.golang.org/api v0.85.0/go.mod h1:AqZf8Ep9uZ2pyTvgL+x0D3Zt0eoT9b5E8fmzfu6FO2g= +google.golang.org/api v0.90.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.93.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.95.0/go.mod h1:eADj+UBuxkh5zlrSntJghuNeg8HwQ1w5lTKkuqaETEI= +google.golang.org/api v0.96.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.97.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.98.0/go.mod 
h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.99.0/go.mod h1:1YOf74vkVndF7pG6hIHuINsM7eWwpVTAfNMNiL91A08= +google.golang.org/api v0.100.0/go.mod h1:ZE3Z2+ZOr87Rx7dqFsdRQkRBk36kDtp/h+QpHbB7a70= +google.golang.org/api v0.102.0/go.mod h1:3VFl6/fzoA+qNuS1N1/VfXY4LjoXN/wzeIp7TweWwGo= +google.golang.org/api v0.103.0/go.mod h1:hGtW6nK1AC+d9si/UBhw8Xli+QMOf6xyNAyJw4qU9w0= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto 
v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY= +google.golang.org/genproto 
v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= 
+google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= +google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= +google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod 
h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= +google.golang.org/genproto v0.0.0-20220329172620-7be39ac1afc7/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto 
v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220722212130-b98a9ff5e252/go.mod h1:GkXuJDJ6aQ7lnJcRF+SJVgFdQhypqgl3LB1C9vabdRE= +google.golang.org/genproto v0.0.0-20220801145646-83ce21fca29f/go.mod h1:iHe1svFLAZg9VWz891+QbRMwUv9O/1Ww+/mngYeThbc= +google.golang.org/genproto v0.0.0-20220815135757-37a418bb8959/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220817144833-d7fd3f11b9b1/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220822174746-9e6da59bd2fc/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220829144015-23454907ede3/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= 
+google.golang.org/genproto v0.0.0-20220829175752-36a9c930ecbf/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220913154956-18f8339a66a5/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220914142337-ca0e39ece12f/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220915135415-7fd63a7952de/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220916172020-2692e8806bfa/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220919141832-68c03719ef51/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006/go.mod h1:ht8XFiar2npT/g4vkk7O0WYS1sHOHbdujxbEp7CJWbw= +google.golang.org/genproto v0.0.0-20220926165614-551eb538f295/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= +google.golang.org/genproto v0.0.0-20220926220553-6981cbe3cfce/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= +google.golang.org/genproto v0.0.0-20221010155953-15ba04fc1c0e/go.mod h1:3526vdqwhZAwq4wsRUaVG555sVgsNmIjRtO7t/JH29U= +google.golang.org/genproto v0.0.0-20221014173430-6e2ab493f96b/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= +google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= +google.golang.org/genproto v0.0.0-20221024153911-1573dae28c9c/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= +google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= +google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c/go.mod h1:CGI5F/G+E5bKwmfYo09AXuVN4dD894kIKUFmVbP2/Fo= +google.golang.org/genproto v0.0.0-20221114212237-e4508ebdbee1/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221117204609-8f9c96812029/go.mod 
h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221201164419-0e50fba7f41c/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221202195650-67e5cbc046fd/go.mod h1:cTsE614GARnxrLsqKREzmNYJACSWWpAWdNMwnD7c2BE= +google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f h1:BWUVssLB0HVOSY78gIdvk1dTVYtT1y8SBWtPYuTJ/6w= +google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod 
h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.43.0 h1:Eeu7bZtDZ2DpRCsLhUlcrLnvYaMK1Gz86a+hMVvELmM= -google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= +google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.50.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.51.0/go.mod 
h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww= +google.golang.org/grpc v1.53.0 h1:LAv2ds7cmFV/XTS3XG1NneeENYrXGmorPxsBbptIjNc= +google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -125,11 +1085,28 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/t/plugin/http-logger-log-format.t b/t/plugin/http-logger-log-format.t index d2c6daa9ee0f..0bc8ceab31fa 100644 --- a/t/plugin/http-logger-log-format.t +++ b/t/plugin/http-logger-log-format.t @@ -63,7 +63,7 @@ passed [[{ "plugins": { "http-logger": { - "uri": "http://127.0.0.1:1980/log", + "uri": "http://127.0.0.1:3001", "batch_max_size": 1, "max_retry_count": 1, "retry_delay": 2, @@ -85,6 +85,13 @@ passed if code >= 300 then ngx.status = code end + + local code, _, body2 = t("/hello", "GET") + if 
code >= 300 then + ngx.status = code + ngx.say("fail") + return + end ngx.say(body) } } @@ -95,14 +102,11 @@ passed -=== TEST 3: hit route and report http logger ---- request -GET /hello ---- response_body -hello world ---- wait: 0.5 ---- error_log eval -qr/request log: \{.*route_id":"1".*\}/ +=== TEST 3: report http logger +--- exec +tail -n 1 ci/pod/vector/http.log +--- response_body eval +qr/.*route_id":"1".*/ @@ -116,7 +120,7 @@ qr/request log: \{.*route_id":"1".*\}/ [[{ "plugins": { "http-logger": { - "uri": "http://127.0.0.1:1980/log", + "uri": "http://127.0.0.1:3001", "batch_max_size": 2, "max_retry_count": 1, "retry_delay": 2, @@ -138,6 +142,13 @@ qr/request log: \{.*route_id":"1".*\}/ if code >= 300 then ngx.status = code end + + local code, _, body2 = t("/hello", "GET") + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end ngx.say(body) } } @@ -148,28 +159,11 @@ passed -=== TEST 5: hit route and report http logger ---- config -location /t { - content_by_lua_block { - local t = require("lib.test_admin").test - - for i = 1, 2 do - t('/hello', ngx.HTTP_GET) - end - - ngx.sleep(3) - ngx.say("done") - } -} ---- request -GET /t ---- error_code: 200 ---- grep_error_log eval +=== TEST 5: report http logger +--- exec +tail -n 1 ci/pod/vector/http.log +--- response_body eval qr/"\@timestamp":"20/ ---- grep_error_log_out -"@timestamp":"20 -"@timestamp":"20 @@ -183,7 +177,7 @@ qr/"\@timestamp":"20/ [[{ "plugins": { "http-logger": { - "uri": "http://127.0.0.1:1980/log", + "uri": "http://127.0.0.1:3001", "batch_max_size": 1, "max_retry_count": 1, "retry_delay": 2, @@ -205,6 +199,13 @@ qr/"\@timestamp":"20/ if code >= 300 then ngx.status = code end + + local code, _, body2 = t("/hello", "GET") + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end ngx.say(body) } } @@ -215,14 +216,11 @@ passed -=== TEST 7: hit route and report http logger ---- request -GET /hello ---- response_body -hello world ---- wait: 0.5 ---- error_log eval 
-qr/request log: \{"\@timestamp":.*,"client_ip":"127.0.0.1","host":"localhost","route_id":"1"\}/ +=== TEST 7: report http logger +--- exec +tail -n 1 ci/pod/vector/http.log +--- response_body eval +qr/"route_id":"1"/ @@ -236,7 +234,7 @@ qr/request log: \{"\@timestamp":.*,"client_ip":"127.0.0.1","host":"localhost","r [[{ "plugins": { "http-logger": { - "uri": "http://127.0.0.1:1980/log", + "uri": "http://127.0.0.1:3001", "batch_max_size": 2, "max_retry_count": 1, "retry_delay": 2, @@ -258,6 +256,20 @@ qr/request log: \{"\@timestamp":.*,"client_ip":"127.0.0.1","host":"localhost","r if code >= 300 then ngx.status = code end + + local code, _, body2 = t("/hello", "GET") + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + + local code, _, body2 = t("/hello", "GET") + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end ngx.say(body) } } @@ -268,25 +280,11 @@ passed -=== TEST 9: hit route and report http logger ---- config -location /t { - content_by_lua_block { - local t = require("lib.test_admin").test - - for i = 1, 2 do - t('/hello', ngx.HTTP_GET) - end - - ngx.sleep(3) - ngx.say("done") - } -} ---- request -GET /t ---- error_code: 200 ---- error_log eval -qr/request log: \[\{"\@timestamp":".*","client_ip":"127.0.0.1","host":"127.0.0.1","route_id":"1"\},\{"\@timestamp":".*","client_ip":"127.0.0.1","host":"127.0.0.1","route_id":"1"\}\]/ +=== TEST 9: report http logger to confirm two json in array +--- exec +tail -n 1 ci/pod/vector/http.log +--- response_body eval +qr/\[\{.*?\},\{.*?\}\]/ @@ -356,7 +354,7 @@ passed [[{ "plugins": { "http-logger": { - "uri": "http://127.0.0.1:1982/log", + "uri": "http://127.0.0.1:3001", "batch_max_size": 1, "max_retry_count": 1, "retry_delay": 2, @@ -378,6 +376,13 @@ passed if code >= 300 then ngx.status = code end + + local code, _, _ = t("/hello", "GET",null,null,{apikey = "auth-one"}) + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end ngx.say(body) } } @@ -388,15 
+393,11 @@ passed -=== TEST 13: hit ---- request -GET /hello ---- more_headers -apikey: auth-one ---- grep_error_log eval -qr/request log: \{.+\}/ ---- grep_error_log_out eval -qr/\Q{"apisix_latency":\E[^,]+\Q,"client_ip":"127.0.0.1","consumer":{"username":"jack"},"latency":\E[^,]+\Q,"request":{"headers":{"apikey":"auth-one","connection":"close","host":"localhost"},"method":"GET","querystring":{},"size":\E\d+\Q,"uri":"\/hello","url":"http:\/\/localhost:1984\/hello"},"response":{"headers":{"connection":"close","content-length":"\E\d+\Q","content-type":"text\/plain","server":"\E[^"]+\Q"},"size":\E\d+\Q,"status":200},"route_id":"1","server":{"hostname":"\E[^"]+\Q","version":"\E[^"]+\Q"},"service_id":"","start_time":\E\d+\Q,"upstream":"127.0.0.1:1982","upstream_latency":\E[^,]+\Q}\E/ +=== TEST 13: check logs +--- exec +tail -n 1 ci/pod/vector/http.log +--- response_body eval +qr/"consumer":\{"username":"jack"\}/ --- wait: 0.5 @@ -428,6 +429,16 @@ done === TEST 15: use custom variable in the logger +--- extra_init_by_lua + local core = require "apisix.core" + + core.ctx.register_var("a6_route_labels", function(ctx) + local route = ctx.matched_route and ctx.matched_route.value + if route and route.labels then + return route.labels + end + return nil + end) --- config location /t { content_by_lua_block { @@ -453,20 +464,20 @@ done [[{ "plugins": { "http-logger": { - "uri": "http://127.0.0.1:1980/log", + "uri": "http://127.0.0.1:3001", "batch_max_size": 1, "concat_method": "json" } }, + "labels":{ + "key":"testvalue" + }, "upstream": { "nodes": { "127.0.0.1:1982": 1 }, "type": "roundrobin" }, - "labels": { - "k": "v" - }, "uri": "/hello" }]] ) @@ -474,6 +485,13 @@ done if code >= 300 then ngx.status = code end + + local code, _, body2 = t("/hello", "GET") + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end ngx.say(body) } } @@ -485,23 +503,10 @@ passed === TEST 16: hit route and report http logger ---- extra_init_by_lua - local core = require 
"apisix.core" - - core.ctx.register_var("a6_route_labels", function(ctx) - local route = ctx.matched_route and ctx.matched_route.value - if route and route.labels then - return route.labels - end - return nil - end) ---- request -GET /hello ---- response_body -hello world ---- wait: 0.5 ---- error_log eval -qr/request log: \{"client_ip":"127.0.0.1","host":"localhost","labels":\{"k":"v"\},"route_id":"1"\}/ +--- exec +tail -n 1 ci/pod/vector/http.log +--- response_body eval +qr/.*testvalue.*/ @@ -515,7 +520,7 @@ qr/request log: \{"client_ip":"127.0.0.1","host":"localhost","labels":\{"k":"v"\ [[{ "plugins": { "http-logger": { - "uri": "http://127.0.0.1:1980/log", + "uri": "http://127.0.0.1:3001", "batch_max_size": 1, "max_retry_count": 1, "retry_delay": 2, @@ -540,6 +545,13 @@ qr/request log: \{"client_ip":"127.0.0.1","host":"localhost","labels":\{"k":"v"\ if code >= 300 then ngx.status = code end + + local code, _, body2 = t("/hello", "GET") + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end ngx.say(body) } } @@ -551,10 +563,7 @@ passed === TEST 18: hit route and report http logger ---- request -GET /hello ---- response_body -hello world ---- wait: 0.5 ---- error_log eval -qr/request log: \{.*"x_ip":"127.0.0.1".*\}/ +--- exec +tail -n 1 ci/pod/vector/http.log +--- response_body eval +qr/"x_ip":"127.0.0.1".*\}/ diff --git a/t/plugin/http-logger2.t b/t/plugin/http-logger2.t index 09572439c775..12ee8b437d78 100644 --- a/t/plugin/http-logger2.t +++ b/t/plugin/http-logger2.t @@ -88,6 +88,12 @@ add_block_preprocessor(sub { end } } + + location / { + content_by_lua_block { + ngx.log(ngx.WARN, "test http logger for root path") + } + } } _EOC_ @@ -305,3 +311,49 @@ test-http-logger-request --- error_log received Authorization header: [nil] --- wait: 1.5 + + + +=== TEST 10: add default path +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ 
+ "plugins": { + "http-logger": { + "uri": "http://127.0.0.1:12001", + "batch_max_size": 1, + "max_retry_count": 1, + "retry_delay": 2, + "buffer_duration": 2, + "inactive_timeout": 2 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:12001": 1 + }, + "type": "roundrobin" + }, + "uri": "/http-logger/test" + }]]) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 11: hit +--- request +GET /http-logger/test +--- error_log +test http logger for root path diff --git a/t/plugin/inspect.t b/t/plugin/inspect.t index 0bbbfd7ff937..56f56d83ae4b 100644 --- a/t/plugin/inspect.t +++ b/t/plugin/inspect.t @@ -527,3 +527,31 @@ upvar2=yes --- error_log inspect: remove hook: t/lib/test_inspect.lua#27 inspect: all hooks removed + + + +=== TEST 14: jit should be recovered after all hooks are done +--- config + location /t { + content_by_lua_block { + local test = require("lib.test_inspect") + + local t1 = test.hot1() + + write_hooks([[ + local test = require("lib.test_inspect") + local dbg = require "apisix.inspect.dbg" + dbg.set_hook("t/lib/test_inspect.lua", 47, test.hot1, function(info) + return true + end) + ]]) + + ngx.sleep(1.5) + + local t2 = test.hot1() + assert(t2-t1 < t1*0.8, "hot1 consumes at least double times than before") + } + } +--- error_log +inspect: remove hook: t/lib/test_inspect.lua#47 +inspect: all hooks removed diff --git a/t/plugin/ip-restriction.t b/t/plugin/ip-restriction.t index d3f4f23b7b2e..11bcb52e55ce 100644 --- a/t/plugin/ip-restriction.t +++ b/t/plugin/ip-restriction.t @@ -625,7 +625,7 @@ GET /hello === TEST 29: set whitelist and user-defined message --- config - location /t { +location /t { content_by_lua_block { local t = require("lib.test_admin").test local code, body = t('/apisix/admin/routes/1', diff --git a/t/plugin/jwt-auth3.t b/t/plugin/jwt-auth3.t index 7daf6cf164a3..3ab089a974c6 100755 --- a/t/plugin/jwt-auth3.t +++ b/t/plugin/jwt-auth3.t @@ -280,11 +280,11 @@ hello: world }, 
"upstream": { "nodes": { - "httpbin.org:80": 1 + "test.com:1980": 1 }, "type": "roundrobin" }, - "uri": "/get" + "uri": "/hello" }]] ) @@ -299,11 +299,11 @@ hello: world === TEST 12: verify (in cookie) with hiding credentials --- request -GET /get +GET /hello --- more_headers Cookie: hello=world; jwt-cookie=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJrZXkiOiJ1c2VyLWtleSIsImV4cCI6MTg3OTMxODU0MX0.fNtFJnNmJgzbiYmGB0Yjvm-l6A6M4jRV1l4mnVFSYjs; foo=bar ---- response_body eval -qr/hello=world; foo=bar/ +--- response_body +hello world diff --git a/t/plugin/ldap-auth.t b/t/plugin/ldap-auth.t index e16bc05135a3..ba16f2c75b0b 100644 --- a/t/plugin/ldap-auth.t +++ b/t/plugin/ldap-auth.t @@ -359,7 +359,7 @@ find consumer user01 "plugins": { "ldap-auth": { "base_dn": "ou=users,dc=example,dc=org", - "ldap_uri": "localhost:1636", + "ldap_uri": "test.com:1636", "uid": "cn", "use_tls": true } @@ -408,7 +408,7 @@ find consumer user01 "plugins": { "ldap-auth": { "base_dn": "ou=users,dc=example,dc=org", - "ldap_uri": "localhost:1636", + "ldap_uri": "test.com:1636", "uid": "cn", "use_tls": true, "tls_verify": true diff --git a/t/plugin/limit-count-redis-cluster2.t b/t/plugin/limit-count-redis-cluster2.t index d5363c016d8b..ede8ee9dcc82 100644 --- a/t/plugin/limit-count-redis-cluster2.t +++ b/t/plugin/limit-count-redis-cluster2.t @@ -87,3 +87,53 @@ OK OK OK Done + + + +=== TEST 2: test header X-RateLimit-Reset shouldn't be set to 0 after request be rejected +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello2", + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "key": "remote_addr", + "policy": "redis-cluster", + "redis_cluster_nodes": [ + "127.0.0.1:5000", + "127.0.0.1:5001" + ], + "redis_cluster_name": "redis-cluster-1" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + for i = 1, 3 do + 
local _, _, headers = t('/hello2', ngx.HTTP_GET) + ngx.sleep(1) + if tonumber(headers["X-RateLimit-Reset"]) > 0 then + ngx.say("OK") + else + ngx.say("WRONG") + end + end + ngx.say("Done") + } + } +--- response_body +OK +OK +OK +Done diff --git a/t/plugin/limit-count-redis-cluster3.t b/t/plugin/limit-count-redis-cluster3.t new file mode 100644 index 000000000000..b534c3a2c363 --- /dev/null +++ b/t/plugin/limit-count-redis-cluster3.t @@ -0,0 +1,79 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +use t::APISIX; + +my $nginx_binary = $ENV{'TEST_NGINX_BINARY'} || 'nginx'; +my $version = eval { `$nginx_binary -V 2>&1` }; + +if ($version !~ m/\/apisix-nginx-module/) { + plan(skip_all => "apisix-nginx-module not installed"); +} else { + plan('no_plan'); +} + +repeat_each(1); +no_long_string(); +no_shuffle(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if (!$block->error_log && !$block->no_error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: modified redis script, cost == 2 +--- config + location /t { + content_by_lua_block { + local conf = { + redis_cluster_nodes = {"127.0.0.1:5000", "127.0.0.1:5001"}, + redis_cluster_name = "redis-cluster-1", + redis_cluster_ssl = false, + redis_timeout = 1000, + key_type = "var", + time_window = 60, + show_limit_quota_header = true, + allow_degradation = false, + key = "remote_addr", + rejected_code = 503, + count = 3, + policy = "redis-cluster", + redis_cluster_ssl_verify = false + } + + local lim_count_redis_cluster = require("apisix.plugins.limit-count.limit-count-redis-cluster") + local lim = lim_count_redis_cluster.new("limit-count", 3, 60, conf) + local uri = ngx.var.uri + local _, remaining, _ = lim:incoming(uri, 2) + + ngx.say("remaining: ", remaining) + } + } +--- response_body +remaining: 1 diff --git a/t/plugin/limit-count-redis.t b/t/plugin/limit-count-redis.t index a777a3871d06..d06188050df2 100644 --- a/t/plugin/limit-count-redis.t +++ b/t/plugin/limit-count-redis.t @@ -344,7 +344,175 @@ failed to limit count: WRONGPASS invalid username-password pair or user is disab -=== TEST 13: restore redis password to '' +=== TEST 13: set route, with redis host, port and bad username and good password +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + local code, body = t('/apisix/admin/routes/1', + 
ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr", + "policy": "redis", + "redis_host": "127.0.0.1", + "redis_port": 6379, + "redis_timeout": 1001, + "redis_username": "bob", + "redis_password": "somepassword" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 14: request for TEST 13 +--- request +GET /hello +--- error_code eval +500 +--- error_log +failed to limit count: WRONGPASS invalid username-password pair or user is disabled + + + +=== TEST 15: set route, with redis host, port and good username and bad password +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr", + "policy": "redis", + "redis_host": "127.0.0.1", + "redis_port": 6379, + "redis_timeout": 1001, + "redis_username": "alice", + "redis_password": "badpassword" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 16: request for TEST 15 +--- request +GET /hello +--- error_code eval +500 +--- error_log +failed to limit count: WRONGPASS invalid username-password pair or user is disabled + + + +=== TEST 17: set route, with redis host, port and right username and password +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + 
"rejected_code": 503, + "key": "remote_addr", + "policy": "redis", + "redis_host": "127.0.0.1", + "redis_port": 6379, + "redis_timeout": 1001, + "redis_username": "alice", + "redis_password": "somepassword" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 18: up the limit +--- pipelined_requests eval +["GET /hello", "GET /hello", "GET /hello", "GET /hello"] +--- error_code eval +[200, 200, 503, 503] + + + +=== TEST 19: up the limit +--- pipelined_requests eval +["GET /hello1", "GET /hello", "GET /hello2", "GET /hello", "GET /hello"] +--- error_code eval +[404, 503, 404, 503, 503] + + + +=== TEST 20: restore redis password to '' --- config location /t { content_by_lua_block { diff --git a/t/plugin/limit-count-redis3.t b/t/plugin/limit-count-redis3.t index 781f527206c0..bf952df1863b 100644 --- a/t/plugin/limit-count-redis3.t +++ b/t/plugin/limit-count-redis3.t @@ -186,3 +186,189 @@ passed } --- response_body ["1","0","0"] + + + +=== TEST 5: set route, with redis host, port and SSL +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr", + "policy": "redis", + "redis_host": "127.0.0.1", + "redis_port": 6380, + "redis_timeout": 1001, + "redis_ssl": true, + "redis_ssl_verify": false + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 6: up the limit +--- pipelined_requests eval +["GET /hello", "GET /hello", "GET /hello", "GET /hello"] +--- error_code eval +[200, 200, 503, 503] + + + +=== TEST 7: 
up the limit +--- pipelined_requests eval +["GET /hello1", "GET /hello", "GET /hello2", "GET /hello", "GET /hello"] +--- error_code eval +[404, 503, 404, 503, 503] + + + +=== TEST 8: set route, with redis host, port, SSL and SSL verify is true(will cause ssl handshake err), with enable degradation switch +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "rejected_code": 503, + "key": "remote_addr", + "allow_degradation": true, + "policy": "redis", + "redis_host": "127.0.0.1", + "redis_port": 6380, + "redis_timeout": 1001, + "redis_ssl": true, + "redis_ssl_verify": true + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 9: enable degradation switch for TEST 8 +--- request +GET /hello +--- response_body +hello world +--- error_log +failed to do ssl handshake + + + +=== TEST 10: set router +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "plugins": { + "limit-count": { + "count": 2, + "time_window": 60, + "policy": "redis", + "redis_host": "127.0.0.1", + "redis_port": 6379, + "redis_database": 1, + "redis_timeout": 1001 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 11: test header X-RateLimit-Reset shouldn't be set to 0 after request be rejected +--- config + location /t { + content_by_lua_block { + local json = require "t.toolkit.json" + local http = require "resty.http" + local uri = 
"http://127.0.0.1:" .. ngx.var.server_port + .. "/hello" + local ress = {} + for i = 1, 3 do + local httpc = http.new() + local res, err = httpc:request_uri(uri) + if not res then + ngx.say(err) + return + end + ngx.sleep(1) + local reset = res.headers["X-RateLimit-Reset"] + if tonumber(reset) <= 0 then + ngx.say("failed") + end + + end + ngx.say("success") + } + } +--- response_body +success diff --git a/t/plugin/limit-count-redis4.t b/t/plugin/limit-count-redis4.t new file mode 100644 index 000000000000..47deedf0636c --- /dev/null +++ b/t/plugin/limit-count-redis4.t @@ -0,0 +1,87 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +BEGIN { + if ($ENV{TEST_NGINX_CHECK_LEAK}) { + $SkipReason = "unavailable for the hup tests"; + + } else { + $ENV{TEST_NGINX_USE_HUP} = 1; + undef $ENV{TEST_NGINX_USE_STAP}; + } +} + +use t::APISIX; + +my $nginx_binary = $ENV{'TEST_NGINX_BINARY'} || 'nginx'; +my $version = eval { `$nginx_binary -V 2>&1` }; + +if ($version !~ m/\/apisix-nginx-module/) { + plan(skip_all => "apisix-nginx-module not installed"); +} else { + plan('no_plan'); +} + +repeat_each(1); +no_long_string(); +no_shuffle(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if (!$block->error_log && !$block->no_error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: modified redis script, cost == 2 +--- config + location /t { + content_by_lua_block { + local conf = { + allow_degradation = false, + rejected_code = 503, + redis_timeout = 1000, + key_type = "var", + time_window = 60, + show_limit_quota_header = true, + count = 3, + redis_host = "127.0.0.1", + redis_port = 6379, + redis_database = 0, + policy = "redis", + key = "remote_addr" + } + + local lim_count_redis = require("apisix.plugins.limit-count.limit-count-redis") + local lim = lim_count_redis.new("limit-count", 3, 60, conf) + local uri = ngx.var.uri + local _, remaining, _ = lim:incoming(uri, 2) + + ngx.say("remaining: ", remaining) + } + } +--- response_body +remaining: 1 diff --git a/t/plugin/limit-count4.t b/t/plugin/limit-count4.t index bcefe5156fd0..a3453c58caaf 100644 --- a/t/plugin/limit-count4.t +++ b/t/plugin/limit-count4.t @@ -171,3 +171,34 @@ passed } --- response_body ["1","0","0"] + + + +=== TEST 5: test header X-RateLimit-Reset shouldn't be set to 0 after request be rejected +--- config + location /t { + content_by_lua_block { + local json = require "t.toolkit.json" + local http = require "resty.http" + local uri = "http://127.0.0.1:" .. 
ngx.var.server_port + .. "/hello" + local ress = {} + for i = 1, 3 do + local httpc = http.new() + local res, err = httpc:request_uri(uri) + if not res then + ngx.say(err) + return + end + ngx.sleep(1) + local reset = res.headers["X-RateLimit-Reset"] + if tonumber(reset) <= 0 then + ngx.say("failed") + end + + end + ngx.say("success") + } + } +--- response_body +success diff --git a/t/plugin/limit-count5.t b/t/plugin/limit-count5.t new file mode 100644 index 000000000000..47daaa71c772 --- /dev/null +++ b/t/plugin/limit-count5.t @@ -0,0 +1,91 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +BEGIN { + if ($ENV{TEST_NGINX_CHECK_LEAK}) { + $SkipReason = "unavailable for the hup tests"; + + } else { + $ENV{TEST_NGINX_USE_HUP} = 1; + undef $ENV{TEST_NGINX_USE_STAP}; + } +} + +use t::APISIX; + +my $nginx_binary = $ENV{'TEST_NGINX_BINARY'} || 'nginx'; +my $version = eval { `$nginx_binary -V 2>&1` }; + +if ($version !~ m/\/apisix-nginx-module/) { + plan(skip_all => "apisix-nginx-module not installed"); +} else { + plan('no_plan'); +} + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->request) { + $block->set_value("request", "GET /t"); + } + + if (!$block->error_log && !$block->no_error_log) { + $block->set_value("no_error_log", "[error]\n[alert]"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: modified limit-count.incoming, cost == 2 +--- config + location = /t { + content_by_lua_block { + local conf = { + time_window = 60, + count = 10, + allow_degradation = false, + key_type = "var", + policy = "local", + rejected_code = 503, + show_limit_quota_header = true, + key = "remote_addr" + } + local limit_count_local = require "apisix.plugins.limit-count.limit-count-local" + local lim = limit_count_local.new("plugin-limit-count", 10, 60) + local uri = ngx.var.uri + for i = 1, 7 do + local delay, err = lim:incoming(uri, true, conf, 2) + if not delay then + ngx.say(err) + else + local remaining = err + ngx.say("remaining: ", remaining) + end + end + } + } +--- request + GET /t +--- response_body +remaining: 8 +remaining: 6 +remaining: 4 +remaining: 2 +remaining: 0 +rejected +rejected diff --git a/t/plugin/log-rotate3.t b/t/plugin/log-rotate3.t index 8e7b92eee65a..3f1edd95a37b 100644 --- a/t/plugin/log-rotate3.t +++ b/t/plugin/log-rotate3.t @@ -132,3 +132,76 @@ start xxxxxx } --- response_body passed + + + +=== TEST 4: max_kept effective on differently named compression files +--- extra_yaml_config +plugins: + - log-rotate +plugin_attr: + log-rotate: + interval: 1 + max_kept: 1 + enable_compression: true +--- yaml_config +nginx_config: + 
error_log: logs/err1.log + http: + access_log: logs/acc1.log +--- config + location /t { + error_log logs/err1.log info; + access_log logs/acc1.log; + + content_by_lua_block { + ngx.sleep(3) + local lfs = require("lfs") + local count = 0 + for file_name in lfs.dir(ngx.config.prefix() .. "/logs/") do + if string.match(file_name, ".tar.gz$") then + count = count + 1 + end + end + --- only two compression file + ngx.say(count) + } + } +--- response_body +2 + + + +=== TEST 5: check whether new log files were created +--- extra_yaml_config +plugins: + - log-rotate +plugin_attr: + log-rotate: + interval: 1 + max_kept: 0 + enable_compression: false +--- yaml_config +nginx_config: + error_log: logs/err2.log + http: + access_log: logs/acc2.log +--- config + location /t { + error_log logs/err2.log info; + access_log logs/acc2.log; + + content_by_lua_block { + ngx.sleep(3) + local lfs = require("lfs") + local count = 0 + for file_name in lfs.dir(ngx.config.prefix() .. "/logs/") do + if string.match(file_name, "err2.log$") or string.match(file_name, "acc2.log$") then + count = count + 1 + end + end + ngx.say(count) + } + } +--- response_body +2 diff --git a/t/plugin/loki-logger.t b/t/plugin/loki-logger.t new file mode 100644 index 000000000000..72c79f1e97a6 --- /dev/null +++ b/t/plugin/loki-logger.t @@ -0,0 +1,376 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests(); + +__DATA__ + +=== TEST 1: sanity +--- config + location /t { + content_by_lua_block { + local test_cases = { + {endpoint_addrs = {"http://127.0.0.1:8199"}}, + {endpoint_addrs = "http://127.0.0.1:8199"}, + {endpoint_addrs = {}}, + {}, + {endpoint_addrs = {"http://127.0.0.1:8199"}, endpoint_uri = "/loki/api/v1/push"}, + {endpoint_addrs = {"http://127.0.0.1:8199"}, endpoint_uri = 1234}, + {endpoint_addrs = {"http://127.0.0.1:8199"}, tenant_id = 1234}, + {endpoint_addrs = {"http://127.0.0.1:8199"}, log_labels = "1234"}, + {endpoint_addrs = {"http://127.0.0.1:8199"}, log_labels = {job = "apisix6"}}, + } + local plugin = require("apisix.plugins.loki-logger") + + for _, case in ipairs(test_cases) do + local ok, err = plugin.check_schema(case) + ngx.say(ok and "done" or err) + end + } + } +--- response_body +done +property "endpoint_addrs" validation failed: wrong type: expected array, got string +property "endpoint_addrs" validation failed: expect array to have at least 1 items +property "endpoint_addrs" is required +done +property "endpoint_uri" validation failed: wrong type: expected string, got number +property "tenant_id" validation failed: wrong type: expected string, got number +property "log_labels" validation failed: wrong type: expected object, got string +done + + + +=== TEST 2: setup route +--- config + location /t { 
+ content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "loki-logger": { + "endpoint_addrs": ["http://127.0.0.1:3100"], + "tenant_id": "tenant_1", + "batch_max_size": 1 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 3: hit route +--- request +GET /hello +--- more_headers +test-header: only-for-test#1 +--- response_body +hello world + + + +=== TEST 4: check loki log +--- config + location /t { + content_by_lua_block { + local cjson = require("cjson") + local now = ngx.now() * 1000 + local data, err = require("lib.grafana_loki").fetch_logs_from_loki( + tostring(now - 3000) .. "000000", -- from + tostring(now) .. "000000" -- to + ) + + assert(err == nil, "fetch logs error: " .. (err or "")) + assert(data.status == "success", "loki response error: " .. cjson.encode(data)) + assert(#data.data.result > 0, "loki log empty: " .. cjson.encode(data)) + + local entry = data.data.result[1] + assert(entry.stream.request_headers_test_header == "only-for-test#1", + "expected field request_headers_test_header value: " .. 
cjson.encode(entry)) + } + } +--- error_code: 200 + + + +=== TEST 5: setup route (with log_labels) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "loki-logger": { + "endpoint_addrs": ["http://127.0.0.1:3100"], + "tenant_id": "tenant_1", + "log_labels": { + "custom_label": "custom_label_value" + }, + "batch_max_size": 1 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 6: hit route +--- request +GET /hello +--- more_headers +test-header: only-for-test#2 +--- response_body +hello world + + + +=== TEST 7: check loki log (with custom_label) +--- config + location /t { + content_by_lua_block { + local cjson = require("cjson") + local now = ngx.now() * 1000 + local data, err = require("lib.grafana_loki").fetch_logs_from_loki( + tostring(now - 3000) .. "000000", -- from + tostring(now) .. "000000", -- to + { query = [[{custom_label="custom_label_value"} | json]] } + ) + + assert(err == nil, "fetch logs error: " .. (err or "")) + assert(data.status == "success", "loki response error: " .. cjson.encode(data)) + assert(#data.data.result > 0, "loki log empty: " .. cjson.encode(data)) + + local entry = data.data.result[1] + assert(entry.stream.request_headers_test_header == "only-for-test#2", + "expected field request_headers_test_header value: " .. 
cjson.encode(entry)) + } + } +--- error_code: 200 + + + +=== TEST 8: setup route (with tenant_id) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "loki-logger": { + "endpoint_addrs": ["http://127.0.0.1:3100"], + "tenant_id": "tenant_2", + "batch_max_size": 1 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 9: hit route +--- request +GET /hello +--- more_headers +test-header: only-for-test#3 +--- response_body +hello world + + + +=== TEST 10: check loki log (with tenant_id tenant_1) +--- config + location /t { + content_by_lua_block { + local cjson = require("cjson") + local now = ngx.now() * 1000 + local data, err = require("lib.grafana_loki").fetch_logs_from_loki( + tostring(now - 10000) .. "000000", -- from + tostring(now) .. "000000" -- to + ) + + assert(err == nil, "fetch logs error: " .. (err or "")) + assert(data.status == "success", "loki response error: " .. cjson.encode(data)) + assert(#data.data.result > 0, "loki log empty: " .. cjson.encode(data)) + + local entry = data.data.result[1] + assert(entry.stream.request_headers_test_header ~= "only-for-test#3", + "expected field request_headers_test_header value: " .. cjson.encode(entry)) + } + } +--- error_code: 200 + + + +=== TEST 11: check loki log (with tenant_id tenant_2) +--- config + location /t { + content_by_lua_block { + local cjson = require("cjson") + local now = ngx.now() * 1000 + local data, err = require("lib.grafana_loki").fetch_logs_from_loki( + tostring(now - 3000) .. "000000", -- from + tostring(now) .. "000000", -- to + { headers = { + ["X-Scope-OrgID"] = "tenant_2" + } } + ) + + assert(err == nil, "fetch logs error: " .. 
(err or "")) + assert(data.status == "success", "loki response error: " .. cjson.encode(data)) + assert(#data.data.result > 0, "loki log empty: " .. cjson.encode(data)) + + local entry = data.data.result[1] + assert(entry.stream.request_headers_test_header == "only-for-test#3", + "expected field request_headers_test_header value: " .. cjson.encode(entry)) + } + } +--- error_code: 200 + + + +=== TEST 12: setup route (with log_labels as variables) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "loki-logger": { + "endpoint_addrs": ["http://127.0.0.1:3100"], + "tenant_id": "tenant_1", + "log_labels": { + "custom_label": "$remote_addr" + }, + "batch_max_size": 1 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 13: hit route +--- request +GET /hello +--- response_body +hello world + + + +=== TEST 14: check loki log (with custom_label) +--- config + location /t { + content_by_lua_block { + local cjson = require("cjson") + local now = ngx.now() * 1000 + local data, err = require("lib.grafana_loki").fetch_logs_from_loki( + tostring(now - 3000) .. "000000", -- from + tostring(now) .. "000000", -- to + { query = [[{custom_label="127.0.0.1"} | json]] } + ) + + assert(err == nil, "fetch logs error: " .. (err or "")) + assert(data.status == "success", "loki response error: " .. cjson.encode(data)) + assert(#data.data.result > 0, "loki log empty: " .. 
cjson.encode(data)) + } + } +--- error_code: 200 diff --git a/t/plugin/mocking.t b/t/plugin/mocking.t index 644ee2cf38df..46d82ef80a8f 100644 --- a/t/plugin/mocking.t +++ b/t/plugin/mocking.t @@ -424,3 +424,44 @@ passed GET /hello --- response_body chomp empty_var: + + + +=== TEST 19: set route (return headers) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "mocking": { + "response_example": "hello world", + "response_headers": { + "X-Apisix": "is, cool", + "X-Really": "yes" + } + } + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 20: hit route +--- request +GET /hello +--- response_headers +X-Apisix: is, cool +X-Really: yes diff --git a/t/plugin/opa.t b/t/plugin/opa.t index 9354b35d1043..9d731ae0682c 100644 --- a/t/plugin/opa.t +++ b/t/plugin/opa.t @@ -179,3 +179,47 @@ test-header: only-for-test --- error_code: 403 --- response {"code":40001,"desc":"Give you a object reason"} + + + +=== TEST 12: setup route with plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "opa": { + "host": "http://127.0.0.1:8181", + "policy": "example", + "send_headers_upstream": ["user"] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uris": ["/echo"] + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 13: hit route +--- request +GET /echo?test=1234&user=none +--- response_headers +user: none diff --git a/t/plugin/openid-connect.t b/t/plugin/openid-connect.t index 6f6f7aa0dad3..515319248c9b 100644 --- a/t/plugin/openid-connect.t +++ b/t/plugin/openid-connect.t @@ -171,7 +171,7 @@ true [[{ 
"plugins": { "openid-connect": { - "discovery": "http://127.0.0.1:8090/auth/realms/University/.well-known/openid-configuration", + "discovery": "http://127.0.0.1:8080/realms/University/.well-known/openid-configuration", "realm": "University", "client_id": "course_management", "client_secret": "d1ec69e9-55d2-4109-a3ea-befa071579d5", @@ -179,7 +179,7 @@ true "ssl_verify": false, "timeout": 10, "introspection_endpoint_auth_method": "client_secret_post", - "introspection_endpoint": "http://127.0.0.1:8090/auth/realms/University/protocol/openid-connect/token/introspect", + "introspection_endpoint": "http://127.0.0.1:8080/realms/University/protocol/openid-connect/token/introspect", "set_access_token_header": true, "access_token_in_authorization_header": false, "set_id_token_header": true, @@ -276,7 +276,7 @@ x-userinfo: ey.* [[{ "plugins": { "openid-connect": { - "discovery": "http://127.0.0.1:8090/auth/realms/University/.well-known/openid-configuration", + "discovery": "http://127.0.0.1:8080/realms/University/.well-known/openid-configuration", "realm": "University", "client_id": "course_management", "client_secret": "d1ec69e9-55d2-4109-a3ea-befa071579d5", @@ -284,7 +284,7 @@ x-userinfo: ey.* "ssl_verify": false, "timeout": 10, "introspection_endpoint_auth_method": "client_secret_post", - "introspection_endpoint": "http://127.0.0.1:8090/auth/realms/University/protocol/openid-connect/token/introspect", + "introspection_endpoint": "http://127.0.0.1:8080/realms/University/protocol/openid-connect/token/introspect", "set_access_token_header": true, "access_token_in_authorization_header": true, "set_id_token_header": false, @@ -731,14 +731,14 @@ jwt signature verification failed "openid-connect": { "client_id": "course_management", "client_secret": "d1ec69e9-55d2-4109-a3ea-befa071579d5", - "discovery": "http://127.0.0.1:8090/auth/realms/University/.well-known/openid-configuration", + "discovery": "http://127.0.0.1:8080/realms/University/.well-known/openid-configuration", 
"redirect_uri": "http://localhost:3000", "ssl_verify": false, "timeout": 10, "bearer_only": true, "realm": "University", "introspection_endpoint_auth_method": "client_secret_post", - "introspection_endpoint": "http://127.0.0.1:8090/auth/realms/University/protocol/openid-connect/token/introspect" + "introspection_endpoint": "http://127.0.0.1:8080/realms/University/protocol/openid-connect/token/introspect" } }, "upstream": { @@ -770,7 +770,7 @@ passed local json_decode = require("toolkit.json").decode local http = require "resty.http" local httpc = http.new() - local uri = "http://127.0.0.1:8090/auth/realms/University/protocol/openid-connect/token" + local uri = "http://127.0.0.1:8080/realms/University/protocol/openid-connect/token" local res, err = httpc:request_uri(uri, { method = "POST", body = "grant_type=password&client_id=course_management&client_secret=d1ec69e9-55d2-4109-a3ea-befa071579d5&username=teacher@gmail.com&password=123456", @@ -892,7 +892,7 @@ OIDC introspection failed: invalid token "openid-connect": { "client_id": "course_management", "client_secret": "d1ec69e9-55d2-4109-a3ea-befa071579d5", - "discovery": "http://127.0.0.1:8090/auth/realms/University/.well-known/openid-configuration", + "discovery": "http://127.0.0.1:8080/realms/University/.well-known/openid-configuration", "redirect_uri": "http://localhost:3000", "ssl_verify": false, "timeout": 10, @@ -900,7 +900,7 @@ OIDC introspection failed: invalid token "use_jwks": true, "realm": "University", "introspection_endpoint_auth_method": "client_secret_post", - "introspection_endpoint": "http://127.0.0.1:8090/auth/realms/University/protocol/openid-connect/token/introspect" + "introspection_endpoint": "http://127.0.0.1:8080/realms/University/protocol/openid-connect/token/introspect" } }, "upstream": { @@ -932,7 +932,7 @@ passed local json_decode = require("toolkit.json").decode local http = require "resty.http" local httpc = http.new() - local uri = 
"http://127.0.0.1:8090/auth/realms/University/protocol/openid-connect/token" + local uri = "http://127.0.0.1:8080/realms/University/protocol/openid-connect/token" local res, err = httpc:request_uri(uri, { method = "POST", body = "grant_type=password&client_id=course_management&client_secret=d1ec69e9-55d2-4109-a3ea-befa071579d5&username=teacher@gmail.com&password=123456", @@ -1023,7 +1023,7 @@ OIDC introspection failed: invalid jwt: invalid jwt string [[{ "plugins": { "openid-connect": { - "discovery": "http://127.0.0.1:8090/auth/realms/University/.well-known/openid-configuration", + "discovery": "http://127.0.0.1:8080/realms/University/.well-known/openid-configuration", "realm": "University", "client_id": "course_management", "client_secret": "d1ec69e9-55d2-4109-a3ea-befa071579d5", @@ -1031,7 +1031,7 @@ OIDC introspection failed: invalid jwt: invalid jwt string "ssl_verify": false, "timeout": 10, "introspection_endpoint_auth_method": "client_secret_post", - "introspection_endpoint": "http://127.0.0.1:8090/auth/realms/University/protocol/openid-connect/token/introspect", + "introspection_endpoint": "http://127.0.0.1:8080/realms/University/protocol/openid-connect/token/introspect", "set_access_token_header": true, "access_token_in_authorization_header": false, "set_id_token_header": true, @@ -1102,7 +1102,7 @@ passed -- Request the location, it's a URL of keycloak and contains the post_logout_redirect_uri -- Like: - -- http://127.0.0.1:8090/auth/realms/University/protocol/openid-connect/logout?post_logout_redirect=http://127.0.0.1:1984/hello + -- http://127.0.0.1:8080/realms/University/protocol/openid-connect/logout?post_logout_redirect=http://127.0.0.1:1984/hello local location = res.headers["Location"] res, err = httpc:request_uri(location, { method = "GET" @@ -1207,7 +1207,7 @@ true "openid-connect": { "client_id": "course_management", "client_secret": "d1ec69e9-55d2-4109-a3ea-befa071579d5", - "discovery": 
"http://127.0.0.1:8090/auth/realms/University/.well-known/openid-configuration", + "discovery": "http://127.0.0.1:8080/realms/University/.well-known/openid-configuration", "realm": "University", "bearer_only": true, "access_token_in_authorization_header": true, @@ -1217,7 +1217,7 @@ true "ssl_verify": false, "timeout": 10, "introspection_endpoint_auth_method": "client_secret_post", - "introspection_endpoint": "http://127.0.0.1:8090/auth/realms/University/protocol/openid-connect/token/introspect" + "introspection_endpoint": "http://127.0.0.1:8080/realms/University/protocol/openid-connect/token/introspect" } }, "upstream": { @@ -1249,7 +1249,7 @@ passed local json_decode = require("toolkit.json").decode local http = require "resty.http" local httpc = http.new() - local uri = "http://127.0.0.1:8090/auth/realms/University/protocol/openid-connect/token" + local uri = "http://127.0.0.1:8080/realms/University/protocol/openid-connect/token" local res, err = httpc:request_uri(uri, { method = "POST", body = "grant_type=password&client_id=course_management&client_secret=d1ec69e9-55d2-4109-a3ea-befa071579d5&username=teacher@gmail.com&password=123456", diff --git a/t/plugin/openid-connect3.t b/t/plugin/openid-connect3.t new file mode 100644 index 000000000000..84eb7c26de2b --- /dev/null +++ b/t/plugin/openid-connect3.t @@ -0,0 +1,111 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +use t::APISIX 'no_plan'; + +log_level('debug'); +repeat_each(1); +no_long_string(); +no_root_location(); +no_shuffle(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if ((!defined $block->error_log) && (!defined $block->no_error_log)) { + $block->set_value("no_error_log", "[error]"); + } + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests(); + +__DATA__ + +=== TEST 1: Set up new route access the auth server via http proxy +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "openid-connect": { + "client_id": "kbyuFDidLLm280LIwVFiazOqjO3ty8KH", + "client_secret": "60Op4HFM0I8ajz0WdiStAbziZ-VFQttXuxixHHs2R7r7-CW8GR79l-mmLqMhc-Sa", + "discovery": "https://samples.auth0.com/.well-known/openid-configuration", + "redirect_uri": "https://iresty.com", + "ssl_verify": false, + "timeout": 10, + "scope": "apisix", + "proxy_opts": { + "http_proxy": "http://127.0.0.1:8080", + "http_proxy_authorization": "Basic dXNlcm5hbWU6cGFzc3dvcmQK" + }, + "use_pkce": false + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + + } + } +--- response_body +passed + + + +=== TEST 2: Access route w/o bearer token. Should redirect to authentication endpoint of ID provider. 
+--- config + location /t { + content_by_lua_block { + local http = require "resty.http" + local httpc = http.new() + local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello" + local res, err = httpc:request_uri(uri, {method = "GET"}) + ngx.status = res.status + local location = res.headers['Location'] + if location and string.find(location, 'https://samples.auth0.com/authorize') ~= -1 and + string.find(location, 'scope=apisix') ~= -1 and + string.find(location, 'client_id=kbyuFDidLLm280LIwVFiazOqjO3ty8KH') ~= -1 and + string.find(location, 'response_type=code') ~= -1 and + string.find(location, 'redirect_uri=https://iresty.com') ~= -1 then + ngx.say(true) + end + } + } +--- timeout: 10s +--- response_body +true +--- error_code: 302 +--- error_log +use http proxy diff --git a/t/plugin/opentelemetry.t b/t/plugin/opentelemetry.t index 5adc158e9e0a..f9e64853d93d 100644 --- a/t/plugin/opentelemetry.t +++ b/t/plugin/opentelemetry.t @@ -16,7 +16,6 @@ # use t::APISIX 'no_plan'; - add_block_preprocessor(sub { my ($block) = @_; @@ -26,38 +25,23 @@ plugins: - opentelemetry plugin_attr: opentelemetry: + trace_id_source: x-request-id batch_span_processor: max_export_batch_size: 1 inactive_timeout: 0.5 + collector: + address: 127.0.0.1:4318 + request_timeout: 3 + request_headers: + foo: bar _EOC_ $block->set_value("extra_yaml_config", $extra_yaml_config); } - - - if (!$block->extra_init_by_lua) { - my $extra_init_by_lua = <<_EOC_; --- mock exporter http client -local client = require("opentelemetry.trace.exporter.http_client") -client.do_request = function() - ngx.log(ngx.INFO, "opentelemetry export span") - return "ok" -end -_EOC_ - - $block->set_value("extra_init_by_lua", $extra_init_by_lua); - } - - if (!$block->request) { - $block->set_value("request", "GET /t"); - } - if (!defined $block->response_body) { $block->set_value("response_body", "passed\n"); } - $block; }); - repeat_each(1); no_long_string(); no_root_location(); @@ -98,110 +82,29 @@ __DATA__ 
ngx.say(body) } } +--- request +GET /t === TEST 2: trigger opentelemetry --- request GET /opentracing +--- wait: 2 --- response_body opentracing ---- wait: 1 ---- grep_error_log eval -qr/opentelemetry export span/ ---- grep_error_log_out -opentelemetry export span - - - -=== TEST 3: use default always_off sampler ---- config - location /t { - content_by_lua_block { - local t = require("lib.test_admin").test - local code, body = t('/apisix/admin/routes/1', - ngx.HTTP_PUT, - [[{ - "plugins": { - "opentelemetry": { - } - }, - "upstream": { - "nodes": { - "127.0.0.1:1980": 1 - }, - "type": "roundrobin" - }, - "uri": "/opentracing" - }]] - ) - if code >= 300 then - ngx.status = code - end - ngx.say(body) - } - } - -=== TEST 4: not trigger opentelemetry ---- request -GET /opentracing ---- response_body -opentracing ---- grep_error_log eval -qr/opentelemetry export span/ ---- grep_error_log_out +=== TEST 3: check log +--- exec +tail -n 1 ci/pod/otelcol-contrib/data-otlp.json +--- response_body eval +qr/.*opentelemetry-lua.*/ -=== TEST 5: use trace_id_ratio sampler, default fraction = 0 ---- config - location /t { - content_by_lua_block { - local t = require("lib.test_admin").test - local code, body = t('/apisix/admin/routes/1', - ngx.HTTP_PUT, - [[{ - "plugins": { - "opentelemetry": { - "sampler": { - "name": "trace_id_ratio" - } - } - }, - "upstream": { - "nodes": { - "127.0.0.1:1980": 1 - }, - "type": "roundrobin" - }, - "uri": "/opentracing" - }]] - ) - - if code >= 300 then - ngx.status = code - end - ngx.say(body) - } - } - - - -=== TEST 6: not trigger opentelemetry ---- request -GET /opentracing ---- response_body -opentracing ---- grep_error_log eval -qr/opentelemetry export span/ ---- grep_error_log_out - - - -=== TEST 7: use trace_id_ratio sampler, fraction = 1.0 +=== TEST 4: use trace_id_ratio sampler, fraction = 1.0 --- config location /t { content_by_lua_block { @@ -235,120 +138,29 @@ qr/opentelemetry export span/ ngx.say(body) } } - - - -=== TEST 8: trigger 
opentelemetry --- request -GET /opentracing ---- response_body -opentracing ---- wait: 1 ---- grep_error_log eval -qr/opentelemetry export span/ ---- grep_error_log_out -opentelemetry export span - - - -=== TEST 9: use parent_base sampler, default root sampler = always_off ---- config - location /t { - content_by_lua_block { - local t = require("lib.test_admin").test - local code, body = t('/apisix/admin/routes/1', - ngx.HTTP_PUT, - [[{ - "plugins": { - "opentelemetry": { - "sampler": { - "name": "parent_base" - } - } - }, - "upstream": { - "nodes": { - "127.0.0.1:1980": 1 - }, - "type": "roundrobin" - }, - "uri": "/opentracing" - }]] - ) +GET /t - if code >= 300 then - ngx.status = code - end - ngx.say(body) - } - } - -=== TEST 10: not trigger opentelemetry +=== TEST 5: trigger opentelemetry --- request GET /opentracing +--- wait: 2 --- response_body opentracing ---- grep_error_log eval -qr/opentelemetry export span/ ---- grep_error_log_out - -=== TEST 11: use parent_base sampler, root sampler = always_on ---- config - location /t { - content_by_lua_block { - local t = require("lib.test_admin").test - local code, body = t('/apisix/admin/routes/1', - ngx.HTTP_PUT, - [[{ - "plugins": { - "opentelemetry": { - "sampler": { - "name": "parent_base", - "options": { - "root": { - "name": "always_on" - } - } - } - } - }, - "upstream": { - "nodes": { - "127.0.0.1:1980": 1 - }, - "type": "roundrobin" - }, - "uri": "/opentracing" - }]] - ) - if code >= 300 then - ngx.status = code - end - ngx.say(body) - } - } +=== TEST 6: check log +--- exec +tail -n 1 ci/pod/otelcol-contrib/data-otlp.json +--- response_body eval +qr/.*opentelemetry-lua.*/ -=== TEST 12: trigger opentelemetry ---- request -GET /opentracing ---- response_body -opentracing ---- wait: 1 ---- grep_error_log eval -qr/opentelemetry export span/ ---- grep_error_log_out -opentelemetry export span - - - -=== TEST 13: use parent_base sampler, root sampler = trace_id_ratio with default fraction = 0 +=== TEST 7: use 
parent_base sampler, root sampler = trace_id_ratio with default fraction = 0 --- config location /t { content_by_lua_block { @@ -384,36 +196,31 @@ opentelemetry export span ngx.say(body) } } - - - -=== TEST 14: not trigger opentelemetry --- request -GET /opentracing ---- response_body -opentracing ---- grep_error_log eval -qr/opentelemetry export span/ ---- grep_error_log_out +GET /t -=== TEST 15: trigger opentelemetry, trace_flag = 1 +=== TEST 8: trigger opentelemetry, trace_flag = 1 --- request GET /opentracing --- more_headers traceparent: 00-00000000000000000000000000000001-0000000000000001-01 +--- wait: 2 --- response_body opentracing ---- wait: 1 ---- grep_error_log eval -qr/opentelemetry export span/ ---- grep_error_log_out -opentelemetry export span -=== TEST 16: use parent_base sampler, root sampler = trace_id_ratio with fraction = 1 +=== TEST 9: check log +--- exec +tail -n 1 ci/pod/otelcol-contrib/data-otlp.json +--- response_body eval +qr/.*"traceId":"00000000000000000000000000000001",.*/ + + + +=== TEST 10: use parent_base sampler, root sampler = trace_id_ratio with fraction = 1 --- config location /t { content_by_lua_block { @@ -452,36 +259,31 @@ opentelemetry export span ngx.say(body) } } +--- request +GET /t -=== TEST 17: trigger opentelemetry +=== TEST 11: trigger opentelemetry, trace_flag = 1 --- request GET /opentracing +--- more_headers +traceparent: 00-00000000000000000000000000000001-0000000000000001-01 +--- wait: 2 --- response_body opentracing ---- wait: 1 ---- grep_error_log eval -qr/opentelemetry export span/ ---- grep_error_log_out -opentelemetry export span -=== TEST 18: not trigger opentelemetry, trace_flag = 0 ---- request -GET /opentracing ---- more_headers -traceparent: 00-00000000000000000000000000000001-0000000000000001-00 ---- response_body -opentracing ---- grep_error_log eval -qr/opentelemetry export span/ ---- grep_error_log_out +=== TEST 12: check log +--- exec +tail -n 1 ci/pod/otelcol-contrib/data-otlp.json +--- 
response_body eval +qr/.*"traceId":"00000000000000000000000000000001",.*/ -=== TEST 19: set additional_attributes +=== TEST 13: set additional_attributes --- config location /t { content_by_lua_block { @@ -531,122 +333,38 @@ qr/opentelemetry export span/ ngx.say(body) } } +--- request +GET /t -=== TEST 20: trigger opentelemetry, test trace_id_source=x-request-id, custom resource, additional_attributes ---- extra_yaml_config -plugins: - - opentelemetry -plugin_attr: - opentelemetry: - trace_id_source: x-request-id - resource: - service.name: test - test_key: test_val - batch_span_processor: - max_export_batch_size: 1 - inactive_timeout: 0.5 ---- extra_init_by_lua - local core = require("apisix.core") - local otlp = require("opentelemetry.trace.exporter.otlp") - local span_kind = require("opentelemetry.trace.span_kind") - otlp.export_spans = function(self, spans) - if (#spans ~= 1) then - ngx.log(ngx.ERR, "unexpected spans length: ", #spans) - return - end - - local span = spans[1] - if span:context().trace_id ~= "01010101010101010101010101010101" then - ngx.log(ngx.ERR, "unexpected trace id: ", span:context().trace_id) - return - end - - local current_span_kind = span:plain().kind - if current_span_kind ~= span_kind.server then - ngx.log(ngx.ERR, "expected span.kind to be server but got ", current_span_kind) - return - end - - if span.name ~= "/opentracing?foo=bar&a=b" then - ngx.log(ngx.ERR, "expect span name: /opentracing?foo=bar&a=b, but got ", span.name) - return - end - - local expected_resource_attrs = { - test_key = "test_val", - } - expected_resource_attrs["service.name"] = "test" - expected_resource_attrs["telemetry.sdk.language"] = "lua" - expected_resource_attrs["telemetry.sdk.name"] = "opentelemetry-lua" - expected_resource_attrs["telemetry.sdk.version"] = "0.1.1" - expected_resource_attrs["hostname"] = core.utils.gethostname() - local actual_resource_attrs = span.tracer.provider.resource:attributes() - if #actual_resource_attrs ~= 6 then - 
ngx.log(ngx.ERR, "expect len(actual_resource) = 6, but got ", #actual_resource_attrs) - return - end - for _, attr in ipairs(actual_resource_attrs) do - local expected_val = expected_resource_attrs[attr.key] - if not expected_val then - ngx.log(ngx.ERR, "unexpected resource attr key: ", attr.key) - return - end - if attr.value.string_value ~= expected_val then - ngx.log(ngx.ERR, "unexpected resource attr val: ", attr.value.string_value) - return - end - end - - local expected_attributes = { - service = "service_name", - route = "route_name", - http_user_agent = "test_nginx", - arg_foo = "bar", - cookie_token = "auth_token", - remote_addr = "127.0.0.1", - } - if #span.attributes ~= 6 then - ngx.log(ngx.ERR, "expect len(span.attributes) = 6, but got ", #span.attributes) - return - end - for _, attr in ipairs(span.attributes) do - local expected_val = expected_attributes[attr.key] - if not expected_val then - ngx.log(ngx.ERR, "unexpected attr key: ", attr.key) - return - end - if attr.value.string_value ~= expected_val then - ngx.log(ngx.ERR, "unexpected attr val: ", attr.value.string_value) - return - end - end - - ngx.log(ngx.INFO, "opentelemetry export span") - end +=== TEST 14: trigger opentelemetry --- request GET /opentracing?foo=bar&a=b --- more_headers X-Request-Id: 01010101010101010101010101010101 User-Agent: test_nginx Cookie: token=auth_token; +--- wait: 2 --- response_body opentracing ---- wait: 1 ---- grep_error_log eval -qr/opentelemetry export span/ ---- grep_error_log_out -opentelemetry export span -=== TEST 21: create route for /specific_status +=== TEST 15: check log +--- exec +tail -n 1 ci/pod/otelcol-contrib/data-otlp.json +--- response_body eval +qr/.*\/opentracing\?foo=bar.*/ + + + +=== TEST 16: create route for /specific_status --- config location /t { content_by_lua_block { local t = require("lib.test_admin").test - local code, body = t('/apisix/admin/routes/2', + local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, [[{ "name": 
"route_name", @@ -673,54 +391,21 @@ opentelemetry export span ngx.say(body) } } +--- request +GET /t -=== TEST 22: 500 status, test span.status ---- extra_init_by_lua - local otlp = require("opentelemetry.trace.exporter.otlp") - otlp.export_spans = function(self, spans) - if (#spans ~= 1) then - ngx.log(ngx.ERR, "unexpected spans length: ", #spans) - return - end - - local span = spans[1] - if span.status.code ~= 2 then - ngx.log(ngx.ERR, "unexpected status.code: ", span.status.code) - end - if span.status.message ~= "upstream response status: 500" then - ngx.log(ngx.ERR, "unexpected status.message: ", span.status.message) - end - - ngx.log(ngx.INFO, "opentelemetry export span") - end +=== TEST 17: test response empty body --- request -GET /specific_status ---- more_headers -X-Test-Upstream-Status: 500 ---- error_code: 500 +HEAD /specific_status --- response_body -upstream status: 500 ---- wait: 1 ---- grep_error_log eval -qr/opentelemetry export span/ ---- grep_error_log_out -opentelemetry export span +--- wait: 2 -=== TEST 23: test response empty body ---- extra_init_by_lua - local otlp = require("opentelemetry.trace.exporter.otlp") - otlp.export_spans = function(self, spans) - ngx.log(ngx.INFO, "opentelemetry export span") - end ---- request -HEAD /specific_status ---- response_body ---- wait: 1 ---- grep_error_log eval -qr/opentelemetry export span/ ---- grep_error_log_out -opentelemetry export span +=== TEST 18: check log +--- exec +tail -n 1 ci/pod/otelcol-contrib/data-otlp.json +--- response_body eval +qr/.*\/specific_status.*/ diff --git a/t/plugin/opentelemetry2.t b/t/plugin/opentelemetry2.t index 052cdbc12b66..3344ce97bb61 100644 --- a/t/plugin/opentelemetry2.t +++ b/t/plugin/opentelemetry2.t @@ -28,6 +28,7 @@ plugins: - opentelemetry plugin_attr: opentelemetry: + trace_id_source: x-request-id batch_span_processor: max_export_batch_size: 1 inactive_timeout: 0.5 @@ -35,30 +36,6 @@ _EOC_ $block->set_value("extra_yaml_config", $extra_yaml_config); } - - if 
(!$block->extra_init_by_lua) { - my $extra_init_by_lua = <<_EOC_; --- mock exporter http client -local client = require("opentelemetry.trace.exporter.http_client") -client.do_request = function() - ngx.log(ngx.INFO, "opentelemetry export span") - return "ok" -end -local ctx_new = require("opentelemetry.context").new -require("opentelemetry.context").new = function (...) - local ctx = ctx_new(...) - local current = ctx.current - ctx.current = function (...) - ngx.log(ngx.INFO, "opentelemetry context current") - return current(...) - end - return ctx -end -_EOC_ - - $block->set_value("extra_init_by_lua", $extra_init_by_lua); - } - if (!$block->request) { $block->set_value("request", "GET /t"); } @@ -130,96 +107,12 @@ passed --- request GET /hello --- error_code: 401 ---- wait: 1 ---- grep_error_log eval -qr/(opentelemetry export span|opentelemetry context current|plugin body_filter phase)/ ---- grep_error_log_out -plugin body_filter phase -plugin body_filter phase -opentelemetry context current -opentelemetry export span +--- wait: 2 -=== TEST 3: set additional_attributes with match ---- config - location /t { - content_by_lua_block { - local t = require("lib.test_admin").test - local code, body = t('/apisix/admin/routes/1', - ngx.HTTP_PUT, - [[{ - "name": "route_name", - "plugins": { - "opentelemetry": { - "sampler": { - "name": "always_on" - }, - "additional_header_prefix_attributes": [ - "x-my-header-*" - ] - } - }, - "upstream": { - "nodes": { - "127.0.0.1:1980": 1 - }, - "type": "roundrobin" - }, - "uri": "/attributes" - }]] - ) - - if code >= 300 then - ngx.status = code - end - ngx.say(body) - } - } ---- response_body -passed - - - -=== TEST 4: opentelemetry expands headers ---- extra_init_by_lua - local otlp = require("opentelemetry.trace.exporter.otlp") - otlp.export_spans = function(self, spans) - if (#spans ~= 1) then - ngx.log(ngx.ERR, "unexpected spans length: ", #spans) - return - end - - local attributes_names = {} - local attributes = {} - local span 
= spans[1] - for _, attribute in ipairs(span.attributes) do - if attribute.key == "hostname" then - -- remove any randomness - goto skip - end - table.insert(attributes_names, attribute.key) - attributes[attribute.key] = attribute.value.string_value or "" - ::skip:: - end - table.sort(attributes_names) - for _, attribute in ipairs(attributes_names) do - ngx.log(ngx.INFO, "attribute " .. attribute .. ": \"" .. attributes[attribute] .. "\"") - end - - ngx.log(ngx.INFO, "opentelemetry export span") - end ---- request -GET /attributes ---- more_headers -x-my-header-name: william -x-my-header-nick: bill ---- wait: 1 ---- error_code: 404 ---- grep_error_log eval -qr/attribute .+?:.[^,]*/ ---- grep_error_log_out -attribute route: "route_name" -attribute service: "" -attribute x-my-header-name: "william" -attribute x-my-header-nick: "bill" +=== TEST 3: check log +--- exec +tail -n 1 ci/pod/otelcol-contrib/data-otlp.json +--- response_body eval +qr/.*\/hello.*/ diff --git a/t/plugin/opentelemetry4-bugfix-pb-state.t b/t/plugin/opentelemetry4-bugfix-pb-state.t new file mode 100644 index 000000000000..5607e912edc4 --- /dev/null +++ b/t/plugin/opentelemetry4-bugfix-pb-state.t @@ -0,0 +1,175 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +use t::APISIX 'no_plan'; +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->extra_yaml_config) { + my $extra_yaml_config = <<_EOC_; +plugins: + - opentelemetry +plugin_attr: + opentelemetry: + trace_id_source: x-request-id + batch_span_processor: + max_export_batch_size: 1 + inactive_timeout: 0.5 + collector: + address: 127.0.0.1:4318 + request_timeout: 3 + request_headers: + foo: bar +_EOC_ + $block->set_value("extra_yaml_config", $extra_yaml_config); + } + + $block; +}); +repeat_each(1); +no_long_string(); +no_root_location(); +log_level("debug"); + +run_tests; + +__DATA__ + +=== TEST 1: set additional_attributes with match +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "name": "route_name", + "plugins": { + "opentelemetry": { + "sampler": { + "name": "always_on" + }, + "additional_header_prefix_attributes": [ + "x-my-header-*" + ] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: opentelemetry expands headers +--- extra_init_by_lua + local otlp = require("opentelemetry.trace.exporter.otlp") + local orig_export_spans = otlp.export_spans + otlp.export_spans = function(self, spans) + if (#spans ~= 1) then + ngx.log(ngx.ERR, "unexpected spans length: ", #spans) + return + end + + local attributes_names = {} + local attributes = {} + local span = spans[1] + for _, attribute in ipairs(span.attributes) do + if attribute.key == "hostname" then + -- remove any randomness + goto skip + end + table.insert(attributes_names, attribute.key) + attributes[attribute.key] = attribute.value.string_value or "" + ::skip:: + end + table.sort(attributes_names) + for _, attribute in ipairs(attributes_names) do + ngx.log(ngx.INFO, 
"attribute " .. attribute .. ": \"" .. attributes[attribute] .. "\"") + end + + ngx.log(ngx.INFO, "opentelemetry export span") + return orig_export_spans(self, spans) + end +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/protos/1', + ngx.HTTP_PUT, + [[{ + "content" : "syntax = \"proto3\"; + package helloworld; + service Greeter { + rpc SayHello (HelloRequest) returns (HelloReply) {} + } + message HelloRequest { + string name = 1; + } + message HelloReply { + string message = 1; + }" + }]] + ) + + if code >= 300 then + ngx.status = code + end + local http = require "resty.http" + local httpc = http.new() + local uri1 = "http://127.0.0.1:" .. ngx.var.server_port .. "/hello" + local headers = { + ["x-my-header-name"] = "william", + ["x-my-header-nick"] = "bill", + } + local res, err = httpc:request_uri(uri1, {method = "GET", headers = headers}) + if not res then + ngx.say(err) + return + end + ngx.status = res.status + } + } +--- request +GET /t +--- wait: 1 +--- error_code: 200 +--- no_error_log +type 'opentelemetry.proto.trace.v1.TracesData' does not exists +--- grep_error_log eval +qr/attribute .+?:.[^,]*/ +--- grep_error_log_out +attribute route: "route_name" +attribute service: "" +attribute x-my-header-name: "william" +attribute x-my-header-nick: "bill" diff --git a/t/plugin/prometheus4.t b/t/plugin/prometheus4.t index 999316a613bd..2a72736c8d3f 100644 --- a/t/plugin/prometheus4.t +++ b/t/plugin/prometheus4.t @@ -134,3 +134,58 @@ GET /hello GET /apisix/prometheus/metrics --- response_body eval qr/apisix_http_status\{code="200",route="10",matched_uri="\/hello",matched_host="",service="",consumer="",node="127.0.0.1",dummy=""\} \d+/ + + + +=== TEST 7: set route +--- yaml_config +plugin_attr: + prometheus: + default_buckets: + - 15 + - 55 + - 105 + - 205 + - 505 +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = 
t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "prometheus": {} + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello1" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- pipelined_requests eval +["GET /t", "GET /hello1"] +--- response_body eval +["passed\n", "hello1 world\n"] + + + +=== TEST 8: fetch metrics +--- request +GET /apisix/prometheus/metrics +--- response_body eval +qr/apisix_http_latency_bucket\{type="upstream",route="1",service="",consumer="",node="127.0.0.1",le="15"\} \d+ +apisix_http_latency_bucket\{type="upstream",route="1",service="",consumer="",node="127.0.0.1",le="55"\} \d+ +apisix_http_latency_bucket\{type="upstream",route="1",service="",consumer="",node="127.0.0.1",le="105"\} \d+ +apisix_http_latency_bucket\{type="upstream",route="1",service="",consumer="",node="127.0.0.1",le="205"\} \d+ +apisix_http_latency_bucket\{type="upstream",route="1",service="",consumer="",node="127.0.0.1",le="505"\} \d+/ diff --git a/t/plugin/proxy-cache/memory.t b/t/plugin/proxy-cache/memory.t index 5984dbd47776..e617f8a1b0fc 100644 --- a/t/plugin/proxy-cache/memory.t +++ b/t/plugin/proxy-cache/memory.t @@ -661,3 +661,46 @@ GET /hello --- more_headers Cache-Control: only-if-cached --- error_code: 504 + + + +=== TEST 36: configure plugin without memory_cache zone for cache_strategy = memory +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "proxy-cache": { + "cache_strategy": "memory", + "cache_key":["$host","$uri"], + "cache_bypass": ["$arg_bypass"], + "cache_control": true, + "cache_method": ["GET"], + "cache_ttl": 10, + "cache_http_status": [200] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1986": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + 
ngx.say(body) + } + } +--- request +GET /t +--- response_body_like +.*err: invalid or empty cache_zone for cache_strategy: memory.* +--- error_code: 400 diff --git a/t/plugin/proxy-mirror.t b/t/plugin/proxy-mirror.t index 9c58f4bac98e..458bcf342d1e 100644 --- a/t/plugin/proxy-mirror.t +++ b/t/plugin/proxy-mirror.t @@ -705,8 +705,8 @@ passed [[{ "plugins": { "proxy-mirror": { - "host": "http://httpbin.org", - "path": "/get" + "host": "http://test.com:1980", + "path": "/hello" } }, "upstream": { @@ -736,7 +736,7 @@ GET /hello --- response_body hello world --- error_log_like eval -qr/http:\/\/httpbin\.org is resolved to: http:\/\/((2(5[0-5]|[0-4]\d))|[0-1]?\d{1,2})(\.((2(5[0-5]|[0-4]\d))|[0-1]?\d{1,2})){3}/ +qr/http:\/\/test\.com is resolved to: http:\/\/((2(5[0-5]|[0-4]\d))|[0-1]?\d{1,2})(\.((2(5[0-5]|[0-4]\d))|[0-1]?\d{1,2})){3}/ @@ -838,3 +838,75 @@ GET /hello?a=1 hello world --- error_log uri: /a/hello?a=1 + + + +=== TEST 30: (grpc) sanity check (normal case grpc) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "proxy-mirror": { + "host": "grpc://127.0.0.1:1986" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "scheme": "grpc", + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- error_code: 200 +--- response_body +passed + + + +=== TEST 31: (grpcs) sanity check (normal case for grpcs) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "proxy-mirror": { + "host": "grpcs://127.0.0.1:1986" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "scheme": "grpc", + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } 
+--- error_code: 200 +--- response_body +passed diff --git a/t/plugin/proxy-mirror3.t b/t/plugin/proxy-mirror3.t new file mode 100644 index 000000000000..65a23dc823d9 --- /dev/null +++ b/t/plugin/proxy-mirror3.t @@ -0,0 +1,76 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +log_level('info'); +no_root_location(); +no_shuffle(); + +add_block_preprocessor(sub { + my ($block) = @_; + + my $yaml_config = $block->yaml_config // <<_EOC_; +apisix: + node_listen: 1984 +deployment: + role: data_plane + role_data_plane: + config_provider: yaml +_EOC_ + + $block->set_value("yaml_config", $yaml_config); + + if (!$block->request) { + $block->set_value("request", "POST /hello"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: grpc mirror +--- log_level: debug +--- http2 +--- apisix_yaml +routes: + - + id: 1 + uris: + - /helloworld.Greeter/SayHello + methods: [ + POST + ] + plugins: + proxy-mirror: + host: grpc://127.0.0.1:19797 + sample_ratio: 1 + upstream: + scheme: grpc + nodes: + "127.0.0.1:50051": 1 + type: roundrobin +#END +--- exec +grpcurl -import-path ./t/grpc_server_example/proto -proto helloworld.proto -plaintext -d '{"name":"apisix"}' 127.0.0.1:1984 helloworld.Greeter.SayHello +--- response_body +{ + "message": "Hello apisix" +} +--- error_log eval +qr/Connection refused\) while connecting to upstream.*proxy_mirror_grpc/ diff --git a/t/plugin/proxy-rewrite.t b/t/plugin/proxy-rewrite.t index 558fd9adee03..4d7465862d69 100644 --- a/t/plugin/proxy-rewrite.t +++ b/t/plugin/proxy-rewrite.t @@ -1180,7 +1180,7 @@ done -=== TEST 43: set route(rewrite host with port) +=== TEST 43: set route(rewrite host with port), ensure ngx.var.uri matched the rewritten version --- config location /t { content_by_lua_block { @@ -1193,6 +1193,12 @@ done "proxy-rewrite": { "uri": "/uri", "host": "test.com:6443" + }, + "serverless-post-function": { + "phase": "access", + "functions" : ["return function(conf, ctx) + assert(ngx.var.uri == \"/uri\", \"proxy-rewrite do not call ngx.req.set_uri\") + end"] } }, "upstream": { diff --git a/t/plugin/proxy-rewrite3.t b/t/plugin/proxy-rewrite3.t index 45bcd6f5c918..55afe14cc738 100644 --- a/t/plugin/proxy-rewrite3.t +++ b/t/plugin/proxy-rewrite3.t @@ -771,3 +771,231 @@ 
passed GET /test/plugin/proxy/rewrite HTTP/1.1 --- response_headers X-Request-ID: test1///test2 + + + +=== TEST 33: set route (test if X-Forwarded-Port can be set before proxy) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "plugins": { + "proxy-rewrite": { + "headers": { + "X-Forwarded-Port": "9882" + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/echo" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 34: test if X-Forwarded-Port can be set before proxy +--- request +GET /echo HTTP/1.1 +--- more_headers +X-Forwarded-Port: 9881 +--- response_headers +X-Forwarded-Port: 9882 + + + +=== TEST 35: set route (test if X-Forwarded-For can be set before proxy) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "plugins": { + "proxy-rewrite": { + "headers": { + "X-Forwarded-For": "22.22.22.22" + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/echo" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 36: test if X-Forwarded-For can be set before proxy +--- request +GET /echo HTTP/1.1 +--- more_headers +X-Forwarded-For: 11.11.11.11 +--- response_headers +X-Forwarded-For: 22.22.22.22, 127.0.0.1 + + + +=== TEST 37: setting multiple regex_uris +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "proxy-rewrite": { + "regex_uri": [ + "^/test/(.*)/(.*)/(.*)/hello", + 
"/hello/$1_$2_$3", + "^/test/(.*)/(.*)/(.*)/world", + "/world/$1_$2_$3" + ] + } + }, + "upstream": { + "nodes": { + "127.0.0.1:8125": 1 + }, + "type": "roundrobin" + }, + "uri": "/test/*" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 38: hit +--- request +GET /test/plugin/proxy/rewrite/hello HTTP/1.1 +--- http_config + server { + listen 8125; + location / { + content_by_lua_block { + ngx.say(ngx.var.request_uri) + } + } + } +--- response_body +/hello/plugin_proxy_rewrite + + + +=== TEST 39: hit +--- request +GET /test/plugin/proxy/rewrite/world HTTP/1.1 +--- http_config + server { + listen 8125; + location / { + content_by_lua_block { + ngx.say(ngx.var.request_uri) + } + } + } +--- response_body +/world/plugin_proxy_rewrite + + + +=== TEST 40: use regex uri with unsafe allowed +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "proxy-rewrite": { + "regex_uri": [ + "/hello/(.+)", + "/hello?unsafe_variable=$1" + ], + "use_real_request_uri_unsafe": true + } + }, + "upstream": { + "nodes": { + "127.0.0.1:8125": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello/*" + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 41: hit +--- request +GET /hello/%ED%85%8C%EC%8A%A4%ED%8A%B8 HTTP/1.1 +--- http_config + server { + listen 8125; + location / { + content_by_lua_block { + ngx.say(ngx.var.request_uri) + } + } + } +--- response_body +/hello?unsafe_variable=%ED%85%8C%EC%8A%A4%ED%8A%B8 diff --git a/t/plugin/request-id-reload-bugfix.t b/t/plugin/request-id-reload-bugfix.t deleted file mode 100644 index 71775e7d9b62..000000000000 --- a/t/plugin/request-id-reload-bugfix.t +++ /dev/null @@ -1,117 +0,0 @@ -# -# Licensed to the Apache Software Foundation 
(ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -use t::APISIX 'no_plan'; - -log_level('warn'); -repeat_each(1); -no_long_string(); -no_root_location(); - -add_block_preprocessor(sub { - my ($block) = @_; - - if (!$block->request) { - $block->set_value("request", "GET /t"); - } - - my $extra_init_by_lua = <<_EOC_; - local core = require("apisix.core") - local orig_new = core.etcd.new - core.etcd.new = function(...) - local cli, prefix = orig_new(...) - cli.keepalive = function(...) 
- return false, "test error" - end - -- only simulate error once - -- because reload would redo init() - core.etcd.new = orig_new - return cli, prefix - end - - local timers = require("apisix.timers") - local orig_unregister = timers.unregister_timer - unregister_cnt = 0 - timers.unregister_timer = function(name, privileged) - core.log.error("unregister timer: ", name) - unregister_cnt = unregister_cnt + 1 - return orig_unregister(name, privileged) - end -_EOC_ - - $block->set_value("extra_init_by_lua", $extra_init_by_lua); -}); - -run_tests; - -__DATA__ - -=== TEST 1: unregister timer when etcd keepalive failed ---- yaml_config -plugins: - - request-id -plugin_attr: - request-id: - snowflake: - enable: true - data_machine_interval: 1 ---- config - location /t { - content_by_lua_block { - local t = require("lib.test_admin").test - local code, body = t('/apisix/admin/routes/1', - ngx.HTTP_PUT, - [[{ - "plugins": { - "request-id": { - "algorithm": "snowflake" - } - }, - "upstream": { - "nodes": { - "127.0.0.1:1982": 1 - }, - "type": "roundrobin" - }, - "uri": "/opentracing" - }]] - ) - if code >= 300 then - ngx.status = code - return - end - - -- wait for keepalive fails - ngx.sleep(2) - - local code = t('/apisix/admin/plugins/reload', - ngx.HTTP_PUT) - if code >= 300 then - ngx.status = code - return - end - - ngx.sleep(2) - ngx.log(ngx.ERR, unregister_cnt) - if unregister_cnt ~= 1 then - ngx.status = 500 - end - } - } ---- timeout: 5 ---- error_log -lease failed -unregister timer: plugin#request-id diff --git a/t/plugin/request-id.t b/t/plugin/request-id.t index 4ea667a5946b..39e9f943042b 100644 --- a/t/plugin/request-id.t +++ b/t/plugin/request-id.t @@ -378,43 +378,7 @@ X-Request-Id and Custom-Header-Name are different -=== TEST 12: check for snowflake id ---- yaml_config -plugins: - - request-id -plugin_attr: - request-id: - snowflake: - enable: true - snowflake_epoc: 1609459200000 - data_machine_bits: 10 - sequence_bits: 10 - data_machine_ttl: 30 - 
data_machine_interval: 10 ---- config -location /t { - content_by_lua_block { - ngx.sleep(3) - local core = require("apisix.core") - local key = "/plugins/request-id/snowflake/1" - local res, err = core.etcd.get(key) - if err ~= nil then - ngx.status = 500 - ngx.say(err) - return - end - if res.body.node.key ~= "/apisix/plugins/request-id/snowflake/1" then - ngx.say(core.json.encode(res.body.node)) - end - ngx.say("ok") - } -} ---- response_body -ok - - - -=== TEST 13: wrong type +=== TEST 12: wrong algorithm type --- config location /t { content_by_lua_block { @@ -432,176 +396,7 @@ done -=== TEST 14: add plugin with algorithm snowflake (default uuid) ---- config - location /t { - content_by_lua_block { - local t = require("lib.test_admin").test - local code, body = t('/apisix/admin/routes/1', - ngx.HTTP_PUT, - [[{ - "plugins": { - "request-id": { - "algorithm": "snowflake" - } - }, - "upstream": { - "nodes": { - "127.0.0.1:1982": 1 - }, - "type": "roundrobin" - }, - "uri": "/opentracing" - }]] - ) - if code >= 300 then - ngx.status = code - end - ngx.say(body) - } - } ---- response_body -passed - - - -=== TEST 15: check for snowflake id ---- yaml_config -plugins: - - request-id -plugin_attr: - request-id: - snowflake: - enable: true ---- config - location /t { - content_by_lua_block { - local http = require "resty.http" - local t = {} - local ids = {} - for i = 1, 180 do - local th = assert(ngx.thread.spawn(function() - local httpc = http.new() - local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/opentracing" - local res, err = httpc:request_uri(uri, - { - method = "GET", - headers = { - ["Content-Type"] = "application/json", - } - } - ) - if not res then - ngx.log(ngx.ERR, err) - return - end - local id = res.headers["X-Request-Id"] - if not id then - return -- ignore if the data is not synced yet. 
- end - if ids[id] == true then - ngx.say("ids not unique") - return - end - ids[id] = true - end, i)) - table.insert(t, th) - end - for i, th in ipairs(t) do - ngx.thread.wait(th) - end - ngx.say("true") - } - } ---- wait: 5 ---- response_body -true - - - -=== TEST 16: check for delta_offset 1000 milliseconds ---- yaml_config -plugins: - - request-id -plugin_attr: - request-id: - snowflake: - enable: true - snowflake_epoc: 1609459200000 - data_machine_bits: 12 - sequence_bits: 10 - data_machine_ttl: 30 - data_machine_interval: 10 - delta_offset: 1000 ---- config - location /t { - content_by_lua_block { - local http = require "resty.http" - local t = {} - local ids = {} - for i = 1, 180 do - local th = assert(ngx.thread.spawn(function() - local httpc = http.new() - local uri = "http://127.0.0.1:" .. ngx.var.server_port .. "/opentracing" - local res, err = httpc:request_uri(uri, - { - method = "GET", - headers = { - ["Content-Type"] = "application/json", - } - } - ) - if not res then - ngx.log(ngx.ERR, err) - return - end - local id = res.headers["X-Request-Id"] - if not id then - return -- ignore if the data is not synced yet. 
- end - if ids[id] == true then - ngx.say("ids not unique") - return - end - ids[id] = true - end, i)) - table.insert(t, th) - end - for i, th in ipairs(t) do - ngx.thread.wait(th) - end - ngx.say("true") - } - } ---- wait: 5 ---- response_body -true - - - -=== TEST 17: wrong delta_offset ---- yaml_config -plugins: - - request-id -plugin_attr: - request-id: - snowflake: - enable: true - delta_offset: 1001 ---- config - location /t { - content_by_lua_block { - ngx.say("done") - } - } ---- response_body -done ---- error_log -ailed to check the plugin_attr[request-id]: property "snowflake" validation failed: property "delta_offset" validation failed: matches none of the enum values - - - -=== TEST 18: add plugin with include_in_response true +=== TEST 13: add plugin with include_in_response true --- config location /t { content_by_lua_block { @@ -635,7 +430,7 @@ passed -=== TEST 19: echo back the client's header if given +=== TEST 14: echo back the client's header if given --- request GET /opentracing --- more_headers @@ -645,7 +440,7 @@ X-Request-ID: 123 -=== TEST 20: add plugin with algorithm nanoid (default uuid) +=== TEST 15: add plugin with algorithm nanoid (default uuid) --- config location /t { content_by_lua_block { diff --git a/t/plugin/request-id2.t b/t/plugin/request-id2.t index a66c6b3ddaa3..421a0891c39f 100644 --- a/t/plugin/request-id2.t +++ b/t/plugin/request-id2.t @@ -70,7 +70,7 @@ passed -=== TEST 2: add plugin with algorithm range_id +=== TEST 2: add plugin with algorithm range_id (set automatic default) --- config location /t { content_by_lua_block { @@ -103,7 +103,13 @@ passed -=== TEST 3: add plugin with algorithm range_id +=== TEST 3: hit +--- request +GET /opentracing + + + +=== TEST 4: add plugin with algorithm range_id --- config location /t { content_by_lua_block { diff --git a/t/plugin/response-rewrite.t b/t/plugin/response-rewrite.t index def0c61db219..2e4dcf4eb324 100644 --- a/t/plugin/response-rewrite.t +++ b/t/plugin/response-rewrite.t 
@@ -696,3 +696,40 @@ passed --- request GET /hello --- response_body + + + +=== TEST 27: test add header with one word +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "response-rewrite": { + "headers": { + "add": [ + "X-Server-test:a" + ] + } + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uris": ["/hello"] + }]] + ) + + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed diff --git a/t/plugin/skywalking-logger.t b/t/plugin/skywalking-logger.t index 8d9bdeda7f89..900c5a07aa39 100644 --- a/t/plugin/skywalking-logger.t +++ b/t/plugin/skywalking-logger.t @@ -132,7 +132,8 @@ done "max_retry_count": 1, "retry_delay": 2, "buffer_duration": 2, - "inactive_timeout": 2 + "inactive_timeout": 2, + "service_instance_name": "$hostname" } }, "upstream": { @@ -281,3 +282,15 @@ opentracing --- error_log eval qr/.*\{\\\"json\\\":.*\\\\\\"my_ip\\\\\\":\\\\\\"127\.0\.0\.1\\\\\\".*\}/ --- wait: 0.5 + + + +=== TEST 12: test serviceInstance $hostname +--- request +GET /opentracing +--- response_body +opentracing +--- no_error_log eval +qr/\\\"serviceInstance\\\":\\\"\$hostname\\\"/ +qr/\\\"serviceInstance\\\":\\\"\\\"/ +--- wait: 0.5 diff --git a/t/plugin/sls-logger.t b/t/plugin/sls-logger.t index a56d6121f876..9e668e1bf3b0 100644 --- a/t/plugin/sls-logger.t +++ b/t/plugin/sls-logger.t @@ -173,16 +173,20 @@ hello world end math.randomseed(os.time()) - local rfc5424 = require("apisix.plugins.slslog.rfc5424") + local rfc5424 = require("apisix.utils.rfc5424") local m = 0 -- because the millisecond value obtained by `ngx.now` may be `0` -- it is executed multiple times to ensure the accuracy of the test for i = 1, 5 do ngx.sleep(string.format("%0.3f", math.random())) + local structured_data = { + {name = "project", value = "apisix.apache.org"}, + {name = "logstore", value = 
"apisix.apache.org"}, + {name = "access-key-id", value = "apisix.sls.logger"}, + {name = "access-key-secret", value = "BD274822-96AA-4DA6-90EC-15940FB24444"} + } local log_entry = rfc5424.encode("SYSLOG", "INFO", "localhost", "apisix", - 123456, "apisix.apache.org", "apisix.apache.log", - "apisix.sls.logger", "BD274822-96AA-4DA6-90EC-15940FB24444", - "hello world") + 123456, "hello world", structured_data) m = get_syslog_timestamp_millisecond(log_entry) + m end @@ -226,15 +230,13 @@ passed === TEST 9: access --- extra_init_by_lua local json = require("toolkit.json") - local rfc5424 = require("apisix.plugins.slslog.rfc5424") + local rfc5424 = require("apisix.utils.rfc5424") local old_f = rfc5424.encode - rfc5424.encode = function(facility, severity, hostname, appname, pid, project, - logstore, access_key_id, access_key_secret, msg) + rfc5424.encode = function(facility, severity, hostname, appname, pid, msg, structured_data) local r = json.decode(msg) assert(r.client_ip == "127.0.0.1", r.client_ip) assert(r.host == "localhost", r.host) - return old_f(facility, severity, hostname, appname, pid, project, - logstore, access_key_id, access_key_secret, msg) + return old_f(facility, severity, hostname, appname, pid, msg, structured_data) end --- request GET /hello @@ -372,14 +374,12 @@ passed === TEST 13: access --- extra_init_by_lua local json = require("toolkit.json") - local rfc5424 = require("apisix.plugins.slslog.rfc5424") + local rfc5424 = require("apisix.utils.rfc5424") local old_f = rfc5424.encode - rfc5424.encode = function(facility, severity, hostname, appname, pid, project, - logstore, access_key_id, access_key_secret, msg) + rfc5424.encode = function(facility, severity, hostname, appname, pid, msg, structured_data) local r = json.decode(msg) assert(r.vip == "127.0.0.1", r.vip) - return old_f(facility, severity, hostname, appname, pid, project, - logstore, access_key_id, access_key_secret, msg) + return old_f(facility, severity, hostname, appname, pid, msg, 
structured_data) end --- request GET /hello diff --git a/t/plugin/splunk-hec-logging.t b/t/plugin/splunk-hec-logging.t index 78d6e9689ebd..068b5727fede 100644 --- a/t/plugin/splunk-hec-logging.t +++ b/t/plugin/splunk-hec-logging.t @@ -145,7 +145,7 @@ GET /hello --- response_body hello world --- error_log -Batch Processor[splunk-hec-logging] failed to process entries: failed to send splunk, Invalid token +Batch Processor[splunk-hec-logging] failed to process entries: failed to send splunk, Invalid authorization Batch Processor[splunk-hec-logging] exceeded the max_retry_count @@ -224,41 +224,85 @@ hello world --- config location /t { content_by_lua_block { - local config = { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_metadata/splunk-hec-logging', + ngx.HTTP_PUT, + [[{ + "log_format": { + "host": "$host", + "@timestamp": "$time_iso8601", + "client_ip": "$remote_addr", + "message_1":"test custom log format in plugin" + } + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, { uri = "/hello", upstream = { type = "roundrobin", nodes = { - ["127.0.0.1:1980"] = 1 + ["127.0.0.1:1982"] = 1 } }, plugins = { ["splunk-hec-logging"] = { endpoint = { - uri = "http://127.0.0.1:1980/splunk_hec_logging", + uri = "http://127.0.0.1:18088/services/collector", token = "BD274822-96AA-4DA6-90EC-18940FB2414C" }, - batch_max_size = 1, + batch_max_size = 3, inactive_timeout = 1 } } - } - local t = require("lib.test_admin").test - local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, config) - + }) if code >= 300 then ngx.status = code ngx.say(body) return end + local code, _, body2 = t("/hello", "GET") + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + ngx.say(body) + } + } +--- response_body +passed +--- wait: 5 + + + +=== TEST 8: check splunk log +--- exec +tail -n 1 ci/pod/vector/splunk.log +--- response_body eval 
+qr/.*test custom log format in plugin.*/ + + + +=== TEST 9: set route to test custom log format in route +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test local code, body = t('/apisix/admin/plugin_metadata/splunk-hec-logging', ngx.HTTP_PUT, [[{ "log_format": { "host": "$host", "@timestamp": "$time_iso8601", - "client_ip": "$remote_addr" + "vip": "$remote_addr", + "message_2":"logger format in plugin" } }]] ) @@ -267,111 +311,118 @@ hello world ngx.say(body) return end + + local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, { + uri = "/hello", + upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1982"] = 1 + } + }, + plugins = { + ["splunk-hec-logging"] = { + endpoint = { + uri = "http://127.0.0.1:18088/services/collector", + token = "BD274822-96AA-4DA6-90EC-18940FB2414C" + }, + batch_max_size = 3, + inactive_timeout = 1 + } + } + }) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code, _, body2 = t("/hello", "GET") + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end ngx.say(body) } } --- response_body passed +--- wait: 5 -=== TEST 8: hit ---- extra_init_by_lua - local core = require("apisix.core") - local decode = require("toolkit.json").decode - local up = require("lib.server") - up.splunk_hec_logging = function() - ngx.log(ngx.WARN, "the mock backend is hit") - - ngx.req.read_body() - local data = ngx.req.get_body_data() - ngx.log(ngx.WARN, data) - data = decode(data) - assert(data[1].event.client_ip == "127.0.0.1") - assert(data[1].source == "apache-apisix-splunk-hec-logging") - assert(data[1].host == core.utils.gethostname()) - ngx.say('{}') - end ---- request -GET /hello ---- wait: 2 ---- response_body -hello world ---- error_log -the mock backend is hit ---- no_error_log -[error] +=== TEST 10: check splunk log +--- exec +tail -n 1 ci/pod/vector/splunk.log +--- response_body eval +qr/.*logger format in plugin.*/ -=== TEST 9: set route 
to test custom log format in route +=== TEST 11: set route test batched data --- config location /t { content_by_lua_block { - local config = { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/plugin_metadata/splunk-hec-logging', + ngx.HTTP_PUT, + [[{ + "log_format": { + "host": "$host", + "@timestamp": "$time_iso8601", + "vip": "$remote_addr", + "message_3":"test batched data" + } + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, { uri = "/hello", upstream = { type = "roundrobin", nodes = { - ["127.0.0.1:1980"] = 1 + ["127.0.0.1:1982"] = 1 } }, plugins = { ["splunk-hec-logging"] = { endpoint = { - uri = "http://127.0.0.1:1980/splunk_hec_logging", + uri = "http://127.0.0.1:18088/services/collector", token = "BD274822-96AA-4DA6-90EC-18940FB2414C" }, - log_format = { - host = "$host", - ["@timestamp"] = "$time_iso8601", - vip = "$remote_addr" - }, - batch_max_size = 1, + batch_max_size = 3, inactive_timeout = 1 } } - } - local t = require("lib.test_admin").test - local code, body = t('/apisix/admin/routes/1', ngx.HTTP_PUT, config) + }) if code >= 300 then ngx.status = code - ngx.say(body) - return end + local code, _, body2 = t("/hello", "GET") + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end ngx.say(body) } } --- response_body passed +--- wait: 5 -=== TEST 10: hit ---- extra_init_by_lua - local core = require("apisix.core") - local decode = require("toolkit.json").decode - local up = require("lib.server") - up.splunk_hec_logging = function() - ngx.log(ngx.WARN, "the mock backend is hit") - - ngx.req.read_body() - local data = ngx.req.get_body_data() - ngx.log(ngx.WARN, data) - data = decode(data) - assert(data[1].event.vip == "127.0.0.1") - assert(data[1].source == "apache-apisix-splunk-hec-logging") - assert(data[1].host == core.utils.gethostname()) - ngx.say('{}') - end ---- request -GET /hello ---- wait: 2 
---- response_body -hello world ---- error_log -the mock backend is hit ---- no_error_log -[error] +=== TEST 12: check splunk log +--- exec +tail -n 1 ci/pod/vector/splunk.log +--- response_body eval +qr/.*test batched data.*/ diff --git a/t/plugin/syslog.t b/t/plugin/syslog.t index a8795bc24be6..5e13aa301c56 100644 --- a/t/plugin/syslog.t +++ b/t/plugin/syslog.t @@ -31,7 +31,7 @@ __DATA__ local plugin = require("apisix.plugins.syslog") local ok, err = plugin.check_schema({ host = "127.0.0.1", - port = 3000, + port = 5140, }) if not ok then ngx.say(err) @@ -73,7 +73,7 @@ done local plugin = require("apisix.plugins.syslog") local ok, err = plugin.check_schema({ host = "127.0.0.1", - port = "3000", + port = "5140", }) if not ok then ngx.say(err) @@ -100,7 +100,7 @@ done "plugins": { "syslog": { "host" : "127.0.0.1", - "port" : 5044 + "port" : 5140 } }, "upstream": { @@ -142,7 +142,7 @@ hello world local logger_socket = require("resty.logger.socket") local logger, err = logger_socket:new({ host = "127.0.0.1", - port = 5044, + port = 5140, flush_limit = 100, }) @@ -183,7 +183,7 @@ done "plugins": { "syslog": { "host" : "127.0.0.1", - "port" : 5044, + "port" : 5140, "flush_limit" : 1, "inactive_timeout": 1 } @@ -236,7 +236,15 @@ unlock with key route#1 -=== TEST 8: check plugin configuration updating +=== TEST 8: check log +--- exec +tail -n 1 ci/pod/vector/syslog-tcp.log +--- response_body eval +qr/.*apisix_latency.*/ + + + +=== TEST 9: check plugin configuration updating --- config location /t { content_by_lua_block { @@ -327,7 +335,7 @@ sending a batch logs to 127.0.0.1:5045 -=== TEST 9: add log format +=== TEST 10: add log format --- config location /t { content_by_lua_block { @@ -355,7 +363,7 @@ passed -=== TEST 10: Add route and Enable Syslog Plugin, batch_max_size=1 +=== TEST 11: Add route and Enable Syslog Plugin, batch_max_size=1 --- config location /t { content_by_lua_block { @@ -369,7 +377,7 @@ passed "disable": false, "flush_limit": 1, "host" : "127.0.0.1", 
- "port" : 5050 + "port" : 5140 } }, "upstream": { @@ -394,7 +402,7 @@ passed -=== TEST 11: hit route and report sys logger +=== TEST 12: hit route and report sys logger --- extra_init_by_lua local syslog = require("apisix.plugins.syslog.init") local json = require("apisix.core.json") @@ -416,7 +424,15 @@ qr/syslog-log-format.*\{.*"upstream":"127.0.0.1:\d+"/ -=== TEST 12: log format in plugin +=== TEST 13: check log +--- exec +tail -n 1 ci/pod/vector/syslog-tcp.log +--- response_body eval +qr/.*\"host\":\"localhost\".*/ + + + +=== TEST 14: log format in plugin --- config location /t { content_by_lua_block { @@ -432,7 +448,7 @@ qr/syslog-log-format.*\{.*"upstream":"127.0.0.1:\d+"/ "vip": "$remote_addr" }, "host" : "127.0.0.1", - "port" : 5050 + "port" : 5140 } }, "upstream": { @@ -461,7 +477,7 @@ passed -=== TEST 13: access +=== TEST 15: access --- extra_init_by_lua local syslog = require("apisix.plugins.syslog.init") local json = require("apisix.core.json") @@ -481,3 +497,65 @@ hello world [error] --- error_log push_entry is called with data + + + +=== TEST 16: check log +--- exec +tail -n 1 ci/pod/vector/syslog-tcp.log +--- response_body eval +qr/.*vip.*/ + + + +=== TEST 17: test udp mode +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "syslog": { + "batch_max_size": 1, + "disable": false, + "flush_limit": 1, + "host" : "127.0.0.1", + "port" : 5150, + "sock_type": "udp" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 18: hit +--- request +GET /hello + + + +=== TEST 19: check log +--- exec +tail -n 1 ci/pod/vector/syslog-udp.log +--- response_body eval +qr/.*upstream.*/ diff --git a/t/plugin/tcp-logger.t 
b/t/plugin/tcp-logger.t index 0d15b5692d5f..3ef774f813d1 100644 --- a/t/plugin/tcp-logger.t +++ b/t/plugin/tcp-logger.t @@ -97,7 +97,7 @@ done "plugins": { "tcp-logger": { "host": "127.0.0.1", - "port": 5044, + "port": 3000, "tls": false } }, @@ -134,6 +134,7 @@ hello world === TEST 6: error log +--- log_level: error --- config location /t { content_by_lua_block { @@ -192,7 +193,7 @@ failed to connect to TCP server: host[312.0.0.1] port[2000] "plugins": { "tcp-logger": { "host": "127.0.0.1", - "port": 5044, + "port": 3000, "tls": false, "batch_max_size": 1 } @@ -226,7 +227,7 @@ failed to connect to TCP server: host[312.0.0.1] port[2000] "plugins": { "tcp-logger": { "host": "127.0.0.1", - "port": 5045, + "port": 43000, "tls": false, "batch_max_size": 1 } @@ -269,8 +270,8 @@ passedopentracing --- grep_error_log eval qr/sending a batch logs to 127.0.0.1:(\d+)/ --- grep_error_log_out -sending a batch logs to 127.0.0.1:5044 -sending a batch logs to 127.0.0.1:5045 +sending a batch logs to 127.0.0.1:3000 +sending a batch logs to 127.0.0.1:43000 @@ -312,7 +313,7 @@ GET /t "plugins": { "tcp-logger": { "host": "127.0.0.1", - "port": 8125, + "port": 3000, "tls": false, "batch_max_size": 1, "inactive_timeout": 1 @@ -338,6 +339,7 @@ GET /t ngx.HTTP_PUT, [[{ "log_format": { + "case name": "plugin_metadata", "host": "$host", "@timestamp": "$time_iso8601", "client_ip": "$remote_addr" @@ -349,7 +351,15 @@ GET /t ngx.say(body) return end - ngx.say(body) + + local code, _, _ = t("/hello", "GET") + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + + ngx.say("passed") } } --- request @@ -359,47 +369,38 @@ passed -=== TEST 10: access ---- stream_conf_enable ---- extra_stream_config - server { - listen 8125; - content_by_lua_block { - local decode = require("toolkit.json").decode - ngx.log(ngx.WARN, "the mock backend is hit") +=== TEST 10: log format in plugin_metadata +--- exec +tail -n 1 ci/pod/vector/tcp.log +--- response_body eval +qr/.*plugin_metadata.*/ - 
local sock, err = ngx.req.socket(true) - if not sock then - ngx.log(ngx.ERR, "failed to get the request socket: ", err) - return - end - local data, err = sock:receive('*a') - if not data then - if err and err ~= "closed" then - ngx.log(ngx.ERR, "socket error, returning: ", err) - end - return - end +=== TEST 11: remove tcp logger metadata +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + + local code, body = t('/apisix/admin/plugin_metadata/tcp-logger', + ngx.HTTP_PUT, + [[{ + "log_format": {} + }]] + ) - data = decode(data) - assert(data.client_ip == "127.0.0.1") + ngx.say(body) } } --- request -GET /hello +GET /t --- response_body -hello world ---- wait: 2 ---- error_log -the mock backend is hit ---- no_error_log -[error] +passed -=== TEST 11: log format in plugin +=== TEST 12: log format in plugin --- config location /t { content_by_lua_block { @@ -410,9 +411,10 @@ the mock backend is hit "plugins": { "tcp-logger": { "host": "127.0.0.1", - "port": 8125, + "port": 3000, "tls": false, "log_format": { + "case name": "logger format in plugin", "vip": "$remote_addr" }, "batch_max_size": 1, @@ -435,50 +437,84 @@ the mock backend is hit return end - ngx.say(body) + local code, _, body2 = t("/hello", "GET") + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + + ngx.say("passed") } } --- request GET /t +--- wait: 0.5 --- response_body passed -=== TEST 12: access ---- stream_conf_enable ---- extra_stream_config - server { - listen 8125; +=== TEST 13: check tcp log +--- exec +tail -n 1 ci/pod/vector/tcp.log +--- response_body eval +qr/.*logger format in plugin.*/ + + + +=== TEST 14: true tcp log with tls +--- config + location /t { content_by_lua_block { - local decode = require("toolkit.json").decode - ngx.log(ngx.WARN, "the mock backend is hit") + local t = require("lib.test_admin").test + local code, body1 = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "tcp-logger": { + 
"host": "127.0.0.1", + "port": 43000, + "tls": true, + "batch_max_size": 1 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/opentracing" + }]] + ) - local sock, err = ngx.req.socket(true) - if not sock then - ngx.log(ngx.ERR, "failed to get the request socket: ", err) + if code >= 300 then + ngx.status = code + ngx.say("fail") return end - local data, err = sock:receive('*a') - - if not data then - if err and err ~= "closed" then - ngx.log(ngx.ERR, "socket error, returning: ", err) - end + local code, _, body2 = t("/opentracing", "GET") + if code >= 300 then + ngx.status = code + ngx.say("fail") return end - data = decode(data) - assert(data.vip == "127.0.0.1") + ngx.print(body2) } } --- request -GET /hello +GET /t +--- wait: 0.5 --- response_body -hello world ---- wait: 2 ---- error_log -the mock backend is hit ---- no_error_log -[error] +opentracing + + + +=== TEST 15: check tls log +--- exec +tail -n 1 ci/pod/vector/tls-datas.log +--- response_body eval +qr/.*route_id.*1.*/ diff --git a/t/plugin/tencent-cloud-cls.t b/t/plugin/tencent-cloud-cls.t index fff3bc12950e..6ad4d585b3b1 100644 --- a/t/plugin/tencent-cloud-cls.t +++ b/t/plugin/tencent-cloud-cls.t @@ -500,3 +500,61 @@ GET /opentracing --- response_body opentracing --- wait: 0.5 + + + +=== TEST 15: add plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "tencent-cloud-cls": { + "cls_host": "127.0.0.1:10420", + "cls_topic": "143b5d70-139b-4aec-b54e-bb97756916de", + "secret_id": "secret_id", + "secret_key": "secret_key", + "batch_max_size": 1, + "max_retry_count": 1, + "retry_delay": 2, + "buffer_duration": 2, + "inactive_timeout": 2 + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1982": 1 + }, + "type": "roundrobin" + }, + "uri": "/opentracing" + }]] + ) + + if code >= 300 then + ngx.status = code + end + 
ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 16: test resolve ip failed +--- extra_init_by_lua + local socket = require("socket") + socket.dns.toip = function(address) + return nil, "address can't be resolved" + end +--- request +GET /opentracing +--- response_body +opentracing +--- error_log eval +qr/resolve ip failed, hostname: .*, error: address can't be resolved/ +--- wait: 0.5 diff --git a/t/plugin/traffic-split.t b/t/plugin/traffic-split.t index f79152fd8f21..4a56988e83c6 100644 --- a/t/plugin/traffic-split.t +++ b/t/plugin/traffic-split.t @@ -710,11 +710,12 @@ passed === TEST 19: domain name resolved successfully ---- request -GET / ---- error_code: 200 ---- error_log eval -qr/dns resolver domain: www.apiseven.com to \d+.\d+.\d+.\d+/ +--- pipelined_requests eval +["GET /", "GET /"] +--- error_code eval +[200, 200] +--- error_log_like eval +qr/(dns resolver domain: www.apiseven.com to \d+.\d+.\d+.\d+){2}/ diff --git a/t/plugin/traffic-split2.t b/t/plugin/traffic-split2.t index 05b8047d3f24..5100337e48d9 100644 --- a/t/plugin/traffic-split2.t +++ b/t/plugin/traffic-split2.t @@ -752,3 +752,50 @@ GET /uri?id=1 qr/host: 127.0.0.1/ --- error_log proxy request to 127.0.0.1:1980 + + + +=== TEST 20: invalid upstream_id should report failure +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + + local data = { + uri = "/route", + plugins = { + ["traffic-split"] = { + rules = { + { + weighted_upstreams = { + { + upstream_id = "invalid-id", + weight = 1 + } + } + }, + } + } + }, + upstream = { + type = "roundrobin", + nodes = { + ["127.0.0.1:1980"] = 1 + } + } + } + + code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PATCH, + json.encode(data) + ) + ngx.status, body = t('/route', ngx.HTTP_GET) + ngx.say(body) + } + } +--- request +GET /t +--- error_code: 500 +--- error_log +failed to find upstream by id: invalid-id diff --git a/t/plugin/ua-restriction.t 
b/t/plugin/ua-restriction.t index 0e8a9544bd34..56f07b39b13f 100644 --- a/t/plugin/ua-restriction.t +++ b/t/plugin/ua-restriction.t @@ -38,13 +38,12 @@ run_tests; __DATA__ -=== TEST 1: set allowlist, denylist, bypass_missing and user-defined message +=== TEST 1: set both allowlist and denylist --- config location /t { content_by_lua_block { local plugin = require("apisix.plugins.ua-restriction") local conf = { - bypass_missing = true, allowlist = { "my-bot1", "my-bot2" @@ -53,18 +52,18 @@ __DATA__ "my-bot1", "my-bot2" }, - message = "User-Agent Forbidden", } local ok, err = plugin.check_schema(conf) if not ok then ngx.say(err) + return end ngx.say(require("toolkit.json").encode(conf)) } } --- response_body -{"allowlist":["my-bot1","my-bot2"],"bypass_missing":true,"denylist":["my-bot1","my-bot2"],"message":"User-Agent Forbidden"} +value should match only one schema, but matches both schemas 1 and 2 @@ -216,88 +215,30 @@ User-Agent:my-bot2 -=== TEST 9: hit route and user-agent match denylist regex +=== TEST 9: hit route and user-agent in denylist with reverse order multiple user-agent --- request GET /hello --- more_headers -User-Agent:Baiduspider/3.0 +User-Agent:my-bot2 +User-Agent:my-bot1 --- error_code: 403 --- response_body {"message":"Not allowed"} -=== TEST 10: hit route and user-agent not in denylist ---- request -GET /hello ---- more_headers -User-Agent:foo/bar ---- error_code: 200 ---- response_body -hello world - - - -=== TEST 11: set allowlist ---- config - location /t { - content_by_lua_block { - local t = require("lib.test_admin").test - local code, body = t('/apisix/admin/routes/1', - ngx.HTTP_PUT, - [[{ - "uri": "/hello", - "upstream": { - "type": "roundrobin", - "nodes": { - "127.0.0.1:1980": 1 - } - }, - "plugins": { - "ua-restriction": { - "allowlist": [ - "my-bot1", - "(Baiduspider)/(\\d+)\\.(\\d+)" - ] - } - } - }]] - ) - - if code >= 300 then - ngx.status = code - end - ngx.say(body) - } - } ---- response_body -passed - - - -=== TEST 12: hit 
route and user-agent in allowlist ---- request -GET /hello ---- more_headers -User-Agent:my-bot1 ---- error_code: 200 ---- response_body -hello world - - - -=== TEST 13: hit route and user-agent match allowlist regex +=== TEST 10: hit route and user-agent match denylist regex --- request GET /hello --- more_headers User-Agent:Baiduspider/3.0 ---- error_code: 200 +--- error_code: 403 --- response_body -hello world +{"message":"Not allowed"} -=== TEST 14: hit route and user-agent not in allowlist +=== TEST 11: hit route and user-agent not in denylist --- request GET /hello --- more_headers @@ -308,7 +249,7 @@ hello world -=== TEST 15: set config: user-agent in both allowlist and denylist +=== TEST 12: set allowlist --- config location /t { content_by_lua_block { @@ -326,11 +267,7 @@ hello world "plugins": { "ua-restriction": { "allowlist": [ - "foo/bar", - "(Baiduspider)/(\\d+)\\.(\\d+)" - ], - "denylist": [ - "foo/bar", + "my-bot1", "(Baiduspider)/(\\d+)\\.(\\d+)" ] } @@ -349,157 +286,62 @@ passed -=== TEST 16: hit route and user-agent in both allowlist and denylist, pass(part 1) +=== TEST 13: hit route and user-agent in allowlist --- request GET /hello --- more_headers -User-Agent:foo/bar +User-Agent:my-bot1 --- error_code: 200 --- response_body hello world -=== TEST 17: hit route and user-agent in both allowlist and denylist, pass(part 2) +=== TEST 14: hit route and user-agent match allowlist regex --- request GET /hello --- more_headers -User-Agent:Baiduspider/1.0 +User-Agent:Baiduspider/3.0 --- error_code: 200 --- response_body hello world -=== TEST 18: bypass_missing test, using default, reset conf(part1) ---- config - location /t { - content_by_lua_block { - local t = require("lib.test_admin").test - local code, body = t('/apisix/admin/routes/1', - ngx.HTTP_PUT, - [[{ - "uri": "/hello", - "upstream": { - "type": "roundrobin", - "nodes": { - "127.0.0.1:1980": 1 - } - }, - "plugins": { - "ua-restriction": { - } - } - }]] - ) - - if code >= 300 then - ngx.status 
= code - end - ngx.say(body) - } - } ---- response_body -passed - - - -=== TEST 19: bypass_missing test, using default, send request without User-Agent(part2) +=== TEST 15: hit route and user-agent not in allowlist --- request GET /hello +--- more_headers +User-Agent:foo/bar --- error_code: 403 --- response_body {"message":"Not allowed"} -=== TEST 20: bypass_missing test, set to true(part1) ---- config - location /t { - content_by_lua_block { - local t = require("lib.test_admin").test - local code, body = t('/apisix/admin/routes/1', - ngx.HTTP_PUT, - [[{ - "uri": "/hello", - "upstream": { - "type": "roundrobin", - "nodes": { - "127.0.0.1:1980": 1 - } - }, - "plugins": { - "ua-restriction": { - "bypass_missing": true - } - } - }]] - ) - - if code >= 300 then - ngx.status = code - end - ngx.say(body) - } - } ---- response_body -passed - - - -=== TEST 21: bypass_missing test, set to true, send request without User-Agent(part2) +=== TEST 16: hit route and user-agent in allowlist with multiple user-agent --- request GET /hello ---- error_code: 200 +--- more_headers +User-Agent:foo/bar +User-Agent:my-bot1 --- response_body hello world -=== TEST 22: bypass_missing test, set to false(part1) ---- config - location /t { - content_by_lua_block { - local t = require("lib.test_admin").test - local code, body = t('/apisix/admin/routes/1', - ngx.HTTP_PUT, - [[{ - "uri": "/hello", - "upstream": { - "type": "roundrobin", - "nodes": { - "127.0.0.1:1980": 1 - } - }, - "plugins": { - "ua-restriction": { - "bypass_missing": false - } - } - }]] - ) - - if code >= 300 then - ngx.status = code - end - ngx.say(body) - } - } ---- response_body -passed - - - -=== TEST 23: bypass_missing test, set to false, send request without User-Agent(part2) +=== TEST 17: hit route and user-agent in allowlist with reverse order multiple user-agent --- request GET /hello ---- error_code: 403 +--- more_headers +User-Agent:my-bot1 +User-Agent:foo/bar --- response_body -{"message":"Not allowed"} +hello world 
-=== TEST 24: message that do not reach the minimum range +=== TEST 18: message that do not reach the minimum range --- config location /t { content_by_lua_block { @@ -530,7 +372,7 @@ qr/string too short, expected at least 1, got 0/ -=== TEST 25: exceeds the maximum limit of message +=== TEST 19: exceeds the maximum limit of message --- config location /t { content_by_lua_block { @@ -568,7 +410,7 @@ qr/string too long, expected at most 1024, got 1025/ -=== TEST 26: set custom message +=== TEST 20: set custom message --- config location /t { content_by_lua_block { @@ -606,7 +448,7 @@ passed -=== TEST 27: test custom message +=== TEST 21: test custom message --- request GET /hello --- more_headers @@ -617,7 +459,7 @@ User-Agent:Baiduspider/1.0 -=== TEST 28: test remove ua-restriction, add denylist(part 1) +=== TEST 22: test remove ua-restriction, add denylist(part 1) --- config location /enable { content_by_lua_block { @@ -656,7 +498,7 @@ passed -=== TEST 29: test remove ua-restriction, fail(part 2) +=== TEST 23: test remove ua-restriction, fail(part 2) --- request GET /hello --- more_headers @@ -667,7 +509,7 @@ User-Agent:Baiduspider/1.0 -=== TEST 30: test remove ua-restriction, remove plugin(part 3) +=== TEST 24: test remove ua-restriction, remove plugin(part 3) --- config location /disable { content_by_lua_block { @@ -701,7 +543,7 @@ passed -=== TEST 31: test remove ua-restriction, check spider User-Agent(part 4) +=== TEST 25: test remove ua-restriction, check spider User-Agent(part 4) --- request GET /hello --- more_headers @@ -711,7 +553,7 @@ hello world -=== TEST 32: set disable=true +=== TEST 26: set disable=true --- config location /t { content_by_lua_block { @@ -744,7 +586,7 @@ passed -=== TEST 33: the element in allowlist is null +=== TEST 27: the element in allowlist is null --- config location /t { content_by_lua_block { @@ -771,7 +613,7 @@ done -=== TEST 34: the element in denylist is null +=== TEST 28: the element in denylist is null --- config location 
/t { content_by_lua_block { @@ -795,3 +637,125 @@ done --- response_body property "denylist" validation failed: wrong type: expected array, got table done + + + +=== TEST 29: test both allowlist and denylist are not exist +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "ua-restriction": { + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.print(body) + } + } +--- error_code: 400 +--- response_body +{"error_msg":"failed to check the configuration of plugin ua-restriction err: value should match only one schema, but matches none"} + + + +=== TEST 30: test bypass_missing +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "ua-restriction": { + "allowlist": [ + "my-bot1" + ] + } + } + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 31: hit +--- request +GET /hello +--- error_code: 403 +--- response_body +{"message":"Not allowed"} + + + +=== TEST 32: test bypass_missing with true +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "ua-restriction": { + "bypass_missing": true, + "denylist": [ + "my-bot1" + ] + } + } + }]] + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 33: hit +--- request +GET /hello +--- 
response_body +hello world diff --git a/t/plugin/udp-logger.t b/t/plugin/udp-logger.t index 934b47c68ee3..44744823634b 100644 --- a/t/plugin/udp-logger.t +++ b/t/plugin/udp-logger.t @@ -223,7 +223,7 @@ failed to connect to UDP server: host[312.0.0.1] port[2000] "plugins": { "udp-logger": { "host": "127.0.0.1", - "port": 2001, + "port": 2002, "tls": false, "batch_max_size": 1 } @@ -267,7 +267,7 @@ passedopentracing qr/sending a batch logs to 127.0.0.1:(\d+)/ --- grep_error_log_out sending a batch logs to 127.0.0.1:2000 -sending a batch logs to 127.0.0.1:2001 +sending a batch logs to 127.0.0.1:2002 @@ -298,7 +298,7 @@ GET /t -=== TEST 9: add plugin +=== TEST 9: configure plugin and access route /hello --- config location /t { content_by_lua_block { @@ -309,7 +309,7 @@ GET /t "plugins": { "udp-logger": { "host": "127.0.0.1", - "port": 8125, + "port": 8127, "tls": false, "batch_max_size": 1, "inactive_timeout": 1 @@ -336,17 +336,26 @@ GET /t [[{ "log_format": { "host": "$host", + "case name": "plugin_metadata", "@timestamp": "$time_iso8601", "client_ip": "$remote_addr" } }]] ) + if code >= 300 then ngx.status = code ngx.say(body) return end + ngx.say(body) + local code, _, _ = t("/hello", "GET") + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end } } --- request @@ -356,47 +365,15 @@ passed -=== TEST 10: access ---- stream_conf_enable ---- extra_stream_config - server { - listen 8125 udp; - content_by_lua_block { - local decode = require("toolkit.json").decode - ngx.log(ngx.WARN, "the mock backend is hit") - - local sock, err = ngx.req.socket() - if not sock then - ngx.log(ngx.ERR, "failed to get the request socket: ", err) - return - end - - local data, err = sock:receive() - - if not data then - if err and err ~= "no more data" then - ngx.log(ngx.ERR, "socket error, returning: ", err) - end - return - end - - data = decode(data) - assert(data.client_ip == "127.0.0.1") - } - } ---- request -GET /hello ---- response_body -hello world ---- wait: 2 
---- error_log -the mock backend is hit ---- no_error_log -[error] +=== TEST 10: check if log exists to confirm if logging server was hit +--- exec +tail -n 1 ci/pod/vector/udp.log +--- response_body eval +qr/.*plugin_metadata.*/ -=== TEST 11: log format in plugin +=== TEST 11: configure plugin and access route /hello --- config location /t { content_by_lua_block { @@ -407,11 +384,8 @@ the mock backend is hit "plugins": { "udp-logger": { "host": "127.0.0.1", - "port": 8125, + "port": 8127, "tls": false, - "log_format": { - "vip": "$remote_addr" - }, "batch_max_size": 1, "inactive_timeout": 1 } @@ -432,50 +406,42 @@ the mock backend is hit return end - ngx.say(body) - } - } ---- request -GET /t ---- response_body -passed - - - -=== TEST 12: access ---- stream_conf_enable ---- extra_stream_config - server { - listen 8125 udp; - content_by_lua_block { - local decode = require("toolkit.json").decode - ngx.log(ngx.WARN, "the mock backend is hit") + local code, body = t('/apisix/admin/plugin_metadata/udp-logger', + ngx.HTTP_PUT, + [[{ + "log_format": { + "host": "$host", + "case name": "logger format in plugin", + "@timestamp": "$time_iso8601", + "client_ip": "$remote_addr" + } + }]] + ) - local sock, err = ngx.req.socket() - if not sock then - ngx.log(ngx.ERR, "failed to get the request socket: ", err) + if code >= 300 then + ngx.status = code + ngx.say(body) return end - local data, err = sock:receive() - - if not data then - if err and err ~= "no more data" then - ngx.log(ngx.ERR, "socket error, returning: ", err) - end + ngx.say(body) + local code, _, _ = t("/hello", "GET") + if code >= 300 then + ngx.status = code + ngx.say("fail") return end - - data = decode(data) - assert(data.vip == "127.0.0.1") } } --- request -GET /hello +GET /t --- response_body -hello world ---- wait: 2 ---- error_log -the mock backend is hit ---- no_error_log -[error] +passed + + + +=== TEST 12: check log format from logging server +--- exec +tail -n 1 ci/pod/vector/udp.log +--- 
response_body eval +qr/.*logger format in plugin.*/ diff --git a/t/plugin/wolf-rbac.t b/t/plugin/wolf-rbac.t index 34c7068ca56f..8136e3df6bc0 100644 --- a/t/plugin/wolf-rbac.t +++ b/t/plugin/wolf-rbac.t @@ -685,3 +685,53 @@ passed ngx.status = code } } + + + +=== TEST 36: add consumer with echo plugin +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "wolf_rbac_with_other_plugins", + "plugins": { + "wolf-rbac": { + "appid": "wolf-rbac-app", + "server": "http://127.0.0.1:1982" + }, + "echo": { + "body": "consumer merge echo plugins\n" + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed +--- no_error_log +[error] + + + +=== TEST 37: verify echo plugin in consumer +--- request +GET /hello +--- more_headers +Authorization: V1#wolf-rbac-app#wolf-rbac-token +--- response_headers +X-UserId: 100 +X-Username: admin +X-Nickname: administrator +--- response_body +consumer merge echo plugins +--- no_error_log +[error] diff --git a/t/plugin/workflow2.t b/t/plugin/workflow2.t index e2f158f13f3c..686e4bb1321f 100644 --- a/t/plugin/workflow2.t +++ b/t/plugin/workflow2.t @@ -279,3 +279,40 @@ passed ["GET /hello", "GET /hello1", "GET /hello", "GET /hello1"] --- error_code eval [200, 200, 503, 200] + + + +=== TEST 8: test no rules +--- config + location /t { + content_by_lua_block { + local json = require("toolkit.json") + local t = require("lib.test_admin").test + local data = { + uri = "/*", + plugins = { + workflow = { + } + }, + upstream = { + nodes = { + ["127.0.0.1:1980"] = 1 + }, + type = "roundrobin" + } + } + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + json.encode(data) + ) + + if code >= 300 then + ngx.status = code + end + + ngx.print(body) + } + } +--- error_code: 400 +--- response_body +{"error_msg":"failed to check the configuration of plugin workflow 
err: property \"rules\" is required"} diff --git a/t/router/radixtree-sni.t b/t/router/radixtree-sni.t index 9a28e3d78f78..781bf28d9b95 100644 --- a/t/router/radixtree-sni.t +++ b/t/router/radixtree-sni.t @@ -202,7 +202,7 @@ location /t { connected: 1 failed to do SSL handshake: handshake failed --- error_log -failed to find any SSL certificate by SNI +failed to match any SSL certificate by SNI diff --git a/t/router/radixtree-sni2.t b/t/router/radixtree-sni2.t index b34d0b725a3e..c761c9043d45 100644 --- a/t/router/radixtree-sni2.t +++ b/t/router/radixtree-sni2.t @@ -19,7 +19,76 @@ use t::APISIX 'no_plan'; log_level('debug'); no_root_location(); -$ENV{TEST_NGINX_HTML_DIR} ||= html_dir(); +BEGIN { + $ENV{TEST_NGINX_HTML_DIR} ||= html_dir(); + $ENV{TEST_ENV_SSL_CRT} = "-----BEGIN CERTIFICATE----- +MIIEsTCCAxmgAwIBAgIUMbgUUCYHkuKDaPy0bzZowlK0JG4wDQYJKoZIhvcNAQEL +BQAwVzELMAkGA1UEBhMCQ04xEjAQBgNVBAgMCUd1YW5nRG9uZzEPMA0GA1UEBwwG +Wmh1SGFpMQ8wDQYDVQQKDAZpcmVzdHkxEjAQBgNVBAMMCXRlc3QyLmNvbTAgFw0y +MDA0MDQyMjE3NTJaGA8yMTIwMDMxMTIyMTc1MlowVzELMAkGA1UEBhMCQ04xEjAQ +BgNVBAgMCUd1YW5nRG9uZzEPMA0GA1UEBwwGWmh1SGFpMQ8wDQYDVQQKDAZpcmVz +dHkxEjAQBgNVBAMMCXRlc3QyLmNvbTCCAaIwDQYJKoZIhvcNAQEBBQADggGPADCC +AYoCggGBAMQGBk35V3zaNVDWzEzVGd+EkZnUOrRpXQg5mmcnoKnrQ5rQQMsQCbMO +gFvLt/9OEZQmbE2HuEKsPzL79Yjdu8rGjSoQdbJZ9ccO32uvln1gn68iK79o7Tvm +TCi+BayyNA+lo9IxrBm1wGBkOU1ZPasGYzgBAbMLTSDps1EYxNR8t4l9PrTTRsh6 +NZyTYoDeVIsKZ9SckpjWVnxHOkF+AzZzIJJSe2pj572TDLYA/Xw9I4X3L+SHzwTl +iGWNXb2tU367LHERHvensQzdle7mQN2kE5GpB7QPWB+t9V4mn30jc/LyDvOaei6L ++pbl5CriGBTjaR80oXhK765K720BQeKUezri15bQlMaUGQRnzr53ZsqA4PEh6WCX +hUT2ibO32+uZFXzVQw8y/JUkPf76pZagi8DoLV+sfSbUtnpbQ8wyV2qqTM2eCuPi +RgUwXQi2WssKKzrqcgKil3vksHZozLtOmyZiNE4qfNxv+UGoIybJtZmB+9spY0Rw +5zBRuULycQIDAQABo3MwcTAdBgNVHQ4EFgQUCmZefzpizPrb3VbiIDhrA48ypB8w +HwYDVR0jBBgwFoAUCmZefzpizPrb3VbiIDhrA48ypB8wDAYDVR0TBAUwAwEB/zAh +BgNVHREEGjAYggl0ZXN0Mi5jb22CCyoudGVzdDIuY29tMA0GCSqGSIb3DQEBCwUA +A4IBgQA0nRTv1zm1ACugJFfYZfxZ0mLJfRUCFMmFfhy+vGiIu6QtnOFVw/tEOyMa 
+m78lBiqac15n3YWYiHiC5NFffTZ7XVlOjN2i4x2z2IJsHNa8tU80AX0Q/pizGK/d ++dzlcsGBb9MGT18h/B3/EYQFKLjUsr0zvDb1T0YDlRUsN3Bq6CvZmvfe9F7Yh4Z/ +XO5R+rX8w9c9A2jzM5isBw2qp/Ggn5RQodMwApEYkJdu80MuxaY6s3dssS4Ay8wP +VNFEeLcdauJ00ES1OnbnuNiYSiSMOgWBsnR+c8AaSRB/OZLYQQKGGYbq0tspwRjM +MGJRrI/jdKnvJQ8p02abdvA9ZuFChoD3Wg03qQ6bna68ZKPd9peBPpMrDDGDLkGI +NzZ6bLJKILnQkV6b1OHVnPDsKXfXjUTTNK/QLJejTXu9RpMBakYZMzs/SOSDtFlS +A+q25t6+46nvA8msUSBKyOGBX42mJcKvR4OgG44PfDjYfmjn2l+Dz/jNXDclpb+Q +XAzBnfM= +-----END CERTIFICATE-----"; + $ENV{TEST_ENV_SSL_KEY} = "-----BEGIN RSA PRIVATE KEY----- +MIIG5QIBAAKCAYEAxAYGTflXfNo1UNbMTNUZ34SRmdQ6tGldCDmaZyegqetDmtBA +yxAJsw6AW8u3/04RlCZsTYe4Qqw/Mvv1iN27ysaNKhB1sln1xw7fa6+WfWCfryIr +v2jtO+ZMKL4FrLI0D6Wj0jGsGbXAYGQ5TVk9qwZjOAEBswtNIOmzURjE1Hy3iX0+ +tNNGyHo1nJNigN5Uiwpn1JySmNZWfEc6QX4DNnMgklJ7amPnvZMMtgD9fD0jhfcv +5IfPBOWIZY1dva1TfrsscREe96exDN2V7uZA3aQTkakHtA9YH631XiaffSNz8vIO +85p6Lov6luXkKuIYFONpHzSheErvrkrvbQFB4pR7OuLXltCUxpQZBGfOvndmyoDg +8SHpYJeFRPaJs7fb65kVfNVDDzL8lSQ9/vqllqCLwOgtX6x9JtS2eltDzDJXaqpM +zZ4K4+JGBTBdCLZayworOupyAqKXe+SwdmjMu06bJmI0Tip83G/5QagjJsm1mYH7 +2yljRHDnMFG5QvJxAgMBAAECggGBAIELlkruwvGmlULKpWRPReEn3NJwLNVoJ56q +jUMri1FRWAgq4PzNahU+jrHfwxmHw3rMcK/5kQwTaOefh1y63E35uCThARqQroSE +/gBeb6vKWFVrIXG5GbQ9QBXyQroV9r/2Q4q0uJ+UTzklwbNx9G8KnXbY8s1zuyrX +rvzMWYepMwqIMSfJjuebzH9vZ4F+3BlMmF4XVUrYj8bw/SDwXB0UXXT2Z9j6PC1J +CS0oKbgIZ8JhoF3KKjcHBGwWTIf5+byRxeG+z99PBEBafm1Puw1vLfOjD3DN/fso +8xCEtD9pBPBJ+W97x/U+10oKetmP1VVEr2Ph8+s2VH1zsRF5jo5d0GtvJqOwIQJ7 +z3OHJ7lLODw0KAjB1NRXW4dTTUDm6EUuUMWFkGAV6YTyhNLAT0DyrUFJck9RiY48 +3QN8vSf3n/+3wwg1gzcJ9w3W4DUbvGqu86CaUQ4UegfYJlusY/3YGp5bGNQdxmws +lgIoSRrHp6UJKsP8Yl08MIvT/oNLgQKBwQD75SuDeyE0ukhEp0t6v+22d18hfSef +q3lLWMI1SQR9Kiem9Z1KdRkIVY8ZAHANm6D8wgjOODT4QZtiqJd2BJn3Xf+aLfCd +CW0hPvmGTcp/E4sDZ2u0HbIrUStz7ZcgXpjD2JJAJGEKY2Z7J65gnTqbqoBDrw1q +1+FqtikkHRte1UqxjwnWBpSdoRQFgNPHxPWffhML1xsD9Pk1B1b7JoakYcKsNoQM +oXUKPLxSZEtd0hIydqmhGYTa9QWBPNDlA5UCgcEAxzfGbOrPBAOOYZd3jORXQI6p 
+H7SddTHMQyG04i+OWUd0HZFkK7/k6r26GFmImNIsQMB26H+5XoKRFKn+sUl14xHY +FwB140j0XSav2XzT38UpJ9CptbgK1eKGQVp41xwRYjHVScE5hJuA3a1TKM0l26rp +hny/KaP+tXuqt9QbxcUN6efubNYyFP+m6nq2/XdX74bJuGpXLq8W0oFdiocO6tmF +4/Hsc4dCVrcwULqXQa0lJ57zZpfIPARqWM2847xtAoHBANVUNbDpg6rTJMc34722 +dAy3NhL3mqooH9aG+hsEls+l9uT4WFipqSScyU8ERuHPbt0BO1Hi2kFx1rYMUBG8 +PeT4b7NUutVUGV8xpUNv+FH87Bta6CUnjTAQUzuf+QCJ/NjIPrwh0yloG2+roIvk +PLF/CZfI1hUpdZfZZChYmkiLXPHZURw4gH6q33j1rOYf0WFc9aZua0vDmZame6zB +6P+oZ6VPmi/UQXoFC/y/QfDYK18fjfOI2DJTlnDoX4XErQKBwGc3M5xMz/MRcJyJ +oIwj5jzxbRibOJV2tpD1jsU9xG/nQHbtVEwCgTVKFXf2M3qSMhFeZn0xZ7ZayZY+ +OVJbcDO0lBPezjVzIAB/Qc7aCOBAQ4F4b+VRtHN6iPqlSESTK0KH9Szgas+UzeCM +o7BZEctNMu7WBSkq6ZXXu+zAfZ8q6HmPDA3hsFMG3dFQwSxzv+C/IhZlKkRqvNVV +50QVk5oEF4WxW0PECY/qG6NH+YQylDSB+zPlYf4Of5cBCWOoxQKBwQCeo37JpEAR +kYtqSjXkC5GpPTz8KR9lCY4SDuC1XoSVCP0Tk23GX6GGyEf4JWE+fb/gPEFx4Riu +7pvxRwq+F3LaAa/FFTNUpY1+8UuiMO7J0B1RkVXkyJjFUF/aQxAnOoZPmzrdZhWy +bpe2Ka+JS/aXSd1WRN1nmo/DarpWFvdLWZFwUt6zMziH40o1gyPHEuXOqVtf2QCe +Q6WC9xnEz4lbb/fR2TF9QRA4FtoRpDe/f3ZGIpWE0RdwyZZ6uA7T1+Q= +-----END RSA PRIVATE KEY-----"; +} add_block_preprocessor(sub { my ($block) = @_; @@ -336,7 +405,7 @@ location /t { --- response_body failed to do SSL handshake: handshake failed --- error_log -failed to fetch ssl config: failed to find SNI: please check if the client requests via IP or uses an outdated protocol +failed to find SNI: please check if the client requests via IP or uses an outdated protocol --- no_error_log [alert] @@ -543,3 +612,211 @@ qr/(fetch|release) table \w+/ --- grep_error_log_out fetch table api_ctx release table api_ctx + + + +=== TEST 15: store secret into vault +--- exec +VAULT_TOKEN='root' VAULT_ADDR='http://0.0.0.0:8200' vault kv put kv/apisix/ssl test2.com.crt=@t/certs/test2.crt test2.com.key=@t/certs/test2.key +--- response_body +Success! 
Data written to: kv/apisix/ssl + + + +=== TEST 16: set ssl conf with secret ref: vault +--- request +GET /t +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + -- put secret vault config + local code, body = t('/apisix/admin/secrets/vault/test1', + ngx.HTTP_PUT, + [[{ + "uri": "http://127.0.0.1:8200", + "prefix": "kv/apisix", + "token" : "root" + }]] + ) + if code >= 300 then + ngx.status = code + return ngx.say(body) + end + -- set ssl + local code, body = t('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + [[{ + "cert": "$secret://vault/test1/ssl/test2.com.crt", + "key": "$secret://vault/test1/ssl/test2.com.key", + "sni": "test2.com" + }]] + ) + if code >= 300 then + ngx.status = code + return ngx.say(body) + end + + ngx.say("passed") + } + } +--- response_body +passed + + + +=== TEST 17: get cert and key from vault +--- config +listen unix:$TEST_NGINX_HTML_DIR/nginx.sock ssl; + +location /t { + content_by_lua_block { + do + local sock = ngx.socket.tcp() + + sock:settimeout(2000) + + local ok, err = sock:connect("unix:$TEST_NGINX_HTML_DIR/nginx.sock") + if not ok then + ngx.say("failed to connect: ", err) + return + end + + local sess, err = sock:sslhandshake(nil, "test2.com", false) + if not sess then + ngx.say("failed to do SSL handshake: ", err) + return + end + ngx.say("ssl handshake: ", sess ~= nil) + end -- do + -- collectgarbage() + } +} +--- response_body +ssl handshake: true + + + +=== TEST 18: set ssl conf with secret ref: env +--- request +GET /t +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + -- set ssl + local code, body = t('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + [[{ + "cert": "$env://TEST_ENV_SSL_CRT", + "key": "$env://TEST_ENV_SSL_KEY", + "sni": "test2.com" + }]] + ) + if code >= 300 then + ngx.status = code + return ngx.say(body) + end + + ngx.say("passed") + } + } +--- response_body +passed + + + +=== TEST 19: get cert and key from env +--- config +listen 
unix:$TEST_NGINX_HTML_DIR/nginx.sock ssl; + +location /t { + content_by_lua_block { + do + local sock = ngx.socket.tcp() + + sock:settimeout(2000) + + local ok, err = sock:connect("unix:$TEST_NGINX_HTML_DIR/nginx.sock") + if not ok then + ngx.say("failed to connect: ", err) + return + end + + local sess, err = sock:sslhandshake(nil, "test2.com", false) + if not sess then + ngx.say("failed to do SSL handshake: ", err) + return + end + ngx.say("ssl handshake: ", sess ~= nil) + end -- do + -- collectgarbage() + } +} +--- response_body +ssl handshake: true + + + +=== TEST 20: set ssl conf with secret ref: only cert use env +--- request +GET /t +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin") + -- set ssl + local ssl_key = t.read_file("t/certs/test2.key") + local data = { + cert = "$env://TEST_ENV_SSL_CRT", + key = ssl_key, + sni = "TesT2.com" + } + + local code, body = t.test('/apisix/admin/ssls/1', + ngx.HTTP_PUT, + core.json.encode(data) + ) + if code >= 300 then + ngx.status = code + return ngx.say(body) + end + + ngx.say("passed") + } + } +--- response_body +passed + + + +=== TEST 21: get cert from env +--- config +listen unix:$TEST_NGINX_HTML_DIR/nginx.sock ssl; + +location /t { + content_by_lua_block { + do + local sock = ngx.socket.tcp() + + sock:settimeout(2000) + + local ok, err = sock:connect("unix:$TEST_NGINX_HTML_DIR/nginx.sock") + if not ok then + ngx.say("failed to connect: ", err) + return + end + + local sess, err = sock:sslhandshake(nil, "test2.com", false) + if not sess then + ngx.say("failed to do SSL handshake: ", err) + return + end + ngx.say("ssl handshake: ", sess ~= nil) + end -- do + -- collectgarbage() + } +} +--- response_body +ssl handshake: true diff --git a/t/secret/vault.t b/t/secret/vault.t index a31183e37a2c..b3db13179447 100644 --- a/t/secret/vault.t +++ b/t/secret/vault.t @@ -210,7 +210,7 @@ value --- request GET /t --- response_body_like -failed to decode 
result, res: {\"errors\":\[\"permission denied\"\]}\n +failed to decode result, res: \{\"errors\":\[\"permission denied\"\]}\n @@ -235,4 +235,4 @@ failed to decode result, res: {\"errors\":\[\"permission denied\"\]}\n --- request GET /t --- response_body_like -failed to decode result, res: {\"errors\":\[\"permission denied\"\]}\n +failed to decode result, res: \{\"errors\":\[\"permission denied\"\]}\n diff --git a/t/stream-node/sni.t b/t/stream-node/sni.t index f2833d2f494c..41554ba6c0c2 100644 --- a/t/stream-node/sni.t +++ b/t/stream-node/sni.t @@ -276,6 +276,7 @@ proxy request to 127.0.0.2:1995 --- yaml_config apisix: node_listen: 1984 + proxy_mode: http&stream stream_proxy: tcp: - 9100 diff --git a/t/stream-plugin/limit-conn2.t b/t/stream-plugin/limit-conn2.t new file mode 100644 index 000000000000..9efb2b6dfb1f --- /dev/null +++ b/t/stream-plugin/limit-conn2.t @@ -0,0 +1,134 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX; + +my $nginx_binary = $ENV{'TEST_NGINX_BINARY'} || 'nginx'; +my $version = eval { `$nginx_binary -V 2>&1` }; + +if ($version !~ m/\/apisix-nginx-module/) { + plan(skip_all => "apisix-nginx-module not installed"); +} else { + plan('no_plan'); +} + +$ENV{TEST_NGINX_REDIS_PORT} ||= 1985; + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->extra_yaml_config) { + my $extra_yaml_config = <<_EOC_; +xrpc: + protocols: + - name: redis +_EOC_ + $block->set_value("extra_yaml_config", $extra_yaml_config); + } + + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } + + $block; +}); + +worker_connections(1024); +run_tests; + +__DATA__ + +=== TEST 1: create a stream router with limit-conn +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/stream_routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "limit-conn": { + "conn": 2, + "burst": 1, + "default_conn_delay": 0.1, + "key": "$remote_port $server_addr", + "key_type": "var_combination" + } + }, + "upstream": { + "type": "none", + "nodes": { + "127.0.0.1:6379": 1 + } + }, + "protocol": { + "name": "redis" + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 2: access the redis via proxy +--- config + location /t { + content_by_lua_block { + local redis = require "resty.redis" + local red = redis:new() + + local ok, err = red:connect("127.0.0.1", $TEST_NGINX_REDIS_PORT) + if not ok then + ngx.say("failed to connect: ", err) + return + end + + local res, err = red:hmset("animals", "dog", "bark", "cat", "meow") + if not res then + ngx.say("failed to set animals: ", err) + return + end + ngx.say("hmset animals: ", res) + + local res, err = red:hmget("animals", "dog", "cat") + if not res then + ngx.say("failed to get animals: ", err) + return + end + ngx.say("hmget animals: ", res) + + ok, err = red:close() + if not 
ok then + ngx.say("failed to close: ", err) + return + end + } + } +--- response_body +hmset animals: OK +hmget animals: barkmeow +--- no_error_log +attempt to perform arithmetic on field 'request_time' +--- stream_conf_enable diff --git a/t/stream-plugin/mqtt-proxy2.t b/t/stream-plugin/mqtt-proxy2.t index 8c797a353d77..35211878e330 100644 --- a/t/stream-plugin/mqtt-proxy2.t +++ b/t/stream-plugin/mqtt-proxy2.t @@ -75,3 +75,110 @@ passed --- error_log failed to parse domain: loc, error: --- timeout: 10 + + + +=== TEST 3: set upstream(id: 1) +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/upstreams/1', + ngx.HTTP_PUT, + [[{ + "type": "chash", + "key": "mqtt_client_id", + "nodes": [ + { + "host": "0.0.0.0", + "port": 1995, + "weight": 1 + }, + { + "host": "127.0.0.1", + "port": 1995, + "weight": 1 + } + ] + }]] + ) + + ngx.status = code + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 4: balance with mqtt_client_id +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/stream_routes/1', + ngx.HTTP_PUT, + [[{ + "remote_addr": "127.0.0.1", + "server_port": 1985, + "plugins": { + "mqtt-proxy": { + "protocol_name": "MQTT", + "protocol_level": 5 + } + }, + "upstream_id": 1 + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 5: hit route with empty id +--- stream_request eval +"\x10\x0d\x00\x04\x4d\x51\x54\x54\x05\x02\x00\x3c\x00\x00\x00" +--- stream_response +hello world +--- grep_error_log eval +qr/(mqtt client id: \w+|proxy request to \S+)/ +--- grep_error_log_out +proxy request to 127.0.0.1:1995 + + + +=== TEST 6: hit route with different client id, part 1 +--- stream_request eval +"\x10\x0e\x00\x04\x4d\x51\x54\x54\x05\x02\x00\x3c\x00\x00\x01\x66" +--- stream_response +hello 
world +--- grep_error_log eval +qr/(mqtt client id: \w+|proxy request to \S+)/ +--- grep_error_log_out +mqtt client id: f +proxy request to 0.0.0.0:1995 + + + +=== TEST 7: hit route with different client id, part 2 +--- stream_request eval +"\x10\x0e\x00\x04\x4d\x51\x54\x54\x05\x02\x00\x3c\x00\x00\x01\x67" +--- stream_response +hello world +--- grep_error_log eval +qr/(mqtt client id: \w+|proxy request to \S+)/ +--- grep_error_log_out +mqtt client id: g +proxy request to 127.0.0.1:1995 diff --git a/t/tars/discovery/stream/tars.t b/t/tars/discovery/stream/tars.t index b7c55a0f49f5..b674970272ba 100644 --- a/t/tars/discovery/stream/tars.t +++ b/t/tars/discovery/stream/tars.t @@ -19,11 +19,7 @@ use t::APISIX; my $nginx_binary = $ENV{'TEST_NGINX_BINARY'} || 'nginx'; my $version = eval { `$nginx_binary -V 2>&1` }; -if ($version =~ m/\/1.19.3/) { - plan(skip_all => "require OpenResty version >= 1.19.9.1"); -} else { - plan('no_plan'); -} +plan('no_plan'); repeat_each(1); log_level('warn'); diff --git a/t/utils/rfc5424.t b/t/utils/rfc5424.t new file mode 100644 index 000000000000..06051e627cc4 --- /dev/null +++ b/t/utils/rfc5424.t @@ -0,0 +1,83 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX 'no_plan'; + +repeat_each(1); +no_long_string(); +no_root_location(); + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } + +}); + +run_tests(); + +__DATA__ + +=== TEST 1: Compatibility testing +--- config + location /t { + content_by_lua_block { + local rfc5424 = require("apisix.utils.rfc5424") + local structured_data = { + {name = "project", value = "apisix.apache.org"}, + {name = "logstore", value = "apisix.apache.org"}, + {name = "access-key-id", value = "apisix.sls.logger"}, + {name = "access-key-secret", value = "BD274822-96AA-4DA6-90EC-15940FB24444"} + } + local data = rfc5424.encode("SYSLOG", "INFO", "localhost", "apisix", + 123456, "hello world", structured_data) + ngx.say(data) + } + } +--- response_body eval +qr/<46>1.*localhost apisix 123456 - \[logservice project=\"apisix\.apache\.org\" logstore=\"apisix\.apache\.org\" access-key-id=\"apisix\.sls\.logger\" access-key-secret=\"BD274822-96AA-4DA6-90EC-15940FB24444\"\] hello world/ + + + +=== TEST 2: No structured data test +--- config + location /t { + content_by_lua_block { + local rfc5424 = require("apisix.utils.rfc5424") + local data = rfc5424.encode("SYSLOG", "INFO", "localhost", "apisix", + 123456, "hello world") + ngx.say(data) + } + } +--- response_body eval +qr/<46>1.*localhost apisix 123456 - - hello world/ + + + +=== TEST 3: No host and appname test +--- config + location /t { + content_by_lua_block { + local rfc5424 = require("apisix.utils.rfc5424") + local data = rfc5424.encode("SYSLOG", "INFO", nil, nil, + 123456, "hello world") + ngx.say(data) + } + } +--- response_body eval +qr/<46>1.*- - 123456 - - hello world/ diff --git a/t/wasm/fault-injection.t b/t/wasm/fault-injection.t index e1cf2a43f9da..f690e9ea294c 100644 --- a/t/wasm/fault-injection.t +++ b/t/wasm/fault-injection.t @@ -180,3 +180,101 @@ GET /hello --- error_code: 401 --- response_body_like eval qr/401 Authorization 
Required<\/title>/ + + + +=== TEST 7: fault injection +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "wasm_fault_injection": { + "conf": { + "http_status": 401, + "body": "HIT\n" + } + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 8: hit +--- request +GET /hello +--- error_code: 401 +--- response_body +HIT + + + +=== TEST 9: fault injection, with 0 percentage +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "wasm_fault_injection": { + "conf": { + "http_status": 401, + "percentage": 0 + } + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.say(body) + } + } +--- ret_code: 401 +--- response_body +passed + + + +=== TEST 10: hit +--- request +GET /hello +--- response_body +hello world diff --git a/t/wasm/global-rule.t b/t/wasm/global-rule.t index 05f34e3f5b1d..f9fd322e38f2 100644 --- a/t/wasm/global-rule.t +++ b/t/wasm/global-rule.t @@ -57,13 +57,6 @@ __DATA__ local code, body = t('/apisix/admin/global_rules/1', ngx.HTTP_PUT, [[{ - "uri": "/hello", - "upstream": { - "type": "roundrobin", - "nodes": { - "127.0.0.1:1980": 1 - } - }, "plugins": { "wasm_log": { "conf": "blahblah" diff --git a/t/wasm/request-body.t b/t/wasm/request-body.t index e45db39b56a0..156df2527ae8 100644 --- a/t/wasm/request-body.t +++ b/t/wasm/request-body.t @@ -200,3 +200,52 @@ hello qr/request get body: \w+/ --- grep_error_log_out request get body: ell + + + +=== TEST 
8: invalid conf type no set conf +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "wasm-request-body": { + "setting": {"processReqBody":true, "start":1, "size":3} + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.say(body) + } + } +--- error_code: 400 +--- response_body_like eval +qr/property.*conf.*is required/ + + + +=== TEST 9: hit +--- request +POST /hello +hello +--- grep_error_log eval +qr/request get body: \w+/ +--- grep_error_log_out +request get body: ell diff --git a/t/wasm/route.t b/t/wasm/route.t index 6e949dd6b0d8..717e54ebc951 100644 --- a/t/wasm/route.t +++ b/t/wasm/route.t @@ -51,48 +51,147 @@ run_tests(); __DATA__ -=== TEST 1: check schema +=== TEST 1: scheme check with empty json body --- config location /t { content_by_lua_block { - local json = require("toolkit.json") local t = require("lib.test_admin").test - for _, case in ipairs({ - {input = { - }}, - {input = { - conf = {} - }}, - {input = { - conf = "" - }}, - }) do - local code, body = t('/apisix/admin/routes/1', - ngx.HTTP_PUT, - { - id = "1", - uri = "/echo", - upstream = { - type = "roundrobin", - nodes = {} - }, - plugins = { - wasm_log = case.input + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 } - } - ) - ngx.say(json.decode(body).error_msg) + }, + "plugins": { + "wasm_log": {} + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return end + + ngx.say(body) } } ---- response_body -failed to check the configuration of plugin wasm_log err: property "conf" is required -failed to check the configuration of plugin wasm_log err: property "conf" validation failed: wrong 
type: expected string, got table -failed to check the configuration of plugin wasm_log err: property "conf" validation failed: string too short, expected at least 1, got 0 +--- error_code: 400 +--- error_log eval +qr/invalid request body/ + + + +=== TEST 2: scheme check with conf type number +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "wasm_log": {"conf": 123} + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.say(body) + } + } +--- error_code: 400 +--- error_log eval +qr/invalid request body/ + + + +=== TEST 3: scheme check with conf json type +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "wasm_log": {"conf": {}}} + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.say(body) + } + } +--- error_code: 400 +--- response_body_like eval +qr/value should match only one schema, but matches none/ -=== TEST 2: sanity + +=== TEST 4: scheme check with conf json type +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/hello", + "upstream": { + "type": "roundrobin", + "nodes": { + "127.0.0.1:1980": 1 + } + }, + "plugins": { + "wasm_log": {"conf": ""}} + }]] + ) + + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + + ngx.say(body) + } + } +--- error_code: 400 +--- response_body_like eval +qr/value should match only one schema, but matches none/ + + + +=== TEST 5: sanity --- 
config location /t { content_by_lua_block { @@ -132,7 +231,7 @@ passed -=== TEST 3: hit +=== TEST 6: hit --- request GET /hello --- grep_error_log eval @@ -143,7 +242,7 @@ run plugin ctx 1 with conf zzz in http ctx 2 -=== TEST 4: run wasm plugin in rewrite phase (prior to the one run in access phase) +=== TEST 7: run wasm plugin in rewrite phase (prior to the one run in access phase) --- extra_yaml_config wasm: plugins: @@ -164,7 +263,7 @@ run plugin ctx 1 with conf blahblah in http ctx 2 -=== TEST 5: plugin from service +=== TEST 8: plugin from service --- config location /t { content_by_lua_block { @@ -231,7 +330,7 @@ passed -=== TEST 6: hit +=== TEST 9: hit --- config location /t { content_by_lua_block { @@ -262,7 +361,7 @@ run plugin ctx 3 with conf blahblah in http ctx 4 -=== TEST 7: plugin from plugin_config +=== TEST 10: plugin from plugin_config --- config location /t { content_by_lua_block { @@ -335,7 +434,7 @@ passed -=== TEST 8: hit +=== TEST 11: hit --- config location /t { content_by_lua_block { diff --git a/t/xds-library/config_xds.t b/t/xds-library/config_xds.t index b82b7d9c5b07..98b407068e5b 100644 --- a/t/xds-library/config_xds.t +++ b/t/xds-library/config_xds.t @@ -112,11 +112,17 @@ qr/can not load xDS library/ --- config location /t { content_by_lua_block { - -- wait for xds library sync data - ngx.sleep(1.5) local core = require("apisix.core") - local version = ngx.shared["xds-config-version"]:get("version") - ngx.say(version) + local version + for i = 1, 5 do + version = ngx.shared["xds-config-version"]:get("version") + if version then + ngx.say(version) + break + end + -- wait for xds library sync data + ngx.sleep(1.5) + end } } --- response_body eval diff --git a/t/xds-library/main.go b/t/xds-library/main.go index 4327d8305898..6ee3bb2eac62 100644 --- a/t/xds-library/main.go +++ b/t/xds-library/main.go @@ -34,7 +34,6 @@ import "C" import ( "context" "fmt" - "math/rand" "strconv" "time" "unsafe" @@ -84,7 +83,7 @@ func write_config(config_zone 
unsafe.Pointer, version_zone unsafe.Pointer) { "create_time": 1646972532, "uri": "/hello1", "priority": 0, -"id": "1", +"id": "2", "upstream_id": "1" }`) @@ -95,17 +94,21 @@ func write_config(config_zone unsafe.Pointer, version_zone unsafe.Pointer) { } +func get_version() string { + return strconv.FormatInt(time.Now().UnixNano()/1e6, 10) +} + func update_conf_version(zone unsafe.Pointer) { ctx := context.Background() + key := "version" + write_shdict(key, get_version(), zone) go func() { for { select { case <-ctx.Done(): return - case <-time.After(time.Second * time.Duration(rand.Intn(10))): - key := "version" - version := strconv.FormatInt(time.Now().UnixNano()/1e6, 10) - write_shdict(key, version, zone) + case <-time.After(time.Second * 5): + write_shdict("version", get_version(), zone) } } }() diff --git a/t/xrpc/dubbo.t b/t/xrpc/dubbo.t new file mode 100644 index 000000000000..290eadb3c6fa --- /dev/null +++ b/t/xrpc/dubbo.t @@ -0,0 +1,168 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +use t::APISIX; + +my $nginx_binary = $ENV{'TEST_NGINX_BINARY'} || 'nginx'; +my $version = eval { `$nginx_binary -V 2>&1` }; + +if ($version !~ m/\/apisix-nginx-module/) { + plan(skip_all => "apisix-nginx-module not installed"); +} else { + plan('no_plan'); +} +add_block_preprocessor(sub { + my ($block) = @_; + + if (!$block->extra_yaml_config) { + my $extra_yaml_config = <<_EOC_; +xrpc: + protocols: + - name: dubbo +_EOC_ + $block->set_value("extra_yaml_config", $extra_yaml_config); + } + + my $config = $block->config // <<_EOC_; + location /t { + content_by_lua_block { + ngx.req.read_body() + local sock = ngx.socket.tcp() + sock:settimeout(1000) + local ok, err = sock:connect("127.0.0.1", 1985) + if not ok then + ngx.log(ngx.ERR, "failed to connect: ", err) + return ngx.exit(503) + end + + local bytes, err = sock:send(ngx.req.get_body_data()) + if not bytes then + ngx.log(ngx.ERR, "send stream request error: ", err) + return ngx.exit(503) + end + while true do + local data, err = sock:receiveany(4096) + if not data then + sock:close() + break + end + ngx.print(data) + end + } + } +_EOC_ + + $block->set_value("config", $config); + + if ((!defined $block->error_log) && (!defined $block->no_error_log)) { + $block->set_value("no_error_log", "[error]\nRPC is not finished"); + } + + $block; +}); + +worker_connections(1024); +run_tests; + +__DATA__ + +=== TEST 1: init +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/stream_routes/1', + ngx.HTTP_PUT, + { + protocol = { + name = "dubbo" + }, + upstream = { + nodes = { + ["127.0.0.1:20880"] = 1 + }, + type = "roundrobin" + } + } + ) + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- request +GET /t +--- response_body +passed + + + +=== TEST 2: use dubbo_backend_provider server. 
request=org.apache.dubbo.backend.DemoService,service_version:1.0.1#hello,response=dubbo success & 200 +--- request eval +"GET /t +\xda\xbb\xc2\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xef\x05\x32\x2e\x30\x2e\x32\x30\x24\x6f\x72\x67\x2e\x61\x70\x61\x63\x68\x65\x2e\x64\x75\x62\x62\x6f\x2e\x62\x61\x63\x6b\x65\x6e\x64\x2e\x44\x65\x6d\x6f\x53\x65\x72\x76\x69\x63\x65\x05\x31\x2e\x30\x2e\x30\x05\x68\x65\x6c\x6c\x6f\x0f\x4c\x6a\x61\x76\x61\x2f\x75\x74\x69\x6c\x2f\x4d\x61\x70\x3b\x48\x04\x6e\x61\x6d\x65\x08\x7a\x68\x61\x6e\x67\x73\x61\x6e\x5a\x48\x04\x70\x61\x74\x68\x30\x24\x6f\x72\x67\x2e\x61\x70\x61\x63\x68\x65\x2e\x64\x75\x62\x62\x6f\x2e\x62\x61\x63\x6b\x65\x6e\x64\x2e\x44\x65\x6d\x6f\x53\x65\x72\x76\x69\x63\x65\x12\x72\x65\x6d\x6f\x74\x65\x2e\x61\x70\x70\x6c\x69\x63\x61\x74\x69\x6f\x6e\x0b\x73\x70\x2d\x63\x6f\x6e\x73\x75\x6d\x65\x72\x09\x69\x6e\x74\x65\x72\x66\x61\x63\x65\x30\x24\x6f\x72\x67\x2e\x61\x70\x61\x63\x68\x65\x2e\x64\x75\x62\x62\x6f\x2e\x62\x61\x63\x6b\x65\x6e\x64\x2e\x44\x65\x6d\x6f\x53\x65\x72\x76\x69\x63\x65\x07\x76\x65\x72\x73\x69\x6f\x6e\x05\x31\x2e\x30\x2e\x30\x07\x74\x69\x6d\x65\x6f\x75\x74\x04\x31\x30\x30\x30\x5a" +--- response_body eval +"\xda\xbb\x02\x14\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x30\x94\x48\x04\x62\x6f\x64\x79\x0e\x64\x75\x62\x62\x6f\x20\x73\x75\x63\x63\x65\x73\x73\x0a\x06\x73\x74\x61\x74\x75\x73\x03\x32\x30\x30\x5a\x48\x05\x64\x75\x62\x62\x6f\x05\x32\x2e\x30\x2e\x32\x5a" +--- stream_conf_enable +--- log_level: debug +--- no_error_log + + + +=== TEST 3: heart beat. request=\xe2|11..,response=\x22|00... +--- request eval +"GET /t +\xda\xbb\xe2\x00\x00\x00\x00\x00\x00\x00\x00\x34\x00\x00\x00\x01\x4e" +--- response_body eval +"\xda\xbb\x22\x14\x00\x00\x00\x00\x00\x00\x00\x34\x00\x00\x00\x01\x4e" +--- stream_conf_enable +--- log_level: debug +--- no_error_log + + + +=== TEST 4: no response. 
Different from test2 \x82=10000010, the second bit=0 of the third byte means no need to return +--- request eval +"GET /t +\xda\xbb\x82\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xef\x05\x32\x2e\x30\x2e\x32\x30\x24\x6f\x72\x67\x2e\x61\x70\x61\x63\x68\x65\x2e\x64\x75\x62\x62\x6f\x2e\x62\x61\x63\x6b\x65\x6e\x64\x2e\x44\x65\x6d\x6f\x53\x65\x72\x76\x69\x63\x65\x05\x31\x2e\x30\x2e\x30\x05\x68\x65\x6c\x6c\x6f\x0f\x4c\x6a\x61\x76\x61\x2f\x75\x74\x69\x6c\x2f\x4d\x61\x70\x3b\x48\x04\x6e\x61\x6d\x65\x08\x7a\x68\x61\x6e\x67\x73\x61\x6e\x5a\x48\x04\x70\x61\x74\x68\x30\x24\x6f\x72\x67\x2e\x61\x70\x61\x63\x68\x65\x2e\x64\x75\x62\x62\x6f\x2e\x62\x61\x63\x6b\x65\x6e\x64\x2e\x44\x65\x6d\x6f\x53\x65\x72\x76\x69\x63\x65\x12\x72\x65\x6d\x6f\x74\x65\x2e\x61\x70\x70\x6c\x69\x63\x61\x74\x69\x6f\x6e\x0b\x73\x70\x2d\x63\x6f\x6e\x73\x75\x6d\x65\x72\x09\x69\x6e\x74\x65\x72\x66\x61\x63\x65\x30\x24\x6f\x72\x67\x2e\x61\x70\x61\x63\x68\x65\x2e\x64\x75\x62\x62\x6f\x2e\x62\x61\x63\x6b\x65\x6e\x64\x2e\x44\x65\x6d\x6f\x53\x65\x72\x76\x69\x63\x65\x07\x76\x65\x72\x73\x69\x6f\x6e\x05\x31\x2e\x30\x2e\x30\x07\x74\x69\x6d\x65\x6f\x75\x74\x04\x31\x30\x30\x30\x5a" +--- response_body eval +"" +--- stream_conf_enable +--- log_level: debug +--- no_error_log + + + +=== TEST 5: failed response. 
request=org.apache.dubbo.backend.DemoService,service_version:1.0.1#fail,response=503 +--- request eval +"GET /t +\xda\xbb\xc2\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xee\x05\x32\x2e\x30\x2e\x32\x30\x24\x6f\x72\x67\x2e\x61\x70\x61\x63\x68\x65\x2e\x64\x75\x62\x62\x6f\x2e\x62\x61\x63\x6b\x65\x6e\x64\x2e\x44\x65\x6d\x6f\x53\x65\x72\x76\x69\x63\x65\x05\x31\x2e\x30\x2e\x30\x04\x66\x61\x69\x6c\x0f\x4c\x6a\x61\x76\x61\x2f\x75\x74\x69\x6c\x2f\x4d\x61\x70\x3b\x48\x04\x6e\x61\x6d\x65\x08\x7a\x68\x61\x6e\x67\x73\x61\x6e\x5a\x48\x04\x70\x61\x74\x68\x30\x24\x6f\x72\x67\x2e\x61\x70\x61\x63\x68\x65\x2e\x64\x75\x62\x62\x6f\x2e\x62\x61\x63\x6b\x65\x6e\x64\x2e\x44\x65\x6d\x6f\x53\x65\x72\x76\x69\x63\x65\x12\x72\x65\x6d\x6f\x74\x65\x2e\x61\x70\x70\x6c\x69\x63\x61\x74\x69\x6f\x6e\x0b\x73\x70\x2d\x63\x6f\x6e\x73\x75\x6d\x65\x72\x09\x69\x6e\x74\x65\x72\x66\x61\x63\x65\x30\x24\x6f\x72\x67\x2e\x61\x70\x61\x63\x68\x65\x2e\x64\x75\x62\x62\x6f\x2e\x62\x61\x63\x6b\x65\x6e\x64\x2e\x44\x65\x6d\x6f\x53\x65\x72\x76\x69\x63\x65\x07\x76\x65\x72\x73\x69\x6f\x6e\x05\x31\x2e\x30\x2e\x30\x07\x74\x69\x6d\x65\x6f\x75\x74\x04\x31\x30\x30\x30\x5a" +--- response_body eval +"\xda\xbb\x02\x14\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x2d\x94\x48\x04\x62\x6f\x64\x79\x0b\x64\x75\x62\x62\x6f\x20\x66\x61\x69\x6c\x0a\x06\x73\x74\x61\x74\x75\x73\x03\x35\x30\x33\x5a\x48\x05\x64\x75\x62\x62\x6f\x05\x32\x2e\x30\x2e\x32\x5a" +--- stream_conf_enable +--- log_level: debug +--- no_error_log + + + +=== TEST 6: invalid magic(dabc<>dabb) for heart beat. 
+--- request eval +"GET /t +\xda\xbc\xe2\x00\x00\x00\x00\x00\x00\x00\x00\x34\x00\x00\x00\x01\x4e" +--- error_log +unknown magic number +--- stream_conf_enable diff --git a/t/xrpc/pingpong2.t b/t/xrpc/pingpong2.t index fc77fa1482df..7365929ac326 100644 --- a/t/xrpc/pingpong2.t +++ b/t/xrpc/pingpong2.t @@ -497,7 +497,7 @@ passed --- stream_conf_enable --- wait: 0.5 --- error_log eval -qr/message received:.*\"client_ip\\"\:\\"127.0.0.1\\"/ +qr/message received:.*\"client_ip\"\:\"127.0.0.1\"/ @@ -556,7 +556,7 @@ passed --- stream_conf_enable --- wait: 0.5 --- error_log eval -qr/message received:.*\"client_ip\\"\:\\"127.0.0.1\\"/ +qr/message received:.*\"client_ip\"\:\"127.0.0.1\"/ diff --git a/t/xrpc/pingpong3.t b/t/xrpc/pingpong3.t index c6d98810d656..da16e626b9e4 100644 --- a/t/xrpc/pingpong3.t +++ b/t/xrpc/pingpong3.t @@ -190,4 +190,4 @@ passed --- stream_conf_enable --- wait: 0.5 --- error_log eval -qr/message received:.*\"rpc_time\\"\:(0.\d+|0)\}\"/ +qr/message received:.*\"rpc_time\"\:(0.\d+|0)\}/ diff --git a/t/xrpc/redis2.t b/t/xrpc/redis2.t index 65ca9829c616..076a406b4f4f 100644 --- a/t/xrpc/redis2.t +++ b/t/xrpc/redis2.t @@ -16,6 +16,8 @@ # use t::APISIX; +log_level("warn"); + my $nginx_binary = $ENV{'TEST_NGINX_BINARY'} || 'nginx'; my $version = eval { `$nginx_binary -V 2>&1` }; @@ -117,7 +119,7 @@ passed { name = "syslog", filter = { - {"rpc_time", ">=", 0.01}, + {"rpc_time", ">=", 0.001}, }, conf = { host = "127.0.0.1", @@ -149,6 +151,7 @@ passed === TEST 3: verify the data received by the log server +--- stream_conf_enable --- config location /t { content_by_lua_block { @@ -190,11 +193,10 @@ passed hmset animals: OK hmget animals: barkmeow ping: pong ---- stream_conf_enable --- wait: 1 --- grep_error_log eval -qr/message received:.*\"redis_cmd_line\\"\:[^,]+/ +qr/message received:.*\"redis_cmd_line\":[^}|^,]+/ --- grep_error_log_out eval -[qr/message received:.*\"redis_cmd_line\\"\:\\\"hmset animals dog bark cat meow\\\"/, -qr/message 
received:.*\"redis_cmd_line\\"\:\\\"hmget animals dog cat\\\"/, -qr/message received:.*\"redis_cmd_line\\"\:\\\"ping\\\"/] +qr{message received:.*\"redis_cmd_line\":\"hmset animals dog bark cat meow\"(?s).* +message received:.*\"redis_cmd_line\":\"hmget animals dog cat\"(?s).* +message received:.*\"redis_cmd_line\":\"ping\"} diff --git a/utils/install-dependencies.sh b/utils/install-dependencies.sh index 11851cbae294..85cfa7483081 100755 --- a/utils/install-dependencies.sh +++ b/utils/install-dependencies.sh @@ -103,7 +103,7 @@ function multi_distro_installation() { install_dependencies_with_apt "debian" elif grep -Eqi "Ubuntu" /etc/issue || grep -Eq "Ubuntu" /etc/*-release; then install_dependencies_with_apt "ubuntu" - elif grep -Eqi "Arch" /etc/issue || grep -Eq "Arch" /etc/*-release; then + elif grep -Eqi "Arch" /etc/issue || grep -Eqi "EndeavourOS" /etc/issue || grep -Eq "Arch" /etc/*-release; then install_dependencies_with_aur else echo "Non-supported operating system version"