diff --git a/.asf.yaml b/.asf.yaml
index 3b495a705ca7..010df03881f8 100644
--- a/.asf.yaml
+++ b/.asf.yaml
@@ -53,6 +53,18 @@ github:
dismiss_stale_reviews: true
require_code_owner_reviews: true
required_approving_review_count: 2
+ release/3.5:
+ required_pull_request_reviews:
+ require_code_owner_reviews: true
+ required_approving_review_count: 2
+ release/3.4:
+ required_pull_request_reviews:
+ require_code_owner_reviews: true
+ required_approving_review_count: 2
+ release/3.3:
+ required_pull_request_reviews:
+ require_code_owner_reviews: true
+ required_approving_review_count: 2
release/3.2:
required_pull_request_reviews:
require_code_owner_reviews: true
diff --git a/.github/actions/autocorrect b/.github/actions/autocorrect
new file mode 160000
index 000000000000..2aefcaa00480
--- /dev/null
+++ b/.github/actions/autocorrect
@@ -0,0 +1 @@
+Subproject commit 2aefcaa0048021cfade67c6cae2b9503f97243f9
diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index efa5b8f631e3..95e72fe578b9 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -28,11 +28,10 @@ jobs:
- ubuntu-20.04
os_name:
- linux_openresty
- - linux_openresty_1_19
test_dir:
- t/plugin/[a-k]*
- t/plugin/[l-z]*
- - t/admin t/cli t/config-center-yaml t/control t/core t/debug t/deployment t/discovery t/error_page t/misc
+ - t/admin t/cli t/config-center-yaml t/control t/core t/debug t/discovery t/error_page t/misc
- t/node t/pubsub t/router t/script t/secret t/stream-node t/utils t/wasm t/xds-library t/xrpc
runs-on: ${{ matrix.platform }}
@@ -43,7 +42,7 @@ jobs:
steps:
- name: Check out code
- uses: actions/checkout@v3.2.0
+ uses: actions/checkout@v4
with:
submodules: recursive
@@ -137,7 +136,7 @@ jobs:
[[ ${{ steps.test_env.outputs.type }} != first ]] && sudo ./ci/init-${{ steps.test_env.outputs.type }}-test-service.sh after
echo "Linux launch services, done."
- name: Start Dubbo Backend
- if: matrix.os_name == 'linux_openresty' && steps.test_env.outputs.type == 'plugin'
+ if: matrix.os_name == 'linux_openresty' && (steps.test_env.outputs.type == 'plugin' || steps.test_env.outputs.type == 'last')
run: |
sudo apt install -y maven
cd t/lib/dubbo-backend
diff --git a/.github/workflows/centos7-ci.yml b/.github/workflows/centos7-ci.yml
index dc08b0fd384e..2be0c39cbb94 100644
--- a/.github/workflows/centos7-ci.yml
+++ b/.github/workflows/centos7-ci.yml
@@ -30,12 +30,12 @@ jobs:
test_dir:
- t/plugin/[a-k]*
- t/plugin/[l-z]*
- - t/admin t/cli t/config-center-yaml t/control t/core t/debug t/deployment t/discovery t/error_page t/misc
+ - t/admin t/cli t/config-center-yaml t/control t/core t/debug t/discovery t/error_page t/misc
- t/node t/pubsub t/router t/script t/secret t/stream-node t/utils t/wasm t/xds-library
steps:
- name: Check out code
- uses: actions/checkout@v3.2.0
+ uses: actions/checkout@v4
with:
submodules: recursive
diff --git a/.github/workflows/chaos.yml b/.github/workflows/chaos.yml
index 2bf3518dd2ab..7b47664c55e9 100644
--- a/.github/workflows/chaos.yml
+++ b/.github/workflows/chaos.yml
@@ -19,7 +19,7 @@ jobs:
runs-on: ubuntu-latest
timeout-minutes: 35
steps:
- - uses: actions/checkout@v3.2.0
+ - uses: actions/checkout@v4
with:
submodules: recursive
diff --git a/.github/workflows/cli-master.yml b/.github/workflows/cli-master.yml
index dd77dcd1537c..d521a9d7a103 100644
--- a/.github/workflows/cli-master.yml
+++ b/.github/workflows/cli-master.yml
@@ -33,7 +33,7 @@ jobs:
steps:
- name: Check out code
- uses: actions/checkout@v3.2.0
+ uses: actions/checkout@v4
with:
submodules: recursive
diff --git a/.github/workflows/cli.yml b/.github/workflows/cli.yml
index 2f76670a0611..7c50e3fc58d2 100644
--- a/.github/workflows/cli.yml
+++ b/.github/workflows/cli.yml
@@ -31,14 +31,14 @@ jobs:
- linux_apisix_current_luarocks_in_customed_nginx
runs-on: ${{ matrix.platform }}
- timeout-minutes: 15
+ timeout-minutes: 30
env:
SERVER_NAME: ${{ matrix.job_name }}
OPENRESTY_VERSION: default
steps:
- name: Check out code
- uses: actions/checkout@v3.2.0
+ uses: actions/checkout@v4
with:
submodules: recursive
diff --git a/.github/workflows/close-unresponded.yml b/.github/workflows/close-unresponded.yml
new file mode 100644
index 000000000000..52e81228eba2
--- /dev/null
+++ b/.github/workflows/close-unresponded.yml
@@ -0,0 +1,38 @@
+name: Check Issues
+
+on:
+ workflow_dispatch:
+ schedule:
+ - cron: '0 10 * * *'
+
+permissions:
+ contents: read
+
+jobs:
+ prune_stale:
+ permissions:
+ issues: write # for actions/stale to close stale issues
+ name: Prune Unresponded
+ runs-on: ubuntu-latest
+ timeout-minutes: 10
+
+ steps:
+ - name: Prune Stale
+ uses: actions/stale@v8
+ with:
+ days-before-issue-stale: 14
+ days-before-issue-close: 3
+ stale-issue-message: >
+            Due to lack of the reporter's response, this issue has been labeled with "no response".
+            It will be closed in 3 days if no further activity occurs. If this issue is still
+ relevant, please simply write any comment. Even if closed, you can still revive the
+ issue at any time or discuss it on the dev@apisix.apache.org list.
+ Thank you for your contributions.
+ close-issue-message: >
+ This issue has been closed due to lack of activity. If you think that
+ is incorrect, or the issue requires additional review, you can revive the issue at
+ any time.
+          # Only issues with these labels will be considered stale.
+ only-labels: 'wait for update'
+ stale-issue-label: 'no response'
+ ascending: true
diff --git a/.github/workflows/code-lint.yml b/.github/workflows/code-lint.yml
index 07a1807f811e..9e2befc9ac1f 100644
--- a/.github/workflows/code-lint.yml
+++ b/.github/workflows/code-lint.yml
@@ -15,7 +15,7 @@ jobs:
runs-on: ubuntu-latest
timeout-minutes: 10
steps:
- - uses: actions/checkout@v3.2.0
+ - uses: actions/checkout@v4
- name: Install
run: |
. ./ci/common.sh
@@ -37,7 +37,7 @@ jobs:
timeout-minutes: 5
steps:
- name: Checkout code
- uses: actions/checkout@v3.2.0
+ uses: actions/checkout@v4
- name: Shellcheck code
run: |
diff --git a/.github/workflows/doc-lint.yml b/.github/workflows/doc-lint.yml
index 7a8b19904def..25e371162fbf 100644
--- a/.github/workflows/doc-lint.yml
+++ b/.github/workflows/doc-lint.yml
@@ -20,9 +20,9 @@ jobs:
runs-on: ubuntu-latest
timeout-minutes: 1
steps:
- - uses: actions/checkout@v3.2.0
+ - uses: actions/checkout@v4
- name: 🚀 Use Node.js
- uses: actions/setup-node@v3.5.1
+ uses: actions/setup-node@v3.8.1
with:
node-version: "12.x"
- run: npm install -g markdownlint-cli@0.25.0
@@ -47,8 +47,10 @@ jobs:
runs-on: ubuntu-latest
timeout-minutes: 1
steps:
- - uses: actions/checkout@v3
+ - uses: actions/checkout@v4
+ with:
+ submodules: recursive
- name: Check Chinese copywriting
- uses: huacnlee/autocorrect-action@v2.6.2
+ uses: ./.github/actions/autocorrect
with:
- args: --lint --no-diff-bg-color docs/zh/latest/**.md
+ args: autocorrect --lint --no-diff-bg-color ./docs/zh/latest/
diff --git a/.github/workflows/fips.yml b/.github/workflows/fips.yml
index 5b2a004d72b2..2115b0a7b8c8 100644
--- a/.github/workflows/fips.yml
+++ b/.github/workflows/fips.yml
@@ -30,8 +30,9 @@ jobs:
# The RSA and SHA tests are fully covered by jwt-auth and hmac-auth plugin tests, while other plugins only repeat such tests.
- t/plugin/jwt-auth2.t t/plugin/jwt-auth.t t/plugin/hmac-auth.t
# all SSL related core tests are covered by below two lists.
- - t/admin/ssl* t/admin/schema.t t/admin/upstream.t t/config-center-yaml/ssl.t t/core/etcd-mtls.t t/core/config_etcd.t t/deployment/conf_server.t t/misc/patch.t
+ - t/admin/ssl* t/admin/schema.t t/admin/upstream.t t/config-center-yaml/ssl.t t/core/etcd-mtls.t t/core/config_etcd.t t/misc/patch.t
- t/node/grpc-proxy-unary.t t/node/upstream-keepalive-pool.t t/node/upstream-websocket.t t/node/client-mtls.t t/node/upstream-mtls.t t/pubsub/kafka.t t/router/radixtree-sni2.t t/router/multi-ssl-certs.t t/router/radixtree-sni.t t/stream-node/mtls.t t/stream-node/tls.t t/stream-node/upstream-tls.t t/stream-node/sni.t
+ - t/fips
runs-on: ${{ matrix.platform }}
timeout-minutes: 90
@@ -41,7 +42,7 @@ jobs:
steps:
- name: Check out code
- uses: actions/checkout@v3.2.0
+ uses: actions/checkout@v4
with:
submodules: recursive
@@ -62,13 +63,18 @@ jobs:
path: ~/openssl-3.0
key: ${{ runner.os }}-${{ env.cache-name }}-${{ matrix.os_name }}
+ - name: set openssl prefix
+ id: set_openssl_prefix
+ shell: bash
+ run: |
+ echo "openssl3_prefix=$HOME" >>$GITHUB_OUTPUT
+
- name: Toggle openssl compile
id: test_ssl_env
shell: bash
if: steps.cache-openssl.outputs.cache-hit != 'true'
run: |
echo "openssl3=yes" >>$GITHUB_OUTPUT
- echo "openssl3_prefix=$HOME" >>$GITHUB_OUTPUT
- name: Extract test type
shell: bash
@@ -78,6 +84,9 @@ jobs:
if [[ $test_dir =~ 't/plugin' ]]; then
echo "type=plugin" >>$GITHUB_OUTPUT
fi
+ if [[ $test_dir =~ 't/fips' ]]; then
+ echo "type=plugin" >>$GITHUB_OUTPUT
+ fi
if [[ $test_dir =~ 't/admin' ]]; then
echo "type=first" >>$GITHUB_OUTPUT
fi
@@ -92,6 +101,7 @@ jobs:
- name: Linux launch common services
run: |
make ci-env-up project_compose_ci=ci/pod/docker-compose.common.yml
+ sudo ./ci/init-common-test-service.sh
- name: Cache images
id: cache-images
@@ -131,7 +141,7 @@ jobs:
- name: Linux Install
env:
COMPILE_OPENSSL3: ${{ steps.test_ssl_env.outputs.openssl3 }}
- OPENSSL3_PREFIX: ${{ steps.test_ssl_env.outputs.openssl3_prefix }}
+ OPENSSL3_PREFIX: ${{ steps.set_openssl_prefix.outputs.openssl3_prefix }}
USE_OPENSSL3: yes
run: |
sudo --preserve-env=OPENRESTY_VERSION \
diff --git a/.github/workflows/fuzzing-ci.yaml b/.github/workflows/fuzzing-ci.yaml
index ec3701532d77..4d313ebfeb1a 100644
--- a/.github/workflows/fuzzing-ci.yaml
+++ b/.github/workflows/fuzzing-ci.yaml
@@ -27,7 +27,7 @@ jobs:
steps:
- name: Check out code
- uses: actions/checkout@v3.2.0
+ uses: actions/checkout@v4
with:
submodules: recursive
diff --git a/.github/workflows/gm-cron.yaml b/.github/workflows/gm-cron.yaml
index 669a21798be3..f0327540498e 100644
--- a/.github/workflows/gm-cron.yaml
+++ b/.github/workflows/gm-cron.yaml
@@ -20,7 +20,7 @@ jobs:
test_dir:
- t/plugin/[a-k]*
- t/plugin/[l-z]*
- - t/admin t/cli t/config-center-yaml t/control t/core t/debug t/deployment t/discovery t/error_page t/misc
+ - t/admin t/cli t/config-center-yaml t/control t/core t/debug t/discovery t/error_page t/misc
- t/node t/pubsub t/router t/script t/stream-node t/utils t/wasm t/xds-library t/xrpc
runs-on: ${{ matrix.platform }}
@@ -33,7 +33,7 @@ jobs:
# scripts or a separate action?
steps:
- name: Check out code
- uses: actions/checkout@v3.2.0
+ uses: actions/checkout@v4
with:
submodules: recursive
diff --git a/.github/workflows/gm.yml b/.github/workflows/gm.yml
index 06663006e942..297c746caebd 100644
--- a/.github/workflows/gm.yml
+++ b/.github/workflows/gm.yml
@@ -39,7 +39,7 @@ jobs:
steps:
- name: Check out code
- uses: actions/checkout@v3.2.0
+ uses: actions/checkout@v4
with:
submodules: recursive
@@ -72,6 +72,7 @@ jobs:
- name: Linux launch common services
run: |
make ci-env-up project_compose_ci=ci/pod/docker-compose.common.yml
+ sudo ./ci/init-common-test-service.sh
- name: Linux Before install
env:
diff --git a/.github/workflows/kubernetes-ci.yml b/.github/workflows/kubernetes-ci.yml
index ea72fe57144c..dc8857739b85 100644
--- a/.github/workflows/kubernetes-ci.yml
+++ b/.github/workflows/kubernetes-ci.yml
@@ -28,7 +28,6 @@ jobs:
- ubuntu-20.04
os_name:
- linux_openresty
- - linux_openresty_1_19
runs-on: ${{ matrix.platform }}
timeout-minutes: 15
@@ -38,7 +37,7 @@ jobs:
steps:
- name: Check out code
- uses: actions/checkout@v3.2.0
+ uses: actions/checkout@v4
with:
submodules: recursive
diff --git a/.github/workflows/license-checker.yml b/.github/workflows/license-checker.yml
index 2122e0db8fbc..830f1a1802e3 100644
--- a/.github/workflows/license-checker.yml
+++ b/.github/workflows/license-checker.yml
@@ -30,8 +30,8 @@ jobs:
timeout-minutes: 3
steps:
- - uses: actions/checkout@v3.2.0
+ - uses: actions/checkout@v4
- name: Check License Header
- uses: apache/skywalking-eyes@v0.4.0
+ uses: apache/skywalking-eyes@v0.5.0
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.github/workflows/link-check.yml b/.github/workflows/link-check.yml
new file mode 100644
index 000000000000..20b2f16ec94e
--- /dev/null
+++ b/.github/workflows/link-check.yml
@@ -0,0 +1,49 @@
+name: 'Link Checker'
+
+# **What it does**: Renders the content of every page and checks all internal links.
+# **Why we have it**: To make sure all links connect correctly.
+# **Who does it impact**: Docs content.
+
+on:
+ workflow_dispatch:
+ push:
+ # branches: [master, 'release/**']
+ paths:
+ - '**/*.md'
+ - '**/link-check.yml'
+ pull_request:
+ branches: [master, "release/**"]
+ paths:
+ - '**/*.md'
+ - '**/link-check.yml'
+
+permissions:
+ contents: read
+ # Needed for the 'trilom/file-changes-action' action
+ pull-requests: read
+
+# This allows a subsequently queued workflow run to interrupt previous runs
+concurrency:
+ group: '${{ github.workflow }} @ ${{ github.event.pull_request.head.label || github.head_ref || github.ref }}'
+ cancel-in-progress: true
+
+jobs:
+ check-links:
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v4
+
+ - name: Get script
+ run: |
+ wget https://raw.githubusercontent.com/xuruidong/markdown-link-checker/main/link_checker.py
+
+ - name: Setup python
+ uses: actions/setup-python@v4
+ with:
+ python-version: '3.9'
+
+ - name: Link check (critical, all files)
+ run: |
+ # python link_checker.py ./ --enable-external --ignore "http://apisix.iresty.com" "https://www.upyun.com" "https://github.com/apache/apisix/actions/workflows/build.yml/badge.svg" "https://httpbin.org/" "https://en.wikipedia.org/wiki/Cache"
+ python link_checker.py ./
diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml
index 66f898b4201b..aa8c2a11f574 100644
--- a/.github/workflows/lint.yml
+++ b/.github/workflows/lint.yml
@@ -11,7 +11,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Check out code.
- uses: actions/checkout@v3.2.0
+ uses: actions/checkout@v4
- name: spell check
run: |
pip install codespell==2.1.0
@@ -30,10 +30,10 @@ jobs:
steps:
- name: Check out code
- uses: actions/checkout@v3.2.0
+ uses: actions/checkout@v4
- name: Setup Nodejs env
- uses: actions/setup-node@v3.5.1
+ uses: actions/setup-node@v3.8.1
with:
node-version: '12'
diff --git a/.github/workflows/performance.yml b/.github/workflows/performance.yml
index c054303ccd74..17cf28691a4a 100644
--- a/.github/workflows/performance.yml
+++ b/.github/workflows/performance.yml
@@ -18,7 +18,7 @@ jobs:
steps:
- name: Check out code
- uses: actions/checkout@v3.2.0
+ uses: actions/checkout@v4
with:
submodules: recursive
diff --git a/.github/workflows/redhat-ci.yaml b/.github/workflows/redhat-ci.yaml
new file mode 100644
index 000000000000..9bd8d39e35aa
--- /dev/null
+++ b/.github/workflows/redhat-ci.yaml
@@ -0,0 +1,157 @@
+name: CI Redhat UBI - Daily
+
+on:
+ schedule:
+ - cron: "0 0 * * *"
+ pull_request:
+ branches: [master]
+ paths-ignore:
+ - 'docs/**'
+ - '**/*.md'
+concurrency:
+ group: ${{ github.workflow }}-${{ github.ref == 'refs/heads/master' && github.run_number || github.ref }}
+ cancel-in-progress: true
+
+permissions:
+ contents: read
+
+jobs:
+ test_apisix:
+ name: run ci on redhat ubi
+ runs-on: ubuntu-20.04
+ timeout-minutes: 90
+ strategy:
+ fail-fast: false
+ matrix:
+ test_dir:
+ - t/plugin/[a-k]*
+ - t/plugin/[l-z]*
+ - t/admin t/cli t/config-center-yaml t/control t/core t/debug t/discovery t/error_page t/misc
+ - t/node t/pubsub t/router t/script t/secret t/stream-node t/utils t/wasm t/xds-library
+
+ steps:
+ - name: Check out code
+ uses: actions/checkout@v4
+ with:
+ submodules: recursive
+
+ - name: Cache deps
+ uses: actions/cache@v3
+ env:
+ cache-name: cache-deps
+ with:
+ path: deps
+ key: ${{ runner.os }}-${{ env.cache-name }}-ubi8.6-${{ hashFiles('rockspec/apisix-master-0.rockspec') }}
+
+ - name: Extract branch name
+ if: ${{ startsWith(github.ref, 'refs/heads/release/') }}
+ id: branch_env
+ shell: bash
+ run: |
+ echo "version=${GITHUB_REF##*/}" >>$GITHUB_OUTPUT
+
+ - name: Extract test type
+ shell: bash
+ id: test_env
+ run: |
+ test_dir="${{ matrix.test_dir }}"
+ if [[ $test_dir =~ 't/plugin' ]]; then
+ echo "type=plugin" >>$GITHUB_OUTPUT
+ fi
+ if [[ $test_dir =~ 't/admin ' ]]; then
+ echo "type=first" >>$GITHUB_OUTPUT
+ fi
+ if [[ $test_dir =~ ' t/xds-library' ]]; then
+ echo "type=last" >>$GITHUB_OUTPUT
+ fi
+
+ - name: Free disk space
+ run: |
+ bash ./ci/free_disk_space.sh
+
+ - name: Linux launch common services
+ run: |
+ make ci-env-up project_compose_ci=ci/pod/docker-compose.common.yml
+ sudo ./ci/init-common-test-service.sh
+
+ - name: Build rpm package
+ if: ${{ startsWith(github.ref, 'refs/heads/release/') }}
+ run: |
+ export VERSION=${{ steps.branch_env.outputs.version }}
+ sudo gem install --no-document fpm
+ git clone --depth 1 https://github.com/api7/apisix-build-tools.git
+
+ # move codes under build tool
+ mkdir ./apisix-build-tools/apisix
+ for dir in `ls|grep -v "^apisix-build-tools$"`;do cp -r $dir ./apisix-build-tools/apisix/;done
+
+ cd apisix-build-tools
+ make package type=rpm app=apisix version=${VERSION} checkout=release/${VERSION} image_base=ubi image_tag=8.6 local_code_path=./apisix
+ cd ..
+ rm -rf $(ls -1 --ignore=apisix-build-tools --ignore=t --ignore=utils --ignore=ci --ignore=Makefile --ignore=rockspec)
+
+ - name: Build xDS library
+ if: steps.test_env.outputs.type == 'last'
+ run: |
+ cd t/xds-library
+ go build -o libxds.so -buildmode=c-shared main.go export.go
+
+ - name: Run redhat docker and mapping apisix into container
+ env:
+ TEST_FILE_SUB_DIR: ${{ matrix.test_dir }}
+ run: |
+ docker run -itd -v ${{ github.workspace }}:/apisix --env TEST_FILE_SUB_DIR="$TEST_FILE_SUB_DIR" --name ubiInstance --net="host" --dns 8.8.8.8 --dns-search apache.org registry.access.redhat.com/ubi8/ubi:8.6 /bin/bash
+
+ - name: Cache images
+ id: cache-images
+ uses: actions/cache@v3
+ env:
+ cache-name: cache-apisix-docker-images
+ with:
+ path: docker-images-backup
+ key: ${{ runner.os }}-${{ env.cache-name }}-${{ steps.test_env.outputs.type }}-${{ hashFiles(format('./ci/pod/docker-compose.{0}.yml', steps.test_env.outputs.type )) }}
+
+ - if: ${{ steps.cache-images.outputs.cache-hit == 'true' }}
+ name: Load saved docker images
+ run: |
+ if [[ -f docker-images-backup/apisix-images.tar ]]; then
+ [[ ${{ steps.test_env.outputs.type }} != first ]] && sudo ./ci/init-${{ steps.test_env.outputs.type }}-test-service.sh before
+ docker load --input docker-images-backup/apisix-images.tar
+ rm docker-images-backup/apisix-images.tar
+ make ci-env-up project_compose_ci=ci/pod/docker-compose.${{ steps.test_env.outputs.type }}.yml
+ echo "loaded docker images"
+ if [[ ${{ steps.test_env.outputs.type }} != first ]]; then
+ sudo ./ci/init-${{ steps.test_env.outputs.type }}-test-service.sh after
+ fi
+ fi
+ - if: ${{ steps.cache-images.outputs.cache-hit != 'true' }}
+ name: Linux launch services
+ run: |
+ [[ ${{ steps.test_env.outputs.type }} != first ]] && sudo ./ci/init-${{ steps.test_env.outputs.type }}-test-service.sh before
+ [[ ${{ steps.test_env.outputs.type }} == plugin ]] && ./ci/pod/openfunction/build-function-image.sh
+ make ci-env-up project_compose_ci=ci/pod/docker-compose.${{ steps.test_env.outputs.type }}.yml
+ [[ ${{ steps.test_env.outputs.type }} != first ]] && sudo ./ci/init-${{ steps.test_env.outputs.type }}-test-service.sh after
+ echo "Linux launch services, done."
+
+ - name: Install dependencies
+ run: |
+ docker exec ubiInstance bash -c "cd apisix && chmod +x ./ci/redhat-ci.sh && ./ci/redhat-ci.sh install_dependencies"
+
+ - name: Install rpm package
+ if: ${{ startsWith(github.ref, 'refs/heads/release/') }}
+ run: |
+ docker exec ubiInstance bash -c "cd apisix && rpm -iv --prefix=/apisix ./apisix-build-tools/output/apisix-${{ steps.branch_env.outputs.version }}-0.ubi8.6.x86_64.rpm"
+          # Dependencies are bundled with the rpm, so revert `make deps`
+ docker exec ubiInstance bash -c "cd apisix && rm -rf deps"
+ docker exec ubiInstance bash -c "cd apisix && mv usr/bin . && mv usr/local/apisix/* ."
+
+ - name: Run test cases
+ run: |
+ docker exec ubiInstance bash -c "cd apisix && chmod +x ./ci/redhat-ci.sh && ./ci/redhat-ci.sh run_case"
+
+ - if: ${{ steps.cache-images.outputs.cache-hit != 'true' }}
+ name: Save docker images
+ run: |
+ echo "start backing up, $(date)"
+ bash ./ci/backup-docker-images.sh ${{ steps.test_env.outputs.type }}
+ echo "backup done, $(date)"
diff --git a/.github/workflows/semantic.yml b/.github/workflows/semantic.yml
index a2b606667fad..85df2c0816f7 100644
--- a/.github/workflows/semantic.yml
+++ b/.github/workflows/semantic.yml
@@ -13,7 +13,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Check out repository code
- uses: actions/checkout@v3.2.0
+ uses: actions/checkout@v4
with:
submodules: recursive
- uses: ./.github/actions/action-semantic-pull-request
diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml
index 4f751e7e962f..3bd686e6f124 100644
--- a/.github/workflows/stale.yml
+++ b/.github/workflows/stale.yml
@@ -19,7 +19,7 @@ jobs:
steps:
- name: Prune Stale
- uses: actions/stale@v7
+ uses: actions/stale@v8
with:
days-before-issue-stale: 350
days-before-issue-close: 14
diff --git a/.github/workflows/tars-ci.yml b/.github/workflows/tars-ci.yml
index aa4c1b6e45af..9e1c9fa2963e 100644
--- a/.github/workflows/tars-ci.yml
+++ b/.github/workflows/tars-ci.yml
@@ -28,7 +28,6 @@ jobs:
- ubuntu-20.04
os_name:
- linux_openresty
- - linux_openresty_1_19
runs-on: ${{ matrix.platform }}
timeout-minutes: 15
@@ -38,7 +37,7 @@ jobs:
steps:
- name: Check out code
- uses: actions/checkout@v3.2.0
+ uses: actions/checkout@v4
with:
submodules: recursive
diff --git a/.github/workflows/update-labels.yml b/.github/workflows/update-labels.yml
new file mode 100644
index 000000000000..80919aefb841
--- /dev/null
+++ b/.github/workflows/update-labels.yml
@@ -0,0 +1,62 @@
+name: Update labels when user responds in issue and pr
+permissions:
+ issues: write
+ pull-requests: write
+
+on:
+ issue_comment:
+ types: [created]
+ pull_request_review_comment:
+ types: [created]
+
+jobs:
+ issue_commented:
+ if: github.event.issue && !github.event.issue.pull_request && github.event.comment.user.login == github.event.issue.user.login && contains(github.event.issue.labels.*.name, 'wait for update') && !contains(github.event.issue.labels.*.name, 'user responded')
+ runs-on: ubuntu-latest
+ steps:
+ - name: update labels when user responds
+ uses: actions/github-script@v6
+ with:
+ script: |
+ github.rest.issues.addLabels({
+ issue_number: context.issue.number,
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ labels: ["user responded"]
+ })
+ github.rest.issues.removeLabel({
+ issue_number: context.issue.number,
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ name: "wait for update"
+ })
+
+ pr_commented:
+ if: github.event.issue && github.event.issue.pull_request && github.event.comment.user.login == github.event.issue.user.login && (contains(github.event.issue.labels.*.name, 'wait for update') || contains(github.event.issue.labels.*.name, 'discuss') || contains(github.event.issue.labels.*.name, 'need test cases')) && !contains(github.event.issue.labels.*.name, 'user responded')
+ runs-on: ubuntu-latest
+ steps:
+ - name: update label when user responds
+ uses: actions/github-script@v6
+ with:
+ script: |
+ github.rest.issues.addLabels({
+ issue_number: context.issue.number,
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ labels: ["user responded"]
+ })
+
+ pr_review_commented:
+ if: github.event.pull_request && github.event.comment.user.login == github.event.pull_request.user.login && (contains(github.event.pull_request.labels.*.name, 'wait for update') || contains(github.event.pull_request.labels.*.name, 'discuss') || contains(github.event.issue.labels.*.name, 'need test cases')) && !contains(github.event.pull_request.labels.*.name, 'user responded')
+ runs-on: ubuntu-latest
+ steps:
+ - name: update label when user responds
+ uses: actions/github-script@v6
+ with:
+ script: |
+ github.rest.issues.addLabels({
+ issue_number: context.issue.number,
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ labels: ["user responded"]
+ })
diff --git a/.gitmodules b/.gitmodules
index 3c8ed44e4c56..981985528a80 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -4,3 +4,6 @@
[submodule ".github/actions/action-semantic-pull-request"]
path = .github/actions/action-semantic-pull-request
url = https://github.com/amannn/action-semantic-pull-request.git
+[submodule ".github/actions/autocorrect"]
+ path = .github/actions/autocorrect
+ url = https://github.com/huacnlee/autocorrect.git
diff --git a/.licenserc.yaml b/.licenserc.yaml
index 315fa71bcf42..8b423f25cdd1 100644
--- a/.licenserc.yaml
+++ b/.licenserc.yaml
@@ -23,6 +23,8 @@ header:
paths-ignore:
- '.gitignore'
+ - '.gitattributes'
+ - '.gitmodules'
- 'LICENSE'
- 'NOTICE'
- '**/*.json'
@@ -46,7 +48,11 @@ header:
# Exclude plugin-specific configuration files
- 't/plugin/authz-casbin'
- 't/coredns'
+ - 't/fuzzing/requirements.txt'
+ - 't/perf/requirements.txt'
- 'autodocs/'
- 'docs/**/*.md'
+ - '.ignore_words'
+ - '.luacheckrc'
comment: on-failure
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 0097b2ef93cf..92bd8498c313 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -23,6 +23,10 @@ title: Changelog
## Table of Contents
+- [3.5.0](#350)
+- [3.4.0](#340)
+- [3.3.0](#330)
+- [3.2.1](#321)
- [3.2.0](#320)
- [3.1.0](#310)
- [3.0.0](#300)
@@ -68,6 +72,117 @@ title: Changelog
- [0.7.0](#070)
- [0.6.0](#060)
+## 3.5.0
+
+### Change
+
+- :warning: remove snowflake algorithm in the request-id plugin: [#9715](https://github.com/apache/apisix/pull/9715)
+- :warning: No longer compatible with OpenResty 1.19; it needs to be upgraded to 1.21+: [#9913](https://github.com/apache/apisix/pull/9913)
+- :warning: Remove the configuration item `apisix.stream_proxy.only`, the L4/L7 proxy needs to be enabled through the configuration item `apisix.proxy_mode`: [#9607](https://github.com/apache/apisix/pull/9607)
+- :warning: The admin-api `/apisix/admin/plugins?all=true` is marked as deprecated: [#9580](https://github.com/apache/apisix/pull/9580)
+- :warning: allowlist and denylist can't be enabled at the same time in ua-restriction plugin: [#9841](https://github.com/apache/apisix/pull/9841)
+
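
> With `apisix.stream_proxy.only` removed, the L4 proxy is now switched on through `apisix.proxy_mode`. Below is a minimal, hypothetical `conf/config.yaml` sketch; the accepted values are inferred from the admin-api check added later in this diff (`stream` and `http&stream`, with `http` presumably the default), and the listen port is illustrative only.

```bash
# Sketch only: the new proxy_mode key replaces apisix.stream_proxy.only.
cat >> conf/config.yaml <<'EOF'
apisix:
  proxy_mode: http&stream      # one of: http, stream, http&stream
  stream_proxy:
    tcp:
      - 9100                   # L4 listen port (illustrative)
EOF
```
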
+### Core
+
+- :sunrise: Support host-level dynamic setting of the TLS protocol version: [#9903](https://github.com/apache/apisix/pull/9903)
+- :sunrise: Support force delete resource: [#9810](https://github.com/apache/apisix/pull/9810)
+- :sunrise: Support pulling env vars from yaml keys: [#9855](https://github.com/apache/apisix/pull/9855)
+- :sunrise: Add schema validate API in admin-api: [#10065](https://github.com/apache/apisix/pull/10065)
+
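
> The schema validate API mentioned above is added later in this diff as the `POST /apisix/admin/schema/validate/*` route in `apisix/admin/init.lua`. A hedged usage sketch, assuming the default Admin API port 9180 and an `ADMIN_KEY` placeholder:

```bash
# Sketch: validate a route body against the route schema without creating it.
curl -i -X POST "http://127.0.0.1:9180/apisix/admin/schema/validate/routes" \
  -H "X-API-KEY: $ADMIN_KEY" \
  -d '{
        "uri": "/hello",
        "upstream": {
          "type": "roundrobin",
          "nodes": { "127.0.0.1:1980": 1 }
        }
      }'
# 200 is returned when the body passes the schema check, 400 with an error_msg otherwise.
```
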
+### Plugins
+
+- :sunrise: Add chaitin-waf plugin: [#9838](https://github.com/apache/apisix/pull/9838)
+- :sunrise: Support vars for file-logger plugin: [#9712](https://github.com/apache/apisix/pull/9712)
+- :sunrise: Support adding response headers for mock plugin: [#9720](https://github.com/apache/apisix/pull/9720)
+- :sunrise: Support regex_uri with unsafe_uri for proxy-rewrite plugin: [#9813](https://github.com/apache/apisix/pull/9813)
+- :sunrise: Support setting the client_email field for the google-cloud-logging plugin: [#9813](https://github.com/apache/apisix/pull/9813)
+- :sunrise: Support sending headers returned by the OPA server to the upstream in the opa plugin: [#9710](https://github.com/apache/apisix/pull/9710)
+- :sunrise: Support configuring proxy server for openid-connect plugin: [#9948](https://github.com/apache/apisix/pull/9948)
+
+### Bugfixes
+
+- Fix(log-rotate): the max_kept configuration doesn't work when using custom name: [#9749](https://github.com/apache/apisix/pull/9749)
+- Fix(limit_conn): do not use the http variable in stream mode: [#9816](https://github.com/apache/apisix/pull/9816)
+- Fix(loki-logger): getting an error with log_labels: [#9850](https://github.com/apache/apisix/pull/9850)
+- Fix(limit-count): X-RateLimit-Reset shouldn't be set to 0 after the request is rejected: [#9978](https://github.com/apache/apisix/pull/9978)
+- Fix(nacos): attempt to index upvalue 'applications' (a nil value): [#9960](https://github.com/apache/apisix/pull/9960)
+- Fix(etcd): can't sync etcd data if key has special character: [#9967](https://github.com/apache/apisix/pull/9967)
+- Fix(tencent-cloud-cls): dns parsing failure: [#9843](https://github.com/apache/apisix/pull/9843)
+- Fix(reload): worker does not exit when executing the quit or reload command [#9909](https://github.com/apache/apisix/pull/9909)
+- Fix(traffic-split): upstream_id validity verification [#10008](https://github.com/apache/apisix/pull/10008)
+
+## 3.4.0
+
+### Core
+
+- :sunrise: Support route-level MTLS [#9322](https://github.com/apache/apisix/pull/9322)
+- :sunrise: Support id schema for global_rules [#9517](https://github.com/apache/apisix/pull/9517)
+- :sunrise: Support using a single long-lived HTTP connection to watch all resources in etcd [#9456](https://github.com/apache/apisix/pull/9456)
+- :sunrise: Support a maximum length of 256 for SSL labels [#9301](https://github.com/apache/apisix/pull/9301)
+
+### Plugins
+
+- :sunrise: Support multiple regex pattern matching for proxy_rewrite plugin [#9194](https://github.com/apache/apisix/pull/9194)
+- :sunrise: Add loki-logger plugin [#9399](https://github.com/apache/apisix/pull/9399)
+- :sunrise: Allow users to configure DEFAULT_BUCKETS for the prometheus plugin [#9673](https://github.com/apache/apisix/pull/9673)
+
+### Bugfixes
+
+- Fix(body-transformer): xml2lua: replace empty table with empty string [#9669](https://github.com/apache/apisix/pull/9669)
+- Fix: opentelemetry and grpc-transcode plugins cannot work together [#9606](https://github.com/apache/apisix/pull/9606)
+- Fix(skywalking-logger, error-log-logger): support $hostname in skywalking service_instance_name [#9401](https://github.com/apache/apisix/pull/9401)
+- Fix(admin): secrets do not support updating attributes by PATCH [#9510](https://github.com/apache/apisix/pull/9510)
+- Fix(http-logger): default request path should be '/' [#9472](https://github.com/apache/apisix/pull/9472)
+- Fix: syslog plugin doesn't work [#9425](https://github.com/apache/apisix/pull/9425)
+- Fix: wrong log format for splunk-hec-logging [#9478](https://github.com/apache/apisix/pull/9478)
+- Fix(etcd): reuse cli and enable keepalive [#9420](https://github.com/apache/apisix/pull/9420)
+- Fix: upstream key config add mqtt_client_id support [#9450](https://github.com/apache/apisix/pull/9450)
+- Fix: body-transformer plugin returns the raw body every time [#9446](https://github.com/apache/apisix/pull/9446)
+- Fix(wolf-rbac): other plugins on the consumer do not take effect when the consumer uses the wolf-rbac plugin [#9298](https://github.com/apache/apisix/pull/9298)
+- Fix: always parse domain when host is domain name [#9332](https://github.com/apache/apisix/pull/9332)
+- Fix: response-rewrite plugin can't add only one character [#9372](https://github.com/apache/apisix/pull/9372)
+- Fix(consul): support fetching only healthy endpoints [#9204](https://github.com/apache/apisix/pull/9204)
+
+## 3.3.0
+
+**The changes marked with :warning: are not backward compatible.**
+
+### Change
+
+- :warning: Change the default router from `radixtree_uri` to `radixtree_host_uri`: [#9047](https://github.com/apache/apisix/pull/9047)
+- :warning: CORS plugin will add `Vary: Origin` header when `allow_origin` is not `*`: [#9010](https://github.com/apache/apisix/pull/9010)
+
+### Core
+
+- :sunrise: Support store route's cert in secrets manager: [#9247](https://github.com/apache/apisix/pull/9247)
+- :sunrise: Support bypassing Admin API Auth by configuration: [#9147](https://github.com/apache/apisix/pull/9147)
+
+### Plugins
+
+- :sunrise: Support header injection for `fault-injection` plugin: [#9039](https://github.com/apache/apisix/pull/9039)
+- :sunrise: Support variable when rewrite header in `proxy-rewrite` plugin: [#9112](https://github.com/apache/apisix/pull/9112)
+- :sunrise: `limit-count` plugin supports `username` and `ssl` for redis policy: [#9185](https://github.com/apache/apisix/pull/9185)
+
+### Bugfixes
+
+- Fix etcd data sync exception: [#8493](https://github.com/apache/apisix/pull/8493)
+- Fix invalidate cache in `core.request.add_header` and fix some calls: [#8824](https://github.com/apache/apisix/pull/8824)
+- Fix the high CPU and memory usage caused by the healthcheck implementation: [#9015](https://github.com/apache/apisix/pull/9015)
+- Consider using `allow_origins_by_regex` only when it is not `nil`: [#9028](https://github.com/apache/apisix/pull/9028)
+- Check upstream reference in `traffic-split` plugin when delete upstream: [#9044](https://github.com/apache/apisix/pull/9044)
+- Fix failing to connect to etcd at startup: [#9077](https://github.com/apache/apisix/pull/9077)
+- Fix health checker leak for domain nodes: [#9090](https://github.com/apache/apisix/pull/9090)
+- Prevent clients outside `127.0.0.0/24` from accessing the admin api when admin_key is empty: [#9146](https://github.com/apache/apisix/pull/9146)
+- Ensure `hold_body_chunk` uses a separate buffer for each plugin to avoid pollution: [#9266](https://github.com/apache/apisix/pull/9266)
+- Ensure `batch-requests` plugin reads trailer headers if they exist: [#9289](https://github.com/apache/apisix/pull/9289)
+- Ensure `proxy-rewrite` sets `ngx.var.uri`: [#9309](https://github.com/apache/apisix/pull/9309)
+
+## 3.2.1
+
+**This is an LTS maintenance release; you can see the CHANGELOG in the `release/3.2` branch.**
+
+[https://github.com/apache/apisix/blob/release/3.2/CHANGELOG.md#321](https://github.com/apache/apisix/blob/release/3.2/CHANGELOG.md#321)
+
## 3.2.0
### Change
@@ -298,7 +413,7 @@ Returns multiple configurations:
- Port of Admin API changed to 9180: [#7806](https://github.com/apache/apisix/pull/7806)
- We only support OpenResty 1.19.3.2 and above: [#7625](https://github.com/apache/apisix/pull/7625)
-- Adjusted the priority of the Plugin Config object so that the priority of a plugin configuration with the same name changes from Consumer > Plugin Config > Route > Service to Consumer > Route > Plugin Config > Service: [#7614](https://github. com/apache/apisix/pull/7614)
+- Adjusted the priority of the Plugin Config object so that the priority of a plugin configuration with the same name changes from Consumer > Plugin Config > Route > Service to Consumer > Route > Plugin Config > Service: [#7614](https://github.com/apache/apisix/pull/7614)
### Core
@@ -1290,7 +1405,7 @@ and support for ARM platform, and proxy rewrite plugin.
### Core
-- :sunrise: **[support stand-alone mode](https://github.com/apache/incubator-apisix/blob/master/docs/en/latest/stand-alone-cn.md)**: using yaml to update configurations of APISIX, more friendly to kubernetes. [#464](https://github.com/apache/incubator-apisix/pull/464)
+- :sunrise: **[support standalone mode](https://github.com/apache/apisix/blob/master/docs/en/latest/deployment-modes.md#standalone)**: using yaml to update configurations of APISIX, more friendly to kubernetes. [#464](https://github.com/apache/incubator-apisix/pull/464)
- :sunrise: **[support stream proxy](https://github.com/apache/incubator-apisix/blob/master/docs/en/latest/stream-proxy.md)**. [#513](https://github.com/apache/incubator-apisix/pull/513)
- :sunrise: support consumer bind plugins. [#544](https://github.com/apache/incubator-apisix/pull/544)
- support domain name in upstream, not only IP. [#522](https://github.com/apache/incubator-apisix/pull/522)
@@ -1332,7 +1447,7 @@ This release brings many new features, such as IP black and white list, gPRC pro
### Core
-- :sunrise: **[gRPC transcoding](https://github.com/apache/incubator-apisix/blob/master/docs/en/latest/plugins/grpc-transcoding.md)**: supports protocol transcoding so that clients can access your gRPC API by using HTTP/JSON. [#395](https://github.com/apache/incubator-apisix/issues/395)
+- :sunrise: **[gRPC transcoding](https://github.com/apache/apisix/blob/master/docs/en/latest/plugins/grpc-transcode.md)**: supports protocol transcoding so that clients can access your gRPC API by using HTTP/JSON. [#395](https://github.com/apache/incubator-apisix/issues/395)
- :sunrise: **[radix tree router](https://github.com/apache/incubator-apisix/blob/master/docs/en/latest/router-radixtree.md)**: The radix tree is used as the default router implementation. It supports the uri, host, cookie, request header, request parameters, Nginx built-in variables, etc. as the routing conditions, and supports common operators such as equal, greater than, less than, etc., more powerful and flexible.**IMPORTANT: This change is not downward compatible. All users who use historical versions need to manually modify their routing to work properly.** [#414](https://github.com/apache/incubator-apisix/issues/414)
- Dynamic upstream supports more parameters, you can specify the upstream uri and host, and whether to enable websocket. [#451](https://github.com/apache/incubator-apisix/pull/451)
- Support for get values from cookies directly from `ctx.var`. [#449](https://github.com/apache/incubator-apisix/pull/449)
@@ -1378,7 +1493,7 @@ This release brings many new features such as health check and circuit breaker,
### Plugins
- :sunrise: **[OpenTracing](https://github.com/apache/incubator-apisix/blob/master/docs/en/latest/plugins/zipkin.md)**: support Zipkin and Apache SkyWalking. [#304](https://github.com/apache/incubator-apisix/pull/304)
-- [JWT auth](https://github.com/apache/incubator-apisix/blob/master/docs/en/latest/plugins/jwt-auth-cn.md). [#303](https://github.com/apache/incubator-apisix/pull/303)
+- [JWT auth](https://github.com/apache/apisix/blob/master/docs/en/latest/plugins/jwt-auth.md). [#303](https://github.com/apache/incubator-apisix/pull/303)
### CLI
diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md
index 7a96ea5db17f..fe93188bc703 100644
--- a/CODE_OF_CONDUCT.md
+++ b/CODE_OF_CONDUCT.md
@@ -65,7 +65,7 @@ We strive to:
* Repeated harassment of others. In general, if someone asks you to stop, then stop.
* Advocating for, or encouraging, any of the above behaviour.
-6. __Be concise.__ Keep in mind that what you write once will be read by hundreds of persons. Writing a short email means people can understand the conversation as efficiently as possible. Short emails should always strive to be empathetic, welcoming, friendly and patient. When a long explanation is necessary, consider adding a summary.
+6. __Be concise.__ Keep in mind that what you write once will be read by hundreds of people. Writing a short email means people can understand the conversation as efficiently as possible. Short emails should always strive to be empathetic, welcoming, friendly and patient. When a long explanation is necessary, consider adding a summary.
Try to bring new ideas to a conversation so that each mail adds something unique to the thread, keeping in mind that the rest of the thread still contains the other messages with arguments that have already been made.
diff --git a/CODE_STYLE.md b/CODE_STYLE.md
index 9b25f552cf90..f19bf355aeb8 100644
--- a/CODE_STYLE.md
+++ b/CODE_STYLE.md
@@ -322,7 +322,7 @@ end
The function should return `<boolean>`, `err`.
The first return value means successful or not, if not, the second return value specifies the error message.
-The error message can be ignored in some case.
+The error message can be ignored in some cases.
```lua
--No
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 7f8434e63460..872e2a59b87a 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -44,7 +44,7 @@ Once we've discussed your changes and you've got your code ready, make sure that
* Includes tests for new functionality.
* References the original issue in the description, e.g. "Resolves #123".
* Has a [good commit message](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html).
-* Ensure your pull request's title starts from one of the word in the `types` section of [semantic.yml](https://github.com/apache/apisix/blob/master/.github/semantic.yml).
+* Ensure your pull request's title starts with one of the words in the `types` section of [semantic.yml](https://github.com/apache/apisix/blob/master/.github/workflows/semantic.yml).
* Follow the [PR manners](https://raw.githubusercontent.com/apache/apisix/master/.github/PULL_REQUEST_TEMPLATE.md)
## Contribution Guidelines for Documentation
diff --git a/Makefile b/Makefile
index abe654407bc7..f3cc375e061e 100644
--- a/Makefile
+++ b/Makefile
@@ -26,6 +26,7 @@ VERSION ?= master
project_name ?= apache-apisix
project_release_name ?= $(project_name)-$(VERSION)-src
+OTEL_CONFIG ?= ./ci/pod/otelcol-contrib/data-otlp.json
# Hyperconverged Infrastructure
ENV_OS_NAME ?= $(shell uname -s | tr '[:upper:]' '[:lower:]')
@@ -80,6 +81,9 @@ ifeq ($(ENV_OS_NAME), darwin)
ifeq ($(shell test -d $(ENV_HOMEBREW_PREFIX)/opt/openresty-openssl111 && echo -n yes), yes)
ENV_OPENSSL_PREFIX := $(ENV_HOMEBREW_PREFIX)/opt/openresty-openssl111
endif
+ ifeq ($(shell test -d $(ENV_HOMEBREW_PREFIX)/opt/pcre && echo -n yes), yes)
+ ENV_PCRE_PREFIX := $(ENV_HOMEBREW_PREFIX)/opt/pcre
+ endif
endif
@@ -145,7 +149,7 @@ help:
@echo
-### deps : Installation dependencies
+### deps : Installing dependencies
.PHONY: deps
deps: runtime
$(eval ENV_LUAROCKS_VER := $(shell $(ENV_LUAROCKS) --version | grep -E -o "luarocks [0-9]+."))
@@ -153,14 +157,15 @@ deps: runtime
mkdir -p ~/.luarocks; \
$(ENV_LUAROCKS) config $(ENV_LUAROCKS_FLAG_LOCAL) variables.OPENSSL_LIBDIR $(addprefix $(ENV_OPENSSL_PREFIX), /lib); \
$(ENV_LUAROCKS) config $(ENV_LUAROCKS_FLAG_LOCAL) variables.OPENSSL_INCDIR $(addprefix $(ENV_OPENSSL_PREFIX), /include); \
- $(ENV_LUAROCKS) install rockspec/apisix-master-0.rockspec --tree=deps --only-deps --local $(ENV_LUAROCKS_SERVER_OPT); \
+ [ '$(ENV_OS_NAME)' == 'darwin' ] && $(ENV_LUAROCKS) config $(ENV_LUAROCKS_FLAG_LOCAL) variables.PCRE_INCDIR $(addprefix $(ENV_PCRE_PREFIX), /include); \
+ $(ENV_LUAROCKS) install rockspec/apisix-master-0.rockspec --tree deps --only-deps $(ENV_LUAROCKS_SERVER_OPT); \
else \
$(call func_echo_warn_status, "WARNING: You're not using LuaRocks 3.x; please remove the luarocks and reinstall it via https://raw.githubusercontent.com/apache/apisix/master/utils/linux-install-luarocks.sh"); \
exit 1; \
fi
-### undeps : Uninstallation dependencies
+### undeps : Uninstalling dependencies
.PHONY: undeps
undeps:
@$(call func_echo_status, "$@ -> [ Start ]")
@@ -337,9 +342,6 @@ install: runtime
$(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/plugins/serverless
$(ENV_INSTALL) apisix/plugins/serverless/*.lua $(ENV_INST_LUADIR)/apisix/plugins/serverless/
- $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/plugins/slslog
- $(ENV_INSTALL) apisix/plugins/slslog/*.lua $(ENV_INST_LUADIR)/apisix/plugins/slslog/
-
$(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/plugins/syslog
$(ENV_INSTALL) apisix/plugins/syslog/*.lua $(ENV_INST_LUADIR)/apisix/plugins/syslog/
@@ -373,6 +375,9 @@ install: runtime
$(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/stream/xrpc/protocols/redis
$(ENV_INSTALL) apisix/stream/xrpc/protocols/redis/*.lua $(ENV_INST_LUADIR)/apisix/stream/xrpc/protocols/redis/
+ $(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/stream/xrpc/protocols/dubbo
+ $(ENV_INSTALL) apisix/stream/xrpc/protocols/dubbo/*.lua $(ENV_INST_LUADIR)/apisix/stream/xrpc/protocols/dubbo/
+
$(ENV_INSTALL) -d $(ENV_INST_LUADIR)/apisix/utils
$(ENV_INSTALL) apisix/utils/*.lua $(ENV_INST_LUADIR)/apisix/utils/
@@ -441,6 +446,8 @@ compress-tar:
.PHONY: ci-env-up
ci-env-up:
@$(call func_echo_status, "$@ -> [ Start ]")
+ touch $(OTEL_CONFIG)
+ chmod 777 $(OTEL_CONFIG)
$(ENV_DOCKER_COMPOSE) up -d
@$(call func_echo_success_status, "$@ -> [ Done ]")
@@ -465,5 +472,6 @@ ci-env-rebuild:
.PHONY: ci-env-down
ci-env-down:
@$(call func_echo_status, "$@ -> [ Start ]")
+ rm $(OTEL_CONFIG)
$(ENV_DOCKER_COMPOSE) down
@$(call func_echo_success_status, "$@ -> [ Done ]")
diff --git a/README.md b/README.md
index b3dbfdfe5d46..9d304c5fed92 100644
--- a/README.md
+++ b/README.md
@@ -123,14 +123,14 @@ A/B testing, canary release, blue-green deployment, limit rate, defense against
- Zipkin tracing: [Zipkin](docs/en/latest/plugins/zipkin.md)
- Open source APM: support [Apache SkyWalking](docs/en/latest/plugins/skywalking.md)
- - Works with external service discovery: In addition to the built-in etcd, it also supports [Consul](docs/en/latest/discovery/consul_kv.md), [Nacos](docs/en/latest/discovery/nacos.md), [Eureka](docs/en/latest/discovery/eureka.md) and [Zookeeper (CP)](docs/en/latest/discovery/zookeeper.md).
+ - Works with external service discovery: In addition to the built-in etcd, it also supports [Consul](docs/en/latest/discovery/consul_kv.md), [Nacos](docs/en/latest/discovery/nacos.md), [Eureka](docs/en/latest/discovery/eureka.md) and [Zookeeper (CP)](https://github.com/api7/apisix-seed/blob/main/docs/en/latest/zookeeper.md).
- Monitoring And Metrics: [Prometheus](docs/en/latest/plugins/prometheus.md)
- Clustering: APISIX nodes are stateless, creates clustering of the configuration center, please refer to [etcd Clustering Guide](https://etcd.io/docs/v3.5/op-guide/clustering/).
- High availability: Support to configure multiple etcd addresses in the same cluster.
- [Dashboard](https://github.com/apache/apisix-dashboard)
- Version Control: Supports rollbacks of operations.
- CLI: start\stop\reload APISIX through the command line.
- - [Stand-Alone](docs/en/latest/deployment-modes.md#stand-alone): Supports to load route rules from local YAML file, it is more friendly such as under the kubernetes(k8s).
+  - [Standalone](docs/en/latest/deployment-modes.md#standalone): Supports loading route rules from a local YAML file, which is friendlier in environments such as Kubernetes (k8s).
- [Global Rule](docs/en/latest/terminology/global-rule.md): Allows to run any plugin for all request, eg: limit rate, IP filter etc.
- High performance: The single-core QPS reaches 18k with an average delay of fewer than 0.2 milliseconds.
- [Fault Injection](docs/en/latest/plugins/fault-injection.md)
diff --git a/THREAT_MODEL.md b/THREAT_MODEL.md
index e537d1a469c8..c10560c4d4aa 100644
--- a/THREAT_MODEL.md
+++ b/THREAT_MODEL.md
@@ -57,4 +57,4 @@ We should keep security in mind, and validate the input from the client before u
As the maintainer:
We should keep security in mind, and review the code line by line.
-We are open to the discussion from the security researchers.
+We are open to discussion from the security researchers.
diff --git a/apisix/admin/init.lua b/apisix/admin/init.lua
index 072a584350ee..333c798e6ada 100644
--- a/apisix/admin/init.lua
+++ b/apisix/admin/init.lua
@@ -16,6 +16,7 @@
--
local require = require
local core = require("apisix.core")
+local get_uri_args = ngx.req.get_uri_args
local route = require("apisix.utils.router")
local plugin = require("apisix.plugin")
local v3_adapter = require("apisix.admin.v3_adapter")
@@ -56,8 +57,8 @@ local resources = {
stream_routes = require("apisix.admin.stream_routes"),
plugin_metadata = require("apisix.admin.plugin_metadata"),
plugin_configs = require("apisix.admin.plugin_config"),
- consumer_groups = require("apisix.admin.consumer_group"),
- secrets = require("apisix.admin.secrets"),
+ consumer_groups = require("apisix.admin.consumer_group"),
+ secrets = require("apisix.admin.secrets"),
}
@@ -67,6 +68,12 @@ local router
local function check_token(ctx)
local local_conf = core.config.local_conf()
+
+ -- check if admin_key is required
+ if local_conf.deployment.admin.admin_key_required == false then
+ return true
+ end
+
local admin_key = core.table.try_read_attr(local_conf, "deployment", "admin", "admin_key")
if not admin_key then
return true
@@ -98,6 +105,22 @@ local function check_token(ctx)
return true
end
+-- Set the `api_ctx` variable and check the admin api token. If the check fails, the current
+-- request will be interrupted and an error response will be returned.
+--
+-- NOTE: This is a higher-level wrapper around the `check_token` function.
+local function set_ctx_and_check_token()
+ local api_ctx = {}
+ core.ctx.set_vars_meta(api_ctx)
+ ngx.ctx.api_ctx = api_ctx
+
+ local ok, err = check_token(api_ctx)
+ if not ok then
+ core.log.warn("failed to check token: ", err)
+ core.response.exit(401, { error_msg = "failed to check token" })
+ end
+end
+
local function strip_etcd_resp(data)
if type(data) == "table"
@@ -136,15 +159,7 @@ end
local function run()
- local api_ctx = {}
- core.ctx.set_vars_meta(api_ctx)
- ngx.ctx.api_ctx = api_ctx
-
- local ok, err = check_token(api_ctx)
- if not ok then
- core.log.warn("failed to check token: ", err)
- core.response.exit(401, {error_msg = "failed to check token"})
- end
+ set_ctx_and_check_token()
local uri_segs = core.utils.split_uri(ngx.var.uri)
core.log.info("uri: ", core.json.delay_encode(uri_segs))
@@ -160,7 +175,8 @@ local function run()
if seg_res == "stream_routes" then
local local_conf = core.config.local_conf()
- if not local_conf.apisix.stream_proxy then
+ if local_conf.apisix.proxy_mode ~= "stream" and
+ local_conf.apisix.proxy_mode ~= "http&stream" then
core.log.warn("stream mode is disabled, can not add any stream ",
"routes")
core.response.exit(400, {error_msg = "stream mode is disabled, " ..
@@ -170,7 +186,7 @@ local function run()
local resource = resources[seg_res]
if not resource then
- core.response.exit(404, {error_msg = "not found"})
+ core.response.exit(404, {error_msg = "Unsupported resource type: ".. seg_res})
end
local method = str_lower(get_method())
@@ -238,31 +254,32 @@ end
local function get_plugins_list()
- local api_ctx = {}
- core.ctx.set_vars_meta(api_ctx)
- ngx.ctx.api_ctx = api_ctx
-
- local ok, err = check_token(api_ctx)
- if not ok then
- core.log.warn("failed to check token: ", err)
- core.response.exit(401, {error_msg = "failed to check token"})
+ set_ctx_and_check_token()
+ local args = get_uri_args()
+ local subsystem = args["subsystem"]
+ -- If subsystem is passed then it should be either http or stream.
+    -- If it is not passed (nil), http is the default.
+ subsystem = subsystem or "http"
+ if subsystem == "http" or subsystem == "stream" then
+ local plugins = resources.plugins.get_plugins_list(subsystem)
+ core.response.exit(200, plugins)
end
+    core.response.exit(400, "invalid subsystem passed")
+end
- local plugins = resources.plugins.get_plugins_list()
- core.response.exit(200, plugins)
+-- Handle unsupported request methods for the virtual "reload" plugin
+local function unsupported_methods_reload_plugin()
+ set_ctx_and_check_token()
+
+ core.response.exit(405, {
+ error_msg = "please use PUT method to reload the plugins, "
+ .. get_method() .. " method is not allowed."
+ })
end
local function post_reload_plugins()
- local api_ctx = {}
- core.ctx.set_vars_meta(api_ctx)
- ngx.ctx.api_ctx = api_ctx
-
- local ok, err = check_token(api_ctx)
- if not ok then
- core.log.warn("failed to check token: ", err)
- core.response.exit(401, {error_msg = "failed to check token"})
- end
+ set_ctx_and_check_token()
local success, err = events.post(reload_event, get_method(), ngx_time())
if not success then
@@ -359,6 +376,41 @@ local function reload_plugins(data, event, source, pid)
end
+local function schema_validate()
+ local uri_segs = core.utils.split_uri(ngx.var.uri)
+ core.log.info("uri: ", core.json.delay_encode(uri_segs))
+
+ local seg_res = uri_segs[6]
+ local resource = resources[seg_res]
+ if not resource then
+ core.response.exit(404, {error_msg = "Unsupported resource type: ".. seg_res})
+ end
+
+ local req_body, err = core.request.get_body(MAX_REQ_BODY)
+ if err then
+ core.log.error("failed to read request body: ", err)
+ core.response.exit(400, {error_msg = "invalid request body: " .. err})
+ end
+
+ if req_body then
+ local data, err = core.json.decode(req_body)
+ if err then
+ core.log.error("invalid request body: ", req_body, " err: ", err)
+ core.response.exit(400, {error_msg = "invalid request body: " .. err,
+ req_body = req_body})
+ end
+
+ req_body = data
+ end
+
+ local ok, err = core.schema.check(resource.schema, req_body)
+ if ok then
+ core.response.exit(200)
+ end
+ core.response.exit(400, {error_msg = err})
+end
+
+
local uri_route = {
{
paths = [[/apisix/admin]],
@@ -375,11 +427,22 @@ local uri_route = {
methods = {"GET"},
handler = get_plugins_list,
},
+ {
+ paths = [[/apisix/admin/schema/validate/*]],
+ methods = {"POST"},
+ handler = schema_validate,
+ },
{
paths = reload_event,
methods = {"PUT"},
handler = post_reload_plugins,
},
+    -- Handle methods other than "PUT" on the plugin reload endpoint to inform the user
+ {
+ paths = reload_event,
+ methods = { "GET", "POST", "DELETE", "PATCH" },
+ handler = unsupported_methods_reload_plugin,
+ },
}
@@ -395,6 +458,13 @@ function _M.init_worker()
events.register(reload_plugins, reload_event, "PUT")
if ngx_worker_id() == 0 then
+ -- check if admin_key is required
+ if local_conf.deployment.admin.admin_key_required == false then
+            core.log.warn("Admin key is bypassed! ",
+                "If you are deploying APISIX in a production environment, ",
+                "please enable `admin_key_required` and set a secure admin key!")
+ end
+
local ok, err = ngx_timer_at(0, function(premature)
if premature then
return
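
> A hedged sketch of the deployment config that triggers the `admin_key_required` bypass added above. The key path mirrors the `local_conf.deployment.admin.admin_key_required` check in this diff; everything else is illustrative and intended for local development only.

```bash
# Sketch: bypass Admin API authentication (local development only).
cat >> conf/config.yaml <<'EOF'
deployment:
  admin:
    admin_key_required: false   # check_token() then returns true without an admin key
EOF

# With the flag set, requests no longer need the X-API-KEY header:
curl -i "http://127.0.0.1:9180/apisix/admin/routes"
```
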
diff --git a/apisix/admin/plugins.lua b/apisix/admin/plugins.lua
index 26dcab719332..201f8f3c998e 100644
--- a/apisix/admin/plugins.lua
+++ b/apisix/admin/plugins.lua
@@ -18,11 +18,12 @@ local require = require
local core = require("apisix.core")
local check_schema = require("apisix.plugin").check_schema
local ipairs = ipairs
-local pcall = pcall
local table_sort = table.sort
local table_insert = table.insert
local get_uri_args = ngx.req.get_uri_args
local plugin_get_all = require("apisix.plugin").get_all
+local plugin_get_http = require("apisix.plugin").get
+local plugin_get_stream = require("apisix.plugin").get_stream
local encrypt_conf = require("apisix.plugin").encrypt_conf
local pairs = pairs
@@ -42,7 +43,15 @@ end
function _M.get(name)
local arg = get_uri_args()
- if arg and arg["all"] == "true" then
+ -- If subsystem is passed inside args then it should be oneOf: http / stream.
+ local subsystem = arg["subsystem"] or "http"
+ if subsystem ~= "http" and subsystem ~= "stream" then
+ return 400, {error_msg = "unsupported subsystem: "..subsystem}
+ end
+
+ -- arg all to be deprecated
+ if (arg and arg["all"] == "true") then
+ core.log.warn("query parameter \"all\" will be deprecated soon.")
local http_plugins, stream_plugins = plugin_get_all({
version = true,
priority = true,
@@ -60,16 +69,18 @@ function _M.get(name)
return 200, http_plugins
end
- if not name then
- return 400, {error_msg = "not found plugin name"}
- end
+ local plugin
- local plugin_name = "apisix.plugins." .. name
+ if subsystem == "http" then
+ plugin = plugin_get_http(name)
+ else
+ plugin = plugin_get_stream(name)
+ end
- local ok, plugin = pcall(require, plugin_name)
- if not ok then
- core.log.warn("failed to load plugin [", name, "] err: ", plugin)
- return 400, {error_msg = "failed to load plugin " .. name}
+ if not plugin then
+ local err = "plugin not found in subsystem " .. subsystem
+ core.log.warn(err)
+ return 404, {error_msg = err}
end
local json_schema = plugin.schema
@@ -85,16 +96,34 @@ function _M.get(name)
end
-function _M.get_plugins_list()
- local plugins = core.config.local_conf().plugins
+function _M.get_plugins_list(subsystem)
+ local http_plugins
+ local stream_plugins
+ if subsystem == "http" then
+ http_plugins = core.config.local_conf().plugins
+ else
+ stream_plugins = core.config.local_conf().stream_plugins
+ end
+
local priorities = {}
local success = {}
- for i, name in ipairs(plugins) do
- local plugin_name = "apisix.plugins." .. name
- local ok, plugin = pcall(require, plugin_name)
- if ok and plugin.priority then
- priorities[name] = plugin.priority
- table_insert(success, name)
+ if http_plugins then
+ for i, name in ipairs(http_plugins) do
+ local plugin = plugin_get_http(name)
+ if plugin and plugin.priority then
+ priorities[name] = plugin.priority
+ table_insert(success, name)
+ end
+ end
+ end
+
+ if stream_plugins then
+ for i, name in ipairs(stream_plugins) do
+ local plugin = plugin_get_stream(name)
+ if plugin and plugin.priority then
+ priorities[name] = plugin.priority
+ table_insert(success, name)
+ end
end
end
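
> With the `subsystem` argument handled above, plugin schemas and the plugin list can be queried per subsystem. A hedged sketch, assuming the default Admin API address, the usual plugin-list path (`/apisix/admin/plugins/list` in current APISIX), and an `ADMIN_KEY` placeholder:

```bash
# Sketch: list stream plugins, then fetch the schema of a single http plugin.
curl -s "http://127.0.0.1:9180/apisix/admin/plugins/list?subsystem=stream" \
  -H "X-API-KEY: $ADMIN_KEY"

# An unknown subsystem value returns 400; a plugin missing from that subsystem returns 404.
curl -s "http://127.0.0.1:9180/apisix/admin/plugins/limit-count?subsystem=http" \
  -H "X-API-KEY: $ADMIN_KEY"
```
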
diff --git a/apisix/admin/proto.lua b/apisix/admin/proto.lua
index de4d24e23e88..f8133cc80b71 100644
--- a/apisix/admin/proto.lua
+++ b/apisix/admin/proto.lua
@@ -50,7 +50,7 @@ local function check_proto_used(plugins, deleting, ptype, pid)
if type(plugins) == "table" and plugins["grpc-transcode"]
and plugins["grpc-transcode"].proto_id
and tostring(plugins["grpc-transcode"].proto_id) == deleting then
- return false, {error_msg = "can not delete this proto,"
+ return false, {error_msg = "can not delete this proto, "
.. ptype .. " [" .. pid
.. "] is still using it now"}
end
diff --git a/apisix/admin/resource.lua b/apisix/admin/resource.lua
index ac204dcf574a..35fe3bba2476 100644
--- a/apisix/admin/resource.lua
+++ b/apisix/admin/resource.lua
@@ -230,7 +230,7 @@ function _M:put(id, conf, sub_path, args)
end
-- Keep the unused conf to make the args list consistent with other methods
-function _M:delete(id, conf, sub_path)
+function _M:delete(id, conf, sub_path, uri_args)
if core.table.array_find(self.unsupported_methods, "delete") then
return 405, {error_msg = "not supported `DELETE` method for " .. self.kind}
end
@@ -253,7 +253,7 @@ function _M:delete(id, conf, sub_path)
key = key .. "/" .. id
- if self.delete_checker then
+ if self.delete_checker and uri_args.force ~= "true" then
local code, err = self.delete_checker(id)
if err then
return code, err
@@ -279,8 +279,8 @@ function _M:patch(id, conf, sub_path, args)
local typ = nil
if self.name == "secrets" then
local uri_segs = core.utils.split_uri(sub_path)
- if #uri_segs < 2 then
- return 400, {error_msg = "no secret id and/or sub path in uri"}
+ if #uri_segs < 1 then
+ return 400, {error_msg = "no secret id"}
end
typ = id
id = uri_segs[1]
diff --git a/apisix/admin/upstreams.lua b/apisix/admin/upstreams.lua
index a85d24d8d9e1..6c04d9379d52 100644
--- a/apisix/admin/upstreams.lua
+++ b/apisix/admin/upstreams.lua
@@ -15,13 +15,17 @@
-- limitations under the License.
--
local core = require("apisix.core")
+local config_util = require("apisix.core.config_util")
local get_routes = require("apisix.router").http_routes
local get_services = require("apisix.http.service").services
+local get_plugin_configs = require("apisix.plugin_config").plugin_configs
+local get_consumers = require("apisix.consumer").consumers
+local get_consumer_groups = require("apisix.consumer_group").consumer_groups
+local get_global_rules = require("apisix.global_rules").global_rules
local apisix_upstream = require("apisix.upstream")
local resource = require("apisix.admin.resource")
local tostring = tostring
local ipairs = ipairs
-local type = type
local function check_conf(id, conf, need_id)
@@ -34,33 +38,87 @@ local function check_conf(id, conf, need_id)
end
-local function delete_checker(id)
- local routes, routes_ver = get_routes()
- if routes_ver and routes then
- for _, route in ipairs(routes) do
- if type(route) == "table" and route.value
- and route.value.upstream_id
- and tostring(route.value.upstream_id) == id then
- return 400, {error_msg = "can not delete this upstream,"
- .. " route [" .. route.value.id
+local function up_id_in_plugins(plugins, up_id)
+ if plugins and plugins["traffic-split"]
+ and plugins["traffic-split"].rules then
+
+ for _, rule in ipairs(plugins["traffic-split"].rules) do
+ local plugin_upstreams = rule.weighted_upstreams
+ for _, plugin_upstream in ipairs(plugin_upstreams) do
+ if plugin_upstream.upstream_id
+ and tostring(plugin_upstream.upstream_id) == up_id then
+ return true
+ end
+ end
+ end
+
+ return false
+ end
+end
+
+
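+-- scan a resource collection (routes, services, plugin configs, consumers, ...) and return an
+-- error message if any entry still references the upstream, directly or through its plugins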
+local function check_resources_reference(resources, up_id,
+ only_check_plugin, resources_name)
+ if resources then
+ for _, resource in config_util.iterate_values(resources) do
+ if resource and resource.value then
+ if up_id_in_plugins(resource.value.plugins, up_id) then
+ return {error_msg = "can not delete this upstream,"
+ .. " plugin in "
+ .. resources_name .. " ["
+ .. resource.value.id
+ .. "] is still using it now"}
+ end
+
+ if not only_check_plugin and resource.value.upstream_id
+ and tostring(resource.value.upstream_id) == up_id then
+ return {error_msg = "can not delete this upstream, "
+ .. resources_name .. " [" .. resource.value.id
.. "] is still using it now"}
+ end
end
end
end
+end
+
+
+local function delete_checker(id)
+ local routes = get_routes()
+ local err_msg = check_resources_reference(routes, id, false, "route")
+ if err_msg then
+ return 400, err_msg
+ end
local services, services_ver = get_services()
core.log.info("services: ", core.json.delay_encode(services, true))
core.log.info("services_ver: ", services_ver)
- if services_ver and services then
- for _, service in ipairs(services) do
- if type(service) == "table" and service.value
- and service.value.upstream_id
- and tostring(service.value.upstream_id) == id then
- return 400, {error_msg = "can not delete this upstream,"
- .. " service [" .. service.value.id
- .. "] is still using it now"}
- end
- end
+ local err_msg = check_resources_reference(services, id, false, "service")
+ if err_msg then
+ return 400, err_msg
+ end
+
+ local plugin_configs = get_plugin_configs()
+ local err_msg = check_resources_reference(plugin_configs, id, true, "plugin_config")
+ if err_msg then
+ return 400, err_msg
+ end
+
+ local consumers = get_consumers()
+ local err_msg = check_resources_reference(consumers, id, true, "consumer")
+ if err_msg then
+ return 400, err_msg
+ end
+
+ local consumer_groups = get_consumer_groups()
+ local err_msg = check_resources_reference(consumer_groups, id, true, "consumer_group")
+ if err_msg then
+ return 400, err_msg
+ end
+
+ local global_rules = get_global_rules()
+ err_msg = check_resources_reference(global_rules, id, true, "global_rules")
+ if err_msg then
+ return 400, err_msg
end
return nil, nil
diff --git a/apisix/cli/etcd.lua b/apisix/cli/etcd.lua
index 51cac2a508e6..b67248095d92 100644
--- a/apisix/cli/etcd.lua
+++ b/apisix/cli/etcd.lua
@@ -280,103 +280,7 @@ local function prepare_dirs_via_http(yaml_conf, args, index, host, host_count)
end
-local function grpc_request(url, yaml_conf, key)
- local cmd
-
- local auth = ""
- if yaml_conf.etcd.user then
- local user = yaml_conf.etcd.user
- local password = yaml_conf.etcd.password
- auth = str_format("--user=%s:%s", user, password)
- end
-
- if str_sub(url, 1, 8) == "https://" then
- local host = url:sub(9)
-
- local verify = true
- local certificate, pkey, cafile
- if yaml_conf.etcd.tls then
- local cfg = yaml_conf.etcd.tls
-
- if cfg.verify == false then
- verify = false
- end
-
- certificate = cfg.cert
- pkey = cfg.key
-
- local apisix_ssl = yaml_conf.apisix.ssl
- if apisix_ssl and apisix_ssl.ssl_trusted_certificate then
- cafile = apisix_ssl.ssl_trusted_certificate
- end
- end
-
- cmd = str_format(
- "etcdctl --insecure-transport=false %s %s %s %s " ..
- "%s --endpoints=%s put %s init_dir",
- verify and "" or "--insecure-skip-tls-verify",
- certificate and "--cert " .. certificate or "",
- pkey and "--key " .. pkey or "",
- cafile and "--cacert " .. cafile or "",
- auth, host, key)
- else
- local host = url:sub(#("http://") + 1)
-
- cmd = str_format(
- "etcdctl %s --endpoints=%s put %s init_dir",
- auth, host, key)
- end
-
- local res, err = util.execute_cmd(cmd)
- return res, err
-end
-
-
-local function prepare_dirs_via_grpc(yaml_conf, args, index, host)
- local is_success = true
-
- local errmsg
- local dirs = {}
- for name in pairs(constants.HTTP_ETCD_DIRECTORY) do
- dirs[name] = true
- end
- for name in pairs(constants.STREAM_ETCD_DIRECTORY) do
- dirs[name] = true
- end
-
- for dir_name in pairs(dirs) do
- local key = (yaml_conf.etcd.prefix or "") .. dir_name .. "/"
- local res, err
- local retry_time = 0
- while retry_time < 2 do
- res, err = grpc_request(host, yaml_conf, key)
- retry_time = retry_time + 1
- if res then
- break
- end
- print(str_format("Warning! Request etcd endpoint \'%s\' error, %s, retry time=%s",
- host, err, retry_time))
- end
-
- if not res then
- errmsg = str_format("request etcd endpoint \"%s\" error, %s\n", host, err)
- util.die(errmsg)
- end
-
- if args and args["verbose"] then
- print(res)
- end
- end
-
- return is_success
-end
-
-
-local function prepare_dirs(use_grpc, yaml_conf, args, index, host, host_count)
- if use_grpc then
- return prepare_dirs_via_grpc(yaml_conf, args, index, host)
- end
-
+local function prepare_dirs(yaml_conf, args, index, host, host_count)
return prepare_dirs_via_http(yaml_conf, args, index, host, host_count)
end
@@ -400,8 +304,6 @@ function _M.init(env, args)
util.die("failed to read `etcd` field from yaml file when init etcd")
end
- local etcd_conf = yaml_conf.etcd
-
-- convert old single etcd config to multiple etcd config
if type(yaml_conf.etcd.host) == "string" then
yaml_conf.etcd.host = {yaml_conf.etcd.host}
@@ -477,22 +379,9 @@ function _M.init(env, args)
util.die("the etcd cluster needs at least 50% and above healthy nodes\n")
end
- if etcd_conf.use_grpc and not env.use_apisix_base then
- io_stderr:write("'use_grpc: true' in the etcd configuration " ..
- "is not supported by vanilla OpenResty\n")
- end
-
- local use_grpc = etcd_conf.use_grpc and env.use_apisix_base
- if use_grpc then
- local ok, err = util.execute_cmd("command -v etcdctl")
- if not ok then
- util.die("can't find etcdctl: ", err, "\n")
- end
- end
-
local etcd_ok = false
for index, host in ipairs(etcd_healthy_hosts) do
- if prepare_dirs(use_grpc, yaml_conf, args, index, host, host_count) then
+ if prepare_dirs(yaml_conf, args, index, host, host_count) then
etcd_ok = true
break
end
diff --git a/apisix/cli/file.lua b/apisix/cli/file.lua
index 2fe9edd030b3..94e790db65e3 100644
--- a/apisix/cli/file.lua
+++ b/apisix/cli/file.lua
@@ -54,8 +54,59 @@ local function tab_is_array(t)
end
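+-- substitute one string value: resolves ${{VAR}} and ${{VAR := default}} using environment variables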
+local function var_sub(val)
+ local err
+ local var_used = false
+ -- we use '${{var}}' because '$var' and '${var}' are taken
+ -- by Nginx
+ local new_val = val:gsub("%$%{%{%s*([%w_]+[%:%=]?.-)%s*%}%}", function(var)
+ local i, j = var:find("%:%=")
+ local default
+ if i and j then
+ default = var:sub(i + 2, #var)
+ default = default:gsub('^%s*(.-)%s*$', '%1')
+ var = var:sub(1, i - 1)
+ end
+
+ local v = getenv(var) or default
+ if v then
+ if not exported_vars then
+ exported_vars = {}
+ end
+
+ exported_vars[var] = v
+ var_used = true
+ return v
+ end
+
+ err = "failed to handle configuration: " ..
+ "can't find environment variable " .. var
+ return ""
+ end)
+ return new_val, var_used, err
+end
+
+
local function resolve_conf_var(conf)
+ local new_keys = {}
for key, val in pairs(conf) do
+ -- avoid re-iterating the table for already iterated key
+ if new_keys[key] then
+ goto continue
+ end
+ -- substitute environment variables from conf keys
+ if type(key) == "string" then
+ local new_key, _, err = var_sub(key)
+ if err then
+ return nil, err
+ end
+ if new_key ~= key then
+ new_keys[new_key] = "dummy" -- we only care about checking the key
+ conf[key] = nil
+ conf[new_key] = val
+ key = new_key
+ end
+ end
if type(val) == "table" then
local ok, err = resolve_conf_var(val)
if not ok then
@@ -63,34 +114,7 @@ local function resolve_conf_var(conf)
end
elseif type(val) == "string" then
- local err
- local var_used = false
- -- we use '${{var}}' because '$var' and '${var}' are taken
- -- by Nginx
- local new_val = val:gsub("%$%{%{%s*([%w_]+[%:%=]?.-)%s*%}%}", function(var)
- local i, j = var:find("%:%=")
- local default
- if i and j then
- default = var:sub(i + 2, #var)
- default = default:gsub('^%s*(.-)%s*$', '%1')
- var = var:sub(1, i - 1)
- end
-
- local v = getenv(var) or default
- if v then
- if not exported_vars then
- exported_vars = {}
- end
-
- exported_vars[var] = v
- var_used = true
- return v
- end
-
- err = "failed to handle configuration: " ..
- "can't find environment variable " .. var
- return ""
- end)
+ local new_val, var_used, err = var_sub(val)
if err then
return nil, err
@@ -108,6 +132,7 @@ local function resolve_conf_var(conf)
conf[key] = new_val
end
+ ::continue::
end
return true
@@ -223,7 +248,10 @@ function _M.read_yaml_conf(apisix_home)
return nil, "invalid config-default.yaml file"
end
- local_conf_path = profile:yaml_path("config")
+ local_conf_path = profile:customized_yaml_path()
+ if not local_conf_path then
+ local_conf_path = profile:yaml_path("config")
+ end
local user_conf_yaml, err = util.read_file(local_conf_path)
if not user_conf_yaml then
return nil, err
@@ -264,26 +292,14 @@ function _M.read_yaml_conf(apisix_home)
default_conf.apisix.enable_admin = true
elseif default_conf.deployment.role == "data_plane" then
+ default_conf.etcd = default_conf.deployment.etcd
if default_conf.deployment.role_data_plane.config_provider == "yaml" then
default_conf.deployment.config_provider = "yaml"
elseif default_conf.deployment.role_data_plane.config_provider == "xds" then
default_conf.deployment.config_provider = "xds"
- else
- default_conf.etcd = default_conf.deployment.role_data_plane.control_plane
end
default_conf.apisix.enable_admin = false
end
-
- if default_conf.etcd and default_conf.deployment.certs then
- -- copy certs configuration to keep backward compatible
- local certs = default_conf.deployment.certs
- local etcd = default_conf.etcd
- if not etcd.tls then
- etcd.tls = {}
- end
- etcd.tls.cert = certs.cert
- etcd.tls.key = certs.cert_key
- end
end
if default_conf.deployment.config_provider == "yaml" then
diff --git a/apisix/cli/ngx_tpl.lua b/apisix/cli/ngx_tpl.lua
index c81430d9e096..76c1832f3b32 100644
--- a/apisix/cli/ngx_tpl.lua
+++ b/apisix/cli/ngx_tpl.lua
@@ -115,16 +115,12 @@ http {
}
}
{% end %}
-
- {% if conf_server then %}
- {* conf_server *}
- {% end %}
}
{% end %}
{% end %}
-{% if stream_proxy then %}
+{% if enable_stream then %}
stream {
lua_package_path "{*extra_lua_path*}$prefix/deps/share/lua/5.1/?.lua;$prefix/deps/share/lua/5.1/?/init.lua;]=]
.. [=[{*apisix_lua_home*}/?.lua;{*apisix_lua_home*}/?/init.lua;;{*lua_path*};";
@@ -369,8 +365,12 @@ http {
log_format main escape={* http.access_log_format_escape *} '{* http.access_log_format *}';
uninitialized_variable_warn off;
+ {% if http.access_log_buffer then %}
+ access_log {* http.access_log *} main buffer={* http.access_log_buffer *} flush=3;
+ {% else %}
access_log {* http.access_log *} main buffer=16384 flush=3;
{% end %}
+ {% end %}
open_file_cache max=1000 inactive=60;
client_max_body_size {* http.client_max_body_size *};
keepalive_timeout {* http.keepalive_timeout *};
@@ -576,10 +576,6 @@ http {
}
{% end %}
- {% if conf_server then %}
- {* conf_server *}
- {% end %}
-
{% if deployment_role ~= "control_plane" then %}
{% if enabled_plugins["proxy-cache"] then %}
@@ -661,6 +657,10 @@ http {
}
{% if ssl.enable then %}
+ ssl_client_hello_by_lua_block {
+ apisix.http_ssl_client_hello_phase()
+ }
+
ssl_certificate_by_lua_block {
apisix.http_ssl_phase()
}
@@ -672,6 +672,7 @@ http {
{% end %}
location / {
+ set $upstream_mirror_host '';
set $upstream_mirror_uri '';
set $upstream_upgrade '';
set $upstream_connection '';
@@ -711,16 +712,11 @@ http {
### the following x-forwarded-* headers is to send to upstream server
- set $var_x_forwarded_for $remote_addr;
set $var_x_forwarded_proto $scheme;
set $var_x_forwarded_host $host;
set $var_x_forwarded_port $server_port;
- if ($http_x_forwarded_for != "") {
- set $var_x_forwarded_for "${http_x_forwarded_for}, ${realip_remote_addr}";
- }
-
- proxy_set_header X-Forwarded-For $var_x_forwarded_for;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $var_x_forwarded_proto;
proxy_set_header X-Forwarded-Host $var_x_forwarded_host;
proxy_set_header X-Forwarded-Port $var_x_forwarded_port;
@@ -779,9 +775,14 @@ http {
grpc_set_header "Host" $upstream_host;
{% end %}
grpc_set_header Content-Type application/grpc;
+ grpc_set_header TE trailers;
grpc_socket_keepalive on;
grpc_pass $upstream_scheme://apisix_backend;
+ {% if enabled_plugins["proxy-mirror"] then %}
+ mirror /proxy_mirror_grpc;
+ {% end %}
+
header_filter_by_lua_block {
apisix.http_header_filter_phase()
}
@@ -846,6 +847,32 @@ http {
proxy_pass $upstream_mirror_uri;
}
{% end %}
+
+ {% if enabled_plugins["proxy-mirror"] then %}
+ location = /proxy_mirror_grpc {
+ internal;
+
+ {% if not use_apisix_base then %}
+ if ($upstream_mirror_uri = "") {
+ return 200;
+ }
+ {% end %}
+
+
+ {% if proxy_mirror_timeouts then %}
+ {% if proxy_mirror_timeouts.connect then %}
+ grpc_connect_timeout {* proxy_mirror_timeouts.connect *};
+ {% end %}
+ {% if proxy_mirror_timeouts.read then %}
+ grpc_read_timeout {* proxy_mirror_timeouts.read *};
+ {% end %}
+ {% if proxy_mirror_timeouts.send then %}
+ grpc_send_timeout {* proxy_mirror_timeouts.send *};
+ {% end %}
+ {% end %}
+ grpc_pass $upstream_mirror_host;
+ }
+ {% end %}
}
{% end %}
diff --git a/apisix/cli/ops.lua b/apisix/cli/ops.lua
index e7f86f562e22..a5a6975d80bd 100644
--- a/apisix/cli/ops.lua
+++ b/apisix/cli/ops.lua
@@ -21,7 +21,6 @@ local file = require("apisix.cli.file")
local schema = require("apisix.cli.schema")
local ngx_tpl = require("apisix.cli.ngx_tpl")
local cli_ip = require("apisix.cli.ip")
-local snippet = require("apisix.cli.snippet")
local profile = require("apisix.core.profile")
local template = require("resty.template")
local argparse = require("argparse")
@@ -58,7 +57,7 @@ local function help()
print([[
Usage: apisix [action]
-help: show this message, then exit
+help: print the apisix cli help message
init: initialize the local nginx.conf
init_etcd: initialize the data of etcd
start: start the apisix server
@@ -189,6 +188,13 @@ local function init(env)
and #allow_admin == 1 and allow_admin[1] == "127.0.0.0/24" then
checked_admin_key = true
end
+ -- check if admin_key is required
+ if yaml_conf.deployment.admin.admin_key_required == false then
+ checked_admin_key = true
+ print("Warning! Admin key is bypassed! "
+ .. "If you are deploying APISIX in a production environment, "
+ .. "please disable `admin_key_required` and set a secure admin key!")
+ end
if yaml_conf.apisix.enable_admin and not checked_admin_key then
local help = [[
@@ -251,7 +257,7 @@ Please modify "admin_key" in conf/config.yaml .
util.die("can not find openresty\n")
end
- local need_ver = "1.19.3"
+ local need_ver = "1.21.4"
if not version_greater_equal(or_ver, need_ver) then
util.die("openresty version must >=", need_ver, " current ", or_ver, "\n")
end
@@ -262,11 +268,24 @@ Please modify "admin_key" in conf/config.yaml .
"your openresty, please check it out.\n")
end
+ --- http is enabled by default
local enable_http = true
- if not yaml_conf.apisix.enable_admin and yaml_conf.apisix.stream_proxy and
- yaml_conf.apisix.stream_proxy.only ~= false
- then
- enable_http = false
+ --- stream is disabled by default
+ local enable_stream = false
+ if yaml_conf.apisix.proxy_mode then
+ --- check for "http"
+ if yaml_conf.apisix.proxy_mode == "http" then
+ enable_http = true
+ enable_stream = false
+ --- check for "stream"
+ elseif yaml_conf.apisix.proxy_mode == "stream" then
+ enable_stream = true
+ enable_http = false
+ --- check for "http&stream"
+ elseif yaml_conf.apisix.proxy_mode == "http&stream" then
+ enable_stream = true
+ enable_http = true
+ end
end
local enabled_discoveries = {}
@@ -481,7 +500,7 @@ Please modify "admin_key" in conf/config.yaml .
local tcp_enable_ssl
-- compatible with the original style which only has the addr
- if yaml_conf.apisix.stream_proxy and yaml_conf.apisix.stream_proxy.tcp then
+ if enable_stream and yaml_conf.apisix.stream_proxy and yaml_conf.apisix.stream_proxy.tcp then
local tcp = yaml_conf.apisix.stream_proxy.tcp
for i, item in ipairs(tcp) do
if type(item) ~= "table" then
@@ -513,11 +532,6 @@ Please modify "admin_key" in conf/config.yaml .
proxy_mirror_timeouts = yaml_conf.plugin_attr["proxy-mirror"].timeout
end
- local conf_server, err = snippet.generate_conf_server(env, yaml_conf)
- if err then
- util.die(err, "\n")
- end
-
if yaml_conf.deployment and yaml_conf.deployment.role then
local role = yaml_conf.deployment.role
env.deployment_role = role
@@ -542,6 +556,7 @@ Please modify "admin_key" in conf/config.yaml .
use_apisix_base = env.use_apisix_base,
error_log = {level = "warn"},
enable_http = enable_http,
+ enable_stream = enable_stream,
enabled_discoveries = enabled_discoveries,
enabled_plugins = enabled_plugins,
enabled_stream_plugins = enabled_stream_plugins,
@@ -551,7 +566,6 @@ Please modify "admin_key" in conf/config.yaml .
control_server_addr = control_server_addr,
prometheus_server_addr = prometheus_server_addr,
proxy_mirror_timeouts = proxy_mirror_timeouts,
- conf_server = conf_server,
opentelemetry_set_ngx_var = opentelemetry_set_ngx_var
}
@@ -737,7 +751,22 @@ local function init_etcd(env, args)
end
+local function cleanup(env)
+ if env.apisix_home then
+ profile.apisix_home = env.apisix_home
+ end
+
+ os_remove(profile:customized_yaml_index())
+end
+
+
local function start(env, ...)
+ cleanup(env)
+
+ if env.apisix_home then
+ profile.apisix_home = env.apisix_home
+ end
+
-- Because the worker process started by apisix has "nobody" permission,
-- it cannot access the `/root` directory. Therefore, it is necessary to
-- prohibit APISIX from running in the /root directory.
@@ -783,39 +812,34 @@ local function start(env, ...)
-- start a new APISIX instance
- local conf_server_sock_path = env.apisix_home .. "/conf/config_listen.sock"
- if pl_path.exists(conf_server_sock_path) then
- -- remove stale sock (if exists) so that APISIX can start
- local ok, err = os_remove(conf_server_sock_path)
- if not ok then
- util.die("failed to remove stale conf server sock file, error: ", err)
- end
- end
-
local parser = argparse()
parser:argument("_", "Placeholder")
parser:option("-c --config", "location of customized config.yaml")
-- TODO: more logs for APISIX cli could be added using this feature
- parser:flag("--verbose", "show init_etcd debug information")
+ parser:flag("-v --verbose", "show init_etcd debug information")
local args = parser:parse()
local customized_yaml = args["config"]
if customized_yaml then
- profile.apisix_home = env.apisix_home .. "/"
- local local_conf_path = profile:yaml_path("config")
- local local_conf_path_bak = local_conf_path .. ".bak"
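+ -- resolve the customized config path: absolute paths are used as-is, relative paths are
+ -- resolved against the current working directory before being recorded in the index file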
+ local customized_yaml_path
+ local idx = str_find(customized_yaml, "/")
+ if idx and idx == 1 then
+ customized_yaml_path = customized_yaml
+ else
+ local cur_dir, err = lfs.currentdir()
+ if err then
+ util.die("failed to get current directory")
+ end
+ customized_yaml_path = cur_dir .. "/" .. customized_yaml
+ end
- local ok, err = os_rename(local_conf_path, local_conf_path_bak)
- if not ok then
- util.die("failed to backup config, error: ", err)
+ if not util.file_exists(customized_yaml_path) then
+ util.die("customized config file not exists, path: " .. customized_yaml_path)
end
- local ok, err1 = lfs.link(customized_yaml, local_conf_path)
+
+ local ok, err = util.write_file(profile:customized_yaml_index(), customized_yaml_path)
if not ok then
- ok, err = os_rename(local_conf_path_bak, local_conf_path)
- if not ok then
- util.die("failed to recover original config file, error: ", err)
- end
- util.die("failed to link customized config, error: ", err1)
+ util.die("write customized config index failed, err: " .. err)
end
print("Use customized yaml: ", customized_yaml)
@@ -831,22 +855,6 @@ local function start(env, ...)
end
-local function cleanup()
- local local_conf_path = profile:yaml_path("config")
- local local_conf_path_bak = local_conf_path .. ".bak"
- if pl_path.exists(local_conf_path_bak) then
- local ok, err = os_remove(local_conf_path)
- if not ok then
- print("failed to remove customized config, error: ", err)
- end
- ok, err = os_rename(local_conf_path_bak, local_conf_path)
- if not ok then
- util.die("failed to recover original config file, error: ", err)
- end
- end
-end
-
-
local function test(env, backup_ngx_conf)
-- backup nginx.conf
local ngx_conf_path = env.apisix_home .. "/conf/nginx.conf"
@@ -886,7 +894,7 @@ end
local function quit(env)
- cleanup()
+ cleanup(env)
local cmd = env.openresty_args .. [[ -s quit]]
util.execute_cmd(cmd)
@@ -894,7 +902,7 @@ end
local function stop(env)
- cleanup()
+ cleanup(env)
local cmd = env.openresty_args .. [[ -s stop]]
util.execute_cmd(cmd)
diff --git a/apisix/cli/schema.lua b/apisix/cli/schema.lua
index 477e2ce7e56d..6f6450b46e0c 100644
--- a/apisix/cli/schema.lua
+++ b/apisix/cli/schema.lua
@@ -62,11 +62,6 @@ local etcd_schema = {
minimum = 1,
description = "etcd connection timeout in seconds",
},
- use_grpc = {
- type = "boolean",
- -- TODO: set true by default in v3.2
- default = false,
- },
},
required = {"prefix", "host"}
}
@@ -136,6 +131,10 @@ local config_schema = {
}
}
},
+ proxy_mode = {
+ type = "string",
+ enum = {"http", "stream", "http&stream"},
+ },
stream_proxy = {
type = "object",
properties = {
@@ -353,6 +352,9 @@ local admin_schema = {
https_admin = {
type = "boolean",
},
+ admin_key_required = {
+ type = "boolean",
+ },
}
}
@@ -381,60 +383,23 @@ local deployment_schema = {
config_provider = {
enum = {"etcd"}
},
- conf_server = {
- properties = {
- listen = {
- type = "string",
- default = "0.0.0.0:9280",
- },
- cert = { type = "string" },
- cert_key = { type = "string" },
- client_ca_cert = { type = "string" },
- },
- required = {"cert", "cert_key"}
- },
- },
- required = {"config_provider", "conf_server"}
- },
- certs = {
- properties = {
- cert = { type = "string" },
- cert_key = { type = "string" },
- trusted_ca_cert = { type = "string" },
- },
- dependencies = {
- cert = {
- required = {"cert_key"},
- },
},
- default = {},
+ required = {"config_provider"}
},
},
required = {"etcd", "role_control_plane"}
},
data_plane = {
properties = {
+ etcd = etcd_schema,
role_data_plane = {
properties = {
config_provider = {
- enum = {"control_plane", "yaml", "xds"}
+ enum = {"etcd", "yaml", "xds"}
},
},
required = {"config_provider"}
},
- certs = {
- properties = {
- cert = { type = "string" },
- cert_key = { type = "string" },
- trusted_ca_cert = { type = "string" },
- },
- dependencies = {
- cert = {
- required = {"cert_key"},
- },
- },
- default = {},
- },
},
required = {"role_data_plane"}
}
diff --git a/apisix/cli/snippet.lua b/apisix/cli/snippet.lua
deleted file mode 100644
index ca7ed3f7c59a..000000000000
--- a/apisix/cli/snippet.lua
+++ /dev/null
@@ -1,203 +0,0 @@
---
--- Licensed to the Apache Software Foundation (ASF) under one or more
--- contributor license agreements. See the NOTICE file distributed with
--- this work for additional information regarding copyright ownership.
--- The ASF licenses this file to You under the Apache License, Version 2.0
--- (the "License"); you may not use this file except in compliance with
--- the License. You may obtain a copy of the License at
---
--- http://www.apache.org/licenses/LICENSE-2.0
---
--- Unless required by applicable law or agreed to in writing, software
--- distributed under the License is distributed on an "AS IS" BASIS,
--- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
--- See the License for the specific language governing permissions and
--- limitations under the License.
---
-local template = require("resty.template")
-local pl_path = require("pl.path")
-local ipairs = ipairs
-
-
--- this module provide methods to generate snippets which will be used in the nginx.conf template
-local _M = {}
-local conf_server_tpl = [[
-upstream apisix_conf_backend {
- server 0.0.0.0:80;
- balancer_by_lua_block {
- local conf_server = require("apisix.conf_server")
- conf_server.balancer()
- }
-}
-
-{% if trusted_ca_cert then %}
-lua_ssl_trusted_certificate {* trusted_ca_cert *};
-{% end %}
-
-server {
- {% if control_plane then %}
- {% if directive_prefix == "grpc" then %}
- listen {* control_plane.listen *} ssl http2;
- {% else %}
- listen {* control_plane.listen *} ssl;
- {% end %}
- ssl_certificate {* control_plane.cert *};
- ssl_certificate_key {* control_plane.cert_key *};
-
- {% if control_plane.client_ca_cert then %}
- ssl_verify_client on;
- ssl_client_certificate {* control_plane.client_ca_cert *};
- {% end %}
-
- {% else %}
- {% if directive_prefix == "grpc" then %}
- listen unix:{* home *}/conf/config_listen.sock http2;
- {% else %}
- listen unix:{* home *}/conf/config_listen.sock;
- {% end %}
- {% end %}
-
- access_log off;
-
- set $upstream_host '';
-
- access_by_lua_block {
- local conf_server = require("apisix.conf_server")
- conf_server.access()
- }
-
- location / {
- {% if enable_https then %}
- {* directive_prefix *}_pass {* scheme_name *}s://apisix_conf_backend;
- {* directive_prefix *}_ssl_protocols TLSv1.2 TLSv1.3;
- {* directive_prefix *}_ssl_server_name on;
-
- {% if etcd_tls_verify then %}
- {* directive_prefix *}_ssl_verify on;
- {* directive_prefix *}_ssl_trusted_certificate {* ssl_trusted_certificate *};
- {% end %}
-
- {% if sni then %}
- {* directive_prefix *}_ssl_name {* sni *};
- {% else %}
- {* directive_prefix *}_ssl_name $upstream_host;
- {% end %}
-
- {% if client_cert then %}
- {* directive_prefix *}_ssl_certificate {* client_cert *};
- {* directive_prefix *}_ssl_certificate_key {* client_cert_key *};
- {% end %}
-
- {% else %}
- {* directive_prefix *}_pass {* scheme_name *}://apisix_conf_backend;
- {% end %}
-
- {% if scheme_name == "http" then %}
- proxy_http_version 1.1;
- proxy_set_header Connection "";
- {% end %}
-
- {* directive_prefix *}_set_header Host $upstream_host;
- {* directive_prefix *}_next_upstream error timeout non_idempotent
- http_500 http_502 http_503 http_504;
- }
-
- log_by_lua_block {
- local conf_server = require("apisix.conf_server")
- conf_server.log()
- }
-}
-]]
-
-
-local function is_grpc_used(env, etcd)
- local is_grpc_available = env.use_apisix_base
- return is_grpc_available and etcd.use_grpc
-end
-
-
-function _M.generate_conf_server(env, conf)
- if not (conf.deployment and (
- conf.deployment.role == "traditional" or
- conf.deployment.role == "control_plane"))
- then
- return nil, nil
- end
-
- -- we use proxy even the role is traditional so that we can test the proxy in daily dev
- local etcd = conf.deployment.etcd
- local servers = etcd.host
- local enable_https = false
- local prefix = "https://"
- if servers[1]:find(prefix, 1, true) then
- enable_https = true
- end
-
- for i, s in ipairs(servers) do
- if (s:find(prefix, 1, true) ~= nil) ~= enable_https then
- return nil, "all nodes in the etcd cluster should enable/disable TLS together"
- end
-
- local _, to = s:find("://", 1, true)
- if not to then
- return nil, "bad etcd endpoint format"
- end
- end
-
- local control_plane
- if conf.deployment.role == "control_plane" then
- control_plane = conf.deployment.role_control_plane.conf_server
- control_plane.cert = pl_path.abspath(control_plane.cert)
- control_plane.cert_key = pl_path.abspath(control_plane.cert_key)
-
- if control_plane.client_ca_cert then
- control_plane.client_ca_cert = pl_path.abspath(control_plane.client_ca_cert)
- end
- end
-
- local trusted_ca_cert
- if conf.deployment.certs then
- if conf.deployment.certs.trusted_ca_cert then
- trusted_ca_cert = pl_path.abspath(conf.deployment.certs.trusted_ca_cert)
- end
- end
-
- local conf_render = template.compile(conf_server_tpl)
- local tls = etcd.tls
- local client_cert
- local client_cert_key
- local ssl_trusted_certificate
- local etcd_tls_verify
- local use_grpc = is_grpc_used(env, etcd)
- if tls then
- if tls.cert then
- client_cert = pl_path.abspath(tls.cert)
- client_cert_key = pl_path.abspath(tls.key)
- end
-
- etcd_tls_verify = tls.verify
- if enable_https and etcd_tls_verify then
- if not conf.apisix.ssl.ssl_trusted_certificate then
- return nil, "should set ssl_trusted_certificate if etcd tls verify is enabled"
- end
- ssl_trusted_certificate = pl_path.abspath(conf.apisix.ssl.ssl_trusted_certificate)
- end
- end
-
- return conf_render({
- sni = tls and tls.sni,
- home = env.apisix_home or ".",
- control_plane = control_plane,
- enable_https = enable_https,
- client_cert = client_cert,
- client_cert_key = client_cert_key,
- trusted_ca_cert = trusted_ca_cert,
- etcd_tls_verify = etcd_tls_verify,
- ssl_trusted_certificate = ssl_trusted_certificate,
- scheme_name = use_grpc and "grpc" or "http",
- directive_prefix = use_grpc and "grpc" or "proxy",
- })
-end
-
-
-return _M
diff --git a/apisix/cli/util.lua b/apisix/cli/util.lua
index cc6206a844e1..bcd56a241aa8 100644
--- a/apisix/cli/util.lua
+++ b/apisix/cli/util.lua
@@ -19,6 +19,7 @@ local require = require
local pcall = pcall
local open = io.open
local popen = io.popen
+local close = io.close
local exit = os.exit
local stderr = io.stderr
local str_format = string.format
@@ -127,4 +128,9 @@ function _M.write_file(file_path, data)
end
+function _M.file_exists(file_path)
+ local f = open(file_path, "r")
+ return f ~= nil and close(f)
+end
+
return _M
diff --git a/apisix/consumer_group.lua b/apisix/consumer_group.lua
index 8c17bdd715e5..3be59ec923f9 100644
--- a/apisix/consumer_group.lua
+++ b/apisix/consumer_group.lua
@@ -39,6 +39,14 @@ function _M.init_worker()
end
+function _M.consumer_groups()
+ if not consumer_groups then
+ return nil, nil
+ end
+ return consumer_groups.values, consumer_groups.conf_version
+end
+
+
function _M.get(id)
return consumer_groups:get(id)
end
diff --git a/apisix/control/v1.lua b/apisix/control/v1.lua
index fd031a473ca2..3143ae5948fc 100644
--- a/apisix/control/v1.lua
+++ b/apisix/control/v1.lua
@@ -14,6 +14,7 @@
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
+local require = require
local core = require("apisix.core")
local plugin = require("apisix.plugin")
local get_routes = require("apisix.router").http_routes
@@ -22,6 +23,7 @@ local upstream_mod = require("apisix.upstream")
local get_upstreams = upstream_mod.upstreams
local collectgarbage = collectgarbage
local ipairs = ipairs
+local pcall = pcall
local str_format = string.format
local ngx_var = ngx.var
@@ -62,52 +64,137 @@ function _M.schema()
end
-local function extra_checker_info(value, src_type)
- local checker = value.checker
- local upstream = value.checker_upstream
- local host = upstream.checks and upstream.checks.active and upstream.checks.active.host
- local port = upstream.checks and upstream.checks.active and upstream.checks.active.port
- local nodes = upstream.nodes
- local healthy_nodes = core.table.new(#nodes, 0)
- for _, node in ipairs(nodes) do
- local ok = checker:get_target_status(node.host, port or node.port, host)
- if ok then
- core.table.insert(healthy_nodes, node)
- end
+local healthcheck
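+-- resty.healthcheck is loaded lazily; the per-target status list is read directly from it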
+local function extra_checker_info(value)
+ if not healthcheck then
+ healthcheck = require("resty.healthcheck")
end
- local conf = value.value
+ local name = upstream_mod.get_healthchecker_name(value)
+ local nodes, err = healthcheck.get_target_list(name, "upstream-healthcheck")
+ if err then
+ core.log.error("healthcheck.get_target_list failed: ", err)
+ end
return {
- name = upstream_mod.get_healthchecker_name(value),
- src_id = conf.id,
- src_type = src_type,
+ name = value.key,
nodes = nodes,
- healthy_nodes = healthy_nodes,
}
end
-local function iter_and_add_healthcheck_info(infos, values, src_type)
+local function get_checker_type(checks)
+ if checks.active and checks.active.type then
+ return checks.active.type
+ elseif checks.passive and checks.passive.type then
+ return checks.passive.type
+ end
+end
+
+
+local function iter_and_add_healthcheck_info(infos, values)
if not values then
return
end
for _, value in core.config_util.iterate_values(values) do
- if value.checker then
- core.table.insert(infos, extra_checker_info(value, src_type))
+ local checks = value.value.checks or (value.value.upstream and value.value.upstream.checks)
+ if checks then
+ local info = extra_checker_info(value)
+ info.type = get_checker_type(checks)
+ core.table.insert(infos, info)
end
end
end
-function _M.get_health_checkers()
+local HTML_TEMPLATE = [[
+<html>
+<head><title>APISIX upstream check status</title></head>
+<body>
+<h1>APISIX upstream check status</h1>
+<table style="background-color:white" cellspacing="0" cellpadding="3" border="1">
+  <tr bgcolor="#C0C0C0">
+    <th>Index</th>
+    <th>Upstream</th>
+    <th>Check type</th>
+    <th>Host</th>
+    <th>Status</th>
+    <th>Success counts</th>
+    <th>TCP Failures</th>
+    <th>HTTP Failures</th>
+    <th>TIMEOUT Failures</th>
+  </tr>
+{% local i = 0 %}
+{% for _, stat in ipairs(stats) do %}
+{% for _, node in ipairs(stat.nodes) do %}
+{% i = i + 1 %}
+  {% if node.status == "healthy" then %}
+  <tr>
+  {% else %}
+  <tr bgcolor="#FF0000">
+  {% end %}
+    <td>{* i *}</td>
+    <td>{* stat.name *}</td>
+    <td>{* stat.type *}</td>
+    <td>{* node.ip .. ":" .. node.port *}</td>
+    <td>{* node.status *}</td>
+    <td>{* node.counter.success *}</td>
+    <td>{* node.counter.tcp_failure *}</td>
+    <td>{* node.counter.http_failure *}</td>
+    <td>{* node.counter.timeout_failure *}</td>
+  </tr>
+{% end %}
+{% end %}
+</table>
+</body>
+</html>
+]]
+
+local html_render
+
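+-- render the HTML status page only when the client's Accept header contains text/html;
+-- otherwise the caller falls back to the JSON response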
+local function try_render_html(data)
+ if not html_render then
+ local template = require("resty.template")
+ html_render = template.compile(HTML_TEMPLATE)
+ end
+ local accept = ngx_var.http_accept
+ if accept and accept:find("text/html") then
+ local ok, out = pcall(html_render, data)
+ if not ok then
+ local err = str_format("HTML template rendering: %s", out)
+ core.log.error(err)
+ return nil, err
+ end
+ return out
+ end
+end
+
+
+local function _get_health_checkers()
local infos = {}
local routes = get_routes()
- iter_and_add_healthcheck_info(infos, routes, "routes")
+ iter_and_add_healthcheck_info(infos, routes)
local services = get_services()
- iter_and_add_healthcheck_info(infos, services, "services")
+ iter_and_add_healthcheck_info(infos, services)
local upstreams = get_upstreams()
- iter_and_add_healthcheck_info(infos, upstreams, "upstreams")
+ iter_and_add_healthcheck_info(infos, upstreams)
+ return infos
+end
+
+
+function _M.get_health_checkers()
+ local infos = _get_health_checkers()
+ local out, err = try_render_html({stats=infos})
+ if out then
+ core.response.set_header("Content-Type", "text/html")
+ return 200, out
+ end
+ if err then
+ return 503, {error_msg = err}
+ end
+
return 200, infos
end
@@ -119,11 +206,15 @@ local function iter_and_find_healthcheck_info(values, src_type, src_id)
for _, value in core.config_util.iterate_values(values) do
if value.value.id == src_id then
- if not value.checker then
+ local checks = value.value.checks or
+ (value.value.upstream and value.value.upstream.checks)
+ if not checks then
return nil, str_format("no checker for %s[%s]", src_type, src_id)
end
- return extra_checker_info(value, src_type)
+ local info = extra_checker_info(value)
+ info.type = get_checker_type(checks)
+ return info
end
end
@@ -155,6 +246,16 @@ function _M.get_health_checker()
if not info then
return 404, {error_msg = err}
end
+
+ local out, err = try_render_html({stats={info}})
+ if out then
+ core.response.set_header("Content-Type", "text/html")
+ return 200, out
+ end
+ if err then
+ return 503, {error_msg = err}
+ end
+
return 200, info
end
@@ -372,5 +473,6 @@ return {
methods = {"GET"},
uris = {"/plugin_metadata/*"},
handler = _M.dump_plugin_metadata,
- }
+ },
+ get_health_checkers = _get_health_checkers,
}
diff --git a/apisix/core/config_etcd.lua b/apisix/core/config_etcd.lua
index 4946cc5c22c8..357f24fa1e6e 100644
--- a/apisix/core/config_etcd.lua
+++ b/apisix/core/config_etcd.lua
@@ -27,6 +27,10 @@ local json = require("apisix.core.json")
local etcd_apisix = require("apisix.core.etcd")
local core_str = require("apisix.core.string")
local new_tab = require("table.new")
+local inspect = require("inspect")
+local errlog = require("ngx.errlog")
+local log_level = errlog.get_sys_filter_level()
+local NGX_INFO = ngx.INFO
local check_schema = require("apisix.core.schema").check
local exiting = ngx.worker.exiting
local insert_tab = table.insert
@@ -43,9 +47,17 @@ local xpcall = xpcall
local debug = debug
local string = string
local error = error
+local pairs = pairs
+local next = next
+local assert = assert
local rand = math.random
local constants = require("apisix.constants")
local health_check = require("resty.etcd.health_check")
+local semaphore = require("ngx.semaphore")
+local tablex = require("pl.tablex")
+local ngx_thread_spawn = ngx.thread.spawn
+local ngx_thread_kill = ngx.thread.kill
+local ngx_thread_wait = ngx.thread.wait
local is_http = ngx.config.subsystem == "http"
@@ -58,6 +70,7 @@ if not is_http then
end
local created_obj = {}
local loaded_configuration = {}
+local watch_ctx
local _M = {
@@ -75,6 +88,232 @@ local mt = {
}
+local get_etcd
+do
+ local etcd_cli
+
+ function get_etcd()
+ if etcd_cli ~= nil then
+ return etcd_cli
+ end
+
+ local _, err
+ etcd_cli, _, err = etcd_apisix.get_etcd_syncer()
+ return etcd_cli, err
+ end
+end
+
+
+local function cancel_watch(http_cli)
+ local res, err = watch_ctx.cli:watchcancel(http_cli)
+ if res == 1 then
+ log.info("cancel watch connection success")
+ else
+ log.error("cancel watch failed: ", err)
+ end
+end
+
+
+-- append res to the queue and notify pending watchers
+local function produce_res(res, err)
+ if log_level >= NGX_INFO then
+ log.info("append res: ", inspect(res), ", err: ", inspect(err))
+ end
+ insert_tab(watch_ctx.res, {res=res, err=err})
+ for _, sema in pairs(watch_ctx.sema) do
+ sema:post()
+ end
+ table.clear(watch_ctx.sema)
+end
+
+
+local function do_run_watch(premature)
+ if premature then
+ return
+ end
+
+ local local_conf, err = config_local.local_conf()
+ if not local_conf then
+ error("no local conf: " .. err)
+ end
+ watch_ctx.prefix = local_conf.etcd.prefix .. "/"
+
+ watch_ctx.cli, err = get_etcd()
+ if not watch_ctx.cli then
+ error("failed to create etcd instance: " .. string(err))
+ end
+
+ local rev = 0
+ if loaded_configuration then
+ local _, res = next(loaded_configuration)
+ if res then
+ rev = tonumber(res.headers["X-Etcd-Index"])
+ assert(rev > 0, 'invalid res.headers["X-Etcd-Index"]')
+ end
+ end
+
+ if rev == 0 then
+ while true do
+ local res, err = watch_ctx.cli:get(watch_ctx.prefix)
+ if not res then
+ log.error("etcd get: ", err)
+ ngx_sleep(3)
+ else
+ rev = tonumber(res.body.header.revision)
+ break
+ end
+ end
+ end
+
+ watch_ctx.rev = rev + 1
+ watch_ctx.started = true
+
+ log.warn("main etcd watcher started, revision=", watch_ctx.rev)
+ for _, sema in pairs(watch_ctx.wait_init) do
+ sema:post()
+ end
+ watch_ctx.wait_init = nil
+
+ local opts = {}
+ opts.timeout = 50 -- seconds
+ opts.need_cancel = true
+
+ ::restart_watch::
+ while true do
+ opts.start_revision = watch_ctx.rev
+ log.info("restart watchdir: start_revision=", opts.start_revision)
+ local res_func, err, http_cli = watch_ctx.cli:watchdir(watch_ctx.prefix, opts)
+ if not res_func then
+ log.error("watchdir: ", err)
+ ngx_sleep(3)
+ goto restart_watch
+ end
+
+ ::watch_event::
+ while true do
+ local res, err = res_func()
+ if log_level >= NGX_INFO then
+ log.info("res_func: ", inspect(res))
+ end
+
+ if not res then
+ if err ~= "closed" and
+ err ~= "timeout" and
+ err ~= "broken pipe"
+ then
+ log.error("wait watch event: ", err)
+ end
+ cancel_watch(http_cli)
+ break
+ end
+
+ if res.error then
+ log.error("wait watch event: ", inspect(res.error))
+ cancel_watch(http_cli)
+ break
+ end
+
+ if res.result.created then
+ goto watch_event
+ end
+
+ if res.result.canceled then
+ log.warn("watch canceled by etcd, res: ", inspect(res))
+ if res.result.compact_revision then
+ watch_ctx.rev = tonumber(res.result.compact_revision)
+ log.warn("etcd compacted, compact_revision=", watch_ctx.rev)
+ produce_res(nil, "compacted")
+ end
+ cancel_watch(http_cli)
+ break
+ end
+
+ -- cleanup
+ local min_idx = 0
+ for _, idx in pairs(watch_ctx.idx) do
+ if (min_idx == 0) or (idx < min_idx) then
+ min_idx = idx
+ end
+ end
+
+ for i = 1, min_idx - 1 do
+ watch_ctx.res[i] = false
+ end
+
+ if min_idx > 100 then
+ for k, idx in pairs(watch_ctx.idx) do
+ watch_ctx.idx[k] = idx - min_idx + 1
+ end
+ -- trim the res table
+ for i = 1, min_idx - 1 do
+ table.remove(watch_ctx.res, 1)
+ end
+ end
+
+ local rev = tonumber(res.result.header.revision)
+ if rev > watch_ctx.rev then
+ watch_ctx.rev = rev + 1
+ end
+ produce_res(res)
+ end
+ end
+end
+
+
+local function run_watch(premature)
+ local run_watch_th = ngx_thread_spawn(do_run_watch, premature)
+
+ ::restart::
+ local check_worker_th = ngx_thread_spawn(function ()
+ while not exiting() do
+ ngx_sleep(0.1)
+ end
+ end)
+
+ local ok, err = ngx_thread_wait(check_worker_th)
+
+ if not ok then
+ log.error("check_worker thread terminates failed, retart checker, error: " .. err)
+ ngx_thread_kill(check_worker_th)
+ goto restart
+ end
+
+ ngx_thread_kill(run_watch_th)
+ -- notify child watchers
+ produce_res(nil, "worker exited")
+end
+
+
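+-- create the shared watch context on first use and block callers until the main watcher has started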
+local function init_watch_ctx(key)
+ if not watch_ctx then
+ watch_ctx = {
+ idx = {},
+ res = {},
+ sema = {},
+ wait_init = {},
+ started = false,
+ }
+ ngx_timer_at(0, run_watch)
+ end
+
+ if watch_ctx.started == false then
+ -- wait until the main watcher is started
+ local sema, err = semaphore.new()
+ if not sema then
+ error(err)
+ end
+ watch_ctx.wait_init[key] = sema
+ while true do
+ local ok, err = sema:wait(60)
+ if ok then
+ break
+ end
+ log.error("wait main watcher to start, key: ", key, ", err: ", err)
+ end
+ end
+end
+
+
local function getkey(etcd_cli, key)
if not etcd_cli then
return nil, "not inited"
@@ -123,79 +362,67 @@ local function readdir(etcd_cli, key, formatter)
end
-local function grpc_waitdir(self, etcd_cli, key, modified_index, timeout)
- local watching_stream = self.watching_stream
- if not watching_stream then
- local attr = {}
- attr.start_revision = modified_index
- local opts = {}
- opts.timeout = timeout
-
- local st, err = etcd_cli:create_grpc_watch_stream(key, attr, opts)
- if not st then
- log.error("create watch stream failed: ", err)
- return nil, err
- end
-
- log.info("create watch stream for key: ", key, ", modified_index: ", modified_index)
-
- self.watching_stream = st
- watching_stream = st
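+-- consume watch events for the given key prefix from the shared queue filled by the main watcher;
+-- when no new event is available, wait on a per-consumer semaphore until one is produced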
+local function http_waitdir(self, etcd_cli, key, modified_index, timeout)
+ if not watch_ctx.idx[key] then
+ watch_ctx.idx[key] = 1
end
- return etcd_cli:read_grpc_watch_stream(watching_stream)
-end
-
-
-local function flush_watching_streams(self)
- local etcd_cli = self.etcd_cli
- if not etcd_cli.use_grpc then
- return
- end
-
- self.watching_stream = nil
-end
+ ::iterate_events::
+ for i = watch_ctx.idx[key], #watch_ctx.res do
+ watch_ctx.idx[key] = i + 1
+ local item = watch_ctx.res[i]
+ if item == false then
+ goto iterate_events
+ end
-local function http_waitdir(etcd_cli, key, modified_index, timeout)
- local opts = {}
- opts.start_revision = modified_index
- opts.timeout = timeout
- opts.need_cancel = true
- local res_func, func_err, http_cli = etcd_cli:watchdir(key, opts)
- if not res_func then
- return nil, func_err
- end
+ local res, err = item.res, item.err
+ if err then
+ return res, err
+ end
- -- in etcd v3, the 1st res of watch is watch info, useless to us.
- -- try twice to skip create info
- local res, err = res_func()
- if not res or not res.result or not res.result.events then
- res, err = res_func()
- end
+ -- ignore res with revision smaller than self.prev_index
+ if tonumber(res.result.header.revision) > self.prev_index then
+ local res2
+ for _, evt in ipairs(res.result.events) do
+ if core_str.find(evt.kv.key, key) == 1 then
+ if not res2 then
+ res2 = tablex.deepcopy(res)
+ table.clear(res2.result.events)
+ end
+ insert_tab(res2.result.events, evt)
+ end
+ end
- if http_cli then
- local res_cancel, err_cancel = etcd_cli:watchcancel(http_cli)
- if res_cancel == 1 then
- log.info("cancel watch connection success")
- else
- log.error("cancel watch failed: ", err_cancel)
+ if res2 then
+ if log_level >= NGX_INFO then
+ log.info("http_waitdir: ", inspect(res2))
+ end
+ return res2
+ end
end
end
- if not res then
- return nil, err
+ -- if no events, wait via semaphore
+ if not self.watch_sema then
+ local sema, err = semaphore.new()
+ if not sema then
+ error(err)
+ end
+ self.watch_sema = sema
end
- if type(res.result) ~= "table" then
- err = "failed to wait etcd dir"
- if res.error and res.error.message then
- err = err .. ": " .. res.error.message
+ watch_ctx.sema[key] = self.watch_sema
+ local ok, err = self.watch_sema:wait(timeout or 60)
+ watch_ctx.sema[key] = nil
+ if ok then
+ goto iterate_events
+ else
+ if err ~= "timeout" then
+ log.error("wait watch event, key=", key, ", err: ", err)
end
return nil, err
end
-
- return res, err
end
@@ -209,12 +436,7 @@ local function waitdir(self)
return nil, "not inited"
end
- local res, err
- if etcd_cli.use_grpc then
- res, err = grpc_waitdir(self, etcd_cli, key, modified_index, timeout)
- else
- res, err = http_waitdir(etcd_cli, key, modified_index, timeout)
- end
+ local res, err = http_waitdir(self, etcd_cli, key, modified_index, timeout)
if not res then
-- log.error("failed to get key from etcd: ", err)
@@ -359,9 +581,9 @@ local function sync_data(self)
return nil, "missing 'key' arguments"
end
- if self.need_reload then
- flush_watching_streams(self)
+ init_watch_ctx(self.key)
+ if self.need_reload then
local res, err = readdir(self.etcd_cli, self.key)
if not res then
return false, err
@@ -555,22 +777,6 @@ function _M.getkey(self, key)
end
-local get_etcd
-do
- local etcd_cli
-
- function get_etcd()
- if etcd_cli ~= nil then
- return etcd_cli
- end
-
- local _, err
- etcd_cli, _, err = etcd_apisix.get_etcd_syncer()
- return etcd_cli, err
- end
-end
-
-
local function _automatic_fetch(premature, self)
if premature then
return
@@ -629,6 +835,9 @@ local function _automatic_fetch(premature, self)
.. backoff_duration .. "s")
end
end
+ elseif err == "worker exited" then
+ log.info("worker exited.")
+ return
elseif err ~= "timeout" and err ~= "Key not found"
and self.last_err ~= err then
log.error("failed to fetch data from etcd: ", err, ", ",
@@ -664,7 +873,6 @@ local function _automatic_fetch(premature, self)
end
if not exiting() and self.running then
- flush_watching_streams(self)
ngx_timer_at(0, _automatic_fetch, self)
end
end
@@ -866,10 +1074,6 @@ function _M.init()
return true
end
- if local_conf.etcd.use_grpc then
- return true
- end
-
-- don't go through proxy during start because the proxy is not available
local etcd_cli, prefix, err = etcd_apisix.new_without_proxy()
if not etcd_cli then
@@ -895,21 +1099,6 @@ function _M.init_worker()
return true
end
- if not local_conf.etcd.use_grpc then
- return true
- end
-
- -- don't go through proxy during start because the proxy is not available
- local etcd_cli, prefix, err = etcd_apisix.new_without_proxy()
- if not etcd_cli then
- return nil, "failed to start a etcd instance: " .. err
- end
-
- local res, err = readdir(etcd_cli, prefix, create_formatter(prefix))
- if not res then
- return nil, err
- end
-
return true
end
diff --git a/apisix/core/ctx.lua b/apisix/core/ctx.lua
index 47b6bef8584e..5128061d58fe 100644
--- a/apisix/core/ctx.lua
+++ b/apisix/core/ctx.lua
@@ -190,6 +190,7 @@ do
upstream_connection = true,
upstream_uri = true,
+ upstream_mirror_host = true,
upstream_mirror_uri = true,
upstream_cache_zone = true,
@@ -198,9 +199,9 @@ do
upstream_cache_key = true,
upstream_cache_bypass = true,
- var_x_forwarded_proto = true,
- var_x_forwarded_port = true,
- var_x_forwarded_host = true,
+ var_x_forwarded_proto = true,
+ var_x_forwarded_port = true,
+ var_x_forwarded_host = true,
}
-- sort in alphabetical
diff --git a/apisix/core/etcd.lua b/apisix/core/etcd.lua
index 99e4bbb6b7ab..5cd1038581dd 100644
--- a/apisix/core/etcd.lua
+++ b/apisix/core/etcd.lua
@@ -28,25 +28,15 @@ local clone_tab = require("table.clone")
local health_check = require("resty.etcd.health_check")
local pl_path = require("pl.path")
local ipairs = ipairs
-local pcall = pcall
local setmetatable = setmetatable
local string = string
local tonumber = tonumber
-local ngx_config_prefix = ngx.config.prefix()
-local ngx_socket_tcp = ngx.socket.tcp
local ngx_get_phase = ngx.get_phase
-local is_http = ngx.config.subsystem == "http"
local _M = {}
-local function has_mtls_support()
- local s = ngx_socket_tcp()
- return s.tlshandshake ~= nil
-end
-
-
local function _new(etcd_conf)
local prefix = etcd_conf.prefix
etcd_conf.http_host = etcd_conf.host
@@ -72,17 +62,6 @@ local function _new(etcd_conf)
end
end
- if etcd_conf.use_grpc then
- if ngx_get_phase() == "init" then
- etcd_conf.use_grpc = false
- else
- local ok = pcall(require, "resty.grpc")
- if not ok then
- etcd_conf.use_grpc = false
- end
- end
- end
-
local etcd_cli, err = etcd.new(etcd_conf)
if not etcd_cli then
return nil, nil, err
@@ -129,64 +108,7 @@ local function new()
etcd_conf.trusted_ca = local_conf.apisix.ssl.ssl_trusted_certificate
end
- local proxy_by_conf_server = false
-
- if local_conf.deployment then
- if local_conf.deployment.role == "traditional"
- -- we proxy the etcd requests in traditional mode so we can test the CP's behavior in
- -- daily development. However, a stream proxy can't be the CP.
- -- Hence, generate a HTTP conf server to proxy etcd requests in stream proxy is
- -- unnecessary and inefficient.
- and is_http
- then
- local sock_prefix = ngx_config_prefix
- etcd_conf.unix_socket_proxy =
- "unix:" .. sock_prefix .. "/conf/config_listen.sock"
- etcd_conf.host = {"http://127.0.0.1:2379"}
- proxy_by_conf_server = true
-
- elseif local_conf.deployment.role == "control_plane" then
- local addr = local_conf.deployment.role_control_plane.conf_server.listen
- etcd_conf.host = {"https://" .. addr}
- etcd_conf.tls = {
- verify = false,
- }
-
- if has_mtls_support() and local_conf.deployment.certs.cert then
- local cert = local_conf.deployment.certs.cert
- local cert_key = local_conf.deployment.certs.cert_key
- etcd_conf.tls.cert = cert
- etcd_conf.tls.key = cert_key
- end
-
- proxy_by_conf_server = true
-
- elseif local_conf.deployment.role == "data_plane" then
- if has_mtls_support() and local_conf.deployment.certs.cert then
- local cert = local_conf.deployment.certs.cert
- local cert_key = local_conf.deployment.certs.cert_key
-
- if not etcd_conf.tls then
- etcd_conf.tls = {}
- end
-
- etcd_conf.tls.cert = cert
- etcd_conf.tls.key = cert_key
- end
- end
-
- if local_conf.deployment.certs and local_conf.deployment.certs.trusted_ca_cert then
- etcd_conf.trusted_ca = local_conf.deployment.certs.trusted_ca_cert
- end
- end
-
- -- if an unhealthy etcd node is selected in a single admin read/write etcd operation,
- -- the retry mechanism for health check can select another healthy etcd node
- -- to complete the read/write etcd operation.
- if proxy_by_conf_server then
- -- health check is done in conf server
- health_check.disable()
- elseif not health_check.conf then
+ if not health_check.conf then
health_check.init({
max_fails = 1,
retry = true,
@@ -349,10 +271,6 @@ do
return nil, nil, err
end
- if tmp_etcd_cli.use_grpc then
- etcd_cli_init_phase = tmp_etcd_cli
- end
-
return tmp_etcd_cli, prefix
end
@@ -372,9 +290,7 @@ do
return nil, nil, err
end
- if tmp_etcd_cli.use_grpc then
- etcd_cli = tmp_etcd_cli
- end
+ etcd_cli = tmp_etcd_cli
return tmp_etcd_cli, prefix
end
diff --git a/apisix/core/profile.lua b/apisix/core/profile.lua
index 389a9d42ccec..a5dcdc81e10b 100644
--- a/apisix/core/profile.lua
+++ b/apisix/core/profile.lua
@@ -19,6 +19,8 @@
--
-- @module core.profile
+local util = require("apisix.cli.util")
+
local _M = {
version = 0.1,
profile = os.getenv("APISIX_PROFILE") or "",
@@ -48,4 +50,18 @@ function _M.yaml_path(self, file_name)
end
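+-- the index file records the absolute path of a user-specified config.yaml (passed via -c/--config)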
+function _M.customized_yaml_index(self)
+ return self.apisix_home .. "/conf/.customized_config_path"
+end
+
+
+function _M.customized_yaml_path(self)
+ local customized_config_index = self:customized_yaml_index()
+ if util.file_exists(customized_config_index) then
+ return util.read_file(customized_config_index)
+ end
+ return nil
+end
+
+
return _M
diff --git a/apisix/core/pubsub.lua b/apisix/core/pubsub.lua
index 25ac46f13eeb..18bb887001c1 100644
--- a/apisix/core/pubsub.lua
+++ b/apisix/core/pubsub.lua
@@ -185,9 +185,10 @@ function _M.wait(self)
end
-- recovery of stored pb_store
- pb.state(pb_state)
+ local pb_old_state = pb.state(pb_state)
local data, err = pb.decode("PubSubReq", raw_data)
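+ -- restore the previous protobuf state so other users of lua-protobuf are not affected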
+ pb.state(pb_old_state)
if not data then
log.error("pubsub server receives undecodable data, err: ", err)
send_error(ws, 0, "wrong command")
diff --git a/apisix/core/response.lua b/apisix/core/response.lua
index b934d94b93af..04430abd5266 100644
--- a/apisix/core/response.lua
+++ b/apisix/core/response.lua
@@ -70,7 +70,9 @@ function resp_exit(code, ...)
error("failed to encode data: " .. err, -2)
else
idx = idx + 1
- t[idx] = body .. "\n"
+ t[idx] = body
+ idx = idx + 1
+ t[idx] = "\n"
end
elseif v ~= nil then
@@ -80,7 +82,7 @@ function resp_exit(code, ...)
end
if idx > 0 then
- ngx_print(concat_tab(t, "", 1, idx))
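+ -- ngx.print accepts a table of strings, so the body is emitted without concatenating it in Lua land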
+ ngx_print(t)
end
if code then
@@ -177,14 +179,19 @@ end
function _M.hold_body_chunk(ctx, hold_the_copy)
local body_buffer
local chunk, eof = arg[1], arg[2]
+
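+ -- chunks are buffered per plugin name so that multiple body-filter plugins can hold
+ -- the body on the same response without overwriting each other's buffers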
+ if not ctx._body_buffer then
+ ctx._body_buffer = {}
+ end
+
if type(chunk) == "string" and chunk ~= "" then
- body_buffer = ctx._body_buffer
+ body_buffer = ctx._body_buffer[ctx._plugin_name]
if not body_buffer then
body_buffer = {
chunk,
n = 1
}
- ctx._body_buffer = body_buffer
+ ctx._body_buffer[ctx._plugin_name] = body_buffer
else
local n = body_buffer.n + 1
body_buffer.n = n
@@ -193,13 +200,13 @@ function _M.hold_body_chunk(ctx, hold_the_copy)
end
if eof then
- body_buffer = ctx._body_buffer
+ body_buffer = ctx._body_buffer[ctx._plugin_name]
if not body_buffer then
return chunk
end
body_buffer = concat_tab(body_buffer, "", 1, body_buffer.n)
- ctx._body_buffer = nil
+ ctx._body_buffer[ctx._plugin_name] = nil
return body_buffer
end
diff --git a/apisix/core/version.lua b/apisix/core/version.lua
index d83cba52dd98..5fa6af15eb53 100644
--- a/apisix/core/version.lua
+++ b/apisix/core/version.lua
@@ -20,5 +20,5 @@
-- @module core.version
return {
- VERSION = "3.2.0"
+ VERSION = "3.5.0"
}
diff --git a/apisix/discovery/consul/init.lua b/apisix/discovery/consul/init.lua
index 686ab4120f18..ae1e4c64cc9c 100644
--- a/apisix/discovery/consul/init.lua
+++ b/apisix/discovery/consul/init.lua
@@ -32,6 +32,14 @@ local ngx_timer_every = ngx.timer.every
local log = core.log
local json_delay_encode = core.json.delay_encode
local ngx_worker_id = ngx.worker.id
+local thread_spawn = ngx.thread.spawn
+local thread_wait = ngx.thread.wait
+local thread_kill = ngx.thread.kill
+local math_random = math.random
+local pcall = pcall
+local null = ngx.null
+local type = type
+local next = next
local all_services = core.table.new(0, 5)
local default_service
@@ -44,9 +52,15 @@ local events_list
local consul_services
local default_skip_services = {"consul"}
+local default_random_range = 5
+local default_catalog_error_index = -1
+local default_health_error_index = -2
+local watch_type_catalog = 1
+local watch_type_health = 2
+local max_retry_time = 256
local _M = {
- version = 0.2,
+ version = 0.3,
}
@@ -128,7 +142,7 @@ local function read_dump_services()
local now_time = ngx.time()
log.info("dump file last_update: ", entity.last_update, ", dump_params.expire: ",
dump_params.expire, ", now_time: ", now_time)
- if dump_params.expire ~= 0 and (entity.last_update + dump_params.expire) < now_time then
+ if dump_params.expire ~= 0 and (entity.last_update + dump_params.expire) < now_time then
log.warn("dump file: ", dump_params.path, " had expired, ignored it")
return
end
@@ -145,7 +159,7 @@ local function write_dump_services()
expire = dump_params.expire, -- later need handle it
}
local data = core.json.encode(entity)
- local succ, err = util.write_file(dump_params.path, data)
+ local succ, err = util.write_file(dump_params.path, data)
if not succ then
log.error("write dump into file got error: ", err)
end
@@ -165,8 +179,9 @@ local function show_dump_file()
return 200, data
end
+
local function get_retry_delay(retry_delay)
- if not retry_delay then
+ if not retry_delay or retry_delay >= max_retry_time then
retry_delay = 1
else
retry_delay = retry_delay * 4
@@ -176,80 +191,307 @@ local function get_retry_delay(retry_delay)
end
+local function get_opts(consul_server, is_catalog)
+ local opts = {
+ host = consul_server.host,
+ port = consul_server.port,
+ connect_timeout = consul_server.connect_timeout,
+ read_timeout = consul_server.read_timeout,
+ }
+ if not consul_server.keepalive then
+ return opts
+ end
+
+ if is_catalog then
+ opts.default_args = {
+ wait = consul_server.wait_timeout, --blocked wait!=0; unblocked by wait=0
+ index = consul_server.catalog_index,
+ }
+ else
+ opts.default_args = {
+ wait = consul_server.wait_timeout, --blocked wait!=0; unblocked by wait=0
+ index = consul_server.health_index,
+ }
+ end
+
+ return opts
+end
+
+
+local function watch_catalog(consul_server)
+ local client = resty_consul:new(get_opts(consul_server, true))
+
+ ::RETRY::
+ local watch_result, watch_err = client:get(consul_server.consul_watch_catalog_url)
+ local watch_error_info = (watch_err ~= nil and watch_err)
+ or ((watch_result ~= nil and watch_result.status ~= 200)
+ and watch_result.status)
+ if watch_error_info then
+ log.error("connect consul: ", consul_server.consul_server_url,
+ " by sub url: ", consul_server.consul_watch_catalog_url,
+ ", got watch result: ", json_delay_encode(watch_result),
+ ", with error: ", watch_error_info)
+
+ return watch_type_catalog, default_catalog_error_index
+ end
+
+ if consul_server.catalog_index > 0
+ and consul_server.catalog_index == tonumber(watch_result.headers['X-Consul-Index']) then
+ local random_delay = math_random(default_random_range)
+ log.info("watch catalog has no change, re-watch consul after ", random_delay, " seconds")
+ core_sleep(random_delay)
+ goto RETRY
+ end
+
+ return watch_type_catalog, watch_result.headers['X-Consul-Index']
+end
+
+
+local function watch_health(consul_server)
+ local client = resty_consul:new(get_opts(consul_server, false))
+
+ ::RETRY::
+ local watch_result, watch_err = client:get(consul_server.consul_watch_health_url)
+ local watch_error_info = (watch_err ~= nil and watch_err)
+ or ((watch_result ~= nil and watch_result.status ~= 200)
+ and watch_result.status)
+ if watch_error_info then
+ log.error("connect consul: ", consul_server.consul_server_url,
+ " by sub url: ", consul_server.consul_watch_health_url,
+ ", got watch result: ", json_delay_encode(watch_result),
+ ", with error: ", watch_error_info)
+
+ return watch_type_health, default_health_error_index
+ end
+
+ if consul_server.health_index > 0
+ and consul_server.health_index == tonumber(watch_result.headers['X-Consul-Index']) then
+ local random_delay = math_random(default_random_range)
+ log.info("watch health has no change, re-watch consul after ", random_delay, " seconds")
+ core_sleep(random_delay)
+ goto RETRY
+ end
+
+ return watch_type_health, watch_result.headers['X-Consul-Index']
+end
+
+
+local function check_keepalive(consul_server, retry_delay)
+ if consul_server.keepalive then
+ local ok, err = ngx_timer_at(0, _M.connect, consul_server, retry_delay)
+ if not ok then
+ log.error("create ngx_timer_at got error: ", err)
+ return
+ end
+ end
+end
+
+
+local function update_index(consul_server, catalog_index, health_index)
+ local c_index = 0
+ local h_index = 0
+ if catalog_index ~= nil then
+ c_index = tonumber(catalog_index)
+ end
+
+ if health_index ~= nil then
+ h_index = tonumber(health_index)
+ end
+
+ if c_index > 0 then
+ consul_server.catalog_index = c_index
+ end
+
+ if h_index > 0 then
+ consul_server.health_index = h_index
+ end
+end
+
+
+local function is_not_empty(value)
+ if value == nil or value == null
+ or (type(value) == "table" and not next(value))
+ or (type(value) == "string" and value == "")
+ then
+ return false
+ end
+
+ return true
+end
+
+
+local function watch_result_is_valid(watch_type, index, catalog_index, health_index)
+ if index <= 0 then
+ return false
+ end
+
+ if watch_type == watch_type_catalog then
+ if index == catalog_index then
+ return false
+ end
+ else
+ if index == health_index then
+ return false
+ end
+ end
+
+ return true
+end
+
+
function _M.connect(premature, consul_server, retry_delay)
if premature then
return
end
+ local catalog_thread, spawn_catalog_err = thread_spawn(watch_catalog, consul_server)
+ if not catalog_thread then
+ local random_delay = math_random(default_random_range)
+ log.error("failed to spawn thread watch catalog: ", spawn_catalog_err,
+ ", retry connecting consul after ", random_delay, " seconds")
+ core_sleep(random_delay)
+
+ check_keepalive(consul_server, retry_delay)
+ return
+ end
+
+ local health_thread, err = thread_spawn(watch_health, consul_server)
+ if not health_thread then
+ thread_kill(catalog_thread)
+ local random_delay = math_random(default_random_range)
+ log.error("failed to spawn thread watch health: ", err, ", retry connecting consul after ",
+ random_delay, " seconds")
+ core_sleep(random_delay)
+
+ check_keepalive(consul_server, retry_delay)
+ return
+ end
+
+ local thread_wait_ok, watch_type, index = thread_wait(catalog_thread, health_thread)
+ thread_kill(catalog_thread)
+ thread_kill(health_thread)
+ if not thread_wait_ok then
+ local random_delay = math_random(default_random_range)
+ log.error("failed to wait thread: ", watch_type, ", retry connecting consul after ",
+ random_delay, " seconds")
+ core_sleep(random_delay)
+
+ check_keepalive(consul_server, retry_delay)
+ return
+ end
+
+ -- double check index has changed
+ if not watch_result_is_valid(tonumber(watch_type),
+ tonumber(index), consul_server.catalog_index, consul_server.health_index) then
+ retry_delay = get_retry_delay(retry_delay)
+ log.warn("get all svcs got err, retry connecting consul after ", retry_delay, " seconds")
+ core_sleep(retry_delay)
+
+ check_keepalive(consul_server, retry_delay)
+ return
+ end
+
local consul_client = resty_consul:new({
host = consul_server.host,
port = consul_server.port,
connect_timeout = consul_server.connect_timeout,
read_timeout = consul_server.read_timeout,
- default_args = consul_server.default_args,
})
+ local catalog_success, catalog_res, catalog_err = pcall(function()
+ return consul_client:get(consul_server.consul_watch_catalog_url)
+ end)
+ if not catalog_success then
+ log.error("connect consul: ", consul_server.consul_server_url,
+ " by sub url: ", consul_server.consul_watch_catalog_url,
+ ", got catalog result: ", json_delay_encode(catalog_res))
+ check_keepalive(consul_server, retry_delay)
+ return
+ end
+ local catalog_error_info = (catalog_err ~= nil and catalog_err)
+ or ((catalog_res ~= nil and catalog_res.status ~= 200)
+ and catalog_res.status)
+ if catalog_error_info then
+ log.error("connect consul: ", consul_server.consul_server_url,
+ " by sub url: ", consul_server.consul_watch_catalog_url,
+ ", got catalog result: ", json_delay_encode(catalog_res),
+ ", with error: ", catalog_error_info)
- log.info("consul_server: ", json_delay_encode(consul_server, true))
- local watch_result, watch_err = consul_client:get(consul_server.consul_watch_sub_url)
- local watch_error_info = (watch_err ~= nil and watch_err)
- or ((watch_result ~= nil and watch_result.status ~= 200)
- and watch_result.status)
- if watch_error_info then
+ retry_delay = get_retry_delay(retry_delay)
+ log.warn("get all svcs got err, retry connecting consul after ", retry_delay, " seconds")
+ core_sleep(retry_delay)
+
+ check_keepalive(consul_server, retry_delay)
+ return
+ end
+
+ -- get health index
+ local success, health_res, health_err = pcall(function()
+ return consul_client:get(consul_server.consul_watch_health_url)
+ end)
+ if not success then
log.error("connect consul: ", consul_server.consul_server_url,
- " by sub url: ", consul_server.consul_watch_sub_url,
- ", got watch result: ", json_delay_encode(watch_result, true),
- ", with error: ", watch_error_info)
+ " by sub url: ", consul_server.consul_watch_health_url,
+ ", got health result: ", json_delay_encode(health_res))
+ check_keepalive(consul_server, retry_delay)
+ return
+ end
+ local health_error_info = (health_err ~= nil and health_err)
+ or ((health_res ~= nil and health_res.status ~= 200)
+ and health_res.status)
+ if health_error_info then
+ log.error("connect consul: ", consul_server.consul_server_url,
+ " by sub url: ", consul_server.consul_watch_health_url,
+ ", got health result: ", json_delay_encode(health_res),
+ ", with error: ", health_error_info)
retry_delay = get_retry_delay(retry_delay)
- log.warn("retry connecting consul after ", retry_delay, " seconds")
+ log.warn("get all svcs got err, retry connecting consul after ", retry_delay, " seconds")
core_sleep(retry_delay)
- goto ERR
+ check_keepalive(consul_server, retry_delay)
+ return
end
log.info("connect consul: ", consul_server.consul_server_url,
- ", watch_result status: ", watch_result.status,
- ", watch_result.headers.index: ", watch_result.headers['X-Consul-Index'],
+ ", catalog_result status: ", catalog_res.status,
+ ", catalog_result.headers.index: ", catalog_res.headers['X-Consul-Index'],
", consul_server.index: ", consul_server.index,
- ", consul_server: ", json_delay_encode(consul_server, true))
-
- -- if current index different last index then update service
- if consul_server.index ~= watch_result.headers['X-Consul-Index'] then
- local up_services = core.table.new(0, #watch_result.body)
- local consul_client_svc = resty_consul:new({
- host = consul_server.host,
- port = consul_server.port,
- connect_timeout = consul_server.connect_timeout,
- read_timeout = consul_server.read_timeout,
- })
- for service_name, _ in pairs(watch_result.body) do
+ ", consul_server: ", json_delay_encode(consul_server))
+
+ -- if the current index is different from the last index, then update the service
+ if (consul_server.catalog_index ~= tonumber(catalog_res.headers['X-Consul-Index']))
+ or (consul_server.health_index ~= tonumber(health_res.headers['X-Consul-Index'])) then
+ local up_services = core.table.new(0, #catalog_res.body)
+ for service_name, _ in pairs(catalog_res.body) do
-- check if the service_name is 'skip service'
if skip_service_map[service_name] then
goto CONTINUE
end
+
-- get node from service
local svc_url = consul_server.consul_sub_url .. "/" .. service_name
- local result, err = consul_client_svc:get(svc_url)
- local error_info = (err ~= nil and err) or
+ local svc_success, result, get_err = pcall(function()
+ return consul_client:get(svc_url, {passing = true})
+ end)
+ local error_info = (get_err ~= nil and get_err) or
((result ~= nil and result.status ~= 200) and result.status)
- if error_info then
+ if not svc_success or error_info then
log.error("connect consul: ", consul_server.consul_server_url,
", by service url: ", svc_url, ", with error: ", error_info)
goto CONTINUE
end
-- decode body, decode json, update service, error handling
- if result.body then
- log.notice("service url: ", svc_url,
- ", header: ", json_delay_encode(result.headers, true),
- ", body: ", json_delay_encode(result.body, true))
+ -- check result body is not nil and not empty
+ if is_not_empty(result.body) then
-- add services to table
local nodes = up_services[service_name]
- for _, node in ipairs(result.body) do
- local svc_address, svc_port = node.ServiceAddress, node.ServicePort
- if not svc_address then
- svc_address = node.Address
+ for _, node in ipairs(result.body) do
+ if not node.Service then
+ goto CONTINUE
end
+
+ local svc_address, svc_port = node.Service.Address, node.Service.Port
-- if nodes is nil, new nodes table and set to up_services
if not nodes then
nodes = core.table.new(1, 0)
@@ -270,8 +512,9 @@ function _M.connect(premature, consul_server, retry_delay)
update_all_services(consul_server.consul_server_url, up_services)
--update events
- local ok, post_err = events.post(events_list._source, events_list.updating, all_services)
- if not ok then
+ local post_ok, post_err = events.post(events_list._source,
+ events_list.updating, all_services)
+ if not post_ok then
log.error("post_event failure with ", events_list._source,
", update all services error: ", post_err)
end
@@ -280,37 +523,17 @@ function _M.connect(premature, consul_server, retry_delay)
ngx_timer_at(0, write_dump_services)
end
- consul_server.index = watch_result.headers['X-Consul-Index']
- -- only long connect type use index
- if consul_server.keepalive then
- consul_server.default_args.index = watch_result.headers['X-Consul-Index']
- end
+ update_index(consul_server,
+ catalog_res.headers['X-Consul-Index'],
+ health_res.headers['X-Consul-Index'])
end
- :: ERR ::
- local keepalive = consul_server.keepalive
- if keepalive then
- local ok, err = ngx_timer_at(0, _M.connect, consul_server, retry_delay)
- if not ok then
- log.error("create ngx_timer_at got error: ", err)
- return
- end
- end
+ check_keepalive(consul_server, retry_delay)
end
local function format_consul_params(consul_conf)
local consul_server_list = core.table.new(0, #consul_conf.servers)
- local args
-
- if consul_conf.keepalive == false then
- args = {}
- elseif consul_conf.keepalive then
- args = {
- wait = consul_conf.timeout.wait, --blocked wait!=0; unblocked by wait=0
- index = 0,
- }
- end
for _, v in pairs(consul_conf.servers) do
local scheme, host, port, path = unpack(http.parse_uri(nil, v))
@@ -319,23 +542,23 @@ local function format_consul_params(consul_conf)
elseif path ~= "/" or core.string.has_suffix(v, '/') then
return nil, "invalid consul server address, the valid format: http://address:port"
end
-
core.table.insert(consul_server_list, {
host = host,
port = port,
connect_timeout = consul_conf.timeout.connect,
read_timeout = consul_conf.timeout.read,
- consul_sub_url = "/catalog/service",
- consul_watch_sub_url = "/catalog/services",
+ wait_timeout = consul_conf.timeout.wait,
+ consul_watch_catalog_url = "/catalog/services",
+ consul_sub_url = "/health/service",
+ consul_watch_health_url = "/health/state/any",
consul_server_url = v .. "/v1",
weight = consul_conf.weight,
keepalive = consul_conf.keepalive,
- default_args = args,
- index = 0,
+ health_index = 0,
+ catalog_index = 0,
fetch_interval = consul_conf.fetch_interval -- fetch interval to next connect consul
})
end
-
return consul_server_list, nil
end
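
Note: the rewritten connect() above blocks on two Consul long polls at once (catalog and health) and reacts to whichever returns first. A self-contained sketch of that thread pattern; the watcher bodies are placeholders, not the real Consul calls:

    -- placeholder watchers standing in for the blocking Consul long-poll requests
    local function watch_catalog()
        ngx.sleep(1)
        return 1, 101            -- watch type, new X-Consul-Index
    end

    local function watch_health()
        ngx.sleep(2)
        return 2, 202
    end

    local function connect()     -- would run inside ngx.timer.at, as in the patch
        local catalog_thread = ngx.thread.spawn(watch_catalog)
        local health_thread = ngx.thread.spawn(watch_health)

        -- returns as soon as either watcher finishes; kill both so a stale
        -- long poll cannot outlive this round
        local ok, watch_type, index = ngx.thread.wait(catalog_thread, health_thread)
        ngx.thread.kill(catalog_thread)
        ngx.thread.kill(health_thread)

        return ok, watch_type, index
    end
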
diff --git a/apisix/discovery/kubernetes/init.lua b/apisix/discovery/kubernetes/init.lua
index 3f5f275d9aff..d16d4f4fcd31 100644
--- a/apisix/discovery/kubernetes/init.lua
+++ b/apisix/discovery/kubernetes/init.lua
@@ -26,7 +26,7 @@ local error = error
local pcall = pcall
local setmetatable = setmetatable
local is_http = ngx.config.subsystem == "http"
-local support_process, process = pcall(require, "ngx.process")
+local process = require("ngx.process")
local core = require("apisix.core")
local util = require("apisix.cli.util")
local local_conf = require("apisix.core.config_local").local_conf()
@@ -520,11 +520,6 @@ end
function _M.init_worker()
- if not support_process then
- core.log.error("kubernetes discovery not support in subsystem: ", ngx.config.subsystem,
- ", please check if your openresty version >= 1.19.9.1 or not")
- return
- end
local discovery_conf = local_conf.discovery.kubernetes
core.log.info("kubernetes discovery conf: ", core.json.delay_encode(discovery_conf))
if #discovery_conf == 0 then
diff --git a/apisix/discovery/nacos/init.lua b/apisix/discovery/nacos/init.lua
index e12c025c56d5..2e06f5553493 100644
--- a/apisix/discovery/nacos/init.lua
+++ b/apisix/discovery/nacos/init.lua
@@ -361,7 +361,9 @@ function _M.nodes(service_name, discovery_args)
waiting_time = waiting_time - step
end
- if not applications[namespace_id] or not applications[namespace_id][group_name] then
+ if not applications or not applications[namespace_id]
+ or not applications[namespace_id][group_name]
+ then
return nil
end
return applications[namespace_id][group_name][service_name]
diff --git a/apisix/discovery/tars/init.lua b/apisix/discovery/tars/init.lua
index 14f658dce5d2..17bb2758b199 100644
--- a/apisix/discovery/tars/init.lua
+++ b/apisix/discovery/tars/init.lua
@@ -23,7 +23,7 @@ local local_conf = require("apisix.core.config_local").local_conf()
local core = require("apisix.core")
local mysql = require("resty.mysql")
local is_http = ngx.config.subsystem == "http"
-local support_process, process = pcall(require, "ngx.process")
+local process = require("ngx.process")
local endpoint_dict
@@ -343,12 +343,6 @@ local function get_endpoint_dict()
end
function _M.init_worker()
- if not support_process then
- core.log.error("tars discovery not support in subsystem: ", ngx.config.subsystem,
- ", please check if your openresty version >= 1.19.9.1 or not")
- return
- end
-
endpoint_dict = get_endpoint_dict()
if not endpoint_dict then
error("failed to get lua_shared_dict: tars, please check your APISIX version")
diff --git a/apisix/global_rules.lua b/apisix/global_rules.lua
new file mode 100644
index 000000000000..93fa2890d423
--- /dev/null
+++ b/apisix/global_rules.lua
@@ -0,0 +1,56 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements. See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+
+local core = require("apisix.core")
+local plugin_checker = require("apisix.plugin").plugin_checker
+local error = error
+
+
+local _M = {}
+
+local global_rules
+
+function _M.init_worker()
+ local err
+ global_rules, err = core.config.new("/global_rules", {
+ automatic = true,
+ item_schema = core.schema.global_rule,
+ checker = plugin_checker,
+ })
+ if not global_rules then
+ error("failed to create etcd instance for fetching /global_rules : "
+ .. err)
+ end
+end
+
+
+function _M.global_rules()
+ if not global_rules then
+ return nil, nil
+ end
+ return global_rules.values, global_rules.conf_version
+end
+
+
+function _M.get_pre_index()
+ if not global_rules then
+ return nil
+ end
+ return global_rules.prev_index
+end
+
+return _M
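
Note: callers now fetch global rules from this module instead of router.global_rules. A minimal sketch of the consuming side, mirroring the apisix/init.lua and apisix/plugin.lua changes later in this patch (api_ctx is the per-request context of the surrounding phase handler):

    local apisix_global_rules = require("apisix.global_rules")
    local plugin = require("apisix.plugin")

    local function run_globals(api_ctx)
        local global_rules = apisix_global_rules.global_rules()   -- values table, or nil before init
        if global_rules and #global_rules > 0 then
            -- phase_name is nil here, matching the access-phase call in apisix/init.lua
            plugin.run_global_rules(api_ctx, global_rules, nil)
        end
    end
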
diff --git a/apisix/http/route.lua b/apisix/http/route.lua
index d475646b56c6..dbf11abf5e28 100644
--- a/apisix/http/route.lua
+++ b/apisix/http/route.lua
@@ -103,8 +103,8 @@ function _M.create_radixtree_uri_router(routes, uri_routes, with_parameter)
end
-function _M.match_uri(uri_router, match_opts, api_ctx)
- core.table.clear(match_opts)
+function _M.match_uri(uri_router, api_ctx)
+ local match_opts = core.tablepool.fetch("route_match_opts", 0, 4)
match_opts.method = api_ctx.var.request_method
match_opts.host = api_ctx.var.host
match_opts.remote_addr = api_ctx.var.remote_addr
@@ -112,6 +112,7 @@ function _M.match_uri(uri_router, match_opts, api_ctx)
match_opts.matched = core.tablepool.fetch("matched_route_record", 0, 4)
local ok = uri_router:dispatch(api_ctx.var.uri, match_opts, api_ctx, match_opts)
+ core.tablepool.release("route_match_opts", match_opts)
return ok
end
diff --git a/apisix/http/router/radixtree_host_uri.lua b/apisix/http/router/radixtree_host_uri.lua
index 532576e53d4a..680a04fbe815 100644
--- a/apisix/http/router/radixtree_host_uri.lua
+++ b/apisix/http/router/radixtree_host_uri.lua
@@ -142,8 +142,6 @@ local function create_radixtree_router(routes)
return true
end
-
- local match_opts = {}
function _M.match(api_ctx)
local user_routes = _M.user_routes
local _, service_version = get_services()
@@ -162,7 +160,7 @@ end
function _M.matching(api_ctx)
core.log.info("route match mode: radixtree_host_uri")
- core.table.clear(match_opts)
+ local match_opts = core.tablepool.fetch("route_match_opts", 0, 16)
match_opts.method = api_ctx.var.request_method
match_opts.remote_addr = api_ctx.var.remote_addr
match_opts.vars = api_ctx.var
@@ -181,11 +179,13 @@ function _M.matching(api_ctx)
api_ctx.curr_req_matched._host = api_ctx.real_curr_req_matched_host:reverse()
api_ctx.real_curr_req_matched_host = nil
end
+ core.tablepool.release("route_match_opts", match_opts)
return true
end
end
local ok = only_uri_router:dispatch(api_ctx.var.uri, match_opts, api_ctx, match_opts)
+ core.tablepool.release("route_match_opts", match_opts)
return ok
end
diff --git a/apisix/http/router/radixtree_uri.lua b/apisix/http/router/radixtree_uri.lua
index 6e546364ac14..7c1b5c0c147a 100644
--- a/apisix/http/router/radixtree_uri.lua
+++ b/apisix/http/router/radixtree_uri.lua
@@ -27,7 +27,6 @@ local _M = {version = 0.2}
local uri_routes = {}
local uri_router
- local match_opts = {}
function _M.match(api_ctx)
local user_routes = _M.user_routes
local _, service_version = get_services()
@@ -51,8 +50,7 @@ end
function _M.matching(api_ctx)
core.log.info("route match mode: radixtree_uri")
-
- return base_router.match_uri(uri_router, match_opts, api_ctx)
+ return base_router.match_uri(uri_router, api_ctx)
end
diff --git a/apisix/http/router/radixtree_uri_with_parameter.lua b/apisix/http/router/radixtree_uri_with_parameter.lua
index 4bf7f3ebee5f..3f10f4fcac49 100644
--- a/apisix/http/router/radixtree_uri_with_parameter.lua
+++ b/apisix/http/router/radixtree_uri_with_parameter.lua
@@ -27,7 +27,6 @@ local _M = {}
local uri_routes = {}
local uri_router
- local match_opts = {}
function _M.match(api_ctx)
local user_routes = _M.user_routes
local _, service_version = get_services()
@@ -51,8 +50,7 @@ end
function _M.matching(api_ctx)
core.log.info("route match mode: radixtree_uri_with_parameter")
-
- return base_router.match_uri(uri_router, match_opts, api_ctx)
+ return base_router.match_uri(uri_router, api_ctx)
end
diff --git a/apisix/init.lua b/apisix/init.lua
index aef5b7ebeb23..86b68cf62208 100644
--- a/apisix/init.lua
+++ b/apisix/init.lua
@@ -40,6 +40,7 @@ local apisix_upstream = require("apisix.upstream")
local apisix_secret = require("apisix.secret")
local set_upstream = apisix_upstream.set_by_route
local apisix_ssl = require("apisix.ssl")
+local apisix_global_rules = require("apisix.global_rules")
local upstream_util = require("apisix.utils.upstream")
local xrpc = require("apisix.stream.xrpc")
local ctxdump = require("resty.ctxdump")
@@ -59,6 +60,7 @@ local str_sub = string.sub
local tonumber = tonumber
local type = type
local pairs = pairs
+local ngx_re_match = ngx.re.match
local control_api_router
local is_http = false
@@ -76,6 +78,7 @@ local load_balancer
local local_conf
local ver_header = "APISIX/" .. core.version.VERSION
+local has_mod, apisix_ngx_client = pcall(require, "resty.apisix.client")
local _M = {version = 0.4}
@@ -153,6 +156,8 @@ function _M.http_init_worker()
consumer_group.init_worker()
apisix_secret.init_worker()
+ apisix_global_rules.init_worker()
+
apisix_upstream.init_worker()
require("apisix.plugins.ext-plugin.init").init_worker()
@@ -173,12 +178,33 @@ end
function _M.http_ssl_phase()
+ local ok, err = router.router_ssl.set(ngx.ctx.matched_ssl)
+ if not ok then
+ if err then
+ core.log.error("failed to fetch ssl config: ", err)
+ end
+ ngx_exit(-1)
+ end
+end
+
+
+function _M.http_ssl_client_hello_phase()
+ local sni, err = apisix_ssl.server_name(true)
+ if not sni or type(sni) ~= "string" then
+ local advise = "please check if the client requests via IP or uses an outdated " ..
+ "protocol. If you need to report an issue, " ..
+ "provide a packet capture file of the TLS handshake."
+ core.log.error("failed to find SNI: " .. (err or advise))
+ ngx_exit(-1)
+ end
+
local ngx_ctx = ngx.ctx
local api_ctx = core.tablepool.fetch("api_ctx", 0, 32)
ngx_ctx.api_ctx = api_ctx
- local ok, err = router.router_ssl.match_and_set(api_ctx)
+ local ok, err = router.router_ssl.match_and_set(api_ctx, true, sni)
+ ngx_ctx.matched_ssl = api_ctx.matched_ssl
core.tablepool.release("api_ctx", api_ctx)
ngx_ctx.api_ctx = nil
@@ -186,6 +212,13 @@ function _M.http_ssl_phase()
if err then
core.log.error("failed to fetch ssl config: ", err)
end
+ core.log.error("failed to match any SSL certificate by SNI: ", sni)
+ ngx_exit(-1)
+ end
+
+ ok, err = apisix_ssl.set_protocols_by_clienthello(ngx_ctx.matched_ssl.value.ssl_protocols)
+ if not ok then
+ core.log.error("failed to set ssl protocols: ", err)
ngx_exit(-1)
end
end
@@ -310,12 +343,38 @@ local function verify_tls_client(ctx)
end
+local function uri_matches_skip_mtls_route_patterns(ssl, uri)
+ for _, pat in ipairs(ssl.value.client.skip_mtls_uri_regex) do
+ if ngx_re_match(uri, pat, "jo") then
+ return true
+ end
+ end
+end
+
+
local function verify_https_client(ctx)
local scheme = ctx.var.scheme
if scheme ~= "https" then
return true
end
+ local matched_ssl = ngx.ctx.matched_ssl
+ if matched_ssl.value.client
+ and matched_ssl.value.client.skip_mtls_uri_regex
+ and apisix_ssl.support_client_verification()
+ and (not uri_matches_skip_mtls_route_patterns(matched_ssl, ngx.var.uri)) then
+ local res = ctx.var.ssl_client_verify
+ if res ~= "SUCCESS" then
+ if res == "NONE" then
+ core.log.error("client certificate was not present")
+ else
+ core.log.error("client certificate verification is not passed: ", res)
+ end
+
+ return false
+ end
+ end
+
local host = ctx.var.host
local matched = router.router_ssl.match_and_set(ctx, true, host)
if not matched then
@@ -568,7 +627,8 @@ function _M.http_access_phase()
local route = api_ctx.matched_route
if not route then
-- run global rule when there is no matching route
- plugin.run_global_rules(api_ctx, router.global_rules, nil)
+ local global_rules = apisix_global_rules.global_rules()
+ plugin.run_global_rules(api_ctx, global_rules, nil)
core.log.info("not find any matched route")
return core.response.exit(404,
@@ -620,7 +680,8 @@ function _M.http_access_phase()
api_ctx.route_name = route.value.name
-- run global rule
- plugin.run_global_rules(api_ctx, router.global_rules, nil)
+ local global_rules = apisix_global_rules.global_rules()
+ plugin.run_global_rules(api_ctx, global_rules, nil)
if route.value.script then
script.load(route, api_ctx)
@@ -688,6 +749,10 @@ function _M.grpc_access_phase()
core.log.error("failed to set grpcs upstream param: ", err)
core.response.exit(code)
end
+
+ if api_ctx.enable_mirror == true and has_mod then
+ apisix_ngx_client.enable_mirror()
+ end
end
diff --git a/apisix/inspect/dbg.lua b/apisix/inspect/dbg.lua
index 7f4e7b114424..2fd78782faac 100644
--- a/apisix/inspect/dbg.lua
+++ b/apisix/inspect/dbg.lua
@@ -98,6 +98,9 @@ local function hook(_, arg)
if #hooks == 0 then
core.log.warn("inspect: all hooks removed")
debug.sethook()
+ if jit then
+ jit.on()
+ end
end
end
end
diff --git a/apisix/plugin.lua b/apisix/plugin.lua
index 3b5cecac6ded..bde2b89a5393 100644
--- a/apisix/plugin.lua
+++ b/apisix/plugin.lua
@@ -775,6 +775,11 @@ function _M.get(name)
end
+function _M.get_stream(name)
+ return stream_local_plugins_hash and stream_local_plugins_hash[name]
+end
+
+
function _M.get_all(attrs)
local http_plugins = {}
local stream_plugins = {}
@@ -858,10 +863,6 @@ end
local function get_plugin_schema_for_gde(name, schema_type)
- if not enable_gde() then
- return nil
- end
-
local plugin_schema = local_plugins_hash and local_plugins_hash[name]
if not plugin_schema then
return nil
@@ -881,6 +882,9 @@ end
local function decrypt_conf(name, conf, schema_type)
+ if not enable_gde() then
+ return
+ end
local schema = get_plugin_schema_for_gde(name, schema_type)
if not schema then
core.log.warn("failed to get schema for plugin: ", name)
@@ -924,6 +928,9 @@ _M.decrypt_conf = decrypt_conf
local function encrypt_conf(name, conf, schema_type)
+ if not enable_gde() then
+ return
+ end
local schema = get_plugin_schema_for_gde(name, schema_type)
if not schema then
core.log.warn("failed to get schema for plugin: ", name)
@@ -1091,7 +1098,9 @@ function _M.run_plugin(phase, plugins, api_ctx)
end
plugin_run = true
+ api_ctx._plugin_name = plugins[i]["name"]
local code, body = phase_func(conf, api_ctx)
+ api_ctx._plugin_name = nil
if code or body then
if is_http then
if code >= 400 then
@@ -1126,7 +1135,9 @@ function _M.run_plugin(phase, plugins, api_ctx)
local conf = plugins[i + 1]
if phase_func and meta_filter(api_ctx, plugins[i]["name"], conf) then
plugin_run = true
+ api_ctx._plugin_name = plugins[i]["name"]
phase_func(conf, api_ctx)
+ api_ctx._plugin_name = nil
end
end
@@ -1135,8 +1146,7 @@ end
function _M.run_global_rules(api_ctx, global_rules, phase_name)
- if global_rules and global_rules.values
- and #global_rules.values > 0 then
+ if global_rules and #global_rules > 0 then
local orig_conf_type = api_ctx.conf_type
local orig_conf_version = api_ctx.conf_version
local orig_conf_id = api_ctx.conf_id
@@ -1146,7 +1156,7 @@ function _M.run_global_rules(api_ctx, global_rules, phase_name)
end
local plugins = core.tablepool.fetch("plugins", 32, 0)
- local values = global_rules.values
+ local values = global_rules
local route = api_ctx.matched_route
for _, global_rule in config_util.iterate_values(values) do
api_ctx.conf_type = "global_rule"
diff --git a/apisix/plugin_config.lua b/apisix/plugin_config.lua
index cc5a6ff38456..88b17d4b83ac 100644
--- a/apisix/plugin_config.lua
+++ b/apisix/plugin_config.lua
@@ -40,6 +40,14 @@ function _M.init_worker()
end
+function _M.plugin_configs()
+ if not plugin_configs then
+ return nil, nil
+ end
+ return plugin_configs.values, plugin_configs.conf_version
+end
+
+
function _M.get(id)
return plugin_configs:get(id)
end
@@ -70,8 +78,7 @@ function _M.merge(route_conf, plugin_config)
end
end
- route_conf.update_count = route_conf.update_count + 1
- route_conf.modifiedIndex = route_conf.orig_modifiedIndex .. "#" .. route_conf.update_count
+ route_conf.modifiedIndex = route_conf.orig_modifiedIndex .. "#" .. plugin_config.modifiedIndex
route_conf.prev_plugin_config_ver = plugin_config.modifiedIndex
return route_conf
diff --git a/apisix/plugins/ai.lua b/apisix/plugins/ai.lua
index b46249a9a190..39430c7ad014 100644
--- a/apisix/plugins/ai.lua
+++ b/apisix/plugins/ai.lua
@@ -18,6 +18,7 @@ local require = require
local apisix = require("apisix")
local core = require("apisix.core")
local router = require("apisix.router")
+local get_global_rules = require("apisix.global_rules").global_rules
local event = require("apisix.core.event")
local balancer = require("ngx.balancer")
local ngx = ngx
@@ -229,8 +230,8 @@ local function routes_analyze(routes)
end
end
- local global_rules_flag = router.global_rules and router.global_rules.values
- and #router.global_rules.values ~= 0
+ local global_rules, _ = get_global_rules()
+ local global_rules_flag = global_rules and #global_rules ~= 0
if route_flags["vars"] or route_flags["filter_func"]
or route_flags["remote_addr"]
diff --git a/apisix/plugins/aws-lambda.lua b/apisix/plugins/aws-lambda.lua
index fe4d7f394cd2..1b172af4135d 100644
--- a/apisix/plugins/aws-lambda.lua
+++ b/apisix/plugins/aws-lambda.lua
@@ -122,8 +122,10 @@ local function request_processor(conf, ctx, params)
-- computing canonical query string
local canonical_qs = {}
+ local canonical_qs_i = 0
for k, v in pairs(params.query) do
- canonical_qs[#canonical_qs+1] = ngx.unescape_uri(k) .. "=" .. ngx.unescape_uri(v)
+ canonical_qs_i = canonical_qs_i + 1
+ canonical_qs[canonical_qs_i] = ngx.unescape_uri(k) .. "=" .. ngx.unescape_uri(v)
end
tab_sort(canonical_qs)
@@ -132,10 +134,12 @@ local function request_processor(conf, ctx, params)
-- computing canonical and signed headers
local canonical_headers, signed_headers = {}, {}
+ local signed_headers_i = 0
for k, v in pairs(headers) do
k = k:lower()
if k ~= "connection" then
- signed_headers[#signed_headers+1] = k
+ signed_headers_i = signed_headers_i + 1
+ signed_headers[signed_headers_i] = k
-- strip starting and trailing spaces including strip multiple spaces into single space
canonical_headers[k] = str_strip(v)
end
diff --git a/apisix/plugins/batch-requests.lua b/apisix/plugins/batch-requests.lua
index 54dfba07d488..a1b574307ef6 100644
--- a/apisix/plugins/batch-requests.lua
+++ b/apisix/plugins/batch-requests.lua
@@ -275,7 +275,14 @@ local function batch_requests(ctx)
headers = resp.headers,
}
if resp.has_body then
- sub_resp.body = resp:read_body()
+ local err
+ sub_resp.body, err = resp:read_body()
+ if err then
+ sub_resp.read_body_err = err
+ core.log.error("read pipeline response body failed: ", err)
+ else
+ resp:read_trailers()
+ end
end
core.table.insert(aggregated_resp, sub_resp)
end
diff --git a/apisix/plugins/body-transformer.lua b/apisix/plugins/body-transformer.lua
index 9ca6b86bad51..5b5557f7d4d7 100644
--- a/apisix/plugins/body-transformer.lua
+++ b/apisix/plugins/body-transformer.lua
@@ -25,6 +25,7 @@ local str_format = string.format
local type = type
local pcall = pcall
local pairs = pairs
+local next = next
local transform_schema = {
@@ -74,6 +75,10 @@ end
local function remove_namespace(tbl)
for k, v in pairs(tbl) do
+ if type(v) == "table" and next(v) == nil then
+ v = ""
+ tbl[k] = v
+ end
if type(k) == "string" then
local newk = k:match(".*:(.*)")
if newk then
@@ -112,7 +117,7 @@ end
local function transform(conf, body, typ, ctx)
- local out = {_body = body}
+ local out = {}
if body then
local err
local format = conf[typ].input_format
@@ -123,6 +128,8 @@ local function transform(conf, body, typ, ctx)
core.log.error(err, ", body=", body)
return nil, 400, err
end
+ else
+ core.log.warn("no input format to parse ", typ, " body")
end
end
@@ -137,6 +144,7 @@ local function transform(conf, body, typ, ctx)
end
out._ctx = ctx
+ out._body = body
out._escape_xml = escape_xml
out._escape_json = escape_json
local ok, render_out = pcall(render, out)
diff --git a/apisix/plugins/chaitin-waf.lua b/apisix/plugins/chaitin-waf.lua
new file mode 100644
index 000000000000..cc870a47f47e
--- /dev/null
+++ b/apisix/plugins/chaitin-waf.lua
@@ -0,0 +1,373 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements. See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+local require = require
+local core = require("apisix.core")
+local rr_balancer = require("apisix.balancer.roundrobin")
+local plugin = require("apisix.plugin")
+local t1k = require "resty.t1k"
+local expr = require("resty.expr.v1")
+
+local ngx = ngx
+local ngx_now = ngx.now
+local string = string
+local fmt = string.format
+local tostring = tostring
+local tonumber = tonumber
+local ipairs = ipairs
+
+local plugin_name = "chaitin-waf"
+
+local vars_schema = {
+ type = "array",
+}
+
+local match_schema = {
+ type = "array",
+ items = {
+ type = "object",
+ properties = {
+ vars = vars_schema
+ }
+ },
+}
+
+local plugin_schema = {
+ type = "object",
+ properties = {
+ -- TODO: we should add a configuration "mode" here
+ -- It can be one of off, block and monitor
+ match = match_schema,
+ append_waf_resp_header = {
+ type = "boolean",
+ default = true
+ },
+ append_waf_debug_header = {
+ type = "boolean",
+ default = false
+ },
+ config = {
+ type = "object",
+ properties = {
+ connect_timeout = {
+ type = "integer",
+ },
+ send_timeout = {
+ type = "integer",
+ },
+ read_timeout = {
+ type = "integer",
+ },
+ req_body_size = {
+ type = "integer",
+ },
+ keepalive_size = {
+ type = "integer",
+ },
+ keepalive_timeout = {
+ type = "integer",
+ }
+ },
+ },
+ },
+}
+
+local metadata_schema = {
+ type = "object",
+ properties = {
+ nodes = {
+ type = "array",
+ items = {
+ type = "object",
+ properties = {
+ host = {
+ type = "string",
+ pattern = "^\\*?[0-9a-zA-Z-._\\[\\]:/]+$"
+ },
+ port = {
+ type = "integer",
+ minimum = 1,
+ default = 80
+ },
+ },
+ required = { "host" }
+ },
+ minItems = 1,
+ },
+ config = {
+ type = "object",
+ properties = {
+ connect_timeout = {
+ type = "integer",
+ default = 1000 -- milliseconds
+ },
+ send_timeout = {
+ type = "integer",
+ default = 1000 -- milliseconds
+ },
+ read_timeout = {
+ type = "integer",
+ default = 1000 -- milliseconds
+ },
+ req_body_size = {
+ type = "integer",
+ default = 1024 -- KB
+ },
+ -- maximum concurrent idle connections to
+ -- the SafeLine WAF detection service
+ keepalive_size = {
+ type = "integer",
+ default = 256
+ },
+ keepalive_timeout = {
+ type = "integer",
+ default = 60000 -- milliseconds
+ },
+ -- TODO: we need a configuration to enable/disable the real client ip
+ -- the real client ip is calculated by APISIX
+ },
+ default = {},
+ },
+ },
+ required = { "nodes" },
+}
+
+local _M = {
+ version = 0.1,
+ priority = 2700,
+ name = plugin_name,
+ schema = plugin_schema,
+ metadata_schema = metadata_schema
+}
+
+local global_server_picker
+
+local HEADER_CHAITIN_WAF = "X-APISIX-CHAITIN-WAF"
+local HEADER_CHAITIN_WAF_ERROR = "X-APISIX-CHAITIN-WAF-ERROR"
+local HEADER_CHAITIN_WAF_TIME = "X-APISIX-CHAITIN-WAF-TIME"
+local HEADER_CHAITIN_WAF_STATUS = "X-APISIX-CHAITIN-WAF-STATUS"
+local HEADER_CHAITIN_WAF_ACTION = "X-APISIX-CHAITIN-WAF-ACTION"
+local HEADER_CHAITIN_WAF_SERVER = "X-APISIX-CHAITIN-WAF-SERVER"
+local blocked_message = [[{"code": %s, "success":false, ]] ..
+ [["message": "blocked by Chaitin SafeLine Web Application Firewall", "event_id": "%s"}]]
+
+
+function _M.check_schema(conf, schema_type)
+ if schema_type == core.schema.TYPE_METADATA then
+ return core.schema.check(metadata_schema, conf)
+ end
+
+ local ok, err = core.schema.check(plugin_schema, conf)
+
+ if not ok then
+ return false, err
+ end
+
+ if conf.match then
+ for _, m in ipairs(conf.match) do
+ local ok, err = expr.new(m.vars)
+ if not ok then
+ return false, "failed to validate the 'vars' expression: " .. err
+ end
+ end
+ end
+
+ return true
+end
+
+
+local function get_healthy_chaitin_server_nodes(metadata, checker)
+ local nodes = metadata.nodes
+ local new_nodes = core.table.new(0, #nodes)
+
+ for i = 1, #nodes do
+ local host, port = nodes[i].host, nodes[i].port
+ new_nodes[host .. ":" .. tostring(port)] = 1
+ end
+
+ return new_nodes
+end
+
+
+local function get_chaitin_server(metadata, ctx)
+ if not global_server_picker or global_server_picker.upstream ~= metadata.value.nodes then
+ local up_nodes = get_healthy_chaitin_server_nodes(metadata.value)
+ if core.table.nkeys(up_nodes) == 0 then
+ return nil, nil, "no healthy nodes"
+ end
+ core.log.info("chaitin-waf nodes: ", core.json.delay_encode(up_nodes))
+
+ global_server_picker = rr_balancer.new(up_nodes, metadata.value.nodes)
+ end
+
+ local server = global_server_picker.get(ctx)
+ local host, port, err = core.utils.parse_addr(server)
+ if err then
+ return nil, nil, err
+ end
+
+ return host, port, nil
+end
+
+
+local function check_match(conf, ctx)
+ local match_passed = true
+
+ if conf.match then
+ for _, match in ipairs(conf.match) do
+ -- todo: use lrucache to cache the result
+ local exp, err = expr.new(match.vars)
+ if err then
+ local msg = "failed to create match expression for " ..
+ tostring(match.vars) .. ", err: " .. tostring(err)
+ core.log.error(msg)
+ return false, msg
+ end
+
+ match_passed = exp:eval(ctx.var)
+ if match_passed then
+ break
+ end
+ end
+ end
+
+ return match_passed, nil
+end
+
+
+local function get_conf(conf, metadata)
+ local t = {
+ mode = "block",
+ }
+
+ if metadata.config then
+ t.connect_timeout = metadata.config.connect_timeout
+ t.send_timeout = metadata.config.send_timeout
+ t.read_timeout = metadata.config.read_timeout
+ t.req_body_size = metadata.config.req_body_size
+ t.keepalive_size = metadata.config.keepalive_size
+ t.keepalive_timeout = metadata.config.keepalive_timeout
+ end
+
+ if conf.config then
+ t.connect_timeout = conf.config.connect_timeout
+ t.send_timeout = conf.config.send_timeout
+ t.read_timeout = conf.config.read_timeout
+ t.req_body_size = conf.config.req_body_size
+ t.keepalive_size = conf.config.keepalive_size
+ t.keepalive_timeout = conf.config.keepalive_timeout
+ end
+
+ return t
+end
+
+
+local function do_access(conf, ctx)
+ local extra_headers = {}
+
+ local match, err = check_match(conf, ctx)
+ if not match then
+ if err then
+ extra_headers[HEADER_CHAITIN_WAF] = "err"
+ extra_headers[HEADER_CHAITIN_WAF_ERROR] = tostring(err)
+ return 500, nil, extra_headers
+ else
+ extra_headers[HEADER_CHAITIN_WAF] = "no"
+ return nil, nil, extra_headers
+ end
+ end
+
+ local metadata = plugin.plugin_metadata(plugin_name)
+ if not core.table.try_read_attr(metadata, "value", "nodes") then
+ extra_headers[HEADER_CHAITIN_WAF] = "err"
+ extra_headers[HEADER_CHAITIN_WAF_ERROR] = "missing metadata"
+ return 500, nil, extra_headers
+ end
+
+ local host, port, err = get_chaitin_server(metadata, ctx)
+ if err then
+ extra_headers[HEADER_CHAITIN_WAF] = "unhealthy"
+ extra_headers[HEADER_CHAITIN_WAF_ERROR] = tostring(err)
+
+ return 500, nil, extra_headers
+ end
+
+ core.log.info("picked chaitin-waf server: ", host, ":", port)
+
+ local t = get_conf(conf, metadata.value)
+ t.host = host
+ t.port = port
+
+ extra_headers[HEADER_CHAITIN_WAF_SERVER] = host
+ extra_headers[HEADER_CHAITIN_WAF] = "yes"
+
+ local start_time = ngx_now() * 1000
+ local ok, err, result = t1k.do_access(t, false)
+ if not ok then
+ extra_headers[HEADER_CHAITIN_WAF] = "waf-err"
+ local err_msg = tostring(err)
+ if core.string.find(err_msg, "timeout") then
+ extra_headers[HEADER_CHAITIN_WAF] = "timeout"
+ end
+ extra_headers[HEADER_CHAITIN_WAF_ERROR] = tostring(err)
+ else
+ extra_headers[HEADER_CHAITIN_WAF_ACTION] = "pass"
+ end
+ extra_headers[HEADER_CHAITIN_WAF_TIME] = ngx_now() * 1000 - start_time
+
+ local code = 200
+ extra_headers[HEADER_CHAITIN_WAF_STATUS] = code
+ if result then
+ if result.status then
+ code = result.status
+ extra_headers[HEADER_CHAITIN_WAF_STATUS] = code
+ extra_headers[HEADER_CHAITIN_WAF_ACTION] = "reject"
+
+ core.log.error("request rejected by chaitin-waf, event_id: " .. result.event_id)
+ return tonumber(code), fmt(blocked_message, code,
+ result.event_id) .. "\n", extra_headers
+ end
+ end
+ if not ok then
+ extra_headers[HEADER_CHAITIN_WAF_STATUS] = nil
+ end
+
+ return nil, nil, extra_headers
+end
+
+
+function _M.access(conf, ctx)
+ local code, msg, extra_headers = do_access(conf, ctx)
+
+ if not conf.append_waf_debug_header then
+ extra_headers[HEADER_CHAITIN_WAF_ERROR] = nil
+ extra_headers[HEADER_CHAITIN_WAF_SERVER] = nil
+ end
+
+ if conf.append_waf_resp_header then
+ core.response.set_header(extra_headers)
+ end
+
+ return code, msg
+end
+
+
+function _M.header_filter(conf, ctx)
+ t1k.do_header_filter()
+end
+
+
+return _M
diff --git a/apisix/plugins/consumer-restriction.lua b/apisix/plugins/consumer-restriction.lua
index 93f3b9f59f5f..88c2bbd959d6 100644
--- a/apisix/plugins/consumer-restriction.lua
+++ b/apisix/plugins/consumer-restriction.lua
@@ -128,7 +128,9 @@ function _M.access(conf, ctx)
local method = ngx.req.get_method()
if not value then
- return 401, { message = "Missing authentication or identity verification."}
+ local err_msg = "The request is rejected, please check the "
+ .. conf.type .. " for this request"
+ return 401, { message = err_msg}
end
core.log.info("value: ", value)
diff --git a/apisix/plugins/error-log-logger.lua b/apisix/plugins/error-log-logger.lua
index a0a364564a27..5d5c5d2a9f8b 100644
--- a/apisix/plugins/error-log-logger.lua
+++ b/apisix/plugins/error-log-logger.lua
@@ -237,10 +237,15 @@ local function send_to_skywalking(log_message)
httpc:set_timeout(config.timeout * 1000)
local entries = {}
+ local service_instance_name = config.skywalking.service_instance_name
+ if service_instance_name == "$hostname" then
+ service_instance_name = core.utils.gethostname()
+ end
+
for i = 1, #log_message, 2 do
local content = {
service = config.skywalking.service_name,
- serviceInstance = config.skywalking.service_instance_name,
+ serviceInstance = service_instance_name,
endpoint = "",
body = {
text = {
diff --git a/apisix/plugins/fault-injection.lua b/apisix/plugins/fault-injection.lua
index 9089b41681ac..34ca05e817fb 100644
--- a/apisix/plugins/fault-injection.lua
+++ b/apisix/plugins/fault-injection.lua
@@ -20,6 +20,9 @@ local expr = require("resty.expr.v1")
local sleep = core.sleep
local random = math.random
local ipairs = ipairs
+local ngx = ngx
+local pairs = pairs
+local type = type
local plugin_name = "fault-injection"
@@ -32,6 +35,18 @@ local schema = {
properties = {
http_status = {type = "integer", minimum = 200},
body = {type = "string", minLength = 0},
+ headers = {
+ type = "object",
+ minProperties = 1,
+ patternProperties = {
+ ["^[^:]+$"] = {
+ oneOf = {
+ { type = "string" },
+ { type = "number" }
+ }
+ }
+ }
+ },
percentage = {type = "integer", minimum = 0, maximum = 100},
vars = {
type = "array",
@@ -144,6 +159,14 @@ function _M.rewrite(conf, ctx)
end
if conf.abort and sample_hit(conf.abort.percentage) and abort_vars then
+ if conf.abort.headers then
+ for header_name, header_value in pairs(conf.abort.headers) do
+ if type(header_value) == "string" then
+ header_value = core.utils.resolve_var(header_value, ctx.var)
+ end
+ ngx.header[header_name] = header_value
+ end
+ end
return conf.abort.http_status, core.utils.resolve_var(conf.abort.body, ctx.var)
end
end
diff --git a/apisix/plugins/file-logger.lua b/apisix/plugins/file-logger.lua
index 97140abfac91..d07c49270798 100644
--- a/apisix/plugins/file-logger.lua
+++ b/apisix/plugins/file-logger.lua
@@ -16,6 +16,7 @@
--
local log_util = require("apisix.utils.log-util")
local core = require("apisix.core")
+local expr = require("resty.expr.v1")
local ngx = ngx
local io_open = io.open
local is_apisix_or, process = pcall(require, "resty.apisix.process")
@@ -38,6 +39,13 @@ local schema = {
items = {
type = "array"
}
+ },
+ match = {
+ type = "array",
+ maxItems = 20,
+ items = {
+ type = "array",
+ },
}
},
required = {"path"}
@@ -65,6 +73,12 @@ function _M.check_schema(conf, schema_type)
if schema_type == core.schema.TYPE_METADATA then
return core.schema.check(metadata_schema, conf)
end
+ if conf.match then
+ local ok, err = expr.new(conf.match)
+ if not ok then
+ return nil, "failed to validate the 'match' expression: " .. err
+ end
+ end
return core.schema.check(schema, conf)
end
@@ -150,6 +164,9 @@ end
function _M.log(conf, ctx)
local entry = log_util.get_log_entry(plugin_name, conf, ctx)
+ if entry == nil then
+ return
+ end
write_file_data(conf, entry)
end
diff --git a/apisix/plugins/forward-auth.lua b/apisix/plugins/forward-auth.lua
index 3bc8a0e50265..39e690038890 100644
--- a/apisix/plugins/forward-auth.lua
+++ b/apisix/plugins/forward-auth.lua
@@ -23,6 +23,7 @@ local schema = {
type = "object",
properties = {
uri = {type = "string"},
+ allow_degradation = {type = "boolean", default = false},
ssl_verify = {
type = "boolean",
default = true,
@@ -118,9 +119,9 @@ function _M.access(conf, ctx)
httpc:set_timeout(conf.timeout)
local res, err = httpc:request_uri(conf.uri, params)
-
- -- block by default when authorization service is unavailable
- if not res then
+ if not res and conf.allow_degradation then
+ return
+ elseif not res then
core.log.error("failed to process forward auth, err: ", err)
return 403
end
diff --git a/apisix/plugins/google-cloud-logging.lua b/apisix/plugins/google-cloud-logging.lua
index 3fb34ab43efe..6e71cc81b300 100644
--- a/apisix/plugins/google-cloud-logging.lua
+++ b/apisix/plugins/google-cloud-logging.lua
@@ -35,6 +35,7 @@ local schema = {
auth_config = {
type = "object",
properties = {
+ client_email = { type = "string" },
private_key = { type = "string" },
project_id = { type = "string" },
token_uri = {
@@ -62,7 +63,7 @@ local schema = {
default = "https://logging.googleapis.com/v2/entries:write"
},
},
- required = { "private_key", "project_id", "token_uri" }
+ required = { "client_email", "private_key", "project_id", "token_uri" }
},
ssl_verify = {
type = "boolean",
diff --git a/apisix/plugins/grpc-transcode/proto.lua b/apisix/plugins/grpc-transcode/proto.lua
index 1c9b7718fdc1..347ec39eae17 100644
--- a/apisix/plugins/grpc-transcode/proto.lua
+++ b/apisix/plugins/grpc-transcode/proto.lua
@@ -99,7 +99,7 @@ end
local function compile_proto(content)
-- clear pb state
- pb.state(nil)
+ local old_pb_state = pb.state(nil)
local compiled, err = compile_proto_text(content)
if not compiled then
@@ -110,7 +110,7 @@ local function compile_proto(content)
end
-- fetch pb state
- compiled.pb_state = pb.state(nil)
+ compiled.pb_state = pb.state(old_pb_state)
return compiled
end
diff --git a/apisix/plugins/grpc-transcode/request.lua b/apisix/plugins/grpc-transcode/request.lua
index 88d2fcfb9ec8..934a1c95657c 100644
--- a/apisix/plugins/grpc-transcode/request.lua
+++ b/apisix/plugins/grpc-transcode/request.lua
@@ -36,10 +36,12 @@ return function (proto, service, method, pb_option, deadline, default_values)
req_read_body()
+ local pb_old_state = pb.state(proto.pb_state)
util.set_options(proto, pb_option)
local map_message = util.map_message(m.input_type, default_values or {})
local ok, encoded = pcall(pb.encode, m.input_type, map_message)
+ pb.state(pb_old_state)
if not ok or not encoded then
return false, "failed to encode request data to protobuf", 400
diff --git a/apisix/plugins/grpc-transcode/response.lua b/apisix/plugins/grpc-transcode/response.lua
index dee267b77c1f..9dd6780f049d 100644
--- a/apisix/plugins/grpc-transcode/response.lua
+++ b/apisix/plugins/grpc-transcode/response.lua
@@ -25,7 +25,7 @@ local ipairs = ipairs
local pcall = pcall
-local function handle_error_response(status_detail_type)
+local function handle_error_response(status_detail_type, proto)
local err_msg
local grpc_status = ngx.header["grpc-status-details-bin"]
@@ -58,7 +58,9 @@ local function handle_error_response(status_detail_type)
if status_detail_type and details then
local decoded_details = {}
for _, detail in ipairs(details) do
+ local pb_old_state = pb.state(proto.pb_state)
local ok, err_or_value = pcall(pb.decode, status_detail_type, detail.value)
+ pb.state(pb_old_state)
if not ok then
err_msg = "failed to call pb.decode to decode details in "
.. "grpc-status-details-bin"
@@ -99,7 +101,7 @@ return function(ctx, proto, service, method, pb_option, show_status_in_body, sta
-- handle error response after the last response chunk
if ngx.status >= 300 and show_status_in_body then
- return handle_error_response(status_detail_type)
+ return handle_error_response(status_detail_type, proto)
end
-- when body has already been read by other plugin
@@ -118,10 +120,12 @@ return function(ctx, proto, service, method, pb_option, show_status_in_body, sta
buffer = string.sub(buffer, 6)
end
+ local pb_old_state = pb.state(proto.pb_state)
util.set_options(proto, pb_option)
local err_msg
local decoded = pb.decode(m.output_type, buffer)
+ pb.state(pb_old_state)
if not decoded then
err_msg = "failed to decode response data by protobuf"
ngx.arg[1] = err_msg
diff --git a/apisix/plugins/grpc-transcode/util.lua b/apisix/plugins/grpc-transcode/util.lua
index 4a27c1d3b9df..a95cb8202041 100644
--- a/apisix/plugins/grpc-transcode/util.lua
+++ b/apisix/plugins/grpc-transcode/util.lua
@@ -48,8 +48,6 @@ function _M.find_method(proto, service, method)
return nil
end
- -- restore pb state
- pb.state(proto.pb_state)
return res
end
diff --git a/apisix/plugins/http-logger.lua b/apisix/plugins/http-logger.lua
index 399d50da1eed..5dd52240f8df 100644
--- a/apisix/plugins/http-logger.lua
+++ b/apisix/plugins/http-logger.lua
@@ -122,7 +122,7 @@ local function send_http_data(conf, log_message)
local httpc_res, httpc_err = httpc:request({
method = "POST",
- path = url_decoded.path,
+ path = #url_decoded.path ~= 0 and url_decoded.path or "/",
query = url_decoded.query,
body = log_message,
headers = {
diff --git a/apisix/plugins/jwt-auth.lua b/apisix/plugins/jwt-auth.lua
index f195830a696c..26211b98dece 100644
--- a/apisix/plugins/jwt-auth.lua
+++ b/apisix/plugins/jwt-auth.lua
@@ -220,7 +220,7 @@ local function fetch_jwt_token(conf, ctx)
return val
end
-local function get_secret(conf, consumer_name)
+local function get_secret(conf)
local secret = conf.secret
if conf.base64_secret then
@@ -231,7 +231,7 @@ local function get_secret(conf, consumer_name)
end
-local function get_rsa_or_ecdsa_keypair(conf, consumer_name)
+local function get_rsa_or_ecdsa_keypair(conf)
local public_key = conf.public_key
local private_key = conf.private_key
@@ -261,7 +261,7 @@ end
local function sign_jwt_with_HS(key, consumer, payload)
- local auth_secret, err = get_secret(consumer.auth_conf, consumer.username)
+ local auth_secret, err = get_secret(consumer.auth_conf)
if not auth_secret then
core.log.error("failed to sign jwt, err: ", err)
core.response.exit(503, "failed to sign jwt")
@@ -286,7 +286,7 @@ end
local function sign_jwt_with_RS256_ES256(key, consumer, payload)
local public_key, private_key, err = get_rsa_or_ecdsa_keypair(
- consumer.auth_conf, consumer.username
+ consumer.auth_conf
)
if not public_key then
core.log.error("failed to sign jwt, err: ", err)
@@ -321,13 +321,13 @@ local function algorithm_handler(consumer, method_only)
return sign_jwt_with_HS
end
- return get_secret(consumer.auth_conf, consumer.username)
+ return get_secret(consumer.auth_conf)
elseif consumer.auth_conf.algorithm == "RS256" or consumer.auth_conf.algorithm == "ES256" then
if method_only then
return sign_jwt_with_RS256_ES256
end
- local public_key, _, err = get_rsa_or_ecdsa_keypair(consumer.auth_conf, consumer.username)
+ local public_key, _, err = get_rsa_or_ecdsa_keypair(consumer.auth_conf)
return public_key, err
end
end
diff --git a/apisix/plugins/ldap-auth.lua b/apisix/plugins/ldap-auth.lua
index 41156c1bfb6e..11f205c6b8f5 100644
--- a/apisix/plugins/ldap-auth.lua
+++ b/apisix/plugins/ldap-auth.lua
@@ -18,7 +18,7 @@ local core = require("apisix.core")
local ngx = ngx
local ngx_re = require("ngx.re")
local consumer_mod = require("apisix.consumer")
-local ok, ldap_cli = pcall(require, "resty.ldap.client")
+local ldap = require("resty.ldap")
local schema = {
type = "object",
@@ -100,11 +100,6 @@ local function extract_auth_header(authorization)
end
function _M.rewrite(conf, ctx)
- if not ok then -- ensure rasn library loaded
- core.log.error("failed to load lua-resty-ldap lib: ", ldap_cli)
- return 501
- end
-
core.log.info("plugin rewrite phase, conf: ", core.json.delay_encode(conf))
-- 1. extract authorization from header
@@ -115,31 +110,36 @@ function _M.rewrite(conf, ctx)
end
local user, err = extract_auth_header(auth_header)
- if err then
- core.log.warn(err)
+ if err or not user then
+ if err then
+ core.log.warn(err)
+ else
+ core.log.warn("nil user")
+ end
return 401, { message = "Invalid authorization in request" }
end
-- 2. try authenticate the user against the ldap server
local ldap_host, ldap_port = core.utils.parse_addr(conf.ldap_uri)
- local ldap_client = ldap_cli:new(ldap_host, ldap_port, {
+ local ldapconf = {
+ timeout = 10000,
start_tls = false,
+ ldap_host = ldap_host,
+ ldap_port = ldap_port or 389,
ldaps = conf.use_tls,
- ssl_verify = conf.tls_verify,
- socket_timeout = 10000,
- keepalive_pool_name = ldap_host .. ":" .. ldap_port .. "_ldapauth"
- .. (conf.use_tls and "_tls" or ""),
- keepalive_pool_size = 5,
- keepalive_timeout = 60000,
- })
-
- local user_dn = conf.uid .. "=" .. user.username .. "," .. conf.base_dn
- local res, err = ldap_client:simple_bind(user_dn, user.password)
+ tls_verify = conf.tls_verify,
+ base_dn = conf.base_dn,
+ attribute = conf.uid,
+ keepalive = 60000,
+ }
+ local res, err = ldap.ldap_authenticate(user.username, user.password, ldapconf)
if not res then
core.log.warn("ldap-auth failed: ", err)
return 401, { message = "Invalid user authorization" }
end
+ local user_dn = conf.uid .. "=" .. user.username .. "," .. conf.base_dn
+
-- 3. Retrieve consumer for authorization plugin
local consumer_conf = consumer_mod.plugin(plugin_name)
if not consumer_conf then
diff --git a/apisix/plugins/limit-conn/init.lua b/apisix/plugins/limit-conn/init.lua
index 3337980fd815..c6ce55f240d0 100644
--- a/apisix/plugins/limit-conn/init.lua
+++ b/apisix/plugins/limit-conn/init.lua
@@ -16,6 +16,7 @@
--
local limit_conn_new = require("resty.limit.conn").new
local core = require("apisix.core")
+local is_http = ngx.config.subsystem == "http"
local sleep = core.sleep
local shdict_name = "plugin-limit-conn"
if ngx.config.subsystem == "stream" then
@@ -115,11 +116,13 @@ function _M.decrease(conf, ctx)
local use_delay = limit_conn[i + 3]
local latency
- if not use_delay then
- if ctx.proxy_passed then
- latency = ctx.var.upstream_response_time
- else
- latency = ctx.var.request_time - delay
+ if is_http then
+ if not use_delay then
+ if ctx.proxy_passed then
+ latency = ctx.var.upstream_response_time
+ else
+ latency = ctx.var.request_time - delay
+ end
end
end
core.log.debug("request latency is ", latency) -- for test
diff --git a/apisix/plugins/limit-count.lua b/apisix/plugins/limit-count.lua
index 0eafd64235b3..72ab574df775 100644
--- a/apisix/plugins/limit-count.lua
+++ b/apisix/plugins/limit-count.lua
@@ -31,7 +31,7 @@ end
function _M.access(conf, ctx)
- return limit_count.rate_limit(conf, ctx)
+ return limit_count.rate_limit(conf, ctx, plugin_name, 1)
end
diff --git a/apisix/plugins/limit-count/init.lua b/apisix/plugins/limit-count/init.lua
index 58fbf7969e2a..069ab6739ee5 100644
--- a/apisix/plugins/limit-count/init.lua
+++ b/apisix/plugins/limit-count/init.lua
@@ -20,7 +20,6 @@ local tab_insert = table.insert
local ipairs = ipairs
local pairs = pairs
-local plugin_name = "limit-count"
local limit_redis_cluster_new
local limit_redis_new
local limit_local_new
@@ -50,6 +49,9 @@ local policy_to_additional_properties = {
redis_port = {
type = "integer", minimum = 1, default = 6379,
},
+ redis_username = {
+ type = "string", minLength = 1,
+ },
redis_password = {
type = "string", minLength = 0,
},
@@ -59,6 +61,12 @@ local policy_to_additional_properties = {
redis_timeout = {
type = "integer", minimum = 1, default = 1000,
},
+ redis_ssl = {
+ type = "boolean", default = false,
+ },
+ redis_ssl_verify = {
+ type = "boolean", default = false,
+ },
},
required = {"redis_host"},
},
@@ -191,8 +199,8 @@ function _M.check_schema(conf)
end
-local function create_limit_obj(conf)
- core.log.info("create new limit-count plugin instance")
+local function create_limit_obj(conf, plugin_name)
+ core.log.info("create new " .. plugin_name .. " plugin instance")
if not conf.policy or conf.policy == "local" then
return limit_local_new("plugin-" .. plugin_name, conf.count,
@@ -234,9 +242,9 @@ local function gen_limit_key(conf, ctx, key)
end
-local function gen_limit_obj(conf, ctx)
+local function gen_limit_obj(conf, ctx, plugin_name)
if conf.group then
- return lrucache(conf.group, "", create_limit_obj, conf)
+ return lrucache(conf.group, "", create_limit_obj, conf, plugin_name)
end
local extra_key
@@ -246,13 +254,13 @@ local function gen_limit_obj(conf, ctx)
extra_key = conf.policy
end
- return core.lrucache.plugin_ctx(lrucache, ctx, extra_key, create_limit_obj, conf)
+ return core.lrucache.plugin_ctx(lrucache, ctx, extra_key, create_limit_obj, conf, plugin_name)
end
-function _M.rate_limit(conf, ctx)
+function _M.rate_limit(conf, ctx, name, cost)
core.log.info("ver: ", ctx.conf_version)
- local lim, err = gen_limit_obj(conf, ctx)
+ local lim, err = gen_limit_obj(conf, ctx, name)
if not lim then
core.log.error("failed to fetch limit.count object: ", err)
@@ -289,7 +297,13 @@ function _M.rate_limit(conf, ctx)
key = gen_limit_key(conf, ctx, key)
core.log.info("limit key: ", key)
- local delay, remaining, reset = lim:incoming(key, true, conf)
+ local delay, remaining, reset
+ if not conf.policy or conf.policy == "local" then
+ delay, remaining, reset = lim:incoming(key, true, conf, cost)
+ else
+ delay, remaining, reset = lim:incoming(key, cost)
+ end
+
if not delay then
local err = remaining
if err == "rejected" then
diff --git a/apisix/plugins/limit-count/limit-count-local.lua b/apisix/plugins/limit-count/limit-count-local.lua
index f6909403141e..b6f319ae0106 100644
--- a/apisix/plugins/limit-count/limit-count-local.lua
+++ b/apisix/plugins/limit-count/limit-count-local.lua
@@ -14,7 +14,8 @@
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
-local limit_local_new = require("resty.limit.count").new
+local limit_count = require("resty.limit.count")
+
local ngx = ngx
local ngx_time = ngx.time
local assert = assert
@@ -55,21 +56,18 @@ function _M.new(plugin_name, limit, window)
assert(limit > 0 and window > 0)
local self = {
- limit_count = limit_local_new(plugin_name, limit, window),
- dict = ngx.shared["plugin-limit-count-reset-header"]
+ limit_count = limit_count.new(plugin_name, limit, window),
+ dict = ngx.shared[plugin_name .. "-reset-header"]
}
return setmetatable(self, mt)
end
-function _M.incoming(self, key, commit, conf)
- local delay, remaining = self.limit_count:incoming(key, commit)
- local reset = 0
- if not delay then
- return delay, remaining, reset
- end
+function _M.incoming(self, key, commit, conf, cost)
+ local delay, remaining = self.limit_count:incoming(key, commit, cost)
+ local reset
- if remaining == conf.count - 1 then
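+    -- remaining == conf.count - cost right after the decrement means this is
+    -- the first request of the window, so record its end time; otherwise reuse it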
+ if remaining == conf.count - cost then
reset = set_endtime(self, key, conf.time_window)
else
reset = read_reset(self, key)
diff --git a/apisix/plugins/limit-count/limit-count-redis-cluster.lua b/apisix/plugins/limit-count/limit-count-redis-cluster.lua
index 25a3c1c57b21..7800f1c888f9 100644
--- a/apisix/plugins/limit-count/limit-count-redis-cluster.lua
+++ b/apisix/plugins/limit-count/limit-count-redis-cluster.lua
@@ -30,12 +30,13 @@ local mt = {
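+-- KEYS[1] is the counter key; ARGV[1] = limit, ARGV[2] = window in seconds, ARGV[3] = cost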
local script = core.string.compress_script([=[
+ assert(tonumber(ARGV[3]) >= 1, "cost must be at least 1")
local ttl = redis.call('ttl', KEYS[1])
if ttl < 0 then
- redis.call('set', KEYS[1], ARGV[1] - 1, 'EX', ARGV[2])
- return {ARGV[1] - 1, ARGV[2]}
+ redis.call('set', KEYS[1], ARGV[1] - ARGV[3], 'EX', ARGV[2])
+ return {ARGV[1] - ARGV[3], ARGV[2]}
end
- return {redis.call('incrby', KEYS[1], -1), ttl}
+ return {redis.call('incrby', KEYS[1], 0 - ARGV[3]), ttl}
]=])
@@ -90,14 +91,14 @@ function _M.new(plugin_name, limit, window, conf)
end
-function _M.incoming(self, key)
+function _M.incoming(self, key, cost)
local red = self.red_cli
local limit = self.limit
local window = self.window
key = self.plugin_name .. tostring(key)
local ttl = 0
- local res, err = red:eval(script, 1, key, limit, window)
+ local res, err = red:eval(script, 1, key, limit, window, cost or 1)
if err then
return nil, err, ttl
diff --git a/apisix/plugins/limit-count/limit-count-redis.lua b/apisix/plugins/limit-count/limit-count-redis.lua
index bed920229b0c..85f769cbda56 100644
--- a/apisix/plugins/limit-count/limit-count-redis.lua
+++ b/apisix/plugins/limit-count/limit-count-redis.lua
@@ -30,12 +30,13 @@ local mt = {
local script = core.string.compress_script([=[
+ assert(tonumber(ARGV[3]) >= 1, "cost must be at least 1")
local ttl = redis.call('ttl', KEYS[1])
if ttl < 0 then
- redis.call('set', KEYS[1], ARGV[1] - 1, 'EX', ARGV[2])
- return {ARGV[1] - 1, ARGV[2]}
+ redis.call('set', KEYS[1], ARGV[1] - ARGV[3], 'EX', ARGV[2])
+ return {ARGV[1] - ARGV[3], ARGV[2]}
end
- return {redis.call('incrby', KEYS[1], -1), ttl}
+ return {redis.call('incrby', KEYS[1], 0 - ARGV[3]), ttl}
]=])
local function redis_cli(conf)
@@ -44,7 +45,12 @@ local function redis_cli(conf)
red:set_timeouts(timeout, timeout, timeout)
- local ok, err = red:connect(conf.redis_host, conf.redis_port or 6379)
+ local sock_opts = {
+ ssl = conf.redis_ssl,
+ ssl_verify = conf.redis_ssl_verify
+ }
+
+ local ok, err = red:connect(conf.redis_host, conf.redis_port or 6379, sock_opts)
if not ok then
return false, err
end
@@ -53,7 +59,12 @@ local function redis_cli(conf)
count, err = red:get_reused_times()
if 0 == count then
if conf.redis_password and conf.redis_password ~= '' then
- local ok, err = red:auth(conf.redis_password)
+ local ok, err
+ if conf.redis_username then
+ ok, err = red:auth(conf.redis_username, conf.redis_password)
+ else
+ ok, err = red:auth(conf.redis_password)
+ end
if not ok then
return nil, err
end
@@ -85,7 +96,7 @@ function _M.new(plugin_name, limit, window, conf)
return setmetatable(self, mt)
end
-function _M.incoming(self, key)
+function _M.incoming(self, key, cost)
local conf = self.conf
local red, err = redis_cli(conf)
if not red then
@@ -98,7 +109,7 @@ function _M.incoming(self, key)
key = self.plugin_name .. tostring(key)
local ttl = 0
- res, err = red:eval(script, 1, key, limit, window)
+ res, err = red:eval(script, 1, key, limit, window, cost or 1)
if err then
return nil, err, ttl
diff --git a/apisix/plugins/log-rotate.lua b/apisix/plugins/log-rotate.lua
index 7380b0d1c64f..db3360e448b4 100644
--- a/apisix/plugins/log-rotate.lua
+++ b/apisix/plugins/log-rotate.lua
@@ -32,10 +32,10 @@ local os_date = os.date
local os_remove = os.remove
local os_rename = os.rename
local str_sub = string.sub
-local str_find = string.find
local str_format = string.format
-local str_reverse = string.reverse
+local str_byte = string.byte
local ngx_sleep = require("apisix.core.utils").sleep
+local string_rfind = require("pl.stringx").rfind
local local_conf
@@ -49,6 +49,7 @@ local default_logs
local enable_compression = false
local DEFAULT_ACCESS_LOG_FILENAME = "access.log"
local DEFAULT_ERROR_LOG_FILENAME = "error.log"
+local SLASH_BYTE = str_byte("/")
local schema = {
type = "object",
@@ -74,18 +75,6 @@ local function file_exists(path)
end
-local function get_last_index(str, key)
- local rev = str_reverse(str)
- local _, idx = str_find(rev, key)
- local n
- if idx then
- n = #rev - idx + 1
- end
-
- return n
-end
-
-
local function get_log_path_info(file_type)
local_conf = core.config.local_conf()
local conf_path
@@ -101,12 +90,11 @@ local function get_log_path_info(file_type)
local prefix = ngx.config.prefix()
if conf_path then
- local root = str_sub(conf_path, 1, 1)
-- relative path
- if root ~= "/" then
+ if str_byte(conf_path) ~= SLASH_BYTE then
conf_path = prefix .. conf_path
end
- local n = get_last_index(conf_path, "/")
+ local n = string_rfind(conf_path, "/")
if n ~= nil and n ~= #conf_path then
local dir = str_sub(conf_path, 1, n)
local name = str_sub(conf_path, n + 1)
@@ -126,14 +114,14 @@ end
local function scan_log_folder(log_file_name)
local t = {}
- local log_dir, _ = get_log_path_info(log_file_name)
+ local log_dir, log_name = get_log_path_info(log_file_name)
- local compression_log_type = log_file_name .. COMPRESSION_FILE_SUFFIX
+ local compression_log_type = log_name .. COMPRESSION_FILE_SUFFIX
for file in lfs.dir(log_dir) do
- local n = get_last_index(file, "__")
+ local n = string_rfind(file, "__")
if n ~= nil then
local log_type = file:sub(n + 2)
- if log_type == log_file_name or log_type == compression_log_type then
+ if log_type == log_name or log_type == compression_log_type then
core.table.insert(t, file)
end
end
@@ -174,7 +162,7 @@ local function compression_file(new_file)
return
end
- local n = get_last_index(new_file, "/")
+ local n = string_rfind(new_file, "/")
local new_filepath = str_sub(new_file, 1, n)
local new_filename = str_sub(new_file, n + 1)
local com_filename = new_filename .. COMPRESSION_FILE_SUFFIX
diff --git a/apisix/plugins/loki-logger.lua b/apisix/plugins/loki-logger.lua
new file mode 100644
index 000000000000..357ab1978202
--- /dev/null
+++ b/apisix/plugins/loki-logger.lua
@@ -0,0 +1,234 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements. See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+
+local bp_manager_mod = require("apisix.utils.batch-processor-manager")
+local log_util = require("apisix.utils.log-util")
+local core = require("apisix.core")
+local http = require("resty.http")
+local new_tab = require("table.new")
+
+local pairs = pairs
+local ipairs = ipairs
+local tostring = tostring
+local math_random = math.random
+local table_insert = table.insert
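+    -- wrap the JSON entry in an RFC 5424 syslog message up front, so the batch
+    -- processor below only needs to concatenate ready-made messages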
+local ngx = ngx
+local str_format = core.string.format
+
+local plugin_name = "loki-logger"
+local batch_processor_manager = bp_manager_mod.new("loki logger")
+
+local schema = {
+ type = "object",
+ properties = {
+ -- core configurations
+ endpoint_addrs = {
+ type = "array",
+ minItems = 1,
+ items = core.schema.uri_def,
+ },
+ endpoint_uri = {
+ type = "string",
+ minLength = 1,
+ default = "/loki/api/v1/push"
+ },
+ tenant_id = {type = "string", default = "fake"},
+ log_labels = {
+ type = "object",
+ patternProperties = {
+ [".*"] = {
+ type = "string",
+ minLength = 1,
+ },
+ },
+ default = {
+ job = "apisix",
+ },
+ },
+
+ -- connection layer configurations
+ ssl_verify = {type = "boolean", default = false},
+ timeout = {
+ type = "integer",
+ minimum = 1,
+ maximum = 60000,
+ default = 3000,
+ description = "timeout in milliseconds",
+ },
+ keepalive = {type = "boolean", default = true},
+ keepalive_timeout = {
+ type = "integer",
+ minimum = 1000,
+ default = 60000,
+ description = "keepalive timeout in milliseconds",
+ },
+ keepalive_pool = {type = "integer", minimum = 1, default = 5},
+
+ -- logger related configurations
+ log_format = {type = "object"},
+ include_req_body = {type = "boolean", default = false},
+ include_req_body_expr = {
+ type = "array",
+ minItems = 1,
+ items = {
+ type = "array"
+ }
+ },
+ include_resp_body = {type = "boolean", default = false},
+ include_resp_body_expr = {
+ type = "array",
+ minItems = 1,
+ items = {
+ type = "array"
+ }
+ },
+ },
+ required = {"endpoint_addrs"}
+}
+
+
+local metadata_schema = {
+ type = "object",
+ properties = {
+ log_format = log_util.metadata_schema_log_format,
+ },
+}
+
+
+local _M = {
+ version = 0.1,
+ priority = 414,
+ name = plugin_name,
+ schema = batch_processor_manager:wrap_schema(schema),
+ metadata_schema = metadata_schema,
+}
+
+
+function _M.check_schema(conf, schema_type)
+ if schema_type == core.schema.TYPE_METADATA then
+ return core.schema.check(metadata_schema, conf)
+ end
+
+ local ok, err = core.schema.check(schema, conf)
+ if not ok then
+ return nil, err
+ end
+ return log_util.check_log_schema(conf)
+end
+
+
+local function send_http_data(conf, log)
+ local params = {
+ headers = {
+ ["Content-Type"] = "application/json",
+ ["X-Scope-OrgID"] = conf.tenant_id,
+ },
+ keepalive = conf.keepalive,
+ ssl_verify = conf.ssl_verify,
+ method = "POST",
+ body = core.json.encode(log)
+ }
+
+ if conf.keepalive then
+ params.keepalive_timeout = conf.keepalive_timeout
+ params.keepalive_pool = conf.keepalive_pool
+ end
+
+ local httpc, err = http.new()
+ if not httpc then
+ return false, str_format("create http client error: %s", err)
+ end
+ httpc:set_timeout(conf.timeout)
+
+    -- select a random endpoint and build the URL
+ local endpoint_url = conf.endpoint_addrs[math_random(#conf.endpoint_addrs)] .. conf.endpoint_uri
+ local res, err = httpc:request_uri(endpoint_url, params)
+ if not res then
+ return false, err
+ end
+
+ if res.status >= 300 then
+ return false, str_format("loki server returned status: %d, body: %s",
+ res.status, res.body or "")
+ end
+
+ return true
+end
+
+
+function _M.body_filter(conf, ctx)
+ log_util.collect_body(conf, ctx)
+end
+
+
+function _M.log(conf, ctx)
+ local entry = log_util.get_log_entry(plugin_name, conf, ctx)
+
+ if not entry.route_id then
+ entry.route_id = "no-matched"
+ end
+
+    -- use the request start time as the log time, converted to nanoseconds
+    -- string concatenation is used because the LuaJIT number type cannot hold
+    -- such a large integer exactly: take the millisecond value first, then
+    -- append six zeros to get nanoseconds
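+    -- e.g. a start time of 1699000000.123 s is sent as "1699000000123" .. "000000"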
+ entry.loki_log_time = tostring(ngx.req.start_time() * 1000) .. "000000"
+
+ if batch_processor_manager:add_entry(conf, entry) then
+ return
+ end
+
+ local labels = conf.log_labels
+
+ -- parsing possible variables in label value
+ for key, value in pairs(labels) do
+ local new_val, err, n_resolved = core.utils.resolve_var(value, ctx.var)
+ if not err and n_resolved > 0 then
+ labels[key] = new_val
+ end
+ end
+
+ -- generate a function to be executed by the batch processor
+ local func = function(entries)
+ -- build loki request data
+ local data = {
+ streams = {
+ {
+ stream = labels,
+ values = new_tab(1, 0),
+ }
+ }
+ }
+
+ -- add all entries to the batch
+ for _, entry in ipairs(entries) do
+ local log_time = entry.loki_log_time
+ entry.loki_log_time = nil -- clean logger internal field
+
+ table_insert(data.streams[1].values, {
+ log_time, core.json.encode(entry)
+ })
+ end
+
+ return send_http_data(conf, data)
+ end
+
+ batch_processor_manager:add_entry_to_new_processor(conf, entry, ctx, func)
+end
+
+
+return _M
diff --git a/apisix/plugins/mocking.lua b/apisix/plugins/mocking.lua
index 134647f71d9f..af5bc75edb2a 100644
--- a/apisix/plugins/mocking.lua
+++ b/apisix/plugins/mocking.lua
@@ -49,7 +49,19 @@ local schema = {
-- specify response json schema, if response_example is not nil, this conf will be ignored.
-- generate a random response from the json schema.
response_schema = { type = "object" },
- with_mock_header = { type = "boolean", default = true }
+ with_mock_header = { type = "boolean", default = true },
+ response_headers = {
+ type = "object",
+ minProperties = 1,
+ patternProperties = {
+ ["^[^:]+$"] = {
+ oneOf = {
+ { type = "string" },
+ { type = "number" }
+ }
+ }
+ },
+ }
},
anyOf = {
{ required = { "response_example" } },
@@ -215,6 +227,12 @@ function _M.access(conf, ctx)
ngx.header["x-mock-by"] = "APISIX/" .. core.version.VERSION
end
+ if conf.response_headers then
+ for key, value in pairs(conf.response_headers) do
+ core.response.add_header(key, value)
+ end
+ end
+
if conf.delay > 0 then
ngx.sleep(conf.delay)
end
diff --git a/apisix/plugins/opa.lua b/apisix/plugins/opa.lua
index 1b7db1b62d73..24bdb5be1a46 100644
--- a/apisix/plugins/opa.lua
+++ b/apisix/plugins/opa.lua
@@ -19,6 +19,7 @@ local core = require("apisix.core")
local http = require("resty.http")
local helper = require("apisix.plugins.opa.helper")
local type = type
+local ipairs = ipairs
local schema = {
type = "object",
@@ -37,6 +38,14 @@ local schema = {
description = "timeout in milliseconds",
},
keepalive = {type = "boolean", default = true},
+ send_headers_upstream = {
+ type = "array",
+ minItems = 1,
+ items = {
+ type = "string"
+ },
+ description = "list of headers to pass to upstream in request"
+ },
keepalive_timeout = {type = "integer", minimum = 1000, default = 60000},
keepalive_pool = {type = "integer", minimum = 1, default = 5},
with_route = {type = "boolean", default = false},
@@ -125,6 +134,14 @@ function _M.access(conf, ctx)
end
return status_code, reason
+ else if result.headers and conf.send_headers_upstream then
+ for _, name in ipairs(conf.send_headers_upstream) do
+ local value = result.headers[name]
+ if value then
+ core.request.set_header(ctx, name, value)
+ end
+ end
+ end
end
end
diff --git a/apisix/plugins/openid-connect.lua b/apisix/plugins/openid-connect.lua
index 5058eba47d28..927e4ddbd8aa 100644
--- a/apisix/plugins/openid-connect.lua
+++ b/apisix/plugins/openid-connect.lua
@@ -130,6 +130,32 @@ local schema = {
"header to the request for downstream.",
type = "boolean",
default = false
+ },
+ proxy_opts = {
+ description = "HTTP proxy server be used to access identity server.",
+ type = "object",
+ properties = {
+ http_proxy = {
+ type = "string",
+ description = "HTTP proxy like: http://proxy-server:80.",
+ },
+ https_proxy = {
+ type = "string",
+ description = "HTTPS proxy like: http://proxy-server:80.",
+ },
+ http_proxy_authorization = {
+ type = "string",
+ description = "Basic [base64 username:password].",
+ },
+ https_proxy_authorization = {
+ type = "string",
+ description = "Basic [base64 username:password].",
+ },
+ no_proxy = {
+ type = "string",
+ description = "Comma separated list of hosts that should not be proxied.",
+ }
+ },
}
},
encrypt_fields = {"client_secret"},
diff --git a/apisix/plugins/prometheus/exporter.lua b/apisix/plugins/prometheus/exporter.lua
index 45ff94c3f631..623a9eddf113 100644
--- a/apisix/plugins/prometheus/exporter.lua
+++ b/apisix/plugins/prometheus/exporter.lua
@@ -17,6 +17,7 @@
local base_prometheus = require("prometheus")
local core = require("apisix.core")
local plugin = require("apisix.plugin")
+local control = require("apisix.control.v1")
local ipairs = ipairs
local pairs = pairs
local ngx = ngx
@@ -33,6 +34,8 @@ local get_ssls = router.ssls
local get_services = require("apisix.http.service").services
local get_consumers = require("apisix.consumer").consumers
local get_upstreams = require("apisix.upstream").upstreams
+local get_global_rules = require("apisix.global_rules").global_rules
+local get_global_rules_prev_index = require("apisix.global_rules").get_pre_index
local clear_tab = core.table.clear
local get_stream_routes = router.stream_routes
local get_protos = require("apisix.plugins.grpc-transcode.proto").protos
@@ -158,6 +161,10 @@ function _M.http_init(prometheus_enabled_in_stream)
"The free space of each nginx shared DICT since APISIX start",
{"name"})
+ metrics.upstream_status = prometheus:gauge("upstream_status",
+ "Upstream status from health check",
+ {"name", "ip", "port"})
+
-- per service
-- The consumer label indicates the name of consumer corresponds to the
@@ -168,10 +175,15 @@ function _M.http_init(prometheus_enabled_in_stream)
{"code", "route", "matched_uri", "matched_host", "service", "consumer", "node",
unpack(extra_labels("http_status"))})
+ local buckets = DEFAULT_BUCKETS
+ if attr and attr.default_buckets then
+ buckets = attr.default_buckets
+ end
+
metrics.latency = prometheus:histogram("http_latency",
"HTTP request latency in milliseconds per service in APISIX",
{"type", "route", "service", "consumer", "node", unpack(extra_labels("http_latency"))},
- DEFAULT_BUCKETS)
+ buckets)
metrics.bandwidth = prometheus:counter("bandwidth",
"Total bandwidth in bytes consumed per service in APISIX",
@@ -367,14 +379,15 @@ local function etcd_modify_index()
global_max_idx = set_modify_index("consumers", consumers, consumers_ver, global_max_idx)
-- global_rules
- local global_rules = router.global_rules
+ local global_rules, global_rules_ver = get_global_rules()
if global_rules then
- global_max_idx = set_modify_index("global_rules", global_rules.values,
- global_rules.conf_version, global_max_idx)
+ global_max_idx = set_modify_index("global_rules", global_rules,
+ global_rules_ver, global_max_idx)
-- prev_index
key_values[1] = "prev_index"
- metrics.etcd_modify_indexes:set(global_rules.prev_index, key_values)
+ local prev_index = get_global_rules_prev_index()
+ metrics.etcd_modify_indexes:set(prev_index, key_values)
else
global_max_idx = set_modify_index("global_rules", nil, nil, global_max_idx)
@@ -458,6 +471,15 @@ local function collect(ctx, stream_only)
metrics.node_info:set(1, gen_arr(hostname))
+ -- update upstream_status metrics
+ local stats = control.get_health_checkers()
+ for _, stat in ipairs(stats) do
+ for _, node in ipairs(stat.nodes) do
+ metrics.upstream_status:set((node.status == "healthy") and 1 or 0,
+ gen_arr(stat.name, node.ip, node.port))
+ end
+ end
+
core.response.set_header("content_type", "text/plain")
return 200, core.table.concat(prometheus:metric_data())
end
diff --git a/apisix/plugins/proxy-cache/init.lua b/apisix/plugins/proxy-cache/init.lua
index 59569980cc5c..918f755994ea 100644
--- a/apisix/plugins/proxy-cache/init.lua
+++ b/apisix/plugins/proxy-cache/init.lua
@@ -25,6 +25,7 @@ local plugin_name = "proxy-cache"
local STRATEGY_DISK = "disk"
local STRATEGY_MEMORY = "memory"
+local DEFAULT_CACHE_ZONE = "disk_cache_one"
local schema = {
type = "object",
@@ -33,7 +34,7 @@ local schema = {
type = "string",
minLength = 1,
maxLength = 100,
- default = "disk_cache_one",
+ default = DEFAULT_CACHE_ZONE,
},
cache_strategy = {
type = "string",
@@ -108,7 +109,7 @@ local schema = {
local _M = {
version = 0.2,
- priority = 1009,
+ priority = 1085,
name = plugin_name,
schema = schema,
}
@@ -129,14 +130,23 @@ function _M.check_schema(conf)
local found = false
local local_conf = core.config.local_conf()
if local_conf.apisix.proxy_cache then
+ local err = "cache_zone " .. conf.cache_zone .. " not found"
for _, cache in ipairs(local_conf.apisix.proxy_cache.zones) do
+            -- the cache_zone passed in the plugin config matches one of the configured proxy_cache zones
if cache.name == conf.cache_zone then
- found = true
+                -- check for a mismatch between cache_strategy and the matched cache zone
+ if (conf.cache_strategy == STRATEGY_MEMORY and cache.disk_path) or
+ (conf.cache_strategy == STRATEGY_DISK and not cache.disk_path) then
+ err = "invalid or empty cache_zone for cache_strategy: "..conf.cache_strategy
+ else
+ found = true
+ end
+ break
end
end
if found == false then
- return false, "cache_zone " .. conf.cache_zone .. " not found"
+ return false, err
end
end
diff --git a/apisix/plugins/proxy-cache/memory.lua b/apisix/plugins/proxy-cache/memory.lua
index 0112db63b568..9d5c665a8d92 100644
--- a/apisix/plugins/proxy-cache/memory.lua
+++ b/apisix/plugins/proxy-cache/memory.lua
@@ -32,6 +32,10 @@ end
function _M:set(key, obj, ttl)
+ if self.dict == nil then
+ return nil, "invalid cache_zone provided"
+ end
+
local obj_json = core.json.encode(obj)
if not obj_json then
return nil, "could not encode object"
@@ -43,6 +47,10 @@ end
function _M:get(key)
+ if self.dict == nil then
+ return nil, "invalid cache_zone provided"
+ end
+
-- If the key does not exist or has expired, then res_json will be nil.
local res_json, err = self.dict:get(key)
if not res_json then
@@ -63,6 +71,9 @@ end
function _M:purge(key)
+ if self.dict == nil then
+ return nil, "invalid cache_zone provided"
+ end
self.dict:delete(key)
end
diff --git a/apisix/plugins/proxy-mirror.lua b/apisix/plugins/proxy-mirror.lua
index 312d3ec37b68..d6cede6e9b59 100644
--- a/apisix/plugins/proxy-mirror.lua
+++ b/apisix/plugins/proxy-mirror.lua
@@ -27,7 +27,7 @@ local schema = {
properties = {
host = {
type = "string",
- pattern = [=[^http(s)?:\/\/([\da-zA-Z.-]+|\[[\da-fA-F:]+\])(:\d+)?$]=],
+ pattern = [=[^(http(s)?|grpc(s)?):\/\/([\da-zA-Z.-]+|\[[\da-fA-F:]+\])(:\d+)?$]=],
},
path = {
type = "string",
@@ -76,15 +76,15 @@ local function resolver_host(prop_host)
if not ip then
core.log.error("dns resolver resolves domain: ", decoded_host," error: ", err,
" will continue to use the host: ", decoded_host)
- return prop_host
+ return url_decoded.scheme, prop_host
end
local host = url_decoded.scheme .. '://' .. ip ..
(url_decoded.port and ':' .. url_decoded.port or '')
core.log.info(prop_host, " is resolved to: ", host)
- return host
+ return url_decoded.scheme, host
end
- return prop_host
+ return url_decoded.scheme, prop_host
end
@@ -101,7 +101,9 @@ local function enable_mirror(ctx, conf)
end
end
- ctx.var.upstream_mirror_uri = resolver_host(conf.host) .. uri
+ local _, mirror_host = resolver_host(conf.host)
+ ctx.var.upstream_mirror_host = mirror_host
+ ctx.var.upstream_mirror_uri = mirror_host .. uri
if has_mod then
apisix_ngx_client.enable_mirror()
@@ -114,12 +116,14 @@ function _M.rewrite(conf, ctx)
if conf.sample_ratio == 1 then
enable_mirror(ctx, conf)
+ ctx.enable_mirror = true
else
local val = math_random()
core.log.info("mirror request sample_ratio conf: ", conf.sample_ratio,
", random value: ", val)
if val < conf.sample_ratio then
enable_mirror(ctx, conf)
+ ctx.enable_mirror = true
end
end
diff --git a/apisix/plugins/proxy-rewrite.lua b/apisix/plugins/proxy-rewrite.lua
index 8b1df5aaa876..087686edfbf8 100644
--- a/apisix/plugins/proxy-rewrite.lua
+++ b/apisix/plugins/proxy-rewrite.lua
@@ -22,6 +22,7 @@ local ngx = ngx
local type = type
local re_sub = ngx.re.sub
local re_match = ngx.re.match
+local req_set_uri = ngx.req.set_uri
local sub_str = string.sub
local str_find = core.string.find
@@ -65,7 +66,6 @@ local schema = {
description = "new uri that substitute from client uri " ..
"for upstream, lower priority than uri property",
type = "array",
- maxItems = 2,
minItems = 2,
items = {
description = "regex uri",
@@ -191,11 +191,16 @@ function _M.check_schema(conf)
end
if conf.regex_uri and #conf.regex_uri > 0 then
- local _, _, err = re_sub("/fake_uri", conf.regex_uri[1],
- conf.regex_uri[2], "jo")
- if err then
- return false, "invalid regex_uri(" .. conf.regex_uri[1] ..
- ", " .. conf.regex_uri[2] .. "): " .. err
+ if (#conf.regex_uri % 2 ~= 0) then
+ return false, "The length of regex_uri should be an even number"
+ end
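+        -- regex_uri is a flat array of (pattern, replacement) pairs; validate
+        -- each pair with a trial substitution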
+ for i = 1, #conf.regex_uri, 2 do
+ local _, _, err = re_sub("/fake_uri", conf.regex_uri[i],
+ conf.regex_uri[i + 1], "jo")
+ if err then
+ return false, "invalid regex_uri(" .. conf.regex_uri[i] ..
+ ", " .. conf.regex_uri[i + 1] .. "): " .. err
+ end
end
end
@@ -274,32 +279,47 @@ function _M.rewrite(conf, ctx)
local separator_escaped = false
if conf.use_real_request_uri_unsafe then
upstream_uri = ctx.var.real_request_uri
- elseif conf.uri ~= nil then
+ end
+
+ if conf.uri ~= nil then
separator_escaped = true
upstream_uri = core.utils.resolve_var(conf.uri, ctx.var, escape_separator)
+
elseif conf.regex_uri ~= nil then
if not str_find(upstream_uri, "?") then
separator_escaped = true
end
- local uri, _, err = re_sub(upstream_uri, conf.regex_uri[1],
- conf.regex_uri[2], "jo")
- if not uri then
- local msg = "failed to substitute the uri " .. ctx.var.uri ..
- " (" .. conf.regex_uri[1] .. ") with " ..
- conf.regex_uri[2] .. " : " .. err
- core.log.error(msg)
- return 500, {message = msg}
- end
+ local error_msg
+ for i = 1, #conf.regex_uri, 2 do
+ local captures, err = re_match(upstream_uri, conf.regex_uri[i], "jo")
+ if err then
+ error_msg = "failed to match the uri " .. ctx.var.uri ..
+ " (" .. conf.regex_uri[i] .. ") " .. " : " .. err
+ break
+ end
+
+ if captures then
+ ctx.proxy_rewrite_regex_uri_captures = captures
+
+ local uri, _, err = re_sub(upstream_uri,
+ conf.regex_uri[i], conf.regex_uri[i + 1], "jo")
+ if uri then
+ upstream_uri = uri
+ else
+ error_msg = "failed to substitute the uri " .. ngx.var.uri ..
+ " (" .. conf.regex_uri[i] .. ") with " ..
+ conf.regex_uri[i + 1] .. " : " .. err
+ end
- local m, err = re_match(upstream_uri, conf.regex_uri[1], "jo")
- if not m and err then
- core.log.error("match error in proxy-rewrite plugin, please check: ", err)
- return 500
+ break
+ end
end
- ctx.proxy_rewrite_regex_uri_captures = m
- upstream_uri = uri
+ if error_msg ~= nil then
+ core.log.error(error_msg)
+ return 500, { error_msg = error_msg }
+ end
end
if not conf.use_real_request_uri_unsafe then
@@ -317,6 +337,8 @@ function _M.rewrite(conf, ctx)
upstream_uri = core.utils.uri_safe_encode(upstream_uri)
end
+ req_set_uri(upstream_uri)
+
if ctx.var.is_args == "?" then
if index then
ctx.var.upstream_uri = upstream_uri .. "&" .. (ctx.var.args or "")
@@ -326,6 +348,8 @@ function _M.rewrite(conf, ctx)
else
ctx.var.upstream_uri = upstream_uri
end
+ else
+ ctx.var.upstream_uri = upstream_uri
end
if conf.headers then
diff --git a/apisix/plugins/request-id.lua b/apisix/plugins/request-id.lua
index 3a7e6bc39ee8..dac3162db5ed 100644
--- a/apisix/plugins/request-id.lua
+++ b/apisix/plugins/request-id.lua
@@ -16,28 +16,15 @@
--
local ngx = ngx
-local bit = require("bit")
local core = require("apisix.core")
-local snowflake = require("snowflake")
local uuid = require("resty.jit-uuid")
local nanoid = require("nanoid")
-local process = require("ngx.process")
-local timers = require("apisix.timers")
-local tostring = tostring
-local math_pow = math.pow
-local math_ceil = math.ceil
-local math_floor = math.floor
local math_random = math.random
local str_byte = string.byte
local ffi = require "ffi"
local plugin_name = "request-id"
-local data_machine = nil
-local snowflake_inited = nil
-
-local attr = nil
-
local schema = {
type = "object",
properties = {
@@ -45,7 +32,7 @@ local schema = {
include_in_response = {type = "boolean", default = true},
algorithm = {
type = "string",
- enum = {"uuid", "snowflake", "nanoid", "range_id"},
+ enum = {"uuid", "nanoid", "range_id"},
default = "uuid"
},
range_id = {
@@ -62,25 +49,8 @@ local schema = {
minLength = 6,
default = "abcdefghijklmnopqrstuvwxyzABCDEFGHIGKLMNOPQRSTUVWXYZ0123456789"
}
- }
- }
- }
-}
-
-local attr_schema = {
- type = "object",
- properties = {
- snowflake = {
- type = "object",
- properties = {
- enable = {type = "boolean", default = false},
- snowflake_epoc = {type = "integer", minimum = 1, default = 1609459200000},
- data_machine_bits = {type = "integer", minimum = 1, maximum = 31, default = 12},
- sequence_bits = {type = "integer", minimum = 1, default = 10},
- delta_offset = {type = "integer", default = 1, enum = {1, 10, 100, 1000}},
- data_machine_ttl = {type = "integer", minimum = 1, default = 30},
- data_machine_interval = {type = "integer", minimum = 1, default = 10}
- }
+ },
+ default = {}
}
}
}
@@ -92,139 +62,10 @@ local _M = {
schema = schema
}
-
function _M.check_schema(conf)
return core.schema.check(schema, conf)
end
-
--- Generates the current process data machine
-local function gen_data_machine(max_number)
- if data_machine == nil then
- local etcd_cli, prefix = core.etcd.new()
- local prefix = prefix .. "/plugins/request-id/snowflake/"
- local uuid = uuid.generate_v4()
- local id = 1
- ::continue::
- while (id <= max_number) do
- local res, err = etcd_cli:grant(attr.snowflake.data_machine_ttl)
- if err then
- id = id + 1
- core.log.error("Etcd grant failure, err: ".. err)
- goto continue
- end
-
- local _, err1 = etcd_cli:setnx(prefix .. tostring(id), uuid)
- local res2, err2 = etcd_cli:get(prefix .. tostring(id))
-
- if err1 or err2 or res2.body.kvs[1].value ~= uuid then
- core.log.notice("data_machine " .. id .. " is not available")
- id = id + 1
- else
- data_machine = id
-
- local _, err3 =
- etcd_cli:set(
- prefix .. tostring(id),
- uuid,
- {
- prev_kv = true,
- lease = res.body.ID
- }
- )
-
- if err3 then
- id = id + 1
- etcd_cli:delete(prefix .. tostring(id))
- core.log.error("set data_machine " .. id .. " lease error: " .. err3)
- goto continue
- end
-
- local lease_id = res.body.ID
- local start_at = ngx.time()
- local handler = function()
- local now = ngx.time()
- if now - start_at < attr.snowflake.data_machine_interval then
- return
- end
-
- local _, err4 = etcd_cli:keepalive(lease_id)
- if err4 then
- snowflake_inited = nil
- data_machine = nil
- timers.unregister_timer("plugin#request-id")
- core.log.error("snowflake data_machine: " .. id .." lease failed.")
- end
- start_at = now
- core.log.info("snowflake data_machine: " .. id .." lease success.")
- end
-
- timers.register_timer("plugin#request-id", handler)
- core.log.info(
- "timer created to lease snowflake algorithm data_machine, interval: ",
- attr.snowflake.data_machine_interval)
- core.log.notice("lease snowflake data_machine: " .. id)
- break
- end
- end
-
- if data_machine == nil then
- core.log.error("No data_machine is not available")
- return nil
- end
- end
- return data_machine
-end
-
-
--- Split 'Data Machine' into 'Worker ID' and 'datacenter ID'
-local function split_data_machine(data_machine, node_id_bits, datacenter_id_bits)
- local num = bit.tobit(data_machine)
- local worker_id = bit.band(num, math_pow(2, node_id_bits) - 1)
- num = bit.rshift(num, node_id_bits)
- local datacenter_id = bit.band(num, math_pow(2, datacenter_id_bits) - 1)
- return worker_id, datacenter_id
-end
-
-
--- Initialize the snowflake algorithm
-local function snowflake_init()
- if snowflake_inited == nil then
- local max_number = math_pow(2, (attr.snowflake.data_machine_bits))
- local datacenter_id_bits = math_floor(attr.snowflake.data_machine_bits / 2)
- local node_id_bits = math_ceil(attr.snowflake.data_machine_bits / 2)
- data_machine = gen_data_machine(max_number)
- if data_machine == nil then
- return ""
- end
-
- local worker_id, datacenter_id = split_data_machine(data_machine,
- node_id_bits, datacenter_id_bits)
-
- core.log.info("snowflake init datacenter_id: " ..
- datacenter_id .. " worker_id: " .. worker_id)
- snowflake.init(
- datacenter_id,
- worker_id,
- attr.snowflake.snowflake_epoc,
- node_id_bits,
- datacenter_id_bits,
- attr.snowflake.sequence_bits,
- attr.delta_offset
- )
- snowflake_inited = true
- end
-end
-
-
--- generate snowflake id
-local function next_id()
- if snowflake_inited == nil then
- snowflake_init()
- end
- return snowflake:next_id()
-end
-
-- generate range_id
local function get_range_id(range_id)
local res = ffi.new("unsigned char[?]", range_id.length)
@@ -246,7 +87,7 @@ local function get_request_id(conf)
return get_range_id(conf.range_id)
end
- return next_id()
+ return uuid()
end
@@ -276,25 +117,4 @@ function _M.header_filter(conf, ctx)
end
end
-function _M.init()
- local local_conf = core.config.local_conf()
- attr = core.table.try_read_attr(local_conf, "plugin_attr", plugin_name)
- local ok, err = core.schema.check(attr_schema, attr)
- if not ok then
- core.log.error("failed to check the plugin_attr[", plugin_name, "]", ": ", err)
- return
- end
- if attr.snowflake.enable then
- if process.type() == "worker" then
- ngx.timer.at(0, snowflake_init)
- end
- end
-end
-
-function _M.destroy()
- if snowflake_inited then
- timers.unregister_timer("plugin#request-id")
- end
-end
-
return _M
diff --git a/apisix/plugins/response-rewrite.lua b/apisix/plugins/response-rewrite.lua
index 4c3487da0686..20458c4fcf82 100644
--- a/apisix/plugins/response-rewrite.lua
+++ b/apisix/plugins/response-rewrite.lua
@@ -58,7 +58,7 @@ local schema = {
items = {
type = "string",
-- "Set-Cookie: =; Max-Age="
- pattern = "^[^:]+:[^:]+[^/]$"
+ pattern = "^[^:]+:[^:]*[^/]$"
}
},
set = {
diff --git a/apisix/plugins/skywalking-logger.lua b/apisix/plugins/skywalking-logger.lua
index 1c1ba590a0c4..0d90b97893cb 100644
--- a/apisix/plugins/skywalking-logger.lua
+++ b/apisix/plugins/skywalking-logger.lua
@@ -132,6 +132,11 @@ function _M.log(conf, ctx)
end
end
+ local service_instance_name = conf.service_instance_name
+ if service_instance_name == "$hostname" then
+ service_instance_name = core.utils.gethostname()
+ end
+
local entry = {
traceContext = trace_context,
body = {
@@ -140,7 +145,7 @@ function _M.log(conf, ctx)
}
},
service = conf.service_name,
- serviceInstance = conf.service_instance_name,
+ serviceInstance = service_instance_name,
endpoint = ctx.var.uri,
}
diff --git a/apisix/plugins/sls-logger.lua b/apisix/plugins/sls-logger.lua
index 6c2415ac6d29..de2fbae67ffa 100644
--- a/apisix/plugins/sls-logger.lua
+++ b/apisix/plugins/sls-logger.lua
@@ -21,7 +21,7 @@ local bp_manager_mod = require("apisix.utils.batch-processor-manager")
local plugin_name = "sls-logger"
local ngx = ngx
-local rf5424 = require("apisix.plugins.slslog.rfc5424")
+local rf5424 = require("apisix.utils.rfc5424")
local tcp = ngx.socket.tcp
local tostring = tostring
local ipairs = ipairs
@@ -138,9 +138,14 @@ function _M.log(conf, ctx)
return
end
- local rf5424_data = rf5424.encode("SYSLOG", "INFO", ctx.var.host,"apisix",
- ctx.var.pid, conf.project, conf.logstore,
- conf.access_key_id, conf.access_key_secret, json_str)
+ local structured_data = {
+ {name = "project", value = conf.project},
+ {name = "logstore", value = conf.logstore},
+ {name = "access-key-id", value = conf.access_key_id},
+ {name = "access-key-secret", value = conf.access_key_secret},
+ }
+ local rf5424_data = rf5424.encode("SYSLOG", "INFO", ctx.var.host, "apisix",
+ ctx.var.pid, json_str, structured_data)
local process_context = {
data = rf5424_data,
diff --git a/apisix/plugins/splunk-hec-logging.lua b/apisix/plugins/splunk-hec-logging.lua
index 8de8be6eca9b..d7c97107e620 100644
--- a/apisix/plugins/splunk-hec-logging.lua
+++ b/apisix/plugins/splunk-hec-logging.lua
@@ -21,6 +21,9 @@ local ngx_now = ngx.now
local http = require("resty.http")
local log_util = require("apisix.utils.log-util")
local bp_manager_mod = require("apisix.utils.batch-processor-manager")
+local table_insert = core.table.insert
+local table_concat = core.table.concat
+local ipairs = ipairs
local DEFAULT_SPLUNK_HEC_ENTRY_SOURCE = "apache-apisix-splunk-hec-logging"
@@ -127,10 +130,15 @@ local function send_to_splunk(conf, entries)
local http_new = http.new()
http_new:set_timeout(conf.endpoint.timeout * 1000)
+ local t = {}
+ for _, e in ipairs(entries) do
+ table_insert(t, core.json.encode(e))
+ end
+
local res, err = http_new:request_uri(conf.endpoint.uri, {
ssl_verify = conf.ssl_verify,
method = "POST",
- body = core.json.encode(entries),
+ body = table_concat(t),
headers = request_headers,
})
diff --git a/apisix/plugins/syslog/init.lua b/apisix/plugins/syslog/init.lua
index 24f2f62ab812..0ab34f8054e9 100644
--- a/apisix/plugins/syslog/init.lua
+++ b/apisix/plugins/syslog/init.lua
@@ -18,6 +18,10 @@
local core = require("apisix.core")
local bp_manager_mod = require("apisix.utils.batch-processor-manager")
local logger_socket = require("resty.logger.socket")
+local rfc5424 = require("apisix.utils.rfc5424")
+local ipairs = ipairs
+local table_insert = core.table.insert
+local table_concat = core.table.concat
local batch_processor_manager = bp_manager_mod.new("sys logger")
@@ -63,7 +67,8 @@ local function send_syslog_data(conf, log_message, api_ctx)
end
-- reuse the logger object
- local ok, err = logger:log(core.json.encode(log_message))
+ local ok, err = logger:log(log_message)
+
if not ok then
res = false
err_msg = "failed to log message" .. err
@@ -75,28 +80,32 @@ end
-- called in log phase of APISIX
function _M.push_entry(conf, ctx, entry)
- if batch_processor_manager:add_entry(conf, entry) then
+ local json_str, err = core.json.encode(entry)
+ if not json_str then
+ core.log.error('error occurred while encoding the data: ', err)
+ return
+ end
+
+ local rfc5424_data = rfc5424.encode("SYSLOG", "INFO", ctx.var.host,
+ "apisix", ctx.var.pid, json_str)
+
+ if batch_processor_manager:add_entry(conf, rfc5424_data) then
return
end
-- Generate a function to be executed by the batch processor
local cp_ctx = core.table.clone(ctx)
- local func = function(entries, batch_max_size)
- local data, err
- if batch_max_size == 1 then
- data, err = core.json.encode(entries[1]) -- encode as single {}
- else
- data, err = core.json.encode(entries) -- encode as array [{}]
- end
-
- if not data then
- return false, 'error occurred while encoding the data: ' .. err
+ local func = function(entries)
+ local items = {}
+ for _, e in ipairs(entries) do
+ table_insert(items, e)
+ core.log.debug("buffered logs:", e)
end
- return send_syslog_data(conf, data, cp_ctx)
+ return send_syslog_data(conf, table_concat(items), cp_ctx)
end
- batch_processor_manager:add_entry_to_new_processor(conf, entry, ctx, func)
+ batch_processor_manager:add_entry_to_new_processor(conf, rfc5424_data, ctx, func)
end
diff --git a/apisix/plugins/tencent-cloud-cls/cls-sdk.lua b/apisix/plugins/tencent-cloud-cls/cls-sdk.lua
index d2b6e8ad4525..650d4ab0e92b 100644
--- a/apisix/plugins/tencent-cloud-cls/cls-sdk.lua
+++ b/apisix/plugins/tencent-cloud-cls/cls-sdk.lua
@@ -41,6 +41,7 @@ local type = type
local tostring = tostring
local setmetatable = setmetatable
local pcall = pcall
+local unpack = unpack
-- api doc https://www.tencentcloud.com/document/product/614/16873
local MAX_SINGLE_VALUE_SIZE = 1 * 1024 * 1024
@@ -62,13 +63,20 @@ local params_cache = {
local function get_ip(hostname)
local _, resolved = socket.dns.toip(hostname)
local ip_list = {}
- for _, v in ipairs(resolved.ip) do
- insert_tab(ip_list, v)
+ if not resolved.ip then
+ -- DNS parsing failure
+ local err = resolved
+ core.log.error("resolve ip failed, hostname: " .. hostname .. ", error: " .. err)
+ return nil, err
+ else
+ for _, v in ipairs(resolved.ip) do
+ insert_tab(ip_list, v)
+ end
end
return ip_list
end
-local host_ip = tostring(unpack(get_ip(core_gethostname())))
+local host_ip
local log_group_list = {}
local log_group_list_pb = {
logGroupList = log_group_list,
@@ -273,6 +281,15 @@ function _M.send_to_cls(self, logs)
-- sums of all value in all LogGroup should be no more than 5MB
-- so send whenever size exceed max size
local group_list_start = 1
+
+ if not host_ip then
+ local host_ip_list, err = get_ip(core_gethostname())
+ if not host_ip_list then
+ return false, err
+ end
+ host_ip = tostring(unpack(host_ip_list))
+ end
+
for i = 1, #logs, 1 do
local contents, log_size = normalize_log(logs[i])
if log_size > MAX_LOG_GROUP_VALUE_SIZE then
diff --git a/apisix/plugins/traffic-split.lua b/apisix/plugins/traffic-split.lua
index 38e272b7be66..1d621426a137 100644
--- a/apisix/plugins/traffic-split.lua
+++ b/apisix/plugins/traffic-split.lua
@@ -125,7 +125,7 @@ end
local function parse_domain_for_node(node)
- local host = node.host
+ local host = node.domain or node.host
if not ipmatcher.parse_ipv4(host)
and not ipmatcher.parse_ipv6(host)
then
@@ -233,6 +233,20 @@ function _M.access(conf, ctx)
local match_passed = true
for _, rule in ipairs(conf.rules) do
+ -- check if all upstream_ids are valid
+ if rule.weighted_upstreams then
+ for _, wupstream in ipairs(rule.weighted_upstreams) do
+ local ups_id = wupstream.upstream_id
+ if ups_id then
+ local ups = upstream.get_by_id(ups_id)
+ if not ups then
+ return 500, "failed to fetch upstream info by "
+ .. "upstream id: " .. ups_id
+ end
+ end
+ end
+ end
+
if not rule.match then
match_passed = true
weighted_upstreams = rule.weighted_upstreams
diff --git a/apisix/plugins/ua-restriction.lua b/apisix/plugins/ua-restriction.lua
index ec74e7592115..577dc2b67cbb 100644
--- a/apisix/plugins/ua-restriction.lua
+++ b/apisix/plugins/ua-restriction.lua
@@ -22,11 +22,8 @@ local type = type
local str_strip = stringx.strip
local re_find = ngx.re.find
-local MATCH_NONE = 0
-local MATCH_ALLOW = 1
-local MATCH_DENY = 2
-
-local lrucache_useragent = core.lrucache.new({ ttl = 300, count = 4096 })
+local lrucache_allow = core.lrucache.new({ ttl = 300, count = 4096 })
+local lrucache_deny = core.lrucache.new({ ttl = 300, count = 4096 })
local schema = {
type = "object",
@@ -58,6 +55,10 @@ local schema = {
default = "Not allowed"
},
},
+ oneOf = {
+ {required = {"allowlist"}},
+ {required = {"denylist"}}
+ }
}
local plugin_name = "ua-restriction"
@@ -69,27 +70,56 @@ local _M = {
schema = schema,
}
-local function match_user_agent(user_agent, conf)
- user_agent = str_strip(user_agent)
- if conf.allowlist then
- for _, rule in ipairs(conf.allowlist) do
+local function check_with_allow_list(user_agents, allowlist)
+ local check = function (user_agent)
+ user_agent = str_strip(user_agent)
+
+ for _, rule in ipairs(allowlist) do
if re_find(user_agent, rule, "jo") then
- return MATCH_ALLOW
+ return true
end
end
+ return false
end
- if conf.denylist then
- for _, rule in ipairs(conf.denylist) do
+ if type(user_agents) == "table" then
+ for _, v in ipairs(user_agents) do
+ if lrucache_allow(v, allowlist, check, v) then
+ return true
+ end
+ end
+ return false
+ else
+ return lrucache_allow(user_agents, allowlist, check, user_agents)
+ end
+end
+
+
+local function check_with_deny_list(user_agents, denylist)
+ local check = function (user_agent)
+ user_agent = str_strip(user_agent)
+
+ for _, rule in ipairs(denylist) do
if re_find(user_agent, rule, "jo") then
- return MATCH_DENY
+ return false
end
end
+ return true
end
- return MATCH_NONE
+ if type(user_agents) == "table" then
+ for _, v in ipairs(user_agents) do
+ if lrucache_deny(v, denylist, check, v) then
+ return false
+ end
+ end
+ return true
+ else
+ return lrucache_deny(user_agents, denylist, check, user_agents)
+ end
end
+
function _M.check_schema(conf)
local ok, err = core.schema.check(schema, conf)
@@ -118,6 +148,7 @@ function _M.check_schema(conf)
return true
end
+
function _M.access(conf, ctx)
local user_agent = core.request.header(ctx, "User-Agent")
@@ -128,21 +159,16 @@ function _M.access(conf, ctx)
return 403, { message = conf.message }
end
end
- local match = MATCH_NONE
- if type(user_agent) == "table" then
- for _, v in ipairs(user_agent) do
- if type(v) == "string" then
- match = lrucache_useragent(v, conf, match_user_agent, v, conf)
- if match > MATCH_ALLOW then
- break
- end
- end
- end
+
+ local is_passed
+
+ if conf.allowlist then
+ is_passed = check_with_allow_list(user_agent, conf.allowlist)
else
- match = lrucache_useragent(user_agent, conf, match_user_agent, user_agent, conf)
+ is_passed = check_with_deny_list(user_agent, conf.denylist)
end
- if match > MATCH_ALLOW then
+ if not is_passed then
return 403, { message = conf.message }
end
end
diff --git a/apisix/plugins/wolf-rbac.lua b/apisix/plugins/wolf-rbac.lua
index 62cb7b04fbba..154fde41a4ad 100644
--- a/apisix/plugins/wolf-rbac.lua
+++ b/apisix/plugins/wolf-rbac.lua
@@ -266,13 +266,13 @@ function _M.rewrite(conf, ctx)
local consumers = consumer.consumers_kv(plugin_name, consumer_conf, "appid")
core.log.info("------ consumers: ", core.json.delay_encode(consumers))
- local consumer = consumers[appid]
- if not consumer then
+ local cur_consumer = consumers[appid]
+ if not cur_consumer then
core.log.error("consumer [", appid, "] not found")
return 401, fail_response("Invalid appid in rbac token")
end
- core.log.info("consumer: ", core.json.delay_encode(consumer))
- local server = consumer.auth_conf.server
+ core.log.info("consumer: ", core.json.delay_encode(cur_consumer))
+ local server = cur_consumer.auth_conf.server
local res = check_url_permission(server, appid, action, url,
client_ip, wolf_token)
@@ -287,7 +287,7 @@ function _M.rewrite(conf, ctx)
local userId = userInfo.id
username = userInfo.username
nickname = userInfo.nickname or userInfo.username
- local prefix = consumer.auth_conf.header_prefix or ''
+ local prefix = cur_consumer.auth_conf.header_prefix or ''
core.response.set_header(prefix .. "UserId", userId)
core.response.set_header(prefix .. "Username", username)
core.response.set_header(prefix .. "Nickname", ngx.escape_uri(nickname))
@@ -303,6 +303,7 @@ function _M.rewrite(conf, ctx)
") failed, res: ",core.json.delay_encode(res))
return res.status, fail_response(res.err, { username = username, nickname = nickname })
end
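+    -- bind the matched consumer to the request context so plugins running later
+    -- in the chain can see it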
+ consumer.attach_consumer(ctx, cur_consumer, consumer_conf)
core.log.info("wolf-rbac check permission passed")
end
diff --git a/apisix/plugins/workflow.lua b/apisix/plugins/workflow.lua
index 5adb5b455435..73d68375dd9d 100644
--- a/apisix/plugins/workflow.lua
+++ b/apisix/plugins/workflow.lua
@@ -52,7 +52,8 @@ local schema = {
required = {"case", "actions"}
}
}
- }
+ },
+ required = {"rules"}
}
local plugin_name = "workflow"
@@ -93,7 +94,7 @@ end
local function rate_limit(conf, ctx)
- return limit_count.rate_limit(conf, ctx)
+ return limit_count.rate_limit(conf, ctx, "limit-count", 1)
end
diff --git a/apisix/router.lua b/apisix/router.lua
index 2fd14917c299..93b123e5b004 100644
--- a/apisix/router.lua
+++ b/apisix/router.lua
@@ -18,9 +18,7 @@ local require = require
local http_route = require("apisix.http.route")
local apisix_upstream = require("apisix.upstream")
local core = require("apisix.core")
-local plugin_checker = require("apisix.plugin").plugin_checker
local str_lower = string.lower
-local error = error
local ipairs = ipairs
@@ -29,7 +27,6 @@ local _M = {version = 0.3}
local function filter(route)
route.orig_modifiedIndex = route.modifiedIndex
- route.update_count = 0
route.has_domain = false
if not route.value then
@@ -91,17 +88,6 @@ function _M.http_init_worker()
_M.router_ssl = router_ssl
_M.api = require("apisix.api_router")
-
- local global_rules, err = core.config.new("/global_rules", {
- automatic = true,
- item_schema = core.schema.global_rule,
- checker = plugin_checker,
- })
- if not global_rules then
- error("failed to create etcd instance for fetching /global_rules : "
- .. err)
- end
- _M.global_rules = global_rules
end
diff --git a/apisix/schema_def.lua b/apisix/schema_def.lua
index f7b117af93da..cec1c53b3438 100644
--- a/apisix/schema_def.lua
+++ b/apisix/schema_def.lua
@@ -82,7 +82,7 @@ local label_value_def = {
description = "value of label",
type = "string",
pattern = [[^\S+$]],
- maxLength = 64,
+ maxLength = 256,
minLength = 1
}
_M.label_value_def = label_value_def
@@ -283,6 +283,7 @@ local health_checker = {
{required = {"active"}},
{required = {"active", "passive"}},
},
+ additionalProperties = false,
}
@@ -501,14 +502,15 @@ local upstream_schema = {
oneOf = {
{required = {"nodes"}},
{required = {"service_name", "discovery_type"}},
- }
+ },
+ additionalProperties = false
}
-- TODO: add more nginx variable support
_M.upstream_hash_vars_schema = {
type = "string",
pattern = [[^((uri|server_name|server_addr|request_uri|remote_port]]
- .. [[|remote_addr|query_string|host|hostname)]]
+ .. [[|remote_addr|query_string|host|hostname|mqtt_client_id)]]
.. [[|arg_[0-9a-zA-z_-]+)$]],
}
@@ -662,6 +664,7 @@ _M.route = {
{required = {"script", "plugin_config_id"}},
}
},
+ additionalProperties = false,
}
@@ -689,6 +692,7 @@ _M.service = {
uniqueItems = true,
},
},
+ additionalProperties = false,
}
@@ -707,6 +711,7 @@ _M.consumer = {
desc = desc_def,
},
required = {"username"},
+ additionalProperties = false,
}
@@ -725,8 +730,19 @@ _M.ssl = {
default = "server",
enum = {"server", "client"}
},
- cert = certificate_scheme,
- key = private_key_schema,
+ cert = {
+ oneOf = {
+ certificate_scheme,
+ -- TODO: uniformly define the schema of secret_uri
+ { type = "string", pattern = "^\\$(secret|env)://"}
+ }
+ },
+ key = {
+ oneOf = {
+ private_key_schema,
+ { type = "string", pattern = "^\\$(secret|env)://"}
+ }
+ },
sni = {
type = "string",
pattern = host_def_pat,
@@ -756,6 +772,15 @@ _M.ssl = {
minimum = 0,
default = 1,
},
+ skip_mtls_uri_regex = {
+ type = "array",
+ minItems = 1,
+ uniqueItems = true,
+ items = {
+ description = "uri regular expression to skip mtls",
+ type = "string",
+ }
+ },
},
required = {"ca"},
},
@@ -770,6 +795,15 @@ _M.ssl = {
enum = {1, 0},
default = 1
},
+ ssl_protocols = {
+ description = "set ssl protocols",
+ type = "array",
+ maxItems = 3,
+ uniqueItems = true,
+ items = {
+ enum = {"TLSv1.1", "TLSv1.2", "TLSv1.3"}
+ },
+ },
validity_end = timestamp_def,
validity_start = timestamp_def,
create_time = timestamp_def,
@@ -788,7 +822,8 @@ _M.ssl = {
{required = {"snis", "key", "cert"}}
}
},
- ["else"] = {required = {"key", "cert"}}
+ ["else"] = {required = {"key", "cert"}},
+ additionalProperties = false,
}
@@ -805,6 +840,7 @@ _M.proto = {
}
},
required = {"content"},
+ additionalProperties = false,
}
@@ -816,7 +852,8 @@ _M.global_rule = {
create_time = timestamp_def,
update_time = timestamp_def
},
- required = {"plugins"},
+ required = {"id", "plugins"},
+ additionalProperties = false,
}
@@ -850,6 +887,7 @@ local xrpc_protocol_schema = {
dependencies = {
name = {"conf"},
},
+ additionalProperties = false,
},
},
@@ -884,7 +922,8 @@ _M.stream_route = {
upstream_id = id_schema,
plugins = plugins_schema,
protocol = xrpc_protocol_schema,
- }
+ },
+ additionalProperties = false,
}
@@ -900,6 +939,7 @@ _M.plugins = {
stream = {
type = "boolean"
},
+ additionalProperties = false,
},
required = {"name"}
}
@@ -917,6 +957,7 @@ _M.plugin_config = {
update_time = timestamp_def
},
required = {"id", "plugins"},
+ additionalProperties = false,
}
@@ -931,6 +972,7 @@ _M.consumer_group = {
update_time = timestamp_def
},
required = {"id", "plugins"},
+ additionalProperties = false,
}
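
Taken together, the schema changes above tighten validation (additionalProperties = false on routes, services, consumers, SSL objects and more), allow $secret:// and $env:// references in the SSL cert/key fields, and add ssl_protocols, skip_mtls_uri_regex and the mqtt_client_id hash variable. As a rough sketch (the secret paths and field values here are illustrative only, not part of this patch), an SSL object exercising the new fields could be validated like this:

    local core = require("apisix.core")

    local ssl_conf = {
        sni  = "example.com",
        cert = "$secret://vault/tls/example.com/cert",   -- secret reference now allowed
        key  = "$secret://vault/tls/example.com/key",
        ssl_protocols = {"TLSv1.2", "TLSv1.3"},          -- new per-SSL protocol list
    }

    local ok, err = core.schema.check(core.schema.ssl, ssl_conf)
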
diff --git a/apisix/secret.lua b/apisix/secret.lua
index 7dcf9cf4ab22..ca9b09190ebc 100644
--- a/apisix/secret.lua
+++ b/apisix/secret.lua
@@ -110,14 +110,27 @@ function _M.init_worker()
end
-local function parse_secret_uri(secret_uri)
+local function check_secret_uri(secret_uri)
-- Avoid the error caused by has_prefix to cause a crash.
if type(secret_uri) ~= "string" then
- return nil, "error secret_uri type: " .. type(secret_uri)
+ return false, "error secret_uri type: " .. type(secret_uri)
+ end
+
+ if not string.has_prefix(secret_uri, PREFIX) and
+ not string.has_prefix(upper(secret_uri), core.env.PREFIX) then
+ return false, "error secret_uri prefix: " .. secret_uri
end
- if not string.has_prefix(secret_uri, PREFIX) then
- return nil, "error secret_uri prefix: " .. secret_uri
+ return true
+end
+
+_M.check_secret_uri = check_secret_uri
+
+
+local function parse_secret_uri(secret_uri)
+ local is_secret_uri, err = check_secret_uri(secret_uri)
+ if not is_secret_uri then
+ return is_secret_uri, err
end
local path = sub(secret_uri, #PREFIX + 1)
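
The refactor above extracts the prefix check into check_secret_uri and exports it, so other modules (see apisix/ssl.lua below) can ask whether a value is a secret reference without parsing it. A minimal usage sketch with made-up values:

    local secret = require("apisix.secret")

    secret.check_secret_uri("$secret://vault/apisix/ssl/cert")   -- true: secret manager reference
    secret.check_secret_uri("$env://TLS_CERT")                   -- true: environment reference
    secret.check_secret_uri("-----BEGIN CERTIFICATE-----...")    -- false, plus an error message
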
diff --git a/apisix/ssl.lua b/apisix/ssl.lua
index 18898027aee5..e14c92fc3c41 100644
--- a/apisix/ssl.lua
+++ b/apisix/ssl.lua
@@ -14,8 +14,11 @@
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
-local core = require("apisix.core")
-local ngx_ssl = require("ngx.ssl")
+local core = require("apisix.core")
+local secret = require("apisix.secret")
+local ngx_ssl = require("ngx.ssl")
+local ngx_ssl_client = require("ngx.ssl.clienthello")
+
local ngx_encode_base64 = ngx.encode_base64
local ngx_decode_base64 = ngx.decode_base64
local aes = require("resty.aes")
@@ -37,8 +40,13 @@ local pkey_cache = core.lrucache.new {
local _M = {}
-function _M.server_name()
- local sni, err = ngx_ssl.server_name()
+function _M.server_name(clienthello)
+ local sni, err
+ if clienthello then
+ sni, err = ngx_ssl_client.get_client_hello_server_name()
+ else
+ sni, err = ngx_ssl.server_name()
+ end
if err then
return nil, err
end
@@ -56,6 +64,14 @@ function _M.server_name()
end
+function _M.set_protocols_by_clienthello(ssl_protocols)
+ if ssl_protocols then
+ return ngx_ssl_client.set_protocols(ssl_protocols)
+ end
+ return true
+end
+
+
local function init_iv_tbl(ivs)
local _aes_128_cbc_with_iv_tbl = core.table.new(2, 0)
local type_ivs = type(ivs)
@@ -252,9 +268,13 @@ function _M.check_ssl_conf(in_dp, conf)
end
end
- local ok, err = validate(conf.cert, conf.key)
- if not ok then
- return nil, err
+ if not secret.check_secret_uri(conf.cert) and
+ not secret.check_secret_uri(conf.key) then
+
+ local ok, err = validate(conf.cert, conf.key)
+ if not ok then
+ return nil, err
+ end
end
if conf.type == "client" then
@@ -268,9 +288,13 @@ function _M.check_ssl_conf(in_dp, conf)
end
for i = 1, numcerts do
- local ok, err = validate(conf.certs[i], conf.keys[i])
- if not ok then
- return nil, "failed to handle cert-key pair[" .. i .. "]: " .. err
+        if not secret.check_secret_uri(conf.certs[i]) and
+           not secret.check_secret_uri(conf.keys[i]) then
+
+ local ok, err = validate(conf.certs[i], conf.keys[i])
+ if not ok then
+ return nil, "failed to handle cert-key pair[" .. i .. "]: " .. err
+ end
end
end
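
Three things change in this module: server_name() can now read the SNI from the ClientHello (useful before a certificate is selected), set_protocols_by_clienthello() limits the TLS versions offered per SSL object, and check_ssl_conf() skips pair validation when cert or key is a secret reference that cannot be resolved at configuration time. A sketch of the ClientHello path, assuming it is driven from an ssl_client_hello_by_lua style phase and that the protocol list would normally come from the matched SSL object:

    local apisix_ssl = require("apisix.ssl")

    local sni, err = apisix_ssl.server_name(true)          -- read SNI from the ClientHello
    if sni then
        -- in practice the list comes from the matched SSL object's ssl_protocols field
        local ok, set_err = apisix_ssl.set_protocols_by_clienthello({"TLSv1.2", "TLSv1.3"})
    end
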
diff --git a/apisix/ssl/router/radixtree_sni.lua b/apisix/ssl/router/radixtree_sni.lua
index fd1f55c395dc..b6824852e0b3 100644
--- a/apisix/ssl/router/radixtree_sni.lua
+++ b/apisix/ssl/router/radixtree_sni.lua
@@ -18,8 +18,10 @@ local get_request = require("resty.core.base").get_request
local router_new = require("apisix.utils.router").new
local core = require("apisix.core")
local apisix_ssl = require("apisix.ssl")
+local secret = require("apisix.secret")
local ngx_ssl = require("ngx.ssl")
local config_util = require("apisix.core.config_util")
+local ngx = ngx
local ipairs = ipairs
local type = type
local error = error
@@ -183,6 +185,7 @@ function _M.match_and_set(api_ctx, match_only, alt_sni)
for _, msni in ipairs(api_ctx.matched_sni) do
if sni_rev == msni or not str_find(sni_rev, ".", #msni) then
matched = true
+ break
end
end
if not matched then
@@ -203,16 +206,40 @@ function _M.match_and_set(api_ctx, match_only, alt_sni)
end
end
- local matched_ssl = api_ctx.matched_ssl
- core.log.info("debug - matched: ", core.json.delay_encode(matched_ssl, true))
+ core.log.info("debug - matched: ", core.json.delay_encode(api_ctx.matched_ssl, true))
if match_only then
return true
end
+ ok, err = _M.set(api_ctx.matched_ssl, sni)
+ if not ok then
+ return false, err
+ end
+
+ return true
+end
+
+
+function _M.set(matched_ssl, sni)
+ if not matched_ssl then
+ return false, "failed to match ssl certificate"
+ end
+ local ok, err
+ if not sni then
+ sni, err = apisix_ssl.server_name()
+ if type(sni) ~= "string" then
+ local advise = "please check if the client requests via IP or uses an outdated " ..
+ "protocol. If you need to report an issue, " ..
+ "provide a packet capture file of the TLS handshake."
+ return false, "failed to find SNI: " .. (err or advise)
+ end
+ end
ngx_ssl.clear_certs()
- ok, err = _M.set_cert_and_key(sni, matched_ssl.value)
+ local new_ssl_value = secret.fetch_secrets(matched_ssl.value) or matched_ssl.value
+
+ ok, err = _M.set_cert_and_key(sni, new_ssl_value)
if not ok then
return false, err
end
@@ -226,7 +253,11 @@ function _M.match_and_set(api_ctx, match_only, alt_sni)
return false, "failed to parse client cert: " .. err
end
- local ok, err = ngx_ssl.verify_client(parsed_cert, depth)
+ local reject_in_handshake =
+ (ngx.config.subsystem == "stream") or
+ (matched_ssl.value.client.skip_mtls_uri_regex == nil)
+ local ok, err = ngx_ssl.verify_client(parsed_cert, depth,
+ reject_in_handshake)
if not ok then
return false, err
end
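
match_and_set() is split so that the certificate-setting half lives in a reusable set(), which resolves $secret:// and $env:// references through secret.fetch_secrets() before calling set_cert_and_key(), and mTLS verification failures can be deferred past the handshake when skip_mtls_uri_regex is configured. A sketch of calling the new entry point directly; matched_ssl is a placeholder for an item produced by the SNI radixtree match:

    local sni_router = require("apisix.ssl.router.radixtree_sni")

    local ok, err = sni_router.set(matched_ssl)    -- sni is looked up from the handshake if omitted
    if not ok then
        ngx.log(ngx.ERR, "failed to set cert and key: ", err)
    end
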
diff --git a/apisix/stream/xrpc/protocols/dubbo/init.lua b/apisix/stream/xrpc/protocols/dubbo/init.lua
new file mode 100644
index 000000000000..19160d6c544e
--- /dev/null
+++ b/apisix/stream/xrpc/protocols/dubbo/init.lua
@@ -0,0 +1,231 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements. See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+local core = require("apisix.core")
+local sdk = require("apisix.stream.xrpc.sdk")
+local xrpc_socket = require("resty.apisix.stream.xrpc.socket")
+local math_random = math.random
+local ngx = ngx
+local OK = ngx.OK
+local str_format = string.format
+local DECLINED = ngx.DECLINED
+local DONE = ngx.DONE
+local bit = require("bit")
+local ffi = require("ffi")
+local ffi_str = ffi.string
+
+
+-- dubbo protocol spec: https://cn.dubbo.apache.org/zh-cn/overview/reference/protocols/tcp/
+local header_len = 16
+local _M = {}
+
+
+function _M.init_downstream(session)
+ session.req_id_seq = 0
+ session.resp_id_seq = 0
+ session.cmd_labels = { session.route.id, "" }
+ return xrpc_socket.downstream.socket()
+end
+
+
+local function parse_dubbo_header(header)
+ for i = 1, header_len do
+ local currentByte = header:byte(i)
+ if not currentByte then
+ return nil
+ end
+ end
+
+ local magic_number = str_format("%04x", header:byte(1) * 256 + header:byte(2))
+ local message_flag = header:byte(3)
+ local status = header:byte(4)
+ local request_id = 0
+ for i = 5, 12 do
+ request_id = request_id * 256 + header:byte(i)
+ end
+
+ local byte13Val = header:byte(13) * 256 * 256 * 256
+ local byte14Val = header:byte(14) * 256 * 256
+ local data_length = byte13Val + byte14Val + header:byte(15) * 256 + header:byte(16)
+
+ local is_request = bit.band(bit.rshift(message_flag, 7), 0x01) == 1 and 1 or 0
+ local is_two_way = bit.band(bit.rshift(message_flag, 6), 0x01) == 1 and 1 or 0
+ local is_event = bit.band(bit.rshift(message_flag, 5), 0x01) == 1 and 1 or 0
+
+ return {
+ magic_number = magic_number,
+ message_flag = message_flag,
+ is_request = is_request,
+ is_two_way = is_two_way,
+ is_event = is_event,
+ status = status,
+ request_id = request_id,
+ data_length = data_length
+ }
+end
+
+
+local function read_data(sk, is_req)
+ local header_data, err = sk:read(header_len)
+ if not header_data then
+ return nil, err, false
+ end
+
+ local header_str = ffi_str(header_data, header_len)
+ local header_info = parse_dubbo_header(header_str)
+ if not header_info then
+ return nil, "header insufficient", false
+ end
+
+ local is_valid_magic_number = header_info.magic_number == "dabb"
+ if not is_valid_magic_number then
+ return nil, str_format("unknown magic number: \"%s\"", header_info.magic_number), false
+ end
+
+ local body_data, err = sk:read(header_info.data_length)
+ if not body_data then
+        core.log.error("failed to read dubbo ", is_req and "request" or "response", " body")
+ return nil, err, false
+ end
+
+ local ctx = ngx.ctx
+ ctx.dubbo_serialization_id = bit.band(header_info.message_flag, 0x1F)
+
+ if is_req then
+ ctx.dubbo_req_body_data = body_data
+ else
+ ctx.dubbo_rsp_body_data = body_data
+ end
+
+ return true, nil, false
+end
+
+
+local function read_req(sk)
+ return read_data(sk, true)
+end
+
+
+local function read_reply(sk)
+ return read_data(sk, false)
+end
+
+
+local function handle_reply(session, sk)
+ local ok, err = read_reply(sk)
+ if not ok then
+ return nil, err
+ end
+
+ local ctx = sdk.get_req_ctx(session, 10)
+
+ return ctx
+end
+
+
+function _M.from_downstream(session, downstream)
+ local read_pipeline = false
+ session.req_id_seq = session.req_id_seq + 1
+ local ctx = sdk.get_req_ctx(session, session.req_id_seq)
+ session._downstream_ctx = ctx
+ while true do
+ local ok, err, pipelined = read_req(downstream)
+ if not ok then
+ if err ~= "timeout" and err ~= "closed" then
+ core.log.error("failed to read request: ", err)
+ end
+
+ if read_pipeline and err == "timeout" then
+ break
+ end
+
+ return DECLINED
+ end
+
+ if not pipelined then
+ break
+ end
+
+ if not read_pipeline then
+ read_pipeline = true
+ -- set minimal read timeout to read pipelined data
+ downstream:settimeouts(0, 0, 1)
+ end
+ end
+
+ if read_pipeline then
+ -- set timeout back
+ downstream:settimeouts(0, 0, 0)
+ end
+
+ return OK, ctx
+end
+
+
+function _M.connect_upstream(session, ctx)
+ local conf = session.upstream_conf
+ local nodes = conf.nodes
+ if #nodes == 0 then
+ core.log.error("failed to connect: no nodes")
+ return DECLINED
+ end
+
+ local node = nodes[math_random(#nodes)]
+ local sk = sdk.connect_upstream(node, conf)
+ if not sk then
+ return DECLINED
+ end
+
+ core.log.debug("dubbo_connect_upstream end")
+
+ return OK, sk
+end
+
+function _M.disconnect_upstream(session, upstream)
+ sdk.disconnect_upstream(upstream, session.upstream_conf)
+end
+
+
+function _M.to_upstream(session, ctx, downstream, upstream)
+ local ok, _ = upstream:move(downstream)
+ if not ok then
+ return DECLINED
+ end
+
+ return OK
+end
+
+
+function _M.from_upstream(session, downstream, upstream)
+    local ctx, err = handle_reply(session, upstream)
+ if err then
+ return DECLINED
+ end
+
+ local ok, _ = downstream:move(upstream)
+ if not ok then
+ return DECLINED
+ end
+
+ return DONE, ctx
+end
+
+
+function _M.log(_, _)
+end
+
+
+return _M
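
This new xrpc driver reads the fixed 16-byte Dubbo header, checks the 0xdabb magic, and extracts the request/two-way/event flags, serialization id and body length before streaming the body through unchanged. A small sketch of the same flag arithmetic on a made-up flag byte, mirroring what parse_dubbo_header (local to this module) computes:

    local bit = require("bit")

    local flag = 0xC2                                                -- 1100 0010
    local is_request       = bit.band(bit.rshift(flag, 7), 0x01)    -- 1: request
    local is_two_way       = bit.band(bit.rshift(flag, 6), 0x01)    -- 1: expects a reply
    local is_event         = bit.band(bit.rshift(flag, 5), 0x01)    -- 0: not a heartbeat/event
    local serialization_id = bit.band(flag, 0x1F)                   -- 2: hessian2
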
diff --git a/apisix/stream/xrpc/protocols/dubbo/schema.lua b/apisix/stream/xrpc/protocols/dubbo/schema.lua
new file mode 100644
index 000000000000..3a9d73325498
--- /dev/null
+++ b/apisix/stream/xrpc/protocols/dubbo/schema.lua
@@ -0,0 +1,32 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements. See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License. You may obtain a copy of the License at
+--
+-- http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+--
+local core = require("apisix.core")
+
+
+local schema = {
+ type = "object",
+}
+
+local _M = {}
+
+
+function _M.check_schema(conf)
+ return core.schema.check(schema, conf)
+end
+
+
+return _M
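
The protocol schema is intentionally permissive for now: any object passes, so a stream route only has to name the protocol. A sketch of the check, with the empty table standing in for future options:

    local dubbo_schema = require("apisix.stream.xrpc.protocols.dubbo.schema")

    local ok, err = dubbo_schema.check_schema({})   -- any table is accepted at the moment
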
diff --git a/apisix/utils/log-util.lua b/apisix/utils/log-util.lua
index 4aecf290e4ee..8e9cb9c01361 100644
--- a/apisix/utils/log-util.lua
+++ b/apisix/utils/log-util.lua
@@ -210,7 +210,27 @@ function _M.inject_get_full_log(f)
end
+local function is_match(match, ctx)
+ local match_result
+ for _, m in pairs(match) do
+        local ex, _ = expr.new(m)
+        match_result = ex:eval(ctx.var)
+ if match_result then
+ break
+ end
+ end
+
+ return match_result
+end
+
+
function _M.get_log_entry(plugin_name, conf, ctx)
+ -- If the "match" configuration is set and the matching conditions are not met,
+ -- then do not log the message.
+ if conf.match and not is_match(conf.match, ctx) then
+ return
+ end
+
local metadata = plugin.plugin_metadata(plugin_name)
core.log.info("metadata: ", core.json.delay_encode(metadata))
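
get_log_entry() now honors an optional match list on a logger conf: each entry is a lua-resty-expr rule group, the groups are OR-ed together, and nothing is logged when none of them evaluates to true against ctx.var. A sketch of such a conf; the variable names and values are examples only:

    local conf = {
        -- other logger options omitted
        match = {
            { {"arg_name", "==", "jack"} },            -- group 1
            { {"request_method", "==", "POST"} },      -- OR group 2
        },
    }
    -- log_util.get_log_entry(plugin_name, conf, ctx) returns nothing if no group matches
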
diff --git a/apisix/plugins/slslog/rfc5424.lua b/apisix/utils/rfc5424.lua
similarity index 83%
rename from apisix/plugins/slslog/rfc5424.lua
rename to apisix/utils/rfc5424.lua
index 5d09a58a5165..e046194bf349 100644
--- a/apisix/plugins/slslog/rfc5424.lua
+++ b/apisix/utils/rfc5424.lua
@@ -79,12 +79,13 @@ local Severity = {
}
local log_util = require("apisix.utils.log-util")
-
+local ipairs = ipairs
+local str_format = string.format
local _M = { version = 0.1 }
-function _M.encode(facility, severity, hostname, appname, pid, project,
- logstore, access_key_id, access_key_secret, msg)
+
+function _M.encode(facility, severity, hostname, appname, pid, msg, structured_data)
local pri = (Facility[facility] * 8 + Severity[severity])
local t = log_util.get_rfc3339_zulu_timestamp()
if not hostname then
@@ -95,10 +96,19 @@ function _M.encode(facility, severity, hostname, appname, pid, project,
appname = "-"
end
- return "<" .. pri .. ">1 " .. t .. " " .. hostname .. " " .. appname .. " " .. pid
- .. " - [logservice project=\"" .. project .. "\" logstore=\"" .. logstore
- .. "\" access-key-id=\"" .. access_key_id .. "\" access-key-secret=\""
- .. access_key_secret .. "\"] " .. msg .. "\n"
+ local structured_data_str = "-"
+
+ if structured_data then
+ structured_data_str = "[logservice"
+ for _, sd_param in ipairs(structured_data) do
+ structured_data_str = structured_data_str .. " " .. sd_param.name
+ .. "=\"" .. sd_param.value .. "\""
+ end
+ structured_data_str = structured_data_str .. "]"
+ end
+
+ return str_format("<%d>1 %s %s %s %d - %s %s\n", pri, t, hostname,
+ appname, pid, structured_data_str, msg)
end
return _M
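
The encoder is generalized from the sls-logger specific signature to plain RFC 5424: SD-PARAMs are passed as a list and rendered into a single [logservice ...] element, and the line is assembled with string.format. A sketch of a call and the rough shape of its output (all values are illustrative):

    local rfc5424 = require("apisix.utils.rfc5424")

    local structured_data = {
        {name = "project",  value = "apisix-logs"},
        {name = "logstore", value = "access"},
    }
    local entry = rfc5424.encode("SYSLOG", "INFO", "localhost", "apisix",
                                 12345, "hello world", structured_data)
    -- roughly: <46>1 <timestamp> localhost apisix 12345 - [logservice project="apisix-logs" logstore="access"] hello world
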
diff --git a/apisix/wasm.lua b/apisix/wasm.lua
index a27641504a2b..c8b863aeac9d 100644
--- a/apisix/wasm.lua
+++ b/apisix/wasm.lua
@@ -15,6 +15,7 @@
-- limitations under the License.
--
local core = require("apisix.core")
+local type = type
local support_wasm, wasm = pcall(require, "resty.proxy-wasm")
local ngx_var = ngx.var
@@ -23,8 +24,10 @@ local schema = {
type = "object",
properties = {
conf = {
- type = "string",
- minLength = 1,
+ oneOf = {
+ { type = "object", minProperties = 1},
+ { type = "string", minLength = 1},
+ }
},
},
required = {"conf"}
@@ -51,7 +54,13 @@ local function fetch_plugin_ctx(conf, ctx, plugin)
local plugin_ctx = ctxs[key]
local err
if not plugin_ctx then
- plugin_ctx, err = wasm.on_configure(plugin, conf.conf)
+ if type(conf.conf) == "table" then
+ plugin_ctx, err = wasm.on_configure(plugin, core.json.encode(conf.conf))
+ elseif type(conf.conf) == "string" then
+ plugin_ctx, err = wasm.on_configure(plugin, conf.conf)
+ else
+ return nil, "invalid conf type"
+ end
if not plugin_ctx then
return nil, err
end
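
A wasm plugin conf may now be a structured object as well as a pre-encoded JSON string; tables are serialized with core.json.encode before being passed to proxy-wasm's on_configure. A sketch of the two equivalent forms (the plugin fields themselves are hypothetical):

    local conf_as_table  = { conf = { rule = "deny", paths = {"/admin"} } }
    local conf_as_string = { conf = '{"rule":"deny","paths":["/admin"]}' }
    -- both satisfy the updated schema; the table form avoids hand-writing JSON
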
diff --git a/benchmark/fake-apisix/conf/nginx.conf b/benchmark/fake-apisix/conf/nginx.conf
index 9ae60d248141..f35131cbe2ea 100644
--- a/benchmark/fake-apisix/conf/nginx.conf
+++ b/benchmark/fake-apisix/conf/nginx.conf
@@ -103,25 +103,11 @@ http {
### the following x-forwarded-* headers is to send to upstream server
- set $var_x_forwarded_for $remote_addr;
set $var_x_forwarded_proto $scheme;
set $var_x_forwarded_host $host;
set $var_x_forwarded_port $server_port;
- if ($http_x_forwarded_for != "") {
- set $var_x_forwarded_for "${http_x_forwarded_for}, ${realip_remote_addr}";
- }
- if ($http_x_forwarded_proto != "") {
- set $var_x_forwarded_proto $http_x_forwarded_proto;
- }
- if ($http_x_forwarded_host != "") {
- set $var_x_forwarded_host $http_x_forwarded_host;
- }
- if ($http_x_forwarded_port != "") {
- set $var_x_forwarded_port $http_x_forwarded_port;
- }
-
- proxy_set_header X-Forwarded-For $var_x_forwarded_for;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $var_x_forwarded_proto;
proxy_set_header X-Forwarded-Host $var_x_forwarded_host;
proxy_set_header X-Forwarded-Port $var_x_forwarded_port;
diff --git a/ci/centos7-ci.sh b/ci/centos7-ci.sh
index 251dd33639b3..cf2d0387bbc2 100755
--- a/ci/centos7-ci.sh
+++ b/ci/centos7-ci.sh
@@ -33,7 +33,7 @@ install_dependencies() {
# install openresty to make apisix's rpm test work
yum install -y yum-utils && yum-config-manager --add-repo https://openresty.org/package/centos/openresty.repo
- yum install -y openresty openresty-debug openresty-openssl111-debug-devel pcre pcre-devel
+ yum install -y openresty-1.21.4.2 openresty-debug-1.21.4.2 openresty-openssl111-debug-devel pcre pcre-devel
# install luarocks
./utils/linux-install-luarocks.sh
@@ -73,9 +73,6 @@ install_dependencies() {
# install nodejs
install_nodejs
- # install rust
- install_rust
-
# grpc-web server && client
cd t/plugin/grpc-web
./setup.sh
diff --git a/ci/common.sh b/ci/common.sh
index 509647b001d2..e1961fc6b01e 100644
--- a/ci/common.sh
+++ b/ci/common.sh
@@ -21,6 +21,7 @@ export_or_prefix() {
export OPENRESTY_PREFIX="/usr/local/openresty-debug"
export APISIX_MAIN="https://raw.githubusercontent.com/apache/incubator-apisix/master/rockspec/apisix-master-0.rockspec"
export PATH=$OPENRESTY_PREFIX/nginx/sbin:$OPENRESTY_PREFIX/luajit/bin:$OPENRESTY_PREFIX/bin:$PATH
+ export OPENSSL111_BIN=$OPENRESTY_PREFIX/openssl111/bin/openssl
}
create_lua_deps() {
@@ -99,11 +100,6 @@ install_nodejs () {
npm config set registry https://registry.npmjs.org/
}
-install_rust () {
- curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sudo sh -s -- -y
- source "$HOME/.cargo/env"
-}
-
set_coredns() {
# test a domain name is configured as upstream
echo "127.0.0.1 test.com" | sudo tee -a /etc/hosts
diff --git a/ci/init-plugin-test-service.sh b/ci/init-plugin-test-service.sh
index 881f466c710d..d670b7fa39d4 100755
--- a/ci/init-plugin-test-service.sh
+++ b/ci/init-plugin-test-service.sh
@@ -22,8 +22,8 @@ after() {
docker exec -i apache-apisix_kafka-server2_1 /opt/bitnami/kafka/bin/kafka-topics.sh --create --zookeeper zookeeper-server2:2181 --replication-factor 1 --partitions 1 --topic test4
# prepare openwhisk env
- docker pull openwhisk/action-nodejs-v14:nightly
- docker run --rm -d --name openwhisk -p 3233:3233 -p 3232:3232 -v /var/run/docker.sock:/var/run/docker.sock openwhisk/standalone:nightly
+ docker pull openwhisk/action-nodejs-v14:1.20.0
+ docker run --rm -d --name openwhisk -p 3233:3233 -p 3232:3232 -v /var/run/docker.sock:/var/run/docker.sock openwhisk/standalone:1.0.0
docker exec -i openwhisk waitready
docker exec -i openwhisk bash -c "wsk package create pkg"
docker exec -i openwhisk bash -c "wsk action update /guest/pkg/testpkg <(echo 'function main(args){return {\"hello\": \"world\"}}') --kind nodejs:14"
@@ -42,8 +42,18 @@ after() {
# wait for keycloak ready
bash -c 'while true; do curl -s localhost:8080 &>/dev/null; ret=$?; [[ $ret -eq 0 ]] && break; sleep 3; done'
- docker cp ci/kcadm_configure_cas.sh apisix_keycloak_new:/tmp/
- docker exec apisix_keycloak_new bash /tmp/kcadm_configure_cas.sh
+
+ # install jq
+ wget https://github.com/stedolan/jq/releases/download/jq-1.6/jq-linux64 -O jq
+ chmod +x jq
+ docker cp jq apisix_keycloak:/usr/bin/
+
+ # configure keycloak
+ docker exec apisix_keycloak bash /tmp/kcadm_configure_cas.sh
+ docker exec apisix_keycloak bash /tmp/kcadm_configure_university.sh
+
+ echo 'CREATE TABLE default.test (`host` String, `client_ip` String, `route_id` String, `service_id` String, `@timestamp` String, PRIMARY KEY(`@timestamp`)) ENGINE = MergeTree()' | curl 'http://localhost:8123/' --data-binary @-
+ echo 'CREATE TABLE default.test (`host` String, `client_ip` String, `route_id` String, `service_id` String, `@timestamp` String, PRIMARY KEY(`@timestamp`)) ENGINE = MergeTree()' | curl 'http://localhost:8124/' --data-binary @-
}
before() {
diff --git a/ci/linux-install-openresty.sh b/ci/linux-install-openresty.sh
index 3bc2d18ab35e..8d4b6f87224a 100755
--- a/ci/linux-install-openresty.sh
+++ b/ci/linux-install-openresty.sh
@@ -38,6 +38,13 @@ OPENSSL3_PREFIX=${OPENSSL3_PREFIX-/home/runner}
SSL_LIB_VERSION=${SSL_LIB_VERSION-openssl}
if [ "$OPENRESTY_VERSION" == "source" ]; then
+ export openssl_prefix=/usr/local/openresty/openssl111
+ export zlib_prefix=/usr/local/openresty/zlib
+ export pcre_prefix=/usr/local/openresty/pcre
+
+ export cc_opt="-DNGX_LUA_ABORT_AT_PANIC -I${zlib_prefix}/include -I${pcre_prefix}/include -I${openssl_prefix}/include"
+ export ld_opt="-L${zlib_prefix}/lib -L${pcre_prefix}/lib -L${openssl_prefix}/lib -Wl,-rpath,${zlib_prefix}/lib:${pcre_prefix}/lib:${openssl_prefix}/lib"
+
if [ "$COMPILE_OPENSSL3" == "yes" ]; then
apt install -y build-essential
git clone https://github.com/openssl/openssl
@@ -72,7 +79,8 @@ if [ "$OPENRESTY_VERSION" == "source" ]; then
chmod +x build-apisix-base.sh
./build-apisix-base.sh latest
- sudo apt-get install openresty-openssl111-debug-dev
+ sudo apt-get install -y openresty-openssl111 openresty-openssl111-debug-dev libldap2-dev openresty-pcre openresty-zlib
+
exit 0
fi
diff --git a/ci/linux_apisix_current_luarocks_runner.sh b/ci/linux_apisix_current_luarocks_runner.sh
index a8836f43b691..96aac508f762 100755
--- a/ci/linux_apisix_current_luarocks_runner.sh
+++ b/ci/linux_apisix_current_luarocks_runner.sh
@@ -34,9 +34,6 @@ script() {
sudo rm -rf /usr/local/share/lua/5.1/apisix
- # install rust
- install_rust
-
# install APISIX with local version
luarocks install rockspec/apisix-master-0.rockspec --only-deps > build.log 2>&1 || (cat build.log && exit 1)
luarocks make rockspec/apisix-master-0.rockspec > build.log 2>&1 || (cat build.log && exit 1)
diff --git a/ci/linux_apisix_master_luarocks_runner.sh b/ci/linux_apisix_master_luarocks_runner.sh
index 8931ad82c5f5..afc487ddd160 100755
--- a/ci/linux_apisix_master_luarocks_runner.sh
+++ b/ci/linux_apisix_master_luarocks_runner.sh
@@ -38,9 +38,6 @@ script() {
mkdir tmp && cd tmp
cp -r ../utils ./
- # install rust
- install_rust
-
# install APISIX by luarocks
luarocks install $APISIX_MAIN > build.log 2>&1 || (cat build.log && exit 1)
cp ../bin/apisix /usr/local/bin/apisix
@@ -52,6 +49,13 @@ script() {
sudo PATH=$PATH apisix init
sudo PATH=$PATH apisix start
sudo PATH=$PATH apisix quit
+ for i in {1..10}
+ do
+ if [ ! -f /usr/local/apisix/logs/nginx.pid ];then
+ break
+ fi
+ sleep 0.3
+ done
sudo PATH=$PATH apisix start
sudo PATH=$PATH apisix stop
diff --git a/ci/linux_openresty_common_runner.sh b/ci/linux_openresty_common_runner.sh
index c96f8f6c383d..e4ed00905c86 100755
--- a/ci/linux_openresty_common_runner.sh
+++ b/ci/linux_openresty_common_runner.sh
@@ -33,9 +33,6 @@ do_install() {
./ci/linux-install-etcd-client.sh
- # install rust
- install_rust
-
create_lua_deps
# sudo apt-get install tree -y
@@ -71,6 +68,8 @@ script() {
export_or_prefix
openresty -V
+ make init
+
set_coredns
./t/grpc_server_example/grpc_server_example \
diff --git a/ci/linux_openresty_runner.sh b/ci/linux_openresty_runner.sh
index 2cdc87b218f3..2e39224efc59 100755
--- a/ci/linux_openresty_runner.sh
+++ b/ci/linux_openresty_runner.sh
@@ -18,5 +18,4 @@
export OPENRESTY_VERSION=source
-export TEST_CI_USE_GRPC=true
. ./ci/linux_openresty_common_runner.sh
diff --git a/ci/pod/docker-compose.plugin.yml b/ci/pod/docker-compose.plugin.yml
index e7878ab61b81..748b28b868f6 100644
--- a/ci/pod/docker-compose.plugin.yml
+++ b/ci/pod/docker-compose.plugin.yml
@@ -23,36 +23,23 @@ services:
# The latest image is the latest stable version
image: redis:latest
restart: unless-stopped
+ volumes:
+ - ./t/certs:/certs
+ command: "--tls-port 6380 \
+ --tls-cert-file /certs/mtls_server.crt \
+ --tls-key-file /certs/mtls_server.key \
+ --tls-ca-cert-file /certs/mtls_ca.crt \
+ --tls-auth-clients no \
+ --user alice on +@all ~* \\&* \\>somepassword"
ports:
- "6379:6379"
+ - "6380:6380"
networks:
apisix_net:
-
## keycloak
apisix_keycloak:
- image: sshniro/keycloak-apisix:1.0.0
- environment:
- KEYCLOAK_USER: admin
- KEYCLOAK_PASSWORD: 123456
- restart: unless-stopped
- ports:
- - "8090:8080"
- - "8443:8443"
- networks:
- apisix_net:
-
- ## keycloak
- # The keycloak official has two types of docker images:
- # * legacy WildFly distribution
- # * new Quarkus based distribution
- # Here we choose new version, because it's mainstream and
- # supports kcadm.sh to init the container for test.
- # The original keycloak service `apisix_keycloak` is
- # third-party personal customized image and for OIDC test only.
- # We should unify both containers in future.
- apisix_keycloak_new:
- container_name: apisix_keycloak_new
+ container_name: apisix_keycloak
image: quay.io/keycloak/keycloak:18.0.2
# use host network because in CAS auth,
# keycloak needs to send back-channel POST to apisix.
@@ -60,10 +47,16 @@ services:
environment:
KEYCLOAK_ADMIN: admin
KEYCLOAK_ADMIN_PASSWORD: admin
+ KC_HTTPS_CERTIFICATE_FILE: /opt/keycloak/conf/server.crt.pem
+ KC_HTTPS_CERTIFICATE_KEY_FILE: /opt/keycloak/conf/server.key.pem
restart: unless-stopped
- command: ["start-dev", "--http-port 8080"]
+ command: ["start-dev"]
volumes:
- /opt/keycloak-protocol-cas-18.0.2.jar:/opt/keycloak/providers/keycloak-protocol-cas-18.0.2.jar
+ - ./ci/pod/keycloak/server.crt.pem:/opt/keycloak/conf/server.crt.pem
+ - ./ci/pod/keycloak/server.key.pem:/opt/keycloak/conf/server.key.pem
+ - ./ci/pod/keycloak/kcadm_configure_cas.sh:/tmp/kcadm_configure_cas.sh
+ - ./ci/pod/keycloak/kcadm_configure_university.sh:/tmp/kcadm_configure_university.sh
## kafka-cluster
zookeeper-server1:
@@ -150,6 +143,15 @@ services:
- ./t/certs:/certs
+ ## Grafana Loki
+ loki:
+ image: grafana/loki:2.8.0
+ command: -config.file=/etc/loki/local-config.yaml -auth.enabled -querier.multi-tenant-queries-enabled
+ ports:
+ - "3100:3100"
+ networks:
+ - loki_net
+
rocketmq_namesrv:
image: apacherocketmq/rocketmq:4.6.0
container_name: rmqnamesrv
@@ -194,18 +196,6 @@ services:
networks:
opa_net:
- # Splunk HEC Logging Service
- splunk:
- image: splunk/splunk:8.2.3
- restart: unless-stopped
- ports:
- - "18088:8088"
- environment:
- SPLUNK_PASSWORD: "ApacheAPISIX@666"
- SPLUNK_START_ARGS: "--accept-license"
- SPLUNK_HEC_TOKEN: "BD274822-96AA-4DA6-90EC-18940FB2414C"
- SPLUNK_HEC_SSL: "False"
-
# Elasticsearch Logger Service
elasticsearch-noauth:
image: docker.elastic.co/elasticsearch/elasticsearch:7.17.1
@@ -319,9 +309,52 @@ services:
ports:
- '8888:8080'
+ vector:
+ image: timberio/vector:0.29.1-debian
+ container_name: vector
+ volumes:
+ - ./ci/pod/vector:/etc/vector/
+ - ./t/certs:/certs
+ ports:
+ - '3000:3000' #tcp logger
+ - '8127:8127/udp'
+ - '43000:43000'
+ - '5140:5140'
+ - "18088:18088" # For splunk logging tests
+ - '5150:5150/udp'
+ - "3001:3001" #http logger
+ networks:
+ vector_net:
+
+ clickhouse:
+ image: clickhouse/clickhouse-server:23.4.2-alpine
+ container_name: clickhouse
+ ports:
+ - '8123:8123'
+ networks:
+ clickhouse_net:
+
+ clickhouse2:
+ image: clickhouse/clickhouse-server:23.4.2-alpine
+ container_name: clickhouse2
+ ports:
+ - '8124:8123'
+ networks:
+ clickhouse_net:
+ otel-collector:
+ image: otel/opentelemetry-collector-contrib
+ volumes:
+ - ./ci/pod/otelcol-contrib:/etc/otelcol-contrib:rw
+ ports:
+ - '4318:4318'
+
+
networks:
apisix_net:
kafka_net:
skywalk_net:
rocketmq_net:
opa_net:
+ vector_net:
+ clickhouse_net:
+ loki_net:
diff --git a/ci/kcadm_configure_cas.sh b/ci/pod/keycloak/kcadm_configure_cas.sh
similarity index 100%
rename from ci/kcadm_configure_cas.sh
rename to ci/pod/keycloak/kcadm_configure_cas.sh
diff --git a/ci/pod/keycloak/kcadm_configure_university.sh b/ci/pod/keycloak/kcadm_configure_university.sh
new file mode 100644
index 000000000000..162b624f8ab6
--- /dev/null
+++ b/ci/pod/keycloak/kcadm_configure_university.sh
@@ -0,0 +1,90 @@
+#!/usr/bin/env bash
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+export PATH=/opt/keycloak/bin:$PATH
+
+kcadm.sh config credentials --server http://localhost:8080 --realm master --user admin --password admin
+
+# create realm University
+kcadm.sh create realms -s realm=University -s enabled=true
+
+# create roles `Teacher, Student`
+kcadm.sh create roles -r University -s name=Teacher
+kcadm.sh create roles -r University -s name=Student
+
+# create users `teacher@gmail.com, student@gmail.com`
+kcadm.sh create users -r University -s username=teacher@gmail.com -s enabled=true
+kcadm.sh create users -r University -s username=student@gmail.com -s enabled=true
+
+# set password
+kcadm.sh set-password -r University --username teacher@gmail.com --new-password 123456
+kcadm.sh set-password -r University --username student@gmail.com --new-password 123456
+
+# bind roles to users
+kcadm.sh add-roles -r University --uusername teacher@gmail.com --rolename Teacher
+kcadm.sh add-roles -r University --uusername student@gmail.com --rolename Student
+
+# create client course_management
+kcadm.sh create clients -r University -s clientId=course_management -s enabled=true -s clientAuthenticatorType=client-secret -s secret=d1ec69e9-55d2-4109-a3ea-befa071579d5
+
+client_id=$(kcadm.sh get clients -r University --fields id,clientId 2>/dev/null | jq -r '.[] | select(.clientId=='\"course_management\"') | .id')
+teacher_id=$(kcadm.sh get roles -r University --fields id,name 2>/dev/null | jq -r '.[] | select(.name=='\"Teacher\"') | .id')
+student_id=$(kcadm.sh get roles -r University --fields id,name 2>/dev/null | jq -r '.[] | select(.name=='\"Student\"') | .id')
+
+# update client course_management
+kcadm.sh update clients/${client_id} -r University -s protocol=openid-connect -s standardFlowEnabled=true \
+ -s implicitFlowEnabled=true -s directAccessGrantsEnabled=true -s serviceAccountsEnabled=true \
+ -s authorizationServicesEnabled=true -s 'redirectUris=["*"]' -s 'webOrigins=["*"]'
+
+kcadm.sh update clients/${client_id}/authz/resource-server -r University -s allowRemoteResourceManagement=false -s policyEnforcementMode="ENFORCING"
+
+# create authz-resource with name `course_resource`, uri `/course/*`, scope `DELETE, delete, view, GET`
+kcadm.sh create clients/${client_id}/authz/resource-server/resource -r University -s name=course_resource \
+ -s ownerManagedAccess=false -s uris='["/course/*"]' -s scopes='[{"name": "DELETE"},{"name": "view"},{"name": "GET"},{"name": "delete"}]'
+
+course_resource_id=$(kcadm.sh get clients/${client_id}/authz/resource-server/resource -r University --fields _id,name 2>/dev/null | jq -r '.[] | select(.name=='\"course_resource\"') | ._id')
+DELETE_scope_id=$(kcadm.sh get clients/${client_id}/authz/resource-server/scope -r University --fields id,name 2>/dev/null | jq -r '.[] | select(.name=='\"DELETE\"') | .id')
+delete_scope_id=$(kcadm.sh get clients/${client_id}/authz/resource-server/scope -r University --fields id,name 2>/dev/null | jq -r '.[] | select(.name=='\"delete\"') | .id')
+GET_scope_id=$(kcadm.sh get clients/${client_id}/authz/resource-server/scope -r University --fields id,name 2>/dev/null | jq -r '.[] | select(.name=='\"GET\"') | .id')
+view_scope_id=$(kcadm.sh get clients/${client_id}/authz/resource-server/scope -r University --fields id,name 2>/dev/null | jq -r '.[] | select(.name=='\"view\"') | .id')
+
+# create authz-policy `AllowTeacherPolicy, AllowStudentPolicy`
+kcadm.sh create clients/${client_id}/authz/resource-server/policy/role -r University \
+ -s name="AllowTeacherPolicy" -s logic="POSITIVE" -s decisionStrategy="UNANIMOUS" \
+ -s roles='[{"id": '\"${teacher_id}\"'}]'
+
+kcadm.sh create clients/${client_id}/authz/resource-server/policy/role -r University \
+ -s name="AllowStudentPolicy" -s logic="POSITIVE" -s decisionStrategy="UNANIMOUS" \
+ -s roles='[{"id": '\"${student_id}\"'}]'
+
+allow_teacher_policy_id=$(kcadm.sh get clients/${client_id}/authz/resource-server/policy -r University --fields id,name 2>/dev/null | jq -r '.[] | select(.name=='\"AllowTeacherPolicy\"') | .id')
+allow_student_policy_id=$(kcadm.sh get clients/${client_id}/authz/resource-server/policy -r University --fields id,name 2>/dev/null | jq -r '.[] | select(.name=='\"AllowStudentPolicy\"') | .id')
+
+# create authz-permission `Delete Course Permission` and `View Course Permission`
+kcadm.sh create clients/${client_id}/authz/resource-server/permission/scope -r University \
+ -s name="Delete Course Permission" -s logic="POSITIVE" -s decisionStrategy="UNANIMOUS" \
+ -s policies='['\"${allow_teacher_policy_id}\"']' \
+ -s scopes='['\"${DELETE_scope_id}\"', '\"${delete_scope_id}\"']' \
+ -s resources='['\"${course_resource_id}\"']'
+
+kcadm.sh create clients/${client_id}/authz/resource-server/permission/scope -r University \
+ -s name="View Course Permission" -s logic="POSITIVE" -s decisionStrategy="AFFIRMATIVE" \
+ -s policies='['\"${allow_teacher_policy_id}\"', '\"${allow_student_policy_id}\"']' \
+ -s scopes='['\"${GET_scope_id}\"', '\"${view_scope_id}\"']' \
+ -s resources='['\"${course_resource_id}\"']'
diff --git a/ci/pod/keycloak/server.crt.pem b/ci/pod/keycloak/server.crt.pem
new file mode 100644
index 000000000000..9c7bde3bb33a
--- /dev/null
+++ b/ci/pod/keycloak/server.crt.pem
@@ -0,0 +1,21 @@
+-----BEGIN CERTIFICATE-----
+MIIDazCCAlOgAwIBAgIUbZfnhty/ZiHPz5Aq8kK5Kr8kcSQwDQYJKoZIhvcNAQEL
+BQAwRTELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM
+GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDAeFw0yMzA0MTgxMTQzNDJaFw0zMzA0
+MTUxMTQzNDJaMEUxCzAJBgNVBAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEw
+HwYDVQQKDBhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQwggEiMA0GCSqGSIb3DQEB
+AQUAA4IBDwAwggEKAoIBAQC/F4wK7eMTVAKGDMLCXE+Y6REdA5GU6/AakJf3NEKQ
+wCrtrqO+VBPIz445+edf3EEXhjFFGPdU6p0EkF0SMLaMsVBQQJ2qcP6FloIYiyT3
+WCs/gbtdoWq53ucAfWueIyHWsovLc0VhOXm0rhTYg88nMjJ7y6vYkfLMT6qlwASn
+9Tozgjat09fWATbN7yBi4ivVVsKDo2S3jkOyVnYYMjzZO3CSkyUSMl+ZsSesseSK
+A9c2zogfKIU833njraA8blMFfdinEMI/9yceEx57IUjnpY1iWHLSItiZF+LKEpeL
+vp9gpr88ghR85ISusqAqwcmnsdAqjjw7gbPm1DIvUgVBAgMBAAGjUzBRMB0GA1Ud
+DgQWBBRvlz5ZiE2fD9ikPRqpYwsVrxZfxTAfBgNVHSMEGDAWgBRvlz5ZiE2fD9ik
+PRqpYwsVrxZfxTAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQCX
+5fOeFnX67eHI5dJB8p3U2GS21qykDVLV5ZV+JZfZwXJEygIvr/T9vs772EPxv+0/
+TO0+pGdcVswXq/6BoUFCV0rWWTDP5wTS3sV1ZsSSHil5zEutXuAI1LQGlit6w5xn
+iDURFZw3ZmOFytXKXNbca1ma4yaCZtOwVe3O36GZeOiZFzBYE2DELqy77Nz1E5+3
+jZaDnx0vonV8/hhX6FAPRPQnIXkaEH3BnVQZGD1jxipbFQQtmeeNPELy18MQo30N
+W1wOsbMMouniKUjdT16tdtzJzC+l9pVqRC+8df5PJfN56Uv9Ed6pjytkSF1SvHyJ
+iTWmyxJL9AonUkc5Oiri
+-----END CERTIFICATE-----
diff --git a/ci/pod/keycloak/server.key.pem b/ci/pod/keycloak/server.key.pem
new file mode 100755
index 000000000000..f2bc2d942856
--- /dev/null
+++ b/ci/pod/keycloak/server.key.pem
@@ -0,0 +1,28 @@
+-----BEGIN PRIVATE KEY-----
+MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQC/F4wK7eMTVAKG
+DMLCXE+Y6REdA5GU6/AakJf3NEKQwCrtrqO+VBPIz445+edf3EEXhjFFGPdU6p0E
+kF0SMLaMsVBQQJ2qcP6FloIYiyT3WCs/gbtdoWq53ucAfWueIyHWsovLc0VhOXm0
+rhTYg88nMjJ7y6vYkfLMT6qlwASn9Tozgjat09fWATbN7yBi4ivVVsKDo2S3jkOy
+VnYYMjzZO3CSkyUSMl+ZsSesseSKA9c2zogfKIU833njraA8blMFfdinEMI/9yce
+Ex57IUjnpY1iWHLSItiZF+LKEpeLvp9gpr88ghR85ISusqAqwcmnsdAqjjw7gbPm
+1DIvUgVBAgMBAAECggEBAKUrrkGYI2mGePPzPbiP38E02zTv67sEQLJFfwUOp+bE
+I5b0F9agh8VQGghkyKgkEiNKO3YVQVuluvjB66CYeIGdleT4JQ+4wVcoo+ShCN++
+1wr6kMA6kKx+Tb8vqYCzr0ELbSf6x+Jksp0Ixz3qmHixu88jWbNFW89boQ3JrnyZ
+TUgRSRdPoXcxspwcbhy6mMhwUfUSy8Zcck81dBEAjokvzbYh4jtFYMipWqro66KJ
+B9uqQme2J/rN/2PSrA6chI85Wa+JaGOSPDaGNp+DrADjoVZf1tXgzGCsA/lmVtQ0
+8YN4Dh21EjLxz4Dj5GE7RWET4Ejvv1XEih1p+zKne00CgYEA327raCD5Fnr1nGTb
+Q4ZWkcDR6EGSD6JGD0ur+UqqJhirM/5b4iGcsVK5uufb5dwk9+9z0EucXOVq/il0
+vgG2FbgRYM8kx3CDLvMYAqKJ8e5NsGJWwJVq6DsmsO1SaEId+SVFH83RHfG5/ksq
+/DgRg0Wl9FoL7sHchuSIP2QiLrMCgYEA2vHcKsMZk/KGMBHVffY3PUckirIM6vLa
+idMmm0T0HSAdviZRxQGyOnjd93ZhMqFJPTrmHOq0uAxfdFt+oRoHk/pGarBCv76L
+NnPrSnVe1pJOh7Mm7LHLgrAgeM2WW7xz6jZwc8On+9qHK97I/wAnJB8J7DvQJ2hR
+sWCDSbfKtjsCgYEAnVE77tVIjMuGo9dfiuvLiFR7d0yzys43Bg4ByEUKCEjWQoWV
+rGJ+MVxN6YvXCME4RloS8VZLgh0GeG44BJCv5Br2IXO4MbTGqQgAn9pRxkZD7S1Q
+Z8jMvTboxypSG5ZyBDp5sSr5Ulwg2SuT2IKh0gv4DVRZkoJtA41lYTzf1IECgYBd
+3NJGgt20T4S3lu2v0p5b5uQDkdF36CVIcP1cE3OUCPC3VDY5/0ApUSfXryh8TCjZ
+1yZPv086mBNUDuV6q24UQndtxaLYERgdgBSfFzJRSuffxS4qyw40OM2y/HA5Y9FN
+14jeGEMr9cN9S0VgDPC6y5O1cu8J9e8P3BBsyh5dgQKBgHMlIhOJDO/neVnax79X
+d3+5GaiggUnkd27OkYC4LhXEc/QWeHE0ByA0bDhhnsE7IVK2CVC18axOLmEJVy2g
+F6ZtxcpNrlVtF4YaOiRVUcDNnz9gX48efrpdoX2iBSFEd1NRDo/bjkVXI1L08LNf
+BbMB104PadChoGpl5R3NQQsP
+-----END PRIVATE KEY-----
diff --git a/ci/pod/opa/example.rego b/ci/pod/opa/example.rego
index 2eb912e08c52..a9161042b887 100644
--- a/ci/pod/opa/example.rego
+++ b/ci/pod/opa/example.rego
@@ -29,6 +29,11 @@ allow {
request.query["user"]
}
+allow {
+ request.method == "GET"
+ startswith(request.path, "/echo")
+}
+
reason = users[request.query["user"]].reason {
not allow
request.query["user"]
@@ -39,6 +44,11 @@ headers = users[request.query["user"]].headers {
request.query["user"]
}
+headers = {"user": request.query["user"]} {
+ allow
+ request.query["user"]
+}
+
status_code = users[request.query["user"]].status_code {
not allow
request.query["user"]
diff --git a/ci/pod/openfunction/build-function-image.sh b/ci/pod/openfunction/build-function-image.sh
index fc4a4945ed3e..dc9f34a8b15b 100755
--- a/ci/pod/openfunction/build-function-image.sh
+++ b/ci/pod/openfunction/build-function-image.sh
@@ -23,6 +23,6 @@ if [ ! -f "./pack" ]; then
fi
# please update function-example/*/hello.go if you want to update function
-./pack build test-uri-image --path ./ci/pod/openfunction/function-example/test-uri --builder openfunction/builder-go:v2.4.0-1.17 --env FUNC_NAME="HelloWorld" --env FUNC_CLEAR_SOURCE=true --env FUNC_GOPROXY="https://goproxy.cn"
-./pack build test-body-image --path ./ci/pod/openfunction/function-example/test-body --builder openfunction/builder-go:v2.4.0-1.17 --env FUNC_NAME="HelloWorld" --env FUNC_CLEAR_SOURCE=true --env FUNC_GOPROXY="https://goproxy.cn"
-./pack build test-header-image --path ./ci/pod/openfunction/function-example/test-header --builder openfunction/builder-go:v2.4.0-1.17 --env FUNC_NAME="HelloWorld" --env FUNC_CLEAR_SOURCE=true --env FUNC_GOPROXY="https://goproxy.cn"
+./pack build test-uri-image --path ./ci/pod/openfunction/function-example/test-uri --builder openfunction/builder-go:v2.4.0-1.17 --env FUNC_NAME="HelloWorld" --env FUNC_CLEAR_SOURCE=true --env FUNC_GOPROXY="https://proxy.golang.org"
+./pack build test-body-image --path ./ci/pod/openfunction/function-example/test-body --builder openfunction/builder-go:v2.4.0-1.17 --env FUNC_NAME="HelloWorld" --env FUNC_CLEAR_SOURCE=true --env FUNC_GOPROXY="https://proxy.golang.org"
+./pack build test-header-image --path ./ci/pod/openfunction/function-example/test-header --builder openfunction/builder-go:v2.4.0-1.17 --env FUNC_NAME="HelloWorld" --env FUNC_CLEAR_SOURCE=true --env FUNC_GOPROXY="https://proxy.golang.org"
diff --git a/ci/pod/openfunction/function-example/test-body/go.mod b/ci/pod/openfunction/function-example/test-body/go.mod
index 9b9c54475baa..3e2f6155748e 100644
--- a/ci/pod/openfunction/function-example/test-body/go.mod
+++ b/ci/pod/openfunction/function-example/test-body/go.mod
@@ -20,12 +20,12 @@ require (
go.uber.org/multierr v1.7.0 // indirect
go.uber.org/zap v1.19.1 // indirect
golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f // indirect
- golang.org/x/sys v0.1.0 // indirect
+ golang.org/x/sys v0.0.0-20211019181941-9d821ace8654 // indirect
golang.org/x/text v0.3.7 // indirect
google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2 // indirect
google.golang.org/grpc v1.40.0 // indirect
google.golang.org/protobuf v1.28.0 // indirect
- gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
+ gopkg.in/yaml.v3 v3.0.0 // indirect
k8s.io/klog/v2 v2.30.0 // indirect
skywalking.apache.org/repo/goapi v0.0.0-20220401015832-2c9eee9481eb // indirect
)
diff --git a/ci/pod/openfunction/function-example/test-body/go.sum b/ci/pod/openfunction/function-example/test-body/go.sum
index 149c3a4469d0..35f77fd70a02 100644
--- a/ci/pod/openfunction/function-example/test-body/go.sum
+++ b/ci/pod/openfunction/function-example/test-body/go.sum
@@ -1398,9 +1398,8 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211019181941-9d821ace8654 h1:id054HUawV2/6IGm2IV8KZQjqtwAOo2CYlOToYqa0d0=
golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.1.0 h1:kunALQeHf1/185U1i0GOB/fy1IPRDDpuoOOqRReG57U=
-golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -1696,8 +1695,9 @@ gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0 h1:hjy8E9ON/egN1tAYqKb61G10WtihqetD4sz2H+8nIeA=
+gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools v0.0.0-20181223230014-1083505acf35/go.mod h1:R//lfYlUuTOTfblYI3lGoAAAebUdzjvbmQsuB7Ykd90=
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
diff --git a/ci/pod/openfunction/function-example/test-uri/go.mod b/ci/pod/openfunction/function-example/test-uri/go.mod
index 7d7fb3203cc5..94075e599c8f 100644
--- a/ci/pod/openfunction/function-example/test-uri/go.mod
+++ b/ci/pod/openfunction/function-example/test-uri/go.mod
@@ -20,9 +20,9 @@ require (
go.uber.org/atomic v1.9.0 // indirect
go.uber.org/multierr v1.7.0 // indirect
go.uber.org/zap v1.21.0 // indirect
- golang.org/x/net v0.7.0 // indirect
- golang.org/x/sys v0.5.0 // indirect
- golang.org/x/text v0.7.0 // indirect
+ golang.org/x/net v0.0.0-20220621193019-9d032be2e588 // indirect
+ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a // indirect
+ golang.org/x/text v0.3.7 // indirect
google.golang.org/genproto v0.0.0-20220622171453-ea41d75dfa0f // indirect
google.golang.org/grpc v1.47.0 // indirect
google.golang.org/protobuf v1.28.0 // indirect
diff --git a/ci/pod/openfunction/function-example/test-uri/go.sum b/ci/pod/openfunction/function-example/test-uri/go.sum
index 28f9e57c8fbe..d7837ccf355c 100644
--- a/ci/pod/openfunction/function-example/test-uri/go.sum
+++ b/ci/pod/openfunction/function-example/test-uri/go.sum
@@ -1656,7 +1656,6 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
-github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
github.com/yuin/gopher-lua v0.0.0-20191220021717-ab39c6098bdb/go.mod h1:gqRgreBUhTSL0GeU64rtZ3Uq3wtjOa/TB2YfrtkCbVQ=
github.com/yuin/gopher-lua v0.0.0-20200603152657-dc2b0ca8b37e/go.mod h1:gqRgreBUhTSL0GeU64rtZ3Uq3wtjOa/TB2YfrtkCbVQ=
github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs=
@@ -1856,7 +1855,6 @@ golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
-golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -1941,10 +1939,8 @@ golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220621193019-9d032be2e588 h1:9ubFuySsnAJYGyJrZ3koiEv8FyqofCBdz3G9Mbf2YFc=
golang.org/x/net v0.0.0-20220621193019-9d032be2e588/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
-golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
-golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g=
-golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -1977,7 +1973,6 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180828065106-d99a578cf41b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -2129,16 +2124,13 @@ golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a h1:dGzPydgVsqGcTRVwiLJ1jVbufYwmzD3LfVPLKsKg+0k=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU=
-golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -2148,9 +2140,8 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
-golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo=
-golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -2247,7 +2238,6 @@ golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.6-0.20210820212750-d4cc65f0b2ff/go.mod h1:YD9qOF0M9xpSpdWTBbzEl5e/RnCefISl8E5Noe10jFM=
golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
-golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
diff --git a/ci/linux_openresty_1_19_runner.sh b/ci/pod/otelcol-contrib/config.yaml
old mode 100755
new mode 100644
similarity index 79%
rename from ci/linux_openresty_1_19_runner.sh
rename to ci/pod/otelcol-contrib/config.yaml
index ed1751308926..438f04c8b9fe
--- a/ci/linux_openresty_1_19_runner.sh
+++ b/ci/pod/otelcol-contrib/config.yaml
@@ -1,4 +1,3 @@
-#!/usr/bin/env bash
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
@@ -16,6 +15,16 @@
# limitations under the License.
#
-
-export OPENRESTY_VERSION=1.19.3.2
-. ./ci/linux_openresty_common_runner.sh
+receivers:
+ otlp:
+ protocols:
+ grpc:
+ http:
+exporters:
+ file:
+ path: /etc/otelcol-contrib/data-otlp.json
+service:
+ pipelines:
+ traces:
+ receivers: [otlp]
+ exporters: [file]
diff --git a/ci/pod/vector/vector.toml b/ci/pod/vector/vector.toml
new file mode 100644
index 000000000000..26716bc3893b
--- /dev/null
+++ b/ci/pod/vector/vector.toml
@@ -0,0 +1,111 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+[sources.log-from-tcp]
+type = "socket"
+address = "0.0.0.0:3000"
+host_key = "host"
+mode = "tcp"
+port_key = "port"
+shutdown_timeout_secs = 30
+socket_file_mode = 511
+
+[sources.log-from-http]
+type = "http_server"
+address = "0.0.0.0:3001"
+
+[sources.log-from-udp]
+type = "socket"
+address = "0.0.0.0:8127"
+host_key = "host"
+mode = "udp"
+port_key = "port"
+
+[sources.log-from-tls]
+type = "socket"
+address = "0.0.0.0:43000"
+host_key = "host"
+mode = "tcp"
+port_key = "port"
+tls.enabled = true
+tls.verify = true
+tls.ca_file = "/certs/vector_logs_ca.crt"
+tls.crt_file = "/certs/vector_logs_server.crt"
+tls.key_file = "/certs/vector_logs_server.key"
+
+[sources.log-from-syslog-tcp]
+type = "syslog"
+address = "0.0.0.0:5140"
+mode = "tcp"
+
+[sources.log-from-syslog-udp]
+type = "syslog"
+address = "0.0.0.0:5150"
+mode = "udp"
+
+[sources.log-from-splunk]
+type = "splunk_hec"
+address = "0.0.0.0:18088"
+valid_tokens = [
+ "BD274822-96AA-4DA6-90EC-18940FB2414C"
+]
+
+[sinks.log-2-console]
+inputs = [ "log-from-tcp", "log-from-tls", "log-from-syslog-tcp", "log-from-syslog-udp", "log-from-udp", "log-from-splunk", "log-from-http"]
+type = "console"
+encoding.codec = "json"
+
+[sinks.log-2-tcp-file]
+inputs = [ "log-from-tcp" ]
+type = "file"
+encoding.codec = "text"
+path = "/etc/vector/tcp.log"
+
+[sinks.log-2-http-file]
+inputs = [ "log-from-http" ]
+type = "file"
+encoding.codec = "text"
+path = "/etc/vector/http.log"
+
+[sinks.log-2-udp-file]
+inputs = [ "log-from-udp" ]
+type = "file"
+encoding.codec = "json"
+path = "/etc/vector/udp.log"
+
+[sinks.tls-log-2-file]
+inputs = [ "log-from-tls" ]
+type = "file"
+encoding.codec = "json"
+path = "/etc/vector/tls-datas.log"
+
+[sinks.log-2-syslog-tcp-file]
+inputs = [ "log-from-syslog-tcp" ]
+type = "file"
+encoding.codec = "text"
+path = "/etc/vector/syslog-tcp.log"
+
+[sinks.log-2-splunk-file]
+inputs = [ "log-from-splunk" ]
+type = "file"
+encoding.codec = "json"
+path = "/etc/vector/splunk.log"
+
+[sinks.log-2-syslog-udp-file]
+inputs = [ "log-from-syslog-udp" ]
+type = "file"
+encoding.codec = "text"
+path = "/etc/vector/syslog-udp.log"
diff --git a/ci/redhat-ci.sh b/ci/redhat-ci.sh
new file mode 100755
index 000000000000..18fed3208290
--- /dev/null
+++ b/ci/redhat-ci.sh
@@ -0,0 +1,104 @@
+#!/usr/bin/env bash
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+. ./ci/common.sh
+install_dependencies() {
+ export_or_prefix
+
+ # install build & runtime deps
+ yum install -y --disablerepo=* --enablerepo=ubi-8-appstream-rpms --enablerepo=ubi-8-baseos-rpms \
+ wget tar gcc automake autoconf libtool make unzip git sudo openldap-devel hostname \
+ which ca-certificates openssl-devel
+
+ # install newer curl
+ yum makecache
+ yum install -y libnghttp2-devel
+ install_curl
+
+ # install openresty to make apisix's rpm test work
+ yum install -y yum-utils && yum-config-manager --add-repo https://openresty.org/package/centos/openresty.repo
+ yum install -y openresty-1.21.4.2 openresty-debug-1.21.4.2 openresty-openssl111-debug-devel pcre pcre-devel xz
+
+ # install luarocks
+ ./utils/linux-install-luarocks.sh
+
+ # install etcdctl
+ ./ci/linux-install-etcd-client.sh
+
+ # install vault cli capabilities
+ install_vault_cli
+
+ # install test::nginx
+ yum install -y --disablerepo=* --enablerepo=ubi-8-appstream-rpms --enablerepo=ubi-8-baseos-rpms cpanminus perl
+ cpanm --notest Test::Nginx IPC::Run > build.log 2>&1 || (cat build.log && exit 1)
+
+    # add go1.17 binary to the path
+ mkdir build-cache
+ pushd build-cache/
+ # Go is required inside the container.
+ wget -q https://golang.org/dl/go1.17.linux-amd64.tar.gz && tar -xf go1.17.linux-amd64.tar.gz
+ export PATH=$PATH:$(pwd)/go/bin
+ popd
+ # install and start grpc_server_example
+ pushd t/grpc_server_example
+
+ CGO_ENABLED=0 go build
+ ./grpc_server_example \
+ -grpc-address :50051 -grpcs-address :50052 -grpcs-mtls-address :50053 -grpc-http-address :50054 \
+ -crt ../certs/apisix.crt -key ../certs/apisix.key -ca ../certs/mtls_ca.crt \
+ > grpc_server_example.log 2>&1 || (cat grpc_server_example.log && exit 1)&
+
+ popd
+ # wait for grpc_server_example to fully start
+ sleep 3
+
+    # install grpcurl
+ install_grpcurl
+
+ # install nodejs
+ install_nodejs
+
+ # grpc-web server && client
+ pushd t/plugin/grpc-web
+ ./setup.sh
+ # back to home directory
+ popd
+
+    # install test-nginx and Lua dependencies
+ git clone https://github.com/openresty/test-nginx.git test-nginx
+ create_lua_deps
+}
+
+run_case() {
+ export_or_prefix
+ make init
+ set_coredns
+ # run test cases
+ FLUSH_ETCD=1 prove --timer -Itest-nginx/lib -I./ -r ${TEST_FILE_SUB_DIR} | tee /tmp/test.result
+ rerun_flaky_tests /tmp/test.result
+}
+
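+# Entry point: this script is expected to be invoked by the CI workflow with either
+# `install_dependencies` or `run_case` as its first argument (an assumption based on
+# the case statement below).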
+case_opt=$1
+case $case_opt in
+ (install_dependencies)
+ install_dependencies
+ ;;
+ (run_case)
+ run_case
+ ;;
+esac
diff --git a/conf/config-default.yaml b/conf/config-default.yaml
index 2765afe9c43f..05dc50ad72d1 100755
--- a/conf/config-default.yaml
+++ b/conf/config-default.yaml
@@ -14,53 +14,48 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-# PLEASE DO NOT UPDATE THIS FILE!
-# If you want to set the specified configuration value, you can set the new
-# value in the conf/config.yaml file.
+# CAUTION: DO NOT MODIFY DEFAULT CONFIGURATIONS IN THIS FILE.
+# Keep the custom configurations in conf/config.yaml.
#
apisix:
- # node_listen: 9080 # APISIX listening port
- node_listen: # This style support multiple ports
+ # node_listen: 9080 # APISIX listening port.
+ node_listen: # APISIX listening ports.
- 9080
# - port: 9081
- # enable_http2: true # If not set, the default value is `false`.
- # - ip: 127.0.0.2 # Specific IP, If not set, the default value is `0.0.0.0`.
+ # enable_http2: true # If not set, default to `false`.
+ # - ip: 127.0.0.2 # If not set, default to `0.0.0.0`
# port: 9082
# enable_http2: true
- enable_admin: true
- enable_dev_mode: false # Sets nginx worker_processes to 1 if set to true
- enable_reuseport: true # Enable nginx SO_REUSEPORT switch if set to true.
- show_upstream_status_in_response_header: false # when true all upstream status write to `X-APISIX-Upstream-Status` otherwise only 5xx code
+ enable_admin: true # Admin API
+ enable_dev_mode: false # If true, set nginx `worker_processes` to 1.
+ enable_reuseport: true # If true, enable nginx SO_REUSEPORT option.
+ show_upstream_status_in_response_header: false # If true, include the upstream HTTP status code in
+ # the response header `X-APISIX-Upstream-Status`.
+ # If false, show `X-APISIX-Upstream-Status` only if
+ # the upstream response code is 5xx.
enable_ipv6: true
- #proxy_protocol: # Proxy Protocol configuration
- # listen_http_port: 9181 # The port with proxy protocol for http, it differs from node_listen and admin_listen.
- # This port can only receive http request with proxy protocol, but node_listen & admin_listen
- # can only receive http request. If you enable proxy protocol, you must use this port to
- # receive http request with proxy protocol
- # listen_https_port: 9182 # The port with proxy protocol for https
- # enable_tcp_pp: true # Enable the proxy protocol for tcp proxy, it works for stream_proxy.tcp option
- # enable_tcp_pp_to_upstream: true # Enables the proxy protocol to the upstream server
- enable_server_tokens: true # Whether the APISIX version number should be shown in Server header.
- # It's enabled by default.
-
- # configurations to load third party code and/or override the builtin one.
- extra_lua_path: "" # extend lua_package_path to load third party code
- extra_lua_cpath: "" # extend lua_package_cpath to load third party code
- #lua_module_hook: "my_project.my_hook" # the hook module which will be used to inject third party code into APISIX
-
- proxy_cache: # Proxy Caching configuration
- cache_ttl: 10s # The default caching time in disk if the upstream does not specify the cache time
- zones: # The parameters of a cache
- - name: disk_cache_one # The name of the cache, administrator can specify
- # which cache to use by name in the admin api (disk|memory)
- memory_size: 50m # The size of shared memory, it's used to store the cache index for
- # disk strategy, store cache content for memory strategy (disk|memory)
- disk_size: 1G # The size of disk, it's used to store the cache data (disk)
- disk_path: /tmp/disk_cache_one # The path to store the cache data (disk)
- cache_levels: 1:2 # The hierarchy levels of a cache (disk)
- #- name: disk_cache_two
+ # proxy_protocol: # PROXY Protocol configuration
+ # listen_http_port: 9181 # APISIX listening port for HTTP traffic with PROXY protocol.
+ # listen_https_port: 9182 # APISIX listening port for HTTPS traffic with PROXY protocol.
+ # enable_tcp_pp: true # Enable the PROXY protocol when stream_proxy.tcp is set.
+  #   enable_tcp_pp_to_upstream: true   # Enable the PROXY protocol to the upstream server.
+
+ enable_server_tokens: true # If true, show APISIX version in the `Server` response header.
+ extra_lua_path: "" # Extend lua_package_path to load third-party code.
+ extra_lua_cpath: "" # Extend lua_package_cpath to load third-party code.
+ # lua_module_hook: "my_project.my_hook" # Hook module used to inject third-party code into APISIX.
+
+ proxy_cache: # Proxy Caching configuration
+ cache_ttl: 10s # The default caching time on disk if the upstream does not specify a caching time.
+ zones:
+ - name: disk_cache_one # Name of the cache.
+ memory_size: 50m # Size of the memory to store the cache index.
+ disk_size: 1G # Size of the disk to store the cache data.
+ disk_path: /tmp/disk_cache_one # Path to the cache file for disk cache.
+ cache_levels: 1:2 # Cache hierarchy levels of disk cache.
+ # - name: disk_cache_two
# memory_size: 50m
# disk_size: 1G
# disk_path: "/tmp/disk_cache_two"
@@ -68,108 +63,131 @@ apisix:
- name: memory_cache
memory_size: 50m
- delete_uri_tail_slash: false # delete the '/' at the end of the URI
- # The URI normalization in servlet is a little different from the RFC's.
- # See https://github.com/jakartaee/servlet/blob/master/spec/src/main/asciidoc/servlet-spec-body.adoc#352-uri-path-canonicalization,
- # which is used under Tomcat.
- # Turn this option on if you want to be compatible with servlet when matching URI path.
- normalize_uri_like_servlet: false
+ delete_uri_tail_slash: false # Delete the '/' at the end of the URI
+ normalize_uri_like_servlet: false # If true, use the same path normalization rules as the Java
+ # servlet specification. See https://github.com/jakartaee/servlet/blob/master/spec/src/main/asciidoc/servlet-spec-body.adoc#352-uri-path-canonicalization, which is used in Tomcat.
+
router:
- http: radixtree_host_uri # radixtree_uri: match route by uri(base on radixtree)
- # radixtree_host_uri: match route by host + uri(base on radixtree)
- # radixtree_uri_with_parameter: like radixtree_uri but match uri with parameters,
- # see https://github.com/api7/lua-resty-radixtree/#parameters-in-path for
- # more details.
- ssl: radixtree_sni # radixtree_sni: match route by SNI(base on radixtree)
- #stream_proxy: # TCP/UDP proxy
- # only: true # use stream proxy only, don't enable HTTP stuff
- # tcp: # TCP proxy port list
- # - addr: 9100
- # tls: true
- # - addr: "127.0.0.1:9101"
- # udp: # UDP proxy port list
- # - 9200
- # - "127.0.0.1:9201"
- #dns_resolver: # If not set, read from `/etc/resolv.conf`
- # - 1.1.1.1
- # - 8.8.8.8
- #dns_resolver_valid: 30 # if given, override the TTL of the valid records. The unit is second.
- resolver_timeout: 5 # resolver timeout
- enable_resolv_search_opt: true # enable search option in resolv.conf
+ http: radixtree_host_uri # radixtree_host_uri: match route by host and URI
+ # radixtree_uri: match route by URI
+ # radixtree_uri_with_parameter: similar to radixtree_uri but match URI with parameters. See https://github.com/api7/lua-resty-radixtree/#parameters-in-path for more details.
+ ssl: radixtree_sni # radixtree_sni: match route by SNI
+
+ # http is the default proxy mode. proxy_mode can be one of `http`, `stream`, or `http&stream`
+ proxy_mode: http
+ # stream_proxy: # TCP/UDP L4 proxy
+ # only: true # Enable L4 proxy only without L7 proxy.
+ # tcp:
+ # - addr: 9100 # Set the TCP proxy listening ports.
+ # tls: true
+ # - addr: "127.0.0.1:9101"
+ # udp: # Set the UDP proxy listening ports.
+ # - 9200
+ # - "127.0.0.1:9201"
+
+ # dns_resolver: # If not set, read from `/etc/resolv.conf`
+ # - 1.1.1.1
+ # - 8.8.8.8
+  # dns_resolver_valid: 30          # Override the default TTL of the DNS records, in seconds.
+ resolver_timeout: 5 # Set the time in seconds that the server will wait for a response from the
+ # DNS resolver before timing out.
+ enable_resolv_search_opt: true # If true, use search option in the resolv.conf file in DNS lookups.
+
ssl:
enable: true
- listen: # APISIX listening port in https.
+ listen: # APISIX listening port for HTTPS traffic.
- port: 9443
enable_http2: true
- # - ip: 127.0.0.3 # Specific IP, If not set, the default value is `0.0.0.0`.
- # port: 9445
- # enable_http2: true
- #ssl_trusted_certificate: /path/to/ca-cert # Specifies a file path with trusted CA certificates in the PEM format
- # used to verify the certificate when APISIX needs to do SSL/TLS handshaking
- # with external services (e.g. etcd)
- ssl_protocols: TLSv1.2 TLSv1.3
+ # - ip: 127.0.0.3 # If not set, default to `0.0.0.0`.
+ # port: 9445
+ # enable_http2: true
+    # ssl_trusted_certificate: /path/to/ca-cert  # Set the path to the trusted CA certificates in the PEM format,
+                                                 # used to verify certificates when APISIX performs SSL/TLS
+                                                 # handshakes with external services (e.g. etcd).
+ ssl_protocols: TLSv1.2 TLSv1.3 # TLS versions supported.
ssl_ciphers: ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384
- ssl_session_tickets: false # disable ssl_session_tickets by default for 'ssl_session_tickets' would make Perfect Forward Secrecy useless.
- # ref: https://github.com/mozilla/server-side-tls/issues/135
-
- key_encrypt_salt: # If not set, will save origin ssl key into etcd.
- - edd1c9f0985e76a2 # If set this, the key_encrypt_salt should be an array whose elements are string, and the size is also 16, and it will encrypt ssl key with AES-128-CBC
- # !!! So do not change it after saving your ssl, it can't decrypt the ssl keys have be saved if you change !!
- # Only use the first key to encrypt, and decrypt in the order of the array.
-
- #fallback_sni: "my.default.domain" # If set this, when the client doesn't send SNI during handshake, the fallback SNI will be used instead
- enable_control: true
- #control:
+ ssl_session_tickets: false # If true, session tickets are used for SSL/TLS connections.
+                                          # Disabled by default because it renders Perfect Forward Secrecy (PFS)
+ # useless. See https://github.com/mozilla/server-side-tls/issues/135.
+
+ key_encrypt_salt: # This field is only used to encrypt the private key of SSL.
+ - edd1c9f0985e76a2 # Set the encryption key for AES-128-CBC. It should be a
+ # hexadecimal string of length 16.
+ # If not set, APISIX saves the original data into etcd.
+ # CAUTION: If you would like to update the key, add the new key as the
+ # first item in the array and keep the older keys below the newly added
+ # key, so that data can be decrypted with the older keys and encrypted
+ # with the new key. Removing the old keys directly can render the data
+ # unrecoverable.
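+                                             # A hypothetical rotation example (the new key below is made up
+                                             # purely for illustration), with the new key placed first:
+                                             #   key_encrypt_salt:
+                                             #     - 1f2e3d4c5b6a7980   # new key, used to encrypt
+                                             #     - edd1c9f0985e76a2   # old key, kept to decrypt existing data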
+
+ # fallback_sni: "my.default.domain" # Fallback SNI to be used if the client does not send SNI during
+                                          # the handshake.
+
+ enable_control: true # Control API
+ # control:
# ip: 127.0.0.1
# port: 9090
- disable_sync_configuration_during_start: false # safe exit. Remove this once the feature is stable
- data_encryption: # add `encrypt_fields = { $field },` in plugin schema to enable encryption
- enable: false # if not set, the default value is `false`.
- keyring:
- - qeddd145sfvddff3 # If not set, will save origin value into etcd.
- # If set this, the keyring should be an array whose elements are string, and the size is also 16, and it will encrypt fields with AES-128-CBC
- # !!! So do not change it after encryption, it can't decrypt the fields have be saved if you change !!
- # Only use the first key to encrypt, and decrypt in the order of the array.
-
-nginx_config: # config for render the template to generate nginx.conf
- #user: root # specifies the execution user of the worker process.
- # the "user" directive makes sense only if the master process runs with super-user privileges.
- # if you're not root user,the default is current user.
- error_log: logs/error.log
- error_log_level: warn # warn,error
- worker_processes: auto # if you want use multiple cores in container, you can inject the number of cpu as environment variable "APISIX_WORKER_PROCESSES"
- enable_cpu_affinity: false # disable CPU affinity by default, if APISIX is deployed on a physical machine, it can be enabled and work well.
- worker_rlimit_nofile: 20480 # the number of files a worker process can open, should be larger than worker_connections
- worker_shutdown_timeout: 240s # timeout for a graceful shutdown of worker processes
-
- max_pending_timers: 16384 # increase it if you see "too many pending timers" error
- max_running_timers: 4096 # increase it if you see "lua_max_running_timers are not enough" error
+
+  disable_sync_configuration_during_start: false  # Safe exit. To be removed once the feature is stable.
+
+ data_encryption: # Encrypt fields specified in `encrypt_fields` in plugin schema.
+ enable: false
+ keyring: # Set the encryption key for AES-128-CBC. It should be a
+ - qeddd145sfvddff3 # hexadecimal string of length 16.
+ # If not set, APISIX saves the original data into etcd.
+ # CAUTION: If you would like to update the key, add the new key as the
+ # first item in the array and keep the older keys below the newly added
+ # key, so that data can be decrypted with the older keys and encrypted
+ # with the new key. Removing the old keys directly can render the data
+ # unrecoverable.
+
+nginx_config:                     # Config to render the template for generating nginx.conf
+ # user: root # Set the execution user of the worker process. This is only
+ # effective if the master process runs with super-user privileges.
+ error_log: logs/error.log # Location of the error log.
+ error_log_level: warn # Logging level: info, debug, notice, warn, error, crit, alert, or emerg.
+ worker_processes: auto # Automatically determine the optimal number of worker processes based
+ # on the available system resources.
+                                  # If you want to use multiple cores in a container, you can inject the number
+                                  # of CPU cores as the environment variable "APISIX_WORKER_PROCESSES".
+ enable_cpu_affinity: false # Disable CPU affinity by default as worker_cpu_affinity affects the
+ # behavior of APISIX in containers. For example, multiple instances could
+ # be bound to one CPU core, which is not desirable.
+ # If APISIX is deployed on a physical machine, CPU affinity can be enabled.
+ worker_rlimit_nofile: 20480 # The number of files a worker process can open.
+ # The value should be larger than worker_connections.
+ worker_shutdown_timeout: 240s # Timeout for a graceful shutdown of worker processes.
+
+ max_pending_timers: 16384 # The maximum number of pending timers that can be active at any given time.
+ # Error "too many pending timers" indicates the threshold is reached.
+ max_running_timers: 4096 # The maximum number of running timers that can be active at any given time.
+                                  # The error "lua_max_running_timers are not enough" indicates the
+ # threshold is reached.
event:
worker_connections: 10620
- #envs: # allow to get a list of environment variables
+
+ # envs: # Get environment variables.
# - TEST_ENV
meta:
- lua_shared_dict:
+ lua_shared_dict: # Nginx Lua shared memory zone. Size units are m or k.
prometheus-metrics: 15m
stream:
- enable_access_log: false # enable access log or not, default false
- access_log: logs/access_stream.log
- access_log_format: "$remote_addr [$time_local] $protocol $status $bytes_sent $bytes_received $session_time"
- # create your custom log format by visiting http://nginx.org/en/docs/varindex.html
- access_log_format_escape: default # allows setting json or default characters escaping in variables
- lua_shared_dict:
+ enable_access_log: false # Enable stream proxy access logging.
+ access_log: logs/access_stream.log # Location of the stream access log.
+ access_log_format: "$remote_addr [$time_local] $protocol $status $bytes_sent $bytes_received $session_time" # Customize log format: http://nginx.org/en/docs/varindex.html
+ access_log_format_escape: default # Escape default or json characters in variables.
+ lua_shared_dict: # Nginx Lua shared memory zone. Size units are m or k.
etcd-cluster-health-check-stream: 10m
lrucache-lock-stream: 10m
plugin-limit-conn-stream: 10m
worker-events-stream: 10m
tars-stream: 1m
- # As user can add arbitrary configurations in the snippet,
- # it is user's responsibility to check the configurations
- # don't conflict with APISIX.
+ # Add other custom Nginx configurations.
+ # Users are responsible for validating the custom configurations
+ # to ensure they are not in conflict with APISIX configurations.
main_configuration_snippet: |
# Add custom Nginx main configuration to nginx.conf.
# The configuration should be well indented!
@@ -193,41 +211,48 @@ nginx_config: # config for render the template to generate n
# The configuration should be well indented!
http:
- enable_access_log: true # enable access log or not, default true
- access_log: logs/access.log
+ enable_access_log: true # Enable HTTP proxy access logging.
+ access_log: logs/access.log # Location of the access log.
+    access_log_buffer: 16384          # Set the buffer size of the access log.
access_log_format: "$remote_addr - $remote_user [$time_local] $http_host \"$request\" $status $body_bytes_sent $request_time \"$http_referer\" \"$http_user_agent\" $upstream_addr $upstream_status $upstream_response_time \"$upstream_scheme://$upstream_host$upstream_uri\""
- access_log_format_escape: default # allows setting json or default characters escaping in variables
- keepalive_timeout: 60s # timeout during which a keep-alive client connection will stay open on the server side.
- client_header_timeout: 60s # timeout for reading client request header, then 408 (Request Time-out) error is returned to the client
- client_body_timeout: 60s # timeout for reading client request body, then 408 (Request Time-out) error is returned to the client
- client_max_body_size: 0 # The maximum allowed size of the client request body.
- # If exceeded, the 413 (Request Entity Too Large) error is returned to the client.
- # Note that unlike Nginx, we don't limit the body size by default.
-
- send_timeout: 10s # timeout for transmitting a response to the client.then the connection is closed
- underscores_in_headers: "on" # default enables the use of underscores in client request header fields
- real_ip_header: X-Real-IP # http://nginx.org/en/docs/http/ngx_http_realip_module.html#real_ip_header
- real_ip_recursive: "off" # http://nginx.org/en/docs/http/ngx_http_realip_module.html#real_ip_recursive
- real_ip_from: # http://nginx.org/en/docs/http/ngx_http_realip_module.html#set_real_ip_from
+ # Customize log format: http://nginx.org/en/docs/varindex.html
+ access_log_format_escape: default # Escape default or json characters in variables.
+    keepalive_timeout: 60s             # Set the maximum time a keep-alive client connection stays open on the server side.
+ client_header_timeout: 60s # Set the maximum time waiting for client to send the entire HTTP
+ # request header before closing the connection.
+ client_body_timeout: 60s # Set the maximum time waiting for client to send the request body.
+ client_max_body_size: 0 # Set the maximum allowed size of the client request body.
+ # Default to 0, unlimited.
+ # Unlike Nginx, APISIX does not limit the body size by default.
+ # If exceeded, the 413 (Request Entity Too Large) error is returned.
+ send_timeout: 10s # Set the maximum time for transmitting a response to the client before closing.
+ underscores_in_headers: "on" # Allow HTTP request headers to contain underscores in their names.
+ real_ip_header: X-Real-IP # https://nginx.org/en/docs/http/ngx_http_realip_module.html#real_ip_header
+ real_ip_recursive: "off" # http://nginx.org/en/docs/http/ngx_http_realip_module.html#real_ip_recursive
+ real_ip_from: # http://nginx.org/en/docs/http/ngx_http_realip_module.html#set_real_ip_from
- 127.0.0.1
- "unix:"
- #custom_lua_shared_dict: # add custom shared cache to nginx.conf
- # ipc_shared_dict: 100m # custom shared cache, format: `cache-key: cache-size`
- # Enables or disables passing of the server name through TLS Server Name Indication extension (SNI, RFC 6066)
- # when establishing a connection with the proxied HTTPS server.
- proxy_ssl_server_name: true
+ # custom_lua_shared_dict: # Custom Nginx Lua shared memory zone for nginx.conf. Size units are m or k.
+ # ipc_shared_dict: 100m # Custom shared cache, format: `cache-key: cache-size`
+
+ proxy_ssl_server_name: true # Send the server name in the SNI extension when establishing an SSL/TLS
+ # connection with the upstream server, allowing the upstream server to
+ # select the appropriate SSL/TLS certificate and configuration based on
+ # the requested server name.
+
upstream:
- keepalive: 320 # Sets the maximum number of idle keepalive connections to upstream servers that are preserved in the cache of each worker process.
- # When this number is exceeded, the least recently used connections are closed.
- keepalive_requests: 1000 # Sets the maximum number of requests that can be served through one keepalive connection.
- # After the maximum number of requests is made, the connection is closed.
- keepalive_timeout: 60s # Sets a timeout during which an idle keepalive connection to an upstream server will stay open.
- charset: utf-8 # Adds the specified charset to the "Content-Type" response header field, see
- # http://nginx.org/en/docs/http/ngx_http_charset_module.html#charset
- variables_hash_max_size: 2048 # Sets the maximum size of the variables hash table.
-
- lua_shared_dict:
+    keepalive: 320                 # Set the maximum number of idle keep-alive connections to the upstream
+                                   # servers cached in each worker process. When this number is exceeded,
+                                   # the least recently used connection is closed.
+ keepalive_requests: 1000 # Set the maximum number of requests that can be served through one
+ # keep-alive connection.
+ # After the maximum number of requests is made, the connection is closed.
+    keepalive_timeout: 60s         # Set the maximum time an idle keep-alive connection to an upstream server stays open.
+ charset: utf-8 # Add the charset to the "Content-Type" response header field.
+ # See http://nginx.org/en/docs/http/ngx_http_charset_module.html#charset
+ variables_hash_max_size: 2048 # Set the maximum size of the variables hash table.
+
+ lua_shared_dict: # Nginx Lua shared memory zone. Size units are m or k.
internal-status: 10m
plugin-limit-req: 10m
plugin-limit-count: 10m
@@ -251,93 +276,94 @@ nginx_config: # config for render the template to generate n
tars: 1m
cas-auth: 10m
-#discovery: # service discovery center
+# discovery: # Service Discovery
# dns:
# servers:
-# - "127.0.0.1:8600" # use the real address of your dns server
-# order: # order in which to try different dns record types when resolving
-# - last # "last" will try the last previously successful type for a hostname.
+# - "127.0.0.1:8600" # Replace with the address of your DNS server.
+#    order:                          # Resolve DNS records in this order.
+#      - last                        # Try the last previously successful type for the hostname.
# - SRV
# - A
# - AAAA
# - CNAME
-# eureka:
-# host: # it's possible to define multiple eureka hosts addresses of the same eureka cluster.
+# eureka: # Eureka
+# host: # Eureka address(es)
# - "http://127.0.0.1:8761"
# prefix: /eureka/
-# fetch_interval: 30 # default 30s
-# weight: 100 # default weight for node
+# fetch_interval: 30 # Default 30s
+# weight: 100 # Default weight for node
# timeout:
-# connect: 2000 # default 2000ms
-# send: 2000 # default 2000ms
-# read: 5000 # default 5000ms
-# nacos:
-# host:
+# connect: 2000 # Default 2000ms
+# send: 2000 # Default 2000ms
+# read: 5000 # Default 5000ms
+# nacos: # Nacos
+# host: # Nacos address(es)
# - "http://${username}:${password}@${host1}:${port1}"
# prefix: "/nacos/v1/"
-# fetch_interval: 30 # default 30 sec
-# weight: 100 # default 100
+# fetch_interval: 30 # Default 30s
+#     weight: 100                        # Default 100. `weight` is the `default_weight` attached to each
+#                                        # discovered node that doesn't have a weight explicitly provided
+#                                        # in nacos results.
# timeout:
-# connect: 2000 # default 2000 ms
-# send: 2000 # default 2000 ms
-# read: 5000 # default 5000 ms
-# consul_kv:
-# servers:
+# connect: 2000 # Default 2000ms
+# send: 2000 # Default 2000ms
+# read: 5000 # Default 5000ms
+# consul_kv: # Consul KV
+# servers: # Consul KV address(es)
# - "http://127.0.0.1:8500"
# - "http://127.0.0.1:8600"
# prefix: "upstreams"
-# skip_keys: # if you need to skip special keys
+# skip_keys: # Skip special keys
# - "upstreams/unused_api/"
# timeout:
-# connect: 2000 # default 2000 ms
-# read: 2000 # default 2000 ms
-# wait: 60 # default 60 sec
-# weight: 1 # default 1
-# fetch_interval: 3 # default 3 sec, only take effect for keepalive: false way
-# keepalive: true # default true, use the long pull way to query consul servers
-# default_server: # you can define default server when missing hit
+# connect: 2000 # Default 2000ms
+# read: 2000 # Default 2000ms
+# wait: 60 # Default 60s
+# weight: 1 # Default 1
+# fetch_interval: 3 # Default 3s. Effective only when keepalive is false.
+# keepalive: true # Default to true. Use long pull to query Consul.
+# default_server: # Define default server to route traffic to.
# host: "127.0.0.1"
# port: 20999
# metadata:
-# fail_timeout: 1 # default 1 ms
-# weight: 1 # default 1
-# max_fails: 1 # default 1
-# dump: # if you need, when registered nodes updated can dump into file
-# path: "logs/consul_kv.dump"
-# expire: 2592000 # unit sec, here is 30 day
-# consul:
-# servers: # make sure service name is unique in these consul servers
-# - "http://127.0.0.1:8500" # `http://127.0.0.1:8500` and `http://127.0.0.1:8600` are different clusters
+# fail_timeout: 1 # Default 1ms
+# weight: 1 # Default 1
+# max_fails: 1 # Default 1
+# dump: # Dump the Consul key-value (KV) store to a file.
+# path: "logs/consul_kv.dump" # Location of the dump file.
+# expire: 2592000 # Specify the expiration time of the dump file in units of seconds.
+# consul: # Consul
+# servers: # Consul address(es)
+# - "http://127.0.0.1:8500"
# - "http://127.0.0.1:8600"
-# skip_services: # if you need to skip special services
-# - "service_a" # `consul` service is default skip service
+# skip_services: # Skip services during service discovery.
+# - "service_a"
# timeout:
-# connect: 2000 # default 2000 ms
-# read: 2000 # default 2000 ms
-# wait: 60 # default 60 sec
-# weight: 1 # default 1
-# fetch_interval: 3 # default 3 sec, only take effect for keepalive: false way
-# keepalive: true # default true, use the long pull way to query consul servers
-# default_service: # you can define default server when missing hit
+# connect: 2000 # Default 2000ms
+# read: 2000 # Default 2000ms
+# wait: 60 # Default 60s
+# weight: 1 # Default 1
+# fetch_interval: 3 # Default 3s. Effective only when keepalive is false.
+# keepalive: true # Default to true. Use long pull to query Consul.
+# default_service: # Define the default service to route traffic to.
# host: "127.0.0.1"
# port: 20999
# metadata:
-# fail_timeout: 1 # default 1 ms
-# weight: 1 # default 1
-# max_fails: 1 # default 1
-# dump: # if you need, when registered nodes updated can dump into file
-# path: "logs/consul.dump"
-# expire: 2592000 # unit sec, here is 30 day
-# load_on_init: true # default true, load the consul dump file on init
-# kubernetes:
+# fail_timeout: 1 # Default 1ms
+# weight: 1 # Default 1
+# max_fails: 1 # Default 1
+#     dump:                              # Dump the discovered Consul nodes to a file when they are updated.
+#       path: "logs/consul.dump"         # Location of the dump file.
+# expire: 2592000 # Specify the expiration time of the dump file in units of seconds.
+#     load_on_init: true                 # Default to true. Load the Consul dump file on init.
+# kubernetes: # Kubernetes service discovery
# ### kubernetes service discovery both support single-cluster and multi-cluster mode
# ### applicable to the case where the service is distributed in a single or multiple kubernetes clusters.
-#
# ### single-cluster mode ###
# service:
-# schema: https #apiserver schema, options [http, https], default https
-# host: ${KUBERNETES_SERVICE_HOST} #apiserver host, options [ipv4, ipv6, domain, environment variable], default ${KUBERNETES_SERVICE_HOST}
-# port: ${KUBERNETES_SERVICE_PORT} #apiserver port, options [port number, environment variable], default ${KUBERNETES_SERVICE_PORT}
+# schema: https # apiserver schema, options [http, https], default https
+# host: ${KUBERNETES_SERVICE_HOST} # apiserver host, options [ipv4, ipv6, domain, environment variable], default ${KUBERNETES_SERVICE_HOST}
+# port: ${KUBERNETES_SERVICE_PORT} # apiserver port, options [port number, environment variable], default ${KUBERNETES_SERVICE_PORT}
# client:
# # serviceaccount token or path of serviceaccount token_file
# token_file: ${KUBERNETES_CLIENT_TOKEN_FILE}
@@ -366,13 +392,12 @@ nginx_config: # config for render the template to generate n
# # reserved lua shared memory size,1m memory can store about 1000 pieces of endpoint
# shared_size: 1m #default 1m
# ### single-cluster mode ###
-#
# ### multi-cluster mode ###
# - id: release # a custom name refer to the cluster, pattern ^[a-z0-9]{1,8}
# service:
-# schema: https #apiserver schema, options [http, https], default https
-# host: ${KUBERNETES_SERVICE_HOST} #apiserver host, options [ipv4, ipv6, domain, environment variable]
-# port: ${KUBERNETES_SERVICE_PORT} #apiserver port, options [port number, environment variable]
+# schema: https # apiserver schema, options [http, https], default https
+# host: ${KUBERNETES_SERVICE_HOST} # apiserver host, options [ipv4, ipv6, domain, environment variable]
+# port: ${KUBERNETES_SERVICE_PORT} # apiserver port, options [port number, environment variable]
# client:
# # serviceaccount token or path of serviceaccount token_file
# token_file: ${KUBERNETES_CLIENT_TOKEN_FILE}
@@ -403,12 +428,12 @@ nginx_config: # config for render the template to generate n
# ### multi-cluster mode ###
graphql:
- max_size: 1048576 # the maximum size limitation of graphql in bytes, default 1MiB
+ max_size: 1048576 # Set the maximum size limitation of graphql in bytes. Default to 1MiB.
-#ext-plugin:
- #cmd: ["ls", "-l"]
+# ext-plugin:
+# cmd: ["ls", "-l"]
-plugins: # plugin list (sorted by priority)
+plugins: # plugin list (sorted by priority)
- real-ip # priority: 23000
- ai # priority: 22900
- client-control # priority: 22000
@@ -429,6 +454,7 @@ plugins: # plugin list (sorted by priority)
- csrf # priority: 2980
- uri-blocker # priority: 2900
- request-validation # priority: 2800
+ - chaitin-waf # priority: 2700
- openid-connect # priority: 2599
- cas-auth # priority: 2597
- authz-casbin # priority: 2560
@@ -444,9 +470,9 @@ plugins: # plugin list (sorted by priority)
- opa # priority: 2001
- authz-keycloak # priority: 2000
#- error-log-logger # priority: 1091
+ - proxy-cache # priority: 1085
- body-transformer # priority: 1080
- proxy-mirror # priority: 1010
- - proxy-cache # priority: 1009
- proxy-rewrite # priority: 1008
- workflow # priority: 1006
- api-breaker # priority: 1005
@@ -467,6 +493,7 @@ plugins: # plugin list (sorted by priority)
- public-api # priority: 501
- prometheus # priority: 500
- datadog # priority: 495
+ - loki-logger # priority: 414
- elasticsearch-logger # priority: 413
- echo # priority: 412
- loggly # priority: 411
@@ -496,7 +523,7 @@ plugins: # plugin list (sorted by priority)
- ext-plugin-post-req # priority: -3000
- ext-plugin-post-resp # priority: -4000
-stream_plugins: # sorted by priority
+stream_plugins: # stream plugin list (sorted by priority)
- ip-restriction # priority: 3000
- limit-conn # priority: 1003
- mqtt-proxy # priority: 1000
@@ -504,141 +531,145 @@ stream_plugins: # sorted by priority
- syslog # priority: 401
# <- recommend to use priority (0, 100) for your custom plugins
-#wasm:
- #plugins:
- #- name: wasm_log
- #priority: 7999
- #file: t/wasm/log/main.go.wasm
-
-#xrpc:
- #protocols:
- #- name: pingpong
-
-plugin_attr:
- log-rotate:
- interval: 3600 # rotate interval (unit: second)
- max_kept: 168 # max number of log files will be kept
- max_size: -1 # max size bytes of log files to be rotated, size check would be skipped with a value less than 0
- enable_compression: false # enable log file compression(gzip) or not, default false
- skywalking:
- service_name: APISIX
- service_instance_name: APISIX Instance Name
- endpoint_addr: http://127.0.0.1:12800
- opentelemetry:
- trace_id_source: x-request-id
+
+# wasm:
+# plugins:
+# - name: wasm_log
+# priority: 7999
+# file: t/wasm/log/main.go.wasm
+
+# xrpc:
+# protocols:
+# - name: pingpong
+plugin_attr: # Plugin attributes
+ log-rotate: # Plugin: log-rotate
+ interval: 3600 # Set the log rotate interval in seconds.
+ max_kept: 168 # Set the maximum number of log files to keep. If exceeded, historic logs are deleted.
+ max_size: -1 # Set the maximum size of log files in bytes before a rotation.
+ # Skip size check if max_size is less than 0.
+ enable_compression: false # Enable log file compression (gzip).
+ skywalking: # Plugin: skywalking
+ service_name: APISIX # Set the service name for SkyWalking reporter.
+ service_instance_name: APISIX Instance Name # Set the service instance name for SkyWalking reporter.
+ endpoint_addr: http://127.0.0.1:12800 # Set the SkyWalking HTTP endpoint.
+    report_interval: 3                        # Set the reporting interval in seconds.
+ opentelemetry: # Plugin: opentelemetry
+ trace_id_source: x-request-id # Specify the source of the trace ID for OpenTelemetry traces.
resource:
- service.name: APISIX
+ service.name: APISIX # Set the service name for OpenTelemetry traces.
collector:
- address: 127.0.0.1:4318
- request_timeout: 3
- request_headers:
- Authorization: token
+ address: 127.0.0.1:4318 # Set the address of the OpenTelemetry collector to send traces to.
+ request_timeout: 3 # Set the timeout for requests to the OpenTelemetry collector in seconds.
+ request_headers: # Set the headers to include in requests to the OpenTelemetry collector.
+ Authorization: token # Set the authorization header to include an access token.
batch_span_processor:
- drop_on_queue_full: false
- max_queue_size: 1024
- batch_timeout: 2
- inactive_timeout: 1
- max_export_batch_size: 16
- prometheus:
- export_uri: /apisix/prometheus/metrics
- metric_prefix: apisix_
- enable_export_server: true
- export_addr:
- ip: 127.0.0.1
- port: 9091
- #metrics:
+ drop_on_queue_full: false # Drop spans when the export queue is full.
+ max_queue_size: 1024 # Set the maximum size of the span export queue.
+ batch_timeout: 2 # Set the timeout for span batches to wait in the export queue before
+ # being sent.
+ inactive_timeout: 1 # Set the timeout for spans to wait in the export queue before being sent,
+ # if the queue is not full.
+ max_export_batch_size: 16 # Set the maximum number of spans to include in each batch sent to the
+ # OpenTelemetry collector.
+ prometheus: # Plugin: prometheus
+ export_uri: /apisix/prometheus/metrics # Set the URI for the Prometheus metrics endpoint.
+ metric_prefix: apisix_ # Set the prefix for Prometheus metrics generated by APISIX.
+ enable_export_server: true # Enable the Prometheus export server.
+ export_addr: # Set the address for the Prometheus export server.
+ ip: 127.0.0.1 # Set the IP.
+ port: 9091 # Set the port.
+ # metrics: # Create extra labels from nginx variables: https://nginx.org/en/docs/varindex.html
# http_status:
- # # extra labels from nginx variables
# extra_labels:
- # # the label name doesn't need to be the same as variable name
- # # below labels are only examples, you could add any valid variables as you need
# - upstream_addr: $upstream_addr
- # - upstream_status: $upstream_status
+ # - status: $upstream_status # The label name does not need to be the same as the variable name.
# http_latency:
# extra_labels:
# - upstream_addr: $upstream_addr
# bandwidth:
# extra_labels:
# - upstream_addr: $upstream_addr
- server-info:
- report_ttl: 60 # live time for server info in etcd (unit: second)
- dubbo-proxy:
- upstream_multiplex_count: 32
- request-id:
- snowflake:
- enable: false
- snowflake_epoc: 1609459200000 # the starting timestamp is expressed in milliseconds
- data_machine_bits: 12 # data machine bit, maximum 31, because Lua cannot do bit operations greater than 31
- sequence_bits: 10 # each machine generates a maximum of (1 << sequence_bits) serial numbers per millisecond
- data_machine_ttl: 30 # live time for data_machine in etcd (unit: second)
- data_machine_interval: 10 # lease renewal interval in etcd (unit: second)
- proxy-mirror:
- timeout: # proxy timeout in mirrored sub-request
+ # default_buckets:
+ # - 10
+ # - 50
+ # - 100
+ # - 200
+ # - 500
+ server-info: # Plugin: server-info
+ report_ttl: 60 # Set the TTL in seconds for server info in etcd.
+ # Maximum: 86400. Minimum: 3.
+ dubbo-proxy: # Plugin: dubbo-proxy
+ upstream_multiplex_count: 32 # Set the maximum number of connections that can be multiplexed over
+ # a single network connection between the Dubbo Proxy and the upstream
+ # Dubbo services.
+ proxy-mirror: # Plugin: proxy-mirror
+ timeout: # Set the timeout for mirrored requests.
connect: 60s
read: 60s
send: 60s
-# redirect:
-# https_port: 8443 # the default port for use by HTTP redirects to HTTPS
- inspect:
- delay: 3 # in seconds
- hooks_file: "/usr/local/apisix/plugin_inspect_hooks.lua"
-
-deployment:
- role: traditional
+ # redirect: # Plugin: redirect
+ # https_port: 8443 # Set the default port used to redirect HTTP to HTTPS.
+ inspect: # Plugin: inspect
+    delay: 3                          # Set the interval in seconds to check the hooks file for changes.
+ hooks_file: "/usr/local/apisix/plugin_inspect_hooks.lua" # Set the path to the Lua file that defines
+ # hooks. Only administrators should have
+ # write access to this file for security.
+
+deployment: # Deployment configurations
+ role: traditional # Set deployment mode: traditional, control_plane, or data_plane.
role_traditional:
- config_provider: etcd
- admin:
- # Default token when use API to call for Admin API.
- # *NOTE*: Highly recommended to modify this value to protect APISIX's Admin API.
- # Disabling this configuration item means that the Admin API does not
- # require any authentication.
+ config_provider: etcd # Set the configuration center.
+
+ #role_data_plane: # Set data plane details if role is data_plane.
+ # config_provider: etcd # Set the configuration center: etcd, xds, or yaml.
+
+ #role_control_plane: # Set control plane details if role is control_plane.
+ # config_provider: etcd # Set the configuration center.
+
+ admin: # Admin API
+ admin_key_required: true # Enable Admin API authentication by default for security.
admin_key:
-
- name: admin
- key: edd1c9f034335f136f87ad84b625c8f1
- role: admin # admin: manage all configuration data
- # viewer: only can view configuration data
+ name: admin # admin: write access to configurations.
+ key: edd1c9f034335f136f87ad84b625c8f1 # Set API key for the admin of Admin API.
+ role: admin
-
- name: viewer
- key: 4054f7cf07e344346cd3f287985e76a2
+ name: viewer # viewer: read-only to configurations.
+ key: 4054f7cf07e344346cd3f287985e76a2 # Set API key for the viewer of Admin API.
role: viewer
- enable_admin_cors: true # Admin API support CORS response headers.
- allow_admin: # http://nginx.org/en/docs/http/ngx_http_access_module.html#allow
- - 127.0.0.0/24 # If we don't set any IP list, then any IP access is allowed by default.
- #- "::/64"
- admin_listen: # use a separate port
- ip: 0.0.0.0 # Specific IP, if not set, the default value is `0.0.0.0`.
- port: 9180 # Specific port, which must be different from node_listen's port.
-
- #https_admin: true # enable HTTPS when use a separate port for Admin API.
- # Admin API will use conf/apisix_admin_api.crt and conf/apisix_admin_api.key as certificate.
+ enable_admin_cors: true # Enable Admin API CORS response header `Access-Control-Allow-Origin`.
+ allow_admin: # Limit Admin API access by IP addresses.
+ - 127.0.0.0/24 # If not set, any IP address is allowed.
+ # - "::/64"
+ admin_listen: # Set the Admin API listening addresses.
+ ip: 0.0.0.0 # Set listening IP.
+ port: 9180 # Set listening port. Beware of port conflict with node_listen.
- admin_api_mtls: # Depends on `admin_listen` and `https_admin`.
- admin_ssl_cert: "" # Path of your self-signed server side cert.
- admin_ssl_cert_key: "" # Path of your self-signed server side key.
- admin_ssl_ca_cert: "" # Path of your self-signed ca cert.The CA is used to sign all admin api callers' certificates.
+ # https_admin: true # Enable SSL for Admin API on IP and port specified in admin_listen.
+ # Use admin_api_mtls.admin_ssl_cert and admin_api_mtls.admin_ssl_cert_key.
+ # admin_api_mtls: # Set this if `https_admin` is true.
+ # admin_ssl_cert: "" # Set path to SSL/TLS certificate.
+ # admin_ssl_cert_key: "" # Set path to SSL/TLS key.
+ # admin_ssl_ca_cert: "" # Set path to CA certificate used to sign client certificates.
- admin_api_version: v3 # The version of admin api, latest version is v3.
+ admin_api_version: v3 # Set the version of Admin API (latest: v3).
etcd:
- host: # it's possible to define multiple etcd hosts addresses of the same etcd cluster.
- - "http://127.0.0.1:2379" # multiple etcd address, if your etcd cluster enables TLS, please use https scheme,
- # e.g. https://127.0.0.1:2379.
- prefix: /apisix # configuration prefix in etcd
- use_grpc: false # enable the experimental configuration sync via gRPC
- timeout: 30 # 30 seconds. Use a much higher timeout (like an hour) if the `use_grpc` is true.
- #resync_delay: 5 # when sync failed and a rest is needed, resync after the configured seconds plus 50% random jitter
- #health_check_timeout: 10 # etcd retry the unhealthy nodes after the configured seconds
- startup_retry: 2 # the number of retry to etcd during the startup, default to 2
- #user: root # root username for etcd
- #password: 5tHkHhYkjr6cQY # root password for etcd
+ host: # Set etcd address(es) in the same etcd cluster.
+ - "http://127.0.0.1:2379" # If TLS is enabled for etcd, use https://127.0.0.1:2379.
+ prefix: /apisix # Set etcd prefix.
+ timeout: 30 # Set timeout in seconds.
+ # resync_delay: 5 # Set resync time in seconds after a sync failure.
+ # The actual resync time would be resync_delay plus 50% random jitter.
+ # health_check_timeout: 10 # Set timeout in seconds for etcd health check.
+ # Default to 10 if not set or a negative value is provided.
+ startup_retry: 2 # Set the number of retries to etcd on startup. Default to 2.
+ # user: root # Set the root username for etcd.
+    # password: 5tHkHhYkjr6cQY               # Set the root password for etcd.
tls:
- # To enable etcd client certificate you need to build APISIX-Base, see
- # https://apisix.apache.org/docs/apisix/FAQ#how-do-i-build-the-apisix-base-environment
- #cert: /path/to/cert # path of certificate used by the etcd client
- #key: /path/to/key # path of key used by the etcd client
-
- verify: true # whether to verify the etcd endpoint certificate when setup a TLS connection to etcd,
- # the default value is true, e.g. the certificate will be verified strictly.
- #sni: # the SNI for etcd TLS requests. If missed, the host part of the URL will be used.
+      # cert: /path/to/cert       # Set the path to the certificate used by the etcd client.
+      # key: /path/to/key         # Set the path to the key used by the etcd client.
+ verify: true # Verify the etcd certificate when establishing a TLS connection with etcd.
+ # sni: # The SNI for etcd TLS requests.
+ # If not set, the host from the URL is used.
diff --git a/docs/assets/images/health_check_status_page.png b/docs/assets/images/health_check_status_page.png
new file mode 100644
index 000000000000..ed4aebead0fd
Binary files /dev/null and b/docs/assets/images/health_check_status_page.png differ
diff --git a/docs/assets/images/skip-mtls.png b/docs/assets/images/skip-mtls.png
new file mode 100644
index 000000000000..a739e66dfe74
Binary files /dev/null and b/docs/assets/images/skip-mtls.png differ
diff --git a/docs/assets/other/json/apisix-grafana-dashboard.json b/docs/assets/other/json/apisix-grafana-dashboard.json
index 1ea90c10ca34..e4e5b94681c3 100644
--- a/docs/assets/other/json/apisix-grafana-dashboard.json
+++ b/docs/assets/other/json/apisix-grafana-dashboard.json
@@ -520,7 +520,7 @@
"steppedLine": false,
"targets": [
{
- "expr": "sum(rate(apisix_bandwidth{instance=~\"$instance\"}[30s])) by (type)",
+ "expr": "sum(rate(apisix_bandwidth{instance=~\"$instance\"}[$__rate_interval])) by (type)",
"legendFormat": "{{type}}",
"refId": "A"
}
@@ -620,12 +620,12 @@
"steppedLine": false,
"targets": [
{
- "expr": "sum(irate(apisix_bandwidth{type=\"ingress\", service =~\"$service\",route=~\"$route\",instance=~\"$instance\"}[1m])) by (service)",
+ "expr": "sum(irate(apisix_bandwidth{type=\"ingress\", service =~\"$service\",route=~\"$route\",instance=~\"$instance\"}[$__rate_interval])) by (service)",
"legendFormat": "service:{{service}}",
"refId": "A"
},
{
- "expr": "sum(irate(apisix_bandwidth{type=\"ingress\", service =~\"$service\",route=~\"$route\",instance=~\"$instance\"}[1m])) by (route)",
+ "expr": "sum(irate(apisix_bandwidth{type=\"ingress\", service =~\"$service\",route=~\"$route\",instance=~\"$instance\"}[$__rate_interval])) by (route)",
"legendFormat": "route:{{route}}",
"refId": "B"
}
@@ -725,13 +725,13 @@
"steppedLine": false,
"targets": [
{
- "expr": "sum(rate(apisix_bandwidth{type=\"egress\", service =~\"$service\",route=~\"$route\",instance=~\"$instance\"}[1m])) by (service)",
+ "expr": "sum(rate(apisix_bandwidth{type=\"egress\", service =~\"$service\",route=~\"$route\",instance=~\"$instance\"}[$__rate_interval])) by (service)",
"interval": "",
"legendFormat": "service:{{service}}",
"refId": "A"
},
{
- "expr": "sum(rate(apisix_bandwidth{type=\"egress\", service =~\"$service\",route=~\"$route\",instance=~\"$instance\"}[1m])) by (route)",
+ "expr": "sum(rate(apisix_bandwidth{type=\"egress\", service =~\"$service\",route=~\"$route\",instance=~\"$instance\"}[$__rate_interval])) by (route)",
"legendFormat": "route:{{route}}",
"refId": "B"
}
@@ -853,7 +853,7 @@
"steppedLine": false,
"targets": [
{
- "expr": "sum(rate(apisix_http_status{service=~\"$service\",route=~\"$route\",instance=~\"$instance\"}[1m])) by (code)",
+ "expr": "sum(rate(apisix_http_status{service=~\"$service\",route=~\"$route\",instance=~\"$instance\"}[$__rate_interval])) by (code)",
"instant": false,
"interval": "",
"intervalFactor": 1,
@@ -965,7 +965,7 @@
"steppedLine": false,
"targets": [
{
- "expr": "sum(rate(apisix_http_status{service=~\"$service\",route=~\"$route\",instance=~\"$instance\"}[1m])) by (service)",
+ "expr": "sum(rate(apisix_http_status{service=~\"$service\",route=~\"$route\",instance=~\"$instance\"}[$__rate_interval])) by (service)",
"instant": false,
"interval": "",
"intervalFactor": 1,
@@ -973,7 +973,7 @@
"refId": "A"
},
{
- "expr": "sum(rate(apisix_http_status{service=~\"$service\",route=~\"$route\",instance=~\"$instance\"}[1m])) by (route)",
+ "expr": "sum(rate(apisix_http_status{service=~\"$service\",route=~\"$route\",instance=~\"$instance\"}[$__rate_interval])) by (route)",
"interval": "",
"legendFormat": "route:{{route}}",
"refId": "D"
@@ -1083,20 +1083,20 @@
"steppedLine": false,
"targets": [
{
- "expr": "histogram_quantile(0.90, sum(rate(apisix_http_latency_bucket{type=~\"request\",service=~\"$service\",consumer=~\"$consumer\",node=~\"$node\"}[1m])) by (le))",
+ "expr": "histogram_quantile(0.90, sum(rate(apisix_http_latency_bucket{type=~\"request\",service=~\"$service\",consumer=~\"$consumer\",node=~\"$node\",route=~\"$route\"}[$__rate_interval])) by (le))",
"format": "time_series",
"interval": "",
"legendFormat": "P90",
"refId": "A"
},
{
- "expr": "histogram_quantile(0.95, sum(rate(apisix_http_latency_bucket{type=~\"request\",service=~\"$service\",consumer=~\"$consumer\",node=~\"$node\"}[1m])) by (le))",
+ "expr": "histogram_quantile(0.95, sum(rate(apisix_http_latency_bucket{type=~\"request\",service=~\"$service\",consumer=~\"$consumer\",node=~\"$node\",route=~\"$route\"}[$__rate_interval])) by (le))",
"interval": "",
"legendFormat": "P95",
"refId": "B"
},
{
- "expr": "histogram_quantile(0.99, sum(rate(apisix_http_latency_bucket{type=~\"request\",service=~\"$service\",consumer=~\"$consumer\",node=~\"$node\"}[1m])) by (le))",
+ "expr": "histogram_quantile(0.99, sum(rate(apisix_http_latency_bucket{type=~\"request\",service=~\"$service\",consumer=~\"$consumer\",node=~\"$node\",route=~\"$route\"}[$__rate_interval])) by (le))",
"interval": "",
"legendFormat": "P99",
"refId": "C"
@@ -1206,7 +1206,7 @@
"steppedLine": false,
"targets": [
{
- "expr": "histogram_quantile(0.90, sum(rate(apisix_http_latency_bucket{type=~\"apisix\",service=~\"$service\",consumer=~\"$consumer\",node=~\"$node\"}[1m])) by (le))",
+ "expr": "histogram_quantile(0.90, sum(rate(apisix_http_latency_bucket{type=~\"apisix\",service=~\"$service\",consumer=~\"$consumer\",node=~\"$node\",route=~\"$route\"}[$__rate_interval])) by (le))",
"format": "time_series",
"instant": false,
"interval": "",
@@ -1215,13 +1215,13 @@
"refId": "A"
},
{
- "expr": "histogram_quantile(0.95, sum(rate(apisix_http_latency_bucket{type=~\"apisix\",service=~\"$service\",consumer=~\"$consumer\",node=~\"$node\"}[1m])) by (le))",
+ "expr": "histogram_quantile(0.95, sum(rate(apisix_http_latency_bucket{type=~\"apisix\",service=~\"$service\",consumer=~\"$consumer\",node=~\"$node\",route=~\"$route\"}[$__rate_interval])) by (le))",
"interval": "",
"legendFormat": "P95",
"refId": "B"
},
{
- "expr": "histogram_quantile(0.99, sum(rate(apisix_http_latency_bucket{type=~\"apisix\",service=~\"$service\",consumer=~\"$consumer\",node=~\"$node\"}[1m])) by (le))",
+ "expr": "histogram_quantile(0.99, sum(rate(apisix_http_latency_bucket{type=~\"apisix\",service=~\"$service\",consumer=~\"$consumer\",node=~\"$node\",route=~\"$route\"}[$__rate_interval])) by (le))",
"interval": "",
"legendFormat": "P99",
"refId": "C"
@@ -1330,20 +1330,20 @@
"steppedLine": false,
"targets": [
{
- "expr": "histogram_quantile(0.90, sum(rate(apisix_http_latency_bucket{type=~\"upstream\",service=~\"$service\",consumer=~\"$consumer\",node=~\"$node\"}[1m])) by (le))",
+ "expr": "histogram_quantile(0.90, sum(rate(apisix_http_latency_bucket{type=~\"upstream\",service=~\"$service\",consumer=~\"$consumer\",node=~\"$node\",route=~\"$route\"}[$__rate_interval])) by (le))",
"format": "time_series",
"interval": "",
"legendFormat": "P90",
"refId": "A"
},
{
- "expr": "histogram_quantile(0.95, sum(rate(apisix_http_latency_bucket{type=~\"upstream\",service=~\"$service\",consumer=~\"$consumer\",node=~\"$node\"}[1m])) by (le))",
+ "expr": "histogram_quantile(0.95, sum(rate(apisix_http_latency_bucket{type=~\"upstream\",service=~\"$service\",consumer=~\"$consumer\",node=~\"$node\",route=~\"$route\"}[$__rate_interval])) by (le))",
"interval": "",
"legendFormat": "P95",
"refId": "B"
},
{
- "expr": "histogram_quantile(0.99, sum(rate(apisix_http_latency_bucket{type=~\"upstream\",service=~\"$service\",consumer=~\"$consumer\",node=~\"$node\"}[1m])) by (le))",
+ "expr": "histogram_quantile(0.99, sum(rate(apisix_http_latency_bucket{type=~\"upstream\",service=~\"$service\",consumer=~\"$consumer\",node=~\"$node\",route=~\"$route\"}[$__rate_interval])) by (le))",
"interval": "",
"legendFormat": "P99",
"refId": "C"
diff --git a/docs/en/latest/FAQ.md b/docs/en/latest/FAQ.md
index 02691f17bb3b..79e3f3d48a51 100644
--- a/docs/en/latest/FAQ.md
+++ b/docs/en/latest/FAQ.md
@@ -105,7 +105,7 @@ Mainland China users can use `luarocks.cn` as the LuaRocks server. You can use t
make deps ENV_LUAROCKS_SERVER=https://luarocks.cn
```
-If this does not solve your problem, you can try getting a detailed log by using the `--verbose` flag to diagnose the problem.
+If this does not solve your problem, you can try getting a detailed log by using the `--verbose` or `-v` flag to diagnose the problem.
## How do I build the APISIX-Base environment?
@@ -732,6 +732,7 @@ curl http://127.0.0.1:9180/apisix/admin/routes/1 \
}
}
}'
+```
## How does APISIX configure ETCD with authentication?
diff --git a/docs/en/latest/admin-api.md b/docs/en/latest/admin-api.md
index 6e714cc9d8e2..6f96e18abd88 100644
--- a/docs/en/latest/admin-api.md
+++ b/docs/en/latest/admin-api.md
@@ -103,6 +103,46 @@ deployment:
This will find the environment variable `ADMIN_KEY` first, and if it does not exist, it will use `edd1c9f034335f136f87ad84b625c8f1` as the default value.
+You can also specify environment variables in yaml keys. This is specifically useful in the `standalone` [mode](./deployment-modes.md#standalone) where you can specify the upstream nodes as follows:
+
+```yaml title="./conf/apisix.yaml"
+routes:
+ -
+ uri: "/test"
+ upstream:
+ nodes:
+ "${{HOST_IP}}:${{PORT}}": 1
+ type: roundrobin
+#END
+```
+
+### Force Delete
+
+By default, the Admin API checks for references between resources and will refuse to delete resources in use.
+
+You can make a force deletion by adding the request argument `force=true` to the delete request, for example:
+
+```bash
+$ curl http://127.0.0.1:9180/apisix/admin/upstreams/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '{
+ "nodes": {
+ "127.0.0.1:8080": 1
+ },
+ "type": "roundrobin"
+}'
+$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '{
+ "uri": "/*",
+ "upstream_id": 1
+}'
+{"value":{"priority":0,"upstream_id":1,"uri":"/*","create_time":1689038794,"id":"1","status":1,"update_time":1689038916},"key":"/apisix/routes/1"}
+
+$ curl http://127.0.0.1:9180/apisix/admin/upstreams/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X DELETE
+{"error_msg":"can not delete this upstream, route [1] is still using it now"}
+$ curl "http://127.0.0.1:9180/apisix/admin/upstreams/1?force=anyvalue" -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X DELETE
+{"error_msg":"can not delete this upstream, route [1] is still using it now"}
+$ curl "http://127.0.0.1:9180/apisix/admin/upstreams/1?force=true" -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X DELETE
+{"deleted":"1","key":"/apisix/upstreams/1"}
+```
+
## V3 new feature
The Admin API has made some breaking changes in V3 version, as well as supporting additional features.
@@ -237,6 +277,10 @@ curl 'http://127.0.0.1:9180/apisix/admin/routes?name=test&uri=foo&label=' \
Route resource request address: /apisix/admin/routes/{id}?ttl=0
+### Quick Note on ID Syntax
+
+IDs as text strings must be between 1 and 64 characters long and should only contain uppercase letters, lowercase letters, numbers, and no special characters apart from dashes ( - ), periods ( . ) and underscores ( _ ). Integer IDs simply need to be at least 1 character long.
+
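+A minimal sketch of using a custom text ID that follows these rules; the ID `user-service.v1_canary` and the upstream address are illustrative:
+
+```bash
+# text IDs that follow the rules above are accepted alongside numeric IDs
+curl http://127.0.0.1:9180/apisix/admin/routes/user-service.v1_canary \
+  -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
+{
+    "uri": "/index.html",
+    "upstream": {
+        "type": "roundrobin",
+        "nodes": {
+            "127.0.0.1:1980": 1
+        }
+    }
+}'
+```
+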
### Request Methods
| Method | Request URI | Request Body | Description |
@@ -270,7 +314,7 @@ Route resource request address: /apisix/admin/routes/{id}?ttl=0
| methods | False | Match Rules | Matches with the specified methods. Matches all methods if empty or unspecified. | ["GET", "POST"] |
| priority | False | Match Rules | If different Routes matches to the same `uri`, then the Route is matched based on its `priority`. A higher value corresponds to higher priority. It is set to `0` by default. | priority = 10 |
| vars | False | Match Rules | Matches based on the specified variables consistent with variables in Nginx. Takes the form `[[var, operator, val], [var, operator, val], ...]]`. Note that this is case sensitive when matching a cookie name. See [lua-resty-expr](https://github.com/api7/lua-resty-expr) for more details. | [["arg_name", "==", "json"], ["arg_age", ">", 18]] |
-| filter_func | False | Match Rules | Matches based on a user-defined filtering function. Used in scenarios requiring complex matching. These functions can accept an input parameter `vars` which can be used to access the Nginx variables. | function(vars) return vars["arg_name"] == "json" end |
+| filter_func | False | Match Rules | Matches using a user-defined function in Lua. Used in scenarios where `vars` is not sufficient. Functions accept an argument `vars` which provides access to built-in variables (including Nginx variables). | function(vars) return tonumber(vars.arg_userid) % 4 > 2; end |
| plugins | False | Plugin | Plugins that are executed during the request/response cycle. See [Plugin](terminology/plugin.md) for more. | |
| script | False | Script | Used for writing arbitrary Lua code or directly calling existing plugins to be executed. See [Script](terminology/script.md) for more. | |
| upstream | False | Upstream | Configuration of the [Upstream](./terminology/upstream.md). | |
@@ -831,6 +875,8 @@ An Upstream configuration can be directly bound to a Route or a Service, but the
Upstream resource request address: /apisix/admin/upstreams/{id}
+For notes on ID syntax please refer to: [ID Syntax](#quick-note-on-id-syntax)
+
### Request Methods
| Method | Request URI | Request Body | Description |
@@ -854,11 +900,11 @@ In addition to the equalization algorithm selections, Upstream also supports pas
| service_name | required, can't be used with `nodes` | Service name used for [service discovery](discovery.md). | `a-bootiful-client` |
| discovery_type | required, if `service_name` is used | The type of service [discovery](discovery.md). | `eureka` |
| hash_on | optional | Only valid if the `type` is `chash`. Supports Nginx variables (`vars`), custom headers (`header`), `cookie` and `consumer`. Defaults to `vars`. | |
-| key | optional | Only valid if the `type` is `chash`. Finds the corresponding node `id` according to `hash_on` and `key` values. When `hash_on` is set to `vars`, `key` is a required parameter and it supports [Nginx variables](http://nginx.org/en/docs/varindex.html). When `hash_on` is set as `header`, `key` is a required parameter, and `header name` can be customized. When `hash_on` is set to `cookie`, `key` is also a required parameter, and `cookie name` can be customized. When `hash_on` is set to `consumer`, `key` need not be set and the `key` used by the hash algorithm would be the authenticated `consumer_name`. If the specified `hash_on` and `key` fail to fetch the values, it will default to `remote_addr`. | `uri`, `server_name`, `server_addr`, `request_uri`, `remote_port`, `remote_addr`, `query_string`, `host`, `hostname`, `arg_***`, `arg_***` |
+| key | optional | Only valid if the `type` is `chash`. Finds the corresponding node `id` according to `hash_on` and `key` values. When `hash_on` is set to `vars`, `key` is a required parameter and it supports [Nginx variables](http://nginx.org/en/docs/varindex.html). When `hash_on` is set as `header`, `key` is a required parameter, and `header name` can be customized. When `hash_on` is set to `cookie`, `key` is also a required parameter, and `cookie name` can be customized. When `hash_on` is set to `consumer`, `key` need not be set and the `key` used by the hash algorithm would be the authenticated `consumer_name`. | `uri`, `server_name`, `server_addr`, `request_uri`, `remote_port`, `remote_addr`, `query_string`, `host`, `hostname`, `arg_***`, `arg_***` |
| checks | optional | Configures the parameters for the [health check](./tutorials/health-check.md). | |
| retries | optional | Sets the number of retries while passing the request to Upstream using the underlying Nginx mechanism. Set according to the number of available backend nodes by default. Setting this to `0` disables retry. | |
| retry_timeout | optional | Timeout to continue with retries. Setting this to `0` disables the retry timeout. | |
-| timeout | optional | Sets the timeout (in seconds) for connecting to, and sending and receiving messages to and from the Upstream. | |
+| timeout | optional | Sets the timeout (in seconds) for connecting to, and sending and receiving messages to and from the Upstream. | `{"connect": 0.5,"send": 0.5,"read": 0.5}` |
| name | optional | Identifier for the Upstream. | |
| desc | optional | Description of usage scenarios. | |
| pass_host | optional | Configures the `host` when the request is forwarded to the upstream. Can be one of `pass`, `node` or `rewrite`. Defaults to `pass` if not specified. `pass`- transparently passes the client's host to the Upstream. `node`- uses the host configured in the node of the Upstream. `rewrite`- Uses the value configured in `upstream_host`. | |
@@ -889,7 +935,6 @@ The following should be considered when setting the `hash_on` value:
- When set to `cookie`, a `key` is required. This key is equal to "cookie\_`key`". The cookie name is case-sensitive.
- When set to `consumer`, the `key` is optional and the key is set to the `consumer_name` captured from the authentication Plugin.
- When set to `vars_combinations`, the `key` is required. The value of the key can be a combination of any of the [Nginx variables](http://nginx.org/en/docs/varindex.html) like `$request_uri$remote_addr`.
-- When no value is set for either `hash_on` or `key`, the key defaults to `remote_addr`.
The features described below require APISIX to be run on [APISIX-Base](./FAQ.md#how-do-i-build-the-apisix-base-environment):
@@ -1140,6 +1185,8 @@ Currently, the response is returned from etcd.
SSL resource request address: /apisix/admin/ssls/{id}
+For notes on ID syntax please refer to: [ID Syntax](#quick-note-on-id-syntax)
+
### Request Methods
| Method | Request URI | Request Body | Description |
@@ -1154,18 +1201,20 @@ SSL resource request address: /apisix/admin/ssls/{id}
| Parameter | Required | Type | Description | Example |
| ------------ | -------- | ------------------------ | -------------------------------------------------------------------------------------------------------------- | ------------------------------------------------ |
-| cert | True | Certificate | HTTPS certificate. | |
-| key | True | Private key | HTTPS private key. | |
+| cert | True | Certificate | HTTPS certificate. This field supports saving the value in Secret Manager using the [APISIX Secret](./terminology/secret.md) resource. | |
+| key | True | Private key | HTTPS private key. This field supports saving the value in Secret Manager using the [APISIX Secret](./terminology/secret.md) resource. | |
| certs | False | An array of certificates | Used for configuring multiple certificates for the same domain excluding the one provided in the `cert` field. | |
| keys | False | An array of private keys | Private keys to pair with the `certs`. | |
| client.ca | False | Certificate | Sets the CA certificate that verifies the client. Requires OpenResty 1.19+. | |
| client.depth | False | Certificate | Sets the verification depth in client certificate chains. Defaults to 1. Requires OpenResty 1.19+. | |
-| snis | True | Match Rules | A non-empty array of HTTPS SNI | |
+| client.skip_mtls_uri_regex | False | An array of regular expressions, in PCRE format | Used to match the request URI. If matched, the request bypasses client certificate checking, i.e. skips mTLS. | ["/hello[0-9]+", "/foobar"] |
+| snis | True, only if `type` is `server` | Match Rules | A non-empty array of HTTPS SNI | |
| labels | False | Match Rules | Attributes of the resource specified as key-value pairs. | {"version":"v2","build":"16","env":"production"} |
| create_time | False | Auxiliary | Epoch timestamp (in seconds) of the created time. If missing, this field will be populated automatically. | 1602883670 |
| update_time | False | Auxiliary | Epoch timestamp (in seconds) of the updated time. If missing, this field will be populated automatically. | 1602883670 |
| type | False | Auxiliary | Identifies the type of certificate, default `server`. | `client` Indicates that the certificate is a client certificate, which is used when APISIX accesses the upstream; `server` Indicates that the certificate is a server-side certificate, which is used by APISIX when verifying client requests. |
| status | False | Auxiliary | Enables the current SSL. Set to `1` (enabled) by default. | `1` to enable, `0` to disable |
+| ssl_protocols | False | An array of SSL protocols | Used to control the SSL/TLS protocol version used between servers and clients. See [SSL Protocol](./ssl-protocol.md) for more examples. | `["TLSv1.2", "TLSv1.3"]` |
Example Configuration:
@@ -1312,11 +1361,31 @@ Plugin resource request address: /apisix/admin/plugins/{plugin_name}
| ------ | ----------------------------------- | ------------ | ---------------------------------------------- |
| GET | /apisix/admin/plugins/list | NULL | Fetches a list of all Plugins. |
| GET | /apisix/admin/plugins/{plugin_name} | NULL | Fetches the specified Plugin by `plugin_name`. |
+| GET | /apisix/admin/plugins?all=true | NULL | Gets all properties of all plugins. |
+| GET | /apisix/admin/plugins?all=true&subsystem=stream | NULL | Gets properties of all Stream plugins. |
+| GET | /apisix/admin/plugins?all=true&subsystem=http | NULL | Gets properties of all HTTP plugins. |
+| PUT | /apisix/admin/plugins/reload | NULL | Reloads the plugins according to the changes made in code. |
+| GET | /apisix/admin/plugins/{plugin_name}?subsystem=stream | NULL | Gets properties of the specified plugin if it is supported in the Stream/L4 subsystem. |
+| GET | /apisix/admin/plugins/{plugin_name}?subsystem=http | NULL | Gets properties of the specified plugin if it is supported in the HTTP/L7 subsystem. |
+
+:::caution
+
+The endpoint `/apisix/admin/plugins?all=true`, which returns all properties of all plugins, will be deprecated soon.
+
+:::
### Request Body Parameters
The Plugin ({plugin_name}) of the data structure.
+### Request Arguments
+
+| Name | Description | Default |
+| --------- | ----------------------------- | ------- |
+| subsystem | The subsystem of the Plugins. | http |
+
+Plugins can be filtered by subsystem, so that the specified `{plugin_name}` is only searched for in the subsystem passed through the query parameters.
+
### Example API usage:
```shell
@@ -1329,7 +1398,7 @@ curl "http://127.0.0.1:9180/apisix/admin/plugins/list" \
```
```shell
-curl "http://127.0.0.1:9180/apisix/admin/plugins/key-auth" -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1'
+curl "http://127.0.0.1:9180/apisix/admin/plugins/key-auth?subsystem=http" -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1'
```
```json
@@ -1338,26 +1407,10 @@ curl "http://127.0.0.1:9180/apisix/admin/plugins/key-auth" -H 'X-API-KEY: ed
:::tip
-You can use the `/apisix/admin/plugins?all=true` API to get all properties of all plugins.
-
-Each Plugin has the attributes `name`, `priority`, `type`, `schema`, `consumer_schema` and `version`.
-
-Defaults to only HTTP Plugins. If you need to get attributes from stream Plugins, use `/apisix/admin/plugins?all=true&subsystem=stream`.
+You can use the `/apisix/admin/plugins?all=true` API to get all properties of all plugins. This API will be deprecated soon.
:::
-### Request Methods
-
-| Method | Request URI | Request Body | Description |
-| ------ | ------------------------------ | ------------ | ---------------------------------------- |
-| GET | /apisix/admin/plugins?all=true | NULL | Fetches all attributes from all Plugins. |
-
-### Request Arguments
-
-| Name | Description | Default |
-| --------- | ----------------------------- | ------- |
-| subsystem | The subsystem of the Plugins. | http |
-
## Stream Route
Route used in the [Stream Proxy](./stream-proxy.md).
@@ -1382,8 +1435,8 @@ Stream Route resource request address: /apisix/admin/stream_routes/{id}
| ----------- | -------- | -------- | ------------------------------------------------------------------- | ----------------------------- |
| upstream | False | Upstream | Configuration of the [Upstream](./terminology/upstream.md). | |
| upstream_id | False | Upstream | Id of the [Upstream](terminology/upstream.md) service. | |
-| remote_addr | False | IP/CIDR | Filters Upstream forwards by matching with client IP. | "127.0.0.1/32" or "127.0.0.1" |
-| server_addr | False | IP/CIDR | Filters Upstream forwards by matching with APISIX Server IP. | "127.0.0.1/32" or "127.0.0.1" |
+| remote_addr | False | IPv4, IPv4 CIDR, IPv6 | Filters Upstream forwards by matching with client IP. | "127.0.0.1" or "127.0.0.1/32" or "::1" |
+| server_addr | False | IPv4, IPv4 CIDR, IPv6 | Filters Upstream forwards by matching with APISIX Server IP. | "127.0.0.1" or "127.0.0.1/32" or "::1" |
| server_port | False | Integer | Filters Upstream forwards by matching with APISIX Server port. | 9090 |
| sni | False | Host | Server Name Indication. | "test.com" |
| protocol.name | False | String | Name of the protocol proxied by xRPC framework. | "redis" |
@@ -1452,3 +1505,80 @@ HTTP/1.1 200 OK
### Response Parameters
Currently, the response is returned from etcd.
+
+## Proto
+
+Proto is used to store protocol buffer definitions so that APISIX can communicate over gRPC.
+
+See [grpc-transcode plugin](./plugins/grpc-transcode.md#enabling-the-plugin) doc for more examples.
+
+### Proto API
+
+Proto resource request address: /apisix/admin/protos/{id}
+
+### Request Methods
+
+| Method | Request URI | Request Body | Description |
+| ------ | -------------------------------- | ------------ | ----------------------------------------------- |
+| GET | /apisix/admin/protos | NULL | List all Protos. |
+| GET | /apisix/admin/protos/{id} | NULL | Get a Proto by id. |
+| PUT | /apisix/admin/protos/{id} | {...} | Create or update a Proto with the given id. |
+| POST | /apisix/admin/protos | {...} | Create a Proto with a random id. |
+| DELETE | /apisix/admin/protos/{id} | NULL | Delete Proto by id. |
+
+### Request Body Parameters
+
+| Parameter | Required | Type | Description | Example |
+| ----------- | -------- | -------- | ------------------------------------------------------------------- | ----------------------------- |
+| content | True | String | content of `.proto` or `.pb` files | See [here](./plugins/grpc-transcode.md#enabling-the-plugin) |
+| create_time | False | Auxiliary | Epoch timestamp (in seconds) of the created time. If missing, this field will be populated automatically. | 1602883670 |
+| update_time | False | Auxiliary | Epoch timestamp (in seconds) of the updated time. If missing, this field will be populated automatically. | 1602883670 |
+
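+A minimal sketch of creating a Proto through the Admin API; the `helloworld` definition below mirrors the example in the [grpc-transcode plugin](./plugins/grpc-transcode.md#enabling-the-plugin) doc:
+
+```bash
+# store the .proto content under the Proto id 1
+curl http://127.0.0.1:9180/apisix/admin/protos/1 \
+  -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
+{
+    "content": "syntax = \"proto3\";
+    package helloworld;
+    service Greeter {
+        rpc SayHello (HelloRequest) returns (HelloReply) {}
+    }
+    message HelloRequest {
+        string name = 1;
+    }
+    message HelloReply {
+        string message = 1;
+    }"
+}'
+```
+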
+## Schema validation
+
+Check the validity of a configuration against its entity schema. This allows you to test your input before submitting a request to the entity endpoints of the Admin API.
+
+Note that this only performs the schema validation checks, checking that the input configuration is well-formed. Requests to the entity endpoint using the given configuration may still fail due to other reasons, such as invalid foreign key relationships or uniqueness check failures against the contents of the data store.
+
+### Schema validation
+
+Schema validation request address: /apisix/admin/schema/validate/{resource}
+
+### Request Methods
+
+| Method | Request URI | Request Body | Description |
+| ------ | -------------------------------- | ------------ | ----------------------------------------------- |
+| POST | /apisix/admin/schema/validate/{resource} | {..resource conf..} | Validate the resource configuration against corresponding schema. |
+
+### Response Codes
+
+* 200: validation passed.
+* 400: validation failed, with the error returned in the response body in JSON format.
+
+Example:
+
+```bash
+curl http://127.0.0.1:9180/apisix/admin/schema/validate/routes \
+ -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X POST -i -d '{
+ "uri": 1980,
+ "upstream": {
+ "scheme": "https",
+ "type": "roundrobin",
+ "nodes": {
+ "nghttp2.org": 1
+ }
+ }
+}'
+HTTP/1.1 400 Bad Request
+Date: Mon, 21 Aug 2023 07:37:13 GMT
+Content-Type: application/json
+Transfer-Encoding: chunked
+Connection: keep-alive
+Server: APISIX/3.4.0
+Access-Control-Allow-Origin: *
+Access-Control-Allow-Credentials: true
+Access-Control-Expose-Headers: *
+Access-Control-Max-Age: 3600
+
+{"error_msg":"property \"uri\" validation failed: wrong type: expected string, got number"}
+```
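+
+For comparison, a well-formed configuration would return `200`. A minimal sketch, reusing the upstream above but with a valid string `uri`:
+
+```bash
+# same request as above, but with a valid string "uri"
+curl http://127.0.0.1:9180/apisix/admin/schema/validate/routes \
+  -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X POST -i -d '{
+    "uri": "/get",
+    "upstream": {
+        "scheme": "https",
+        "type": "roundrobin",
+        "nodes": {
+            "nghttp2.org": 1
+        }
+    }
+}'
+HTTP/1.1 200 OK
+```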
diff --git a/docs/en/latest/building-apisix.md b/docs/en/latest/building-apisix.md
index a0a20b40ee98..31de4b081c27 100644
--- a/docs/en/latest/building-apisix.md
+++ b/docs/en/latest/building-apisix.md
@@ -31,42 +31,41 @@ description: Guide for building and running APISIX locally for development.
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
-If you are looking to contribute to APISIX or setup a development environment, this guide is for you.
+If you are looking to set up a development environment or contribute to APISIX, this guide is for you.
-If you are looking to install and run APISIX, check out the [Installation](./installation-guide.md) docs.
+If you are looking to quickly get started with APISIX, check out the other [installation methods](./installation-guide.md).
:::note
-If you want to build and package APISIX for a specific platform, see [apisix-build-tools](https://github.com/api7/apisix-build-tools).
+To build an APISIX docker image from source code, see [build image from source code](https://apisix.apache.org/docs/docker/build/#build-an-image-from-customizedpatched-source-code).
+
+To build and package APISIX for a specific platform, see [apisix-build-tools](https://github.com/api7/apisix-build-tools) instead.
:::
## Building APISIX from source
-To start, you have to install some dependencies. APISIX provides a handy script to get these installed:
+Install dependencies using the script provided by APISIX:
```shell
curl https://raw.githubusercontent.com/apache/apisix/master/utils/install-dependencies.sh -sL | bash -
```
-Then, create a directory and set the environment variable `APISIX_VERSION`:
+Save the APISIX version to an environment variable to be used next:
```shell
-APISIX_VERSION='3.2.0'
-mkdir apisix-${APISIX_VERSION}
+APISIX_VERSION='3.5.0'
```
-You can now clone the APISIX source code from Github by running the command below:
+Clone the APISIX source code of this version into a new directory `apisix-${APISIX_VERSION}`:
```shell
git clone --depth 1 --branch ${APISIX_VERSION} https://github.com/apache/apisix.git apisix-${APISIX_VERSION}
```
-You can also download the source package from the [Downloads page](https://apisix.apache.org/downloads/). But the source package missing the test case. This may affect subsequent operations.
-
-And you will also find source packages for APISIX Dashboard and APISIX Ingress Controller from [Downloads page](https://apisix.apache.org/downloads/).
+Alternatively, you can download the source package from the [Downloads](https://apisix.apache.org/downloads/) page. Note that source packages here are not distributed with test cases.
-Now, navigate to the directory, create dependencies, and install APISIX as shown below:
+Next, navigate to the directory, install dependencies, and build APISIX.
```shell
cd apisix-${APISIX_VERSION}
@@ -74,7 +73,7 @@ make deps
make install
```
-This will install the runtime dependent Lua libraries and the `apisix` command.
+This will install the runtime-dependent Lua libraries and the `apisix` CLI tool.
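+
+As a quick sanity check, you can confirm the CLI is available; the exact output depends on the version you built:
+
+```shell
+# prints the installed APISIX version
+apisix version
+```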
:::note
@@ -275,4 +274,4 @@ To run a specific test case, use the command below:
prove -Itest-nginx/lib -r t/plugin/openid-connect.t
```
-See [testing framework](https://github.com/apache/apisix/blob/master/docs/en/latest/internal/testing-framework.md) for more details.
+See [testing framework](./internal/testing-framework.md) for more details.
diff --git a/docs/en/latest/certificate.md b/docs/en/latest/certificate.md
index 4faf62ceb1ce..146a153fbe25 100644
--- a/docs/en/latest/certificate.md
+++ b/docs/en/latest/certificate.md
@@ -31,124 +31,121 @@ It is most common for an SSL certificate to contain only one domain. We can crea
* `key`: PEM-encoded private key of the SSL key pair.
* `snis`: Hostname(s) to associate with this certificate as SNIs. To set this attribute this certificate must have a valid private key associated with it.
-We will use the Python script below to simplify the example:
-
-```python title="create-ssl.py"
-#!/usr/bin/env python
-# coding: utf-8
-import sys
-# sudo pip install requests
-import requests
-
-if len(sys.argv) <= 3:
- print("bad argument")
- sys.exit(1)
-with open(sys.argv[1]) as f:
- cert = f.read()
-with open(sys.argv[2]) as f:
- key = f.read()
-sni = sys.argv[3]
-api_key = "edd1c9f034335f136f87ad84b625c8f1"
-resp = requests.put("http://127.0.0.1:9180/apisix/admin/ssls/1", json={
- "cert": cert,
- "key": key,
- "snis": [sni],
-}, headers={
- "X-API-KEY": api_key,
-})
-print(resp.status_code)
-print(resp.text)
-```
+The following is an example of configuring an SSL certificate with a single SNI in APISIX.
+
+Create an SSL object with the certificate and key valid for the SNI:
```shell
-# create SSL object
-./create-ssl.py t.crt t.key test.com
+curl http://127.0.0.1:9180/apisix/admin/ssls/1 \
+-H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
+{
+ "cert" : "'"$(cat t/certs/apisix.crt)"'",
+ "key": "'"$(cat t/certs/apisix.key)"'",
+ "snis": ["test.com"]
+}'
+```
+
+Create a Route object:
-# create Router object
+```shell
curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d '
{
- "uri": "/hello",
+ "uri": "/get",
"hosts": ["test.com"],
"methods": ["GET"],
"upstream": {
"type": "roundrobin",
"nodes": {
- "127.0.0.1:1980": 1
+ "httpbin.org": 1
}
}
}'
+```
+
+Send a request to verify:
-# make a test
+```shell
+curl --resolve 'test.com:9443:127.0.0.1' https://test.com:9443/get -k -vvv
-curl --resolve 'test.com:9443:127.0.0.1' https://test.com:9443/hello -vvv
* Added test.com:9443:127.0.0.1 to DNS cache
* About to connect() to test.com port 9443 (#0)
* Trying 127.0.0.1...
* Connected to test.com (127.0.0.1) port 9443 (#0)
-* Initializing NSS with certpath: sql:/etc/pki/nssdb
-* skipping SSL peer certificate verification
-* SSL connection using TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384
+* SSL connection using TLSv1.3 / TLS_AES_256_GCM_SHA384
+* ALPN, server accepted to use h2
* Server certificate:
-* subject: CN=test.com,O=iresty,L=ZhuHai,ST=GuangDong,C=CN
-* start date: Jun 24 22:18:05 2019 GMT
-* expire date: May 31 22:18:05 2119 GMT
-* common name: test.com
-* issuer: CN=test.com,O=iresty,L=ZhuHai,ST=GuangDong,C=CN
-> GET /hello HTTP/1.1
-> User-Agent: curl/7.29.0
+* subject: C=CN; ST=GuangDong; L=ZhuHai; O=iresty; CN=test.com
+* start date: Jun 24 22:18:05 2019 GMT
+* expire date: May 31 22:18:05 2119 GMT
+* issuer: C=CN; ST=GuangDong; L=ZhuHai; O=iresty; CN=test.com
+* SSL certificate verify result: self-signed certificate (18), continuing anyway.
+> GET /get HTTP/2
> Host: test.com:9443
-> Accept: */*
+> user-agent: curl/7.81.0
+> accept: */*
```
### wildcard SNI
-Sometimes, one SSL certificate may contain a wildcard domain like `*.test.com`,
-that means it can accept more than one domain, eg: `www.test.com` or `mail.test.com`.
+An SSL certificate could also be valid for a wildcard domain like `*.test.com`, which means it is valid for any domain of that pattern, including `www.test.com` and `mail.test.com`.
+
+The following is an example of configuring an SSL certificate with a wildcard SNI in APISIX.
-Here is an example, note that the value we pass as `sni` is `*.test.com`.
+Create an SSL object with the certificate and key valid for the SNI:
```shell
-./create-ssl.py t.crt t.key '*.test.com'
+curl http://127.0.0.1:9180/apisix/admin/ssls/1 \
+ -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
+ {
+ "cert" : "'"$(cat t/certs/apisix.crt)"'",
+ "key": "'"$(cat t/certs/apisix.key)"'",
+ "snis": ["*.test.com"]
+ }'
+```
+
+Create a Route object:
+```shell
curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -i -d '
{
- "uri": "/hello",
+ "uri": "/get",
"hosts": ["*.test.com"],
"methods": ["GET"],
"upstream": {
"type": "roundrobin",
"nodes": {
- "127.0.0.1:1980": 1
+ "httpbin.org": 1
}
}
}'
+```
-# make a test
+Send a request to verify:
-curl --resolve 'www.test.com:9443:127.0.0.1' https://www.test.com:9443/hello -vvv
-* Added test.com:9443:127.0.0.1 to DNS cache
-* About to connect() to test.com port 9443 (#0)
-* Trying 127.0.0.1...
-* Connected to test.com (127.0.0.1) port 9443 (#0)
-* Initializing NSS with certpath: sql:/etc/pki/nssdb
-* skipping SSL peer certificate verification
-* SSL connection using TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384
+```shell
+curl --resolve 'www.test.com:9443:127.0.0.1' https://www.test.com:9443/get -k -vvv
+
+* Added www.test.com:9443:127.0.0.1 to DNS cache
+* Hostname www.test.com was found in DNS cache
+* Trying 127.0.0.1:9443...
+* Connected to www.test.com (127.0.0.1) port 9443 (#0)
+* SSL connection using TLSv1.3 / TLS_AES_256_GCM_SHA384
+* ALPN, server accepted to use h2
* Server certificate:
-* subject: CN=test.com,O=iresty,L=ZhuHai,ST=GuangDong,C=CN
-* start date: Jun 24 22:18:05 2019 GMT
-* expire date: May 31 22:18:05 2119 GMT
-* common name: test.com
-* issuer: CN=test.com,O=iresty,L=ZhuHai,ST=GuangDong,C=CN
-> GET /hello HTTP/1.1
-> User-Agent: curl/7.29.0
-> Host: test.com:9443
-> Accept: */*
+* subject: C=CN; ST=GuangDong; L=ZhuHai; O=iresty; CN=test.com
+* start date: Jun 24 22:18:05 2019 GMT
+* expire date: May 31 22:18:05 2119 GMT
+* issuer: C=CN; ST=GuangDong; L=ZhuHai; O=iresty; CN=test.com
+* SSL certificate verify result: self signed certificate (18), continuing anyway.
+> GET /get HTTP/2
+> Host: www.test.com:9443
+> user-agent: curl/7.74.0
+> accept: */*
```
### multiple domain
-If your SSL certificate may contain more than one domain, like `www.test.com`
-and `mail.test.com`, then you can add them into the `snis` array. For example:
+If your SSL certificate contains more than one domain, like `www.test.com` and `mail.test.com`, you can add them to the `snis` array. For example:
```json
{
diff --git a/docs/en/latest/config.json b/docs/en/latest/config.json
index d9804a15770a..bec485d7a291 100644
--- a/docs/en/latest/config.json
+++ b/docs/en/latest/config.json
@@ -1,5 +1,5 @@
{
- "version": "3.2.0",
+ "version": "3.5.0",
"sidebar": [
{
"type": "category",
@@ -26,12 +26,20 @@
"items": [
"tutorials/expose-api",
"tutorials/protect-api",
- "tutorials/observe-your-api",
+ {
+ "type": "category",
+ "label": "Observability",
+ "items": [
+ "tutorials/observe-your-api",
+ "tutorials/health-check",
+ "tutorials/monitor-api-health-check"
+ ]
+ },
"tutorials/manage-api-consumers",
"tutorials/cache-api-responses",
"tutorials/add-multiple-api-versions",
- "tutorials/health-check",
- "tutorials/client-to-apisix-mtls"
+ "tutorials/client-to-apisix-mtls",
+ "tutorials/websocket-authentication"
]
},
{
@@ -118,7 +126,8 @@
"plugins/consumer-restriction",
"plugins/csrf",
"plugins/public-api",
- "plugins/gm"
+ "plugins/gm",
+ "plugins/chaitin-waf"
]
},
{
@@ -180,7 +189,8 @@
"plugins/file-logger",
"plugins/loggly",
"plugins/elasticsearch-logger",
- "plugins/tencent-cloud-cls"
+ "plugins/tencent-cloud-cls",
+ "plugins/loki-logger"
]
}
]
@@ -357,6 +367,10 @@
{
"type": "doc",
"id": "profile"
+ },
+ {
+ "type": "doc",
+ "id": "ssl-protocol"
}
]
},
diff --git a/docs/en/latest/control-api.md b/docs/en/latest/control-api.md
index c6944f2b5421..e21ca86d502d 100644
--- a/docs/en/latest/control-api.md
+++ b/docs/en/latest/control-api.md
@@ -98,71 +98,50 @@ Returns a [health check](./tutorials/health-check.md) of the APISIX instance.
```json
[
- {
- "healthy_nodes": [
- {
- "host": "127.0.0.1",
- "port": 1980,
- "priority": 0,
- "weight": 1
- }
- ],
- "name": "upstream#/upstreams/1",
- "nodes": [
- {
- "host": "127.0.0.1",
- "port": 1980,
- "priority": 0,
- "weight": 1
- },
- {
- "host": "127.0.0.2",
- "port": 1988,
- "priority": 0,
- "weight": 1
- }
- ],
- "src_id": "1",
- "src_type": "upstreams"
- },
- {
- "healthy_nodes": [
- {
- "host": "127.0.0.1",
- "port": 1980,
- "priority": 0,
- "weight": 1
- }
- ],
- "name": "upstream#/routes/1",
- "nodes": [
- {
- "host": "127.0.0.1",
- "port": 1980,
- "priority": 0,
- "weight": 1
- },
- {
- "host": "127.0.0.1",
- "port": 1988,
- "priority": 0,
- "weight": 1
- }
- ],
- "src_id": "1",
- "src_type": "routes"
- }
+ {
+ "nodes": [
+ {
+ "ip": "52.86.68.46",
+ "counter": {
+ "http_failure": 0,
+ "success": 0,
+ "timeout_failure": 0,
+ "tcp_failure": 0
+ },
+ "port": 80,
+ "status": "healthy"
+ },
+ {
+ "ip": "100.24.156.8",
+ "counter": {
+ "http_failure": 5,
+ "success": 0,
+ "timeout_failure": 0,
+ "tcp_failure": 0
+ },
+ "port": 80,
+ "status": "unhealthy"
+ }
+ ],
+ "name": "/apisix/routes/1",
+ "type": "http"
+ }
]
+
```
Each of the returned objects contain the following fields:
-* src_type: where the health checker is reporting from. Value is one of `["routes", "services", "upstreams"]`.
-* src_id: id of the object creating the health checker. For example, if an Upstream
-object with id `1` creates a health checker, the `src_type` is `upstreams` and the `src_id` is `1`.
-* name: name of the health checker.
+* name: ID of the resource that the health checker reports from.
+* type: health check type: `["http", "https", "tcp"]`.
* nodes: target nodes of the health checker.
-* healthy_nodes: healthy nodes discovered by the health checker.
+* nodes[i].ip: IP address of the node.
+* nodes[i].port: port number of the node.
+* nodes[i].status: health check result: `["healthy", "unhealthy", "mostly_healthy", "mostly_unhealthy"]`.
+* nodes[i].counter.success: count of successful health checks.
+* nodes[i].counter.http_failure: count of HTTP failures.
+* nodes[i].counter.tcp_failure: count of TCP connect/read/write failures.
+* nodes[i].counter.timeout_failure: count of timeouts.
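+
+For example, assuming the control API is listening on its default address `127.0.0.1:9090`, the status report above can be fetched with:
+
+```shell
+# returns the JSON array shown above
+curl http://127.0.0.1:9090/v1/healthcheck -s
+```
+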
You can also use `/v1/healthcheck/$src_type/$src_id` to get the health status of specific nodes.
@@ -170,40 +149,50 @@ For example, `GET /v1/healthcheck/upstreams/1` returns:
```json
{
- "healthy_nodes": [
- {
- "host": "127.0.0.1",
- "port": 1980,
- "priority": 0,
- "weight": 1
- }
- ],
- "name": "upstream#/upstreams/1",
- "nodes": [
- {
- "host": "127.0.0.1",
- "port": 1980,
- "priority": 0,
- "weight": 1
- },
- {
- "host": "127.0.0.2",
- "port": 1988,
- "priority": 0,
- "weight": 1
- }
- ],
- "src_id": "1",
- "src_type": "upstreams"
+ "nodes": [
+ {
+ "ip": "52.86.68.46",
+ "counter": {
+ "http_failure": 0,
+ "success": 2,
+ "timeout_failure": 0,
+ "tcp_failure": 0
+ },
+ "port": 80,
+ "status": "healthy"
+ },
+ {
+ "ip": "100.24.156.8",
+ "counter": {
+ "http_failure": 5,
+ "success": 0,
+ "timeout_failure": 0,
+ "tcp_failure": 0
+ },
+ "port": 80,
+ "status": "unhealthy"
+ }
+ ],
+ "type": "http",
+ "name": "/apisix/routes/1"
}
+
```
:::note
-As APISIX uses multiple-process architecture, if the process never handles the request of a specific upstream, then the upstream's health check information will not appear on the process. This may result in the health check API can't get all data during testing.
+An upstream's status is shown in the result list only when it satisfies both of the conditions below:
+
+* The upstream is configured with a health checker
+* The upstream has served requests in any worker process
:::
+If you use a browser to access the control API URL, you will get the HTML output:
+
+![Health Check Status Page](https://raw.githubusercontent.com/apache/apisix/master/docs/assets/images/health_check_status_page.png)
+
### POST /v1/gc
Introduced in [v2.8](https://github.com/apache/apisix/releases/tag/2.8).
@@ -221,7 +210,6 @@ Returns all configured [Routes](./terminology/route.md):
```json
[
{
- "update_count": 0,
"value": {
"priority": 0,
"uris": [
@@ -260,7 +248,6 @@ Returns the Route with the specified `route_id`:
```json
{
- "update_count": 0,
"value": {
"priority": 0,
"uris": [
diff --git a/docs/en/latest/debug-mode.md b/docs/en/latest/debug-mode.md
index e1438d0c3438..b933dfd88868 100644
--- a/docs/en/latest/debug-mode.md
+++ b/docs/en/latest/debug-mode.md
@@ -38,13 +38,15 @@ basic:
enable: true
```
+APISIX loads the configuration in `debug.yaml` on startup and then checks whether the file has been modified every second. If the file has changed, APISIX automatically applies the configuration changes.
+
:::note
For APISIX releases prior to v2.10, basic debug mode is enabled by setting `apisix.enable_debug = true` in your configuration file (`conf/config.yaml`).
:::
-If you have configured two Plgins `limit-conn` and `limit-count` on the Route `/hello`, you will receive a response with the header `Apisix-Plugins: limit-conn, limit-count` when you enable the basic debug mode.
+If you have configured two Plugins `limit-conn` and `limit-count` on the Route `/hello`, you will receive a response with the header `Apisix-Plugins: limit-conn, limit-count` when you enable the basic debug mode.
```shell
curl http://127.0.0.1:1984/hello -i
diff --git a/docs/en/latest/deployment-modes.md b/docs/en/latest/deployment-modes.md
index 7e6d8da7d3f9..bc195121c946 100644
--- a/docs/en/latest/deployment-modes.md
+++ b/docs/en/latest/deployment-modes.md
@@ -39,10 +39,6 @@ Each of these deployment modes are explained in detail below.
In the traditional deployment mode, one instance of APISIX will be both the `data_plane` and the `control_plane`.
-![traditional deployment mode](https://raw.githubusercontent.com/apache/apisix/master/docs/assets/images/deployment-traditional.png)
-
-There will be a conf server that listens on the UNIX socket and acts as a proxy between APISIX and etcd. Both the data and the control planes connect to this conf server via HTTP.
-
An example configuration of the traditional deployment mode is shown below:
```yaml title="conf/config.yaml"
@@ -73,16 +69,9 @@ The instance of APISIX deployed as the traditional role will:
In the decoupled deployment mode the `data_plane` and `control_plane` instances of APISIX are deployed separately, i.e., one instance of APISIX is configured to be a *data plane* and the other to be a *control plane*.
-![decoupled](https://raw.githubusercontent.com/apache/apisix/master/docs/assets/images/deployment-cp_and_dp.png)
-
The instance of APISIX deployed as the data plane will:
-1. Fetch the configuration from the *control plane*. The default port is `9280`.
-2. Performs a health check on all configured control plane addresses before starting the service.
- 1. If the control plane addresses are unavailable, the startup fails and an exception is thrown.
- 2. If at least one control plane address is available, it prints the unhealthy control planes logs, and starts the APISIX service.
- 3. If all control planes are normal, APISIX service is started normally.
-3. Once the service is started, it will handle the user requests.
+Once the service is started, it will handle the user requests.
The example below shows the configuration of an APISIX instance as *data plane* in the decoupled mode:
@@ -90,23 +79,13 @@ The example below shows the configuration of an APISIX instance as *data plane*
deployment:
role: data_plane
role_data_plane:
- config_provider: control_plane
- control_plane:
- host:
- - https://${Control_Plane_IP}:9280
- prefix: /apisix
- timeout: 30
- certs:
- cert: /path/to/ca-cert
- cert_key: /path/to/ca-cert
- trusted_ca_cert: /path/to/ca-cert
+ config_provider: etcd
#END
```
The instance of APISIX deployed as the control plane will:
1. Listen on port `9180` and handle Admin API requests.
-2. Provide the conf server which will listen on port `9280`. Both the control plane and the data plane will connect to this via HTTPS enforced by mTLS.
The example below shows the configuration of an APISIX instance as *control plane* in the decoupled mode:
@@ -115,51 +94,17 @@ deployment:
role: control_plane
role_control_plane:
config_provider: etcd
- conf_server:
- listen: 0.0.0.0:9280
- cert: /path/to/ca-cert
- cert_key: /path/to/ca-cert
- client_ca_cert: /path/to/ca-cert
- etcd:
- host:
- - https://${etcd_IP}:${etcd_Port}
- prefix: /apisix
- timeout: 30
- certs:
- cert: /path/to/ca-cert
- cert_key: /path/to/ca-cert
- trusted_ca_cert: /path/to/ca-cert
-#END
-```
-
-:::tip
-
-As OpenResty <= 1.21.4 does not support sending mTLS requests, to accept connections from APISIX running on these OpenResty versions, you need to disable the client certificate verification in the control plane instance as shown below:
-
-```yaml title="conf/config.yaml"
-deployment:
- role: control_plane
- role_control_plane:
- config_provider: etcd
- conf_server:
- listen: 0.0.0.0:9280
- cert: /path/to/ca-cert
- cert_key: /path/to/ca-cert
etcd:
host:
- https://${etcd_IP}:${etcd_Port}
prefix: /apisix
timeout: 30
- certs:
- trusted_ca_cert: /path/to/ca-cert
#END
```
-:::
-
## Standalone
-Turning on the APISIX node in Stand-alone mode will no longer use the default etcd as the configuration center.
+When the APISIX node is turned on in Standalone mode, it will no longer use the default etcd as the configuration center.
This method is more suitable for two types of users:
@@ -170,7 +115,7 @@ The routing rules in the `conf/apisix.yaml` file are loaded into memory immediat
*Note*: Reloading and updating routing rules are all hot memory updates. There is no replacement of working processes, since it's a hot update.
-Since the current Admin API is based on the etcd configuration center solution, enable Admin API is not allowed when the Stand-alone mode is enabled.
+Since the current Admin API is based on the etcd configuration center solution, enabling the Admin API is not allowed when Standalone mode is enabled.
Standalone mode can only be enabled when we set the role of APISIX as data plane. We set `deployment.role` to `data_plane` and `deployment.role_data_plane.config_provider` to `yaml`.
@@ -434,3 +379,25 @@ upstreams:
id: 1
#END
```
+
+### How to configure protos
+
+```yaml
+protos:
+ - id: helloworld
+ desc: hello world
+ content: >
+ syntax = "proto3";
+ package helloworld;
+
+ service Greeter {
+ rpc SayHello (HelloRequest) returns (HelloReply) {}
+ }
+ message HelloRequest {
+ string name = 1;
+ }
+ message HelloReply {
+ string message = 1;
+ }
+#END
+```
diff --git a/docs/en/latest/discovery/consul.md b/docs/en/latest/discovery/consul.md
index e31a83690b2b..b4eab61e982d 100644
--- a/docs/en/latest/discovery/consul.md
+++ b/docs/en/latest/discovery/consul.md
@@ -143,7 +143,7 @@ curl -X PUT 'http://127.0.0.1:8500/v1/agent/service/register' \
}'
```
-In some case, same service name exist in different consul servers.
+In some cases, the same service name might exist in different Consul servers.
To avoid confusion, use the full consul key url path as service name in practice.
### Upstream setting
diff --git a/docs/en/latest/discovery/control-plane-service-discovery.md b/docs/en/latest/discovery/control-plane-service-discovery.md
index acd4923a92e0..71fc6c7c46ce 100644
--- a/docs/en/latest/discovery/control-plane-service-discovery.md
+++ b/docs/en/latest/discovery/control-plane-service-discovery.md
@@ -6,7 +6,7 @@ keywords:
- ZooKeeper
- Nacos
- APISIX-Seed
-description: This documentation describes implement service discovery through Nacos and ZooKeeper on the API Gateway APISIX Control Plane.
+description: This documentation describes implementing service discovery through Nacos and ZooKeeper on the API Gateway APISIX Control Plane.
---
+
+## Description
+
+After enabling the chaitin-waf plugin, the traffic will be forwarded to the Chaitin WAF service for the detection and
+prevention of various web application attacks, ensuring the security of the application and user data.
+
+## Response Headers
+
+Depending on the plugin configuration, additional response headers may optionally be added.
+
+The response headers are listed below:
+
+- **X-APISIX-CHAITIN-WAF**: Whether APISIX forwards the request to the WAF server.
+ - yes: forwarded
+ - no: not forwarded
+ - unhealthy: matches the match variables, but no WAF server is available.
+ - err: an error occurred during the execution of the plugin. The **X-APISIX-CHAITIN-WAF-ERROR** response header is also set.
+ - waf-err: an error occurred while interacting with the WAF server. The **X-APISIX-CHAITIN-WAF-ERROR** response header is also set.
+ - timeout: the request to the WAF server timed out.
+- **X-APISIX-CHAITIN-WAF-ERROR**: Debug header. WAF error message
+- **X-APISIX-CHAITIN-WAF-TIME**: The time in milliseconds that APISIX spent interacting with WAF.
+- **X-APISIX-CHAITIN-WAF-STATUS**: The status code returned to APISIX by the WAF server.
+- **X-APISIX-CHAITIN-WAF-ACTION**: Processing result returned to APISIX by the WAF server.
+ - pass: request valid and passed
+ - reject: request rejected by WAF service
+- **X-APISIX-CHAITIN-WAF-SERVER**: Debug header. Picked WAF server.
+
+## Plugin Metadata
+
+| Name | Type | Required | Default value | Description |
+|--------------------------|---------------|----------|---------------|------------------------------------------------------------------------------------------------------------------------------|
+| nodes | array(object) | true | | A list of addresses for the Chaitin SafeLine WAF service. |
+| nodes[0].host | string | true | | The address of the Chaitin SafeLine WAF service. Supports IPv4, IPv6, Unix sockets, etc. |
+| nodes[0].port | string | false | 80 | The port of Chaitin SafeLine WAF service. |
+| config | object | false | | Configuration of the Chaitin SafeLine WAF service. The parameters configured here are used when the route is not configured. |
+| config.connect_timeout | integer | false | 1000 | connect timeout, in milliseconds |
+| config.send_timeout | integer | false | 1000 | send timeout, in milliseconds |
+| config.read_timeout | integer | false | 1000 | read timeout, in milliseconds |
+| config.req_body_size | integer | false | 1024 | request body size, in KB |
+| config.keepalive_size | integer | false | 256 | maximum concurrent idle connections to the SafeLine WAF detection service |
+| config.keepalive_timeout | integer | false | 60000 | idle connection timeout, in milliseconds |
+
+An example configuration is as follows.
+
+```bash
+curl http://127.0.0.1:9180/apisix/admin/plugin_metadata/chaitin-waf -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
+{
+ "nodes":[
+ {
+ "host": "unix:/path/to/safeline/resources/detector/snserver.sock",
+ "port": 8000
+ }
+ ]
+}'
+```
+
+## Attributes
+
+| Name | Type | Required | Default value | Description |
+|--------------------------|---------------|----------|---------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| match | array[object] | false | | The list of matching rules, default is empty |
+| match.vars | array[array] | false | | List of variables to match for filtering requests for conditional traffic split. It is in the format `{variable operator value}`. For example, `{"arg_name", "==", "json"}`. The variables here are consistent with NGINX internal variables. For details on supported operators, see [lua-resty-expr](https://github.com/api7/lua-resty-expr#operator-list). |
+| append_waf_resp_header | bool | false | true | Whether to add response headers |
+| append_waf_debug_header | bool | false | false | Whether or not to add debugging headers. Effective when `append_waf_resp_header` is `true`. |
+| config | object | false | | Configuration of the Chaitin SafeLine WAF service. When the route is not configured, the parameters configured in the metadata are used. |
+| config.connect_timeout | integer | false | | connect timeout, in milliseconds |
+| config.send_timeout | integer | false | | send timeout, in milliseconds |
+| config.read_timeout | integer | false | | read timeout, in milliseconds |
+| config.req_body_size | integer | false | | request body size, in KB |
+| config.keepalive_size | integer | false | | maximum concurrent idle connections to the SafeLine WAF detection service |
+| config.keepalive_timeout | integer | false | | idle connection timeout, in milliseconds |
+
+A sample configuration is shown below, using `httpbun.org` as the example backend, which can be replaced as needed:
+
+```bash
+curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
+{
+ "uri": "/*",
+ "plugins": {
+ "chaitin-waf": {
+ "match": [
+ {
+ "vars": [
+ ["http_waf","==","true"]
+ ]
+ }
+ ]
+ }
+ },
+ "upstream": {
+ "type": "roundrobin",
+ "nodes": {
+ "httpbun.org:80": 1
+ }
+ }
+}'
+```
+
+## Test Plugin
+
+Test the above example configuration.
+
+If the match condition is not met, the request reaches the upstream normally:
+
+```bash
+curl -H "Host: httpbun.org" http://127.0.0.1:9080/get -i
+
+HTTP/1.1 200 OK
+Content-Type: application/json
+Content-Length: 408
+Connection: keep-alive
+X-APISIX-CHAITIN-WAF: no
+Date: Wed, 19 Jul 2023 09:30:42 GMT
+X-Powered-By: httpbun/3c0dc05883dd9212ac38b04705037d50b02f2596
+Server: APISIX/3.3.0
+
+{
+ "args": {},
+ "headers": {
+ "Accept": "*/*",
+ "Connection": "close",
+ "Host": "httpbun.org",
+ "User-Agent": "curl/8.1.2",
+ "X-Forwarded-For": "127.0.0.1",
+ "X-Forwarded-Host": "httpbun.org",
+ "X-Forwarded-Port": "9080",
+ "X-Forwarded-Proto": "http",
+ "X-Real-Ip": "127.0.0.1"
+ },
+ "method": "GET",
+ "origin": "127.0.0.1, 122.231.76.178",
+ "url": "http://httpbun.org/get"
+}
+```
+
+Potential injection requests are also forwarded as is and encounter a 404 error:
+
+```bash
+curl -H "Host: httpbun.org" http://127.0.0.1:9080/getid=1%20AND%201=1 -i
+
+HTTP/1.1 404 Not Found
+Content-Type: text/plain; charset=utf-8
+Content-Length: 19
+Connection: keep-alive
+X-APISIX-CHAITIN-WAF: no
+Date: Wed, 19 Jul 2023 09:30:28 GMT
+X-Content-Type-Options: nosniff
+X-Powered-By: httpbun/3c0dc05883dd9212ac38b04705037d50b02f2596
+Server: APISIX/3.3.0
+
+404 page not found
+```
+
+Normal requests still reach the upstream when the match condition is met:
+
+```bash
+curl -H "Host: httpbun.org" -H "waf: true" http://127.0.0.1:9080/get -i
+
+HTTP/1.1 200 OK
+Content-Type: application/json
+Content-Length: 427
+Connection: keep-alive
+X-APISIX-CHAITIN-WAF-TIME: 2
+X-APISIX-CHAITIN-WAF-STATUS: 200
+X-APISIX-CHAITIN-WAF: yes
+X-APISIX-CHAITIN-WAF-ACTION: pass
+Date: Wed, 19 Jul 2023 09:29:58 GMT
+X-Powered-By: httpbun/3c0dc05883dd9212ac38b04705037d50b02f2596
+Server: APISIX/3.3.0
+
+{
+ "args": {},
+ "headers": {
+ "Accept": "*/*",
+ "Connection": "close",
+ "Host": "httpbun.org",
+ "User-Agent": "curl/8.1.2",
+ "Waf": "true",
+ "X-Forwarded-For": "127.0.0.1",
+ "X-Forwarded-Host": "httpbun.org",
+ "X-Forwarded-Port": "9080",
+ "X-Forwarded-Proto": "http",
+ "X-Real-Ip": "127.0.0.1"
+ },
+ "method": "GET",
+ "origin": "127.0.0.1, 122.231.76.178",
+ "url": "http://httpbun.org/get"
+}
+```
+
+Potential attack requests will be intercepted and a 403 error will be returned:
+
+```bash
+curl -H "Host: httpbun.org" -H "waf: true" http://127.0.0.1:9080/getid=1%20AND%201=1 -i
+
+HTTP/1.1 403 Forbidden
+Date: Wed, 19 Jul 2023 09:29:06 GMT
+Content-Type: text/plain; charset=utf-8
+Transfer-Encoding: chunked
+Connection: keep-alive
+X-APISIX-CHAITIN-WAF: yes
+X-APISIX-CHAITIN-WAF-TIME: 2
+X-APISIX-CHAITIN-WAF-ACTION: reject
+X-APISIX-CHAITIN-WAF-STATUS: 403
+Server: APISIX/3.3.0
+Set-Cookie: sl-session=UdywdGL+uGS7q8xMfnJlbQ==; Domain=; Path=/; Max-Age=86400
+
+{"code": 403, "success":false, "message": "blocked by Chaitin SafeLine Web Application Firewall", "event_id": "51a268653f2c4189bfa3ec66afbcb26d"}
+```
+
+## Delete Plugin
+
+To remove the `chaitin-waf` plugin, you can delete the corresponding JSON configuration from the Plugin configuration.
+APISIX will automatically reload and you do not have to restart for this to take effect:
+
+```bash
+$ curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
+{
+ "uri": "/*",
+ "upstream": {
+ "type": "roundrobin",
+ "nodes": {
+ "httpbun.org:80": 1
+ }
+ }
+}'
+```
diff --git a/docs/en/latest/plugins/clickhouse-logger.md b/docs/en/latest/plugins/clickhouse-logger.md
index 8b6067d0a589..2f3a66e0c6fb 100644
--- a/docs/en/latest/plugins/clickhouse-logger.md
+++ b/docs/en/latest/plugins/clickhouse-logger.md
@@ -44,7 +44,7 @@ The `clickhouse-logger` Plugin is used to push logs to [ClickHouse](https://clic
| timeout | integer | False | 3 | [1,...] | Time to keep the connection alive for after sending a request. |
| name | string | False | "clickhouse logger" | | Unique identifier for the logger. |
| ssl_verify | boolean | False | true | [true,false] | When set to `true`, verifies SSL. |
-| log_format | object | False | | | Log format declared as key value pairs in JSON format. Values only support strings. [APISIX](../apisix-variable.md) or [Nginx](http://nginx.org/en/docs/varindex.html) variables can be used by prefixing the string with `$`. |
+| log_format | object | False | {"host": "$host", "@timestamp": "$time_iso8601", "client_ip": "$remote_addr"} | | Log format declared as key value pairs in JSON format. Values only support strings. [APISIX](../apisix-variable.md) or [Nginx](http://nginx.org/en/docs/varindex.html) variables can be used by prefixing the string with `$`. |
| include_req_body | boolean | False | false | [false, true] | When set to `true` includes the request body in the log. If the request body is too big to be kept in the memory, it can't be logged due to Nginx's limitations. |
| include_req_body_expr | array | False | | | Filter for when the `include_req_body` attribute is set to `true`. Request body is only logged when the expression set here evaluates to `true`. See [lua-resty-expr](https://github.com/api7/lua-resty-expr) for more. |
| include_resp_body | boolean | False | false | [false, true] | When set to `true` includes the response body in the log. |
@@ -81,28 +81,19 @@ curl http://127.0.0.1:9180/apisix/admin/plugin_metadata/clickhouse-logger -H 'X-
}'
```
-You have to then create a table in your ClickHouse database to store the logs:
-
-```sql
-CREATE TABLE default.test (
- `host` String,
- `client_ip` String,
- `route_id` String,
- `service_id` String,
- `@timestamp` String,
- PRIMARY KEY(`@timestamp`)
-) ENGINE = MergeTree()
+You can use the ClickHouse Docker image to create a container like so:
+
+```shell
+docker run -d -p 8123:8123 -p 9000:9000 -p 9009:9009 --name some-clickhouse-server --ulimit nofile=262144:262144 clickhouse/clickhouse-server
```
-Now, if you run `select * from default.test;`, you will get the following row:
+Then create a table in your ClickHouse database to store the logs.
-```
-┌─host──────┬─client_ip─┬─route_id─┬─@timestamp────────────────┐
-│ 127.0.0.1 │ 127.0.0.1 │ 1 │ 2022-01-17T10:03:10+08:00 │
-└───────────┴───────────┴──────────┴───────────────────────────┘
+```shell
+echo "CREATE TABLE default.test (\`host\` String, \`client_ip\` String, \`route_id\` String, \`service_id\` String, \`@timestamp\` String, PRIMARY KEY(\`@timestamp\`)) ENGINE = MergeTree()" | curl 'http://localhost:8123/'
```
-## Enabling the Plugin
+## Enable Plugin
If multiple endpoints are configured, they will be written randomly.
The example below shows how you can enable the Plugin on a specific Route:
@@ -137,9 +128,16 @@ Now, if you make a request to APISIX, it will be logged in your ClickHouse datab
curl -i http://127.0.0.1:9080/hello
```
-## Disable Plugin
+Now, if you check for the rows in the table, you will get the following output:
+
+```shell
+curl 'http://localhost:8123/?query=select%20*%20from%20default.test'
+127.0.0.1 127.0.0.1 1 2023-05-08T19:15:53+05:30
+```
+
+## Delete Plugin
-To disable the `clickhouse-logger` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect.
+To remove the `clickhouse-logger` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect.
```shell
curl http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d '
diff --git a/docs/en/latest/plugins/client-control.md b/docs/en/latest/plugins/client-control.md
index 6aeb82abb393..fd226f7b2ba1 100644
--- a/docs/en/latest/plugins/client-control.md
+++ b/docs/en/latest/plugins/client-control.md
@@ -42,7 +42,7 @@ This Plugin requires APISIX to run on APISIX-Base. See [apisix-build-tools](http
| ------------- | ------- | -------- | ------------ | ------------------------------------------------------------------------------------------------------------------------------------ |
| max_body_size | integer | False | [0,...] | Dynamically set the [`client_max_body_size`](https://nginx.org/en/docs/http/ngx_http_core_module.html#client_max_body_size) directive. |
-## Enabling the Plugin
+## Enable Plugin
The example below enables the Plugin on a specific Route:
@@ -85,9 +85,9 @@ HTTP/1.1 413 Request Entity Too Large