diff --git a/apisix-master-0.rockspec b/apisix-master-0.rockspec index 5d6c6dafd60d..c9432cc329b1 100644 --- a/apisix-master-0.rockspec +++ b/apisix-master-0.rockspec @@ -71,7 +71,6 @@ dependencies = { "ext-plugin-proto = 0.6.1", "casbin = 1.41.9-1", "inspect == 3.1.1", - "lualdap = 1.2.6-1", "lua-resty-rocketmq = 0.3.0-0", "opentelemetry-lua = 0.2-3", "net-url = 0.9-1", diff --git a/apisix/cli/ops.lua b/apisix/cli/ops.lua index 16547fce360c..c10bcfaa769e 100644 --- a/apisix/cli/ops.lua +++ b/apisix/cli/ops.lua @@ -49,6 +49,9 @@ local str_find = string.find local str_byte = string.byte local str_sub = string.sub local str_format = string.format +local string = string +local table = table + local _M = {} @@ -502,17 +505,34 @@ Please modify "admin_key" in conf/config.yaml . if yaml_conf.apisix.ssl.ssl_trusted_certificate ~= nil then - local cert_path = yaml_conf.apisix.ssl.ssl_trusted_certificate - -- During validation, the path is relative to PWD - -- When Nginx starts, the path is relative to conf - -- Therefore we need to check the absolute version instead - cert_path = pl_path.abspath(cert_path) + local cert_paths = {} + local ssl_certificates = yaml_conf.apisix.ssl.ssl_trusted_certificate + for cert_path in string.gmatch(ssl_certificates, '([^,]+)') do + cert_path = util.trim(cert_path) + if cert_path == "system" then + local trusted_certs_path, err = util.get_system_trusted_certs_filepath() + if not trusted_certs_path then + util.die(err) + end + table.insert(cert_paths, trusted_certs_path) + else + -- During validation, the path is relative to PWD + -- When Nginx starts, the path is relative to conf + -- Therefore we need to check the absolute version instead + cert_path = pl_path.abspath(cert_path) + if not pl_path.exists(cert_path) then + util.die("certificate path", cert_path, "doesn't exist\n") + end - if not pl_path.exists(cert_path) then - util.die("certificate path", cert_path, "doesn't exist\n") + table.insert(cert_paths, cert_path) + end end - 
yaml_conf.apisix.ssl.ssl_trusted_certificate = cert_path + local combined_cert_filepath = yaml_conf.apisix.ssl.ssl_trusted_combined_path + or "/usr/local/apisix/conf/ssl_trusted_combined.pem" + util.gen_trusted_certs_combined_file(combined_cert_filepath, cert_paths) + + yaml_conf.apisix.ssl.ssl_trusted_certificate = combined_cert_filepath end -- enable ssl with place holder crt&key diff --git a/apisix/cli/schema.lua b/apisix/cli/schema.lua index e6720f88fa2a..1def95484bcb 100644 --- a/apisix/cli/schema.lua +++ b/apisix/cli/schema.lua @@ -209,6 +209,9 @@ local config_schema = { ssl_trusted_certificate = { type = "string", }, + ssl_trusted_combined_path = { + type = "string", + }, listen = { type = "array", items = { diff --git a/apisix/cli/util.lua b/apisix/cli/util.lua index bcd56a241aa8..d69468efb5da 100644 --- a/apisix/cli/util.lua +++ b/apisix/cli/util.lua @@ -24,6 +24,9 @@ local exit = os.exit local stderr = io.stderr local str_format = string.format local tonumber = tonumber +local io = io +local ipairs = ipairs +local assert = assert local _M = {} @@ -133,4 +136,54 @@ function _M.file_exists(file_path) return f ~= nil and close(f) end +do + local trusted_certs_paths = { + "/etc/ssl/certs/ca-certificates.crt", -- Debian/Ubuntu/Gentoo + "/etc/pki/tls/certs/ca-bundle.crt", -- Fedora/RHEL 6 + "/etc/ssl/ca-bundle.pem", -- OpenSUSE + "/etc/pki/tls/cacert.pem", -- OpenELEC + "/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem", -- CentOS/RHEL 7 + "/etc/ssl/cert.pem", -- OpenBSD, Alpine + } + + -- Check if a file exists using Lua's built-in `io.open` + local function file_exists(path) + local file = io.open(path, "r") + if file then + file:close() + return true + else + return false + end + end + + function _M.get_system_trusted_certs_filepath() + for _, path in ipairs(trusted_certs_paths) do + if file_exists(path) then + return path + end + end + + return nil, + "Could not find trusted certs file in " .. + "any of the `system`-predefined locations. " .. 
+ "Please install a certs file there or set " .. + "`lua_ssl_trusted_certificate` to a " .. + "specific file path instead of `system`" + end +end + + +function _M.gen_trusted_certs_combined_file(combined_filepath, paths) + local combined_file = assert(io.open(combined_filepath, "w")) + for _, path in ipairs(paths) do + local cert_file = assert(io.open(path, "r")) + combined_file:write(cert_file:read("*a")) + combined_file:write("\n") + cert_file:close() + end + combined_file:close() +end + + return _M diff --git a/apisix/core/config_etcd.lua b/apisix/core/config_etcd.lua index 5734106e7a7b..a415fbfe527b 100644 --- a/apisix/core/config_etcd.lua +++ b/apisix/core/config_etcd.lua @@ -257,6 +257,11 @@ local function do_run_watch(premature) end local rev = tonumber(res.result.header.revision) + if rev == nil then + log.warn("receive a invalid revision header, header: ", inspect(res.result.header)) + cancel_watch(http_cli) + break + end if rev > watch_ctx.rev then watch_ctx.rev = rev + 1 end @@ -284,7 +289,8 @@ local function run_watch(premature) local ok, err = ngx_thread_wait(run_watch_th, check_worker_th) if not ok then - log.error("check_worker thread terminates failed, retart checker, error: " .. 
err) + log.error("run_watch or check_worker thread terminates failed", + " restart those threads, error: ", inspect(err)) end ngx_thread_kill(run_watch_th) diff --git a/apisix/core/table.lua b/apisix/core/table.lua index 4346863079cf..ed9450a8d854 100644 --- a/apisix/core/table.lua +++ b/apisix/core/table.lua @@ -23,6 +23,7 @@ local newproxy = newproxy local getmetatable = getmetatable local setmetatable = setmetatable local select = select +local tostring = tostring local new_tab = require("table.new") local nkeys = require("table.nkeys") local ipairs = ipairs @@ -91,7 +92,7 @@ end -- @usage -- local arr = {"a", "b", "c"} -- local idx = core.table.array_find(arr, "b") -- idx = 2 -function _M.array_find(array, val) +local function array_find(array, val) if type(array) ~= "table" then return nil end @@ -104,6 +105,7 @@ function _M.array_find(array, val) return nil end +_M.array_find = array_find -- only work under lua51 or luajit @@ -117,19 +119,28 @@ end local deepcopy do - local function _deepcopy(orig, copied) - -- prevent infinite loop when a field refers its parent - copied[orig] = true + local function _deepcopy(orig, copied, parent, opts) -- If the array-like table contains nil in the middle, -- the len might be smaller than the expected. -- But it doesn't affect the correctness. local len = #orig local copy = new_tab(len, nkeys(orig) - len) + -- prevent infinite loop when a field refers its parent + copied[orig] = copy for orig_key, orig_value in pairs(orig) do - if type(orig_value) == "table" and not copied[orig_value] then - copy[orig_key] = _deepcopy(orig_value, copied) - else + local path = parent .. "." .. 
tostring(orig_key) + if opts and array_find(opts.shallows, path) then copy[orig_key] = orig_value + else + if type(orig_value) == "table" then + if copied[orig_value] then + copy[orig_key] = copied[orig_value] + else + copy[orig_key] = _deepcopy(orig_value, copied, path, opts) + end + else + copy[orig_key] = orig_value + end end end @@ -144,13 +155,13 @@ do local copied_recorder = {} - function deepcopy(orig) + function deepcopy(orig, opts) local orig_type = type(orig) if orig_type ~= 'table' then return orig end - local res = _deepcopy(orig, copied_recorder) + local res = _deepcopy(orig, copied_recorder, "self", opts) _M.clear(copied_recorder) return res end diff --git a/apisix/init.lua b/apisix/init.lua index 103a8c1d7584..4b2ad17e1ef5 100644 --- a/apisix/init.lua +++ b/apisix/init.lua @@ -246,15 +246,7 @@ local function parse_domain_in_route(route) -- don't modify the modifiedIndex to avoid plugin cache miss because of DNS resolve result -- has changed - local parent = route.value.upstream.parent - if parent then - route.value.upstream.parent = nil - end - route.dns_value = core.table.deepcopy(route.value) - if parent then - route.value.upstream.parent = parent - route.dns_value.upstream.parent = parent - end + route.dns_value = core.table.deepcopy(route.value, { shallows = { "self.upstream.parent"}}) route.dns_value.upstream.nodes = new_nodes core.log.info("parse route which contain domain: ", core.json.delay_encode(route, true)) diff --git a/apisix/plugin.lua b/apisix/plugin.lua index dc22459aaf9d..b3dadcb49588 100644 --- a/apisix/plugin.lua +++ b/apisix/plugin.lua @@ -185,6 +185,10 @@ local function load_plugin(name, plugins_list, plugin_type) plugin.init() end + if plugin.workflow_handler then + plugin.workflow_handler() + end + return end @@ -580,7 +584,7 @@ end local function merge_service_route(service_conf, route_conf) - local new_conf = core.table.deepcopy(service_conf) + local new_conf = core.table.deepcopy(service_conf, { shallows = 
{"self.value.upstream.parent"}}) new_conf.value.service_id = new_conf.value.id new_conf.value.id = route_conf.value.id new_conf.modifiedIndex = route_conf.modifiedIndex @@ -654,7 +658,7 @@ end local function merge_service_stream_route(service_conf, route_conf) -- because many fields in Service are not supported by stream route, -- so we copy the stream route as base object - local new_conf = core.table.deepcopy(route_conf) + local new_conf = core.table.deepcopy(route_conf, { shallows = {"self.value.upstream.parent"}}) if service_conf.value.plugins then for name, conf in pairs(service_conf.value.plugins) do if not new_conf.value.plugins then @@ -702,7 +706,8 @@ local function merge_consumer_route(route_conf, consumer_conf, consumer_group_co return route_conf end - local new_route_conf = core.table.deepcopy(route_conf) + local new_route_conf = core.table.deepcopy(route_conf, + { shallows = {"self.value.upstream.parent"}}) if consumer_group_conf then for name, conf in pairs(consumer_group_conf.value.plugins) do diff --git a/apisix/plugins/ai-proxy/drivers/openai.lua b/apisix/plugins/ai-proxy/drivers/openai.lua index c8f7f4b6223f..cefc5a728eaa 100644 --- a/apisix/plugins/ai-proxy/drivers/openai.lua +++ b/apisix/plugins/ai-proxy/drivers/openai.lua @@ -42,11 +42,11 @@ function _M.request(conf, request_table, ctx) end local ok, err = httpc:connect({ - scheme = parsed_url.scheme or "https", - host = parsed_url.host or DEFAULT_HOST, - port = parsed_url.port or DEFAULT_PORT, + scheme = endpoint and parsed_url.scheme or "https", + host = endpoint and parsed_url.host or DEFAULT_HOST, + port = endpoint and parsed_url.port or DEFAULT_PORT, ssl_verify = conf.ssl_verify, - ssl_server_name = parsed_url.host or DEFAULT_HOST, + ssl_server_name = endpoint and parsed_url.host or DEFAULT_HOST, pool_size = conf.keepalive and conf.keepalive_pool, }) @@ -54,7 +54,7 @@ function _M.request(conf, request_table, ctx) return nil, "failed to connect to LLM server: " .. 
err end - local path = (parsed_url.path or DEFAULT_PATH) + local path = (endpoint and parsed_url.path or DEFAULT_PATH) local headers = (conf.auth.header or {}) headers["Content-Type"] = "application/json" diff --git a/apisix/plugins/ai.lua b/apisix/plugins/ai.lua index 39430c7ad014..278201d4e56e 100644 --- a/apisix/plugins/ai.lua +++ b/apisix/plugins/ai.lua @@ -69,7 +69,9 @@ local default_keepalive_pool = {} local function create_router_matching_cache(api_ctx) orig_router_http_matching(api_ctx) - return core.table.deepcopy(api_ctx) + return core.table.deepcopy(api_ctx, { + shallows = { "self.matched_route.value.upstream.parent" } + }) end diff --git a/apisix/plugins/jwt-auth.lua b/apisix/plugins/jwt-auth.lua index 4b8d3e0f9560..db8e49c8a0e5 100644 --- a/apisix/plugins/jwt-auth.lua +++ b/apisix/plugins/jwt-auth.lua @@ -62,8 +62,14 @@ local consumer_schema = { type = "object", -- can't use additionalProperties with dependencies properties = { - key = {type = "string"}, - secret = {type = "string"}, + key = { + type = "string", + minLength = 1, + }, + secret = { + type = "string", + minLength = 1, + }, algorithm = { type = "string", enum = {"HS256", "HS512", "RS256", "ES256"}, diff --git a/apisix/plugins/limit-count.lua b/apisix/plugins/limit-count.lua index e3b4c0f6c8bb..1472a6db19e1 100644 --- a/apisix/plugins/limit-count.lua +++ b/apisix/plugins/limit-count.lua @@ -16,6 +16,7 @@ -- local fetch_secrets = require("apisix.secret").fetch_secrets local limit_count = require("apisix.plugins.limit-count.init") +local workflow = require("apisix.plugins.workflow") local plugin_name = "limit-count" local _M = { @@ -23,11 +24,12 @@ local _M = { priority = 1002, name = plugin_name, schema = limit_count.schema, + metadata_schema = limit_count.metadata_schema, } -function _M.check_schema(conf) - return limit_count.check_schema(conf) +function _M.check_schema(conf, schema_type) + return limit_count.check_schema(conf, schema_type) end @@ -36,5 +38,14 @@ function _M.access(conf, 
ctx) return limit_count.rate_limit(conf, ctx, plugin_name, 1) end +function _M.workflow_handler() + workflow.register(plugin_name, + function (conf, ctx) + return limit_count.rate_limit(conf, ctx, plugin_name, 1) + end, + function (conf) + return limit_count.check_schema(conf) + end) +end return _M diff --git a/apisix/plugins/limit-count/init.lua b/apisix/plugins/limit-count/init.lua index e7d03028e077..08a4c9763551 100644 --- a/apisix/plugins/limit-count/init.lua +++ b/apisix/plugins/limit-count/init.lua @@ -42,6 +42,30 @@ local group_conf_lru = core.lrucache.new({ type = 'plugin', }) +local metadata_defaults = { + limit_header = "X-RateLimit-Limit", + remaining_header = "X-RateLimit-Remaining", + reset_header = "X-RateLimit-Reset", +} + +local metadata_schema = { + type = "object", + properties = { + limit_header = { + type = "string", + default = metadata_defaults.limit_header, + }, + remaining_header = { + type = "string", + default = metadata_defaults.remaining_header, + }, + reset_header = { + type = "string", + default = metadata_defaults.reset_header, + }, + }, +} + local schema = { type = "object", properties = { @@ -91,7 +115,8 @@ local schema = { local schema_copy = core.table.deepcopy(schema) local _M = { - schema = schema + schema = schema, + metadata_schema = metadata_schema, } @@ -100,7 +125,12 @@ local function group_conf(conf) end -function _M.check_schema(conf) + +function _M.check_schema(conf, schema_type) + if schema_type == core.schema.TYPE_METADATA then + return core.schema.check(metadata_schema, conf) + end + local ok, err = core.schema.check(schema, conf) if not ok then return false, err @@ -250,14 +280,22 @@ function _M.rate_limit(conf, ctx, name, cost) delay, remaining, reset = lim:incoming(key, cost) end + local metadata = apisix_plugin.plugin_metadata("limit-count") + if metadata then + metadata = metadata.value + else + metadata = metadata_defaults + end + core.log.info("limit-count plugin-metadata: ", core.json.delay_encode(metadata)) 
+ if not delay then local err = remaining if err == "rejected" then -- show count limit header when rejected if conf.show_limit_quota_header then - core.response.set_header("X-RateLimit-Limit", conf.count, - "X-RateLimit-Remaining", 0, - "X-RateLimit-Reset", reset) + core.response.set_header(metadata.limit_header, conf.count, + metadata.remaining_header, 0, + metadata.reset_header, reset) end if conf.rejected_msg then @@ -274,9 +312,9 @@ function _M.rate_limit(conf, ctx, name, cost) end if conf.show_limit_quota_header then - core.response.set_header("X-RateLimit-Limit", conf.count, - "X-RateLimit-Remaining", remaining, - "X-RateLimit-Reset", reset) + core.response.set_header(metadata.limit_header, conf.count, + metadata.remaining_header, remaining, + metadata.reset_header, reset) end end diff --git a/apisix/plugins/multi-auth.lua b/apisix/plugins/multi-auth.lua index 9a259d038238..7d34ffb23c41 100644 --- a/apisix/plugins/multi-auth.lua +++ b/apisix/plugins/multi-auth.lua @@ -18,6 +18,7 @@ local core = require("apisix.core") local require = require local pairs = pairs local type = type +local plugin = require("apisix.plugin") local schema = { type = "object", @@ -48,7 +49,7 @@ function _M.check_schema(conf) local auth_plugins = conf.auth_plugins for k, auth_plugin in pairs(auth_plugins) do for auth_plugin_name, auth_plugin_conf in pairs(auth_plugin) do - local auth = require("apisix.plugins." .. auth_plugin_name) + local auth = plugin.get(auth_plugin_name) if auth == nil then return false, auth_plugin_name .. " plugin did not found" else @@ -73,7 +74,7 @@ function _M.rewrite(conf, ctx) for k, auth_plugin in pairs(auth_plugins) do for auth_plugin_name, auth_plugin_conf in pairs(auth_plugin) do - local auth = require("apisix.plugins." .. auth_plugin_name) + local auth = plugin.get(auth_plugin_name) -- returns 401 HTTP status code if authentication failed, otherwise returns nothing. 
local auth_code, err = auth.rewrite(auth_plugin_conf, ctx) if type(err) == "table" then diff --git a/apisix/plugins/prometheus/exporter.lua b/apisix/plugins/prometheus/exporter.lua index fc282031ee70..a255e25ab550 100644 --- a/apisix/plugins/prometheus/exporter.lua +++ b/apisix/plugins/prometheus/exporter.lua @@ -136,10 +136,14 @@ function _M.http_init(prometheus_enabled_in_stream) metric_prefix = attr.metric_prefix end - local exptime - if attr and attr.expire then - exptime = attr.expire - end + local status_metrics_exptime = core.table.try_read_attr(attr, "metrics", + "http_status", "expire") + local latency_metrics_exptime = core.table.try_read_attr(attr, "metrics", + "http_latency", "expire") + local bandwidth_metrics_exptime = core.table.try_read_attr(attr, "metrics", + "bandwidth", "expire") + local upstream_status_exptime = core.table.try_read_attr(attr, "metrics", + "upstream_status", "expire") prometheus = base_prometheus.init("prometheus-metrics", metric_prefix) @@ -172,7 +176,7 @@ function _M.http_init(prometheus_enabled_in_stream) metrics.upstream_status = prometheus:gauge("upstream_status", "Upstream status from health check", {"name", "ip", "port"}, - exptime) + upstream_status_exptime) -- per service @@ -183,7 +187,7 @@ function _M.http_init(prometheus_enabled_in_stream) "HTTP status codes per service in APISIX", {"code", "route", "matched_uri", "matched_host", "service", "consumer", "node", unpack(extra_labels("http_status"))}, - exptime) + status_metrics_exptime) local buckets = DEFAULT_BUCKETS if attr and attr.default_buckets then @@ -193,12 +197,12 @@ function _M.http_init(prometheus_enabled_in_stream) metrics.latency = prometheus:histogram("http_latency", "HTTP request latency in milliseconds per service in APISIX", {"type", "route", "service", "consumer", "node", unpack(extra_labels("http_latency"))}, - buckets, exptime) + buckets, latency_metrics_exptime) metrics.bandwidth = prometheus:counter("bandwidth", "Total bandwidth in bytes consumed 
per service in APISIX", {"type", "route", "service", "consumer", "node", unpack(extra_labels("bandwidth"))}, - exptime) + bandwidth_metrics_exptime) if prometheus_enabled_in_stream then init_stream_metrics() diff --git a/apisix/plugins/workflow.lua b/apisix/plugins/workflow.lua index bf5221dd1d91..e41679ba0c03 100644 --- a/apisix/plugins/workflow.lua +++ b/apisix/plugins/workflow.lua @@ -15,7 +15,6 @@ -- limitations under the License. -- local core = require("apisix.core") -local limit_count = require("apisix.plugins.limit-count.init") local expr = require("resty.expr.v1") local ipairs = ipairs @@ -93,23 +92,22 @@ local function exit(conf) end -local function rate_limit(conf, ctx) - return limit_count.rate_limit(conf, ctx, "limit-count", 1) -end - local support_action = { ["return"] = { handler = exit, check_schema = check_return_schema, - }, - ["limit-count"] = { - handler = rate_limit, - check_schema = limit_count.check_schema, } } +function _M.register(plugin_name, handler, check_schema) + support_action[plugin_name] = { + handler = handler, + check_schema = check_schema + } +end + function _M.check_schema(conf) local ok, err = core.schema.check(schema, conf) if not ok then diff --git a/ci/common.sh b/ci/common.sh index 8c8a40435e86..1439242fff3b 100644 --- a/ci/common.sh +++ b/ci/common.sh @@ -182,7 +182,7 @@ GRPC_SERVER_EXAMPLE_VER=20210819 linux_get_dependencies () { apt update - apt install -y cpanminus build-essential libncurses5-dev libreadline-dev libssl-dev perl libpcre3 libpcre3-dev libldap2-dev + apt install -y cpanminus build-essential libncurses5-dev libreadline-dev libssl-dev perl libpcre3 libpcre3-dev apt-get install -y libyaml-dev wget https://github.com/mikefarah/yq/releases/download/3.4.1/yq_linux_amd64 -O /usr/bin/yq && sudo chmod +x /usr/bin/yq } diff --git a/ci/linux-install-openresty.sh b/ci/linux-install-openresty.sh index f55bb114095f..465df32bb694 100755 --- a/ci/linux-install-openresty.sh +++ b/ci/linux-install-openresty.sh @@ -35,7 
+35,7 @@ sudo add-apt-repository -y "deb https://openresty.org/package/${arch_path}ubuntu sudo add-apt-repository -y "deb http://repos.apiseven.com/packages/${arch_path}debian bullseye main" sudo apt-get update -sudo apt-get install -y libldap2-dev openresty-pcre-dev openresty-zlib-dev build-essential gcc g++ cpanminus +sudo apt-get install -y openresty-pcre-dev openresty-zlib-dev build-essential gcc g++ cpanminus SSL_LIB_VERSION=${SSL_LIB_VERSION-openssl} ENABLE_FIPS=${ENABLE_FIPS:-"false"} diff --git a/ci/linux_openresty_common_runner.sh b/ci/linux_openresty_common_runner.sh index 1b73ceec92c6..6dda698ffefc 100755 --- a/ci/linux_openresty_common_runner.sh +++ b/ci/linux_openresty_common_runner.sh @@ -38,7 +38,15 @@ do_install() { # sudo apt-get install tree -y # tree deps + # The latest version of test-nginx is not compatible with the current set of tests with ---http2 + # due to this commit: https://github.com/openresty/test-nginx/commit/0ccd106cbe6878318e5a591634af8f1707c411a6 + # This change pins test-nginx to a commit before this one. git clone --depth 1 https://github.com/openresty/test-nginx.git test-nginx + cd test-nginx + git fetch --depth=1 origin ced30a31bafab6c68873efb17b6d80f39bcd95f5 + git checkout ced30a31bafab6c68873efb17b6d80f39bcd95f5 + cd .. + make utils mkdir -p build-cache diff --git a/conf/config.yaml.example b/conf/config.yaml.example index eea2335bc99d..8052beef6854 100644 --- a/conf/config.yaml.example +++ b/conf/config.yaml.example @@ -99,8 +99,9 @@ apisix: # - ip: 127.0.0.3 # If not set, default to `0.0.0.0`. # port: 9445 # enable_http3: true - # ssl_trusted_certificate: /path/to/ca-cert # Set the path to CA certificates used to verify client - # certificates in the PEM format. + ssl_trusted_combined_path: /usr/local/apisix/conf/ssl_trusted_combined.pem # All the trusted certificates will be combined into a single file + #ssl_trusted_certificate: system # Specifies comma separated list of trusted CA. 
Value can be either "system"(for using system available ca certs) or + # a file path with trusted CA certificates in the PEM format ssl_protocols: TLSv1.2 TLSv1.3 # TLS versions supported. ssl_ciphers: ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384 ssl_session_tickets: false # If true, session tickets are used for SSL/TLS connections. @@ -595,24 +596,29 @@ plugin_attr: # Plugin attributes port: 9091 # Set the port. # metrics: # Create extra labels from nginx variables: https://nginx.org/en/docs/varindex.html # http_status: + # expire: 0 # The expiration time after which metrics are removed. unit: second. + # # 0 means the metrics will not expire # extra_labels: # - upstream_addr: $upstream_addr # - status: $upstream_status # The label name does not need to be the same as the variable name. # http_latency: + # expire: 0 # The expiration time after which metrics are removed. unit: second. + # # 0 means the metrics will not expire # extra_labels: # - upstream_addr: $upstream_addr # bandwidth: + # expire: 0 # The expiration time after which metrics are removed. unit: second. + # # 0 means the metrics will not expire # extra_labels: # - upstream_addr: $upstream_addr + # upstream_status: + # expire: 0 # The expiration time after which metrics are removed. unit: second. # default_buckets: # - 10 # - 50 # - 100 # - 200 # - 500 - # expire: 0 # The expiration time after metrics become inactive, unit: second. - # 0 means the metrics will not expire - # If you need to set the expiration time, it is recommended to use 600, which is 10 minutes. server-info: # Plugin: server-info report_ttl: 60 # Set the TTL in seconds for server info in etcd. # Maximum: 86400. Minimum: 3. 
diff --git a/docs/en/latest/admin-api.md b/docs/en/latest/admin-api.md index c7a236da77d0..e6099e65a2c4 100644 --- a/docs/en/latest/admin-api.md +++ b/docs/en/latest/admin-api.md @@ -913,25 +913,25 @@ Prerequisite: Consumer `jack` has been created. Create the `key-auth` Credential for consumer `jack`: - ```shell - curl http://127.0.0.1:9180/apisix/admin/consumers/jack/credentials/auth-one \ - -H "X-API-KEY: $admin_key" -X PUT -i -d ' - { - "plugins": { - "key-auth": { - "key": "auth-one" - } +```shell +curl http://127.0.0.1:9180/apisix/admin/consumers/jack/credentials/auth-one \ +-H "X-API-KEY: $admin_key" -X PUT -i -d ' +{ + "plugins": { + "key-auth": { + "key": "auth-one" } - }' - ``` + } +}' +``` - ``` - HTTP/1.1 200 OK - Date: Thu, 26 Dec 2019 08:17:49 GMT - ... +``` +HTTP/1.1 200 OK +Date: Thu, 26 Dec 2019 08:17:49 GMT +... - {"key":"\/apisix\/consumers\/jack\/credentials\/auth-one","value":{"update_time":1666260780,"plugins":{"key-auth":{"key":"auth-one"}},"create_time":1666260780}} - ``` +{"key":"\/apisix\/consumers\/jack\/credentials\/auth-one","value":{"update_time":1666260780,"plugins":{"key-auth":{"key":"auth-one"}},"create_time":1666260780}} +``` ## Upstream @@ -1278,7 +1278,7 @@ For notes on ID syntax please refer to: [ID Syntax](#quick-note-on-id-syntax) | labels | False | Match Rules | Attributes of the resource specified as key-value pairs. | {"version":"v2","build":"16","env":"production"} | | type | False | Auxiliary | Identifies the type of certificate, default `server`. | `client` Indicates that the certificate is a client certificate, which is used when APISIX accesses the upstream; `server` Indicates that the certificate is a server-side certificate, which is used by APISIX when verifying client requests. | | status | False | Auxiliary | Enables the current SSL. Set to `1` (enabled) by default. 
| `1` to enable, `0` to disable | -| ssl_protocols | False | An array of ssl protocols | It is used to control the SSL/TLS protocol version used between servers and clients. See [SSL Protocol](./ssl-protocol.md) for more examples. | `["TLSv1.2", "TLSv2.3"]` | +| ssl_protocols | False | An array of ssl protocols | It is used to control the SSL/TLS protocol version used between servers and clients. See [SSL Protocol](./ssl-protocol.md) for more examples. | `["TLSv1.1", "TLSv1.2", "TLSv1.3"]` | Example Configuration: diff --git a/docs/en/latest/plugins/jwt-auth.md b/docs/en/latest/plugins/jwt-auth.md index 1fcf56ca8cee..f9e5c820edf5 100644 --- a/docs/en/latest/plugins/jwt-auth.md +++ b/docs/en/latest/plugins/jwt-auth.md @@ -51,7 +51,7 @@ For Consumer/Credential: | public_key | string | True if `RS256` or `ES256` is set for the `algorithm` attribute. | | | RSA or ECDSA public key. This field supports saving the value in Secret Manager using the [APISIX Secret](../terminology/secret.md) resource. | | algorithm | string | False | HS256 | ["HS256", "HS512", "RS256", "ES256"] | Encryption algorithm. | | exp | integer | False | 86400 | [1,...] | Expiry time of the token in seconds. | -| base64_secret | boolean | False | false | | If true, encode the secret with base64. | +| base64_secret | boolean | False | false | | Set to true if the secret is base64 encoded. | | lifetime_grace_period | integer | False | 0 | [0,...] | Grace period in seconds. Used to account for clock skew between the server generating the JWT and the server validating the JWT. | | key_claim_name | string | False | key | | The claim in the JWT payload that identifies the associated secret, such as `iss`. | @@ -64,8 +64,9 @@ For Route: | header | string | False | authorization | The header to get the token from. | | query | string | False | jwt | The query string to get the token from. Lower priority than header. | | cookie | string | False | jwt | The cookie to get the token from. 
Lower priority than query. | -| hide_credentials | boolean | False | false | If true, do not pass the header, query, or cookie with JWT to Upstream services. | -| anonymous_consumer | string | False | false | Anonymous Consumer name. If configured, allow anonymous users to bypass the authentication. | +| hide_credentials| boolean | False | false | If true, do not pass the header, query, or cookie with JWT to Upstream services. | +| key_claim_name | string | False | key | The name of the JWT claim that contains the user key (corresponds to Consumer's key attribute). | +| anonymous_consumer | string | False | false | Anonymous Consumer name. If configured, allow anonymous users to bypass the authentication. | You can implement `jwt-auth` with [HashiCorp Vault](https://www.vaultproject.io/) to store and fetch secrets and RSA keys pairs from its [encrypted KV engine](https://developer.hashicorp.com/vault/docs/secrets/kv) using the [APISIX Secret](../terminology/secret.md) resource. diff --git a/docs/en/latest/plugins/limit-count.md b/docs/en/latest/plugins/limit-count.md index 4c019332a899..c5c2250173e5 100644 --- a/docs/en/latest/plugins/limit-count.md +++ b/docs/en/latest/plugins/limit-count.md @@ -344,6 +344,65 @@ Server: APISIX web server {"error_msg":"Requests are too frequent, please try again later."} ``` +### Customize Rate Limiting Headers + +The following example demonstrates how you can use plugin metadata to customize the rate limiting response header names, which are by default `X-RateLimit-Limit`, `X-RateLimit-Remaining`, and `X-RateLimit-Reset`. 
+
+Configure the plugin metadata for this plugin and update the headers:
+
+```shell
+curl "http://127.0.0.1:9180/apisix/admin/plugin_metadata/limit-count" -X PUT \
+  -H "X-API-KEY: ${ADMIN_API_KEY}" \
+  -d '{
+    "limit_header": "X-Custom-RateLimit-Limit",
+    "remaining_header": "X-Custom-RateLimit-Remaining",
+    "reset_header": "X-Custom-RateLimit-Reset"
+  }'
+```
+
+Create a route with `limit-count` plugin that allows for a quota of 1 within a 30-second window per remote address:
+
+```shell
+curl "http://127.0.0.1:9180/apisix/admin/routes" -X PUT \
+  -H "X-API-KEY: ${ADMIN_API_KEY}" \
+  -d '{
+    "id": "limit-count-route",
+    "uri": "/get",
+    "plugins": {
+      "limit-count": {
+        "count": 1,
+        "time_window": 30,
+        "rejected_code": 429,
+        "key_type": "var",
+        "key": "remote_addr",
+        "window_type": "sliding"
+      }
+    },
+    "upstream": {
+      "type": "roundrobin",
+      "nodes": {
+        "httpbin.org:80": 1
+      }
+    }
+  }'
+```
+
+Send a request to verify:
+
+```shell
+curl -i "http://127.0.0.1:9080/get"
+```
+
+You should receive an `HTTP/1.1 200 OK` response and see the following headers:
+
+```text
+X-Custom-RateLimit-Limit: 1
+X-Custom-RateLimit-Remaining: 0
+X-Custom-RateLimit-Reset: 28
+```
+
 ## Delete Plugin
 
 To remove the `limit-count` Plugin, you can delete the corresponding JSON configuration from the Plugin configuration. APISIX will automatically reload and you do not have to restart for this to take effect.
diff --git a/docs/en/latest/plugins/openid-connect.md b/docs/en/latest/plugins/openid-connect.md
index 4483abd656dd..0ee7d7eb86b4 100644
--- a/docs/en/latest/plugins/openid-connect.md
+++ b/docs/en/latest/plugins/openid-connect.md
@@ -70,7 +70,7 @@ description: OpenID Connect allows the client to obtain user information from th
 | proxy_opts.http_proxy_authorization | string | False | | Basic [base64 username:password] | Default `Proxy-Authorization` header value to be used with `http_proxy`. Can be overridden with custom `Proxy-Authorization` request header.
| | proxy_opts.https_proxy_authorization | string | False | | Basic [base64 username:password] | Default `Proxy-Authorization` header value to be used with `https_proxy`. Cannot be overridden with custom `Proxy-Authorization` request header since with with HTTPS the authorization is completed when connecting. | | proxy_opts.no_proxy | string | False | | | Comma separated list of hosts that should not be proxied. | -| authorization_params | object | False | | | Additional parameters to send in the in the request to the authorization endpoint. | +| authorization_params | object | False | | | Additional parameters to send in the request to the authorization endpoint. | | client_rsa_private_key | string | False | | | Client RSA private key used to sign JWT. | | client_rsa_private_key_id | string | False | | | Client RSA private key ID used to compute a signed JWT. | | client_jwt_assertion_expires_in | integer | False | 60 | | Life duration of the signed JWT in seconds. | @@ -101,7 +101,7 @@ Tutorial: [Use Keycloak with API Gateway to secure APIs](https://apisix.apache.o ::: -This plugin offers two scenorios: +This plugin offers two scenarios: 1. Authentication between Services: Set `bearer_only` to `true` and configure the `introspection_endpoint` or `public_key` attribute. In this scenario, APISIX will reject requests without a token or invalid token in the request header. diff --git a/docs/en/latest/plugins/prometheus.md b/docs/en/latest/plugins/prometheus.md index 01301c284e97..3200fb48a09c 100644 --- a/docs/en/latest/plugins/prometheus.md +++ b/docs/en/latest/plugins/prometheus.md @@ -96,6 +96,18 @@ plugin_attr: - 505 ``` +### Specifying `expire` + +`expire` sets the expiration time of `apisix_http_status`, `apisix_bandwidth`, and `apisix_http_latency` metrics in seconds. When set to 0, metrics will not expire. 
+ +Here is a configuration example: + +```yaml title="conf/config.yaml" +plugin_attr: + prometheus: + expire: 86400 +``` + ## Metrics endpoint This Plugin will add the metrics endpoint `/apisix/prometheus/metrics` or your custom export URI for exposing the metrics. @@ -257,6 +269,7 @@ The following metrics are exported by the `prometheus` Plugin: | Name | Description | |----------|-------------------------------------------------------------------------------------------------------------------------------------| | type | Value can be one of `apisix`, `upstream`, or `request`. This translates to latency caused by APISIX, Upstream, or both (their sum). | + | route | `route_id` of the matched Route with request. Defaults to an empty string if the Routes don't match. | | service | `service_id` of the Route matching the request. If the Route does not have a `service_id` configured, it defaults to `$host`. | | consumer | `consumer_name` of the Consumer matching the request. Defaults to an empty string if it does not match. | | node | IP address of the Upstream node. | diff --git a/docs/en/latest/plugins/traffic-split.md b/docs/en/latest/plugins/traffic-split.md index 3a206a92405c..9eb00d65127f 100644 --- a/docs/en/latest/plugins/traffic-split.md +++ b/docs/en/latest/plugins/traffic-split.md @@ -63,7 +63,7 @@ The traffic ratio between Upstream services may be less accurate since round rob :::note -Some of the configuration fields supported in Upstream are not supported in weighted_upstreams.upstream. These fields are `service_name`, `discovery_type`, `checks`, `retries`, `retry_timeout`, `desc`, `scheme`, `labels`, `create_time`, and `update_time`. +Some of the configuration fields supported in Upstream are not supported in weighted_upstreams.upstream. These fields are `service_name`, `discovery_type`, `checks`, `retries`, `retry_timeout`, `desc`, `labels`, `create_time`, and `update_time`. 
As a workaround, you can create an Upstream object and configure it in `weighted_upstreams.upstream_id` to achieve these functionalities. diff --git a/docs/en/latest/terminology/credential.md b/docs/en/latest/terminology/credential.md index 560d4314a704..21b263f38685 100644 --- a/docs/en/latest/terminology/credential.md +++ b/docs/en/latest/terminology/credential.md @@ -123,14 +123,14 @@ admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"/ 4. Test. -Test the request with the `auth-one` and `auth-two` keys, and they both respond correctly. + Test the request with the `auth-one` and `auth-two` keys, and they both respond correctly. ```shell curl http://127.0.0.1:9080/hello -H 'apikey: auth-one' -I curl http://127.0.0.1:9080/hello -H 'apikey: auth-two' -I ``` -Enable the `limit-count` plugin for the Consumer. + Enable the `limit-count` plugin for the Consumer. ```shell curl http://127.0.0.1:9180/apisix/admin/consumers \ @@ -148,4 +148,4 @@ Enable the `limit-count` plugin for the Consumer. }' ``` -Requesting the route more than 3 times in a row with each of the two keys, the test returns `503` and the request is restricted. + Requesting the route more than 3 times in a row with each of the two keys, the test returns `503` and the request is restricted. 
diff --git a/docs/zh/latest/admin-api.md b/docs/zh/latest/admin-api.md index f5cd5b144b05..0bc5a963c21b 100644 --- a/docs/zh/latest/admin-api.md +++ b/docs/zh/latest/admin-api.md @@ -926,25 +926,25 @@ Credential 对象 JSON 配置示例: 创建 Credential,并启用认证插件 `key-auth`: - ```shell - curl http://127.0.0.1:9180/apisix/admin/consumers/jack/credentials/auth-one \ - -H "X-API-KEY: $admin_key" -X PUT -i -d ' - { - "plugins": { - "key-auth": { - "key": "auth-one" - } +```shell +curl http://127.0.0.1:9180/apisix/admin/consumers/jack/credentials/auth-one \ +-H "X-API-KEY: $admin_key" -X PUT -i -d ' +{ + "plugins": { + "key-auth": { + "key": "auth-one" } - }' - ``` + } +}' +``` - ``` - HTTP/1.1 200 OK - Date: Thu, 26 Dec 2019 08:17:49 GMT - ... +``` +HTTP/1.1 200 OK +Date: Thu, 26 Dec 2019 08:17:49 GMT +... - {"key":"\/apisix\/consumers\/jack\/credentials\/auth-one","value":{"update_time":1666260780,"plugins":{"key-auth":{"key":"auth-one"}},"create_time":1666260780}} - ``` +{"key":"\/apisix\/consumers\/jack\/credentials\/auth-one","value":{"update_time":1666260780,"plugins":{"key-auth":{"key":"auth-one"}},"create_time":1666260780}} +``` ## Upstream @@ -1278,7 +1278,7 @@ SSL 资源请求地址:/apisix/admin/ssls/{id} | labels | 否 | 匹配规则 | 标识附加属性的键值对。 | {"version":"v2","build":"16","env":"production"} | | type | 否 | 辅助 | 标识证书的类型,默认值为 `server`。 | `client` 表示证书是客户端证书,APISIX 访问上游时使用;`server` 表示证书是服务端证书,APISIX 验证客户端请求时使用。 | | status | 否 | 辅助 | 当设置为 `1` 时,启用此 SSL,默认值为 `1`。 | `1` 表示启用,`0` 表示禁用 | -| ssl_protocols | 否 | tls 协议字符串数组 | 用于控制服务器与客户端之间使用的 SSL/TLS 协议版本。更多的配置示例,请参考[SSL 协议](./ssl-protocol.md)。 | | +| ssl_protocols | 否 | tls 协议字符串数组 | 用于控制服务器与客户端之间使用的 SSL/TLS 协议版本。更多的配置示例,请参考[SSL 协议](./ssl-protocol.md)。 | `["TLSv1.1", "TLSv1.2", "TLSv1.3"]` | SSL 对象 JSON 配置示例: diff --git a/docs/zh/latest/plugins/jwt-auth.md b/docs/zh/latest/plugins/jwt-auth.md index cafa78f67bad..159a2ef783bc 100644 --- a/docs/zh/latest/plugins/jwt-auth.md +++ b/docs/zh/latest/plugins/jwt-auth.md @@ -61,6 +61,7 @@ Route 端: | query | 
string | 否 | jwt | 设置我们从哪个 query string 获取 token,优先级低于 header。 | | cookie | string | 否 | jwt | 设置我们从哪个 cookie 获取 token,优先级低于 query。 | | hide_credentials | boolean | 否 | false | 如果为 true ,则不要将 header、query 或带有 JWT 的 cookie 传递给上游服务。 | +| key_claim_name | string | 否 | key | 包含用户密钥(对应消费者的密钥属性)的 JWT 声明的名称。| | anonymous_consumer | string | 否 | false | 匿名消费者名称。如果已配置,则允许匿名用户绕过身份验证。 | 您可以使用 [HashiCorp Vault](https://www.vaultproject.io/) 实施 `jwt-auth`,以从其[加密的 KV 引擎](https://developer.hashicorp.com/vault/docs/secrets/kv) 使用 [APISIX Secret](../terminology/secret.md) 资源。 diff --git a/docs/zh/latest/plugins/prometheus.md b/docs/zh/latest/plugins/prometheus.md index f6809a26eb31..39720704d289 100644 --- a/docs/zh/latest/plugins/prometheus.md +++ b/docs/zh/latest/plugins/prometheus.md @@ -43,7 +43,7 @@ description: 本文将介绍 API 网关 Apache APISIX 如何通过 prometheus ::: -## 如何修改暴露指标的 uri +### 如何修改暴露指标的 `export_uri` 你可以在配置文件 `./conf/config.yaml` 的 `plugin_attr` 列表下修改默认的 URI。 @@ -78,6 +78,18 @@ plugin_attr: - 505 ``` +### 如何修改指标的 `expire` + +`expire` 用于设置 `apisix_http_status`、`apisix_bandwidth` 和 `apisix_http_latency` 指标的过期时间(以秒为单位)。当设置为 0 时,指标不会过期。 + +配置示例如下: + +```yaml title="conf/config.yaml" +plugin_attr: + prometheus: + expire: 86400 +``` + ## API `prometheus` 插件会增加 `/apisix/prometheus/metrics` 接口或者你自定义的 URI 来暴露其指标信息。 @@ -231,6 +243,7 @@ scrape_configs: | 名称 | 描述 | | -------------| --------------------------------------------------------------------------------------- | | type | 该值可以是 `apisix`、`upstream` 和 `request`,分别表示耗时的来源是 APISIX、上游以及两者总和。 | + | route | 与请求匹配的路由的 `route_id`,如果未匹配,则默认为空字符串。 | | service | 与请求匹配的路由 的 `service_id`。当路由缺少 `service_id` 时,则默认为 `$host`。 | | consumer | 与请求匹配的消费者的 `consumer_name`。未匹配,则默认为空字符串。 | | node | 上游节点的 IP 地址。 | diff --git a/docs/zh/latest/plugins/traffic-split.md b/docs/zh/latest/plugins/traffic-split.md index 0cd3874d039b..1a725ae9834b 100644 --- a/docs/zh/latest/plugins/traffic-split.md +++ b/docs/zh/latest/plugins/traffic-split.md @@ -61,7 +61,7 @@ 
description: 本文介绍了 Apache APISIX traffic-split 插件的相关操作 :::note 注意 -目前 `weighted_upstreams.upstream` 的配置不支持 `service_name`、`discovery_type`、`checks`、`retries`、`retry_timeout`、`desc`、`scheme`、`labels`、`create_time` 和 `update_time` 等字段。如果你需要使用这些字段,可以在创建上游对象时指定这些字段,然后在该插件中配置 `weighted_upstreams.upstream_id` 属性即可。 +目前 `weighted_upstreams.upstream` 的配置不支持 `service_name`、`discovery_type`、`checks`、`retries`、`retry_timeout`、`desc`、`labels`、`create_time` 和 `update_time` 等字段。如果你需要使用这些字段,可以在创建上游对象时指定这些字段,然后在该插件中配置 `weighted_upstreams.upstream_id` 属性即可。 ::: diff --git a/docs/zh/latest/terminology/credential.md b/docs/zh/latest/terminology/credential.md index 4d183620240f..48ba4ff181e4 100644 --- a/docs/zh/latest/terminology/credential.md +++ b/docs/zh/latest/terminology/credential.md @@ -124,14 +124,14 @@ admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"/ 4. 测试插件 -分别使用 `auth-one` 和 `auth-two` 两个 key 来测试请求,都响应正常。 + 分别使用 `auth-one` 和 `auth-two` 两个 key 来测试请求,都响应正常。 ```shell curl http://127.0.0.1:9080/hello -H 'apikey: auth-one' -I curl http://127.0.0.1:9080/hello -H 'apikey: auth-two' -I ``` -为该 Consumer 启用 `limit-count` 插件。 + 为该 Consumer 启用 `limit-count` 插件。 ```shell curl http://127.0.0.1:9180/apisix/admin/consumers \ @@ -149,4 +149,4 @@ admin_key=$(yq '.deployment.admin.admin_key[0].key' conf/config.yaml | sed 's/"/ }' ``` -分别使用这两个 key 连续 3 次以上请求该路由,测试返回 `503`,请求被限制。 + 分别使用这两个 key 连续 3 次以上请求该路由,测试返回 `503`,请求被限制。 diff --git a/docs/zh/latest/terminology/secret.md b/docs/zh/latest/terminology/secret.md index 810abb7ddf6f..22a3f4902ed8 100644 --- a/docs/zh/latest/terminology/secret.md +++ b/docs/zh/latest/terminology/secret.md @@ -173,7 +173,7 @@ secrets: :::tip -它现在支持使用 [`namespace` 字段](../admin-api.md#secret-config-body-requset-parameters] 设置 [HashiCorp Vault Enterprise](https://developer.hashicorp.com/vault/docs/enterprise/namespaces#vault-api-and-namespaces) 和 HCP Vault 所支持的多租户命名空间概念。 +它现在支持使用 [`namespace` 
字段](../admin-api.md#secret-config-body-requset-parameters) 设置 [HashiCorp Vault Enterprise](https://developer.hashicorp.com/vault/docs/enterprise/namespaces#vault-api-and-namespaces) 和 HCP Vault 所支持的多租户命名空间概念。 ::: diff --git a/t/APISIX.pm b/t/APISIX.pm index 50f7cfaecab6..2e1724a12aa1 100644 --- a/t/APISIX.pm +++ b/t/APISIX.pm @@ -465,6 +465,8 @@ _EOC_ $block->set_value("stream_config", $stream_config); } + my $custom_trusted_cert = $block->custom_trusted_cert // 'cert/apisix.crt'; + my $stream_server_config = $block->stream_server_config // <<_EOC_; listen 2005 ssl; ssl_certificate cert/apisix.crt; @@ -737,7 +739,7 @@ _EOC_ http3 off; ssl_certificate cert/apisix.crt; ssl_certificate_key cert/apisix.key; - lua_ssl_trusted_certificate cert/apisix.crt; + lua_ssl_trusted_certificate $custom_trusted_cert; ssl_protocols TLSv1.1 TLSv1.2 TLSv1.3; diff --git a/t/cli/test_stream_config.sh b/t/cli/test_stream_config.sh index baab138a0c99..2843b5c5d13a 100755 --- a/t/cli/test_stream_config.sh +++ b/t/cli/test_stream_config.sh @@ -78,6 +78,7 @@ echo " apisix: ssl: ssl_trusted_certificate: t/certs/mtls_ca.crt + ssl_trusted_combined_path: t/certs/mtls_ca_combined.crt proxy_mode: http&stream stream_proxy: tcp: @@ -86,7 +87,7 @@ apisix: make init -if ! grep "t/certs/mtls_ca.crt;" conf/nginx.conf > /dev/null; then +if ! grep "t/certs/mtls_ca_combined.crt;" conf/nginx.conf > /dev/null; then echo "failed: failed to set trust certificate" exit 1 fi diff --git a/t/cli/test_upstream_mtls.sh b/t/cli/test_upstream_mtls.sh index 0318a4539a27..b2b366aa0635 100755 --- a/t/cli/test_upstream_mtls.sh +++ b/t/cli/test_upstream_mtls.sh @@ -149,3 +149,63 @@ if ! 
grep -E 'self-signed certificate' logs/error.log; then fi echo "passed: when proxy_ssl_verify is enabled and ssl_trusted_certificate is wrong ca cert, got 502" + + +# test combined proxy_ssl_trusted_certificate success +echo ' +apisix: + ssl: + ssl_trusted_certificate: system, t/certs/apisix.crt +nginx_config: + http_configuration_snippet: | + server { + listen 1983 ssl; + server_name test.com; + ssl_certificate ../t/certs/apisix.crt; + ssl_certificate_key ../t/certs/apisix.key; + location /hello { + return 200 "hello world"; + } + } + http_server_configuration_snippet: | + proxy_ssl_verify on; +' > conf/config.yaml + +rm logs/error.log || true +make init +make run +sleep 0.1 + +curl -k -i http://127.0.0.1:9180/apisix/admin/routes/1 -H 'X-API-KEY: edd1c9f034335f136f87ad84b625c8f1' -X PUT -d ' +{ + "uri": "/hello", + "upstream": { + "pass_host": "rewrite", + "nodes": { + "127.0.0.1:1983": 1 + }, + "scheme": "https", + "hash_on": "vars", + "upstream_host": "test.com", + "type": "roundrobin", + "tls": { + "client_cert": "-----BEGIN 
CERTIFICATE-----\nMIIEojCCAwqgAwIBAgIJAK253pMhgCkxMA0GCSqGSIb3DQEBCwUAMFYxCzAJBgNV\nBAYTAkNOMRIwEAYDVQQIDAlHdWFuZ0RvbmcxDzANBgNVBAcMBlpodUhhaTEPMA0G\nA1UECgwGaXJlc3R5MREwDwYDVQQDDAh0ZXN0LmNvbTAgFw0xOTA2MjQyMjE4MDVa\nGA8yMTE5MDUzMTIyMTgwNVowVjELMAkGA1UEBhMCQ04xEjAQBgNVBAgMCUd1YW5n\nRG9uZzEPMA0GA1UEBwwGWmh1SGFpMQ8wDQYDVQQKDAZpcmVzdHkxETAPBgNVBAMM\nCHRlc3QuY29tMIIBojANBgkqhkiG9w0BAQEFAAOCAY8AMIIBigKCAYEAyCM0rqJe\ncvgnCfOw4fATotPwk5Ba0gC2YvIrO+gSbQkyxXF5jhZB3W6BkWUWR4oNFLLSqcVb\nVDPitz/Mt46Mo8amuS6zTbQetGnBARzPLtmVhJfoeLj0efMiOepOSZflj9Ob4yKR\n2bGdEFOdHPjm+4ggXU9jMKeLqdVvxll/JiVFBW5smPtW1Oc/BV5terhscJdOgmRr\nabf9xiIis9/qVYfyGn52u9452V0owUuwP7nZ01jt6iMWEGeQU6mwPENgvj1olji2\nWjdG2UwpUVp3jp3l7j1ekQ6mI0F7yI+LeHzfUwiyVt1TmtMWn1ztk6FfLRqwJWR/\nEvm95vnfS3Le4S2ky3XAgn2UnCMyej3wDN6qHR1onpRVeXhrBajbCRDRBMwaNw/1\n/3Uvza8QKK10PzQR6OcQ0xo9psMkd9j9ts/dTuo2fzaqpIfyUbPST4GdqNG9NyIh\n/B9g26/0EWcjyO7mYVkaycrtLMaXm1u9jyRmcQQI1cGrGwyXbrieNp63AgMBAAGj\ncTBvMB0GA1UdDgQWBBSZtSvV8mBwl0bpkvFtgyiOUUcbszAfBgNVHSMEGDAWgBSZ\ntSvV8mBwl0bpkvFtgyiOUUcbszAMBgNVHRMEBTADAQH/MB8GA1UdEQQYMBaCCHRl\nc3QuY29tggoqLnRlc3QuY29tMA0GCSqGSIb3DQEBCwUAA4IBgQAHGEul/x7ViVgC\ntC8CbXEslYEkj1XVr2Y4hXZXAXKd3W7V3TC8rqWWBbr6L/tsSVFt126V5WyRmOaY\n1A5pju8VhnkhYxYfZALQxJN2tZPFVeME9iGJ9BE1wPtpMgITX8Rt9kbNlENfAgOl\nPYzrUZN1YUQjX+X8t8/1VkSmyZysr6ngJ46/M8F16gfYXc9zFj846Z9VST0zCKob\nrJs3GtHOkS9zGGldqKKCj+Awl0jvTstI4qtS1ED92tcnJh5j/SSXCAB5FgnpKZWy\nhme45nBQj86rJ8FhN+/aQ9H9/2Ib6Q4wbpaIvf4lQdLUEcWAeZGW6Rk0JURwEog1\n7/mMgkapDglgeFx9f/XztSTrkHTaX4Obr+nYrZ2V4KOB4llZnK5GeNjDrOOJDk2y\nIJFgBOZJWyS93dQfuKEj42hA79MuX64lMSCVQSjX+ipR289GQZqFrIhiJxLyA+Ve\nU/OOcSRr39Kuis/JJ+DkgHYa/PWHZhnJQBxcqXXk1bJGw9BNbhM=\n-----END CERTIFICATE-----\n", + "client_key": 
"HrMHUvE9Esvn7GnZ+vAynaIg/8wlB3r0zm0htmnwofYLp1VhtLeU1EmMJkPLUkcn2+v6Uav9bOQMkPdSpUMcEpRplLSXs+miu+B07CCUnsMrXkfQawRMIoePJZSLH5+PfDAlWIK2Q+ruYnjtnpNziiAtXf/HRRwHHMelnfedXqD8kn3Toe46ZYyBir99o/r/do5ludez5oY7qhOgNSWKCfnZE8Ip82g7t7n7jsAf5tTdRulUGBQ4ITV2zM3cxpD0PWnWMbOfygZIDxR8QU9wj8ihuFL1s1NM8PplcKbUxC4QlrSN+ZNkr6mxy+akPmXlABwcFIiSK7c/xvU1NjoILnhPpL6aRpbhmQX/a1XUCl+2INlQ5QbXbTN+JmDBhrU9NiYecRJMfmA1N/lhwgt01tUnxMoAhfpUVgEbZNalCJt+wn8TC+Xp3DZ0bCpXrfzqsprGKan9qC3mCN03jj50JyGFL+xt8wX8D0uaIsu4cVk4et7kbTIj9rvucsh0cfKn8va8/cdjw5QhFSRBkW5Vuz9NwvzVQ6DHWs1a8VZbN/hERxcbWNk/p1VgGLHioqZZTOd5CYdN4dGjnksjXa0Z77mTSoNx3U79FQPAgUMEA1phnO/jdryM3g5M+UvESXA/75we435xg5tLRDvNwJw2NlosQsGY7fzUi2+HFo436htydRFv8ChHezs2v99mjfCUijrWYoeJ5OB2+KO9XiOIz7gpqhTef9atajSYRhxhcwdCVupC1PrPGn9MzhdQLeqQCJj3kyazPfO3xPkNpMAqd2lXnLR4HGd9SBHe75Sik3jW9W1sUqrn2fDjyWd0jz57pl4qyHjbzjd3uE5qbH/QuYZBIzI9tEn7tj12brWrwHsMt+/4M7zp8Opsia64V3Y7ICLIi7fiYfr70RujXyn8Ik5TB1QC98JrnDjgQlTPDhHLk1r8XhZXqIIg6DmaN7UUjIuZhKxARTs8b5WMPvVV4GownlPN28sHIMAX84BNbP0597Fxipwp2oTMFKTzvxm+QUtbWvIPzF3n25L4sPCyUx5PRIRCJ5kDNQfhiN6o3Y/fAY0PyxI06PWYoNvSn3uO24XNXbF3RkpwKtV8n/iNo5dyM1VqFPWDuKRSLHY7E4lQTdqx4/n+rrnoH6SlmQ0zwxwxBeAz/TvkmiW7WLe3C5cUDKF9yYwvAe8ek4oTR3GxaiDWjNFsu7DUoDjpH5f3IxrX2IN4FyzE47hMeg4muPov7h74WwosqgnfmwoAEFV4+ldmzpdSjghZoF2M9EZI24Xa9rVdd6j2t6IjX20oL+SLQL/9HppMi1nC+3Zby1WOvuTR4g8K1QP75OeY4xTD1iEAXpd0WOX7C3ndceVF4THLCI4Imcf9FH9MBrE55FPMEsAk54HiAoyMd6tgqv/akRqmuAmnSsrWALhqiCnAVh2uzk644gSzmsFbh7zF33qrcafPpU4PxUEvpqbLz7asoNUDf4YB4gCcgZx30eK/w9FpMaLveiNq77EW7qcvJQPcjZ4uLaKkQVODJsd+1CbZF6370aiLxouXLFT3eQI7Ovu6be8D3MmazRPgCV36qzMwONqrXE/JbMFMKe5l1e4Y6avMejrj43BMgGo2u8LimCWkBeNwqIjH7plwbpDKo4OKZVbrzSZ0hplUDd/jMrb6Ulbc04uMeEigehrhSsZ0ZwoDiZcf/fDIclaTGNMl40N2wBiqdnw9uKTqD1YxzqDQ7vgiXG55ae31lvevPTgk/lLvpwzlyitjGs+6LJPu/wSCKA2VIyhJfK+8EnItEKjBUrXdOklBdOmTpUpdQ+zfd2NCrFRDJZKl26Uh412adFEkqY37O/0FbSCpAIsUCvaItcqK7qh5Rq26hVR0nS1MRs+MjGBzGqudXPQZHy+Yp7AlAa5UgJUaAwn2b/id6kNdv6hNWqSzHvOAVKdgC9/j0yN1VJD92+IoJTTiXsMQELcgm1Ehj2GZpTHu+GPuaOovHBnZMq/Kg4nUS+i
g86X01jV28uGGtglERf1HqVQpdZwbrXtUqH0cbjlvUwQ1j7zp9yhs+0ta87v0I+elAZhXzqvehMiLJu2o9/k2+4dPvkEscduHOU6jZqe8ndNEMQWiaZEYJKxNWPTaQ6nZSlFTsT7GlENeJlFzlw8QkyRJPMBWkXuaymQUcu43Pm+gAjinHSAGUeaSaIdL2Yb0M88qNwG+UlNEslx/J37pA1oMJyxb7XOeySxkP7dXi5JvygLIfkEA3ENC4NHU9nsUvTvp5AZidZCxxtYCNYfjY6xyrlfnE+V+us31LA9Wc/tKa4y3Ldj30IT2sssUrdZ0l7UbwfcZT42ZeJpxDofpZ2rjgswTs0Upr72VuOCzjpKa1CJwxhVVtPVJJovcXp4bsNPJers+yIYfTl1aqaf4qSzU5OL/cze2e6qAh7622zEa/q6klpUx9b1f8YGlQhjQcy3++JnwwsHR71Ofh9woXq57LDCHFA6f95zdkadDDhwgRcvWVnbA2Szps8iJv7h2m25qZPFtN6puJj3RlmT6hnfBeYCjpfy/2TxyCqm6bG3HZxGuhzWs2ZGxzsjBJ3ueO1pAOjtDhkRqzoWt/v2o367IYP7iTcp4pi+qJHIWCN1ElDI0BVoZ+Xq9iLfKmjrjcxQ7EYGHfQDE52QaCQ3nMB7oiqncZ1Q5n/ICDHha9RkPP9V9vWiJIZwgOJtPfGzsGQ9AigH6po65IJyxmY5upuhg7DTmsLQnKC/fwjkBF9So/4cdZuqDbxGrDDOgpL7uvWXANRNMrqYoMFUG7M90QJHj7NgSL+B6mSNwa9ctTua7Estkoyvavda3Bl3qHQ0Hva5gjSg6elL6PQ4ksqhESvjztuy58qk9aZHsQB8ZKRu8VSay40a/3ueX6bnd0hwsYy42aWJR1z+uie3yTWPuG2JZ7DjkgDduWdC+cxfvTVTG58E5luafy5j/t85UVoB2nr46VHlt/vg4M9G8/4F0d0Y6ThI4/XTfg6l1vq5ouzhQxd+SRwnuXieZy+4/2XKJnrV6t+JbNAvwdGR1V9VPLlnb+IqpvOCYyL1YLYSlNubb9HU0wxVPppGSpJLmi+njQzl71PBgMm6QV9j889wPUo387fRbJjXbSSVLon61xk/4dNvjsgfv9rF+/qEML0q4tXBJVOJ1iwKjn84Nk6vdHM3Hu8knp0hYFa4AECYKInSTVXajWAKFx4SOq8G8MA/0YlIN872LBjUm2GKs17wsJuWID+mSyVE5pV5gQ+r92YvPcC+yIvB8hTTaRclAP/KyJesDTA==" + } + } +}' + +sleep 1 + +code=$(curl -v -k -i -m 20 -o /dev/null -s -w %{http_code} http://127.0.0.1:9080/hello) + +if [ ! 
$code -eq 200 ]; then + echo "failed: connection to upstream with mTLS failed" + exit 1 +fi + +sleep 0.1 + +make stop + +echo "passed: connection to upstream with mTLS success" diff --git a/t/core/config_etcd.t b/t/core/config_etcd.t index 7f31fc8592be..75b0e9bb67a2 100644 --- a/t/core/config_etcd.t +++ b/t/core/config_etcd.t @@ -60,7 +60,7 @@ qr/(connection refused){1,}/ apisix: node_listen: 1984 ssl: - ssl_trusted_certificate: t/servroot/conf/cert/etcd.pem + ssl_trusted_combined_path: t/servroot/conf/cert/etcd.pem deployment: role: traditional role_traditional: diff --git a/t/core/table.t b/t/core/table.t index c3ec5a7c0d1f..38616ae535bf 100644 --- a/t/core/table.t +++ b/t/core/table.t @@ -215,3 +215,147 @@ GET /t GET /t --- response_body ok + + + +=== TEST 8: deepcopy copy same table only once +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local tmp = { name = "tmp", priority = 1, enabled = true } + local origin = { a = { b = tmp }, c = tmp} + local copy = core.table.deepcopy(origin) + if not core.table.deep_eq(copy, origin) then + ngx.say("copy: ", json.encode(expect), ", origin: ", json.encode(actual)) + return + end + if copy.a.b ~= copy.c then + ngx.say("copy.a.b should be the same as copy.c") + return + end + ngx.say("ok") + } + } +--- request +GET /t +--- response_body +ok + + + +=== TEST 9: reference same table +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local deepcopy = core.table.deepcopy + local tab1 = {name = "tab1"} + local tab2 = { + a = tab1, + b = tab1 + } + local tab_copied = deepcopy(tab2) + + ngx.say("table copied: ", require("toolkit.json").encode(tab_copied)) + + ngx.say("tab1 == tab2.a: ", tab1 == tab2.a) + ngx.say("tab2.a == tab2.b: ", tab2.a == tab2.b) + + ngx.say("tab_copied.a == tab1: ", tab_copied.a == tab1) + ngx.say("tab_copied.a == tab_copied.b: ", tab_copied.a == tab_copied.b) + } + } +--- request +GET /t +--- response_body +table copied: 
{"a":{"name":"tab1"},"b":{"name":"tab1"}} +tab1 == tab2.a: true +tab2.a == tab2.b: true +tab_copied.a == tab1: false +tab_copied.a == tab_copied.b: true + + + +=== TEST 10: reference table self(root node) +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local deepcopy = core.table.deepcopy + local tab1 = {name = "tab1"} + local tab2 = { + a = tab1, + } + tab2.c = tab2 + + local tab_copied = deepcopy(tab2) + + ngx.say("tab_copied.a == tab1: ", tab_copied.a == tab_copied.b) + ngx.say("tab_copied == tab_copied.c: ", tab_copied == tab_copied.c) + } + } +--- request +GET /t +--- response_body +tab_copied.a == tab1: false +tab_copied == tab_copied.c: true + + + +=== TEST 11: reference table self(sub node) +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local deepcopy = core.table.deepcopy + local tab_org = { + a = { + a2 = "a2" + }, + } + tab_org.b = tab_org.a + + local tab_copied = deepcopy(tab_org) + ngx.say("table copied: ", require("toolkit.json").encode(tab_copied)) + ngx.say("tab_copied.a == tab_copied.b: ", tab_copied.a == tab_copied.b) + } + } +--- request +GET /t +--- response_body +table copied: {"a":{"a2":"a2"},"b":{"a2":"a2"}} +tab_copied.a == tab_copied.b: true + + + +=== TEST 12: shallow copy +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local deepcopy = core.table.deepcopy + local t1 = {name = "tab1"} + local t2 = {name = "tab2"} + local tab = { + a = {b = {c = t1}}, + x = {y = t2}, + } + local tab_copied = deepcopy(tab, { shallows = { "self.a.b.c" }}) + + ngx.say("table copied: ", require("toolkit.json").encode(tab_copied)) + + ngx.say("tab_copied.a.b.c == tab.a.b.c1: ", tab_copied.a.b.c == tab.a.b.c) + ngx.say("tab_copied.a.b.c == t1: ", tab_copied.a.b.c == t1) + ngx.say("tab_copied.x.y == tab.x.y: ", tab_copied.x.y == tab.x.y) + ngx.say("tab_copied.x.y == t2: ", tab_copied.x.y == t2) + } + } +--- request +GET /t +--- 
response_body +table copied: {"a":{"b":{"c":{"name":"tab1"}}},"x":{"y":{"name":"tab2"}}} +tab_copied.a.b.c == tab.a.b.c1: true +tab_copied.a.b.c == t1: true +tab_copied.x.y == tab.x.y: false +tab_copied.x.y == t2: false diff --git a/t/plugin/ai-proxy2.t b/t/plugin/ai-proxy2.t index 6e398e5665a4..cda3786b7e75 100644 --- a/t/plugin/ai-proxy2.t +++ b/t/plugin/ai-proxy2.t @@ -198,3 +198,58 @@ POST /anything --- error_code: 200 --- response_body passed + + + +=== TEST 5: set route without overriding the endpoint_url +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "uri": "/anything", + "plugins": { + "ai-proxy": { + "auth": { + "header": { + "Authorization": "some-key" + } + }, + "model": { + "provider": "openai", + "name": "gpt-4", + "options": { + "max_tokens": 512, + "temperature": 1.0 + } + } + } + }, + "upstream": { + "type": "roundrobin", + "nodes": { + "httpbin.org": 1 + } + } + }]] + ) + + if code >= 300 then + ngx.status = code + end + ngx.say(body) + } + } +--- response_body +passed + + + +=== TEST 6: send request +--- custom_trusted_cert: /etc/ssl/certs/ca-certificates.crt +--- request +POST /anything +{ "messages": [ { "role": "system", "content": "You are a mathematician" }, { "role": "user", "content": "What is 1+1?"} ] } +--- error_code: 401 diff --git a/t/plugin/jwt-auth4.t b/t/plugin/jwt-auth4.t index 075fbb85f01f..48fbc5de1baf 100644 --- a/t/plugin/jwt-auth4.t +++ b/t/plugin/jwt-auth4.t @@ -160,3 +160,73 @@ GET /t --- more_headers --- response_body hello world + + + +=== TEST 4: ensure secret is non empty +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin").test + -- prepare consumer with a custom key claim name + local csm_code, csm_body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "mike", + "plugins": { + "jwt-auth": { + "key": 
"custom-user-key", + "secret": "" + } + } + }]] + ) + if csm_code == 200 then + ngx.status = 500 + ngx.say("error") + return + end + ngx.status = csm_code + ngx.say(csm_body) + } + } +--- error_code: 400 +--- response_body eval +qr/\\"secret\\" validation failed: string too short, expected at least 1, got 0/ + + + +=== TEST 5: ensure key is non empty +--- config + location /t { + content_by_lua_block { + local core = require("apisix.core") + local t = require("lib.test_admin").test + -- prepare consumer with a custom key claim name + local csm_code, csm_body = t('/apisix/admin/consumers', + ngx.HTTP_PUT, + [[{ + "username": "mike", + "plugins": { + "jwt-auth": { + "key": "", + "algorithm": "RS256", + "public_key": "somekey", + "private_key": "someprivkey" + } + } + }]] + ) + if csm_code == 200 then + ngx.status = 500 + ngx.say("error") + return + end + ngx.status = csm_code + ngx.say(csm_body) + } + } +--- error_code: 400 +--- response_body eval +qr/\\"key\\" validation failed: string too short, expected at least 1, got 0/ diff --git a/t/plugin/limit-count5.t b/t/plugin/limit-count5.t index cb4615990fb1..4227b4f10891 100644 --- a/t/plugin/limit-count5.t +++ b/t/plugin/limit-count5.t @@ -137,3 +137,66 @@ passed ["GET /hello", "GET /hello", "GET /hello", "GET /hello"] --- error_code eval [200, 200, 503, 503] + + + +=== TEST 4: customize rate limit headers by plugin metadata +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "methods": ["GET"], + "plugins": { + "limit-count": { + "count": 10, + "time_window": 60, + "rejected_code": 503, + "key_type": "var", + "key": "remote_addr" + } + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello" + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + local code, meta_body = t('/apisix/admin/plugin_metadata/limit-count', + 
ngx.HTTP_PUT, + [[{ + "limit_header":"APISIX-RATELIMIT-QUOTA", + "remaining_header":"APISIX-RATELIMIT-REMAINING", + "reset_header":"APISIX-RATELIMIT-RESET" + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say("fail") + return + end + ngx.say("passed") + } + } +--- response_body +passed + + + +=== TEST 5: check rate limit headers +--- request +GET /hello +--- response_headers_like +APISIX-RATELIMIT-QUOTA: 10 +APISIX-RATELIMIT-REMAINING: 9 +APISIX-RATELIMIT-RESET: \d+ diff --git a/t/plugin/prometheus-metric-expire.t b/t/plugin/prometheus-metric-expire.t new file mode 100644 index 000000000000..caad85ea04a5 --- /dev/null +++ b/t/plugin/prometheus-metric-expire.t @@ -0,0 +1,132 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +BEGIN { + if ($ENV{TEST_NGINX_CHECK_LEAK}) { + $SkipReason = "unavailable for the hup tests"; + + } else { + $ENV{TEST_NGINX_USE_HUP} = 1; + undef $ENV{TEST_NGINX_USE_STAP}; + } +} + +use t::APISIX 'no_plan'; + +add_block_preprocessor(sub { + my ($block) = @_; + + if (!defined $block->request) { + $block->set_value("request", "GET /t"); + } +}); + +run_tests; + +__DATA__ + +=== TEST 1: set route with prometheus ttl +--- yaml_config +plugin_attr: + prometheus: + default_buckets: + - 15 + - 55 + - 105 + - 205 + - 505 + metrics: + http_status: + expire: 1 + http_latency: + expire: 1 + bandwidth: + expire: 1 +--- config + location /t { + content_by_lua_block { + local t = require("lib.test_admin").test + local code = t('/apisix/admin/routes/metrics', + ngx.HTTP_PUT, + [[{ + "plugins": { + "public-api": {} + }, + "uri": "/apisix/prometheus/metrics" + }]] + ) + if code >= 300 then + ngx.status = code + return + end + local code, body = t('/apisix/admin/routes/1', + ngx.HTTP_PUT, + [[{ + "plugins": { + "prometheus": {} + }, + "upstream": { + "nodes": { + "127.0.0.1:1980": 1 + }, + "type": "roundrobin" + }, + "uri": "/hello1" + }]] + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + local code, body = t('/hello1', + ngx.HTTP_GET, + "", + nil, + nil + ) + if code >= 300 then + ngx.status = code + ngx.say(body) + return + end + ngx.sleep(2) + local code, pass, body = t('/apisix/prometheus/metrics', + ngx.HTTP_GET, + "", + nil, + nil + ) + + local metrics_to_check = {"apisix_bandwidth", "http_latency", "http_status",} + + -- verify that above mentioned metrics are not in the metrics response + for _, v in pairs(metrics_to_check) do + local match, err = ngx.re.match(body, "\\b" .. v .. "\\b", "m") + if match then + ngx.status = 500 + ngx.say("error found " .. v .. 
" in metrics") + return + end + end + + ngx.say("passed") + } + } +--- request +GET /t +--- response_body +passed diff --git a/t/plugin/prometheus4.t b/t/plugin/prometheus4.t index 758f2aae984f..2cc1508c8ae4 100644 --- a/t/plugin/prometheus4.t +++ b/t/plugin/prometheus4.t @@ -192,95 +192,7 @@ apisix_http_latency_bucket\{type="upstream",route="1",service="",consumer="",nod -=== TEST 9: set route with prometheus ttl ---- yaml_config -plugin_attr: - prometheus: - default_buckets: - - 15 - - 55 - - 105 - - 205 - - 505 - expire: 1 ---- config - location /t { - content_by_lua_block { - local t = require("lib.test_admin").test - - local code = t('/apisix/admin/routes/metrics', - ngx.HTTP_PUT, - [[{ - "plugins": { - "public-api": {} - }, - "uri": "/apisix/prometheus/metrics" - }]] - ) - if code >= 300 then - ngx.status = code - return - end - - local code, body = t('/apisix/admin/routes/1', - ngx.HTTP_PUT, - [[{ - "plugins": { - "prometheus": {} - }, - "upstream": { - "nodes": { - "127.0.0.1:1980": 1 - }, - "type": "roundrobin" - }, - "uri": "/hello1" - }]] - ) - - if code >= 300 then - ngx.status = code - ngx.say(body) - return - end - - local code, body = t('/hello1', - ngx.HTTP_GET, - "", - nil, - nil - ) - - if code >= 300 then - ngx.status = code - ngx.say(body) - return - end - - ngx.sleep(2) - - local code, pass, body = t('/apisix/prometheus/metrics', - ngx.HTTP_GET, - "", - nil, - nil - ) - ngx.status = code - ngx.say(body) - } - } ---- request -GET /t ---- response_body_unlike eval -qr/apisix_http_latency_bucket\{type="upstream",route="1",service="",consumer="",node="127.0.0.1",le="15"\} \d+ -apisix_http_latency_bucket\{type="upstream",route="1",service="",consumer="",node="127.0.0.1",le="55"\} \d+ -apisix_http_latency_bucket\{type="upstream",route="1",service="",consumer="",node="127.0.0.1",le="105"\} \d+ -apisix_http_latency_bucket\{type="upstream",route="1",service="",consumer="",node="127.0.0.1",le="205"\} \d+ 
-apisix_http_latency_bucket\{type="upstream",route="1",service="",consumer="",node="127.0.0.1",le="505"\} \d+/ - - - -=== TEST 10: set sys plugins +=== TEST 9: set sys plugins --- config location /t { content_by_lua_block { @@ -332,7 +244,7 @@ passed -=== TEST 11: remove prometheus -> reload -> send batch request -> add prometheus for next tests +=== TEST 10: remove prometheus -> reload -> send batch request -> add prometheus for next tests --- yaml_config deployment: role: traditional @@ -366,7 +278,7 @@ qr/404 Not Found/ -=== TEST 12: fetch prometheus metrics -> batch_process_entries metrics should not be present +=== TEST 11: fetch prometheus metrics -> batch_process_entries metrics should not be present --- yaml_config deployment: role: traditional @@ -387,14 +299,14 @@ qr/apisix_batch_process_entries\{name="sys-logger",route_id="9",server_addr="127 -=== TEST 13: hit batch-process-metrics with prometheus enabled from TEST 11 +=== TEST 12: hit batch-process-metrics with prometheus enabled from TEST 11 --- request GET /batch-process-metrics --- error_code: 404 -=== TEST 14: batch_process_entries metrics should be present now +=== TEST 13: batch_process_entries metrics should be present now --- request GET /apisix/prometheus/metrics --- error_code: 200 diff --git a/utils/install-dependencies.sh b/utils/install-dependencies.sh index 305421e8363e..b084a35369e6 100755 --- a/utils/install-dependencies.sh +++ b/utils/install-dependencies.sh @@ -78,7 +78,7 @@ function install_dependencies_with_apt() { sudo apt-get update # install some compilation tools - sudo apt-get install -y curl make gcc g++ cpanminus libpcre3 libpcre3-dev libldap2-dev libyaml-dev unzip openresty-zlib-dev openresty-pcre-dev + sudo apt-get install -y curl make gcc g++ cpanminus libpcre3 libpcre3-dev libyaml-dev unzip openresty-zlib-dev openresty-pcre-dev } # Identify the different distributions and call the corresponding function